| Column | Type | Length / range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 7 to 1.04M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 247 |
| max_stars_repo_name | string | length 4 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 368k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 247 |
| max_issues_repo_name | string | length 4 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 247 |
| max_forks_repo_name | string | length 4 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 1 to 1.04M |
| avg_line_length | float64 | 1.77 to 618k |
| max_line_length | int64 | 1 to 1.02M |
| alphanum_fraction | float64 | 0 to 1 |
| original_content | string | length 7 to 1.04M |
| filtered:remove_function_no_docstring | int64 | -102 to 942k |
| filtered:remove_class_no_docstring | int64 | -354 to 977k |
| filtered:remove_delete_markers | int64 | 0 to 60.1k |
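The rows below follow this schema: each row pairs repository metadata with a file's content, its original_content before filtering, and a few per-file statistics. As a minimal sketch of working with such rows (assuming the split has already been exported to a pandas DataFrame named `df` with exactly these columns; the helper name and the star threshold are illustrative, not part of the dataset):

```python
import pandas as pd

def select_popular_python(df: pd.DataFrame) -> pd.DataFrame:
    """Keep Python files from repositories with at least 10 stars."""
    mask = (
        (df["lang"] == "Python")
        & (df["ext"] == "py")
        # max_stars_count is nullable, so treat missing values as 0
        & (df["max_stars_count"].fillna(0) >= 10)
    )
    cols = ["max_stars_repo_name", "max_stars_repo_path", "size", "content"]
    return df.loc[mask, cols]
```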
Row 1: scripts/run_iqtree.py (fmalmeida/PopPIPE)

| Field | Value |
|---|---|
| hexsha | 2529dc53e5cd9ca70e79a3921134c3237fe3ea48 |
| size | 972 |
| ext / lang | py / Python |
| max_stars/issues/forks_repo_path | scripts/run_iqtree.py |
| max_stars/issues/forks_repo_name | fmalmeida/PopPIPE |
| max_stars/issues/forks_repo_head_hexsha | 0012ff55c9959f1c7ac359412ca22d9c1948d00c |
| max_stars/issues/forks_repo_licenses | ["Apache-2.0"] |
| max_stars_count (event min to max) | 7 (2020-05-04T10:16:25.000Z to 2021-05-09T20:22:19.000Z) |
| max_issues_count (event min to max) | 4 (2020-07-01T09:07:47.000Z to 2021-05-19T12:04:37.000Z) |
| max_forks_count (event min to max) | 2 (2021-05-09T00:34:29.000Z to 2021-11-21T22:23:17.000Z) |

content:

```python
import subprocess
import dendropy
from shutil import copyfile
if snakemake.params.enabled:
iqtree_cmd = "iqtree --quiet -s " + snakemake.input.alignment + " -t " + snakemake.input.start_tree + \
" -T " + str(snakemake.threads) + " --prefix " + snakemake.params.prefix
if snakemake.params.mode == "full":
iqtree_cmd += " -m " + snakemake.params.model
elif snakemake.params.mode == "fast":
iqtree_cmd += " --fast"
subprocess.run(iqtree_cmd, shell=True, check=True)
else:
copyfile(snakemake.input.start_tree, snakemake.output.unrooted)
tree = dendropy.Tree.get(path=snakemake.output.unrooted, schema="newick")
tree.reroot_at_midpoint(update_bipartitions=True, suppress_unifurcations=False)
tree.reroot_at_midpoint(update_bipartitions=True, suppress_unifurcations=False)
tree.write(path=str(snakemake.output.rooted),
schema="newick",
suppress_rooting=True,
unquoted_underscores=True)
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 42.26087 | 107 | 0.70679 |

original_content:

```python
import subprocess
import dendropy
from shutil import copyfile
if snakemake.params.enabled:
iqtree_cmd = "iqtree --quiet -s " + snakemake.input.alignment + " -t " + snakemake.input.start_tree + \
" -T " + str(snakemake.threads) + " --prefix " + snakemake.params.prefix
if snakemake.params.mode == "full":
iqtree_cmd += " -m " + snakemake.params.model
elif snakemake.params.mode == "fast":
iqtree_cmd += " --fast"
subprocess.run(iqtree_cmd, shell=True, check=True)
else:
copyfile(snakemake.input.start_tree, snakemake.output.unrooted)
tree = dendropy.Tree.get(path=snakemake.output.unrooted, schema="newick")
tree.reroot_at_midpoint(update_bipartitions=True, suppress_unifurcations=False)
tree.reroot_at_midpoint(update_bipartitions=True, suppress_unifurcations=False)
tree.write(path=str(snakemake.output.rooted),
schema="newick",
suppress_rooting=True,
unquoted_underscores=True)
```

| filtered:remove_function_no_docstring | filtered:remove_class_no_docstring | filtered:remove_delete_markers |
|---|---|---|
| 0 | 0 | 0 |
Row 2: example/toolbox/management/commands/analyzeuniquekeys.py (rkiddy/django-calaccess-raw-data)

| Field | Value |
|---|---|
| hexsha | daae40433f08ca14b1c202d237fcaef60659f854 |
| size | 1,411 |
| ext / lang | py / Python |
| max_stars/issues/forks_repo_path | example/toolbox/management/commands/analyzeuniquekeys.py |
| max_stars/issues/forks_repo_name | rkiddy/django-calaccess-raw-data |
| max_stars/issues/forks_repo_head_hexsha | dab2bf103b713eee6d76295ffbd2d5d58c2796e4 |
| max_stars/issues/forks_repo_licenses | ["MIT"] |
| max_stars_count (event min to max) | 48 (2015-01-10T18:06:03.000Z to 2022-01-27T16:48:29.000Z) |
| max_issues_count (event min to max) | 1,193 (2015-01-07T06:35:20.000Z to 2021-06-19T11:43:20.000Z) |
| max_forks_count (event min to max) | 163 (2015-01-10T18:06:09.000Z to 2022-01-14T00:29:12.000Z) |

content:

```python
import os
import time
import calculate
from github import Github
from django.conf import settings
from calaccess_raw import get_model_list
from calaccess_raw.management.commands import CalAccessCommand
from django.contrib.humanize.templatetags.humanize import intcomma
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 31.355556 | 76 | 0.581857 |

original_content:

```python
import os
import time
import calculate
from github import Github
from django.conf import settings
from calaccess_raw import get_model_list
from calaccess_raw.management.commands import CalAccessCommand
from django.contrib.humanize.templatetags.humanize import intcomma
class Command(CalAccessCommand):
help = 'Analyze how many model lack a UNIQUE_KEY definition'
def handle(self, *args, **kwargs):
"""
Make it happen.
"""
# Loop through all the models and find any fields without docs
missing_list = []
model_count = 0
for m in get_model_list():
model_count += 1
if m.UNIQUE_KEY is None:
self.log("Missing: %s.%s" % (
m().klass_group,
m().klass_name,
)
)
missing_list.append(m)
# If everything is done, declare victory
missing_count = len(missing_list)
if not missing_count:
self.success("All %s models have a UNIQUE_KEY!" % missing_count)
return False
# If not, loop through the missing and create issues
self.failure(
"%s/%s (%d%%) of models lack a UNIQUE_KEY" % (
intcomma(missing_count),
model_count,
calculate.percentage(missing_count, model_count)
)
)
```

| filtered:remove_function_no_docstring | filtered:remove_class_no_docstring | filtered:remove_delete_markers |
|---|---|---|
| 0 | 1,118 | 23 |
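For this row the content cell keeps only the module imports, while original_content still contains the `Command` class; the nonzero filtered:remove_class_no_docstring and filtered:remove_delete_markers values appear to reflect that removal. As an illustrative sketch of inspecting such a removal with the standard library (the actual filter implementation is not shown in this dump, and the variable names below are hypothetical):

```python
import difflib

def show_removed_lines(original_content: str, content: str) -> None:
    """Print the lines present in original_content but missing from content."""
    diff = difflib.unified_diff(
        original_content.splitlines(),
        content.splitlines(),
        fromfile="original_content",
        tofile="content",
        lineterm="",
    )
    for line in diff:
        # "-" lines are in the original but not in the filtered content
        if line.startswith("-") and not line.startswith("---"):
            print(line[1:])

# Hypothetical usage with the two cells of this row:
# show_removed_lines(row["original_content"], row["content"])
```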
Row 3: Python/frequency_counter.py (Gaminee/Data_Structures_and_Algorithms.github.io)

| Field | Value |
|---|---|
| hexsha | e407015ffc89e6d1d5d878c1de0fb0517395640a |
| size | 213 |
| ext / lang | py / Python |
| max_stars/issues/forks_repo_path | Python/frequency_counter.py |
| max_stars/issues/forks_repo_name | Gaminee/Data_Structures_and_Algorithms.github.io |
| max_stars/issues/forks_repo_head_hexsha | f25d368e17dfcbe81079f286012355281af8b390 |
| max_stars/issues/forks_repo_licenses | ["Unlicense"] |
| max_stars_count (event min to max) | 77 (2020-10-01T10:06:59.000Z to 2021-11-08T08:57:18.000Z) |
| max_issues_count (event min to max) | 64 (2020-10-01T09:27:58.000Z to 2020-12-02T10:47:01.000Z) |
| max_forks_count (event min to max) | 327 (2020-09-26T17:06:03.000Z to 2021-10-09T06:04:39.000Z) |

content:

```python
L = [11,22,66,22,11,44,55,66,88,77,22,11,44,22,33,77,55,44]
print('The given list is: ')
print(L)
D = {}
for item in L:
if item not in D:
D[item] = L.count(item)
print('Frequency of different items is:')
print(D)
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 21.3 | 59 | 0.647887 |

original_content:

```python
L = [11,22,66,22,11,44,55,66,88,77,22,11,44,22,33,77,55,44]
print('The given list is: ')
print(L)
D = {}
for item in L:
if item not in D:
D[item] = L.count(item)
print('Frequency of different items is:')
print(D)
```

| filtered:remove_function_no_docstring | filtered:remove_class_no_docstring | filtered:remove_delete_markers |
|---|---|---|
| 0 | 0 | 0 |
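This short row makes the per-file statistics easy to reproduce: 213 characters over 10 newline-separated segments gives the 21.3 average, and 0.647887 × 213 ≈ 138 alphanumeric characters. A sketch of how the three columns appear to be computed (the definitions are inferred from the sample values, not documented in this dump):

```python
def text_stats(text: str) -> dict:
    """Reproduce the three per-file statistics shown above.

    Definitions are inferred: the line count is the number of
    newline-separated segments (including a trailing empty one if the
    file ends with a newline), which matches the sample values.
    """
    segments = text.split("\n")
    return {
        "avg_line_length": len(text) / len(segments),
        "max_line_length": max(len(line) for line in segments),
        "alphanum_fraction": sum(ch.isalnum() for ch in text) / len(text),
    }

# For this row (content == original_content), the expected result is roughly
# {"avg_line_length": 21.3, "max_line_length": 59, "alphanum_fraction": 0.6478...}
```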
Row 4: server/datasource/models/candles.py (yizhang7210/Acre)

| Field | Value |
|---|---|
| hexsha | 180fc1c1d07b42afefb6638e91e474bd5069892e |
| size | 5,407 |
| ext / lang | py / Python |
| max_stars/issues/forks_repo_path | server/datasource/models/candles.py |
| max_stars/issues/forks_repo_name | yizhang7210/Acre |
| max_stars/issues/forks_repo_head_hexsha | c98cf8a4fdfb223a1958e8e61df759f889a1b13f |
| max_stars/issues/forks_repo_licenses | ["MIT"] |
| max_stars_count (event min to max) | 2 (2017-11-27T21:55:21.000Z to 2017-12-30T03:34:40.000Z) |
| max_issues_count (event min to max) | 30 (2017-09-06T12:00:08.000Z to 2018-06-20T22:47:46.000Z) |
| max_forks_count (event min to max) | 1 (2021-04-05T13:59:37.000Z to 2021-04-05T13:59:37.000Z) |

content:

```python
""" Data model and data access methods for Candles.
"""
import pytz
from core.models.instruments import Instrument
from django.db import models
class Candle(models.Model):
""" Candle data model.
"""
# pylint: disable=too-many-instance-attributes
instrument = models.ForeignKey(Instrument, on_delete=models.PROTECT)
start_time = models.DateTimeField()
volume = models.PositiveIntegerField()
granularity = models.CharField(max_length=5)
open_bid = models.DecimalField(max_digits=12, decimal_places=6)
high_bid = models.DecimalField(max_digits=12, decimal_places=6)
low_bid = models.DecimalField(max_digits=12, decimal_places=6)
close_bid = models.DecimalField(max_digits=12, decimal_places=6)
open_ask = models.DecimalField(max_digits=12, decimal_places=6)
high_ask = models.DecimalField(max_digits=12, decimal_places=6)
low_ask = models.DecimalField(max_digits=12, decimal_places=6)
close_ask = models.DecimalField(max_digits=12, decimal_places=6)
def create_one(**kwargs):
""" Create a Candle object with the given fields.
Args:
Named arguments.
instrument: Instrument object.
start_time: Datetime object. Candle start time.
volume: Positive integer.
granularity: String. 'D' for Daily.
bid: Dictionary with 'o', 'h', 'l', 'c'
ask: Dictionary with 'o', 'h', 'l', 'c'
Returns:
Candle object with the given fields.
"""
if 'bid' in kwargs:
bid = kwargs.get('bid')
del kwargs['bid']
if bid is not None:
kwargs['open_bid'] = bid.get('o')
kwargs['high_bid'] = bid.get('h')
kwargs['low_bid'] = bid.get('l')
kwargs['close_bid'] = bid.get('c')
if 'ask' in kwargs:
ask = kwargs.get('ask')
del kwargs['ask']
if ask is not None:
kwargs['open_ask'] = ask.get('o')
kwargs['high_ask'] = ask.get('h')
kwargs['low_ask'] = ask.get('l')
kwargs['close_ask'] = ask.get('c')
if 'start_time' in kwargs:
kwargs['start_time'] = add_timezone(kwargs.get('start_time'))
return Candle(**kwargs)
def delete_all():
""" Delete all candles in the database.
Args:
None.
"""
return Candle.objects.all().delete()
def get_all(order_by):
""" Returns all candles in the database.
Args:
order_by: List of strings to order the candles by.
Returns:
List of all Candle objects (QuerySet).
"""
return Candle.objects.all().order_by(*order_by)
def get_candles(**kwargs):
""" Retrieve a list of candles with given conditions.
Args:
kwargs: Named arguments for filtering candles.
instrument: Instrument object. Filter by this instrument.
start: Datetime. Filter candles with later time than 'start'.
end: Datetime. Filter candles with earlier time than 'end'.
granularity: String. Granularity of the querying candle.
order_by: String. Space delimited string of fields to order by.
Returns:
List of Candle objects satisfying the conditions (QuerySet).
"""
candles = Candle.objects.all()
if kwargs.get('instrument') is not None:
candles = candles.filter(instrument=kwargs.get('instrument'))
if kwargs.get('start') is not None:
start_time = add_timezone(kwargs.get('start'))
candles = candles.filter(start_time__gte=start_time)
if kwargs.get('end') is not None:
end_time = add_timezone(kwargs.get('end'))
candles = candles.filter(start_time__lte=end_time)
if kwargs.get('granularity') is not None:
candles = candles.filter(granularity=kwargs.get('granularity'))
if kwargs.get('order_by') is not None:
candles = candles.order_by(kwargs.get('order_by'))
return candles
def get_last(**kwargs):
""" Retrieve the latest candle of given instrument and granularity.
Args:
kwargs: Named arguments for filtering candles.
instrument: Instrument object.
granularity: String. The granularity of the candles.
before: Datetime. Get the last candle before this time.
Returns:
Candle object if exists or None.
"""
candles = get_candles(
instrument=kwargs.get('instrument'),
granularity=kwargs.get('granularity'),
end=kwargs.get('before'),
order_by='-start_time')
if candles:
return candles[0]
def add_timezone(time_record):
""" Add a default America/New_York timezone info to a datetime object.
Args:
time_record: Datetime object.
Returns:
Datetime object with a timezone if time_record did not have tzinfo,
otherwise return time_record itself.
"""
if time_record.tzname() is None:
return time_record.replace(tzinfo=pytz.timezone('America/New_York'))
return time_record
def insert_many(candles):
""" Bulk insert a list of candles.
Args:
candles: List of Candle objects to be inserted.
"""
Candle.objects.bulk_create(candles)
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 31.994083 | 79 | 0.62567 |

original_content:

```python
""" Data model and data access methods for Candles.
"""
import pytz
from core.models.instruments import Instrument
from django.db import models
class Candle(models.Model):
""" Candle data model.
"""
# pylint: disable=too-many-instance-attributes
instrument = models.ForeignKey(Instrument, on_delete=models.PROTECT)
start_time = models.DateTimeField()
volume = models.PositiveIntegerField()
granularity = models.CharField(max_length=5)
open_bid = models.DecimalField(max_digits=12, decimal_places=6)
high_bid = models.DecimalField(max_digits=12, decimal_places=6)
low_bid = models.DecimalField(max_digits=12, decimal_places=6)
close_bid = models.DecimalField(max_digits=12, decimal_places=6)
open_ask = models.DecimalField(max_digits=12, decimal_places=6)
high_ask = models.DecimalField(max_digits=12, decimal_places=6)
low_ask = models.DecimalField(max_digits=12, decimal_places=6)
close_ask = models.DecimalField(max_digits=12, decimal_places=6)
class Meta:
unique_together = (('instrument', 'start_time', 'granularity'), )
def create_one(**kwargs):
""" Create a Candle object with the given fields.
Args:
Named arguments.
instrument: Instrument object.
start_time: Datetime object. Candle start time.
volume: Positive integer.
granularity: String. 'D' for Daily.
bid: Dictionary with 'o', 'h', 'l', 'c'
ask: Dictionary with 'o', 'h', 'l', 'c'
Returns:
Candle object with the given fields.
"""
if 'bid' in kwargs:
bid = kwargs.get('bid')
del kwargs['bid']
if bid is not None:
kwargs['open_bid'] = bid.get('o')
kwargs['high_bid'] = bid.get('h')
kwargs['low_bid'] = bid.get('l')
kwargs['close_bid'] = bid.get('c')
if 'ask' in kwargs:
ask = kwargs.get('ask')
del kwargs['ask']
if ask is not None:
kwargs['open_ask'] = ask.get('o')
kwargs['high_ask'] = ask.get('h')
kwargs['low_ask'] = ask.get('l')
kwargs['close_ask'] = ask.get('c')
if 'start_time' in kwargs:
kwargs['start_time'] = add_timezone(kwargs.get('start_time'))
return Candle(**kwargs)
def delete_all():
""" Delete all candles in the database.
Args:
None.
"""
return Candle.objects.all().delete()
def get_all(order_by):
""" Returns all candles in the database.
Args:
order_by: List of strings to order the candles by.
Returns:
List of all Candle objects (QuerySet).
"""
return Candle.objects.all().order_by(*order_by)
def get_candles(**kwargs):
""" Retrieve a list of candles with given conditions.
Args:
kwargs: Named arguments for filtering candles.
instrument: Instrument object. Filter by this instrument.
start: Datetime. Filter candles with later time than 'start'.
end: Datetime. Filter candles with earlier time than 'end'.
granularity: String. Granularity of the querying candle.
order_by: String. Space delimited string of fields to order by.
Returns:
List of Candle objects satisfying the conditions (QuerySet).
"""
candles = Candle.objects.all()
if kwargs.get('instrument') is not None:
candles = candles.filter(instrument=kwargs.get('instrument'))
if kwargs.get('start') is not None:
start_time = add_timezone(kwargs.get('start'))
candles = candles.filter(start_time__gte=start_time)
if kwargs.get('end') is not None:
end_time = add_timezone(kwargs.get('end'))
candles = candles.filter(start_time__lte=end_time)
if kwargs.get('granularity') is not None:
candles = candles.filter(granularity=kwargs.get('granularity'))
if kwargs.get('order_by') is not None:
candles = candles.order_by(kwargs.get('order_by'))
return candles
def get_last(**kwargs):
""" Retrieve the latest candle of given instrument and granularity.
Args:
kwargs: Named arguments for filtering candles.
instrument: Instrument object.
granularity: String. The granularity of the candles.
before: Datetime. Get the last candle before this time.
Returns:
Candle object if exists or None.
"""
candles = get_candles(
instrument=kwargs.get('instrument'),
granularity=kwargs.get('granularity'),
end=kwargs.get('before'),
order_by='-start_time')
if candles:
return candles[0]
def add_timezone(time_record):
""" Add a default America/New_York timezone info to a datetime object.
Args:
time_record: Datetime object.
Returns:
Datetime object with a timezone if time_record did not have tzinfo,
otherwise return time_record itself.
"""
if time_record.tzname() is None:
return time_record.replace(tzinfo=pytz.timezone('America/New_York'))
return time_record
def insert_many(candles):
""" Bulk insert a list of candles.
Args:
candles: List of Candle objects to be inserted.
"""
Candle.objects.bulk_create(candles)
```

| filtered:remove_function_no_docstring | filtered:remove_class_no_docstring | filtered:remove_delete_markers |
|---|---|---|
| 0 | 64 | 27 |
Row 5: models/Transformers/BertModel_backup.py (suhasgupta791/mids-w251-final-project)

| Field | Value |
|---|---|
| hexsha | 40e01bc00a83ff41fbf3dca04cf9b0eb07326e3a |
| size | 14,663 |
| ext / lang | py / Python |
| max_stars/issues/forks_repo_path | models/Transformers/BertModel_backup.py |
| max_stars/issues/forks_repo_name | suhasgupta791/mids-w251-final-project |
| max_stars/issues/forks_repo_head_hexsha | aa1ef80685c6d9b5fc8a444e438078150cc0d96c |
| max_stars/issues/forks_repo_licenses | ["Apache-2.0"] |
| max_stars_count (event min to max) | null (event dates null) |
| max_issues_count (event min to max) | null (event dates null) |
| max_forks_count (event min to max) | 1 (2020-02-14T01:10:43.000Z to 2020-02-14T01:10:43.000Z) |

content:

```python
#!/usr/bin/env python
# coding: utf-8
import itertools
import random
import numpy as np
import sys, os
import pandas as pd
import torch
from torchsummary import summary
from torchtext import data
import torch.nn as nn
import torch.utils.data
from torch.utils.data import Dataset, TensorDataset,DataLoader, RandomSampler
from torch.utils.tensorboard import SummaryWriter
import torchvision
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score, classification_report, confusion_matrix
from tqdm import tqdm, tqdm_notebook
import warnings
warnings.filterwarnings(action='once')
import pickle
import shutil
import time
import matplotlib.pyplot as plt
import tensorflow as tf
# Import transformers specific packages
from transformers import BertTokenizer, BertModel, BertConfig
from transformers import BertForSequenceClassification, BertForTokenClassification
from transformers import AdamW,get_linear_schedule_with_warmup, pipeline
# Import package for data parallelism to train on multi-GPU machines
from models.Transformers.parallel import DataParallelModel, DataParallelCriterion
# Check if cuda is available
# Set the device and empty cache
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if device =='cuda':
from apex import amp
torch.cuda.empty_cache()
torch.backends.cudnn.deterministic = True
# Class for model training and inference
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 47.14791 | 156 | 0.613994 |

original_content:

```python
#!/usr/bin/env python
# coding: utf-8
import itertools
import random
import numpy as np
import sys, os
import pandas as pd
import torch
from torchsummary import summary
from torchtext import data
import torch.nn as nn
import torch.utils.data
from torch.utils.data import Dataset, TensorDataset,DataLoader, RandomSampler
from torch.utils.tensorboard import SummaryWriter
import torchvision
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score, classification_report, confusion_matrix
from tqdm import tqdm, tqdm_notebook
import warnings
warnings.filterwarnings(action='once')
import pickle
import shutil
import time
import matplotlib.pyplot as plt
import tensorflow as tf
# Import transformers specific packages
from transformers import BertTokenizer, BertModel, BertConfig
from transformers import BertForSequenceClassification, BertForTokenClassification
from transformers import AdamW,get_linear_schedule_with_warmup, pipeline
# Import package for data parallelism to train on multi-GPU machines
from models.Transformers.parallel import DataParallelModel, DataParallelCriterion
# Check if cuda is available
# Set the device and empty cache
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if device =='cuda':
from apex import amp
torch.cuda.empty_cache()
torch.backends.cudnn.deterministic = True
# Class for model training and inference
class Bert_Model():
def __init__(self,train_df,bert_model_name,bert_model_path,
tokenizer,test_df=None,
max_seq_length=128,seed=1234):
if max_seq_length > tokenizer.max_model_input_sizes[bert_model_name]:
print("Max sequence length specified > 512!!... resetting to 128")
print("If you don't want this then set max_seq_length to <= 512")
self._MAX_SEQUENCE_LENGTH = 128
else:
self._MAX_SEQUENCE_LENGTH = max_seq_length
self._SEED = seed
self._WORK_DIR = "/root/models/Tranformer_based/workingdir/"
self._bert_model_path=bert_model_path
self._bert_model_name=bert_model_name
self._train_data=train_df
if test_df:
self._test_size=test_df
else:
self._test_size=0
self._tokenizer = tokenizer
self._training_stats = []
def tokenize(self,text_array):
''' Returns tokenized IDs and attention mask
The transformers encode_plus method returns the following:
{
input_ids: list[int],
token_type_ids: list[int] if return_token_type_ids is True (default)
attention_mask: list[int] if return_attention_mask is True (default)
overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True
num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True
special_tokens_mask: list[int] if ``add_special_tokens`` if set to ``True`` and return_special_tokens_mask is True
}'''
all_tokens=[]
all_attention_mask=[]
for i,text in enumerate(tqdm(text_array)):
encoded = self._tokenizer.encode_plus(
text,
add_special_tokens=True,
max_length=self._MAX_SEQUENCE_LENGTH,
pad_to_max_length=True)
tokens = torch.tensor(encoded['input_ids'])
attention_mask = torch.tensor(encoded['attention_mask'])
all_tokens.append(tokens)
all_attention_mask.append(attention_mask)
return all_tokens,all_attention_mask
def initialize_model_for_training(self,dataset_len,EPOCHS=1,model_seed=21000,lr=2e-5,batch_size=32,
accumulation_steps=2):
# Setup model parameters
np.random.seed(model_seed)
torch.manual_seed(model_seed)
torch.cuda.manual_seed(model_seed)
torch.backends.cudnn.deterministic = True
# Empty cache
torch.cuda.empty_cache()
model = BertForSequenceClassification.from_pretrained(self._bert_model_path,
cache_dir=None,
num_labels=2,
output_attentions = False,
output_hidden_states = False)
model = model.to(device)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
num_train_optimization_steps = int(EPOCHS*dataset_len/batch_size/accumulation_steps)
optimizer = AdamW(optimizer_grouped_parameters, lr=lr,eps=1e-8,correct_bias=False) # To reproduce BertAdam specific behavior set correct_bias=False
scheduler = get_linear_schedule_with_warmup(optimizer,num_warmup_steps=10,num_training_steps=num_train_optimization_steps) # PyTorch scheduler
if device == 'cuda' :
model, optimizer = amp.initialize(model,optimizer,opt_level="O1",verbosity=0)
### Parallel GPU processing
#model = DataParallelModel(model) # using balanced data parallalism script
model = torch.nn.DataParallel(model) # using native pytorch
# criterion = nn.CrossEntropyLoss()
criterion = nn.BCELoss()
model = model.train()
model.zero_grad()
optimizer.zero_grad()
return model,optimizer,scheduler,criterion,EPOCHS
def run_training(self,model,train_dataLoader,valid_dataLoader,optimizer,scheduler,criterion,
EPOCHS=1,tr_batch_size=32,accumulation_steps=20,evaluation_steps=80,pred_thres=0.5,
logdir='./logs'):
# Data Structure for training statistics
training_stats=[]
validation_stats=[]
tr_loss = 0.
tr_accuracy = 0.
tr_auc = 0.
tr_f1 = 0.
tq = tqdm(range(EPOCHS),total=EPOCHS,leave=False)
global_step = 0
for epoch in tq:
print("--Training--")
tk0 = tqdm(enumerate(train_dataLoader),total=len(train_dataLoader),leave=True)
for step,(x_batch,attn_mask,y_batch) in tk0:
outputs = model(x_batch.to(device),
token_type_ids=None,
attention_mask=attn_mask.to(device),
labels=y_batch.to(device))
lossf,y_pred = outputs
predicted_probs,predicted_labels = self.classifyWithThreshold(y_pred,y_batch)
# Apply the additional layers
# Parallel GPU processing
#parallel_loss_criterion = DataParallelCriterion(criterion)
# Loss
loss = criterion(predicted_probs,torch.tensor(y_batch, dtype=torch.float, device=device)) # when using torch data parallel
loss = loss.mean() # Mean the loss from multiple GPUs and take care of the batch
#loss = parallel_loss_criterion(y_pred,y_batch.to(device))/accumulation_steps # when using balanced data parallel script
if device == 'cuda':
with amp.scale_loss(loss,optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()/accumulation_steps # accumulate the global loss (divide by gloabal step to reflect moving average)
# Accuracy
acc = torch.mean((predicted_labels == y_batch.to(device)).to(torch.float)).item() # accuracy for the whole batch
tr_accuracy += acc/accumulation_steps
# AUC Score
c_report_dict = self.class_report(predicted_labels,y_batch)
# print(self.conf_matrix(predicted_labels,y_batch))
f1_score = c_report_dict['1']['f1-score']
tr_f1 +=f1_score/accumulation_steps
auc = self.compute_auc_score(y_pred[:,1],y_batch)
tr_auc += auc/accumulation_steps
tk0.set_postfix(step=global_step+1,loss=loss.item(),accuracy=acc) # display running backward loss
tk0.set_postfix(step=global_step+1,loss=loss.item(),accuracy=acc) # display running backward loss
if (step+1) % accumulation_steps == 0: # Wait for several backward steps
# Write training stats to tensorboard
self.summaryWriter("train",tr_loss,tr_accuracy,tr_auc,tr_f1,global_step,logdir)
# Zero out the evaluation metrics after several backward steps
tr_accuracy = 0
tr_loss = 0
tr_auc = 0
tr_f1 = 0
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) # clip the norm to 1.0 to prevent exploding gradients
optimizer.step() # Now we can do an optimizer step
scheduler.step()
model.zero_grad()
global_step+=1 # increment forward step count
training_stats.append(
{
'step': global_step,
'train_loss': tr_loss/global_step,
'train_acc': tr_accuracy/global_step,
'train_auc': tr_auc/global_step,
})
#Run evaluation after several forward passes (determined by evaluation_steps)
if (step+1) % evaluation_steps ==0:
print("--I-- Running Validation")
eval_loss,eval_accuracy,eval_auc,eval_f1=self.run_eval(model,valid_dataLoader,global_step,criterion)
validation_stats.append(
{
'step': global_step,
'valid_loss': eval_loss,
'valid accuracy': eval_accuracy,
'valid auc score': eval_auc,
})
# Write training stats to tensorboard
self.summaryWriter("eval",eval_loss,eval_accuracy,eval_auc,eval_f1,global_step,logdir)
tq.set_postfix(train_loss=tr_loss,train_accuracy=tr_accuracy,train_auc=tr_auc,leave=False)
return model,training_stats,validation_stats
def run_eval(self,model,valid_dataLoader,global_step,criterion):
avg_loss = 0.
eval_accuracy = 0.
eval_loss = 0.
eval_auc = 0.
eval_f1 = 0.
nb_eval_steps = 0
tk0 = tqdm(enumerate(valid_dataLoader),total=len(valid_dataLoader),leave=True)
for step,(x_batch, attn_mask,y_batch) in tk0:
model.eval()
with torch.no_grad():
outputs = model(x_batch.to(device),
token_type_ids=None,
attention_mask=attn_mask.to(device),
labels=y_batch.to(device))
loss, y_pred = outputs
predicted_probs,predicted_labels = self.classifyWithThreshold(y_pred,y_batch)
# Loss
loss = criterion(predicted_probs,torch.tensor(y_batch, dtype=torch.float, device=device)) # when using torch data parallel
loss = loss.mean() # Mean the loss from multiple GPUs and to take care of batch size
eval_loss += loss.item()
# Accuracy
# Accuracy
eval_accuracy += torch.mean((predicted_labels == y_batch.to(device)).to(torch.float)).item() # accuracy for the whole batch
# AUC Score
auc = self.compute_auc_score(y_pred[:,1],y_batch.to(device))
eval_auc += auc
# F1 Score
c_report_dict = self.class_report(predicted_labels,y_batch)
f1_score = c_report_dict['1']['f1-score']
eval_f1 += f1_score
# tmp_eval_auc = self.compute_auc_score(predicted_labels, label_ids) ## ROC AUC Score
# Increment total eval step count
nb_eval_steps += 1
# Normalize to the number of steps
avg_loss = eval_loss/nb_eval_steps
avg_accuracy = eval_accuracy/nb_eval_steps
avg_auc = eval_auc/nb_eval_steps
avg_f1 = eval_f1/nb_eval_steps
tk0.set_postfix(step=global_step,avg_loss=avg_loss,avg_accuracy=avg_accuracy,avg_auc=avg_auc)
return avg_loss,avg_accuracy,avg_auc,avg_f1
# Function to calculate the accuracy of predictions vs labels
def flat_accuracy(self,preds,labels):
pred_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat)
def compute_auc_score(self,preds,labels):
labels = labels.cpu().numpy()
preds = preds.detach().cpu().numpy()
auc_score = roc_auc_score(labels.flatten(),preds.flatten())
return auc_score
def class_report(self,preds,labels):
c_report = classification_report(labels.flatten(),preds.detach().cpu().numpy().flatten(),output_dict=True,zero_division=0)
return c_report
def conf_matrix(self,preds,labels):
conf_matrix = confusion_matrix(labels.flatten(),preds.detach().cpu().numpy().flatten())
return conf_matrix
def classifyWithThreshold(self,preds,labels):
pass
pred_after_sigmoid = torch.sigmoid(preds) # Apply the sigmoid to the logits from output of Bert
pred_probs,pred_classes = torch.max(pred_after_sigmoid,dim=-1)
return pred_probs,pred_classes
def summaryWriter(self,name,loss,acc,auc,f1_score,n_iter,logdir):
# Writer will output to ./runs/ directory by default
writer = SummaryWriter(logdir)
writer.add_scalar('Loss/'+name,loss,n_iter)
writer.add_scalar('Accuracy/'+name,acc,n_iter)
writer.add_scalar('ROC_AUC_Score/'+name,auc,n_iter)
writer.add_scalar('F1_Score/'+name,f1_score,n_iter)
writer.close()
```

| filtered:remove_function_no_docstring | filtered:remove_class_no_docstring | filtered:remove_delete_markers |
|---|---|---|
| 11,566 | 1,677 | 22 |
Row 6: acoular/tbeamform.py (ndimubanzisenga/acoular)

| Field | Value |
|---|---|
| hexsha | e0179e878f7eca8bff86090f5fe6aea6db8c2820 |
| size | 25,793 |
| ext / lang | py / Python |
| max_stars/issues/forks_repo_path | acoular/tbeamform.py |
| max_stars/issues/forks_repo_name | ndimubanzisenga/acoular |
| max_stars/issues/forks_repo_head_hexsha | c7abace657d2602f9a4e9d2e4e1fabe44ec3927b |
| max_stars/issues/forks_repo_licenses | ["BSD-3-Clause"] |
| max_stars_count (event min to max) | 1 (2019-08-30T22:45:09.000Z to 2019-08-30T22:45:09.000Z) |
| max_issues_count (event min to max) | null (event dates null) |
| max_forks_count (event min to max) | 1 (2019-08-30T03:29:09.000Z to 2019-08-30T03:29:09.000Z) |

content:

```python
# -*- coding: utf-8 -*-
#pylint: disable-msg=E0611, E1101, C0103, R0901, R0902, R0903, R0904, W0232
#------------------------------------------------------------------------------
# Copyright (c) 2007-2014, Acoular Development Team.
#------------------------------------------------------------------------------
"""Implements beamformers in the time domain.
.. autosummary::
:toctree: generated/
BeamformerTime
BeamformerTimeTraj
BeamformerTimeSq
BeamformerTimeSqTraj
IntegratorSectorTime
"""
# imports from other packages
from numpy import array, newaxis, empty, sqrt, arange, clip, r_, zeros, \
histogram, unique, cross, dot
from traits.api import Float, CArray, Property, Trait, Bool, Delegate, \
cached_property, List
from traitsui.api import View, Item
from traitsui.menu import OKCancelButtons
# acoular imports
from .internal import digest
from .grids import RectGrid
from .microphones import MicGeom
from .environments import Environment
from .trajectory import Trajectory
from .tprocess import TimeInOut
def const_power_weight( bf ):
"""
Internal helper function for :class:`BeamformerTime`
Provides microphone weighting
to make the power per unit area of the
microphone array geometry constant.
Parameters
----------
bf: :class:`BeamformerTime` object
Returns
-------
array of floats
The weight factors.
"""
r = bf.env.r( bf.c, zeros((3, 1)), bf.mpos.mpos) # distances to center
# round the relative distances to one decimal place
r = (r/r.max()).round(decimals=1)
ru, ind = unique(r, return_inverse=True)
ru = (ru[1:]+ru[:-1])/2
count, bins = histogram(r, r_[0, ru, 1.5*r.max()-0.5*ru[-1]])
bins *= bins
weights = sqrt((bins[1:]-bins[:-1])/count)
weights /= weights.mean()
return weights[ind]
# possible choices for spatial weights
possible_weights = {'none':None,
'power':const_power_weight}
class BeamformerTime( TimeInOut ):
"""
Provides a basic time domain beamformer with time signal output
for a spatially fixed grid.
"""
#: :class:`~acoular.grids.Grid`-derived object that provides the grid locations.
grid = Trait(RectGrid,
desc="beamforming grid")
#: Number of channels in output (=number of grid points).
numchannels = Delegate('grid', 'size')
#: :class:`~acoular.microphones.MicGeom` object that provides the microphone locations.
mpos= Trait(MicGeom,
desc="microphone geometry")
#: :class:`~acoular.environments.Environment` or derived object,
#: which provides information about the sound propagation in the medium.
env = Trait(Environment(), Environment)
#: Spatial weighting function.
weights = Trait('none', possible_weights,
desc="spatial weighting function")
# (from timedomain.possible_weights)
#: The speed of sound, defaults to 343 m/s
c = Float(343.,
desc="speed of sound")
#: Sound travel distances from microphone array center to grid
#: points (readonly).
r0 = Property(
desc="array center to grid distances")
#: Sound travel distances from array microphones to grid
#: points (readonly).
rm = Property(
desc="array center to grid distances")
# internal identifier
digest = Property(
depends_on = ['mpos.digest', 'grid.digest', 'source.digest', 'c', \
'env.digest', 'weights', '__class__'],
)
traits_view = View(
[
[Item('mpos{}', style='custom')],
[Item('grid', style='custom'), '-<>'],
[Item('c', label='speed of sound')],
[Item('env{}', style='custom')],
[Item('weights{}', style='custom')],
'|'
],
title='Beamformer options',
buttons = OKCancelButtons
)
@cached_property
#@cached_property
#@cached_property
def result( self, num=2048 ):
"""
Python generator that yields the beamformer output block-wise.
Parameters
----------
num : integer, defaults to 2048
This parameter defines the size of the blocks to be yielded
(i.e. the number of samples per block)
Returns
-------
Samples in blocks of shape (num, :attr:`numchannels`).
:attr:`numchannels` is usually very large.
The last block may be shorter than num.
"""
if self.weights_:
w = self.weights_(self)[newaxis]
else:
w = 1.0
c = self.c/self.sample_freq
delays = self.rm/c
d_index = array(delays, dtype=int) # integer index
d_interp1 = delays % 1 # 1st coeff for lin interpolation between samples
d_interp2 = 1-d_interp1 # 2nd coeff for lin interpolation
d_index2 = arange(self.mpos.num_mics)
# amp = (self.rm/self.r0[:, newaxis]) # multiplication factor
amp = (w/(self.rm*self.rm)).sum(1) * self.r0
amp = 1.0/(amp[:, newaxis]*self.rm) # multiplication factor
d_interp1 *= amp # premultiplication, to save later ops
d_interp2 *= amp
dmin = d_index.min() # minimum index
dmax = d_index.max()+1 # maximum index
aoff = dmax-dmin # index span
#working copy of data:
zi = empty((aoff+num, self.source.numchannels), dtype=float)
o = empty((num, self.grid.size), dtype=float) # output array
offset = aoff # start offset for working array
ooffset = 0 # offset for output array
for block in self.source.result(num):
ns = block.shape[0] # numbers of samples and channels
maxoffset = ns-dmin # ns - aoff +aoff -dmin
zi[aoff:aoff+ns] = block * w # copy data to working array
# loop over data samples
while offset < maxoffset:
# yield output array if full
if ooffset == num:
yield o
ooffset = 0
# the next line needs to be implemented faster
o[ooffset] = (zi[offset+d_index, d_index2]*d_interp1 + \
zi[offset+d_index+1, d_index2]*d_interp2).sum(-1)
offset += 1
ooffset += 1
# copy remaining samples in front of next block
zi[0:aoff] = zi[-aoff:]
offset -= num
# remaining data chunk
yield o[:ooffset]
class BeamformerTimeSq( BeamformerTime ):
"""
Provides a time domain beamformer with time-dependend
power signal output and possible autopower removal
for a spatially fixed grid.
"""
#: Boolean flag, if 'True' (default), the main diagonal is removed before beamforming.
r_diag = Bool(True,
desc="removal of diagonal")
# internal identifier
digest = Property(
depends_on = ['mpos.digest', 'grid.digest', 'source.digest', 'r_diag', \
'c', 'env.digest', 'weights', '__class__'],
)
traits_view = View(
[
[Item('mpos{}', style='custom')],
[Item('grid', style='custom'), '-<>'],
[Item('r_diag', label='diagonal removed')],
[Item('c', label='speed of sound')],
[Item('env{}', style='custom')],
[Item('weights{}', style='custom')],
'|'
],
title='Beamformer options',
buttons = OKCancelButtons
)
@cached_property
# generator, delivers the beamformer result
def result( self, num=2048 ):
"""
Python generator that yields the *squared* beamformer
output with optional removal of autocorrelation block-wise.
Parameters
----------
num : integer, defaults to 2048
This parameter defines the size of the blocks to be yielded
(i.e. the number of samples per block)
Returns
-------
Samples in blocks of shape \
(num, :attr:`~BeamformerTime.numchannels`).
:attr:`~BeamformerTime.numchannels` is usually very
large (number of grid points).
The last block may be shorter than num.
"""
if self.weights_:
w = self.weights_(self)[newaxis]
else:
w = 1.0
c = self.c/self.source.sample_freq
delays = self.rm/c
d_index = array(delays, dtype=int) # integer index
d_interp1 = delays % 1 # 1st coeff for lin interpolation between samples
d_interp2 = 1-d_interp1 # 2nd coeff for lin interpolation
d_index2 = arange(self.mpos.num_mics)
# amp = (self.rm/self.r0[:, newaxis]) # multiplication factor
amp = (w/(self.rm*self.rm)).sum(1) * self.r0
amp = 1.0/(amp[:, newaxis]*self.rm) # multiplication factor
d_interp1 *= amp # premultiplication, to save later ops
d_interp2 *= amp
dmin = d_index.min() # minimum index
dmax = d_index.max()+1 # maximum index
# print dmin, dmax
aoff = dmax-dmin # index span
#working copy of data:
zi = empty((aoff+num, self.source.numchannels), dtype=float)
o = empty((num, self.grid.size), dtype=float) # output array
temp = empty((self.grid.size, self.source.numchannels), dtype=float)
offset = aoff # start offset for working array
ooffset = 0 # offset for output array
for block in self.source.result(num):
ns = block.shape[0] # numbers of samples and channels
maxoffset = ns-dmin # ns - aoff +aoff -dmin
zi[aoff:aoff+ns] = block * w # copy data to working array
# loop over data samples
while offset < maxoffset:
# yield output array if full
if ooffset == num:
yield o
ooffset = 0
# the next line needs to be implemented faster
temp[:, :] = (zi[offset+d_index, d_index2]*d_interp1 \
+ zi[offset+d_index+1, d_index2]*d_interp2)
if self.r_diag:
# simple sum and remove autopower
o[ooffset] = clip(temp.sum(-1)**2 - \
(temp**2).sum(-1), 1e-100, 1e+100)
else:
# simple sum
o[ooffset] = temp.sum(-1)**2
offset += 1
ooffset += 1
# copy remaining samples in front of next block
zi[0:aoff] = zi[-aoff:]
offset -= num
# remaining data chunk
yield o[:ooffset]
class BeamformerTimeTraj( BeamformerTime ):
"""
Provides a basic time domain beamformer with time signal output
for a grid moving along a trajectory
"""
#: :class:`~acoular.trajectory.Trajectory` or derived object.
#: Start time is assumed to be the same as for the samples.
trajectory = Trait(Trajectory,
desc="trajectory of the grid center")
#: Reference vector, perpendicular to the y-axis of moving grid.
rvec = CArray( dtype=float, shape=(3, ), value=array((0, 0, 0)),
desc="reference vector")
# internal identifier
digest = Property(
depends_on = ['mpos.digest', 'grid.digest', 'source.digest', \
'c', 'weights', 'rvec', 'env.digest', 'trajectory.digest', \
'__class__'],
)
traits_view = View(
[
[Item('mpos{}', style='custom')],
[Item('grid', style='custom'), '-<>'],
[Item('trajectory{}', style='custom')],
[Item('c', label='speed of sound')],
[Item('env{}', style='custom')],
[Item('weights{}', style='custom')],
'|'
],
title='Beamformer options',
buttons = OKCancelButtons
)
@cached_property
def result( self, num=2048 ):
"""
Python generator that yields the beamformer
output block-wise.
Optional removal of autocorrelation.
The "moving" grid can be translated and optionally rotated.
Parameters
----------
num : integer, defaults to 2048
This parameter defines the size of the blocks to be yielded
(i.e. the number of samples per block)
Returns
-------
Samples in blocks of shape \
(num, :attr:`~BeamformerTime.numchannels`).
:attr:`~BeamformerTime.numchannels` is usually very \
large (number of grid points).
The last block may be shorter than num. \
The output starts for signals that were emitted from the grid at t=0.
"""
if self.weights_:
w = self.weights_(self)[newaxis]
else:
w = 1.0
c = self.c/self.source.sample_freq
# temp array for the grid co-ordinates
gpos = self.grid.pos()
# max delay span = sum of
# max diagonal lengths of circumscribing cuboids for grid and micarray
dmax = sqrt(((gpos.max(1)-gpos.min(1))**2).sum())
dmax += sqrt(((self.mpos.mpos.max(1)-self.mpos.mpos.min(1))**2).sum())
dmax = int(dmax/c)+1 # max index span
zi = empty((dmax+num, self.source.numchannels), \
dtype=float) #working copy of data
o = empty((num, self.grid.size), dtype=float) # output array
temp = empty((self.grid.size, self.source.numchannels), dtype=float)
d_index2 = arange(self.mpos.num_mics, dtype=int) # second index (static)
offset = dmax+num # start offset for working array
ooffset = 0 # offset for output array
# generators for trajectory, starting at time zero
start_t = 0.0
g = self.trajectory.traj( start_t, delta_t=1/self.source.sample_freq)
g1 = self.trajectory.traj( start_t, delta_t=1/self.source.sample_freq,
der=1)
rflag = (self.rvec == 0).all() #flag translation vs. rotation
data = self.source.result(num)
flag = True
while flag:
# yield output array if full
if ooffset == num:
yield o
ooffset = 0
if rflag:
# grid is only translated, not rotated
tpos = gpos + array(g.next())[:, newaxis]
else:
# grid is both translated and rotated
loc = array(g.next()) #translation array([0., 0.4, 1.])
dx = array(g1.next()) #direction vector (new x-axis)
dy = cross(self.rvec, dx) # new y-axis
dz = cross(dx, dy) # new z-axis
RM = array((dx, dy, dz)).T # rotation matrix
RM /= sqrt((RM*RM).sum(0)) # column normalized
tpos = dot(RM, gpos)+loc[:, newaxis] # rotation+translation
rm = self.env.r( self.c, tpos, self.mpos.mpos)
r0 = self.env.r( self.c, tpos)
delays = rm/c
d_index = array(delays, dtype=int) # integer index
d_interp1 = delays % 1 # 1st coeff for lin interpolation
d_interp2 = 1-d_interp1 # 2nd coeff for lin interpolation
amp = (w/(rm*rm)).sum(1) * r0
amp = 1.0/(amp[:, newaxis]*rm) # multiplication factor
# now, we have to make sure that the needed data is available
while offset+d_index.max()+2>dmax+num:
# copy remaining samples in front of next block
zi[0:dmax] = zi[-dmax:]
# the offset is adjusted by one block length
offset -= num
# test if data generator is exhausted
try:
# get next data
block = data.next()
except StopIteration:
print loc
flag = False
break
# samples in the block, equals to num except for the last block
ns = block.shape[0]
zi[dmax:dmax+ns] = block * w# copy data to working array
else:
# the next line needs to be implemented faster
# it eats half of the time
temp[:, :] = (zi[offset+d_index, d_index2]*d_interp1 \
+ zi[offset+d_index+1, d_index2]*d_interp2)*amp
o[ooffset] = temp.sum(-1)
offset += 1
ooffset += 1
# remaining data chunk
yield o[:ooffset]
class BeamformerTimeSqTraj( BeamformerTimeSq ):
"""
Provides a time domain beamformer with time-dependent
power signal output and possible autopower removal
for a grid moving along a trajectory.
"""
#: :class:`~acoular.trajectory.Trajectory` or derived object.
#: Start time is assumed to be the same as for the samples.
trajectory = Trait(Trajectory,
desc="trajectory of the grid center")
#: Reference vector, perpendicular to the y-axis of moving grid.
rvec = CArray( dtype=float, shape=(3, ), value=array((0, 0, 0)),
desc="reference vector")
# internal identifier
digest = Property(
depends_on = ['mpos.digest', 'grid.digest', 'source.digest', 'r_diag', \
'c', 'weights', 'rvec', 'env.digest', 'trajectory.digest', \
'__class__'],
)
traits_view = View(
[
[Item('mpos{}', style='custom')],
[Item('grid', style='custom'), '-<>'],
[Item('trajectory{}', style='custom')],
[Item('r_diag', label='diagonal removed')],
[Item('c', label='speed of sound')],
[Item('env{}', style='custom')],
[Item('weights{}', style='custom')],
'|'
],
title='Beamformer options',
buttons = OKCancelButtons
)
@cached_property
def result( self, num=2048 ):
"""
Python generator that yields the *squared* beamformer
output block-wise.
Optional removal of autocorrelation.
The "moving" grid can be translated and optionally rotated.
Parameters
----------
num : integer, defaults to 2048
This parameter defines the size of the blocks to be yielded
(i.e. the number of samples per block)
Returns
-------
Samples in blocks of shape \
(num, :attr:`~BeamformerTime.numchannels`).
:attr:`~BeamformerTime.numchannels` is usually very \
large (number of grid points).
The last block may be shorter than num. \
The output starts for signals that were emitted from the grid at t=0.
"""
if self.weights_:
w = self.weights_(self)[newaxis]
else:
w = 1.0
c = self.c/self.source.sample_freq
# temp array for the grid co-ordinates
gpos = self.grid.pos()
# max delay span = sum of
# max diagonal lengths of circumscribing cuboids for grid and micarray
dmax = sqrt(((gpos.max(1)-gpos.min(1))**2).sum())
dmax += sqrt(((self.mpos.mpos.max(1)-self.mpos.mpos.min(1))**2).sum())
dmax = int(dmax/c)+1 # max index span
zi = empty((dmax+num, self.source.numchannels), \
dtype=float) #working copy of data
o = empty((num, self.grid.size), dtype=float) # output array
temp = empty((self.grid.size, self.source.numchannels), dtype=float)
d_index2 = arange(self.mpos.num_mics, dtype=int) # second index (static)
offset = dmax+num # start offset for working array
ooffset = 0 # offset for output array
# generators for trajectory, starting at time zero
start_t = 0.0
g = self.trajectory.traj( start_t, delta_t=1/self.source.sample_freq)
g1 = self.trajectory.traj( start_t, delta_t=1/self.source.sample_freq,
der=1)
rflag = (self.rvec == 0).all() #flag translation vs. rotation
data = self.source.result(num)
flag = True
while flag:
# yield output array if full
if ooffset == num:
yield o
ooffset = 0
if rflag:
# grid is only translated, not rotated
tpos = gpos + array(g.next())[:, newaxis]
else:
# grid is both translated and rotated
loc = array(g.next()) #translation
dx = array(g1.next()) #direction vector (new x-axis)
dy = cross(self.rvec, dx) # new y-axis
dz = cross(dx, dy) # new z-axis
RM = array((dx, dy, dz)).T # rotation matrix
RM /= sqrt((RM*RM).sum(0)) # column normalized
tpos = dot(RM, gpos)+loc[:, newaxis] # rotation+translation
rm = self.env.r( self.c, tpos, self.mpos.mpos)
r0 = self.env.r( self.c, tpos)
delays = rm/c
d_index = array(delays, dtype=int) # integer index
d_interp1 = delays % 1 # 1st coeff for lin interpolation
d_interp2 = 1-d_interp1 # 2nd coeff for lin interpolation
amp = (w/(rm*rm)).sum(1) * r0
amp = 1.0/(amp[:, newaxis]*rm) # multiplication factor
# now, we have to make sure that the needed data is available
while offset+d_index.max()+2>dmax+num:
# copy remaining samples in front of next block
zi[0:dmax] = zi[-dmax:]
# the offset is adjusted by one block length
offset -= num
# test if data generator is exhausted
try:
# get next data
block = data.next()
except StopIteration:
flag = False
break
# samples in the block, equals to num except for the last block
ns = block.shape[0]
zi[dmax:dmax+ns] = block * w# copy data to working array
else:
# the next line needs to be implemented faster
# it eats half of the time
temp[:, :] = (zi[offset+d_index, d_index2]*d_interp1 \
+ zi[offset+d_index+1, d_index2]*d_interp2)*amp
if self.r_diag:
# simple sum and remove autopower
o[ooffset] = clip(temp.sum(-1)**2 - \
(temp**2).sum(-1), 1e-100, 1e+100)
else:
# simple sum
o[ooffset] = temp.sum(-1)**2
offset += 1
ooffset += 1
# remaining data chunk
yield o[:ooffset]
class IntegratorSectorTime( TimeInOut ):
"""
Provides an Integrator in the time domain.
"""
#: :class:`~acoular.grids.Grid`-derived object that provides the grid locations.
grid = Trait(RectGrid,
desc="beamforming grid")
#: List of sectors in grid
sectors = List()
#: Clipping, in Dezibel relative to maximum (negative values)
clip = Float(-350.0)
#: Number of channels in output (= number of sectors).
numchannels = Property( depends_on = ['sectors', ])
# internal identifier
digest = Property(
depends_on = ['sectors', 'clip', 'grid.digest', 'source.digest', \
'__class__'],
)
traits_view = View(
[
[Item('sectors', style='custom')],
[Item('grid', style='custom'), '-<>'],
'|'
],
title='Integrator',
buttons = OKCancelButtons
)
@cached_property
@cached_property
def result( self, num=1 ):
"""
Python generator that yields the source output integrated over the given
sectors, block-wise.
Parameters
----------
num : integer, defaults to 1
This parameter defines the size of the blocks to be yielded
(i.e. the number of samples per block)
Returns
-------
Samples in blocks of shape (num, :attr:`numchannels`).
:attr:`numchannels` is the number of sectors.
The last block may be shorter than num.
"""
inds = [self.grid.indices(*sector) for sector in self.sectors]
gshape = self.grid.shape
o = empty((num, self.numchannels), dtype=float) # output array
for r in self.source.result(num):
ns = r.shape[0]
mapshape = (ns,) + gshape
rmax = r.max()
rmin = rmax * 10**(self.clip/10.0)
r = where(r>rmin, r, 0.0)
i = 0
for ind in inds:
h = r[:].reshape(mapshape)[ (s_[:],) + ind ]
o[:ns, i] = h.reshape(h.shape[0], -1).sum(axis=1)
i += 1
yield o[:ns]
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 37.435414 | 91 | 0.541426 |

original_content:
# -*- coding: utf-8 -*-
#pylint: disable-msg=E0611, E1101, C0103, R0901, R0902, R0903, R0904, W0232
#------------------------------------------------------------------------------
# Copyright (c) 2007-2014, Acoular Development Team.
#------------------------------------------------------------------------------
"""Implements beamformers in the time domain.
.. autosummary::
:toctree: generated/
BeamformerTime
BeamformerTimeTraj
BeamformerTimeSq
BeamformerTimeSqTraj
IntegratorSectorTime
"""
# imports from other packages
from numpy import array, newaxis, empty, sqrt, arange, clip, r_, zeros, \
histogram, unique, cross, dot
from traits.api import Float, CArray, Property, Trait, Bool, Delegate, \
cached_property, List
from traitsui.api import View, Item
from traitsui.menu import OKCancelButtons
# acoular imports
from .internal import digest
from .grids import RectGrid
from .microphones import MicGeom
from .environments import Environment
from .trajectory import Trajectory
from .tprocess import TimeInOut
def const_power_weight( bf ):
"""
Internal helper function for :class:`BeamformerTime`
Provides microphone weighting
to make the power per unit area of the
microphone array geometry constant.
Parameters
----------
bf: :class:`BeamformerTime` object
Returns
-------
array of floats
The weight factors.
"""
r = bf.env.r( bf.c, zeros((3, 1)), bf.mpos.mpos) # distances to center
# round the relative distances to one decimal place
r = (r/r.max()).round(decimals=1)
ru, ind = unique(r, return_inverse=True)
ru = (ru[1:]+ru[:-1])/2
count, bins = histogram(r, r_[0, ru, 1.5*r.max()-0.5*ru[-1]])
bins *= bins
weights = sqrt((bins[1:]-bins[:-1])/count)
weights /= weights.mean()
return weights[ind]
# possible choices for spatial weights
possible_weights = {'none':None,
'power':const_power_weight}
class BeamformerTime( TimeInOut ):
"""
Provides a basic time domain beamformer with time signal output
for a spatially fixed grid.
"""
#: :class:`~acoular.grids.Grid`-derived object that provides the grid locations.
grid = Trait(RectGrid,
desc="beamforming grid")
#: Number of channels in output (=number of grid points).
numchannels = Delegate('grid', 'size')
#: :class:`~acoular.microphones.MicGeom` object that provides the microphone locations.
mpos= Trait(MicGeom,
desc="microphone geometry")
#: :class:`~acoular.environments.Environment` or derived object,
#: which provides information about the sound propagation in the medium.
env = Trait(Environment(), Environment)
#: Spatial weighting function.
weights = Trait('none', possible_weights,
desc="spatial weighting function")
# (from timedomain.possible_weights)
#: The speed of sound, defaults to 343 m/s
c = Float(343.,
desc="speed of sound")
#: Sound travel distances from microphone array center to grid
#: points (readonly).
r0 = Property(
desc="array center to grid distances")
#: Sound travel distances from array microphones to grid
#: points (readonly).
rm = Property(
desc="array center to grid distances")
# internal identifier
digest = Property(
depends_on = ['mpos.digest', 'grid.digest', 'source.digest', 'c', \
'env.digest', 'weights', '__class__'],
)
traits_view = View(
[
[Item('mpos{}', style='custom')],
[Item('grid', style='custom'), '-<>'],
[Item('c', label='speed of sound')],
[Item('env{}', style='custom')],
[Item('weights{}', style='custom')],
'|'
],
title='Beamformer options',
buttons = OKCancelButtons
)
@cached_property
def _get_digest( self ):
return digest(self)
#@cached_property
def _get_r0 ( self ):
return self.env.r( self.c, self.grid.pos())
#@cached_property
def _get_rm ( self ):
return self.env.r( self.c, self.grid.pos(), self.mpos.mpos)
def result( self, num=2048 ):
"""
Python generator that yields the beamformer output block-wise.
Parameters
----------
num : integer, defaults to 2048
This parameter defines the size of the blocks to be yielded
(i.e. the number of samples per block)
Returns
-------
Samples in blocks of shape (num, :attr:`numchannels`).
:attr:`numchannels` is usually very large.
The last block may be shorter than num.
"""
if self.weights_:
w = self.weights_(self)[newaxis]
else:
w = 1.0
c = self.c/self.sample_freq
delays = self.rm/c
d_index = array(delays, dtype=int) # integer index
d_interp1 = delays % 1 # 1st coeff for lin interpolation between samples
d_interp2 = 1-d_interp1 # 2nd coeff for lin interpolation
d_index2 = arange(self.mpos.num_mics)
# amp = (self.rm/self.r0[:, newaxis]) # multiplication factor
amp = (w/(self.rm*self.rm)).sum(1) * self.r0
amp = 1.0/(amp[:, newaxis]*self.rm) # multiplication factor
d_interp1 *= amp # premultiplication, to save later ops
d_interp2 *= amp
dmin = d_index.min() # minimum index
dmax = d_index.max()+1 # maximum index
aoff = dmax-dmin # index span
#working copy of data:
zi = empty((aoff+num, self.source.numchannels), dtype=float)
o = empty((num, self.grid.size), dtype=float) # output array
offset = aoff # start offset for working array
ooffset = 0 # offset for output array
for block in self.source.result(num):
ns = block.shape[0] # numbers of samples and channels
maxoffset = ns-dmin # ns - aoff +aoff -dmin
zi[aoff:aoff+ns] = block * w # copy data to working array
# loop over data samples
while offset < maxoffset:
# yield output array if full
if ooffset == num:
yield o
ooffset = 0
# the next line needs to be implemented faster
o[ooffset] = (zi[offset+d_index, d_index2]*d_interp1 + \
zi[offset+d_index+1, d_index2]*d_interp2).sum(-1)
offset += 1
ooffset += 1
# copy remaining samples in front of next block
zi[0:aoff] = zi[-aoff:]
offset -= num
# remaining data chunk
yield o[:ooffset]
class BeamformerTimeSq( BeamformerTime ):
"""
Provides a time domain beamformer with time-dependend
power signal output and possible autopower removal
for a spatially fixed grid.
"""
#: Boolean flag, if 'True' (default), the main diagonal is removed before beamforming.
r_diag = Bool(True,
desc="removal of diagonal")
# internal identifier
digest = Property(
depends_on = ['mpos.digest', 'grid.digest', 'source.digest', 'r_diag', \
'c', 'env.digest', 'weights', '__class__'],
)
traits_view = View(
[
[Item('mpos{}', style='custom')],
[Item('grid', style='custom'), '-<>'],
[Item('r_diag', label='diagonal removed')],
[Item('c', label='speed of sound')],
[Item('env{}', style='custom')],
[Item('weights{}', style='custom')],
'|'
],
title='Beamformer options',
buttons = OKCancelButtons
)
@cached_property
def _get_digest( self ):
return digest(self)
# generator, delivers the beamformer result
def result( self, num=2048 ):
"""
Python generator that yields the *squared* beamformer
output with optional removal of autocorrelation block-wise.
Parameters
----------
num : integer, defaults to 2048
This parameter defines the size of the blocks to be yielded
(i.e. the number of samples per block)
Returns
-------
Samples in blocks of shape \
(num, :attr:`~BeamformerTime.numchannels`).
:attr:`~BeamformerTime.numchannels` is usually very
large (number of grid points).
The last block may be shorter than num.
"""
if self.weights_:
w = self.weights_(self)[newaxis]
else:
w = 1.0
c = self.c/self.source.sample_freq
delays = self.rm/c
d_index = array(delays, dtype=int) # integer index
d_interp1 = delays % 1 # 1st coeff for lin interpolation between samples
d_interp2 = 1-d_interp1 # 2nd coeff for lin interpolation
d_index2 = arange(self.mpos.num_mics)
# amp = (self.rm/self.r0[:, newaxis]) # multiplication factor
amp = (w/(self.rm*self.rm)).sum(1) * self.r0
amp = 1.0/(amp[:, newaxis]*self.rm) # multiplication factor
d_interp1 *= amp # premultiplication, to save later ops
d_interp2 *= amp
dmin = d_index.min() # minimum index
dmax = d_index.max()+1 # maximum index
# print dmin, dmax
aoff = dmax-dmin # index span
#working copy of data:
zi = empty((aoff+num, self.source.numchannels), dtype=float)
o = empty((num, self.grid.size), dtype=float) # output array
temp = empty((self.grid.size, self.source.numchannels), dtype=float)
offset = aoff # start offset for working array
ooffset = 0 # offset for output array
for block in self.source.result(num):
ns = block.shape[0] # numbers of samples and channels
maxoffset = ns-dmin # ns - aoff +aoff -dmin
zi[aoff:aoff+ns] = block * w # copy data to working array
# loop over data samples
while offset < maxoffset:
# yield output array if full
if ooffset == num:
yield o
ooffset = 0
# the next line needs to be implemented faster
temp[:, :] = (zi[offset+d_index, d_index2]*d_interp1 \
+ zi[offset+d_index+1, d_index2]*d_interp2)
if self.r_diag:
# simple sum and remove autopower
o[ooffset] = clip(temp.sum(-1)**2 - \
(temp**2).sum(-1), 1e-100, 1e+100)
else:
# simple sum
o[ooffset] = temp.sum(-1)**2
offset += 1
ooffset += 1
# copy remaining samples in front of next block
zi[0:aoff] = zi[-aoff:]
offset -= num
# remaining data chunk
yield o[:ooffset]
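# Illustrative sketch only (made-up values, not part of the original module): the
# squared beamformer above can subtract the per-channel autopowers ("diagonal
# removal") from the squared channel sum, clipping the result so it stays
# positive. The same arithmetic on a single steered sample vector:
def _autopower_removal_sketch():
    from numpy import array, clip
    steered = array((0.9, 1.1, 1.0, -0.2)) # steered samples, one per mic
    squared_sum = steered.sum()**2 # plain delay-and-sum power
    diag_removed = squared_sum - (steered**2).sum() # autopowers subtracted
    return clip(diag_removed, 1e-100, 1e+100), squared_sum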
class BeamformerTimeTraj( BeamformerTime ):
"""
Provides a basic time domain beamformer with time signal output
    for a grid moving along a trajectory.
"""
#: :class:`~acoular.trajectory.Trajectory` or derived object.
#: Start time is assumed to be the same as for the samples.
trajectory = Trait(Trajectory,
desc="trajectory of the grid center")
#: Reference vector, perpendicular to the y-axis of moving grid.
rvec = CArray( dtype=float, shape=(3, ), value=array((0, 0, 0)),
desc="reference vector")
# internal identifier
digest = Property(
depends_on = ['mpos.digest', 'grid.digest', 'source.digest', \
'c', 'weights', 'rvec', 'env.digest', 'trajectory.digest', \
'__class__'],
)
traits_view = View(
[
[Item('mpos{}', style='custom')],
[Item('grid', style='custom'), '-<>'],
[Item('trajectory{}', style='custom')],
[Item('c', label='speed of sound')],
[Item('env{}', style='custom')],
[Item('weights{}', style='custom')],
'|'
],
title='Beamformer options',
buttons = OKCancelButtons
)
@cached_property
def _get_digest( self ):
return digest(self)
def result( self, num=2048 ):
"""
Python generator that yields the beamformer
output block-wise.
Optional removal of autocorrelation.
The "moving" grid can be translated and optionally rotated.
Parameters
----------
num : integer, defaults to 2048
This parameter defines the size of the blocks to be yielded
(i.e. the number of samples per block)
Returns
-------
Samples in blocks of shape \
(num, :attr:`~BeamformerTime.numchannels`).
:attr:`~BeamformerTime.numchannels` is usually very \
large (number of grid points).
The last block may be shorter than num. \
The output starts for signals that were emitted from the grid at t=0.
"""
if self.weights_:
w = self.weights_(self)[newaxis]
else:
w = 1.0
c = self.c/self.source.sample_freq
# temp array for the grid co-ordinates
gpos = self.grid.pos()
# max delay span = sum of
# max diagonal lengths of circumscribing cuboids for grid and micarray
dmax = sqrt(((gpos.max(1)-gpos.min(1))**2).sum())
dmax += sqrt(((self.mpos.mpos.max(1)-self.mpos.mpos.min(1))**2).sum())
dmax = int(dmax/c)+1 # max index span
zi = empty((dmax+num, self.source.numchannels), \
dtype=float) #working copy of data
o = empty((num, self.grid.size), dtype=float) # output array
temp = empty((self.grid.size, self.source.numchannels), dtype=float)
d_index2 = arange(self.mpos.num_mics, dtype=int) # second index (static)
offset = dmax+num # start offset for working array
ooffset = 0 # offset for output array
# generators for trajectory, starting at time zero
start_t = 0.0
g = self.trajectory.traj( start_t, delta_t=1/self.source.sample_freq)
g1 = self.trajectory.traj( start_t, delta_t=1/self.source.sample_freq,
der=1)
rflag = (self.rvec == 0).all() #flag translation vs. rotation
data = self.source.result(num)
flag = True
while flag:
# yield output array if full
if ooffset == num:
yield o
ooffset = 0
if rflag:
# grid is only translated, not rotated
tpos = gpos + array(g.next())[:, newaxis]
else:
# grid is both translated and rotated
                loc = array(g.next()) #translation
dx = array(g1.next()) #direction vector (new x-axis)
dy = cross(self.rvec, dx) # new y-axis
dz = cross(dx, dy) # new z-axis
RM = array((dx, dy, dz)).T # rotation matrix
RM /= sqrt((RM*RM).sum(0)) # column normalized
tpos = dot(RM, gpos)+loc[:, newaxis] # rotation+translation
rm = self.env.r( self.c, tpos, self.mpos.mpos)
r0 = self.env.r( self.c, tpos)
delays = rm/c
d_index = array(delays, dtype=int) # integer index
d_interp1 = delays % 1 # 1st coeff for lin interpolation
d_interp2 = 1-d_interp1 # 2nd coeff for lin interpolation
amp = (w/(rm*rm)).sum(1) * r0
amp = 1.0/(amp[:, newaxis]*rm) # multiplication factor
# now, we have to make sure that the needed data is available
while offset+d_index.max()+2>dmax+num:
# copy remaining samples in front of next block
zi[0:dmax] = zi[-dmax:]
# the offset is adjusted by one block length
offset -= num
# test if data generator is exhausted
try:
# get next data
block = data.next()
except StopIteration:
flag = False
break
                # samples in the block, equal to num except for the last block
ns = block.shape[0]
zi[dmax:dmax+ns] = block * w# copy data to working array
else:
# the next line needs to be implemented faster
# it eats half of the time
temp[:, :] = (zi[offset+d_index, d_index2]*d_interp1 \
+ zi[offset+d_index+1, d_index2]*d_interp2)*amp
o[ooffset] = temp.sum(-1)
offset += 1
ooffset += 1
# remaining data chunk
yield o[:ooffset]
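# Illustrative sketch only (made-up vectors, not part of the original module):
# for a rotating grid, result() builds a rotation matrix whose columns are the
# trajectory direction (new x-axis) and two axes derived from the reference
# vector rvec, then rotates and translates the grid points. The same
# construction in isolation:
def _moving_grid_rotation_sketch():
    from numpy import array, cross, sqrt, dot, newaxis
    dx = array((1.0, 0.2, 0.0)) # trajectory derivative = new x-axis
    rvec = array((0.0, 0.0, 1.0)) # reference vector
    dy = cross(rvec, dx) # new y-axis
    dz = cross(dx, dy) # new z-axis
    RM = array((dx, dy, dz)).T # rotation matrix
    RM /= sqrt((RM*RM).sum(0)) # column-normalized
    gpos = array(((0.0,), (0.5,), (0.0,))) # one grid point, shape (3, 1)
    loc = array((0.0, 0.4, 1.0)) # current grid centre from the trajectory
    return dot(RM, gpos) + loc[:, newaxis] # rotated + translated grid point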
class BeamformerTimeSqTraj( BeamformerTimeSq ):
"""
Provides a time domain beamformer with time-dependent
power signal output and possible autopower removal
for a grid moving along a trajectory.
"""
#: :class:`~acoular.trajectory.Trajectory` or derived object.
#: Start time is assumed to be the same as for the samples.
trajectory = Trait(Trajectory,
desc="trajectory of the grid center")
#: Reference vector, perpendicular to the y-axis of moving grid.
rvec = CArray( dtype=float, shape=(3, ), value=array((0, 0, 0)),
desc="reference vector")
# internal identifier
digest = Property(
depends_on = ['mpos.digest', 'grid.digest', 'source.digest', 'r_diag', \
'c', 'weights', 'rvec', 'env.digest', 'trajectory.digest', \
'__class__'],
)
traits_view = View(
[
[Item('mpos{}', style='custom')],
[Item('grid', style='custom'), '-<>'],
[Item('trajectory{}', style='custom')],
[Item('r_diag', label='diagonal removed')],
[Item('c', label='speed of sound')],
[Item('env{}', style='custom')],
[Item('weights{}', style='custom')],
'|'
],
title='Beamformer options',
buttons = OKCancelButtons
)
@cached_property
def _get_digest( self ):
return digest(self)
def result( self, num=2048 ):
"""
Python generator that yields the *squared* beamformer
output block-wise.
Optional removal of autocorrelation.
The "moving" grid can be translated and optionally rotated.
Parameters
----------
num : integer, defaults to 2048
This parameter defines the size of the blocks to be yielded
(i.e. the number of samples per block)
Returns
-------
Samples in blocks of shape \
(num, :attr:`~BeamformerTime.numchannels`).
:attr:`~BeamformerTime.numchannels` is usually very \
large (number of grid points).
The last block may be shorter than num. \
The output starts for signals that were emitted from the grid at t=0.
"""
if self.weights_:
w = self.weights_(self)[newaxis]
else:
w = 1.0
c = self.c/self.source.sample_freq
# temp array for the grid co-ordinates
gpos = self.grid.pos()
# max delay span = sum of
# max diagonal lengths of circumscribing cuboids for grid and micarray
dmax = sqrt(((gpos.max(1)-gpos.min(1))**2).sum())
dmax += sqrt(((self.mpos.mpos.max(1)-self.mpos.mpos.min(1))**2).sum())
dmax = int(dmax/c)+1 # max index span
zi = empty((dmax+num, self.source.numchannels), \
dtype=float) #working copy of data
o = empty((num, self.grid.size), dtype=float) # output array
temp = empty((self.grid.size, self.source.numchannels), dtype=float)
d_index2 = arange(self.mpos.num_mics, dtype=int) # second index (static)
offset = dmax+num # start offset for working array
ooffset = 0 # offset for output array
# generators for trajectory, starting at time zero
start_t = 0.0
g = self.trajectory.traj( start_t, delta_t=1/self.source.sample_freq)
g1 = self.trajectory.traj( start_t, delta_t=1/self.source.sample_freq,
der=1)
rflag = (self.rvec == 0).all() #flag translation vs. rotation
data = self.source.result(num)
flag = True
while flag:
# yield output array if full
if ooffset == num:
yield o
ooffset = 0
if rflag:
# grid is only translated, not rotated
tpos = gpos + array(g.next())[:, newaxis]
else:
# grid is both translated and rotated
loc = array(g.next()) #translation
dx = array(g1.next()) #direction vector (new x-axis)
dy = cross(self.rvec, dx) # new y-axis
dz = cross(dx, dy) # new z-axis
RM = array((dx, dy, dz)).T # rotation matrix
RM /= sqrt((RM*RM).sum(0)) # column normalized
tpos = dot(RM, gpos)+loc[:, newaxis] # rotation+translation
rm = self.env.r( self.c, tpos, self.mpos.mpos)
r0 = self.env.r( self.c, tpos)
delays = rm/c
d_index = array(delays, dtype=int) # integer index
d_interp1 = delays % 1 # 1st coeff for lin interpolation
d_interp2 = 1-d_interp1 # 2nd coeff for lin interpolation
amp = (w/(rm*rm)).sum(1) * r0
amp = 1.0/(amp[:, newaxis]*rm) # multiplication factor
# now, we have to make sure that the needed data is available
while offset+d_index.max()+2>dmax+num:
# copy remaining samples in front of next block
zi[0:dmax] = zi[-dmax:]
# the offset is adjusted by one block length
offset -= num
# test if data generator is exhausted
try:
# get next data
block = data.next()
except StopIteration:
flag = False
break
                # samples in the block, equal to num except for the last block
ns = block.shape[0]
zi[dmax:dmax+ns] = block * w# copy data to working array
else:
# the next line needs to be implemented faster
# it eats half of the time
temp[:, :] = (zi[offset+d_index, d_index2]*d_interp1 \
+ zi[offset+d_index+1, d_index2]*d_interp2)*amp
if self.r_diag:
# simple sum and remove autopower
o[ooffset] = clip(temp.sum(-1)**2 - \
(temp**2).sum(-1), 1e-100, 1e+100)
else:
# simple sum
o[ooffset] = temp.sum(-1)**2
offset += 1
ooffset += 1
# remaining data chunk
yield o[:ooffset]
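# Illustrative sketch only (made-up sizes, not part of the original module): the
# generators above keep a working buffer whose first dmax samples overlap with
# the previous block, so that delayed indices can always look back across block
# boundaries. The buffer bookkeeping in isolation, for a single channel:
def _block_overlap_sketch():
    from numpy import arange, zeros
    num, dmax = 8, 3 # block length and maximum delay-index span
    zi = zeros(dmax + num) # working buffer, first dmax samples = overlap
    stream = arange(32, dtype=float) # fake single-channel signal
    buffers = []
    for start in range(0, len(stream), num):
        zi[0:dmax] = zi[-dmax:] # carry the tail of the previous block forward
        zi[dmax:dmax + num] = stream[start:start + num] # append the new block
        buffers.append(zi.copy())
    return buffers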
class IntegratorSectorTime( TimeInOut ):
"""
Provides an Integrator in the time domain.
"""
#: :class:`~acoular.grids.Grid`-derived object that provides the grid locations.
grid = Trait(RectGrid,
desc="beamforming grid")
#: List of sectors in grid
sectors = List()
    #: Clipping, in decibels relative to maximum (negative values)
clip = Float(-350.0)
#: Number of channels in output (= number of sectors).
numchannels = Property( depends_on = ['sectors', ])
# internal identifier
digest = Property(
depends_on = ['sectors', 'clip', 'grid.digest', 'source.digest', \
'__class__'],
)
traits_view = View(
[
[Item('sectors', style='custom')],
[Item('grid', style='custom'), '-<>'],
'|'
],
title='Integrator',
buttons = OKCancelButtons
)
@cached_property
def _get_digest( self ):
return digest(self)
@cached_property
def _get_numchannels ( self ):
return len(self.sectors)
def result( self, num=1 ):
"""
Python generator that yields the source output integrated over the given
sectors, block-wise.
Parameters
----------
num : integer, defaults to 1
This parameter defines the size of the blocks to be yielded
(i.e. the number of samples per block)
Returns
-------
Samples in blocks of shape (num, :attr:`numchannels`).
:attr:`numchannels` is the number of sectors.
The last block may be shorter than num.
"""
inds = [self.grid.indices(*sector) for sector in self.sectors]
gshape = self.grid.shape
o = empty((num, self.numchannels), dtype=float) # output array
for r in self.source.result(num):
ns = r.shape[0]
mapshape = (ns,) + gshape
rmax = r.max()
rmin = rmax * 10**(self.clip/10.0)
r = where(r>rmin, r, 0.0)
i = 0
for ind in inds:
h = r[:].reshape(mapshape)[ (s_[:],) + ind ]
o[:ns, i] = h.reshape(h.shape[0], -1).sum(axis=1)
i += 1
yield o[:ns]
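# Illustrative sketch only (made-up values, not part of the original module):
# IntegratorSectorTime keeps map values that lie within `clip` dB of the current
# maximum and sums them per sector. The thresholding step in isolation:
def _sector_clip_sketch():
    from numpy import array, where
    r = array((1e-3, 2e-5, 5e-2, 1e-9)) # squared map values for one sample
    clip_db = -60.0 # keep values within 60 dB of the maximum
    rmin = r.max() * 10**(clip_db/10.0) # absolute threshold
    return where(r > rmin, r, 0.0).sum() # integrated value for one sector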
| 317
| 0
| 208
|
2aec0d6638ad1f50011ab2d5daffdf486d01bf2e
| 774
|
py
|
Python
|
src/classifier/bin.py
|
Glavin001/IssueBot
|
4dfb31bda009e254b38dc8394669f7debeac65a4
|
[
"MIT"
] | 15
|
2016-06-08T02:21:19.000Z
|
2019-10-16T19:14:41.000Z
|
src/classifier/bin.py
|
Glavin001/IssueBot
|
4dfb31bda009e254b38dc8394669f7debeac65a4
|
[
"MIT"
] | 23
|
2015-03-15T04:35:33.000Z
|
2019-01-18T15:38:31.000Z
|
src/classifier/bin.py
|
Glavin001/IssueBot
|
4dfb31bda009e254b38dc8394669f7debeac65a4
|
[
"MIT"
] | 7
|
2015-06-16T19:35:21.000Z
|
2021-08-09T06:13:16.000Z
|
# Dependencies
import sys, json
import classifier
ignore_labels = ['duplicate', 'in-progress', 'pending-publication', 'published', 'waiting-for-user-information', 'high priority']
# simple JSON echo script
for line in sys.stdin:
payload = json.loads(line)
( action, params ) = payload
results = {}
if action == "train_labels":
( user, repo, issues, ignore_labels ) = params
results = classifier.train_issues(user, repo, issues, ignore_labels)
elif action == "predict_labels":
( user, repo, issues ) = params
results = classifier.predict_labels_for_issues(user, repo, issues)
elif action == "similarity":
issues = params[0]
results = classifier.issue_similarity(issues)
print json.dumps(results)
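# Illustrative sketch of one request line in the [action, params] shape the loop
# above expects; the user, repo and issue list below are placeholders, not real data.
example_request_line = json.dumps(["predict_labels", ["someuser", "somerepo", []]])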
| 33.652174
| 129
| 0.674419
|
# Dependencies
import sys, json
import classifier
ignore_labels = ['duplicate', 'in-progress', 'pending-publication', 'published', 'waiting-for-user-information', 'high priority']
# simple JSON echo script
for line in sys.stdin:
payload = json.loads(line)
( action, params ) = payload
results = {}
if action == "train_labels":
( user, repo, issues, ignore_labels ) = params
results = classifier.train_issues(user, repo, issues, ignore_labels)
elif action == "predict_labels":
( user, repo, issues ) = params
results = classifier.predict_labels_for_issues(user, repo, issues)
elif action == "similarity":
issues = params[0]
results = classifier.issue_similarity(issues)
print json.dumps(results)
| 0
| 0
| 0
|
d8da40a9c00637fbb4cc02915ca28a22d0d1e041
| 462
|
py
|
Python
|
tests/test_dash_compoment_utils.py
|
robpoll/webviz-config
|
220a4ade8d2a8f5351a302080707e28e6d216cb5
|
[
"MIT"
] | null | null | null |
tests/test_dash_compoment_utils.py
|
robpoll/webviz-config
|
220a4ade8d2a8f5351a302080707e28e6d216cb5
|
[
"MIT"
] | null | null | null |
tests/test_dash_compoment_utils.py
|
robpoll/webviz-config
|
220a4ade8d2a8f5351a302080707e28e6d216cb5
|
[
"MIT"
] | null | null | null |
import pytest
from webviz_config.utils._dash_component_utils import calculate_slider_step
@pytest.mark.parametrize(
"min_value,max_value,steps,res",
[
(5, 10, 100, 0.01),
(-10, -5, 100, 0.01),
(-10, 10, 100, 0.1)
]
)
| 22
| 75
| 0.621212
|
import pytest
from webviz_config.utils._dash_component_utils import calculate_slider_step
@pytest.mark.parametrize(
"min_value,max_value,steps,res",
[
(5, 10, 100, 0.01),
(-10, -5, 100, 0.01),
(-10, 10, 100, 0.1)
]
)
def test_calculate_slider_step(min_value, max_value, steps, res):
assert(
calculate_slider_step(
min_value=min_value, max_value=max_value, steps=steps
)
== res
)
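# Illustrative sketch only: the parametrized cases above are consistent with a
# step equal to the largest power of ten not exceeding (max - min) / steps.
# This is an assumption about the helper's behaviour, not its actual source:
def _slider_step_sketch(min_value, max_value, steps):
    import math
    return 10 ** math.floor(math.log10((max_value - min_value) / steps))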
| 184
| 0
| 22
|
f48ac06ade6c3699af7d7457c6a52059497ed0d7
| 31,685
|
py
|
Python
|
data/crud.py
|
delta-reporter/delta-core
|
e8fdcf01d3fd246c08fa30bbed84d66a85099167
|
[
"Apache-2.0"
] | 1
|
2021-03-12T10:55:48.000Z
|
2021-03-12T10:55:48.000Z
|
data/crud.py
|
delta-reporter/delta-core
|
e8fdcf01d3fd246c08fa30bbed84d66a85099167
|
[
"Apache-2.0"
] | 29
|
2020-04-20T10:20:20.000Z
|
2021-06-04T10:17:17.000Z
|
data/crud.py
|
delta-reporter/delta-core
|
e8fdcf01d3fd246c08fa30bbed84d66a85099167
|
[
"Apache-2.0"
] | 3
|
2020-05-25T14:43:47.000Z
|
2021-09-02T15:27:31.000Z
|
import models
import datetime
from app import db
from data import constants
from logzero import logger
from sqlalchemy import exc
from sqlalchemy.sql import func
from data.subqueries import TestCounts
import re
import os
import pytz
utc = pytz.UTC
| 33.108673
| 98
| 0.59716
|
import models
import datetime
from app import db
from data import constants
from logzero import logger
from sqlalchemy import exc
from sqlalchemy.sql import func
from data.subqueries import TestCounts
import re
import os
import pytz
utc = pytz.UTC
def session_commit():
try:
db.session.commit()
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
class Create:
@staticmethod
def create_project(name, status):
project = models.Project(
name=name,
project_status_id=constants.Constants.project_status["Active"]
if not status
else constants.Constants.project_status[status],
)
db.session.add(project)
session_commit()
return project.id
@staticmethod
def create_launch(name, data, project_id):
launch = models.Launch(
name=name,
data=data,
launch_status_id=constants.Constants.launch_status["In Process"],
project_id=project_id,
)
db.session.add(launch)
session_commit()
return launch.id
@staticmethod
def create_test_run(data, start_datetime, test_type, environment, launch_id):
test_run = models.TestRun(
data=data,
start_datetime=start_datetime,
test_type=test_type,
environment=environment,
test_run_status_id=constants.Constants.test_run_status["Running"],
launch_id=launch_id,
)
db.session.add(test_run)
session_commit()
return test_run.id
@staticmethod
def create_test_suite(name, project_id, data, test_type):
test_suite = models.TestSuite(
name=name, project_id=project_id, data=data, test_type=test_type
)
db.session.add(test_suite)
session_commit()
return test_suite.id
@staticmethod
def create_test_suite_history(data, start_datetime, test_run_id, test_suite_id):
test_suite_history = models.TestSuiteHistory(
data=data,
start_datetime=start_datetime,
test_suite_status_id=constants.Constants.test_suite_status["Running"],
test_run_id=test_run_id,
test_suite_id=test_suite_id,
)
db.session.add(test_suite_history)
session_commit()
return test_suite_history.id
@staticmethod
def create_test(name, data, test_suite_id):
test = models.MotherTest(name=name, data=data, test_suite_id=test_suite_id)
db.session.add(test)
session_commit()
return test.id
@staticmethod
def create_test_history(
start_datetime, test_id, test_run_id, test_suite_history_id, parameters, status
):
test_history = models.Test(
start_datetime=start_datetime,
mother_test_id=test_id,
test_status_id=constants.Constants.test_status["Running"]
if not status
else constants.Constants.test_status[status],
test_resolution_id=constants.Constants.test_resolution["Not set"],
test_run_id=test_run_id,
test_suite_history_id=test_suite_history_id,
parameters=parameters,
)
db.session.add(test_history)
session_commit()
return test_history.id
@staticmethod
def create_test_retry(**kwargs):
test_retry = models.TestRetries(
test_id=kwargs.get("test_history_id"),
retry_count=kwargs.get("retry_count"),
start_datetime=kwargs.get("start_datetime"),
end_datetime=kwargs.get("end_datetime"),
trace=kwargs.get("trace"),
message=kwargs.get("message"),
error_type=kwargs.get("error_type"),
media=kwargs.get("media"),
)
db.session.add(test_retry)
session_commit()
return test_retry.id
@staticmethod
def store_media_file(name, type, file):
new_file = models.Media(name=name, type=type, data=file)
db.session.add(new_file)
session_commit()
Delete.delete_media_older_than_days()
return new_file.id
@staticmethod
def create_note(mother_test_id, note_text, added_by):
note = models.Notes(
mother_test_id=mother_test_id, note_text=note_text, added_by=added_by
)
db.session.add(note)
session_commit()
return note.id
@staticmethod
def create_smart_link(
project_id,
environment,
smart_link,
label,
color,
filtered,
location,
datetime_format,
):
smart_link_element = models.SmartLinks(
project_id=project_id,
environment=environment,
smart_link=smart_link,
label=label,
color=color,
filtered=filtered,
location_id=location,
datetime_format=datetime_format,
)
db.session.add(smart_link_element)
session_commit()
return smart_link_element.id
class Read:
@staticmethod
def projects():
try:
projects = models.Project.query.all()
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
projects = None
return projects
@staticmethod
def project_by_id(project_id):
try:
project = models.Project.query.filter_by(id=project_id).first()
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
project = None
return project
@staticmethod
def project_by_name(project_name):
try:
project = models.Project.query.filter_by(name=project_name).first()
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
project = None
return project
@staticmethod
def launch_by_id(launch_id):
try:
launch = models.Launch.query.filter_by(id=launch_id).first()
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
launch = None
return launch
@staticmethod
def launch_by_project_id(project_id):
t_counts = TestCounts()
try:
launch = (
db.session.query(
models.Launch,
models.TestRun,
t_counts.total_tests_by_test_run_id.c.tests_count,
t_counts.failed_tests_by_test_run_id.c.failed_tests_count,
t_counts.passed_tests_by_test_run_id.c.passed_tests_count,
t_counts.running_tests_by_test_run_id.c.running_tests_count,
t_counts.incomplete_tests_by_test_run_id.c.incomplete_tests_count,
t_counts.skipped_tests_by_test_run_id.c.skipped_tests_count,
)
.outerjoin(
t_counts.total_tests_by_test_run_id,
models.TestRun.id
== t_counts.total_tests_by_test_run_id.c.test_run_id,
)
.outerjoin(
t_counts.failed_tests_by_test_run_id,
models.TestRun.id
== t_counts.failed_tests_by_test_run_id.c.test_run_id,
)
.outerjoin(
t_counts.passed_tests_by_test_run_id,
models.TestRun.id
== t_counts.passed_tests_by_test_run_id.c.test_run_id,
)
.outerjoin(
t_counts.running_tests_by_test_run_id,
models.TestRun.id
== t_counts.running_tests_by_test_run_id.c.test_run_id,
)
.outerjoin(
t_counts.incomplete_tests_by_test_run_id,
models.TestRun.id
== t_counts.incomplete_tests_by_test_run_id.c.test_run_id,
)
.outerjoin(
t_counts.skipped_tests_by_test_run_id,
models.TestRun.id
== t_counts.skipped_tests_by_test_run_id.c.test_run_id,
)
.filter(models.TestRun.launch_id == models.Launch.id)
.filter(models.Launch.project_id == project_id)
.order_by(models.Launch.id.desc())
.limit(100)
.all()
)
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
launch = None
return launch
@staticmethod
def test_run_by_id(test_run_id):
t_counts = TestCounts()
try:
test_run = (
db.session.query(
models.TestRun,
t_counts.total_tests_by_test_run_id.c.tests_count,
t_counts.failed_tests_by_test_run_id.c.failed_tests_count,
t_counts.passed_tests_by_test_run_id.c.passed_tests_count,
t_counts.running_tests_by_test_run_id.c.running_tests_count,
t_counts.incomplete_tests_by_test_run_id.c.incomplete_tests_count,
t_counts.skipped_tests_by_test_run_id.c.skipped_tests_count,
)
.outerjoin(
t_counts.total_tests_by_test_run_id,
models.TestRun.id
== t_counts.total_tests_by_test_run_id.c.test_run_id,
)
.outerjoin(
t_counts.failed_tests_by_test_run_id,
models.TestRun.id
== t_counts.failed_tests_by_test_run_id.c.test_run_id,
)
.outerjoin(
t_counts.passed_tests_by_test_run_id,
models.TestRun.id
== t_counts.passed_tests_by_test_run_id.c.test_run_id,
)
.outerjoin(
t_counts.running_tests_by_test_run_id,
models.TestRun.id
== t_counts.running_tests_by_test_run_id.c.test_run_id,
)
.outerjoin(
t_counts.incomplete_tests_by_test_run_id,
models.TestRun.id
== t_counts.incomplete_tests_by_test_run_id.c.test_run_id,
)
.outerjoin(
t_counts.skipped_tests_by_test_run_id,
models.TestRun.id
== t_counts.skipped_tests_by_test_run_id.c.test_run_id,
)
.filter(models.TestRun.id == test_run_id)
.order_by(models.TestRun.id)
)
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
test_run = None
return test_run
@staticmethod
def simple_test_run_by_id(test_run_id):
try:
test_run = models.TestRun.query.filter_by(id=test_run_id).first()
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
test_run = None
return test_run.__dict__
@staticmethod
def test_run_by_launch_id(launch_id):
t_counts = TestCounts()
try:
test_run = (
db.session.query(
models.TestRun,
t_counts.total_tests_by_test_run_id.c.tests_count,
t_counts.failed_tests_by_test_run_id.c.failed_tests_count,
t_counts.passed_tests_by_test_run_id.c.passed_tests_count,
t_counts.running_tests_by_test_run_id.c.running_tests_count,
t_counts.incomplete_tests_by_test_run_id.c.incomplete_tests_count,
t_counts.skipped_tests_by_test_run_id.c.skipped_tests_count,
)
.outerjoin(
t_counts.total_tests_by_test_run_id,
models.TestRun.id
== t_counts.total_tests_by_test_run_id.c.test_run_id,
)
.outerjoin(
t_counts.failed_tests_by_test_run_id,
models.TestRun.id
== t_counts.failed_tests_by_test_run_id.c.test_run_id,
)
.outerjoin(
t_counts.passed_tests_by_test_run_id,
models.TestRun.id
== t_counts.passed_tests_by_test_run_id.c.test_run_id,
)
.outerjoin(
t_counts.running_tests_by_test_run_id,
models.TestRun.id
== t_counts.running_tests_by_test_run_id.c.test_run_id,
)
.outerjoin(
t_counts.incomplete_tests_by_test_run_id,
models.TestRun.id
== t_counts.incomplete_tests_by_test_run_id.c.test_run_id,
)
.outerjoin(
t_counts.skipped_tests_by_test_run_id,
models.TestRun.id
== t_counts.skipped_tests_by_test_run_id.c.test_run_id,
)
.filter(models.TestRun.launch_id == launch_id)
.order_by(models.TestRun.id)
)
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
test_run = None
return test_run
@staticmethod
def test_runs_failed_by_launch_id(launch_id):
try:
failed_runs = models.TestRun.query.filter_by(
launch_id=launch_id,
test_run_status_id=constants.Constants.test_run_status["Failed"],
).all()
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
failed_runs = None
return failed_runs
@staticmethod
def test_suite_by_id(test_suite_id):
try:
test_suite = models.TestSuite.query.filter_by(id=test_suite_id).first()
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
test_suite = None
return test_suite
@staticmethod
def test_suite_by_name_project_test_type(name, project_id, test_type):
try:
test_suite = models.TestSuite.query.filter_by(
name=name, project_id=project_id, test_type=test_type
).first()
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
test_suite = None
return test_suite
@staticmethod
def test_suite_history_by_suite_id_test_run(test_run_id, test_suite_id):
try:
test_suite_history = models.TestSuiteHistory.query.filter_by(
test_run_id=test_run_id, test_suite_id=test_suite_id
).first()
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
test_suite_history = None
return test_suite_history
@staticmethod
def mother_test_by_name(test_name):
try:
mother_test = models.MotherTest.query.filter_by(name=test_name).first()
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
mother_test = None
return mother_test
@staticmethod
def test_by_id(test_id):
try:
test = models.Test.query.filter_by(id=test_id).first()
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
test = None
return test.__dict__
@staticmethod
def test_suite_history_by_test_run(test_run_id):
try:
test_suite_history = (
db.session.query(models.TestRun, models.TestSuiteHistory)
.filter(models.TestRun.id == models.TestSuiteHistory.test_run_id)
.filter(models.TestRun.id == test_run_id)
.all()
)
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
test_suite_history = None
return test_suite_history
@staticmethod
def test_suite_history_by_test_status_and_test_run_id(
test_suite_status_id, test_run_id
):
try:
test_suite_history = (
db.session.query(models.TestRun, models.TestSuiteHistory)
.filter(models.TestRun.id == models.TestSuiteHistory.test_run_id)
.filter(models.TestRun.id == test_run_id)
.filter(
models.TestSuiteHistory.test_suite_status_id == test_suite_status_id
)
.all()
)
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
test_suite_history = None
return test_suite_history
@staticmethod
def test_history_by_test_id_and_test_run_id(test_id, test_run_id):
try:
test_history = models.Test.query.filter_by(
mother_test_id=test_id, test_run_id=test_run_id
).first()
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
test_history = None
return test_history
@staticmethod
def test_suite_history_by_array_of_test_statuses_and_test_run_id(
test_statuses_ids, test_run_id
):
array_of_statuses = re.findall(
"\d+", test_statuses_ids
) # getting all numbers from string
t_counts = TestCounts()
try:
test_history = (
db.session.query(
models.TestSuiteHistory,
t_counts.total_tests_by_test_suite_history_id.c.tests_count,
t_counts.failed_tests_by_test_suite_history_id.c.failed_tests_count,
t_counts.passed_tests_by_test_suite_history_id.c.passed_tests_count,
t_counts.running_tests_by_test_suite_history_id.c.running_tests_count,
t_counts.incomplete_tests_by_test_suite_history_id.c.incomplete_tests_count,
t_counts.skipped_tests_by_test_suite_history_id.c.skipped_tests_count,
)
.outerjoin(
t_counts.total_tests_by_test_suite_history_id,
models.TestSuiteHistory.id
== t_counts.total_tests_by_test_suite_history_id.c.test_suite_history_id,
)
.outerjoin(
t_counts.failed_tests_by_test_suite_history_id,
models.TestSuiteHistory.id
== t_counts.failed_tests_by_test_suite_history_id.c.test_suite_history_id,
)
.outerjoin(
t_counts.passed_tests_by_test_suite_history_id,
models.TestSuiteHistory.id
== t_counts.passed_tests_by_test_suite_history_id.c.test_suite_history_id,
)
.outerjoin(
t_counts.running_tests_by_test_suite_history_id,
models.TestSuiteHistory.id
== t_counts.running_tests_by_test_suite_history_id.c.test_suite_history_id,
)
.outerjoin(
t_counts.incomplete_tests_by_test_suite_history_id,
models.TestSuiteHistory.id
== t_counts.incomplete_tests_by_test_suite_history_id.c.test_suite_history_id,
)
.outerjoin(
t_counts.skipped_tests_by_test_suite_history_id,
models.TestSuiteHistory.id
== t_counts.skipped_tests_by_test_suite_history_id.c.test_suite_history_id,
)
.filter(models.TestRun.id == models.TestSuiteHistory.test_run_id)
.filter(models.TestSuiteHistory.test_run_id == models.Test.test_run_id)
.filter(models.TestSuiteHistory.id == models.Test.test_suite_history_id)
.filter(models.TestRun.id == test_run_id)
.filter(models.Test.test_status_id.in_(array_of_statuses))
.join(models.TestSuite)
.order_by(models.TestSuite.name.asc())
.all()
)
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
test_history = None
return test_history
@staticmethod
def tests_by_array_of_test_statuses_and_test_suite_history_id(
test_statuses_ids, test_suite_history_id
):
array_of_statuses = re.findall(
"\d+", test_statuses_ids
) # getting all numbers from string
try:
test_history = (
db.session.query(models.Test)
.filter(models.Test.test_suite_history_id == test_suite_history_id)
.filter(models.Test.test_status_id.in_(array_of_statuses))
.order_by(models.Test.id.desc())
.all()
)
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
test_history = None
return test_history
@staticmethod
def test_history_by_test_resolution_id(test_resolution_id):
try:
test_history = models.Test.query.filter_by(
test_resolution_id=test_resolution_id
).all()
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
test_history = None
return test_history
@staticmethod
def test_history_by_test_suite_id(test_suite_id):
try:
test_history = (
                db.session.query(models.Test, models.MotherTest, models.TestSuite)
.filter(models.MotherTest.test_suite_id == models.TestSuite.id)
                .filter(models.Test.mother_test_id == models.MotherTest.id)
.filter(models.MotherTest.test_suite_id == test_suite_id)
.all()
)
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
test_history = None
return test_history
@staticmethod
def test_history_failed_by_test_suite_history_id(test_suite_history_id):
try:
test_history = (
db.session.query(models.Test)
.filter(models.Test.test_suite_history_id == models.TestSuiteHistory.id)
.filter(
models.Test.test_status_id
== constants.Constants.test_status["Failed"]
)
.filter(models.TestSuiteHistory.id == test_suite_history_id)
.all()
)
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
test_history = None
return test_history
@staticmethod
def test_retries_by_test_history_id(test_history_id):
try:
test_retries = models.TestRetries.query.filter_by(
test_history_id=test_history_id
).all()
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
test_retries = None
return test_retries
@staticmethod
def file_by_media_id(media_id):
try:
file_data = models.Media.query.filter_by(id=media_id).first()
except exc.SQLAlchemyError as e:
logger.error(e)
            db.session.rollback()
            file_data = None
        return file_data
@staticmethod
def test_history_by_test_id(test_id):
try:
test_history = (
models.Test.query.filter(
models.Test.mother_test_id == test_id,
models.Test.test_status_id
!= constants.Constants.test_status["Running"],
)
.order_by(models.Test.end_datetime.desc())
.limit(10)
.all()
)
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
test_history = None
return test_history
@staticmethod
def notes_by_mother_test_id(mother_test_id):
try:
notes = models.Notes.query.filter_by(mother_test_id=mother_test_id).all()
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
notes = None
return notes
@staticmethod
def smart_links_by_project_id(project_id):
try:
smart_links = models.SmartLinks.query.filter_by(project_id=project_id).all()
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
smart_links = None
return smart_links
@staticmethod
def smart_links_by_project_id_and_location(project_id, location):
try:
smart_links = models.SmartLinks.query.filter_by(
project_id=project_id, location_id=location
).all()
except exc.SQLAlchemyError as e:
logger.error(e)
db.session.rollback()
smart_links = None
return smart_links
class Update:
@staticmethod
def update_test_history(
test_history_id, end_datetime, trace, file, message, error_type, test_status,
):
test_history = db.session.query(models.Test).get(test_history_id)
test_history.end_datetime = end_datetime
test_history.trace = trace
test_history.file = file
test_history.message = message
test_history.error_type = error_type
test_history.test_status_id = constants.Constants.test_status.get(test_status)
session_commit()
return test_history
@staticmethod
def increase_test_history_retry(test_history_id):
test_history = db.session.query(models.Test).get(test_history_id)
test_history.retries = (
1 if test_history.retries is None else test_history.retries + 1
)
session_commit()
return test_history.retries
@staticmethod
def clean_test_history_media(test_history_id):
test_history = db.session.query(models.Test).get(test_history_id)
test_history.media = None
session_commit()
@staticmethod
def update_test_history_resolution(test_id, test_resolution):
test_history = db.session.query(models.Test).get(test_id)
test_history.test_resolution_id = constants.Constants.test_resolution.get(
test_resolution
)
session_commit()
return test_history
@staticmethod
def update_general_test_resolution(mother_test_id, test_resolution):
test = db.session.query(models.MotherTest).get(mother_test_id)
test.test_resolution_id = constants.Constants.test_resolution.get(
test_resolution
)
session_commit()
return test
@staticmethod
def update_test_suite_history(test_suite_history_id, end_datetime, data):
failed_test = Read.test_history_failed_by_test_suite_history_id(
test_suite_history_id
)
if failed_test:
test_suite_status = constants.Constants.test_suite_status["Failed"]
else:
test_suite_status = constants.Constants.test_suite_status["Successful"]
test_suite_history = db.session.query(models.TestSuiteHistory).get(
test_suite_history_id
)
test_suite_history.end_datetime = end_datetime
test_suite_history.data = data
test_suite_history.test_suite_status_id = test_suite_status
session_commit()
return test_suite_history.id
@staticmethod
def update_test_run(test_run_id, end_datetime, test_run_status):
test_run = db.session.query(models.TestRun).get(test_run_id)
test_run.end_datetime = end_datetime
if test_run_status is not None:
test_run.test_run_status_id = constants.Constants.test_run_status.get(
test_run_status
)
session_commit()
return test_run.id
@staticmethod
def update_test_run_data(test_run_id, data):
test_run = db.session.query(models.TestRun).get(test_run_id)
test_run.data = data
session_commit()
return test_run.id
@staticmethod
def update_launch(launch_id, launch_status):
launch = db.session.query(models.Launch).get(launch_id)
launch.launch_status_id = constants.Constants.launch_status.get(launch_status)
session_commit()
return launch
@staticmethod
def add_media_to_test_history(test_history_id, media):
test_history = db.session.query(models.Test).get(test_history_id)
if test_history.media:
test_history.media.append(media)
else:
test_history.media = [media]
session_commit()
return test_history.id
@staticmethod
def update_test_data(test_id, data):
test = db.session.query(models.Test).get(test_id)
test.data = data
session_commit()
return test.id
@staticmethod
def update_project_name(id, name):
project = db.session.query(models.Project).get(id)
if project.name != name:
project.name = name
session_commit()
return project.name
@staticmethod
def update_test_flaky_flag(id, is_flaky):
test = db.session.query(models.MotherTest).get(id)
test.is_flaky = is_flaky
session_commit()
return test
@staticmethod
def update_smart_link(id, environment, smart_link, label, color, type, location):
smart_link_object = db.session.query(models.SmartLinks).get(id)
smart_link_object.environment = environment
smart_link_object.smart_link = smart_link
smart_link_object.label = label
smart_link_object.color = color
smart_link_object.type_id = type
smart_link_object.location_id = location
session_commit()
return smart_link_object
class Delete:
@staticmethod
def delete_project(project_id):
project = db.session.query(models.Project).get(project_id)
if project is None:
return "Project already deleted"
db.session.delete(project)
session_commit()
return "Project deleted successfully"
@staticmethod
def delete_smart_link(smart_link_id):
smart_link = db.session.query(models.SmartLinks).get(smart_link_id)
if smart_link is None:
return "SmartLink already deleted"
db.session.delete(smart_link)
session_commit()
return "SmartLink deleted successfully"
@staticmethod
def delete_media_older_than_days():
days = os.environ["DAYS_OLD_MEDIA_DELETE"]
epoch_time = utc.localize(datetime.datetime.today()) - datetime.timedelta(
days=int(days)
)
amount = models.Media.query.filter(
models.Media.created_datetime <= epoch_time
).count()
models.Media.query.filter(models.Media.created_datetime <= epoch_time).delete()
db.session.commit()
logger.warning(
"{} elements, older than {} days were deleted ".format(amount, days)
)
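# Illustrative sketch only of the retention cutoff used by
# delete_media_older_than_days() above; the day count here is a placeholder for
# the DAYS_OLD_MEDIA_DELETE environment variable.
def _media_cutoff_sketch(days="30"):
    cutoff = utc.localize(datetime.datetime.today()) - datetime.timedelta(
        days=int(days)
    )
    return cutoff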
| 28,834
| 2,482
| 115
|
746790b2f99b3eeda9ba31ceed4b0eb4232aa515
| 59
|
py
|
Python
|
settings.py
|
joevgear/test_rail_util
|
fb5e6d24798025de47e12499921d533286e64cf8
|
[
"MIT"
] | null | null | null |
settings.py
|
joevgear/test_rail_util
|
fb5e6d24798025de47e12499921d533286e64cf8
|
[
"MIT"
] | null | null | null |
settings.py
|
joevgear/test_rail_util
|
fb5e6d24798025de47e12499921d533286e64cf8
|
[
"MIT"
] | null | null | null |
TESTRAIL_API_TOKEN = ""
TESTRAIL_URL = ""
TESTRAIL_PWD = ""
| 19.666667
| 23
| 0.711864
|
TESTRAIL_API_TOKEN = ""
TESTRAIL_URL = ""
TESTRAIL_PWD = ""
| 0
| 0
| 0
|
dc0db130e806210874050218cce2f5ab9cc337fb
| 45,500
|
py
|
Python
|
experiment_setup.py
|
cambridgeltl/link-prediction_with_deep-learning
|
ff227fcc20ffa5e75ae456d1e8d533fd8204635d
|
[
"MIT"
] | 28
|
2017-11-17T16:36:23.000Z
|
2022-01-06T12:30:47.000Z
|
experiment_setup.py
|
cambridgeltl/link-prediction_with_deep-learning
|
ff227fcc20ffa5e75ae456d1e8d533fd8204635d
|
[
"MIT"
] | 2
|
2019-12-30T18:22:41.000Z
|
2020-04-15T07:40:35.000Z
|
experiment_setup.py
|
cambridgeltl/link-prediction_with_deep-learning
|
ff227fcc20ffa5e75ae456d1e8d533fd8204635d
|
[
"MIT"
] | 15
|
2018-06-07T13:09:53.000Z
|
2021-03-07T07:27:33.000Z
|
import sys
import csv
import random
"""
Sets up the data for the Link Prediction experiment.
Given the raw data file, it: 1) removes a specified number of edges for representation induction, 2) creates a specified number of negative examples from the remaining edges,
3) splits the positive and negative examples into train, dev and test sets as specified, and 4) writes the relevant train, dev and test files.
"""
#e.g of split: {'p1': [('date', '=', 1980)]}
#In MATADOR dataset, all Chemical identifiers can be integers but no proteins can be
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 46.571136
| 525
| 0.594396
|
import sys
import csv
import random
"""
Sets up the data for the Link Prediction experiment.
Given the raw data file, it: 1) removes a specified number of edges for representation induction, 2) creates a specified number of negative examples from the remaining edges,
3) splits the positive and negative examples into train, dev and test sets as specified, and 4) writes the relevant train, dev and test files.
"""
def argparser():
import argparse
ap = argparse.ArgumentParser()
ap.add_argument('-f', '--input-file', help='Input .tsv file')
ap.add_argument('-n', '--negative-ratio', default=1, help='Ratio of negative to positive examples to create (default 1)')
ap.add_argument('-tf', '--train-filename', default='train.tsv', help='name of file for training data (default train.tsv)')
ap.add_argument('-df', '--devel-filename', default='devel.tsv', help='name of file for development data (default devel.tsv)')
ap.add_argument('-tef', '--test-filename', default='test.tsv', help='name of file for testing data (default test.tsv)')
ap.add_argument('-vf', '--vertices-filename', default='vertices.txt', help='name of file containing mapping of all vertex number to name (default vertices.txt)')
ap.add_argument('-tgf', '--train-graph-filename', default='train_adj_mat.adjlist', help='name of file to store graph for representation induction for training (default train_adj_mat.adjlist)')
ap.add_argument('-tegf', '--test-graph-filename', default='test_adj_mat.adjlist', help='name of file to store graph for representation induction for testing (default test_adj_mat.adjlist)')
ap.add_argument('-s', '--split', default='70:10:20', help='Train/devel/test split (default 70:10:20)')
ap.add_argument('-il', '--induction_learning_split', default='50:50', help='Split for inducing representations and learning model (default 50:50)')
ap.add_argument('-v', '--values', default='0:1', help='Values for labels (default 0:1)')
ap.add_argument('-l', '--labels', default='O:I-LINK', help='Labels for values (default O:I-LINK)')
ap.add_argument('-x', '--indices', default='0:1', help='Index of tsv file where entities can be found (default 0:1)')
ap.add_argument('-a', '--attributes', default='score', help='Names of link attributes to be read from data (default score)')
ap.add_argument('-ci', '--col_indices', default='0:1', help='Index of tsv file where information on entities and attributes are (default 0:1)')
ap.add_argument('-cl', '--col_labels', default='entity1,entity2', help='Labels of the data in the tsv file where entities and attributes are (default 0:1)')
ap.add_argument('-sc', '--split_criteria', default=None, help='How to split the data for processing of form: split_name1:criteria_name,operator,criteria_value::split_name2:criteria1_name,operator1,criteria1_value|criteria2_name,operator2,criteria2_value (default None)')
ap.add_argument('-is', '--induction_split_names', default=None, help='Names of splits to be used for induction of form name1:name2 (default None)')
ap.add_argument('-tr', '--train_split_names', default=None, help='Names of splits to be used as training data of form name1:name2 (default None)')
ap.add_argument('-dn', '--devel_split_names', default=None, help='Names of splits to be used as development data of form name1:name2 (default None)')
ap.add_argument('-te', '--test_split_names', default=None, help='Names of splits to be used as testing data of form name1:name2 (default None)')
ap.add_argument('-b', '--balance-classes', default=False, help='Whether or not to balance the amount of examples of included class (default False)')
ap.add_argument('-c', '--maintain-connection', default=False, help='Whether or not to maintain connectivity in induced representations graph (default False)')
ap.add_argument('-g', '--graph_format', default='adjlist', help='Format of matrix representation output {adjlist, edgelist, line} (default adjlist)')
ap.add_argument('-gb', '--graph_bipartite', default=False, help='Process graph as bipartitie or not esp for creating negatives')
ap.add_argument('-sg', '--save-graph', default='True', help='Whether or not to save graph adjacency list in a file (default True)')
return ap
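# Illustrative invocation only; the input file name is a placeholder and every
# option shown is defined in argparser() above:
#   python experiment_setup.py -f interactions.tsv -n 1 -s 70:10:20 -il 50:50 -x 0:1 -g adjlist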
def read_data(input_data_file, attributes, col_labels, col_indices):
col_indices_vals = col_indices.split(':')
col_labels_vals = col_labels.split(',')
assert(len(col_indices_vals) == len(col_labels_vals)), "Lenghts of Labels ({}) and Indices ({}) do not match.".format(col_labels, col_indices)
indices = {}
for label, index in zip(col_labels_vals, col_indices_vals):
indices[label] = index
split_indices = col_indices.split(':')
assert(len(split_indices) >= 2), "Incorrect length for indices: {}".format(len(split_indices))
attribute_indices = {}
for attribute in attributes.split(':'):
attribute_indices[attribute] = -1
for num, index in enumerate(split_indices):
if num == 0:
entity1_index = int(index)
elif num == 1:
entity2_index = int(index)
else:
attribute_index_index = num - 2 #2 values of the indices (entity 1 and 2) are not part of the attributes list
attribute_indices[attributes.split(':')[attribute_index_index]] = int(index)
with open(input_data_file) as tsv:
entity1_lst = []
entity2_lst = []
self_referential_edges = 0
data = {}
for ind, line in enumerate(csv.reader(tsv, delimiter="\t")): #quoting=csv.QUOTE_NONE - If req to make data work, examine data
attribute_values = {}
entity1 = line[entity1_index].replace(' ', '_')
entity2 = line[entity2_index].replace(' ', '_')
#Check for attribute values
for attribute, index in attribute_indices.iteritems():
attribute_values[attribute] = line[index]
if entity1 == entity2:
self_referential_edges += 1
key1 = "%s::%s" % (entity1, entity2)
key2 = "%s::%s" % (entity2, entity1)
score = attribute_values['score'] if 'score' in attribute_values else None
if key1 not in data and key2 not in data:
if score:
try:
if int(score) == 0: #Remove possible unconnected nodes
continue
except:
continue
else:
if attributes == '':
attribute_values = 1
#Shuffle node order in key
if random.choice([0,1]) == 1:
data[key1] = attribute_values
else:
data[key2] = attribute_values
entity1_lst.append(entity1)
entity2_lst.append(entity2)
print("\n%s nodes read. %s edges read." % (len(set(entity1_lst + entity2_lst)), len(data)))
print("{}/{:,} ({}%) edges were self-referential.".format(self_referential_edges, len(data), (self_referential_edges/float(len(data)))*100 ))
return data
def _equals(data, criteria, criteria_value):
edges = []
for edge, attributes in data.iteritems():
assert(criteria in attributes), "Criteria {} not in attributes.".format(criteria)
if int(attributes[criteria]) == criteria_value:
edges.append(edge)
return edges
def _greaterthan(data, criteria, criteria_value):
edges = []
for edge, attributes in data.iteritems():
assert(criteria in attributes), "Criteria {} not in attributes.".format(criteria)
if int(attributes[criteria]) > criteria_value:
edges.append(edge)
return edges
def _lessthan(data, criteria, criteria_value):
edges = []
for edge, attributes in data.iteritems():
assert(criteria in attributes), "Criteria {} not in attributes.".format(criteria)
if int(attributes[criteria]) < criteria_value:
edges.append(edge)
return edges
#e.g of split: {'p1': [('date', '=', 1980)]}
def get_splits(data, split_criteria=None):
induction_edges, train_edges, test_edges = {}, {}, {}
#split data
split_data = {}
for name, criteria_lst in split_criteria.iteritems():
split_data[name] = []
for ind, criteria in enumerate(criteria_lst):
valid = []
if criteria[1] == '=':
valid += _equals(data, criteria[0], criteria[2])
elif criteria[1] == '>':
valid += _greaterthan(data, criteria[0], criteria[2])
elif criteria[1] == '<':
valid += _lessthan(data, criteria[0], criteria[2])
elif criteria[1] == '>=':
valid += _greaterthan(data, criteria[0], criteria[2]) + _equals(data, criteria[0], criteria[2])
elif criteria[1] == '<=':
valid += _lessthan(data, criteria[0], criteria[2]) + _equals(data, criteria[0], criteria[2])
else:
print("ERROR: Invalid criteria {} used.".format(criteria[1]))
if ind > 0:
valid = set(valid).intersection(set(split_data[name]))
split_data[name] = list(valid)
else:
split_data[name] += list(valid)
return split_data
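# Illustrative sketch only (made-up data): get_splits() expects edges keyed
# "entity1::entity2" with attribute dicts, and criteria of the form
# {split_name: [(attribute, operator, value), ...]}.
def _get_splits_demo():
    data = {'a::b': {'date': 1975}, 'c::d': {'date': 1995}}
    # selects edges whose 'date' attribute is 1980 or earlier -> {'old': ['a::b']}
    return get_splits(data, {'old': [('date', '<=', 1980)]})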
def get_induction_learning_edges(data, induction_learning_split, maintain_connection):
induction_edges, learning_edges = {}, {}
induction_split = int(induction_learning_split.split(':')[0])
learning_split = int(induction_learning_split.split(':')[1])
keys = data.keys()
if maintain_connection:
induction_keys = []
for key in keys:
entity1 = key.split('::')[0]
entity2 = key.split('::')[1]
if entity1 in induction_keys and entity2 in induction_keys:
#Both already have some connection
current_induction_ratio = (float(len(induction_edges))/(len(learning_edges) + len(induction_edges))) * 100
current_learning_ratio = (float(len(learning_edges))/(len(learning_edges) + len(induction_edges))) * 100
if current_induction_ratio > induction_split and abs(current_induction_ratio - induction_split) > 5:
#Induction edges ratio far above specified split place in learning
learning_edges[key] = data[key]
elif current_learning_ratio > learning_split and abs(current_learning_ratio - learning_split) > 5:
#learning edges ratio far above specified split place in induction
induction_edges[key] = data[key]
else:
#Both already have some connection, and ratio roughly in line with specified split place randomly
place = random.randrange(100)
if place < induction_split:
induction_edges[key] = data[key]
else:
learning_edges[key] = data[key]
else:
induction_edges[key] = data[key]
induction_keys.append(entity1)
induction_keys.append(entity2)
else:
for key in keys:
place = random.randrange(100)
if place < induction_split:
induction_edges[key] = data[key]
else:
learning_edges[key] = data[key]
print("\n%s Induction Edges. %s Learning edges." % (len(induction_edges), len(learning_edges)))
return induction_edges, learning_edges
def create_adjacency_matrix_file(induction_edges, train_edges, test_edges, train_graph_filename, test_graph_filename, graph_format, vertices_filename, save_graph):
print("Start of create adj matrices function: Training adj-mat edges %s. Testing adj-mat edges: %s" % (len(induction_edges), len(induction_edges) + len(train_edges)))
train_adj_mat = {} #Only induction edges
test_adj_mat = {} #Induction and training edges to create embeddings for testing
vertices = {}
node_index = 0
#Add induction edges to training adj_mat
for key, weight in induction_edges.iteritems():
entity1 = key.split('::')[0]
entity2 = key.split('::')[1]
if entity1 not in vertices:
node_index += 1
ent1_vertex_ind = node_index
vertices[entity1] = node_index
else:
ent1_vertex_ind = vertices[entity1]
if entity2 not in vertices:
node_index += 1
ent2_vertex_ind = node_index
vertices[entity2] = node_index
else:
ent2_vertex_ind = vertices[entity2]
if ent1_vertex_ind in train_adj_mat:
train_adj_mat[ent1_vertex_ind].append(str(ent2_vertex_ind))
else:
train_adj_mat[ent1_vertex_ind] = [str(ent2_vertex_ind)]
if ent2_vertex_ind in train_adj_mat:
train_adj_mat[ent2_vertex_ind].append(str(ent1_vertex_ind))
else:
train_adj_mat[ent2_vertex_ind] = [str(ent1_vertex_ind)]
print("\n%s induction vertices read." % len(vertices))
#Copy all train adj_mat to test adj_mat
for key, value in train_adj_mat.iteritems():
if key not in test_adj_mat:
test_adj_mat[key] = value
#Add training edges to testing adj_mat
new_addition = 0
already_added = 0
for key, weight in train_edges.iteritems():
entity1 = key.split('::')[0]
entity2 = key.split('::')[1]
if entity1 not in vertices:
node_index += 1
ent1_vertex_ind = node_index
vertices[entity1] = node_index
else:
ent1_vertex_ind = vertices[entity1]
if entity2 not in vertices:
node_index += 1
ent2_vertex_ind = node_index
vertices[entity2] = node_index
else:
ent2_vertex_ind = vertices[entity2]
if ent1_vertex_ind in test_adj_mat:
if str(ent2_vertex_ind) not in test_adj_mat[ent1_vertex_ind]:
test_adj_mat[ent1_vertex_ind].append(str(ent2_vertex_ind))
else:
new_addition += 1
test_adj_mat[ent1_vertex_ind] = [str(ent2_vertex_ind)]
if ent2_vertex_ind in test_adj_mat:
if str(ent1_vertex_ind) not in test_adj_mat[ent2_vertex_ind]:
test_adj_mat[ent2_vertex_ind].append(str(ent1_vertex_ind))
else:
new_addition += 1
test_adj_mat[ent2_vertex_ind] = [str(ent1_vertex_ind)]
print("\n%s induction and train vertices read." % len(vertices))
print("\nAfter adding training edges: %s vertices in Train Matrix representation. %s vertices in Test Matrix representation." % (len(train_adj_mat), len(test_adj_mat)))
#Add possible left out nodes to adj_mats (possible if connectivity not required)
unconnected_nodes = 0
connected_nodes = {}
learning_edges = {}
for edge_dict in [train_edges, test_edges]:
for key, value in edge_dict.iteritems():
learning_edges[key] = value
for key, weight in learning_edges.iteritems():
entity1 = key.split('::')[0]
entity2 = key.split('::')[1]
if entity1 not in vertices:
node_index += 1
unconnected_nodes += 1
vertices[entity1] = node_index
#Must be a totally new entity, add to both matrices
train_adj_mat[node_index] = []
test_adj_mat[node_index] = []
else:
n_index = vertices[entity1]
if n_index not in train_adj_mat:
train_adj_mat[n_index] = []
if n_index not in test_adj_mat:
test_adj_mat[n_index] = []
if entity1 not in connected_nodes:
connected_nodes[entity1] = 1
if entity2 not in vertices:
node_index += 1
unconnected_nodes += 1
vertices[entity2] = node_index
#Must be a totally new entity, add to both matrices
train_adj_mat[node_index] = []
test_adj_mat[node_index] = []
else:
n_index = vertices[entity2]
if n_index not in train_adj_mat:
train_adj_mat[n_index] = []
if n_index not in test_adj_mat:
test_adj_mat[n_index] = []
if entity2 not in connected_nodes:
connected_nodes[entity2] = 1
print("Ratio unconnected nodes to connected in learning is {:,}:{:,}.".format(unconnected_nodes, len(connected_nodes)))
print("\n%s total vertices read. %s vertices in Train Matrix representation. %s vertices in Test Matrix representation." % (len(vertices), len(train_adj_mat), len(test_adj_mat)))
train_output = ""
test_output = ""
train_output_cnt = 0
test_output_cnt = 0
for graph_format in ['adjlist', 'edgelist', 'line', 'sdne']:
train_output = ""
test_output = ""
train_output_cnt = 0
test_output_cnt = 0
train_graph_filename = 'train_adj_mat.{}'.format(graph_format)
test_graph_filename = 'test_adj_mat.{}'.format(graph_format)
if graph_format == 'adjlist':
for key, value in train_adj_mat.iteritems():
if len(value) > 0:
train_output_cnt += 1
train_output += "%s %s\n" % (key, ' '.join(value))
else:
train_output += "%s\n" % (key)
for key, value in test_adj_mat.iteritems():
if len(value) > 0:
test_output_cnt += 1
test_output += "%s %s\n" % (key, ' '.join(value))
else:
test_output += "%s\n" % (key)
print("Non zero train: %s. Non zero test: %s" % (train_output_cnt, test_output_cnt))
elif graph_format == 'edgelist':
for key, value in train_adj_mat.iteritems():
if len(value) > 0:
for node_edge in value:
train_output += "%s %s\n" % (key, node_edge)
else:
                    train_output += "%s %s\n" % (key, key) #Hack to create an edge for nodes which have no edges as node2vec does not create embeddings for them. NEEDED??
for key, value in test_adj_mat.iteritems():
if len(value) > 0:
for node_edge in value:
test_output += "%s %s\n" % (key, node_edge)
else:
                    test_output += "%s %s\n" % (key, key) #Hack to create an edge for nodes which have no edges as node2vec does not create embeddings for them. NEEDED??
elif graph_format == 'line':
#TODO: Add proper scores here
for key, value in train_adj_mat.iteritems():
if len(value) > 0:
for node_edge in value:
train_output += "%s %s %s\n" % (key, node_edge, 1)
else:
train_output += "%s %s %s\n" % (key, key, 0)
for key, value in test_adj_mat.iteritems():
if len(value) > 0:
for node_edge in value:
test_output += "%s %s %s\n" % (key, node_edge, 1)
else:
test_output += "%s %s %s\n" % (key, key, 0)
elif graph_format == 'sdne':
#Needs node and edge count at top of file and nodeid must begin at 0
node_cnt = len(train_adj_mat)
edge_cnt = 0
for key, value in train_adj_mat.iteritems():
key = str(int(key) - 1)
if len(value) > 0:
for node_edge in value:
node_edge = str(int(node_edge) - 1)
train_output += "%s %s\n" % (key, node_edge)
edge_cnt += 1
else:
train_output += "%s %s\n" % (key, key)
edge_cnt += 1
train_output = "%s %s\n" % (node_cnt, edge_cnt) + train_output
node_cnt = len(test_adj_mat)
edge_cnt = 0
for key, value in test_adj_mat.iteritems():
key = str(int(key) - 1)
if len(value) > 0:
for node_edge in value:
node_edge = str(int(node_edge) - 1)
test_output += "%s %s\n" % (key, node_edge)
edge_cnt += 1
else:
test_output += "%s %s\n" % (key, key)
edge_cnt += 1
test_output = "%s %s\n" % (node_cnt, edge_cnt) + test_output
fil2 = open(train_graph_filename, 'w')
fil2.write(train_output)
test_graph = open(test_graph_filename, 'w')
test_graph.write(test_output)
voutput = ""
fil3 = open(vertices_filename, 'w')
for vertex, index in vertices.iteritems():
voutput += "%s %s\n" % (index, vertex)
fil3.write(voutput)
if save_graph:
graph_output = ""
for key, value in test_adj_mat.iteritems():
if len(value) > 0:
graph_output += "%s %s\n" % (key, ' '.join(value))
else:
graph_output += "%s\n" % (key)
graph_file = open('graph.adjlist', 'w')
graph_file.write(graph_output)
graph_file.close()
def create_learning_splits(learning_edges, negative_ratio, balance_classes, train_filename, devel_filename, test_filename, tdt_split, values, labels, separation_fn=None):
keys = learning_edges.keys()
entity_set = set([k for key in keys for k in key.split('::')])
print("\n%s vertices in learning." % len(entity_set))
#Create different sets if graph is bipartite. Avoids creating unrealistic (easier) negatives.
entity1_lst, entity2_lst = None, None
if separation_fn:
entity1_lst, entity2_lst = separation_fn(entity_set)
#Create data files
train = open(train_filename, 'w')
devel = open(devel_filename, 'w')
test = open(test_filename, 'w')
header = "node1\tnode2\tlabel\n"
train_output = "%s" % header
devel_output = "%s" % header
test_output = "%s" % header
train_max = int(tdt_split.split(':')[0])
devel_max = train_max + int(tdt_split.split(':')[1])
test_max = devel_max + int(tdt_split.split(':')[2])
values = [int(val) for val in values.split(':')]
labels = labels.split(':')
assert (len(values) == len(labels)), "%s values and %s labels" % (len(values), len(labels))
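#Hedged worked example (not from the original code): with tdt_split='80:10:10',
#values='1:0' and labels='pos:neg', the thresholds become train_max=80, devel_max=90,
#test_max=100, so the uniform draw from [0, 100) below routes roughly 80/10/10
#percent of the edges into train/devel/test.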
#Split edges into train, devel, test
added_cnts = {}
train_cnts = {}
devel_cnts = {}
test_cnts = {}
train_entities = []
test_entities = []
train_edges = {}
devel_edges = {}
test_edges = {}
#Size of the smallest class, needed below when balance_classes is set
least_cnts = None
if balance_classes:
class_cnts = {}
for value in learning_edges.values():
class_cnts[value] = class_cnts.get(value, 0) + 1
least_cnts = min(class_cnts.values())
for key, value in learning_edges.iteritems():
if balance_classes:
if value in added_cnts:
if added_cnts[value] <= least_cnts:
added_cnts[value] += 1
else:
added_cnts[value] = 1
if added_cnts[value] > least_cnts:
continue
entity1 = key.split('::')[0]
entity2 = key.split('::')[1]
place = random.randrange(100)
if place < train_max:
train_edges[key] = 1
if value in train_cnts:
train_cnts[value] += 1
else:
train_cnts[value] = 1
train_entities.append(entity1)
train_entities.append(entity2)
elif place < devel_max:
devel_edges[key] = 1
if value in devel_cnts:
devel_cnts[value] += 1
else:
devel_cnts[value] = 1
elif place < test_max:
test_edges[key] = 1
if value in test_cnts:
test_cnts[value] += 1
else:
test_cnts[value] = 1
test_entities.append(entity1)
test_entities.append(entity2)
else:
print("WARNING: Item not placed in train, devel or test due to place index of: %s" % place)
#Create train examples
pos_cnt = len(train_edges)
entity_lst = list(entity_set)
noise_edges = 0
while len(train_edges) < pos_cnt * (int(negative_ratio) + 1):
ent1 = random.choice(entity_lst) if not entity1_lst else random.choice(entity1_lst)
ent2 = random.choice(entity_lst) if not entity2_lst else random.choice(entity2_lst)
if ent1 != ent2:
key1 = "%s::%s" % (ent1, ent2)
key2 = "%s::%s" % (ent2, ent1)
if key1 not in train_edges and key2 not in train_edges:
#Shuffle node order in key
if random.choice([0,1]) == 1:
train_edges[key1] = 0
#Track whether it is a 'noise edge'
if key1 in devel_edges or key1 in test_edges:
noise_edges += 1
else:
train_edges[key2] = 0
#Track whether it is a 'noise edge'
if key2 in devel_edges or key2 in test_edges:
noise_edges += 1
print("There were {}/{:,} ({}%) noise edges in training data.".format(noise_edges, len(train_edges), (noise_edges/float(len(train_edges)))*100 if len(train_edges) > 0 else 0))
#Create devel examples
pos_cnt = len(devel_edges)
noise_edges = 0
while len(devel_edges) < pos_cnt * (int(negative_ratio) + 1):
ent1 = random.choice(entity_lst) if not entity1_lst else random.choice(entity1_lst)
ent2 = random.choice(entity_lst) if not entity2_lst else random.choice(entity2_lst)
if ent1 != ent2:
key1 = "%s::%s" % (ent1, ent2)
key2 = "%s::%s" % (ent2, ent1)
if key1 not in train_edges and key1 not in devel_edges and key2 not in train_edges and key2 not in devel_edges:
#Shuffle node order in key
if random.choice([0,1]) == 1:
devel_edges[key1] = 0
#Track whether it is a 'noise edge'
if key1 in test_edges:
noise_edges += 1
else:
devel_edges[key2] = 0
#Track whether it is a 'noise edge'
if key2 in test_edges:
noise_edges += 1
print("There were {}/{:,} ({}%) noise edges in devel data.".format(noise_edges, len(devel_edges), (noise_edges/float(len(devel_edges)))*100 if len(devel_edges) > 0 else 0))
#Create test examples
pos_cnt = len(test_edges)
noise_edges = 0
while len(test_edges) < pos_cnt * (int(negative_ratio) + 1):
ent1 = random.choice(entity_lst) if not entity1_lst else random.choice(entity1_lst)
ent2 = random.choice(entity_lst) if not entity2_lst else random.choice(entity2_lst)
if ent1 != ent2:
key1 = "%s::%s" % (ent1, ent2)
key2 = "%s::%s" % (ent2, ent1)
if key1 not in train_edges and key1 not in devel_edges and key1 not in test_edges \
and key2 not in train_edges and key2 not in devel_edges and key2 not in test_edges:
#Shuffle node order in key
if random.choice([0,1]) == 1:
test_edges[key1] = 0
else:
test_edges[key2] = 0
train_cnts = {}
train_entities = []
for key, value in train_edges.iteritems():
entity1 = key.split('::')[0]
entity2 = key.split('::')[1]
assert (value in values), "Value %s not in values." % value
label = labels[values.index(value)]
entry = "%s\t%s\t%s\n" % (entity1, entity2, label)
train_output += entry
if value in train_cnts:
train_cnts[value] += 1
else:
train_cnts[value] = 1
train_entities.append(entity1)
train_entities.append(entity2)
devel_cnts = {}
devel_entities = []
for key, value in devel_edges.iteritems():
entity1 = key.split('::')[0]
entity2 = key.split('::')[1]
assert (value in values), "Value %s not in values." % value
label = labels[values.index(value)]
entry = "%s\t%s\t%s\n" % (entity1, entity2, label)
devel_output += entry
if value in devel_cnts:
devel_cnts[value] += 1
else:
devel_cnts[value] = 1
devel_entities.append(entity1)
devel_entities.append(entity2)
test_cnts = {}
test_entities = []
print("There are %s keys in test edges" % len(test_edges))
for key, value in test_edges.iteritems():
entity1 = key.split('::')[0]
entity2 = key.split('::')[1]
assert (value in values), "Value %s not in values." % value
label = labels[values.index(value)]
entry = "%s\t%s\t%s\n" % (entity1, entity2, label)
test_output += entry
if value in test_cnts:
test_cnts[value] += 1
else:
test_cnts[value] = 1
test_entities.append(entity1)
test_entities.append(entity2)
train_set = set(train_entities)
test_set = set(test_entities)
train_set_size = len(train_set)
test_set_size = len(test_set)
train_test_intersection_size = len(train_set.intersection(test_set))
print("\nEntities in train only: {:,}".format(train_set_size - train_test_intersection_size))
print("Entities in test only: {:,}".format(test_set_size - train_test_intersection_size))
print("Entities in both train and test: {:,}".format(train_test_intersection_size))
print("\nClass items counts:")
print("Train counts:")
for k, v in train_cnts.iteritems():
print("{}\t: {:,}".format(k,v))
print("Devel counts:")
for k, v in devel_cnts.iteritems():
print("{}\t: {:,}".format(k,v))
print("Test counts:")
for k, v in test_cnts.iteritems():
print("{}\t: {:,}".format(k,v))
train.write(train_output)
devel.write(devel_output)
test.write(test_output)
#Get only positive train edges
pos_train = {}
for edge, score in train_edges.iteritems():
if score > 0:
pos_train[edge] = score
pos_test = {}
for edge, score in test_edges.iteritems():
if score > 0:
pos_test[edge] = score
print("End of creating learning splits. Train edges count: %s. Test edges count: %s" % (len(pos_train), len(pos_test)))
return pos_train, pos_test
def create_learning_files(induction_edges, train_edges, devel_edges, test_edges, negative_ratio, train_filename, devel_filename, test_filename, values, labels, fold_values=True, separation_fn=None):
keys = induction_edges.keys() + train_edges.keys() + devel_edges.keys() + test_edges.keys()
entity_set = set([k for key in keys for k in key.split('::')])
print("\n%s vertices in learning." % len(entity_set))
entity1_lst, entity2_lst = None, None
if separation_fn:
entity1_lst, entity2_lst = separation_fn(entity_set)
if fold_values:
for edge_set in [train_edges, devel_edges, test_edges]:
for key, value in edge_set.iteritems():
if value > 0:
edge_set[key] = 1
else:
edge_set[key] = 0
#Create data files
train = open(train_filename, 'w')
devel = open(devel_filename, 'w')
test = open(test_filename, 'w')
header = "node1\tnode2\tlabel\n"
train_output = "%s" % header
devel_output = "%s" % header
test_output = "%s" % header
values = [int(val) for val in values.split(':')]
labels = labels.split(':')
assert (len(values) == len(labels)), "%s values and %s labels" % (len(values), len(labels))
#Create train examples
pos_cnt = len(train_edges)
entity_lst = list(entity_set)
noise_edges = 0
while len(train_edges) < pos_cnt * (int(negative_ratio) + 1):
ent1 = random.choice(entity_lst) if not entity1_lst else random.choice(entity1_lst)
ent2 = random.choice(entity_lst) if not entity2_lst else random.choice(entity2_lst)
if ent1 != ent2:
key1 = "%s::%s" % (ent1, ent2)
key2 = "%s::%s" % (ent2, ent1)
if key1 not in induction_edges and key1 not in train_edges and key2 not in induction_edges and key2 not in train_edges:
#Shuffle node order in key
if random.choice([0,1]) == 1:
train_edges[key1] = 0
#Track whether it is a 'noise edge'
if key1 in devel_edges or key1 in test_edges:
noise_edges += 1
else:
train_edges[key2] = 0
#Track whether it is a 'noise edge'
if key2 in devel_edges or key2 in test_edges:
noise_edges += 1
print("There were {}/{:,} ({}%) noise edges in training data.".format(noise_edges, len(train_edges), (noise_edges/float(len(train_edges)))*100 if len(train_edges) > 0 else 0))
#Create devel examples
pos_cnt = len(devel_edges)
noise_edges = 0
while len(devel_edges) < pos_cnt * (int(negative_ratio) + 1):
ent1 = random.choice(entity_lst) if not entity1_lst else random.choice(entity1_lst)
ent2 = random.choice(entity_lst) if not entity2_lst else random.choice(entity2_lst)
if ent1 != ent2:
key1 = "%s::%s" % (ent1, ent2)
key2 = "%s::%s" % (ent2, ent1)
if key1 not in induction_edges and key1 not in train_edges and key1 not in devel_edges and key2 not in induction_edges and key2 not in train_edges and key2 not in devel_edges:
#Shuffle node order in key
if random.choice([0,1]) == 1:
devel_edges[key1] = 0
#Track whether it is a 'noise edge'
if key1 in test_edges:
noise_edges += 1
else:
devel_edges[key2] = 0
#Track whether it is a 'noise edge'
if key2 in test_edges:
noise_edges += 1
print("There were {}/{:,} ({}%) noise edges in devel data.".format(noise_edges, len(devel_edges), (noise_edges/float(len(devel_edges)))*100 if len(devel_edges) > 0 else 0))
#Create test examples
pos_cnt = len(test_edges)
noise_edges = 0
while len(test_edges) < pos_cnt * (int(negative_ratio) + 1):
ent1 = random.choice(entity_lst) if not entity1_lst else random.choice(entity1_lst)
ent2 = random.choice(entity_lst) if not entity2_lst else random.choice(entity2_lst)
if ent1 != ent2:
key1 = "%s::%s" % (ent1, ent2)
key2 = "%s::%s" % (ent2, ent1)
if key1 not in induction_edges and key1 not in train_edges and key1 not in devel_edges and key1 not in test_edges \
and key2 not in induction_edges and key2 not in train_edges and key2 not in devel_edges and key2 not in test_edges:
#Shuffle node order in key
if random.choice([0,1]) == 1:
test_edges[key1] = 0
else:
test_edges[key2] = 0
train_cnts = {}
train_entities = []
for key, value in train_edges.iteritems():
entity1 = key.split('::')[0]
entity2 = key.split('::')[1]
assert (value in values), "Value %s not in values." % value
label = labels[values.index(value)]
entry = "%s\t%s\t%s\n" % (entity1, entity2, label)
train_output += entry
if value in train_cnts:
train_cnts[value] += 1
else:
train_cnts[value] = 1
train_entities.append(entity1)
train_entities.append(entity2)
devel_cnts = {}
devel_entities = []
for key, value in devel_edges.iteritems():
entity1 = key.split('::')[0]
entity2 = key.split('::')[1]
assert (value in values), "Value %s not in values." % value
label = labels[values.index(value)]
entry = "%s\t%s\t%s\n" % (entity1, entity2, label)
devel_output += entry
if value in devel_cnts:
devel_cnts[value] += 1
else:
devel_cnts[value] = 1
devel_entities.append(entity1)
devel_entities.append(entity2)
test_cnts = {}
test_entities = []
for key, value in test_edges.iteritems():
entity1 = key.split('::')[0]
entity2 = key.split('::')[1]
assert (value in values), "Value %s not in values." % value
label = labels[values.index(value)]
entry = "%s\t%s\t%s\n" % (entity1, entity2, label)
test_output += entry
if value in test_cnts:
test_cnts[value] += 1
else:
test_cnts[value] = 1
test_entities.append(entity1)
test_entities.append(entity2)
train_set = set(train_entities)
test_set = set(test_entities)
train_set_size = len(train_set)
test_set_size = len(test_set)
train_test_intersection_size = len(train_set.intersection(test_set))
print("\nEntities in train only: {:,}".format(train_set_size - train_test_intersection_size))
print("Entities in test only: {:,}".format(test_set_size - train_test_intersection_size))
print("Entities in both train and test: {:,}".format(train_test_intersection_size))
print("\nClass items counts:")
print("Train counts:")
for k, v in train_cnts.iteritems():
print("{}\t: {:,}".format(k,v))
print("Devel counts:")
for k, v in devel_cnts.iteritems():
print("{}\t: {:,}".format(k,v))
print("Test counts:")
for k, v in test_cnts.iteritems():
print("{}\t: {:,}".format(k,v))
train.write(train_output)
devel.write(devel_output)
test.write(test_output)
return train_edges, test_edges
#In the MATADOR dataset, all chemical identifiers can be parsed as integers but protein identifiers cannot
def matador_separation(entities):
proteins = []
chemicals = []
for entity_name in entities:
try:
int(entity_name)
chemicals.append(entity_name)
except:
proteins.append(entity_name)
print("For MATADOR dataset: %s proteins and %s chemicals." % (len(proteins), len(chemicals)))
return proteins, chemicals
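#Hedged illustration with made-up identifiers (not part of the original file):
#matador_separation({'12345', 'P05067', '678'}) would return
#(['P05067'], ['12345', '678']) up to set iteration order, because only the
#numeric chemical ids survive the int() conversion.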
def setup_experiment(input_data_file, attributes, col_labels, col_indices, split_criteria, induction_split_names, train_split_names, devel_split_names, test_split_names, train_graph_filename, test_graph_filename, graph_format,
vertices_filename, train_filename, devel_filename, test_filename, negative_ratio, values, labels, induction_learning_split, maintain_connection, balance_classes, tdt_split, save_graph, separation_fn=None):
#split_name1:criteria_name,operator,criteria_value::split_name2:criteria_name,operator,criteria_value
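#Hedged example of that format (split and criteria names are hypothetical):
#'old:year,<,2015::new:year,>=,2015' is parsed below into
#{'old': [('year', '<', 2015)], 'new': [('year', '>=', 2015)]}; several criteria
#for one split can be joined with '|'.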
#Prepare split criteria data structure
split_criteria_dict = None
if split_criteria:
split_criteria_dict = {}
criteria_lst = split_criteria.split('::')
for criteria in criteria_lst:
split = criteria.split(':')
assert(len(split) == 2), "Criteria split into {} is invalid.".format(len(split))
split_name = split[0]
split_details_lst = split[1].split('|')
split_criteria_dict[split_name] = []
for detail in split_details_lst:
split_details = detail.split(',')
assert(len(split_details) == 3), "Criteria details split into {} is invalid.".format(len(split_details))
criteria_name = split_details[0]
operator = split_details[1]
criteria_value = split_details[2]
split_criteria_dict[split_name].append((criteria_name, operator, int(criteria_value)))
data = read_data(input_data_file, attributes, col_labels, col_indices)
if split_criteria:
print("Splitting by criteria.")
attribute = attributes.split(':')
if len(attribute) > 1:
raise NotImplementedError('Multiple Attributes not implemented.')
else:
attribute = attribute[0]
split_data = get_splits(data, split_criteria_dict)
assert(induction_split_names and train_split_names and test_split_names), "induction_split_names and train_split_names and devel_split_names and test_split_names must be set if split_criteria set."
induction_edges = {}
learning_edges = {}
train_edges = {}
devel_edges = {}
test_edges = {}
induction_split_names = induction_split_names.split(':')
train_split_names = train_split_names.split(':')
devel_split_names = devel_split_names.split(':') if devel_split_names else []
test_split_names = test_split_names.split(':')
for name in induction_split_names:
if name not in split_data:
print("ERROR: Name {} not in data (Split Names: {}).".format(name, split_data.keys()))
break
for key in split_data[name]:
induction_edges[key] = data[key][attribute]
print("{:,} induction edges.".format(len(induction_edges)))
for name in train_split_names:
if name not in split_data:
print("ERROR: Name {} not in data (Split Names: {}).".format(name, split_data.keys()))
break
for key in split_data[name]:
train_edges[key] = data[key][attribute]
for name in devel_split_names:
if name not in split_data:
print("ERROR: Name {} not in data (Split Names: {}).".format(name, split_data.keys()))
break
for key in split_data[name]:
devel_edges[key] = data[key][attribute]
for name in test_split_names:
if name not in split_data:
print("ERROR: Name {} not in data (Split Names: {}).".format(name, split_data.keys()))
break
for key in split_data[name]:
test_edges[key] = data[key][attribute]
for edge_set in [train_edges, devel_edges, test_edges]:
for key in edge_set:
learning_edges[key] = data[key][attribute]
create_adjacency_matrix_file(induction_edges, train_edges, test_edges, train_graph_filename, test_graph_filename, graph_format, vertices_filename, save_graph)
create_learning_files(induction_edges, train_edges, devel_edges, test_edges, negative_ratio, train_filename, devel_filename, test_filename, values, labels, separation_fn=separation_fn)
else:
print("Splitting by size.")
induction_edges, learning_edges = get_induction_learning_edges(data, induction_learning_split, maintain_connection)
train_edges, test_edges = create_learning_splits(learning_edges, negative_ratio, balance_classes, train_filename, devel_filename, test_filename, tdt_split, values, labels, separation_fn=separation_fn)
create_adjacency_matrix_file(induction_edges, train_edges, test_edges, train_graph_filename, test_graph_filename, graph_format, vertices_filename, save_graph)
def main(argv):
args = argparser().parse_args(argv[1:])
if args.graph_bipartite:
setup_experiment(args.input_file, args.attributes, args.col_labels, args.col_indices, args.split_criteria, args.induction_split_names, args.train_split_names, args.devel_split_names, args.test_split_names, args.train_graph_filename,
args.test_graph_filename, args.graph_format, args.vertices_filename, args.train_filename, args.devel_filename, args.test_filename, args.negative_ratio, args.values, args.labels, args.induction_learning_split, args.maintain_connection, args.balance_classes, args.split, args.save_graph, matador_separation)
else:
setup_experiment(args.input_file, args.attributes, args.col_labels, args.col_indices, args.split_criteria, args.induction_split_names, args.train_split_names, args.devel_split_names, args.test_split_names, args.train_graph_filename,args.test_graph_filename, args.graph_format, args.vertices_filename, args.train_filename, args.devel_filename, args.test_filename, args.negative_ratio, args.values, args.labels, args.induction_learning_split, args.maintain_connection, args.balance_classes, args.split, args.save_graph)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 44,573
| 0
| 309
|
4605f51ceb7bfa7b5d4f7657c9b428121e83d39b
| 784
|
py
|
Python
|
tools/lines.py
|
mamaddeveloper/teleadmin
|
a5823a7d27f7304825757a1e3cc7a5f50b5534ec
|
[
"MIT"
] | null | null | null |
tools/lines.py
|
mamaddeveloper/teleadmin
|
a5823a7d27f7304825757a1e3cc7a5f50b5534ec
|
[
"MIT"
] | null | null | null |
tools/lines.py
|
mamaddeveloper/teleadmin
|
a5823a7d27f7304825757a1e3cc7a5f50b5534ec
|
[
"MIT"
] | null | null | null |
import random
import codecs
| 26.133333
| 97
| 0.612245
|
import random
import codecs
class LinesAbstract:
def __init__(self, path_or_list):
if isinstance(path_or_list, str):
self.lines = list([line.strip() for line in codecs.open(path_or_list, "r", "utf-8")])
else:
self.lines = list(path_or_list)
self.max = len(self.lines)
def __next__(self):
raise Exception("Abstract")
class LinesSeq(LinesAbstract):
def __init__(self, path):
super().__init__(path)
self.__index = -1
def __next__(self):
self.__index += 1
if self.__index >= self.max:
self.__index = 0
return self.lines[self.__index]
class LinesSeqRnd(LinesSeq):
def __init__(self, path):
super().__init__(path)
random.shuffle(self.lines)
| 540
| 15
| 201
|
e57b119c7f3bb96c441604c0b54c53af20889096
| 526
|
py
|
Python
|
intro_list.py
|
BeenashPervaiz/Command_Line_Task
|
a603fbdd06717ff157ecd72881d08329413fd82c
|
[
"MIT"
] | null | null | null |
intro_list.py
|
BeenashPervaiz/Command_Line_Task
|
a603fbdd06717ff157ecd72881d08329413fd82c
|
[
"MIT"
] | null | null | null |
intro_list.py
|
BeenashPervaiz/Command_Line_Task
|
a603fbdd06717ff157ecd72881d08329413fd82c
|
[
"MIT"
] | null | null | null |
#list ----> we can store int, float, string, ...
#ordered collection of items ---> data structure
numbers = [1, 2, 3, 4, 5]
print(numbers)
print(numbers[2])#we can also access by indexing
words = ["Beenash", 'Pervaiz', "Hanan"]
print(words)
print(words[:2]) # by slicing
mixed = [1, 2, 3, 4, "five", "six",2.5, None]
print(mixed)
print(mixed[-1])# negative indexing
#change the data of the list
mixed[1] = 'two'
print(mixed)
#if we replace almost all of the data in the list
numbers[:5] = ['one', 'two','three','four','five']
print(numbers)
| 26.3
| 50
| 0.665399
|
#list ----> we can store int, float, string, ...
#ordered collection of items ---> data structure
numbers = [1, 2, 3, 4, 5]
print(numbers)
print(numbers[2])#we can also access by indexing
words = ["Beenash", 'Pervaiz', "Hanan"]
print(words)
print(words[:2]) # by slicing
mixed = [1, 2, 3, 4, "five", "six",2.5, None]
print(mixed)
print(mixed[-1])# negative indexing
#change the data of the list
mixed[1] = 'two'
print(mixed)
#if we replace almost all of the data in the list
numbers[:5] = ['one', 'two','three','four','five']
print(numbers)
| 0
| 0
| 0
|
8967e76251d4e2585ac4bb6805bc0086542f758a
| 136
|
py
|
Python
|
lms_aadi_postgres/contacts/contacts_controllers/contacts_delete.py
|
hcmuleva/personal-profile
|
051b5a2f36b927951691f48abe584beb8bc25440
|
[
"MIT"
] | null | null | null |
lms_aadi_postgres/contacts/contacts_controllers/contacts_delete.py
|
hcmuleva/personal-profile
|
051b5a2f36b927951691f48abe584beb8bc25440
|
[
"MIT"
] | 3
|
2020-07-13T17:46:32.000Z
|
2020-07-26T10:30:59.000Z
|
lms_aadi_postgres/contacts/contacts_controllers/contacts_delete.py
|
hcmuleva/personal-profile
|
051b5a2f36b927951691f48abe584beb8bc25440
|
[
"MIT"
] | null | null | null |
from contacts.contacts_modules import delete_contact
to_delete = delete_contact.DeleteContact()
delete = to_delete.delete_contact("1")
| 27.2
| 52
| 0.838235
|
from contacts.contacts_modules import delete_contact
to_delete = delete_contact.DeleteContact()
delete = to_delete.delete_contact("1")
| 0
| 0
| 0
|
7a7177762b8c827b4e6e320a52cbf73bac71fbbc
| 9,322
|
py
|
Python
|
autolung/main_window.py
|
jcalendo/autolung
|
9a3a10d2f4d5618e91d7c805b04211faa6f54300
|
[
"MIT"
] | null | null | null |
autolung/main_window.py
|
jcalendo/autolung
|
9a3a10d2f4d5618e91d7c805b04211faa6f54300
|
[
"MIT"
] | null | null | null |
autolung/main_window.py
|
jcalendo/autolung
|
9a3a10d2f4d5618e91d7c805b04211faa6f54300
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MainWindow.ui'
#
# Created by: PyQt5 UI code generator 5.12.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| 56.49697
| 201
| 0.695559
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MainWindow.ui'
#
# Created by: PyQt5 UI code generator 5.12.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(742, 736)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.config_button = QtWidgets.QPushButton(self.centralwidget)
self.config_button.setGeometry(QtCore.QRect(600, 160, 75, 31))
self.config_button.setObjectName("config_button")
self.output_button = QtWidgets.QPushButton(self.centralwidget)
self.output_button.setGeometry(QtCore.QRect(600, 200, 75, 31))
self.output_button.setObjectName("output_button")
self.img_directory_button = QtWidgets.QPushButton(self.centralwidget)
self.img_directory_button.setGeometry(QtCore.QRect(600, 120, 75, 31))
self.img_directory_button.setObjectName("img_directory_button")
self.header_label = QtWidgets.QLabel(self.centralwidget)
self.header_label.setGeometry(QtCore.QRect(200, 0, 351, 41))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.header_label.setFont(font)
self.header_label.setObjectName("header_label")
self.subheader_label = QtWidgets.QLabel(self.centralwidget)
self.subheader_label.setGeometry(QtCore.QRect(30, 30, 691, 41))
font = QtGui.QFont()
font.setItalic(True)
self.subheader_label.setFont(font)
self.subheader_label.setObjectName("subheader_label")
self.dir_label = QtWidgets.QLabel(self.centralwidget)
self.dir_label.setGeometry(QtCore.QRect(30, 120, 241, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.dir_label.setFont(font)
self.dir_label.setObjectName("dir_label")
self.config_label = QtWidgets.QLabel(self.centralwidget)
self.config_label.setGeometry(QtCore.QRect(30, 160, 281, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.config_label.setFont(font)
self.config_label.setObjectName("config_label")
self.output_label = QtWidgets.QLabel(self.centralwidget)
self.output_label.setGeometry(QtCore.QRect(30, 200, 281, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.output_label.setFont(font)
self.output_label.setObjectName("output_label")
self.preview_label = QtWidgets.QLabel(self.centralwidget)
self.preview_label.setGeometry(QtCore.QRect(30, 240, 211, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.preview_label.setFont(font)
self.preview_label.setObjectName("preview_label")
self.yes_radioButton = QtWidgets.QRadioButton(self.centralwidget)
self.yes_radioButton.setGeometry(QtCore.QRect(320, 250, 41, 17))
self.yes_radioButton.setObjectName("yes_radioButton")
self.no_radioButton = QtWidgets.QRadioButton(self.centralwidget)
self.no_radioButton.setGeometry(QtCore.QRect(390, 250, 41, 17))
self.no_radioButton.setObjectName("no_radioButton")
self.line = QtWidgets.QFrame(self.centralwidget)
self.line.setGeometry(QtCore.QRect(0, 80, 801, 16))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.img_dir_text = QtWidgets.QTextEdit(self.centralwidget)
self.img_dir_text.setGeometry(QtCore.QRect(320, 120, 271, 31))
self.img_dir_text.setObjectName("img_dir_text")
self.config_text = QtWidgets.QTextEdit(self.centralwidget)
self.config_text.setGeometry(QtCore.QRect(320, 160, 271, 31))
self.config_text.setObjectName("config_text")
self.output_text = QtWidgets.QTextEdit(self.centralwidget)
self.output_text.setGeometry(QtCore.QRect(320, 200, 271, 31))
self.output_text.setObjectName("output_text")
self.img_dir_help = QtWidgets.QToolButton(self.centralwidget)
self.img_dir_help.setGeometry(QtCore.QRect(690, 120, 31, 31))
self.img_dir_help.setObjectName("img_dir_help")
self.config_help = QtWidgets.QToolButton(self.centralwidget)
self.config_help.setGeometry(QtCore.QRect(690, 160, 31, 31))
self.config_help.setObjectName("config_help")
self.output_help = QtWidgets.QToolButton(self.centralwidget)
self.output_help.setGeometry(QtCore.QRect(690, 200, 31, 31))
self.output_help.setObjectName("output_help")
self.preview_help = QtWidgets.QToolButton(self.centralwidget)
self.preview_help.setGeometry(QtCore.QRect(690, 240, 31, 31))
self.preview_help.setObjectName("preview_help")
self.line_2 = QtWidgets.QFrame(self.centralwidget)
self.line_2.setGeometry(QtCore.QRect(0, 290, 781, 16))
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.run_button = QtWidgets.QPushButton(self.centralwidget)
self.run_button.setGeometry(QtCore.QRect(30, 310, 331, 41))
self.run_button.setObjectName("run_button")
self.quit_button = QtWidgets.QPushButton(self.centralwidget)
self.quit_button.setGeometry(QtCore.QRect(380, 310, 341, 41))
self.quit_button.setObjectName("quit_button")
self.textBrowser = QtWidgets.QTextBrowser(self.centralwidget)
self.textBrowser.setGeometry(QtCore.QRect(30, 370, 691, 261))
self.textBrowser.setObjectName("textBrowser")
self.progressBar = QtWidgets.QProgressBar(self.centralwidget)
self.progressBar.setGeometry(QtCore.QRect(30, 650, 691, 41))
self.progressBar.setProperty("value", 24)
self.progressBar.setObjectName("progressBar")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 742, 21))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuHelp = QtWidgets.QMenu(self.menubar)
self.menuHelp.setObjectName("menuHelp")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionClose = QtWidgets.QAction(MainWindow)
self.actionClose.setObjectName("actionClose")
self.actionHelp = QtWidgets.QAction(MainWindow)
self.actionHelp.setObjectName("actionHelp")
self.menuFile.addAction(self.actionClose)
self.menuHelp.addAction(self.actionHelp)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.config_button.setText(_translate("MainWindow", "Select"))
self.output_button.setText(_translate("MainWindow", "Select"))
self.img_directory_button.setText(_translate("MainWindow", "Select"))
self.header_label.setText(_translate("MainWindow", "Autolung - automated lung image analysis"))
self.subheader_label.setText(_translate("MainWindow", "Written by Gennaro Calendo for the Laboratory of Marla R. Wolfson, MS, PhD at Lewis Katz School of Medicine at Temple University, 2019"))
self.dir_label.setText(_translate("MainWindow", "Select the folder containing lung images:"))
self.config_label.setText(_translate("MainWindow", "Select the configuration file for this image set:"))
self.output_label.setText(_translate("MainWindow", "Select the folder where the results will be saved:"))
self.preview_label.setText(_translate("MainWindow", "Would you like to save QC images?"))
self.yes_radioButton.setText(_translate("MainWindow", "Yes"))
self.no_radioButton.setText(_translate("MainWindow", "No"))
self.img_dir_help.setText(_translate("MainWindow", "?"))
self.config_help.setText(_translate("MainWindow", "?"))
self.output_help.setText(_translate("MainWindow", "?"))
self.preview_help.setText(_translate("MainWindow", "?"))
self.run_button.setText(_translate("MainWindow", "Start Analysis"))
self.quit_button.setText(_translate("MainWindow", "Quit"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.menuHelp.setTitle(_translate("MainWindow", "About"))
self.actionClose.setText(_translate("MainWindow", "Close"))
self.actionHelp.setText(_translate("MainWindow", "About"))
| 8,977
| 7
| 81
|
5ed2b6d22c8702f6b6e62de9b35b38767fb15a11
| 3,372
|
py
|
Python
|
IoT_Web/iotweb/views/devices_view.py
|
ertis-research/reliable-iot
|
c7a1f6bb69099797e2136522dbdda94c2e6a4895
|
[
"MIT"
] | 1
|
2019-04-26T10:28:57.000Z
|
2019-04-26T10:28:57.000Z
|
IoT_Web/iotweb/views/devices_view.py
|
ertis-research/reliable-iot
|
c7a1f6bb69099797e2136522dbdda94c2e6a4895
|
[
"MIT"
] | null | null | null |
IoT_Web/iotweb/views/devices_view.py
|
ertis-research/reliable-iot
|
c7a1f6bb69099797e2136522dbdda94c2e6a4895
|
[
"MIT"
] | 1
|
2019-04-26T10:29:35.000Z
|
2019-04-26T10:29:35.000Z
|
from django.shortcuts import redirect
from django.http import HttpResponse
from django.template import loader
from http.server import HTTPStatus
from .User import User
import iotweb.views.urls_and_messages as UM
import requests
import json
def devices(request, shdw_id):
"""
GET request: renders the physical devices page
POST request: It's a request to delete one physical device
"""
user = User.get_instance()
if request.POST: # REQUEST TO DELETE DEVICE
url = UM.DB_URL + 'deletePhysicalDevice/{}/'.format(request.POST['device_id'])
headers = {'Authorization': 'Token {}'.format(user.user_token)}
req = requests.get(url=url, headers=headers)
if req.status_code == 200:
return redirect('/viewDevices/{}/'.format(shdw_id))
else:
template = loader.get_template('../templates/error_page.html')
context = {'code_error': req.status_code,
'message': req.text,
'error_name': HTTPStatus(req.status_code).phrase,
'back': '/viewDevices/{}/'.format(shdw_id)
}
if req.status_code == 401:
context['message'] = context['message'] + UM.REFRESH_TOKEN
context['back'] = '/login/'
return HttpResponse(template.render(context, request))
else: # GET - RENDER THE TEMPLATE WITH PHYSICAL DEVICES
template = loader.get_template('../templates/physical_devices.html')
url = UM.DB_URL + 'getShadowDevices/{}/'.format(shdw_id)
headers = {'Authorization': 'Token {}'.format(user.user_token)}
req = requests.get(url=url, headers=headers)
if req.status_code == 200:
devices_list = json.loads(req.text)['devices']
context = {'devices': [], 'shadow_id': shdw_id, 'email': user.user_email}
if devices_list:
for device in devices_list:
json_object = json.loads(device)
# CHECK THIS AGAIN
url_token = UM.DB_URL + 'getTokenById/{}/'.format(json_object['token'])
res_tok = requests.get(url=url_token, headers=headers)
token = json.loads(res_tok.text)['token']
json_object['token'] = token # we replace token id with token value
json_object['id'] = json_object['_id']
url_status = UM.DB_URL + 'getDeviceStatus/{}/'.format(json_object['_id'])
req_status = requests.get(url=url_status, headers=headers)
json_object['STATUS'] = json.loads(req_status.text)['status']
context['devices'].append(json_object)
return HttpResponse(template.render(context, request))
else:
template = loader.get_template('../templates/error_page.html')
context = {'code_error': req.status_code,
'message': req.text,
'error_name': HTTPStatus(req.status_code).phrase,
'back': '/profile/'
}
if req.status_code == 401:
context['message'] = context['message'] + UM.REFRESH_TOKEN
context['back'] = '/login/'
return HttpResponse(template.render(context, request))
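# Hedged sketch of how this view could be wired up (the route mirrors the
# '/viewDevices/{}/' redirects above; the urls.py snippet itself is an assumption):
# from django.urls import path
# urlpatterns = [path('viewDevices/<str:shdw_id>/', devices)]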
| 39.670588
| 93
| 0.577995
|
from django.shortcuts import redirect
from django.http import HttpResponse
from django.template import loader
from http.server import HTTPStatus
from .User import User
import iotweb.views.urls_and_messages as UM
import requests
import json
def devices(request, shdw_id):
"""
GET request: renders the physical devices page
POST request: It's a request to delete one physical device
"""
user = User.get_instance()
if request.POST: # REQUEST TO DELETE DEVICE
url = UM.DB_URL + 'deletePhysicalDevice/{}/'.format(request.POST['device_id'])
headers = {'Authorization': 'Token {}'.format(user.user_token)}
req = requests.get(url=url, headers=headers)
if req.status_code == 200:
return redirect('/viewDevices/{}/'.format(shdw_id))
else:
template = loader.get_template('../templates/error_page.html')
context = {'code_error': req.status_code,
'message': req.text,
'error_name': HTTPStatus(req.status_code).phrase,
'back': '/viewDevices/{}/'.format(shdw_id)
}
if req.status_code == 401:
context['message'] = context['message'] + UM.REFRESH_TOKEN
context['back'] = '/login/'
return HttpResponse(template.render(context, request))
else: # GET - RENDER THE TEMPLATE WITH PHYSICAL DEVICES
template = loader.get_template('../templates/physical_devices.html')
url = UM.DB_URL + 'getShadowDevices/{}/'.format(shdw_id)
headers = {'Authorization': 'Token {}'.format(user.user_token)}
req = requests.get(url=url, headers=headers)
if req.status_code == 200:
devices_list = json.loads(req.text)['devices']
context = {'devices': [], 'shadow_id': shdw_id, 'email': user.user_email}
if devices_list:
for device in devices_list:
json_object = json.loads(device)
# CHECK THIS AGAIN
url_token = UM.DB_URL + 'getTokenById/{}/'.format(json_object['token'])
res_tok = requests.get(url=url_token, headers=headers)
token = json.loads(res_tok.text)['token']
json_object['token'] = token # we replace token id with token value
json_object['id'] = json_object['_id']
url_status = UM.DB_URL + 'getDeviceStatus/{}/'.format(json_object['_id'])
req_status = requests.get(url=url_status, headers=headers)
json_object['STATUS'] = json.loads(req_status.text)['status']
context['devices'].append(json_object)
return HttpResponse(template.render(context, request))
else:
template = loader.get_template('../templates/error_page.html')
context = {'code_error': req.status_code,
'message': req.text,
'error_name': HTTPStatus(req.status_code).phrase,
'back': '/profile/'
}
if req.status_code == 401:
context['message'] = context['message'] + UM.REFRESH_TOKEN
context['back'] = '/login/'
return HttpResponse(template.render(context, request))
| 0
| 0
| 0
|
3ee833341127b144f309c6b92f9f4b2ca5b72308
| 2,786
|
py
|
Python
|
mail_editor/helpers.py
|
maykinmedia/maykin-email-templates
|
946538cf07e8fbb6df577fe5ea22b970bf54b4a2
|
[
"BSD-3-Clause"
] | 4
|
2019-03-11T04:36:05.000Z
|
2021-08-13T07:58:53.000Z
|
mail_editor/helpers.py
|
maykinmedia/maykin-email-templates
|
946538cf07e8fbb6df577fe5ea22b970bf54b4a2
|
[
"BSD-3-Clause"
] | 15
|
2017-03-02T12:16:20.000Z
|
2022-01-21T15:03:44.000Z
|
mail_editor/helpers.py
|
maykinmedia/maykin-email-templates
|
946538cf07e8fbb6df577fe5ea22b970bf54b4a2
|
[
"BSD-3-Clause"
] | 3
|
2019-09-29T05:45:26.000Z
|
2020-08-20T03:15:17.000Z
|
import logging
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.template import loader
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from .models import MailTemplate
try:
from django.template.exceptions import TemplateDoesNotExist, TemplateSyntaxError
except ImportError:
from django.template.base import TemplateDoesNotExist, TemplateSyntaxError
logger = logging.getLogger(__name__)
| 31.303371
| 122
| 0.714645
|
import logging
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.template import loader
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from .models import MailTemplate
try:
from django.template.exceptions import TemplateDoesNotExist, TemplateSyntaxError
except ImportError:
from django.template.base import TemplateDoesNotExist, TemplateSyntaxError
logger = logging.getLogger(__name__)
def find_template(template_name, language=None):
if language:
template, _created = MailTemplate.objects.get_or_create(template_type=template_name, language=language, defaults={
'subject': get_subject(template_name),
'body': get_body(template_name),
'base_template_path': get_base_template_path(template_name),
})
else:
base_qs = MailTemplate.objects.filter(template_type=template_name, language__isnull=True)
if base_qs.exists():
template = base_qs.first()
else:
template = MailTemplate.objects.create(
template_type=template_name,
subject=get_subject(template_name),
body=get_body(template_name),
base_template_path=get_base_template_path(template_name),
)
return template
def get_subject(template_name):
config = settings.MAIL_EDITOR_CONF
template_config = config.get(template_name)
if template_config:
subject = template_config.get('subject_default')
if subject:
return subject
return _('Please fix this template')
def get_body(template_name):
config = settings.MAIL_EDITOR_CONF
template_config = config.get(template_name)
default = _('Your content here...')
if template_config:
body = template_config.get('body_default')
if body:
default = body
template = loader.get_template('mail/_outer_table.html')
current_site = get_current_site(None)
return template.render({'domain': current_site.domain, 'default': mark_safe(default)}, None)
def get_base_template_path(template_name):
config = settings.MAIL_EDITOR_CONF
template_config = config.get(template_name)
if template_config:
return template_config.get('base_template', '')
return ''
def base_template_loader(template_path, context):
default_path = 'mail/_base.html'
if not template_path:
template_path = default_path
try:
return loader.render_to_string(template_path, context)
except (TemplateDoesNotExist, TemplateSyntaxError) as e:
logger.exception("Base template could not be rendered")
return loader.render_to_string(default_path, context)
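# Hedged illustration of the settings shape these helpers read (keys inferred from
# the accessors above; the 'welcome' entry is hypothetical):
# MAIL_EDITOR_CONF = {
#     'welcome': {
#         'subject_default': 'Welcome!',
#         'body_default': 'Your content here...',
#         'base_template': 'mail/_base.html',
#     },
# }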
| 2,157
| 0
| 115
|
9f358abb718b928d28ae4fa0e34eca5edd3395b9
| 2,547
|
py
|
Python
|
Dell PowerVault MD32xxi/md32xx.py
|
tomaprzem/zabbix
|
8eb46c72393636e919ac70e1c75f051d9ad04e3d
|
[
"MIT"
] | null | null | null |
Dell PowerVault MD32xxi/md32xx.py
|
tomaprzem/zabbix
|
8eb46c72393636e919ac70e1c75f051d9ad04e3d
|
[
"MIT"
] | null | null | null |
Dell PowerVault MD32xxi/md32xx.py
|
tomaprzem/zabbix
|
8eb46c72393636e919ac70e1c75f051d9ad04e3d
|
[
"MIT"
] | null | null | null |
import subprocess
import sys
import getopt
import json
from pyzabbix import ZabbixMetric, ZabbixSender
ipa = ''
ipb = ''
host = ''
try:
opts, args = getopt.getopt(sys.argv[1:], "ha:b:n:")
except getopt.GetoptError:
print 'md32xx.py -a <IPControlerA> -b <IPControlerB> -n <HostNameInZabbix>'
sys.exit(2)
for opt, arg in opts:
if opt == "-h":
print 'md32xx.py -a <IPControlerA> -b <IPControlerB> -n <HostNameInZabbix>'
print 'Parameters:'
print '-a\t\tIP Controler A'
print '-b\t\tIP Controler B'
print '-n\t\tHostName in Zabbix'
print '-h\t\tThis help'
sys.exit()
elif opt == "-a":
ipa = arg
elif opt == "-b":
ipb = arg
elif opt == "-n":
host = arg
# else
# print 'md32xx.py -a <IPControlerA> -b <IPControlerB> -n <HostNameInZabbix>'
# sys.exit(2)
SMCLI = "/opt/IBM_DS/client/SMcli"
CONN = ipa + " " + ipb
cmd = SMCLI + " " + CONN + " -S -c \"set session performanceMonitorInterval=3 performanceMonitorIterations=1;show allLogicalDrives performanceStats;\""
#print cmd
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
strings = out.split("\n",7)[7]
#print strings
packet = []
output = []
for str in strings.splitlines():
arr = str.split(',')
output.append({"{#VALUE}": arr[0].replace("\"","")})
packet.append(ZabbixMetric(host, 'total.ios['+arr[0].replace("\"","")+']',arr[1].replace("\"", "")))
packet.append(ZabbixMetric(host, 'read['+arr[0].replace("\"","")+']',arr[2].replace("\"", "")))
packet.append(ZabbixMetric(host, 'read.cache.hit['+arr[0].replace("\"","")+']',arr[3].replace("\"", "")))
packet.append(ZabbixMetric(host, 'write.cache.hit['+arr[0].replace("\"","")+']',arr[4].replace("\"", "")))
packet.append(ZabbixMetric(host, 'ssd.cache.hit['+arr[0].replace("\"","")+']',arr[5].replace("\"", "")))
packet.append(ZabbixMetric(host, 'current.MBs['+arr[0].replace("\"","")+']',arr[6].replace("\"", "")))
packet.append(ZabbixMetric(host, 'max.MBs['+arr[0].replace("\"","")+']',arr[7].replace("\"", "")))
packet.append(ZabbixMetric(host, 'current.ios['+arr[0].replace("\"","")+']',arr[8].replace("\"", "")))
packet.append(ZabbixMetric(host, 'max.ios['+arr[0].replace("\"","")+']',arr[9].replace("\"", "")))
if arr[0].replace("\"","") == "STORAGE SUBSYSTEM TOTALS":
break
#print packet
ZabbixSender(zabbix_server='192.168.10.45', zabbix_port=10051).send(packet)
print '{"data":'
print json.dumps(output)
print '}'
| 41.754098
| 151
| 0.598351
|
import subprocess
import sys
import getopt
import json
from pyzabbix import ZabbixMetric, ZabbixSender
ipa = ''
ipb = ''
host = ''
try:
opts, args = getopt.getopt(sys.argv[1:], "ha:b:n:")
except getopt.GetoptError:
print 'md32xx.py -a <IPControlerA> -b <IPControlerB> -n <HostNameInZabbix>'
sys.exit(2)
for opt, arg in opts:
if opt == "-h":
print 'md32xx.py -a <IPControlerA> -b <IPControlerB> -n <HostNameInZabbix>'
print 'Parameters:'
print '-a\t\tIP Controler A'
print '-b\t\tIP Controler B'
print '-n\t\tHostName in Zabbix'
print '-h\t\tThis help'
sys.exit()
elif opt == "-a":
ipa = arg
elif opt == "-b":
ipb = arg
elif opt == "-n":
host = arg
# else
# print 'md32xx.py -a <IPControlerA> -b <IPControlerB> -n <HostNameInZabbix>'
# sys.exit(2)
SMCLI = "/opt/IBM_DS/client/SMcli"
CONN = ipa + " " + ipb
cmd = SMCLI + " " + CONN + " -S -c \"set session performanceMonitorInterval=3 performanceMonitorIterations=1;show allLogicalDrives performanceStats;\""
#print cmd
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
strings = out.split("\n",7)[7]
#print strings
packet = []
output = []
for str in strings.splitlines():
arr = str.split(',')
output.append({"{#VALUE}": arr[0].replace("\"","")})
packet.append(ZabbixMetric(host, 'total.ios['+arr[0].replace("\"","")+']',arr[1].replace("\"", "")))
packet.append(ZabbixMetric(host, 'read['+arr[0].replace("\"","")+']',arr[2].replace("\"", "")))
packet.append(ZabbixMetric(host, 'read.cache.hit['+arr[0].replace("\"","")+']',arr[3].replace("\"", "")))
packet.append(ZabbixMetric(host, 'write.cache.hit['+arr[0].replace("\"","")+']',arr[4].replace("\"", "")))
packet.append(ZabbixMetric(host, 'ssd.cache.hit['+arr[0].replace("\"","")+']',arr[5].replace("\"", "")))
packet.append(ZabbixMetric(host, 'current.MBs['+arr[0].replace("\"","")+']',arr[6].replace("\"", "")))
packet.append(ZabbixMetric(host, 'max.MBs['+arr[0].replace("\"","")+']',arr[7].replace("\"", "")))
packet.append(ZabbixMetric(host, 'current.ios['+arr[0].replace("\"","")+']',arr[8].replace("\"", "")))
packet.append(ZabbixMetric(host, 'max.ios['+arr[0].replace("\"","")+']',arr[9].replace("\"", "")))
if arr[0].replace("\"","") == "STORAGE SUBSYSTEM TOTALS":
break
#print packet
ZabbixSender(zabbix_server='192.168.10.45', zabbix_port=10051).send(packet)
print '{"data":'
print json.dumps(output)
print '}'
| 0
| 0
| 0
|
97789f2d81d078c1154b5841a8595992e15aa21b
| 3,331
|
py
|
Python
|
spd-2.4/twitter_handle.py
|
Sukhrobjon/career-lab
|
67bc38c0eb3b47248157f618cb34ed2bdd44fc78
|
[
"MIT"
] | null | null | null |
spd-2.4/twitter_handle.py
|
Sukhrobjon/career-lab
|
67bc38c0eb3b47248157f618cb34ed2bdd44fc78
|
[
"MIT"
] | null | null | null |
spd-2.4/twitter_handle.py
|
Sukhrobjon/career-lab
|
67bc38c0eb3b47248157f618cb34ed2bdd44fc78
|
[
"MIT"
] | null | null | null |
from heapq import nlargest
# NOTE: this solution assumes the handles contain only English letters
handles = ['DogeCoin', 'YangGang', 'HodlForLife',
'fakeDonaldDrumpf', 'GodIsLove', 'BernieOrBust']
new_user = 'iLoveDogs'
obj = SimilarAccounts()
result1 = obj.make_anagram(new_user)
result2 = obj.make_anagram('DogeCoin')
anagram_score = obj.get_score_with_anagram(new_user, 'GodIsLove')
set_score = obj.get_score_with_set(new_user, 'GodIsLove')
print(f"anagram score:", anagram_score)
print(f"set score:", set_score)
k_handles = obj.suggest(new_user, handles, 2)
print(k_handles)
| 32.339806
| 84
| 0.600721
|
from heapq import nlargest
class SimilarAccounts():
def suggest(self, new_user: str, handles: list, k: int = 2):
"""
Returns the two most similar account handles associated with
new_user handle
Args:
new_user(str): New user's handle
handles(list): Existing users handles
k(int): number of similar accout handles
Returns:
handle(tuple): two most similar score accounts
"""
# stores the handle and score as (handle, score)
scores = []
for handle in handles:
score = self.get_score_with_set(new_user, handle)
scores.append((handle, score))
# get the k largest handles
k_handles = nlargest(k, scores, lambda score: score[1])
# printing out only the handles, not scores
return [h[0] for h in k_handles]
def get_score_with_anagram(self, handle1, handle2):
"""
Get the similarity score between two user handles
+1 for each letter in the alphabet that occurs in both handles but
scoring –1 for each letter that occurs in only one handle
"""
anagram1 = self.make_anagram(handle1)
anagram2 = self.make_anagram(handle2)
print(f"{handle1} anagram: {anagram1}")
print(f"{handle2} anagram: {anagram2}")
score = 0
for i in range(len(anagram1)):
if anagram1[i] >= 1 and anagram2[i] >= 1:
score += 1
elif anagram1[i] >= 1 or anagram2[i] >= 1:
score -= 1
return score
def get_score_with_set(self, handle1, handle2):
handle1 = set(handle1.lower())
handle2 = set(handle2.lower())
# print(f'handle1 set: {handle1}, \nhandle2 set: {handle2}')
# number of -1s, all unmatch letters
sym_diff = handle1.symmetric_difference(handle2)
# number of +1s all matching letters
intersection = handle1.intersection(handle2)
# print(f"symmetric difference: {sym_diff}, \nintersection: {intersection}")
score = len(intersection) - len(sym_diff)
return (score)
def make_anagram(self, word):
"""
Create a anagram representation of the word.
NOTE: It processes words contain only lowercase letters
"""
# hashing 26 English letters
anagram = [0] * 26
offset = ord('a')
for letter in word:
index = ord(letter.lower()) - offset
anagram[index] += 1
return anagram
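# Hedged worked example (not in the original file): for 'iLoveDogs' and 'GodIsLove'
# the lower-cased letter sets are identical ({d, e, g, i, l, o, s, v}), so
# get_score_with_set returns 8 - 0 = 8, matching the set_score printed below.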
# NOTE: this solution assumes the handles contain only English letters
handles = ['DogeCoin', 'YangGang', 'HodlForLife',
'fakeDonaldDrumpf', 'GodIsLove', 'BernieOrBust']
new_user = 'iLoveDogs'
obj = SimilarAccounts()
result1 = obj.make_anagram(new_user)
result2 = obj.make_anagram('DogeCoin')
anagram_score = obj.get_score_with_anagram(new_user, 'GodIsLove')
set_score = obj.get_score_with_set(new_user, 'GodIsLove')
print(f"anagram score:", anagram_score)
print(f"set score:", set_score)
k_handles = obj.suggest(new_user, handles, 2)
print(k_handles)
| 545
| 2,175
| 23
|
5eb550b12cff3701f85e02e56962cdab42e7a5cf
| 6,175
|
py
|
Python
|
rfsoc_qpsk/dict_widget.py
|
louisliuwei/rfsoc_qpsk
|
53aaced242367b0ac5dd4f629f132f158095d83f
|
[
"BSD-3-Clause"
] | 1
|
2020-02-25T01:16:41.000Z
|
2020-02-25T01:16:41.000Z
|
signal_gen/dict_widget.py
|
adriankaisinclair/signal_gen_proj
|
ecad285b367bca7e4dbc0fbf7059e1dcef6778e8
|
[
"BSD-3-Clause"
] | null | null | null |
signal_gen/dict_widget.py
|
adriankaisinclair/signal_gen_proj
|
ecad285b367bca7e4dbc0fbf7059e1dcef6778e8
|
[
"BSD-3-Clause"
] | null | null | null |
"""Configuration data container with interactive ipywidgets GUI"""
import json
import ipywidgets
import jsonschema
class DictWidget():
"""Container class for configuration data
Constructed from a JSON Schema. Use like a dictionary to store and retrieve configuration data.
Will also create an ipywidgets interactive representation via `interact()`. Also supports nested schemata
(i.e. a schema that contains `object` properties, which are themselves configuration containers)."""
def __init__(self, schema):
"""Construct a Configuration object from a JSON schema definition"""
self.schema = schema
self.data = {}
self.callback = None
self.children = {}
# Create GUI
# Widget objects are collected in a dictionary (for update in __setitem__())
# as well as a list (together with their description labels to create a VBox for display).
self.widgets = {}
self.widgetlist = []
for name, props in self.schema['properties'].items():
minimum = props.get('minimum', None)
maximum = props.get('maximum', None)
description = props.get('description', '')
# Containers create new `DictWidget` instances - save those children for later
if props['type'] == 'object':
subschema = {"title": name, "type": "object", "properties": props['properties']}
self.children[name] = DictWidget(subschema)
else:
# Scalar data elements are displayed as is
if props['type'] == 'integer':
value = self.data.get(name, props.get('default', 0))
widget = ipywidgets.IntSlider(description=name, min=minimum, max=maximum, value=value)
elif props['type'] == 'number':
value = self.data.get(name, props.get('default', 0.0))
widget = ipywidgets.FloatSlider(description=name, min=minimum, max=maximum, value=value)
elif props['type'] == 'string':
# also supports drop down
value = self.data.get(name, props.get('default', ''))
if 'choices' in props:
widget = ipywidgets.Dropdown(options=props['choices'].split(';'), value=value, description=name)
else:
widget = ipywidgets.Text(description=name, value=value)
elif props['type'] == 'boolean':
value = self.data.get(name, props.get('default', False))
widget = ipywidgets.Checkbox(description=name, value=value)
else:
widget = ipywidgets.Label(description=name, value=f"Don't know how to draw {props['type']}")
widget.observe(self.on_value_change, names='value')
# save for self-reference
self.widgets[name] = widget
self.widgetlist.append(ipywidgets.HBox([widget, ipywidgets.Label(value=description)]))
# Add all saved children in a Tab
if self.children:
widget = ipywidgets.Tab([c._gui for c in self.children.values()])
for i, c in enumerate(self.children.keys()):
widget.set_title(i, c)
widget.observe(self.on_value_change, names='value')
# save for self-reference
self.widgets['_children'] = widget
self.widgetlist.append(widget)
# Return all widgets in a VBox
self._gui = ipywidgets.VBox(self.widgetlist)
def from_dict(self, dict_in):
"""Load configuration data from a dictionary.
Will validate input against schema used in object construction."""
jsonschema.validate(dict_in, self.schema)
for key, value in dict_in.items():
if isinstance(value, dict):
self.children[key].from_dict(value)
else:
self[key] = value
def from_json(self, json_in):
"""Load configuration data from JSON."""
temp = json.loads(json_in)
self.from_dict(temp)
def to_json(self):
"""Dump configuration data as JSON."""
if not self.data:
return None
return json.dumps(self.to_dict())
def to_dict(self):
"""Dump configuration data as dictionary."""
ret = dict(self.data)
for name, child in self.children.items():
ret[name] = child.to_dict()
return ret
def __getitem__(self, item):
"""Allow using dict syntax for object retrievel.
Will first try to locate a child configuration object. If that's not found,
it will then look for a data item."""
if item in self.children:
return self.children[item]
return self.data.__getitem__(item)
def __setitem__(self, item, value):
"""Allow using dict syntax for setting values.
Will only allow setting values in accordance with schema used for object
generation."""
if item not in self.schema['properties']:
raise KeyError(f'"{item}" not in schema')
temp = dict(self.data)
temp.__setitem__(item, value)
jsonschema.validate(temp, self.schema)
self.data.__setitem__(item, value)
# update any gui that may exist
if item in self.widgets:
self.widgets[item].value = value
def on_value_change(self, change):
"""Callback for GUI updates."""
key = change['owner'].description
self[key] = change['new']
if self.callback:
self.callback(self.to_dict()) # TODO: expensive!
def interact(self, callback=None):
"""Return an interactive ipywidgets GUI for setting configuration values.
Will call `callback` with a dictionary of data values on change."""
self.callback = callback
# Update children's callbacks, too.
for c in self.children.values():
c.callback = callback
return self._gui
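# Hedged usage sketch (the schema below is made up; not part of the original module):
# schema = {"title": "demo", "type": "object", "properties": {
#     "gain": {"type": "integer", "minimum": 0, "maximum": 10, "default": 1,
#              "description": "hypothetical knob"}}}
# cfg = DictWidget(schema)
# cfg['gain'] = 5 # validated against the schema before being stored
# cfg.interact(lambda d: print(d)) # returns the ipywidgets VBox; the callback fires on change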
| 37.652439
| 120
| 0.594818
|
"""Configuration data container with interactive ipywidgets GUI"""
import json
import ipywidgets
import jsonschema
class DictWidget():
"""Container class for configuration data
Constructed from a JSON Schema. Use like a dictionary to store and retrieve configuration data.
Will also create an ipywidgets interactive representation via `interact()`. Also supports nested schemata
(i.e. a schema that contains `object` properties, which are themselves configuration containers)."""
def __init__(self, schema):
"""Construct a Configuration object from a JSON schema definition"""
self.schema = schema
self.data = {}
self.callback = None
self.children = {}
# Create GUI
# Widget objects are collected in a dictionary (for update in __setitem__())
# as well as a list (together with their description labels to create a VBox for display).
self.widgets = {}
self.widgetlist = []
for name, props in self.schema['properties'].items():
minimum = props.get('minimum', None)
maximum = props.get('maximum', None)
description = props.get('description', '')
# Containers create new `Configuration` instances - save those children for later
if props['type'] == 'object':
subschema = {"title": name, "type": "object", "properties": props['properties']}
                self.children[name] = DictWidget(subschema)
else:
# Scalar data elements are displayed as is
if props['type'] == 'integer':
value = self.data.get(name, props.get('default', 0))
widget = ipywidgets.IntSlider(description=name, min=minimum, max=maximum, value=value)
elif props['type'] == 'number':
value = self.data.get(name, props.get('default', 0.0))
widget = ipywidgets.FloatSlider(description=name, min=minimum, max=maximum, value=value)
elif props['type'] == 'string':
# also supports drop down
value = self.data.get(name, props.get('default', ''))
if 'choices' in props:
widget = ipywidgets.Dropdown(options=props['choices'].split(';'), value=value, description=name)
else:
widget = ipywidgets.Text(description=name, value=value)
elif props['type'] == 'boolean':
value = self.data.get(name, props.get('default', False))
widget = ipywidgets.Checkbox(description=name, value=value)
else:
widget = ipywidgets.Label(description=name, value=f"Don't know how to draw {props['type']}")
widget.observe(self.on_value_change, names='value')
# save for self-reference
self.widgets[name] = widget
self.widgetlist.append(ipywidgets.HBox([widget, ipywidgets.Label(value=description)]))
# Add all saved children in a Tab
if self.children:
widget = ipywidgets.Tab([c._gui for c in self.children.values()])
for i, c in enumerate(self.children.keys()):
widget.set_title(i, c)
widget.observe(self.on_value_change, names='value')
# save for self-reference
self.widgets['_children'] = widget
self.widgetlist.append(widget)
# Return all widgets in a VBox
self._gui = ipywidgets.VBox(self.widgetlist)
def from_dict(self, dict_in):
"""Load configuration data from a dictionary.
Will validate input against schema used in object construction."""
jsonschema.validate(dict_in, self.schema)
for key, value in dict_in.items():
if isinstance(value, dict):
self.children[key].from_dict(value)
else:
self[key] = value
def from_json(self, json_in):
"""Load configuration data from JSON."""
temp = json.loads(json_in)
self.from_dict(temp)
def to_json(self):
"""Dump configuration data as JSON."""
if not self.data:
return None
return json.dumps(self.to_dict())
def to_dict(self):
"""Dump configuration data as dictionary."""
ret = dict(self.data)
for name, child in self.children.items():
ret[name] = child.to_dict()
return ret
def __repr__(self):
return 'Configuration "' + self.schema['title'] + '" ' + str(self.data)
def __getitem__(self, item):
"""Allow using dict syntax for object retrievel.
Will first try to locate a child configuration object. If that's not found,
it will then look for a data item."""
if item in self.children:
return self.children[item]
return self.data.__getitem__(item)
def __setitem__(self, item, value):
"""Allow using dict syntax for setting values.
Will only allow setting values in accordance with schema used for object
generation."""
if item not in self.schema['properties']:
raise KeyError(f'"{item}" not in schema')
temp = dict(self.data)
temp.__setitem__(item, value)
jsonschema.validate(temp, self.schema)
self.data.__setitem__(item, value)
# update any gui that may exist
if item in self.widgets:
self.widgets[item].value = value
def on_value_change(self, change):
"""Callback for GUI updates."""
key = change['owner'].description
self[key] = change['new']
if self.callback:
self.callback(self.to_dict()) # TODO: expensive!
def interact(self, callback=None):
"""Return an interactive ipywidgets GUI for setting configuration values.
Will call `callback` with a dictionary of data values on change."""
self.callback = callback
# Update children's callbacks, too.
for c in self.children.values():
c.callback = callback
return self._gui
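A minimal usage sketch for the container above; the schema and values below are invented for illustration and only exercise methods defined in this file.
schema = {
    "title": "demo",
    "type": "object",
    "properties": {
        "threshold": {"type": "number", "minimum": 0, "maximum": 1, "default": 0.5,
                      "description": "detection threshold"},
        "mode": {"type": "string", "choices": "fast;full", "default": "fast",
                 "description": "processing mode"},
    },
}
cfg = DictWidget(schema)
cfg.from_dict({"threshold": 0.8, "mode": "full"})   # validated against the schema
print(cfg["threshold"])                             # -> 0.8
print(cfg.to_json())                                # JSON dump of the current values
# In a notebook, cfg.interact(print) renders the widget form and calls back on each change.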
| 78
| 0
| 27
|
68ec29b122262b30ff0938589a6d290ac5c9c5f2
| 83
|
py
|
Python
|
nepali_company_registrar/__init__.py
|
girisagar46/nepal-company-registrar
|
09e0bf2e5f5f2ed43482d7148ddfd22d84a8000f
|
[
"MIT"
] | null | null | null |
nepali_company_registrar/__init__.py
|
girisagar46/nepal-company-registrar
|
09e0bf2e5f5f2ed43482d7148ddfd22d84a8000f
|
[
"MIT"
] | 3
|
2020-05-02T16:04:17.000Z
|
2021-12-13T20:39:06.000Z
|
nepali_company_registrar/__init__.py
|
girisagar46/nepal-company-registrar
|
09e0bf2e5f5f2ed43482d7148ddfd22d84a8000f
|
[
"MIT"
] | 2
|
2020-05-02T15:47:13.000Z
|
2020-05-04T15:19:47.000Z
|
from nepali_company_registrar.nepal_company_registrar import NepalCompanyRegistrar
| 41.5
| 82
| 0.939759
|
from nepali_company_registrar.nepal_company_registrar import NepalCompanyRegistrar
| 0
| 0
| 0
|
e1dc6ae2165d37553a1c4ef3334ea49ec6b9f0af
| 6,605
|
py
|
Python
|
bt_ig/igdata.py
|
femtotrader/bt-ig-store
|
3eb2716b0ee2bc427c5e7b49bfb2661e97e10e02
|
[
"MIT"
] | 1
|
2017-11-07T12:07:45.000Z
|
2017-11-07T12:07:45.000Z
|
bt_ig/igdata.py
|
femtotrader/bt-ig-store
|
3eb2716b0ee2bc427c5e7b49bfb2661e97e10e02
|
[
"MIT"
] | null | null | null |
bt_ig/igdata.py
|
femtotrader/bt-ig-store
|
3eb2716b0ee2bc427c5e7b49bfb2661e97e10e02
|
[
"MIT"
] | 1
|
2021-06-14T15:37:05.000Z
|
2021-06-14T15:37:05.000Z
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from datetime import datetime, timedelta
from backtrader.feed import DataBase
from backtrader import TimeFrame, date2num, num2date
from backtrader.utils.py3 import (integer_types, queue, string_types,
with_metaclass)
from backtrader.metabase import MetaParams
from . import igstore
class IGData(with_metaclass(MetaIGData, DataBase)):
'''
params:
'''
#TODO insert params
params = (
('useask', False),
('reconnections', -1),
('qcheck', 5)
)
# States for the Finite State Machine in _load
_ST_FROM, _ST_START, _ST_LIVE, _ST_HISTORBACK, _ST_OVER = range(5)
_store = igstore.IGStore
def islive(self):
'''Returns ``True`` to notify ``Cerebro`` that preloading and runonce
should be deactivated'''
return True
def setenvironment(self, env):
'''Receives an environment (cerebro) and passes it over to the store it
belongs to'''
super(IGData, self).setenvironment(env)
env.addstore(self.o)
def start(self):
        '''Starts the IG connection and gets the real contract and
contractdetails if it exists'''
super(IGData, self).start()
# Create attributes as soon as possible
self._statelivereconn = False # if reconnecting in live state
self._storedmsg = dict() # keep pending live message (under None)
self.qlive = queue.Queue()
self._state = self._ST_OVER
# Kickstart store and get queue to wait on
self.o.start(data=self)
self._start_finish()
self._state = self._ST_START # initial state for _load
self._st_start()
self._reconns = 0
def stop(self):
'''Stops and tells the store to stop'''
super(IGData, self).stop()
self.o.stop()
def _load(self):
'''
steps
        1 - check if the status is live. If so, process the message
- Check for error codes in message and change status appropriately
- Process the message as long as the status is not trying to reconnect
- Setup a backfill if data is missing.
2 - If not, is the status set to perform a backfill?
'''
if self._state == self._ST_OVER:
return False
while True:
if self._state == self._ST_LIVE:
try:
msg = (self._storedmsg.pop(None, None) or
self.qlive.get(timeout=self._qcheck))
except queue.Empty:
return None # indicate timeout situation
if msg is None: # Conn broken during historical/backfilling
self.put_notification(self.CONNBROKEN)
self.put_notification(self.DISCONNECTED)
self._state = self._ST_OVER
return False # failed
#TODO handle error messages in feed
#Check for empty data. Sometimes all the fields return None...
if msg['UTM'] is None:
return None
#self._reconns = self.p.reconnections
# Process the message according to expected return type
if not self._statelivereconn:
if self._laststatus != self.LIVE:
if self.qlive.qsize() <= 1: # very short live queue
self.put_notification(self.LIVE)
ret = self._load_tick(msg)
if ret:
return True
# could not load bar ... go and get new one
continue
elif self._state == self._ST_START:
if not self._st_start(instart=False):
self._state = self._ST_OVER
return False
#TODO
# - Check for delays in feed
# - put a self.put_notification(self.DELAYED)
# - Attempt to fill in missing data
# - Setup a backfill of some sort when starting a feed.
        # - Set Disconnected status where appropriate.
| 31.303318
| 86
| 0.563967
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from datetime import datetime, timedelta
from backtrader.feed import DataBase
from backtrader import TimeFrame, date2num, num2date
from backtrader.utils.py3 import (integer_types, queue, string_types,
with_metaclass)
from backtrader.metabase import MetaParams
from . import igstore
class MetaIGData(DataBase.__class__):
def __init__(cls, name, bases, dct):
'''Class has already been created ... register'''
# Initialize the class
super(MetaIGData, cls).__init__(name, bases, dct)
# Register with the store
igstore.IGStore.DataCls = cls
class IGData(with_metaclass(MetaIGData, DataBase)):
'''
params:
'''
#TODO insert params
params = (
('useask', False),
('reconnections', -1),
('qcheck', 5)
)
# States for the Finite State Machine in _load
_ST_FROM, _ST_START, _ST_LIVE, _ST_HISTORBACK, _ST_OVER = range(5)
_store = igstore.IGStore
def islive(self):
'''Returns ``True`` to notify ``Cerebro`` that preloading and runonce
should be deactivated'''
return True
def __init__(self, **kwargs):
self.o = self._store(**kwargs)
def setenvironment(self, env):
'''Receives an environment (cerebro) and passes it over to the store it
belongs to'''
super(IGData, self).setenvironment(env)
env.addstore(self.o)
def start(self):
        '''Starts the IG connection and gets the real contract and
contractdetails if it exists'''
super(IGData, self).start()
# Create attributes as soon as possible
self._statelivereconn = False # if reconnecting in live state
self._storedmsg = dict() # keep pending live message (under None)
self.qlive = queue.Queue()
self._state = self._ST_OVER
# Kickstart store and get queue to wait on
self.o.start(data=self)
self._start_finish()
self._state = self._ST_START # initial state for _load
self._st_start()
self._reconns = 0
def _st_start(self, instart=True, tmout=None):
        #TODO Equivalent of streaming_prices
self.qlive = self.o.streaming_prices(self.p.dataname, tmout=tmout)
if self._statelivereconn:
self.put_notification(self.DELAYED)
self._state = self._ST_LIVE
if instart:
self._reconns = self.p.reconnections
return True # no return before - implicit continue
def stop(self):
'''Stops and tells the store to stop'''
super(IGData, self).stop()
self.o.stop()
def haslivedata(self):
return bool(self._storedmsg or self.qlive) # do not return the objs
def _load(self):
'''
steps
        1 - check if the status is live. If so, process the message
- Check for error codes in message and change status appropriately
- Process the message as long as the status is not trying to reconnect
- Setup a backfill if data is missing.
2 - If not, is the status set to perform a backfill?
'''
if self._state == self._ST_OVER:
return False
while True:
if self._state == self._ST_LIVE:
try:
msg = (self._storedmsg.pop(None, None) or
self.qlive.get(timeout=self._qcheck))
except queue.Empty:
return None # indicate timeout situation
if msg is None: # Conn broken during historical/backfilling
self.put_notification(self.CONNBROKEN)
self.put_notification(self.DISCONNECTED)
self._state = self._ST_OVER
return False # failed
#TODO handle error messages in feed
#Check for empty data. Sometimes all the fields return None...
if msg['UTM'] is None:
return None
#self._reconns = self.p.reconnections
# Process the message according to expected return type
if not self._statelivereconn:
if self._laststatus != self.LIVE:
if self.qlive.qsize() <= 1: # very short live queue
self.put_notification(self.LIVE)
ret = self._load_tick(msg)
if ret:
return True
# could not load bar ... go and get new one
continue
elif self._state == self._ST_START:
if not self._st_start(instart=False):
self._state = self._ST_OVER
return False
#TODO
# - Check for delays in feed
# - put a self.put_notification(self.DELAYED)
# - Attempt to fill in missing data
# - Setup a backfill of some sort when starting a feed.
# - Set Dissonnected status where appropriate.
def _load_tick(self, msg):
#print('MSG = {}'.format(msg))
#print(msg['UTM'])
dtobj = datetime.utcfromtimestamp(int(msg['UTM']) / 1000)
dt = date2num(dtobj)
try:
vol = int(msg['LTV'])
except TypeError:
vol = 0
#Check for missing Bid quote (Happens sometimes)
if msg['BID'] == None and msg['OFR']:
bid = float(msg['OFR'])
ofr = float(msg['OFR'])
#Check for missing offer quote (Happens sometimes)
elif msg['OFR'] == None and msg['BID']:
bid = float(msg['BID'])
ofr = float(msg['BID'])
else:
bid = float(msg['BID'])
ofr = float(msg['OFR'])
if dt <= self.lines.datetime[-1]:
return False # time already seen
# Common fields
self.lines.datetime[0] = dt
self.lines.volume[0] = vol
self.lines.openinterest[0] = 0.0
# Put the prices into the bar
#SOMETIME tick can be missing BID or OFFER.... Need to fallback
tick = ofr if self.p.useask else bid
self.lines.open[0] = tick
self.lines.high[0] = tick
self.lines.low[0] = tick
self.lines.close[0] = tick
self.lines.volume[0] = vol
self.lines.openinterest[0] = 0.0
return True
def _load_history(self, msg):
#TODO
pass
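For orientation, a rough sketch of how such a feed is normally wired into backtrader; the epic code is a placeholder and the keyword arguments expected by IGStore (credentials, account, ...) are not shown in this file, so they are left as an assumption.
import backtrader as bt
from bt_ig.igdata import IGData

store_kwargs = {}  # whatever IGStore expects (API key, account id, ...) - assumed, not shown here
cerebro = bt.Cerebro()
cerebro.addstrategy(bt.Strategy)
data = IGData(dataname='CS.D.EURUSD.MINI.IP', **store_kwargs)  # placeholder IG epic
cerebro.adddata(data)
cerebro.run()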
| 1,874
| 277
| 158
|
58b7c6c0c929dbb5526611beacb304993639d923
| 3,750
|
py
|
Python
|
modules/photons_products/lifx.py
|
Djelibeybi/photons
|
bc0aa91771d8e88fd3c691fb58f18cb876f292ec
|
[
"MIT"
] | 51
|
2020-07-03T08:34:48.000Z
|
2022-03-16T10:56:08.000Z
|
modules/photons_products/lifx.py
|
delfick/photons
|
bc0aa91771d8e88fd3c691fb58f18cb876f292ec
|
[
"MIT"
] | 81
|
2020-07-03T08:13:59.000Z
|
2022-03-31T23:02:54.000Z
|
modules/photons_products/lifx.py
|
Djelibeybi/photons
|
bc0aa91771d8e88fd3c691fb58f18cb876f292ec
|
[
"MIT"
] | 8
|
2020-07-24T23:48:20.000Z
|
2021-05-24T17:20:16.000Z
|
from photons_products.base import Product, Capability, CapabilityValue
from photons_products.enums import VendorRegistry, Zones, Family
from photons_products import conditions as cond
class Capability(Capability):
"""
.. attribute:: is_light
Is this device a light
.. attribute:: zones
The style of zones. So strips are LINEAR and things like the candle and tile are MATRIX
.. attribute:: has_ir
Do we have infrared capability
.. attribute:: has_hev
Does this device have HEV LEDs
.. attribute:: has_color
Do we have hue control
.. attribute:: has_chain
Do we have a chain of devices
.. attribute:: has_relays
Does this device have relays
.. attribute:: has_buttons
Does this device have physical buttons
.. attribute:: has_unhandled
This product has StateUnhandled
.. attribute:: has_extended_multizone
This product supports extended multizone messages
.. attribute:: has_variable_color_temp
Do we have variable kelvin
.. attribute:: min_kelvin
The min kelvin of this product
.. attribute:: max_kelvin
The max kelvin of this product
.. attribute:: product
        The product class associated with this capability
.. attribute:: firmware_major
the firmware_major associated with this product
You can create an instance of this capability with your own firmware_major by calling this instance
.. attribute:: firmware_minor
        the firmware_minor associated with this product
You can create an instance of this capability with your own firmware_minor by calling this instance
.. autoattribute:: photons_products.lifx.Capability.has_matrix
.. autoattribute:: photons_products.lifx.Capability.has_multizone
"""
is_light = True
zones = CapabilityValue(Zones.SINGLE)
has_ir = CapabilityValue(False)
has_hev = CapabilityValue(False)
has_color = CapabilityValue(False)
has_chain = CapabilityValue(False)
has_relays = CapabilityValue(False)
has_buttons = CapabilityValue(False)
has_unhandled = CapabilityValue(False).until(0, 0, cond.NameHas("SWITCH"), becomes=True)
has_extended_multizone = CapabilityValue(False).until(
2, 77, cond.Family(Family.LCM2), cond.Capability(has_multizone=True), becomes=True
)
has_variable_color_temp = CapabilityValue(True)
min_kelvin = CapabilityValue(2500)
max_kelvin = CapabilityValue(9000)
@property
def has_multizone(self):
"""Return whether we have LINEAR zones"""
return self.zones is Zones.LINEAR
@property
def has_matrix(self):
"""Return whether we have MATRIX zones"""
return self.zones is Zones.MATRIX
| 27.985075
| 107
| 0.651467
|
from photons_products.base import Product, Capability, CapabilityValue
from photons_products.enums import VendorRegistry, Zones, Family
from photons_products import conditions as cond
class Product(Product):
vendor = VendorRegistry.LIFX
class Capability(Capability):
"""
.. attribute:: is_light
Is this device a light
.. attribute:: zones
The style of zones. So strips are LINEAR and things like the candle and tile are MATRIX
.. attribute:: has_ir
Do we have infrared capability
.. attribute:: has_hev
Does this device have HEV LEDs
.. attribute:: has_color
Do we have hue control
.. attribute:: has_chain
Do we have a chain of devices
.. attribute:: has_relays
Does this device have relays
.. attribute:: has_buttons
Does this device have physical buttons
.. attribute:: has_unhandled
This product has StateUnhandled
.. attribute:: has_extended_multizone
This product supports extended multizone messages
.. attribute:: has_variable_color_temp
Do we have variable kelvin
.. attribute:: min_kelvin
The min kelvin of this product
.. attribute:: max_kelvin
The max kelvin of this product
.. attribute:: product
        The product class associated with this capability
.. attribute:: firmware_major
the firmware_major associated with this product
You can create an instance of this capability with your own firmware_major by calling this instance
.. attribute:: firmware_minor
        the firmware_minor associated with this product
You can create an instance of this capability with your own firmware_minor by calling this instance
.. autoattribute:: photons_products.lifx.Capability.has_matrix
.. autoattribute:: photons_products.lifx.Capability.has_multizone
"""
is_light = True
zones = CapabilityValue(Zones.SINGLE)
has_ir = CapabilityValue(False)
has_hev = CapabilityValue(False)
has_color = CapabilityValue(False)
has_chain = CapabilityValue(False)
has_relays = CapabilityValue(False)
has_buttons = CapabilityValue(False)
has_unhandled = CapabilityValue(False).until(0, 0, cond.NameHas("SWITCH"), becomes=True)
has_extended_multizone = CapabilityValue(False).until(
2, 77, cond.Family(Family.LCM2), cond.Capability(has_multizone=True), becomes=True
)
has_variable_color_temp = CapabilityValue(True)
min_kelvin = CapabilityValue(2500)
max_kelvin = CapabilityValue(9000)
def capabilities_for_display(self):
if self.is_light:
return [
"zones",
"has_ir",
"has_hev",
"has_color",
"has_chain",
"has_matrix",
"has_relays",
"has_buttons",
"has_unhandled",
"has_multizone",
"has_extended_multizone",
"has_variable_color_temp",
"min_kelvin",
"max_kelvin",
]
else:
return ["has_relays", "has_buttons"]
@property
def has_multizone(self):
"""Return whether we have LINEAR zones"""
return self.zones is Zones.LINEAR
@property
def has_matrix(self):
"""Return whether we have MATRIX zones"""
return self.zones is Zones.MATRIX
class NonLightCapability(Capability):
is_light = False
has_ir = None
has_hev = None
has_color = None
has_chain = None
has_matrix = None
has_multizone = None
has_extended_multizone = None
has_variable_color_temp = None
max_kelvin = None
min_kelvin = None
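As a rough illustration of the pattern above, a hypothetical capability is sketched below; it is not a real registry entry, and how a Product subclass attaches its Capability is not shown in this file.
class ExampleStripCapability(Capability):
    # Hypothetical, for illustration only.
    zones = CapabilityValue(Zones.LINEAR)   # LINEAR zones is what has_multizone checks for
    has_color = CapabilityValue(True)
    has_ir = CapabilityValue(True)
    min_kelvin = CapabilityValue(1500)
    max_kelvin = CapabilityValue(9000)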
| 576
| 313
| 73
|
06cd1a03dea8936803ea80d1da734e2a983cdfda
| 676
|
py
|
Python
|
creat_price_data_datebase.py
|
brucewong0516/bitfinex_data
|
f29a7073831c6dd63d5fcfc9d3ed0d2679268f9c
|
[
"MIT"
] | 2
|
2019-01-03T11:38:39.000Z
|
2019-04-11T14:03:25.000Z
|
creat_price_data_datebase.py
|
brucewong0516/bitfinex_data
|
f29a7073831c6dd63d5fcfc9d3ed0d2679268f9c
|
[
"MIT"
] | null | null | null |
creat_price_data_datebase.py
|
brucewong0516/bitfinex_data
|
f29a7073831c6dd63d5fcfc9d3ed0d2679268f9c
|
[
"MIT"
] | 1
|
2019-04-11T14:03:33.000Z
|
2019-04-11T14:03:33.000Z
|
# If the run is interrupted, simply run the script again to resume
# Crawl historical price data to build the database
if __name__ == '__main__':
from crypto_1min import Bitfinex_api
import threading
initially_urls_queue = Bitfinex_api().create_initially_urls_queue()
# Bitfinex_api().get_all_symbol_detail()
threads = [thread() for i in range(50)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
| 32.190476
| 94
| 0.656805
|
# If the run is interrupted, simply run the script again to resume
# Crawl historical price data to build the database
if __name__ == '__main__':
from crypto_1min import Bitfinex_api
import threading
initially_urls_queue = Bitfinex_api().create_initially_urls_queue()
# Bitfinex_api().get_all_symbol_detail()
class thread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
while not initially_urls_queue.empty():
Bitfinex_api().request_for_get_history_price_data(initially_urls_queue.get())
threads = [thread() for i in range(50)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
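The script above boils down to a fixed-size pool of worker threads draining a shared URL queue; a standard-library-only sketch of that pattern, independent of the Bitfinex_api helper, looks like this.
import queue
import threading

def drain(q, handle):
    # Each worker pops URLs until the queue is exhausted.
    while True:
        try:
            url = q.get_nowait()
        except queue.Empty:
            return
        handle(url)

def run_pool(urls, handle, n_workers=50):
    q = queue.Queue()
    for u in urls:
        q.put(u)
    workers = [threading.Thread(target=drain, args=(q, handle)) for _ in range(n_workers)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()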
| 184
| 10
| 93
|
89c49cc5089a630ef39091edc5eb321e74dc586b
| 1,605
|
py
|
Python
|
scraping/jobs/stock_scraper.py
|
nmccn/ddd
|
b4347c91c8776bca8dacc0d4372122b94e6ae4d5
|
[
"MIT"
] | null | null | null |
scraping/jobs/stock_scraper.py
|
nmccn/ddd
|
b4347c91c8776bca8dacc0d4372122b94e6ae4d5
|
[
"MIT"
] | null | null | null |
scraping/jobs/stock_scraper.py
|
nmccn/ddd
|
b4347c91c8776bca8dacc0d4372122b94e6ae4d5
|
[
"MIT"
] | null | null | null |
#! python3
import datetime as dt
import requests
import pandas as pd
import lxml.html as lh
''' Class containing the code that scrapes the stock ticker/information from various stock & crypto sites'''
class StockScraper:
''' This is a function that scrapes a table from a provided web page.'''
| 23.602941
| 124
| 0.527103
|
#! python3
import datetime as dt
import requests
import pandas as pd
import lxml.html as lh
''' Class containing the code that scrapes the stock ticker/information from various stock & crypto sites'''
class StockScraper:
def __init__(self, source, stock_or_crypto):
self.source = source
self.stock_or_crypto = stock_or_crypto
''' This is a function that scrapes a table from a provided web page.'''
def scrape_table(self):
col = []
i = 0
url = self.source
page = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})
html_doc = lh.fromstring(page.content)
tr_elements = html_doc.xpath('//tr')
for t in tr_elements[0]:
i+=1
name = t.text_content()
col.append((name,[]))
# print(name)
for i in range(1, len(tr_elements)):
T = tr_elements[i]
if (len(T) != 10):
break
j = 0
for t in T.iterchildren():
data = t.text_content()
if (i > 0):
try:
data = int(data)
except:
pass
col[j][1].append(data)
j+=1
ticker_dictionary = {title:column for (title,column) in col}
ticker_dataframe = pd.DataFrame(ticker_dictionary)
# print(ticker_dataframe)
ticker_dataframe.to_csv('{}_ticker_dataframe_{}.csv'.format(self.stock_or_crypto, dt.datetime.now().hour), mode='a')
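A short usage sketch for the class above; the URL is a placeholder and simply has to point at a page whose first table has the ten columns the parsing loop expects.
scraper = StockScraper('https://example.com/markets', 'stock')   # placeholder URL
scraper.scrape_table()   # appends rows to stock_ticker_dataframe_<hour>.csv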
| 1,231
| 0
| 57
|
aa46a81ce18f9b351c934d3837daf97bc528b371
| 4,307
|
py
|
Python
|
2-Python_Scripts/TrojControl.py
|
mdodici/trojan-WD-pollution
|
ec79a96f0d9517a53df4c82ca1be0d5d38f3346b
|
[
"MIT"
] | null | null | null |
2-Python_Scripts/TrojControl.py
|
mdodici/trojan-WD-pollution
|
ec79a96f0d9517a53df4c82ca1be0d5d38f3346b
|
[
"MIT"
] | null | null | null |
2-Python_Scripts/TrojControl.py
|
mdodici/trojan-WD-pollution
|
ec79a96f0d9517a53df4c82ca1be0d5d38f3346b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[8]:
import rebound
import numpy as np
###############
### IMPORTS ###
###############
params = np.load('sample_params.npy')
###################
### DEFINITIONS ###
###################
radeg = np.pi/180
############################
############################
############################
### SIMULATION ###
############################
############################
############################
t_tot = 2000000
Nout = 100000
times = np.linspace(0,t_tot,Nout)
M0 = 1
num_tr = len(params[0])
sim = rebound.Simulation()
sim.add(m=M0,x=0, y=0, z=0, vx=0, vy=0, vz=0, hash='Sun')
add_tr(sim, params)
sim.add(m=9.543e-4, a=5.2, e=.04839, inc=.022689, Omega=0, omega=0, hash='jupiter')
sim.integrator = 'whfast'
sim.dt = 0.5
sim.move_to_com()
ps = sim.particles
#########################################
## Parameter tracking initialization ##
#########################################
mass = np.zeros(Nout)
lsol = np.zeros(Nout)  # stellar luminosity track (assumed placeholder; needed for star_data below)
x_sol = np.zeros(Nout); y_sol = np.zeros(Nout); z_sol = np.zeros(Nout)
x_sol[0] = ps['Sun'].x
y_sol[0] = ps['Sun'].y
z_sol[0] = ps['Sun'].z
x_jup = np.zeros(Nout); y_jup = np.zeros(Nout); z_jup = np.zeros(Nout)
x_jup[0] = ps['jupiter'].x
y_jup[0] = ps['jupiter'].y
z_jup[0] = ps['jupiter'].z
a_jup = np.zeros(Nout)
e_jup = np.zeros(Nout)
i_jup = np.zeros(Nout)
pmjup = np.zeros(Nout)
lmjup = np.zeros(Nout)
a_jup[0] = ps['jupiter'].a
e_jup[0] = ps['jupiter'].e
i_jup[0] = ps['jupiter'].inc
pmjup[0] = ps['jupiter'].pomega
lmjup[0] = ps['jupiter'].l
a_vals = np.zeros((num_tr, Nout))
e_vals = np.zeros((num_tr, Nout))
i_vals = np.zeros((num_tr, Nout))
omvals = np.zeros((num_tr, Nout))
pmvals = np.zeros((num_tr, Nout))
lmvals = np.zeros((num_tr, Nout))
x_vals = np.zeros((num_tr, Nout))
y_vals = np.zeros((num_tr, Nout))
z_vals = np.zeros((num_tr, Nout))
for moon in range(num_tr):
a_vals[moon,0] = ps['tr_{0}'.format(moon)].a
e_vals[moon,0] = ps['tr_{0}'.format(moon)].e
i_vals[moon,0] = ps['tr_{0}'.format(moon)].inc
lmvals[moon,0] = ps['tr_{0}'.format(moon)].l
omvals[moon,0] = ps['tr_{0}'.format(moon)].Omega
pmvals[moon,0] = ps['tr_{0}'.format(moon)].pomega
x_vals[moon,0] = ps['tr_{0}'.format(moon)].x
y_vals[moon,0] = ps['tr_{0}'.format(moon)].y
z_vals[moon,0] = ps['tr_{0}'.format(moon)].z
###########################
###########################
###########################
#### RUNNING ####
###########################
###########################
###########################
for i, time in enumerate(times):
sim.integrate(time)
sim.move_to_com()
x_sol[i] = ps['Sun'].x
y_sol[i] = ps['Sun'].y
z_sol[i] = ps['Sun'].z
x_jup[i] = ps['jupiter'].x
y_jup[i] = ps['jupiter'].y
z_jup[i] = ps['jupiter'].z
a_jup[i] = ps['jupiter'].a
e_jup[i] = ps['jupiter'].e
i_jup[i] = ps['jupiter'].inc
pmjup[i] = ps['jupiter'].pomega
lmjup[i] = ps['jupiter'].l
for moon in range(num_tr):
a_vals[moon,i] = ps['tr_{0}'.format(moon)].a
e_vals[moon,i] = ps['tr_{0}'.format(moon)].e
i_vals[moon,i] = ps['tr_{0}'.format(moon)].inc
lmvals[moon,i] = ps['tr_{0}'.format(moon)].l
omvals[moon,i] = ps['tr_{0}'.format(moon)].Omega
pmvals[moon,i] = ps['tr_{0}'.format(moon)].pomega
x_vals[moon,i] = ps['tr_{0}'.format(moon)].x
y_vals[moon,i] = ps['tr_{0}'.format(moon)].y
z_vals[moon,i] = ps['tr_{0}'.format(moon)].z
##############
## Saving ##
##############
i_vals/= radeg
i_jup /= radeg
troj_data = np.array((a_vals, e_vals, i_vals, omvals, pmvals, lmvals, x_vals, y_vals, z_vals))
plnt_data = np.array((a_jup, e_jup, i_jup, pmjup, lmjup, x_jup, y_jup, z_jup))
star_data = np.array((mass, lsol, x_sol, y_sol, z_sol))
np.save("Ctrl_Trojandata.npy", troj_data)
np.save("Ctrl_Planetdata.npy", plnt_data)
np.save("Ctrl_Stardata.npy", star_data)
np.save("Ctrl_Timesteps.npy", times)
| 23.664835
| 102
| 0.521941
|
#!/usr/bin/env python
# coding: utf-8
# In[8]:
import rebound
import numpy as np
###############
### IMPORTS ###
###############
params = np.load('sample_params.npy')
###################
### DEFINITIONS ###
###################
radeg = np.pi/180
def add_tr(sim, pars):
a = pars[0]
e = pars[1]
c = pars[2]*radeg
p = pars[3]
l = pars[4]
for i in range(len(a)):
sem = a[i]
ecc = e[i]
icl = c[i]
pme = p[i]
lam = l[i]
has = 'tr_{0}'.format(i)
sim.add(m=0, primary=sim.particles['Sun'], a=sem, e=ecc, inc=icl, pomega=pme, l=lam, hash=has)
############################
############################
############################
### SIMULATION ###
############################
############################
############################
t_tot = 2000000
Nout = 100000
times = np.linspace(0,t_tot,Nout)
M0 = 1
num_tr = len(params[0])
sim = rebound.Simulation()
sim.add(m=M0,x=0, y=0, z=0, vx=0, vy=0, vz=0, hash='Sun')
add_tr(sim, params)
sim.add(m=9.543e-4, a=5.2, e=.04839, inc=.022689, Omega=0, omega=0, hash='jupiter')
sim.integrator = 'whfast'
sim.dt = 0.5
sim.move_to_com()
ps = sim.particles
#########################################
## Parameter tracking initialization ##
#########################################
mass = np.zeros(Nout)
lsol = np.zeros(Nout)  # stellar luminosity track (assumed placeholder; needed for star_data below)
x_sol = np.zeros(Nout); y_sol = np.zeros(Nout); z_sol = np.zeros(Nout)
x_sol[0] = ps['Sun'].x
y_sol[0] = ps['Sun'].y
z_sol[0] = ps['Sun'].z
x_jup = np.zeros(Nout); y_jup = np.zeros(Nout); z_jup = np.zeros(Nout)
x_jup[0] = ps['jupiter'].x
y_jup[0] = ps['jupiter'].y
z_jup[0] = ps['jupiter'].z
a_jup = np.zeros(Nout)
e_jup = np.zeros(Nout)
i_jup = np.zeros(Nout)
pmjup = np.zeros(Nout)
lmjup = np.zeros(Nout)
a_jup[0] = ps['jupiter'].a
e_jup[0] = ps['jupiter'].e
i_jup[0] = ps['jupiter'].inc
pmjup[0] = ps['jupiter'].pomega
lmjup[0] = ps['jupiter'].l
a_vals = np.zeros((num_tr, Nout))
e_vals = np.zeros((num_tr, Nout))
i_vals = np.zeros((num_tr, Nout))
omvals = np.zeros((num_tr, Nout))
pmvals = np.zeros((num_tr, Nout))
lmvals = np.zeros((num_tr, Nout))
x_vals = np.zeros((num_tr, Nout))
y_vals = np.zeros((num_tr, Nout))
z_vals = np.zeros((num_tr, Nout))
for moon in range(num_tr):
a_vals[moon,0] = ps['tr_{0}'.format(moon)].a
e_vals[moon,0] = ps['tr_{0}'.format(moon)].e
i_vals[moon,0] = ps['tr_{0}'.format(moon)].inc
lmvals[moon,0] = ps['tr_{0}'.format(moon)].l
omvals[moon,0] = ps['tr_{0}'.format(moon)].Omega
pmvals[moon,0] = ps['tr_{0}'.format(moon)].pomega
x_vals[moon,0] = ps['tr_{0}'.format(moon)].x
y_vals[moon,0] = ps['tr_{0}'.format(moon)].y
z_vals[moon,0] = ps['tr_{0}'.format(moon)].z
###########################
###########################
###########################
#### RUNNING ####
###########################
###########################
###########################
for i, time in enumerate(times):
sim.integrate(time)
sim.move_to_com()
x_sol[i] = ps['Sun'].x
y_sol[i] = ps['Sun'].y
z_sol[i] = ps['Sun'].z
x_jup[i] = ps['jupiter'].x
y_jup[i] = ps['jupiter'].y
z_jup[i] = ps['jupiter'].z
a_jup[i] = ps['jupiter'].a
e_jup[i] = ps['jupiter'].e
i_jup[i] = ps['jupiter'].inc
pmjup[i] = ps['jupiter'].pomega
lmjup[i] = ps['jupiter'].l
for moon in range(num_tr):
a_vals[moon,i] = ps['tr_{0}'.format(moon)].a
e_vals[moon,i] = ps['tr_{0}'.format(moon)].e
i_vals[moon,i] = ps['tr_{0}'.format(moon)].inc
lmvals[moon,i] = ps['tr_{0}'.format(moon)].l
omvals[moon,i] = ps['tr_{0}'.format(moon)].Omega
pmvals[moon,i] = ps['tr_{0}'.format(moon)].pomega
x_vals[moon,i] = ps['tr_{0}'.format(moon)].x
y_vals[moon,i] = ps['tr_{0}'.format(moon)].y
z_vals[moon,i] = ps['tr_{0}'.format(moon)].z
##############
## Saving ##
##############
i_vals/= radeg
i_jup /= radeg
troj_data = np.array((a_vals, e_vals, i_vals, omvals, pmvals, lmvals, x_vals, y_vals, z_vals))
plnt_data = np.array((a_jup, e_jup, i_jup, pmjup, lmjup, x_jup, y_jup, z_jup))
star_data = np.array((mass, lsol, x_sol, y_sol, z_sol))
np.save("Ctrl_Trojandata.npy", troj_data)
np.save("Ctrl_Planetdata.npy", plnt_data)
np.save("Ctrl_Stardata.npy", star_data)
np.save("Ctrl_Timesteps.npy", times)
| 348
| 0
| 23
|
db093beb53a7b6ad82a8e1539915f07c674c857e
| 22,140
|
py
|
Python
|
filter_unlabel.py
|
IMAGR-LTD/noisystudent
|
c5ae6524c6ba37d3cd133107c21250c4b8fb7a28
|
[
"Apache-2.0"
] | 683
|
2020-02-14T20:50:53.000Z
|
2022-03-30T06:02:54.000Z
|
filter_unlabel.py
|
IMAGR-LTD/noisystudent
|
c5ae6524c6ba37d3cd133107c21250c4b8fb7a28
|
[
"Apache-2.0"
] | 11
|
2020-06-22T09:03:33.000Z
|
2021-11-02T16:15:00.000Z
|
filter_unlabel.py
|
IMAGR-LTD/noisystudent
|
c5ae6524c6ba37d3cd133107c21250c4b8fb7a28
|
[
"Apache-2.0"
] | 102
|
2020-02-17T10:30:41.000Z
|
2022-03-25T05:38:51.000Z
|
# coding=utf-8
# Copyright 2019 The Google NoisyStudent Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import collections
import json
import copy
import os
import time
import numpy as np
import tensorflow as tf
import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('input_dir', '', '')
flags.DEFINE_string('prediction_dir', '', '')
flags.DEFINE_string('info_dir', '', '')
flags.DEFINE_string('prelim_stats_dir', '', '')
flags.DEFINE_string('output_dir', '', '')
flags.DEFINE_integer(
'num_shards', default=128, help='')
flags.DEFINE_integer(
'only_use_num_shards', default=-1, help='')
flags.DEFINE_integer(
'shard_id', default=0, help='')
flags.DEFINE_integer(
'num_image', default=1300, help='')
flags.DEFINE_integer(
'total_replicas', default=1, help='')
flags.DEFINE_integer(
'total_label_replicas', default=-1, help='')
flags.DEFINE_integer(
'task', default=-1, help='')
flags.DEFINE_integer(
'debug', default=0, help='')
flags.DEFINE_float(
'min_threshold', default=0.0, help='')
flags.DEFINE_float(
'max_prob', default=2, help='sometimes the probability can be greater than 1 due to floating point.')
flags.DEFINE_integer(
'num_label_classes', default=1000, help='')
flags.DEFINE_integer(
'upsample', default=1, help='')
flags.DEFINE_integer(
'only_get_stats', default=0, help='')
flags.DEFINE_string('file_prefix', 'train', '')
flags.DEFINE_string(
'data_type', default='tfrecord', help='')
flags.DEFINE_integer(
'use_top', default=1, help='')
flags.DEFINE_bool(
'eval_imagenet_p', default=False, help='')
flags.DEFINE_bool(
'use_all', default=False, help='')
if __name__ == '__main__':
app.run(main)
| 34.866142
| 145
| 0.652304
|
# coding=utf-8
# Copyright 2019 The Google NoisyStudent Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import collections
import json
import copy
import os
import time
import numpy as np
import tensorflow as tf
import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('input_dir', '', '')
flags.DEFINE_string('prediction_dir', '', '')
flags.DEFINE_string('info_dir', '', '')
flags.DEFINE_string('prelim_stats_dir', '', '')
flags.DEFINE_string('output_dir', '', '')
flags.DEFINE_integer(
'num_shards', default=128, help='')
flags.DEFINE_integer(
'only_use_num_shards', default=-1, help='')
flags.DEFINE_integer(
'shard_id', default=0, help='')
flags.DEFINE_integer(
'num_image', default=1300, help='')
flags.DEFINE_integer(
'total_replicas', default=1, help='')
flags.DEFINE_integer(
'total_label_replicas', default=-1, help='')
flags.DEFINE_integer(
'task', default=-1, help='')
flags.DEFINE_integer(
'debug', default=0, help='')
flags.DEFINE_float(
'min_threshold', default=0.0, help='')
flags.DEFINE_float(
'max_prob', default=2, help='sometimes the probability can be greater than 1 due to floating point.')
flags.DEFINE_integer(
'num_label_classes', default=1000, help='')
flags.DEFINE_integer(
'upsample', default=1, help='')
flags.DEFINE_integer(
'only_get_stats', default=0, help='')
flags.DEFINE_string('file_prefix', 'train', '')
flags.DEFINE_string(
'data_type', default='tfrecord', help='')
flags.DEFINE_integer(
'use_top', default=1, help='')
flags.DEFINE_bool(
'eval_imagenet_p', default=False, help='')
flags.DEFINE_bool(
'use_all', default=False, help='')
def preprocess_jft(features):
encoded_image = features['image/encoded']
image = utils.decode_raw_image(encoded_image)
encoded_image = tf.image.encode_jpeg(
image,
format='rgb', quality=100)
features['image/encoded'] = encoded_image
return features
def input_dataset(worker_id):
filename = utils.get_filename(FLAGS.input_dir, FLAGS.file_prefix,
FLAGS.shard_id, FLAGS.num_shards)
dst = utils.get_dst_from_filename(filename, FLAGS.data_type,
FLAGS.total_label_replicas, worker_id)
dst = dst.apply(
tf.data.experimental.map_and_batch(
preprocess_jft if FLAGS.data_type == 'sstable' else lambda x: x, batch_size=1,
num_parallel_batches=16, drop_remainder=False))
dst = dst.prefetch(tf.data.experimental.AUTOTUNE)
return dst
def get_worker_id_list():
if FLAGS.debug == 1:
worker_id_list = [0]
else:
if FLAGS.task != -1:
num_label_replica_per_worker = FLAGS.total_label_replicas // FLAGS.total_replicas
worker_id_list = list(range(
FLAGS.task * num_label_replica_per_worker,
(FLAGS.task + 1) * num_label_replica_per_worker))
tf.logging.info('worker_id_list {:s}'.format(str(worker_id_list)))
else:
worker_id_list = list(range(FLAGS.total_label_replicas))
return worker_id_list
def get_label_to_image_idx():
tf.logging.info('\n\ngetting label to image idx')
label_to_image_idx = {}
num_image_for_worker = {}
for worker_id in get_worker_id_list():
with tf.gfile.Open(
os.path.join(
FLAGS.info_dir,
'info-%.5d-of-%.5d-%.5d.txt' % (
FLAGS.shard_id, FLAGS.num_shards, worker_id
))) as inf:
info = json.load(inf)
image_num = info['image_num']
num_image_for_worker[worker_id] = image_num
if image_num == 0:
continue
label_dst = utils.label_dataset(
worker_id,
FLAGS.prediction_dir, FLAGS.shard_id, FLAGS.num_shards)
iter = label_dst.make_initializable_iterator()
elem = iter.get_next()
cnt = 0
with tf.Session() as sess:
sess.run(iter.initializer)
for j in range(image_num):
features = sess.run(elem)
label_arr = features['classes']
prob_arr = features['probabilities']
for i in range(label_arr.shape[0]):
label = label_arr[i]
prob = prob_arr[i][label]
if label not in label_to_image_idx:
label_to_image_idx[label] = []
label_to_image_idx[label] += [{
'worker_id': worker_id,
'idx': cnt,
'prob': prob,
'probabilities': prob_arr[i].tolist(),
}]
cnt += 1
assert cnt == image_num
return label_to_image_idx, num_image_for_worker
def get_keep_image_idx(label_to_image_idx, selected_threshold, uid_list):
tf.logging.info('\n\ngetting keep image idx')
stats_dir = os.path.join(
FLAGS.output_dir,
'stats')
tf.gfile.MakeDirs(stats_dir)
keep_idx = {}
for i in label_to_image_idx:
label_to_image_idx[i] = sorted(label_to_image_idx[i],
key=lambda x: -x['prob'])
k = 0
uid = uid_list[i]
while k < len(label_to_image_idx[i]):
if (label_to_image_idx[i][k]['prob'] >= selected_threshold[uid][0]
and label_to_image_idx[i][k]['prob'] <= FLAGS.max_prob):
if FLAGS.use_all:
include_copy = 1
else:
include_copy = FLAGS.num_image / selected_threshold[uid][1]
if not FLAGS.upsample:
include_copy = min(include_copy, 1)
prob = include_copy - int(include_copy)
include_copy = int(include_copy) + int(np.random.random() < prob)
if include_copy:
info = label_to_image_idx[i][k]
worker_id = info['worker_id']
print('include_copy', include_copy, FLAGS.num_image, selected_threshold[uid][1], '\n\n\n')
if worker_id not in keep_idx:
keep_idx[worker_id] = {}
keep_idx[worker_id][info['idx']] = [i, info['prob'], include_copy, info['probabilities']]
k += 1
counts = collections.defaultdict(int)
total_keep_example = 0
for worker_id in keep_idx:
for label, prob, include_copy, _ in keep_idx[worker_id].values():
counts[uid_list[label]] += include_copy
total_keep_example += 1
tf.logging.info('counts: {:s}'.format(json.dumps(counts, indent=4)))
return keep_idx, total_keep_example, counts
def filter_image_by_idx(
keep_idx,
uid_list,
total_keep_example,
num_image_for_worker):
sample_prob = 30000. / (FLAGS.num_image * 1000)
image_list = []
np.random.seed(12345)
def get_image_list(features):
dump_features = {}
prob = keep_idx[worker_id][cnt][1]
label = keep_idx[worker_id][cnt][0]
include_copy = keep_idx[worker_id][cnt][2]
image_bytes = features['image/encoded'][0]
dump_features['image/encoded'] = utils.bytes_feature(image_bytes)
dump_features['prob'] = utils.float_feature(prob)
dump_features['probabilities'] = utils.float_feature(keep_idx[worker_id][cnt][3])
dump_features['label'] = utils.int64_feature(label)
example = tf.train.Example(features=tf.train.Features(feature=dump_features))
cur_image_list = []
for j in range(include_copy):
image_info = {
'example': example,
'label': label,
'prob': prob,
'image_bytes': image_bytes,
'cnt': cnt,
}
cur_image_list += [image_info]
return cur_image_list
def flush(sess):
tf.logging.info('saving images')
np.random.shuffle(image_list)
for image_info in image_list:
image_bytes = image_info['image_bytes']
prob = image_info['prob']
label = image_info['label']
example = image_info['example']
cnt = image_info['cnt']
record_writer.write(example.SerializeToString())
if np.random.random() < sample_prob:
uid = uid_list[label]
filename = os.path.join(
sample_dir, uid, 'image_{:d}_{:d}_{:d}_{:.2f}.jpeg'.format(
FLAGS.shard_id, FLAGS.task, cnt, prob))
tf.logging.info('saving {:s}'.format(filename))
image = sess.run(decoded_image,
feed_dict={image_bytes_placeholder: image_bytes}
)
utils.save_pic(image, filename)
tf.logging.info(
'{:d}/{:d} images saved, elapsed time: {:.2f} h'.format(
num_picked_images, total_keep_example,
(time.time() - start_time) / 3600))
tf.logging.info('\n\nfilter image by index')
num_picked_images = 0
sample_dir = os.path.join(FLAGS.output_dir, 'samples')
data_dir = os.path.join(FLAGS.output_dir, 'data')
for uid in uid_list:
tf.gfile.MakeDirs(os.path.join(sample_dir, uid))
tf.gfile.MakeDirs(data_dir)
image_bytes_placeholder = tf.placeholder(dtype=tf.string)
decoded_image = utils.decode_raw_image(image_bytes_placeholder)
total_cnt = 0
start_time = time.time()
image_list = []
if len(keep_idx) == 0:
return
record_writer = tf.python_io.TFRecordWriter(
os.path.join(data_dir, 'train-%d-%.5d-of-%.5d' % (
FLAGS.task, FLAGS.shard_id, FLAGS.num_shards)))
for worker_id in get_worker_id_list():
tf.logging.info('worker_id: {:d}, elapsed time: {:.2f} h'.format(
worker_id, (time.time() - start_time) / 3600.))
dst = input_dataset(worker_id)
iter = dst.make_initializable_iterator()
elem = iter.get_next()
cnt = 0
hit_samples = {}
with tf.Session() as sess:
sess.run(iter.initializer)
for i in range(num_image_for_worker[worker_id]):
features = sess.run(elem)
key = 'image/encoded'
# encoded_image_arr = features['image/encoded']
# assert encoded_image_arr.shape[0] == 1
# for j in range(encoded_image_arr.shape[0]):
for j in range(features[key].shape[0]):
if worker_id in keep_idx and cnt in keep_idx[worker_id]:
num_picked_images += 1
# image_list += get_image_list(encoded_image_arr[j])
image_list += get_image_list(features)
hit_samples[cnt] = 1
if total_cnt % 1000 == 0:
elapsed_time = (time.time() - start_time) / 3600
total_image = num_image_for_worker[worker_id]
tf.logging.info(
'scanning idx {:d} of {:d} images, {:d}/{:d} images saved, elapsed time: {:.2f} h, remaining time {:.2f} h'.format(
total_cnt, total_image,
num_picked_images, total_keep_example,
elapsed_time,
elapsed_time / (total_cnt + 1) * (total_image - total_cnt)
)
)
cnt += 1
total_cnt += 1
if len(image_list) >= 10000:
flush(sess)
image_list = []
try:
sess.run(elem)
assert False, "count isn't right"
except tf.errors.OutOfRangeError:
tf.logging.info('count is right')
assert cnt == num_image_for_worker[worker_id], (cnt, num_image_for_worker[worker_id])
for idx in keep_idx[worker_id]:
if idx not in hit_samples:
tf.logging.info('\n\nnot hit, %d %d', worker_id, idx)
assert num_picked_images == total_keep_example
if len(image_list):
with tf.Session() as sess:
flush(sess)
image_list = []
record_writer.close()
def is_master_job():
return FLAGS.shard_id == 0 and (FLAGS.task == -1 or FLAGS.task == 0)
def get_total_counts(uid_list, prelim_stats_dir, prob_threshold):
if FLAGS.only_use_num_shards != -1:
num_shards = FLAGS.only_use_num_shards
else:
num_shards = FLAGS.num_shards
to_read_filenames = []
for i in range(num_shards):
for j in range(FLAGS.total_replicas):
if FLAGS.debug == 1 and (i != FLAGS.shard_id or j != FLAGS.task):
continue
prelim_stats_filename = os.path.join(
prelim_stats_dir,
'prelim_stats_%.5d_%d.json' % (i, j))
to_read_filenames += [prelim_stats_filename]
total_counts = {}
total_counts_sum = {}
for uid in uid_list:
total_counts[uid] = []
total_counts_sum[uid] = []
for threshold in prob_threshold:
total_counts[uid] += [[threshold, 0]]
total_counts_sum[uid] += [[threshold, 0]]
tf.logging.info('reading prelim stats')
while len(to_read_filenames):
new_to_read_filenames = []
for filename in to_read_filenames:
completed, counts = load_json(filename)
if completed:
for uid in counts:
for k in range(len(prob_threshold)):
total_counts[uid][k][1] += counts[uid][k][1]
tf.logging.info('finished reading prelim stats for {:s}'.format(filename))
else:
new_to_read_filenames += [filename]
tf.logging.info('not ready: {:s}'.format(filename))
to_read_filenames = new_to_read_filenames
return total_counts, total_counts_sum
def get_threshold(label_to_image_idx, uid_list, prob_threshold):
tf.logging.info('\n\ngetting threshold')
threshold_stats = {}
prelim_stats_dir = FLAGS.prelim_stats_dir
prelim_stats_filename = os.path.join(prelim_stats_dir, 'prelim_stats_%.5d_%d.json' % (FLAGS.shard_id, FLAGS.task))
if not load_json(prelim_stats_filename)[0]:
tf.gfile.MakeDirs(prelim_stats_dir)
for i in label_to_image_idx:
label_to_image_idx[i] = sorted(label_to_image_idx[i],
key=lambda x: -x['prob'])
num_samples = []
n = len(label_to_image_idx[i])
start_idx = 0
cur_sample_idx = 0
for j in reversed(range(len(prob_threshold))):
while cur_sample_idx < n and label_to_image_idx[i][cur_sample_idx]['prob'] >= prob_threshold[j]:
cur_sample_idx += 1
num_samples += [(prob_threshold[j], cur_sample_idx - start_idx)]
start_idx = cur_sample_idx
threshold_stats[uid_list[i]] = copy.deepcopy(list(reversed(num_samples)))
with tf.gfile.Open(
prelim_stats_filename, 'w') as ouf:
json.dump(threshold_stats, ouf)
tf.logging.info('threshold_stats: {:s}'.format(json.dumps(threshold_stats, indent=4)))
if is_master_job():
total_counts_file = os.path.join(prelim_stats_dir, 'total_counts.json')
if not tf.gfile.Exists(total_counts_file):
total_counts, total_counts_sum = get_total_counts(
uid_list, prelim_stats_dir, prob_threshold)
for uid in uid_list:
for i in range(len(prob_threshold) - 1, -1, -1):
if i < len(prob_threshold) - 1:
total_counts_sum[uid][i][1] = total_counts_sum[uid][i + 1][1] + total_counts[uid][i][1]
else:
total_counts_sum[uid][i][1] = total_counts[uid][i][1]
total_counts_sum_file = os.path.join(prelim_stats_dir, 'total_counts_sum.json')
with tf.gfile.Open(total_counts_sum_file, 'w') as ouf:
json.dump(total_counts_sum, ouf)
with tf.gfile.Open(total_counts_file, 'w') as ouf:
json.dump(total_counts, ouf)
else:
with tf.gfile.Open(total_counts_file) as inf:
total_counts = json.load(inf)
tf.gfile.MakeDirs(FLAGS.output_dir)
threshold_file = os.path.join(FLAGS.output_dir, 'threshold.json')
if not tf.gfile.Exists(threshold_file):
selected_threshold = {}
num_image_across_cat = 0
for uid in uid_list:
threshold_idx = -1
total_image = 0
for i in range(len(prob_threshold) - 1, -1, -1):
if prob_threshold[i] < FLAGS.max_prob and prob_threshold[i] >= FLAGS.min_threshold:
total_image += total_counts[uid][i][1]
if not FLAGS.use_all:
if FLAGS.use_top and total_image >= FLAGS.num_image:
threshold_idx = i
break
if prob_threshold[i] == FLAGS.min_threshold:
threshold_idx = i
break
assert threshold_idx != -1
if not FLAGS.use_all:
if total_image < FLAGS.num_image:
assert prob_threshold[threshold_idx] == FLAGS.min_threshold
tf.logging.info(
'warning: too few images, {:s} only has {:d} images while we expect {:d} images, upsampling, threshold {:.3f}'.format(
uid, total_image, FLAGS.num_image, prob_threshold[threshold_idx]))
else:
tf.logging.info('warning: too many images, {:s} has {:d} images while we expect {:d} images, down sampling, threshold {:.3f}'.format(
uid, total_image, FLAGS.num_image, prob_threshold[threshold_idx]))
selected_threshold[uid] = (
prob_threshold[threshold_idx],
total_image)
num_image_across_cat += min(total_image, FLAGS.num_image)
with tf.gfile.Open(threshold_file, 'w') as ouf:
json.dump(selected_threshold, ouf)
image_across_cat_filename = os.path.join(FLAGS.output_dir, 'num_image_across_cat.json')
with tf.gfile.Open(image_across_cat_filename, 'w') as ouf:
        json.dump({'num_image_across_cat': num_image_across_cat}, ouf)
else:
with tf.gfile.Open(threshold_file) as inf:
selected_threshold = json.load(inf)
else:
if FLAGS.only_get_stats:
return None
threshold_file = os.path.join(FLAGS.output_dir, 'threshold.json')
while not tf.gfile.Exists(threshold_file):
tf.logging.info('waiting for the threshold file')
time.sleep(300) # sleep 5 min
selected_threshold = None
while True:
try:
with tf.gfile.Open(threshold_file) as inf:
selected_threshold = json.load(inf)
break
except:
pass
return selected_threshold
def load_json(filename):
if tf.gfile.Exists(filename):
counts = None
try:
with tf.gfile.Open(filename) as inf:
counts = json.load(inf)
return (True, counts)
except:
tf.logging.info('having error loading {:s}, not exist'.format(
filename))
return (False, None)
def read_stats():
total_counts = collections.defaultdict(int)
filename_list = []
stats_dir = os.path.join(FLAGS.output_dir, 'stats')
if FLAGS.only_use_num_shards != -1:
num_shards = FLAGS.only_use_num_shards
else:
num_shards = FLAGS.num_shards
for i in range(num_shards):
for j in range(FLAGS.total_replicas):
filename = os.path.join(stats_dir, 'stats_%.5d_%d.json' % (i, j))
filename_list += [filename]
if FLAGS.debug == 1:
filename_list = [os.path.join(
stats_dir, 'stats_%.5d_%d.json' % (FLAGS.shard_id, FLAGS.task))]
while len(filename_list):
new_filename_list = []
for filename in filename_list:
if load_json(filename)[0]:
counts = None
while True:
try:
with tf.gfile.Open(filename) as inf:
counts = json.load(inf)
break
except:
tf.logging.info('having error loading {:s}, retrying'.format(
filename))
pass
for uid in counts:
total_counts[uid] += counts[uid]
else:
new_filename_list += [filename]
filename_list = new_filename_list
tf.logging.info('waiting for: {:s}'.format(' '.join(filename_list)))
count_pairs = total_counts.items()
count_pairs = sorted(count_pairs, key=lambda x: -x[1])
num_images_all_label = 0
for key, value in count_pairs:
num_images_all_label += value
final_stats = {
'cat_count': total_counts,
'cat_sorted_pairs': count_pairs,
'total_cnt': num_images_all_label
}
with tf.gfile.Open(
os.path.join(FLAGS.output_dir, 'stats', 'final_stats.json'), 'w') as ouf:
json.dump(final_stats, ouf)
tf.logging.info(json.dumps(final_stats, indent=4))
def get_label_replicas():
# infer number of replicas from data
FLAGS.total_label_replicas = 1
while True:
filename = os.path.join(
FLAGS.prediction_dir,
'train-info-%.5d-of-%.5d-%.5d' % (
0, FLAGS.num_shards, FLAGS.total_label_replicas - 1))
if tf.gfile.Exists(filename):
FLAGS.total_label_replicas *= 2
else:
break
FLAGS.total_label_replicas = FLAGS.total_label_replicas // 2
tf.logging.info('total_label_replicas {:d}'.format(FLAGS.total_label_replicas))
assert FLAGS.total_label_replicas > 0
def main(argv):
stats_dir = os.path.join(FLAGS.output_dir, 'stats')
stats_filename = os.path.join(stats_dir, 'stats_%.5d_%d.json' % (FLAGS.shard_id, FLAGS.task))
if load_json(stats_filename)[0]:
if is_master_job():
read_stats()
tf.logging.info('stats already finished, returning')
return
prelim_stats_filename = os.path.join(
FLAGS.prelim_stats_dir,
'prelim_stats_%.5d_%d.json' % (FLAGS.shard_id, FLAGS.task))
completed, _ = load_json(prelim_stats_filename)
if FLAGS.only_get_stats and completed and not is_master_job():
return
get_label_replicas()
assert FLAGS.total_label_replicas == FLAGS.total_replicas
# must be sorted
prob_threshold = []
# 0 to 0.99
for i in range(0, 101):
prob = i / 100.
prob_threshold += [prob]
uid_list = utils.get_uid_list()
print(len(uid_list))
print("\n" * 10)
label_to_image_idx, num_image_for_worker = get_label_to_image_idx()
selected_threshold = get_threshold(
label_to_image_idx, uid_list, prob_threshold)
if FLAGS.only_get_stats:
return
keep_idx, total_keep_example, counts = get_keep_image_idx(
label_to_image_idx, selected_threshold, uid_list)
filter_image_by_idx(keep_idx, uid_list, total_keep_example, num_image_for_worker)
with tf.gfile.Open(
os.path.join(stats_dir, 'stats_%.5d_%d.json' % (FLAGS.shard_id, FLAGS.task)),
'w') as ouf:
json.dump(counts, ouf)
if is_master_job():
read_stats()
if __name__ == '__main__':
app.run(main)
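The per-class balancing in get_keep_image_idx reduces to a probabilistic copy count: on average each kept image is duplicated num_image / available times, with the fractional part resolved by a coin flip. A self-contained sketch of that arithmetic:
import numpy as np

def expected_copies(target_per_class, available_in_class, rng=np.random):
    # Mirrors the include_copy computation above: integer part plus a
    # Bernoulli draw on the fractional remainder.
    expected = target_per_class / available_in_class
    whole = int(expected)
    return whole + int(rng.random() < expected - whole)

# e.g. a target of 1300 with 650 confident images yields 2 copies each (upsampling),
# while 5200 available images keeps each one with probability 0.25 (downsampling).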
| 19,455
| 0
| 299
|
c7a57ea8bd5bf8926a8b9423d40481685fc17c8b
| 193
|
py
|
Python
|
PythonExercicios/ex060-calculo-do-fatorial-usando-for.py
|
ArthurAlbuquerquee/exercicios-python-cev
|
ba64d3a025731aae5e238c7cb469917420b7901e
|
[
"MIT"
] | null | null | null |
PythonExercicios/ex060-calculo-do-fatorial-usando-for.py
|
ArthurAlbuquerquee/exercicios-python-cev
|
ba64d3a025731aae5e238c7cb469917420b7901e
|
[
"MIT"
] | null | null | null |
PythonExercicios/ex060-calculo-do-fatorial-usando-for.py
|
ArthurAlbuquerquee/exercicios-python-cev
|
ba64d3a025731aae5e238c7cb469917420b7901e
|
[
"MIT"
] | null | null | null |
n = int(input('Enter a number to compute its factorial: '))
f = 1
for c in range(n, 0, -1):
print(f'{c}', end='')
print(' x ' if c > 1 else ' = ', end='')
f = f * c
print(f'{f}')
| 24.125
| 61
| 0.502591
|
n = int(input('Enter a number to compute its factorial: '))
f = 1
for c in range(n, 0, -1):
print(f'{c}', end='')
print(' x ' if c > 1 else ' = ', end='')
f = f * c
print(f'{f}')
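As a quick cross-check, the running product built by the loop should agree with the standard library:
from math import factorial
assert factorial(5) == 5 * 4 * 3 * 2 * 1 == 120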
| 0
| 0
| 0
|
d462a20b6ef7379120157d79baf14c2fc027f6cd
| 3,966
|
py
|
Python
|
tests/test_preprocessing.py
|
MannLabs/alphaviz
|
5911f351cf5cd21e2a3fa2a455d1eeb0c43af9cc
|
[
"Apache-2.0"
] | 6
|
2022-01-27T12:47:31.000Z
|
2022-03-10T11:18:03.000Z
|
tests/test_preprocessing.py
|
MannLabs/alphaviz
|
5911f351cf5cd21e2a3fa2a455d1eeb0c43af9cc
|
[
"Apache-2.0"
] | 7
|
2022-01-27T14:34:15.000Z
|
2022-03-15T12:16:58.000Z
|
tests/test_preprocessing.py
|
MannLabs/alphaviz
|
5911f351cf5cd21e2a3fa2a455d1eeb0c43af9cc
|
[
"Apache-2.0"
] | null | null | null |
#!python
"""
This module provides pytest tests for the functions in the preprocessing.py file
"""
import pytest
import alphaviz.preprocessing as preproc
# def test_preprocess_ckg_output():
# ckg_output_string_correct = "~Q92934;~Q15149"
# ckg_output_string_correct_2 = " ~Q92934; ~Q15149"
# ckg_output_string_wrong = "Q92934; Q15149"
#
# proteins_correct_input = alphaviz.preprocessing.preprocess_ckg_output(ckg_output_string_correct)
# assert len(proteins_correct_input) == 2, \
# "The number of extracted proteins is wrong."
# assert 'Q92934' in proteins_correct_input, \
# "A unique protein is absent in the extracted list of proteins."
#
# assert proteins_correct_input == alphaviz.preprocessing.preprocess_ckg_output(ckg_output_string_correct_2), \
# "Spaces in the ckg string don't influence on the result of the output."
#
# with pytest.raises(ValueError):
# alphaviz.preprocessing.preprocess_ckg_output(ckg_output_string_wrong)
| 47.783133
| 131
| 0.736258
|
#!python
"""
This module provides pytest tests for the functions in the preprocessing.py file
"""
import pytest
import alphaviz.preprocessing as preproc
def test_get_mq_unique_proteins():
mq_analysis_file = "../test_data/evidence.txt"
proteins = preproc.get_mq_unique_proteins(mq_analysis_file)
assert len(proteins) == 6, \
"The number of extracted proteins is wrong."
assert 'Plectin' in proteins, \
"A unique protein is absent in the extracted list of proteins."
# def test_preprocess_ckg_output():
# ckg_output_string_correct = "~Q92934;~Q15149"
# ckg_output_string_correct_2 = " ~Q92934; ~Q15149"
# ckg_output_string_wrong = "Q92934; Q15149"
#
# proteins_correct_input = alphaviz.preprocessing.preprocess_ckg_output(ckg_output_string_correct)
# assert len(proteins_correct_input) == 2, \
# "The number of extracted proteins is wrong."
# assert 'Q92934' in proteins_correct_input, \
# "A unique protein is absent in the extracted list of proteins."
#
# assert proteins_correct_input == alphaviz.preprocessing.preprocess_ckg_output(ckg_output_string_correct_2), \
# "Spaces in the ckg string don't influence on the result of the output."
#
# with pytest.raises(ValueError):
# alphaviz.preprocessing.preprocess_ckg_output(ckg_output_string_wrong)
def test_get_identified_ions():
peptide = 'NTINHN'
all_ions = ['', 'b2-H2O', '', '', 'b2', 'b3', '', 'b5-NH3', '', '', '', '', 'y1-NH3', 'y1', 'y4-H2O', 'y4']
assert [False,True,True,False,True,False] == preproc.get_identified_ions(all_ions, peptide, 'b'), \
"The extraction of identified b-ions doesn't work"
assert [False,False,True,False,False,True] == preproc.get_identified_ions(all_ions, peptide, 'y'), \
"The extraction of identified y-ions doesn't work"
def test_convert_diann_mq_mod():
seq1 = 'VSHGSSPSLLEALSSDFLAC(UniMod:4)K'
assert 'VSHGSSPSLLEALSSDFLAC[Carbamidomethyl (C)]K' == preproc.convert_diann_mq_mod(seq1)
seq2 = 'VSVINTVDTSHEDMIHDAQM(UniMod:35)DYYGTR'
assert 'VSVINTVDTSHEDMIHDAQM[Oxidation (M)]DYYGTR' == preproc.convert_diann_mq_mod(seq2)
seq3 = 'HAEMPVHTGLK(UniMod:2)'
assert 'HAEMPVHTGLK[Amidated (C-term)]' == preproc.convert_diann_mq_mod(seq3)
seq4 = 'HAEMPVHTGLKS(UniMod:23)A'
assert 'HAEMPVHTGLKS[Dehydrated (ST)]A' == preproc.convert_diann_mq_mod(seq4)
seq5 = 'HAEMPVHTGLKY(UniMod:23)A'
assert 'HAEMPVHTGLKY[Dehydrated (Y)]A' == preproc.convert_diann_mq_mod(seq5)
seq1_several_dif_mods = '(UniMod:1)VSHGSSPSLLEALSSDFLAC(UniMod:4)K'
assert '[Acetyl (N-term)]VSHGSSPSLLEALSSDFLAC[Carbamidomethyl (C)]K' == preproc.convert_diann_mq_mod(seq1_several_dif_mods)
seq2_several_same_mods = 'CAALVATAEENLC(UniMod:4)C(UniMod:4)EELSSK'
assert 'CAALVATAEENLC[Carbamidomethyl (C)]C[Carbamidomethyl (C)]EELSSK' == preproc.convert_diann_mq_mod(seq2_several_same_mods)
seq_no_mod = 'CVNTTLQIK'
assert "CVNTTLQIK" == preproc.convert_diann_mq_mod(seq_no_mod)
def test_convert_diann_ap_mod():
seq1 = '(UniMod:27)ELNMIIMLPDETTDLR'
    assert preproc.convert_diann_ap_mod(seq1) == 'pgELNMIIMLPDETTDLR', "The conversion of the N-term modifications doesn't work."
seq2 = 'ADFSGM(UniMod:35)SQTDLSLSK'
    assert preproc.convert_diann_ap_mod(seq2) == 'ADFSGoxMSQTDLSLSK', "The conversion of the modifications doesn't work."
seq3 = 'YYYDGDMIC(UniMod:4)'
assert preproc.convert_diann_ap_mod(seq3) == 'YYYDGDMIcC', \
"The convertion of the C-term modifications doesn't work."
seq1_several_dif_mods = '(UniMod:1)VSHGSSPSLLEALSSDFLAC(UniMod:4)K'
assert 'aVSHGSSPSLLEALSSDFLAcCK' == preproc.convert_diann_ap_mod(seq1_several_dif_mods)
seq2_several_same_mods = 'CAALVATAEENLC(UniMod:4)C(UniMod:4)EELSSK'
assert 'CAALVATAEENLcCcCEELSSK' == preproc.convert_diann_ap_mod(seq2_several_same_mods)
seq_no_mod = 'CVNTTLQIK'
assert "CVNTTLQIK" == preproc.convert_diann_ap_mod(seq_no_mod)
| 2,870
| 0
| 92
|
4af483d9d5f2574d4667927db0ef0fd8d25f54f0
| 11,197
|
py
|
Python
|
smpp/pdu_builder.py
|
komuW/smpp_server
|
10ef5c2ebc09e2ef88bdd62c55a4280a187d1eb2
|
[
"BSD-3-Clause"
] | 1
|
2016-05-18T16:57:20.000Z
|
2016-05-18T16:57:20.000Z
|
smpp/pdu_builder.py
|
komuW/smpp_server
|
10ef5c2ebc09e2ef88bdd62c55a4280a187d1eb2
|
[
"BSD-3-Clause"
] | null | null | null |
smpp/pdu_builder.py
|
komuW/smpp_server
|
10ef5c2ebc09e2ef88bdd62c55a4280a187d1eb2
|
[
"BSD-3-Clause"
] | null | null | null |
from . import *
# bind = BindTransmitter(system_id='test_id', password='abc123')
# print bind.get_obj()
# print bind.get_hex()
# print bind.get_bin()
# #print json.dumps(bind.get_obj(), indent=4, sort_keys=True)
# #print json.dumps(decode_pdu(bind.get_hex()), indent=4, sort_keys=True)
# print json.dumps(unpack_pdu(bind.get_bin()), indent=4, sort_keys=True)
# sm = SubmitSM(short_message='testing testing')
# print json.dumps(unpack_pdu(sm.get_bin()), indent=4, sort_keys=True)
# sm.add_message_payload('616263646566676869')
# print json.dumps(unpack_pdu(sm.get_bin()), indent=4, sort_keys=True)
| 45.149194
| 117
| 0.676163
|
from . import *
class PDU(object):
def __init__(self, command_id, command_status, sequence_number, **kwargs):
super(PDU, self).__init__()
self.obj = {}
self.obj['header'] = {}
self.obj['header']['command_length'] = 0
self.obj['header']['command_id'] = command_id
self.obj['header']['command_status'] = command_status
self.obj['header']['sequence_number'] = sequence_number
def add_optional_parameter(self, tag, value):
if self.obj.get('body') is None:
self.obj['body'] = {}
if self.obj['body'].get('optional_parameters') is None:
self.obj['body']['optional_parameters'] = []
self.obj['body']['optional_parameters'].append({
'tag': tag,
'length': 0,
'value': value,
})
def __add_optional_parameter(self, tag, value):
# This is deprecated, but not everything has been updated yet.
return self.add_optional_parameter(tag, value)
def set_sar_msg_ref_num(self, value):
self.add_optional_parameter('sar_msg_ref_num', value)
def set_sar_segment_seqnum(self, value):
self.add_optional_parameter('sar_segment_seqnum', value)
def set_sar_total_segments(self, value):
self.add_optional_parameter('sar_total_segments', value)
def get_obj(self):
return self.obj
def get_hex(self):
return encode_pdu(self.obj)
def get_bin(self):
return pack_pdu(self.obj)
class Bind(PDU):
def __init__(self, command_id, sequence_number, system_id='', password='', system_type='',
interface_version='34', addr_ton=0, addr_npi=0, address_range='', **kwargs):
super(Bind, self).__init__(command_id, 'ESME_ROK', sequence_number)
self.obj['body'] = {}
self.obj['body']['mandatory_parameters'] = {}
self.obj['body']['mandatory_parameters']['system_id'] = system_id
self.obj['body']['mandatory_parameters']['password'] = password
self.obj['body']['mandatory_parameters']['system_type'] = system_type
self.obj['body']['mandatory_parameters']['interface_version'] = interface_version
self.obj['body']['mandatory_parameters']['addr_ton'] = addr_ton
self.obj['body']['mandatory_parameters']['addr_npi'] = addr_npi
self.obj['body']['mandatory_parameters']['address_range'] = address_range
class BindTransmitter(Bind):
def __init__(self, sequence_number, **kwargs):
super(BindTransmitter, self).__init__('bind_transmitter', sequence_number, **kwargs)
class BindReceiver(Bind):
def __init__(self, sequence_number, **kwargs):
super(BindReceiver, self).__init__('bind_receiver', sequence_number, **kwargs)
class BindTransceiver(Bind):
def __init__(self, sequence_number, **kwargs):
super(BindTransceiver, self).__init__('bind_transceiver', sequence_number, **kwargs)
class BindResp(PDU):
def __init__(self, command_id, command_status, sequence_number, system_id='', **kwargs):
super(BindResp, self).__init__(command_id, command_status, sequence_number)
self.obj['body'] = {}
self.obj['body']['mandatory_parameters'] = {}
self.obj['body']['mandatory_parameters']['system_id'] = system_id
class BindTransmitterResp(BindResp):
def __init__(self, sequence_number, command_status="ESME_ROK", **kwargs):
super(BindTransmitterResp, self).__init__('bind_transmitter_resp', command_status, sequence_number, **kwargs)
class BindReceiverResp(BindResp):
def __init__(self, sequence_number, command_status="ESME_ROK", **kwargs):
super(BindReceiverResp, self).__init__('bind_receiver_resp', command_status, sequence_number, **kwargs)
class BindTransceiverResp(BindResp):
def __init__(self, sequence_number, command_status="ESME_ROK", **kwargs):
super(BindTransceiverResp, self).__init__('bind_transceiver_resp', command_status, sequence_number, **kwargs)
class Unbind(PDU):
def __init__(self, sequence_number, **kwargs):
super(Unbind, self).__init__('unbind', 'ESME_ROK', sequence_number, **kwargs)
class UnbindResp(PDU):
def __init__(self, sequence_number, **kwargs):
super(UnbindResp, self).__init__('unbind_resp', 'ESME_ROK', sequence_number, **kwargs)
class SM1(PDU):
def __init__(self, command_id, sequence_number, service_type='', source_addr_ton=0,
source_addr_npi=0, source_addr='', esm_class=0, protocol_id=0, priority_flag=0,
schedule_delivery_time='', validity_period='', registered_delivery=0,
replace_if_present_flag=0, data_coding=0, sm_default_msg_id=0, sm_length=0,
short_message=None, **kwargs):
super(SM1, self).__init__(command_id, 'ESME_ROK', sequence_number)
self.obj['body'] = {}
self.obj['body']['mandatory_parameters'] = {}
self.obj['body']['mandatory_parameters']['service_type'] = service_type
self.obj['body']['mandatory_parameters']['source_addr_ton'] = source_addr_ton
self.obj['body']['mandatory_parameters']['source_addr_npi'] = source_addr_npi
self.obj['body']['mandatory_parameters']['source_addr'] = source_addr
self.obj['body']['mandatory_parameters']['esm_class'] = esm_class
self.obj['body']['mandatory_parameters']['protocol_id'] = protocol_id
self.obj['body']['mandatory_parameters']['priority_flag'] = priority_flag
self.obj['body']['mandatory_parameters']['schedule_delivery_time'] = schedule_delivery_time
self.obj['body']['mandatory_parameters']['validity_period'] = validity_period
self.obj['body']['mandatory_parameters']['registered_delivery'] = registered_delivery
self.obj['body']['mandatory_parameters']['replace_if_present_flag'] = replace_if_present_flag
self.obj['body']['mandatory_parameters']['data_coding'] = data_coding
self.obj['body']['mandatory_parameters']['sm_default_msg_id'] = sm_default_msg_id
self.obj['body']['mandatory_parameters']['sm_length'] = sm_length
self.obj['body']['mandatory_parameters']['short_message'] = short_message
def add_message_payload(self, value):
self.obj['body']['mandatory_parameters']['sm_length'] = 0
self.obj['body']['mandatory_parameters']['short_message'] = None
self.add_optional_parameter('message_payload', value)
class SubmitMulti(SM1):
def __init__(self, sequence_number, number_of_dests=0, dest_address=[], **kwargs):
super(SubmitMulti, self).__init__('submit_multi', sequence_number, **kwargs)
mandatory_parameters = self.obj['body']['mandatory_parameters']
mandatory_parameters['number_of_dests'] = number_of_dests
mandatory_parameters['dest_address'] = [] + dest_address
def addDestinationAddress(self, destination_addr, dest_addr_ton=0, dest_addr_npi=0):
if isinstance(destination_addr, str) and len(destination_addr) > 0:
new_entry = {
'dest_flag': 1,
'dest_addr_ton': dest_addr_ton,
'dest_addr_npi': dest_addr_npi,
'destination_addr': destination_addr,
}
mandatory_parameters = self.obj['body']['mandatory_parameters']
mandatory_parameters['dest_address'].append(new_entry)
mandatory_parameters['number_of_dests'] = len(
mandatory_parameters['dest_address'])
return True
else:
return False
def addDistributionList(self, dl_name):
if isinstance(dl_name, str) and len(dl_name) > 0:
new_entry = {
'dest_flag': 2,
'dl_name': dl_name,
}
mandatory_parameters = self.obj['body']['mandatory_parameters']
mandatory_parameters['dest_address'].append(new_entry)
mandatory_parameters['number_of_dests'] = len(
mandatory_parameters['dest_address'])
return True
else:
return False
class SM2(SM1):
def __init__(self, command_id, sequence_number, dest_addr_ton=0, dest_addr_npi=0,
destination_addr='', **kwargs):
super(SM2, self).__init__(command_id, sequence_number, **kwargs)
mandatory_parameters = self.obj['body']['mandatory_parameters']
mandatory_parameters['dest_addr_ton'] = dest_addr_ton
mandatory_parameters['dest_addr_npi'] = dest_addr_npi
mandatory_parameters['destination_addr'] = destination_addr
class SubmitSM(SM2):
def __init__(self, sequence_number, **kwargs):
super(SubmitSM, self).__init__('submit_sm', sequence_number, **kwargs)
class SubmitSMResp(PDU):
def __init__(self, sequence_number, message_id, command_status='ESME_ROK', **kwargs):
super(SubmitSMResp, self).__init__('submit_sm_resp', command_status, sequence_number, **kwargs)
self.obj['body'] = {}
self.obj['body']['mandatory_parameters'] = {}
self.obj['body']['mandatory_parameters']['message_id'] = message_id
class DeliverSM(SM2):
def __init__(self, sequence_number, **kwargs):
super(DeliverSM, self).__init__('deliver_sm', sequence_number, **kwargs)
class DeliverSMResp(PDU):
def __init__(self, sequence_number, message_id='', command_status='ESME_ROK', **kwargs):
super(DeliverSMResp, self).__init__('deliver_sm_resp', command_status, sequence_number, **kwargs)
self.obj['body'] = {}
self.obj['body']['mandatory_parameters'] = {}
self.obj['body']['mandatory_parameters']['message_id'] = ''
class EnquireLink(PDU):
def __init__(self, sequence_number, **kwargs):
super(EnquireLink, self).__init__('enquire_link', 'ESME_ROK', sequence_number, **kwargs)
class EnquireLinkResp(PDU):
def __init__(self, sequence_number, **kwargs):
super(EnquireLinkResp, self).__init__('enquire_link_resp', 'ESME_ROK', sequence_number, **kwargs)
class QuerySM(PDU):
def __init__(self, sequence_number, message_id, source_addr='', source_addr_ton=0, source_addr_npi=0, **kwargs):
super(QuerySM, self).__init__('query_sm', 'ESME_ROK', sequence_number, **kwargs)
self.obj['body'] = {}
self.obj['body']['mandatory_parameters'] = {}
self.obj['body']['mandatory_parameters']['message_id'] = message_id
self.obj['body']['mandatory_parameters']['source_addr'] = source_addr
self.obj['body']['mandatory_parameters']['source_addr_ton'] = source_addr_ton
self.obj['body']['mandatory_parameters']['source_addr_npi'] = source_addr_npi
# bind = BindTransmitter(system_id='test_id', password='abc123')
# print bind.get_obj()
# print bind.get_hex()
# print bind.get_bin()
# #print json.dumps(bind.get_obj(), indent=4, sort_keys=True)
# #print json.dumps(decode_pdu(bind.get_hex()), indent=4, sort_keys=True)
# print json.dumps(unpack_pdu(bind.get_bin()), indent=4, sort_keys=True)
# sm = SubmitSM(short_message='testing testing')
# print json.dumps(unpack_pdu(sm.get_bin()), indent=4, sort_keys=True)
# sm.add_message_payload('616263646566676869')
# print json.dumps(unpack_pdu(sm.get_bin()), indent=4, sort_keys=True)
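The commented examples above use Python 2 print statements and omit the sequence_number argument the constructors now require; a hedged Python 3 sketch of the same usage, assuming unpack_pdu is supplied by the package's `from . import *` as the comments imply:

# Sketch: build and inspect PDUs with the classes defined above.
import json

bind = BindTransmitter(sequence_number=1, system_id='test_id', password='abc123')
print(json.dumps(unpack_pdu(bind.get_bin()), indent=4, sort_keys=True))

sm = SubmitSM(sequence_number=2, short_message='testing testing')
sm.add_message_payload('616263646566676869')  # clears short_message and adds the hex value as a message_payload TLV
print(json.dumps(unpack_pdu(sm.get_bin()), indent=4, sort_keys=True))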
| 9,200
| 51
| 1,326
|
67a6499830b7f74583e2c66c04f86175dac0b79b
| 24,402
|
py
|
Python
|
lib/catnip/preparation.py
|
MetOffice/CATNIP
|
82a800d000513860077735eb115b060782a4815a
|
[
"BSD-3-Clause"
] | 2
|
2020-09-28T14:03:27.000Z
|
2020-11-17T14:25:18.000Z
|
lib/catnip/preparation.py
|
MetOffice/CATNIP
|
82a800d000513860077735eb115b060782a4815a
|
[
"BSD-3-Clause"
] | 26
|
2020-10-23T08:34:16.000Z
|
2021-04-01T14:32:12.000Z
|
lib/catnip/preparation.py
|
MetOffice/CATNIP
|
82a800d000513860077735eb115b060782a4815a
|
[
"BSD-3-Clause"
] | 2
|
2020-11-20T17:08:25.000Z
|
2021-04-11T06:10:29.000Z
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2020 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
import iris
import iris.analysis
import numpy as np
from six import string_types, integer_types
import iris.coord_categorisation as iccat
import doctest
import os.path
import catnip.config as conf
import iris.exceptions
from dask import array as da
def _get_xy_noborder(mask):
"""
    Return the indices of where the mask is valid. If the mask is
    all True (all masked), raise a ValueError.
args
----
mask: mask from numpy array
Returns
-------
x1, x2, y1, y2: int giving space where the data is valid
"""
if np.all(mask):
raise ValueError("All values masked - can't get indices")
ys, xs = np.where(~mask)
x1 = min(xs)
x2 = max(xs) + 1
y1 = min(ys)
y2 = max(ys) + 1
return x1, x2, y1, y2
def add_aux_unrotated_coords(cube):
"""
This function takes a cube that is on a rotated pole
    coordinate system and adds to it two additional
    auxiliary coordinates to hold the unrotated coordinate
    values.
    args
    ----
    cube: iris cube on a rotated pole coordinate system
    Returns
    -------
    cube: input cube with auxiliary coordinates of unrotated
latitude and longitude
Notes
-----
See below for an example that should be run with python3:
>>> file = os.path.join(conf.DATA_DIR, 'mslp.daily.rcm.viet.nc')
>>> cube = iris.load_cube(file)
>>> print([coord.name() for coord in cube.coords()])
['time', 'grid_latitude', 'grid_longitude']
>>> auxcube = add_aux_unrotated_coords(cube)
>>> print([coord.name() for coord in auxcube.coords()])
['time', 'grid_latitude', 'grid_longitude', 'latitude', 'longitude']
>>> print(auxcube.coord('latitude')) # doctest: +NORMALIZE_WHITESPACE
AuxCoord(array([[35.32243855, 35.33914928, 35.355619 , ..., 35.71848081,
35.70883111, 35.69893388],
[35.10317609, 35.11986604, 35.13631525, ..., 35.49871728,
35.48908 , 35.47919551],
[34.88390966, 34.90057895, 34.91700776, ..., 35.27895246,
35.26932754, 35.25945571],
...,
[ 6.13961446, 6.15413611, 6.16844578, ..., 6.48307389,
6.47472284, 6.46615667],
[ 5.92011032, 5.93461779, 5.94891347, ..., 6.26323044,
6.25488773, 6.24633011],
[ 5.70060768, 5.71510098, 5.72938268, ..., 6.04338876,
6.03505439, 6.02650532]]), standard_name=None, \
units=Unit('degrees'), long_name='latitude')
>>> print(auxcube.shape)
(360, 136, 109)
>>> print(auxcube.coord('latitude').shape)
(136, 109)
>>> print(auxcube.coord('longitude').shape)
(136, 109)
"""
if not isinstance(cube, iris.cube.Cube):
raise TypeError("Input is not a cube")
# get cube's coordinate system
cs = cube.coord_system()
if str(cs).find("Rotated") == -1:
raise TypeError(
"The cube is not on a rotated pole, coord system is {}".format(str(cs))
)
auxcube = cube.copy()
# get coord names
# Longitude
xcoord = auxcube.coord(axis="X", dim_coords=True)
# Latitude
ycoord = auxcube.coord(axis="Y", dim_coords=True)
# read in the grid lat/lon points from the cube
glat = auxcube.coord(ycoord).points
glon = auxcube.coord(xcoord).points
# create a rectangular grid out of an array of
# glon and glat values, shape will be len(glat)xlen(glon)
x, y = np.meshgrid(glon, glat)
# get the cube dimensions which corresponds to glon and glat
x_dim = auxcube.coord_dims(xcoord)[0]
y_dim = auxcube.coord_dims(ycoord)[0]
# define two new variables to hold the unrotated coordinates
rlongitude, rlatitude = iris.analysis.cartography.unrotate_pole(
x, y, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude
)
    # create two new auxiliary coordinates to hold
# the values of the unrotated coordinates
reg_long = iris.coords.AuxCoord(rlongitude, long_name="longitude", units="degrees")
reg_lat = iris.coords.AuxCoord(rlatitude, long_name="latitude", units="degrees")
    # add two auxiliary coordinates to the cube holding
# regular(unrotated) lat/lon values
auxcube.add_aux_coord(reg_long, [y_dim, x_dim])
auxcube.add_aux_coord(reg_lat, [y_dim, x_dim])
return auxcube
def add_bounds(cube, coord_names, bound_position=0.5):
"""
Simple function to check whether a
coordinate in a cube has bounds, and
add them if it doesn't.
args
----
cube: iris cube
coord_names: string or list of strings containing the name/s
of the coordinates you want to add bounds to.
bound_position: Optional, the desired position of the bounds relative to
the position of the points. Default is 0.5.
Returns
-------
cube: cube with bounds added
Notes
-----
Need to be careful that it is appropriate
to add bounds to the data, e.g. if data
are instantaneous, time bounds are not
appropriate.
An example:
>>> file = os.path.join(conf.DATA_DIR, 'mslp.daily.rcm.viet.nc')
>>> cube = iris.load_cube(file)
>>> bcube = add_bounds(cube, 'time')
time coordinate already has bounds, none will be added
>>> bcube = add_bounds(cube, 'grid_latitude')
grid_latitude bounds added
>>> bcube = add_bounds(cube, ['grid_latitude','grid_longitude'])
grid_latitude bounds added
grid_longitude bounds added
"""
# check if the input is an Iris cube
if not isinstance(cube, iris.cube.Cube):
raise TypeError("Input is not a cube")
# check if the coordinate name input is a string
if not isinstance(coord_names, (string_types, list)):
raise TypeError("Input coordinate must be a string")
bcube = cube.copy()
# find names of dim coords
c_names = [c.name() for c in bcube.coords()]
    # if coord_names is a single string, the loop below would split it
    # into characters; this statement checks for that case and
    # puts it into a tuple to prevent splitting.
if isinstance(coord_names, string_types):
coord_names = tuple([coord_names])
for coord in coord_names:
# check if coord is a string
if not isinstance(coord, string_types):
raise TypeError(
"Coordinate {} must be a string, it is currently a {}".format(
str(coord), type(coord)
)
)
# check coord is a coordinate of the cube
if coord not in c_names:
raise AttributeError(
"{} is not a coordinate, available coordinates are: {}".format(
coord, c_names
)
)
# check if the coord already has bounds
if bcube.coord(coord).has_bounds():
print(
("{} coordinate already has bounds, none will be added".format(coord))
)
# add bounds to coord
else:
bcube.coord(coord).guess_bounds(bound_position=bound_position)
print(("{} bounds added".format(coord)))
return bcube
def add_coord_system(cube):
"""
A cube must have a coordinate system in order to be regridded.
This function checks whether a cube has a coordinate system. If
the cube has no coordinate system, the standard the ellipsoid
representation wgs84 (ie. the one used by GPS) is added.
Note: It will not work for rotated pole data without a
coordinate system.
args
----
cube: iris cube
Returns
-------
cube: The copy of the input cube with coordinate system added,
if the cube didn't have one already.
Notes
-----
A simple example:
>>> file = os.path.join(conf.DATA_DIR, 'gtopo30_025deg.nc')
>>> cube = iris.load_cube(file)
>>> print(cube.coord('latitude').coord_system)
None
>>> cscube = add_coord_system(cube)
Coordinate system GeogCS(6371229.0) added to cube
>>> print(cscube.coord('latitude').coord_system)
GeogCS(6371229.0)
"""
# Note: wgs84 is the World Geodetic System, and a standard coord
# system in iris. In GeogCS(6371229.0), 6371229 is the Earth's
# radius in m. See:
# https://scitools.org.uk/iris/docs/v1.9.0/html/iris/iris/coord_systems.html
# check if the input is an Iris cube
if not isinstance(cube, iris.cube.Cube):
raise TypeError("Input is not a cube")
cscube = cube.copy()
cs = cscube.coord_system()
if cs is not None:
if str(cs).find("Rotated") == 0:
# not possible to add a coord system for
# rotated pole cube without knowing the
# rotation. Give error message.
raise TypeError("Error, no coordinate system for rotated pole cube")
else:
coord_names = [coord.name() for coord in cscube.coords(dim_coords=True)]
wgs84_cs = iris.coord_systems.GeogCS(6371229.0)
if "latitude" in coord_names:
cscube.coord("latitude").coord_system = wgs84_cs
if "longitude" in coord_names:
cscube.coord("longitude").coord_system = wgs84_cs
print("Coordinate system GeogCS(6371229.0) added to cube")
return cscube
def add_time_coord_cats(cube):
"""
This function takes in an iris cube, and adds a range of
numeric co-ordinate categorisations to it. Depending
on the data, not all of the coords added will be relevant.
args
----
cube: iris cube that has a coordinate called 'time'
Returns
-------
Cube: cube that has new time categorisation coords added
Notes
-----
A simple example:
>>> file = os.path.join(conf.DATA_DIR, 'mslp.daily.rcm.viet.nc')
>>> cube = iris.load_cube(file)
>>> coord_names = [coord.name() for coord in cube.coords()]
>>> print((', '.join(coord_names)))
time, grid_latitude, grid_longitude
>>> ccube = add_time_coord_cats(cube)
>>> coord_names = [coord.name() for coord in ccube.coords()]
>>> print((', '.join(coord_names)))
time, grid_latitude, grid_longitude, day_of_month, day_of_year, month, \
month_number, season, season_number, year
>>> # print every 50th value of the added time cat coords
... for c in coord_names[3:]:
... print(ccube.coord(c).long_name)
... print(ccube.coord(c).points[::50])
...
day_of_month
[ 1 21 11 1 21 11 1 21]
day_of_year
[ 1 51 101 151 201 251 301 351]
month
['Jan' 'Feb' 'Apr' 'Jun' 'Jul' 'Sep' 'Nov' 'Dec']
month_number
[ 1 2 4 6 7 9 11 12]
season
['djf' 'djf' 'mam' 'jja' 'jja' 'son' 'son' 'djf']
season_number
[0 0 1 2 2 3 3 0]
year
[2000 2000 2000 2000 2000 2000 2000 2000]
"""
# most errors pop up when you try to add a coord that has
# previously been added, or the cube doesn't contain the
# necessary attribute.
ccube = cube.copy()
# numeric
try:
iccat.add_day_of_year(ccube, "time")
except AttributeError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
except ValueError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
try:
iccat.add_day_of_month(ccube, "time")
except AttributeError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
except ValueError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
try:
iccat.add_month_number(ccube, "time")
except AttributeError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
except ValueError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
try:
iccat.add_season_number(ccube, "time")
except AttributeError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
except ValueError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
try:
iccat.add_year(ccube, "time")
except AttributeError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
except ValueError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
# strings
try:
iccat.add_month(ccube, "time")
except AttributeError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
except ValueError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
try:
iccat.add_season(ccube, "time")
except AttributeError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
except ValueError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
return ccube
def extract_rot_cube(cube, min_lat, min_lon, max_lat, max_lon):
"""
    Function extracts the specified region from the cube.
    args
    ----
    cube: cube on rotated coord system, used as reference grid for transformation.
    min_lat: The minimum latitude point of the desired extracted cube.
    min_lon: The minimum longitude point of the desired extracted cube.
    max_lat: The maximum latitude point of the desired extracted cube.
    max_lon: The maximum longitude point of the desired extracted cube.
    Returns
    -------
    extracted_cube: cube extracted to the desired lat/lon box.
An example:
>>> file = os.path.join(conf.DATA_DIR, 'rcm_monthly.pp')
>>> cube = iris.load_cube(file, 'air_temperature')
>>> min_lat = 50
>>> min_lon = -10
>>> max_lat = 60
>>> max_lon = 0
>>> extracted_cube = extract_rot_cube(cube, min_lat, min_lon, max_lat, max_lon)
>>> max_lat_cube = np.max(extracted_cube.coord('latitude').points)
>>> print(f'{max_lat_cube:.3f}')
61.365
>>> min_lat_cube = np.min(extracted_cube.coord('latitude').points)
>>> print(f'{min_lat_cube:.3f}')
48.213
>>> max_lon_cube = np.max(extracted_cube.coord('longitude').points)
>>> print(f'{max_lon_cube:.3f}')
3.643
>>> min_lon_cube = np.min(extracted_cube.coord('longitude').points)
>>> print(f'{min_lon_cube:.3f}')
-16.292
"""
# adding unrotated coords to the cube
cube = add_aux_unrotated_coords(cube)
# mask the cube using the true lat and lon
lats = cube.coord("latitude").points
lons = cube.coord("longitude").points
select_lons = (lons >= min_lon) & (lons <= max_lon)
select_lats = (lats >= min_lat) & (lats <= max_lat)
selection = select_lats & select_lons
selection = da.broadcast_to(selection, cube.shape)
cube.data = da.ma.masked_where(~selection, cube.core_data())
# grab a single 2D slice of X and Y and take the mask
lon_coord = cube.coord(axis="X", dim_coords=True)
lat_coord = cube.coord(axis="Y", dim_coords=True)
for yx_slice in cube.slices(["grid_latitude", "grid_longitude"]):
cmask = yx_slice.data.mask
break
# now cut the cube down along X and Y coords
x1, x2, y1, y2 = _get_xy_noborder(cmask)
idx = len(cube.shape) * [slice(None)]
idx[cube.coord_dims(cube.coord(axis="x", dim_coords=True))[0]] = slice(x1, x2, 1)
idx[cube.coord_dims(cube.coord(axis="y", dim_coords=True))[0]] = slice(y1, y2, 1)
extracted_cube = cube[tuple(idx)]
return extracted_cube
def remove_forecast_coordinates(iris_cube):
"""A function to remove the forecast_period and
forecast_reference_time coordinates from the UM PP files
args
----
iris_cube: input iris_cube
Returns
-------
iris_cube: iris cube without the forecast_period and forecast_reference_time
coordinates
Notes
-----
See below for examples:
>>> cube_list_fcr = iris.cube.CubeList()
>>> file = os.path.join(conf.DATA_DIR, 'rcm_monthly.pp')
>>> cube_list = iris.load(file)
>>> for cube in cube_list:
... cube_fcr = remove_forecast_coordinates(cube)
... cube_list_fcr.append(cube_fcr)
Removed the forecast_period coordinate from Heavyside function \
on pressure levels cube
Removed the forecast_reference_time coordinate from Heavyside \
function on pressure levels cube
Removed the forecast_period coordinate from air_temperature cube
Removed the forecast_reference_time coordinate from air_temperature cube
Removed the forecast_period coordinate from relative_humidity cube
Removed the forecast_reference_time coordinate from relative_humidity cube
Removed the forecast_period coordinate from specific_humidity cube
Removed the forecast_reference_time coordinate from specific_humidity cube
Removed the forecast_period coordinate from x_wind cube
Removed the forecast_reference_time coordinate from x_wind cube
Removed the forecast_period coordinate from y_wind cube
Removed the forecast_reference_time coordinate from y_wind cube
Now check if the forecast coordinates have been removed
>>> for cube in cube_list_fcr:
... cube_nfc = remove_forecast_coordinates(cube)
'Expected to find exactly 1 forecast_period coordinate, but found none.'
'Expected to find exactly 1 forecast_reference_time coordinate, but found none.'
'Expected to find exactly 1 forecast_period coordinate, but found none.'
'Expected to find exactly 1 forecast_reference_time coordinate, but found none.'
'Expected to find exactly 1 forecast_period coordinate, but found none.'
'Expected to find exactly 1 forecast_reference_time coordinate, but found none.'
'Expected to find exactly 1 forecast_period coordinate, but found none.'
'Expected to find exactly 1 forecast_reference_time coordinate, but found none.'
'Expected to find exactly 1 forecast_period coordinate, but found none.'
'Expected to find exactly 1 forecast_reference_time coordinate, but found none.'
'Expected to find exactly 1 forecast_period coordinate, but found none.'
'Expected to find exactly 1 forecast_reference_time coordinate, but found none.'
"""
try:
iris_cube.remove_coord("forecast_period")
print(
(
"Removed the forecast_period coordinate from {} cube".format(
iris_cube.name()
)
)
)
except iris.exceptions.CoordinateNotFoundError as coord_not_found:
print("{}".format(coord_not_found))
try:
iris_cube.remove_coord("forecast_reference_time")
print(
(
"Removed the forecast_reference_time coordinate from {} cube".format(
iris_cube.name()
)
)
)
except iris.exceptions.CoordinateNotFoundError as coord_not_found:
print("{}".format(coord_not_found))
return iris_cube
def rim_remove(cube, rim_width):
""" Return IRIS cube with rim removed.
args
----
cube: input iris cube
rim_width: integer, number of grid points to remove from edge of lat and long
Returns
-------
rrcube: rim removed cube
Notes
-----
See below for examples:
>>> cube_list_rr = iris.cube.CubeList()
>>> file = os.path.join(conf.DATA_DIR, 'rcm_monthly.pp')
>>> cube_list = iris.load(file)
>>> for cube in cube_list:
... cube_rr = rim_remove(cube, 8)
... cube_list_rr.append(cube_rr)
...
Removed 8 size rim from Heavyside function on pressure levels
Removed 8 size rim from air_temperature
Removed 8 size rim from relative_humidity
Removed 8 size rim from specific_humidity
Removed 8 size rim from x_wind
Removed 8 size rim from y_wind
>>> file = os.path.join(conf.DATA_DIR, 'rcm_mslp_monthly.pp')
>>> mslp_cube = iris.load_cube(file)
>>>
>>> mslp_cube_rr = rim_remove(mslp_cube, 8)
Removed 8 size rim from air_pressure_at_sea_level
>>>
>>> print(len(mslp_cube.coord('grid_latitude').points))
432
>>> print(len(mslp_cube.coord('grid_longitude').points))
444
>>> print(len(mslp_cube.coord('grid_latitude').points))
432
>>> print(len(mslp_cube.coord('grid_longitude').points))
444
>>>
>>> mslp_cube_rrrr = rim_remove(mslp_cube_rr, 8)
WARNING - This cube has already had it's rim removed
Removed 8 size rim from air_pressure_at_sea_level
Now test for failures:
>>> mslp_cube_rr = rim_remove(cube, 8.2) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: Please provide a positive integer for rim_width
>>> mslp_cube_rr = rim_remove(cube, -5) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: Please provide a positive integer > 0 for rim_width
>>> mslp_cube_rr = rim_remove(cube, 400) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: length of lat or lon coord is < rim_width*2
>>> mslp_cube_rr = rim_remove(cube, 0) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: Please provide a positive integer > 0 for rim_width
>>> mslp_cube_rr = rim_remove(cube, 'a') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: Please provide a positive integer for rim_width
"""
# check if the input is an Iris cube
if not isinstance(cube, iris.cube.Cube):
raise TypeError("Input is not a cube")
# check whether rim_width is an integer
if not isinstance(rim_width, (integer_types)):
raise TypeError("Please provide a positive integer for rim_width")
if rim_width <= 0:
raise IndexError("Please provide a positive integer > 0 for rim_width")
    # check whether this cube has already had its rim removed
if "rim_removed" in cube.attributes:
print("WARNING - This cube has already had it's rim removed")
# Longitude
xcoord = cube.coord(axis="X", dim_coords=True)
# Latitude
ycoord = cube.coord(axis="Y", dim_coords=True)
# make sure specified rim_width is going to work
if len(xcoord.points) <= (rim_width * 2) or len(ycoord.points) <= (rim_width * 2):
raise IndexError("length of lat or lon coord is < rim_width*2")
# Remove rim from Longitude
rrcube = cube.subset(xcoord[rim_width : -1 * rim_width])
# Remove rim from Latitude
rrcube = rrcube.subset(ycoord[rim_width : -1 * rim_width])
# add meta data that rim has been removed
rrcube.attributes["rim_removed"] = "{} point rim removed".format(rim_width)
print(("Removed {} size rim from {}".format(rim_width, cube.name())))
return rrcube
if __name__ == "__main__":
doctest.testmod()
| 35.263006
| 87
| 0.651586
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2020 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
import iris
import iris.analysis
import numpy as np
from six import string_types, integer_types
import iris.coord_categorisation as iccat
import doctest
import os.path
import catnip.config as conf
import iris.exceptions
from dask import array as da
def _get_xy_noborder(mask):
"""
    Return the indices of where the mask is valid. If the mask is
    all True (all masked), raise a ValueError.
args
----
mask: mask from numpy array
Returns
-------
x1, x2, y1, y2: int giving space where the data is valid
"""
if np.all(mask):
raise ValueError("All values masked - can't get indices")
ys, xs = np.where(~mask)
x1 = min(xs)
x2 = max(xs) + 1
y1 = min(ys)
y2 = max(ys) + 1
return x1, x2, y1, y2
def add_aux_unrotated_coords(cube):
"""
This function takes a cube that is on a rotated pole
    coordinate system and adds to it two additional
    auxiliary coordinates to hold the unrotated coordinate
    values.
    args
    ----
    cube: iris cube on a rotated pole coordinate system
    Returns
    -------
    cube: input cube with auxiliary coordinates of unrotated
latitude and longitude
Notes
-----
See below for an example that should be run with python3:
>>> file = os.path.join(conf.DATA_DIR, 'mslp.daily.rcm.viet.nc')
>>> cube = iris.load_cube(file)
>>> print([coord.name() for coord in cube.coords()])
['time', 'grid_latitude', 'grid_longitude']
>>> auxcube = add_aux_unrotated_coords(cube)
>>> print([coord.name() for coord in auxcube.coords()])
['time', 'grid_latitude', 'grid_longitude', 'latitude', 'longitude']
>>> print(auxcube.coord('latitude')) # doctest: +NORMALIZE_WHITESPACE
AuxCoord(array([[35.32243855, 35.33914928, 35.355619 , ..., 35.71848081,
35.70883111, 35.69893388],
[35.10317609, 35.11986604, 35.13631525, ..., 35.49871728,
35.48908 , 35.47919551],
[34.88390966, 34.90057895, 34.91700776, ..., 35.27895246,
35.26932754, 35.25945571],
...,
[ 6.13961446, 6.15413611, 6.16844578, ..., 6.48307389,
6.47472284, 6.46615667],
[ 5.92011032, 5.93461779, 5.94891347, ..., 6.26323044,
6.25488773, 6.24633011],
[ 5.70060768, 5.71510098, 5.72938268, ..., 6.04338876,
6.03505439, 6.02650532]]), standard_name=None, \
units=Unit('degrees'), long_name='latitude')
>>> print(auxcube.shape)
(360, 136, 109)
>>> print(auxcube.coord('latitude').shape)
(136, 109)
>>> print(auxcube.coord('longitude').shape)
(136, 109)
"""
if not isinstance(cube, iris.cube.Cube):
raise TypeError("Input is not a cube")
# get cube's coordinate system
cs = cube.coord_system()
if str(cs).find("Rotated") == -1:
raise TypeError(
"The cube is not on a rotated pole, coord system is {}".format(str(cs))
)
auxcube = cube.copy()
# get coord names
# Longitude
xcoord = auxcube.coord(axis="X", dim_coords=True)
# Latitude
ycoord = auxcube.coord(axis="Y", dim_coords=True)
# read in the grid lat/lon points from the cube
glat = auxcube.coord(ycoord).points
glon = auxcube.coord(xcoord).points
# create a rectangular grid out of an array of
# glon and glat values, shape will be len(glat)xlen(glon)
x, y = np.meshgrid(glon, glat)
# get the cube dimensions which corresponds to glon and glat
x_dim = auxcube.coord_dims(xcoord)[0]
y_dim = auxcube.coord_dims(ycoord)[0]
# define two new variables to hold the unrotated coordinates
rlongitude, rlatitude = iris.analysis.cartography.unrotate_pole(
x, y, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude
)
    # create two new auxiliary coordinates to hold
# the values of the unrotated coordinates
reg_long = iris.coords.AuxCoord(rlongitude, long_name="longitude", units="degrees")
reg_lat = iris.coords.AuxCoord(rlatitude, long_name="latitude", units="degrees")
    # add two auxiliary coordinates to the cube holding
# regular(unrotated) lat/lon values
auxcube.add_aux_coord(reg_long, [y_dim, x_dim])
auxcube.add_aux_coord(reg_lat, [y_dim, x_dim])
return auxcube
def add_bounds(cube, coord_names, bound_position=0.5):
"""
Simple function to check whether a
coordinate in a cube has bounds, and
add them if it doesn't.
args
----
cube: iris cube
coord_names: string or list of strings containing the name/s
of the coordinates you want to add bounds to.
bound_position: Optional, the desired position of the bounds relative to
the position of the points. Default is 0.5.
Returns
-------
cube: cube with bounds added
Notes
-----
Need to be careful that it is appropriate
to add bounds to the data, e.g. if data
are instantaneous, time bounds are not
appropriate.
An example:
>>> file = os.path.join(conf.DATA_DIR, 'mslp.daily.rcm.viet.nc')
>>> cube = iris.load_cube(file)
>>> bcube = add_bounds(cube, 'time')
time coordinate already has bounds, none will be added
>>> bcube = add_bounds(cube, 'grid_latitude')
grid_latitude bounds added
>>> bcube = add_bounds(cube, ['grid_latitude','grid_longitude'])
grid_latitude bounds added
grid_longitude bounds added
"""
# check if the input is an Iris cube
if not isinstance(cube, iris.cube.Cube):
raise TypeError("Input is not a cube")
# check if the coordinate name input is a string
if not isinstance(coord_names, (string_types, list)):
raise TypeError("Input coordinate must be a string")
bcube = cube.copy()
# find names of dim coords
c_names = [c.name() for c in bcube.coords()]
    # if coord_names is a single string, the loop below would split it
    # into characters; this statement checks for that case and
    # puts it into a tuple to prevent splitting.
if isinstance(coord_names, string_types):
coord_names = tuple([coord_names])
for coord in coord_names:
# check if coord is a string
if not isinstance(coord, string_types):
raise TypeError(
"Coordinate {} must be a string, it is currently a {}".format(
str(coord), type(coord)
)
)
# check coord is a coordinate of the cube
if coord not in c_names:
raise AttributeError(
"{} is not a coordinate, available coordinates are: {}".format(
coord, c_names
)
)
# check if the coord already has bounds
if bcube.coord(coord).has_bounds():
print(
("{} coordinate already has bounds, none will be added".format(coord))
)
# add bounds to coord
else:
bcube.coord(coord).guess_bounds(bound_position=bound_position)
print(("{} bounds added".format(coord)))
return bcube
def add_coord_system(cube):
"""
A cube must have a coordinate system in order to be regridded.
This function checks whether a cube has a coordinate system. If
the cube has no coordinate system, the standard the ellipsoid
representation wgs84 (ie. the one used by GPS) is added.
Note: It will not work for rotated pole data without a
coordinate system.
args
----
cube: iris cube
Returns
-------
cube: The copy of the input cube with coordinate system added,
if the cube didn't have one already.
Notes
-----
A simple example:
>>> file = os.path.join(conf.DATA_DIR, 'gtopo30_025deg.nc')
>>> cube = iris.load_cube(file)
>>> print(cube.coord('latitude').coord_system)
None
>>> cscube = add_coord_system(cube)
Coordinate system GeogCS(6371229.0) added to cube
>>> print(cscube.coord('latitude').coord_system)
GeogCS(6371229.0)
"""
# Note: wgs84 is the World Geodetic System, and a standard coord
# system in iris. In GeogCS(6371229.0), 6371229 is the Earth's
# radius in m. See:
# https://scitools.org.uk/iris/docs/v1.9.0/html/iris/iris/coord_systems.html
# check if the input is an Iris cube
if not isinstance(cube, iris.cube.Cube):
raise TypeError("Input is not a cube")
cscube = cube.copy()
cs = cscube.coord_system()
if cs is not None:
if str(cs).find("Rotated") == 0:
# not possible to add a coord system for
# rotated pole cube without knowing the
# rotation. Give error message.
raise TypeError("Error, no coordinate system for rotated pole cube")
else:
coord_names = [coord.name() for coord in cscube.coords(dim_coords=True)]
wgs84_cs = iris.coord_systems.GeogCS(6371229.0)
if "latitude" in coord_names:
cscube.coord("latitude").coord_system = wgs84_cs
if "longitude" in coord_names:
cscube.coord("longitude").coord_system = wgs84_cs
print("Coordinate system GeogCS(6371229.0) added to cube")
return cscube
def add_time_coord_cats(cube):
"""
This function takes in an iris cube, and adds a range of
numeric co-ordinate categorisations to it. Depending
on the data, not all of the coords added will be relevant.
args
----
cube: iris cube that has a coordinate called 'time'
Returns
-------
Cube: cube that has new time categorisation coords added
Notes
-----
A simple example:
>>> file = os.path.join(conf.DATA_DIR, 'mslp.daily.rcm.viet.nc')
>>> cube = iris.load_cube(file)
>>> coord_names = [coord.name() for coord in cube.coords()]
>>> print((', '.join(coord_names)))
time, grid_latitude, grid_longitude
>>> ccube = add_time_coord_cats(cube)
>>> coord_names = [coord.name() for coord in ccube.coords()]
>>> print((', '.join(coord_names)))
time, grid_latitude, grid_longitude, day_of_month, day_of_year, month, \
month_number, season, season_number, year
>>> # print every 50th value of the added time cat coords
... for c in coord_names[3:]:
... print(ccube.coord(c).long_name)
... print(ccube.coord(c).points[::50])
...
day_of_month
[ 1 21 11 1 21 11 1 21]
day_of_year
[ 1 51 101 151 201 251 301 351]
month
['Jan' 'Feb' 'Apr' 'Jun' 'Jul' 'Sep' 'Nov' 'Dec']
month_number
[ 1 2 4 6 7 9 11 12]
season
['djf' 'djf' 'mam' 'jja' 'jja' 'son' 'son' 'djf']
season_number
[0 0 1 2 2 3 3 0]
year
[2000 2000 2000 2000 2000 2000 2000 2000]
"""
# most errors pop up when you try to add a coord that has
# previously been added, or the cube doesn't contain the
# necessary attribute.
ccube = cube.copy()
# numeric
try:
iccat.add_day_of_year(ccube, "time")
except AttributeError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
except ValueError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
try:
iccat.add_day_of_month(ccube, "time")
except AttributeError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
except ValueError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
try:
iccat.add_month_number(ccube, "time")
except AttributeError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
except ValueError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
try:
iccat.add_season_number(ccube, "time")
except AttributeError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
except ValueError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
try:
iccat.add_year(ccube, "time")
except AttributeError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
except ValueError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
# strings
try:
iccat.add_month(ccube, "time")
except AttributeError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
except ValueError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
try:
iccat.add_season(ccube, "time")
except AttributeError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
except ValueError as err:
print(("add_time_coord_cats: {}, skipping . . . ".format(err)))
return ccube
def extract_rot_cube(cube, min_lat, min_lon, max_lat, max_lon):
"""
    Function extracts the specified region from the cube.
    args
    ----
    cube: cube on rotated coord system, used as reference grid for transformation.
    min_lat: The minimum latitude point of the desired extracted cube.
    min_lon: The minimum longitude point of the desired extracted cube.
    max_lat: The maximum latitude point of the desired extracted cube.
    max_lon: The maximum longitude point of the desired extracted cube.
    Returns
    -------
    extracted_cube: cube extracted to the desired lat/lon box.
An example:
>>> file = os.path.join(conf.DATA_DIR, 'rcm_monthly.pp')
>>> cube = iris.load_cube(file, 'air_temperature')
>>> min_lat = 50
>>> min_lon = -10
>>> max_lat = 60
>>> max_lon = 0
>>> extracted_cube = extract_rot_cube(cube, min_lat, min_lon, max_lat, max_lon)
>>> max_lat_cube = np.max(extracted_cube.coord('latitude').points)
>>> print(f'{max_lat_cube:.3f}')
61.365
>>> min_lat_cube = np.min(extracted_cube.coord('latitude').points)
>>> print(f'{min_lat_cube:.3f}')
48.213
>>> max_lon_cube = np.max(extracted_cube.coord('longitude').points)
>>> print(f'{max_lon_cube:.3f}')
3.643
>>> min_lon_cube = np.min(extracted_cube.coord('longitude').points)
>>> print(f'{min_lon_cube:.3f}')
-16.292
"""
# adding unrotated coords to the cube
cube = add_aux_unrotated_coords(cube)
# mask the cube using the true lat and lon
lats = cube.coord("latitude").points
lons = cube.coord("longitude").points
select_lons = (lons >= min_lon) & (lons <= max_lon)
select_lats = (lats >= min_lat) & (lats <= max_lat)
selection = select_lats & select_lons
selection = da.broadcast_to(selection, cube.shape)
cube.data = da.ma.masked_where(~selection, cube.core_data())
# grab a single 2D slice of X and Y and take the mask
lon_coord = cube.coord(axis="X", dim_coords=True)
lat_coord = cube.coord(axis="Y", dim_coords=True)
for yx_slice in cube.slices(["grid_latitude", "grid_longitude"]):
cmask = yx_slice.data.mask
break
# now cut the cube down along X and Y coords
x1, x2, y1, y2 = _get_xy_noborder(cmask)
idx = len(cube.shape) * [slice(None)]
idx[cube.coord_dims(cube.coord(axis="x", dim_coords=True))[0]] = slice(x1, x2, 1)
idx[cube.coord_dims(cube.coord(axis="y", dim_coords=True))[0]] = slice(y1, y2, 1)
extracted_cube = cube[tuple(idx)]
return extracted_cube
def remove_forecast_coordinates(iris_cube):
"""A function to remove the forecast_period and
forecast_reference_time coordinates from the UM PP files
args
----
iris_cube: input iris_cube
Returns
-------
iris_cube: iris cube without the forecast_period and forecast_reference_time
coordinates
Notes
-----
See below for examples:
>>> cube_list_fcr = iris.cube.CubeList()
>>> file = os.path.join(conf.DATA_DIR, 'rcm_monthly.pp')
>>> cube_list = iris.load(file)
>>> for cube in cube_list:
... cube_fcr = remove_forecast_coordinates(cube)
... cube_list_fcr.append(cube_fcr)
Removed the forecast_period coordinate from Heavyside function \
on pressure levels cube
Removed the forecast_reference_time coordinate from Heavyside \
function on pressure levels cube
Removed the forecast_period coordinate from air_temperature cube
Removed the forecast_reference_time coordinate from air_temperature cube
Removed the forecast_period coordinate from relative_humidity cube
Removed the forecast_reference_time coordinate from relative_humidity cube
Removed the forecast_period coordinate from specific_humidity cube
Removed the forecast_reference_time coordinate from specific_humidity cube
Removed the forecast_period coordinate from x_wind cube
Removed the forecast_reference_time coordinate from x_wind cube
Removed the forecast_period coordinate from y_wind cube
Removed the forecast_reference_time coordinate from y_wind cube
Now check if the forecast coordinates have been removed
>>> for cube in cube_list_fcr:
... cube_nfc = remove_forecast_coordinates(cube)
'Expected to find exactly 1 forecast_period coordinate, but found none.'
'Expected to find exactly 1 forecast_reference_time coordinate, but found none.'
'Expected to find exactly 1 forecast_period coordinate, but found none.'
'Expected to find exactly 1 forecast_reference_time coordinate, but found none.'
'Expected to find exactly 1 forecast_period coordinate, but found none.'
'Expected to find exactly 1 forecast_reference_time coordinate, but found none.'
'Expected to find exactly 1 forecast_period coordinate, but found none.'
'Expected to find exactly 1 forecast_reference_time coordinate, but found none.'
'Expected to find exactly 1 forecast_period coordinate, but found none.'
'Expected to find exactly 1 forecast_reference_time coordinate, but found none.'
'Expected to find exactly 1 forecast_period coordinate, but found none.'
'Expected to find exactly 1 forecast_reference_time coordinate, but found none.'
"""
try:
iris_cube.remove_coord("forecast_period")
print(
(
"Removed the forecast_period coordinate from {} cube".format(
iris_cube.name()
)
)
)
except iris.exceptions.CoordinateNotFoundError as coord_not_found:
print("{}".format(coord_not_found))
try:
iris_cube.remove_coord("forecast_reference_time")
print(
(
"Removed the forecast_reference_time coordinate from {} cube".format(
iris_cube.name()
)
)
)
except iris.exceptions.CoordinateNotFoundError as coord_not_found:
print("{}".format(coord_not_found))
return iris_cube
def rim_remove(cube, rim_width):
""" Return IRIS cube with rim removed.
args
----
cube: input iris cube
rim_width: integer, number of grid points to remove from edge of lat and long
Returns
-------
rrcube: rim removed cube
Notes
-----
See below for examples:
>>> cube_list_rr = iris.cube.CubeList()
>>> file = os.path.join(conf.DATA_DIR, 'rcm_monthly.pp')
>>> cube_list = iris.load(file)
>>> for cube in cube_list:
... cube_rr = rim_remove(cube, 8)
... cube_list_rr.append(cube_rr)
...
Removed 8 size rim from Heavyside function on pressure levels
Removed 8 size rim from air_temperature
Removed 8 size rim from relative_humidity
Removed 8 size rim from specific_humidity
Removed 8 size rim from x_wind
Removed 8 size rim from y_wind
>>> file = os.path.join(conf.DATA_DIR, 'rcm_mslp_monthly.pp')
>>> mslp_cube = iris.load_cube(file)
>>>
>>> mslp_cube_rr = rim_remove(mslp_cube, 8)
Removed 8 size rim from air_pressure_at_sea_level
>>>
>>> print(len(mslp_cube.coord('grid_latitude').points))
432
>>> print(len(mslp_cube.coord('grid_longitude').points))
444
>>> print(len(mslp_cube.coord('grid_latitude').points))
432
>>> print(len(mslp_cube.coord('grid_longitude').points))
444
>>>
>>> mslp_cube_rrrr = rim_remove(mslp_cube_rr, 8)
WARNING - This cube has already had it's rim removed
Removed 8 size rim from air_pressure_at_sea_level
Now test for failures:
>>> mslp_cube_rr = rim_remove(cube, 8.2) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: Please provide a positive integer for rim_width
>>> mslp_cube_rr = rim_remove(cube, -5) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: Please provide a positive integer > 0 for rim_width
>>> mslp_cube_rr = rim_remove(cube, 400) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: length of lat or lon coord is < rim_width*2
>>> mslp_cube_rr = rim_remove(cube, 0) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: Please provide a positive integer > 0 for rim_width
>>> mslp_cube_rr = rim_remove(cube, 'a') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: Please provide a positive integer for rim_width
"""
# check if the input is an Iris cube
if not isinstance(cube, iris.cube.Cube):
raise TypeError("Input is not a cube")
# check whether rim_width is an integer
if not isinstance(rim_width, (integer_types)):
raise TypeError("Please provide a positive integer for rim_width")
if rim_width <= 0:
raise IndexError("Please provide a positive integer > 0 for rim_width")
    # check whether this cube has already had its rim removed
if "rim_removed" in cube.attributes:
print("WARNING - This cube has already had it's rim removed")
# Longitude
xcoord = cube.coord(axis="X", dim_coords=True)
# Latitude
ycoord = cube.coord(axis="Y", dim_coords=True)
# make sure specified rim_width is going to work
if len(xcoord.points) <= (rim_width * 2) or len(ycoord.points) <= (rim_width * 2):
raise IndexError("length of lat or lon coord is < rim_width*2")
# Remove rim from Longitude
rrcube = cube.subset(xcoord[rim_width : -1 * rim_width])
# Remove rim from Latitude
rrcube = rrcube.subset(ycoord[rim_width : -1 * rim_width])
# add meta data that rim has been removed
rrcube.attributes["rim_removed"] = "{} point rim removed".format(rim_width)
print(("Removed {} size rim from {}".format(rim_width, cube.name())))
return rrcube
if __name__ == "__main__":
doctest.testmod()
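Taken together, these helpers are typically chained when preparing RCM output; a hedged sketch using the same test data referenced in the doctests above (file and variable names are taken from those examples, the ordering itself is an assumption):

# Sketch: a typical preparation chain built from the functions defined above.
import os.path
import iris
import catnip.config as conf

cube = iris.load_cube(os.path.join(conf.DATA_DIR, "rcm_monthly.pp"), "air_temperature")
cube = remove_forecast_coordinates(cube)  # drop forecast_period / forecast_reference_time
cube = rim_remove(cube, 8)                # strip the 8-point rim
cube = add_aux_unrotated_coords(cube)     # attach unrotated lat/lon aux coords
cube = add_bounds(cube, ["grid_latitude", "grid_longitude"])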
| 0
| 0
| 0
|
0dd32e9ca2c7ce0add00ea94a0758f176b038de9
| 27,038
|
py
|
Python
|
src/summarization/metric/rouge_metric.py
|
youngerous/kobart-voice-summarization
|
bf48edea602c0661d638f0ed6f4a35c2ced4009f
|
[
"Apache-2.0"
] | 8
|
2021-05-16T05:40:29.000Z
|
2022-03-14T08:32:54.000Z
|
src/summarization/metric/rouge_metric.py
|
youngerous/kobart-voice-summarization
|
bf48edea602c0661d638f0ed6f4a35c2ced4009f
|
[
"Apache-2.0"
] | null | null | null |
src/summarization/metric/rouge_metric.py
|
youngerous/kobart-voice-summarization
|
bf48edea602c0661d638f0ed6f4a35c2ced4009f
|
[
"Apache-2.0"
] | 3
|
2021-08-09T08:06:24.000Z
|
2021-11-29T05:04:56.000Z
|
"""
Ref: https://dacon.io/competitions/official/235673/talkboard/401911?page=1&dtype=recent
"""
import os
import re
import platform
import itertools
import collections
import pkg_resources # pip install py-rouge
from io import open
if platform.system() == "Windows":
try:
from eunjeon import Mecab
    except ImportError:
        print("please install eunjeon module")
else:  # if running on Ubuntu/Linux
from konlpy.tag import Mecab
| 38.625714
| 97
| 0.492529
|
"""
Ref: https://dacon.io/competitions/official/235673/talkboard/401911?page=1&dtype=recent
"""
import os
import re
import platform
import itertools
import collections
import pkg_resources # pip install py-rouge
from io import open
if platform.system() == "Windows":
try:
from eunjeon import Mecab
    except ImportError:
        print("please install eunjeon module")
else:  # if running on Ubuntu/Linux
from konlpy.tag import Mecab
class Rouge:
DEFAULT_METRICS = {"rouge-n"}
DEFAULT_N = 1
STATS = ["f", "p", "r"]
AVAILABLE_METRICS = {"rouge-n", "rouge-l", "rouge-w"}
AVAILABLE_LENGTH_LIMIT_TYPES = {"words", "bytes"}
REMOVE_CHAR_PATTERN = re.compile("[^A-Za-z0-9가-힣]")
def __init__(
self,
metrics=None,
max_n=None,
limit_length=True,
length_limit=1000,
length_limit_type="words",
apply_avg=True,
apply_best=False,
use_tokenizer=True,
alpha=0.5,
weight_factor=1.0,
):
self.metrics = metrics[:] if metrics is not None else Rouge.DEFAULT_METRICS
for m in self.metrics:
if m not in Rouge.AVAILABLE_METRICS:
raise ValueError("Unknown metric '{}'".format(m))
self.max_n = max_n if "rouge-n" in self.metrics else None
# Add all rouge-n metrics
if self.max_n is not None:
index_rouge_n = self.metrics.index("rouge-n")
del self.metrics[index_rouge_n]
self.metrics += ["rouge-{}".format(n) for n in range(1, self.max_n + 1)]
self.metrics = set(self.metrics)
self.limit_length = limit_length
if self.limit_length:
if length_limit_type not in Rouge.AVAILABLE_LENGTH_LIMIT_TYPES:
raise ValueError(
"Unknown length_limit_type '{}'".format(length_limit_type)
)
self.length_limit = length_limit
if self.length_limit == 0:
self.limit_length = False
self.length_limit_type = length_limit_type
self.use_tokenizer = use_tokenizer
if use_tokenizer:
self.tokenizer = Mecab()
self.apply_avg = apply_avg
self.apply_best = apply_best
self.alpha = alpha
self.weight_factor = weight_factor
if self.weight_factor <= 0:
raise ValueError("ROUGE-W weight factor must greater than 0.")
def tokenize_text(self, text):
if self.use_tokenizer:
return self.tokenizer.morphs(text)
else:
return text
@staticmethod
def split_into_sentences(text):
return text.split("\n")
@staticmethod
def _get_ngrams(n, text):
ngram_set = collections.defaultdict(int)
max_index_ngram_start = len(text) - n
for i in range(max_index_ngram_start + 1):
ngram_set[tuple(text[i : i + n])] += 1
return ngram_set
@staticmethod
def _split_into_words(sentences):
return list(itertools.chain(*[_.split() for _ in sentences]))
@staticmethod
def _get_word_ngrams_and_length(n, sentences):
assert len(sentences) > 0
assert n > 0
tokens = Rouge._split_into_words(sentences)
return Rouge._get_ngrams(n, tokens), tokens, len(tokens) - (n - 1)
@staticmethod
def _get_unigrams(sentences):
assert len(sentences) > 0
tokens = Rouge._split_into_words(sentences)
unigram_set = collections.defaultdict(int)
for token in tokens:
unigram_set[token] += 1
return unigram_set, len(tokens)
@staticmethod
def _compute_p_r_f_score(
evaluated_count,
reference_count,
overlapping_count,
alpha=0.5,
weight_factor=1.0,
):
precision = (
0.0 if evaluated_count == 0 else overlapping_count / float(evaluated_count)
)
if weight_factor != 1.0:
precision = precision ** (1.0 / weight_factor)
recall = (
0.0 if reference_count == 0 else overlapping_count / float(reference_count)
)
if weight_factor != 1.0:
recall = recall ** (1.0 / weight_factor)
f1_score = Rouge._compute_f_score(precision, recall, alpha)
return {"f": f1_score, "p": precision, "r": recall}
@staticmethod
def _compute_f_score(precision, recall, alpha=0.5):
return (
0.0
if (recall == 0.0 or precision == 0.0)
else precision * recall / ((1 - alpha) * precision + alpha * recall)
)
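    # Note on _compute_f_score above: it returns the alpha-weighted harmonic mean
    #   F = P * R / ((1 - alpha) * P + alpha * R)
    # e.g. with P = 0.5, R = 0.25 and the default alpha = 0.5 this gives
    # 0.125 / 0.375 = 1/3, i.e. the usual F1 score.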
@staticmethod
def _compute_ngrams(evaluated_sentences, reference_sentences, n):
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
evaluated_ngrams, _, evaluated_count = Rouge._get_word_ngrams_and_length(
n, evaluated_sentences
)
reference_ngrams, _, reference_count = Rouge._get_word_ngrams_and_length(
n, reference_sentences
)
# Gets the overlapping ngrams between evaluated and reference
overlapping_ngrams = set(evaluated_ngrams.keys()).intersection(
set(reference_ngrams.keys())
)
overlapping_count = 0
for ngram in overlapping_ngrams:
overlapping_count += min(evaluated_ngrams[ngram], reference_ngrams[ngram])
return evaluated_count, reference_count, overlapping_count
@staticmethod
def _compute_ngrams_lcs(
evaluated_sentences, reference_sentences, weight_factor=1.0
):
def _lcs(x, y):
m = len(x)
n = len(y)
vals = collections.defaultdict(int)
dirs = collections.defaultdict(int)
for i in range(1, m + 1):
for j in range(1, n + 1):
if x[i - 1] == y[j - 1]:
vals[i, j] = vals[i - 1, j - 1] + 1
dirs[i, j] = "|"
elif vals[i - 1, j] >= vals[i, j - 1]:
vals[i, j] = vals[i - 1, j]
dirs[i, j] = "^"
else:
vals[i, j] = vals[i, j - 1]
dirs[i, j] = "<"
return vals, dirs
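        # _lcs above fills two DP tables over the token sequences: vals[i, j] is
        # the length of the longest common subsequence of x[:i] and y[:j], and
        # dirs[i, j] records the move ("|" = match, "^" = skip x, "<" = skip y)
        # that _mark_lcs later follows to flag the matched reference tokens.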
def _wlcs(x, y, weight_factor):
m = len(x)
n = len(y)
vals = collections.defaultdict(float)
dirs = collections.defaultdict(int)
lengths = collections.defaultdict(int)
for i in range(1, m + 1):
for j in range(1, n + 1):
if x[i - 1] == y[j - 1]:
length_tmp = lengths[i - 1, j - 1]
vals[i, j] = (
vals[i - 1, j - 1]
+ (length_tmp + 1) ** weight_factor
- length_tmp ** weight_factor
)
dirs[i, j] = "|"
lengths[i, j] = length_tmp + 1
elif vals[i - 1, j] >= vals[i, j - 1]:
vals[i, j] = vals[i - 1, j]
dirs[i, j] = "^"
lengths[i, j] = 0
else:
vals[i, j] = vals[i, j - 1]
dirs[i, j] = "<"
lengths[i, j] = 0
return vals, dirs
def _mark_lcs(mask, dirs, m, n):
while m != 0 and n != 0:
if dirs[m, n] == "|":
m -= 1
n -= 1
mask[m] = 1
elif dirs[m, n] == "^":
m -= 1
elif dirs[m, n] == "<":
n -= 1
else:
raise UnboundLocalError("Illegal move")
return mask
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
evaluated_unigrams_dict, evaluated_count = Rouge._get_unigrams(
evaluated_sentences
)
reference_unigrams_dict, reference_count = Rouge._get_unigrams(
reference_sentences
)
# Has to use weight factor for WLCS
use_WLCS = weight_factor != 1.0
if use_WLCS:
evaluated_count = evaluated_count ** weight_factor
reference_count = 0
overlapping_count = 0.0
for reference_sentence in reference_sentences:
reference_sentence_tokens = reference_sentence.split()
if use_WLCS:
reference_count += len(reference_sentence_tokens) ** weight_factor
hit_mask = [0 for _ in range(len(reference_sentence_tokens))]
for evaluated_sentence in evaluated_sentences:
evaluated_sentence_tokens = evaluated_sentence.split()
if use_WLCS:
_, lcs_dirs = _wlcs(
reference_sentence_tokens,
evaluated_sentence_tokens,
weight_factor,
)
else:
_, lcs_dirs = _lcs(
reference_sentence_tokens, evaluated_sentence_tokens
)
_mark_lcs(
hit_mask,
lcs_dirs,
len(reference_sentence_tokens),
len(evaluated_sentence_tokens),
)
overlapping_count_length = 0
for ref_token_id, val in enumerate(hit_mask):
if val == 1:
token = reference_sentence_tokens[ref_token_id]
if (
evaluated_unigrams_dict[token] > 0
and reference_unigrams_dict[token] > 0
):
evaluated_unigrams_dict[token] -= 1
reference_unigrams_dict[ref_token_id] -= 1
if use_WLCS:
overlapping_count_length += 1
if (
ref_token_id + 1 < len(hit_mask)
and hit_mask[ref_token_id + 1] == 0
) or ref_token_id + 1 == len(hit_mask):
overlapping_count += (
overlapping_count_length ** weight_factor
)
overlapping_count_length = 0
else:
overlapping_count += 1
if use_WLCS:
reference_count = reference_count ** weight_factor
return evaluated_count, reference_count, overlapping_count
def get_scores(self, hypothesis, references):
if isinstance(hypothesis, str):
hypothesis, references = [hypothesis], [references]
if type(hypothesis) != type(references):
raise ValueError("'hyps' and 'refs' are not of the same type")
if len(hypothesis) != len(references):
raise ValueError("'hyps' and 'refs' do not have the same length")
scores = {}
has_rouge_n_metric = (
len([metric for metric in self.metrics if metric.split("-")[-1].isdigit()])
> 0
)
if has_rouge_n_metric:
scores.update(self._get_scores_rouge_n(hypothesis, references))
# scores = {**scores, **self._get_scores_rouge_n(hypothesis, references)}
has_rouge_l_metric = (
len(
[
metric
for metric in self.metrics
if metric.split("-")[-1].lower() == "l"
]
)
> 0
)
if has_rouge_l_metric:
scores.update(self._get_scores_rouge_l_or_w(hypothesis, references, False))
# scores = {**scores, **self._get_scores_rouge_l_or_w(hypothesis, references, False)}
has_rouge_w_metric = (
len(
[
metric
for metric in self.metrics
if metric.split("-")[-1].lower() == "w"
]
)
> 0
)
if has_rouge_w_metric:
scores.update(self._get_scores_rouge_l_or_w(hypothesis, references, True))
# scores = {**scores, **self._get_scores_rouge_l_or_w(hypothesis, references, True)}
return scores
def _get_scores_rouge_n(self, all_hypothesis, all_references):
metrics = [metric for metric in self.metrics if metric.split("-")[-1].isdigit()]
if self.apply_avg or self.apply_best:
scores = {metric: {stat: 0.0 for stat in Rouge.STATS} for metric in metrics}
else:
scores = {
metric: [
{stat: [] for stat in Rouge.STATS}
for _ in range(len(all_hypothesis))
]
for metric in metrics
}
for sample_id, (hypothesis, references) in enumerate(
zip(all_hypothesis, all_references)
):
assert isinstance(hypothesis, str)
has_multiple_references = False
if isinstance(references, list):
has_multiple_references = len(references) > 1
if not has_multiple_references:
references = references[0]
# Prepare hypothesis and reference(s)
hypothesis = self._preprocess_summary_as_a_whole(hypothesis)
references = (
[
self._preprocess_summary_as_a_whole(reference)
for reference in references
]
if has_multiple_references
else [self._preprocess_summary_as_a_whole(references)]
)
# Compute scores
for metric in metrics:
suffix = metric.split("-")[-1]
n = int(suffix)
# Aggregate
if self.apply_avg:
# average model
total_hypothesis_ngrams_count = 0
total_reference_ngrams_count = 0
total_ngrams_overlapping_count = 0
for reference in references:
(
hypothesis_count,
reference_count,
overlapping_ngrams,
) = Rouge._compute_ngrams(hypothesis, reference, n)
total_hypothesis_ngrams_count += hypothesis_count
total_reference_ngrams_count += reference_count
total_ngrams_overlapping_count += overlapping_ngrams
score = Rouge._compute_p_r_f_score(
total_hypothesis_ngrams_count,
total_reference_ngrams_count,
total_ngrams_overlapping_count,
self.alpha,
)
for stat in Rouge.STATS:
scores[metric][stat] += score[stat]
else:
# Best model
if self.apply_best:
best_current_score = None
for reference in references:
(
hypothesis_count,
reference_count,
overlapping_ngrams,
) = Rouge._compute_ngrams(hypothesis, reference, n)
score = Rouge._compute_p_r_f_score(
hypothesis_count,
reference_count,
overlapping_ngrams,
self.alpha,
)
if (
best_current_score is None
or score["r"] > best_current_score["r"]
):
best_current_score = score
for stat in Rouge.STATS:
scores[metric][stat] += best_current_score[stat]
# Keep all
else:
for reference in references:
(
hypothesis_count,
reference_count,
overlapping_ngrams,
) = Rouge._compute_ngrams(hypothesis, reference, n)
score = Rouge._compute_p_r_f_score(
hypothesis_count,
reference_count,
overlapping_ngrams,
self.alpha,
)
for stat in Rouge.STATS:
scores[metric][sample_id][stat].append(score[stat])
        # Compute final score with the average or the max
if (self.apply_avg or self.apply_best) and len(all_hypothesis) > 1:
for metric in metrics:
for stat in Rouge.STATS:
scores[metric][stat] /= len(all_hypothesis)
return scores
def _get_scores_rouge_l_or_w(self, all_hypothesis, all_references, use_w=False):
metric = "rouge-w" if use_w else "rouge-l"
if self.apply_avg or self.apply_best:
scores = {metric: {stat: 0.0 for stat in Rouge.STATS}}
else:
scores = {
metric: [
{stat: [] for stat in Rouge.STATS}
for _ in range(len(all_hypothesis))
]
}
for sample_id, (hypothesis_sentences, references_sentences) in enumerate(
zip(all_hypothesis, all_references)
):
assert isinstance(hypothesis_sentences, str)
has_multiple_references = False
if isinstance(references_sentences, list):
has_multiple_references = len(references_sentences) > 1
if not has_multiple_references:
references_sentences = references_sentences[0]
# Prepare hypothesis and reference(s)
hypothesis_sentences = self._preprocess_summary_per_sentence(
hypothesis_sentences
)
references_sentences = (
[
self._preprocess_summary_per_sentence(reference)
for reference in references_sentences
]
if has_multiple_references
else [self._preprocess_summary_per_sentence(references_sentences)]
)
# Compute scores
# Aggregate
if self.apply_avg:
# average model
total_hypothesis_ngrams_count = 0
total_reference_ngrams_count = 0
total_ngrams_overlapping_count = 0
for reference_sentences in references_sentences:
(
hypothesis_count,
reference_count,
overlapping_ngrams,
) = Rouge._compute_ngrams_lcs(
hypothesis_sentences,
reference_sentences,
self.weight_factor if use_w else 1.0,
)
total_hypothesis_ngrams_count += hypothesis_count
total_reference_ngrams_count += reference_count
total_ngrams_overlapping_count += overlapping_ngrams
score = Rouge._compute_p_r_f_score(
total_hypothesis_ngrams_count,
total_reference_ngrams_count,
total_ngrams_overlapping_count,
self.alpha,
self.weight_factor if use_w else 1.0,
)
for stat in Rouge.STATS:
scores[metric][stat] += score[stat]
else:
# Best model
if self.apply_best:
best_current_score = None
best_current_score_wlcs = None
for reference_sentences in references_sentences:
(
hypothesis_count,
reference_count,
overlapping_ngrams,
) = Rouge._compute_ngrams_lcs(
hypothesis_sentences,
reference_sentences,
self.weight_factor if use_w else 1.0,
)
score = Rouge._compute_p_r_f_score(
                            hypothesis_count,
                            reference_count,
                            overlapping_ngrams,
self.alpha,
self.weight_factor if use_w else 1.0,
)
if use_w:
reference_count_for_score = reference_count ** (
1.0 / self.weight_factor
)
overlapping_ngrams_for_score = overlapping_ngrams
score_wlcs = (
overlapping_ngrams_for_score / reference_count_for_score
) ** (1.0 / self.weight_factor)
if (
best_current_score_wlcs is None
or score_wlcs > best_current_score_wlcs
):
best_current_score = score
best_current_score_wlcs = score_wlcs
else:
if (
best_current_score is None
or score["r"] > best_current_score["r"]
):
best_current_score = score
for stat in Rouge.STATS:
scores[metric][stat] += best_current_score[stat]
# Keep all
else:
for reference_sentences in references_sentences:
(
hypothesis_count,
reference_count,
overlapping_ngrams,
) = Rouge._compute_ngrams_lcs(
hypothesis_sentences,
reference_sentences,
self.weight_factor if use_w else 1.0,
)
score = Rouge._compute_p_r_f_score(
hypothesis_count,
reference_count,
overlapping_ngrams,
self.alpha,
                            self.weight_factor if use_w else 1.0,
)
for stat in Rouge.STATS:
scores[metric][sample_id][stat].append(score[stat])
        # Compute final score with the average or the max
if (self.apply_avg or self.apply_best) and len(all_hypothesis) > 1:
for stat in Rouge.STATS:
scores[metric][stat] /= len(all_hypothesis)
return scores
def _preprocess_summary_as_a_whole(self, summary):
sentences = Rouge.split_into_sentences(summary)
# Truncate
if self.limit_length:
# By words
if self.length_limit_type == "words":
summary = " ".join(sentences)
all_tokens = summary.split() # Counting as in the perls script
summary = " ".join(all_tokens[: self.length_limit])
# By bytes
elif self.length_limit_type == "bytes":
summary = ""
current_len = 0
for sentence in sentences:
sentence = sentence.strip()
sentence_len = len(sentence)
if current_len + sentence_len < self.length_limit:
if current_len != 0:
summary += " "
summary += sentence
current_len += sentence_len
else:
if current_len > 0:
summary += " "
summary += sentence[: self.length_limit - current_len]
break
else:
summary = " ".join(sentences)
summary = Rouge.REMOVE_CHAR_PATTERN.sub(" ", summary.lower()).strip()
tokens = self.tokenize_text(Rouge.REMOVE_CHAR_PATTERN.sub(" ", summary))
preprocessed_summary = [" ".join(tokens)]
return preprocessed_summary
def _preprocess_summary_per_sentence(self, summary):
sentences = Rouge.split_into_sentences(summary)
# Truncate
if self.limit_length:
final_sentences = []
current_len = 0
# By words
if self.length_limit_type == "words":
for sentence in sentences:
tokens = sentence.strip().split()
tokens_len = len(tokens)
if current_len + tokens_len < self.length_limit:
sentence = " ".join(tokens)
final_sentences.append(sentence)
current_len += tokens_len
else:
sentence = " ".join(tokens[: self.length_limit - current_len])
final_sentences.append(sentence)
break
# By bytes
elif self.length_limit_type == "bytes":
for sentence in sentences:
sentence = sentence.strip()
sentence_len = len(sentence)
if current_len + sentence_len < self.length_limit:
final_sentences.append(sentence)
current_len += sentence_len
else:
sentence = sentence[: self.length_limit - current_len]
final_sentences.append(sentence)
break
sentences = final_sentences
final_sentences = []
for sentence in sentences:
sentence = Rouge.REMOVE_CHAR_PATTERN.sub(" ", sentence.lower()).strip()
tokens = self.tokenize_text(Rouge.REMOVE_CHAR_PATTERN.sub(" ", sentence))
sentence = " ".join(tokens)
final_sentences.append(sentence)
return final_sentences
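# Minimal usage sketch (not part of the referenced source): the two sentences are
# invented, and use_tokenizer=False is chosen only to avoid the Mecab dependency,
# which makes the comparison effectively character-level - enough for a smoke test.
if __name__ == "__main__":
    rouge = Rouge(metrics=["rouge-n", "rouge-l"], max_n=2, use_tokenizer=False)
    hypothesis = "the cat sat on the mat"
    reference = "a cat was sitting on the mat"
    print(rouge.get_scores(hypothesis, reference))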
| 25,756
| 837
| 23
|
75f00237a492774445faf7cd823f0046fc10ff0e
| 1,953
|
py
|
Python
|
data_processing/find_entity.py
|
plista/news_knowledge_tree
|
7b31a6faba1f1f9b267386a75766e435fc4e2674
|
[
"MIT"
] | null | null | null |
data_processing/find_entity.py
|
plista/news_knowledge_tree
|
7b31a6faba1f1f9b267386a75766e435fc4e2674
|
[
"MIT"
] | 1
|
2019-12-16T18:42:53.000Z
|
2019-12-16T18:42:53.000Z
|
data_processing/find_entity.py
|
plista/news_knowledge_tree
|
7b31a6faba1f1f9b267386a75766e435fc4e2674
|
[
"MIT"
] | null | null | null |
import functools
import sys
from typing import Set
import nltk
from flair.data import Sentence
from flair.models import SequenceTagger
from langdetect import detect
@functools.lru_cache(maxsize=1)
def get_tagger(language: str) -> SequenceTagger:
"""Return the tagger needed """
if language == "de":
return SequenceTagger.load("de-ner")
if language == "en":
return SequenceTagger.load("ner-fast")
raise Exception("Invalid language")
def filter_text(text: str) -> str:
"""remove unwanted character from the text which can disturb NER"""
filtered = text
for s in "\\\xa0\"'[]()’“”\xad":
filtered = filtered.replace(s, "")
return filtered
def format_entities(entities: Set[str]) -> Set[str]:
"""
    Clean up extracted entities: strip trailing punctuation and newlines,
    drop empty strings and plural duplicates of entities already in the set.
    :param entities: raw entity strings
    :return: cleaned set of entities
"""
result = []
for entity in entities:
if entity[-1] in [".", ",", "?", "!", ":"]:
entity = entity[0:-1]
entity = entity.replace("\n", " ")
if entity[-1] == "s" and entity[:-1] in entities:
continue
if not entity:
continue
result.append(entity)
return set(result)
@functools.lru_cache(maxsize=512)
def find_entity(text: str, language: str) -> Set[str]:
"""extract entity using flair"""
global tagger
filtered = filter_text(text)
if not filtered:
return set()
detected_language = detect(filtered)
if language != detected_language:
return set()
sent_tokens = nltk.sent_tokenize(filtered)
sentences = [Sentence(i) for i in sent_tokens]
tagger = get_tagger(language)
tagger.predict(sentences)
flair_entities = []
for sentence in sentences:
flair_entities.extend(
[entity.text for entity in sentence.get_spans("ner")]
)
result = format_entities(set(flair_entities))
return result
if __name__ == "__main__":
text = sys.argv[1]
    # find_entity() also needs a language code; detect it from the input text
    print(find_entity(text, detect(text)))
| 26.391892
| 71
| 0.62724
|
import functools
import sys
from typing import Set
import nltk
from flair.data import Sentence
from flair.models import SequenceTagger
from langdetect import detect
@functools.lru_cache(maxsize=1)
def get_tagger(language: str) -> SequenceTagger:
"""Return the tagger needed """
if language == "de":
return SequenceTagger.load("de-ner")
if language == "en":
return SequenceTagger.load("ner-fast")
raise Exception("Invalid language")
def filter_text(text: str) -> str:
"""remove unwanted character from the text which can disturb NER"""
filtered = text
for s in "\\\xa0\"'[]()’“”\xad":
filtered = filtered.replace(s, "")
return filtered
def format_entities(entities: Set[str]) -> Set[str]:
"""
    Clean up extracted entities: strip trailing punctuation and newlines,
    drop empty strings and plural duplicates of entities already in the set.
    :param entities: raw entity strings
    :return: cleaned set of entities
"""
result = []
for entity in entities:
if entity[-1] in [".", ",", "?", "!", ":"]:
entity = entity[0:-1]
entity = entity.replace("\n", " ")
if entity[-1] == "s" and entity[:-1] in entities:
continue
if not entity:
continue
result.append(entity)
return set(result)
@functools.lru_cache(maxsize=512)
def find_entity(text: str, language: str) -> Set[str]:
"""extract entity using flair"""
global tagger
filtered = filter_text(text)
if not filtered:
return set()
detected_language = detect(filtered)
if language != detected_language:
return set()
sent_tokens = nltk.sent_tokenize(filtered)
sentences = [Sentence(i) for i in sent_tokens]
tagger = get_tagger(language)
tagger.predict(sentences)
flair_entities = []
for sentence in sentences:
flair_entities.extend(
[entity.text for entity in sentence.get_spans("ner")]
)
result = format_entities(set(flair_entities))
return result
if __name__ == "__main__":
text = sys.argv[1]
    # find_entity() also needs a language code; detect it from the input text
    print(find_entity(text, detect(text)))
| 0
| 0
| 0
|
61180cfa234edf359cd1eae6b1289e74c60f9294
| 1,928
|
py
|
Python
|
datenguidepy/translation.py
|
EvgenyGorelov/datenguide-python
|
9ff89665149b5cee31bab63b8a7e396b7233cc39
|
[
"MIT"
] | 35
|
2019-10-10T17:42:21.000Z
|
2021-04-10T08:56:51.000Z
|
datenguidepy/translation.py
|
EvgenyGorelov/datenguide-python
|
9ff89665149b5cee31bab63b8a7e396b7233cc39
|
[
"MIT"
] | 67
|
2019-06-30T08:30:20.000Z
|
2021-06-21T12:59:18.000Z
|
datenguidepy/translation.py
|
EvgenyGorelov/datenguide-python
|
9ff89665149b5cee31bab63b8a7e396b7233cc39
|
[
"MIT"
] | 8
|
2019-10-10T21:00:50.000Z
|
2021-06-22T11:58:32.000Z
|
from typing import Dict
from abc import abstractmethod, ABC
from pandas import DataFrame
import json
import os
DEFAULT_TRANSLATION_PROVIDER = SchemaTranslationProvider()
| 31.096774
| 81
| 0.732365
|
from typing import Dict
from abc import abstractmethod, ABC
from pandas import DataFrame
import json
import os
class TranslationProvider(ABC):
@abstractmethod
def translate_data_frame_from_german(
self, data_frame: DataFrame, target_language: str
):
pass
@abstractmethod
def translate_from_german(self, source_text: str, target_language: str):
pass
@abstractmethod
def is_valid_language_code(self, target_language: str):
pass
@abstractmethod
def get_valid_language_codes(self):
pass
class SchemaTranslationProvider(TranslationProvider):
def __init__(self):
self.translated_schema: Dict = self.get_translated_schema_from_file()
@staticmethod
def get_translated_schema_from_file():
curr_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(curr_dir, "package_data")
translation_json_path = os.path.join(data_dir, "translated_schema.json")
with open(translation_json_path) as translation_file:
translation_schema = json.loads(translation_file.read())
return translation_schema
def translate_data_frame_from_german(
self, data_frame: DataFrame, target_language: str
):
data_frame.replace(self.translated_schema[target_language], inplace=True)
def translate_from_german(self, source_text: str, target_language: str):
language_specific_schema = self.translated_schema[target_language]
if source_text in language_specific_schema:
return language_specific_schema[source_text]
else:
return source_text
def is_valid_language_code(self, target_language: str):
return target_language in self.get_valid_language_codes()
def get_valid_language_codes(self):
return list(self.translated_schema.keys())
DEFAULT_TRANSLATION_PROVIDER = SchemaTranslationProvider()
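# Minimal usage sketch (not part of the library itself): "en" and the German term
# below are illustrative guesses; unknown terms simply come back untranslated.
if __name__ == "__main__":
    provider = DEFAULT_TRANSLATION_PROVIDER
    print(provider.get_valid_language_codes())
    if provider.is_valid_language_code("en"):
        print(provider.translate_from_german("Gemeinden", "en"))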
| 1,299
| 408
| 46
|
2135004489baa792eeab5ba37016a6ad73478469
| 1,314
|
py
|
Python
|
src/contexts/kms/computed_data/application/find_one/ComputedDataByKeyAndInputQueryHandler.py
|
parada3desu/foxy-key-broker
|
fc95de9e9bfd61c506a9a18aa64c5c9cbeac8a9c
|
[
"Apache-2.0"
] | null | null | null |
src/contexts/kms/computed_data/application/find_one/ComputedDataByKeyAndInputQueryHandler.py
|
parada3desu/foxy-key-broker
|
fc95de9e9bfd61c506a9a18aa64c5c9cbeac8a9c
|
[
"Apache-2.0"
] | null | null | null |
src/contexts/kms/computed_data/application/find_one/ComputedDataByKeyAndInputQueryHandler.py
|
parada3desu/foxy-key-broker
|
fc95de9e9bfd61c506a9a18aa64c5c9cbeac8a9c
|
[
"Apache-2.0"
] | null | null | null |
from src.contexts.kms.computed_data.application.find_one.ComputedDataByKeyAndInputFinder import \
ComputedDataByKeyAndInputFinder
from src.contexts.kms.computed_data.application.find_one.ComputedDataByKeyAndInputQuery import \
ComputedDataByKeyAndInputQuery
from src.contexts.kms.computed_data.application.find_one.KmsComputedDataResponse import KmsComputedDataResponse
from src.contexts.kms.computed_data.domain.entities.ComputedDataInput import ComputedDataInput
from src.contexts.kms.computed_data.domain.entities.ComputedDataType import ComputedDataType
from src.contexts.kms.cryptokeys.domain.entities.CryptoKeyId import CryptoKeyId
from src.contexts.shared.domain.QueryHandler import QueryHandler
| 50.538462
| 111
| 0.822679
|
from src.contexts.kms.computed_data.application.find_one.ComputedDataByKeyAndInputFinder import \
ComputedDataByKeyAndInputFinder
from src.contexts.kms.computed_data.application.find_one.ComputedDataByKeyAndInputQuery import \
ComputedDataByKeyAndInputQuery
from src.contexts.kms.computed_data.application.find_one.KmsComputedDataResponse import KmsComputedDataResponse
from src.contexts.kms.computed_data.domain.entities.ComputedDataInput import ComputedDataInput
from src.contexts.kms.computed_data.domain.entities.ComputedDataType import ComputedDataType
from src.contexts.kms.cryptokeys.domain.entities.CryptoKeyId import CryptoKeyId
from src.contexts.shared.domain.QueryHandler import QueryHandler
class ComputedDataByKeyAndInputQueryHandler(QueryHandler):
_subscription: str = ComputedDataByKeyAndInputQuery.QUERY_TYPE
def __init__(self, finder: ComputedDataByKeyAndInputFinder):
self._finder = finder
def subscribed_to(self) -> str:
return self._subscription
async def handle(self, query: ComputedDataByKeyAndInputQuery) -> KmsComputedDataResponse:
crypto_key_id = CryptoKeyId(query.key_id)
input = ComputedDataInput(query.input)
cd_type = ComputedDataType(query.type)
return await self._finder.run(crypto_key_id, input, cd_type)
| 394
| 185
| 23
|
f7c7181676be11b5c14bd43d99798ed4f03ed80a
| 3,952
|
py
|
Python
|
tempest/tvaultconf.py
|
KiranPawar72/tempest
|
1fef3dd92b083055793065dd0693454735ec2c01
|
[
"Apache-2.0"
] | null | null | null |
tempest/tvaultconf.py
|
KiranPawar72/tempest
|
1fef3dd92b083055793065dd0693454735ec2c01
|
[
"Apache-2.0"
] | null | null | null |
tempest/tvaultconf.py
|
KiranPawar72/tempest
|
1fef3dd92b083055793065dd0693454735ec2c01
|
[
"Apache-2.0"
] | null | null | null |
import apscheduler
from apscheduler.schedulers.blocking import BlockingScheduler
# If you want to clean up all test resources (VMs, volumes, workloads), set the
# following cleanup parameter to True, otherwise False
cleanup = True
# pre-requisite parameter
pre_req = True
#Test results for reporting
PASS = "PASS"
FAIL = "FAIL"
enabled_tests = ["Attached_Volume_Ceph"]
#Id of workload type "parallel"
parallel="2ddd528d-c9b4-4d7e-8722-cc395140255a"
#Resources to use from file
#Please add your resources one on each line in files: tempest/tempest/vms_file, volumes_file, workloads_file
vms_from_file=False
volumes_from_file=False
workloads_from_file=False
#CLI configuration parameters
workload_type_id="f82ce76f-17fe-438b-aa37-7a023058e50d"
workload_name="clitest"
source_platform="openstack"
snapshot_name = "test-snapshot"
snapshot_type_full = "full"
restore_name = "test-oneclick-restore"
selective_restore_name = "test-selective-restore"
restore_filename = "/opt/restore.json"
vm_license_filename = "test_licenses/tvault_license_10VM.txt"
capacity_license_filename = "test_licenses/tvault_license_100TB.txt"
compute_license_filename = "test_licenses/tvault_license_10compute.txt"
invalid_license_filename = "test_licenses/tvault_license_invalid.txt"
expired_license_filename = "test_licenses/tvault_license_expired.txt"
workload_modify_name = "test2-new"
workload_modify_description = "test2-new-description"
restore_type = "restore"
global_job_scheduler=False
tvault_ip = "192.168.16.254"
tvault_dbusername = "root"
tvault_dbname = "workloadmgr"
tvault_password = "sample-password"
no_of_compute_nodes = 1
compute_node_ip = "192.168.16.75"
compute_node_username = "root"
compute_node_password = "Password1!"
# Scheduler parameter
interval="1 hrs"
interval_update = "7 hrs"
enabled='false'
retention_policy_type="Number of Snapshots to Keep"
retention_policy_type_update = "Number of days to retain Snapshots"
retention_policy_value="3"
retention_policy_value_update = "7"
schedule_report_file="scheduleReport.txt"
sched=BlockingScheduler()
count=0
No_of_Backup=1
# Scheduler policy parameters
policy_name="policy2"
policy_name_update = "policy_update"
fullbackup_interval="8"
fullbackup_interval_update = "7"
# test parameters
key_pair_name = "tempest_test_key_pair"
instance_username = "ubuntu"
snapshot_restore_name = "Tempest Test Restore"
restored_instance_flavor = 2
security_group_id = "baaae013-75d5-4821-806c-2cb259c95fb4"
security_group_name = "test_security"
flavor_name = "test_flavor"
config_yaml = {"compute": ["/etc/nova", "/var/lib/nova", "/var/log/nova"],
"glance": ["/etc/glance", "/var/lib/glance", "/var/log/glance"],
"keystone": ["/etc/keystone", "/var/lib/keystone", "/var/log/keystone"],
"cinder": ["/etc/cinder", "/var/lib/cinder", "/var/log/cinder"],
"neutron": ["/etc/neutron", "/var/lib/neutron"],
"swift": ["/etc/swift", "/var/log/swift/"],
"ceilometer": ["/etc/ceilometer", "/var/log/ceilometer/"],
"orchestration": ["/etc/heat/", "/var/log/heat/"]}
additional_dir = {"tvault-contego": ["/etc/tvault-contego/"]}
bootfromvol_vol_size = 4
volumes_parts = ["/dev/vdb", "/dev/vdc"]
recovery_flavor_ref = 3
recovery_image_ref = "cd056509-666b-41fa-9236-86f202b3e619"
#Email settings data
setting_data = {"smtp_default_recipient": "trilio.build@trilio.io",
"smtp_default_sender": "trilio.build@trilio.io",
"smtp_port": "587",
"smtp_server_name": "smtp.gmail.com",
"smtp_server_password": tvault_password,
"smtp_server_username": "trilio.build@trilio.io",
"smtp_timeout": "10" }
enable_email_notification = {"smtp_email_enable" : 1}
disable_email_notification = {"smtp_email_enable" : 0}
#Parameter for multiple vm workloads etc
vm_count = 8
| 34.973451
| 108
| 0.732287
|
import apscheduler
from apscheduler.schedulers.blocking import BlockingScheduler
# If you want to clean up all test resources (VMs, volumes, workloads), set the
# following cleanup parameter to True, otherwise False
cleanup = True
# pre-requisite parameter
pre_req = True
#Test results for reporting
PASS = "PASS"
FAIL = "FAIL"
enabled_tests = ["Attached_Volume_Ceph"]
#Id of workload type "parallel"
parallel="2ddd528d-c9b4-4d7e-8722-cc395140255a"
#Resources to use from file
#Please add your resources one on each line in files: tempest/tempest/vms_file, volumes_file, workloads_file
vms_from_file=False
volumes_from_file=False
workloads_from_file=False
#CLI configuration parameters
workload_type_id="f82ce76f-17fe-438b-aa37-7a023058e50d"
workload_name="clitest"
source_platform="openstack"
snapshot_name = "test-snapshot"
snapshot_type_full = "full"
restore_name = "test-oneclick-restore"
selective_restore_name = "test-selective-restore"
restore_filename = "/opt/restore.json"
vm_license_filename = "test_licenses/tvault_license_10VM.txt"
capacity_license_filename = "test_licenses/tvault_license_100TB.txt"
compute_license_filename = "test_licenses/tvault_license_10compute.txt"
invalid_license_filename = "test_licenses/tvault_license_invalid.txt"
expired_license_filename = "test_licenses/tvault_license_expired.txt"
workload_modify_name = "test2-new"
workload_modify_description = "test2-new-description"
restore_type = "restore"
global_job_scheduler=False
tvault_ip = "192.168.16.254"
tvault_dbusername = "root"
tvault_dbname = "workloadmgr"
tvault_password = "sample-password"
no_of_compute_nodes = 1
compute_node_ip = "192.168.16.75"
compute_node_username = "root"
compute_node_password = "Password1!"
# Scheduler parameter
interval="1 hrs"
interval_update = "7 hrs"
enabled='false'
retention_policy_type="Number of Snapshots to Keep"
retention_policy_type_update = "Number of days to retain Snapshots"
retention_policy_value="3"
retention_policy_value_update = "7"
schedule_report_file="scheduleReport.txt"
sched=BlockingScheduler()
count=0
No_of_Backup=1
# Scheduler policy parameters
policy_name="policy2"
policy_name_update = "policy_update"
fullbackup_interval="8"
fullbackup_interval_update = "7"
# test parameters
key_pair_name = "tempest_test_key_pair"
instance_username = "ubuntu"
snapshot_restore_name = "Tempest Test Restore"
restored_instance_flavor = 2
security_group_id = "baaae013-75d5-4821-806c-2cb259c95fb4"
security_group_name = "test_security"
flavor_name = "test_flavor"
config_yaml = {"compute": ["/etc/nova", "/var/lib/nova", "/var/log/nova"],
"glance": ["/etc/glance", "/var/lib/glance", "/var/log/glance"],
"keystone": ["/etc/keystone", "/var/lib/keystone", "/var/log/keystone"],
"cinder": ["/etc/cinder", "/var/lib/cinder", "/var/log/cinder"],
"neutron": ["/etc/neutron", "/var/lib/neutron"],
"swift": ["/etc/swift", "/var/log/swift/"],
"ceilometer": ["/etc/ceilometer", "/var/log/ceilometer/"],
"orchestration": ["/etc/heat/", "/var/log/heat/"]}
additional_dir = {"tvault-contego": ["/etc/tvault-contego/"]}
bootfromvol_vol_size = 4
volumes_parts = ["/dev/vdb", "/dev/vdc"]
recovery_flavor_ref = 3
recovery_image_ref = "cd056509-666b-41fa-9236-86f202b3e619"
#Email settings data
setting_data = {"smtp_default_recipient": "trilio.build@trilio.io",
"smtp_default_sender": "trilio.build@trilio.io",
"smtp_port": "587",
"smtp_server_name": "smtp.gmail.com",
"smtp_server_password": tvault_password,
"smtp_server_username": "trilio.build@trilio.io",
"smtp_timeout": "10" }
enable_email_notification = {"smtp_email_enable" : 1}
disable_email_notification = {"smtp_email_enable" : 0}
#Parameter for multiple vm workloads etc
vm_count = 8
| 0
| 0
| 0
|
ac8a05944dc625fce2bbd415e88698aa71441818
| 2,645
|
py
|
Python
|
Sketches/MPS/Old/basicswarm.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 12
|
2015-10-20T10:22:01.000Z
|
2021-07-19T10:09:44.000Z
|
Sketches/MPS/Old/basicswarm.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 2
|
2015-10-20T10:22:55.000Z
|
2017-02-13T11:05:25.000Z
|
Sketches/MPS/Old/basicswarm.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 6
|
2015-03-09T12:51:59.000Z
|
2020-03-01T13:06:21.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Builds a basic rooted graph in a simple swarm fashion.
#
z = peer()
a = peer()
b = peer()
c = peer()
d = peer()
e = peer()
f = peer()
g = peer()
h = peer()
a.join()
b.join()
c.join()
d.join()
e.join()
f.join()
g.join()
h.join()
for i in z,a,b,c,d,e,f,g,h:
print i
print """
The following should just have been displayed:
peer (ID=0, parentID=None, max=2, children=[1, 2])
peer (ID=1, parentID=0, max=2, children=[3, 4])
peer (ID=2, parentID=0, max=2, children=[5, 6])
peer (ID=3, parentID=1, max=2, children=[7, 8])
peer (ID=4, parentID=1, max=2, children=[])
peer (ID=5, parentID=2, max=2, children=[])
peer (ID=6, parentID=2, max=2, children=[])
peer (ID=7, parentID=3, max=2, children=[])
peer (ID=8, parentID=3, max=2, children=[])"""
| 30.056818
| 88
| 0.659357
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Builds a basic rooted graph in a simple swarm fashion.
#
class peer(object):
Peermax = 2
peers = []
def __init__(self, parent=None):
self.childrenIDs = []
self.parent = None
self.peermax = self.__class__.Peermax
self.__class__.peers.append(self)
self.id = len(self.__class__.peers)-1
def __repr__(self):
return "peer (ID=%s, parentID=%s, max=%s, children=%s)" % \
(str(self.id), str(self.parent), str(self.peermax), str(self.childrenIDs))
def accept(self, fromWhoID):
if fromWhoID in self.childrenIDs:
return True, self.parent, self.childrenIDs
if len(self.childrenIDs) < self.peermax:
self.childrenIDs.append(fromWhoID)
return True, self.parent, self.childrenIDs
else:
return False, self.parent, self.childrenIDs
def join(self, nodeID=0):
index, searchList, accepted, childrenIDs = -1, [], False, [0]
while not accepted:
searchList.extend(childrenIDs)
index = index + 1
accepted, parent, childrenIDs = peer.peers[index].accept(self.id)
self.parent = index
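# The demo below builds the complete binary tree shown in the expected output:
# each join() call scans peer.peers in creation order and attaches to the first
# node whose accept() still has a free child slot (Peermax = 2), so the tree
# fills level by level from the root.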
z = peer()
a = peer()
b = peer()
c = peer()
d = peer()
e = peer()
f = peer()
g = peer()
h = peer()
a.join()
b.join()
c.join()
d.join()
e.join()
f.join()
g.join()
h.join()
for i in z,a,b,c,d,e,f,g,h:
print i
print """
The following should just have been displayed:
peer (ID=0, parentID=None, max=2, children=[1, 2])
peer (ID=1, parentID=0, max=2, children=[3, 4])
peer (ID=2, parentID=0, max=2, children=[5, 6])
peer (ID=3, parentID=1, max=2, children=[7, 8])
peer (ID=4, parentID=1, max=2, children=[])
peer (ID=5, parentID=2, max=2, children=[])
peer (ID=6, parentID=2, max=2, children=[])
peer (ID=7, parentID=3, max=2, children=[])
peer (ID=8, parentID=3, max=2, children=[])"""
| 918
| 127
| 23
|
92e442443eaed77627b9c4c3c3bec21faca2a44c
| 678
|
py
|
Python
|
dart_fss/utils/dataframe.py
|
dveamer/dart-fss
|
1ea6b937f363d604a7da9c03686fba7f66707efa
|
[
"MIT"
] | 243
|
2019-04-19T09:05:32.000Z
|
2022-03-27T10:51:24.000Z
|
dart_fss/utils/dataframe.py
|
dveamer/dart-fss
|
1ea6b937f363d604a7da9c03686fba7f66707efa
|
[
"MIT"
] | 80
|
2019-04-20T06:37:44.000Z
|
2022-03-25T12:20:47.000Z
|
dart_fss/utils/dataframe.py
|
dveamer/dart-fss
|
1ea6b937f363d604a7da9c03686fba7f66707efa
|
[
"MIT"
] | 92
|
2019-04-18T06:19:52.000Z
|
2022-03-17T07:43:39.000Z
|
# -*- coding: utf-8 -*-
from pandas import DataFrame
from typing import List, Tuple
def dataframe_astype(df: DataFrame, columns: List[Tuple[str, type]]):
""" DataFrame Column Type converter
Parameters
----------
df: DataFrame
Pandas DataFrame
columns: list of tuple of str, type
column name and type for type conversion
Returns
-------
DataFrame
Pandas DataFrame
"""
for column, tp in columns:
if tp == int or tp == float:
df[column] = df[column].str.replace(',|-', '').astype(tp, errors='ignore')
else:
df[column] = df[column].astype(tp, errors='ignore')
return df
| 25.111111
| 86
| 0.588496
|
# -*- coding: utf-8 -*-
from pandas import DataFrame
from typing import List, Tuple
def dataframe_astype(df: DataFrame, columns: List[Tuple[str, type]]):
""" DataFrame Column Type converter
Parameters
----------
df: DataFrame
Pandas DataFrame
columns: list of tuple of str, type
column name and type for type conversion
Returns
-------
DataFrame
Pandas DataFrame
"""
for column, tp in columns:
if tp == int or tp == float:
df[column] = df[column].str.replace(',|-', '').astype(tp, errors='ignore')
else:
df[column] = df[column].astype(tp, errors='ignore')
return df
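# Minimal usage sketch (not part of dart-fss): the column names and values are
# invented; commas are left out of the numeric strings because the regex default
# of pandas' str.replace has changed between versions.
if __name__ == '__main__':
    import pandas as pd
    sample = pd.DataFrame({'corp_code': ['00126380', '00164779'], 'amount': ['1200', '3400']})
    converted = dataframe_astype(sample, [('amount', int), ('corp_code', str)])
    print(converted.dtypes)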
| 0
| 0
| 0
|
37d19ff1083bb0281e5b6217c9468d8302d267d7
| 20,094
|
py
|
Python
|
tests/unit/modules/test_timezone.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 5
|
2017-02-07T05:39:29.000Z
|
2020-06-13T02:07:33.000Z
|
tests/unit/modules/test_timezone.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 86
|
2017-01-27T11:54:46.000Z
|
2020-05-20T06:25:26.000Z
|
tests/unit/modules/test_timezone.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 11
|
2017-01-26T19:36:29.000Z
|
2021-12-11T07:54:16.000Z
|
# -*- coding: utf-8 -*-
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
from tempfile import NamedTemporaryFile
import os
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch,
mock_open
)
# Import Salt Libs
from salt.exceptions import CommandExecutionError, SaltInvocationError
import salt.modules.timezone as timezone
from salt.ext import six
import salt.utils.platform
import salt.utils.stringutils
GET_ZONE_FILE = 'salt.modules.timezone._get_zone_file'
GET_LOCALTIME_PATH = 'salt.modules.timezone._get_localtime_path'
@skipIf(NO_MOCK, NO_MOCK_REASON)
class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin):
'''
Timezone test case
'''
TEST_TZ = 'UTC'
@patch('salt.utils.path.which', MagicMock(return_value=False))
def test_get_zone_centos(self):
'''
Test CentOS is recognized
:return:
'''
with patch.dict(timezone.__grains__, {'os': 'centos'}):
with patch('salt.modules.timezone._get_zone_etc_localtime', MagicMock(return_value=self.TEST_TZ)):
assert timezone.get_zone() == self.TEST_TZ
@patch('salt.utils.path.which', MagicMock(return_value=False))
def test_get_zone_os_family_rh_suse(self):
'''
Test RedHat and Suse are recognized
:return:
'''
for osfamily in ['RedHat', 'Suse']:
with patch.dict(timezone.__grains__, {'os_family': [osfamily]}):
with patch('salt.modules.timezone._get_zone_sysconfig', MagicMock(return_value=self.TEST_TZ)):
assert timezone.get_zone() == self.TEST_TZ
@patch('salt.utils.path.which', MagicMock(return_value=False))
def test_get_zone_os_family_debian_gentoo(self):
'''
Test Debian and Gentoo are recognized
:return:
'''
for osfamily in ['Debian', 'Gentoo']:
with patch.dict(timezone.__grains__, {'os_family': [osfamily]}):
with patch('salt.modules.timezone._get_zone_etc_timezone', MagicMock(return_value=self.TEST_TZ)):
assert timezone.get_zone() == self.TEST_TZ
@patch('salt.utils.path.which', MagicMock(return_value=False))
def test_get_zone_os_family_allbsd_nilinuxrt(self):
'''
Test *BSD and NILinuxRT are recognized
:return:
'''
for osfamily in ['FreeBSD', 'OpenBSD', 'NetBSD', 'NILinuxRT']:
with patch.dict(timezone.__grains__, {'os_family': osfamily}):
with patch('salt.modules.timezone._get_zone_etc_localtime', MagicMock(return_value=self.TEST_TZ)):
assert timezone.get_zone() == self.TEST_TZ
@patch('salt.utils.path.which', MagicMock(return_value=False))
def test_get_zone_os_family_slowlaris(self):
'''
Test Slowlaris is recognized
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['Solaris']}):
with patch('salt.modules.timezone._get_zone_solaris', MagicMock(return_value=self.TEST_TZ)):
assert timezone.get_zone() == self.TEST_TZ
@patch('salt.utils.path.which', MagicMock(return_value=False))
def test_get_zone_os_family_aix(self):
'''
Test IBM AIX is recognized
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['AIX']}):
with patch('salt.modules.timezone._get_zone_aix', MagicMock(return_value=self.TEST_TZ)):
assert timezone.get_zone() == self.TEST_TZ
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
def test_set_zone_redhat(self):
'''
Test zone set on RH series
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['RedHat']}):
assert timezone.set_zone(self.TEST_TZ)
name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0]
assert args == ('/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="UTC"')
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
def test_set_zone_suse(self):
'''
Test zone set on SUSE series
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['Suse']}):
assert timezone.set_zone(self.TEST_TZ)
name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0]
assert args == ('/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="UTC"')
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
def test_set_zone_gentoo(self):
'''
Test zone set on Gentoo series
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['Gentoo']}):
with patch('salt.utils.files.fopen', mock_open()) as m_open:
assert timezone.set_zone(self.TEST_TZ)
fh_ = m_open.filehandles['/etc/timezone'][0]
assert fh_.call.args == ('/etc/timezone', 'w'), fh_.call.args
assert fh_.write_calls == ['UTC', '\n'], fh_.write_calls
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
def test_set_zone_debian(self):
'''
Test zone set on Debian series
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['Debian']}):
with patch('salt.utils.files.fopen', mock_open()) as m_open:
assert timezone.set_zone(self.TEST_TZ)
fh_ = m_open.filehandles['/etc/timezone'][0]
assert fh_.call.args == ('/etc/timezone', 'w'), fh_.call.args
assert fh_.write_calls == ['UTC', '\n'], fh_.write_calls
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=True))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
def test_get_hwclock_timedate_utc(self):
'''
Test get hwclock UTC/localtime
:return:
'''
with patch('salt.modules.timezone._timedatectl', MagicMock(return_value={'stdout': 'rtc in local tz'})):
assert timezone.get_hwclock() == 'UTC'
with patch('salt.modules.timezone._timedatectl', MagicMock(return_value={'stdout': 'rtc in local tz:yes'})):
assert timezone.get_hwclock() == 'localtime'
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
def test_get_hwclock_suse(self):
'''
Test get hwclock on SUSE
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['Suse']}):
timezone.get_hwclock()
name, args, kwarg = timezone.__salt__['cmd.run'].mock_calls[0]
assert args == (['tail', '-n', '1', '/etc/adjtime'],)
assert kwarg == {'python_shell': False}
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
def test_get_hwclock_redhat(self):
'''
Test get hwclock on RedHat
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['RedHat']}):
timezone.get_hwclock()
name, args, kwarg = timezone.__salt__['cmd.run'].mock_calls[0]
assert args == (['tail', '-n', '1', '/etc/adjtime'],)
assert kwarg == {'python_shell': False}
def _test_get_hwclock_debian(self): # TODO: Enable this when testing environment is working properly
'''
Test get hwclock on Debian
:return:
'''
with patch('salt.utils.path.which', MagicMock(return_value=False)):
with patch('os.path.exists', MagicMock(return_value=True)):
with patch('os.unlink', MagicMock()):
with patch('os.symlink', MagicMock()):
with patch.dict(timezone.__grains__, {'os_family': ['Debian']}):
timezone.get_hwclock()
name, args, kwarg = timezone.__salt__['cmd.run'].mock_calls[0]
assert args == (['tail', '-n', '1', '/etc/adjtime'],)
assert kwarg == {'python_shell': False}
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
def test_get_hwclock_solaris(self):
'''
Test get hwclock on Solaris
:return:
'''
# Incomplete
with patch.dict(timezone.__grains__, {'os_family': ['Solaris']}):
assert timezone.get_hwclock() == 'UTC'
with patch('salt.utils.files.fopen', mock_open()):
assert timezone.get_hwclock() == 'localtime'
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
def test_get_hwclock_aix(self):
'''
Test get hwclock on AIX
:return:
'''
# Incomplete
hwclock = 'localtime'
if not os.path.isfile('/etc/environment'):
hwclock = 'UTC'
with patch.dict(timezone.__grains__, {'os_family': ['AIX']}):
assert timezone.get_hwclock() == hwclock
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=True))
def test_set_hwclock_timedatectl(self):
'''
Test set hwclock with timedatectl
:return:
'''
timezone.set_hwclock('UTC')
name, args, kwargs = timezone.__salt__['cmd.retcode'].mock_calls[0]
assert args == (['timedatectl', 'set-local-rtc', 'false'],)
timezone.set_hwclock('localtime')
name, args, kwargs = timezone.__salt__['cmd.retcode'].mock_calls[1]
assert args == (['timedatectl', 'set-local-rtc', 'true'],)
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
def test_set_hwclock_aix_nilinuxrt(self):
'''
Test set hwclock on AIX and NILinuxRT
:return:
'''
for osfamily in ['AIX', 'NILinuxRT']:
with patch.dict(timezone.__grains__, {'os_family': osfamily}):
with self.assertRaises(SaltInvocationError):
assert timezone.set_hwclock('forty two')
assert timezone.set_hwclock('UTC')
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
@patch('salt.modules.timezone.get_zone', MagicMock(return_value='TEST_TIMEZONE'))
def test_set_hwclock_solaris(self):
'''
Test set hwclock on Solaris
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['Solaris'],
'cpuarch': 'x86'}):
with self.assertRaises(SaltInvocationError):
assert timezone.set_hwclock('forty two')
assert timezone.set_hwclock('UTC')
name, args, kwargs = timezone.__salt__['cmd.retcode'].mock_calls[0]
assert args == (['rtc', '-z', 'GMT'],)
assert kwargs == {'python_shell': False}
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
@patch('salt.modules.timezone.get_zone', MagicMock(return_value='TEST_TIMEZONE'))
def test_set_hwclock_arch(self):
'''
Test set hwclock on arch
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['Arch']}):
assert timezone.set_hwclock('UTC')
name, args, kwargs = timezone.__salt__['cmd.retcode'].mock_calls[0]
assert args == (['timezonectl', 'set-local-rtc', 'false'],)
assert kwargs == {'python_shell': False}
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
@patch('salt.modules.timezone.get_zone', MagicMock(return_value='TEST_TIMEZONE'))
def test_set_hwclock_redhat(self):
'''
Test set hwclock on RedHat
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['RedHat']}):
assert timezone.set_hwclock('UTC')
name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0]
assert args == ('/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="TEST_TIMEZONE"')
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
@patch('salt.modules.timezone.get_zone', MagicMock(return_value='TEST_TIMEZONE'))
def test_set_hwclock_suse(self):
'''
Test set hwclock on SUSE
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['Suse']}):
assert timezone.set_hwclock('UTC')
name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0]
assert args == ('/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="TEST_TIMEZONE"')
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
@patch('salt.modules.timezone.get_zone', MagicMock(return_value='TEST_TIMEZONE'))
def test_set_hwclock_debian(self):
'''
Test set hwclock on Debian
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['Debian']}):
assert timezone.set_hwclock('UTC')
name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0]
assert args == ('/etc/default/rcS', '^UTC=.*', 'UTC=yes')
assert timezone.set_hwclock('localtime')
name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[1]
assert args == ('/etc/default/rcS', '^UTC=.*', 'UTC=no')
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
@patch('salt.modules.timezone.get_zone', MagicMock(return_value='TEST_TIMEZONE'))
def test_set_hwclock_gentoo(self):
'''
Test set hwclock on Gentoo
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['Gentoo']}):
with self.assertRaises(SaltInvocationError):
timezone.set_hwclock('forty two')
timezone.set_hwclock('UTC')
name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0]
assert args == ('/etc/conf.d/hwclock', '^clock=.*', 'clock="UTC"')
timezone.set_hwclock('localtime')
name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[1]
assert args == ('/etc/conf.d/hwclock', '^clock=.*', 'clock="local"')
| 43.120172
| 116
| 0.62342
|
# -*- coding: utf-8 -*-
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
from tempfile import NamedTemporaryFile
import os
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch,
mock_open
)
# Import Salt Libs
from salt.exceptions import CommandExecutionError, SaltInvocationError
import salt.modules.timezone as timezone
from salt.ext import six
import salt.utils.platform
import salt.utils.stringutils
GET_ZONE_FILE = 'salt.modules.timezone._get_zone_file'
GET_LOCALTIME_PATH = 'salt.modules.timezone._get_localtime_path'
@skipIf(NO_MOCK, NO_MOCK_REASON)
class TimezoneTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {timezone: {'__grains__': {'os_family': 'Ubuntu'}}}
def setUp(self):
self.tempfiles = []
def tearDown(self):
for tempfile in self.tempfiles:
try:
os.remove(tempfile.name)
except OSError:
pass
del self.tempfiles
def test_zone_compare_equal(self):
etc_localtime = self.create_tempfile_with_contents('a')
zone_path = self.create_tempfile_with_contents('a')
with patch(GET_ZONE_FILE, lambda p: zone_path.name):
with patch(GET_LOCALTIME_PATH, lambda: etc_localtime.name):
self.assertTrue(timezone.zone_compare('foo'))
def test_zone_compare_nonexistent(self):
etc_localtime = self.create_tempfile_with_contents('a')
with patch(GET_ZONE_FILE, lambda p: '/foopath/nonexistent'):
with patch(GET_LOCALTIME_PATH, lambda: etc_localtime.name):
self.assertRaises(SaltInvocationError, timezone.zone_compare, 'foo')
def test_zone_compare_unequal(self):
etc_localtime = self.create_tempfile_with_contents('a')
zone_path = self.create_tempfile_with_contents('b')
with patch(GET_ZONE_FILE, lambda p: zone_path.name):
with patch(GET_LOCALTIME_PATH, lambda: etc_localtime.name):
self.assertFalse(timezone.zone_compare('foo'))
def test_missing_localtime(self):
with patch(GET_ZONE_FILE, lambda p: '/nonexisting'):
with patch(GET_LOCALTIME_PATH, lambda: '/also-missing'):
self.assertRaises(CommandExecutionError, timezone.zone_compare, 'foo')
def create_tempfile_with_contents(self, contents):
temp = NamedTemporaryFile(delete=False)
if six.PY3:
temp.write(salt.utils.stringutils.to_bytes(contents))
else:
temp.write(contents)
temp.close()
self.tempfiles.append(temp)
return temp
@skipIf(NO_MOCK, NO_MOCK_REASON)
class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin):
'''
Timezone test case
'''
TEST_TZ = 'UTC'
def setup_loader_modules(self):
return {timezone: {'__grains__': {'os': ''},
'__salt__': {'file.sed': MagicMock(),
'cmd.run': MagicMock(),
'cmd.retcode': MagicMock(return_value=0)}}}
@patch('salt.utils.path.which', MagicMock(return_value=False))
def test_get_zone_centos(self):
'''
Test CentOS is recognized
:return:
'''
with patch.dict(timezone.__grains__, {'os': 'centos'}):
with patch('salt.modules.timezone._get_zone_etc_localtime', MagicMock(return_value=self.TEST_TZ)):
assert timezone.get_zone() == self.TEST_TZ
@patch('salt.utils.path.which', MagicMock(return_value=False))
def test_get_zone_os_family_rh_suse(self):
'''
Test RedHat and Suse are recognized
:return:
'''
for osfamily in ['RedHat', 'Suse']:
with patch.dict(timezone.__grains__, {'os_family': [osfamily]}):
with patch('salt.modules.timezone._get_zone_sysconfig', MagicMock(return_value=self.TEST_TZ)):
assert timezone.get_zone() == self.TEST_TZ
@patch('salt.utils.path.which', MagicMock(return_value=False))
def test_get_zone_os_family_debian_gentoo(self):
'''
Test Debian and Gentoo are recognized
:return:
'''
for osfamily in ['Debian', 'Gentoo']:
with patch.dict(timezone.__grains__, {'os_family': [osfamily]}):
with patch('salt.modules.timezone._get_zone_etc_timezone', MagicMock(return_value=self.TEST_TZ)):
assert timezone.get_zone() == self.TEST_TZ
@patch('salt.utils.path.which', MagicMock(return_value=False))
def test_get_zone_os_family_allbsd_nilinuxrt(self):
'''
Test *BSD and NILinuxRT are recognized
:return:
'''
for osfamily in ['FreeBSD', 'OpenBSD', 'NetBSD', 'NILinuxRT']:
with patch.dict(timezone.__grains__, {'os_family': osfamily}):
with patch('salt.modules.timezone._get_zone_etc_localtime', MagicMock(return_value=self.TEST_TZ)):
assert timezone.get_zone() == self.TEST_TZ
@patch('salt.utils.path.which', MagicMock(return_value=False))
def test_get_zone_os_family_slowlaris(self):
'''
Test Slowlaris is recognized
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['Solaris']}):
with patch('salt.modules.timezone._get_zone_solaris', MagicMock(return_value=self.TEST_TZ)):
assert timezone.get_zone() == self.TEST_TZ
@patch('salt.utils.path.which', MagicMock(return_value=False))
def test_get_zone_os_family_aix(self):
'''
Test IBM AIX is recognized
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['AIX']}):
with patch('salt.modules.timezone._get_zone_aix', MagicMock(return_value=self.TEST_TZ)):
assert timezone.get_zone() == self.TEST_TZ
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
def test_set_zone_redhat(self):
'''
Test zone set on RH series
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['RedHat']}):
assert timezone.set_zone(self.TEST_TZ)
name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0]
assert args == ('/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="UTC"')
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
def test_set_zone_suse(self):
'''
Test zone set on SUSE series
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['Suse']}):
assert timezone.set_zone(self.TEST_TZ)
name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0]
assert args == ('/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="UTC"')
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
def test_set_zone_gentoo(self):
'''
Test zone set on Gentoo series
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['Gentoo']}):
with patch('salt.utils.files.fopen', mock_open()) as m_open:
assert timezone.set_zone(self.TEST_TZ)
fh_ = m_open.filehandles['/etc/timezone'][0]
assert fh_.call.args == ('/etc/timezone', 'w'), fh_.call.args
assert fh_.write_calls == ['UTC', '\n'], fh_.write_calls
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
def test_set_zone_debian(self):
'''
Test zone set on Debian series
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['Debian']}):
with patch('salt.utils.files.fopen', mock_open()) as m_open:
assert timezone.set_zone(self.TEST_TZ)
fh_ = m_open.filehandles['/etc/timezone'][0]
assert fh_.call.args == ('/etc/timezone', 'w'), fh_.call.args
assert fh_.write_calls == ['UTC', '\n'], fh_.write_calls
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=True))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
def test_get_hwclock_timedate_utc(self):
'''
Test get hwclock UTC/localtime
:return:
'''
with patch('salt.modules.timezone._timedatectl', MagicMock(return_value={'stdout': 'rtc in local tz'})):
assert timezone.get_hwclock() == 'UTC'
with patch('salt.modules.timezone._timedatectl', MagicMock(return_value={'stdout': 'rtc in local tz:yes'})):
assert timezone.get_hwclock() == 'localtime'
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
def test_get_hwclock_suse(self):
'''
Test get hwclock on SUSE
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['Suse']}):
timezone.get_hwclock()
name, args, kwarg = timezone.__salt__['cmd.run'].mock_calls[0]
assert args == (['tail', '-n', '1', '/etc/adjtime'],)
assert kwarg == {'python_shell': False}
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
def test_get_hwclock_redhat(self):
'''
Test get hwclock on RedHat
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['RedHat']}):
timezone.get_hwclock()
name, args, kwarg = timezone.__salt__['cmd.run'].mock_calls[0]
assert args == (['tail', '-n', '1', '/etc/adjtime'],)
assert kwarg == {'python_shell': False}
def _test_get_hwclock_debian(self): # TODO: Enable this when testing environment is working properly
'''
Test get hwclock on Debian
:return:
'''
with patch('salt.utils.path.which', MagicMock(return_value=False)):
with patch('os.path.exists', MagicMock(return_value=True)):
with patch('os.unlink', MagicMock()):
with patch('os.symlink', MagicMock()):
with patch.dict(timezone.__grains__, {'os_family': ['Debian']}):
timezone.get_hwclock()
name, args, kwarg = timezone.__salt__['cmd.run'].mock_calls[0]
assert args == (['tail', '-n', '1', '/etc/adjtime'],)
assert kwarg == {'python_shell': False}
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
def test_get_hwclock_solaris(self):
'''
Test get hwclock on Solaris
:return:
'''
# Incomplete
with patch.dict(timezone.__grains__, {'os_family': ['Solaris']}):
assert timezone.get_hwclock() == 'UTC'
with patch('salt.utils.files.fopen', mock_open()):
assert timezone.get_hwclock() == 'localtime'
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
def test_get_hwclock_aix(self):
'''
Test get hwclock on AIX
:return:
'''
# Incomplete
hwclock = 'localtime'
if not os.path.isfile('/etc/environment'):
hwclock = 'UTC'
with patch.dict(timezone.__grains__, {'os_family': ['AIX']}):
assert timezone.get_hwclock() == hwclock
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=True))
def test_set_hwclock_timedatectl(self):
'''
Test set hwclock with timedatectl
:return:
'''
timezone.set_hwclock('UTC')
name, args, kwargs = timezone.__salt__['cmd.retcode'].mock_calls[0]
assert args == (['timedatectl', 'set-local-rtc', 'false'],)
timezone.set_hwclock('localtime')
name, args, kwargs = timezone.__salt__['cmd.retcode'].mock_calls[1]
assert args == (['timedatectl', 'set-local-rtc', 'true'],)
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
def test_set_hwclock_aix_nilinuxrt(self):
'''
Test set hwclock on AIX and NILinuxRT
:return:
'''
for osfamily in ['AIX', 'NILinuxRT']:
with patch.dict(timezone.__grains__, {'os_family': osfamily}):
with self.assertRaises(SaltInvocationError):
assert timezone.set_hwclock('forty two')
assert timezone.set_hwclock('UTC')
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
@patch('salt.modules.timezone.get_zone', MagicMock(return_value='TEST_TIMEZONE'))
def test_set_hwclock_solaris(self):
'''
Test set hwclock on Solaris
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['Solaris'],
'cpuarch': 'x86'}):
with self.assertRaises(SaltInvocationError):
assert timezone.set_hwclock('forty two')
assert timezone.set_hwclock('UTC')
name, args, kwargs = timezone.__salt__['cmd.retcode'].mock_calls[0]
assert args == (['rtc', '-z', 'GMT'],)
assert kwargs == {'python_shell': False}
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
@patch('salt.modules.timezone.get_zone', MagicMock(return_value='TEST_TIMEZONE'))
def test_set_hwclock_arch(self):
'''
Test set hwclock on arch
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['Arch']}):
assert timezone.set_hwclock('UTC')
name, args, kwargs = timezone.__salt__['cmd.retcode'].mock_calls[0]
assert args == (['timezonectl', 'set-local-rtc', 'false'],)
assert kwargs == {'python_shell': False}
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
@patch('salt.modules.timezone.get_zone', MagicMock(return_value='TEST_TIMEZONE'))
def test_set_hwclock_redhat(self):
'''
Test set hwclock on RedHat
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['RedHat']}):
assert timezone.set_hwclock('UTC')
name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0]
assert args == ('/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="TEST_TIMEZONE"')
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
@patch('salt.modules.timezone.get_zone', MagicMock(return_value='TEST_TIMEZONE'))
def test_set_hwclock_suse(self):
'''
Test set hwclock on SUSE
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['Suse']}):
assert timezone.set_hwclock('UTC')
name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0]
assert args == ('/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="TEST_TIMEZONE"')
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
@patch('salt.modules.timezone.get_zone', MagicMock(return_value='TEST_TIMEZONE'))
def test_set_hwclock_debian(self):
'''
Test set hwclock on Debian
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['Debian']}):
assert timezone.set_hwclock('UTC')
name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0]
assert args == ('/etc/default/rcS', '^UTC=.*', 'UTC=yes')
assert timezone.set_hwclock('localtime')
name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[1]
assert args == ('/etc/default/rcS', '^UTC=.*', 'UTC=no')
@skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows')
@patch('salt.utils.path.which', MagicMock(return_value=False))
@patch('os.path.exists', MagicMock(return_value=True))
@patch('os.unlink', MagicMock())
@patch('os.symlink', MagicMock())
@patch('salt.modules.timezone.get_zone', MagicMock(return_value='TEST_TIMEZONE'))
def test_set_hwclock_gentoo(self):
'''
Test set hwclock on Gentoo
:return:
'''
with patch.dict(timezone.__grains__, {'os_family': ['Gentoo']}):
with self.assertRaises(SaltInvocationError):
timezone.set_hwclock('forty two')
timezone.set_hwclock('UTC')
name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0]
assert args == ('/etc/conf.d/hwclock', '^clock=.*', 'clock="UTC"')
timezone.set_hwclock('localtime')
name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[1]
assert args == ('/etc/conf.d/hwclock', '^clock=.*', 'clock="local"')
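# A side note on the assertion pattern used throughout these tests: each entry of a
# MagicMock's mock_calls unpacks as (name, args, kwargs), which is what lets the tests
# pull positional arguments straight out of the mocked __salt__ functions. A minimal,
# Salt-free sketch of the same idea (the file_sed stand-in below is hypothetical):
from unittest.mock import MagicMock

file_sed = MagicMock()
file_sed('/etc/default/rcS', '^UTC=.*', 'UTC=yes')

name, args, kwargs = file_sed.mock_calls[0]
assert args == ('/etc/default/rcS', '^UTC=.*', 'UTC=yes')
assert kwargs == {}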
| 2,046
| 35
| 265
|
60641ef4e63b02e80e53c98c3ee1677bdfba26a8
| 2,308
|
py
|
Python
|
handwriting_classifier.py
|
ngp111/digit_classification_keras
|
65d6b72d6d38bc257c5cde55d92d2210865f74d6
|
[
"MIT"
] | null | null | null |
handwriting_classifier.py
|
ngp111/digit_classification_keras
|
65d6b72d6d38bc257c5cde55d92d2210865f74d6
|
[
"MIT"
] | null | null | null |
handwriting_classifier.py
|
ngp111/digit_classification_keras
|
65d6b72d6d38bc257c5cde55d92d2210865f74d6
|
[
"MIT"
] | null | null | null |
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Dense, Flatten
import matplotlib.pyplot as plt
import numpy as np
import random
pixel_width = 28
pixel_height = 28
no_of_classes = 10
batch_size = 32
epochs = 10
(features_train, labels_train), (features_test, labels_test) = mnist.load_data()
features_train = features_train.reshape(features_train.shape[0], pixel_width, pixel_height, 1)
features_test = features_test.reshape(features_test.shape[0], pixel_width, pixel_height, 1)
input_shape = (pixel_width, pixel_height, 1)
features_train = features_train.astype('float32')
features_test = features_test.astype('float32')
features_train /= 255
features_test /= 255
labels_train = keras.utils.to_categorical(labels_train, no_of_classes)
labels_test = keras.utils.to_categorical(labels_test, no_of_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation = 'relu', input_shape = input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(no_of_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(features_train, labels_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(features_test, labels_test))
score = model.evaluate(features_test, labels_test, verbose=0)
predictions = model.predict(features_test)
prediction_digits = np.argmax(predictions, axis=1)
plt.figure(figsize=(18, 18))
for i in range(100):
ax = plt.subplot(10, 10, i+1)
    plt.xticks([])
plt.yticks([])
plt.grid(False)
    image_index = random.randint(0, len(prediction_digits) - 1)  # randint is inclusive, so cap at the last valid index
plt.imshow(np.squeeze(features_test[image_index]), cmap=plt.cm.gray)
ax.xaxis.label.set_color(get_label_color(prediction_digits[image_index],
np.argmax(labels_test[image_index])))
#print(image_index)
plt.xlabel('Predicted: %d' % prediction_digits[image_index])
plt.show()
| 31.189189
| 94
| 0.737002
|
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Dense, Flatten
import matplotlib.pyplot as plt
import numpy as np
import random
def get_label_color(val1, val2):
if val1 == val2:
return 'black'
else:
return 'red'
pixel_width = 28
pixel_height = 28
no_of_classes = 10
batch_size = 32
epochs = 10
(features_train, labels_train), (features_test, labels_test) = mnist.load_data()
features_train = features_train.reshape(features_train.shape[0], pixel_width, pixel_height, 1)
features_test = features_test.reshape(features_test.shape[0], pixel_width, pixel_height, 1)
input_shape = (pixel_width, pixel_height, 1)
features_train = features_train.astype('float32')
features_test = features_test.astype('float32')
features_train /= 255
features_test /= 255
labels_train = keras.utils.to_categorical(labels_train, no_of_classes)
labels_test = keras.utils.to_categorical(labels_test, no_of_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation = 'relu', input_shape = input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(no_of_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(features_train, labels_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(features_test, labels_test))
score = model.evaluate(features_test, labels_test, verbose=0)
predictions = model.predict(features_test)
prediction_digits = np.argmax(predictions, axis=1)
plt.figure(figsize=(18, 18))
for i in range(100):
ax = plt.subplot(10, 10, i+1)
    plt.xticks([])
plt.yticks([])
plt.grid(False)
    image_index = random.randint(0, len(prediction_digits) - 1)  # randint is inclusive, so cap at the last valid index
plt.imshow(np.squeeze(features_test[image_index]), cmap=plt.cm.gray)
ax.xaxis.label.set_color(get_label_color(prediction_digits[image_index],
np.argmax(labels_test[image_index])))
#print(image_index)
plt.xlabel('Predicted: %d' % prediction_digits[image_index])
plt.show()
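# A minimal single-image inference sketch for the model trained above, assuming the
# `model`, `features_test` and `np` names from this script are still in scope; the
# predicted class is simply the argmax over the 10-way softmax output.
single_image = features_test[:1]                    # keep the batch axis: shape (1, 28, 28, 1)
single_probs = model.predict(single_image)          # shape (1, 10)
print('Predicted digit:', int(np.argmax(single_probs, axis=1)[0]))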
| 74
| 0
| 23
|
078a6fe226b1f79db248bb1c1845407854fdbc98
| 673
|
py
|
Python
|
hw/hw10/tests/q2_1.py
|
ds-modules/Colab-demo
|
cccaff13633f8a5ec697cd4aeca9087f2feec2e4
|
[
"BSD-3-Clause"
] | null | null | null |
hw/hw10/tests/q2_1.py
|
ds-modules/Colab-demo
|
cccaff13633f8a5ec697cd4aeca9087f2feec2e4
|
[
"BSD-3-Clause"
] | null | null | null |
hw/hw10/tests/q2_1.py
|
ds-modules/Colab-demo
|
cccaff13633f8a5ec697cd4aeca9087f2feec2e4
|
[
"BSD-3-Clause"
] | null | null | null |
test = { 'name': 'q2_1',
'points': 1,
'suites': [ { 'cases': [ {'code': '>>> # Make sure you assigned `binary options` to an array;\n>>> type(binary_options) == np.ndarray\nTrue', 'hidden': False, 'locked': False},
{ 'code': '>>> # Should be a two element array of a binary distribution;\n>>> sorted(set(binary_options)) == sorted(set([0, 1]))\nTrue',
'hidden': False,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| 61.181818
| 186
| 0.408618
|
test = { 'name': 'q2_1',
'points': 1,
'suites': [ { 'cases': [ {'code': '>>> # Make sure you assigned `binary options` to an array;\n>>> type(binary_options) == np.ndarray\nTrue', 'hidden': False, 'locked': False},
{ 'code': '>>> # Should be a two element array of a binary distribution;\n>>> sorted(set(binary_options)) == sorted(set([0, 1]))\nTrue',
'hidden': False,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| 0
| 0
| 0
|
9df1ec0fb4cc64dcc907cbc11b4596d5cfc19976
| 383
|
py
|
Python
|
BFS_Iterative.py
|
FER-NASP/AdvancedAlgorithms
|
ce09c50b9d02fac53a09f6f0d0d099fe87aa7354
|
[
"MIT"
] | 1
|
2021-12-24T19:30:13.000Z
|
2021-12-24T19:30:13.000Z
|
BFS_Iterative.py
|
FER-NASP/AdvancedAlgorithms
|
ce09c50b9d02fac53a09f6f0d0d099fe87aa7354
|
[
"MIT"
] | null | null | null |
BFS_Iterative.py
|
FER-NASP/AdvancedAlgorithms
|
ce09c50b9d02fac53a09f6f0d0d099fe87aa7354
|
[
"MIT"
] | null | null | null |
import collections
| 20.157895
| 37
| 0.386423
|
import collections
def BFS(G):
vis=[]
Q = collections.deque()
for u0 in G:
if u0 not in vis:
Q.append(u0)
vis.append(u0)
while Q:
u=Q.popleft()
for v in G[u]['adj']:
if v not in vis:
vis.append(v)
Q.append(v)
return vis
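# Usage sketch for the iterative BFS above: G is assumed to be a dict mapping each node
# to a record with an 'adj' adjacency list, matching the G[u]['adj'] lookup. The toy
# graph below is made up for illustration.
if __name__ == '__main__':
    G = {
        'a': {'adj': ['b', 'c']},
        'b': {'adj': ['d']},
        'c': {'adj': []},
        'd': {'adj': []},
        'e': {'adj': []},      # disconnected node, still reached via the outer loop
    }
    print(BFS(G))              # ['a', 'b', 'c', 'd', 'e'] with insertion-ordered dicts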
| 339
| 0
| 23
|
3319b793d86371d50aabd94af3fdcfc7754f0115
| 3,646
|
py
|
Python
|
losses/LSGANLoss.py
|
NoelShin/LIT
|
ac08254c6ef2d29f5bb823d79f613b355f286953
|
[
"MIT"
] | 1
|
2019-01-23T07:44:47.000Z
|
2019-01-23T07:44:47.000Z
|
losses/LSGANLoss.py
|
NoelShin/LIT
|
ac08254c6ef2d29f5bb823d79f613b355f286953
|
[
"MIT"
] | null | null | null |
losses/LSGANLoss.py
|
NoelShin/LIT
|
ac08254c6ef2d29f5bb823d79f613b355f286953
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from .base_loss import Loss
| 40.065934
| 120
| 0.590236
|
import torch
import torch.nn as nn
from .base_loss import Loss
class LSGANLoss(Loss):
def __init__(self, opt):
super(LSGANLoss, self).__init__(opt)
self.criterion = nn.MSELoss()
def get_grid(self, tensor, is_real=False):
grid = torch.FloatTensor(tensor.shape).fill_(1.0 if is_real else 0.0)
grid = grid.cuda(0) if self.USE_CUDA else grid
return grid
def __call__(self, C, G, data_dict, level=None, level_in=None):
loss_C = 0
loss_G = 0
package = dict()
input, target = data_dict['input_tensor'], data_dict['target_tensor']
fake = G(x=input, level=level, level_in=level_in) if self.progression else G(input)
if self.condition and self.progression:
input = data_dict['C_input_tensor']
input_fake, input_real = torch.cat([input, fake.detach()], dim=1), torch.cat([input, target], dim=1)
elif self.condition and not self.progression:
input_fake, input_real = torch.cat([input, fake.detach()], dim=1), torch.cat([input, target], dim=1)
else:
input_fake, input_real = fake.detach(), target
fake_features = C(input_fake, level, level_in) if self.progression else C(input_fake)
real_features = C(input_real, level, level_in) if self.progression else C(input_real)
C_score = 0
for i in range(self.n_C):
fake_grid, real_grid = self.get_grid(fake_features[i][-1]), self.get_grid(real_features[i][-1], True)
C_score += self.criterion(real_features[i][-1], real_grid) + self.criterion(fake_features[i][-1], fake_grid)
C_score *= 0.5
loss_C += C_score
if self.CT:
real_features_2 = C(input_real)
CT = self.CT_lambda * self.calc_CT(real_features, real_features_2)
loss_C += CT
package.update({'CT': CT.detach().item()})
else:
package.update({'CT': 0.0})
if self.GP:
GP = self.calc_GP(C, output=input_fake.detach(), target=input_real.detach())
loss_C += self.GP_lambda * GP
package.update({'GP': GP.detach().item()})
else:
package.update({'GP': 0.0})
input_fake = torch.cat([input, fake], dim=1) if self.condition else fake
fake_features = C(input_fake, level, level_in) if self.progression else C(input_fake)
G_score = 0
for i in range(self.n_C):
real_grid = self.get_grid(fake_features[i][-1], True)
G_score += self.criterion(fake_features[i][-1], real_grid)
loss_G += G_score
if self.FM:
FM = 0
n_layers = len(fake_features[0])
for j in range(n_layers):
FM += self.FM_criterion(fake_features[i][j], real_features[i][j].detach())
loss_G += FM * self.FM_lambda / self.n_C
package.update({'FM': FM.detach().item() / self.n_C})
else:
package.update({'FM': 0.0})
if self.VGG:
VGG = 0
VGG += self.calc_FM(self.VGGNet(fake), self.VGGNet(target), weights=self.VGG_weights)
loss_G += self.VGG_lambda * VGG
package.update({'VGG': VGG.detach().item()})
else:
package.update({'VGG': 0.0})
package.update({'A_score': C_score.detach().item(), 'G_score': G_score.detach().item(), 'total_A_loss': loss_C,
'total_G_loss': loss_G, 'generated_tensor': fake.detach(), 'A_state_dict': C.state_dict(),
'G_state_dict': G.state_dict(), 'target_tensor': target})
return package
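# The LSGAN objective behind get_grid plus the MSE criterion boils down to least-squares
# targets: discriminator outputs are regressed towards 1.0 for real inputs and 0.0 for
# fakes. A self-contained sketch of just that objective, without the multi-scale,
# feature-matching and VGG bookkeeping handled above:
def lsgan_d_loss(real_scores, fake_scores, criterion=nn.MSELoss()):
    # Mirrors C_score: 0.5 * (real towards a grid of ones + fake towards a grid of zeros).
    real_grid = torch.ones_like(real_scores)
    fake_grid = torch.zeros_like(fake_scores)
    return 0.5 * (criterion(real_scores, real_grid) + criterion(fake_scores, fake_grid))

def lsgan_g_loss(fake_scores, criterion=nn.MSELoss()):
    # Mirrors G_score: push fake outputs towards the "real" grid of ones.
    return criterion(fake_scores, torch.ones_like(fake_scores))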
| 3,478
| 1
| 103
|
a423a6f16d3d1cb1f60c93cd7d5e53d9405dd725
| 141
|
py
|
Python
|
autoflow/opt/iterations/__init__.py
|
auto-flow/autoflow
|
f5903424ad8694d57741a0bd6dfeaba320ea6517
|
[
"BSD-3-Clause"
] | 49
|
2020-04-16T11:17:28.000Z
|
2020-05-06T01:32:44.000Z
|
autoflow/opt/iterations/__init__.py
|
auto-flow/autoflow
|
f5903424ad8694d57741a0bd6dfeaba320ea6517
|
[
"BSD-3-Clause"
] | null | null | null |
autoflow/opt/iterations/__init__.py
|
auto-flow/autoflow
|
f5903424ad8694d57741a0bd6dfeaba320ea6517
|
[
"BSD-3-Clause"
] | 3
|
2020-04-17T00:53:24.000Z
|
2020-04-23T03:04:26.000Z
|
from .successivehalving import SuccessiveHalving
from .base import WarmStartIteration
from .successiveresampling import SuccessiveResampling
| 35.25
| 54
| 0.893617
|
from .successivehalving import SuccessiveHalving
from .base import WarmStartIteration
from .successiveresampling import SuccessiveResampling
| 0
| 0
| 0
|
2faadc4f2c2569527fcf4fe1bfa31ec9ba643355
| 2,278
|
py
|
Python
|
biblio/mywebsites.py
|
lokal-profil/isfdb_site
|
0ce20d6347849926d4eda961ea9249c31519eea5
|
[
"BSD-3-Clause"
] | null | null | null |
biblio/mywebsites.py
|
lokal-profil/isfdb_site
|
0ce20d6347849926d4eda961ea9249c31519eea5
|
[
"BSD-3-Clause"
] | null | null | null |
biblio/mywebsites.py
|
lokal-profil/isfdb_site
|
0ce20d6347849926d4eda961ea9249c31519eea5
|
[
"BSD-3-Clause"
] | null | null | null |
#!_PYTHONLOC
#
# (C) COPYRIGHT 2009-2019 Ahasuerus
# ALL RIGHTS RESERVED
#
# The copyright notice above does not evidence any actual or
# intended publication of such source code.
#
# Version: $Revision$
# Date: $Date$
import string
import sys
import MySQLdb
from isfdb import *
from common import *
from login import *
from SQLparsing import *
if __name__ == '__main__':
PrintHeader("My Web Sites")
PrintNavbar('mywebsites', 0, 0, 'mywebsites.cgi', 0)
(myID, username, usertoken) = GetUserData()
myID = int(myID)
if not myID:
print 'You must be logged in to modify your list of preferred Web sites'
sys.exit(0)
PrintTrailer('mywebsites', 0, 0)
#Get a list of currently defined Web sites
query = "select site_id, site_name from websites order by site_name"
db.query(query)
result = db.store_result()
row = result.fetch_row()
websites = []
while row:
websites.append(row[0])
row = result.fetch_row()
# Get the currently defined site preferences for the logged-in user
query = "select site_id,user_choice from user_sites where user_id='%d'" % (myID)
db.query(query)
result = db.store_result()
row = result.fetch_row()
user_sites = []
while row:
user_sites.append(row[0])
row = result.fetch_row()
print '<h3>Select Web Sites to link Publications to. At least one Amazon site needs to be selected since ISFDB links to Amazon-hosted images.</h3>'
print '<form id="data" METHOD="POST" ACTION="/cgi-bin/submitmywebsites.cgi">'
print '<ul>'
for website in websites:
checked = 'checked'
for user_site in user_sites:
if user_site[0] == website[0]:
if user_site[1] == 0:
checked = ''
break
print '<li><input type="checkbox" name="site_choice.%s" value="on" %s>%s ' % (website[0], checked, website[1])
print '<input name="site_id.%d" value="%s" type="HIDDEN"></li>' % (website[0], website[1])
print '</ul>'
print '<p>'
print '<input type="SUBMIT" value="Update List of Web Sites">'
print '</form>'
PrintTrailer('mywebsites', 0, 0)
| 31.638889
| 155
| 0.610184
|
#!_PYTHONLOC
#
# (C) COPYRIGHT 2009-2019 Ahasuerus
# ALL RIGHTS RESERVED
#
# The copyright notice above does not evidence any actual or
# intended publication of such source code.
#
# Version: $Revision$
# Date: $Date$
import string
import sys
import MySQLdb
from isfdb import *
from common import *
from login import *
from SQLparsing import *
if __name__ == '__main__':
PrintHeader("My Web Sites")
PrintNavbar('mywebsites', 0, 0, 'mywebsites.cgi', 0)
(myID, username, usertoken) = GetUserData()
myID = int(myID)
if not myID:
print 'You must be logged in to modify your list of preferred Web sites'
sys.exit(0)
PrintTrailer('mywebsites', 0, 0)
#Get a list of currently defined Web sites
query = "select site_id, site_name from websites order by site_name"
db.query(query)
result = db.store_result()
row = result.fetch_row()
websites = []
while row:
websites.append(row[0])
row = result.fetch_row()
# Get the currently defined site preferences for the logged-in user
query = "select site_id,user_choice from user_sites where user_id='%d'" % (myID)
db.query(query)
result = db.store_result()
row = result.fetch_row()
user_sites = []
while row:
user_sites.append(row[0])
row = result.fetch_row()
print '<h3>Select Web Sites to link Publications to. At least one Amazon site needs to be selected since ISFDB links to Amazon-hosted images.</h3>'
print '<form id="data" METHOD="POST" ACTION="/cgi-bin/submitmywebsites.cgi">'
print '<ul>'
for website in websites:
checked = 'checked'
for user_site in user_sites:
if user_site[0] == website[0]:
if user_site[1] == 0:
checked = ''
break
print '<li><input type="checkbox" name="site_choice.%s" value="on" %s>%s ' % (website[0], checked, website[1])
print '<input name="site_id.%d" value="%s" type="HIDDEN"></li>' % (website[0], website[1])
print '</ul>'
print '<p>'
print '<input type="SUBMIT" value="Update List of Web Sites">'
print '</form>'
PrintTrailer('mywebsites', 0, 0)
| 0
| 0
| 0
|
abc1662bafe8b88242e1d6f85d310846facbde58
| 5,427
|
py
|
Python
|
wpy/ArtifactGenerator.py
|
dulichan/wpython
|
0e8319dd0d9e8d8cb362a6a373a07bc8c2ff5e81
|
[
"WTFPL"
] | null | null | null |
wpy/ArtifactGenerator.py
|
dulichan/wpython
|
0e8319dd0d9e8d8cb362a6a373a07bc8c2ff5e81
|
[
"WTFPL"
] | null | null | null |
wpy/ArtifactGenerator.py
|
dulichan/wpython
|
0e8319dd0d9e8d8cb362a6a373a07bc8c2ff5e81
|
[
"WTFPL"
] | null | null | null |
from pybars import Compiler
import lxml.etree as etree
import collections
from . import Artifact
import os
class ArtifactGenerator(object):
"""Class used to generate artifacts for WSO2"""
"""Generate the artifact based on the passed template location and data"""
def merge(self, a, b, path=None):
"deep merge dictionary"
if path is None: path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
self.merge(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass # same leaf value
else:
#raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))
pass
else:
a[key] = b[key]
return a
def get_filepaths(self, directory, function):
"""
This function will generate the file names in a directory
tree by walking the tree either top-down or bottom-up. For each
directory in the tree rooted at directory top (including top itself),
it yields a 3-tuple (dirpath, dirnames, filenames).
"""
file_paths = [] # List which will store all of the full filepaths.
# Walk the tree.
for root, directories, files in os.walk(directory):
for filename in files:
# Join the two strings in order to form the full filepath.
filepath = os.path.join(root, filename)
file_paths = function(filepath, filename, file_paths)
# Add it to the list.
return file_paths # Self-explanatory.
def generateArtifact(self, data, directory):
""" Pass in the generic parameters for the pom file. The method will read the synapse
directory and create the resources dictionary """
synapse_directory = directory
fileList = self.get_filepaths(synapse_directory, synapse_config)
resources = []
for fileObj in fileList:
resources.append({'type': fileObj['fileESBType'], 'fileExtension': fileObj['fileType'], 'resourceName': fileObj['fileName'].split(".")[0]})
#print resources
data['resources'] = resources
artifactArtiObj = self.generate(data, "templates/artifact.hbs")
return artifactArtiObj
#registry_directory = directory + "/gateway-registry/"
#def registry_config(filepath, filename, file_paths):
#typeName = filepath.split("/")
#print typeName
# if typeName[13]=="synapse-config":
# if len(typeName) == 16:
# typeName = typeName[14]
# file_paths.append({'filePath': filepath, 'fileName': filename, 'type': typeName})
#return file_paths
#fileList = self.get_filepaths(registry_directory, registry_config)
def generateCarPom(self, data, directory):
""" Pass in the generic parameters for the pom file. The method will read the synapse
directory and create the resources dictionary """
synapse_directory = directory
fileList = self.get_filepaths(synapse_directory, synapse_config)
resources = []
for fileObj in fileList:
resources.append({'type': fileObj['fileESBType'], 'fileExtension': fileObj['fileType'], 'resourceName': fileObj['fileName'].split(".")[0]})
data['resources'] = resources
print data
artifactCarObj = self.generate(data, "templates/car_pom.hbs")
return artifactCarObj
#registry_directory = directory + "/dev-registry/"
#def registry_config(filepath, filename, file_paths):
#typeName = filepath.split("/")
#print typeName
# if typeName[13]=="synapse-config":
# if len(typeName) == 16:
# typeName = typeName[14]
# file_paths.append({'filePath': filepath, 'fileName': filename, 'type': typeName})
#return file_paths
#fileList = self.get_filepaths(registry_directory, registry_config)
| 36.918367
| 142
| 0.676617
|
from pybars import Compiler
import lxml.etree as etree
import collections
from . import Artifact
import os
class ArtifactGenerator(object):
"""Class used to generate artifacts for WSO2"""
def __init__(self):
self.artifactList = []
pass
"""Generate the artifact based on the passed template location and data"""
def generate(self, data, templateLocation):
compiler = Compiler()
source = open(templateLocation).read().decode('utf-8')
template = compiler.compile(source)
output = template(data)
parser = etree.XMLParser(remove_blank_text=True)
root = etree.XML(output, parser)
xml_output = etree.tostring(root, pretty_print = True, xml_declaration = True, encoding='UTF-8')
artifactObj = Artifact(xml_output)
return artifactObj
def merge(self, a, b, path=None):
"deep merge dictionary"
if path is None: path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
self.merge(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass # same leaf value
else:
#raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))
pass
else:
a[key] = b[key]
return a
def generateEndpoint(self, data):
if data["type"]=="address":
defaultData = {
'address_props': {
'statistics': "false",
'traceFlag': "false"
}
}
data = self.merge(data, defaultData)
artifactObj = self.generate(data, "templates/endpoint.hbs")
return artifactObj
def generateProxy(self, data):
artifactObj = self.generate(data, "templates/proxy.hbs")
return artifactObj
def get_filepaths(self, directory, function):
"""
This function will generate the file names in a directory
tree by walking the tree either top-down or bottom-up. For each
directory in the tree rooted at directory top (including top itself),
it yields a 3-tuple (dirpath, dirnames, filenames).
"""
file_paths = [] # List which will store all of the full filepaths.
# Walk the tree.
for root, directories, files in os.walk(directory):
for filename in files:
# Join the two strings in order to form the full filepath.
filepath = os.path.join(root, filename)
file_paths = function(filepath, filename, file_paths)
# Add it to the list.
return file_paths # Self-explanatory.
def generateArtifact(self, data, directory):
""" Pass in the generic parameters for the pom file. The method will read the synapse
directory and create the resources dictionary """
synapse_directory = directory
def synapse_config(filepath, filename, file_paths):
typeName = os.path.splitext(filepath)[1]
fileESBType = filepath.split("/")[-2][:-1]
if typeName == ".xml":
file_paths.append({'filePath': filepath, 'fileName': filename, 'fileType': typeName[1:], 'fileESBType': fileESBType})
return file_paths
fileList = self.get_filepaths(synapse_directory, synapse_config)
resources = []
for fileObj in fileList:
resources.append({'type': fileObj['fileESBType'], 'fileExtension': fileObj['fileType'], 'resourceName': fileObj['fileName'].split(".")[0]})
#print resources
data['resources'] = resources
artifactArtiObj = self.generate(data, "templates/artifact.hbs")
return artifactArtiObj
#registry_directory = directory + "/gateway-registry/"
#def registry_config(filepath, filename, file_paths):
#typeName = filepath.split("/")
#print typeName
# if typeName[13]=="synapse-config":
# if len(typeName) == 16:
# typeName = typeName[14]
# file_paths.append({'filePath': filepath, 'fileName': filename, 'type': typeName})
#return file_paths
#fileList = self.get_filepaths(registry_directory, registry_config)
def generateCarPom(self, data, directory):
""" Pass in the generic parameters for the pom file. The method will read the synapse
directory and create the resources dictionary """
synapse_directory = directory
def synapse_config(filepath, filename, file_paths):
typeName = os.path.splitext(filepath)[1]
fileESBType = filepath.split("/")[-2][:-1]
if typeName == ".xml":
file_paths.append({'filePath': filepath, 'fileName': filename, 'fileType': typeName[1:], 'fileESBType': fileESBType})
return file_paths
fileList = self.get_filepaths(synapse_directory, synapse_config)
resources = []
for fileObj in fileList:
resources.append({'type': fileObj['fileESBType'], 'fileExtension': fileObj['fileType'], 'resourceName': fileObj['fileName'].split(".")[0]})
data['resources'] = resources
print data
artifactCarObj = self.generate(data, "templates/car_pom.hbs")
return artifactCarObj
#registry_directory = directory + "/dev-registry/"
#def registry_config(filepath, filename, file_paths):
#typeName = filepath.split("/")
#print typeName
# if typeName[13]=="synapse-config":
# if len(typeName) == 16:
# typeName = typeName[14]
# file_paths.append({'filePath': filepath, 'fileName': filename, 'type': typeName})
#return file_paths
#fileList = self.get_filepaths(registry_directory, registry_config)
def hold(self, artifact):
self.artifactList.append(artifact)
def saveList(self, location):
for (artifact) in self.artifactList:
artifact.save(location)
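# Usage sketch for the deep-merge helper above, mirroring how generateEndpoint layers
# user data over defaultData; the dictionaries here are hypothetical. Keys missing from
# `a` are copied in from `b`, nested dicts are merged recursively, and on a conflicting
# leaf the value already present in `a` wins (the conflict branch is a no-op).
if __name__ == '__main__':
    generator = ArtifactGenerator()
    data = {'address_props': {'statistics': 'true'}, 'name': 'StockQuoteEP'}
    defaults = {'address_props': {'statistics': 'false', 'traceFlag': 'false'}}
    merged = generator.merge(data, defaults)
    # merged == {'address_props': {'statistics': 'true', 'traceFlag': 'false'},
    #            'name': 'StockQuoteEP'}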
| 1,518
| 0
| 196
|
a0cae71e1c74c50e189d22060e5db50461098c6f
| 4,209
|
py
|
Python
|
early_projects/defi.py
|
JSBCCA/pythoncode
|
b7f2af8b0efc2d01d3e4568265eb3a5038a8679f
|
[
"MIT"
] | null | null | null |
early_projects/defi.py
|
JSBCCA/pythoncode
|
b7f2af8b0efc2d01d3e4568265eb3a5038a8679f
|
[
"MIT"
] | null | null | null |
early_projects/defi.py
|
JSBCCA/pythoncode
|
b7f2af8b0efc2d01d3e4568265eb3a5038a8679f
|
[
"MIT"
] | null | null | null |
from math import pi
import os
import time
# sum([n, n, n...]) adds any number of variables
# print (subtract(8, 3))
# print (multiply(5, 3))
# print (double(7))
# print (triple(5))
# print (divide(8, 4))
# print (half(4))
# print (celsius_conv(94))
c_c = celsius_conv
# print (fahrenheit_conv(49))
# print (p_t(3, 4))
# print (p_t2(5, 3))
# print (square(5))
# print (square_root(25))
# print (cube(4))
# print (f_y(1))
f_y = feet_to_yards
# print (i_c(10))
i_c = inches_to_centi
# print (i_f(11))
i_f = inches_to_feet
# print (f_i(30))
f_i = feet_to_inches
# print (blah(8))
# print (convert_mileage(90))
# print (liters_needed(50, 30))
def pie_perc(n):
"""precondition: n > 0
Assuming n people want to eat a pie,
return the percentage each person gets to eat."""
return int(100 / n)
def average_of_best_3(a, b, c, d): # gives the average of the highest 3
"""Use numbers between 0 and 100"""
first = min(a, b, c, d)
second = (a + b + c + d - first)
third = second / 3
return third
circum = circumference
# circum(r) = circumference(r)
| 19.307339
| 75
| 0.619625
|
from math import pi
import os
import time
# sum([n, n, n...]) adds any number of variables
def add(a, b):
return a + b
def subtract(a, b): # subtracts one number from another
return a - b
# print (subtract(8, 3))
def multiply(a, b): # multiplies two numbers
return a * b
# print (multiply(5, 3))
def double(n): # doubles a number
return n * 2
# print (double(7))
def triple(n): # triples a number
return n * 3
# print (triple(5))
def divide(a, b): # divides one number by another
return a / b
# print (divide(8, 4))
def half(n): # deduces half of a number
return n / 2
# print (half(4))
def celsius_conv(f): # takes a temperature in degF and converts to degC
if f == 0:
return -17.7778
else:
return (f - 32.0) * (5.0 / 9.0)
# print (celsius_conv(94))
c_c = celsius_conv
def fahrenheit_conv(c): # takes a temp in degC and converts to degF
if c == 0:
return 32
else:
return c * (9.0 / 5.0) + 32
# print (fahrenheit_conv(49))
def p_t(a, b): # uses the Pythag. theorem to deduce what C is
return ((a * a + b * b)**(1 / 2))
# print (p_t(3, 4))
def p_t2(c, a): # uses the Pythag. theorem to deduce what A or B is
return ((c * c - a * a)**(1 / 2))
# print (p_t2(5, 3))
def square(c): # squares a number
return (c * c)
# print (square(5))
def square_root(n): # gives the square root of a number
return (n**(1 / 2))
# print (square_root(25))
def cube(c): # cubes a number
return (c**3)
# print (cube(4))
def feet_to_yards(n): # converts feet into yards
return (n * (1 / 3))
# print (f_y(1))
f_y = feet_to_yards
def inches_to_centi(n): # converts inches to centimeters
return (n * 2.54)
# print (i_c(10))
i_c = inches_to_centi
def inches_to_feet(n): # converts inches to feet
return (n / 12)
# print (i_f(11))
i_f = inches_to_feet
def feet_to_inches(n): # converts feet to inches
return (n * 12)
# print (f_i(30))
f_i = feet_to_inches
def miles_to_kilom(n):
return n * 1.60934
def kilom_to_miles(n):
return n * 0.621371
def blah(n): # halves a number, then adds one
return half(n) + 1
# print (blah(8))
def convert_mileage(mpg): # converts mpg to liters per hundred km
first = (100 * 3.785411784)
second = (1.609344 * mpg)
l_per_hundred_km = first / second
return l_per_hundred_km
# print (convert_mileage(90))
def liters_needed(k, mpg): # tells needed amount of liters
first = convert_mileage(mpg)
second = first / 100
liters = second * k
return liters
# print (liters_needed(50, 30))
def diff_from_10(x):
return 10 - x
def add_5(x):
return x + 5
def days_difference(day1, day2): # gives number of days between 2 days
return day2 - day1
def get_weekday(current_weekday, days_ahead):
return (current_weekday + days_ahead - 1) % 7 + 1
def get_birthday_weekday(current_weekday, current_day, birthday_day):
days_diff = days_difference(current_day, birthday_day)
return get_weekday(current_weekday, days_diff)
g_b_w = get_birthday_weekday
def pie_perc(n):
"""precondition: n > 0
Assuming n people want to eat a pie,
return the percentage each person gets to eat."""
return int(100 / n)
def average_of_3(a, b, c): # gives the average of 3 numbers
return (a + b + c) / 3
def average_of_best_3(a, b, c, d): # gives the average of the highest 3
"""Use numbers between 0 and 100"""
first = min(a, b, c, d)
second = (a + b + c + d - first)
third = second / 3
return third
def weeks_elapsed(day1, day2): # deduces how many weeks are between 2 days
first = abs(days_difference(day1, day2))
second = first / 7
third = int(second)
return third
def circle_area(r):
return pi * pow(r, 2)
def circumference(r):
return (2 * (pi * r))
circum = circumference
# circum(r) = circumference(r)
def heart_disease(age, bmi):
if age < 45:
if bmi < 22.0:
print("Low risk")
else:
print("Medium risk")
else:
if bmi < 22.0:
print("Medium risk")
else:
print("High risk")
def different(a, b):
print(a != b)
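# A few worked values for the conversion helpers above, since the constants are easy to
# misread: convert_mileage computes (100 * 3.785411784) / (1.609344 * mpg), i.e. litres
# per 100 km, and liters_needed scales that rate by the distance in km.
if __name__ == '__main__':
    print(convert_mileage(30))     # ~7.84 L/100km for a 30 mpg vehicle
    print(liters_needed(50, 30))   # ~3.92 L to cover 50 km at 30 mpg
    print(celsius_conv(94))        # ~34.44 degC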
| 2,308
| 0
| 781
|
1d857b0fad333456cb2cdb5bca71530476c6efd6
| 432
|
py
|
Python
|
fc_passion/db.py
|
jinShine/fc-passion-page
|
1e3af3595318a9ebf325e80869ac4d6c7194d0ff
|
[
"MIT"
] | 1
|
2020-04-19T02:12:28.000Z
|
2020-04-19T02:12:28.000Z
|
fc_passion/db.py
|
jinShine/fc-passion-page
|
1e3af3595318a9ebf325e80869ac4d6c7194d0ff
|
[
"MIT"
] | null | null | null |
fc_passion/db.py
|
jinShine/fc-passion-page
|
1e3af3595318a9ebf325e80869ac4d6c7194d0ff
|
[
"MIT"
] | null | null | null |
import config
from pymongo import MongoClient
from instagram_api import insta_fetch_feed
| 28.8
| 74
| 0.712963
|
import config
from pymongo import MongoClient
from instagram_api import insta_fetch_feed
class DB():
def __init__(self, client=None):
self.client = MongoClient(config.MongoDB_URL, 27017)
def fcpassion_db(self):
return self.client.fcpassion
def get_insta_api(self):
instagram_collection = self.fcpassion_db().instagram
return instagram_collection.find({}, {'_id': False}).sort('id', 1)
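# Brief usage sketch for the wrapper above; it assumes a reachable MongoDB instance at
# config.MongoDB_URL with an existing fcpassion.instagram collection.
if __name__ == '__main__':
    db = DB()
    for post in db.get_insta_api():   # documents sorted by id, with _id projected out
        print(post)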
| 250
| -10
| 104
|
382240a294a5586fea04c96c0d1b774719fa869a
| 5,495
|
py
|
Python
|
data_generators/data_generator_cnn.py
|
Yannick947/person_counting
|
4fbad4836369f89e238361498382824ddecf2eae
|
[
"MIT"
] | 3
|
2020-08-17T08:36:21.000Z
|
2021-03-19T22:09:47.000Z
|
data_generators/data_generator_cnn.py
|
Yannick947/person_counting
|
4fbad4836369f89e238361498382824ddecf2eae
|
[
"MIT"
] | 7
|
2021-02-02T23:05:11.000Z
|
2022-03-12T00:50:33.000Z
|
data_generators/data_generator_cnn.py
|
Yannick947/person_counting
|
4fbad4836369f89e238361498382824ddecf2eae
|
[
"MIT"
] | 1
|
2020-08-17T08:36:24.000Z
|
2020-08-17T08:36:24.000Z
|
import os
import pandas as pd
import numpy as np
import math
from random import shuffle
from tensorflow import keras
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from person_counting.data_generators.data_generators import Generator_CSVS
from person_counting.data_generators.data_generators import *
from person_counting.utils.preprocessing import get_filtered_lengths
from person_counting.utils.scaler import FeatureScaler, LabelScaler
from person_counting.utils.preprocessing import apply_file_filters
class Generator_CSVS_CNN(Generator_CSVS):
"""
Generators class to load npy files from
    video folder structure like PCDS Dataset and
train CNNs
Arguments (**kwargs)
length_t : Length of the feature's DataFrame in time dimension
length_y : Length of the feature's DataFrame in y direction
file_names : File names to be processed
filter_cols_upper, : Amount of columns to be filtered at end and start of DataFrame
batch_size : Batch size
top_path : Parent path where csv files are contained
label_file : Name of the label file
"""
def create_datagen(
top_path,
sample,
label_file,
augmentation_factor=0,
filter_hour_below=7,
filter_hour_above=24,
filter_category_noisy=False,
supercharge_crowdeds=False,
):
"""
    Creates train, validation and test data generators for the CNN.
Arguments:
        top_path: Parent directory to be searched for training files
sample: sample of hyperparameters used in this run
label_file: Name of the label file containing all the labels
        augmentation_factor: Factor for how much augmentation shall be done; 1 means
                             moving every pixel by one position
filter_hour_above: Hour after which videos shall be filtered
filter_category_noisy: Flag if noisy videos shall be filtered
"""
# Load filenames and lengths
length_t, length_y = get_filtered_lengths(top_path, sample)
train_file_names, validation_file_names, test_file_names = get_file_split(
top_path, supercharge_crowdeds=supercharge_crowdeds
)
# Apply filters
train_file_names = apply_file_filters(
df=train_file_names,
filter_hour_above=filter_hour_above,
filter_category_noisy=filter_category_noisy,
filter_hour_below=filter_hour_below,
)
validation_file_names = apply_file_filters(
df=validation_file_names,
filter_hour_above=filter_hour_above,
filter_category_noisy=filter_category_noisy,
filter_hour_below=filter_hour_below,
)
test_file_names = apply_file_filters(
df=test_file_names,
filter_hour_above=filter_hour_above,
filter_category_noisy=filter_category_noisy,
filter_hour_below=filter_hour_below,
)
scale_files = pd.concat([train_file_names, validation_file_names, test_file_names])
print(
"Dataset contains: \n{} training files \n{} validation files \n{} testing files".format(
len(train_file_names), len(validation_file_names), len(test_file_names)
)
)
feature_scaler = FeatureScaler(top_path, scale_files, sample)
label_scaler = LabelScaler(top_path, label_file, scale_files, sample)
gen_train = Generator_CSVS_CNN(
length_t=length_t,
length_y=length_y,
file_names=train_file_names,
feature_scaler=feature_scaler,
label_scaler=label_scaler,
sample=sample,
top_path=top_path,
label_file=label_file,
augmentation_factor=augmentation_factor,
)
# Don't do augmentation here!
gen_validation = Generator_CSVS_CNN(
length_t=length_t,
length_y=length_y,
file_names=validation_file_names,
feature_scaler=feature_scaler,
label_scaler=label_scaler,
sample=sample,
top_path=top_path,
label_file=label_file,
augmentation_factor=0,
)
gen_test = Generator_CSVS_CNN(
length_t=length_t,
length_y=length_y,
file_names=test_file_names,
feature_scaler=feature_scaler,
label_scaler=label_scaler,
sample=sample,
top_path=top_path,
label_file=label_file,
augmentation_factor=0,
)
return gen_train, gen_validation, gen_test
def get_file_split(top_path, supercharge_crowdeds=False):
"""Get filenames previously splitted"""
if top_path[-2:] != "\\\\" and top_path[-1] != "/":
top_path += "/"
if supercharge_crowdeds:
train = top_path + pd.read_csv(
os.path.join(top_path, "supercharged_crowdeds_train_split.csv"), header=None, squeeze=True
)
else:
train = top_path + pd.read_csv(os.path.join(top_path, "train_split.csv"), header=None, squeeze=True)
val = top_path + pd.read_csv(os.path.join(top_path, "validation_split.csv"), header=None, squeeze=True)
test = top_path + pd.read_csv(os.path.join(top_path, "test_split.csv"), header=None, squeeze=True)
train = train.apply(lambda row: row.replace("\\", "/"))
val = val.apply(lambda row: row.replace("\\", "/"))
test = test.apply(lambda row: row.replace("\\", "/"))
return train, val, test
| 34.130435
| 108
| 0.697543
|
import os
import pandas as pd
import numpy as np
import math
from random import shuffle
from tensorflow import keras
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from person_counting.data_generators.data_generators import Generator_CSVS
from person_counting.data_generators.data_generators import *
from person_counting.utils.preprocessing import get_filtered_lengths
from person_counting.utils.scaler import FeatureScaler, LabelScaler
from person_counting.utils.preprocessing import apply_file_filters
class Generator_CSVS_CNN(Generator_CSVS):
"""
Generators class to load npy files from
    video folder structure like PCDS Dataset and
train CNNs
Arguments (**kwargs)
length_t : Length of the feature's DataFrame in time dimension
length_y : Length of the feature's DataFrame in y direction
file_names : File names to be processed
filter_cols_upper, : Amount of columns to be filtered at end and start of DataFrame
batch_size : Batch size
top_path : Parent path where csv files are contained
label_file : Name of the label file
"""
def __init__(self, *args, **kwargs):
super(Generator_CSVS_CNN, self).__init__(*args, **kwargs)
def create_datagen(
top_path,
sample,
label_file,
augmentation_factor=0,
filter_hour_below=7,
filter_hour_above=24,
filter_category_noisy=False,
supercharge_crowdeds=False,
):
"""
    Creates train, validation and test data generators for the CNN.
Arguments:
        top_path: Parent directory to be searched for training files
sample: sample of hyperparameters used in this run
label_file: Name of the label file containing all the labels
        augmentation_factor: Factor for how much augmentation shall be done; 1 means
                             moving every pixel by one position
filter_hour_above: Hour after which videos shall be filtered
filter_category_noisy: Flag if noisy videos shall be filtered
"""
# Load filenames and lengths
length_t, length_y = get_filtered_lengths(top_path, sample)
train_file_names, validation_file_names, test_file_names = get_file_split(
top_path, supercharge_crowdeds=supercharge_crowdeds
)
# Apply filters
train_file_names = apply_file_filters(
df=train_file_names,
filter_hour_above=filter_hour_above,
filter_category_noisy=filter_category_noisy,
filter_hour_below=filter_hour_below,
)
validation_file_names = apply_file_filters(
df=validation_file_names,
filter_hour_above=filter_hour_above,
filter_category_noisy=filter_category_noisy,
filter_hour_below=filter_hour_below,
)
test_file_names = apply_file_filters(
df=test_file_names,
filter_hour_above=filter_hour_above,
filter_category_noisy=filter_category_noisy,
filter_hour_below=filter_hour_below,
)
scale_files = pd.concat([train_file_names, validation_file_names, test_file_names])
print(
"Dataset contains: \n{} training files \n{} validation files \n{} testing files".format(
len(train_file_names), len(validation_file_names), len(test_file_names)
)
)
feature_scaler = FeatureScaler(top_path, scale_files, sample)
label_scaler = LabelScaler(top_path, label_file, scale_files, sample)
gen_train = Generator_CSVS_CNN(
length_t=length_t,
length_y=length_y,
file_names=train_file_names,
feature_scaler=feature_scaler,
label_scaler=label_scaler,
sample=sample,
top_path=top_path,
label_file=label_file,
augmentation_factor=augmentation_factor,
)
# Don't do augmentation here!
gen_validation = Generator_CSVS_CNN(
length_t=length_t,
length_y=length_y,
file_names=validation_file_names,
feature_scaler=feature_scaler,
label_scaler=label_scaler,
sample=sample,
top_path=top_path,
label_file=label_file,
augmentation_factor=0,
)
gen_test = Generator_CSVS_CNN(
length_t=length_t,
length_y=length_y,
file_names=test_file_names,
feature_scaler=feature_scaler,
label_scaler=label_scaler,
sample=sample,
top_path=top_path,
label_file=label_file,
augmentation_factor=0,
)
return gen_train, gen_validation, gen_test
def get_file_split(top_path, supercharge_crowdeds=False):
"""Get filenames previously splitted"""
if top_path[-2:] != "\\\\" and top_path[-1] != "/":
top_path += "/"
if supercharge_crowdeds:
train = top_path + pd.read_csv(
os.path.join(top_path, "supercharged_crowdeds_train_split.csv"), header=None, squeeze=True
)
else:
train = top_path + pd.read_csv(os.path.join(top_path, "train_split.csv"), header=None, squeeze=True)
val = top_path + pd.read_csv(os.path.join(top_path, "validation_split.csv"), header=None, squeeze=True)
test = top_path + pd.read_csv(os.path.join(top_path, "test_split.csv"), header=None, squeeze=True)
train = train.apply(lambda row: row.replace("\\", "/"))
val = val.apply(lambda row: row.replace("\\", "/"))
test = test.apply(lambda row: row.replace("\\", "/"))
return train, val, test
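# --- Editor's sketch (not part of the original module) ----------------------------
# A minimal, hedged usage example of create_datagen(). The directory "data/videos/",
# the label file name "labels.json" and the empty `sample` dict are illustrative
# assumptions only; real values depend on the project's hyperparameter handling.
if __name__ == "__main__":
    example_sample = {}  # hypothetical hyperparameter sample
    gen_train, gen_val, gen_test = create_datagen(
        top_path="data/videos/",       # assumed parent directory containing the *_split.csv files
        sample=example_sample,
        label_file="labels.json",      # assumed label file name
        augmentation_factor=1,         # shift every pixel by one position
        filter_hour_below=7,
        filter_hour_above=24,
        filter_category_noisy=True,
    )
    print(gen_train, gen_val, gen_test)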
| 81
| 0
| 27
|
d5b22ea34f0bbc299fab73839184251258eecd69
| 310
|
py
|
Python
|
Losses/__init__.py
|
SimonTheVillain/ActiveStereoNet
|
708bddce844998b366be1a1ec8a72a31ccd26f8c
|
[
"MIT"
] | 17
|
2019-08-23T04:00:32.000Z
|
2022-02-06T13:37:02.000Z
|
Losses/__init__.py
|
SimonTheVillain/ActiveStereoNet
|
708bddce844998b366be1a1ec8a72a31ccd26f8c
|
[
"MIT"
] | null | null | null |
Losses/__init__.py
|
SimonTheVillain/ActiveStereoNet
|
708bddce844998b366be1a1ec8a72a31ccd26f8c
|
[
"MIT"
] | 7
|
2019-12-20T07:46:41.000Z
|
2021-11-01T04:18:19.000Z
|
from .supervise import *
| 22.142857
| 79
| 0.580645
|
from .supervise import *
def get_losses(name, **kwargs):
name = name.lower()
if name == 'rhloss':
loss = RHLoss(**kwargs)
elif name == 'xtloss':
loss = XTLoss(**kwargs)
else:
raise NotImplementedError('Loss [{:s}] is not supported.'.format(name))
return loss
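# --- Editor's sketch (not part of the original module) ----------------------------
# Demonstrates only the dispatch/error behaviour of get_losses(); constructing
# RHLoss/XTLoss may need project-specific kwargs, so the sketch sticks to the
# documented failure path.
if __name__ == "__main__":
    try:
        get_losses("totally_unknown_loss")
    except NotImplementedError as err:
        print(err)  # Loss [totally_unknown_loss] is not supported.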
| 262
| 0
| 23
|
97cfb67e2c0a8d318cf1456a02cf8daf9b4214dc
| 1,551
|
py
|
Python
|
beaker/data_model/workspace.py
|
allenai/beaker-py
|
99c8d7f6e9938807ca5405964ef35633a19e8d68
|
[
"Apache-2.0"
] | null | null | null |
beaker/data_model/workspace.py
|
allenai/beaker-py
|
99c8d7f6e9938807ca5405964ef35633a19e8d68
|
[
"Apache-2.0"
] | 20
|
2021-12-16T13:23:07.000Z
|
2022-03-31T16:40:02.000Z
|
beaker/data_model/workspace.py
|
allenai/beaker-py
|
99c8d7f6e9938807ca5405964ef35633a19e8d68
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
from enum import Enum
from typing import Dict, List, Optional, Tuple
from .account import Account
from .base import BaseModel
class Permission(str, Enum):
"""
Workspace permission levels.
"""
no_permission = "none"
read = "read"
write = "write"
full_control = "all"
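# --- Editor's sketch (not part of the original module) ----------------------------
# Because Permission subclasses str, members compare equal to their wire values and
# can be looked up by value, which is how API payloads are typically decoded.
if __name__ == "__main__":
    assert Permission.read == "read"
    assert Permission("all") is Permission.full_control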
| 19.3875
| 58
| 0.68343
|
from datetime import datetime
from enum import Enum
from typing import Dict, List, Optional, Tuple
from .account import Account
from .base import BaseModel
class WorkspaceSize(BaseModel):
datasets: int
experiments: int
groups: int
images: int
class Workspace(BaseModel):
id: str
name: str
full_name: str
description: Optional[str] = None
size: WorkspaceSize
owner: Account
author: Account
created: datetime
modified: datetime
archived: bool = False
class WorkspaceRef(BaseModel):
id: str
name: str
full_name: str
class WorkspacePage(BaseModel):
data: Tuple[Workspace, ...]
next_cursor: Optional[str] = None
class WorkspaceSpec(BaseModel):
name: Optional[str] = None
description: Optional[str] = None
public: bool = False
org: Optional[str] = None
class WorkspaceTransferSpec(BaseModel):
ids: List[str]
class Permission(str, Enum):
"""
Workspace permission levels.
"""
no_permission = "none"
read = "read"
write = "write"
full_control = "all"
class WorkspacePermissions(BaseModel):
requester_auth: str
public: bool
authorizations: Dict[str, Permission]
"""
A dictionary of account IDs to authorizations.
"""
class WorkspacePatch(BaseModel):
name: Optional[str] = None
description: Optional[str] = None
archive: Optional[bool] = None
class WorkspacePermissionsPatch(BaseModel):
public: Optional[bool] = None
authorizations: Optional[Dict[str, Permission]] = None
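# --- Editor's sketch (not part of the original module) ----------------------------
# Assuming BaseModel wraps pydantic (as the field syntax suggests), the models can
# be instantiated directly from keyword arguments; the values below are made up.
if __name__ == "__main__":
    ref = WorkspaceRef(id="01ABC", name="demo", full_name="ai2/demo")
    patch = WorkspacePatch(description="updated description")
    print(ref.full_name, patch.dict(exclude_none=True))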
| 0
| 1,007
| 207
|
c79ee80443c8e28087163ee0ff092d7379886027
| 11,816
|
py
|
Python
|
bot/modelv0/state_view.py
|
gillesdami/python-sc2-bot-manyminds
|
b44f83018ff287ab3417ed9f7444b55590b89a09
|
[
"MIT"
] | 1
|
2019-09-11T08:04:08.000Z
|
2019-09-11T08:04:08.000Z
|
bot/modelv0/state_view.py
|
gillesdami/python-sc2-bot-manyminds
|
b44f83018ff287ab3417ed9f7444b55590b89a09
|
[
"MIT"
] | null | null | null |
bot/modelv0/state_view.py
|
gillesdami/python-sc2-bot-manyminds
|
b44f83018ff287ab3417ed9f7444b55590b89a09
|
[
"MIT"
] | null | null | null |
import math
import numpy as np
from sc2.ids.unit_typeid import UnitTypeId
from sc2.helpers.control_group import ControlGroup
from . import constants as C
| 39.651007
| 165
| 0.574898
|
import math
import numpy as np
from sc2.ids.unit_typeid import UnitTypeId
from sc2.helpers.control_group import ControlGroup
from . import constants as C
class StateView():
def __init__(self):
self.map_size = [int(round(size)) for size in self.game_info.map_size]
self.map_size_sum = self.map_size[0] + self.map_size[1]
self.meta = np.zeros(16) #TODO move
self.control_groups = [ControlGroup([]) for i in range(C.CONTROL_GROUPS_COUNT)]
# todo add units to control_groups
## utils
@staticmethod
def one_hot_encode(size, value):
return np.eye(1, size, value).reshape(-1)
@staticmethod
def get_unit_rouded_position(unit):
return unit.position.rounded
@staticmethod
def get_zerg_unit_id(unit):
return C.ZERG_UNITS_IDS.index(unit.type_id)
@staticmethod
def get_unit_health(unit):
return math.floor(unit.health_percentage * (C.HEALTH_VIEWED_SIZE - 1))
@staticmethod
def zero_pad_1d(array, final_length):
return np.pad(array, (0, final_length - len(array)), mode='constant')
## control group update
async def on_unit_destroyed(self, unit_tag):
for control_group in self.control_groups:
if unit_tag in control_group:
control_group.remove_unit(unit_tag)
async def on_unit_created(self, unit):
if not unit.unit_typeid in [C.ZERG_MILITARY_UNIT_TYPEID_IDS]:
for control_group in self.control_groups:
if control_group.amount < C.CONTROL_GROUPS_MAX_UNIT_COUNT:
control_group.add_unit(unit)
## views
def eco_viewer(self):
'''
WIP
Projection of the game state for an AI controlling the workers in the gathering group.
It uses data from:
- known_enemy_units Units
- townhalls Units
- geysers Units
- gathering_workers_group ControlGroup
- mineral_field Units
And return the following structure:
'''
self.known_enemy_units #Units
self.townhalls #Units
self.geysers #Units
self.workers #> filter worker building #Units
self.mineral_field #Units
def queens_viewer(self):
pass
@property
def building_view_size(self):
return 16 + 1 + 1 + C.MAX_ZERG_BUILDINGS_VIEWED_PER_TYPE * len(C.ZERG_BUILDINGS_IDS) + (self.map_size_sum + len(C.ZERG_UNITS_IDS)) * C.MAX_ENEMY_UNITS_VIEWED
def building_viewer(self):
'''
Projection of the game state for an AI controlling the construction of buildings.
It uses data from:
- meta number[]
- units Units
- workers Units
- minerals number
- vespene number
- known_enemy_units Units
And return the following structure:
- meta 16
- minerals 1
- vespene 1
- structure_units len(C.ZERG_BUILDINGS_UNIT_TYPEID_IDS)
- known_enemy_units (self.map_size_sum + len(C.ZERG_UNITS_IDS)) * C.MAX_ENEMY_UNITS_VIEWED
'''
def structure_units_to_view():
view = np.array([])
for unit_type in C.ZERG_BUILDINGS_UNIT_TYPEID_IDS:
# TODO filter only once
if unit_type.value in self._game_data.units:
ability = self._game_data.units[unit_type.value].creation_ability
amount = len(self.units(unit_type).not_ready)
amount += sum([o.ability == ability for w in self.workers for o in w.orders])
view = np.concatenate((
view,
self.one_hot_encode(C.MAX_ZERG_BUILDINGS_VIEWED_PER_TYPE, max([amount, C.MAX_ZERG_BUILDINGS_VIEWED_PER_TYPE - 1]))
))
return self.zero_pad_1d(view, C.MAX_ZERG_BUILDINGS_VIEWED_PER_TYPE * len(C.ZERG_BUILDINGS_IDS))
def known_enemy_units_to_view():
known_enemy_units = self.known_enemy_units.take(C.MAX_ENEMY_UNITS_VIEWED, False)
view_size = (self.map_size_sum + len(C.ZERG_UNITS_IDS)) * C.MAX_ENEMY_UNITS_VIEWED
view = np.array([])
for known_enemy_unit in known_enemy_units:
if known_enemy_unit.type_id in C.ZERG_UNITS_IDS:
position = self.get_unit_rouded_position(known_enemy_unit)
unit_id_idx = self.get_zerg_unit_id(known_enemy_unit)
view = np.concatenate((
view,
self.one_hot_encode(len(C.ZERG_UNITS_IDS), unit_id_idx),
self.one_hot_encode(self.map_size[0], position[0]),
self.one_hot_encode(self.map_size[1], position[1])
))
return self.zero_pad_1d(view, view_size)
return np.concatenate((
self.meta,
[self.minerals],
[self.vespene],
structure_units_to_view(),
known_enemy_units_to_view()
))
@property
def production_view_size(self):
return 16 + 1 + 1 + len(C.ZERG_UNITS_LARVATRAINABLE_IDS) + len(C.ZERG_UNITS_LARVATRAINABLE_IDS)
def production_viewer(self):
'''
Projection of the game state for an AI controlling the production of units.
It uses data from:
- meta number[]
- units Units
- known_enemy_units Units
- already_pending(UnitTypeId) number
- minerals number
- vespene number
And return the following structure:
- meta 16
- minerals 1
- vespene 1
- known_enemy_units_count len(C.ZERG_UNITS_LARVATRAINABLE_IDS)
- units_count len(C.ZERG_UNITS_LARVATRAINABLE_IDS)
'''
def known_enemy_units_to_view():
view = np.zeros(len(C.ZERG_UNITS_LARVATRAINABLE_IDS))
for known_enemy_unit in self.known_enemy_units:
if known_enemy_unit.type_id in C.ZERG_UNITS_LARVATRAINABLE_IDS:
view[C.get_zerg_unit_id(known_enemy_unit)] += 0.01
return view #softmax ?
def units_including_pending():
view = np.zeros(len(C.ZERG_UNITS_LARVATRAINABLE_IDS))
for unit in self.units.filter(lambda u: not u.is_structure):
if unit.type_id in C.ZERG_UNITS_LARVATRAINABLE_IDS:
view[C.get_zerg_unit_id(unit)] += 0.01
for unit_id_idx, unit_type_idx in enumerate(C.ZERG_UNITS_LARVATRAINABLE_IDS):
view[unit_id_idx] += self.already_pending(UnitTypeId(unit_type_idx)) / 100
return view #softmax ?
return np.concatenate((
self.meta,
[self.minerals],
[self.vespene],
known_enemy_units_to_view(),
units_including_pending()
))
@property
def military_view_size(self):
return (16 +
C.MAX_ZERG_BUILDINGS_VIEWED_PER_TYPE * self.map_size_sum +
C.MAX_ENEMY_UNITS_VIEWED * (len(C.ZERG_UNITS_IDS) + self.map_size_sum + C.HEALTH_VIEWED_SIZE) +
C.CONTROL_GROUPS_MAX_UNIT_COUNT * (len(C.ZERG_UNITS_IDS) + self.map_size_sum + C.HEALTH_VIEWED_SIZE) * C.CONTROL_GROUPS_COUNT
)
def military_viewer(self):
'''
Projection of the game state for an AI controlling the military units.
It uses data from:
- meta number[]
- units Units
- known_enemy_units Units
- control_groups ControlGroup
And return the following structure:
- meta 16
- townhall C.MAX_ZERG_BUILDINGS_VIEWED_PER_TYPE * self.map_size_sum
- known_enemy_units C.MAX_ENEMY_UNITS_VIEWED * (len(C.ZERG_UNITS_IDS) + self.map_size_sum + C.HEALTH_VIEWED_SIZE)
- units_groups C.CONTROL_GROUPS_MAX_UNIT_COUNT * (len(C.ZERG_UNITS_IDS) + self.map_size_sum + C.HEALTH_VIEWED_SIZE) * C.CONTROL_GROUPS_COUNT
'''
def townhalls_to_view():
max_encoded_townhalls = C.MAX_ZERG_BUILDINGS_VIEWED_PER_TYPE
view_size = max_encoded_townhalls * self.map_size_sum
townhalls = self.townhalls.take(max_encoded_townhalls, False)
view = np.array([])
for townhall in townhalls:
position = self.get_unit_rouded_position(townhall)
view = np.concatenate((
view,
self.one_hot_encode(self.map_size[0], position[0]),
self.one_hot_encode(self.map_size[1], position[1])
))
return self.zero_pad_1d(view, view_size)
def known_enemy_units_to_view():
known_enemy_units = self.known_enemy_units.take(C.MAX_ENEMY_UNITS_VIEWED, False)
view_size = C.MAX_ENEMY_UNITS_VIEWED * (len(C.ZERG_UNITS_IDS) + self.map_size_sum + C.HEALTH_VIEWED_SIZE)
view = np.array([])
for unit in known_enemy_units:
if unit.type_id in C.ZERG_UNITS_IDS:
position = self.get_unit_rouded_position(unit)
view = np.concatenate((
view,
self.one_hot_encode(len(C.ZERG_UNITS_IDS), self.get_zerg_unit_id(unit)),
self.one_hot_encode(self.map_size[0], position[0]),
self.one_hot_encode(self.map_size[1], position[1]),
self.one_hot_encode(C.HEALTH_VIEWED_SIZE, self.get_unit_health(unit))
))
return self.zero_pad_1d(view, view_size)
def units_groups_to_view():
# view creation
view = np.array([])
control_group_view_size = C.CONTROL_GROUPS_MAX_UNIT_COUNT * (len(C.ZERG_UNITS_IDS) + self.map_size_sum + C.HEALTH_VIEWED_SIZE)
view_size = control_group_view_size * C.CONTROL_GROUPS_COUNT
for control_group in self.control_groups:
units = control_group.select_units(self.units)
control_group_view = np.array([])
for unit in units:
if unit.type_id in C.ZERG_UNITS_IDS:
position = self.get_unit_rouded_position(unit)
control_group_view = np.concatenate((
control_group_view,
self.one_hot_encode(len(C.ZERG_UNITS_IDS), self.get_zerg_unit_id(unit)),
self.one_hot_encode(self.map_size[0], position[0]),
self.one_hot_encode(self.map_size[1], position[1]),
self.one_hot_encode(C.HEALTH_VIEWED_SIZE, self.get_unit_health(unit))
))
view = np.concatenate((
view,
self.zero_pad_1d(control_group_view, control_group_view_size)
))
return self.zero_pad_1d(view, view_size)
return np.concatenate((
self.meta,
townhalls_to_view(),
known_enemy_units_to_view(),
units_groups_to_view()
))
def research_viewer(self):
pass
def meta_viewer(self):
pass
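# --- Editor's sketch (not part of the original module) ----------------------------
# The two static helpers can be exercised without a running game (assuming the
# module's sc2 imports resolve), which also documents the encoding conventions
# used by the viewers above.
if __name__ == "__main__":
    print(StateView.one_hot_encode(5, 2))                    # [0. 0. 1. 0. 0.]
    print(StateView.zero_pad_1d(np.array([1.0, 2.0]), 4))    # [1. 2. 0. 0.]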
| 7,263
| 4,375
| 23
|
b94924887631dd799abe08a7574dbfa790332e61
| 752
|
py
|
Python
|
scaf/utils/__init__.py
|
englhardt/scaf
|
7ed7b424766ccb8910d28b55014604b5d98c276c
|
[
"MIT"
] | null | null | null |
scaf/utils/__init__.py
|
englhardt/scaf
|
7ed7b424766ccb8910d28b55014604b5d98c276c
|
[
"MIT"
] | null | null | null |
scaf/utils/__init__.py
|
englhardt/scaf
|
7ed7b424766ccb8910d28b55014604b5d98c276c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Adrian Englhardt <adrian.englhardt@gmail.com>
# Licensed under the MIT License - https://opensource.org/licenses/MIT
from .change_detection_helpers import normalize, normalize_2d, normalize_2d_global, prepare_data, smooth_frequency, \
transform_to_cosdist, transform_to_padded_cosdist, relative_frequency, percentual_diff, cut_array, filter_min_freq
from .helpers import expand_path, natural_sort
__all__ = ['normalize', 'normalize_2d', 'normalize_2d_global', 'prepare_data', 'smooth_frequency',
'transform_to_cosdist', 'transform_to_padded_cosdist', 'relative_frequency', 'percentual_diff', 'cut_array',
'filter_min_freq', 'expand_path', 'natural_sort']
| 53.714286
| 119
| 0.771277
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Adrian Englhardt <adrian.englhardt@gmail.com>
# Licensed under the MIT License - https://opensource.org/licenses/MIT
from .change_detection_helpers import normalize, normalize_2d, normalize_2d_global, prepare_data, smooth_frequency, \
transform_to_cosdist, transform_to_padded_cosdist, relative_frequency, percentual_diff, cut_array, filter_min_freq
from .helpers import expand_path, natural_sort
__all__ = ['normalize', 'normalize_2d', 'normalize_2d_global', 'prepare_data', 'smooth_frequency',
'transform_to_cosdist', 'transform_to_padded_cosdist', 'relative_frequency', 'percentual_diff', 'cut_array',
'filter_min_freq', 'expand_path', 'natural_sort']
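# --- Editor's sketch (not part of the original module) ----------------------------
# Thanks to the re-exports and __all__ above, downstream code can import the helpers
# from the package namespace. The calls below are commented out because the helper
# signatures/semantics are assumptions based only on their names.
# from scaf.utils import expand_path, natural_sort
# print(natural_sort(["item10", "item2", "item1"]))  # assumed: human-friendly ordering
# print(expand_path("~/data"))                       # assumed: user-directory expansion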
| 0
| 0
| 0
|
6207d26ad838fe858b8f58d14ff8cb583d93b58b
| 31,083
|
py
|
Python
|
openrave/sandbox/debugplanning.py
|
jdsika/TUM_HOly
|
a2ac55fa1751a3a8038cf61d29b95005f36d6264
|
[
"MIT"
] | 2
|
2015-11-13T16:40:57.000Z
|
2017-09-15T15:37:19.000Z
|
openrave/sandbox/debugplanning.py
|
jdsika/holy
|
a2ac55fa1751a3a8038cf61d29b95005f36d6264
|
[
"MIT"
] | 1
|
2016-06-13T01:29:51.000Z
|
2016-06-14T00:38:27.000Z
|
openrave/sandbox/debugplanning.py
|
jdsika/holy
|
a2ac55fa1751a3a8038cf61d29b95005f36d6264
|
[
"MIT"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# random code that helps with debugging/testing the python interfaces and examples
# this is not meant to be run by normal users
from __future__ import with_statement # for python 2.5
__copyright__ = 'Copyright (C) 2009-2010'
__license__ = 'Apache License, Version 2.0'
# random code that helps with debugging/testing the python interfaces and examples
# this is not meant to be run by normal users
from openravepy import *
import openravepy.examples
from openravepy.interfaces import *
from numpy import *
import numpy,time
| 45.777614
| 629
| 0.698453
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# random code that helps with debugging/testing the python interfaces and examples
# this is not meant to be run by normal users
from __future__ import with_statement # for python 2.5
__copyright__ = 'Copyright (C) 2009-2010'
__license__ = 'Apache License, Version 2.0'
# random code that helps with debugging/testing the python interfaces and examples
# this is not meant to be run by normal users
from openravepy import *
import openravepy.examples
from openravepy.interfaces import *
from numpy import *
import numpy,time
def test_grasping():
import grasping
env = Environment()
robot = env.ReadRobotXMLFile('robots/barrettsegway.robot.xml')
env.AddRobot(robot)
T = eye(4)
T[0,3] = 1
robot.SetTransform(T)
target = env.ReadKinBodyXMLFile('data/mug1.kinbody.xml')
target.SetTransform(T)
env.AddKinBody(target)
env.SetViewer('qtcoin')
self = grasping.GraspingModel(robot=robot,target=target)
self.init(friction=0.4,avoidlinks=[])
preshapes = array(((0.5,0.5,0.5,pi/3),(0.5,0.5,0.5,0),(0,0,0,pi/2)))
rolls = arange(0,2*pi,pi/2)
standoffs = array([0,0.025])
approachrays = self.computeBoxApproachRays(stepsize=0.02)
graspingnoise=None
self.generate(preshapes=preshapes, rolls=rolls, standoffs=standoffs, approachrays=approachrays,graspingnoise=None,addSphereNorms=False)
def test_autograsping():
import grasping
env = Environment()
env.SetDebugLevel(DebugLevel.Debug)
env.SetViewer('qtcoin')
env.Load('data/lab1.env.xml')
robot = env.GetRobots()[0]
target = env.GetKinBody('mug1')
self = grasping.GraspingModel(robot=robot,target=target)
self.autogenerate()
def test_reachability():
import kinematicreachability
env = Environment()
robot = env.ReadRobotXMLFile('robots/barrettsegway.robot.xml')
env.AddRobot(robot)
self = kinematicreachability.ReachabilityModel(robot=robot)
self.autogenerate()
def test_inversereachabilitygen():
import inversereachability
env = Environment()
robot = env.ReadRobotXMLFile('robots/barrettsegway.robot.xml')
env.AddRobot(robot)
self = inversereachability.InverseReachabilityModel(robot=robot)
heightthresh=0.05
quatthresh=0.15
self.generate(heightthresh=heightthresh,quatthresh=quatthresh)
def test_inversereachabilitytest():
import inversereachability
from scipy.optimize import leastsq
import bisect
env = openravepy.Environment()
robot = env.ReadRobotXMLFile('robots/barrettsegway.robot.xml')
env.AddRobot(robot)
self = inversereachability.InverseReachabilityModel(robot=robot)
self.load()
self.env.StopSimulation()
self.robot.SetTransform(eye(4))
self.testSampling(heights=arange(-1,1,0.1),logllthresh=2.3)
W = array([sum(e[2][:,-1]) for e in self.equivalenceclasses])
W /= max(W)
dirs = array([m[4]*linalg.inv(rotationMatrixFromQuat(m[0:4]))[:,2] for m in self.equivalencemeans])
h = self.env.plot3 (points=dirs,pointsize=5.0,colors=c_[1-W,1-W,1-W,W])
def test_inversereachabilityrun():
import inversereachability
env = Environment()
env.SetViewer('qtcoin')
env.Reset()
env.Load('data/lab1.env.xml')
robot = env.GetRobots()[0]
self = inversereachability.InverseReachabilityModel(robot=robot)
self.load()
gp = examples.graspplanning.GraspPlanning(robot=robot,randomize=False)
gm = gp.graspables[0][0]
dests = gp.graspables[0][1]
validgrasps,validindices = gm.computeValidGrasps()
Tgrasp = gm.getGlobalGraspTransform(validgrasps[0])
densityfn,samplerfn,bounds = self.computeBaseDistribution(Tgrasp,2000)
h = self.showBaseDistribution(densityfn,bounds,zoffset=1.0,thresh=1.0)
densityfn2,samplerfn2,bounds2 = self.computeAggregateBaseDistribution([Tgrasp],2000)
h2 = self.showBaseDistribution(densityfn2,bounds2,zoffset=3.0,thresh=1.0)
def draw_inversereachability():
import inversereachability
env = openravepy.Environment()
robot = env.ReadRobotXMLFile('robots/barrettsegway.robot.xml')
env.AddRobot(robot)
self = inversereachability.InverseReachabilityModel(robot=robot)
self.load()
self.env.SetViewer('qtcoin')
self.showEquivalenceClass(self.equivalenceclasses[0])
def test_graspplanning():
import graspplanning
env = Environment()
env.SetViewer('qtcoin')
env.Reset()
env.Load('data/lab1.env.xml')#wamtest1.env.xml')
robot = env.GetRobots()[0]
self = graspplanning.GraspPlanning(robot)
gmodel=self.graspables[0][0]
dests=self.graspables[0][1]
self.graspAndPlaceObject(gmodel=gmodel,dests=dests)
# draw all valid grasps
configs = []
for gmodel,dests in self.graspables:
validgrasps = gmodel.computeValidGrasps(checkcollision=True)[0]
for g in validgrasps:
T = dot(gmodel.getGlobalGraspTransform(g),linalg.inv(robot.GetActiveManipulator().GetGraspTransform()))
configs.append((T,g[gmodel.graspindices['igrasppreshape']]))
robotfilename='robots/barretthand.robot.xml'
transparency=0.7
newrobots=[]
for T,preshape in configs:
print len(newrobots)
newrobot = env.ReadRobotXMLFile(robotfilename)
for link in newrobot.GetLinks():
for geom in link.GetGeometries():
geom.SetTransparency(transparency)
env.AddRobot(newrobot,True)
newrobot.SetTransform(T)
newrobot.SetDOFValues(preshape)
newrobots.append(newrobot)
for newrobot in newrobots:
env.RemoveKinBody(newrobot)
newrobots=[]
def test_graspreachability():
import mobilemanipulation,graspplanning
env = Environment()
env.SetViewer('qtcoin')
env.Reset()
env.Load('data/lab1.env.xml')
robot = env.GetRobots()[0]
planning = graspplanning.GraspPlanning(robot)
gmodel=planning.graspables[0][0]
dests=planning.graspables[0][1]
self = mobilemanipulation.GraspReachability(robot=robot,gmodel=gmodel)
starttime = time.time()
densityfn,samplerfn,bounds,validgrasps = self.computeGraspDistribution(logllthresh=2.4)
print 'time to build distribution: %fs'%(time.time()-starttime)
h = self.irmodel.showBaseDistribution(densityfn,bounds,self.target.GetTransform()[2,3],thresh=1.0)
def test_mobilemanipulation():
import mobilemanipulation
env = Environment()
env.SetViewer('qtcoin')
env.Reset()
env.Load('data/lab1.env.xml')
robot = env.GetRobots()[0]
self = mobilemanipulation.MobileManipulationPlanning(robot)
gmodel=self.graspables[0][0]
dests=self.graspables[0][1]
def test_visibilityplanning():
import visibilityplanning, time
self = visibilityplanning.PA10GraspExample()
self.loadscene(scenefilename='data/pa10grasp.env.xml',randomize=False,sensorname='wristcam',showsensors=False)
self.start()
def test_pyann():
ktree = pyANN.KDTree(random.rand(10,7))
neighs,dists = ktree.kSearch(random.rand(7),5,1e-3);
def test_ikfast():
import openravepy
env = openravepy.Environment()
robot = env.ReadRobotXMLFile('robots/barrettwam.robot.xml')
env.AddRobot(robot)
manip = robot.GetManipulators()[0]
solvejoints = list(manip.GetArmJoints())
freejoints = [solvejoints.pop(2)]
solvefn=openravepy.ikfast.IKFastSolver.solveFullIK_6D
self = openravepy.ikfast.IKFastSolver(kinbody=robot)
baselink = manip.GetBase().GetIndex()
eelink = manip.GetEndEffector().GetIndex()
usedummyjoints = False
code = self.generateIkSolver(baselink=baselink,eelink=eelink,solvejoints=solvejoints,freeparams=freejoints,usedummyjoints=usedummyjoints,solvefn=solvefn)
def test_gripper():
env = Environment()
env.SetViewer('qtcoin')
env.Load('data/lab1.env.xml')
robot = env.GetRobots()[0]
basemanip = openravepy.interfaces.BaseManipulation(robot)
manip = robot.GetActiveManipulator()
robot.SetActiveDOFs(manip.GetGripperJoints())
basemanip.ReleaseFingers(execute=True)
def test_constraintplanning():
import constraintplanning
env = Environment()
env.SetViewer('qtcoin')
env.Load('data/lab1.env.xml')
robot = env.GetRobots()[0]
env.UpdatePublishedBodies()
time.sleep(0.1) # give time for environment to update
self = constraintplanning.ConstraintPlanning(robot)
self.robot.SetJointValues([-0.90993702, 1.4134903 , 1.18074048, 1.6281302 , -1.42419982, 1.17677045, -2.48384023, 0.98699927, 0.59599888, 1.1350019 , 0])
target = self.gmodel.target
robot.Grab(target)
T = self.manip.GetEndEffectorTransform()
T[2,3] += 0.4
constraintfreedoms = [1,1,0,0,0,0]
constraintmatrix = eye(4)
constrainterrorthresh = 1e-2
res = self.basemanip.MoveToHandPosition(matrices=[T],maxiter=10000,maxtries=1,seedik=8,constraintfreedoms=constraintfreedoms,constraintmatrix=constraintmatrix,constrainterrorthresh=constrainterrorthresh)
#self.performGraspPlanning()
def test_convex():
import convexdecomposition
env = Environment()
#env.SetViewer('qtcoin')
env.Load('data/lab1.env.xml')
robot = env.GetRobots()[0]
self = convexdecomposition.ConvexDecompositionModel(robot)
self.load()
hulls = self.linkgeometry[0][0][1]
def test_linkstatistics():
import linkstatistics
from itertools import izip
from enthought.tvtk.api import tvtk
env = openravepy.Environment()
robot = env.ReadRobotXMLFile('robots/barrettsegway.robot.xml')
env.AddRobot(robot)
robot.SetTransform(eye(4))
self = linkstatistics.LinkStatisticsModel(robot)
self.load()
self.robot.SetTransform(eye(4))
links = self.robot.GetLinks()
ilink = 14
link = links[ilink]
linkcd = self.cdmodel.linkgeometry[ilink]
hulls = []
for ig,geom in enumerate(link.GetGeometries()):
cdhulls = [cdhull for i,cdhull in linkcd if i==ig]
if len(cdhulls) > 0:
hulls += [self.transformHull(geom.GetTransform(),hull) for hull in cdhulls[0]]
elif geom.GetType() == KinBody.Link.GeomProperties.Type.Box:
hulls.append(self.transformHull(geom.GetTransform(),ComputeBoxMesh(geom.GetBoxExtents())))
elif geom.GetType() == KinBody.Link.GeomProperties.Type.Sphere:
hulls.append(self.transformHull(geom.GetTransform(),ComputeGeodesicSphereMesh(geom.GetSphereRadius(),level=1)))
elif geom.GetType() == KinBody.Link.GeomProperties.Type.Cylinder:
hulls.append(self.transformHull(geom.GetTransform(),ComputeCylinderYMesh(radius=geom.GetCylinderRadius(),height=geom.GetCylinderHeight())))
linkstat = self.computeGeometryStatistics(hulls)
ijoint = 3
joint = self.robot.GetJoints()[0]
lower,upper = joint.GetLimits()
axis=joint.GetAxis(0)
minangle=lower[0]
maxangle=upper[0]
Tlinkjoint = self.robot.GetLinks()[ilink].GetTransform()
Tlinkjoint[0:3,3] -= joint.GetAnchor() # joint anchor should be at center
volumepoints = dot(linkstat['volumepoints'],transpose(Tlinkjoint[0:3,0:3]))
volumepoints += tile(Tlinkjoint[0:3,3],(len(volumepoints),1))
sweptpoints,sweptindices = self.computeSweptVolume(volumepoints=volumepoints,axis=axis,minangle=minangle,maxangle=maxangle)
def test_contours():
from enthought.tvtk.api import tvtk
from numpy import *
import pickle,numpy
from openravepy import *
env = Environment()
env.SetViewer('qtcoin')
N = 50
Nj = N*(0+1j)
x, y, z = numpy.mgrid[-10:10:Nj, -10:20:Nj, -10:40:Nj]
scalars = x*x + 2.0*y*y + z*z/2.0
spacing=array((0.005,0.005,0.005))
id = tvtk.ImageData(origin=array((numpy.min(x),numpy.min(y),numpy.min(z))),spacing=spacing,dimensions=scalars.shape)
id.point_data.scalars = scalars.ravel()
x,y,z,t,sweptdata = pickle.load(open('tris.pp','r'))
sweptdata = array(sweptdata,'float64')
#sweptdata = sweptdata[0:61,0:60,0:60]
id = tvtk.ImageData(origin=array((0,0,0)),spacing=array((0.005,0.005,0.005)),dimensions=sweptdata.shape[::-1])
id.point_data.scalars = 100.0*sweptdata.ravel()
m = tvtk.MarchingCubes()
m.set_input(id)
m.set_value(0,0.5)
m.update()
o = m.get_output()
newpoints = array(o.points)
h = env.plot3 (points=newpoints,pointsize=2.0,colors=array((1,0,0)))
indices = array(o.polys.data)
indices = array(reshape(indices,(len(indices)/4,4)),'int')
h2 = env.drawtrimesh (points=newpoints,indices=indices[:,1:4],colors=array((0,0,1,0.5)))
def test_jointweights():
jointdv = array([v['volumedelta'] for v in self.jointvolumes ])
linkdv = array([v['volume'] for v in self.linkstats])
def test_simplenavigation():
import simplenavigation
env = Environment()
env.SetViewer('qtcoin')
env.Load('data/lab1.env.xml')
robot = env.GetRobots()[0]
self = simplenavigation.SimpleNavigationPlanning(robot)
self.robot.SetAffineTranslationMaxVels([0.5,0.5,0.5])
self.robot.SetAffineRotationAxisMaxVels(ones(4))
self.robot.SetActiveDOFs([],Robot.DOFAffine.X|Robot.DOFAffine.Y|Robot.DOFAffine.RotationAxis,[0,0,1])
self.basemanip.MoveActiveJoints(goal=[0.737,0.304,0])
def test_sampling():
import heapq
self = SpaceSampler()
stats = []
for level in range(5):
theta,pfi = self.sampleS2(level)
dirs = c_[cos(theta),sin(theta)*cos(pfi),sin(theta)*sin(pfi)]
dists = [heapq.nsmallest(2,arccos(dot(dirs,dir)))[1] for dir in dirs]
stats.append([level,mean(dists)])
def test_hrp2():
"""Shell commands used to generate the HRP2 databases; kept in a docstring so the
module still parses. They are meant to be run from a terminal, not as Python code.
python convexdecomposition.py --volumeSplitThresholdPercent=5 --mergeThresholdPercent=10 --padding=0.005
rosrun openrave_database kinematicreachability_ros.py --manipname=leftarm --xyzdelta=0.04 --launchservice='8*localhost'
python kinematicreachability.py --manipname=rightarm --xyzdelta=0.02
python kinematicreachability.py --manipname=leftarm --xyzdelta=0.02
python kinematicreachability.py --manipname=rightarm_chest --xyzdelta=0.02
python inversereachability.py --manipname=rightarm --heightthresh=0.02 --quatthresh=0.2
python inversereachability.py --manipname=leftarm --heightthresh=0.02 --quatthresh=0.2
python inversereachability.py --manipname=rightarm_chest --heightthresh=0.02 --quatthresh=0.2
python inversereachability.py --manipname=leftarm_chest --heightthresh=0.02 --quatthresh=0.2
python inversereachability.py --manipname=leftarm_chest --heightthresh=0.02 --quatthresh=0.2 --id=0 --jointvalues='0'
python inversereachability.py --manipname=leftarm_chest --heightthresh=0.02 --quatthresh=0.2 --id=43 --jointvalues='0.43'
python grasping.py --robot=robots/hrp2jsk.robot.xml --manipname=rightarm --target=scenes/cereal_frootloops.kinbody.xml --standoff=0 --boxdelta=0.01 --normalanglerange=1 --avoidlink=RWristCam
python grasping.py --robot=robots/hrp2jsk.robot.xml --manipname=leftarm --target=scenes/cereal_frootloops.kinbody.xml --standoff=0 --boxdelta=0.01 --normalanglerange=1 --graspingnoise=0.01 --noviewer
rosrun openrave_database grasping_ros.py --robot=robots/hrp2jsk.robot.xml --manipname=leftarm_chest --target=scenes/cereal_frootloops.kinbody.xml --standoff=0 --boxdelta=0.01 --normalanglerange=1 --graspingnoise=0.01 --launchservice='8*localhost'
rosrun openrave_database grasping_ros.py --robot=robots/hrp2jsk.robot.xml --manipname=leftarm_chest2 --target=scenes/jskcup0.kinbody.xml --standoff=0 --boxdelta=0.01 --normalanglerange=1 --graspingnoise=0.01 --launchservice='8*localhost'
"""
import inversereachability
env = Environment()
robot = env.ReadRobotXMLFile('robots/hrp2jsk.robot.xml')
env.AddRobot(robot)
robot.SetActiveManipulator('leftarm')
self = inversereachability.InverseReachabilityModel(robot=robot)
heightthresh=0.02
quatthresh=0.1
self.generate(heightthresh=heightthresh,quatthresh=quatthresh)
hand = env.ReadRobotXMLFile('robots/hrp2rhandjsk.robot.xml')
env.AddRobot(hand)
hand.SetTransform(Tgrasp)
# test head movement
import inversekinematics
env = Environment()
robot = env.ReadRobotXMLFile('robots/hrp2jsk08.robot.xml')
env.AddRobot(robot)
robot.SetActiveManipulator('head')
manip = robot.GetActiveManipulator()
ikmodel = inversekinematics.InverseKinematicsModel(robot,IkParameterization.Type.Direction3D)
if not ikmodel.load():
ikmodel.generate()
import inversereachability,mobilemanipulation,graspplanning,visibilitymodel
env = Environment()
env.SetViewer('qtcoin')
env.Reset()
env.Load('scenes/r602kitchen1.env.xml')
robot = env.GetRobots()[0]
origjointvalues = robot.GetJointValues()
# define all the manipulators to use
manips = [robot.GetManipulators('rightarm_chest')[0], robot.GetManipulators('rightarm_chest2')[0]]#,robot.GetManipulators('leftarm_chest')[0], robot.GetManipulators('leftarm_chest2')[0]]
irmodels = []
with robot:
for manip in manips:
robot.SetActiveManipulator(manip)
dofindices = inversereachability.InverseReachabilityModel.getdofindices(manip)
for id,value in [('0',[0]),('43',[0.43]),('n43',[-0.43])]:
robot.SetJointValues(value,dofindices)
irmodel = inversereachability.InverseReachabilityModel(robot=robot,id=id)
if irmodel.load():
irmodels.append(irmodel)
else:
print 'failed to load irmodel',manip.GetName(),id
irgmodels = []
targets = []
for manip in manips:
robot.SetActiveManipulator(manip)
planning = graspplanning.GraspPlanning(robot,nodestinations=True)
for gmodel,dests in planning.graspables:
if True:#gmodel.target.GetName() == 'cereal0' or gmodel.target.GetName() == 'cereal1':
for irmodel in irmodels:
if irmodel.manip == gmodel.manip:
irgmodels.append([irmodel,gmodel])
if not gmodel.target in targets:
targets.append(gmodel.target)
grmodel = mobilemanipulation.GraspReachability(robot=robot,irgmodels=irgmodels)
self = mobilemanipulation.MobileManipulationPlanning(robot,grmodel=grmodel)
usevisibilitycamera = 'wristcam'
gmodel = self.graspObjectMobileSearch(usevisibilitycamera=usevisibilitycamera)
table = env.GetKinBody('table')
if table is not None:
graspables = None
Trolls = [matrixFromAxisAngle(array((0,0,1)),roll) for roll in arange(0,2*pi,pi/4)]
alldests = graspplanning.GraspPlanning.setRandomDestinations(targets,table,transdelta=0.05,Trolls=Trolls,randomize=False)
targetdests=zip(targets,alldests)
self.graspAndPlaceObjectMobileSearch(targetdests=targetdests)
#h = gr.showBaseDistribution(thresh=1.0,logllthresh=logllthresh)
#grmodel.testSampling(weight=1.5,logllthresh=0.5,randomgrasps=True,randomplacement=False,updateenv=False)
validgrasps,validindices = gr.gmodel.computeValidGrasps(checkik=False,backupdist=0.01)
grmodel.gmodel.showgrasp(validgrasps[0],collisionfree=True)
densityfn,samplerfn,bounds,validgrasps = gr.computeGraspDistribution(logllthresh=logllthresh)
goals,numfailures=gr.sampleGoals(lambda goals: samplerfn(goals,weight=1.0),updateenv=True)
grasp,pose,q = goals[0]
robot.SetTransform(pose)
robot.SetJointValues(q)
basemanip.CloseFingers()
grasp = gr.gmodel.grasps[283]
Tgrasp = gr.gmodel.getGlobalGraspTransform(grasp,collisionfree=True)
equivalenceclass,logll = gr.irmodel.getEquivalenceClass(Tgrasp)
densityfn,samplerfn,bounds = gr.irmodel.computeBaseDistribution(Tgrasp,logllthresh=logllthresh)
h = gr.irmodel.showBaseDistribution(densityfn,bounds,zoffset=gr.target.GetTransform()[2,3],thresh=1.0)
env = Environment()
robot = env.ReadRobotXMLFile('robots/hrp2jsk08.robot.xml')
env.AddRobot(robot)
body = env.ReadKinBodyXMLFile('scenes/cereal_frootloops.kinbody.xml')
env.AddKinBody(body)
T = eye(4)
T[0:3,3] = [0.466,-0.157,0.544]
body.SetTransform(T)
robot.SetActiveManipulator('rightarm_chest')
robot.Grab(body)
robot.SetJointValues([-1.4,1.35239005,1.036349],[5,7,8])
robot.CheckSelfCollision()
def test_visibility():
import visibilitymodel
env = Environment()
env.Reset()
robot = env.ReadRobotXMLFile('robots/hrp2jsk08real.robot.xml')
env.AddRobot(robot)
robot.SetActiveManipulator('rightarm_chest')
target = env.ReadKinBodyXMLFile('scenes/cereal_frootloops.kinbody.xml')
env.AddKinBody(target)
robot.SetActiveManipulator('rightarm_chest')
self = visibilitymodel.VisibilityModel(robot=robot,target=target,sensorname='wristcam')
if not self.load():
self.autogenerate()
env.SetViewer('qtcoin')
self.showtransforms()
import kinematicreachability, grasping, visibilitymodel
pose = array([-0.88932403, 0. , 0. , -0.45727755, 8.26839721, 3.14201928, 0.66500002])
grasp = array([ 1.65648009e-06, -2.83057716e-06, 1.00000000e+00,
9.23879445e-01, 3.82683724e-01, -4.47596904e-07,
-3.82683724e-01, 9.23879445e-01, 3.24873599e-06,
-4.32802886e-02, 8.53489910e-04, 7.89998472e-02,
0.00000000e+00, 4.71238898e+00, 9.23879477e-01,
3.82683432e-01, -5.50675114e-08, 1.65648009e-06,
-2.83057716e-06, 1.00000000e+00, 9.23879445e-01,
3.82683724e-01, -4.47596904e-07, -3.82683724e-01,
9.23879445e-01, 3.24873599e-06, -4.23564091e-02,
1.23617332e-03, 7.89998472e-02, 6.41213000e-03,
-2.11999994e-02, 9.99999978e-03, 7.90000036e-02,
2.35619450e+00])
robot.SetTransform(pose)
robot.SetActiveManipulator('rightarm_chest')
# rmodel = kinematicreachability.ReachabilityModel(robot=robot)
# rmodel.load()
gmodel = grasping.GraspingModel(robot,target=env.GetKinBody('cereal0'))
gmodel.moveToPreshape(grasp)
gmodel.robot.GetController().Reset(0)
vmodel = visibilitymodel.VisibilityModel(robot=robot,target=gmodel.target,sensorname='wristcam')
vmodel.load()
self = vmodel
self.SetCameraTransforms(self.pruneTransformations(thresh=0.04))
validjoints=self.computeValidTransform()
self.robot.SetJointValues(validjoints[0][0],self.manip.GetArmJoints())
s=vmodel.visualprob.SampleVisibilityGoal(target=gmodel.target)
pts = array([dot(self.target.GetTransform(),matrixFromPose(pose))[0:3,3] for pose in self.visibilitytransforms])
h=self.env.plot3(pts,5,colors=array([0.5,0.5,1,0.03]))
def test_navigation():
import inversereachability,mobilemanipulation,graspplanning,visibilitymodel
env = Environment()
env.SetViewer('qtcoin')
env.Reset()
env.Load('scenes/r602kitchen1.env.xml')
robot = env.GetRobots()[0]
robot.GetController().Reset(0)
Tstart = array([[ 1. , 0. , 0. , 6.26000023-7.37],
[ 0. , 1. , 0. , 2.66899991-3.2],
[ 0. , 0. , 1. , 0.66500002],
[ 0. , 0. , 0. , 1. ]])
robot.SetTransform(Tstart)
robot.SetJointValues(array([ 0. , 0. , 0.84761411, 0. , 0. ,
-2.20907021, 0. , 0. , 0. , 0.97831494,
0. , 0. , -2.23727012, 0. , 0. ,
0. , 0. , 0. , -0.17453291, 0.17453295], dtype=float32))
self = mobilemanipulation.MobileManipulationPlanning(robot)
goal2d = array([0.29134295742674898, -0.26705494655604034, -3.1834347453894472])
#goal2d = array([6.2547940332687197-7.37, 2.2240884123771689-3.2, -6.0887479146975627])
envmin = []
envmax = []
for b in self.env.GetBodies():
ab = b.ComputeAABB()
envmin.append(ab.pos()-ab.extents())
envmax.append(ab.pos()+ab.extents())
abrobot = self.robot.ComputeAABB()
envmin = numpy.min(array(envmin),0)+abrobot.extents()
envmax = numpy.max(array(envmax),0)-abrobot.extents()
bounds = array(((envmin[0],envmin[1],-pi),(envmax[0],envmax[1],pi)))
self.robot.SetAffineTranslationLimits(envmin,envmax)
self.robot.SetAffineTranslationMaxVels([0.5,0.5,0.5])
self.robot.SetAffineRotationAxisMaxVels(ones(4))
self.robot.SetActiveDOFs([],Robot.DOFAffine.X|Robot.DOFAffine.Y|Robot.DOFAffine.RotationAxis,[0,0,1])
center = r_[goal2d[0:2],0.2]
xaxis = 0.5*array((cos(goal2d[2]),sin(goal2d[2]),0))
yaxis = 0.25*array((-sin(goal2d[2]),cos(goal2d[2]),0))
#self.hgoal = self.env.drawlinelist(transpose(c_[center-xaxis,center+xaxis,center-yaxis,center+yaxis]),linewidth=5.0,colors=array((0,1,0)))
env.SetDebugLevel(DebugLevel.Debug)
starttime = time.time()
self.basemanip.MoveActiveJoints(goal=goal2d,maxiter=3000,steplength=0.05)
print time.time()-starttime
def test_fatmodels():
env=Environment()
env.SetViewer('qtcoin')
env.Load('scenes/r602kitchen3.env.xml')
robot=env.GetRobots()[0]
taskmanip=TaskManipulation(robot)
env.SetDebugLevel(DebugLevel.Verbose)
taskmanip.SwitchModels(switchpatterns=[('frootloops(+d)$','scenes/cereal_frootloops_fat.kinbody.xml')])
taskmanip.SwitchModels(switch=True)
def test_pr2():
from openravepy.examples import inversekinematics, grasping
env=Environment()
robot=env.ReadRobotXMLFile('robots/pr2-beta-sim.robot.xml')
env.AddRobot(robot)
# kinematics
# python inversekinematics.py --robot=robots/pr2-beta-sim.robot.xml --manipname=rightarm --freejoint=r_shoulder_pan_joint --numiktests=10
# python inversekinematics.py --robot=robots/pr2-beta-sim.robot.xml --manipname=rightarm_torso --freejoint=r_shoulder_pan_joint --freejoint=torso_lift_joint --numiktests=10
manipnames = ['leftarm','rightarm','leftarm_torso','rightarm_torso']
for manipname in manipnames:
manip=robot.SetActiveManipulator(manipname)
ikmodel=inversekinematics.InverseKinematicsModel(robot=robot,iktype=IkParameterization.Type.Transform6D)
if not ikmodel.load():
ikmodel.autogenerate()
rmodel = kinematicreachability.ReachabilityModel(robot)
if not rmodel.load():
rmodel.autogenerate()
# grasping
# python grasping.py --robot=robots/pr2-beta-sim.robot.xml --target=data/box_frootloops.kinbody.xml --boxdelta=0.01 --standoff=0 --standoff=0.02 --standoff=0.05 --normalanglerange=1 --roll=0 --roll=1.5707963 --roll=3.141592 --roll=4.7123889 --graspingnoise=0.01
with env:
target=env.ReadKinBodyXMLFile('data/box_frootloops.kinbody.xml')
env.AddKinBody(target)
for manipname in ['leftarm','rightarm']:
robot.SetActiveManipulator(manipname)
gmodel = grasping.GraspingModel(robot,target)
if not gmodel.load():
with gmodel.target:
gmodel.target.Enable(False)
final,traj = gmodel.basemanip.ReleaseFingers(execute=False,outputfinal=True)
gmodel.generate(preshapes = array([final]),rolls=arange(0,2*pi,pi/2),graspingnoise=0.01,standoffs=[0,0.02,0.05],approachrays=gmodel.computeBoxApproachRays(0.01,normalanglerange=1,directiondelta=0.1))
# create symbolic links
def test_calibviews():
import calibrationviews
env=Environment()
env.SetViewer('qtcoin')
env.Load('scenes/pa10lab.env.xml')#data/pa10calib_envcamera.env.xml')
robot=env.GetRobots()[0]
self = calibrationviews.CalibrationViews(robot,sensorrobot=env.GetRobot('ceilingcamera'))
dists=arange(0.05,2.0,0.15)
orientationdensity=1
self.computeAndMoveToObservations()
for i,relativepose in enumerate(visibilitytransforms):
pose = array(posebase)
pose = poseMult(pose,relativepose)
q = self.vmodel.manip.FindIKSolution(dot(matrixFromPose(pose),self.Tpatternrobot),True)
if q is not None:
print i
#self.robot.SetJointValues(q,self.vmodel.manip.GetArmJoints())
self.vmodel.visualprob.ComputeVisibleConfiguration(pose=pose)
raw_input('asdf')
def test_freejoints():
env=Environment()
env.SetViewer('qtcoin')
env.Load('data/lab1.env.xml')
robot=env.GetRobots()[0]
robot.GetController().Reset(0)
robot.SetJointValues(array([ -3.64122450e-01, 1.27151251e+00, -7.88666554e-09, 1.29461884e+00, -2.69412994e-05, 4.65967804e-01, 9.38504954e-08, 2.44345713e+00, 2.44345832e+00, 2.44345665e+00, 0.00000000e+00]))
task=interfaces.TaskManipulation(robot)
basemanip=interfaces.BaseManipulation(robot)
m=robot.GetActiveManipulator()
robot.SetActiveDOFs(m.GetArmIndices())
basemanip.MoveUnsyncJoints(jointvalues=[0,0,0,0],jointinds=m.GetGripperIndices())
task.ReleaseFingers()
def test_pr2movehandstraight():
env=Environment()
env.Load('robots/pr2-beta-static.robot.xml')
robot=env.GetRobots()[0]
RaveSetDebugLevel(DebugLevel.Debug)
basemanip = interfaces.BaseManipulation(robot)
lmodel = databases.linkstatistics.LinkStatisticsModel(robot)
if not lmodel.load():
lmodel.autogenerate()
lmodel.setRobotWeights()
lmodel.setRobotResolutions()
with env:
robot.GetController().Reset(0)
robot.SetDOFValues(array([ 3.92742505e+00, -1.11998514e+02, -1.12338294e+02, -2.58996561e+01, -4.50039340e+02, -4.94179434e+02, -1.02276493e+01, -7.15039024e+02, -7.36311760e+02, 1.02395814e+01, 2.59771698e+02, 2.57443795e+02, 2.30174678e-01, 1.46851569e-02, -4.67573707e-01, 5.14696340e-03, 8.37552006e-01, 1.02808376e+00, -2.17092539e+00, 7.99473354e+00, -1.90625735e+00, 3.75677048e-02, 7.16413010e-03, 9.25338303e-04, 1.30856421e-02, -1.44843374e-01, 1.06201002e+00, -1.84726393e+00, -1.53293256e+00, 1.22156997e+00, 4.56456176e-03, -5.94439315e+00, 7.32226686e-03, 9.54206204e-04]))
Tgoal = array([[ 1.77635684e-15, 0.00000000e+00, 1.00000000e+00, 0.6],
[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00, 0],
[ -1.00000000e+00, 0.00000000e+00, 1.77635684e-15, 1.2],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
robot.SetActiveManipulator('rightarm_torso')
manip = robot.GetManipulator('rightarm_torso')
sol = manip.FindIKSolution(Tgoal,IkFilterOptions.CheckEnvCollisions)
for iter in range(10):
basemanip.MoveToHandPosition(matrices=[Tgoal],execute=False)
robot.SetDOFValues(sol,manip.GetArmIndices())
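# --- Editor's sketch (not part of the original file) ------------------------------
# Nearly every test above repeats the same OpenRAVE boilerplate; it is collected
# here once, in the same Python 2 style as the rest of the file, assuming the
# sample scene data/lab1.env.xml shipped with OpenRAVE.
def _example_environment_setup():
    env = Environment()
    env.SetViewer('qtcoin')
    env.Load('data/lab1.env.xml')
    robot = env.GetRobots()[0]
    basemanip = BaseManipulation(robot)
    return env, robot, basemanip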
| 29,337
| 0
| 671
|
77e4d9b317e4a31644728d015d912e3836a26295
| 1,009
|
py
|
Python
|
python/package/model_2.py
|
pchtsp/ROADEF2018
|
442dfe919a41fa993155226b601625917e632577
|
[
"MIT"
] | null | null | null |
python/package/model_2.py
|
pchtsp/ROADEF2018
|
442dfe919a41fa993155226b601625917e632577
|
[
"MIT"
] | null | null | null |
python/package/model_2.py
|
pchtsp/ROADEF2018
|
442dfe919a41fa993155226b601625917e632577
|
[
"MIT"
] | null | null | null |
import package.tuplist as tl
import package.superdict as sd
import pulp as pl
import package.config as conf
import package.params as pm
import numpy as np
import pprint as pp
| 32.548387
| 82
| 0.65114
|
import package.tuplist as tl
import package.superdict as sd
import pulp as pl
import package.config as conf
import package.params as pm
import numpy as np
import pprint as pp
def solve_model(self, options):
plates = self.trees
bins = range(len(self.trees))
plates_bins = [(p, b) for p in plates for b in bins]
model = pl.LpProblem("PHASE_2", pl.LpMinimize)
print('Creating variables')
# variables:
# each plate has a position in the bin and a bin assignment.
plate_bin = pl.LpVariable.dicts(name='plate_bin', indexs=plates_bins,
lowBound=0, upBound=1, cat=pl.LpInteger)
plate_pos = pl.LpVariable.dicts(name='plate_pos', indexs=plates,
lowBound=0, upBound=3200, cat=pl.LpContinuous)
# items have horizontal and vertical movement
item_pos = pl.LpVariable.dicts(name='item_pos', indexs=plates,
lowBound=0, upBound=3200, cat=pl.LpContinuous)
return
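# --- Editor's sketch (not part of the original module) ----------------------------
# A self-contained illustration of the pl.LpVariable.dicts pattern used in
# solve_model, with a toy index set so it does not need the package's tree
# structures. It uses the same `indexs=` keyword spelling as the code above.
if __name__ == "__main__":
    toy_plates = [0, 1, 2]
    toy_bins = [0, 1]
    toy_pairs = [(p, b) for p in toy_plates for b in toy_bins]
    assign = pl.LpVariable.dicts(name='assign', indexs=toy_pairs,
                                 lowBound=0, upBound=1, cat=pl.LpInteger)
    pos = pl.LpVariable.dicts(name='pos', indexs=toy_plates,
                              lowBound=0, upBound=3200, cat=pl.LpContinuous)
    print(len(assign), len(pos))  # 6 binary assignment variables, 3 continuous positions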
| 811
| 0
| 23
|
452928815217ec89a44a47d3ef314ea32d20e384
| 611
|
py
|
Python
|
ephios/core/admin.py
|
garinm90/ephios
|
7d04d3287ae16ee332e31add1f25829b199f29a5
|
[
"MIT"
] | null | null | null |
ephios/core/admin.py
|
garinm90/ephios
|
7d04d3287ae16ee332e31add1f25829b199f29a5
|
[
"MIT"
] | null | null | null |
ephios/core/admin.py
|
garinm90/ephios
|
7d04d3287ae16ee332e31add1f25829b199f29a5
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from guardian.admin import GuardedModelAdmin
from ephios.core.models import (
Consequence,
Event,
EventType,
LocalParticipation,
Qualification,
QualificationCategory,
QualificationGrant,
Shift,
WorkingHours,
)
admin.site.register(Qualification)
admin.site.register(QualificationGrant)
admin.site.register(QualificationCategory)
admin.site.register(WorkingHours)
admin.site.register(Consequence)
admin.site.register(Shift)
admin.site.register(Event, GuardedModelAdmin)
admin.site.register(EventType)
admin.site.register(LocalParticipation)
| 23.5
| 45
| 0.798691
|
from django.contrib import admin
from guardian.admin import GuardedModelAdmin
from ephios.core.models import (
Consequence,
Event,
EventType,
LocalParticipation,
Qualification,
QualificationCategory,
QualificationGrant,
Shift,
WorkingHours,
)
admin.site.register(Qualification)
admin.site.register(QualificationGrant)
admin.site.register(QualificationCategory)
admin.site.register(WorkingHours)
admin.site.register(Consequence)
admin.site.register(Shift)
admin.site.register(Event, GuardedModelAdmin)
admin.site.register(EventType)
admin.site.register(LocalParticipation)
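# --- Editor's sketch (not part of the original module) ----------------------------
# If a model later needs a customised changelist, the plain register() call can be
# replaced by a ModelAdmin subclass. Kept commented out so it does not alter the
# registrations above; the list_display entries are illustrative assumptions.
# class ShiftAdmin(admin.ModelAdmin):
#     list_display = ("__str__", "pk")
#
# admin.site.unregister(Shift)
# admin.site.register(Shift, ShiftAdmin)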
| 0
| 0
| 0
|
ed8ed35373a497ad4d9a9c5d8b7259a588436899
| 20,933
|
py
|
Python
|
front-end/testsuite-python-lib/Python-2.3/Lib/_strptime.py
|
MalloyPower/parsing-python
|
b2bca5eed07ea2af7a2001cd4f63becdfb0570be
|
[
"MIT"
] | 1
|
2020-11-26T18:53:46.000Z
|
2020-11-26T18:53:46.000Z
|
front-end/testsuite-python-lib/Python-2.3/Lib/_strptime.py
|
MalloyPower/parsing-python
|
b2bca5eed07ea2af7a2001cd4f63becdfb0570be
|
[
"MIT"
] | null | null | null |
front-end/testsuite-python-lib/Python-2.3/Lib/_strptime.py
|
MalloyPower/parsing-python
|
b2bca5eed07ea2af7a2001cd4f63becdfb0570be
|
[
"MIT"
] | 1
|
2019-04-11T11:27:01.000Z
|
2019-04-11T11:27:01.000Z
|
"""Strptime-related classes and functions.
CLASSES:
LocaleTime -- Discovers and/or stores locale-specific time information
TimeRE -- Creates regexes for pattern matching a string of text containing
time information as is returned by time.strftime()
FUNCTIONS:
_getlang -- Figure out what language is being used for the locale
strptime -- Calculates the time struct represented by the passed-in string
Requires Python 2.2.1 or higher (mainly because of the use of property()).
Can be used in Python 2.2 if the following line is added:
True = 1; False = 0
"""
import time
import locale
import calendar
from re import compile as re_compile
from re import IGNORECASE
from datetime import date as datetime_date
__author__ = "Brett Cannon"
__email__ = "brett@python.org"
__all__ = ['strptime']
class LocaleTime(object):
"""Stores and handles locale-specific information related to time.
This is not thread-safe! Attributes are lazily calculated and no
precaution is taken to check to see if the locale information has changed
since the creation of the instance in use.
ATTRIBUTES (all read-only after instance creation! Instance variables that
store the values have mangled names):
f_weekday -- full weekday names (7-item list)
a_weekday -- abbreviated weekday names (7-item list)
f_month -- full weekday names (14-item list; dummy value in [0], which
is added by code)
a_month -- abbreviated weekday names (13-item list, dummy value in
[0], which is added by code)
am_pm -- AM/PM representation (2-item list)
LC_date_time -- format string for date/time representation (string)
LC_date -- format string for date representation (string)
LC_time -- format string for time representation (string)
timezone -- daylight- and non-daylight-savings timezone representation
(3-item list; code tacks on blank item at end for
possible lack of timezone such as UTC)
lang -- Language used by instance (string)
"""
def __init__(self, f_weekday=None, a_weekday=None, f_month=None,
a_month=None, am_pm=None, LC_date_time=None, LC_time=None,
LC_date=None, timezone=None, lang=None):
"""Optionally set attributes with passed-in values."""
if f_weekday is None:
self.__f_weekday = None
elif len(f_weekday) == 7:
self.__f_weekday = list(f_weekday)
else:
raise TypeError("full weekday names must be a 7-item sequence")
if a_weekday is None:
self.__a_weekday = None
elif len(a_weekday) == 7:
self.__a_weekday = list(a_weekday)
else:
raise TypeError(
"abbreviated weekday names must be a 7-item sequence")
if f_month is None:
self.__f_month = None
elif len(f_month) == 12:
self.__f_month = self.__pad(f_month, True)
else:
raise TypeError("full month names must be a 12-item sequence")
if a_month is None:
self.__a_month = None
elif len(a_month) == 12:
self.__a_month = self.__pad(a_month, True)
else:
raise TypeError(
"abbreviated month names must be a 12-item sequence")
if am_pm is None:
self.__am_pm = None
elif len(am_pm) == 2:
self.__am_pm = am_pm
else:
raise TypeError("AM/PM representation must be a 2-item sequence")
self.__LC_date_time = LC_date_time
self.__LC_time = LC_time
self.__LC_date = LC_date
self.__timezone = timezone
if timezone:
if len(timezone) != 2:
raise TypeError("timezone names must contain 2 items")
else:
self.__timezone = self.__pad(timezone, False)
if lang:
self.__lang = lang
else:
self.__lang = _getlang()
f_weekday = property(__get_f_weekday, __set_nothing,
doc="Full weekday names")
a_weekday = property(__get_a_weekday, __set_nothing,
doc="Abbreviated weekday names")
f_month = property(__get_f_month, __set_nothing,
doc="Full month names (dummy value at index 0)")
a_month = property(__get_a_month, __set_nothing,
doc="Abbreviated month names (dummy value at index 0)")
am_pm = property(__get_am_pm, __set_nothing, doc="AM/PM representation")
timezone = property(__get_timezone, __set_nothing,
doc="Timezone representation (dummy value at index 2)")
LC_date_time = property(
__get_LC_date_time, __set_nothing,
doc=
"Format string for locale's date/time representation ('%c' format)")
LC_date = property(__get_LC_date, __set_nothing,
doc="Format string for locale's date representation ('%x' format)")
LC_time = property(__get_LC_time, __set_nothing,
doc="Format string for locale's time representation ('%X' format)")
lang = property(lambda self: self.__lang, __set_nothing,
doc="Language used for instance")
class TimeRE(dict):
"""Handle conversion from format directives to regexes."""
def __init__(self, locale_time=None):
"""Init inst with non-locale regexes and store LocaleTime object."""
#XXX: Does 'Y' need to worry about having less or more than 4 digits?
base = super(TimeRE, self)
base.__init__({
# The " \d" option is to make %c from ANSI C work
'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
'M': r"(?P<M>[0-5]\d|\d)",
'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
'w': r"(?P<w>[0-6])",
# W is set below by using 'U'
'y': r"(?P<y>\d\d)",
'Y': r"(?P<Y>\d\d\d\d)"})
base.__setitem__('W', base.__getitem__('U'))
if locale_time:
self.locale_time = locale_time
else:
self.locale_time = LocaleTime()
def __getitem__(self, fetch):
"""Try to fetch regex; if it does not exist, construct it."""
try:
return super(TimeRE, self).__getitem__(fetch)
except KeyError:
constructors = {
'A': lambda: self.__seqToRE(self.locale_time.f_weekday, fetch),
'a': lambda: self.__seqToRE(self.locale_time.a_weekday, fetch),
'B': lambda: self.__seqToRE(self.locale_time.f_month[1:],
fetch),
'b': lambda: self.__seqToRE(self.locale_time.a_month[1:],
fetch),
'c': lambda: self.pattern(self.locale_time.LC_date_time),
'p': lambda: self.__seqToRE(self.locale_time.am_pm, fetch),
'x': lambda: self.pattern(self.locale_time.LC_date),
'X': lambda: self.pattern(self.locale_time.LC_time),
'Z': lambda: self.__seqToRE(self.locale_time.timezone, fetch),
'%': lambda: '%',
}
if fetch in constructors:
self[fetch] = constructors[fetch]()
return self[fetch]
else:
raise
def __seqToRE(self, to_convert, directive):
"""Convert a list to a regex string for matching a directive."""
def sorter(a, b):
"""Sort based on length.
Done in case for some strange reason that names in the locale only
differ by a suffix and thus want the name with the suffix to match
first.
"""
try:
a_length = len(a)
except TypeError:
a_length = 0
try:
b_length = len(b)
except TypeError:
b_length = 0
return cmp(b_length, a_length)
to_convert = to_convert[:] # Don't want to change value in-place.
for value in to_convert:
if value != '':
break
else:
return ''
to_convert.sort(sorter)
regex = '|'.join(to_convert)
regex = '(?P<%s>%s' % (directive, regex)
return '%s)' % regex
def pattern(self, format):
"""Return re pattern for the format string.
Need to make sure that any characters that might be interpreted as
regex syntax is escaped.
"""
processed_format = ''
# The sub() call escapes all characters that might be misconstrued
# as regex syntax.
regex_chars = re_compile(r"([\\.^$*+?{}\[\]|])")
format = regex_chars.sub(r"\\\1", format)
whitespace_replacement = re_compile('\s+')
format = whitespace_replacement.sub('\s*', format)
while format.find('%') != -1:
directive_index = format.index('%')+1
processed_format = "%s%s%s" % (processed_format,
format[:directive_index-1],
self[format[directive_index]])
format = format[directive_index+1:]
return "%s%s" % (processed_format, format)
def compile(self, format):
"""Return a compiled re object for the format string."""
return re_compile(self.pattern(format), IGNORECASE)
def strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
"""Return a time struct based on the input data and the format string."""
time_re = TimeRE()
locale_time = time_re.locale_time
format_regex = time_re.compile(format)
found = format_regex.match(data_string)
if not found:
raise ValueError("time data did not match format: data=%s fmt=%s" %
(data_string, format))
if len(data_string) != found.end():
raise ValueError("unconverted data remains: %s" %
data_string[found.end():])
year = 1900
month = day = 1
hour = minute = second = 0
tz = -1
# weekday and julian defaulted to -1 so as to signal need to calculate values
weekday = julian = -1
found_dict = found.groupdict()
for group_key in found_dict.iterkeys():
if group_key == 'y':
year = int(found_dict['y'])
# Open Group specification for strptime() states that a %y
#value in the range of [00, 68] is in the century 2000, while
#[69,99] is in the century 1900
if year <= 68:
year += 2000
else:
year += 1900
elif group_key == 'Y':
year = int(found_dict['Y'])
elif group_key == 'm':
month = int(found_dict['m'])
elif group_key == 'B':
month = _insensitiveindex(locale_time.f_month, found_dict['B'])
elif group_key == 'b':
month = _insensitiveindex(locale_time.a_month, found_dict['b'])
elif group_key == 'd':
day = int(found_dict['d'])
elif group_key == 'H':
hour = int(found_dict['H'])
elif group_key == 'I':
hour = int(found_dict['I'])
ampm = found_dict.get('p', '').lower()
# If there was no AM/PM indicator, we'll treat this like AM
if ampm in ('', locale_time.am_pm[0].lower()):
# We're in AM so the hour is correct unless we're
# looking at 12 midnight.
# 12 midnight == 12 AM == hour 0
if hour == 12:
hour = 0
elif ampm == locale_time.am_pm[1].lower():
# We're in PM so we need to add 12 to the hour unless
# we're looking at 12 noon.
# 12 noon == 12 PM == hour 12
if hour != 12:
hour += 12
elif group_key == 'M':
minute = int(found_dict['M'])
elif group_key == 'S':
second = int(found_dict['S'])
elif group_key == 'A':
weekday = _insensitiveindex(locale_time.f_weekday,
found_dict['A'])
elif group_key == 'a':
weekday = _insensitiveindex(locale_time.a_weekday,
found_dict['a'])
elif group_key == 'w':
weekday = int(found_dict['w'])
if weekday == 0:
weekday = 6
else:
weekday -= 1
elif group_key == 'j':
julian = int(found_dict['j'])
elif group_key == 'Z':
# Since -1 is default value only need to worry about setting tz if
# it can be something other than -1.
found_zone = found_dict['Z'].lower()
if locale_time.timezone[0] == locale_time.timezone[1] and \
time.daylight:
pass #Deals with bad locale setup where timezone info is
# the same; first found on FreeBSD 4.4.
elif found_zone in ("utc", "gmt"):
tz = 0
elif locale_time.timezone[2].lower() == found_zone:
tz = 0
elif time.daylight and \
locale_time.timezone[3].lower() == found_zone:
tz = 1
# Cannot pre-calculate datetime_date() since can change in Julian
#calculation and thus could have different value for the day of the week
#calculation
if julian == -1:
# Need to add 1 to result since first day of the year is 1, not 0.
julian = datetime_date(year, month, day).toordinal() - \
datetime_date(year, 1, 1).toordinal() + 1
else: # Assume that if they bothered to include Julian day it will
#be accurate
datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal())
year = datetime_result.year
month = datetime_result.month
day = datetime_result.day
if weekday == -1:
weekday = datetime_date(year, month, day).weekday()
return time.struct_time((year, month, day,
hour, minute, second,
weekday, julian, tz))
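The %y branch above implements the Open Group pivot rule for two-digit years. A standalone sketch of just that rule (the helper name is made up, not part of this module):

def _expand_two_digit_year(year):
    # Mirrors the %y handling above: [00, 68] -> 2000s, [69, 99] -> 1900s.
    if year <= 68:
        return year + 2000
    return year + 1900

assert _expand_two_digit_year(5) == 2005
assert _expand_two_digit_year(68) == 2068
assert _expand_two_digit_year(69) == 1969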
| 40.024857
| 105
| 0.558878
|
"""Strptime-related classes and functions.
CLASSES:
LocaleTime -- Discovers and/or stores locale-specific time information
TimeRE -- Creates regexes for pattern matching a string of text containing
time information as is returned by time.strftime()
FUNCTIONS:
_getlang -- Figure out what language is being used for the locale
strptime -- Calculates the time struct represented by the passed-in string
Requires Python 2.2.1 or higher (mainly because of the use of property()).
Can be used in Python 2.2 if the following line is added:
True = 1; False = 0
"""
import time
import locale
import calendar
from re import compile as re_compile
from re import IGNORECASE
from datetime import date as datetime_date
__author__ = "Brett Cannon"
__email__ = "brett@python.org"
__all__ = ['strptime']
def _getlang():
# Figure out what the current language is set to.
return locale.getlocale(locale.LC_TIME)
class LocaleTime(object):
"""Stores and handles locale-specific information related to time.
This is not thread-safe! Attributes are lazily calculated and no
precaution is taken to check to see if the locale information has changed
since the creation of the instance in use.
ATTRIBUTES (all read-only after instance creation! Instance variables that
store the values have mangled names):
f_weekday -- full weekday names (7-item list)
a_weekday -- abbreviated weekday names (7-item list)
    f_month -- full month names (13-item list; dummy value in [0], which
                is added by code)
    a_month -- abbreviated month names (13-item list; dummy value in
                [0], which is added by code)
am_pm -- AM/PM representation (2-item list)
LC_date_time -- format string for date/time representation (string)
LC_date -- format string for date representation (string)
LC_time -- format string for time representation (string)
timezone -- daylight- and non-daylight-savings timezone representation
(3-item list; code tacks on blank item at end for
possible lack of timezone such as UTC)
lang -- Language used by instance (string)
"""
def __init__(self, f_weekday=None, a_weekday=None, f_month=None,
a_month=None, am_pm=None, LC_date_time=None, LC_time=None,
LC_date=None, timezone=None, lang=None):
"""Optionally set attributes with passed-in values."""
if f_weekday is None:
self.__f_weekday = None
elif len(f_weekday) == 7:
self.__f_weekday = list(f_weekday)
else:
raise TypeError("full weekday names must be a 7-item sequence")
if a_weekday is None:
self.__a_weekday = None
elif len(a_weekday) == 7:
self.__a_weekday = list(a_weekday)
else:
raise TypeError(
"abbreviated weekday names must be a 7-item sequence")
if f_month is None:
self.__f_month = None
elif len(f_month) == 12:
self.__f_month = self.__pad(f_month, True)
else:
raise TypeError("full month names must be a 12-item sequence")
if a_month is None:
self.__a_month = None
elif len(a_month) == 12:
self.__a_month = self.__pad(a_month, True)
else:
raise TypeError(
"abbreviated month names must be a 12-item sequence")
if am_pm is None:
self.__am_pm = None
elif len(am_pm) == 2:
self.__am_pm = am_pm
else:
raise TypeError("AM/PM representation must be a 2-item sequence")
self.__LC_date_time = LC_date_time
self.__LC_time = LC_time
self.__LC_date = LC_date
self.__timezone = timezone
if timezone:
if len(timezone) != 2:
raise TypeError("timezone names must contain 2 items")
else:
self.__timezone = self.__pad(timezone, False)
if lang:
self.__lang = lang
else:
self.__lang = _getlang()
def __pad(self, seq, front):
# Add '' to seq to either front (is True), else the back.
seq = list(seq)
if front:
seq.insert(0, '')
else:
seq.append('')
return seq
def __set_nothing(self, stuff):
# Raise TypeError when trying to set an attribute.
raise TypeError("attribute does not support assignment")
def __get_f_weekday(self):
# Fetch self.f_weekday.
if not self.__f_weekday:
self.__calc_weekday()
return self.__f_weekday
def __get_a_weekday(self):
# Fetch self.a_weekday.
if not self.__a_weekday:
self.__calc_weekday()
return self.__a_weekday
f_weekday = property(__get_f_weekday, __set_nothing,
doc="Full weekday names")
a_weekday = property(__get_a_weekday, __set_nothing,
doc="Abbreviated weekday names")
def __get_f_month(self):
# Fetch self.f_month.
if not self.__f_month:
self.__calc_month()
return self.__f_month
def __get_a_month(self):
# Fetch self.a_month.
if not self.__a_month:
self.__calc_month()
return self.__a_month
f_month = property(__get_f_month, __set_nothing,
doc="Full month names (dummy value at index 0)")
a_month = property(__get_a_month, __set_nothing,
doc="Abbreviated month names (dummy value at index 0)")
def __get_am_pm(self):
# Fetch self.am_pm.
if not self.__am_pm:
self.__calc_am_pm()
return self.__am_pm
am_pm = property(__get_am_pm, __set_nothing, doc="AM/PM representation")
def __get_timezone(self):
# Fetch self.timezone.
if not self.__timezone:
self.__calc_timezone()
return self.__timezone
timezone = property(__get_timezone, __set_nothing,
doc="Timezone representation (dummy value at index 2)")
def __get_LC_date_time(self):
# Fetch self.LC_date_time.
if not self.__LC_date_time:
self.__calc_date_time()
return self.__LC_date_time
def __get_LC_date(self):
# Fetch self.LC_date.
if not self.__LC_date:
self.__calc_date_time()
return self.__LC_date
def __get_LC_time(self):
# Fetch self.LC_time.
if not self.__LC_time:
self.__calc_date_time()
return self.__LC_time
LC_date_time = property(
__get_LC_date_time, __set_nothing,
doc=
"Format string for locale's date/time representation ('%c' format)")
LC_date = property(__get_LC_date, __set_nothing,
doc="Format string for locale's date representation ('%x' format)")
LC_time = property(__get_LC_time, __set_nothing,
doc="Format string for locale's time representation ('%X' format)")
lang = property(lambda self: self.__lang, __set_nothing,
doc="Language used for instance")
def __calc_weekday(self):
# Set self.__a_weekday and self.__f_weekday using the calendar
# module.
a_weekday = [calendar.day_abbr[i] for i in range(7)]
f_weekday = [calendar.day_name[i] for i in range(7)]
if not self.__a_weekday:
self.__a_weekday = a_weekday
if not self.__f_weekday:
self.__f_weekday = f_weekday
def __calc_month(self):
# Set self.__f_month and self.__a_month using the calendar module.
a_month = [calendar.month_abbr[i] for i in range(13)]
f_month = [calendar.month_name[i] for i in range(13)]
if not self.__a_month:
self.__a_month = a_month
if not self.__f_month:
self.__f_month = f_month
def __calc_am_pm(self):
# Set self.__am_pm by using time.strftime().
# The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
# magical; just happened to have used it everywhere else where a
# static date was needed.
am_pm = []
for hour in (01,22):
time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
am_pm.append(time.strftime("%p", time_tuple))
self.__am_pm = am_pm
def __calc_date_time(self):
# Set self.__date_time, self.__date, & self.__time by using
# time.strftime().
# Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
# overloaded numbers is minimized. The order in which searches for
# values within the format string is very important; it eliminates
# possible ambiguity for what something represents.
time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
date_time = [None, None, None]
date_time[0] = time.strftime("%c", time_tuple)
date_time[1] = time.strftime("%x", time_tuple)
date_time[2] = time.strftime("%X", time_tuple)
for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
current_format = date_time[offset]
for old, new in (
('%', '%%'), (self.f_weekday[2], '%A'),
(self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
(self.a_month[3], '%b'), (self.am_pm[1], '%p'),
(self.timezone[0], '%Z'), (self.timezone[1], '%Z'),
('1999', '%Y'), ('99', '%y'), ('22', '%H'),
('44', '%M'), ('55', '%S'), ('76', '%j'),
('17', '%d'), ('03', '%m'), ('3', '%m'),
# '3' needed for when no leading zero.
('2', '%w'), ('10', '%I')):
# Must deal with possible lack of locale info
# manifesting itself as the empty string (e.g., Swedish's
# lack of AM/PM info) or a platform returning a tuple of empty
# strings (e.g., MacOS 9 having timezone as ('','')).
if old:
current_format = current_format.replace(old, new)
time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
if time.strftime(directive, time_tuple).find('00'):
U_W = '%U'
else:
U_W = '%W'
date_time[offset] = current_format.replace('11', U_W)
if not self.__LC_date_time:
self.__LC_date_time = date_time[0]
if not self.__LC_date:
self.__LC_date = date_time[1]
if not self.__LC_time:
self.__LC_time = date_time[2]
def __calc_timezone(self):
# Set self.__timezone by using time.tzname.
#
# Empty string used for matching when timezone is not used/needed.
try:
time.tzset()
except AttributeError:
pass
time_zones = ["UTC", "GMT"]
if time.daylight:
time_zones.extend(time.tzname)
else:
time_zones.append(time.tzname[0])
self.__timezone = self.__pad(time_zones, 0)
class TimeRE(dict):
"""Handle conversion from format directives to regexes."""
def __init__(self, locale_time=None):
"""Init inst with non-locale regexes and store LocaleTime object."""
#XXX: Does 'Y' need to worry about having less or more than 4 digits?
base = super(TimeRE, self)
base.__init__({
            # The " [1-9]" alternative (leading space) is to make %c from ANSI C work
'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
'M': r"(?P<M>[0-5]\d|\d)",
'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
'w': r"(?P<w>[0-6])",
# W is set below by using 'U'
'y': r"(?P<y>\d\d)",
'Y': r"(?P<Y>\d\d\d\d)"})
base.__setitem__('W', base.__getitem__('U'))
if locale_time:
self.locale_time = locale_time
else:
self.locale_time = LocaleTime()
def __getitem__(self, fetch):
"""Try to fetch regex; if it does not exist, construct it."""
try:
return super(TimeRE, self).__getitem__(fetch)
except KeyError:
constructors = {
'A': lambda: self.__seqToRE(self.locale_time.f_weekday, fetch),
'a': lambda: self.__seqToRE(self.locale_time.a_weekday, fetch),
'B': lambda: self.__seqToRE(self.locale_time.f_month[1:],
fetch),
'b': lambda: self.__seqToRE(self.locale_time.a_month[1:],
fetch),
'c': lambda: self.pattern(self.locale_time.LC_date_time),
'p': lambda: self.__seqToRE(self.locale_time.am_pm, fetch),
'x': lambda: self.pattern(self.locale_time.LC_date),
'X': lambda: self.pattern(self.locale_time.LC_time),
'Z': lambda: self.__seqToRE(self.locale_time.timezone, fetch),
'%': lambda: '%',
}
if fetch in constructors:
self[fetch] = constructors[fetch]()
return self[fetch]
else:
raise
def __seqToRE(self, to_convert, directive):
"""Convert a list to a regex string for matching a directive."""
def sorter(a, b):
"""Sort based on length.
            Sorts in descending order of length, in case names in the locale
            differ only by a suffix; the name with the suffix must be tried
            first so it can match.
"""
try:
a_length = len(a)
except TypeError:
a_length = 0
try:
b_length = len(b)
except TypeError:
b_length = 0
return cmp(b_length, a_length)
to_convert = to_convert[:] # Don't want to change value in-place.
for value in to_convert:
if value != '':
break
else:
return ''
to_convert.sort(sorter)
regex = '|'.join(to_convert)
regex = '(?P<%s>%s' % (directive, regex)
return '%s)' % regex
def pattern(self, format):
"""Return re pattern for the format string.
        Any characters that might be interpreted as regex syntax need to be
        escaped.
"""
processed_format = ''
# The sub() call escapes all characters that might be misconstrued
# as regex syntax.
regex_chars = re_compile(r"([\\.^$*+?{}\[\]|])")
format = regex_chars.sub(r"\\\1", format)
whitespace_replacement = re_compile('\s+')
format = whitespace_replacement.sub('\s*', format)
while format.find('%') != -1:
directive_index = format.index('%')+1
processed_format = "%s%s%s" % (processed_format,
format[:directive_index-1],
self[format[directive_index]])
format = format[directive_index+1:]
return "%s%s" % (processed_format, format)
def compile(self, format):
"""Return a compiled re object for the format string."""
return re_compile(self.pattern(format), IGNORECASE)
def strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
"""Return a time struct based on the input data and the format string."""
time_re = TimeRE()
locale_time = time_re.locale_time
format_regex = time_re.compile(format)
found = format_regex.match(data_string)
if not found:
raise ValueError("time data did not match format: data=%s fmt=%s" %
(data_string, format))
if len(data_string) != found.end():
raise ValueError("unconverted data remains: %s" %
data_string[found.end():])
year = 1900
month = day = 1
hour = minute = second = 0
tz = -1
# weekday and julian defaulted to -1 so as to signal need to calculate values
weekday = julian = -1
found_dict = found.groupdict()
for group_key in found_dict.iterkeys():
if group_key == 'y':
year = int(found_dict['y'])
# Open Group specification for strptime() states that a %y
#value in the range of [00, 68] is in the century 2000, while
#[69,99] is in the century 1900
if year <= 68:
year += 2000
else:
year += 1900
elif group_key == 'Y':
year = int(found_dict['Y'])
elif group_key == 'm':
month = int(found_dict['m'])
elif group_key == 'B':
month = _insensitiveindex(locale_time.f_month, found_dict['B'])
elif group_key == 'b':
month = _insensitiveindex(locale_time.a_month, found_dict['b'])
elif group_key == 'd':
day = int(found_dict['d'])
elif group_key == 'H':
hour = int(found_dict['H'])
elif group_key == 'I':
hour = int(found_dict['I'])
ampm = found_dict.get('p', '').lower()
# If there was no AM/PM indicator, we'll treat this like AM
if ampm in ('', locale_time.am_pm[0].lower()):
# We're in AM so the hour is correct unless we're
# looking at 12 midnight.
# 12 midnight == 12 AM == hour 0
if hour == 12:
hour = 0
elif ampm == locale_time.am_pm[1].lower():
# We're in PM so we need to add 12 to the hour unless
# we're looking at 12 noon.
# 12 noon == 12 PM == hour 12
if hour != 12:
hour += 12
elif group_key == 'M':
minute = int(found_dict['M'])
elif group_key == 'S':
second = int(found_dict['S'])
elif group_key == 'A':
weekday = _insensitiveindex(locale_time.f_weekday,
found_dict['A'])
elif group_key == 'a':
weekday = _insensitiveindex(locale_time.a_weekday,
found_dict['a'])
elif group_key == 'w':
weekday = int(found_dict['w'])
if weekday == 0:
weekday = 6
else:
weekday -= 1
elif group_key == 'j':
julian = int(found_dict['j'])
elif group_key == 'Z':
# Since -1 is default value only need to worry about setting tz if
# it can be something other than -1.
found_zone = found_dict['Z'].lower()
if locale_time.timezone[0] == locale_time.timezone[1] and \
time.daylight:
pass #Deals with bad locale setup where timezone info is
# the same; first found on FreeBSD 4.4.
elif found_zone in ("utc", "gmt"):
tz = 0
elif locale_time.timezone[2].lower() == found_zone:
tz = 0
elif time.daylight and \
locale_time.timezone[3].lower() == found_zone:
tz = 1
# Cannot pre-calculate datetime_date() since can change in Julian
#calculation and thus could have different value for the day of the week
#calculation
if julian == -1:
# Need to add 1 to result since first day of the year is 1, not 0.
julian = datetime_date(year, month, day).toordinal() - \
datetime_date(year, 1, 1).toordinal() + 1
else: # Assume that if they bothered to include Julian day it will
#be accurate
datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal())
year = datetime_result.year
month = datetime_result.month
day = datetime_result.day
if weekday == -1:
weekday = datetime_date(year, month, day).weekday()
return time.struct_time((year, month, day,
hour, minute, second,
weekday, julian, tz))
def _insensitiveindex(lst, findme):
# Perform a case-insensitive index search.
#XXX <bc>: If LocaleTime is not exposed, then consider removing this and
# just lowercase when LocaleTime sets its vars and lowercasing
# search values.
findme = findme.lower()
for key,item in enumerate(lst):
if item.lower() == findme:
return key
else:
raise ValueError("value not in list")
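A minimal usage sketch for the module above. Because the code relies on cmp(), dict.iterkeys() and octal literals, this assumes a Python 2 interpreter; strptime here is the function defined above, not time.strptime, and the printed regex is only indicative:

time_re = TimeRE()
print time_re.pattern('%Y-%m-%d')       # e.g. (?P<Y>\d\d\d\d)-(?P<m>...)-(?P<d>...)
parsed = strptime('2003-02-01', '%Y-%m-%d')
print parsed.tm_year, parsed.tm_mon, parsed.tm_mday   # 2003 2 1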
| 5,875
| 0
| 478
|
c7f02da4a17a76fc3ef4f8355db88715fe9fe33d
| 2,231
|
py
|
Python
|
examples/widgets/lists/list_kv.py
|
xinmingzhang/kivy
|
86b6e19d8a02788fe8850b690bcecdff848f3c4e
|
[
"MIT"
] | 9
|
2016-09-03T07:20:01.000Z
|
2020-05-21T14:44:48.000Z
|
examples/widgets/lists/list_kv.py
|
xinmingzhang/kivy
|
86b6e19d8a02788fe8850b690bcecdff848f3c4e
|
[
"MIT"
] | 1
|
2017-05-30T20:45:15.000Z
|
2017-05-30T20:45:15.000Z
|
examples/widgets/lists/list_kv.py
|
xinmingzhang/kivy
|
86b6e19d8a02788fe8850b690bcecdff848f3c4e
|
[
"MIT"
] | 4
|
2016-09-10T15:27:54.000Z
|
2020-03-27T22:05:31.000Z
|
from kivy.adapters.dictadapter import DictAdapter
from kivy.uix.selectableview import SelectableView
from kivy.uix.listview import ListView, ListItemButton
from kivy.uix.gridlayout import GridLayout
from kivy.lang import Builder
from kivy.factory import Factory
from fixtures import integers_dict
# [TODO] Will SelectableView be in the kivy/factory_registers.py,
# as a result of setup.py? ListItemButton? others?
Factory.register('SelectableView', cls=SelectableView)
Factory.register('ListItemButton', cls=ListItemButton)
# [TODO] SelectableView is subclassed here, yet, it is necessary to add the
# index property in the template. Same TODO in list_cascade_images.py.
Builder.load_string('''
[CustomListItem@SelectableView+BoxLayout]:
size_hint_y: ctx.size_hint_y
height: ctx.height
ListItemButton:
text: ctx.text
is_selected: ctx.is_selected
''')
class MainView(GridLayout):
'''Implementation of a list view with a kv template used for the list
item class.
'''
if __name__ == '__main__':
from kivy.base import runTouchApp
runTouchApp(MainView(width=800))
| 36.57377
| 78
| 0.650829
|
from kivy.adapters.dictadapter import DictAdapter
from kivy.uix.selectableview import SelectableView
from kivy.uix.listview import ListView, ListItemButton
from kivy.uix.gridlayout import GridLayout
from kivy.lang import Builder
from kivy.factory import Factory
from fixtures import integers_dict
# [TODO] Will SelectableView be in the kivy/factory_registers.py,
# as a result of setup.py? ListItemButton? others?
Factory.register('SelectableView', cls=SelectableView)
Factory.register('ListItemButton', cls=ListItemButton)
# [TODO] SelectableView is subclassed here, yet, it is necessary to add the
# index property in the template. Same TODO in list_cascade_images.py.
Builder.load_string('''
[CustomListItem@SelectableView+BoxLayout]:
size_hint_y: ctx.size_hint_y
height: ctx.height
ListItemButton:
text: ctx.text
is_selected: ctx.is_selected
''')
class MainView(GridLayout):
'''Implementation of a list view with a kv template used for the list
item class.
'''
def __init__(self, **kwargs):
kwargs['cols'] = 1
super(MainView, self).__init__(**kwargs)
list_item_args_converter = \
lambda row_index, rec: {'text': rec['text'],
'is_selected': rec['is_selected'],
'size_hint_y': None,
'height': 25}
# Here we create a dict adapter with 1..100 integer strings as
# sorted_keys, and integers_dict from fixtures as data, passing our
# CompositeListItem kv template for the list item view. Then we
# create a list view using this adapter. args_converter above converts
# dict attributes to ctx attributes.
dict_adapter = DictAdapter(sorted_keys=[str(i) for i in range(100)],
data=integers_dict,
args_converter=list_item_args_converter,
template='CustomListItem')
list_view = ListView(adapter=dict_adapter)
self.add_widget(list_view)
if __name__ == '__main__':
from kivy.base import runTouchApp
runTouchApp(MainView(width=800))
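The args_converter used above is just a plain callable from (row_index, record) to the ctx keys consumed by the CustomListItem template, so it can be exercised without Kivy at all. A standalone sketch with a made-up record:

def list_item_args_converter(row_index, rec):
    # Same mapping as the lambda in MainView.__init__ above.
    return {'text': rec['text'],
            'is_selected': rec['is_selected'],
            'size_hint_y': None,
            'height': 25}

print(list_item_args_converter(0, {'text': '0', 'is_selected': False}))
# {'text': '0', 'is_selected': False, 'size_hint_y': None, 'height': 25}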
| 1,075
| 0
| 27
|
3d08429226f10159b4af20dd37f0a9c228fb7474
| 10,140
|
py
|
Python
|
utils/evaluate.py
|
kyuyeonpooh/objects-that-sound
|
962031567f7e5657637d5518dff4f9a44af1c7eb
|
[
"BSD-3-Clause"
] | 18
|
2020-05-11T06:38:14.000Z
|
2022-03-08T02:01:08.000Z
|
utils/evaluate.py
|
kyuyeonpooh/objects-that-sound
|
962031567f7e5657637d5518dff4f9a44af1c7eb
|
[
"BSD-3-Clause"
] | 6
|
2020-11-13T17:46:51.000Z
|
2021-04-30T21:13:55.000Z
|
utils/evaluate.py
|
kyuyeonpooh/objects-that-sound
|
962031567f7e5657637d5518dff4f9a44af1c7eb
|
[
"BSD-3-Clause"
] | 5
|
2020-05-27T07:36:45.000Z
|
2022-03-08T02:01:09.000Z
|
import math
import os
import sys
import numpy as np
from util import load_result
from itertools import combinations
from itertools import product
from ontology import Ontology
def get_max_tree_distance(ontology, tags, debug=False):
"""
Description:
Return max tree distance which can be derived with given tag list.
Parameters:
tags: list of tags used in training. (type: list)
[Example] ['Acoustic guitar', 'Electric Guitar', ..., 'Piano']
"""
# create combination between tags
comb = combinations(tags, 2)
# initiate maximum distance between two tags
max_dist = 0
# loop for every combination
for item in comb:
# calculate distance between two tags
distance = ontology.get_min_distance(item[0], item[1])
if debug:
print("'%s'-'%s': %d" % (item[0], item[1], distance))
# update max_dist if distance > max_dist
max_dist = distance if distance > max_dist else max_dist
return max_dist
def get_min_tag_distance(ontology, tags_x, tags_y):
"""
Description:
Return minimum available tree distance between two videos
Parameters:
tags_x: tags of one video
tags_y: tags of the other video
        [Example] tags_x = ['Electric Guitar', 'Human Voice']
tags_y = ['Human Voice']
This function with the example above should return 0,
because 'Human Voice' tag exists in both of tag lists.
tags_x = ['Piano', 'Guitar', 'Bass Guitar']
tags_y = ['Accordion']
This function with the example above should return the
distance between 'Accordion' and 'Piano', because its distance
will be the smallest among the followings:
'Piano' - 'Accordion', 'Guitar' - 'Accordion', 'Bass Guitar' - 'Accordion'
"""
products = product(tags_x, tags_y)
min_dist = sys.maxsize
for x, y in products:
distance = ontology.get_min_distance(x, y)
min_dist = min_dist if min_dist < distance else distance
return min_dist
def dist_to_score(ontology, distances, tags=[], max_dist=-1, debug=False):
"""
Description:
Convert distances of K retrieved items into scores
Parameters:
distances: tree distance between query and K retrieved items
[Example] [0, 0, 1, 2, 1, 0, 5, 4, ..., 9] (type: ndarray, len: K)
[Note] score = max_tree_distance - distance
"""
# get maximum tree distance
max_tree_distance = 0
if max_dist >= 0:
max_tree_distance = max_dist
    elif len(tags) > 0:
max_tree_distance = get_max_tree_distance(ontology, tags)
scores = max_tree_distance - distances
return scores
def DCG(scores, k=30, alternate=False):
"""
Description:
Return DCG(Discounted Cumulative Gain) with given score (relevance) list
Parameters:
scores: score list (type: ndarray, len: N)
[Example] [8, 6, 6, 8, 4, 7, ..., 2]
k: length of retrieved items to calculate nDCG
"""
# return zero if scores is None
if scores is None or len(scores) < 1:
return 0.0
# set the number of items in scores
scores = scores[:k]
n_scores = len(scores)
# use alternative formula of DCG
if alternate:
log2i = np.log2(np.asarray(range(1, n_scores + 1)) + 1)
return ((np.power(2, scores) - 1) / log2i).sum()
# use traditional formula of DCG
else:
log2i = np.log2(np.asarray(range(1, n_scores + 1)) + 1)
return (scores / log2i).sum()
def IDCG(scores, k=30, alternate=False):
"""
Description:
Return IDCG(Ideal Discounted Cumulative Gain) with given score (relevance) list
Parameters:
scores: score list (type: ndarray, len: N)
[Example] [8, 6, 6, 8, 4, 7, ..., 2]
k: length of retrieved items to calculate nDCG
"""
if scores is None or len(scores) < 1:
return 0.0
    # copy and sort scores in increasing order
    s = sorted(scores)
    # reverse into decreasing order and truncate to k
    s = s[::-1][:k]
return DCG(s, k, alternate)
def NDCG(scores, k=30, alternate=False):
"""
Description:
Return nDCG(normalized Discounted Cumulative Gain) with given score (relevance) list
Parameters:
scores: score list (type: ndarray, len: N)
[Example] [8, 6, 6, 8, 4, 7, ..., 2]
"""
# return 0 if scores is empty
if scores is None or len(scores) < 1:
return 0.0
# calculate idcg
idcg = IDCG(scores, k, alternate)
if idcg == 0:
return 0.0
return DCG(scores, k, alternate) / idcg
def do_NDCG(ontology, k, queries, ret_items, tags):
"""
Description:
Return Average nDCG for queries and ret_item
Parameters:
queries: list of N queries (type: list, dimension: 2D, shape: (N, ?))
[Example] [[tag1, tag2, ..., tagK], ..., [tagA, tagB, ..., tagG]]
ret_items: list of N retrieved items (type: list, dimension: 3D, shape: (N, K, ?))
[Example] [[[tagA, tagB, ..., tagG], ..., [tagX, tagY, ..., tagZ]], ... , [ ... ]]
"""
N = len(queries)
ndcgs = 0
# get max_tree_distance
max_tree_distance = get_max_tree_distance(ontology, tags, debug=False)
# for every query, calculate nDCG
for i in range(N):
distances = np.asarray(
[get_min_tag_distance(ontology, queries[i], ret_items[i][j]) for j in range(len(ret_items[i]))]
)
scores = dist_to_score(ontology, distances, max_dist=max_tree_distance)
ndcgs += NDCG(scores, k)
return ndcgs / N
def AP(target, results):
"""
Description:
Return AP(Average Precision) with target and results
Parameters:
target: list of K retrieved items (type: list, len: K)
[Example] [tag1, tag2, ..., tagK]
results: list of N retrieved items (type: list, shape: (N, ?))
[Example] [[tagA, tagB, ..., tagG], ..., [tagX, tagY, ..., tagZ]]
"""
# initiate variables for average precision
n = 1 # the number of result
hit = 0 # the number of hit
ap = 0 # average precision = 1/hit * sum(precision)
len_target = len(target)
for res in results:
(small_set, big_set) = (target, res) if len_target < len(res) else (res, target)
for item in small_set:
if item in big_set: # hit
hit += 1
ap += hit / n
break
n += 1
return ap / hit
def recallAtK(target, results):
"""
Description:
Return 'recall at k' with target and results
Parameters:
target: list of K retrieved items (type: list, len: K)
[Example] [tag1, tag2, ..., tagK]
results: list of N retrieved items (type: list, shape: (N, ?))
[Example] [[tagA, tagB, ..., tagG], ..., [tagX, tagY, ..., tagZ]]
"""
    # initiate variables for recall at K
recall = 0
K = len(results)
len_target = len(target)
for res in results:
(small_set, big_set) = (target, res) if len_target < len(res) else (res, target)
for item in small_set:
if item in big_set: # hit
recall += 1
break
return recall / K
if __name__ == "__main__":
data_dir = "json"
tags = [
"Acoustic guitar",
"Bass guitar",
"Strum",
"Piano",
"Independent music",
"Wedding music",
"Scary music",
"Firecracker",
"Drip",
]
ontology = Ontology(data_dir)
"""
# Calculate maximum tree distance between tags
print("Calculate maximum tree distance between tags")
max_dist = get_max_tree_distance(ontology, tags, debug=False)
print("Maximum tree distance: ", max_dist, end="\n\n")
# Convert distances to scores with max_dist
print("Convert distances to scores with max_dist")
distances = np.array([0, 0, 1, 2, 1, 0, 5, 4, 8, 9])
print("Distances: ", distances)
scores = dist_to_score(ontology, distances, max_dist=max_dist, debug=True)
print("Scores: ", scores, end="\n\n")
# Convert distances to scores with tags
print("Convert distances to scores with tags")
distances = np.array([0, 0, 1, 2, 1, 0, 5, 4, 8, 9])
print("Distances: ", distances)
scores = dist_to_score(ontology, distances, tags=tags, debug=True)
print("Scores: ", scores, end="\n\n")
# Do DCG, IDCG, NDCG
scores = [3, 2, 3, 0, 1, 2]
print("### Do DCG ###: ", DCG(scores, alternate=False))
print("### Do IDCG ###: ", IDCG(scores))
print("### Do NDCG ###: ", NDCG(scores), end="\n\n")
# Do AP and recall at K
target = ["a", "b", "c"]
results = [["a", "g"], ["d", "e", "f", "b"], ["g", "h", "c"], ["y", "k", "p"]]
print("### AP ###: ", AP(target, results))
print("### Recall at K ###: ", recallAtK(target, results), end="\n\n")
# Do get_min_tag_distance: example1
tags_x = ["Independent music", "Drip"]
tags_y = ["Drip"]
print("@@@ get_min_tag_distance1 @@@: ", get_min_tag_distance(ontology, tags_x, tags_y))
# Do get_min_tag_distance: example2
tags_x = ["Piano", "Guitar", "Bass guitar"]
tags_y = ["Accordion"]
print("@@@ get_min_tag_distance2 @@@: ", get_min_tag_distance(ontology, tags_x, tags_y), end="\n\n")
"""
# Do average nDCG
with open("metadata/all_tags.cls") as fi:
tags = map(lambda x: x[:-1], fi.readlines())
tags = dict((x, i) for i, x in enumerate(tags))
file_names = [
"./results/AVE_aug_ave_i2a.pickle",
"./results/AVE_aug_ave_a2i.pickle",
"./results/AVE_aug_ave_i2i.pickle",
"./results/AVE_aug_ave_a2a.pickle",
]
for f in file_names:
queries, ret_items = load_result(f)
ndcgs = do_NDCG(ontology, 5, queries, ret_items, tags)
print("nDCG: %s" % (f), ndcgs, end="\n\n")
| 32.190476
| 107
| 0.581953
|
import math
import os
import sys
import numpy as np
from util import load_result
from itertools import combinations
from itertools import product
from ontology import Ontology
def get_max_tree_distance(ontology, tags, debug=False):
"""
Description:
Return max tree distance which can be derived with given tag list.
Parameters:
tags: list of tags used in training. (type: list)
[Example] ['Acoustic guitar', 'Electric Guitar', ..., 'Piano']
"""
# create combination between tags
comb = combinations(tags, 2)
# initiate maximum distance between two tags
max_dist = 0
# loop for every combination
for item in comb:
# calculate distance between two tags
distance = ontology.get_min_distance(item[0], item[1])
if debug:
print("'%s'-'%s': %d" % (item[0], item[1], distance))
# update max_dist if distance > max_dist
max_dist = distance if distance > max_dist else max_dist
return max_dist
def get_min_tag_distance(ontology, tags_x, tags_y):
"""
Description:
Return minimum available tree distance between two videos
Parameters:
tags_x: tags of one video
tags_y: tags of the other video
        [Example] tags_x = ['Electric Guitar', 'Human Voice']
tags_y = ['Human Voice']
This function with the example above should return 0,
because 'Human Voice' tag exists in both of tag lists.
tags_x = ['Piano', 'Guitar', 'Bass Guitar']
tags_y = ['Accordion']
This function with the example above should return the
distance between 'Accordion' and 'Piano', because its distance
will be the smallest among the followings:
'Piano' - 'Accordion', 'Guitar' - 'Accordion', 'Bass Guitar' - 'Accordion'
"""
products = product(tags_x, tags_y)
min_dist = sys.maxsize
for x, y in products:
distance = ontology.get_min_distance(x, y)
min_dist = min_dist if min_dist < distance else distance
return min_dist
def dist_to_score(ontology, distances, tags=[], max_dist=-1, debug=False):
"""
Description:
Convert distances of K retrieved items into scores
Parameters:
distances: tree distance between query and K retrieved items
[Example] [0, 0, 1, 2, 1, 0, 5, 4, ..., 9] (type: ndarray, len: K)
[Note] score = max_tree_distance - distance
"""
# get maximum tree distance
max_tree_distance = 0
if max_dist >= 0:
max_tree_distance = max_dist
    elif len(tags) > 0:
max_tree_distance = get_max_tree_distance(ontology, tags)
scores = max_tree_distance - distances
return scores
def DCG(scores, k=30, alternate=False):
"""
Description:
Return DCG(Discounted Cumulative Gain) with given score (relevance) list
Parameters:
scores: score list (type: ndarray, len: N)
[Example] [8, 6, 6, 8, 4, 7, ..., 2]
k: length of retrieved items to calculate nDCG
"""
# return zero if scores is None
if scores is None or len(scores) < 1:
return 0.0
# set the number of items in scores
scores = scores[:k]
n_scores = len(scores)
# use alternative formula of DCG
if alternate:
log2i = np.log2(np.asarray(range(1, n_scores + 1)) + 1)
return ((np.power(2, scores) - 1) / log2i).sum()
# use traditional formula of DCG
else:
log2i = np.log2(np.asarray(range(1, n_scores + 1)) + 1)
return (scores / log2i).sum()
def IDCG(scores, k=30, alternate=False):
"""
Description:
Return IDCG(Ideal Discounted Cumulative Gain) with given score (relevance) list
Parameters:
scores: score list (type: ndarray, len: N)
[Example] [8, 6, 6, 8, 4, 7, ..., 2]
k: length of retrieved items to calculate nDCG
"""
if scores is None or len(scores) < 1:
return 0.0
    # copy and sort scores in increasing order
    s = sorted(scores)
    # reverse into decreasing order and truncate to k
    s = s[::-1][:k]
return DCG(s, k, alternate)
def NDCG(scores, k=30, alternate=False):
"""
Description:
Return nDCG(normalized Discounted Cumulative Gain) with given score (relevance) list
Parameters:
scores: score list (type: ndarray, len: N)
[Example] [8, 6, 6, 8, 4, 7, ..., 2]
"""
# return 0 if scores is empty
if scores is None or len(scores) < 1:
return 0.0
# calculate idcg
idcg = IDCG(scores, k, alternate)
if idcg == 0:
return 0.0
return DCG(scores, k, alternate) / idcg
def do_NDCG(ontology, k, queries, ret_items, tags):
"""
Description:
Return Average nDCG for queries and ret_item
Parameters:
queries: list of N queries (type: list, dimension: 2D, shape: (N, ?))
[Example] [[tag1, tag2, ..., tagK], ..., [tagA, tagB, ..., tagG]]
ret_items: list of N retrieved items (type: list, dimension: 3D, shape: (N, K, ?))
[Example] [[[tagA, tagB, ..., tagG], ..., [tagX, tagY, ..., tagZ]], ... , [ ... ]]
"""
N = len(queries)
ndcgs = 0
# get max_tree_distance
max_tree_distance = get_max_tree_distance(ontology, tags, debug=False)
# for every query, calculate nDCG
for i in range(N):
distances = np.asarray(
[get_min_tag_distance(ontology, queries[i], ret_items[i][j]) for j in range(len(ret_items[i]))]
)
scores = dist_to_score(ontology, distances, max_dist=max_tree_distance)
ndcgs += NDCG(scores, k)
return ndcgs / N
def AP(target, results):
"""
Description:
Return AP(Average Precision) with target and results
Parameters:
target: list of K retrieved items (type: list, len: K)
[Example] [tag1, tag2, ..., tagK]
results: list of N retrieved items (type: list, shape: (N, ?))
[Example] [[tagA, tagB, ..., tagG], ..., [tagX, tagY, ..., tagZ]]
"""
# initiate variables for average precision
n = 1 # the number of result
hit = 0 # the number of hit
ap = 0 # average precision = 1/hit * sum(precision)
len_target = len(target)
for res in results:
(small_set, big_set) = (target, res) if len_target < len(res) else (res, target)
for item in small_set:
if item in big_set: # hit
hit += 1
ap += hit / n
break
n += 1
return ap / hit
def recallAtK(target, results):
"""
Description:
Return 'recall at k' with target and results
Parameters:
target: list of K retrieved items (type: list, len: K)
[Example] [tag1, tag2, ..., tagK]
results: list of N retrieved items (type: list, shape: (N, ?))
[Example] [[tagA, tagB, ..., tagG], ..., [tagX, tagY, ..., tagZ]]
"""
    # initiate variables for recall at K
recall = 0
K = len(results)
len_target = len(target)
for res in results:
(small_set, big_set) = (target, res) if len_target < len(res) else (res, target)
for item in small_set:
if item in big_set: # hit
recall += 1
break
return recall / K
if __name__ == "__main__":
data_dir = "json"
tags = [
"Acoustic guitar",
"Bass guitar",
"Strum",
"Piano",
"Independent music",
"Wedding music",
"Scary music",
"Firecracker",
"Drip",
]
ontology = Ontology(data_dir)
"""
# Calculate maximum tree distance between tags
print("Calculate maximum tree distance between tags")
max_dist = get_max_tree_distance(ontology, tags, debug=False)
print("Maximum tree distance: ", max_dist, end="\n\n")
# Convert distances to scores with max_dist
print("Convert distances to scores with max_dist")
distances = np.array([0, 0, 1, 2, 1, 0, 5, 4, 8, 9])
print("Distances: ", distances)
scores = dist_to_score(ontology, distances, max_dist=max_dist, debug=True)
print("Scores: ", scores, end="\n\n")
# Convert distances to scores with tags
print("Convert distances to scores with tags")
distances = np.array([0, 0, 1, 2, 1, 0, 5, 4, 8, 9])
print("Distances: ", distances)
scores = dist_to_score(ontology, distances, tags=tags, debug=True)
print("Scores: ", scores, end="\n\n")
# Do DCG, IDCG, NDCG
scores = [3, 2, 3, 0, 1, 2]
print("### Do DCG ###: ", DCG(scores, alternate=False))
print("### Do IDCG ###: ", IDCG(scores))
print("### Do NDCG ###: ", NDCG(scores), end="\n\n")
# Do AP and recall at K
target = ["a", "b", "c"]
results = [["a", "g"], ["d", "e", "f", "b"], ["g", "h", "c"], ["y", "k", "p"]]
print("### AP ###: ", AP(target, results))
print("### Recall at K ###: ", recallAtK(target, results), end="\n\n")
# Do get_min_tag_distance: example1
tags_x = ["Independent music", "Drip"]
tags_y = ["Drip"]
print("@@@ get_min_tag_distance1 @@@: ", get_min_tag_distance(ontology, tags_x, tags_y))
# Do get_min_tag_distance: example2
tags_x = ["Piano", "Guitar", "Bass guitar"]
tags_y = ["Accordion"]
print("@@@ get_min_tag_distance2 @@@: ", get_min_tag_distance(ontology, tags_x, tags_y), end="\n\n")
"""
# Do average nDCG
with open("metadata/all_tags.cls") as fi:
tags = map(lambda x: x[:-1], fi.readlines())
tags = dict((x, i) for i, x in enumerate(tags))
file_names = [
"./results/AVE_aug_ave_i2a.pickle",
"./results/AVE_aug_ave_a2i.pickle",
"./results/AVE_aug_ave_i2i.pickle",
"./results/AVE_aug_ave_a2a.pickle",
]
for f in file_names:
queries, ret_items = load_result(f)
ndcgs = do_NDCG(ontology, 5, queries, ret_items, tags)
print("nDCG: %s" % (f), ndcgs, end="\n\n")
| 0
| 0
| 0
|
32c39be02178140946a124ae7744a66b50840fb3
| 1,711
|
py
|
Python
|
task06/kadai3.py
|
hameji/PythonDjango
|
35f3c41c03f17e23ed3a74bc6457e5d501a9650d
|
[
"MIT"
] | null | null | null |
task06/kadai3.py
|
hameji/PythonDjango
|
35f3c41c03f17e23ed3a74bc6457e5d501a9650d
|
[
"MIT"
] | null | null | null |
task06/kadai3.py
|
hameji/PythonDjango
|
35f3c41c03f17e23ed3a74bc6457e5d501a9650d
|
[
"MIT"
] | null | null | null |
#coding:utf-8
import requests
import pprint
import csv
main()
| 25.161765
| 78
| 0.566335
|
#coding:utf-8
import requests
import pprint
import csv
def request_Url(url):
payload = {
'applicationId': 1094782773139302380,
        'genreId': 100283, # 100283 = Western confectionery genre
        'carrier': 0, # 0:PC, 1:mobile
        'page':1, # which page of results
}
r = requests.get(url, params=payload)
return r.json()
def check_Data(json):
print("_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/")
print("_/_/_/_/_/_/_/_/_/構造確認_/_/_/_/_/_/_/_/_/")
print("_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/")
pprint.pprint(json, depth=2, compact=True)
    # total = int(resp['Items'])  <- this didn't work - why?
# Max = total/30 + 1
# print("【num of item】",total)
# print("【num of page】",Max)
print("_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/")
data = json['Items'][0]
pprint.pprint(data, depth=2, compact=True)
print("_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/")
def parse_Json_to_List(json):
ranking_list = []
counter = 0
for i in json['Items']:
counter = counter + 1
item = i['Item']
rank = item['rank']
name = item['itemName']
print('【rank】'+ str(rank))
print('【Name】' + str(name[:30]) + '...')
rankData = [rank, name]
ranking_list.append(rankData)
if counter == 10:
break
return ranking_list
def add_to_csv_file(path, list):
with open(path, mode='a', encoding="utf-8_sig") as file:
writer = csv.writer(file)
writer.writerow(list)
def main():
url = 'https://app.rakuten.co.jp/services/api/IchibaItem/Ranking/20170628'
json = request_Url(url)
# checkData(json)
list = parse_Json_to_List(json)
add_to_csv_file('kadai3data.csv', list)
main()
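The payload dict in request_Url() above is serialised into the query string by requests. One way to inspect the final URL without actually calling the Rakuten API is to prepare the request locally; this is a sketch, and the applicationId is just the placeholder value already used above:

import requests

url = 'https://app.rakuten.co.jp/services/api/IchibaItem/Ranking/20170628'
payload = {'applicationId': 1094782773139302380, 'genreId': 100283, 'carrier': 0, 'page': 1}
prepared = requests.Request('GET', url, params=payload).prepare()
print(prepared.url)  # .../Ranking/20170628?applicationId=...&genreId=100283&carrier=0&page=1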
| 1,593
| 0
| 115
|
974251c9784ccca2d7e09370ed045cd02bd561ef
| 3,686
|
py
|
Python
|
recognizer/pddl/pddl_planner.py
|
RukNdf/MA-Landmark
|
4038ebe7edc9e353e1987479f5f9edc528a4bd2a
|
[
"Unlicense"
] | null | null | null |
recognizer/pddl/pddl_planner.py
|
RukNdf/MA-Landmark
|
4038ebe7edc9e353e1987479f5f9edc528a4bd2a
|
[
"Unlicense"
] | null | null | null |
recognizer/pddl/pddl_planner.py
|
RukNdf/MA-Landmark
|
4038ebe7edc9e353e1987479f5f9edc528a4bd2a
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
#
# pddl_planner.py
# ma-goal-recognition
#
# Created by Felipe Meneguzzi on 2020-03-12.
# Copyright 2020 Felipe Meneguzzi. All rights reserved.
#
from recognizer.pddl.pddl_parser import PDDL_Parser
from recognizer.pddl.state import applicable, apply
import time
| 35.786408
| 115
| 0.607976
|
#!/usr/bin/env python
#
# pddl_planner.py
# ma-goal-recognition
#
# Created by Felipe Meneguzzi on 2020-03-12.
# Copyright 2020 Felipe Meneguzzi. All rights reserved.
#
from recognizer.pddl.pddl_parser import PDDL_Parser
from recognizer.pddl.state import applicable, apply
import time
class PDDL_Planner:
def __init__(self, verbose=False):
self.verbose = verbose
def applicable(self, state, positive, negative):
return applicable(state, positive, negative)
def apply(self, state, positive, negative):
return apply(state, positive, negative)
def solvable(self, domain, initial_state, goal_state):
""""Computes whether the problem posed by initial_state, goal_state is solvable by reachability analysis"""
last_state = set([])
reachable_literals = set(initial_state)
positive_goals = set(goal_state[0])
actions = domain
positive_effects = set([])
negative_effects = set([])
for a in actions:
positive_effects = positive_effects.union(set(a.add_effects))
negative_effects = negative_effects.union(set(a.del_effects))
# First check the obvious stuff
for p in goal_state[0]:
if p not in reachable_literals and p not in positive_effects:
return False
for p in goal_state[1]:
if p in reachable_literals and p not in negative_effects:
return False
while last_state != reachable_literals:
last_state = reachable_literals.copy()
if positive_goals.issubset(reachable_literals):
return True
for a in actions:
if a.applicable(reachable_literals):
reachable_literals = reachable_literals.union(a.add_effects)
return False
#-----------------------------------------------
# Solve
#-----------------------------------------------
def solve_file(self, domainfile, problemfile, verbose=False):
# Parser
start_time = time.time()
parser = self.parse(domainfile,problemfile)
# Test if first state is not the goal
if applicable(parser.state, parser.positive_goals, parser.negative_goals):
return [], 0
# Grounding process
ground_actions = self.grounding(parser)
plan = self.solve(ground_actions, parser.state, (parser.positive_goals, parser.negative_goals))
final_time = time.time() - start_time
if verbose:
print('Time: ' + str(final_time) + 's')
if plan:
print('plan:')
for act in plan:
print('(' + act.name + ''.join(' ' + p for p in act.parameters) + ')')
else:
print('No plan was found')
return plan, final_time
def parse(self, domainfile, problemfile):
if self.verbose: print("Parsing %s and %s" % (domainfile,problemfile))
parser = PDDL_Parser()
parser.parse_domain(domainfile)
parser.parse_problem(problemfile)
return parser
def grounding(self, parser):
ground_actions = []
start_time = time.time()
for action in parser.actions:
for act in action.groundify(parser.objects):
ground_actions.append(act)
final_time = time.time() - start_time
if self.verbose:
print("Grounding time: %d s" % final_time)
print("Number of actions: %d" % len(ground_actions))
return ground_actions
def solve(self, domain, initial_state, goals):
raise NotImplementedError("PDDL Planners need to implement solve")
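The reachability test in solvable() above is a delete-relaxation fixed point: it only ever accumulates add effects. The same idea written against a toy action type (a hypothetical namedtuple, not this repo's Action class) looks like this:

from collections import namedtuple

ToyAction = namedtuple('ToyAction', 'name pre add delete')

def relaxed_reachable(actions, init, goal):
    # Fixed point over add effects only, mirroring PDDL_Planner.solvable().
    reachable = set(init)
    changed = True
    while changed:
        if goal <= reachable:
            return True
        changed = False
        for a in actions:
            if a.pre <= reachable and not a.add <= reachable:
                reachable |= a.add
                changed = True
    return goal <= reachable

acts = [ToyAction('pick', {'at_table'}, {'holding'}, set()),
        ToyAction('place', {'holding'}, {'at_goal'}, {'holding'})]
print(relaxed_reachable(acts, {'at_table'}, {'at_goal'}))  # True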
| 1,819
| 1,553
| 23
|
22109965bbbb582bac925c5777e6cc232e05234f
| 1,332
|
py
|
Python
|
qualang_tools/config/__init__.py
|
TheoLaudatQM/py-qua-tools
|
60c005c651148bafee8f437ce1be69d2340a265b
|
[
"BSD-3-Clause"
] | null | null | null |
qualang_tools/config/__init__.py
|
TheoLaudatQM/py-qua-tools
|
60c005c651148bafee8f437ce1be69d2340a265b
|
[
"BSD-3-Clause"
] | null | null | null |
qualang_tools/config/__init__.py
|
TheoLaudatQM/py-qua-tools
|
60c005c651148bafee8f437ce1be69d2340a265b
|
[
"BSD-3-Clause"
] | null | null | null |
from qualang_tools.config.integration_weights_tools import (
convert_integration_weights,
compress_integration_weights,
plot_integration_weights,
)
from qualang_tools.config.waveform_tools import (
drag_gaussian_pulse_waveforms,
drag_cosine_pulse_waveforms,
)
from qualang_tools.config.builder import ConfigBuilder
from qualang_tools.config.components import *
from qualang_tools.config.primitive_components import *
__all__ = [
"drag_gaussian_pulse_waveforms",
"drag_cosine_pulse_waveforms",
"convert_integration_weights",
"compress_integration_weights",
"plot_integration_weights",
"Controller",
"ArbitraryWaveform",
"ConstantWaveform",
"DigitalWaveform",
"MeasurePulse",
"ControlPulse",
"Mixer",
"Element",
"MeasureElement",
"ConstantIntegrationWeights",
"ArbitraryIntegrationWeights",
"ElementCollection",
"ReadoutResonator",
"Transmon",
"FluxTunableTransmon",
"Coupler",
"Oscillator",
"Port",
"AnalogInputPort",
"AnalogOutputPort",
"DigitalInputPort",
"DigitalOutputPort",
"Waveform",
"Pulse",
"Operation",
"IntegrationWeights",
"Weights",
"DigitalSample",
"Matrix2x2",
"AnalogOutputFilter",
"ConfigBuilder",
]
| 25.615385
| 61
| 0.688438
|
from qualang_tools.config.integration_weights_tools import (
convert_integration_weights,
compress_integration_weights,
plot_integration_weights,
)
from qualang_tools.config.waveform_tools import (
drag_gaussian_pulse_waveforms,
drag_cosine_pulse_waveforms,
)
from qualang_tools.config.builder import ConfigBuilder
from qualang_tools.config.components import *
from qualang_tools.config.primitive_components import *
__all__ = [
"drag_gaussian_pulse_waveforms",
"drag_cosine_pulse_waveforms",
"convert_integration_weights",
"compress_integration_weights",
"plot_integration_weights",
"Controller",
"ArbitraryWaveform",
"ConstantWaveform",
"DigitalWaveform",
"MeasurePulse",
"ControlPulse",
"Mixer",
"Element",
"MeasureElement",
"ConstantIntegrationWeights",
"ArbitraryIntegrationWeights",
"ElementCollection",
"ReadoutResonator",
"Transmon",
"FluxTunableTransmon",
"Coupler",
"Oscillator",
"Port",
"AnalogInputPort",
"AnalogOutputPort",
"DigitalInputPort",
"DigitalOutputPort",
"Waveform",
"Pulse",
"Operation",
"IntegrationWeights",
"Weights",
"DigitalSample",
"Matrix2x2",
"AnalogOutputFilter",
"ConfigBuilder",
]
| 0
| 0
| 0
|
8debd24c7b2f10459219c2a5b93fdb2751a9e97a
| 3,931
|
py
|
Python
|
api/subtitle/opensubtitles.py
|
stephanos/subtitlevocabulary
|
f452d7f35468912fe3967cfcf81c8f65cab345ec
|
[
"MIT"
] | 19
|
2017-04-20T00:57:36.000Z
|
2018-01-06T10:47:38.000Z
|
api/subtitle/opensubtitles.py
|
stephanos/subtitlevocabulary
|
f452d7f35468912fe3967cfcf81c8f65cab345ec
|
[
"MIT"
] | null | null | null |
api/subtitle/opensubtitles.py
|
stephanos/subtitlevocabulary
|
f452d7f35468912fe3967cfcf81c8f65cab345ec
|
[
"MIT"
] | 3
|
2018-01-01T06:27:48.000Z
|
2020-04-09T17:21:28.000Z
|
import base64
import json
import os
import re
import zlib
from retrying import retry
from xmlrpc.client import ServerProxy
from api.fixture import load_fixture
from api.subtitle.model import to_model
LANGUAGE = 'en'
NEWLINE_PATTERN = re.compile(r'(\r\n|\r|\n)')
OPENSUBTITLES_URL = 'http://api.opensubtitles.org/xml-rpc'
OPENSUBTITLES_UA = 'subvoc v1.0'
UNICODE_BOM = u'\N{ZERO WIDTH NO-BREAK SPACE}'
class OpenSubtitles:
"""API client to download subtitles from opensubtitles.org"""
def __init__(self, credentials, client=None):
"""Constructor to prepare API connection.
        :param credentials: username/password tuple
:param client: optional, custom XMLRPC client
"""
self.token = None
self.credentials = credentials
self.xmlrpc = client or ServerProxy(OPENSUBTITLES_URL, allow_none=True)
def login(self):
"""Request and save authentication token.
:raises RuntimeError: if login fails
"""
username = self.credentials[0]
password = self.credentials[1]
resp = self.xmlrpc.LogIn(username, password, LANGUAGE, OPENSUBTITLES_UA)
self._ensure_success(resp)
self.token = resp.get('token')
def find_by_query(self, query):
"""Find subtitles by query.
Note that it first tries to find and return a local fixture,
and only does an HTTP call if none was found.
:param query: query string describing movie
:returns: list of subtitles that match query
"""
qry = query.lower().strip()
resp = self._fixture('query', qry) \
or self._find({'query': qry, 'sublanguageid': 'eng'})
return self._resp_to_model(resp)
def find_subtitles_for_movie(self, imdb_id):
"""Find subtitle by IMDb ID.
Note that it first tries to find and return a local fixture,
and only does an HTTP call if none was found.
:param imdb_id: IMDb ID of movie (starts with 'tt')
:returns: list of subtitles for movie
"""
search_id = imdb_id.replace('tt', '').lstrip('0')
resp = self._fixture('id', imdb_id) \
or self._find({'imdbid': search_id, 'sublanguageid': 'eng'})
return self._resp_to_model(resp)
def load_text(self, subtitle_id, subtitle_encoding):
"""Load subtitle text for movie.
:param subtitle_id: ID of subtitle
:param subtitle_encoding: encoding of subtitle text
:returns: string with movie subtitle text
"""
resp = self._fixture('subtitle', subtitle_id) \
or self._download(subtitle_id)
text = resp.get('data')[0].get('data')
text = base64.standard_b64decode(text)
text = zlib.decompress(text, 47)
text = str(text, subtitle_encoding)
text = text.lstrip(UNICODE_BOM)
text = re.sub(NEWLINE_PATTERN, '\n', text)
return text
@retry(stop_max_delay=5000, stop_max_attempt_number=3)
@retry(stop_max_delay=5000, stop_max_attempt_number=3)
| 32.487603
| 84
| 0.640041
|
import base64
import json
import os
import re
import zlib
from retrying import retry
from xmlrpc.client import ServerProxy
from api.fixture import load_fixture
from api.subtitle.model import to_model
LANGUAGE = 'en'
NEWLINE_PATTERN = re.compile(r'(\r\n|\r|\n)')
OPENSUBTITLES_URL = 'http://api.opensubtitles.org/xml-rpc'
OPENSUBTITLES_UA = 'subvoc v1.0'
UNICODE_BOM = u'\N{ZERO WIDTH NO-BREAK SPACE}'
class OpenSubtitles:
"""API client to download subtitles from opensubtitles.org"""
def __init__(self, credentials, client=None):
"""Constructor to prepare API connection.
        :param credentials: username/password tuple
:param client: optional, custom XMLRPC client
"""
self.token = None
self.credentials = credentials
self.xmlrpc = client or ServerProxy(OPENSUBTITLES_URL, allow_none=True)
def login(self):
"""Request and save authentication token.
:raises RuntimeError: if login fails
"""
username = self.credentials[0]
password = self.credentials[1]
resp = self.xmlrpc.LogIn(username, password, LANGUAGE, OPENSUBTITLES_UA)
self._ensure_success(resp)
self.token = resp.get('token')
def find_by_query(self, query):
"""Find subtitles by query.
Note that it first tries to find and return a local fixture,
and only does an HTTP call if none was found.
:param query: query string describing movie
:returns: list of subtitles that match query
"""
qry = query.lower().strip()
resp = self._fixture('query', qry) \
or self._find({'query': qry, 'sublanguageid': 'eng'})
return self._resp_to_model(resp)
def find_subtitles_for_movie(self, imdb_id):
"""Find subtitle by IMDb ID.
Note that it first tries to find and return a local fixture,
and only does an HTTP call if none was found.
:param imdb_id: IMDb ID of movie (starts with 'tt')
:returns: list of subtitles for movie
"""
search_id = imdb_id.replace('tt', '').lstrip('0')
resp = self._fixture('id', imdb_id) \
or self._find({'imdbid': search_id, 'sublanguageid': 'eng'})
return self._resp_to_model(resp)
def load_text(self, subtitle_id, subtitle_encoding):
"""Load subtitle text for movie.
:param subtitle_id: ID of subtitle
:param subtitle_encoding: encoding of subtitle text
:returns: string with movie subtitle text
"""
resp = self._fixture('subtitle', subtitle_id) \
or self._download(subtitle_id)
text = resp.get('data')[0].get('data')
text = base64.standard_b64decode(text)
text = zlib.decompress(text, 47)
text = str(text, subtitle_encoding)
text = text.lstrip(UNICODE_BOM)
text = re.sub(NEWLINE_PATTERN, '\n', text)
return text
@retry(stop_max_delay=5000, stop_max_attempt_number=3)
def _download(self, subtitle_id):
if not self.token:
self.login()
resp = self.xmlrpc.DownloadSubtitles(self.token, [subtitle_id])
self._ensure_success(resp)
return resp
@retry(stop_max_delay=5000, stop_max_attempt_number=3)
def _find(self, query):
if not self.token:
self.login()
resp = self.xmlrpc.SearchSubtitles(self.token, [query], [{'limit': 500}])
self._ensure_success(resp)
return resp
def _ensure_success(self, resp):
if resp.get('status').split()[0] != '200':
raise RuntimeError("received status {}".format(resp.get('status')))
def _fixture(self, directory, arg):
data = load_fixture(os.path.join('opensubtitles', directory, arg + '.json'))
if data:
return json.loads(data)
def _resp_to_model(self, resp):
return [to_model(item) for item in resp.get('data')]
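A minimal way to exercise the client above. The credentials are placeholders, and a real search needs either a matching local fixture or working opensubtitles.org access, so treat this as a sketch rather than a guaranteed-to-run snippet:

from api.subtitle.opensubtitles import OpenSubtitles

client = OpenSubtitles(('my_username', 'my_password'))  # hypothetical credentials
results = client.find_by_query('the matrix')            # list of model objects via to_model()
print(len(results))
# client.load_text(subtitle_id, 'utf-8') would then download and decode one subtitle's text.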
| 749
| 0
| 133
|
8254adb03861f55618a48b8c21e079a5ff47bf47
| 19,391
|
py
|
Python
|
TrainingExtensions/tensorflow/src/python/aimet_tensorflow/channel_pruning/channel_pruner.py
|
quic-sendilk/aimet
|
85b183955f7cf17cdb5fba76a6d48cc7e57d878c
|
[
"BSD-3-Clause"
] | 3
|
2021-08-23T13:00:54.000Z
|
2021-11-17T10:52:36.000Z
|
TrainingExtensions/tensorflow/src/python/aimet_tensorflow/channel_pruning/channel_pruner.py
|
4ant00ra/aimet
|
c6ffd3c31c290fe0913b50831d58534f6df61d76
|
[
"BSD-3-Clause"
] | null | null | null |
TrainingExtensions/tensorflow/src/python/aimet_tensorflow/channel_pruning/channel_pruner.py
|
4ant00ra/aimet
|
c6ffd3c31c290fe0913b50831d58534f6df61d76
|
[
"BSD-3-Clause"
] | null | null | null |
# /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2019, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Prunes layers using Channel Pruning scheme """
from typing import List, Dict, Tuple, Set
import copy
import tensorflow as tf
import numpy as np
# Import aimet specific modules
from aimet_common.defs import CostMetric, LayerCompRatioPair
from aimet_common.utils import AimetLogger
from aimet_common.pruner import Pruner
from aimet_common.channel_pruner import select_channels_to_prune
from aimet_common.cost_calculator import CostCalculator, Cost
from aimet_common.winnow.winnow_utils import update_winnowed_channels
from aimet_tensorflow.utils.graph_saver import save_and_load_graph
from aimet_tensorflow.utils.common import is_op_compressible, get_ordered_ops
from aimet_tensorflow.layer_database import Layer, LayerDatabase
from aimet_tensorflow.utils.op.conv import WeightTensorUtils
from aimet_tensorflow.winnow import winnow
from aimet_tensorflow.channel_pruning.data_subsampler import DataSubSampler
from aimet_tensorflow.channel_pruning.weight_reconstruction import WeightReconstructor
from aimet_tensorflow.common.graph_eval import initialize_uninitialized_vars
logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.ChannelPruning)
class InputChannelPruner(Pruner):
"""
Pruner for Channel Pruning method
"""
def __init__(self, input_op_names: List[str], output_op_names: List[str], data_set: tf.data.Dataset,
batch_size: int, num_reconstruction_samples: int, allow_custom_downsample_ops: bool):
"""
        Input Channel Pruner with the given dataset, batch size and number of reconstruction samples.
:param input_op_names: list of input op names
:param output_op_names: List of output op names of the model, used to help ConnectedGraph determine valid ops
(to ignore training ops for example).
:param data_set: data set to be used with the model
:param batch_size: batch size
:param num_reconstruction_samples: number of reconstruction samples
:param allow_custom_downsample_ops: allow downsample/upsample ops to be inserted
"""
self._input_op_names = input_op_names
self._output_op_names = output_op_names
self._data_set = data_set
self._batch_size = batch_size
self._num_reconstruction_samples = num_reconstruction_samples
self._allow_custom_downsample_ops = allow_custom_downsample_ops
@staticmethod
def _select_inp_channels(layer: Layer, comp_ratio: float) -> list:
"""
:param layer: layer for which input channels to prune are selected.
:param comp_ratio: the ratio of costs after pruning has taken place
0 < comp_ratio <= 1.
:return: prune_indices: list of input channels indices to prune.
"""
assert layer.module.type == 'Conv2D'
weight_index = WeightTensorUtils.get_tensor_index_in_given_op(layer.module)
weight_tensor = layer.model.run(layer.module.inputs[weight_index])
# Conv2d weight shape in TensorFlow [kh, kw, Nic, Noc]
# re order in the common shape [Noc, Nic, kh, kw]
weight_tensor = np.transpose(weight_tensor, (3, 2, 0, 1))
num_in_channels = weight_tensor.shape[1]
prune_indices = select_channels_to_prune(weight_tensor, comp_ratio, num_in_channels)
return prune_indices
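    # A minimal, shape-only sketch of the reordering above (a hypothetical 3x3 conv with
    # 4 input and 8 output channels is assumed purely for illustration):
    #
    #     w_tf = np.zeros((3, 3, 4, 8))                 # TF layout [kh, kw, Nic, Noc]
    #     w_common = np.transpose(w_tf, (3, 2, 0, 1))   # common layout [Noc, Nic, kh, kw]
    #     assert w_common.shape == (8, 4, 3, 3)
    #
    # select_channels_to_prune() then picks input channels (out of Nic = 4 here) to remove
    # for the requested comp_ratio and returns their indices.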
def _data_subsample_and_reconstruction(self, orig_layer: Layer, pruned_layer: Layer, output_mask: List[int],
orig_layer_db: LayerDatabase, comp_layer_db: LayerDatabase):
"""
        Collect sub-sampled output data from the original layer and input data from the pruned layer, and set the
        reconstructed weight and bias on the pruned layer in the compressed model database
:param orig_layer: layer from original model
:param pruned_layer: layer from potentially compressed model
:param output_mask : output mask that specifies certain output channels to remove
:param orig_layer_db: original Layer database without any compression
:param comp_layer_db: compressed Layer database
:return:
"""
sub_sampled_inp, sub_sampled_out = DataSubSampler.get_sub_sampled_data(orig_layer, pruned_layer,
self._input_op_names, orig_layer_db,
comp_layer_db, self._data_set,
self._batch_size,
self._num_reconstruction_samples)
logger.debug("Input Data size: %s, Output data size: %s", len(sub_sampled_inp), len(sub_sampled_out))
# update the weight and bias (if any) using sub sampled input and output data
WeightReconstructor.reconstruct_params_for_conv2d(pruned_layer, sub_sampled_inp, sub_sampled_out, output_mask)
def _sort_on_occurrence(self, sess: tf.Session, layer_comp_ratio_list: List[LayerCompRatioPair]) -> \
List[LayerCompRatioPair]:
"""
        Takes a session and a list of conv layer / comp-ratio pairs and sorts them based on their order of
        occurrence in the model.
        :param sess: tf.Session
        :param layer_comp_ratio_list: layer compression ratio list
        :return: sorted_layer_comp_ratio_list
"""
sorted_layer_comp_ratio_list = []
ordered_ops = get_ordered_ops(graph=sess.graph, starting_op_names=self._input_op_names)
for op in ordered_ops:
if is_op_compressible(op):
for pair in layer_comp_ratio_list:
if op.name == pair.layer.name:
sorted_layer_comp_ratio_list.append(LayerCompRatioPair(pair.layer, pair.comp_ratio))
return sorted_layer_comp_ratio_list
def calculate_compressed_cost(self, layer_db: LayerDatabase,
layer_comp_ratio_list: List[LayerCompRatioPair]) -> Cost:
"""
Calculate cost of a compressed model given a set of layers and corresponding comp-ratios
:param layer_db: Layer database for original model
:param layer_comp_ratio_list: List of (layer + comp-ratio) pairs
:return: Estimated cost of the compressed model
"""
# sort all the layers in layer_comp_ratio_list based on occurrence
layer_comp_ratio_list = self._sort_on_occurrence(layer_db.model, layer_comp_ratio_list)
detached_op_names = set()
# Copy the db
comp_layer_db = copy.deepcopy(layer_db)
current_sess = comp_layer_db.model
for layer_comp_ratio in layer_comp_ratio_list:
orig_layer = layer_db.find_layer_by_name(layer_comp_ratio.layer.name)
comp_ratio = layer_comp_ratio.comp_ratio
if comp_ratio is not None and comp_ratio < 1.0:
# select input channels of conv2d op to winnow
prune_indices = self._select_inp_channels(orig_layer, comp_ratio)
if not prune_indices:
continue
                # Winnow the selected op and modify its affected upstream ops
current_sess, ordered_modules_list = winnow.winnow_tf_model(current_sess, self._input_op_names,
self._output_op_names,
[(orig_layer.module, prune_indices)],
reshape=self._allow_custom_downsample_ops,
in_place=True, verbose=False)
if not ordered_modules_list:
continue
# Get all the detached op names from updated session graph
for orig_op_name, _, _, _ in ordered_modules_list:
detached_op_names.add(orig_op_name)
# update layer database by excluding the detached ops
comp_layer_db.update_database(current_sess, detached_op_names, update_model=False)
# calculate the cost of this model
compressed_model_cost = CostCalculator.compute_model_cost(comp_layer_db)
# close the session associated with compressed layer database
comp_layer_db.model.close()
return compressed_model_cost
@staticmethod
def _update_pruned_ops_and_masks_info(
ordered_modules_list: List[Tuple[str, tf.Operation, List[List[int]], List[List[int]]]],
orig_layer_name_to_pruned_name_and_mask_dict: Dict[str, Tuple[str, List[int]]],
pruned_name_to_orig_name_dict: Dict[str, str],
detached_op_names: Set[str]):
"""
Update dictionaries with information about newly winnowed ops and masks
:param ordered_modules_list: Output of winnow_tf_model holding information on winnowed ops and masks
:param orig_layer_name_to_pruned_name_and_mask_dict: Dictionary mapping original layer names to most recent
pruned op name and most recent output masks.
:param pruned_name_to_orig_name_dict: Dictionary mapping pruned layer names to original layer names (if a layer
        was winnowed in multiple rounds of winnow_tf_model, there may be multiple pruned layer names mapping to the same
original layer name)
:param detached_op_names: Set holding names of operations which are detached due to winnowing and should not be
used.
"""
for prepruned_op_name, pruned_op, _, output_masks in ordered_modules_list:
detached_op_names.add(prepruned_op_name)
if pruned_op.type == 'Conv2D': # Currently, we only care about tracking information about conv ops
if prepruned_op_name in pruned_name_to_orig_name_dict:
# the op was already pruned once prior to this most recent round of winnowing
original_op_name = pruned_name_to_orig_name_dict[prepruned_op_name]
# Get and update previous pruned op name and output mask
_, running_output_mask = \
orig_layer_name_to_pruned_name_and_mask_dict.get(original_op_name, (None, None))
assert running_output_mask is not None
# Replace previous pruned op name with most recent pruned op name
# Update output mask
update_winnowed_channels(running_output_mask, output_masks[0])
orig_layer_name_to_pruned_name_and_mask_dict[original_op_name] = (pruned_op.name,
running_output_mask)
else:
# This is the first time this op is being pruned
# The name should not show up in either dict
assert prepruned_op_name not in orig_layer_name_to_pruned_name_and_mask_dict
assert prepruned_op_name not in pruned_name_to_orig_name_dict
original_op_name = prepruned_op_name
# Add output channel mask info to layer_to_masks_dict
orig_layer_name_to_pruned_name_and_mask_dict[prepruned_op_name] = (pruned_op.name,
output_masks[0])
# Map pruned op's name to original op name in pruned_to_orig_name_dict
pruned_name_to_orig_name_dict[pruned_op.name] = original_op_name
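        # A small sketch of what the two bookkeeping dicts hold after a conv named
        # 'conv1/Conv2D' has been winnowed once (the names and mask below are placeholder values):
        #
        #     orig_layer_name_to_pruned_name_and_mask_dict == {
        #         'conv1/Conv2D': ('pruned_conv1/Conv2D', running_output_mask)}
        #     pruned_name_to_orig_name_dict == {'pruned_conv1/Conv2D': 'conv1/Conv2D'}
        #
        # If the same layer is winnowed again in a later round, its current (pruned) name is looked up
        # in pruned_name_to_orig_name_dict and the running mask is merged via update_winnowed_channels().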
def _reconstruct_layers(self, layers_to_reconstruct: List[Layer],
orig_layer_name_to_pruned_name_and_mask_dict: Dict[str, Tuple[str, List[int]]],
layer_db: LayerDatabase, comp_layer_db: LayerDatabase):
"""
Reconstruct weights and biases of layers in the layers_to_reconstruct list.
:param layers_to_reconstruct: List of layers to reconstruct weights and biases of
:param orig_layer_name_to_pruned_name_and_mask_dict: Dictionary mapping original layer names to most recent
pruned op name and most recent output masks.
:param layer_db: Original layer database
:param comp_layer_db: Compressed layer database
"""
for layer in layers_to_reconstruct:
            # Get the layer's output mask, which contains information about all channels winnowed since the start
pruned_layer_name, output_mask = \
orig_layer_name_to_pruned_name_and_mask_dict.get(layer.name, (None, None))
assert pruned_layer_name is not None
pruned_layer = comp_layer_db.find_layer_by_name(pruned_layer_name)
self._data_subsample_and_reconstruction(layer, pruned_layer, output_mask, layer_db, comp_layer_db)
class ChannelPruningCostCalculator(CostCalculator):
""" Cost calculation utilities for Channel Pruning """
def calculate_compressed_cost(self, layer_db: LayerDatabase,
layer_ratio_list: List[LayerCompRatioPair], cost_metric: CostMetric) -> Cost:
"""
Calculate compressed cost of a model given a list of layer-compression-ratio pairs
:param layer_db: Layer database for the original model
:param layer_ratio_list: List of layer, compression-ratio
:param cost_metric: Cost metric to use for compression (mac or memory)
:return: Compressed cost
"""
# Special logic for channel pruning - we first actually prune the model and then determine its cost
# Because it is not easy to estimate it otherwise
compressed_cost = self._pruner.calculate_compressed_cost(layer_db, layer_ratio_list)
return compressed_cost
| 51.847594
| 120
| 0.657264
|
# /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2019, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Prunes layers using Channel Pruning scheme """
from typing import List, Dict, Tuple, Set
import copy
import tensorflow as tf
import numpy as np
# Import aimet specific modules
from aimet_common.defs import CostMetric, LayerCompRatioPair
from aimet_common.utils import AimetLogger
from aimet_common.pruner import Pruner
from aimet_common.channel_pruner import select_channels_to_prune
from aimet_common.cost_calculator import CostCalculator, Cost
from aimet_common.winnow.winnow_utils import update_winnowed_channels
from aimet_tensorflow.utils.graph_saver import save_and_load_graph
from aimet_tensorflow.utils.common import is_op_compressible, get_ordered_ops
from aimet_tensorflow.layer_database import Layer, LayerDatabase
from aimet_tensorflow.utils.op.conv import WeightTensorUtils
from aimet_tensorflow.winnow import winnow
from aimet_tensorflow.channel_pruning.data_subsampler import DataSubSampler
from aimet_tensorflow.channel_pruning.weight_reconstruction import WeightReconstructor
from aimet_tensorflow.common.graph_eval import initialize_uninitialized_vars
logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.ChannelPruning)
class InputChannelPruner(Pruner):
"""
Pruner for Channel Pruning method
"""
def __init__(self, input_op_names: List[str], output_op_names: List[str], data_set: tf.data.Dataset,
batch_size: int, num_reconstruction_samples: int, allow_custom_downsample_ops: bool):
"""
        Input Channel Pruner with the given dataset, batch size and number of reconstruction samples.
:param input_op_names: list of input op names
:param output_op_names: List of output op names of the model, used to help ConnectedGraph determine valid ops
(to ignore training ops for example).
:param data_set: data set to be used with the model
:param batch_size: batch size
:param num_reconstruction_samples: number of reconstruction samples
:param allow_custom_downsample_ops: allow downsample/upsample ops to be inserted
"""
self._input_op_names = input_op_names
self._output_op_names = output_op_names
self._data_set = data_set
self._batch_size = batch_size
self._num_reconstruction_samples = num_reconstruction_samples
self._allow_custom_downsample_ops = allow_custom_downsample_ops
@staticmethod
def _select_inp_channels(layer: Layer, comp_ratio: float) -> list:
"""
:param layer: layer for which input channels to prune are selected.
:param comp_ratio: the ratio of costs after pruning has taken place
0 < comp_ratio <= 1.
:return: prune_indices: list of input channels indices to prune.
"""
assert layer.module.type == 'Conv2D'
weight_index = WeightTensorUtils.get_tensor_index_in_given_op(layer.module)
weight_tensor = layer.model.run(layer.module.inputs[weight_index])
# Conv2d weight shape in TensorFlow [kh, kw, Nic, Noc]
# re order in the common shape [Noc, Nic, kh, kw]
weight_tensor = np.transpose(weight_tensor, (3, 2, 0, 1))
num_in_channels = weight_tensor.shape[1]
prune_indices = select_channels_to_prune(weight_tensor, comp_ratio, num_in_channels)
return prune_indices
def _data_subsample_and_reconstruction(self, orig_layer: Layer, pruned_layer: Layer, output_mask: List[int],
orig_layer_db: LayerDatabase, comp_layer_db: LayerDatabase):
"""
        Collect sub-sampled output data from the original layer and input data from the pruned layer, and set the
        reconstructed weight and bias on the pruned layer in the compressed model database
:param orig_layer: layer from original model
:param pruned_layer: layer from potentially compressed model
:param output_mask : output mask that specifies certain output channels to remove
:param orig_layer_db: original Layer database without any compression
:param comp_layer_db: compressed Layer database
:return:
"""
sub_sampled_inp, sub_sampled_out = DataSubSampler.get_sub_sampled_data(orig_layer, pruned_layer,
self._input_op_names, orig_layer_db,
comp_layer_db, self._data_set,
self._batch_size,
self._num_reconstruction_samples)
logger.debug("Input Data size: %s, Output data size: %s", len(sub_sampled_inp), len(sub_sampled_out))
# update the weight and bias (if any) using sub sampled input and output data
WeightReconstructor.reconstruct_params_for_conv2d(pruned_layer, sub_sampled_inp, sub_sampled_out, output_mask)
def _sort_on_occurrence(self, sess: tf.Session, layer_comp_ratio_list: List[LayerCompRatioPair]) -> \
List[LayerCompRatioPair]:
"""
        Takes a session and a list of conv layer / comp-ratio pairs and sorts them based on their order of
        occurrence in the model.
        :param sess: tf.Session
        :param layer_comp_ratio_list: layer compression ratio list
        :return: sorted_layer_comp_ratio_list
"""
sorted_layer_comp_ratio_list = []
ordered_ops = get_ordered_ops(graph=sess.graph, starting_op_names=self._input_op_names)
for op in ordered_ops:
if is_op_compressible(op):
for pair in layer_comp_ratio_list:
if op.name == pair.layer.name:
sorted_layer_comp_ratio_list.append(LayerCompRatioPair(pair.layer, pair.comp_ratio))
return sorted_layer_comp_ratio_list
def _prune_layer(self, orig_layer_db: LayerDatabase, comp_layer_db: LayerDatabase,
layer: Layer, comp_ratio: float, cost_metric: CostMetric):
pass
def calculate_compressed_cost(self, layer_db: LayerDatabase,
layer_comp_ratio_list: List[LayerCompRatioPair]) -> Cost:
"""
Calculate cost of a compressed model given a set of layers and corresponding comp-ratios
:param layer_db: Layer database for original model
:param layer_comp_ratio_list: List of (layer + comp-ratio) pairs
:return: Estimated cost of the compressed model
"""
# sort all the layers in layer_comp_ratio_list based on occurrence
layer_comp_ratio_list = self._sort_on_occurrence(layer_db.model, layer_comp_ratio_list)
detached_op_names = set()
# Copy the db
comp_layer_db = copy.deepcopy(layer_db)
current_sess = comp_layer_db.model
for layer_comp_ratio in layer_comp_ratio_list:
orig_layer = layer_db.find_layer_by_name(layer_comp_ratio.layer.name)
comp_ratio = layer_comp_ratio.comp_ratio
if comp_ratio is not None and comp_ratio < 1.0:
# select input channels of conv2d op to winnow
prune_indices = self._select_inp_channels(orig_layer, comp_ratio)
if not prune_indices:
continue
                # Winnow the selected op and modify its affected upstream ops
current_sess, ordered_modules_list = winnow.winnow_tf_model(current_sess, self._input_op_names,
self._output_op_names,
[(orig_layer.module, prune_indices)],
reshape=self._allow_custom_downsample_ops,
in_place=True, verbose=False)
if not ordered_modules_list:
continue
# Get all the detached op names from updated session graph
for orig_op_name, _, _, _ in ordered_modules_list:
detached_op_names.add(orig_op_name)
# update layer database by excluding the detached ops
comp_layer_db.update_database(current_sess, detached_op_names, update_model=False)
# calculate the cost of this model
compressed_model_cost = CostCalculator.compute_model_cost(comp_layer_db)
# close the session associated with compressed layer database
comp_layer_db.model.close()
return compressed_model_cost
def prune_model(self, layer_db: LayerDatabase, layer_comp_ratio_list: List[LayerCompRatioPair],
cost_metric: CostMetric, trainer):
# sort all the layers in layer_comp_ratio_list based on occurrence
layer_comp_ratio_list = self._sort_on_occurrence(layer_db.model, layer_comp_ratio_list)
# Copy the db
comp_layer_db = copy.deepcopy(layer_db)
current_sess = comp_layer_db.model
# Dictionary to map original layer name to list of most recent pruned layer name and output mask.
# Masks remain at the original length and specify channels winnowed after each round of winnower.
orig_layer_name_to_pruned_name_and_mask_dict = {}
# Dictionary to map most recent pruned layer name to the original layer name
pruned_name_to_orig_name_dict = {}
# List to hold original layers to reconstruct
layers_to_reconstruct = []
detached_op_names = set()
# Prune layers which have comp ratios less than 1
for layer_comp_ratio in layer_comp_ratio_list:
orig_layer = layer_db.find_layer_by_name(layer_comp_ratio.layer.name)
if layer_comp_ratio.comp_ratio is not None and layer_comp_ratio.comp_ratio < 1.0:
# 1) channel selection
prune_indices = self._select_inp_channels(orig_layer, layer_comp_ratio.comp_ratio)
if not prune_indices:
continue
# 2) Winnowing the model
current_sess, ordered_modules_list = winnow.winnow_tf_model(current_sess, self._input_op_names,
self._output_op_names,
[(orig_layer.module, prune_indices)],
reshape=self._allow_custom_downsample_ops,
in_place=True, verbose=False)
if not ordered_modules_list:
continue
layers_to_reconstruct.append(orig_layer)
# Update dictionaries with new info about pruned ops and new masks
self._update_pruned_ops_and_masks_info(ordered_modules_list,
orig_layer_name_to_pruned_name_and_mask_dict,
pruned_name_to_orig_name_dict,
detached_op_names)
# Save and reload modified graph to allow changes to take effect
# Need to initialize uninitialized variables first since only newly winnowed conv ops are initialized during
# winnow_tf_model, and all other newly winnowed ops are not.
with current_sess.graph.as_default():
initialize_uninitialized_vars(current_sess)
current_sess = save_and_load_graph('./saver', current_sess)
comp_layer_db.update_database(current_sess, detached_op_names, update_model=True)
# Perform reconstruction
self._reconstruct_layers(layers_to_reconstruct, orig_layer_name_to_pruned_name_and_mask_dict, layer_db,
comp_layer_db)
return comp_layer_db
@staticmethod
def _update_pruned_ops_and_masks_info(
ordered_modules_list: List[Tuple[str, tf.Operation, List[List[int]], List[List[int]]]],
orig_layer_name_to_pruned_name_and_mask_dict: Dict[str, Tuple[str, List[int]]],
pruned_name_to_orig_name_dict: Dict[str, str],
detached_op_names: Set[str]):
"""
Update dictionaries with information about newly winnowed ops and masks
:param ordered_modules_list: Output of winnow_tf_model holding information on winnowed ops and masks
:param orig_layer_name_to_pruned_name_and_mask_dict: Dictionary mapping original layer names to most recent
pruned op name and most recent output masks.
:param pruned_name_to_orig_name_dict: Dictionary mapping pruned layer names to original layer names (if a layer
        was winnowed in multiple rounds of winnow_tf_model, there may be multiple pruned layer names mapping to the same
original layer name)
:param detached_op_names: Set holding names of operations which are detached due to winnowing and should not be
used.
"""
for prepruned_op_name, pruned_op, _, output_masks in ordered_modules_list:
detached_op_names.add(prepruned_op_name)
if pruned_op.type == 'Conv2D': # Currently, we only care about tracking information about conv ops
if prepruned_op_name in pruned_name_to_orig_name_dict:
# the op was already pruned once prior to this most recent round of winnowing
original_op_name = pruned_name_to_orig_name_dict[prepruned_op_name]
# Get and update previous pruned op name and output mask
_, running_output_mask = \
orig_layer_name_to_pruned_name_and_mask_dict.get(original_op_name, (None, None))
assert running_output_mask is not None
# Replace previous pruned op name with most recent pruned op name
# Update output mask
update_winnowed_channels(running_output_mask, output_masks[0])
orig_layer_name_to_pruned_name_and_mask_dict[original_op_name] = (pruned_op.name,
running_output_mask)
else:
# This is the first time this op is being pruned
# The name should not show up in either dict
assert prepruned_op_name not in orig_layer_name_to_pruned_name_and_mask_dict
assert prepruned_op_name not in pruned_name_to_orig_name_dict
original_op_name = prepruned_op_name
# Add output channel mask info to layer_to_masks_dict
orig_layer_name_to_pruned_name_and_mask_dict[prepruned_op_name] = (pruned_op.name,
output_masks[0])
# Map pruned op's name to original op name in pruned_to_orig_name_dict
pruned_name_to_orig_name_dict[pruned_op.name] = original_op_name
def _reconstruct_layers(self, layers_to_reconstruct: List[Layer],
orig_layer_name_to_pruned_name_and_mask_dict: Dict[str, Tuple[str, List[int]]],
layer_db: LayerDatabase, comp_layer_db: LayerDatabase):
"""
Reconstruct weights and biases of layers in the layers_to_reconstruct list.
:param layers_to_reconstruct: List of layers to reconstruct weights and biases of
:param orig_layer_name_to_pruned_name_and_mask_dict: Dictionary mapping original layer names to most recent
pruned op name and most recent output masks.
:param layer_db: Original layer database
:param comp_layer_db: Compressed layer database
"""
for layer in layers_to_reconstruct:
            # Get the layer's output mask, which contains information about all channels winnowed since the start
pruned_layer_name, output_mask = \
orig_layer_name_to_pruned_name_and_mask_dict.get(layer.name, (None, None))
assert pruned_layer_name is not None
pruned_layer = comp_layer_db.find_layer_by_name(pruned_layer_name)
self._data_subsample_and_reconstruction(layer, pruned_layer, output_mask, layer_db, comp_layer_db)
class ChannelPruningCostCalculator(CostCalculator):
""" Cost calculation utilities for Channel Pruning """
def __init__(self, pruner: InputChannelPruner):
self._pruner = pruner
def calculate_compressed_cost(self, layer_db: LayerDatabase,
layer_ratio_list: List[LayerCompRatioPair], cost_metric: CostMetric) -> Cost:
"""
Calculate compressed cost of a model given a list of layer-compression-ratio pairs
:param layer_db: Layer database for the original model
:param layer_ratio_list: List of layer, compression-ratio
:param cost_metric: Cost metric to use for compression (mac or memory)
:return: Compressed cost
"""
# Special logic for channel pruning - we first actually prune the model and then determine its cost
# Because it is not easy to estimate it otherwise
compressed_cost = self._pruner.calculate_compressed_cost(layer_db, layer_ratio_list)
return compressed_cost
| 3,532
| 0
| 81
|
34c77104cbf677459d426c7e0ff996874e4646da
| 6,521
|
py
|
Python
|
fitness_tracker/notes/workouts/create_workout_window.py
|
JuricaRT/fitness_tracker
|
178ab7f20cc3cabcb5b2cad34cebfd4aa20c4fc6
|
[
"MIT"
] | 1
|
2020-08-30T11:30:25.000Z
|
2020-08-30T11:30:25.000Z
|
fitness_tracker/notes/workouts/create_workout_window.py
|
JuricaRT/fitness_tracker
|
178ab7f20cc3cabcb5b2cad34cebfd4aa20c4fc6
|
[
"MIT"
] | 21
|
2020-08-23T17:14:52.000Z
|
2021-05-04T19:22:21.000Z
|
fitness_tracker/notes/workouts/create_workout_window.py
|
JuricaRT/fitness_tracker
|
178ab7f20cc3cabcb5b2cad34cebfd4aa20c4fc6
|
[
"MIT"
] | null | null | null |
import json
from datetime import datetime
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QHBoxLayout, QLabel, QLineEdit, QPushButton, QMessageBox
from PyQt5.QtGui import QIntValidator
from PyQt5.QtCore import Qt, pyqtSignal
from fitness_tracker.database_wrapper import DatabaseWrapper
| 40.75625
| 137
| 0.698973
|
import json
from datetime import datetime
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QHBoxLayout, QLabel, QLineEdit, QPushButton, QMessageBox
from PyQt5.QtGui import QIntValidator
from PyQt5.QtCore import Qt, pyqtSignal
from fitness_tracker.database_wrapper import DatabaseWrapper
class CreateWorkoutWindow(QWidget):
refresh_my_workouts_signal = pyqtSignal(str)
refresh_after_creating_signal = pyqtSignal(bool)
def __init__(self, workout_name=None, one_time=False, date=None):
super().__init__()
self.setStyleSheet("""
QWidget{
background-color: #232120;
font-weight: bold;
color:#c7c7c7;
}
QPushButton{
background-color: rgba(0, 0, 0, 0);
border: 1px solid;
font-size: 18px;
font-weight: bold;
border-color: #808080;
min-height: 28px;
white-space:nowrap;
text-align: center;
padding-left: 5%;
font-family: Montserrat;
}
QPushButton:hover:!pressed{
border: 2px solid;
border-color: #747474;
}
QPushButton:pressed{
border: 2px solid;
background-color: #323232;
}
""")
self.setWindowFlags(Qt.FramelessWindowHint | Qt.Tool)
self.setWindowModality(Qt.ApplicationModal)
if workout_name != None:
self.setWindowTitle("Edit Workout")
else:
self.setWindowTitle("Create a New Workout")
self.db_wrapper = DatabaseWrapper()
self.table_name = "Workouts"
self.workout_name = workout_name
self.one_time = one_time
self.current_date = date
self.fetched_my_workouts = json.loads(self.db_wrapper.fetch_local_column(self.table_name, "my_workouts"))
self.create_panel()
def create_panel(self):
layout = QVBoxLayout()
workout_name_layout = QHBoxLayout()
workout_name_label = QLabel("Workout Name:")
self.workout_name_edit = QLineEdit()
workout_name_layout.addWidget(workout_name_label)
workout_name_layout.addWidget(self.workout_name_edit)
grid_layout = QGridLayout()
empty = QLabel("")
name_label = QLabel("Name")
sets_label = QLabel("Sets")
reps_label = QLabel("Reps")
rest_label = QLabel("Rest(Opt.)[min]")
grid_layout.addWidget(empty, 0, 0)
grid_layout.addWidget(name_label, 0, 1)
grid_layout.addWidget(sets_label, 0, 2)
grid_layout.addWidget(reps_label, 0, 3)
grid_layout.addWidget(rest_label, 0, 4)
exercise_number = [None] * 10
self.name_edits = [None] * 10
self.sets_edits = [None] * 10
self.reps_edits = [None] * 10
self.rest_edits = [None] * 10
j = 1
for i in range(10):
exercise_number[i] = QLabel("Exercise #"+ str(i+1))
self.name_edits[i], self.sets_edits[i], self.reps_edits[i], self.rest_edits[i] = QLineEdit(), QLineEdit(), QLineEdit(), QLineEdit()
self.sets_edits[i].setValidator(QIntValidator())
self.reps_edits[i].setValidator(QIntValidator())
self.rest_edits[i].setValidator(QIntValidator())
grid_layout.addWidget(exercise_number[i], j, 0)
grid_layout.addWidget(self.name_edits[i], j, 1)
grid_layout.addWidget(self.sets_edits[i], j, 2)
grid_layout.addWidget(self.reps_edits[i], j, 3)
grid_layout.addWidget(self.rest_edits[i], j, 4)
j += 1
if self.workout_name != None:
self.workout_name_edit.setText(self.workout_name)
for i, exercise in enumerate(self.fetched_my_workouts[self.workout_name].keys()):
self.name_edits[i].setText(exercise)
self.sets_edits[i].setText(self.fetched_my_workouts[self.workout_name][exercise]["Sets"])
self.reps_edits[i].setText(self.fetched_my_workouts[self.workout_name][exercise]["Reps"])
self.rest_edits[i].setText(self.fetched_my_workouts[self.workout_name][exercise]["Rest"])
buttons_layout = QHBoxLayout()
save_button = QPushButton("Save")
save_button.clicked.connect(lambda: self.save_changes())
cancel_button = QPushButton("Cancel")
cancel_button.clicked.connect(lambda: self.close())
buttons_layout.addWidget(save_button)
if self.workout_name != None:
delete_button = QPushButton("Delete Workout")
delete_button.clicked.connect(lambda: self.delete_workout())
buttons_layout.addWidget(delete_button)
buttons_layout.addWidget(cancel_button)
layout.addLayout(workout_name_layout)
layout.addLayout(grid_layout)
layout.addLayout(buttons_layout)
self.setLayout(layout)
def delete_workout(self):
message_box = QMessageBox()
message_box.setIcon(QMessageBox.Question)
message_box.setText("Are you sure you want to delete this workout?")
message_box.setWindowTitle("Confirm delete")
message_box.setStandardButtons(QMessageBox.Cancel | QMessageBox.Ok)
message_box.buttonClicked.connect(lambda answer: self.delete_workout_confirmed(answer.text()))
message_box.exec_()
def delete_workout_confirmed(self, answer):
if "OK" in answer:
del self.fetched_my_workouts[self.workout_name]
self.db_wrapper.update_table_column(self.table_name, "my_workouts", json.dumps(self.fetched_my_workouts))
self.refresh_my_workouts_signal.emit(self.workout_name)
self.close()
def save_changes(self):
workout_name = self.workout_name_edit.text()
if workout_name != "":
new_workout = {}
for i in range(10):
exercise_dict = {}
if self.name_edits[i].text() != "" and self.sets_edits[i].text() != "" and self.reps_edits[i].text() != "":
exercise_dict["Sets"] = str(self.sets_edits[i].text())
exercise_dict["Reps"] = str(self.reps_edits[i].text())
exercise_dict["Rest"] = "N/A" if self.rest_edits[i].text() == "" else str(self.rest_edits[i].text())
new_workout[self.name_edits[i].text()] = exercise_dict
if self.one_time == False:
self.fetched_my_workouts[workout_name] = new_workout
self.db_wrapper.update_table_column(self.table_name, "my_workouts", json.dumps(self.fetched_my_workouts))
else:
workouts = json.loads(self.db_wrapper.fetch_local_column(self.table_name, "workouts"))
if not self.current_date in workouts:
workouts[self.current_date] = {"Personal Notes": "", "Workout Name": "None"}
workouts[self.current_date]["Workout Name"] = workout_name
workouts[self.current_date]["Exercises"] = new_workout
self.db_wrapper.update_table_column(self.table_name, "workouts", json.dumps(workouts))
self.refresh_after_creating_signal.emit(True)
self.close()
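# A minimal sketch of the JSON that save_changes() above writes to the "my_workouts" column;
# the workout and exercise names are made-up examples, the nesting mirrors what the code builds.
def _example_my_workouts_payload():
  return json.dumps({
    "Push Day": {
      "Bench Press": {"Sets": "3", "Reps": "8", "Rest": "2"},
      "Overhead Press": {"Sets": "3", "Reps": "10", "Rest": "N/A"}
    }
  })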
| 5,958
| 239
| 23
|
2d947a8901e26ca611324dba4e552212acbd3611
| 142
|
py
|
Python
|
DataProfileViewer/__init__.py
|
remram44/DataProfileVis
|
ca8a02f462c2860ce874805e2f036bdc2c3035d1
|
[
"BSD-3-Clause"
] | null | null | null |
DataProfileViewer/__init__.py
|
remram44/DataProfileVis
|
ca8a02f462c2860ce874805e2f036bdc2c3035d1
|
[
"BSD-3-Clause"
] | null | null | null |
DataProfileViewer/__init__.py
|
remram44/DataProfileVis
|
ca8a02f462c2860ce874805e2f036bdc2c3035d1
|
[
"BSD-3-Clause"
] | 1
|
2020-10-29T17:03:29.000Z
|
2020-10-29T17:03:29.000Z
|
from ._plot_metadata_table import plot_data_summary, plot_edit_profiler, get_exported_metadata
from ._demodata import get_lifeexpectancy_data
| 47.333333
| 94
| 0.901408
|
from ._plot_metadata_table import plot_data_summary, plot_edit_profiler, get_exported_metadata
from ._demodata import get_lifeexpectancy_data
| 0
| 0
| 0
|
8c79cfb53997f5073994566dbb00f69088c6de42
| 30,204
|
py
|
Python
|
sql/views.py
|
a4221722/autops
|
d59aedd2b505cd0ecda5e7715a9eb887927b038b
|
[
"Apache-2.0"
] | 1
|
2019-04-20T06:08:09.000Z
|
2019-04-20T06:08:09.000Z
|
sql/views.py
|
a4221722/autops
|
d59aedd2b505cd0ecda5e7715a9eb887927b038b
|
[
"Apache-2.0"
] | null | null | null |
sql/views.py
|
a4221722/autops
|
d59aedd2b505cd0ecda5e7715a9eb887927b038b
|
[
"Apache-2.0"
] | 2
|
2019-01-18T03:55:37.000Z
|
2020-05-15T03:32:01.000Z
|
# -*- coding: UTF-8 -*-
import re
import json
import multiprocessing
import math
from collections import OrderedDict
import pdb
from django.db.models import Q
from django.db import transaction
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import check_password
from django.core.paginator import Paginator,InvalidPage,EmptyPage,PageNotAnInteger
from .daoora import DaoOra
from .const import Const
from .sendmail import MailSender
from .aes_decryptor import Prpcrypt
from .models import *
from .getnow import getNow
from .tasks import oraAutoReview,mailDba,wechatDba,dingDba
daoora = DaoOra()
prpCryptor = Prpcrypt()
cryColList = ['cert_no','qq','cell','card_no','database_password']
configMap = {
'oracle':ora_primary_config,
'mysql':'my_primary_config'}
daoMap = {
'oracle':daoora,
'mysql':'my_master_config'}
#Home page; also the page listing all SQL work orders, with pagination
#Page for submitting Oracle SQL
#Map a Chinese display name to the corresponding username
#Decide the work order type and handle it accordingly
#Show SQL work order details; also allows manual review, and once approved the order can be executed
#Manual review passed as well, execute the SQL
#Abort the workflow
#Engineer confirmation
#Check whether the logged-in user is admin
#Data synchronization
@check_admin
#Query feature
@csrf_exempt
#Required reading for SQL review
#Chart display
#Get the URL of the current request
#Show the list of database schemas
#Personal center
#Configure user privileges
@check_admin
| 43.458993
| 463
| 0.683121
|
# -*- coding: UTF-8 -*-
import re
import json
import multiprocessing
import math
from collections import OrderedDict
import pdb
from django.db.models import Q
from django.db import transaction
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import check_password
from django.core.paginator import Paginator,InvalidPage,EmptyPage,PageNotAnInteger
from .daoora import DaoOra
from .const import Const
from .sendmail import MailSender
from .aes_decryptor import Prpcrypt
from .models import *
from .getnow import getNow
from .tasks import oraAutoReview,mailDba,wechatDba,dingDba
daoora = DaoOra()
prpCryptor = Prpcrypt()
cryColList = ['cert_no','qq','cell','card_no','database_password']
configMap = {
'oracle':ora_primary_config,
'mysql':'my_primary_config'}
daoMap = {
'oracle':daoora,
'mysql':'my_master_config'}
def login(request):
if request.GET.get('originPath') is not None and re.match(r'/detail/(?P<workflowId>[0-9]+)/$',request.GET.get('originPath')):
origin_path = request.GET.get('originPath')
else:
origin_path = '/allworkflow/'
context={'origin_path':origin_path}
return render(request, 'login.html',context)
def logout(request):
if request.session.get('login_username', False):
del request.session['login_username']
return render(request, 'login.html')
def _mapRmDisplay(reviewMan):
try:
userList = json.loads(reviewMan)
except Exception:
userList = reviewMan
displayList = [users.objects.get(username=un).display for un in userList]
return displayList
def _mapEnDisplay(userName):
try:
display=users.objects.get(username=userName).display
except Exception:
display=''
return display
#Home page; also the page listing all SQL work orders, with pagination
def allworkflow(request):
    #Number of items shown per page
PAGE_LIMIT = 12
pageNo = 0
navStatus = ''
listAllWorkflow = []
#if 'navStatus' in request.GET:
# navStatus = request.GET['navStatus']
#else:
# navStatus = 'all'
hasAffirmed = request.GET.get('hasAffirmed')
searchStatus = request.GET.get('search_status')
waitForPrc = request.GET.get('wait_process')
loginUser = request.session.get('login_username', False)
    #On the all-orders / review-rejected / executed views, engineers can only see work orders they initiated, while reviewers can see all of them
allFlow = []
listWorkflow = []
    #Query all workflows
loginUserOb = users.objects.get(username=loginUser)
    #Query the workflow model and fetch the matching content based on pageNo and navStatus
role = loginUserOb.role
if role == '审核人' or loginUser == 'admin':
allFlow = workflow.objects.values('id','data_change_type','workflow_name','engineer','status','affirm','create_time','cluster_name').order_by('-create_time')
elif role == '工程师':
allFlow = workflow.objects.values('id','data_change_type','workflow_name','engineer','status','affirm','create_time','cluster_name').filter(engineer=loginUser).order_by('-create_time')
#if navStatus == 'all' and (role == '审核人' or loginUser == 'admin'):
# allFlow = workflow.objects.values('id','data_change_type','workflow_name','engineer','status','affirm','create_time','cluster_name').order_by('-create_time')
#elif navStatus == 'all' and role == '工程师':
# allFlow = workflow.objects.values('id','data_change_type','workflow_name','engineer','status','affirm','create_time','cluster_name').filter(engineer=loginUser).order_by('-create_time')
#elif navStatus == 'waitingforme':
# allFlow = workflow.objects.values('id','data_change_type','workflow_name','engineer','status','affirm','create_time','cluster_name').filter(Q(status__in=(Const.workflowStatus['manreviewing'],Const.workflowStatus['manexec'],Const.workflowStatus['autoreviewwrong']),review_man__icontains=loginUser ) | Q(status__in=(Const.workflowStatus['manreviewing'],Const.workflowStatus['manexec']), review_man__contains='"' + loginUser + '"')).order_by('-create_time')
#elif (role == '审核人' or loginUser == 'admin') and navStatus == 'unaffirm':
# allFlow = workflow.objects.values('id','data_change_type','workflow_name','engineer','status','affirm','create_time','cluster_name').filter(status__in=(Const.workflowStatus['finish'],Const.workflowStatus['manfinish'],),affirm = '未确认').order_by('-create_time')
#elif role == '工程师' and navStatus == 'unaffirm':
# allFlow = workflow.objects.values('id','data_change_type','workflow_name','engineer','status','affirm','create_time','cluster_name').filter(engineer=loginUser,status__in=(Const.workflowStatus['finish'],Const.workflowStatus['manfinish'],),affirm = '未确认').order_by('-create_time')
#elif (role == '审核人' or loginUser == 'admin') and navStatus == 'affirm':
# allFlow = workflow.objects.values('id','data_change_type','workflow_name','engineer','status','affirm','create_time','cluster_name').filter(status__in=(Const.workflowStatus['finish'],Const.workflowStatus['manfinish'],),affirm = '已确认').order_by('-create_time')
#elif role == '工程师' and navStatus == 'affirm':
# allFlow = workflow.objects.values('id','data_change_type','workflow_name','engineer','status','affirm','create_time','cluster_name').filter(engineer=loginUser,status__in=(Const.workflowStatus['finish'],Const.workflowStatus['manfinish'],),affirm = '已确认').order_by('-create_time')
# #allFlow = workflow.objects.values('id','data_change_type','workflow_name','engineer','status','create_time','cluster_name').filter(status__in=(Const.workflowStatus['finish'],Const.workflowStatus['exception'],Const.workflowStatus['manfinish'],Const.workflowStatus['manexcept'])).order_by('-create_time')
#elif role == '工程师':
# allFlow = workflow.objects.values('id','data_change_type','workflow_name','engineer','status','affirm','create_time','cluster_name').filter(status__in=(Const.workflowStatus['finish'],Const.workflowStatus['exception'],Const.workflowStatus['manfinish'],Const.workflowStatus['manexcept']),engineer=loginUser).order_by('-create_time')
#elif role == '审核人' or loginUser == 'admin':
# allFlow = workflow.objects.values('id','data_change_type','workflow_name','engineer','status','affirm','create_time','cluster_name').filter(status=Const.workflowStatus[navStatus]).order_by('-create_time')
#elif role == '工程师':
# allFlow = workflow.objects.values('id','data_change_type','workflow_name','engineer','status','affirm','create_time','cluster_name').filter(status=Const.workflowStatus[navStatus],engineer=loginUser).order_by('-create_time')
#else:
# context = {'errMsg': '传入参数有误!'}
# return render(request, 'error.html', context)
if hasAffirmed:
allFlow = allFlow.filter(affirm = hasAffirmed)
if searchStatus:
allFlow = allFlow.filter(status = searchStatus)
if waitForPrc and waitForPrc == '待我处理':
allFlow = allFlow.filter(review_man__contains='"' + loginUser + '"',status__in = (Const.workflowStatus['manreviewing'],Const.workflowStatus['autoreviewwrong'],Const.workflowStatus['manexec']))
pages = math.ceil(len(allFlow)/PAGE_LIMIT)
pageList = [i+1 for i in range(0,pages)]
    #Parameter check
if 'pageNo' in request.GET:
pageNo = min(int(request.GET['pageNo']),pages)
else:
pageNo = 1
if pageNo < 1:
pageNo = 1
offset = (pageNo-1) * PAGE_LIMIT
limit = offset + PAGE_LIMIT
listWorkflow = allFlow[offset:limit]
pageDisplay = [pageNo+i for i in range(-5,5) if pageNo+i>0 and pageNo+i<=pages]
for flow in listWorkflow:
flow['engineer_display']=_mapEnDisplay(flow['engineer'])
flowStatus = [v for k,v in Const.workflowStatus.items()]
#context = {'currentMenu':'allworkflow', 'listWorkflow':listWorkflow,'pages':pages, 'pageNo':pageNo, 'navStatus':navStatus, 'PAGE_LIMIT':PAGE_LIMIT, 'role':role, 'flowStatus':flowStatus}
return render(request, 'allWorkflow.html', locals())
#Page for submitting Oracle SQL
def submitSqlOra(request,workflowId=None):
workflowId = request.GET.get('workflowid')
if workflowId:
workflowDetail = get_object_or_404(workflow, pk=workflowId)
sql_content = workflowDetail.sql_content
workflow_name = workflowDetail.workflow_name
cluster_name = workflowDetail.cluster_name.split(',')
message = workflowDetail.message
reason = workflowDetail.reason
data_change_type = workflowDetail.data_change_type
primaries = ora_primary_config.objects.all().order_by('cluster_name')
if len(primaries) == 0:
context = {'errMsg': '目标数为0, 请查看后端是否没有配置数据库信息'}
return render(request, 'error.html', context)
    #Get all database (cluster) names
listAllClusterName = [primary.cluster_name for primary in primaries]
dictAllClusterSchema = OrderedDict()
    ##Get the primary database addresses:
for clusterName in listAllClusterName:
# listPrimaries = ora_primary_config.objects.filter(cluster_name=clusterName)
# if len(listPrimaries) !=1:
# context = {'errMsg': '存在两个名称一样的数据库,请修改数据库配置'}
# return render(request, 'error.html', context)
    # #Extract the connection info
# primaryHost = listPrimaries[0].primary_host
# primaryPort = listPrimaries[0].primary_port
# primarySrv = listPrimaries[0].primary_srv
# primaryUser = listPrimaries[0].primary_user
# primaryPassword = prpCryptor.decrypt(listPrimaries[0].primary_password)
# listSchema = daoora.getAllSchemaByCluster(clusterName)
dictAllClusterSchema[clusterName] = ''
    #Get all reviewers; the currently logged-in user is excluded from reviewing their own order
loginUser = request.session.get('login_username', False)
reviewMen = users.objects.filter(role='审核人').exclude(username=loginUser)
if len(reviewMen) == 0:
context = {'errMsg': '审核人为0,请配置审核人'}
return render(request, 'error.html', context)
listAllReviewMen = [user.display for user in reviewMen]
if workflowId:
context = {'currentMenu':'submitsqlora', 'dictAllClusterSchema':dictAllClusterSchema, 'reviewMen':listAllReviewMen,'workflowid':workflowId,'sql_content':sql_content,'workflow_name':workflow_name,'message':message,'reason':reason,'data_change_type':data_change_type}
else:
context = {'currentMenu':'submitsqlora', 'dictAllClusterSchema':dictAllClusterSchema, 'reviewMen':listAllReviewMen}
return render(request, 'submitSqlOra.html', context)
#Map a Chinese display name to the corresponding username
def _mapReviewMan(review_man):
usersList = users.objects.filter(display=review_man)
listUsername = [user.username for user in usersList]
return listUsername[0]
#Decide the work order type and handle it accordingly
def workflowSubmit(request):
dataChangeType = request.POST.get('data_change_type')
workflowid = request.POST.get('workflowid')
sqlContent = request.POST['sql_content']
workflowName = request.POST['workflow_name']
clusterNameStr = request.POST.get('cluster_name')
isBackup = request.POST['is_backup']
reviewMan = request.POST.get('review_man').split(',')
listAllReviewMen = [_mapReviewMan(man) for man in reviewMan]
message = request.POST['message']
reason = request.POST['reason']
data_change_type = request.POST['data_change_type']
#subReviewMen = _mapReviewMan(request.POST.get('sub_review_man', ''))
    #Server-side parameter validation
if data_change_type in ('数据修订','数据初始化'):
if sqlContent is None:
context = {'errMsg': 'SQL内容不能为空'}
return render(request, 'error.html', context)
else:
if message is None:
context = {'errMsg': '备注不能为空'}
return render(request, 'error.html', context)
if reason is None or workflowName is None or clusterNameStr is None or isBackup is None or reviewMan is None:
context = {'errMsg': '页面提交参数可能为空'}
return render(request, 'error.html', context)
primaries = ora_primary_config.objects.all().order_by('cluster_name')
if len(primaries) == 0:
context = {'errMsg': '目标数为0, 请查看后端是否没有配置数据库信息'}
return render(request, 'error.html', context)
    #Get all database (cluster) names
listAllClusterName = [primary.cluster_name for primary in primaries]
dictAllClusterSchema = OrderedDict()
    #Get the primary database addresses:
for clusterName in listAllClusterName:
listPrimaries = ora_primary_config.objects.filter(cluster_name=clusterName)
if len(listPrimaries) !=1:
context = {'errMsg': '存在两个名称一样的数据库,请修改数据库配置'}
return render(request, 'error.html', context)
        #Extract the connection info
primaryHost = listPrimaries[0].primary_host
primaryPort = listPrimaries[0].primary_port
primarySrv = listPrimaries[0].primary_srv
primaryUser = listPrimaries[0].primary_user
primaryPassword = prpCryptor.decrypt(listPrimaries[0].primary_password)
    #Decide the work order type and move it to the corresponding status
if data_change_type in ('数据修订','数据初始化') and sqlContent:
sqlContent = sqlContent.rstrip()
if sqlContent[-1] != ";":
context = {'errMsg': "SQL语句结尾没有以;结尾,请后退重新修改并提交!"}
return render(request, 'error.html', context)
workflowStatus = Const.workflowStatus['autoreviewing']
else:
workflowStatus = Const.workflowStatus['manexec']
    #Persist to the database
engineer = request.session.get('login_username', False)
if not workflowid:
Workflow = workflow()
Workflow.create_time = getNow()
else:
Workflow = workflow.objects.get(id=int(workflowid))
Workflow.workflow_name = workflowName
Workflow.engineer = engineer
Workflow.review_man = json.dumps(listAllReviewMen, ensure_ascii=False)
Workflow.status = workflowStatus
Workflow.is_backup = isBackup
Workflow.cluster_name = clusterNameStr
Workflow.sql_content = sqlContent
Workflow.execute_result = ''
Workflow.message = message
Workflow.reason = reason
Workflow.data_change_type = data_change_type
Workflow.save()
workflowId = Workflow.id
if data_change_type in ('数据修订','数据初始化') and sqlContent:
oraAutoReview.delay(workflowId)
url = _getDetailUrl(request) + str(workflowId) +'/'
strTitle = "新的SQL上线工单提醒 # " + str(workflowId)
objEngineer = users.objects.get(username=engineer)
for reviewMan in listAllReviewMen:
if reviewMan == "":
continue
strContent = "发起人:" + engineer + "\n审核人:" + str(listAllReviewMen) + "\n工单地址:" + url + "\n工单名称: " + workflowName+"\n原因:"+ reason+"\n备注说明: "+ message + "\n具体SQL:" + sqlContent
objReviewMan = users.objects.get(username=reviewMan)
mailDba.delay(strTitle, strContent, [objReviewMan.email])
wechatDba.delay(strTitle,strContent,objReviewMan.wechat_account)
dingDba.delay(strContent,objReviewMan.mobile)
return HttpResponseRedirect('/detail/' + str(workflowId) + '/')
def _mapResultSt(x):
if x['stagestatus'] == '连接服务器异常':
return 1
elif x['stagestatus'] == 'sql执行异常':
return 2
elif x['stagestatus'] == '解析失败':
return 3
else:
return 4
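#A short note on how _mapResultSt is used (the 'ok' status below is just a placeholder value):
#it serves as a sort key in detail() so that problem rows surface first; any status other than
#the three error strings maps to 4 and therefore sorts last, e.g.
#    rows = [{'stagestatus': 'ok'}, {'stagestatus': 'sql执行异常'}]
#    rows.sort(key=_mapResultSt)   # the 'sql执行异常' (SQL execution error) row moves to the front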
#Show SQL work order details; also allows manual review, and once approved the order can be executed
def detail(request, workflowId):
PAGE_LIMIT = 12
loginUser = request.session.get('login_username', False)
loginUserObj = users.objects.get(username = loginUser)
workflowDetail = get_object_or_404(workflow, pk=workflowId)
if workflowDetail.status in (Const.workflowStatus['finish'], Const.workflowStatus['exception'],Const.workflowStatus['manfinish'],Const.workflowStatus['manexcept']):
try:
listResult = json.loads(workflowDetail.execute_result)
listResult.sort(key=_mapResultSt)
except Exception as err:
listResult = []
else:
if workflowDetail.review_content:
listResult = json.loads(workflowDetail.review_content)
else:
listResult=[]
pages = math.ceil(len(listResult)/PAGE_LIMIT)
if 'pageNo' in request.GET:
pageNo = min(int(request.GET['pageNo']),pages-1)
else:
pageNo = 0
if pageNo < 0:
pageNo = 0
offset = pageNo * PAGE_LIMIT
limit = offset + PAGE_LIMIT
pageRange = range(pageNo,pageNo+min(4,pages-pageNo)+1)
listContent = listResult[offset:limit]
try:
listAllReviewMen = json.loads(workflowDetail.review_man)
except ValueError:
listAllReviewMen = (workflowDetail.review_man, )
strMessage = workflowDetail.message
workflowStatus = Const.workflowStatus
engineer_display = _mapEnDisplay(workflowDetail.engineer)
reviewman_display = _mapRmDisplay(workflowDetail.review_man)
operator_display = _mapEnDisplay(workflowDetail.operator)
context = {'currentMenu':'allworkflow', 'workflowDetail':workflowDetail, 'listContent':listContent,'pages':pages,'pageNo':pageNo,'PAGE_LIMIT':PAGE_LIMIT,'listAllReviewMen':listAllReviewMen,'pageRange':pageRange,'strMessage':strMessage,'loginUserObj':loginUserObj,'workflowStatus':workflowStatus,'engineer_display':engineer_display,'reviewman_display':reviewman_display,'operator_display':operator_display}
return render(request, 'detail.html', context)
#Manual review passed as well, execute the SQL
def execute(request):
workflowId = request.POST['workflowid']
if workflowId == '' or workflowId is None:
context = {'errMsg': 'workflowId参数为空.'}
return render(request, 'error.html', context)
workflowId = int(workflowId)
with transaction.atomic():
try:
workflowDetail = workflow.objects.select_for_update().get(id=workflowId,status__in=(Const.workflowStatus['manreviewing'],Const.workflowStatus['autoreviewwrong'],))
except Exception:
context = {'errMsg': '已经在执行'}
return render(request, 'error.html', context)
#clusterName = workflowDetail.cluster_name
try:
listAllReviewMen = json.loads(workflowDetail.review_man)
except ValueError:
listAllReviewMen = (workflowDetail.review_man, )
        #Server-side double check: the logged-in user performing the manual review must be a reviewer, to avoid attacks or forced bypass via API testing tools
loginUser = request.session.get('login_username', False)
if loginUser is None or loginUser not in listAllReviewMen:
context = {'errMsg': '当前登录用户不是审核人,请重新登录.'}
return render(request, 'error.html', context)
        #Set the workflow status to executing and update the reviewok_time field
workflowDetail.status = Const.workflowStatus['executing']
workflowDetail.reviewok_time = getNow()
workflowDetail.save()
(finalStatus, finalList) = daoora.executeFinal(workflowDetail)
        #Serialize to JSON and store it in the database field
strJsonResult = json.dumps(finalList)
workflowDetail.execute_result = strJsonResult
workflowDetail.finish_time = getNow()
workflowDetail.status = finalStatus
workflowDetail.operator = loginUser
workflowDetail.save()
        #Once execution has finished, the settings.py configuration decides whether to send a reminder e-mail to the submitter and the DBA; the DBA needs to know about orders that were reviewed and executed
url = _getDetailUrl(request) + str(workflowId) + '/'
        #Send one e-mail each to the primary/secondary reviewers, the applicant and the DBA
engineer = workflowDetail.engineer
reviewMen = workflowDetail.review_man
workflowStatus = workflowDetail.status
workflowName = workflowDetail.workflow_name
objEngineer = users.objects.get(username=engineer)
strTitle = "SQL上线工单执行完毕 # " + str(workflowId)
strContent = "发起人:" + engineer + "\n审核人:" + reviewMen + "\n工单地址:" + url + "\n工单名称: " + workflowName +"\n执行结果:" + workflowStatus
mailDba.delay(strTitle, strContent, [objEngineer.email])
wechatDba.delay(strTitle, strContent,objEngineer.wechat_account)
dingDba.delay(strContent,objEngineer.mobile)
return HttpResponseRedirect('/detail/' + str(workflowId) + '/')
#Abort the workflow
def cancel(request):
workflowId = request.POST['workflowid']
if workflowId == '' or workflowId is None:
context = {'errMsg': 'workflowId参数为空.'}
return render(request, 'error.html', context)
workflowId = int(workflowId)
workflowDetail = workflow.objects.get(id=workflowId)
reviewMan = workflowDetail.review_man
try:
listAllReviewMen = json.loads(reviewMan)
except ValueError:
listAllReviewMen = (reviewMan, )
    #Server-side double check: if the logged-in user performing the abort is neither the initiator nor a reviewer, it is treated as an error.
loginUser = request.session.get('login_username', False)
if loginUser is None or (loginUser not in listAllReviewMen and loginUser != workflowDetail.engineer):
context = {'errMsg': '当前登录用户不是审核人也不是发起人,请重新登录.'}
return render(request, 'error.html', context)
    #Server-side double check: if the order is already in a finished state, it cannot be aborted
if workflowDetail.status in (Const.workflowStatus['abort'], Const.workflowStatus['finish'], Const.workflowStatus['manfinish']):
return HttpResponseRedirect('/detail/' + str(workflowId) + '/')
workflowDetail.status = Const.workflowStatus['abort']
workflowDetail.save()
    #If manually aborted, the settings.py configuration decides whether to notify the submitter and reviewers by e-mail. If the initiator aborts the workflow, each primary/secondary reviewer gets an e-mail; if a reviewer aborts it, the initiator gets an e-mail explaining that the order was rejected and needs to be revised and resubmitted.
url = _getDetailUrl(request) + str(workflowId) + '/'
engineer = workflowDetail.engineer
workflowStatus = workflowDetail.status
workflowName = workflowDetail.workflow_name
if loginUser == engineer:
strTitle = "发起人主动终止SQL上线工单流程 # " + str(workflowId)
strContent = "发起人:" + engineer + "\n审核人:" + reviewMan + "\n工单地址:" + url + "\n工单名称: " + workflowName +"\n执行结果:" + workflowStatus +"\n提醒:发起人主动终止流程"
for reviewMan in listAllReviewMen:
if reviewMan == "":
continue
objReviewMan = users.objects.get(username=reviewMan)
mailDba.delay(strTitle, strContent, [objReviewMan.email])
wechatDba.delay(strTitle, strContent, objReviewMan.wechat_account)
dingDba.delay(strContent,objReviewMan.mobile)
else:
objEngineer = users.objects.get(username=engineer)
strTitle = "SQL上线工单被拒绝执行 # " + str(workflowId)
strContent = "发起人:" + engineer + "\n审核人:" + reviewMan + "\n工单地址:" + url + "\n工单名称: " + workflowName +"\n执行结果:" + workflowStatus +"\n提醒:此工单被拒绝执行,请登陆重新提交或修改工单"
mailDba.delay(strTitle, strContent, [objEngineer.email])
wechatDba.delay(strTitle, strContent, objEngineer.wechat_account)
dingDba.delay(strContent,objEngineer.mobile)
return HttpResponseRedirect('/detail/' + str(workflowId) + '/')
#Engineer confirmation
def engineerAffirm(request):
loginUser = request.session.get('login_user')
workflowId = request.POST['workflowid']
with transaction.atomic():
try:
workflowDetail = workflow.objects.select_for_update().get(id=workflowId,status__in=(Const.workflowStatus['finish'],Const.workflowStatus['manfinish'],))
except Exception:
context = {'errMsg': '在操作中'}
return render(request, 'error.html', context)
workflowDetail.affirm = '已确认'
workflowDetail.affirm_time = getNow()
workflowDetail.save()
return HttpResponseRedirect('/detail/' + str(workflowId) + '/')
#Check whether the logged-in user is admin
def check_admin(func):
def _decrator(request):
loginUser = request.session.get('login_username', False)
if loginUser != 'admin':
context = {'errMsg': '无权限访问该页面'}
return render(request, 'error.html', context)
else:
return func(request)
return _decrator
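#Usage sketch for check_admin (the view name below is hypothetical): it wraps a view so that
#non-admin sessions get the error page instead of the wrapped view, e.g.
#    @check_admin
#    def some_admin_only_view(request):
#        return render(request, 'somepage.html', {})
#Note the wrapper only forwards `request`, so it suits views that take no extra arguments.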
#Data synchronization
@check_admin
def datasync(request):
optCtls = operation_ctl.objects.all().order_by('data_type').order_by('opt_type')
optDict = {}
for optCtl in optCtls:
optDict[optCtl.data_type+'_'+optCtl.opt_type]=[optCtl.modify_time.strftime("%Y-%m-%d %H:%M:%S"),optCtl.status]
#optDict = json.dumps(optDict)
listOptInfo = operation_record.objects.all().order_by('-modify_time')[:10]
for row in listOptInfo:
#row.message = row.message[max(row.message.rfind('$$',0,-3)+2,len(row.message)-50):row.message.rfind('$$')]
for i in ['create_time','modify_time','finish_time']:
if getattr(row,i) is not None:
setattr(row,i,getattr(row,i).strftime('%Y-%m-%d %H:%M:%S'))
context = {'currentMenu':'datasync','optDict':optDict,'listOptInfo':listOptInfo}
return render(request, 'datasync.html', context)
#Query feature
@csrf_exempt
def queryora(request):
primaries = ora_primary_config.objects.all().order_by('cluster_name')
if len(primaries) == 0:
context = {'errMsg': '目标数为0, 请查看后端是否没有配置数据库信息'}
return render(request, 'error.html', context)
#Get the names of all databases (clusters)
listAllClusterName = [primary.cluster_name for primary in primaries]
dictAllClusterSchema = OrderedDict()
for clusterName in listAllClusterName:
dictAllClusterSchema[clusterName] = ''
#query = request.POST.get('query')
#if query and query == '1':
logon_user = request.session.get('login_username', False)
clusterName = request.POST.get('cluster_name')
sql_content = request.POST.get('sql_content')
sql_query = request.POST.get('sql_query')
try:
page = int(request.POST.get('page','1'))
except Exception:
page = 1
headerList = []
queryResultP = []
after_range_num = 5
before_range_num = 4
if page < 1:
page = 1
if sql_query:
sqlContent = sql_query.strip().rstrip(';')
if len(sqlContent) == 0:
context = {'errMsg':'sql内容不能为空'}
return render(request,'error.html',context)
finalStatus,msg,headerList,queryResult = daoora.query(logon_user,clusterName,sqlContent)
paginator = Paginator(queryResult, 10)
try:
queryResultP = paginator.page(page)
except (EmptyPage,InvalidPage,PageNotAnInteger):
queryResultP = paginator.page(1)
if page >= after_range_num:
page_range = paginator.page_range[page-after_range_num:page+before_range_num]
else:
page_range = paginator.page_range[0:int(page)+before_range_num]
if finalStatus != '执行结束':
context = {'errMsg':msg}
return render(request, 'error.html', context)
header_list = headerList
crtList = []
for cnt in range(0,len(headerList)):
if headerList[cnt].lower() in cryColList:
crtList.append(cnt)
query_result_p = []
for row in queryResultP:
strRow = []
for i in range(0,len(row)):
if i in crtList:
try:
strRow.append(prpCryptor.encrypt(row[i]))
except Exception as err:
strRow.append('$xxxxxxxxxxxxxx$')
else:
try:
strRow.append(str(row[i]))
except:
strRow.append('byte data type')
query_result_p.append(strRow)
currentMenu='queryora'
return render(request, 'queryora.html', locals())
#Required reading for SQL review
def dbaprinciples(request):
context = {'currentMenu':'dbaprinciples'}
return render(request, 'dbaprinciples.html', context)
#Chart display
def charts(request):
context = {'currentMenu':'charts'}
return render(request, 'charts.html', context)
#Get the URL of the current request
def _getDetailUrl(request):
scheme = request.scheme
#host = request.META['HTTP_HOST']
host = getattr(settings,'WAN_HOST')
return "%s://%s/detail/" % (scheme, host)
#Show the list of database schemas
def oradict(request):
primaries = configMap['oracle'].objects.all().order_by('cluster_name')
if len(primaries) == 0:
context = {'errMsg': '目标数为0, 请查看后端是否没有配置数据库信息'}
return render(request, 'error.html', context)
#Get the names of all databases (clusters)
listAllClusterName = [primary.cluster_name for primary in primaries]
clusterName = request.GET.get('cluster_name')
if clusterName:
listSchema = daoMap['oracle'].getAllSchemaByCluster(clusterName)
result = {'listSchema':listSchema}
return HttpResponse(json.dumps(result), content_type='application/json')
#dictAllClusterSchema = OrderedDict()
##Get the primary database addresses:
#for clusterName in listAllClusterName:
# listSchema = daoMap['oracle'].getAllSchemaByCluster(clusterName)
# dictAllClusterSchema[clusterName] = listSchema
return render(request,'oradict.html',locals())
#Personal profile page
def myProfile(request):
pass
#Configure user privileges
@check_admin
def privConfig(request):
after_range_num = 5
before_range_num = 4
try:
page = int(request.GET.get('page','1'))
if page < 1:
page = 1
except ValueError:
page = 1
listUsers = users.objects.filter(is_active=1).order_by('username')
paginator = Paginator(listUsers, 10)
listCluster = ora_primary_config.objects.all().order_by('cluster_name')
try:
listUsersP = paginator.page(page)
except (EmptyPage,InvalidPage,PageNotAnInteger):
listUsersP = paginator.page(1)
if page >= after_range_num:
page_range = paginator.page_range[page-after_range_num:page+before_range_num]
else:
page_range = paginator.page_range[0:int(page)+before_range_num]
currentMenu='privconfig'
return render(request,'privconfig.html',locals())
def myPrivs(request):
listUsers = users.objects.filter(username=request.session.get('login_username'))
listCluster = ora_primary_config.objects.all().order_by('cluster_name')
currentMenu='myprivs'
return render(request,'myprivs.html',locals())
def assignToMe(request):
loginUser = request.session.get('login_username')
workflowId = request.POST['workflowid']
with transaction.atomic():
try:
workflowDetail = workflow.objects.select_for_update().get(id=workflowId,status=Const.workflowStatus['manexec'],operator=None)
except Exception:
context = {'errMsg': '获取工单状态失败或者已经有人在处理'}
return render(request, 'error.html', context)
try:
reviewMen = json.loads(workflowDetail.review_man)
except Exception:
reviewMen = workflowDetail.review_man
if not loginUser in reviewMen:
context = {'errMsg': '你不在审核人之列'}
return render(request, 'error.html', context)
workflowDetail.operator = loginUser
try:
workflowDetail.save()
except Exception as e:
context = {'errMsg': str(e)}
return render(request, 'error.html', context)
else:
return HttpResponseRedirect('/detail/' + str(workflowId) + '/')
| 30,409
| 0
| 538
|
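A minimal standalone sketch (not part of the record above) of the sliding page-range window used by the queryora() and privConfig() views above, assuming Django's Paginator; the helper name window_page_range and its defaults are invented for illustration.

from django.core.paginator import Paginator

def window_page_range(paginator: Paginator, page: int, after_range_num: int = 5, before_range_num: int = 4):
    # Clamp the requested page to at least 1, then return a window of page numbers
    # around it, mirroring the after_range_num/before_range_num logic in the views above.
    page = max(page, 1)
    if page >= after_range_num:
        return paginator.page_range[page - after_range_num:page + before_range_num]
    return paginator.page_range[0:page + before_range_num]

# usage sketch: window_page_range(Paginator(range(100), 10), page=7) yields pages 3..10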
efe2db08314c57bd59ee078b29667d0527f4de9f
| 844
|
py
|
Python
|
NeuralNet/Oli/evaluateNN.py
|
alex-ta/Fontinator
|
7ca9effe3b61ded032176557520127e1d4b7a5ef
|
[
"Apache-2.0"
] | 6
|
2017-04-12T14:05:19.000Z
|
2021-01-29T11:23:50.000Z
|
NeuralNet/Oli/evaluateNN.py
|
alex-ta/Fontinator
|
7ca9effe3b61ded032176557520127e1d4b7a5ef
|
[
"Apache-2.0"
] | null | null | null |
NeuralNet/Oli/evaluateNN.py
|
alex-ta/Fontinator
|
7ca9effe3b61ded032176557520127e1d4b7a5ef
|
[
"Apache-2.0"
] | null | null | null |
from NeuralNet.Oli.libs.ProcessingPipeline import ProcessingPipeline
from NeuralNet.Oli.libs.Preprocessor import SimplePreprocessor, IPreprocessor
#__________Configuration__________#
# Path to folder which contains subfolders with the images
IMG_PATH = '../../images/Dataset_2'
# Name for model when saved
MODEL_LOAD_PATH = "SavedModels/LT2"
# Pipeline managing working with keras model
pipeline: ProcessingPipeline = ProcessingPipeline()
# Loads all images and extract features and labels
preprocessor: IPreprocessor = SimplePreprocessor()
x, y = pipeline.load_features_and_preprocess(IMG_PATH, img_preprocessor=preprocessor)
# Load the model from disk
pipeline.load_model(MODEL_LOAD_PATH)
# Make predictions with loaded model
y_pred = pipeline.predict(x)
# Evaluate model on test images and show summary
pipeline.evaluate(y, y_pred)
| 30.142857
| 85
| 0.81872
|
from NeuralNet.Oli.libs.ProcessingPipeline import ProcessingPipeline
from NeuralNet.Oli.libs.Preprocessor import SimplePreprocessor, IPreprocessor
#__________Configuration__________#
# Path to folder which contains subfolders with the images
IMG_PATH = '../../images/Dataset_2'
# Name for model when saved
MODEL_LOAD_PATH = "SavedModels/LT2"
# Pipeline managing working with keras model
pipeline: ProcessingPipeline = ProcessingPipeline()
# Loads all images and extract features and labels
preprocessor: IPreprocessor = SimplePreprocessor()
x, y = pipeline.load_features_and_preprocess(IMG_PATH, img_preprocessor=preprocessor)
# Load the model from disk
pipeline.load_model(MODEL_LOAD_PATH)
# Make predictions with loaded model
y_pred = pipeline.predict(x)
# Evaluate model on test images and show summary
pipeline.evaluate(y, y_pred)
| 0
| 0
| 0
|
a25701cc6b6f43c6850d384533ce9653d157c2e6
| 3,530
|
py
|
Python
|
examples/checkpointing/checkpoint.py
|
RajatRasal/devito
|
162abb6b318e77eaa4e8f719047327c45782056f
|
[
"MIT"
] | null | null | null |
examples/checkpointing/checkpoint.py
|
RajatRasal/devito
|
162abb6b318e77eaa4e8f719047327c45782056f
|
[
"MIT"
] | null | null | null |
examples/checkpointing/checkpoint.py
|
RajatRasal/devito
|
162abb6b318e77eaa4e8f719047327c45782056f
|
[
"MIT"
] | null | null | null |
from pyrevolve import Checkpoint, Operator
from devito import TimeFunction
class CheckpointOperator(Operator):
"""Devito's concrete implementation of the ABC pyrevolve.Operator. This class wraps
devito.Operator so it conforms to the pyRevolve API. pyRevolve will call apply
with arguments t_start and t_end. Devito calls these arguments time_m and time_M,
so the following dict is used to perform the translation between the two names.
:param op: The devito.Operator object that this object will wrap
:param args: If devito.Operator.apply() expects any arguments, they can be provided
here to be cached. Any calls to CheckpointOperator.apply() will
automatically include these cached arguments in the call to the
underlying devito.Operator.apply().
"""
t_arg_names = {'t_start': 'time_m', 't_end': 'time_M'}
def apply(self, t_start, t_end):
""" If the devito operator requires some extra arguments in the call to apply
they can be stored in the args property of this object so pyRevolve calls
pyRevolve.Operator.apply() without caring about these extra arguments while
this method passes them on correctly to devito.Operator
"""
# Build the arguments list to invoke the kernel function
args = self.op.arguments(**self._prepare_args(t_start, t_end))
# Invoke kernel function with args
arg_values = [args[p.name] for p in self.op.parameters]
self.op.cfunction(*arg_values)
class DevitoCheckpoint(Checkpoint):
"""Devito's concrete implementation of the Checkpoint abstract base class provided by
pyRevolve. Holds a list of symbol objects that hold data.
"""
def __init__(self, objects):
"""Intialise a checkpoint object. Upon initialisation, a checkpoint
stores only a reference to the objects that are passed into it."""
assert(all(isinstance(o, TimeFunction) for o in objects))
dtypes = set([o.dtype for o in objects])
assert(len(dtypes) == 1)
self._dtype = dtypes.pop()
self.objects = objects
def save(self, ptr):
"""Overwrite live-data in this Checkpoint object with data found at
the ptr location."""
i_ptr_lo = 0
i_ptr_hi = 0
for o in self.objects:
i_ptr_hi = i_ptr_hi + o.size
ptr[i_ptr_lo:i_ptr_hi] = o.data.flatten()[:]
i_ptr_lo = i_ptr_hi
def load(self, ptr):
"""Copy live-data from this Checkpoint object into the memory given by
the ptr."""
i_ptr_lo = 0
i_ptr_hi = 0
for o in self.objects:
i_ptr_hi = i_ptr_hi + o.size
o.data[:] = ptr[i_ptr_lo:i_ptr_hi].reshape(o.shape)
i_ptr_lo = i_ptr_hi
@property
def size(self):
"""The memory consumption of the data contained in a checkpoint."""
return sum([o.size for o in self.objects])
| 42.02381
| 90
| 0.650142
|
from pyrevolve import Checkpoint, Operator
from devito import TimeFunction
class CheckpointOperator(Operator):
"""Devito's concrete implementation of the ABC pyrevolve.Operator. This class wraps
devito.Operator so it conforms to the pyRevolve API. pyRevolve will call apply
with arguments t_start and t_end. Devito calls these arguments time_m and time_M,
so the following dict is used to perform the translation between the two names.
:param op: The devito.Operator object that this object will wrap
:param args: If devito.Operator.apply() expects any arguments, they can be provided
here to be cached. Any calls to CheckpointOperator.apply() will
automatically include these cached arguments in the call to the
underlying devito.Operator.apply().
"""
t_arg_names = {'t_start': 'time_m', 't_end': 'time_M'}
def __init__(self, op, **kwargs):
self.op = op
self.args = kwargs
op_default_args = self.op.prepare_arguments(**kwargs)
self.start_offset = op_default_args[self.t_arg_names['t_start']]
def _prepare_args(self, t_start, t_end):
args = self.args.copy()
args[self.t_arg_names['t_start']] = t_start + self.start_offset
args[self.t_arg_names['t_end']] = t_end - 1 + self.start_offset
return args
def apply(self, t_start, t_end):
""" If the devito operator requires some extra arguments in the call to apply
they can be stored in the args property of this object so pyRevolve calls
pyRevolve.Operator.apply() without caring about these extra arguments while
this method passes them on correctly to devito.Operator
"""
# Build the arguments list to invoke the kernel function
args = self.op.arguments(**self._prepare_args(t_start, t_end))
# Invoke kernel function with args
arg_values = [args[p.name] for p in self.op.parameters]
self.op.cfunction(*arg_values)
class DevitoCheckpoint(Checkpoint):
"""Devito's concrete implementation of the Checkpoint abstract base class provided by
pyRevolve. Holds a list of symbol objects that hold data.
"""
def __init__(self, objects):
"""Intialise a checkpoint object. Upon initialisation, a checkpoint
stores only a reference to the objects that are passed into it."""
assert(all(isinstance(o, TimeFunction) for o in objects))
dtypes = set([o.dtype for o in objects])
assert(len(dtypes) == 1)
self._dtype = dtypes.pop()
self.objects = objects
@property
def dtype(self):
return self._dtype
def save(self, ptr):
"""Overwrite live-data in this Checkpoint object with data found at
the ptr location."""
i_ptr_lo = 0
i_ptr_hi = 0
for o in self.objects:
i_ptr_hi = i_ptr_hi + o.size
ptr[i_ptr_lo:i_ptr_hi] = o.data.flatten()[:]
i_ptr_lo = i_ptr_hi
def load(self, ptr):
"""Copy live-data from this Checkpoint object into the memory given by
the ptr."""
i_ptr_lo = 0
i_ptr_hi = 0
for o in self.objects:
i_ptr_hi = i_ptr_hi + o.size
o.data[:] = ptr[i_ptr_lo:i_ptr_hi].reshape(o.shape)
i_ptr_lo = i_ptr_hi
@property
def size(self):
"""The memory consumption of the data contained in a checkpoint."""
return sum([o.size for o in self.objects])
| 432
| 0
| 80
|
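A hedged sketch of how wrappers such as CheckpointOperator and DevitoCheckpoint above are typically handed to pyrevolve's Revolver for a checkpointed forward/adjoint run; op_fwd, op_rev, the TimeFunction u and the step count nt are hypothetical placeholders, not taken from the record above.

from pyrevolve import Revolver

def run_checkpointed(op_fwd, op_rev, u, nt, n_checkpoints=None):
    # Wrap the concrete devito operators so they expose apply(t_start, t_end).
    # (Any extra apply() kwargs the operators need would be cached here, as the class docstring notes.)
    wrap_fwd = CheckpointOperator(op_fwd)
    wrap_rev = CheckpointOperator(op_rev)
    # The checkpoint holds references to the TimeFunctions whose data is saved/restored.
    cp = DevitoCheckpoint([u])
    # Revolver schedules the forward and reverse sweeps, calling cp.save()/cp.load()
    # at the checkpoints it decides to keep in memory.
    revolver = Revolver(cp, wrap_fwd, wrap_rev, n_checkpoints, nt)
    revolver.apply_forward()
    revolver.apply_reverse()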
73e96d1b436844de47df13ec105b301ac7ba9065
| 1,543
|
py
|
Python
|
demo_feature_descriptor.py
|
DVukalov/cvclasses18
|
e9a1cab443d847e547fea98b55eecf248520122d
|
[
"MIT"
] | null | null | null |
demo_feature_descriptor.py
|
DVukalov/cvclasses18
|
e9a1cab443d847e547fea98b55eecf248520122d
|
[
"MIT"
] | null | null | null |
demo_feature_descriptor.py
|
DVukalov/cvclasses18
|
e9a1cab443d847e547fea98b55eecf248520122d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# DRAFT
import os
import sys
import json
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
if len(sys.argv) == 2:
compare_descriptors(sys.argv[1])
| 25.295082
| 130
| 0.610499
|
#!/usr/bin/python3
# DRAFT
import os
import sys
import json
import numpy as np
import matplotlib.pyplot as plt
def get_hamming(a, b):
d = 0;
for i in range(len(a[0])):
d+=bin(a[0][i] ^ b[0][i]).count("1")
return d
def get_distances(values, cols, flag):
d = []
dist = (lambda i, j: values[i] ^ values[j]) if type(values[0]) is float else (lambda i, j: bin(values[i] ^ values[j]).count("1")) # note: 'dist' is assigned but never used below
descr = []
for i in range(0, int(len(values) / cols)):
descr.append([values[i * cols : (i + 1) * cols]])
# add the Hamming distance
if flag==0:
for i in range(0, len(descr)):
for j in range(i+1, len(descr)):
d.append(get_hamming(descr[i], descr[j]))
#d.append(np.linalg.norm(np.array(descr[i]) - np.array(descr[j])))
else:
for i in range(0, len(descr)):
for j in range(i+1, len(descr)):
#d.append(get_hamming(descr[i], descr[j]))
d.append(np.linalg.norm(np.array(descr[i]) - np.array(descr[j])))
return d
def compare_descriptors(filename):
data = dict()
with open(filename, 'r+', encoding=('UTF-8')) as f:
data = json.loads(f.read())
hist_data = dict()
i= 0;
for key, val in data.items():
hist_data[key] = get_distances(val["data"], val["cols"], i)
i=i+1
for key, val in hist_data.items():
val = (val - np.min(val))/np.ptp(val)
bins = np.linspace(min(val), max(val), 50)
plt.hist(val, alpha=0.5, label=key, normed=True)
plt.legend()
plt.show()
if __name__ == "__main__":
if len(sys.argv) == 2:
compare_descriptors(sys.argv[1])
| 1,293
| 0
| 72
|
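As a hedged aside on the record above: get_hamming() counts differing bits between two byte descriptors, and the same count can be obtained with numpy's bitwise_xor and unpackbits. The example values below are made up.

import numpy as np

a = np.array([0b10110010, 0b00001111], dtype=np.uint8)
b = np.array([0b10010010, 0b11110000], dtype=np.uint8)
# XOR marks the differing bits; unpackbits expands them so they can simply be summed.
hamming = int(np.unpackbits(np.bitwise_xor(a, b)).sum())
assert hamming == sum(bin(x ^ y).count("1") for x, y in zip(a.tolist(), b.tolist()))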
0a6f6ccab95232541ad45f0a864c6c8f318c1343
| 5,198
|
py
|
Python
|
bookorbooks/school/tests/class_tests.py
|
talhakoylu/SummerInternshipBackend
|
4ecedf5c97f73e3d32d5a534769e86aac3e4b6d3
|
[
"MIT"
] | 1
|
2021-08-10T22:24:17.000Z
|
2021-08-10T22:24:17.000Z
|
bookorbooks/school/tests/class_tests.py
|
talhakoylu/SummerInternshipBackend
|
4ecedf5c97f73e3d32d5a534769e86aac3e4b6d3
|
[
"MIT"
] | null | null | null |
bookorbooks/school/tests/class_tests.py
|
talhakoylu/SummerInternshipBackend
|
4ecedf5c97f73e3d32d5a534769e86aac3e4b6d3
|
[
"MIT"
] | null | null | null |
from account.models.instructor_model import InstructorProfile
import json
from school.models.class_model import Class
from school.models.school_model import School
from django.urls.base import reverse
from rest_framework.test import APITestCase
from country.models import Country, City
from django.contrib.auth import get_user_model
User = get_user_model()
| 41.584
| 144
| 0.649865
|
from account.models.instructor_model import InstructorProfile
import json
from school.models.class_model import Class
from school.models.school_model import School
from django.urls.base import reverse
from rest_framework.test import APITestCase
from country.models import Country, City
from django.contrib.auth import get_user_model
User = get_user_model()
class ClassTests(APITestCase):
url_list = reverse("school:list_class")
url_create = reverse("school:add_class")
url_login = reverse("token_obtain_pair")
def setUp(self) -> None:
self.country = Country.objects.create(name = "Türkiye", code = "Tur")
self.city = City.objects.create(country = self.country, name = "Konya", code = "42")
self.school = School.objects.create(city = self.city, name = "Example School", address = "Example Address", website = "Example website")
self.create_instructor(school=self.school)
self.create_class(school_class = self.school ,user = self.user.user_instructor, name = "Class A", grade = 4)
self.url_update = reverse("school:update_class", kwargs={"id" : self.school_class.id})
self.fake_user_data = {
"username" : "johndoetest",
"password" : "johndoe123",
"user_type" : 3
}
self.fake_user = User.objects.create_user(username = self.fake_user_data["username"], password = self.fake_user_data["password"],
user_type = self.fake_user_data["user_type"], email = "johndoetest@example.com", identity_number = "12345678910")
def create_instructor(self, school, username = "johndoe", password = "johndoe123", user_type = 4):
self.login_data = {
"username" : username,
"password" : password
}
self.user = User.objects.create_user(username = username, password = password, user_type = user_type)
instructor = InstructorProfile.objects.get(user = self.user)
instructor.school = school
instructor.save()
def create_class(self, school_class, user, name, grade):
self.school_class = Class.objects.create(instructor = user, school = school_class, name = name, grade = grade)
def login_with_token(self, login_data):
"""
Helper method that logs in through the token endpoint and attaches the returned Bearer token to subsequent requests.
"""
response = self.client.post(self.url_login, login_data)
self.assertEqual(200, response.status_code)
token = response.data["access"]
self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + token)
def test_get_class_list(self):
"""
Tests that the class list page returns a status code of 200 and that each entry contains the school, instructor and name fields.
"""
response = self.client.get(self.url_list)
result = json.loads(response.content)
self.assertEqual(200, response.status_code)
self.assertTrue("school" in result[0] and result[0]["school"]["name"] == "Example School")
self.assertTrue("instructor" in result[0])
self.assertTrue("name" in result[0] and result[0]["name"] == "Class A")
def test_create_class_is_authenticated(self):
"""
Tests whether the user is authenticated, and if not, the user cannot access the "class add" page.
"""
response = self.client.get(self.url_create)
self.assertEqual(401, response.status_code)
def test_create_class_is_instructor(self):
"""
Tests whether the user is instructor.
"""
self.login_with_token(self.fake_user_data)
response = self.client.get(self.url_create)
self.assertEqual(403, response.status_code)
def test_class_update_is_authenticated(self):
"""
Tests whether the user is authenticated, and if not, the user cannot access the "class update" page.
"""
response = self.client.get(self.url_update)
self.assertEqual(401, response.status_code)
def test_class_update_is_instructor(self):
"""
Tests whether the user is instructor.
"""
self.login_with_token(self.fake_user_data)
response = self.client.get(self.url_create)
self.assertEqual(403, response.status_code)
def test_class_update_is_own_class(self):
"""
Tests that an instructor can only update their own classes.
"""
self.fake_user_data["user_type"] = 4
self.login_with_token(self.fake_user_data)
response = self.client.get(self.url_create)
self.assertEqual(403, response.status_code)
self.assertTrue("detail" in json.loads(response.content))
def test_class_create(self):
"""
Class creation page test.
"""
class_data = {
"name": "Class Name",
"grade" : 8
}
login = {
"username": "johndoe",
"password" : "johndoe123"
}
self.login_with_token(login)
response = self.client.post(self.url_create, class_data)
result = Class.objects.filter(instructor=self.user.user_instructor)
self.assertEqual(201, response.status_code)
self.assertTrue(result.count() > 0)
| 1,567
| 3,242
| 23
|
4669eb8a1f882f9e79fab53b7a21494546fdaba9
| 2,321
|
py
|
Python
|
frarch/modules/metrics/base.py
|
victorbadenas/frarch
|
e75e2a63aaf14cf797ffffc901ca382b3d88b7b0
|
[
"Apache-2.0"
] | null | null | null |
frarch/modules/metrics/base.py
|
victorbadenas/frarch
|
e75e2a63aaf14cf797ffffc901ca382b3d88b7b0
|
[
"Apache-2.0"
] | 4
|
2022-02-16T20:53:24.000Z
|
2022-02-16T21:39:26.000Z
|
frarch/modules/metrics/base.py
|
victorbadenas/frarch
|
e75e2a63aaf14cf797ffffc901ca382b3d88b7b0
|
[
"Apache-2.0"
] | 1
|
2022-03-20T23:47:16.000Z
|
2022-03-20T23:47:16.000Z
|
import abc
from typing import Any
import torch
AGGREGATION_MODES = ["mean", "max", "min"]
class Metric(metaclass=abc.ABCMeta):
"""abstract class for Metric objects.
Example:
Simple usage of the Metric class::
class MyMetric(Metric):
def _update(self, predictions, truth):
# compute some metric
return metric_value
model = MyModel()
mymetric = MyMetric()
for batch, labels in dataset:
predictions = model(batch)
mymetric.update(predictions, labels)
print(mymetric.get_metric(mode="mean"))
"""
def reset(self) -> None:
"""Clear metrics from class."""
self.metrics = []
def update(self, predictions: torch.Tensor, truth: torch.Tensor) -> None:
"""Compute metric value and append to the metrics array.
Args:
predictions (torch.Tensor): output tensors from model.
truth (torch.Tensor): ground truth tensor.
"""
self.metrics.append(self._update(predictions, truth))
@abc.abstractmethod
def _update(self, predictions: torch.Tensor, truth: torch.Tensor) -> Any:
"""Compute the metric value.
Args:
predictions (torch.Tensor): output tensors from model.
truth (torch.Tensor): ground truth tensor.
"""
def get_metric(self, mode="mean") -> float:
"""Aggregate all values stored in the metric class.
Args:
mode (str, optional): aggregation type. mean, max or min.
Defaults to "mean".
Raises:
ValueError: aggregation mode not supported
Returns:
float: aggregated metric.
"""
if len(self) == 0:
return 0.0
if mode not in AGGREGATION_MODES:
raise ValueError(
f"Mode {mode} not supported. Supported modes: {AGGREGATION_MODES}"
)
if mode == "mean":
return sum(self.metrics) / len(self)
elif mode == "max":
return max(self.metrics)
elif mode == "min":
return min(self.metrics)
| 28.304878
| 82
| 0.560534
|
import abc
from typing import Any
import torch
AGGREGATION_MODES = ["mean", "max", "min"]
class Metric(metaclass=abc.ABCMeta):
"""abstract class for Metric objects.
Example:
Simple usage of the Metric class::
class MyMetric(Metric):
def _update(self, predictions, truth):
# compute some metric
return metric_value
model = MyModel()
mymetric = MyMetric()
for batch, labels in dataset:
predictions = model(batch)
mymetric.update(predictions, labels)
print(mymetric.get_metric(mode="mean"))
"""
def __init__(self) -> None:
self.reset()
def reset(self) -> None:
"""Clear metrics from class."""
self.metrics = []
def update(self, predictions: torch.Tensor, truth: torch.Tensor) -> None:
"""Compute metric value and append to the metrics array.
Args:
predictions (torch.Tensor): output tensors from model.
truth (torch.Tensor): ground truth tensor.
"""
self.metrics.append(self._update(predictions, truth))
@abc.abstractmethod
def _update(self, predictions: torch.Tensor, truth: torch.Tensor) -> Any:
"""Compute the metric value.
Args:
predictions (torch.Tensor): output tensors from model.
truth (torch.Tensor): ground truth tensor.
"""
def __len__(self) -> int:
return len(self.metrics)
def get_metric(self, mode="mean") -> float:
"""Aggregate all values stored in the metric class.
Args:
mode (str, optional): aggregation type. mean, max or min.
Defaults to "mean".
Raises:
ValueError: aggregation mode not supported
Returns:
float: aggregated metric.
"""
if len(self) == 0:
return 0.0
if mode not in AGGREGATION_MODES:
raise ValueError(
f"Mode {mode} not supported. Supported modes: {AGGREGATION_MODES}"
)
if mode == "mean":
return sum(self.metrics) / len(self)
elif mode == "max":
return max(self.metrics)
elif mode == "min":
return min(self.metrics)
| 64
| 0
| 54
|
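A hedged, minimal concrete subclass of the Metric base class in the record above, illustrating the _update()/get_metric() contract described in its docstring with a simple batch accuracy; the class name and the tensors in the usage note are invented for the example.

import torch

class Accuracy(Metric):
    def _update(self, predictions: torch.Tensor, truth: torch.Tensor) -> float:
        # Arg-max over the class dimension, then the fraction of correctly predicted labels.
        predicted_labels = predictions.argmax(dim=-1)
        return (predicted_labels == truth).float().mean().item()

# usage sketch:
# acc = Accuracy()
# acc.update(torch.randn(8, 10), torch.randint(0, 10, (8,)))
# acc.get_metric(mode="mean")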
bed916834bec6eedf516cff915c111227e3b0006
| 561
|
py
|
Python
|
simple5ploit/utils/server.py
|
binexisHATT/simple5ploit
|
c3be89bbe67e413ee313da310d57a180243a758f
|
[
"MIT"
] | 1
|
2021-03-21T21:00:42.000Z
|
2021-03-21T21:00:42.000Z
|
simple5ploit/utils/server.py
|
binexisHATT/simple5ploit
|
c3be89bbe67e413ee313da310d57a180243a758f
|
[
"MIT"
] | null | null | null |
simple5ploit/utils/server.py
|
binexisHATT/simple5ploit
|
c3be89bbe67e413ee313da310d57a180243a758f
|
[
"MIT"
] | 1
|
2021-10-29T19:07:03.000Z
|
2021-10-29T19:07:03.000Z
|
"""Use this template for creating simple Python3 server"""
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
| 35.0625
| 65
| 0.647059
|
"""Use this template for creating simple Python3 server"""
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
def serve(port: int):
if port > 65535:
print(f"[X] port number, {port}, is not a valid port")
print("[*] will use port 8888 instead")
port = 8888
Handler = SimpleHTTPRequestHandler
with TCPServer(("", port), Handler) as httpd:
print(f"[**]::server running at http://localhost:{port}")
print(f"[**]::press (CTRL+C) to stop server...")
httpd.serve_forever()
| 394
| 0
| 23
|
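A hedged usage sketch for the serve() helper in the record above; the port value and the __main__ guard are illustrative and not part of the original file.

if __name__ == "__main__":
    # serve() itself falls back to port 8888 when given an out-of-range port number
    serve(8000)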
0cf4c615a40fbaa98c794e4e41937f5390b1e17a
| 1,323
|
py
|
Python
|
src/plt-lorenz.py
|
nicola144/auxiliary-particle-filters
|
61d72e9163abb73007c0fbd30f68d4cc6d7ab4e9
|
[
"MIT"
] | 5
|
2020-11-26T15:56:15.000Z
|
2022-02-06T12:48:21.000Z
|
src/plt-lorenz.py
|
nicola144/auxiliary-particle-filters
|
61d72e9163abb73007c0fbd30f68d4cc6d7ab4e9
|
[
"MIT"
] | null | null | null |
src/plt-lorenz.py
|
nicola144/auxiliary-particle-filters
|
61d72e9163abb73007c0fbd30f68d4cc6d7ab4e9
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
# This import registers the 3D projection, but is otherwise unused.
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
def lorenz(x, y, z, s=10, r=28, b=2.667):
'''
Given:
x, y, z: a point of interest in three dimensional space
s, r, b: parameters defining the lorenz attractor
Returns:
x_dot, y_dot, z_dot: values of the lorenz attractor's partial
derivatives at the point x, y, z
'''
x_dot = s*(y - x)
y_dot = r*x - y - x*z
z_dot = x*y - b*z
return x_dot, y_dot, z_dot
dt = 0.01
num_steps = 1000
# Need one more for the initial values
xs = np.empty(num_steps + 1)
ys = np.empty(num_steps + 1)
zs = np.empty(num_steps + 1)
# Set initial values
xs[0], ys[0], zs[0] = (0., 1., 1.05)
# Step through "time", calculating the partial derivatives at the current point
# and using them to estimate the next point
for i in range(num_steps):
x_dot, y_dot, z_dot = lorenz(xs[i], ys[i], zs[i])
xs[i + 1] = xs[i] + (x_dot * dt)
ys[i + 1] = ys[i] + (y_dot * dt)
zs[i + 1] = zs[i] + (z_dot * dt)
# Plot
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(xs, ys, zs, lw=0.5)
ax.set_xlabel("X1 Axis")
ax.set_ylabel("X2 Axis")
ax.set_zlabel("X3 Axis")
ax.set_title("Lorenz 63 noiseless trajectory")
plt.savefig('lorenz-2.pdf')
| 25.442308
| 79
| 0.662887
|
import numpy as np
import matplotlib.pyplot as plt
# This import registers the 3D projection, but is otherwise unused.
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
def lorenz(x, y, z, s=10, r=28, b=2.667):
'''
Given:
x, y, z: a point of interest in three dimensional space
s, r, b: parameters defining the lorenz attractor
Returns:
x_dot, y_dot, z_dot: values of the lorenz attractor's partial
derivatives at the point x, y, z
'''
x_dot = s*(y - x)
y_dot = r*x - y - x*z
z_dot = x*y - b*z
return x_dot, y_dot, z_dot
dt = 0.01
num_steps = 1000
# Need one more for the initial values
xs = np.empty(num_steps + 1)
ys = np.empty(num_steps + 1)
zs = np.empty(num_steps + 1)
# Set initial values
xs[0], ys[0], zs[0] = (0., 1., 1.05)
# Step through "time", calculating the partial derivatives at the current point
# and using them to estimate the next point
for i in range(num_steps):
x_dot, y_dot, z_dot = lorenz(xs[i], ys[i], zs[i])
xs[i + 1] = xs[i] + (x_dot * dt)
ys[i + 1] = ys[i] + (y_dot * dt)
zs[i + 1] = zs[i] + (z_dot * dt)
# Plot
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(xs, ys, zs, lw=0.5)
ax.set_xlabel("X1 Axis")
ax.set_ylabel("X2 Axis")
ax.set_zlabel("X3 Axis")
ax.set_title("Lorenz 63 noiseless trajectory")
plt.savefig('lorenz-2.pdf')
| 0
| 0
| 0
|
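A hedged alternative to the explicit Euler loop in the record above: integrating the same Lorenz-63 system with scipy's solve_ivp. The right-hand-side function name and the time span are illustrative.

import numpy as np
from scipy.integrate import solve_ivp

def lorenz_rhs(t, state, s=10, r=28, b=2.667):
    # Same partial derivatives as lorenz() above, packed for solve_ivp's (t, y) signature.
    x, y, z = state
    return [s * (y - x), r * x - y - x * z, x * y - b * z]

t_eval = np.linspace(0.0, 10.0, 1001)  # roughly matches dt=0.01 over 1000 steps
sol = solve_ivp(lorenz_rhs, t_span=(0.0, 10.0), y0=[0.0, 1.0, 1.05], t_eval=t_eval)
xs, ys, zs = sol.y  # trajectory components, analogous to xs/ys/zs in the script above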
08fc5cb1dced07419054a946bb8215665b291c68
| 3,908
|
py
|
Python
|
demo.py
|
Ushk/fourier-feature-networks
|
af4947e137e31c5e3a887d800f1995485414297d
|
[
"MIT"
] | null | null | null |
demo.py
|
Ushk/fourier-feature-networks
|
af4947e137e31c5e3a887d800f1995485414297d
|
[
"MIT"
] | null | null | null |
demo.py
|
Ushk/fourier-feature-networks
|
af4947e137e31c5e3a887d800f1995485414297d
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torchvision
import numpy as np
from tqdm import tqdm
from dataset import ImageDataset
if __name__ == '__main__':
device = "cuda:0"
network_size = (4, 512, 256)
learning_rate = 1e-4
iters = 250
mapping_size = 256
B_gauss = torch.randn((mapping_size, 2)).to(device) * 10
ds = ImageDataset("data/fox.jpg", 512)
grid, image = ds[0]
grid = grid.unsqueeze(0).to(device)
image = image.unsqueeze(0).to(device)
test_data = (grid, image)
train_data = (grid[:, ::2, ::2], image[:, ::2, :: 2])
output = train_model(network_size, learning_rate, iters, B_gauss,
train_data=train_data, test_data=(grid, image), device=device)
| 28.948148
| 92
| 0.591095
|
import torch
import torch.nn as nn
import torchvision
import numpy as np
from tqdm import tqdm
from dataset import ImageDataset
class Swish(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x * torch.sigmoid(x)
class SirenLayer(nn.Module):
def __init__(self, in_f, out_f, w0=30, is_first=False, is_last=False):
super().__init__()
self.in_f = in_f
self.w0 = w0
self.linear = nn.Linear(in_f, out_f)
self.is_first = is_first
self.is_last = is_last
self.init_weights()
def init_weights(self):
b = 1 / \
self.in_f if self.is_first else np.sqrt(6 / self.in_f) / self.w0
with torch.no_grad():
self.linear.weight.uniform_(-b, b)
def forward(self, x):
x = self.linear(x)
return x if self.is_last else torch.sin(self.w0 * x)
def input_mapping(x, B):
if B is None:
return x
else:
x_proj = (2. * np.pi * x) @ B.t()
return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1)
def make_network(num_layers, input_dim, hidden_dim):
layers = [nn.Linear(input_dim, hidden_dim), Swish()]
for i in range(1, num_layers - 1):
layers.append(nn.Linear(hidden_dim, hidden_dim))
layers.append(Swish())
layers.append(nn.Linear(hidden_dim, 3))
layers.append(nn.Sigmoid())
return nn.Sequential(*layers)
def gon_model(num_layers, input_dim, hidden_dim):
layers = [SirenLayer(input_dim, hidden_dim, is_first=True)]
for i in range(1, num_layers - 1):
layers.append(SirenLayer(hidden_dim, hidden_dim))
layers.append(SirenLayer(hidden_dim, 3, is_last=True))
return nn.Sequential(*layers)
def train_model(network_size, learning_rate, iters, B, train_data, test_data, device="cpu"):
model = gon_model(*network_size).to(device)
optim = torch.optim.Adam(model.parameters(), lr=learning_rate)
loss_fn = torch.nn.MSELoss()
train_psnrs = []
test_psnrs = []
xs = []
for i in range(iters):
#for i in tqdm(range(iters), desc='train iter', leave=False):
model.train()
optim.zero_grad()
t_o = model(input_mapping(train_data[0], B))
t_loss = .5 * loss_fn(t_o, train_data[1])
t_loss.backward()
optim.step()
# print(f"---[steps: {i}]: train loss: {t_loss.item():.6f}")
train_psnrs.append(- 10 * torch.log10(2 * t_loss).item())
if i % 25 == 0:
model.eval()
with torch.no_grad():
v_o = model(input_mapping(test_data[0], B))
save_img = torch.zeros((1,3,256,256)).to(device)
test_inds = (test_data[0]*255).round().long()
save_img[0, :, test_inds[:,0], test_inds[:,1]] = v_o.T
#v_loss = loss_fn(v_o, test_data[1])
#v_psnrs = - 10 * torch.log10(2 * v_loss).item()
#test_psnrs.append(v_psnrs)
#xs.append(i)
torchvision.utils.save_image(save_img, f"imgs/{i}.jpeg")
# print(f"---[steps: {i}]: valid loss: {v_loss.item():.6f}")
return {
'state': model.state_dict(),
'train_psnrs': train_psnrs,
'test_psnrs': test_psnrs,
}
if __name__ == '__main__':
device = "cuda:0"
network_size = (4, 512, 256)
learning_rate = 1e-4
iters = 250
mapping_size = 256
B_gauss = torch.randn((mapping_size, 2)).to(device) * 10
ds = ImageDataset("data/fox.jpg", 512)
grid, image = ds[0]
grid = grid.unsqueeze(0).to(device)
image = image.unsqueeze(0).to(device)
test_data = (grid, image)
train_data = (grid[:, ::2, ::2], image[:, ::2, :: 2])
output = train_model(network_size, learning_rate, iters, B_gauss,
train_data=train_data, test_data=(grid, image), device=device)
| 2,882
| 9
| 272
|
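A hedged numeric check of the Gaussian Fourier feature mapping implemented by input_mapping() in the record above, i.e. gamma(v) = [sin(2*pi*Bv), cos(2*pi*Bv)]; the scale factor mirrors the demo but the variable names and shapes here are illustrative.

import math
import torch

mapping_size = 256
B = torch.randn(mapping_size, 2) * 10       # random Gaussian projection, scale 10 as in the demo
coords = torch.rand(4, 2)                   # four example 2-D coordinates in [0, 1)
proj = 2.0 * math.pi * coords @ B.t()       # shape (4, 256)
features = torch.cat([torch.sin(proj), torch.cos(proj)], dim=-1)
assert features.shape == (4, 2 * mapping_size)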
2507f30939de842954c4932ecd45b09333c6cfe5
| 33,664
|
py
|
Python
|
tests/tree/aggs/test_aggs.py
|
alk-lbinet/pandagg
|
542350f84ca4497ab4a5f01b054aff2385f6827e
|
[
"Apache-2.0"
] | null | null | null |
tests/tree/aggs/test_aggs.py
|
alk-lbinet/pandagg
|
542350f84ca4497ab4a5f01b054aff2385f6827e
|
[
"Apache-2.0"
] | null | null | null |
tests/tree/aggs/test_aggs.py
|
alk-lbinet/pandagg
|
542350f84ca4497ab4a5f01b054aff2385f6827e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# IMPORTS
# =============================================================================
from __future__ import unicode_literals
from unittest import TestCase
from mock import patch
from pandagg.tree.aggs import Aggs
from pandagg.exceptions import InvalidOperationMappingFieldError
from pandagg.aggs import DateHistogram, Terms, Avg, Min, Filter
import tests.testing_samples.data_sample as sample
from tests.testing_samples.mapping_example import MAPPINGS
| 35.213389
| 113
| 0.361157
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# IMPORTS
# =============================================================================
from __future__ import unicode_literals
from unittest import TestCase
from mock import patch
from pandagg.tree.aggs import Aggs
from pandagg.exceptions import InvalidOperationMappingFieldError
from pandagg.aggs import DateHistogram, Terms, Avg, Min, Filter
import tests.testing_samples.data_sample as sample
from tests.testing_samples.mapping_example import MAPPINGS
class AggTestCase(TestCase):
def setUp(self):
patcher = patch("uuid.uuid4", side_effect=range(1000))
patcher.start()
self.addCleanup(patcher.stop)
def test_deserialize_nodes_with_subaggs(self):
expected = {
"genres": {
"terms": {"field": "genres", "size": 3},
"aggs": {
"movie_decade": {
"date_histogram": {"field": "year", "fixed_interval": "3650d"}
}
},
}
}
agg1 = Aggs(expected)
agg2 = Aggs(
{
"genres": Terms(
field="genres",
size=3,
aggs={
"movie_decade": DateHistogram(
field="year", fixed_interval="3650d"
)
},
)
}
)
agg3 = Aggs(
{
"genres": Terms(
field="genres",
size=3,
aggs={
"movie_decade": DateHistogram(
field="year", fixed_interval="3650d"
)
},
)
}
)
agg4 = Aggs(
{
"genres": Terms(
field="genres",
size=3,
aggs={
"movie_decade": {
"date_histogram": {
"field": "year",
"fixed_interval": "3650d",
}
}
},
)
}
)
agg5 = Aggs(
{
"genres": {
"terms": {"field": "genres", "size": 3},
"aggs": {
"movie_decade": DateHistogram(
field="year", fixed_interval="3650d"
)
},
}
}
)
for a in (agg1, agg2, agg3, agg4, agg5):
self.assertEqual(a.to_dict(), expected)
self.assertEqual(
a.show(stdout=False),
"""<Aggregations>
genres <terms, field="genres", size=3>
└── movie_decade <date_histogram, field="year", fixed_interval="3650d">
""",
)
def test_add_node_with_mapping(self):
with_mapping = Aggs(mappings=MAPPINGS, nested_autocorrect=True)
# add regular node
with_mapping = with_mapping.agg("workflow", Terms(field="workflow"))
self.assertEqual(
with_mapping.to_dict(), {"workflow": {"terms": {"field": "workflow"}}}
)
# try to add field aggregation on non-existing field will fail
with self.assertRaises(ValueError):
with_mapping.agg(
"imaginary_agg", Terms(field="imaginary_field"), insert_below="workflow"
)
# try to add aggregation on a non-compatible field will fail
with self.assertRaises(InvalidOperationMappingFieldError):
with_mapping.agg(
"average_of_string",
Avg(field="classification_type"),
insert_below="workflow",
)
# add field aggregation on field passing through nested will automatically add nested
with_mapping = with_mapping.agg(
"local_f1_score",
Avg(field="local_metrics.performance.test.f1_score"),
insert_below="workflow",
)
self.assertEqual(
with_mapping.to_dict(),
{
"workflow": {
"aggs": {
"nested_below_workflow": {
"aggs": {
"local_f1_score": {
"avg": {
"field": "local_metrics.performance.test.f1_score"
}
}
},
"nested": {"path": "local_metrics"},
}
},
"terms": {"field": "workflow"},
}
},
)
self.assertEqual(
with_mapping.show(stdout=False),
"""<Aggregations>
workflow <terms, field="workflow">
└── nested_below_workflow <nested, path="local_metrics">
└── local_f1_score <avg, field="local_metrics.performance.test.f1_score">
""",
)
auto_nested_id = with_mapping.id_from_key("nested_below_workflow")
k, node = with_mapping.get(auto_nested_id)
self.assertEqual(k, "nested_below_workflow")
self.assertEqual(node.KEY, "nested")
self.assertEqual(node.path, "local_metrics")
# add other agg requiring nested will reuse nested agg as parent
with_mapping = with_mapping.agg(
"local_precision",
Avg(field="local_metrics.performance.test.precision"),
insert_below="workflow",
)
self.assertEqual(
{
"workflow": {
"aggs": {
"nested_below_workflow": {
"aggs": {
"local_f1_score": {
"avg": {
"field": "local_metrics.performance.test.f1_score"
}
},
"local_precision": {
"avg": {
"field": "local_metrics.performance.test.precision"
}
},
},
"nested": {"path": "local_metrics"},
}
},
"terms": {"field": "workflow"},
}
},
with_mapping.to_dict(),
)
# add under a nested parent a field aggregation that requires to be located under root will automatically
# add reverse-nested
with_mapping = with_mapping.agg(
"language_terms",
Terms(field="language"),
insert_below="nested_below_workflow",
)
self.assertEqual(
with_mapping.to_dict(),
{
"workflow": {
"aggs": {
"nested_below_workflow": {
"aggs": {
"local_f1_score": {
"avg": {
"field": "local_metrics.performance.test.f1_score"
}
},
"local_precision": {
"avg": {
"field": "local_metrics.performance.test.precision"
}
},
"reverse_nested_below_nested_below_workflow": {
"aggs": {
"language_terms": {
"terms": {"field": "language"}
}
},
"reverse_nested": {},
},
},
"nested": {"path": "local_metrics"},
}
},
"terms": {"field": "workflow"},
}
},
)
# TODO - finish these tests (reverse nested)
def test_paste_tree_with_mapping(self):
# with explicit nested
initial_agg_1 = Aggs(
{
"week": {
"date_histogram": {
"field": "date",
"format": "yyyy-MM-dd",
"interval": "1w",
}
}
},
mappings=MAPPINGS,
)
self.assertEqual({k for k, _ in initial_agg_1.list()}, {None, "week"})
pasted_agg_1 = Aggs(
{
"nested_below_week": {
"nested": {"path": "local_metrics"},
"aggs": {
"local_metrics.field_class.name": {
"terms": {
"field": "local_metrics.field_class.name",
"size": 10,
}
}
},
}
}
)
self.assertEqual(
{k for k, _ in pasted_agg_1.list()},
{None, "nested_below_week", "local_metrics.field_class.name"},
)
agg_2 = initial_agg_1.aggs(pasted_agg_1, insert_below="week")
self.assertEqual(
{k for k, _ in agg_2.list()},
{None, "week", "nested_below_week", "local_metrics.field_class.name"},
)
self.assertEqual(
agg_2.to_dict(),
{
"week": {
"date_histogram": {
"field": "date",
"format": "yyyy-MM-dd",
"interval": "1w",
},
"aggs": {
"nested_below_week": {
"nested": {"path": "local_metrics"},
"aggs": {
"local_metrics.field_class.name": {
"terms": {
"field": "local_metrics.field_class.name",
"size": 10,
}
}
},
}
},
}
},
)
# without explicit nested
initial_agg_2 = Aggs(
{
"week": {
"date_histogram": {
"field": "date",
"format": "yyyy-MM-dd",
"interval": "1w",
}
}
},
mappings=MAPPINGS,
nested_autocorrect=True,
)
self.assertEqual({k for k, _ in initial_agg_2.list()}, {None, "week"})
pasted_agg_2 = Aggs(
{
"local_metrics.field_class.name": {
"terms": {"field": "local_metrics.field_class.name", "size": 10}
}
}
)
self.assertEqual(
{k for k, _ in pasted_agg_2.list()},
{None, "local_metrics.field_class.name"},
)
agg_3 = initial_agg_2.aggs(pasted_agg_2, insert_below="week")
self.assertEqual(
{k for k, _ in agg_3.list()},
{None, "week", "nested_below_week", "local_metrics.field_class.name"},
)
self.assertEqual(
agg_3.to_dict(),
{
"week": {
"date_histogram": {
"field": "date",
"format": "yyyy-MM-dd",
"interval": "1w",
},
"aggs": {
"nested_below_week": {
"nested": {"path": "local_metrics"},
"aggs": {
"local_metrics.field_class.name": {
"terms": {
"field": "local_metrics.field_class.name",
"size": 10,
}
}
},
}
},
}
},
)
def test_insert_tree_without_mapping(self):
# with explicit nested
initial_agg_1 = Aggs(
{
"week": {
"date_histogram": {
"field": "date",
"format": "yyyy-MM-dd",
"interval": "1w",
}
}
}
)
self.assertEqual({k for k, _ in initial_agg_1.list()}, {None, "week"})
pasted_agg_1 = Aggs(
{
"nested_below_week": {
"nested": {"path": "local_metrics"},
"aggs": {
"local_metrics.field_class.name": {
"terms": {
"field": "local_metrics.field_class.name",
"size": 10,
}
}
},
}
}
)
self.assertEqual(
{k for k, _ in pasted_agg_1.list()},
{None, "nested_below_week", "local_metrics.field_class.name"},
)
agg_2 = initial_agg_1.aggs(pasted_agg_1, insert_below="week")
self.assertEqual(
{k for k, _ in agg_2.list()},
{None, "week", "nested_below_week", "local_metrics.field_class.name"},
)
self.assertEqual(
agg_2.to_dict(),
{
"week": {
"date_histogram": {
"field": "date",
"format": "yyyy-MM-dd",
"interval": "1w",
},
"aggs": {
"nested_below_week": {
"nested": {"path": "local_metrics"},
"aggs": {
"local_metrics.field_class.name": {
"terms": {
"field": "local_metrics.field_class.name",
"size": 10,
}
}
},
}
},
}
},
)
def test_interpret_agg_string(self):
some_agg = Aggs()
some_agg = some_agg.agg("some_field", insert_below=None)
self.assertEqual(
some_agg.to_dict(), {"some_field": {"terms": {"field": "some_field"}}}
)
# with default size
some_agg = Aggs()
some_agg = some_agg.agg("some_field", insert_below=None, size=10)
self.assertEqual(
some_agg.to_dict(),
{"some_field": {"terms": {"field": "some_field", "size": 10}}},
)
# with parent
some_agg = Aggs(
{"root_agg_name": {"terms": {"field": "some_field", "size": 5}}}
)
some_agg = some_agg.agg("child_field", insert_below="root_agg_name")
self.assertEqual(
some_agg.to_dict(),
{
"root_agg_name": {
"aggs": {"child_field": {"terms": {"field": "child_field"}}},
"terms": {"field": "some_field", "size": 5},
}
},
)
# with required nested
some_agg = Aggs(
{"term_workflow": {"terms": {"field": "workflow", "size": 5}}},
mappings=MAPPINGS,
nested_autocorrect=True,
)
some_agg = some_agg.agg(
"local_metrics.field_class.name", insert_below="term_workflow"
)
self.assertEqual(
some_agg.to_dict(),
{
"term_workflow": {
"aggs": {
"nested_below_term_workflow": {
"aggs": {
"local_metrics.field_class.name": {
"terms": {"field": "local_metrics.field_class.name"}
}
},
"nested": {"path": "local_metrics"},
}
},
"terms": {"field": "workflow", "size": 5},
}
},
)
def test_aggs(self):
node = Terms(field="some_field", size=10)
some_agg = Aggs().agg("some_name", node, insert_below=None)
self.assertEqual(
some_agg.to_dict(),
{"some_name": {"terms": {"field": "some_field", "size": 10}}},
)
# with parent with required nested
some_agg = Aggs(
{"term_workflow": {"terms": {"field": "workflow", "size": 5}}},
mappings=MAPPINGS,
nested_autocorrect=True,
)
node = Avg(field="local_metrics.performance.test.f1_score")
some_agg = some_agg.agg("min_local_f1", node, insert_below="term_workflow")
self.assertEqual(
some_agg.to_dict(),
{
"term_workflow": {
"aggs": {
"nested_below_term_workflow": {
"aggs": {
"min_local_f1": {
"avg": {
"field": "local_metrics.performance.test.f1_score"
}
}
},
"nested": {"path": "local_metrics"},
}
},
"terms": {"field": "workflow", "size": 5},
}
},
)
def test_aggs_at_root(self):
# not at root
a = (
Aggs()
.groupby("zero", "terms", field="terms_zero")
.agg("one", "terms", field="terms_one")
.agg("two", "terms", field="terms_two")
)
self.assertEqual(
a.to_dict(),
{
"zero": {
"terms": {"field": "terms_zero"},
"aggs": {
"one": {"terms": {"field": "terms_one"}},
"two": {"terms": {"field": "terms_two"}},
},
}
},
)
# at root
a = (
Aggs()
.groupby("zero", "terms", field="terms_zero")
.agg("one", "terms", field="terms_one")
.agg("two", "terms", field="terms_two", at_root=True)
)
self.assertEqual(
a.to_dict(),
{
"zero": {
"terms": {"field": "terms_zero"},
"aggs": {"one": {"terms": {"field": "terms_one"}}},
},
"two": {"terms": {"field": "terms_two"}},
},
)
def test_aggs_strings(self):
self.assertEqual(
Aggs().agg("yolo1").agg("yolo2").to_dict(),
{
"yolo1": {"terms": {"field": "yolo1"}},
"yolo2": {"terms": {"field": "yolo2"}},
},
)
def test_init_from_node_hierarchy(self):
node_hierarchy = sample.get_node_hierarchy()
agg = Aggs(node_hierarchy, mappings=MAPPINGS)
self.assertEqual(agg.to_dict(), sample.EXPECTED_AGG_QUERY)
# with nested
node_hierarchy = {
"week": DateHistogram(
field="date",
interval="1w",
aggs={
"local_metrics.field_class.name": Terms(
field="local_metrics.field_class.name",
size=10,
aggs={
"min_f1_score": Min(
field="local_metrics.performance.test.f1_score"
)
},
)
},
)
}
agg = Aggs(node_hierarchy, mappings=MAPPINGS, nested_autocorrect=True)
self.assertEqual(
agg.to_dict(),
{
"week": {
"aggs": {
"nested_below_week": {
"aggs": {
"local_metrics.field_class.name": {
"aggs": {
"min_f1_score": {
"min": {
"field": "local_metrics.performance.test.f1_score"
}
}
},
"terms": {
"field": "local_metrics.field_class.name",
"size": 10,
},
}
},
"nested": {"path": "local_metrics"},
}
},
"date_histogram": {"field": "date", "interval": "1w"},
}
},
)
self.assertEqual(
agg.to_dict(),
{
"week": {
"aggs": {
"nested_below_week": {
"aggs": {
"local_metrics.field_class.name": {
"aggs": {
"min_f1_score": {
"min": {
"field": "local_metrics.performance.test.f1_score"
}
}
},
"terms": {
"field": "local_metrics.field_class.name",
"size": 10,
},
}
},
"nested": {"path": "local_metrics"},
}
},
"date_histogram": {"field": "date", "interval": "1w"},
}
},
)
def test_agg_init(self):
agg = sample.get_wrapper_declared_agg()
self.assertEqual(agg.to_dict(), sample.EXPECTED_AGG_QUERY)
def test_groupby_args_syntax(self):
a = Aggs().groupby("some_name", "terms", field="some_field")
self.assertEqual(a.to_dict(), {"some_name": {"terms": {"field": "some_field"}}})
def test_groupby_at_root(self):
a = (
Aggs()
.groupby("one", "terms", field="terms_one")
.groupby("two", "terms", field="terms_two", at_root=True)
)
self.assertEqual(
a.to_dict(),
{
"two": {
"terms": {"field": "terms_two"},
"aggs": {"one": {"terms": {"field": "terms_one"}}},
}
},
)
# not at root: default behavior
a = (
Aggs()
.groupby("one", "terms", field="terms_one")
.groupby("two", "terms", field="terms_two")
)
self.assertEqual(
a.to_dict(),
{
"one": {
"terms": {"field": "terms_one"},
"aggs": {"two": {"terms": {"field": "terms_two"}}},
}
},
)
def test_groupby_insert_below(self):
a1 = Aggs(
{"A": Terms(field="A", aggs={"B": Terms(field="B"), "C": Terms(field="C")})}
)
self.assertEqual(
a1.to_dict(),
{
"A": {
"terms": {"field": "A"},
"aggs": {
"C": {"terms": {"field": "C"}},
"B": {"terms": {"field": "B"}},
},
}
},
)
self.assertEqual(
a1.groupby("D", Terms(field="D"), insert_below="A").to_dict(),
{
"A": {
"terms": {"field": "A"},
"aggs": {
"D": {
"terms": {"field": "D"},
"aggs": {
"B": {"terms": {"field": "B"}},
"C": {"terms": {"field": "C"}},
},
}
},
}
},
)
def test_agg_insert_below(self):
a1 = Aggs(
{"A": Terms(field="A", aggs={"B": Terms(field="B"), "C": Terms(field="C")})}
)
self.assertEqual(
a1.to_dict(),
{
"A": {
"terms": {"field": "A"},
"aggs": {
"C": {"terms": {"field": "C"}},
"B": {"terms": {"field": "B"}},
},
}
},
)
expected = {
"A": {
"aggs": {
"B": {"terms": {"field": "B"}},
"C": {"terms": {"field": "C"}},
"D": {"terms": {"field": "D"}},
},
"terms": {"field": "A"},
}
}
self.assertEqual(
a1.agg(name="D", type_or_agg=Terms(field="D"), insert_below="A").to_dict(),
expected,
)
self.assertEqual(
a1.agg(
name="D", type_or_agg="terms", field="D", insert_below="A"
).to_dict(),
expected,
)
self.assertEqual(
a1.agg(
name="D", type_or_agg={"terms": {"field": "D"}}, insert_below="A"
).to_dict(),
expected,
)
def test_applied_nested_path_at_node(self):
"""Check that correct nested path is detected at node levels:
week
└── nested_below_week
└── local_metrics.field_class.name
├── avg_f1_score
├── max_f1_score
└── min_f1_score
"""
node_hierarchy = {
"week": DateHistogram(
field="date",
interval="1w",
aggs={
"local_metrics.field_class.name": Terms(
field="local_metrics.field_class.name",
size=10,
aggs={
"min_f1_score": Min(
field="local_metrics.performance.test.f1_score"
)
},
)
},
)
}
agg = Aggs(node_hierarchy, mappings=MAPPINGS, nested_autocorrect=True)
self.assertEqual(agg.applied_nested_path_at_node(agg.id_from_key("week")), None)
for node_key in (
"nested_below_week",
"local_metrics.field_class.name",
"min_f1_score",
):
self.assertEqual(
agg.applied_nested_path_at_node(agg.id_from_key(node_key)),
"local_metrics",
)
def test_groupby_pointer(self):
a = (
Aggs()
.groupby("A", "terms", field="a")
.groupby("B", "date_histogram", fixed_interval="1d", field="b")
)
self.assertEqual(a.get_key(a._groupby_ptr), "B")
a1 = a.agg("C1", "terms", field="c1").agg("C2", "terms", field="c2")
self.assertEqual(
a1.show(stdout=False),
"""<Aggregations>
A <terms, field="a">
└── B <date_histogram, field="b", fixed_interval="1d">
├── C1 <terms, field="c1">
└── C2 <terms, field="c2">
""",
)
self.assertEqual(
a1.to_dict(),
{
"A": {
"aggs": {
"B": {
"aggs": {
"C1": {"terms": {"field": "c1"}},
"C2": {"terms": {"field": "c2"}},
},
"date_histogram": {"field": "b", "fixed_interval": "1d"},
}
},
"terms": {"field": "a"},
}
},
)
def test_deepest_linear_agg(self):
# deepest_linear_bucket_agg
"""
week
└── nested_below_week
└── local_metrics.field_class.name <----- HERE because then metric aggregation
└── avg_f1_score
"""
node_hierarchy = {
"week": DateHistogram(
field="date",
interval="1w",
aggs={
"local_metrics.field_class.name": Terms(
field="local_metrics.field_class.name",
size=10,
aggs={
"min_f1_score": Min(
field="local_metrics.performance.test.f1_score"
)
},
)
},
)
}
agg = Aggs(node_hierarchy, mappings=MAPPINGS, nested_autocorrect=True)
self.assertEqual(
agg.get_key(agg._deepest_linear_bucket_agg),
"local_metrics.field_class.name",
)
# week is last bucket linear bucket
node_hierarchy_2 = {
"week": DateHistogram(
field="date",
interval="1w",
aggs={
"local_metrics.field_class.name": Terms(
field="local_metrics.field_class.name", size=10
),
"f1_score_above_threshold": Filter(
filter={
"range": {
"local_metrics.performance.test.f1_score": {"gte": 0.5}
}
}
),
},
)
}
agg2 = Aggs(node_hierarchy_2, mappings=MAPPINGS, nested_autocorrect=True)
self.assertEqual(agg2.get_key(agg2._deepest_linear_bucket_agg), "week")
def test_grouped_by(self):
a = Aggs().aggs(
{
"some_agg": {
"terms": {"field": "some_field"},
"aggs": {"below_agg": {"terms": {"field": "other_field"}}},
}
}
)
self.assertEqual(a._groupby_ptr, a.root)
self.assertEqual(
a.agg("age_avg", "avg", field="age").to_dict(),
{
"some_agg": {
"terms": {"field": "some_field"},
"aggs": {"below_agg": {"terms": {"field": "other_field"}}},
},
"age_avg": {"avg": {"field": "age"}},
},
)
# select a specific agg
new_a = a.grouped_by("some_agg")
self.assertEqual(new_a._groupby_ptr, new_a.id_from_key("some_agg"))
self.assertEqual(
new_a.agg("age_avg", "avg", field="age").to_dict(),
{
"some_agg": {
"terms": {"field": "some_field"},
"aggs": {
"below_agg": {"terms": {"field": "other_field"}},
"age_avg": {"avg": {"field": "age"}},
},
}
},
)
# deepest
last_a = a.grouped_by(deepest=True)
self.assertEqual(last_a._groupby_ptr, new_a.id_from_key("below_agg"))
self.assertEqual(
last_a.agg("age_avg", "avg", field="age").to_dict(),
{
"some_agg": {
"terms": {"field": "some_field"},
"aggs": {
"below_agg": {
"terms": {"field": "other_field"},
"aggs": {"age_avg": {"avg": {"field": "age"}}},
}
},
}
},
)
| 29,246
| 3,856
| 23
|
16a28ee4b62eb063602230dea021f902824e2ced
| 2,785
|
py
|
Python
|
airflow/providers/google/cloud/links/cloud_tasks.py
|
holly-evans/airflow
|
865406cbab4defd35c95afbf0a8d5987ff7788b1
|
[
"Apache-2.0"
] | 8,092
|
2016-04-27T20:32:29.000Z
|
2019-01-05T07:39:33.000Z
|
airflow/providers/google/cloud/links/cloud_tasks.py
|
holly-evans/airflow
|
865406cbab4defd35c95afbf0a8d5987ff7788b1
|
[
"Apache-2.0"
] | 2,961
|
2016-05-05T07:16:16.000Z
|
2019-01-05T08:47:59.000Z
|
airflow/providers/google/cloud/links/cloud_tasks.py
|
holly-evans/airflow
|
865406cbab4defd35c95afbf0a8d5987ff7788b1
|
[
"Apache-2.0"
] | 3,546
|
2016-05-04T20:33:16.000Z
|
2019-01-05T05:14:26.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Tasks links."""
from typing import TYPE_CHECKING, Optional
from airflow.models import BaseOperator
from airflow.providers.google.cloud.links.base import BaseGoogleLink
if TYPE_CHECKING:
from airflow.utils.context import Context
CLOUD_TASKS_BASE_LINK = "https://pantheon.corp.google.com/cloudtasks"
CLOUD_TASKS_QUEUE_LINK = CLOUD_TASKS_BASE_LINK + "/queue/{location}/{queue_id}/tasks?project={project_id}"
CLOUD_TASKS_LINK = CLOUD_TASKS_BASE_LINK + "?project={project_id}"
class CloudTasksQueueLink(BaseGoogleLink):
"""Helper class for constructing Cloud Task Queue Link"""
name = "Cloud Tasks Queue"
key = "cloud_task_queue"
format_str = CLOUD_TASKS_QUEUE_LINK
@staticmethod
def extract_parts(queue_name: Optional[str]):
"""
Extract project_id, location and queue id from queue name:
projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID
"""
if not queue_name:
return "", "", ""
parts = queue_name.split("/")
return parts[1], parts[3], parts[5]
class CloudTasksLink(BaseGoogleLink):
"""Helper class for constructing Cloud Task Link"""
name = "Cloud Tasks"
key = "cloud_task"
format_str = CLOUD_TASKS_LINK
| 33.963415
| 106
| 0.70018
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Tasks links."""
from typing import TYPE_CHECKING, Optional
from airflow.models import BaseOperator
from airflow.providers.google.cloud.links.base import BaseGoogleLink
if TYPE_CHECKING:
from airflow.utils.context import Context
CLOUD_TASKS_BASE_LINK = "https://pantheon.corp.google.com/cloudtasks"
CLOUD_TASKS_QUEUE_LINK = CLOUD_TASKS_BASE_LINK + "/queue/{location}/{queue_id}/tasks?project={project_id}"
CLOUD_TASKS_LINK = CLOUD_TASKS_BASE_LINK + "?project={project_id}"
class CloudTasksQueueLink(BaseGoogleLink):
"""Helper class for constructing Cloud Task Queue Link"""
name = "Cloud Tasks Queue"
key = "cloud_task_queue"
format_str = CLOUD_TASKS_QUEUE_LINK
@staticmethod
def extract_parts(queue_name: Optional[str]):
"""
Extract project_id, location and queue id from queue name:
projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID
"""
if not queue_name:
return "", "", ""
parts = queue_name.split("/")
return parts[1], parts[3], parts[5]
@staticmethod
def persist(
operator_instance: BaseOperator,
context: "Context",
queue_name: Optional[str],
):
project_id, location, queue_id = CloudTasksQueueLink.extract_parts(queue_name)
operator_instance.xcom_push(
context,
key=CloudTasksQueueLink.key,
value={"project_id": project_id, "location": location, "queue_id": queue_id},
)
class CloudTasksLink(BaseGoogleLink):
"""Helper class for constructing Cloud Task Link"""
name = "Cloud Tasks"
key = "cloud_task"
format_str = CLOUD_TASKS_LINK
@staticmethod
def persist(
operator_instance: BaseOperator,
context: "Context",
project_id: Optional[str],
):
operator_instance.xcom_push(
context,
key=CloudTasksLink.key,
value={"project_id": project_id},
)
| 640
| 0
| 52
|
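The link classes above only push raw identifiers to XCom; the console URL itself comes from interpolating `format_str`. A minimal sketch of that interpolation, using made-up project, location and queue values:

# Template copied from CLOUD_TASKS_QUEUE_LINK above; the values below are placeholders.
CLOUD_TASKS_QUEUE_LINK = (
    "https://pantheon.corp.google.com/cloudtasks"
    "/queue/{location}/{queue_id}/tasks?project={project_id}"
)
conf = {"project_id": "my-project", "location": "europe-west1", "queue_id": "my-queue"}
print(CLOUD_TASKS_QUEUE_LINK.format(**conf))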
90143376f671142320b39462852736a614776a02
| 3,729
|
py
|
Python
|
app/create_app.py
|
communitiesuk/funding-service-design-assessment
|
95ec0cbb97a42ef12d931658b90d3d91f1edee4c
|
[
"MIT"
] | null | null | null |
app/create_app.py
|
communitiesuk/funding-service-design-assessment
|
95ec0cbb97a42ef12d931658b90d3d91f1edee4c
|
[
"MIT"
] | null | null | null |
app/create_app.py
|
communitiesuk/funding-service-design-assessment
|
95ec0cbb97a42ef12d931658b90d3d91f1edee4c
|
[
"MIT"
] | null | null | null |
from app.assets import compile_static_assets
from flask import Flask
from flask_assets import Environment
from flask_compress import Compress
from flask_talisman import Talisman
from flask_wtf.csrf import CSRFProtect
from jinja2 import ChoiceLoader
from jinja2 import PackageLoader
from jinja2 import PrefixLoader
app = create_app()
| 28.684615
| 79
| 0.588898
|
from app.assets import compile_static_assets
from flask import Flask
from flask_assets import Environment
from flask_compress import Compress
from flask_talisman import Talisman
from flask_wtf.csrf import CSRFProtect
from jinja2 import ChoiceLoader
from jinja2 import PackageLoader
from jinja2 import PrefixLoader
def create_app() -> Flask:
flask_app = Flask(
__name__, static_url_path="/assets", static_folder="static/dist"
)
flask_app.config.from_pyfile("config.py")
flask_app.jinja_loader = ChoiceLoader(
[
PackageLoader("app"),
PrefixLoader(
{"govuk_frontend_jinja": PackageLoader("govuk_frontend_jinja")}
),
]
)
flask_app.jinja_env.trim_blocks = True
flask_app.jinja_env.lstrip_blocks = True
csp = {
"default-src": "'self'",
"script-src": [
"'self'",
"'sha256-+6WnXIl4mbFTCARd8N3COQmT3bJJmo32N8q8ZSQAIcU='",
"'sha256-l1eTVSK8DTnK8+yloud7wZUqFrI0atVo6VlC6PJvYaQ='",
],
"img-src": ["data:", "'self'"],
}
if str(
flask_app.config.get(
"".join(["APPLICATION_STORE", "_API_HOST_PUBLIC"])
)
).startswith("https://"):
csp.update(
{
"connect-src": [
flask_app.config.get("APPLICATION_STORE_API_HOST_PUBLIC"),
],
}
)
hss = {
"Strict-Transport-Security": (
"max-age=31536000; includeSubDomains; preload"
),
"X-Content-Type-Options": "nosniff",
"X-Frame-Options": "SAMEORIGIN",
"X-XSS-Protection": "1; mode=block",
"Feature_Policy": (
"microphone 'none'; camera 'none'; geolocation 'none'"
),
}
Compress(flask_app)
Talisman(
flask_app,
content_security_policy=csp,
strict_transport_security=hss,
content_security_policy_nonce_in=["script-src"],
)
csrf = CSRFProtect()
csrf.init_app(flask_app)
# This is silently used by flask in the background.
@flask_app.context_processor
def inject_global_constants():
return dict(
stage="beta",
service_title="Funding Service Design - Assessment Hub",
service_meta_description=(
"Funding Service Design Iteration - Assessment Hub"
),
service_meta_keywords="Funding Service Design - Assessment Hub",
service_meta_author="DLUHC",
)
with flask_app.app_context():
from app.default.routes import (
default_bp,
not_found,
internal_server_error,
)
from app.assess.routes import assess_bp
from app.assess.api import api_bp
from app.assess.views.assess import AssessQuestionView
flask_app.register_error_handler(404, not_found)
flask_app.register_error_handler(500, internal_server_error)
flask_app.register_blueprint(default_bp)
flask_app.register_blueprint(assess_bp)
flask_app.register_blueprint(api_bp)
flask_app.add_url_rule(
"/".join(
[
flask_app.config["ASSESSMENT_HUB_ROUTE"],
"application",
"<application_id>",
"question",
"<question_id>",
]
)
+ "/",
view_func=AssessQuestionView.as_view("application_question"),
)
# Bundle and compile assets
assets = Environment()
assets.init_app(flask_app)
compile_static_assets(assets)
return flask_app
app = create_app()
| 3,370
| 0
| 23
|
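The `create_app` factory above wires flask-talisman into the assessment app; the same CSP/nonce pattern can be exercised on a bare Flask app. A self-contained sketch (not the project's configuration; the route and header check are illustrative):

from flask import Flask
from flask_talisman import Talisman

demo = Flask(__name__)
csp = {"default-src": "'self'", "img-src": ["data:", "'self'"]}
Talisman(demo, content_security_policy=csp,
         content_security_policy_nonce_in=["script-src"],
         force_https=False)  # disabled here only so the test client below works over http

@demo.route("/")
def index():
    return "ok"

with demo.test_client() as client:
    print(client.get("/").headers["Content-Security-Policy"])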
5d4f8ce969782769c49f9bd860ae31820b06ab43
| 1,738
|
py
|
Python
|
setup.py
|
msdslab/ASReview-vizualisation
|
725cdfcc028a91225487fc6c63b15bd1cb3c6130
|
[
"Apache-2.0"
] | 1
|
2020-01-31T07:13:34.000Z
|
2020-01-31T07:13:34.000Z
|
setup.py
|
msdslab/ASReview-vizualisation
|
725cdfcc028a91225487fc6c63b15bd1cb3c6130
|
[
"Apache-2.0"
] | 1
|
2020-01-30T12:40:03.000Z
|
2020-01-30T12:41:08.000Z
|
setup.py
|
msdslab/ASReview-vizualisation
|
725cdfcc028a91225487fc6c63b15bd1cb3c6130
|
[
"Apache-2.0"
] | 1
|
2020-01-29T15:18:43.000Z
|
2020-01-29T15:18:43.000Z
|
# based on https://github.com/pypa/sampleproject
# MIT License
from io import open
from os import path
# Always prefer setuptools over distutils
from setuptools import find_namespace_packages
from setuptools import setup
import versioneer
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='asreview-insights',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='Insight tools for the ASReview project',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/asreview/asreview-insights',
author='ASReview LAB developers',
author_email='asreview@uu.nl',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
keywords='asreview plot insights',
packages=find_namespace_packages(include=['asreviewcontrib.*']),
install_requires=[
"numpy",
"matplotlib",
"asreview>=1,<2",
],
extras_require={},
entry_points={
"asreview.entry_points": [
"plot = asreviewcontrib.insights.entrypoint:PlotEntryPoint",
"metrics = asreviewcontrib.insights.entrypoint:MetricsEntryPoint",
]
},
project_urls={
'Bug Reports': "https://github.com/asreview/asreview-insights/issues",
'Source': "https://github.com/asreview/asreview-insights",
},
)
| 31.6
| 78
| 0.677791
|
# based on https://github.com/pypa/sampleproject
# MIT License
from io import open
from os import path
# Always prefer setuptools over distutils
from setuptools import find_namespace_packages
from setuptools import setup
import versioneer
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='asreview-insights',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='Insight tools for the ASReview project',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/asreview/asreview-insights',
author='ASReview LAB developers',
author_email='asreview@uu.nl',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
keywords='asreview plot insights',
packages=find_namespace_packages(include=['asreviewcontrib.*']),
install_requires=[
"numpy",
"matplotlib",
"asreview>=1,<2",
],
extras_require={},
entry_points={
"asreview.entry_points": [
"plot = asreviewcontrib.insights.entrypoint:PlotEntryPoint",
"metrics = asreviewcontrib.insights.entrypoint:MetricsEntryPoint",
]
},
project_urls={
'Bug Reports': "https://github.com/asreview/asreview-insights/issues",
'Source': "https://github.com/asreview/asreview-insights",
},
)
| 0
| 0
| 0
|
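Once this package is installed, the `asreview.entry_points` group declared in `entry_points` above is discoverable at runtime. A sketch using the standard-library metadata API (Python 3.10+):

from importlib.metadata import entry_points

for ep in entry_points(group="asreview.entry_points"):
    print(ep.name, "->", ep.value)  # e.g. plot -> asreviewcontrib.insights.entrypoint:PlotEntryPoint
    # ep.load() would import and return the referenced entry point class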
07526d850b339b3e5cf2dc6b9dcb0479990f24ad
| 819
|
py
|
Python
|
distort_obj.py
|
aschier/mesh_scripts
|
730e51abe04c8b0e189774ca7fcf9039c60f80cd
|
[
"BSD-3-Clause"
] | null | null | null |
distort_obj.py
|
aschier/mesh_scripts
|
730e51abe04c8b0e189774ca7fcf9039c60f80cd
|
[
"BSD-3-Clause"
] | null | null | null |
distort_obj.py
|
aschier/mesh_scripts
|
730e51abe04c8b0e189774ca7fcf9039c60f80cd
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import sys, random
vertices = []
faces = []
with open(sys.argv[1], "r") as file:
lines = file.read().split("\n")
outputobj = ""
for line in lines:
if line.startswith("v"):
parts = line.split(" ")
vertices.append([float(x) for x in parts[1:]])
if line.startswith("f"):
parts = line.split(" ")
faces.append([int(x) for x in parts[1:]])
for face in faces:
for idx in face:
vertex = [x+random.random()/50.0 for x in vertices[idx-1]]
outputobj += "v " + " ".join([str(x) for x in vertex]) + "\n"
for i in xrange(len(faces)):
outputobj += "f {0:d} {1:d} {2:d}".format(3*i+1, 3*i+1+1, 3*i+2+1) + "\n"
with open(sys.argv[2], "w") as outfile:
outfile.write(outputobj)
| 32.76
| 81
| 0.521368
|
#!/usr/bin/env python
import sys, random
vertices = []
faces = []
with open(sys.argv[1], "r") as file:
lines = file.read().split("\n")
outputobj = ""
for line in lines:
if line.startswith("v"):
parts = line.split(" ")
vertices.append([float(x) for x in parts[1:]])
if line.startswith("f"):
parts = line.split(" ")
faces.append([int(x) for x in parts[1:]])
for face in faces:
for idx in face:
vertex = [x+random.random()/50.0 for x in vertices[idx-1]]
outputobj += "v " + " ".join([str(x) for x in vertex]) + "\n"
for i in xrange(len(faces)):
outputobj += "f {0:d} {1:d} {2:d}".format(3*i+1, 3*i+1+1, 3*i+2+1) + "\n"
with open(sys.argv[2], "w") as outfile:
outfile.write(outputobj)
| 0
| 0
| 0
|
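The jitter script above matches any line starting with "v", which would also pull in `vt`/`vn` records, and it assumes purely triangular, slash-free face definitions. A Python 3 sketch of the same idea with those cases handled; the file paths are placeholders:

import random

def jitter_obj(in_path, out_path, amount=0.02):
    vertices, faces = [], []
    with open(in_path) as fh:
        for line in fh:
            if line.startswith("v "):    # geometric vertices only, skip vt/vn
                vertices.append([float(x) for x in line.split()[1:4]])
            elif line.startswith("f "):  # keep just the vertex index of each v/vt/vn token
                faces.append([int(tok.split("/")[0]) for tok in line.split()[1:]])
    with open(out_path, "w") as out:
        count = 0
        for face in faces:
            new_idx = []
            for idx in face:
                v = [c + random.uniform(-amount, amount) for c in vertices[idx - 1]]
                out.write("v " + " ".join(map(str, v)) + "\n")
                count += 1
                new_idx.append(count)
            out.write("f " + " ".join(map(str, new_idx)) + "\n")

jitter_obj("input.obj", "output.obj")  # placeholder filenames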
b7fcc03c4d39ad34490d062cf734cf9b98afa16c
| 3,933
|
py
|
Python
|
metadata/scrape/hscic/ods.py
|
nhsengland/publish-o-matic
|
dc8f16cb83a2360989afa44d887e63b5cde6af29
|
[
"MIT"
] | null | null | null |
metadata/scrape/hscic/ods.py
|
nhsengland/publish-o-matic
|
dc8f16cb83a2360989afa44d887e63b5cde6af29
|
[
"MIT"
] | 11
|
2015-03-02T16:30:20.000Z
|
2016-11-29T12:16:15.000Z
|
metadata/scrape/hscic/ods.py
|
nhsengland/publish-o-matic
|
dc8f16cb83a2360989afa44d887e63b5cde6af29
|
[
"MIT"
] | 2
|
2020-12-25T20:38:31.000Z
|
2021-04-11T07:35:01.000Z
|
"""
Scrape ODS data from the HSCIC
"""
import json
import sys
import ffs
sys.path.append(ffs.Path.here().parent)
import scrape
DATA_DIR = ffs.Path.here()/'../../data'
DOWNLOADS = 'http://systems.hscic.gov.uk/data/ods/datadownloads/index'
def check_sanity_of(metadata):
"""
We've just finished scraping, let's make sure we haven't scraped bullshit.
"""
for dataset in metadata:
for resource in dataset['resources']:
if not resource['url']:
print dataset['title']
print dataset['url']
print resource
raise Error('You scraped a resource without noting the URL Larry')
return
def fetch_dataset_metadata(url):
"""
Given a URL, fetch the metadata and resources
from that page, and return it as a dict.
"""
print url
dom = scrape._astree(url)
title = dom.cssselect('h1.documentFirstHeading')[0].text_content().strip()
description_elements = [e.text_content() for e in dom.cssselect('#parent-fieldname-text')[0] if e.tag != 'table']
description = "\n".join(description_elements).strip()
metadata = dict(
url=url,
title=title,
description=description,
)
resources = []
try:
data_tbody = dom.cssselect('table.listing tbody')[1]
except IndexError: # Sometimes the table isn't built that way
data_tbody = dom.cssselect('table.listing tbody')[0]
resource_rows = data_tbody.cssselect('tr')
try:
for row in resource_rows:
if 'haandsa' in url:
try:
description, name, created, _ = row
except ValueError:
description, name, created = row
else:
name, description, created = row
# if 'safehaven' in url:
# import pdb;pdb.set_trace()
resource = {
'url': name.cssselect('a')[0].get('href'),
'name': name.text_content().strip(),
'description': description.text_content().strip()
}
resources.append(resource)
except ValueError: # Sometimes there are more columns
for row in resource_rows:
name, full, excel, created = row
resource = {
'url': full.cssselect('a')[0].get('href'),
'name': 'Full ' + name.text_content().strip(),
'description': name.text_content().strip()
}
resources.append(resource)
if excel.text_content().strip() == 'N/A':
continue
try:
resource = {
'url': excel.cssselect('a')[0].get('href'),
'name': 'Excel ' + name.text_content().strip(),
'description': name.text_content().strip()
}
except IndexError:
import pdb;pdb.set_trace()
print row
resources.append(resource)
metadata['resources'] = resources
return metadata
def fetch_ods_metadata():
"""
* Fetch the list of downloads from the download index
* Iterate through them gathering metadata on each
* Write to a file as one dataset per "Download"
"""
dom = scrape._astree(DOWNLOADS)
downloads = dom.cssselect('table.listing a.internal-link')
categories = list(set(a.get('href') for a in downloads))
metadata = [fetch_dataset_metadata(url) for url in categories]
check_sanity_of(metadata)
metafile = DATA_DIR/'ods.json'
metafile.truncate()
metafile << json.dumps(metadata, indent=2)
return
if __name__ == '__main__':
sys.exit(main())
| 31.214286
| 117
| 0.567251
|
"""
Scrape ODS data from the HSCIC
"""
import json
import sys
import ffs
sys.path.append(ffs.Path.here().parent)
import scrape
DATA_DIR = ffs.Path.here()/'../../data'
DOWNLOADS = 'http://systems.hscic.gov.uk/data/ods/datadownloads/index'
class Error(Exception):
def __init__(self, msg):
Exception.__init__(self, '\n\n\n{0}\n\n\n'.format(msg))
def check_sanity_of(metadata):
"""
We've just finished scraping, let's make sure we haven't scraped bullshit.
"""
for dataset in metadata:
for resource in dataset['resources']:
if not resource['url']:
print dataset['title']
print dataset['url']
print resource
raise Error('You scraped a resource without noting the URL Larry')
return
def fetch_dataset_metadata(url):
"""
Given a URL, fetch the metadata and resources
from that page, and return it as a dict.
"""
print url
dom = scrape._astree(url)
title = dom.cssselect('h1.documentFirstHeading')[0].text_content().strip()
description_elements = [e.text_content() for e in dom.cssselect('#parent-fieldname-text')[0] if e.tag != 'table']
description = "\n".join(description_elements).strip()
metadata = dict(
url=url,
title=title,
description=description,
)
resources = []
try:
data_tbody = dom.cssselect('table.listing tbody')[1]
except IndexError: # Sometimes the table isn't built that way
data_tbody = dom.cssselect('table.listing tbody')[0]
resource_rows = data_tbody.cssselect('tr')
try:
for row in resource_rows:
if 'haandsa' in url:
try:
description, name, created, _ = row
except ValueError:
description, name, created = row
else:
name, description, created = row
# if 'safehaven' in url:
# import pdb;pdb.set_trace()
resource = {
'url': name.cssselect('a')[0].get('href'),
'name': name.text_content().strip(),
'description': description.text_content().strip()
}
resources.append(resource)
except ValueError: # Sometimes there are more columns
for row in resource_rows:
name, full, excel, created = row
resource = {
'url': full.cssselect('a')[0].get('href'),
'name': 'Full ' + name.text_content().strip(),
'description': name.text_content().strip()
}
resources.append(resource)
if excel.text_content().strip() == 'N/A':
continue
try:
resource = {
'url': excel.cssselect('a')[0].get('href'),
'name': 'Excel ' + name.text_content().strip(),
'description': name.text_content().strip()
}
except IndexError:
import pdb;pdb.set_trace()
print row
resources.append(resource)
metadata['resources'] = resources
return metadata
def fetch_ods_metadata():
"""
* Fetch the list of downloads from the download index
* Iterate through them gathering metadata on each
* Write to a file as one dataset per "Download"
"""
dom = scrape._astree(DOWNLOADS)
downloads = dom.cssselect('table.listing a.internal-link')
categories = list(set(a.get('href') for a in downloads))
metadata = [fetch_dataset_metadata(url) for url in categories]
check_sanity_of(metadata)
metafile = DATA_DIR/'ods.json'
metafile.truncate()
metafile << json.dumps(metadata, indent=2)
return
def main():
fetch_ods_metadata()
return 0
if __name__ == '__main__':
sys.exit(main())
| 95
| 2
| 81
|
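`scrape._astree` is a project helper not shown here; the row handling above is plain lxml/cssselect. A stand-alone sketch of the same pattern against a small HTML snippet (requires the `cssselect` package alongside lxml):

import lxml.html

HTML = (
    '<table class="listing"><tbody>'
    '<tr><td><a href="/files/a.csv">A</a></td><td>First file</td><td>2015</td></tr>'
    '</tbody></table>'
)
dom = lxml.html.fromstring(HTML)
for row in dom.cssselect("table.listing tr"):
    name, description, created = row
    print(name.cssselect("a")[0].get("href"), description.text_content().strip())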
f176663e74b963d6d7e6336560c123d0572a7cad
| 847
|
py
|
Python
|
nanitosbaby/store/migrations/0007_auto_20170722_1945.py
|
Hector-hedb12/nanitosbaby
|
86eff05157dab02a7daca61e1f70ec76bbf6cbdf
|
[
"MIT"
] | null | null | null |
nanitosbaby/store/migrations/0007_auto_20170722_1945.py
|
Hector-hedb12/nanitosbaby
|
86eff05157dab02a7daca61e1f70ec76bbf6cbdf
|
[
"MIT"
] | null | null | null |
nanitosbaby/store/migrations/0007_auto_20170722_1945.py
|
Hector-hedb12/nanitosbaby
|
86eff05157dab02a7daca61e1f70ec76bbf6cbdf
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-22 19:45
from __future__ import unicode_literals
from django.db import migrations
| 26.46875
| 87
| 0.582054
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-22 19:45
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('store', '0006_auto_20170227_0204'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'verbose_name': 'Categoria', 'verbose_name_plural': 'Categorias'},
),
migrations.AlterModelOptions(
name='product',
options={'verbose_name': 'Producto'},
),
migrations.AlterModelOptions(
name='productamount',
options={'verbose_name': 'Cantidad de Producto'},
),
migrations.AlterModelOptions(
name='size',
options={'verbose_name': 'Talla'},
),
]
| 0
| 676
| 23
|
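The Spanish `verbose_name` strings set by this migration are what Django later reports through each model's `_meta` options (and in the admin). A small sketch, assuming a configured Django project where the `store` app's models are importable:

from store.models import Category  # assumption: the app providing these models

print(Category._meta.verbose_name)          # "Categoria"
print(Category._meta.verbose_name_plural)   # "Categorias"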
6e0b8d667aadec326f62af823df8872b61b53d0e
| 5,659
|
py
|
Python
|
SpriteTool.py
|
LV-6502/Sprite-Layer-Compiler
|
9d37dbf1473e5d4015758dcc60a5fb1965d2a9e3
|
[
"MIT"
] | 3
|
2021-05-06T06:40:08.000Z
|
2021-09-12T14:28:38.000Z
|
SpriteTool.py
|
LV-6502/Sprite-Layer-Compiler
|
9d37dbf1473e5d4015758dcc60a5fb1965d2a9e3
|
[
"MIT"
] | null | null | null |
SpriteTool.py
|
LV-6502/Sprite-Layer-Compiler
|
9d37dbf1473e5d4015758dcc60a5fb1965d2a9e3
|
[
"MIT"
] | null | null | null |
import tkinter as tk
from tkinter import filedialog, Entry, messagebox
from PIL import ImageTk, Image
filenames = []
all_labels = []
base_labels = []
Layer_0 = Image.new(mode = "RGB", size = (1, 1))
left_img_out = Image.new(mode = "RGB", size = (1, 1))
right_img_out = Image.new(mode = "RGB", size = (1, 1))
window = tk.Tk()
window.geometry("1000x600")
window.title("V1.0")
heading = tk.Label(text = "Sprite Layer Compiler", bg="grey", fg="white", width="500",height="3")
heading2 = tk.Label(text = "", bg="grey", fg="white", width="500",height="1")
heading.pack()
heading2.place(x=0,y=350)
Description_Label = tk.Label(window, text = "Sprite Frame Dimensions").place(x=320,y=260)
X_label = tk.Label(window, text = "Pixel X:").place(x=320,y=280)
xval = Entry(window, width="8")
xval.place(x=370,y=280)
xval.focus_set()
Y_label = tk.Label(window, text = "Pixel Y:").place(x=320,y=300)
yval = Entry(window, width="8")
yval.place(x=370,y=300)
yval.focus_set()
tk.Button(window, text='Layer 0', command= lambda: openfile0(60,60),).place(x=10,y=60)
tk.Button(window, text='Layer 1', command= lambda: openfile1(60,90)).place(x=10,y=90)
tk.Button(window, text='Layer 2', command= lambda: openfile1(60,120)).place(x=10,y=120)
tk.Button(window, text='Layer 3', command= lambda: openfile1(60,150)).place(x=10,y=150)
tk.Button(window, text='Layer 4', command= lambda: openfile1(60,180)).place(x=10,y=180)
tk.Button(window, text='Layer 5', command= lambda: openfile1(60,210)).place(x=10,y=210)
tk.Button(window, text='Layer 6', command= lambda: openfile1(60,240)).place(x=10,y=240)
tk.Button(window, text='Preview', command=Preview, bg="grey", fg="white", width="8",height="1").place(x=10,y=280)
tk.Button(window, text='Flip Left', command=FlipLeft, bg="grey", fg="white", width="8",height="1").place(x=80,y=280)
tk.Button(window, text='Clear', command=Clear, bg="#e12120", fg="white", width="8",height="1").place(x=170,y=280)
tk.Button(window, text='Clear All', command=ClearAll, bg="#971414", fg="white", width="8",height="1").place(x=240,y=280)
tk.Button(window, text='Save Right', command=SaveFileRight, bg="green", fg="white", width="8",height="1").place(x=10,y=310)
tk.Button(window, text='Save Left', command=SaveFileLeft, bg="green", fg="white", width="8",height="1").place(x=80,y=310)
window.mainloop()
| 34.717791
| 123
| 0.641103
|
import tkinter as tk
from tkinter import filedialog, Entry, messagebox
from PIL import ImageTk, Image
filenames = []
all_labels = []
base_labels = []
Layer_0 = Image.new(mode = "RGB", size = (1, 1))
left_img_out = Image.new(mode = "RGB", size = (1, 1))
right_img_out = Image.new(mode = "RGB", size = (1, 1))
def openfile0(xnum,ynum):
global filename0
filename0 = filedialog.askopenfilename(parent=window,title="Layer 0")
if filename0:
b0_l = tk.Label(window, text = "File path: " + filename0)
b0_l.place(x=xnum,y=ynum)
filenames.append(filename0)
base_labels.append(b0_l)
else:
b0_l = tk.Label(window, text = "File path: " + "Nothing Chosen")
b0_l.place(x=xnum,y=ynum)
base_labels.append(b0_l)
def openfile1(xnum,ynum):
filename = filedialog.askopenfilename(parent=window, title="Layer 1")
if filename:
b1_l = tk.Label(window, text = "File path: " + filename)
b1_l.place(x=xnum,y=ynum)
filenames.append(filename)
all_labels.append(b1_l)
all_labels.append(b1_l)
else:
b1_l = tk.Label(window, text = "File path: " + "Nothing chosen")
b1_l.place(x=xnum,y=ynum)
all_labels.append(b1_l)
def Preview():
global Layer_0
global right_img_out
try:
Layer_0 = Image.open(filename0)
for name in filenames:
try:
sub_image = Image.open(name)
Layer_0.paste(sub_image, (0, 0), sub_image)
except:
pass
img_x = ImageTk.PhotoImage(Layer_0)
right_img_out = ImageTk.getimage(img_x)
panel = tk.Label(window, image=img_x)
panel.image = img_x
panel.place(x=10,y=380)
all_labels.append(panel)
except:
pass
def ClearAll():
global filenames
global filename0
global Layer_0
filenames = []
Preview()
for label in all_labels: label.destroy()
for label in base_labels: label.destroy()
filename0 = ""
Layer_0 = Image.new(mode = "RGB", size = (1, 1))
def Clear():
global filenames
filenames = []
Preview()
for label in all_labels: label.destroy()
def FlipLeft():
global flip_img
global left_img_out
try:
flip_img = Layer_0
x_value = getXval()
y_value = getYval()
width, height = flip_img.size
limit = int(width/x_value)
for x in range(limit):
y = x+1
sub_image = flip_img.crop(box=((x_value*x),0,(x_value*y),y_value)).transpose(Image.FLIP_LEFT_RIGHT)
flip_img.paste(sub_image, box=((x_value*x),0))
img_sub = ImageTk.PhotoImage(flip_img)
img_PIL = ImageTk.getimage(img_sub)
flip_img = img_PIL
img = ImageTk.PhotoImage(flip_img)
left_img_out = ImageTk.getimage(img)
panel2 = tk.Label(window, image=img)
panel2.image = img
all_labels.append(panel2)
panel2.place(x=10,y=450)
except ValueError:
tk.messagebox.showerror('Hold up','Sprite dimensions required')
def getXval():
global xval
string = xval.get()
strtoint = int(string)
return strtoint
def getYval():
global yval
string = yval.get()
strtoint = int(string)
return strtoint
def SaveFileRight():
location = filedialog.asksaveasfile(mode='wb', defaultextension=".png")
if not location:
return
right_img_out.save(location, "png")
def SaveFileLeft():
location = filedialog.asksaveasfile(mode='wb', defaultextension=".png")
if not location:
return
left_img_out.save(location, "png")
window = tk.Tk()
window.geometry("1000x600")
window.title("V1.0")
heading = tk.Label(text = "Sprite Layer Compiler", bg="grey", fg="white", width="500",height="3")
heading2 = tk.Label(text = "", bg="grey", fg="white", width="500",height="1")
heading.pack()
heading2.place(x=0,y=350)
Description_Label = tk.Label(window, text = "Sprite Frame Dimensions").place(x=320,y=260)
X_label = tk.Label(window, text = "Pixel X:").place(x=320,y=280)
xval = Entry(window, width="8")
xval.place(x=370,y=280)
xval.focus_set()
Y_label = tk.Label(window, text = "Pixel Y:").place(x=320,y=300)
yval = Entry(window, width="8")
yval.place(x=370,y=300)
yval.focus_set()
tk.Button(window, text='Layer 0', command= lambda: openfile0(60,60),).place(x=10,y=60)
tk.Button(window, text='Layer 1', command= lambda: openfile1(60,90)).place(x=10,y=90)
tk.Button(window, text='Layer 2', command= lambda: openfile1(60,120)).place(x=10,y=120)
tk.Button(window, text='Layer 3', command= lambda: openfile1(60,150)).place(x=10,y=150)
tk.Button(window, text='Layer 4', command= lambda: openfile1(60,180)).place(x=10,y=180)
tk.Button(window, text='Layer 5', command= lambda: openfile1(60,210)).place(x=10,y=210)
tk.Button(window, text='Layer 6', command= lambda: openfile1(60,240)).place(x=10,y=240)
tk.Button(window, text='Preview', command=Preview, bg="grey", fg="white", width="8",height="1").place(x=10,y=280)
tk.Button(window, text='Flip Left', command=FlipLeft, bg="grey", fg="white", width="8",height="1").place(x=80,y=280)
tk.Button(window, text='Clear', command=Clear, bg="#e12120", fg="white", width="8",height="1").place(x=170,y=280)
tk.Button(window, text='Clear All', command=ClearAll, bg="#971414", fg="white", width="8",height="1").place(x=240,y=280)
tk.Button(window, text='Save Right', command=SaveFileRight, bg="green", fg="white", width="8",height="1").place(x=10,y=310)
tk.Button(window, text='Save Left', command=SaveFileLeft, bg="green", fg="white", width="8",height="1").place(x=80,y=310)
window.mainloop()
| 3,093
| 0
| 254
|
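SpriteTool's compositing and flipping boil down to two Pillow operations: an alpha-masked `paste` of each layer and a horizontal mirror of individual frames. A stand-alone sketch with placeholder file names and an assumed 32x32 frame size:

from PIL import Image, ImageOps

base = Image.open("layer0.png").convert("RGBA")      # placeholder inputs
overlay = Image.open("layer1.png").convert("RGBA")
# Passing the overlay as the third argument uses its alpha channel as the paste mask,
# mirroring Layer_0.paste(sub_image, (0, 0), sub_image) above.
base.paste(overlay, (0, 0), overlay)

# Flip a single frame of a horizontal sprite strip in place:
frame_w, frame_h, i = 32, 32, 0
box = (i * frame_w, 0, (i + 1) * frame_w, frame_h)
flipped = ImageOps.mirror(base.crop(box))  # same effect as transpose(FLIP_LEFT_RIGHT) above
base.paste(flipped, box[:2])
base.save("combined.png")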
362ad954b7ae66d4982bdce7facb466b9ccb8cc8
| 1,188
|
py
|
Python
|
app/app.py
|
valuebaseai/docker_viz_test
|
a873238436a273936374b9ba2c1eae364eaa65ca
|
[
"MIT"
] | null | null | null |
app/app.py
|
valuebaseai/docker_viz_test
|
a873238436a273936374b9ba2c1eae364eaa65ca
|
[
"MIT"
] | null | null | null |
app/app.py
|
valuebaseai/docker_viz_test
|
a873238436a273936374b9ba2c1eae364eaa65ca
|
[
"MIT"
] | null | null | null |
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
# This dataframe has 244 lines, but 4 distinct values for `day`
df = px.data.tips()
app = dash.Dash(__name__)
app.layout = html.Div([
html.P("Selector:"),
dcc.Dropdown(
id='names',
value='day',
options=[{'value': x, 'label': x}
for x in ['smoker', 'day', 'time', 'sex']],
clearable=False
),
html.P("Values:"),
dcc.Dropdown(
id='values',
value='total_bill',
options=[{'value': x, 'label': x}
for x in ['total_bill', 'tip', 'size']],
clearable=False
),
dcc.Graph(id="pie-chart"),
])
@app.callback(
    Output("pie-chart", "figure"),
    [Input("names", "value"),
    Input("values", "value")])
def generate_chart(names, values):
    fig = px.pie(df, values=values, names=names)
    return fig
#app.run_server(debug=True)
if __name__ == "__main__":
import os
debug = False if os.environ["DASH_DEBUG_MODE"] == "False" else True
app.run_server(host="0.0.0.0", port=8050, debug=debug)
| 25.276596
| 71
| 0.59596
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
# This dataframe has 244 lines, but 4 distinct values for `day`
df = px.data.tips()
app = dash.Dash(__name__)
app.layout = html.Div([
html.P("Selector:"),
dcc.Dropdown(
id='names',
value='day',
options=[{'value': x, 'label': x}
for x in ['smoker', 'day', 'time', 'sex']],
clearable=False
),
html.P("Values:"),
dcc.Dropdown(
id='values',
value='total_bill',
options=[{'value': x, 'label': x}
for x in ['total_bill', 'tip', 'size']],
clearable=False
),
dcc.Graph(id="pie-chart"),
])
@app.callback(
Output("pie-chart", "figure"),
[Input("names", "value"),
Input("values", "value")])
def generate_chart(names, values):
fig = px.pie(df, values=values, names=names)
return fig
#app.run_server(debug=True)
if __name__ == "__main__":
import os
debug = False if os.environ["DASH_DEBUG_MODE"] == "False" else True
app.run_server(host="0.0.0.0", port=8050, debug=debug)
| 77
| 0
| 22
|
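The callback in the app above just wraps a single Plotly Express call; the figure it returns can be reproduced outside Dash. A quick sketch on the same tips dataset:

import plotly.express as px

tips = px.data.tips()
fig = px.pie(tips, values="total_bill", names="day")
fig.write_html("pie.html")  # or fig.show() in an interactive session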
2100a825b81abf1ae8d76c45a765eb6393aee0f6
| 3,392
|
py
|
Python
|
codedigger/contest/cron.py
|
jyothiprakashpanaik/Backend
|
9ab1b57436a0a1a6197777c0b36c842e71121d3a
|
[
"Apache-2.0"
] | 17
|
2020-10-07T22:40:37.000Z
|
2022-01-20T07:19:09.000Z
|
codedigger/contest/cron.py
|
jyothiprakashpanaik/Backend
|
9ab1b57436a0a1a6197777c0b36c842e71121d3a
|
[
"Apache-2.0"
] | 42
|
2021-06-03T01:58:04.000Z
|
2022-01-31T14:49:22.000Z
|
codedigger/contest/cron.py
|
jyothiprakashpanaik/Backend
|
9ab1b57436a0a1a6197777c0b36c842e71121d3a
|
[
"Apache-2.0"
] | 25
|
2020-10-06T17:55:19.000Z
|
2021-12-09T07:56:50.000Z
|
# Cron Job -
# Problem Assign -- Contest with isProblem False -- Assign Problem
# Result Assign -- Contest with isResult False
# contest end -- (startTime + duration) <= time.now
#Email
from django.core.mail import send_mail
from codedigger.settings import EMAIL_HOST_USER
## Short Code Contest
# from .utils import login, clean, penalty
# from .models import CodeforcesContest, CodeforcesContestSubmission, CodeforcesContestParticipation
# import requests, random, re
# from codeforces.cron import save_user
# from codeforces.models import user as CodeforcesUser
# from bs4 import BeautifulSoup as bs
# def update_penalty(contest, cookie) :
# contestId = contest.contestId
# groupId = contest.groupId
# page = 0
# prevHandle = None
# while(page < 100):
# page+=1
# url = "https://codeforces.com/group/"+groupId+"/contest/"+str(contestId)+"/standings/page/"+str(page)
# res = requests.get(url , headers = {'Cookie' : cookie})
# soup = bs(res.content,features="html5lib")
# participants = soup.find('table' , {'class' :'standings'}).findAll('tr')
# NProblems = len(participants[0].findAll('th'))-4
# isBreak = False
# isFirst = True
# for participant in participants[1:-1] :
# column = participant.findAll('td')
# user_handle = clean(column[1].find('a').text)
# if isFirst:
# if user_handle == prevHandle:
# isBreak = True
# break
# else :
# prevHandle = user_handle
# isFirst = False
# contest_user,created = CodeforcesUser.objects.get_or_create(handle = user_handle)
# if created :
# url = "https://codeforces.com/api/user.info?handles="+user_handle
# res = requests.get(url)
# if res.status_code == 200:
# data = res.json()
# if data['status'] == 'OK':
# save_user(contest_user , data['result'][0])
# contest_participant,created = CodeforcesContestParticipation.objects.get_or_create(
# contest=contest,
# user=contest_user,
# participantId=participant['participantid'],
# defaults={
# 'isOfficial' : clean(column[0].text) != '',
# 'isVirtual' : column[1].find('sup') != None
# })
# for i in range(NProblems):
# sub = CodeforcesContestSubmission.objects.filter(participant=contest_participant, problemIndex = i)
# newSub = CodeforcesContestSubmission(participant=contest_participant, problemIndex = i)
# if column[4+i].find('span' , {'class' : 'cell-accepted'})!=None and column[4+i]['title'][:3]=='GNU':
# subId = participant.findAll('td')[4+i]['acceptedsubmissionid']
# if sub.exists() and str(sub[0].submissionId) == subId :
# continue
# if sub.exists() :
# sub[0].isSolved = True
# sub[0].submissionId = subId
# sub[0].lang = column[4+i]['title']
# sub[0].penalty = penalty(cookie, contestId, subId, groupId)
# sub[0].save()
# else :
# newSub.isSolved = True
# newSub.submissionId = subId
# newSub.lang = column[4+i]['title']
# newSub.penalty = penalty(cookie, contestId, subId, groupId)
# newSub.save()
# else :
# newSub.isSolved = False
# if not sub.exists() :
# newSub.save()
# if isBreak:
# break
# def update_codeforces_short_code_contests() :
# cookie = login()
# codeforcescontest = CodeforcesContest.objects.filter(Type = "Short Code")
# for contest in codeforcescontest :
# update_penalty(contest, cookie)
| 34.612245
| 106
| 0.661557
|
# Cron Job -
# Problem Assign -- Contest with isProblem False -- Assign Problem
# Result Assign -- Contest with isResult False
# contest end -- (startTime + duration) <= time.now
#Email
from django.core.mail import send_mail
from codedigger.settings import EMAIL_HOST_USER
## Short Code Contest
# from .utils import login, clean, penalty
# from .models import CodeforcesContest, CodeforcesContestSubmission, CodeforcesContestParticipation
# import requests, random, re
# from codeforces.cron import save_user
# from codeforces.models import user as CodeforcesUser
# from bs4 import BeautifulSoup as bs
# def update_penalty(contest, cookie) :
# contestId = contest.contestId
# groupId = contest.groupId
# page = 0
# prevHandle = None
# while(page < 100):
# page+=1
# url = "https://codeforces.com/group/"+groupId+"/contest/"+str(contestId)+"/standings/page/"+str(page)
# res = requests.get(url , headers = {'Cookie' : cookie})
# soup = bs(res.content,features="html5lib")
# participants = soup.find('table' , {'class' :'standings'}).findAll('tr')
# NProblems = len(participants[0].findAll('th'))-4
# isBreak = False
# isFirst = True
# for participant in participants[1:-1] :
# column = participant.findAll('td')
# user_handle = clean(column[1].find('a').text)
# if isFirst:
# if user_handle == prevHandle:
# isBreak = True
# break
# else :
# prevHandle = user_handle
# isFirst = False
# contest_user,created = CodeforcesUser.objects.get_or_create(handle = user_handle)
# if created :
# url = "https://codeforces.com/api/user.info?handles="+user_handle
# res = requests.get(url)
# if res.status_code == 200:
# data = res.json()
# if data['status'] == 'OK':
# save_user(contest_user , data['result'][0])
# contest_participant,created = CodeforcesContestParticipation.objects.get_or_create(
# contest=contest,
# user=contest_user,
# participantId=participant['participantid'],
# defaults={
# 'isOfficial' : clean(column[0].text) != '',
# 'isVirtual' : column[1].find('sup') != None
# })
# for i in range(NProblems):
# sub = CodeforcesContestSubmission.objects.filter(participant=contest_participant, problemIndex = i)
# newSub = CodeforcesContestSubmission(participant=contest_participant, problemIndex = i)
# if column[4+i].find('span' , {'class' : 'cell-accepted'})!=None and column[4+i]['title'][:3]=='GNU':
# subId = participant.findAll('td')[4+i]['acceptedsubmissionid']
# if sub.exists() and str(sub[0].submissionId) == subId :
# continue
# if sub.exists() :
# sub[0].isSolved = True
# sub[0].submissionId = subId
# sub[0].lang = column[4+i]['title']
# sub[0].penalty = penalty(cookie, contestId, subId, groupId)
# sub[0].save()
# else :
# newSub.isSolved = True
# newSub.submissionId = subId
# newSub.lang = column[4+i]['title']
# newSub.penalty = penalty(cookie, contestId, subId, groupId)
# newSub.save()
# else :
# newSub.isSolved = False
# if not sub.exists() :
# newSub.save()
# if isBreak:
# break
# def update_codeforces_short_code_contests() :
# cookie = login()
# codeforcescontest = CodeforcesContest.objects.filter(Type = "Short Code")
# for contest in codeforcescontest :
# update_penalty(contest, cookie)
| 0
| 0
| 0
|
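Everything contest-specific in this cron module is commented out; the only live imports are Django's mail helpers. A minimal sketch of how they could be used for a result notification (subject, body and recipient are illustrative, not the project's wording):

from django.core.mail import send_mail
from codedigger.settings import EMAIL_HOST_USER

def notify_result(recipient, contest_name):
    send_mail(
        subject="Results published for " + contest_name,
        message="Your contest results are now available.",
        from_email=EMAIL_HOST_USER,
        recipient_list=[recipient],
        fail_silently=True,
    )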
d954154e7b9daa5cdea8c4b4390e0576c8ac5293
| 5,948
|
py
|
Python
|
ocr/form_recognizer.py
|
PrynsTag/oneBarangay
|
6a8d56003d85b8385e91f5c5d81208619023c1ee
|
[
"Apache-2.0"
] | null | null | null |
ocr/form_recognizer.py
|
PrynsTag/oneBarangay
|
6a8d56003d85b8385e91f5c5d81208619023c1ee
|
[
"Apache-2.0"
] | 96
|
2021-08-28T12:37:02.000Z
|
2022-03-23T04:25:12.000Z
|
ocr/form_recognizer.py
|
PrynsTag/oneBarangay
|
6a8d56003d85b8385e91f5c5d81208619023c1ee
|
[
"Apache-2.0"
] | null | null | null |
"""Recognize and extract forms."""
import os
from statistics import fmean
from azure.ai.formrecognizer.aio import FormRecognizerClient, FormTrainingClient
from azure.core.credentials import AzureKeyCredential
class RecognizeCustomFormsSampleAsync:
"""Class to recognize forms in async mode."""
async def recognize_custom_forms(self, custom_model_id, filename):
"""Extract text from custom form.
Args:
custom_model_id: The trained custom model id.
filename: The filename of the document that will be scanned.
Returns:
The header for the table and the extracted text.
"""
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
model_id = os.getenv("CUSTOM_TRAINED_MODEL_ID", custom_model_id)
async with FormRecognizerClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
) as form_recognizer_client:
# Make sure your form's type is included in the
# list of form types the custom model can recognize
form_url = (
f"https://storage.googleapis.com/"
f"{os.getenv('GS_MEDIA_BUCKET_NAME')}/"
f"{filename}"
)
poller = await form_recognizer_client.begin_recognize_custom_forms_from_url(
model_id=model_id, form_url=form_url, include_field_elements=True
)
forms = await poller.result()
table = []
header = {}
for _, form in enumerate(forms):
row = {}
for idx, (name, field) in enumerate(form.fields.items()):
if idx >= 3:
for value in field.value:
for i, val in value.to_dict()["value"].items():
data = val["value_data"]
# Condition for "No Data"
if data:
words = data["field_elements"]
# Condition for multiple word result
if len(words) > 1:
word_list = [word["text"] for word in words]
confidence_list = [word["confidence"] for word in words]
slug_name = (
val["name"]
.lower()
.replace(" ", "_")
.replace("(", "")
.replace(")", "")
)
row[slug_name] = {
"text": " ".join(word_list),
"confidence": round(fmean(confidence_list), 3),
}
else:
slug_name = (
val["name"]
.lower()
.replace(" ", "_")
.replace("(", "")
.replace(")", "")
)
row[slug_name] = {
"text": words[0]["text"],
"confidence": words[0]["confidence"],
}
else:
slug_name = (
val["name"]
.lower()
.replace(" ", "_")
.replace("(", "")
.replace(")", "")
)
row[slug_name] = {
"text": data,
"confidence": data,
}
if i == "REMARKS":
table.append(row)
row = {}
else:
slug_name = (
name.lower().replace(" ", "_").replace("(", "").replace(")", "")
)
header[slug_name] = {
"text": field.value,
"confidence": field.confidence,
}
return header, table
async def form_recognizer_runner(filename):
"""Runner for the form recognizer.
Args:
filename: The filename of the document to be scanned
Returns:
The form header and the table scanned.
"""
sample = RecognizeCustomFormsSampleAsync()
model_id = None
if os.getenv("CONTAINER_SAS_URL"):
endpoint = os.getenv("AZURE_FORM_RECOGNIZER_ENDPOINT")
key = os.getenv("AZURE_FORM_RECOGNIZER_KEY")
if not endpoint or not key:
raise ValueError("Please provide endpoint and API key to run the samples.")
form_training_client = FormTrainingClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
async with form_training_client:
model = await (
await form_training_client.begin_training(
os.getenv("CONTAINER_SAS_URL"), use_training_labels=True
)
).result()
model_id = model.model_id
return await sample.recognize_custom_forms(model_id, filename)
| 43.416058
| 96
| 0.408036
|
"""Recognize and extract forms."""
import os
from statistics import fmean
from azure.ai.formrecognizer.aio import FormRecognizerClient, FormTrainingClient
from azure.core.credentials import AzureKeyCredential
class RecognizeCustomFormsSampleAsync:
"""Class to recognize forms in async mode."""
async def recognize_custom_forms(self, custom_model_id, filename):
"""Extract text from custom form.
Args:
custom_model_id: The trained custom model id.
filename: The filename of the document that will be scanned.
Returns:
The header for the table and the extracted text.
"""
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
model_id = os.getenv("CUSTOM_TRAINED_MODEL_ID", custom_model_id)
async with FormRecognizerClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
) as form_recognizer_client:
# Make sure your form's type is included in the
# list of form types the custom model can recognize
form_url = (
f"https://storage.googleapis.com/"
f"{os.getenv('GS_MEDIA_BUCKET_NAME')}/"
f"{filename}"
)
poller = await form_recognizer_client.begin_recognize_custom_forms_from_url(
model_id=model_id, form_url=form_url, include_field_elements=True
)
forms = await poller.result()
table = []
header = {}
for _, form in enumerate(forms):
row = {}
for idx, (name, field) in enumerate(form.fields.items()):
if idx >= 3:
for value in field.value:
for i, val in value.to_dict()["value"].items():
data = val["value_data"]
# Condition for "No Data"
if data:
words = data["field_elements"]
# Condition for multiple word result
if len(words) > 1:
word_list = [word["text"] for word in words]
confidence_list = [word["confidence"] for word in words]
slug_name = (
val["name"]
.lower()
.replace(" ", "_")
.replace("(", "")
.replace(")", "")
)
row[slug_name] = {
"text": " ".join(word_list),
"confidence": round(fmean(confidence_list), 3),
}
else:
slug_name = (
val["name"]
.lower()
.replace(" ", "_")
.replace("(", "")
.replace(")", "")
)
row[slug_name] = {
"text": words[0]["text"],
"confidence": words[0]["confidence"],
}
else:
slug_name = (
val["name"]
.lower()
.replace(" ", "_")
.replace("(", "")
.replace(")", "")
)
row[slug_name] = {
"text": data,
"confidence": data,
}
if i == "REMARKS":
table.append(row)
row = {}
else:
slug_name = (
name.lower().replace(" ", "_").replace("(", "").replace(")", "")
)
header[slug_name] = {
"text": field.value,
"confidence": field.confidence,
}
return header, table
async def form_recognizer_runner(filename):
"""Runner for the form recognizer.
Args:
filename: The filename of the document to be scanned
Returns:
The form header and the table scanned.
"""
sample = RecognizeCustomFormsSampleAsync()
model_id = None
if os.getenv("CONTAINER_SAS_URL"):
endpoint = os.getenv("AZURE_FORM_RECOGNIZER_ENDPOINT")
key = os.getenv("AZURE_FORM_RECOGNIZER_KEY")
if not endpoint or not key:
raise ValueError("Please provide endpoint and API key to run the samples.")
form_training_client = FormTrainingClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
async with form_training_client:
model = await (
await form_training_client.begin_training(
os.getenv("CONTAINER_SAS_URL"), use_training_labels=True
)
).result()
model_id = model.model_id
return await sample.recognize_custom_forms(model_id, filename)
| 0
| 0
| 0
|
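`form_recognizer_runner` above is a coroutine, so callers need an event loop. A small sketch; the object path in the bucket is a placeholder:

import asyncio

async def main():
    header, table = await form_recognizer_runner("households/form.jpg")  # placeholder path
    print(header)
    print(len(table), "rows extracted")

if __name__ == "__main__":
    asyncio.run(main())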
6847eac22fa6fe907df819c4635e45eba818a864
| 17,227
|
py
|
Python
|
SSTransits.py
|
keatonb/SolarSystemTransits
|
c11966972d0f407892c4aeb046fd4aaea3f2d76d
|
[
"MIT"
] | null | null | null |
SSTransits.py
|
keatonb/SolarSystemTransits
|
c11966972d0f407892c4aeb046fd4aaea3f2d76d
|
[
"MIT"
] | null | null | null |
SSTransits.py
|
keatonb/SolarSystemTransits
|
c11966972d0f407892c4aeb046fd4aaea3f2d76d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 24 15:39:33 2021
Calculate and plot details of inner Solar System transits as seen from outer
Solar System objects.
Requires package solarsystem (https://pypi.org/project/solarsystem/)
Animation requires imagemagick (imagemagick.org)
@author: keatonb
"""
import solarsystem
import numpy as np
import matplotlib.pyplot as plt
import warnings
from astropy import units as u
from astropy import constants as const
from astropy.time import Time
from scipy.interpolate import interp1d
from datetime import timedelta
from matplotlib import animation
#from NASA Planetary Fact Sheet #km
#https://nssdc.gsfc.nasa.gov/planetary/factsheet/
planetdiameter = {"Mercury":4879,
"Venus":12104,
"Earth":12756,
"Mars":6792,
"Jupiter":142984,
"Saturn":120536,
"Uranus":51118,
"Neptune":49528}
class Geometry:
"""
innerplanet relative to Sun as seen from outerplanet at time
Derived in Bell & Rustamkulov (2021, in prep.)
"""
def __init__(self, innerplanet, outerplanet, time):
"""
Parameters:
innerplanet (str): name of inner planet
outerplanet (str): name of outer planet
time (datetime): timestamp (UTC)
"""
self.innerplanet = innerplanet.capitalize()
self.outerplanet = outerplanet.capitalize()
self.time = time
#Get heliocentric ecliptic planet positions at time:
#(Longitude (l, deg), Latitude (b, deg), Distance (r, AU))
H = solarsystem.Heliocentric(year=time.year, month=time.month,
day=time.day, hour=time.hour,
minute=time.minute + time.second/60 + time.microsecond/1e6)
planets = H.planets()
#Get heliocentric ecliptic planet positions:
#(Longitude (l, deg), Latitude (b, deg), Distance (r, AU))
O = planets[outerplanet]
I = planets[innerplanet]
E = planets["Earth"]
#Convert to spherical position vector:
#[r (AU, b (radians), l (radians)]
rvec = lambda P: np.array([P[2],P[1]*np.pi/180,P[0]*np.pi/180])
rO = rvec(O)
rI = rvec(I)
rE = rvec(E)
#Convert to Cartesian coordinates (x,y,z in AU)
xvec = lambda rP: np.array([rP[0]*np.cos(rP[1])*np.cos(rP[2]),
rP[0]*np.cos(rP[1])*np.sin(rP[2]),
rP[0]*np.sin(rP[1])])
xO = xvec(rO)
xI = xvec(rI)
xE = xvec(rE)
#Get positions relative to outer planet
xO_Sun = - xO
xO_I = xI - xO
xO_E = xE - xO
#Align x-axis with Sun for relative planet positions
#With two rotation matrices: x' = BAx
A = np.array([[-np.cos(rO[2]),-np.sin(rO[2]),0],
[np.sin(rO[2]),-np.cos(rO[2]),0],
[0,0,1]])
B = np.array([[np.cos(rO[1]),0,-np.sin(rO[1])],
[0,1,0],
[np.sin(rO[1]),0,np.cos(rO[1])]])
BA = np.matmul(B, A)
xvecprime = lambda xO_P: np.matmul(BA,xO_P)
xO_Sun_prime = xvecprime(xO_Sun) #Passes a sanity check
xO_I_prime = xvecprime(xO_I)
xO_E_prime = xvecprime(xO_E)
self.xSun = xO_Sun_prime
self.xI = xO_I_prime
        self.xE = xO_E_prime
#Convert back to spherical coordinates
#for on-sky positions as seen from O [r (AU,b (radians),l (radians)]
rvecprime = lambda xvecp: np.array([np.sqrt(np.sum(xvecp**2.)),
np.arctan(xvecp[2]/np.sqrt(np.sum(xvecp[:2]**2))),
-np.arctan(xvecp[1]/xvecp[0])])
rO_Sun_prime = rvecprime(xO_Sun_prime) #Passes a sanity check
rO_I_prime = rvecprime(xO_I_prime) #Passes a sanity check
rO_E_prime = rvecprime(xO_E_prime) #Praise Boas!
self.rSun = rO_Sun_prime
self.rI = rO_I_prime
        self.rE = rO_E_prime
#Angular separation between inner planet and Sun (radians)
self.theta = np.arccos(np.dot(xO_Sun_prime,xO_I_prime)/(rO_Sun_prime[0]*rO_I_prime[0]))
#Angular diameters of inner planet and Sun (radians)
self.angdiam_Sun = 2*const.R_sun.to(u.AU)/(rO_Sun_prime[0]*u.AU)
self.angdiam_I = planetdiameter[innerplanet]*u.km.to(u.AU)/rO_I_prime[0]
#Are we in transit?
self.intransit = ((self.theta < (self.angdiam_Sun + self.angdiam_I)/2.) &
(rO_I_prime[0] < rO_Sun_prime[0]))
#Fraction of distance toward Solar limb (0 at center)
r = self.theta / (self.angdiam_Sun/2.0)
self.mu = np.sqrt(1-r**2.)
#Light travel time delay to Earth (seconds)
self.timedelay = ((rO_I_prime[0] + rO_E_prime[0])*u.AU/const.c).to(u.s).value
def plot(self, ax=None, fov=(4,4), unit=u.arcsec, show=True,
filename=None, timedelay=True, fontsize=13, **kwargs):
"""
Plot snapshot of Sun, innerplanet from outerplanet
Parameters:
ax (mpl axis): axis to plot to (default: create new fig,ax)
fov (tuple): (width,height) in solar radii
unit (astropy angle unit): unit for axes
show (bool): whether to show plot (default: True)
filename (str): filename to save to (default: None)
timedelay (bool): add light-travel time to text?
fontsize (float): fontsize
**kwargs: args for figure if no axis provided
"""
#Create fig and ax if no ax provided
if ax is None:
fig,ax = plt.subplots(**kwargs)
#Circles must be round
ax.set_aspect(1)
#Angular unit conversion (from radians)
scale = u.radian.to(unit)
#Display sun, planet
sunangrad = scale*self.angdiam_Sun/2.
sun = plt.Circle((0, 0), sunangrad, color='y', zorder = 1)
#Is planet in front of Sun?
infront = self.rI[0] < self.rSun[0]
#The line on this circle makes it look larger than reality,
#but it's almost too small to see without
planet = plt.Circle((scale*self.rI[2], scale*self.rI[1]),
scale*self.angdiam_I/2., color='blue',
zorder=2*infront)
ax.add_patch(sun)
ax.add_patch(planet)
#Add text
time = self.time
if timedelay:
time += timedelta(seconds=self.timedelay)
ax.text(0.03,0.02,(f"{self.innerplanet} from {self.outerplanet} \n" +
time.strftime('%Y-%m-%d %H:%M:%S')),
transform=ax.transAxes, ha='left', va='bottom', fontsize=fontsize)
ax.set_xlabel(fr"$l'$ ({unit.short_names[0]})", fontsize=fontsize)
ax.set_ylabel(fr"$b'$ ({unit.short_names[0]})", fontsize=fontsize)
#Scale axes
ax.set_xlim(-fov[0]*sunangrad/2, fov[0]*sunangrad/2)
ax.set_ylim(-fov[1]*sunangrad/2, fov[1]*sunangrad/2)
#Save plot or show
if filename is not None:
plt.savefig(filename)
if show:
plt.show()
def _limbdarkening(phi, u2=0.88, v2=-0.23):
"""limb darkening law
parameterization from Section 14.7 of Allen's Astrophysical Quantities
(4th ed, Cox, 2000, AIP Press)
default u2,v2 values are for ~V filter @ 600 nm
phi is angle between solar radius vector and line of sight (radians)
normalized so disk integrates to 1
"""
mu = np.cos(phi)
return (1 - u2 - v2 + u2*mu + v2*(mu**2))/(1-u2/3 - v2/2)
class Transit:
"""
Properties and plots of transits in time window.
Calculates:
- MJD (instantaneous and observed) of ingress,egress,midtranist
- Impact parameter (b)
Plots:
- animate (gif)
- traceplot (path)
TODO: lightcurve (simulated)
"""
def __init__(self, innerplanet, outerplanet, starttime, endtime, timestep):
"""
Parameters:
innerplanet (str): name of inner planet
outerplanet (str): name of outer planet
starttime (datetime): timestamp (UTC) before transit
endtime (datetime): timestamp (UTC) before transit
timestep (float): sampling interval (minutes; > 0)
Notes:
Impact parameter, b, is minimum within timestamp
"""
#Check that timestep is positive
if timestep <= 0:
raise Exception("Timestep must be positive.")
if timestep > 10:
warnings.warn("Timesteps longer than 10 minutes may produce poor results")
deltatime = timedelta(minutes=timestep)
self.innerplanet = innerplanet
self.outerplanet = outerplanet
#Compute timestamps
self.times = [starttime]
while self.times[-1] < endtime:
self.times.append(self.times[-1] + deltatime)
self.mjd = np.array([Time(time).mjd for time in self.times])
#Calculate geometry at each timestamp
self.geometry = [Geometry(self.innerplanet, self.outerplanet, time)
for time in self.times]
#Get observed times (corrected for light travel time)
self.mjdobs = self.mjd + np.array([g.timedelay for g in self.geometry])/(24*3600.)
#compute transit start, end, and mid-eclipse times
#in transit when transitsep <= 1
transitsep = [g.theta / ((g.angdiam_Sun+g.angdiam_I)/2.0) for g in self.geometry]
#separate below and after transit
deepest = np.argmin([g.theta / ((g.angdiam_Sun+g.angdiam_I)/2.) for g in self.geometry])
#we'll interpolate precise start and end times
if deepest != 0:
self.startingress_mjd = float(interp1d(transitsep[:deepest],self.mjd[:deepest],
bounds_error=False)(1))
self.startingress_mjdobs = float(interp1d(transitsep[:deepest],self.mjdobs[:deepest],
bounds_error=False)(1))
else:
self.startingress_mjd = np.nan
self.startingress_mjdobs = np.nan
if deepest != len(self.geometry)-1:
self.endegress_mjd = float(interp1d(transitsep[deepest:],self.mjd[deepest:],
bounds_error=False)(1))
self.endegress_mjdobs = float(interp1d(transitsep[deepest:],self.mjdobs[deepest:],
bounds_error=False)(1))
else:
self.endegress_mjd = np.nan
self.endegress_mjdobs = np.nan
self.midtransit_mjd = (self.startingress_mjd + self.endegress_mjd)/2.
self.midtransit_mjdobs = (self.startingress_mjdobs + self.endegress_mjdobs)/2.
self.transitdurationobs = (self.endegress_mjdobs - self.startingress_mjdobs)*24*u.h
#Compute geometry at mid-transit
self.midtransit_geometry = Geometry(self.innerplanet, self.outerplanet,
Time(self.midtransit_mjd,format='mjd').to_datetime())
#Simulate mid-transit (default limb darkening)
phi = np.arcsin(2*self.midtransit_geometry.theta/self.midtransit_geometry.angdiam_Sun)
self.midtransit_depth = ((self.midtransit_geometry.angdiam_I**2/
self.midtransit_geometry.angdiam_Sun**2)*
_limbdarkening(phi))*1e6 # ppm
#Compute impact parameter (good to timestep precision)
self.b = self.midtransit_geometry.theta / ((self.midtransit_geometry.angdiam_Sun)/2.)
def animate(self, filename="Transit.gif", duration=3, figsize=(4,4), dpi=150, **kwargs):
"""Animate the transit
Parameters:
filename (str): file to save animation to
duration (float): loop duration (seconds)
figsize (float,float): width, height in inches
dpi (float): dots per inch
**kwargs: for Geometry plot function
"""
fig,ax = plt.subplots(figsize=figsize)
        #No initialization needed
        def init():
            pass
        #Animation function to call for each frame
        #(minimal stand-in: redraws the Geometry snapshot at timestep i)
        def animateframe(i):
            ax.clear()
            self.geometry[i].plot(ax=ax, show=False, **kwargs)
#Time between frames
interval = duration/len(self.times)
#Animate it and save!
anim = animation.FuncAnimation(fig, animateframe, init_func=init,
frames=len(self.times), interval=interval,
blit=False)
anim.save(filename, dpi=dpi, fps = 1/interval, writer='imagemagick')
def traceplot(self, ax=None, fov=(4,4), unit=u.arcsec, show=True,
filename=None, plotsun=True, fontsize=13, **kwargs):
"""Plot path of transit across Sun
Parameters:
ax (mpl axis): axis to plot to (default: create new fig,ax)
fov (tuple): (width,height) in solar radii
unit (astropy angle unit or "solarradii"): unit for axes
show (bool): whether to show plot (default: True)
filename (str): filename to save to (default: None)
sun (bool): plot Sun circle? (default: True)
fontsize (float): fontsize
**kwargs: args for figure if no axis provided
"""
#collect relevant details
angdiam_I = np.array([g.angdiam_I for g in self.geometry])
angdiam_Sun = np.array([g.angdiam_Sun for g in self.geometry])
b = np.array([g.rI[1] for g in self.geometry])
l = np.array([g.rI[2] for g in self.geometry])
rI = np.array([g.rI[0] for g in self.geometry])
rSun = np.array([g.rSun[0] for g in self.geometry])
#Are we plotting in solar radii? (useful for overlaying traces)
solarradii = unit == "solarradii"
if solarradii:
unit = u.radian
#Angular unit conversion (from radians)
scale = u.radian.to(unit)
#Get trajectory angle, phi, to plot shadow wide enough
phi = np.arctan(np.diff(b)/np.diff(l))
phi = np.concatenate((phi,[phi[-1]])) # match length
#Create fig and ax if no ax provided
if ax is None:
fig,ax = plt.subplots(**kwargs)
#Circles must be round
ax.set_aspect(1)
#Display sun, using angular size at mid-transit (unless solarradii display units)
midtransit = np.argmin([g.theta / ((g.angdiam_Sun)/2.) for g in self.geometry])
angdiam_Sun = angdiam_Sun[midtransit]
sunangrad = scale*angdiam_Sun/2.
if solarradii: #Handle case for solar radii units
sunangrad = 1
scale = 2./angdiam_Sun
if plotsun: #Only plot sun if requested
sun = plt.Circle((0, 0), sunangrad, color='y', zorder = 1)
ax.add_patch(sun)
#Is planet in front of Sun?
infront = rI[midtransit] < rSun[midtransit]
#Display transit path
linewidth = scale*angdiam_I / np.cos(phi) #Width of shadow path
ax.fill_between(scale*l,scale*b+linewidth/2.,scale*b-linewidth/2,lw=0, fc='0.2',zorder=2*infront)
ax.set_xlabel(fr"$l'$ ({unit.short_names[0]})", fontsize=fontsize)
ax.set_ylabel(fr"$b'$ ({unit.short_names[0]})", fontsize=fontsize)
if solarradii:
ax.set_xlabel("Solar radii", fontsize=fontsize)
ax.set_ylabel("Solar radii", fontsize=fontsize)
#Scale axes
ax.set_xlim(-fov[0]*sunangrad/2, fov[0]*sunangrad/2)
ax.set_ylim(-fov[1]*sunangrad/2, fov[1]*sunangrad/2)
#Save plot or show
if filename is not None:
plt.tight_layout()
plt.savefig(filename)
if show:
plt.tight_layout()
plt.show()
def simlightcurve(self,limbdarkeningfunc = _limbdarkening,
limbdarkening_args = {"u2":0.88, "v2":-0.23}):
"""
Simulate transit light curve with limb darkening
Assumes negligible limb darkening gradient across transiting planet disk
Returns relative model flux at self.mjd_obs
"""
theta = np.array([g.theta for g in self.geometry])
angdiam_Sun = np.array([g.angdiam_Sun for g in self.geometry])
angdiam_I = np.array([g.angdiam_I for g in self.geometry])
#Angle between radial vector and line of sight
phi = np.arcsin(2*theta/angdiam_Sun)
#compute relative flux
        lc = 1 - (angdiam_I**2/angdiam_Sun**2)*limbdarkeningfunc(phi,**limbdarkening_args)
lc[np.isnan(lc)] = 1
return lc
| 41.311751
| 105
| 0.573925
|
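For the SSTransits module above, the `Geometry` class can be evaluated at any single timestamp without needing an actual transit. A usage sketch with an arbitrary date (purely illustrative; it assumes the `solarsystem` package required by the module is installed):

from datetime import datetime
import numpy as np

g = Geometry("Venus", "Neptune", datetime(2025, 6, 1))
print("Sun-Venus separation seen from Neptune (arcsec):", np.degrees(g.theta) * 3600.0)
print("in transit at this instant?", g.intransit)
print("light-travel delay to Earth (hours):", g.timedelay / 3600.0)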
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 24 15:39:33 2021
Calculate and plot details of inner Solar System transits as seen from outer
Solar System objects.
Requires package solarsystem (https://pypi.org/project/solarsystem/)
Animation requires imagemagick (imagemagick.org)
@author: keatonb
"""
import solarsystem
import numpy as np
import matplotlib.pyplot as plt
import warnings
from astropy import units as u
from astropy import constants as const
from astropy.time import Time
from scipy.interpolate import interp1d
from datetime import timedelta
from matplotlib import animation
#from NASA Planetary Fact Sheet #km
#https://nssdc.gsfc.nasa.gov/planetary/factsheet/
planetdiameter = {"Mercury":4879,
"Venus":12104,
"Earth":12756,
"Mars":6792,
"Jupiter":142984,
"Saturn":120536,
"Uranus":51118,
"Neptune":49528}
class Geometry:
"""
innerplanet relative to Sun as seen from outerplanet at time
Derived in Bell & Rustamkulov (2021, in prep.)
"""
def __init__(self, innerplanet, outerplanet, time):
"""
Parameters:
innerplanet (str): name of inner planet
outerplanet (str): name of outer planet
time (datetime): timestamp (UTC)
"""
self.innerplanet = innerplanet.capitalize()
self.outerplanet = outerplanet.capitalize()
self.time = time
#Get heliocentric ecliptic planet positions at time:
#(Longitude (l, deg), Latitude (b, deg), Distance (r, AU))
H = solarsystem.Heliocentric(year=time.year, month=time.month,
day=time.day, hour=time.hour,
minute=time.minute + time.second/60 + time.microsecond/1e6)
planets = H.planets()
#Get heliocentric ecliptic planet positions:
#(Longitude (l, deg), Latitude (b, deg), Distance (r, AU))
O = planets[outerplanet]
I = planets[innerplanet]
E = planets["Earth"]
#Convert to spherical position vector:
#[r (AU, b (radians), l (radians)]
rvec = lambda P: np.array([P[2],P[1]*np.pi/180,P[0]*np.pi/180])
rO = rvec(O)
rI = rvec(I)
rE = rvec(E)
#Convert to Cartesian coordinates (x,y,z in AU)
xvec = lambda rP: np.array([rP[0]*np.cos(rP[1])*np.cos(rP[2]),
rP[0]*np.cos(rP[1])*np.sin(rP[2]),
rP[0]*np.sin(rP[1])])
xO = xvec(rO)
xI = xvec(rI)
xE = xvec(rE)
#Get positions relative to outer planet
xO_Sun = - xO
xO_I = xI - xO
xO_E = xE - xO
#Align x-axis with Sun for relative planet positions
#With two rotation matrices: x' = BAx
A = np.array([[-np.cos(rO[2]),-np.sin(rO[2]),0],
[np.sin(rO[2]),-np.cos(rO[2]),0],
[0,0,1]])
B = np.array([[np.cos(rO[1]),0,-np.sin(rO[1])],
[0,1,0],
[np.sin(rO[1]),0,np.cos(rO[1])]])
BA = np.matmul(B, A)
xvecprime = lambda xO_P: np.matmul(BA,xO_P)
xO_Sun_prime = xvecprime(xO_Sun) #Passes a sanity check
xO_I_prime = xvecprime(xO_I)
xO_E_prime = xvecprime(xO_E)
self.xSun = xO_Sun_prime
self.xI = xO_I_prime
        self.xE = xO_E_prime
#Convert back to spherical coordinates
        #for on-sky positions as seen from O [r (AU), b (radians), l (radians)]
rvecprime = lambda xvecp: np.array([np.sqrt(np.sum(xvecp**2.)),
np.arctan(xvecp[2]/np.sqrt(np.sum(xvecp[:2]**2))),
-np.arctan(xvecp[1]/xvecp[0])])
rO_Sun_prime = rvecprime(xO_Sun_prime) #Passes a sanity check
rO_I_prime = rvecprime(xO_I_prime) #Passes a sanity check
rO_E_prime = rvecprime(xO_E_prime) #Praise Boas!
self.rSun = rO_Sun_prime
self.rI = rO_I_prime
        self.rE = rO_E_prime
#Angular separation between inner planet and Sun (radians)
self.theta = np.arccos(np.dot(xO_Sun_prime,xO_I_prime)/(rO_Sun_prime[0]*rO_I_prime[0]))
#Angular diameters of inner planet and Sun (radians)
self.angdiam_Sun = 2*const.R_sun.to(u.AU)/(rO_Sun_prime[0]*u.AU)
self.angdiam_I = planetdiameter[innerplanet]*u.km.to(u.AU)/rO_I_prime[0]
#Are we in transit?
self.intransit = ((self.theta < (self.angdiam_Sun + self.angdiam_I)/2.) &
(rO_I_prime[0] < rO_Sun_prime[0]))
        #Fractional distance toward the solar limb (r = 0 at disk center)
        r = self.theta / (self.angdiam_Sun/2.0)
        #mu = cosine of the angle between the local surface normal and the line of sight
        self.mu = np.sqrt(1-r**2.)
#Light travel time delay to Earth (seconds)
self.timedelay = ((rO_I_prime[0] + rO_E_prime[0])*u.AU/const.c).to(u.s).value
def plot(self, ax=None, fov=(4,4), unit=u.arcsec, show=True,
filename=None, timedelay=True, fontsize=13, **kwargs):
"""
Plot snapshot of Sun, innerplanet from outerplanet
Parameters:
ax (mpl axis): axis to plot to (default: create new fig,ax)
fov (tuple): (width,height) in solar radii
unit (astropy angle unit): unit for axes
show (bool): whether to show plot (default: True)
filename (str): filename to save to (default: None)
timedelay (bool): add light-travel time to text?
fontsize (float): fontsize
**kwargs: args for figure if no axis provided
"""
#Create fig and ax if no ax provided
if ax is None:
fig,ax = plt.subplots(**kwargs)
#Circles must be round
ax.set_aspect(1)
#Angular unit conversion (from radians)
scale = u.radian.to(unit)
#Display sun, planet
sunangrad = scale*self.angdiam_Sun/2.
sun = plt.Circle((0, 0), sunangrad, color='y', zorder = 1)
#Is planet in front of Sun?
infront = self.rI[0] < self.rSun[0]
        #The outline drawn on this circle makes the planet look larger than reality,
        #but it would be almost too small to see without it
planet = plt.Circle((scale*self.rI[2], scale*self.rI[1]),
scale*self.angdiam_I/2., color='blue',
zorder=2*infront)
ax.add_patch(sun)
ax.add_patch(planet)
#Add text
time = self.time
if timedelay:
time += timedelta(seconds=self.timedelay)
ax.text(0.03,0.02,(f"{self.innerplanet} from {self.outerplanet} \n" +
time.strftime('%Y-%m-%d %H:%M:%S')),
transform=ax.transAxes, ha='left', va='bottom', fontsize=fontsize)
ax.set_xlabel(fr"$l'$ ({unit.short_names[0]})", fontsize=fontsize)
ax.set_ylabel(fr"$b'$ ({unit.short_names[0]})", fontsize=fontsize)
#Scale axes
ax.set_xlim(-fov[0]*sunangrad/2, fov[0]*sunangrad/2)
ax.set_ylim(-fov[1]*sunangrad/2, fov[1]*sunangrad/2)
#Save plot or show
if filename is not None:
plt.savefig(filename)
if show:
plt.show()
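# Illustrative usage sketch (not part of the original module). The planet pair and
# date below are placeholder assumptions chosen only to show the calling pattern;
# solarsystem must be able to supply ephemerides for the requested datetime.
def _example_geometry():
    """Sketch: build one Geometry snapshot and report the transit test."""
    from datetime import datetime
    g = Geometry("Venus", "Neptune", datetime(2021, 1, 24, 12, 0, 0))
    print("In transit:", g.intransit)
    print("Sun-planet separation (rad):", g.theta)
    print("Angular diameter of Sun (rad):", g.angdiam_Sun)
    return g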
def _limbdarkening(phi, u2=0.88, v2=-0.23):
"""limb darkening law
parameterization from Section 14.7 of Allen's Astrophysical Quantities
(4th ed, Cox, 2000, AIP Press)
default u2,v2 values are for ~V filter @ 600 nm
phi is angle between solar radius vector and line of sight (radians)
normalized so disk integrates to 1
"""
mu = np.cos(phi)
return (1 - u2 - v2 + u2*mu + v2*(mu**2))/(1-u2/3 - v2/2)
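# Quick numeric check of the normalization claimed above (illustrative; not in the
# original module): the intensity-weighted average over the projected disk should
# come out very close to 1 for any (u2, v2).
def _check_limbdarkening_normalization(n=100001):
    """Return the disk-averaged intensity, which should be approximately 1."""
    mu = np.linspace(0, 1, n)
    phi = np.arccos(mu)
    #average over the projected disk: integral of I(mu) * 2*mu dmu from 0 to 1
    return np.trapz(_limbdarkening(phi) * 2 * mu, mu)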
class Transit:
"""
Properties and plots of transits in time window.
Calculates:
        - MJD (instantaneous and observed) of ingress, egress, midtransit
- Impact parameter (b)
Plots:
- animate (gif)
- traceplot (path)
    Simulates: simlightcurve (model light curve)
"""
def __init__(self, innerplanet, outerplanet, starttime, endtime, timestep):
"""
Parameters:
innerplanet (str): name of inner planet
outerplanet (str): name of outer planet
starttime (datetime): timestamp (UTC) before transit
            endtime (datetime): timestamp (UTC) after transit
timestep (float): sampling interval (minutes; > 0)
Notes:
            Impact parameter, b, is the minimum over the sampled timestamps
"""
#Check that timestep is positive
if timestep <= 0:
raise Exception("Timestep must be positive.")
if timestep > 10:
warnings.warn("Timesteps longer than 10 minutes may produce poor results")
deltatime = timedelta(minutes=timestep)
self.innerplanet = innerplanet
self.outerplanet = outerplanet
#Compute timestamps
self.times = [starttime]
while self.times[-1] < endtime:
self.times.append(self.times[-1] + deltatime)
self.mjd = np.array([Time(time).mjd for time in self.times])
#Calculate geometry at each timestamp
self.geometry = [Geometry(self.innerplanet, self.outerplanet, time)
for time in self.times]
#Get observed times (corrected for light travel time)
self.mjdobs = self.mjd + np.array([g.timedelay for g in self.geometry])/(24*3600.)
#compute transit start, end, and mid-eclipse times
#in transit when transitsep <= 1
transitsep = [g.theta / ((g.angdiam_Sun+g.angdiam_I)/2.0) for g in self.geometry]
        #index of the sample closest to mid-transit, separating ingress from egress
        deepest = np.argmin([g.theta / ((g.angdiam_Sun+g.angdiam_I)/2.) for g in self.geometry])
#we'll interpolate precise start and end times
if deepest != 0:
self.startingress_mjd = float(interp1d(transitsep[:deepest],self.mjd[:deepest],
bounds_error=False)(1))
self.startingress_mjdobs = float(interp1d(transitsep[:deepest],self.mjdobs[:deepest],
bounds_error=False)(1))
else:
self.startingress_mjd = np.nan
self.startingress_mjdobs = np.nan
if deepest != len(self.geometry)-1:
self.endegress_mjd = float(interp1d(transitsep[deepest:],self.mjd[deepest:],
bounds_error=False)(1))
self.endegress_mjdobs = float(interp1d(transitsep[deepest:],self.mjdobs[deepest:],
bounds_error=False)(1))
else:
self.endegress_mjd = np.nan
self.endegress_mjdobs = np.nan
self.midtransit_mjd = (self.startingress_mjd + self.endegress_mjd)/2.
self.midtransit_mjdobs = (self.startingress_mjdobs + self.endegress_mjdobs)/2.
self.transitdurationobs = (self.endegress_mjdobs - self.startingress_mjdobs)*24*u.h
#Compute geometry at mid-transit
self.midtransit_geometry = Geometry(self.innerplanet, self.outerplanet,
Time(self.midtransit_mjd,format='mjd').to_datetime())
#Simulate mid-transit (default limb darkening)
phi = np.arcsin(2*self.midtransit_geometry.theta/self.midtransit_geometry.angdiam_Sun)
self.midtransit_depth = ((self.midtransit_geometry.angdiam_I**2/
self.midtransit_geometry.angdiam_Sun**2)*
_limbdarkening(phi))*1e6 # ppm
#Compute impact parameter (good to timestep precision)
self.b = self.midtransit_geometry.theta / ((self.midtransit_geometry.angdiam_Sun)/2.)
def animate(self, filename="Transit.gif", duration=3, figsize=(4,4), dpi=150, **kwargs):
"""Animate the transit
Parameters:
filename (str): file to save animation to
duration (float): loop duration (seconds)
figsize (float,float): width, height in inches
dpi (float): dots per inch
**kwargs: for Geometry plot function
"""
fig,ax = plt.subplots(figsize=figsize)
#No initialization needed
def init():
self.geometry[0].plot(ax=ax, show=False, **kwargs)
plt.tight_layout()
return
#Animation function to call
def animateframe(i):
ax.clear() #Clear previous data
self.geometry[i].plot(ax=ax, show=False, **kwargs)
return
#Time between frames
interval = duration/len(self.times)
#Animate it and save!
anim = animation.FuncAnimation(fig, animateframe, init_func=init,
frames=len(self.times), interval=interval,
blit=False)
anim.save(filename, dpi=dpi, fps = 1/interval, writer='imagemagick')
def traceplot(self, ax=None, fov=(4,4), unit=u.arcsec, show=True,
filename=None, plotsun=True, fontsize=13, **kwargs):
"""Plot path of transit across Sun
Parameters:
ax (mpl axis): axis to plot to (default: create new fig,ax)
fov (tuple): (width,height) in solar radii
unit (astropy angle unit or "solarradii"): unit for axes
show (bool): whether to show plot (default: True)
filename (str): filename to save to (default: None)
            plotsun (bool): plot Sun circle? (default: True)
fontsize (float): fontsize
**kwargs: args for figure if no axis provided
"""
#collect relevant details
angdiam_I = np.array([g.angdiam_I for g in self.geometry])
angdiam_Sun = np.array([g.angdiam_Sun for g in self.geometry])
b = np.array([g.rI[1] for g in self.geometry])
l = np.array([g.rI[2] for g in self.geometry])
rI = np.array([g.rI[0] for g in self.geometry])
rSun = np.array([g.rSun[0] for g in self.geometry])
#Are we plotting in solar radii? (useful for overlaying traces)
solarradii = unit == "solarradii"
if solarradii:
unit = u.radian
#Angular unit conversion (from radians)
scale = u.radian.to(unit)
#Get trajectory angle, phi, to plot shadow wide enough
phi = np.arctan(np.diff(b)/np.diff(l))
phi = np.concatenate((phi,[phi[-1]])) # match length
#Create fig and ax if no ax provided
if ax is None:
fig,ax = plt.subplots(**kwargs)
#Circles must be round
ax.set_aspect(1)
#Display sun, using angular size at mid-transit (unless solarradii display units)
midtransit = np.argmin([g.theta / ((g.angdiam_Sun)/2.) for g in self.geometry])
angdiam_Sun = angdiam_Sun[midtransit]
sunangrad = scale*angdiam_Sun/2.
if solarradii: #Handle case for solar radii units
sunangrad = 1
scale = 2./angdiam_Sun
if plotsun: #Only plot sun if requested
sun = plt.Circle((0, 0), sunangrad, color='y', zorder = 1)
ax.add_patch(sun)
#Is planet in front of Sun?
infront = rI[midtransit] < rSun[midtransit]
#Display transit path
linewidth = scale*angdiam_I / np.cos(phi) #Width of shadow path
ax.fill_between(scale*l,scale*b+linewidth/2.,scale*b-linewidth/2,lw=0, fc='0.2',zorder=2*infront)
ax.set_xlabel(fr"$l'$ ({unit.short_names[0]})", fontsize=fontsize)
ax.set_ylabel(fr"$b'$ ({unit.short_names[0]})", fontsize=fontsize)
if solarradii:
ax.set_xlabel("Solar radii", fontsize=fontsize)
ax.set_ylabel("Solar radii", fontsize=fontsize)
#Scale axes
ax.set_xlim(-fov[0]*sunangrad/2, fov[0]*sunangrad/2)
ax.set_ylim(-fov[1]*sunangrad/2, fov[1]*sunangrad/2)
#Save plot or show
if filename is not None:
plt.tight_layout()
plt.savefig(filename)
if show:
plt.tight_layout()
plt.show()
def simlightcurve(self,limbdarkeningfunc = _limbdarkening,
limbdarkening_args = {"u2":0.88, "v2":-0.23}):
"""
Simulate transit light curve with limb darkening
Assumes negligible limb darkening gradient across transiting planet disk
        Returns relative model flux at the observed times (self.mjdobs)
"""
theta = np.array([g.theta for g in self.geometry])
angdiam_Sun = np.array([g.angdiam_Sun for g in self.geometry])
angdiam_I = np.array([g.angdiam_I for g in self.geometry])
#Angle between radial vector and line of sight
phi = np.arcsin(2*theta/angdiam_Sun)
        #compute relative flux with the supplied limb darkening function
        lc = 1 - (angdiam_I**2/angdiam_Sun**2)*limbdarkeningfunc(phi,**limbdarkening_args)
lc[np.isnan(lc)] = 1
return lc
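# Illustrative end-to-end calling pattern (not part of the original module). The
# planets, window, and timestep are placeholder assumptions; the window must
# bracket a real transit for the mid-transit quantities to be finite.
def _example_transit():
    """Sketch: build a Transit, print headline numbers, return a model light curve."""
    from datetime import datetime
    t = Transit("Venus", "Neptune", datetime(2021, 1, 24, 0, 0),
                datetime(2021, 1, 25, 0, 0), timestep=10)
    print("Impact parameter b:", t.b)
    print("Observed duration:", t.transitdurationobs)
    print("Mid-transit depth (ppm):", t.midtransit_depth)
    return t.mjdobs, t.simlightcurve()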
| 228
| 0
| 61
|
eabe8c69163f71157cd2ba1f49bb642b4bbb49cc
| 4,751
|
py
|
Python
|
pose_estimation/my_train.py
|
RoboBachelor/vehicle_pose_estimation
|
53f1ff4a70d69256ba10fd2ecee024cf19801b22
|
[
"MIT"
] | 1
|
2021-11-01T15:12:39.000Z
|
2021-11-01T15:12:39.000Z
|
pose_estimation/my_train.py
|
RoboBachelor/vehicle_pose_estimation
|
53f1ff4a70d69256ba10fd2ecee024cf19801b22
|
[
"MIT"
] | 1
|
2021-11-01T15:11:14.000Z
|
2021-11-06T07:02:13.000Z
|
pose_estimation/my_train.py
|
RoboBachelor/vehicle_pose_estimation
|
53f1ff4a70d69256ba10fd2ecee024cf19801b22
|
[
"MIT"
] | 1
|
2021-12-15T22:19:08.000Z
|
2021-12-15T22:19:08.000Z
|
import os.path as osp
import sys
import argparse
import time
import torch
from torchvision import transforms
this_dir = osp.dirname(__file__)
paths = []
paths.append(osp.join(this_dir, '..', 'lib'))
paths.append(osp.join(this_dir, '..', 'lib', 'dataset'))
for path in paths:
if path not in sys.path:
sys.path.insert(0, path)
import models
from core.loss import JointsMSELoss
from utils.utils import get_optimizer
from core.config import config
from core.config import update_config
from core.config import update_dir
from core.config import get_model_name
from core.evaluate import accuracy
from CarJointsDataset import CarJointsDataset
class AverageMeter(object):
"""Computes and stores the average and current value"""
args = parse_args()
model = eval('models.' + 'pose_resnet' + '.get_pose_net')(
config, is_train=False
)
print(model)
# define loss function (criterion) and optimizer
criterion = JointsMSELoss(
use_target_weight=config.LOSS.USE_TARGET_WEIGHT
)
optimizer = get_optimizer(config, model)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, config.TRAIN.LR_STEP, config.TRAIN.LR_FACTOR
)
# Data loading code
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = CarJointsDataset(
config,
transforms.Compose([
transforms.ToTensor(),
normalize,
])
)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=config.TRAIN.BATCH_SIZE,
shuffle=config.TRAIN.SHUFFLE,
num_workers=config.WORKERS
)
# switch to train mode
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
acc = AverageMeter()
end = time.time()
for epoch in range(config.TRAIN.BEGIN_EPOCH, config.TRAIN.END_EPOCH):
lr_scheduler.step()
for i, (input, target) in enumerate(train_loader):
        # measure data loading time (otherwise the printed Data column stays zero)
        data_time.update(time.time() - end)
        # compute output
        output = model(input)
loss = criterion(output, target, 0)
# compute gradient and do update step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure accuracy and record loss
losses.update(loss.item(), input.size(0))
_, avg_acc, cnt, pred = accuracy(output.detach().cpu().numpy(),
target.detach().cpu().numpy())
acc.update(avg_acc, cnt)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % config.PRINT_FREQ == 0:
msg = 'Epoch: [{0}][{1}/{2}]\t' \
'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
'Speed {speed:.1f} samples/s\t' \
'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
'Loss {loss.val:.5f} ({loss.avg:.5f})\t' \
'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
speed=input.size(0)/batch_time.val,
data_time=data_time, loss=losses, acc=acc)
print(msg)
print("End of current epoch")
print("End of training!")
| 26.841808
| 75
| 0.595875
|
import os.path as osp
import sys
import argparse
import time
import torch
from torchvision import transforms
this_dir = osp.dirname(__file__)
paths = []
paths.append(osp.join(this_dir, '..', 'lib'))
paths.append(osp.join(this_dir, '..', 'lib', 'dataset'))
for path in paths:
if path not in sys.path:
sys.path.insert(0, path)
import models
from core.loss import JointsMSELoss
from utils.utils import get_optimizer
from core.config import config
from core.config import update_config
from core.config import update_dir
from core.config import get_model_name
from core.evaluate import accuracy
from CarJointsDataset import CarJointsDataset
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count if self.count != 0 else 0
def parse_args():
parser = argparse.ArgumentParser(description='Train keypoints network')
# general
parser.add_argument('--cfg',
help='experiment configure file name',
required=True,
type=str)
args, rest = parser.parse_known_args()
# update config
update_config(args.cfg)
# training
parser.add_argument('--frequent',
help='frequency of logging',
default=config.PRINT_FREQ,
type=int)
parser.add_argument('--gpus',
help='gpus',
type=str)
parser.add_argument('--workers',
help='num of dataloader workers',
type=int)
args = parser.parse_args()
return args
args = parse_args()
model = eval('models.' + 'pose_resnet' + '.get_pose_net')(
config, is_train=False
)
print(model)
# define loss function (criterion) and optimizer
criterion = JointsMSELoss(
use_target_weight=config.LOSS.USE_TARGET_WEIGHT
)
optimizer = get_optimizer(config, model)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, config.TRAIN.LR_STEP, config.TRAIN.LR_FACTOR
)
# Data loading code
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = CarJointsDataset(
config,
transforms.Compose([
transforms.ToTensor(),
normalize,
])
)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=config.TRAIN.BATCH_SIZE,
shuffle=config.TRAIN.SHUFFLE,
num_workers=config.WORKERS
)
# switch to train mode
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
acc = AverageMeter()
end = time.time()
for epoch in range(config.TRAIN.BEGIN_EPOCH, config.TRAIN.END_EPOCH):
lr_scheduler.step()
for i, (input, target) in enumerate(train_loader):
        # measure data loading time (otherwise the printed Data column stays zero)
        data_time.update(time.time() - end)
        # compute output
        output = model(input)
loss = criterion(output, target, 0)
# compute gradient and do update step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure accuracy and record loss
losses.update(loss.item(), input.size(0))
_, avg_acc, cnt, pred = accuracy(output.detach().cpu().numpy(),
target.detach().cpu().numpy())
acc.update(avg_acc, cnt)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % config.PRINT_FREQ == 0:
msg = 'Epoch: [{0}][{1}/{2}]\t' \
'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
'Speed {speed:.1f} samples/s\t' \
'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
'Loss {loss.val:.5f} ({loss.avg:.5f})\t' \
'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
speed=input.size(0)/batch_time.val,
data_time=data_time, loss=losses, acc=acc)
print(msg)
print("End of current epoch")
print("End of training!")
def valid_speed_test():
# switch to val mode
model.eval()
sum_time = 0
with torch.no_grad():
for i in range(10):
start = time.time()
# compute output
output = model(input[0:1])
sum_time += time.time() - start
print("End of valid! {} seconds for 10 images!".format(sum_time))
model.train()
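# Note (descriptive comment, not in the original): valid_speed_test reuses `input`
# from the last training batch left in module scope and times 10 single-image
# forward passes with gradients disabled, so it only makes sense to call it after
# the training loop above has processed at least one batch.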
| 1,422
| 0
| 126
|
ac8a36d1877984ef81576c267c469097f46c2c4e
| 1,898
|
py
|
Python
|
setup.py
|
qctrl/python-cirq
|
72f2e412bcef2d92a53dee71ad50dca4153bb192
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
qctrl/python-cirq
|
72f2e412bcef2d92a53dee71ad50dca4153bb192
|
[
"Apache-2.0"
] | 6
|
2020-01-16T03:10:34.000Z
|
2022-03-13T22:57:46.000Z
|
setup.py
|
qctrl/python-cirq
|
72f2e412bcef2d92a53dee71ad50dca4153bb192
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# DO NOT EDIT THIS FILE!
# This file has been autogenerated by dephell <3
# https://github.com/dephell/dephell
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import os.path
readme = ''
here = os.path.abspath(os.path.dirname(__file__))
readme_path = os.path.join(here, 'README.rst')
if os.path.exists(readme_path):
with open(readme_path, 'rb') as stream:
readme = stream.read().decode('utf8')
setup(
long_description=readme,
name='qctrl-cirq',
version='0.0.4',
description='Q-CTRL Python Cirq',
python_requires='<3.9,>=3.6.4',
project_urls={"documentation": "", "homepage": "https://q-ctrl.com", "repository": "https://github.com/qctrl/python-cirq"},
author='Q-CTRL',
author_email='support@q-ctrl.com',
license='Apache-2.0',
keywords='q-ctrl qctrl quantum control',
classifiers=['Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Scientific/Engineering :: Physics', 'Topic :: Scientific/Engineering :: Visualization', 'Topic :: Software Development :: Embedded Systems', 'Topic :: System :: Distributed Computing'],
packages=['qctrlcirq'],
package_dir={"": "."},
package_data={},
install_requires=['cirq==0.*,>=0.6.0', 'numpy==1.*,>=1.16.0', 'qctrl-open-controls==4.*,>=4.3.0', 'scipy==1.*,>=1.3.0', 'toml==0.*,>=0.10.0'],
extras_require={"dev": ["nbval==0.*,>=0.9.5", "pylama", "pylint", "pylint-runner", "pytest", "qctrl-visualizer==2.*,>=2.1.0", "sphinx==2.*,>=2.2.0"]},
)
| 45.190476
| 620
| 0.652266
|
# -*- coding: utf-8 -*-
# DO NOT EDIT THIS FILE!
# This file has been autogenerated by dephell <3
# https://github.com/dephell/dephell
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import os.path
readme = ''
here = os.path.abspath(os.path.dirname(__file__))
readme_path = os.path.join(here, 'README.rst')
if os.path.exists(readme_path):
with open(readme_path, 'rb') as stream:
readme = stream.read().decode('utf8')
setup(
long_description=readme,
name='qctrl-cirq',
version='0.0.4',
description='Q-CTRL Python Cirq',
python_requires='<3.9,>=3.6.4',
project_urls={"documentation": "", "homepage": "https://q-ctrl.com", "repository": "https://github.com/qctrl/python-cirq"},
author='Q-CTRL',
author_email='support@q-ctrl.com',
license='Apache-2.0',
keywords='q-ctrl qctrl quantum control',
classifiers=['Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Scientific/Engineering :: Physics', 'Topic :: Scientific/Engineering :: Visualization', 'Topic :: Software Development :: Embedded Systems', 'Topic :: System :: Distributed Computing'],
packages=['qctrlcirq'],
package_dir={"": "."},
package_data={},
install_requires=['cirq==0.*,>=0.6.0', 'numpy==1.*,>=1.16.0', 'qctrl-open-controls==4.*,>=4.3.0', 'scipy==1.*,>=1.3.0', 'toml==0.*,>=0.10.0'],
extras_require={"dev": ["nbval==0.*,>=0.9.5", "pylama", "pylint", "pylint-runner", "pytest", "qctrl-visualizer==2.*,>=2.1.0", "sphinx==2.*,>=2.2.0"]},
)
| 0
| 0
| 0
|
c8628f4105fa9dc4a1e175f4c7f8e662791f1969
| 4,341
|
py
|
Python
|
tests/test_elasticsearch_driver_metadata_as_nested.py
|
James-QiuHaoran/scalable-image-matching
|
a96fe5132d2c39f32680a97e216b438c87280a24
|
[
"MIT"
] | 22
|
2018-11-24T12:30:47.000Z
|
2021-09-08T12:22:18.000Z
|
tests/test_elasticsearch_driver_metadata_as_nested.py
|
James-QiuHaoran/scalable-image-matching
|
a96fe5132d2c39f32680a97e216b438c87280a24
|
[
"MIT"
] | null | null | null |
tests/test_elasticsearch_driver_metadata_as_nested.py
|
James-QiuHaoran/scalable-image-matching
|
a96fe5132d2c39f32680a97e216b438c87280a24
|
[
"MIT"
] | 6
|
2019-01-18T12:18:36.000Z
|
2021-05-07T08:47:34.000Z
|
import pytest
import urllib.request
import os
import hashlib
from elasticsearch import Elasticsearch, ConnectionError, RequestError, NotFoundError
from time import sleep
from image_match.elasticsearch_driver import SignatureES
from PIL import Image
test_img_url1 = 'https://camo.githubusercontent.com/810bdde0a88bc3f8ce70c5d85d8537c37f707abe/68747470733a2f2f75706c6f61642e77696b696d656469612e6f72672f77696b6970656469612f636f6d6d6f6e732f7468756d622f652f65632f4d6f6e615f4c6973612c5f62795f4c656f6e6172646f5f64615f56696e63692c5f66726f6d5f4332524d465f7265746f75636865642e6a70672f36383770782d4d6f6e615f4c6973612c5f62795f4c656f6e6172646f5f64615f56696e63692c5f66726f6d5f4332524d465f7265746f75636865642e6a7067'
test_img_url2 = 'https://camo.githubusercontent.com/826e23bc3eca041110a5af467671b012606aa406/68747470733a2f2f63322e737461746963666c69636b722e636f6d2f382f373135382f363831343434343939315f303864383264653537655f7a2e6a7067'
urllib.request.urlretrieve(test_img_url1, 'test1.jpg')
urllib.request.urlretrieve(test_img_url2, 'test2.jpg')
INDEX_NAME = 'test_environment_{}'.format(hashlib.md5(os.urandom(128)).hexdigest()[:12])
DOC_TYPE = 'image'
MAPPINGS = {
"mappings": {
DOC_TYPE: {
"dynamic": True,
"properties": {
"metadata": {
"type": "nested",
"dynamic": True,
"properties": {
"tenant_id": { "type": "keyword" },
"project_id": { "type": "keyword" }
}
}
}
}
}
}
@pytest.fixture(scope='module', autouse=True)
@pytest.fixture(scope='function', autouse=True)
@pytest.fixture(scope='function', autouse=True)
@pytest.fixture
@pytest.fixture
| 32.395522
| 452
| 0.652384
|
import pytest
import urllib.request
import os
import hashlib
from elasticsearch import Elasticsearch, ConnectionError, RequestError, NotFoundError
from time import sleep
from image_match.elasticsearch_driver import SignatureES
from PIL import Image
test_img_url1 = 'https://camo.githubusercontent.com/810bdde0a88bc3f8ce70c5d85d8537c37f707abe/68747470733a2f2f75706c6f61642e77696b696d656469612e6f72672f77696b6970656469612f636f6d6d6f6e732f7468756d622f652f65632f4d6f6e615f4c6973612c5f62795f4c656f6e6172646f5f64615f56696e63692c5f66726f6d5f4332524d465f7265746f75636865642e6a70672f36383770782d4d6f6e615f4c6973612c5f62795f4c656f6e6172646f5f64615f56696e63692c5f66726f6d5f4332524d465f7265746f75636865642e6a7067'
test_img_url2 = 'https://camo.githubusercontent.com/826e23bc3eca041110a5af467671b012606aa406/68747470733a2f2f63322e737461746963666c69636b722e636f6d2f382f373135382f363831343434343939315f303864383264653537655f7a2e6a7067'
urllib.request.urlretrieve(test_img_url1, 'test1.jpg')
urllib.request.urlretrieve(test_img_url2, 'test2.jpg')
INDEX_NAME = 'test_environment_{}'.format(hashlib.md5(os.urandom(128)).hexdigest()[:12])
DOC_TYPE = 'image'
MAPPINGS = {
"mappings": {
DOC_TYPE: {
"dynamic": True,
"properties": {
"metadata": {
"type": "nested",
"dynamic": True,
"properties": {
"tenant_id": { "type": "keyword" },
"project_id": { "type": "keyword" }
}
}
}
}
}
}
@pytest.fixture(scope='module', autouse=True)
def index_name():
return INDEX_NAME
@pytest.fixture(scope='function', autouse=True)
def setup_index(request, index_name):
es = Elasticsearch()
try:
es.indices.create(index=index_name, body=MAPPINGS)
except RequestError as e:
if e.error == u'index_already_exists_exception':
es.indices.delete(index_name)
else:
raise
def fin():
try:
es.indices.delete(index_name)
except NotFoundError:
pass
request.addfinalizer(fin)
@pytest.fixture(scope='function', autouse=True)
def cleanup_index(request, es, index_name):
def fin():
try:
es.indices.delete(index_name)
except NotFoundError:
pass
request.addfinalizer(fin)
@pytest.fixture
def es():
return Elasticsearch()
@pytest.fixture
def ses(es, index_name):
return SignatureES(es=es, index=index_name, doc_type=DOC_TYPE)
def test_elasticsearch_running(es):
i = 0
while i < 5:
try:
es.ping()
assert True
return
except ConnectionError:
i += 1
sleep(2)
pytest.fail('Elasticsearch not running (failed to connect after {} tries)'
.format(str(i)))
def test_lookup_with_filter_by_metadata(ses):
ses.add_image('test1.jpg', metadata=_metadata('foo', 'project-x'), refresh_after=True)
ses.add_image('test2.jpg', metadata=_metadata('foo', 'project-x'), refresh_after=True)
ses.add_image('test3.jpg', img='test1.jpg', metadata=_metadata('foo', 'project-y'), refresh_after=True)
ses.add_image('test2.jpg', metadata=_metadata('bar', 'project-x'), refresh_after=True)
r = ses.search_image('test1.jpg', pre_filter=_nested_filter('foo', 'project-x'))
assert len(r) == 2
r = ses.search_image('test1.jpg', pre_filter=_nested_filter('foo', 'project-z'))
assert len(r) == 0
r = ses.search_image('test1.jpg', pre_filter=_nested_filter('bar', 'project-x'))
assert len(r) == 1
r = ses.search_image('test1.jpg', pre_filter=_nested_filter('bar-2', 'project-x'))
assert len(r) == 0
r = ses.search_image('test1.jpg', pre_filter=_nested_filter('bar', 'project-z'))
assert len(r) == 0
def _metadata(tenant_id, project_id):
return dict(
tenant_id=tenant_id,
project_id=project_id
)
def _nested_filter(tenant_id, project_id):
return {
"nested" : {
"path" : "metadata",
"query" : {
"bool" : {
"must" : [
{"term": {"metadata.tenant_id": tenant_id}},
{"term": {"metadata.project_id": project_id}}
]
}
}
}
}
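# Descriptive note (not in the original file): the pre_filter built above is a
# standard Elasticsearch nested query; it matches only documents whose nested
# `metadata` object carries both the given tenant_id and project_id, mirroring
# the nested mapping declared in MAPPINGS at the top of this file.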
| 2,453
| 0
| 214
|
4cbf11419437fed2cf010cf45d94dd5dea8c505a
| 41
|
py
|
Python
|
tru/global_vars.py
|
TroyRetter/TRU
|
c9989d09021b0e1483487c99c5a2e73b0310aae6
|
[
"MIT"
] | null | null | null |
tru/global_vars.py
|
TroyRetter/TRU
|
c9989d09021b0e1483487c99c5a2e73b0310aae6
|
[
"MIT"
] | null | null | null |
tru/global_vars.py
|
TroyRetter/TRU
|
c9989d09021b0e1483487c99c5a2e73b0310aae6
|
[
"MIT"
] | null | null | null |
BASE_DIRECTORY = None
CACHE = "/tmp/tru"
| 13.666667
| 21
| 0.707317
|
BASE_DIRECTORY = None
CACHE = "/tmp/tru"
| 0
| 0
| 0
|
a2008ac42ab43f59dd987f910382bf756d67c855
| 1,670
|
py
|
Python
|
src/BandC/ParserUtil.py
|
pywash/pywash
|
f105752f67ad5c4648117a2bebd875f8c88caeb2
|
[
"MIT"
] | 7
|
2019-02-26T10:45:18.000Z
|
2019-08-13T18:08:58.000Z
|
src/BandC/ParserUtil.py
|
pywash/pywash
|
f105752f67ad5c4648117a2bebd875f8c88caeb2
|
[
"MIT"
] | null | null | null |
src/BandC/ParserUtil.py
|
pywash/pywash
|
f105752f67ad5c4648117a2bebd875f8c88caeb2
|
[
"MIT"
] | 1
|
2021-06-11T14:56:52.000Z
|
2021-06-11T14:56:52.000Z
|
from src.BandC import *
from src.Exceptions import FileFormatNotFound
__parsers__ = {'.csv': CSV, '.arff': Arff}
__url_parsers__ = {'.csv': URLCSV, '.arff': URLARFF}
def assign_parser(file_path: str, contents: str = None, verbose: bool = False) -> callable:
""" Allocate a specific parser to a file_path
:param file_path: The file path of the dataset to parse
:param contents: The dataset as a string
:param verbose: True for output
:return: A parser object which is able to parse the dataset
"""
# Check file path to see if we need a local or an url parser
parsers = __parsers__
if file_path.startswith('https:'):
print('THIS IS IT')
parsers = __url_parsers__
# Find the correct parser for the file
for parser in parsers:
# Check if we have implemented a parser for this file
if file_path.endswith(parser):
# Check if the dataset has been given as a string
if contents is None:
return parsers[parser](file_path=file_path,
verbose=verbose)
else:
return parsers[parser](file_path=file_path,
contents=contents,
verbose=verbose)
    # When the file format is not in our list of parsable formats
raise FileFormatNotFound("File format of file: " + file_path + " is unknown")
if __name__ == "__main__":
p = assign_parser("C:/AAA_School/Assignments/BEP/Datasets/Test.csv", verbose=True)
print()
print(p)
print(p.get_dialect)
print()
print(p.parse_file().head(5))
print('Done')
| 35.531915
| 91
| 0.619162
|
from src.BandC import *
from src.Exceptions import FileFormatNotFound
__parsers__ = {'.csv': CSV, '.arff': Arff}
__url_parsers__ = {'.csv': URLCSV, '.arff': URLARFF}
def assign_parser(file_path: str, contents: str = None, verbose: bool = False) -> callable:
""" Allocate a specific parser to a file_path
:param file_path: The file path of the dataset to parse
:param contents: The dataset as a string
:param verbose: True for output
:return: A parser object which is able to parse the dataset
"""
# Check file path to see if we need a local or an url parser
parsers = __parsers__
if file_path.startswith('https:'):
print('THIS IS IT')
parsers = __url_parsers__
# Find the correct parser for the file
for parser in parsers:
# Check if we have implemented a parser for this file
if file_path.endswith(parser):
# Check if the dataset has been given as a string
if contents is None:
return parsers[parser](file_path=file_path,
verbose=verbose)
else:
return parsers[parser](file_path=file_path,
contents=contents,
verbose=verbose)
    # When the file format is not in our list of parsable formats
raise FileFormatNotFound("File format of file: " + file_path + " is unknown")
if __name__ == "__main__":
p = assign_parser("C:/AAA_School/Assignments/BEP/Datasets/Test.csv", verbose=True)
print()
print(p)
print(p.get_dialect)
print()
print(p.parse_file().head(5))
print('Done')
| 0
| 0
| 0
|
1c749ee797e17400b3ddbf7ac439a88ffca0a93e
| 2,955
|
py
|
Python
|
tests/test_xsd_union.py
|
imanashoorii/zibal-zeep
|
9ff7b229b0759597823da41d1dbf48c6e7b5b383
|
[
"MIT"
] | null | null | null |
tests/test_xsd_union.py
|
imanashoorii/zibal-zeep
|
9ff7b229b0759597823da41d1dbf48c6e7b5b383
|
[
"MIT"
] | null | null | null |
tests/test_xsd_union.py
|
imanashoorii/zibal-zeep
|
9ff7b229b0759597823da41d1dbf48c6e7b5b383
|
[
"MIT"
] | null | null | null |
from tests.utils import assert_nodes_equal, load_xml, render_node
from zibalzeep import xsd
| 30.78125
| 92
| 0.551946
|
from tests.utils import assert_nodes_equal, load_xml, render_node
from zibalzeep import xsd
def test_union_same_types():
schema = xsd.Schema(
load_xml(
"""
<?xml version="1.0"?>
<xsd:schema
xmlns="http://tests.python-zeep.org/"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:tns="http://tests.python-zeep.org/"
targetNamespace="http://tests.python-zeep.org/"
elementFormDefault="qualified">
<xsd:simpleType name="MMYY">
<xsd:restriction base="xsd:int"/>
</xsd:simpleType>
<xsd:simpleType name="MMYYYY">
<xsd:restriction base="xsd:int"/>
</xsd:simpleType>
<xsd:simpleType name="Date">
<xsd:union memberTypes="tns:MMYY MMYYYY"/>
</xsd:simpleType>
<xsd:element name="item" type="tns:Date"/>
</xsd:schema>
"""
)
)
elm = schema.get_element("ns0:item")
node = render_node(elm, "102018")
expected = """
<document>
<ns0:item xmlns:ns0="http://tests.python-zeep.org/">102018</ns0:item>
</document>
"""
assert_nodes_equal(expected, node)
value = elm.parse(list(node)[0], schema)
assert value == 102018
def test_union_mixed():
schema = xsd.Schema(
load_xml(
"""
<?xml version="1.0"?>
<xsd:schema
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:tns="http://tests.python-zeep.org/"
targetNamespace="http://tests.python-zeep.org/"
elementFormDefault="qualified">
<xsd:element name="item" type="tns:Date"/>
<xsd:simpleType name="Date">
<xsd:union memberTypes="xsd:date xsd:gYear xsd:gYearMonth tns:MMYY tns:MMYYYY"/>
</xsd:simpleType>
<xsd:simpleType name="MMYY">
<xsd:restriction base="xsd:string">
<xsd:pattern value="(0[123456789]|1[012]){1}\d{2}"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:simpleType name="MMYYYY">
<xsd:restriction base="xsd:string">
<xsd:pattern value="(0[123456789]|1[012]){1}\d{4}"/>
</xsd:restriction>
</xsd:simpleType>
</xsd:schema>
"""
)
)
elm = schema.get_element("ns0:item")
node = render_node(elm, "102018")
expected = """
<document>
<ns0:item xmlns:ns0="http://tests.python-zeep.org/">102018</ns0:item>
</document>
"""
assert_nodes_equal(expected, node)
value = elm.parse(list(node)[0], schema)
assert value == "102018"
node = render_node(elm, "2018")
expected = """
<document>
<ns0:item xmlns:ns0="http://tests.python-zeep.org/">2018</ns0:item>
</document>
"""
assert_nodes_equal(expected, node)
value = elm.parse(list(node)[0], schema)
assert value == "2018"
| 2,815
| 0
| 46
|
14b8846f714d76e5b90a4f207ecf7721fe8b3e01
| 3,334
|
py
|
Python
|
docs/mail-merge/docs_mail_merge_test.py
|
kvij/python-samples
|
1f3341c37bacf5239210ea0cbe957a373c095a0a
|
[
"Apache-2.0"
] | 1
|
2021-06-12T22:39:57.000Z
|
2021-06-12T22:39:57.000Z
|
docs/mail-merge/docs_mail_merge_test.py
|
kvij/python-samples
|
1f3341c37bacf5239210ea0cbe957a373c095a0a
|
[
"Apache-2.0"
] | null | null | null |
docs/mail-merge/docs_mail_merge_test.py
|
kvij/python-samples
|
1f3341c37bacf5239210ea0cbe957a373c095a0a
|
[
"Apache-2.0"
] | 8
|
2021-11-25T04:26:15.000Z
|
2021-11-30T17:06:49.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright ©2018-2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
docs_mail_merge_test.py -- unit test for docs_mail_merge.py:
1. test credentials file availability
2. test whether project can connect to all 3 APIs
3. test creation (and deletion) of Google Docs file
4. test copying (and deletion) of Google Docs file
5. test getting plain text data
6. test getting data from Google Sheets spreadsheet
"""
import os
import unittest
from googleapiclient import discovery
from docs_mail_merge import (CLIENT_ID_FILE, get_data, get_http_client,
_copy_template)
class TestDocsMailMerge(unittest.TestCase):
'Unit tests for Mail Merge sample'
def project_test():
'Tests whether project credentials file was downloaded from project.'
if os.path.exists(CLIENT_ID_FILE):
return True
raise IOError('''\
ERROR: Must create a Google APIs project, enable both
the Drive and Docs REST APIs, create and download OAuth2
client credentials as %r before unit test can run.''' % CLIENT_ID_FILE)
def gapis_test():
'Tests whether project can connect to all 3 APIs used in the sample.'
HTTP = get_http_client()
discovery.build('drive', 'v3', http=HTTP)
discovery.build('docs', 'v1', http=HTTP)
discovery.build('sheets', 'v4', http=HTTP)
return True
def create_doc_test():
'Tests whether project can create and delete a Google Docs file.'
DRIVE = discovery.build('drive', 'v3', http=get_http_client())
DATA = {
'name': 'Test Doc',
'mimeType': 'application/vnd.google-apps.document',
}
doc_id = DRIVE.files().create(body=DATA, fields='id').execute().get('id')
DRIVE.files().delete(fileId=doc_id, fields='').execute()
return True
def copy_doc_test():
'Tests whether project can copy and delete a Google Docs file.'
DRIVE = discovery.build('drive', 'v3', http=get_http_client())
DOCS_FILE_ID = '1Xycxuuv7OhEQUuzbt_Mw0TPMq02MseSD1vZdBJ3nLjk'
doc_id = _copy_template(DOCS_FILE_ID, 'text', DRIVE)
DRIVE.files().delete(fileId=doc_id, fields='').execute()
return True
def get_text_data_test():
'Tests reading plain text data.'
return get_data('text')
def get_sheets_data_test():
'Tests reading Google Sheets data.'
return get_data('sheets')
if __name__ == '__main__':
unittest.main()
| 35.849462
| 79
| 0.709058
|
# -*- coding: utf-8 -*-
#
# Copyright ©2018-2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
docs_mail_merge_test.py -- unit test for docs_mail_merge.py:
1. test credentials file availability
2. test whether project can connect to all 3 APIs
3. test creation (and deletion) of Google Docs file
4. test copying (and deletion) of Google Docs file
5. test getting plain text data
6. test getting data from Google Sheets spreadsheet
"""
import os
import unittest
from googleapiclient import discovery
from docs_mail_merge import (CLIENT_ID_FILE, get_data, get_http_client,
_copy_template)
class TestDocsMailMerge(unittest.TestCase):
'Unit tests for Mail Merge sample'
def test_project(self):
self.assertTrue(project_test())
def test_gapis(self):
self.assertTrue(gapis_test())
def test_create_doc(self):
self.assertTrue(create_doc_test())
def test_copy_doc(self):
self.assertTrue(copy_doc_test())
def test_get_text_data(self):
self.assertTrue(bool(get_text_data_test()))
def test_get_sheets_data(self):
self.assertTrue(bool(get_sheets_data_test()))
def project_test():
'Tests whether project credentials file was downloaded from project.'
if os.path.exists(CLIENT_ID_FILE):
return True
raise IOError('''\
ERROR: Must create a Google APIs project, enable both
the Drive and Docs REST APIs, create and download OAuth2
client credentials as %r before unit test can run.''' % CLIENT_ID_FILE)
def gapis_test():
'Tests whether project can connect to all 3 APIs used in the sample.'
HTTP = get_http_client()
discovery.build('drive', 'v3', http=HTTP)
discovery.build('docs', 'v1', http=HTTP)
discovery.build('sheets', 'v4', http=HTTP)
return True
def create_doc_test():
'Tests whether project can create and delete a Google Docs file.'
DRIVE = discovery.build('drive', 'v3', http=get_http_client())
DATA = {
'name': 'Test Doc',
'mimeType': 'application/vnd.google-apps.document',
}
doc_id = DRIVE.files().create(body=DATA, fields='id').execute().get('id')
DRIVE.files().delete(fileId=doc_id, fields='').execute()
return True
def copy_doc_test():
'Tests whether project can copy and delete a Google Docs file.'
DRIVE = discovery.build('drive', 'v3', http=get_http_client())
DOCS_FILE_ID = '1Xycxuuv7OhEQUuzbt_Mw0TPMq02MseSD1vZdBJ3nLjk'
doc_id = _copy_template(DOCS_FILE_ID, 'text', DRIVE)
DRIVE.files().delete(fileId=doc_id, fields='').execute()
return True
def get_text_data_test():
'Tests reading plain text data.'
return get_data('text')
def get_sheets_data_test():
'Tests reading Google Sheets data.'
return get_data('sheets')
if __name__ == '__main__':
unittest.main()
| 296
| 0
| 156
|
2644da3c7c17205a4c4c3e561f05108d119f037f
| 2,323
|
py
|
Python
|
datashader/tests/test_raster.py
|
nside/datashader
|
7d1af5a63c51430731e728a627d5e2304c56da8a
|
[
"BSD-3-Clause"
] | null | null | null |
datashader/tests/test_raster.py
|
nside/datashader
|
7d1af5a63c51430731e728a627d5e2304c56da8a
|
[
"BSD-3-Clause"
] | 1
|
2021-07-30T18:24:08.000Z
|
2021-07-30T18:24:08.000Z
|
datashader/tests/test_raster.py
|
nside/datashader
|
7d1af5a63c51430731e728a627d5e2304c56da8a
|
[
"BSD-3-Clause"
] | null | null | null |
from os import path
import pytest
import datashader as ds
import rasterio as rio
from pytest import set_trace
BASE_PATH = path.split(__file__)[0]
DATA_PATH = path.abspath(path.join(BASE_PATH, 'data'))
TEST_RASTER_PATH = path.join(DATA_PATH, 'world.rgb.tif')
with rio.open(TEST_RASTER_PATH) as src:
x_range = (src.bounds.left, src.bounds.right)
y_range = (src.bounds.bottom, src.bounds.top)
cvs = ds.Canvas(plot_width=2,
plot_height=2,
x_range=x_range,
y_range=y_range)
| 33.666667
| 95
| 0.641412
|
from os import path
import pytest
import datashader as ds
import rasterio as rio
from pytest import set_trace
BASE_PATH = path.split(__file__)[0]
DATA_PATH = path.abspath(path.join(BASE_PATH, 'data'))
TEST_RASTER_PATH = path.join(DATA_PATH, 'world.rgb.tif')
with rio.open(TEST_RASTER_PATH) as src:
x_range = (src.bounds.left, src.bounds.right)
y_range = (src.bounds.bottom, src.bounds.top)
cvs = ds.Canvas(plot_width=2,
plot_height=2,
x_range=x_range,
y_range=y_range)
def test_raster_aggregate_default():
with rio.open(TEST_RASTER_PATH) as src:
agg = cvs.raster(src)
assert agg is not None
def test_raster_aggregate_nearest():
with rio.open(TEST_RASTER_PATH) as src:
agg = cvs.raster(src, resample_method='nearest')
assert agg is not None
def test_raster_aggregate_with_overviews():
with rio.open(TEST_RASTER_PATH) as src:
agg = cvs.raster(src, use_overviews=True)
assert agg is not None
def test_raster_aggregate_without_overviews():
with rio.open(TEST_RASTER_PATH) as src:
agg = cvs.raster(src, use_overviews=False)
assert agg is not None
def test_raster_set_missing():
with rio.open(TEST_RASTER_PATH) as src:
agg = cvs.raster(src, missing=-100)
assert agg is not None
def test_out_of_bounds_return_correct_size():
with rio.open(TEST_RASTER_PATH) as src:
cvs = ds.Canvas(plot_width=2,
plot_height=2,
x_range=[1e10, 1e20],
y_range=[1e10, 1e20])
agg = cvs.raster(src, missing=-100)
assert agg.shape == (2,2)
assert agg is not None
def test_partial_extent_returns_correct_size():
with rio.open(TEST_RASTER_PATH) as src:
half_width = (src.bounds.right - src.bounds.left) / 2
half_height = (src.bounds.top - src.bounds.bottom) / 2
cvs = ds.Canvas(plot_width=512,
plot_height=256,
x_range=[src.bounds.left-half_width, src.bounds.left+half_width],
y_range=[src.bounds.bottom-half_height, src.bounds.bottom+half_height])
agg = cvs.raster(src, missing=-100)
assert agg.shape == (256, 512)
assert agg is not None
| 1,615
| 0
| 161
|
f2dedbfad2292cca8b53e6df8edb30d71cc8f25c
| 298
|
py
|
Python
|
pybpodgui_plugin_session_history/settings.py
|
pybpod/pybpod-gui-plugin-session-history
|
6767c5a6590001a8f0420cfdb5327f924b98dc5a
|
[
"MIT"
] | null | null | null |
pybpodgui_plugin_session_history/settings.py
|
pybpod/pybpod-gui-plugin-session-history
|
6767c5a6590001a8f0420cfdb5327f924b98dc5a
|
[
"MIT"
] | null | null | null |
pybpodgui_plugin_session_history/settings.py
|
pybpod/pybpod-gui-plugin-session-history
|
6767c5a6590001a8f0420cfdb5327f924b98dc5a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
SETTINGS_PRIORITY = 80
# THESE SETTINGS ARE NEEDED FOR PYSETTINGS
SESSIONLOG_PLUGIN_ICON = os.path.join(os.path.dirname(__file__), 'resources', 'history.png')
SESSIONLOG_PLUGIN_WINDOW_SIZE = 700, 600
SESSIONLOG_PLUGIN_REFRESH_RATE = 1000
| 21.285714
| 92
| 0.758389
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
SETTINGS_PRIORITY = 80
# THESE SETTINGS ARE NEEDED FOR PYSETTINGS
SESSIONLOG_PLUGIN_ICON = os.path.join(os.path.dirname(__file__), 'resources', 'history.png')
SESSIONLOG_PLUGIN_WINDOW_SIZE = 700, 600
SESSIONLOG_PLUGIN_REFRESH_RATE = 1000
| 0
| 0
| 0
|
ffe35272f8ce7acb0b3fa5d432b36ab020bfbd5f
| 1,795
|
py
|
Python
|
utils/utils.py
|
likesum/bpn
|
bf4cc2b78c461f99cdc7ac91453b1c4cd3aad9b6
|
[
"MIT"
] | 17
|
2021-04-14T13:57:59.000Z
|
2022-02-09T04:28:13.000Z
|
utils/utils.py
|
likesum/bpn
|
bf4cc2b78c461f99cdc7ac91453b1c4cd3aad9b6
|
[
"MIT"
] | 4
|
2021-04-29T03:41:59.000Z
|
2022-01-06T02:33:39.000Z
|
utils/utils.py
|
likesum/bpn
|
bf4cc2b78c461f99cdc7ac91453b1c4cd3aad9b6
|
[
"MIT"
] | 6
|
2021-05-19T08:04:19.000Z
|
2021-12-28T12:32:40.000Z
|
"""Various utility functions."""
import sys
import time
import signal
import numpy as np
def getstop():
"""Returns stop so that stop[0] is True if ctrl+c was hit."""
stop = [False]
_orig = [None]
_orig[0] = signal.signal(signal.SIGINT, handler)
return stop
def saveopt(fname, opt):
"""Save optimizer state to file"""
weights = opt.get_weights()
npz = {('%d' % i): weights[i] for i in range(len(weights))}
np.savez(fname, **npz)
def savemodel(fname, model):
"""Save model weights to file"""
weights = model.get_weights()
npz = {('%d' % i): weights[i] for i in range(len(weights))}
np.savez(fname, **npz)
def loadmodel(fname, model):
"""Restore model weights from file."""
npz = np.load(fname)
weights = [npz['%d' % i] for i in range(len(npz.files))]
model.set_weights(weights)
def loadopt(fname, opt, model):
"""Restore optimizer state from file."""
npz = np.load(fname)
weights = [npz['%d' % i] for i in range(len(npz.files))]
opt._create_all_weights(model.trainable_variables)
opt.set_weights(weights)
| 27.19697
| 75
| 0.579387
|
"""Various utility functions."""
import sys
import time
import signal
import numpy as np
class LogWriter:
def __init__(self, filename):
self._log = open(filename, 'a')
def log(self, data, numit=None):
"""Log output in standard format."""
if numit is None:
lstr = data
else:
dstr = [(k + (' = %.3e' % data[k])) for k in data.keys()]
lstr = '[%09d] ' % numit + ' '.join(dstr)
sys.stdout.write(time.strftime("%Y-%m-%d %H:%M:%S ") + lstr + "\n")
sys.stdout.flush()
self._log.write(time.strftime("%Y-%m-%d %H:%M:%S ") + lstr + "\n")
self._log.flush()
def getstop():
"""Returns stop so that stop[0] is True if ctrl+c was hit."""
stop = [False]
_orig = [None]
def handler(_a, _b):
del _a
del _b
stop[0] = True
signal.signal(signal.SIGINT, _orig[0])
_orig[0] = signal.signal(signal.SIGINT, handler)
return stop
def saveopt(fname, opt):
"""Save optimizer state to file"""
weights = opt.get_weights()
npz = {('%d' % i): weights[i] for i in range(len(weights))}
np.savez(fname, **npz)
def savemodel(fname, model):
"""Save model weights to file"""
weights = model.get_weights()
npz = {('%d' % i): weights[i] for i in range(len(weights))}
np.savez(fname, **npz)
def loadmodel(fname, model):
"""Restore model weights from file."""
npz = np.load(fname)
weights = [npz['%d' % i] for i in range(len(npz.files))]
model.set_weights(weights)
def loadopt(fname, opt, model):
"""Restore optimizer state from file."""
npz = np.load(fname)
weights = [npz['%d' % i] for i in range(len(npz.files))]
opt._create_all_weights(model.trainable_variables)
opt.set_weights(weights)
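# Minimal round-trip sketch for savemodel/loadmodel (illustrative only; the
# _DummyModel class and filename below are stand-ins for any Keras-style object
# exposing get_weights/set_weights, which is all these helpers assume).
class _DummyModel:
    """Stand-in 'model' holding a list of numpy arrays as its weights."""
    def __init__(self, weights):
        self._w = list(weights)
    def get_weights(self):
        return self._w
    def set_weights(self, weights):
        self._w = list(weights)
def _example_roundtrip(fname='dummy_weights.npz'):
    """Save a dummy model's weights, restore them into a fresh instance, compare."""
    src = _DummyModel([np.zeros((2, 2)), np.ones(3)])
    savemodel(fname, src)
    dst = _DummyModel([np.empty((2, 2)), np.empty(3)])
    loadmodel(fname, dst)
    assert all(np.allclose(a, b) for a, b in zip(src.get_weights(), dst.get_weights()))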
| 147
| 496
| 50
|
153f0948bdb59700318ba30f9815f029f9d0d05f
| 2,579
|
py
|
Python
|
src/fb.py
|
webgisdeveloper/COvid19
|
45d80109109ee03ca4b7ea463f63830b6f411cf0
|
[
"MIT"
] | null | null | null |
src/fb.py
|
webgisdeveloper/COvid19
|
45d80109109ee03ca4b7ea463f63830b6f411cf0
|
[
"MIT"
] | null | null | null |
src/fb.py
|
webgisdeveloper/COvid19
|
45d80109109ee03ca4b7ea463f63830b6f411cf0
|
[
"MIT"
] | null | null | null |
import geopandas as gpd
import pandas as pd
from shapely.geometry import Point
from geopandas.tools import sjoin
import sqlite3
#from datetime import datetime, timezone
import datetime
co_county_sf = '/Users/rl/scratch/covid-19/facebook/co_counties/co_counties.shp'
boulder_county_zone_sf = '/Users/rl/scratch/covid-19/facebook/boulder_county_zoning/Zoning__Zoning_Districts.shp'
| 27.43617
| 113
| 0.618457
|
import geopandas as gpd
import pandas as pd
from shapely.geometry import Point
from geopandas.tools import sjoin
import sqlite3
#from datetime import datetime, timezone
import datetime
def day_of_week(date):
year,month,day = date.split('-')
ans = datetime.date(int(year), int(month), int(day))
day_of_week = ans.strftime("%A")
return day_of_week
def utc_to_local(utc_dt):
return utc_dt.replace(tzinfo=datetime.timezone.utc).astimezone(tz=None)
def switch_tz(date, time):
year, month, day = date.split('-')
hour = time[:2]
minute = time[2:]
utc = datetime.datetime( int(year), int(month), int(day), int(hour), int(minute) )
mnt = utc_to_local(utc)
return ('-'.join([mnt.strftime('%Y'), mnt.strftime('%m'), mnt.strftime('%d')]) , \
mnt.strftime('%H') + mnt.strftime('%M'))
def get_bounding_shape(lat, lon, gdf, name):
h=pd.DataFrame({'Lat':[lat], 'Lon':[lon]})
geometry = [Point(xy) for xy in zip([lon], [lat])]
hg = gpd.GeoDataFrame(h, geometry=geometry)
hg.crs = {'init' :'epsg:4326'}
hg_1 = hg.to_crs(gdf.crs)
r = sjoin(gdf,hg_1)
if r.empty:
return None
else:
return r[name].tolist()[0]
co_county_sf = '/Users/rl/scratch/covid-19/facebook/co_counties/co_counties.shp'
def get_co_county(lat, lon, gdf=None):
if gdf is None:
gdf = gpd.read_file(co_county_sf)
return get_bounding_shape(lat, lon, gdf, 'NAME')
boulder_county_zone_sf = '/Users/rl/scratch/covid-19/facebook/boulder_county_zoning/Zoning__Zoning_Districts.shp'
def get_boulder_co_zone(lat, lon, gdf=None):
if gdf is None:
gdf = gpd.read_file(boulder_county_zone_sf)
return get_bounding_shape(lat, lon, gdf, 'ZONEDESC')
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def get_db_fields(db_path, fields, local_tz=True):
c = sqlite3.connect(db_path)
c.row_factory = dict_factory
D = {}
dates = []
for row in c.execute('SELECT * FROM pop_tile WHERE n_crisis != "\\N"'):
lat = row['lat']
lon = row['lon']
if (lat,lon) not in D:
D[(lat,lon)] = {}
date, time = row['date_time'].split()
if local_tz:
date, time = switch_tz(date,time)
if date not in dates:
dates.append(date)
if date not in D[(lat,lon)]:
D[(lat,lon)][date] = {}
d = {}
for field in fields:
d[field] = row[field]
D[(lat,lon)][date][time] = d
return D
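# Illustrative shape of the dictionary returned by get_db_fields (the numbers and
# the 'n_crisis' field are hypothetical examples):
# {(40.01, -105.27): {'2020-04-01': {'0800': {'n_crisis': '12.0'}}}}
# i.e. keyed by (lat, lon) tile, then local date string, then HHMM time string,
# with one entry per requested field.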
| 2,011
| 0
| 182
|
7095a2abb5c57ba434cd9edb10719738604c0238
| 1,607
|
py
|
Python
|
django-saiqa/saiqa/Test/RegisterTest.py
|
mark-mo/saiqa
|
a12482931aa0830de915ace4a25b039db951cd34
|
[
"Apache-2.0"
] | null | null | null |
django-saiqa/saiqa/Test/RegisterTest.py
|
mark-mo/saiqa
|
a12482931aa0830de915ace4a25b039db951cd34
|
[
"Apache-2.0"
] | 21
|
2019-12-26T17:09:47.000Z
|
2022-03-21T22:16:46.000Z
|
saidj/saidj/test/RegisterTest.py
|
mark-mo/saiqa
|
a12482931aa0830de915ace4a25b039db951cd34
|
[
"Apache-2.0"
] | null | null | null |
from saiqa.Model.UserModel import User
from saiqa.Service.UserService import UserService
from saiqa.Service.QuestionService import QuestionService
from saiqa.Exception.CustomException import FormatError, PasswordMismatchError, EmptyFormError
import re
service = UserService('test')
qser = QuestionService('test')
# List of tests:
# Good Register
# Password Mismatch
# Incorrect Username Format
# Incorrect Password Format 1
# Incorrect Password Format 2
# Bad Register
# Duplicate User
| 34.934783
| 94
| 0.671437
|
from saiqa.Model.UserModel import User
from saiqa.Service.UserService import UserService
from saiqa.Service.QuestionService import QuestionService
from saiqa.Exception.CustomException import FormatError, PasswordMismatchError, EmptyFormError
import re
service = UserService('test')
qser = QuestionService('test')
# List of tests:
# Good Register
# Password Mismatch
# Incorrect Username Format
# Incorrect Password Format 1
# Incorrect Password Format 2
# Bad Register
# Duplicate User
def testregister(username, password, repassword):
user = User(username, password)
try:
if user.getpassword() == '':
raise EmptyFormError
if user.getpassword() != repassword:
raise PasswordMismatchError
except PasswordMismatchError: # This would kick a user back to the register screen
return 'Passwords do not match'
except EmptyFormError: # This would kick a user back to the register screen
return 'Empty form'
    # '-' must sit at the end of the character class to be literal (otherwise '!-_' is a range)
    specres = re.findall('[$&+,=?@`~^*%!_-]', user.getpassword())
upres = re.findall('[A-Z]', user.getpassword())
try:
if(len(user.getusername()) < 4 or len(user.getusername()) > 20):
raise FormatError
if(len(user.getpassword()) < 4 or len(user.getpassword()) > 20):
raise FormatError
if(len(specres) < 2 or len(upres) < 2):
raise FormatError
except FormatError:
return 'Incorrect formatting'
# If true, the user exists
if not (service.createUser(user)):
return 'Duplicate user'
return 'Good registration'
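# Expected outcomes for the scenarios listed above (illustrative, assuming a fresh
# 'test' backend): matching, well-formed credentials -> 'Good registration';
# mismatched passwords -> 'Passwords do not match'; a blank password -> 'Empty form';
# values outside 4-20 characters, or with fewer than two special characters or two
# uppercase letters -> 'Incorrect formatting'; re-registering an existing username
# -> 'Duplicate user'.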
| 1,084
| 0
| 22
|
8bd670a796ae7575f3e1ae3f850652a36e145686
| 1,400
|
py
|
Python
|
Official Notes/Exercises/Chapter 7/ex_0617.py
|
rafay99-epic/Ssmsunng-Innovation-Campus-Notes
|
19a2dfd125957d5a3d3458636d91747b48267689
|
[
"MIT"
] | 1
|
2022-01-14T15:20:43.000Z
|
2022-01-14T15:20:43.000Z
|
Official Notes/Exercises/Chapter 7/ex_0617.py
|
rafay99-epic/Ssmsunng-Innovation-Campus-Notes
|
19a2dfd125957d5a3d3458636d91747b48267689
|
[
"MIT"
] | null | null | null |
Official Notes/Exercises/Chapter 7/ex_0617.py
|
rafay99-epic/Ssmsunng-Innovation-Campus-Notes
|
19a2dfd125957d5a3d3458636d91747b48267689
|
[
"MIT"
] | null | null | null |
# Coding Practice #0617
#----------------------------------------------------------------------------------
import numpy as np
import cv2
# Go to the directory where the data file is located.
# os.chdir(r'~~') # Please, replace the path with your own.
# 1. Morphological filtering.
# Open an image in B/W.
img = cv2.imread('picture_Texture.jpg',0)
cv2.imshow("Texture", img)
cv2.waitKey(0) # Wait until a key is pressed.
cv2.destroyAllWindows() # Close the open window.
# 1.1. Erosion and dilation:
# Erosion: Turns white pixels into black ones.
# Dilation: Turns black pixels into white ones.
kernel = np.ones((5,5),'uint8')
img_eroded = cv2.erode(img, kernel, iterations=5) # 'iterations' is adjustable.
img_dilated = cv2.dilate(img,kernel,iterations=5) # 'iterations' is adjustable.
cv2.imshow("Eroded", img_eroded)
cv2.waitKey(0) # Wait until a key is pressed.
cv2.destroyAllWindows() # Close the open window.
cv2.imshow("Dilated", img_dilated)
cv2.waitKey(0) # Wait until a key is pressed.
cv2.destroyAllWindows() # Close the open window.
| 43.75
| 118
| 0.506429
|
# Coding Practice #0617
#----------------------------------------------------------------------------------
import numpy as np
import cv2
# Go to the directory where the data file is located.
# os.chdir(r'~~') # Please, replace the path with your own.
# 1. Morphological filtering.
# Open an image in B/W.
img = cv2.imread('picture_Texture.jpg',0)
cv2.imshow("Texture", img)
cv2.waitKey(0) # Wait until a key is pressed.
cv2.destroyAllWindows() # Close the open window.
# 1.1. Erosion and dilation:
# Erosion: Turns white pixels into black ones.
# Dilation: Turns black pixels into white ones.
kernel = np.ones((5,5),'uint8')
img_eroded = cv2.erode(img, kernel, iterations=5) # 'iterations' is adjustable.
img_dilated = cv2.dilate(img,kernel,iterations=5) # 'iterations' is adjustable.
cv2.imshow("Eroded", img_eroded)
cv2.waitKey(0) # Wait until a key is pressed.
cv2.destroyAllWindows() # Close the open window.
cv2.imshow("Dilated", img_dilated)
cv2.waitKey(0) # Wait until a key is pressed.
cv2.destroyAllWindows() # Close the open window.
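# A common follow-up (illustrative, not part of the original exercise): erosion
# followed by dilation is morphological "opening" (removes small white specks),
# while dilation followed by erosion is "closing" (fills small black holes).
# img_opened = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
# img_closed = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)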
| 0
| 0
| 0
|