Dataset schema (one row per source file; "nullable" columns may be empty):

| column | type | observed range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 – 1.02M |
| ext | string | 8 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 4 – 209 |
| max_stars_repo_name | string | length 5 – 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 | 1 – 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 – 209 |
| max_issues_repo_name | string | length 5 – 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 | 1 – 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 – 209 |
| max_forks_repo_name | string | length 5 – 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 | 1 – 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 – 1.02M |
| avg_line_length | float64 | 1.07 – 66.1k |
| max_line_length | int64 | 4 – 266k |
| alphanum_fraction | float64 | 0.01 – 1 |
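For orientation, a row of a dump with this schema could be inspected with the Hugging Face datasets library; this is a minimal sketch, and the dataset path below is a placeholder, not the actual source of this dump.

from datasets import load_dataset

# "org/code-dataset" is a hypothetical placeholder for wherever this dump lives.
ds = load_dataset("org/code-dataset", split="train")
row = ds[0]
print(row["max_stars_repo_name"], row["size"], row["alphanum_fraction"])
print(row["content"][:200])  # first 200 characters of the stored source file

---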
hexsha: d7b45a56aa0481b6e6b51e3210f9a274cb1f81ab | size: 2,267 | ext: py | lang: Python
file: src/modules/string.py @ StaticallyTypedRice/PodcastDownloader (head b2d5bc2a5b22ba5b2dc537fdafc588aedd67bcb5) | licenses: ["MIT"]
stars: 2 (2019-08-07T09:23:26.000Z – 2020-02-29T05:06:58.000Z) | issues: null | forks: 1 (2019-03-26T10:00:49.000Z – 2019-03-26T10:00:49.000Z)
def str_to_filename(name: str, placeholder: str='') -> str:
'''A rudimentary function for parsing a string into a valid file name.
Removes invalid file name characters.
    NOTE: There is no guarantee that the returned string will always be a valid
    file name, but it should be valid in most cases.
Arguments:
name: The string to be parsed.
placeholder: Replace invalid file name characters with this string.
'''
invalid_characters = [
'\\',
'/',
':',
'*',
'?',
'"',
'<',
'>',
'|',
]
for character in invalid_characters:
name = name.replace(character, placeholder)
return name
def command_line_to_bool(string: str, strict=True) -> bool:
'''Parses the strings 'y', 'yes', 'n' or 'no' to a boolean.
The strings are not case sensitive.
Arguments:
string: The string to be parsed.
strict: If True, only 'y', 'yes', 'n' and 'no' will be parsed,
with other strings raising a ValueError.
If False, 'y' and 'yes' will return True and other strings
will return False.
NOTE: This is different from using the bool() function on a string.
The bool() function returns false for an empty string,
and returns true otherwise. This function parses the words
'y', 'yes', 'n' and 'no' into a boolean.'''
# Use the lower-case version of the string
string_lowercase = string.lower()
if string_lowercase == 'y' or string_lowercase == 'yes':
# the string is equal to 'y' or 'yes'
return True
    elif strict:
        if string_lowercase == 'n' or string_lowercase == 'no':
            # the string is equal to 'n' or 'no'
            return False
        # The string is invalid
        raise ValueError(
            f'The string \'{string}\' is invalid. '
            'Only \'y\', \'yes\', \'n\' or \'no\' are valid in strict mode.'
        )
    else:
        # The string does not equal 'y' or 'yes', and strict mode is off
        return False
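A short usage sketch for the two helpers above; the expected outputs follow directly from the code.

print(str_to_filename('episode: 1/2?'))             # 'episode 12'
print(str_to_filename('a:b', placeholder='-'))      # 'a-b'
print(command_line_to_bool('YES'))                  # True
print(command_line_to_bool('maybe', strict=False))  # False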
avg_line_length: 32.385714 | max_line_length: 85 | alphanum_fraction: 0.533304

---
hexsha: 3078be5943e8324505e23c3c1e1e4b055a1fa512 | size: 8,759 | ext: py | lang: Python
file: data_utils.py @ TylerCools/thesis (head 8195ca3b8658bdd10f4ce38c6b50cd2bbedd3e57) | licenses: ["MIT"]
stars: null | issues: null | forks: null
from __future__ import absolute_import
import os
import re
import numpy as np
import tensorflow as tf
stop_words=set(["a","an","the"])
def load_candidates(data_dir, task_id):
assert task_id > 0 and task_id < 7
candidates=[]
candidates_f=None
candid_dic={}
if task_id==6:
candidates_f='dialog-babi-task6-dstc2-candidates.txt'
else:
candidates_f='dialog-babi-candidates.txt'
with open(os.path.join(data_dir,candidates_f)) as f:
for i,line in enumerate(f):
candid_dic[line.strip().split(' ',1)[1]] = i
line=tokenize(line.strip())[1:]
candidates.append(line)
return candidates,candid_dic
def load_dialog_task(data_dir, task_id, candid_dic, isOOV):
    '''Load the nth task. There are 6 dialog tasks in total (see the assert below).
    Returns a tuple containing the training, testing and validation data for the task.
    '''
assert task_id > 0 and task_id < 7
files = os.listdir(data_dir)
files = [os.path.join(data_dir, f) for f in files]
s = 'dialog-babi-task{}-'.format(task_id)
train_file = [f for f in files if s in f and 'trn' in f][0]
if isOOV:
print("OOV True")
test_file = [f for f in files if s in f and 'tst-OOV' in f][0]
else:
test_file = [f for f in files if s in f and 'tst.' in f][0]
val_file = [f for f in files if s in f and 'dev' in f][0]
train_data = get_dialogs(train_file,candid_dic)
test_data = get_dialogs(test_file,candid_dic)
val_data = get_dialogs(val_file,candid_dic)
return train_data, test_data, val_data
def tokenize(sent):
    '''Return the tokens of a sentence including punctuation, lower-cased,
    with stop words removed and any trailing '.', '?' or '!' dropped.
    >>> tokenize('Bob dropped the apple. Where is the apple?')
    ['bob', 'dropped', 'apple', '.', 'where', 'is', 'apple']
    '''
sent=sent.lower()
if sent=='<silence>':
return [sent]
    # r'(\W+)' rather than '(\W+)?': an optional group also matches the empty string,
    # which makes re.split() split between every character on Python >= 3.7
    result=[x.strip() for x in re.split(r'(\W+)', sent) if x.strip() and x.strip() not in stop_words]
if not result:
result=['<silence>']
if result[-1]=='.' or result[-1]=='?' or result[-1]=='!':
result=result[:-1]
return result
'''
This function is adapted to create different dialogue parts according to the theory of Source Awareness.
'''
def parse_dialogs_per_response(lines,candid_dic):
    '''
    Parse dialogs provided in the bAbI tasks format.
    The user and system responses are split here, and the length of the
    dialog is tracked via the nid. The essential part of Source Awareness
    also happens here: each dialog is split into separate user, system
    and result histories.
    '''
data=[]
context=[]
user= None
system= None
system_final = None
result = None
whole_system = []
whole_user = []
results = []
for line in lines:
line=line.strip()
if line:
nid, line = line.split(' ', 1)
nid = int(nid)
if '\t' in line:
user, system = line.split('\t')
answer = candid_dic[system]
system = tokenize(system)
user = tokenize(user)
user.append('$user')
user.append('#'+str(nid))
system.append('$system')
system.append('#'+str(nid))
whole_system.append(system)
whole_user.append(user)
context.append(user)
context.append(system)
data.append([context[:], user[:], system, whole_user, whole_system, answer, results])
else:
result=tokenize(line)
result.append('$result')
result.append('#'+str(nid))
context.append(result)
results.append(result)
else:
whole_system = []
whole_user = []
context=[]
results = []
return data
def get_dialogs(f,candid_dic):
    '''Given a file name, read the file, retrieve the dialogs,
    and then convert the sentences into a single dialog.
    '''
with open(f) as f:
return parse_dialogs_per_response(f.readlines(),candid_dic)
def vectorize_candidates_sparse(candidates,word_idx):
shape=(len(candidates),len(word_idx)+1)
indices=[]
values=[]
for i,candidate in enumerate(candidates):
for w in candidate:
indices.append([i,word_idx[w]])
values.append(1.0)
return tf.SparseTensor(indices,values,shape)
def vectorize_candidates(candidates,word_idx,sentence_size):
shape=(len(candidates),sentence_size)
C=[]
for i,candidate in enumerate(candidates):
lc=max(0,sentence_size-len(candidate))
C.append([word_idx[w] if w in word_idx else 0 for w in candidate] + [0] * lc)
return tf.constant(C,shape=shape)
"""
This function is adapted to vectorize all the dialogue parts correctly.
"""
def vectorize_data(data, word_idx, sentence_size, batch_size, candidates_size, max_memory_size):
"""
Vectorize stories and queries.
If a sentence length < sentence_size, the sentence will be padded with 0's.
If a story length < memory_size, the story will be padded with empty memories.
Empty memories are 1-D arrays of length sentence_size filled with 0's.
The answer array is returned as a one-hot encoding.
"""
System = []
Query = []
Story = []
Answer = []
WholeU = []
WholeS = []
Results = []
Story_words = []
data.sort(key=lambda x:len(x[0]),reverse=True)
    def pad_block(sentences):
        """Pad each sentence to sentence_size, keep only the most recent
        memory_size sentences, then pad the block out to memory_size."""
        block = []
        for sentence in sentences:
            ls = max(0, sentence_size - len(sentence))
            block.append([word_idx[w] if w in word_idx else 0 for w in sentence] + [0] * ls)
        # take only the most recent sentences that fit in memory
        block = block[::-1][:memory_size][::-1]
        # pad to memory_size
        for _ in range(max(0, memory_size - len(block))):
            block.append([0] * sentence_size)
        return block
    for i, (story, query, system, whole_user, whole_system, answer, results) in enumerate(data):
        Story_words.append([story, i])
        if i % batch_size == 0:
            memory_size = max(1, min(max_memory_size, len(story)))
        # the same pad-and-clip logic applies to every dialogue part
        stor = pad_block(story)
        sys_block = pad_block(system)
        wu = pad_block(whole_user)
        ws = pad_block(whole_system)
        res = pad_block(results)
lq = max(0, sentence_size - len(query))
q = [word_idx[w] if w in word_idx else 0 for w in query] + [0] * lq
        Story.append(np.array(stor))
        Query.append(np.array(q))
        System.append(np.array(sys_block))
        Answer.append(np.array(answer))
        WholeU.append(np.array(wu))
        WholeS.append(np.array(ws))
        Results.append(np.array(res))
return Story, Query, System, Answer, WholeU, WholeS, Results, Story_words
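A quick check of the tokenizer above; the expected output follows from the code (lower-casing, stop-word removal, trailing punctuation dropped).

print(tokenize('Bob dropped the apple. Where is the apple?'))
# -> ['bob', 'dropped', 'apple', '.', 'where', 'is', 'apple']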
avg_line_length: 35.176707 | max_line_length: 115 | alphanum_fraction: 0.593675

---
hexsha: 1964d69e2297c10b4fef79191946b508529ed3e0 | size: 913 | ext: py | lang: Python
file: freshdesk/v2/tests/test_customer.py @ FellipeMendonca/python-freshdesk (head d483ffffb593fd597b6e2a8d4b813e1605cbf325) | licenses: ["BSD-2-Clause"]
stars: null | issues: null | forks: null
import datetime
import pytest
from freshdesk.v2.models import Customer
@pytest.fixture
def customer(api):
return api.customers.get_customer('1')
@pytest.fixture
def contact(api):
return api.contacts.get_contact(1)
def test_customer(customer):
assert isinstance(customer, Customer)
assert customer.name == 'ACME Corp.'
assert customer.domains == 'acme.com'
assert customer.cf_custom_key == 'custom_value'
def test_customer_datetime(customer):
assert isinstance(customer.created_at, datetime.datetime)
assert isinstance(customer.updated_at, datetime.datetime)
def test_customer_str(customer):
assert str(customer) == 'ACME Corp.'
def test_customer_repr(customer):
assert repr(customer) == '<Customer \'ACME Corp.\'>'
def test_get_customer_from_contact(api, contact):
customer = api.customers.get_customer_from_contact(contact)
test_customer(customer)
avg_line_length: 22.268293 | max_line_length: 63 | alphanum_fraction: 0.75356

---
hexsha: d5b31f68688aaccaf6abebcb0d7f811fefdd6309 | size: 6,117 | ext: py | lang: Python
file: certbot_dns_websupportsk/dns_websupportsk.py @ JozefGalbicka/certbot-dns-websupportsk (head 1ec2290104348ef8c6abe8b062df44c5f9f5cf0e) | licenses: ["MIT"]
stars: 3 (2021-07-26T20:20:03.000Z – 2021-11-14T10:04:35.000Z) | issues: null | forks: null
# imports for the WebsupportAPI class
import base64
import hashlib
import hmac
import json
import time
from datetime import datetime, timezone
import requests
# imports for the authenticator (json, time and requests are shared with the class above)
import logging
import zope.interface
from certbot import errors
from certbot import interfaces
from certbot.plugins import dns_common
@zope.interface.implementer(interfaces.IAuthenticator)
@zope.interface.provider(interfaces.IPluginFactory)
class Authenticator(dns_common.DNSAuthenticator):
"""DNS Authenticator for Websupport
This Authenticator uses the Websupport Remote REST API to fulfill a dns-01 challenge.
"""
description = "Obtain certificates using a DNS TXT record (if you are using Websupport for DNS)."
ttl = 60
def __init__(self, *args, **kwargs):
super(Authenticator, self).__init__(*args, **kwargs)
self.credentials = None
@classmethod
def add_parser_arguments(cls, add, **kwargs): # pylint: disable=arguments-differ
super(Authenticator, cls).add_parser_arguments(
add, default_propagation_seconds=60
)
add("credentials", help="Websupport credentials INI file.")
def more_info(self): # pylint: disable=missing-docstring,no-self-use
return (
"This plugin configures a DNS TXT record to respond to a dns-01 challenge using "
+ "the Websupport Remote REST API."
)
def _setup_credentials(self):
self.credentials = self._configure_credentials(
"credentials",
"Websupport credentials INI file",
{
"api_key": "API key for Websupport Remote API.",
"secret": "Password for Websupport Remote API.",
"domain": "Domain for dns01 authentication.",
},
)
def _perform(self, domain, validation_name, validation):
self._get_websupport_client().handle_wildcard_auth(validation_name, validation)
def _cleanup(self, domain, validation_name, validation):
self._get_websupport_client().clean_wildcard_auth(validation_name)
def _get_websupport_client(self):
return WebsupportAPI(
self.credentials.conf("api_key"),
self.credentials.conf("secret"),
self.credentials.conf("domain"),
)
def print_json_data(json_data):
print(json.dumps(json_data, indent=2))
class WebsupportAPI:
def __init__(self, api_key, secret, domain):
self.default_path = "/v1/user/self"
self.api = "https://rest.websupport.sk"
self.query = "" # query part is optional and may be empty
self.domain = domain
# creating signature
method = "GET"
timestamp = int(time.time())
canonical_request = "%s %s %s" % (method, self.default_path, timestamp)
signature = hmac.new(bytes(secret, 'UTF-8'), bytes(canonical_request, 'UTF-8'), hashlib.sha1).hexdigest()
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"Date": datetime.fromtimestamp(timestamp, timezone.utc).isoformat()
}
# creating session
self.s = requests.Session()
self.s.auth = (api_key, signature)
self.s.headers.update(headers)
login_response = self.s.get("%s%s%s" % (self.api, self.default_path, self.query)).content
def get_records(self, type_=None, id_=None, name=None, content=None, ttl=None, note=None):
        # create a dict of the arguments passed, filter out None values and the 'self'
        # argument, and strip the trailing "_" from key names (e.g. type_ -> type)
        args = {k.replace("_", ""): v for k, v in locals().items() if v is not None and k != 'self'}
# get data from api
data = json.loads(self.s.get(f"{self.api}{self.default_path}/zone/{self.domain}/record{self.query}").content)
items = data["items"]
records = list()
for item in items:
shared_keys = args.keys() & item.keys()
# intersection dict of shared items
intersection_dict = {k: item[k] for k in shared_keys if item[k] == args[k]}
# record is valid only if all values from args match
            if len(intersection_dict) == len(args):
                records.append(item)
print(f"Getting records, arguments: {args},... found: {len(records)} record(s)")
return records
def create_record(self, type_, name, content, ttl=600, **kwargs):
# print(get_records(type_=type_, name=name, content=content))
args = {k.replace("_", ""): v for k, v in locals().items()}
args.pop('self')
args.pop('kwargs')
args.update(**kwargs)
print(f"Creating record: type:{type_}, name:{name}, content:{content}", end=" ")
print(self.s.post(f"{self.api}{self.default_path}/zone/{self.domain}/record", json=args))
def edit_record(self, id_, **kwargs):
print(f"Editing record: id:{id_}, kwargs:{kwargs}", end=" ")
print(self.s.put(f"{self.api}{self.default_path}/zone/{self.domain}/record/{id_}", json=kwargs))
def delete_record(self, id_):
print(f"Deleting record: id:{id_}", end=" ")
print(self.s.delete(f"{self.api}{self.default_path}/zone/{self.domain}/record/{id_}"))
# return first record found
# TO-DO: add error handling for not found record and multiple records found
def get_record_id(self, type_, name, **kwargs):
record = self.get_records(type_=type_, name=name, **kwargs)
return record[0]['id']
# return record[0]['id'] if len(record) == 1 and type(record) == list else None
def handle_wildcard_auth(self, domain_name, validation_token):
print(f"Certbot passed domain name: {domain_name}")
subdomain = domain_name.replace(f".{self.domain}", "")
self.create_record(type_="TXT", name=subdomain, content=validation_token)
def clean_wildcard_auth(self, domain_name):
subdomain = domain_name.replace(f".{self.domain}", "")
id_ = self.get_record_id("TXT", subdomain)
self.delete_record(id_)
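A usage sketch for the WebsupportAPI class above, e.g. to place and clean up a TXT record by hand; the credentials and domain are placeholders, and constructing the client performs a live login request.

api = WebsupportAPI(api_key='example-key', secret='example-secret', domain='example.sk')  # placeholders
api.create_record(type_='TXT', name='_acme-challenge', content='validation-token')
records = api.get_records(type_='TXT', name='_acme-challenge')
api.delete_record(records[0]['id'])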
avg_line_length: 38.71519 | max_line_length: 117 | alphanum_fraction: 0.646559

---
hexsha: dceada90f6989de8f9ba0f96b89b3409e0affa92 | size: 1,442 | ext: py | lang: Python
file: NER/preprocess.py @ armantsh/allnews-am (head e7e9ec5f65814851843b95d2c245cf4d6f54f4b8) | licenses: ["MIT"]
stars: 1 (2021-11-05T15:05:44.000Z – 2021-11-05T15:05:44.000Z) | issues: 2 (2020-01-14T05:15:23.000Z – 2020-01-16T06:28:25.000Z) | forks: 6 (2020-01-03T21:02:08.000Z – 2020-01-12T16:35:56.000Z)
import sentence
import random
from transformers import AutoTokenizer
def partitionRankings(data, percent):
howManyNumbers = int(round(percent*len(data)))
shuffled = list(data[:])
random.shuffle(shuffled)
return shuffled[howManyNumbers:], shuffled[:howManyNumbers]
def writeInfile(data, filename):
tokenizer = AutoTokenizer.from_pretrained('bert-base-multilingual-cased')
subword_len_counter = 0
with open(filename, 'wt',encoding='utf-8') as f:
        for sent in data:  # 'sent', not 'sentence', which would shadow the imported module
            for (token, key) in sent:
current_subwords_len = len(tokenizer.tokenize(token))
if current_subwords_len == 0:
continue
                if (subword_len_counter + current_subwords_len) > 512:
                    f.write("\n")
                    f.write((token + ' ' + key + '\n'))
                    # start the new block's count at the token just written,
                    # rather than zero, so its subwords are not lost from the count
                    subword_len_counter = current_subwords_len
                    continue
subword_len_counter += current_subwords_len
f.write((token+' '+key+'\n'))
f.write('\n')
if __name__ == '__main__':
traindevgetter=sentence.Sentence('pioner-silver/train.conll03')
testgetter = sentence.Sentence('pioner-silver/dev.conll03')
train,dev=partitionRankings(traindevgetter.tagged_sentences, 0.1)
writeInfile(list(train),'data/train.txt')
writeInfile(list(dev), 'data/dev.txt')
writeInfile(list(testgetter.tagged_sentences), 'data/test.txt')
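Note that partitionRankings returns (remainder, held-out slice), so with percent=0.1 above, train receives roughly 90% of the sentences and dev the other 10%. A small self-contained check:

train_part, dev_part = partitionRankings(list(range(10)), 0.2)
print(len(train_part), len(dev_part))  # 8 2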
avg_line_length: 36.05 | max_line_length: 77 | alphanum_fraction: 0.633842

---
hexsha: 33c900bc33ee0360dcfcabf074b43610ecb3820d | size: 5,896 | ext: py | lang: Python
max_stars: utils/metrics.py @ yairkit/flowstep3d (head d339a8872365ba5a93cce02650ce06b64b41057a) | licenses: ["MIT"] | count: 13 | 2021-04-20T05:45:58.000Z – 2022-03-30T02:52:56.000Z
max_issues: utils/metrics.py @ lizhiqihhh/flowstep3d (head d339a8872365ba5a93cce02650ce06b64b41057a) | licenses: ["MIT"] | count: 3 | 2021-08-25T07:09:24.000Z – 2021-10-21T02:29:01.000Z
max_forks: utils/metrics.py @ lizhiqihhh/flowstep3d (head d339a8872365ba5a93cce02650ce06b64b41057a) | licenses: ["MIT"] | count: 3 | 2021-12-13T03:56:10.000Z – 2022-03-29T13:03:47.000Z
import torch
from pytorch_lightning.metrics import TensorMetric
from typing import Any, Optional
from losses.supervised_losses import *
from losses.unsupervised_losses import *
from losses.common_losses import *
class EPE3D(TensorMetric):
def forward(self, pc_source: torch.Tensor, pc_target: torch.Tensor, pred_flow: torch.Tensor, gt_flow: torch.Tensor) -> torch.Tensor:
epe3d = torch.norm(pred_flow - gt_flow, dim=2).mean()
return epe3d
class Acc3DR(TensorMetric):
def forward(self, pc_source: torch.Tensor, pc_target: torch.Tensor, pred_flow: torch.Tensor, gt_flow: torch.Tensor) -> torch.Tensor:
l2_norm = torch.norm(pred_flow - gt_flow, dim=2)
sf_norm = torch.norm(gt_flow, dim=2)
relative_err = l2_norm / (sf_norm + 1e-4)
acc3d_relax = (torch.logical_or(l2_norm < 0.1, relative_err < 0.1)).float().mean()
return acc3d_relax
class Acc3DS(TensorMetric):
def forward(self, pc_source: torch.Tensor, pc_target: torch.Tensor, pred_flow: torch.Tensor, gt_flow: torch.Tensor) -> torch.Tensor:
l2_norm = torch.norm(pred_flow - gt_flow, dim=2)
sf_norm = torch.norm(gt_flow, dim=2)
relative_err = l2_norm / (sf_norm + 1e-4)
acc3d_strict = (torch.logical_or(l2_norm < 0.05, relative_err < 0.05)).float().mean()
return acc3d_strict
class EPE3DOutliers(TensorMetric):
def forward(self, pc_source: torch.Tensor, pc_target: torch.Tensor, pred_flow: torch.Tensor, gt_flow: torch.Tensor) -> torch.Tensor:
l2_norm = torch.norm(pred_flow - gt_flow, dim=2)
sf_norm = torch.norm(gt_flow, dim=2)
relative_err = l2_norm / (sf_norm + 1e-4)
epe3d_outliers = (torch.logical_or(l2_norm > 0.3, relative_err > 0.1)).float().mean()
return epe3d_outliers
class SupervisedL1LossMetric(TensorMetric):
def __init__(self, name: str, reduce_op: Optional[Any] = None):
super(SupervisedL1LossMetric, self).__init__(name=name, reduce_op=reduce_op)
self.loss = SupervisedL1Loss()
def forward(self, pc_source: torch.Tensor, pc_target: torch.Tensor, pred_flow: torch.Tensor, gt_flow: torch.Tensor) -> torch.Tensor:
loss_metric = self.loss(pc_source, pc_target, pred_flow, gt_flow)
return loss_metric
class SmoothnessLossMetric(TensorMetric):
def __init__(self, smoothness_loss_params, name: str, reduce_op: Optional[Any] = None):
super(SmoothnessLossMetric, self).__init__(name=name, reduce_op=reduce_op)
self.loss = SmoothnessLoss(**smoothness_loss_params)
def forward(self, pc_source: torch.Tensor, pc_target: torch.Tensor, pred_flow: torch.Tensor, gt_flow: torch.Tensor) -> torch.Tensor:
loss_metric = self.loss(pc_source, pred_flow)
return loss_metric
class ChamferLossMetric(TensorMetric):
def __init__(self, chamfer_loss_params, name: str, reduce_op: Optional[Any] = None):
super(ChamferLossMetric, self).__init__(name=name, reduce_op=reduce_op)
self.loss = ChamferLoss(**chamfer_loss_params)
def forward(self, pc_source: torch.Tensor, pc_target: torch.Tensor, pred_flow: torch.Tensor, gt_flow: torch.Tensor) -> torch.Tensor:
loss_metric = self.loss(pc_source, pc_target, pred_flow)
return loss_metric
class SceneFlowMetrics():
"""
An object of relevant metrics for scene flow.
"""
def __init__(self, split: str, loss_params: dict, reduce_op: Optional[Any] = None):
"""
        Initializes a dictionary of metrics for scene flow.
        Keep reduction as 'none' to allow metrics computation per sample.
        Arguments:
            split : a string with the split type, used to allow logging of the same metrics for different splits
            loss_params: loss configuration dictionary
            reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
                       Defaults to sum.
"""
self.metrics = {
split + '_epe3d': EPE3D(name='epe3d', reduce_op=reduce_op),
}
if loss_params['loss_type'] == 'sv_l1_reg':
self.metrics[f'{split}_data_loss'] = SupervisedL1LossMetric(name=f'{split}_data_loss', reduce_op=reduce_op)
self.metrics[f'{split}_smoothness_loss'] = SmoothnessLossMetric(loss_params['smoothness_loss_params'], name=f'{split}_smoothness_loss', reduce_op=reduce_op)
if loss_params['loss_type'] == 'unsup_l1':
self.metrics[f'{split}_chamfer_loss'] = ChamferLossMetric(loss_params['chamfer_loss_params'], name=f'{split}_chamfer_loss', reduce_op=reduce_op)
self.metrics[f'{split}_smoothness_loss'] = SmoothnessLossMetric(loss_params['smoothness_loss_params'], name=f'{split}_smoothness_loss', reduce_op=reduce_op)
if split in ['test', 'val']:
self.metrics[f'{split}_acc3dr'] = Acc3DR(name='acc3dr', reduce_op=reduce_op)
self.metrics[f'{split}_acc3ds'] = Acc3DS(name='acc3ds', reduce_op=reduce_op)
self.metrics[f'{split}_epe3d_outliers'] = EPE3DOutliers(name='epe3d_outliers', reduce_op=reduce_op)
def __call__(self, pc_source: torch.Tensor, pc_target: torch.Tensor, pred_flows: list, gt_flow: torch.Tensor) -> dict:
"""
Compute and scale the resulting metrics
Arguments:
pc_source : a tensor containing source point cloud
pc_target : a tensor containing target point cloud
pred_flows : list of tensors containing model's predictions
gt_flow : a tensor containing ground truth labels
        Return:
            A dictionary of computed metrics
"""
result = {}
for key, metric in self.metrics.items():
for i, pred_flow in enumerate(pred_flows):
val = metric(pc_source, pc_target, pred_flow, gt_flow)
result.update({f'{key}_i#{i}': val})
return result
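EPE3D above is just the mean point-wise L2 norm of the flow error. The same formula, written out with plain torch (independent of the legacy pytorch_lightning TensorMetric API this file targets):

import torch
pred_flow = torch.zeros(2, 128, 3)      # (batch, points, xyz)
gt_flow = torch.full((2, 128, 3), 0.1)  # constant 0.1 offset per axis
epe3d = torch.norm(pred_flow - gt_flow, dim=2).mean()
print(epe3d)  # 0.1 * sqrt(3) ~ 0.1732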
avg_line_length: 49.966102 | max_line_length: 168 | alphanum_fraction: 0.686737

---
hexsha: 00128b39043c4f4a230b61f2fcbfc6aaac1e48db | size: 404 | ext: py | lang: Python
file: src/utils/fileio/File.py @ danerprog/DataArchiver (head aee76318746dcc78a5551ff375860762e68b93ab) | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
class File:
    def __init__(self, filename, mode="r+"):
        self._openedFile = open(filename, mode)
        self._filename = filename
    def file(self):
        return self._openedFile
    def seek(self, index):
        self._openedFile.seek(index)
    def name(self):
        return self._filename
    def close(self):
        self._openedFile.close()
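A usage sketch for the wrapper above; 'example.txt' is a placeholder path, and mode 'w+' is used so the file need not already exist (the default 'r+' requires an existing file).

f = File('example.txt', mode='w+')
f.file().write('hello')
f.seek(0)
print(f.file().read())  # 'hello'
f.close()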
avg_line_length: 23.764706 | max_line_length: 47 | alphanum_fraction: 0.55198

---
hexsha: 9b9e89509a13d1a444813a7d4921ba0609b5757c | size: 476 | ext: py | lang: Python
file: micro_chat/__main__.py @ That-Cool-Coder/micro-chat (head 66e41f3704f4718f214c8528c8bb90607ff2e4b1) | licenses: ["MIT"]
stars: null | issues: null | forks: null
import argparse
from . import config, Server, Client
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--server', help=f'Run a {config.PROGRAM_NAME} server',
action='store_true')
parser.add_argument('-c', '--client', help=f'Run a {config.PROGRAM_NAME} client ' +
'and connect to a server', action='store_true')
args = parser.parse_args()
if args.server:
Server().run()
elif args.client:
Client().run()
else:
parser.print_usage()
quit()
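Invocation sketch, using the package name from the file path above:

# python -m micro_chat --server   run a server
# python -m micro_chat --client   run a client and connect to a server
# python -m micro_chat            print usage and exit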
avg_line_length: 25.052632 | max_line_length: 84 | alphanum_fraction: 0.684874

---
hexsha: 8a5dfb553c2740c7664e92d0e637790797fbf0c0 | size: 8,295 | ext: py | lang: Python
file: venv/Lib/site-packages/psychopy/app/pavlovia_ui/_base.py @ mintzer/pupillometry-rf-back (head cfa86fa984a49dce0123798f8de5b838c02e10d5) | licenses: ["CC-BY-4.0"]
stars: null | issues: null | forks: null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
import wx
import wx.html2
from psychopy.localization import _translate
from psychopy.projects import pavlovia
from psychopy import logging
class BaseFrame(wx.Frame):
def __init__(self, *args, **kwargs):
wx.Frame.__init__(self, *args, **kwargs)
self.Center()
# set up menu bar
self.menuBar = wx.MenuBar()
self.fileMenu = self.makeFileMenu()
self.menuBar.Append(self.fileMenu, _translate('&File'))
self.SetMenuBar(self.menuBar)
def makeFileMenu(self):
fileMenu = wx.Menu()
app = wx.GetApp()
keyCodes = app.keys
# add items to file menu
fileMenu.Append(wx.ID_CLOSE,
_translate("&Close View\t%s") % keyCodes['close'],
_translate("Close current window"))
self.Bind(wx.EVT_MENU, self.closeFrame, id=wx.ID_CLOSE)
# -------------quit
fileMenu.AppendSeparator()
fileMenu.Append(wx.ID_EXIT,
_translate("&Quit\t%s") % keyCodes['quit'],
_translate("Terminate the program"))
self.Bind(wx.EVT_MENU, app.quit, id=wx.ID_EXIT)
return fileMenu
def closeFrame(self, event=None, checkSave=True):
self.Destroy()
def checkSave(self):
"""If the app asks whether everything is safely saved
"""
return True # for OK
class PavloviaMiniBrowser(wx.Dialog):
"""This class is used by to open an internal browser for the user stuff
"""
style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER
def __init__(self, parent, user=None, loginOnly=False, logoutOnly=False,
style=style, *args, **kwargs):
# create the dialog
wx.Dialog.__init__(self, parent, style=style, *args, **kwargs)
# create browser window for authentication
self.browser = wx.html2.WebView.New(self)
self.loginOnly = loginOnly
self.logoutOnly = logoutOnly
self.tokenInfo = {}
# do layout
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.browser, 1, wx.EXPAND, 10)
self.SetSizer(sizer)
if loginOnly:
self.SetSize((600, 600))
else:
self.SetSize((700, 600))
self.CenterOnParent()
# check there is a user (or log them in)
if not user:
self.user = pavlovia.getCurrentSession().user
if not user:
self.login()
if not user:
return None
def logout(self):
self.browser.Bind(wx.html2.EVT_WEBVIEW_LOADED, self.checkForLogoutURL)
self.browser.LoadURL('https://gitlab.pavlovia.org/users/sign_out')
def login(self):
self._loggingIn = True
authURL, state = pavlovia.getAuthURL()
self.browser.Bind(wx.html2.EVT_WEBVIEW_ERROR, self.onConnectionErr)
self.browser.Bind(wx.html2.EVT_WEBVIEW_LOADED, self.checkForLoginURL)
self.browser.LoadURL(authURL)
def setURL(self, url):
self.browser.LoadURL(url)
def gotoUserPage(self):
if self.user:
url = self.user.attributes['web_url']
self.browser.LoadURL(url)
def gotoProjects(self):
self.browser.LoadURL("https://pavlovia.org/projects.html")
def onConnectionErr(self, event):
if 'INET_E_DOWNLOAD_FAILURE' in event.GetString():
self.EndModal(wx.ID_EXIT)
raise Exception("{}: No internet connection available.".format(event.GetString()))
def checkForLoginURL(self, event):
url = event.GetURL()
if 'access_token=' in url:
self.tokenInfo['token'] = self.getParamFromURL(
'access_token', url)
self.tokenInfo['tokenType'] = self.getParamFromURL(
'token_type', url)
self.tokenInfo['state'] = self.getParamFromURL(
'state', url)
self._loggingIn = False # we got a log in
self.browser.Unbind(wx.html2.EVT_WEBVIEW_LOADED)
pavlovia.login(self.tokenInfo['token'])
if self.loginOnly:
self.EndModal(wx.ID_OK)
elif url == 'https://gitlab.pavlovia.org/dashboard/projects':
# this is what happens if the user registered instead of logging in
# try now to do the log in (in the same session)
self.login()
else:
logging.info("OAuthBrowser.onNewURL: {}".format(url))
def checkForLogoutURL(self, event):
url = event.GetURL()
if url == 'https://gitlab.pavlovia.org/users/sign_in':
if self.logoutOnly:
self.EndModal(wx.ID_OK)
def getParamFromURL(self, paramName, url=None):
"""Takes a url and returns the named param"""
if url is None:
url = self.browser.GetCurrentURL()
return url.split(paramName + '=')[1].split('&')[0]
class PavloviaCommitDialog(wx.Dialog):
"""This class will be used to brings up a commit dialog
(if there is anything to commit)"""
def __init__(self, *args, **kwargs):
# pop kwargs for Py2 compatibility
changeInfo = kwargs.pop('changeInfo', '')
initMsg = kwargs.pop('initMsg', '')
super(PavloviaCommitDialog, self).__init__(*args, **kwargs)
# Set Text widgets
wx.Dialog(None, id=wx.ID_ANY, title=_translate("Committing changes"))
self.updatesInfo = wx.StaticText(self, label=changeInfo)
self.commitTitleLbl = wx.StaticText(self, label=_translate('Summary of changes'))
self.commitTitleCtrl = wx.TextCtrl(self, size=(500, -1), value=initMsg)
self.commitDescrLbl = wx.StaticText(self, label=_translate('Details of changes\n (optional)'))
self.commitDescrCtrl = wx.TextCtrl(self, size=(500, 200), style=wx.TE_MULTILINE | wx.TE_AUTO_URL)
# Set buttons
self.btnOK = wx.Button(self, wx.ID_OK)
self.btnCancel = wx.Button(self, wx.ID_CANCEL)
# Format elements
self.setToolTips()
self.setDlgSizers()
def setToolTips(self):
"""Set the tooltips for the dialog widgets"""
self.commitTitleCtrl.SetToolTip(
wx.ToolTip(
_translate("Title summarizing the changes you're making in these files")))
self.commitDescrCtrl.SetToolTip(
wx.ToolTip(
_translate("Optional details about the changes you're making in these files")))
def setDlgSizers(self):
"""
Set the commit dialog sizers and layout.
"""
commitSizer = wx.FlexGridSizer(cols=2, rows=2, vgap=5, hgap=5)
commitSizer.AddMany([(self.commitTitleLbl, 0, wx.ALIGN_RIGHT),
self.commitTitleCtrl,
(self.commitDescrLbl, 0, wx.ALIGN_RIGHT),
self.commitDescrCtrl])
buttonSizer = wx.BoxSizer(wx.HORIZONTAL)
buttonSizer.AddMany([self.btnCancel,
self.btnOK])
# main sizer and layout
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(self.updatesInfo, 0, wx.ALL | wx.EXPAND, border=5)
mainSizer.Add(commitSizer, 1, wx.ALL | wx.EXPAND, border=5)
mainSizer.Add(buttonSizer, 0, wx.ALL | wx.ALIGN_RIGHT, border=5)
self.SetSizerAndFit(mainSizer)
self.Layout()
def ShowCommitDlg(self):
"""Show the commit application-modal dialog
Returns
-------
wx event
"""
return self.ShowModal()
def getCommitMsg(self):
"""
Gets the commit message for the git commit.
Returns
-------
string:
The commit message and description.
If somehow the commit message is blank, a default is given.
"""
if self.commitTitleCtrl.IsEmpty():
commitMsg = "_"
else:
commitMsg = self.commitTitleCtrl.GetValue()
if not self.commitDescrCtrl.IsEmpty():
commitMsg += "\n\n" + self.commitDescrCtrl.GetValue()
return commitMsg
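For reference, getParamFromURL in PavloviaMiniBrowser above is a plain string split; a minimal sketch with a made-up redirect URL:

url = 'https://example.org/cb#access_token=abc123&token_type=bearer&state=xyz'
print(url.split('access_token' + '=')[1].split('&')[0])  # 'abc123'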
avg_line_length: 36.222707 | max_line_length: 105 | alphanum_fraction: 0.602893

---
hexsha: 5575e680c3d407732e2f7bd0167bf6438c3bd2b5 | size: 2,254 | ext: py | lang: Python
file: stippling.py @ anabrtorres/Trabalho-2021-1-GCC218 (head b752e2d8cf6cfe89da59d31e4c288250574d6be5) | licenses: ["Apache-2.0", "CC-BY-4.0", "MIT"]
stars: null | issues: null | forks: null
# Copyright Matthew Mack (c) 2020 under CC-BY 4.0: https://creativecommons.org/licenses/by/4.0/
import os
import sys
cmd = sys.executable
# The filename of the image you want to stipple goes here.
ORIGINAL_IMAGE = "images/ufla-logo.png"
# Enables saving of images.
SAVE_IMAGE = True
# Total number of points to stipple your image with
NUMBER_OF_POINTS = 3072
# Number of iterations for the algorithm to evenly spread out all the points. Increase if it looks like all the points haven't 'settled' after the last few iterations.
NUMBER_OF_ITERATIONS = 25
# Sets the point size of the dots in the final iteration. Currently untested.
POINT_SIZE = "1.0 1.0"
# Size of the window that shows the points and their iterations.
FIGURE_SIZE = 8
# Sets a cutoff point X between black and white (0-255) where any value between X and 255 (white) is considered the 'background' and will not be 'covered' by a dot.
THRESHOLD = 255
# Forces recalculation. Currently untested, so it is best to leave this as True.
FORCE = True
# Display a diagram that shows each iteration of the algorithm, showing the points being arranged into their positions.
INTERACTIVE = False
# Displays the plot of the final iteration. Usually disabled if INTERACTIVE = True, since the diagram will also show the final iteration.
DISPLAY_FINAL_ITERATION = False
# Save the image of the final iteration as a .png file.
SAVE_AS_PNG = True
# Saves the image of the final iteration as a .pdf file.
SAVE_AS_PDF = False
# Saves the position of all points as a numpy array.
SAVE_AS_NPY = False
full_command = " weighted-voronoi-stippler/stippler.py " + ORIGINAL_IMAGE
if(SAVE_IMAGE):
full_command += " --save"
full_command += " --n_point " + str(NUMBER_OF_POINTS)
full_command += " --n_iter " + str(NUMBER_OF_ITERATIONS)
full_command += " --pointsize " + POINT_SIZE
full_command += " --figsize " + str(FIGURE_SIZE)
full_command += " --threshold " + str(THRESHOLD)
if(FORCE):
full_command += " --force"
if(INTERACTIVE):
full_command += " --interactive"
if(DISPLAY_FINAL_ITERATION):
full_command += " --display"
if(SAVE_AS_PNG):
full_command += " --png"
if(SAVE_AS_PDF):
full_command += " --pdf"
if(SAVE_AS_NPY):
full_command += " --npy"
os.system(cmd + full_command)
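With the settings above, the script ends up executing the following command line, where <python> is sys.executable:

# <python> weighted-voronoi-stippler/stippler.py images/ufla-logo.png --save
#     --n_point 3072 --n_iter 25 --pointsize 1.0 1.0 --figsize 8
#     --threshold 255 --force --png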
avg_line_length: 33.147059 | max_line_length: 167 | alphanum_fraction: 0.743567

---
hexsha: 085f10c863fe86b8623da37a0be22321fcdc0ec1 | size: 594 | ext: py | lang: Python
file: lib/galaxy/tool_shed/galaxy_install/migrate/versions/0002_tools.py @ lawrence14701/galaxy (head 7eb2fcb708e7b63e17800c87613ddfa5497c0654) | licenses: ["CC-BY-3.0"]
stars: 4 (2018-10-29T18:34:38.000Z – 2021-09-29T23:30:42.000Z) | issues: 30 (2016-10-20T15:35:12.000Z – 2018-10-02T15:59:54.000Z) | forks: 7 (2016-11-03T19:11:01.000Z – 2020-05-11T14:23:52.000Z)
"""
The Emboss 5.0.0 tools have been eliminated from the distribution and the Emboss datatypes have been removed from
datatypes_conf.xml.sample. You should remove the Emboss datatypes from your version of datatypes_conf.xml. The
repositories named emboss_5 and emboss_datatypes from the main Galaxy tool shed at http://toolshed.g2.bx.psu.edu
will be installed into your local Galaxy instance at the location discussed above by running the following command.
"""
from __future__ import print_function
def upgrade(migrate_engine):
print(__doc__)
def downgrade(migrate_engine):
pass
avg_line_length: 37.125 | max_line_length: 115 | alphanum_fraction: 0.80303

---
hexsha: 88fcc84b8533ec8bf24ee1eebf54e9095a1da389 | size: 937 | ext: py | lang: Python
file: wavefront/test_binner.py @ n1ywb/wavefront (head f061f4254916703ae570b03e9669b25a6f8c1ce6) | licenses: ["MIT"]
stars: null | issues: null | forks: null
from wavefront.binner import Binner, Bin
from wavefront.timebuf import TimeBuffer
from unittest import TestCase
from mock import Mock
element_time = 0.25
class Test_Bin(TestCase):
def test_bin(self):
bin = Bin(size=1, timestamp=0)
bin.add((0,1))
class Test_Binner(TestCase):
def setUp(self):
self.store = dict()
#self.tb = TimeBuffer(size=4, head_time=1, element_time=element_time)
self.binner = Binner(1, self.store)
def test_update_1(self):
        self.assertEqual(self.binner.update(0, tuple(), 4), set())  # assertEqual: assertEquals is deprecated
        updated = self.binner.update(0, [0], 4)
        self.assertEqual(updated, set())
        updated = self.binner.update(-1, [0], 4)
        self.assertEqual(updated.pop().timestamp, 0)
        updated = self.binner.update(1, [0], 4)
        self.assertEqual(updated.pop().timestamp, -1)
def test_update_2(self):
self.binner.update(0, [0, 1, 0], 4)
avg_line_length: 31.233333 | max_line_length: 77 | alphanum_fraction: 0.643543

---
hexsha: 07bfea4dd3c7da31dd35f48cfdf1b8ab0a9d1cb9 | size: 3,737 | ext: py | lang: Python
file: geoNet/extract_geoNet_stations.py @ ucgmsim/Pre-processing (head c4b9ae20a9e5e4f96f930bde29aa15176d9c8b64) | licenses: ["MIT"]
stars: 1 (2020-10-01T10:36:09.000Z – 2020-10-01T10:36:09.000Z) | issues: 38 (2018-08-01T04:25:24.000Z – 2022-03-08T23:37:53.000Z) | forks: 1 (2019-10-17T21:44:14.000Z – 2019-10-17T21:44:14.000Z)
"""
See networks.csv https://github.com/GeoNet/delta/blob/master/network/networks.csv
for what the codes mean. The relevant codes for QuakeCoRE are (I am guessing here)
NZ (included per Brendon's advice)
SM National strong motion network
SC Canterbury regional strong motion network
SB is included in http://info.geonet.org.nz/display/equip/Network+Location+Queries
operational strong motions stations XML file.
SX private sites strong motion.
stations.csv can be found at
https://github.com/GeoNet/delta/tree/master/network
"""
import os
import csv
import numpy as np
def In_southIsland(lon,lat):
"""
define a rectangular box given a centre, coordinates of length and width of the box
(xp, yp) define the new coordinate system
"""
centre_lat = -43.894316
centre_lon = 170.650197
L_lat = -40.877260
L_lon = 174.528072
W_lat = -42.733521
W_lon = 169.254083
theta=-45.
x0 = centre_lon
y0 = centre_lat
x = lon
y = lat
xp = np.cos(theta*np.pi/180.)*(x-x0) -np.sin(theta*np.pi/180.)*(y-y0)
yp = np.sin(theta*np.pi/180.)*(x-x0) +np.cos(theta*np.pi/180.)*(y-y0)
xp_L = np.cos(theta*np.pi/180.)*(L_lon-x0) -np.sin(theta*np.pi/180.)*(L_lat-y0)
yp_L = np.sin(theta*np.pi/180.)*(L_lon-x0) +np.cos(theta*np.pi/180.)*(L_lat-y0)
L=np.sqrt(xp_L**2 + yp_L**2)
xp_W = np.cos(theta*np.pi/180.)*(W_lon-x0) -np.sin(theta*np.pi/180.)*(W_lat-y0)
yp_W = np.sin(theta*np.pi/180.)*(W_lon-x0) +np.cos(theta*np.pi/180.)*(W_lat-y0)
W=np.sqrt(xp_W**2 + yp_W**2)
if np.abs(xp) <= L and np.abs(yp) <= W:
within_southIsland = True
else:
within_southIsland = False
return within_southIsland
def In_northIsland(lon,lat):
"""
define a rectangular box given a centre, coordinates of length and width of the box
(xp, yp) define the new coordinate system
"""
centre_lat = -37.651034
centre_lon = 175.435162
L_lat = -34.104810
L_lon = 172.823488
W_lat = -38.888624
W_lon = 172.315141
theta=-329.
x0 = centre_lon
y0 = centre_lat
x = lon
y = lat
xp = np.cos(theta*np.pi/180.)*(x-x0) -np.sin(theta*np.pi/180.)*(y-y0)
yp = np.sin(theta*np.pi/180.)*(x-x0) +np.cos(theta*np.pi/180.)*(y-y0)
xp_L = np.cos(theta*np.pi/180.)*(L_lon-x0) -np.sin(theta*np.pi/180.)*(L_lat-y0)
yp_L = np.sin(theta*np.pi/180.)*(L_lon-x0) +np.cos(theta*np.pi/180.)*(L_lat-y0)
L=np.sqrt(xp_L**2 + yp_L**2)
xp_W = np.cos(theta*np.pi/180.)*(W_lon-x0) -np.sin(theta*np.pi/180.)*(W_lat-y0)
yp_W = np.sin(theta*np.pi/180.)*(W_lon-x0) +np.cos(theta*np.pi/180.)*(W_lat-y0)
W=np.sqrt(xp_W**2 + yp_W**2)
    if np.abs(xp) <= L and np.abs(yp) <= W:
        within_northIsland = True
    else:
        within_northIsland = False
    return within_northIsland
fname="stations.csv"
f = open("/".join([os.getcwd(),fname]),'r')
fcsv = csv.DictReader(f,delimiter=",")
with open("all_geoNet_stats.ll",'w') as fstats:
for line in fcsv:
#Skip stations that have closed
if line['End Date'] != '9999-01-01T00:00:00Z':
continue
Network = line['Network']
        if Network not in ['NZ', 'SM', 'SC', 'SB', 'SX']:
            continue  # skip stations outside the relevant networks
lon = float(line['Longitude'])
lat = float(line['Latitude'])
        # only save if within mainland NZ
        if not (In_southIsland(lon, lat) or In_northIsland(lon, lat)):
            continue
fstats.write("%10.4f %10.4f %10s\n" %(
lon,
lat,
line['Station'])
)
f.close()
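A quick sanity check of the rotated-box test above: the box centre maps to xp = yp = 0 in the rotated frame, so it is inside by construction.

print(In_southIsland(170.650197, -43.894316))  # True (the South Island box centre)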
avg_line_length: 27.07971 | max_line_length: 87 | alphanum_fraction: 0.596468

---
hexsha: f6336ff4b355ae2ae80f9a1a340e896556d10abf | size: 1,385 | ext: py | lang: Python
file: pygame_gui/core/text/image_layout_rect.py @ lionel42/pygame_gui (head 27b51f5b811b4569bc463566bc9f2d82ada119f6) | licenses: ["MIT"]
stars: null | issues: null | forks: null
from typing import Optional
from pygame.surface import Surface
from pygame.rect import Rect
from pygame.image import load
from pygame_gui.core.text.text_layout_rect import TextLayoutRect, Padding
class ImageLayoutRect(TextLayoutRect):
"""
Represents an image that sits in the text.
"""
def __init__(self, image_path, float_position, padding: Padding):
self.image_path = image_path
self.image_surf = load(image_path)
self.padding = padding
self.size_with_padding = (self.image_surf.get_width() + padding.left + padding.right,
self.image_surf.get_height() + padding.top + padding.bottom)
super().__init__(self.size_with_padding, float_pos=float_position)
def finalise(self,
target_surface: Surface,
target_area: Rect,
row_chunk_origin: int,
row_chunk_height: int,
row_bg_height: int,
x_scroll_offset: int = 0,
letter_end: Optional[int] = None):
blit_rect = self.copy()
blit_rect.width -= (self.padding.left + self.padding.right)
blit_rect.height -= (self.padding.top + self.padding.bottom)
blit_rect.left += self.padding.left
blit_rect.top += self.padding.top
target_surface.blit(self.image_surf, blit_rect, target_area)
avg_line_length: 38.472222 | max_line_length: 94 | alphanum_fraction: 0.644765

---
hexsha: bfe81991faae2379f1745003a5b1f01b282a6d88 | size: 2,268 | ext: py | lang: Python
file: cuml_bench/linear.py @ owerbat/scikit-learn_bench (head 972efac3779578865424515db2897c4b8c71307a) | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
# ===============================================================================
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import argparse
import bench
from cuml import LinearRegression
parser = argparse.ArgumentParser(description='cuML linear regression '
'benchmark')
parser.add_argument('--no-fit-intercept', dest='fit_intercept', default=True,
action='store_false',
help="Don't fit intercept (assume data already centered)")
parser.add_argument('--solver', default='eig', choices=('eig', 'svd'),
help='Solver used for training')
params = bench.parse_args(parser, prefix='cuml')
# Load data
X_train, X_test, y_train, y_test = bench.load_data(
params, generated_data=['X_train', 'y_train'])
# Create our regression object
regr = LinearRegression(fit_intercept=params.fit_intercept,
algorithm=params.solver)
# Time fit
fit_time, _ = bench.measure_function_time(regr.fit, X_train, y_train, params=params)
# Time predict
predict_time, yp = bench.measure_function_time(regr.predict, X_test, params=params)
test_rmse = bench.rmse_score(yp, y_test)
yp = regr.predict(X_train)
train_rmse = bench.rmse_score(yp, y_train)
bench.print_output(library='cuml', algorithm='linear_regression',
stages=['training', 'prediction'], params=params,
functions=['Linear.fit', 'Linear.predict'],
times=[fit_time, predict_time], metric_type='rmse',
metrics=[train_rmse, test_rmse], data=[X_train, X_test],
alg_instance=regr)
avg_line_length: 40.5 | max_line_length: 84 | alphanum_fraction: 0.636684

---
hexsha: 0742b8fcce563d1d23503cbfd5f49a3476d56a23 | size: 5,647 | ext: py | lang: Python
file: aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/ModifySecurityGroupRuleRequest.py @ yndu13/aliyun-openapi-python-sdk (head 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5) | licenses: ["Apache-2.0"]
stars: 1,001 (2015-07-24T01:32:41.000Z – 2022-03-25T01:28:18.000Z) | issues: 363 (2015-10-20T03:15:00.000Z – 2022-03-08T12:26:19.000Z) | forks: 682 (2015-09-22T07:19:02.000Z – 2022-03-22T09:51:46.000Z)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class ModifySecurityGroupRuleRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'ModifySecurityGroupRule','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_NicType(self): # String
return self.get_query_params().get('NicType')
def set_NicType(self, NicType): # String
self.add_query_param('NicType', NicType)
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_SourcePrefixListId(self): # String
return self.get_query_params().get('SourcePrefixListId')
def set_SourcePrefixListId(self, SourcePrefixListId): # String
self.add_query_param('SourcePrefixListId', SourcePrefixListId)
def get_SourcePortRange(self): # String
return self.get_query_params().get('SourcePortRange')
def set_SourcePortRange(self, SourcePortRange): # String
self.add_query_param('SourcePortRange', SourcePortRange)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_SecurityGroupId(self): # String
return self.get_query_params().get('SecurityGroupId')
def set_SecurityGroupId(self, SecurityGroupId): # String
self.add_query_param('SecurityGroupId', SecurityGroupId)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_SourceGroupOwnerId(self): # Long
return self.get_query_params().get('SourceGroupOwnerId')
def set_SourceGroupOwnerId(self, SourceGroupOwnerId): # Long
self.add_query_param('SourceGroupOwnerId', SourceGroupOwnerId)
def get_SourceGroupOwnerAccount(self): # String
return self.get_query_params().get('SourceGroupOwnerAccount')
def set_SourceGroupOwnerAccount(self, SourceGroupOwnerAccount): # String
self.add_query_param('SourceGroupOwnerAccount', SourceGroupOwnerAccount)
def get_Ipv6SourceCidrIp(self): # String
return self.get_query_params().get('Ipv6SourceCidrIp')
def set_Ipv6SourceCidrIp(self, Ipv6SourceCidrIp): # String
self.add_query_param('Ipv6SourceCidrIp', Ipv6SourceCidrIp)
def get_Ipv6DestCidrIp(self): # String
return self.get_query_params().get('Ipv6DestCidrIp')
def set_Ipv6DestCidrIp(self, Ipv6DestCidrIp): # String
self.add_query_param('Ipv6DestCidrIp', Ipv6DestCidrIp)
def get_Policy(self): # String
return self.get_query_params().get('Policy')
def set_Policy(self, Policy): # String
self.add_query_param('Policy', Policy)
def get_PortRange(self): # String
return self.get_query_params().get('PortRange')
def set_PortRange(self, PortRange): # String
self.add_query_param('PortRange', PortRange)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_IpProtocol(self): # String
return self.get_query_params().get('IpProtocol')
def set_IpProtocol(self, IpProtocol): # String
self.add_query_param('IpProtocol', IpProtocol)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_SourceCidrIp(self): # String
return self.get_query_params().get('SourceCidrIp')
def set_SourceCidrIp(self, SourceCidrIp): # String
self.add_query_param('SourceCidrIp', SourceCidrIp)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_Priority(self): # String
return self.get_query_params().get('Priority')
def set_Priority(self, Priority): # String
self.add_query_param('Priority', Priority)
def get_DestCidrIp(self): # String
return self.get_query_params().get('DestCidrIp')
def set_DestCidrIp(self, DestCidrIp): # String
self.add_query_param('DestCidrIp', DestCidrIp)
def get_SourceGroupId(self): # String
return self.get_query_params().get('SourceGroupId')
def set_SourceGroupId(self, SourceGroupId): # String
self.add_query_param('SourceGroupId', SourceGroupId)
avg_line_length: 40.625899 | max_line_length: 82 | alphanum_fraction: 0.765362

---
hexsha: cfe6fbcd360bcc3e35f11bacedeff71eddaaedc5 | size: 10,161 | ext: py | lang: Python
file: tests/BlazingSQLTest/EndToEndTests/useLimitTest.py @ shaunstoltz/blazingsql (head e6459dfd9dff3a2e32ca1fbcd414b9cd193e8730) | licenses: ["Apache-2.0"]
stars: null | issues: 1 (2022-01-21T23:47:01.000Z – 2022-01-21T23:47:01.000Z) | forks: 1 (2020-11-01T12:28:58.000Z – 2020-11-01T12:28:58.000Z)
from blazingsql import DataType
from Configuration import ExecutionMode
from Configuration import Settings as Settings
from DataBase import createSchema as cs
from pynvml import nvmlInit
from Runner import runTest
from Utils import Execution, gpuMemory, init_context, skip_test
queryType = "Limit"
def main(dask_client, drill, spark, dir_data_file, bc, nRals):
start_mem = gpuMemory.capture_gpu_memory_usage()
def executionTest():
tables = ["orders", "customer", "partsupp", "lineitem"]
data_types = [
DataType.DASK_CUDF,
DataType.CUDF,
DataType.CSV,
DataType.ORC,
DataType.PARQUET,
] # TODO json
# Create Tables -----------------------------------------------------
for fileSchemaType in data_types:
if skip_test(dask_client, nRals, fileSchemaType, queryType):
continue
cs.create_tables(bc, dir_data_file, fileSchemaType, tables=tables)
# Run Query ------------------------------------------------------
worder = 0
use_percentage = False
acceptable_difference = 0.01
print("==============================")
print(queryType)
print("==============================")
queryId = "TEST_01"
query = "select o_orderkey from orders order by 1 limit 10"
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_02"
query = """select o_orderdate, o_orderkey, o_clerk from orders
order by o_orderdate, o_orderkey, o_custkey,
o_orderstatus, o_clerk
limit 1000"""
query_spark = """select o_orderdate, o_orderkey, o_clerk from orders
order by o_orderdate nulls last, o_orderkey nulls last,
o_custkey nulls last, o_orderstatus nulls last,
o_clerk nulls last limit 1000"""
if fileSchemaType == DataType.ORC:
runTest.run_query(
bc,
spark,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
query_spark=query_spark,
)
else:
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_03"
query = """select o_orderkey from orders
where o_custkey < 300 and o_orderdate >= '1990-08-01'
order by o_orderkey limit 50"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_04"
query = """select ps_partkey, ps_availqty from partsupp
where ps_availqty < 3 and ps_availqty >= 1
order by ps_partkey, ps_availqty limit 50"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
# queryId = 'TEST_05'
# query = """select o_orderkey, o_orderstatus from orders
# where o_custkey < 10 and o_orderstatus = 'O'
# order by o_orderkey, o_orderstatus limit 50"""
# runTest.run_query(bc, drill, query, queryId, queryType, worder,
# '', acceptable_difference, use_percentage, fileSchemaType)
queryId = "TEST_06"
query = """select orders.o_totalprice, customer.c_name from orders
inner join customer
on orders.o_custkey = customer.c_custkey
order by customer.c_name, orders.o_orderkey limit 10"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_07"
query = """(select l_shipdate, l_orderkey, l_linestatus
from lineitem where l_linenumber = 1
order by 1, 2, 3, l_linenumber limit 10)
union all
(select l_shipdate, l_orderkey, l_linestatus
from lineitem where l_linenumber = 1
order by 1 desc, 2, 3, l_linenumber limit 10)"""
query_spark = """(select l_shipdate, l_orderkey, l_linestatus
from lineitem where l_linenumber = 1
order by 1 nulls last, 2 nulls last, 3 nulls last,
l_linenumber nulls last limit 10)
union all
(select l_shipdate, l_orderkey, l_linestatus
from lineitem where l_linenumber = 1
order by 1 desc nulls first, 2 nulls last, 3 nulls last,
l_linenumber nulls last limit 10)"""
if fileSchemaType == DataType.ORC:
runTest.run_query(
bc,
spark,
query,
queryId,
queryType,
1,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
query_spark=query_spark,
)
else:
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
1,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_08"
query = """select c_custkey from customer
where c_custkey < 0 order by c_custkey limit 40"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_09"
query = """select c_custkey, c_name from customer
where c_custkey < 10 order by 1 limit 30"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_10"
query = """select c_custkey, c_name from customer
where c_custkey < 10 limit 30"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
1,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_11"
query = """select avg(CAST(c_custkey AS DOUBLE)), min(c_custkey)
from customer limit 5"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
1,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
# if Settings.execution_mode == ExecutionMode.GENERATOR:
# print("==============================")
# break
executionTest()
end_mem = gpuMemory.capture_gpu_memory_usage()
gpuMemory.log_memory_usage(queryType, start_mem, end_mem)
if __name__ == "__main__":
Execution.getArgs()
nvmlInit()
drill = "drill" # None
compareResults = True
if "compare_results" in Settings.data["RunSettings"]:
compareResults = Settings.data["RunSettings"]["compare_results"]
if ((Settings.execution_mode == ExecutionMode.FULL and
compareResults == "true") or
Settings.execution_mode == ExecutionMode.GENERATOR):
# Create Table Drill ------------------------------------------------
print("starting drill")
from pydrill.client import PyDrill
drill = PyDrill(host="localhost", port=8047)
cs.init_drill_schema(drill,
Settings.data["TestSettings"]["dataDirectory"])
# Create Context For BlazingSQL
bc, dask_client = init_context()
nRals = Settings.data["RunSettings"]["nRals"]
main(dask_client, drill, Settings.data["TestSettings"]["dataDirectory"],
bc, nRals)
if Settings.execution_mode != ExecutionMode.GENERATOR:
runTest.save_log()
gpuMemory.print_log_gpu_memory()
avg_line_length: 32.99026 | max_line_length: 80 | alphanum_fraction: 0.456156

hexsha: c71d2b9e872be82c2091906daa5c3ff66e6bf163 | size: 998 | ext: py | lang: Python
path: duplyaml/tests/test_serialize.py | repo: peterkmurphy/duplyaml | head_hexsha: 7ce59cacb4ea04c6b12c6abb27a3e1eec2cca0cf | licenses: ["MIT"]
stars: null | issues: null | forks: null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from unittest import TestCase
from duplyaml import *
sn0 = YAMLScalarNode("null", "!!null")
sn1 = YAMLScalarNode("null", "!!null")
sn2 = YAMLScalarNode("", "!!str")
sn3 = YAMLScalarNode("Test", "!!str")
sn4 = YAMLScalarNode("test", "!!str")
sncol = [sn0, sn1, sn2, sn3, sn4]
mn = YAMLMapNode(sncol, sncol, "!!map")
sncol.append(mn)
listicle = YAMLSeqNode(sncol, "!!seq")
ygapher = YAMLGraph("l")
ygapher.add_doc(listicle)
ygapher.add_doc(sn0)
ygapher.add_doc(sn1)
ygapher.add_doc(sn2)
ygapher.add_doc(sn3)
ygapher.add_doc(sn4)
ygapher.add_doc(mn)
Yase = YAMLSerializer(ygapher, YAMLComposer(None))
Yase.serializestream()
from io import StringIO
YAdumpit = YAMLSerializer(ygapher, YAMLDump(StringIO()))
YAdumpit.serializestream()
class TestSerialise(TestCase):
def test_serialisation(self):
testlen = len(ygapher.children)
for i in range(testlen):
self.assertEqual(ygapher.children[i], Yase.yamlgraph.children[i])
avg_line_length: 25.589744 | max_line_length: 77 | alphanum_fraction: 0.711423

hexsha: 221531c4557306b4959c19f9600dd613750ce1e0 | size: 632 | ext: py | lang: Python
path: sdk/python/pulumi_aws/redshift/__init__.py | repo: Otanikotani/pulumi-aws | head_hexsha: 00e2b352da42c5b1b0ec7b4760eec5ad2b23ff21 | licenses: ["ECL-2.0", "Apache-2.0"]
stars: null | issues: null | forks: null
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .cluster import *
from .event_subscription import *
from .get_cluster import *
from .get_orderable_cluster import *
from .get_service_account import *
from .parameter_group import *
from .security_group import *
from .snapshot_copy_grant import *
from .snapshot_schedule import *
from .snapshot_schedule_association import *
from .subnet_group import *
from ._inputs import *
from . import outputs
avg_line_length: 33.263158 | max_line_length: 87 | alphanum_fraction: 0.77057

hexsha: 513a5e49dcc68da44c90ee8bfe47ffe36f64903e | size: 2,079 | ext: py | lang: Python
path: setup.py | repo: IceyMint/dumserver | head_hexsha: c2f10eb602c9890080d72672872a9255aa508c8f | licenses: ["MIT"]
stars: 66 (2018-12-06T05:57:34.000Z to 2022-03-02T15:45:22.000Z) | issues: 41 (2018-12-11T14:50:33.000Z to 2021-11-26T11:18:36.000Z) | forks: 16 (2019-02-08T02:09:27.000Z to 2021-01-26T19:10:21.000Z)
__filename__ = "setup.py"
__author__ = "Bartek Radwanski"
__credits__ = "Bartek Radwanski"
__license__ = "MIT"
__version__ = "0.7.1"
__maintainer__ = "Bartek Radwanski"
__email__ = "bartek.radwanski@gmail.com"
__status__ = "Stable"
import os
from requests import get
def yes_or_no(question):
    reply = str(input(question + ' (y/n): ')).lower().strip()
    if reply.startswith('y'):
        return True
    if reply.startswith('n'):
        return False
    return yes_or_no("Uhhhh... please enter 'y' or 'n'")
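# Illustrative call of the helper above (hypothetical prompt; not part of the
# original script):
#     yes_or_no('Continue?')  # True for "y..."-style input, False for "n..."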
print('\n\n\n')
with open('/tmp/dum.home', 'r') as file:
    dumhome = str(file.read().replace('\n', ''))
print('Dum installed in: ' + dumhome)
print('\n')
ip = get('https://api.ipify.org').text
print('Following Public IP address has been detected:', ip)
if yes_or_no('Would you like to use it for DUM Webclient configuration?'):
pass
else:
ip = str(input("Please input Public IP:")).strip()
# Read in the webclient config file
with open(dumhome + '/webclient/config/default.js', 'r') as file:
filedata = file.read()
# Update the config file
filedata = filedata.replace('PUBLIC_IP', str(ip))
# Write the file out again
with open(dumhome + '/webclient/config/default.js', 'w') as file:
file.write(filedata)
if not os.path.exists(dumhome + '/setup.completed'):
open(str(dumhome + '/setup.completed'), 'a').close()
print('\ndumserver configuration has been completed. You can start using the server with the help of the following commands:')
print('./server-start.sh - Boot up an instance of dumserver. Once up and running, server can be accessed on http://<Your public IP>')
print('./server-status.sh - Check the status of dumserver components')
print('./server-stop.sh - Stop all dumserver components')
print('\n')
print('Note: if dumserver is listening for clients on port 80 (which it is configured to do by default!), it needs to run as root. As such, above scripts need to be invoked with a "sudo" prefix.')
avg_line_length: 34.65 | max_line_length: 196 | alphanum_fraction: 0.669072

hexsha: 6051bc13dbdea8431d5d835e2394680c5cadebdb | size: 683 | ext: py | lang: Python
path: app/core/migrations/0002_tag.py | repo: largodeivis/recipe-app-api | head_hexsha: 95c59cf707099cd82350bfac168a491ac14ecd15 | licenses: ["MIT"]
stars: null | issues: null | forks: null
# Generated by Django 2.1.15 on 2021-09-25 01:27
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
avg_line_length: 28.458333 | max_line_length: 118 | alphanum_fraction: 0.616398

hexsha: c1aa0630a488e9a59fe3c1c27f444dce8e47282c | size: 3,312 | ext: py | lang: Python
path: aliyun-python-sdk-iotcc/aliyunsdkiotcc/request/v20210513/ListServiceEntriesRequest.py | repo: leafcoder/aliyun-openapi-python-sdk | head_hexsha: 26b441ab37a5cda804de475fd5284bab699443f1 | licenses: ["Apache-2.0"]
stars: 1,001 (2015-07-24T01:32:41.000Z to 2022-03-25T01:28:18.000Z) | issues: 363 (2015-10-20T03:15:00.000Z to 2022-03-08T12:26:19.000Z) | forks: 682 (2015-09-22T07:19:02.000Z to 2022-03-22T09:51:46.000Z)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ListServiceEntriesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'IoTCC', '2021-05-13', 'ListServiceEntries','cciot')
self.set_method('POST')
def get_ServiceEntryStatuss(self):
return self.get_query_params().get('ServiceEntryStatus')
def set_ServiceEntryStatuss(self, ServiceEntryStatuss):
for depth1 in range(len(ServiceEntryStatuss)):
if ServiceEntryStatuss[depth1] is not None:
self.add_query_param('ServiceEntryStatus.' + str(depth1 + 1) , ServiceEntryStatuss[depth1])
def get_TargetTypes(self):
return self.get_query_params().get('TargetType')
def set_TargetTypes(self, TargetTypes):
for depth1 in range(len(TargetTypes)):
if TargetTypes[depth1] is not None:
self.add_query_param('TargetType.' + str(depth1 + 1) , TargetTypes[depth1])
def get_ServiceEntryIdss(self):
return self.get_query_params().get('ServiceEntryIds')
def set_ServiceEntryIdss(self, ServiceEntryIdss):
for depth1 in range(len(ServiceEntryIdss)):
if ServiceEntryIdss[depth1] is not None:
self.add_query_param('ServiceEntryIds.' + str(depth1 + 1) , ServiceEntryIdss[depth1])
def get_NextToken(self):
return self.get_query_params().get('NextToken')
def set_NextToken(self,NextToken):
self.add_query_param('NextToken',NextToken)
def get_ServiceEntryNames(self):
return self.get_query_params().get('ServiceEntryName')
def set_ServiceEntryNames(self, ServiceEntryNames):
for depth1 in range(len(ServiceEntryNames)):
if ServiceEntryNames[depth1] is not None:
self.add_query_param('ServiceEntryName.' + str(depth1 + 1) , ServiceEntryNames[depth1])
def get_Targets(self):
return self.get_query_params().get('Target')
def set_Targets(self, Targets):
for depth1 in range(len(Targets)):
if Targets[depth1] is not None:
self.add_query_param('Target.' + str(depth1 + 1) , Targets[depth1])
def get_IoTCloudConnectorId(self):
return self.get_query_params().get('IoTCloudConnectorId')
def set_IoTCloudConnectorId(self,IoTCloudConnectorId):
self.add_query_param('IoTCloudConnectorId',IoTCloudConnectorId)
def get_MaxResults(self):
return self.get_query_params().get('MaxResults')
def set_MaxResults(self,MaxResults):
self.add_query_param('MaxResults',MaxResults)
def get_ServiceId(self):
return self.get_query_params().get('ServiceId')
def set_ServiceId(self,ServiceId):
self.add_query_param('ServiceId',ServiceId)
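# Illustrative usage sketch (hypothetical credentials, region and connector id;
# AcsClient and do_action_with_exception are the standard aliyunsdkcore entry
# points; not part of the generated request class):
#
#     from aliyunsdkcore.client import AcsClient
#     client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-shanghai')
#     request = ListServiceEntriesRequest()
#     request.set_IoTCloudConnectorId('iotcc-xxxxxxxx')
#     request.set_MaxResults(20)
#     response = client.do_action_with_exception(request)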
avg_line_length: 36.8 | max_line_length: 96 | alphanum_fraction: 0.754831

hexsha: 3c04b4331ccf489aa4c96d75ed8a6536a18e4bbb | size: 418 | ext: py | lang: Python
path: exercise/migrations/0005_auto_20180523_1338.py | repo: jkimbo/phishtray | head_hexsha: 089c778f38f96b611d69b7679cb7d4a8f6effb0c | licenses: ["MIT"]
stars: null | issues: null | forks: null
# Generated by Django 2.0.5 on 2018-05-23 13:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('exercise', '0004_exercisekey_exercise'),
]
operations = [
migrations.AlterField(
model_name='exercisekey',
name='type',
field=models.IntegerField(choices=[(0, 'number'), (1, 'text')]),
),
]
avg_line_length: 22 | max_line_length: 76 | alphanum_fraction: 0.590909

hexsha: cd106033c9f593e3b3e6606bdac9795dfb4d7587 | size: 1,272 | ext: py | lang: Python
path: blog/migrations/0001_initial.py | repo: hbvj99/blog | head_hexsha: e718818b03de5372eebfeb9bff887e3c59e822d7 | licenses: ["Unlicense"]
stars: null | issues: 4 (2021-03-30T14:01:02.000Z to 2022-03-12T00:42:06.000Z) | forks: null
# Generated by Django 3.0.8 on 2020-07-24 15:20
import ckeditor.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=80)),
('slug', models.SlugField(blank=True, editable=False, max_length=90, null=True, unique=True)),
('description', ckeditor.fields.RichTextField(max_length=1500)),
('image', models.ImageField(blank=True, null=True, upload_to='articles/%Y/%m/%d/')),
('created_at', models.DateField(auto_now_add=True)),
('updated_at', models.DateField(auto_now_add=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-updated_at'],
},
),
]
avg_line_length: 36.342857 | max_line_length: 118 | alphanum_fraction: 0.610849

hexsha: 1ad9439a9167ab87c1ed950309e0c760d6025538 | size: 459 | ext: py | lang: Python
path: python/sourcing/rss_util.py | repo: supratikchatterjee16/nasty | head_hexsha: 3d47b073c80b752d78556968096069d379319f84 | licenses: ["Apache-2.0"]
stars: 1 (2019-04-12T04:05:53.000Z to 2019-04-12T04:05:53.000Z) | issues: null | forks: 1 (2019-05-06T07:27:53.000Z to 2019-05-06T07:27:53.000Z)
import requests
import feedparser
import re
class RSS():
def __init__(self, url):
try:
page = requests.get(url)
content = page.content
rss = feedparser.parse(content)
self.url = url
self.feed = rss["feed"]
self.items = rss["entries"]
self.valid = False
if len(rss["items"]) != 0:
self.valid = True
except Exception as e:
            print(url + " " + str(e))
rss = "http://feeds.bbci.co.uk/news/world/europe/rss.xml"
print("rss_util imported")
avg_line_length: 21.857143 | max_line_length: 56 | alphanum_fraction: 0.657952

hexsha: 1f805fe41d04c00b283b653907460a9da458b7a5 | size: 1,372 | ext: py | lang: Python
path: django_server/olmap/models/base.py | repo: ForumViriumHelsinki/OLMap | head_hexsha: f700a8c84b87714af68840a37cfd8667dde6758c | licenses: ["MIT"]
stars: null | issues: 39 (2020-12-30T11:03:49.000Z to 2021-11-10T12:16:29.000Z) | forks: 1 (2020-12-08T13:19:33.000Z to 2020-12-08T13:19:33.000Z)
import re
import geocoder
from django.db import models
from django.utils.translation import gettext_lazy as _
class Model(models.Model):
class Meta:
abstract = True
def __str__(self):
return (self.id and f'{self.__class__.__name__}({self.id})') or f'New {self.__class__.__name__}'
class TimestampedModel(Model):
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Address(TimestampedModel):
street = models.CharField(max_length=64, blank=True)
housenumber = models.CharField(max_length=8, blank=True, null=True, help_text='E.g. 3-5')
postal_code = models.CharField(verbose_name=_('postal code'), max_length=16)
city = models.CharField(verbose_name=_('city'), max_length=64)
country = models.CharField(verbose_name=_('country'), max_length=64)
lat = models.DecimalField(max_digits=11, decimal_places=8, null=True, blank=True)
lon = models.DecimalField(max_digits=11, decimal_places=8, null=True, blank=True)
street_address_regex = re.compile(r'^(?P<street>.+?) +(?P<housenumber>[\d\-]+[a-z]?) *((?P<unit>[A-Z]{1,2})[ ,])?.*$')
class Meta:
verbose_name = _('address')
verbose_name_plural = _('addresses')
def __str__(self):
return f'{self.street} {self.housenumber}'
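# Illustrative check of street_address_regex (hypothetical address; not part of
# the original module):
#     m = Address.street_address_regex.match('Mannerheimintie 3-5 B, Helsinki')
#     m.group('street'), m.group('housenumber'), m.group('unit')
#     # -> ('Mannerheimintie', '3-5', 'B')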
avg_line_length: 31.906977 | max_line_length: 122 | alphanum_fraction: 0.690962

hexsha: 832ebd30fca2fdf58bef4c96dd839e671beab46c | size: 3,187 | ext: py | lang: Python
path: bucky/npi.py | repo: marounbs/pa-ocha-bucky | head_hexsha: b76bc221bff0cc982bcd23ad6f89789c3204b890 | licenses: ["MIT"]
stars: 19 (2020-10-28T14:35:01.000Z to 2022-02-22T14:38:52.000Z) | issues: null | forks: 8 (2020-10-20T12:34:01.000Z to 2021-05-05T10:54:36.000Z)
"""Module to parse npi csv files"""
import datetime
import logging
import numpy as np
import pandas as pd
from .util import remove_chars
def read_npi_file(fname, start_date, end_t, adm2_map, disable_npi=False):
"""TODO Description.
Parameters
----------
fname : string
Filename of NPI file
start_date : string
Start date to use
end_t : int
Number of days after start date
adm2_map : NumPy array
Array of adm2 IDs
disable_npi : bool (default: False)
Bool indicating whether NPIs should be disabled
Returns
-------
    npi_params : dict
        Dictionary with keys "r0_reduct", "mobility_reduct" and
        "contact_weights"; each value covers every simulation day for
        every location in adm2_map.
"""
# filter by overlap with simulation date range
df = pd.read_csv(fname)
df["date"] = pd.to_datetime(df.date) # force a parse in case it's an odd format
# rename adm2 column b/c people keep using different names
df = df.rename(columns={"admin2": "adm2", "FIPS": "adm2"})
end_date = start_date + datetime.timedelta(days=end_t)
mask = (df["date"] >= str(start_date)) & (df["date"] <= str(end_date))
# If npi file isn't up to date just use last known value
if np.all(~mask):
max_npi_date = df["date"].max()
mask = df["date"] == max_npi_date
df = df.loc[mask]
npi_params = {}
r0_reductions = []
mobility_reductions = []
contact_weights = []
# 1st dimension is date, 2nd is admin2 code
for _, group in df.sort_values(by=["date"]).groupby("date"):
# convert adm2 id to int
group["adm2"] = group.adm2.apply(remove_chars).astype(int)
date_group = group.set_index("adm2").reindex(adm2_map)
r0_reduction = np.array(date_group[["r0_reduction"]])
mobility_reduction = np.array(date_group[["mobility_reduction"]])
contact_weight = np.array(date_group[["home", "other_locations", "school", "work"]])
r0_reductions.append(r0_reduction)
mobility_reductions.append(mobility_reduction)
contact_weights.append(contact_weight)
npi_params["r0_reduct"] = np.array(r0_reductions)
npi_params["mobility_reduct"] = np.array(mobility_reductions)
npi_params["contact_weights"] = np.array(contact_weights)
for key, value in npi_params.items():
logging.debug(str(key) + str(value.shape))
# forward fill with last defined date
tmp = np.repeat(value[-1][None, ...], end_t + 1 - value.shape[0], axis=0)
npi_params[key] = np.squeeze(np.concatenate((value, tmp), axis=0))
if disable_npi:
npi_params["mobility_reduct"].fill(1.0)
npi_params["contact_weights"].fill(1.0)
npi_params["r0_reduct"] = 1.0 / npi_params["r0_reduct"]
else:
# rescale the r0 scaling such that it's 1 on the first day because the doubling time is set
# to match case history @ that date (i.e, it's not unmitigated, it's 'currently mitigated')
# This doesn't need to happen for Cij or Aij
npi_params["r0_reduct"] /= npi_params["r0_reduct"][0]
# Fill any missing values with 1. (in case we don't have all the adm2 in the file)
for k in npi_params:
npi_params[k] = np.nan_to_num(npi_params[k], nan=1.0)
return npi_params
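# Illustrative usage sketch (hypothetical file name and adm2 ids; not part of
# the original module):
#     import datetime
#     adm2_map = np.array([1001, 1003, 1005])
#     params = read_npi_file("npi.csv", datetime.date(2020, 4, 1), 30, adm2_map)
#     params["r0_reduct"].shape  # -> (31, 3): one row per day, one column per adm2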
avg_line_length: 36.215909 | max_line_length: 99 | alphanum_fraction: 0.6492

hexsha: 98a6c8ebfdbedc1c3d8138e4b7967634655f19ff | size: 445 | ext: py | lang: Python
path: apps/alert/migrations/0002_ding_alias.py | repo: Balro/lark | head_hexsha: 35581c766dfa2c959c020bcc9a3d5df9a8d8d58d | licenses: ["Apache-2.0"]
stars: 1 (2020-06-18T10:13:29.000Z to 2020-06-18T10:13:29.000Z) | issues: 2 (2020-06-05T22:08:25.000Z to 2021-06-10T21:43:31.000Z) | forks: null
# Generated by Django 2.1.8 on 2019-07-15 12:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('alert', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='ding',
name='alias',
field=models.CharField(db_index=True, default='a', max_length=50, unique=True),
preserve_default=False,
),
]
avg_line_length: 22.25 | max_line_length: 91 | alphanum_fraction: 0.593258

hexsha: fefe82951548b4f085a3e7ac500030865638673b | size: 10,020 | ext: py | lang: Python
path: apps/markets3/models.py | repo: uktrade/enav-alpha (forks record: UKTradeInvestment/enav-alpha) | head_hexsha: 8d38f05763367ca6b6747203241f267612fd6e44 | licenses: ["MIT"]
stars: null | issues: 67 (2016-07-11T12:57:58.000Z to 2016-08-08T12:59:19.000Z) | forks: null
from __future__ import unicode_literals
import base64
import datetime
from django.db import models
from django.utils import timezone
from ckeditor.fields import RichTextField
# SAMPLE DATA
PLATFORM_BRAND_POSITION = (
('0', 'Luxury'),
    ('1', 'Mid range'),
('2', 'Discount')
)
LOGISTICS_MODELS = (
('0', 'Dropshipping'),
('1', 'Warehousing'),
('2', 'Other')
)
# SAMPLE DATA
# Pulled from https://en.wikipedia.org/wiki/ISO_639
LISTING_LANGUAGES = (
('0', 'English (eng)'),
('1', 'Spanish (spa)'),
    ('2', 'Chinese (cdo)')
)
BOOLEAN = (
('0', 'No'),
('1', 'Yes')
)
class ProductCategory(models.Model):
name = models.CharField(max_length=200)
def __str__(self):
return "{0}".format(self.name)
class Meta:
ordering = ('name',)
class Logo(models.Model):
name = models.CharField(max_length=200)
_encoded_data = models.TextField()
def base64_logo(self):
return self._encoded_data
def __str__(self):
return "{0}".format(self.name)
class Region(models.Model):
name = models.CharField(max_length=200, blank=True, null=True)
def __str__(self):
return "{0}".format(self.name)
class Meta:
ordering = ('-name',)
class Country(models.Model):
name = models.CharField(max_length=200, blank=True, null=True)
region = models.ForeignKey(Region)
def __str__(self):
return "{0}".format(self.name)
class Meta:
ordering = ('-name',)
class Market(models.Model):
last_modified = models.DateTimeField(auto_now=True)
# Trading name of the marketplace
name = models.CharField(max_length=200, null=True, blank=True)
# Description of the marketplace suitable for a seller.
description = models.CharField(max_length=200, null=True, blank=True)
# URL of the market
web_address = models.URLField(max_length=200, blank=True, null=True)
# Image of the marketplace logo
logo = models.ForeignKey('Logo', null=True, blank=True)
# Country where the marketplace is based
country = models.CharField(max_length=200, blank=True, null=True)
# That countries that have buyers for the marketplace
countries_served = models.ManyToManyField(Country)
# Industry standard for product categories.
product_categories = models.ManyToManyField(ProductCategory)
# Do they provide local customer services
    local_customer_service = models.CharField(choices=BOOLEAN, max_length=1, blank=False, default='0')
local_customer_service_notes = models.CharField(max_length=200, blank=True, null=True, verbose_name='notes')
# Structure of the logistics and fulfillment for the e-marketplace.
logistics_structure = models.CharField(choices=LOGISTICS_MODELS, max_length=1, null=True, blank=True)
# Product type
product_type = models.CharField(choices=PLATFORM_BRAND_POSITION, max_length=1, null=True, blank=True)
# Uses the field product_categories, for each category provides a demand value
product_category_demand = models.CommaSeparatedIntegerField(max_length=500, blank=True, null=True)
# The number of buyers, sellers on a marketplace.
size = RichTextField(null=True, blank=True)
# The number of buyers, sellers for a particular product/product category on a marketplace.
product_category_size = models.CommaSeparatedIntegerField(max_length=10000, blank=True, null=True)
# Number of users going to the website per day on average.
web_traffic_to_site = RichTextField(null=True, blank=True)
# Number of users bouncing from the website per day on average.
web_traffic_to_bounce = RichTextField(null=True, blank=True)
# Structure of the fees and costs for sellers on the marketplace.
fee_pricing_structure = RichTextField(null=True, blank=True)
# Terms in place for sellers to receive payment from e-marketplace
payment_terms = RichTextField(null=True, blank=True)
# Type of support offered to sellers on the e-marketplace.
seller_support_structure = RichTextField(null=True, blank=True)
# Translation services offered for communication between buyers and sellers
# and/or translation of product/marketing material for a site.
translation_services = RichTextField(null=True, blank=True)
# Customer service offered to buyers on the e-marketplace
buyers_customer_service = RichTextField(null=True, blank=True)
# Details of the merchandising offer and associated costs involved
# (fe. marketing, feature to bump your product up on listings)
merchandising_offer_cost = RichTextField(null=True, blank=True)
# The payment methods for buyers on the e-marketplace. (fe. Card, PayPal)
payment_methods = RichTextField(null=True, blank=True)
# Languages offered for listing products on the e-marketplace
listing_languages = RichTextField(max_length=500, blank=True, null=True)
# The number of other sellers for a product/product category on the e-marketplace.
product_visibility = RichTextField(null=True, blank=True)
# The types of sellers for product/product category on the e-marketplace.
competitor_comparison = RichTextField(null=True, blank=True)
# What terms has been negotiated on behalf of UK Businesses by UKTI
ukti_terms = RichTextField(null=True, blank=True)
# Marketplace contacts which are supplied from UKTI for sellers.
contact_details = RichTextField(null=True, blank=True)
# List of steps a seller needs to go through to sell on the platform.
shop_analytics = RichTextField(null=True, blank=True)
# Tailoring options, themes, etc.
customization = RichTextField(null=True, blank=True)
# Details of social media integrations
social_media_integration = RichTextField(null=True, blank=True)
# Details of product promotion options
product_promotion_options = RichTextField(null=True, blank=True)
# Reviews, ratings, etc.
feedback_system = RichTextField(null=True, blank=True)
# Revenue of the business
revenue = RichTextField(null=True, blank=True)
# Parent company name
parent_company_name = RichTextField(null=True, blank=True)
# Platform target market
platform_target_market = RichTextField(null=True, blank=True)
# Product feedback system
product_feedback_system = RichTextField(null=True, blank=True)
# The application process for signing up
seller_application_process = RichTextField(null=True, blank=True)
# The subscription fee of the platform
subscription_fees = RichTextField(null=True, blank=True)
# The registration fee of the platform
registration_fees = RichTextField(null=True, blank=True)
# Additional operating fees of the platform
additional_fees = RichTextField(null=True, blank=True)
# Referral fee of the platform
referral_fees = RichTextField(null=True, blank=True)
# Prohibited items of the platform
prohibited_items = RichTextField(null=True, blank=True)
# Logistics options
logistics_options = RichTextField(null=True, blank=True)
# Local laws related to the countries in which you want to ship to
local_laws = RichTextField(null=True, blank=True)
# Platform signup
platform_signup = RichTextField(null=True, blank=True)
# General things to consider
things_to_consider = RichTextField(null=True, blank=True)
# Platform type eg shopfront or catalogue
platform_type = models.CharField(max_length=255, null=True, blank=True)
web_traffic = models.CharField(max_length=30, null=True, blank=True)
# Misc fields
misc1 = RichTextField(null=True, blank=True, help_text='')
misc2 = RichTextField(null=True, blank=True, help_text='')
misc3 = RichTextField(null=True, blank=True, help_text='')
misc4 = RichTextField(null=True, blank=True, help_text='')
misc5 = RichTextField(null=True, blank=True, help_text='')
misc6 = RichTextField(null=True, blank=True, help_text='')
misc7 = RichTextField(null=True, blank=True, help_text='')
misc8 = RichTextField(null=True, blank=True, help_text='')
misc9 = RichTextField(null=True, blank=True, help_text='')
misc10 = RichTextField(null=True, blank=True, help_text='Website traffic - grey box1')
misc11 = RichTextField(null=True, blank=True, help_text='Website traffic - grey box2')
misc12 = RichTextField(null=True, blank=True, help_text='Website traffic - grey box3')
misc13 = RichTextField(null=True, blank=True, help_text='Website traffic - grey box4')
misc14 = RichTextField(null=True, blank=True, help_text='Demographic profile')
misc15 = RichTextField(null=True, blank=True, help_text='Product upload process')
misc16 = RichTextField(null=True, blank=True, help_text='Customer support')
misc17 = RichTextField(null=True, blank=True, help_text='Local return address (Yes/No)')
misc18 = RichTextField(null=True, blank=True, help_text='Return rates')
misc19 = RichTextField(null=True, blank=True, help_text='Marketing and merchandising')
misc20 = RichTextField(null=True, blank=True, help_text='Local incorporation')
misc21 = RichTextField(null=True, blank=True, help_text='Local bank account')
misc22 = RichTextField(null=True, blank=True, help_text='Exclusivity')
misc23 = RichTextField(null=True, blank=True, help_text='Translation')
misc24 = RichTextField(null=True, blank=True, help_text='Payment time')
misc25 = RichTextField(null=True, blank=True, help_text='Exchange rate')
misc26 = RichTextField(null=True, blank=True, help_text='Bond required')
misc27 = RichTextField(null=True, blank=True, help_text='')
misc28 = RichTextField(null=True, blank=True, help_text='')
misc29 = RichTextField(null=True, blank=True, help_text='')
def __str__(self):
return "{0} {1}".format(self.country, self.name)
class Meta:
ordering = ('country',)
class OldMarket(Market):
class Meta:
proxy = True
verbose_name = "Market - deprecated"
verbose_name_plural = "Markets - deprecated"
avg_line_length: 44.140969 | max_line_length: 112 | alphanum_fraction: 0.725749

hexsha: 24fa6daee42e83a11bfb3a089f57778b3b32c239 | size: 815 | ext: py | lang: Python
path: expression_data/data/__init__.py | repo: davebridges/expression-data-server | head_hexsha: 7f70fd5d5a9569a315716c389f828b17a487fdbc | licenses: ["BSD-2-Clause"]
stars: 1 (2015-08-25T10:16:31.000Z to 2015-08-25T10:16:31.000Z) | issues: null | forks: null
"""This app contains the views and models for expression data
Expression Objects
------------------
For microarray results (metadata in :class:`~experiments.models.MicroArrayExperiment`) there is only *gene* level data, as specified by the specific probe used.
For RNAseq results (metadata in :class:`~experiments.models.mRNASeqExperiment`), there is aggregated data at the level of the *gene*, *transcript*, *exon*, *promoter* and *splice site*.
Currently we are only able to work with gene level data.
Types of Data
-------------
The database can contain two types of data:
* SampleData level data, such as how many (hopefully normalized) counts are in each sample for each gene.
* ExperimentData, which includes the average counts for each group as well as statistical tests for differential expression.
"""
avg_line_length: 47.941176 | max_line_length: 185 | alphanum_fraction: 0.752147

hexsha: 67bf19b76f0ef0fd295ec0734118edb7b1cea705 | size: 20,188 | ext: py | lang: Python
path: moldyn/ui/qt/create_model.py | repo: open-molecular-dynamics/moldyn | head_hexsha: 95a8be53981a2159a75e82c7321136bebc674bc8 | licenses: ["MIT"]
stars: 2 (2019-07-15T08:36:50.000Z to 2019-08-11T11:47:30.000Z) | issues: 1 (2020-01-13T15:35:35.000Z to 2020-01-13T15:35:35.000Z) | forks: 1 (2019-07-15T08:34:29.000Z to 2019-07-15T08:34:29.000Z)
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'create_model.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_CreateModel(object):
def setupUi(self, CreateModel):
CreateModel.setObjectName("CreateModel")
CreateModel.resize(508, 537)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(CreateModel.sizePolicy().hasHeightForWidth())
CreateModel.setSizePolicy(sizePolicy)
CreateModel.setWizardStyle(QtWidgets.QWizard.ModernStyle)
self.speciesPage = QtWidgets.QWizardPage()
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.speciesPage.sizePolicy().hasHeightForWidth())
self.speciesPage.setSizePolicy(sizePolicy)
self.speciesPage.setObjectName("speciesPage")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.speciesPage)
self.horizontalLayout.setObjectName("horizontalLayout")
self.groupBox_2 = QtWidgets.QGroupBox(self.speciesPage)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_2.sizePolicy().hasHeightForWidth())
self.groupBox_2.setSizePolicy(sizePolicy)
self.groupBox_2.setObjectName("groupBox_2")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.groupBox_2)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.layout_a = QtWidgets.QVBoxLayout()
self.layout_a.setObjectName("layout_a")
self.verticalLayout_2.addLayout(self.layout_a)
self.horizontalLayout.addWidget(self.groupBox_2)
self.groupBox_1 = QtWidgets.QGroupBox(self.speciesPage)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_1.sizePolicy().hasHeightForWidth())
self.groupBox_1.setSizePolicy(sizePolicy)
self.groupBox_1.setObjectName("groupBox_1")
self.verticalLayout = QtWidgets.QVBoxLayout(self.groupBox_1)
self.verticalLayout.setObjectName("verticalLayout")
self.layout_b = QtWidgets.QVBoxLayout()
self.layout_b.setObjectName("layout_b")
self.verticalLayout.addLayout(self.layout_b)
self.horizontalLayout.addWidget(self.groupBox_1)
CreateModel.addPage(self.speciesPage)
self.spatialPage = QtWidgets.QWizardPage()
self.spatialPage.setObjectName("spatialPage")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.spatialPage)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.groupBox = QtWidgets.QGroupBox(self.spatialPage)
self.groupBox.setObjectName("groupBox")
self.formLayout_2 = QtWidgets.QFormLayout(self.groupBox)
self.formLayout_2.setObjectName("formLayout_2")
self.gridWidthNumberOfAtomsLabel = QtWidgets.QLabel(self.groupBox)
self.gridWidthNumberOfAtomsLabel.setObjectName("gridWidthNumberOfAtomsLabel")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.gridWidthNumberOfAtomsLabel)
self.gridWidth = QtWidgets.QSpinBox(self.groupBox)
self.gridWidth.setMaximum(100000000)
self.gridWidth.setSingleStep(20)
self.gridWidth.setProperty("value", 100)
self.gridWidth.setObjectName("gridWidth")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.gridWidth)
self.gridHeightNumberOfAtomsLabel = QtWidgets.QLabel(self.groupBox)
self.gridHeightNumberOfAtomsLabel.setObjectName("gridHeightNumberOfAtomsLabel")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.gridHeightNumberOfAtomsLabel)
self.gridHeight = QtWidgets.QSpinBox(self.groupBox)
self.gridHeight.setReadOnly(True)
self.gridHeight.setMaximum(100000000)
self.gridHeight.setSingleStep(20)
self.gridHeight.setProperty("value", 100)
self.gridHeight.setObjectName("gridHeight")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.gridHeight)
self.keepRatioLabel = QtWidgets.QLabel(self.groupBox)
self.keepRatioLabel.setObjectName("keepRatioLabel")
self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.keepRatioLabel)
self.keepRatioCheckBox = QtWidgets.QCheckBox(self.groupBox)
self.keepRatioCheckBox.setChecked(True)
self.keepRatioCheckBox.setObjectName("keepRatioCheckBox")
self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.keepRatioCheckBox)
self.distanceBetweenAtomsLabel = QtWidgets.QLabel(self.groupBox)
self.distanceBetweenAtomsLabel.setObjectName("distanceBetweenAtomsLabel")
self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.distanceBetweenAtomsLabel)
self.distanceBetweenAtoms = QtWidgets.QLineEdit(self.groupBox)
self.distanceBetweenAtoms.setText("")
self.distanceBetweenAtoms.setClearButtonEnabled(False)
self.distanceBetweenAtoms.setObjectName("distanceBetweenAtoms")
self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.distanceBetweenAtoms)
self.label = QtWidgets.QLabel(self.groupBox)
self.label.setObjectName("label")
self.formLayout_2.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.label)
self.label_atom_number = QtWidgets.QLabel(self.groupBox)
self.label_atom_number.setObjectName("label_atom_number")
self.formLayout_2.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.label_atom_number)
self.boxWidthLabel = QtWidgets.QLabel(self.groupBox)
self.boxWidthLabel.setObjectName("boxWidthLabel")
self.formLayout_2.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.boxWidthLabel)
self.boxWidthLineEdit = QtWidgets.QLineEdit(self.groupBox)
self.boxWidthLineEdit.setObjectName("boxWidthLineEdit")
self.formLayout_2.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.boxWidthLineEdit)
self.verticalLayout_3.addWidget(self.groupBox)
self.groupBox_4 = QtWidgets.QGroupBox(self.spatialPage)
self.groupBox_4.setObjectName("groupBox_4")
self.formLayout_3 = QtWidgets.QFormLayout(self.groupBox_4)
self.formLayout_3.setObjectName("formLayout_3")
self.xPeriodicBoundariesLabel = QtWidgets.QLabel(self.groupBox_4)
self.xPeriodicBoundariesLabel.setObjectName("xPeriodicBoundariesLabel")
self.formLayout_3.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.xPeriodicBoundariesLabel)
self.xPeriodicBoundariesCheckBox = QtWidgets.QCheckBox(self.groupBox_4)
self.xPeriodicBoundariesCheckBox.setChecked(True)
self.xPeriodicBoundariesCheckBox.setObjectName("xPeriodicBoundariesCheckBox")
self.formLayout_3.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.xPeriodicBoundariesCheckBox)
self.yPeriodicBoudariesLabel = QtWidgets.QLabel(self.groupBox_4)
self.yPeriodicBoudariesLabel.setObjectName("yPeriodicBoudariesLabel")
self.formLayout_3.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.yPeriodicBoudariesLabel)
self.yPeriodicBoudariesCheckBox = QtWidgets.QCheckBox(self.groupBox_4)
self.yPeriodicBoudariesCheckBox.setChecked(True)
self.yPeriodicBoudariesCheckBox.setObjectName("yPeriodicBoudariesCheckBox")
self.formLayout_3.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.yPeriodicBoudariesCheckBox)
self.verticalLayout_3.addWidget(self.groupBox_4)
self.groupBox_3 = QtWidgets.QGroupBox(self.spatialPage)
self.groupBox_3.setObjectName("groupBox_3")
self.formLayout = QtWidgets.QFormLayout(self.groupBox_3)
self.formLayout.setObjectName("formLayout")
self.firstSpeciesMoleFractionLabel = QtWidgets.QLabel(self.groupBox_3)
self.firstSpeciesMoleFractionLabel.setObjectName("firstSpeciesMoleFractionLabel")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.firstSpeciesMoleFractionLabel)
self.firstSpeciesMoleFraction = QtWidgets.QDoubleSpinBox(self.groupBox_3)
self.firstSpeciesMoleFraction.setMaximum(1.0)
self.firstSpeciesMoleFraction.setSingleStep(0.1)
self.firstSpeciesMoleFraction.setProperty("value", 1.0)
self.firstSpeciesMoleFraction.setObjectName("firstSpeciesMoleFraction")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.firstSpeciesMoleFraction)
self.verticalLayout_3.addWidget(self.groupBox_3)
self.previewButton = QtWidgets.QPushButton(self.spatialPage)
self.previewButton.setObjectName("previewButton")
self.verticalLayout_3.addWidget(self.previewButton)
CreateModel.addPage(self.spatialPage)
self.otherPage = QtWidgets.QWizardPage()
self.otherPage.setObjectName("otherPage")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.otherPage)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.groupBox_5 = QtWidgets.QGroupBox(self.otherPage)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_5.sizePolicy().hasHeightForWidth())
self.groupBox_5.setSizePolicy(sizePolicy)
self.groupBox_5.setObjectName("groupBox_5")
self.formLayout_4 = QtWidgets.QFormLayout(self.groupBox_5)
self.formLayout_4.setObjectName("formLayout_4")
self.temperatureKLabel = QtWidgets.QLabel(self.groupBox_5)
self.temperatureKLabel.setObjectName("temperatureKLabel")
self.formLayout_4.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.temperatureKLabel)
self.temperatureKDoubleSpinBox = QtWidgets.QDoubleSpinBox(self.groupBox_5)
self.temperatureKDoubleSpinBox.setMinimum(0.0)
self.temperatureKDoubleSpinBox.setMaximum(100000000000000.0)
self.temperatureKDoubleSpinBox.setProperty("value", 1.0)
self.temperatureKDoubleSpinBox.setObjectName("temperatureKDoubleSpinBox")
self.formLayout_4.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.temperatureKDoubleSpinBox)
self.verticalLayout_4.addWidget(self.groupBox_5)
self.groupBox_7 = QtWidgets.QGroupBox(self.otherPage)
self.groupBox_7.setObjectName("groupBox_7")
self.formLayout_6 = QtWidgets.QFormLayout(self.groupBox_7)
self.formLayout_6.setObjectName("formLayout_6")
self.timestepLabel = QtWidgets.QLabel(self.groupBox_7)
self.timestepLabel.setObjectName("timestepLabel")
self.formLayout_6.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.timestepLabel)
self.timestepLineEdit = QtWidgets.QLineEdit(self.groupBox_7)
self.timestepLineEdit.setObjectName("timestepLineEdit")
self.formLayout_6.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.timestepLineEdit)
self.resetTimestep = QtWidgets.QToolButton(self.groupBox_7)
self.resetTimestep.setObjectName("resetTimestep")
self.formLayout_6.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.resetTimestep)
self.verticalLayout_4.addWidget(self.groupBox_7)
self.groupBox_6 = QtWidgets.QGroupBox(self.otherPage)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_6.sizePolicy().hasHeightForWidth())
self.groupBox_6.setSizePolicy(sizePolicy)
self.groupBox_6.setObjectName("groupBox_6")
self.formLayout_5 = QtWidgets.QFormLayout(self.groupBox_6)
self.formLayout_5.setObjectName("formLayout_5")
self.sigmaMLabel = QtWidgets.QLabel(self.groupBox_6)
self.sigmaMLabel.setObjectName("sigmaMLabel")
self.formLayout_5.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.sigmaMLabel)
self.sigmaMLineEdit = QtWidgets.QLineEdit(self.groupBox_6)
self.sigmaMLineEdit.setObjectName("sigmaMLineEdit")
self.formLayout_5.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.sigmaMLineEdit)
self.epsilonJLabel = QtWidgets.QLabel(self.groupBox_6)
self.epsilonJLabel.setObjectName("epsilonJLabel")
self.formLayout_5.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.epsilonJLabel)
self.epsilonJLineEdit = QtWidgets.QLineEdit(self.groupBox_6)
self.epsilonJLineEdit.setObjectName("epsilonJLineEdit")
self.formLayout_5.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.epsilonJLineEdit)
self.reset_ia_LJ = QtWidgets.QToolButton(self.groupBox_6)
self.reset_ia_LJ.setObjectName("reset_ia_LJ")
self.formLayout_5.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.reset_ia_LJ)
self.verticalLayout_4.addWidget(self.groupBox_6)
self.groupBox_8 = QtWidgets.QGroupBox(self.otherPage)
self.groupBox_8.setObjectName("groupBox_8")
self.formLayout_7 = QtWidgets.QFormLayout(self.groupBox_8)
self.formLayout_7.setObjectName("formLayout_7")
self.r_cut_aLabel = QtWidgets.QLabel(self.groupBox_8)
self.r_cut_aLabel.setObjectName("r_cut_aLabel")
self.formLayout_7.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.r_cut_aLabel)
self.r_cut_aLineEdit = QtWidgets.QLineEdit(self.groupBox_8)
self.r_cut_aLineEdit.setObjectName("r_cut_aLineEdit")
self.formLayout_7.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.r_cut_aLineEdit)
self.r_cut_bLabel = QtWidgets.QLabel(self.groupBox_8)
self.r_cut_bLabel.setObjectName("r_cut_bLabel")
self.formLayout_7.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.r_cut_bLabel)
self.r_cut_bLineEdit = QtWidgets.QLineEdit(self.groupBox_8)
self.r_cut_bLineEdit.setObjectName("r_cut_bLineEdit")
self.formLayout_7.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.r_cut_bLineEdit)
self.r_cut_abLabel = QtWidgets.QLabel(self.groupBox_8)
self.r_cut_abLabel.setObjectName("r_cut_abLabel")
self.formLayout_7.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.r_cut_abLabel)
self.r_cut_abLineEdit = QtWidgets.QLineEdit(self.groupBox_8)
self.r_cut_abLineEdit.setObjectName("r_cut_abLineEdit")
self.formLayout_7.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.r_cut_abLineEdit)
self.resetRcut = QtWidgets.QToolButton(self.groupBox_8)
self.resetRcut.setObjectName("resetRcut")
self.formLayout_7.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.resetRcut)
self.verticalLayout_4.addWidget(self.groupBox_8)
CreateModel.addPage(self.otherPage)
self.gridWidthNumberOfAtomsLabel.setBuddy(self.gridWidth)
self.gridHeightNumberOfAtomsLabel.setBuddy(self.gridHeight)
self.distanceBetweenAtomsLabel.setBuddy(self.distanceBetweenAtoms)
self.xPeriodicBoundariesLabel.setBuddy(self.xPeriodicBoundariesCheckBox)
self.yPeriodicBoudariesLabel.setBuddy(self.yPeriodicBoudariesCheckBox)
self.firstSpeciesMoleFractionLabel.setBuddy(self.firstSpeciesMoleFraction)
self.temperatureKLabel.setBuddy(self.temperatureKDoubleSpinBox)
self.timestepLabel.setBuddy(self.timestepLineEdit)
self.sigmaMLabel.setBuddy(self.sigmaMLineEdit)
self.epsilonJLabel.setBuddy(self.epsilonJLineEdit)
self.retranslateUi(CreateModel)
QtCore.QMetaObject.connectSlotsByName(CreateModel)
CreateModel.setTabOrder(self.gridWidth, self.gridHeight)
CreateModel.setTabOrder(self.gridHeight, self.keepRatioCheckBox)
CreateModel.setTabOrder(self.keepRatioCheckBox, self.distanceBetweenAtoms)
CreateModel.setTabOrder(self.distanceBetweenAtoms, self.boxWidthLineEdit)
CreateModel.setTabOrder(self.boxWidthLineEdit, self.xPeriodicBoundariesCheckBox)
CreateModel.setTabOrder(self.xPeriodicBoundariesCheckBox, self.yPeriodicBoudariesCheckBox)
CreateModel.setTabOrder(self.yPeriodicBoudariesCheckBox, self.firstSpeciesMoleFraction)
CreateModel.setTabOrder(self.firstSpeciesMoleFraction, self.previewButton)
CreateModel.setTabOrder(self.previewButton, self.temperatureKDoubleSpinBox)
CreateModel.setTabOrder(self.temperatureKDoubleSpinBox, self.timestepLineEdit)
CreateModel.setTabOrder(self.timestepLineEdit, self.sigmaMLineEdit)
CreateModel.setTabOrder(self.sigmaMLineEdit, self.epsilonJLineEdit)
CreateModel.setTabOrder(self.epsilonJLineEdit, self.reset_ia_LJ)
def retranslateUi(self, CreateModel):
_translate = QtCore.QCoreApplication.translate
CreateModel.setWindowTitle(_translate("CreateModel", "Create Model"))
self.speciesPage.setTitle(_translate("CreateModel", "Species definition"))
self.groupBox_2.setTitle(_translate("CreateModel", "First species"))
self.groupBox_1.setTitle(_translate("CreateModel", "Second species"))
self.spatialPage.setTitle(_translate("CreateModel", "Spatial configuration"))
self.groupBox.setTitle(_translate("CreateModel", "Grid"))
self.gridWidthNumberOfAtomsLabel.setText(_translate("CreateModel", "Grid width (number of atoms)"))
self.gridHeightNumberOfAtomsLabel.setText(_translate("CreateModel", "Grid height (number of atoms)"))
self.keepRatioLabel.setText(_translate("CreateModel", "Keep equal ratio"))
self.distanceBetweenAtomsLabel.setText(_translate("CreateModel", "Distance between atoms (m)"))
self.label.setText(_translate("CreateModel", "Atom number"))
self.label_atom_number.setText(_translate("CreateModel", "10000"))
self.boxWidthLabel.setText(_translate("CreateModel", "Box width (m)"))
self.groupBox_4.setTitle(_translate("CreateModel", "Boundaries"))
self.xPeriodicBoundariesLabel.setText(_translate("CreateModel", "Enable X periodic boundaries"))
self.yPeriodicBoudariesLabel.setText(_translate("CreateModel", "Enable Y periodic boundaries"))
self.groupBox_3.setTitle(_translate("CreateModel", "Mixup"))
self.firstSpeciesMoleFractionLabel.setText(_translate("CreateModel", "First species mole fraction"))
self.previewButton.setText(_translate("CreateModel", "Preview"))
self.otherPage.setTitle(_translate("CreateModel", "Other parameters"))
self.groupBox_5.setTitle(_translate("CreateModel", "Thermodynamics"))
self.temperatureKLabel.setText(_translate("CreateModel", "Temperature (K)"))
self.groupBox_7.setTitle(_translate("CreateModel", "Simulation"))
self.timestepLabel.setText(_translate("CreateModel", "Timestep (s)"))
self.resetTimestep.setText(_translate("CreateModel", "Reset"))
self.groupBox_6.setTitle(_translate("CreateModel", "Inter-atomic Lennard-Jones parameters"))
self.sigmaMLabel.setText(_translate("CreateModel", "Sigma (m)"))
self.epsilonJLabel.setText(_translate("CreateModel", "Epsilon (J)"))
self.reset_ia_LJ.setText(_translate("CreateModel", "Reset"))
self.groupBox_8.setTitle(_translate("CreateModel", "Performance"))
self.r_cut_aLabel.setText(_translate("CreateModel", "r_cut_a (m)"))
self.r_cut_bLabel.setText(_translate("CreateModel", "r_cut_b (m)"))
self.r_cut_abLabel.setText(_translate("CreateModel", "r_cut_ab (m)"))
self.resetRcut.setText(_translate("CreateModel", "Reset"))
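# Minimal usage sketch for this generated class (assumes a QWizard host, as the
# setWizardStyle/addPage calls above require; not part of the pyuic5 output):
#
#     app = QtWidgets.QApplication([])
#     wizard = QtWidgets.QWizard()
#     ui = Ui_CreateModel()
#     ui.setupUi(wizard)
#     wizard.show()
#     app.exec_()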
avg_line_length: 66.847682 | max_line_length: 109 | alphanum_fraction: 0.756885

hexsha: f09277d8ddb4a69d39f65443a78a5a183f45bb07 | size: 89 | ext: py | lang: Python
path: pytest_trio/_version.py | repo: Suenweek/pytest-trio | head_hexsha: bbae5adfadff16754397bb9d4648fd9f3c8a0ebe | licenses: ["Apache-2.0", "MIT"]
stars: null | issues: null | forks: null
# This file is imported from __init__.py and exec'd from setup.py
__version__ = "0.5.1"
avg_line_length: 22.25 | max_line_length: 65 | alphanum_fraction: 0.730337

hexsha: 1ef8f3125ab60319d8d191a58d8cce721097f71f | size: 2,169 | ext: py | lang: Python
path: hendrics/rebin.py | repo: StingraySoftware/HENDRICS | head_hexsha: 551a77fc42b16055da5bfd37701521655394b80b | licenses: ["BSD-3-Clause"]
stars: 18 (2017-08-15T17:20:31.000Z to 2021-12-24T13:33:10.000Z) | issues: 118 (2017-08-11T13:48:32.000Z to 2022-03-31T12:30:03.000Z) | forks: 11 (2017-08-11T13:43:12.000Z to 2021-09-18T07:34:10.000Z)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions to rebin light curves and frequency spectra."""
import numpy as np
from astropy import log
from .io import get_file_type
from .io import save_lcurve, save_pds
from .io import HEN_FILE_EXTENSION, get_file_extension
def rebin_file(filename, rebin):
"""Rebin the contents of a file, be it a light curve or a spectrum."""
ftype, contents = get_file_type(filename)
if ftype not in ["lc", "pds", "cpds"]:
raise ValueError("This format does not support rebin (yet):", ftype)
if rebin == int(rebin):
contents = contents.rebin(f=rebin)
else:
contents = contents.rebin_log(f=rebin)
options = {}
if ftype == "lc":
func = save_lcurve
elif ftype in ["pds", "cpds"]:
func = save_pds
options = {"save_all": True}
outfile = filename.replace(
get_file_extension(filename), "_rebin%g" % rebin + HEN_FILE_EXTENSION
)
log.info("Saving %s to %s" % (ftype, outfile))
func(contents, outfile, **options)
def main(args=None):
"""Main function called by the `HENrebin` command line script."""
import argparse
from .base import _add_default_args, check_negative_numbers_in_args
description = "Rebin light curves and frequency spectra. "
parser = argparse.ArgumentParser(description=description)
parser.add_argument("files", help="List of light curve files", nargs="+")
parser.add_argument(
"-r",
"--rebin",
type=float,
default=1,
help="Rebinning to apply. Only if the quantity to"
+ " rebin is a (C)PDS, it is possible to specify a"
+ " non-integer rebin factor, in which case it is"
+ " interpreted as a geometrical binning factor",
)
_add_default_args(parser, ["loglevel", "debug"])
args = check_negative_numbers_in_args(args)
args = parser.parse_args(args)
files = args.files
if args.debug:
args.loglevel = "DEBUG"
log.setLevel(args.loglevel)
with log.log_to_file("HENrebin.log"):
rebin = args.rebin
for f in files:
rebin_file(f, rebin)
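# Illustrative invocations of the HENrebin script defined above (hypothetical
# file names; -r is the --rebin flag parsed in main):
#
#     HENrebin lightcurve_lc.nc -r 2      # integer rebin of a light curve
#     HENrebin spectrum_pds.nc -r 0.03    # geometrical rebin of a (C)PDS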
avg_line_length: 30.549296 | max_line_length: 77 | alphanum_fraction: 0.651913

hexsha: a840f2f4ae5f634c4c8a71fe8fefb9f1d8bf9984 | size: 1,536 | ext: py | lang: Python
path: csvkit/utilities/csvpy.py | repo: SpazioDati/csvkit | head_hexsha: 96c2ab1fb6fc22eb34a6d8457e3101e9702a5498 | licenses: ["MIT"]
stars: 3 (2016-05-16T13:35:03.000Z to 2020-02-13T04:19:14.000Z) | issues: null | forks: null
#!/usr/bin/env python
from csvkit import CSVKitReader, CSVKitDictReader
from csvkit.cli import CSVFileType, CSVKitUtility
class CSVPy(CSVKitUtility):
    description = 'Loads a CSV file into a CSVKitReader object and then drops into a Python shell.'
override_flags = ['l', 'f', 'zero']
def add_arguments(self):
self.argparser.add_argument('file', metavar="FILE", type=CSVFileType(),
help='The CSV file to operate on.')
self.argparser.add_argument('--dict', dest='as_dict', action='store_true',
help='Use CSVKitDictReader instead of CSVKitReader.')
def main(self):
        # Attempt to read the file name; this forces the lazy loader to open the file and raise an error if it does not exist
filename = self.args.file.name
if self.args.as_dict:
reader_class = CSVKitDictReader
else:
reader_class = CSVKitReader
reader = reader_class(self.args.file, **self.reader_kwargs)
welcome_message = 'Welcome! "%s" has been loaded in a %s object named "reader".' % (filename, reader_class.__name__)
try:
from IPython.frontend.terminal.embed import InteractiveShellEmbed
ipy = InteractiveShellEmbed(banner1=welcome_message)
ipy()
except ImportError:
import code
code.interact(welcome_message, local={ 'reader': reader })
def launch_new_instance():
utility = CSVPy()
utility.main()
if __name__ == "__main__":
launch_new_instance()
| 34.909091
| 124
| 0.654297
|
c6c9c870398b29ee5bfdd63577a2203537b7f14c
| 901
|
py
|
Python
|
LeetcodeAlgorithms/581. Shortest Unsorted Continuous Subarray/shortest-unsorted-continuous-subarray.py
|
Fenghuapiao/PyLeetcode
|
d804a62643fe935eb61808196a2c093ea9583654
|
[
"MIT"
] | 3
|
2019-08-20T06:54:38.000Z
|
2022-01-07T12:56:46.000Z
|
LeetcodeAlgorithms/581. Shortest Unsorted Continuous Subarray/shortest-unsorted-continuous-subarray.py
|
yhangf/PyLeetcode
|
d804a62643fe935eb61808196a2c093ea9583654
|
[
"MIT"
] | null | null | null |
LeetcodeAlgorithms/581. Shortest Unsorted Continuous Subarray/shortest-unsorted-continuous-subarray.py
|
yhangf/PyLeetcode
|
d804a62643fe935eb61808196a2c093ea9583654
|
[
"MIT"
] | 2
|
2018-11-01T16:10:34.000Z
|
2020-06-02T03:24:43.000Z
|
class Solution(object):
def findUnsortedSubarray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) < 2:
return 0
maxs = [float("inf")] * len(nums)
mins = [float("inf")] * len(nums)
mins[-1] = nums[-1]
maxs[0] = nums[0]
start, end = 0, -2
for i in range(1, len(nums)):
maxs[i] = max(maxs[i-1], nums[i])
for i in reversed(range(len(nums) - 1)):
mins[i] = min(mins[i+1], nums[i])
for i in reversed(range(1, len(nums))):
if nums[i] < maxs[i - 1]:
end = i
break
for i in range(len(nums) - 1):
if nums[i] > mins[i + 1]:
start = i
break
        print(end, start)
return max(end - start + 1, 0)
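# Sanity check (illustrative driver, not part of the original file):
#
#     Solution().findUnsortedSubarray([2, 6, 4, 8, 10, 9, 15])  # -> 5
#
# Sorting the inner subarray [6, 4, 8, 10, 9] makes the whole list sorted.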
| 31.068966
| 49
| 0.406215
|
db20531e827801a95fb57ba05d668b4893400c54
| 272
|
py
|
Python
|
skp_edu_docker/code/cluster/dataconfig/dataconf_node_text.py
|
TensorMSA/hoyai_docker
|
12f0041e6306d8a6421585a4b51666bad30be442
|
[
"MIT"
] | 8
|
2017-06-16T00:19:12.000Z
|
2020-08-13T03:15:57.000Z
|
skp_edu_docker/code/cluster/dataconfig/dataconf_node_text.py
|
TensorMSA/tensormsa_docker
|
12f0041e6306d8a6421585a4b51666bad30be442
|
[
"MIT"
] | 21
|
2017-06-09T10:15:14.000Z
|
2018-03-29T07:51:02.000Z
|
skp_edu_docker/code/cluster/dataconfig/dataconf_node_text.py
|
TensorMSA/hoyai_docker
|
12f0041e6306d8a6421585a4b51666bad30be442
|
[
"MIT"
] | 4
|
2017-10-25T09:59:53.000Z
|
2020-05-07T09:51:11.000Z
|
from cluster.dataconfig.dataconf_node import DataConfNode
class DataConfNodeText(DataConfNode):
"""
"""
def run(self, conf_data):
return None
def _init_node_parm(self):
return None
def _set_progress_state(self):
return None
| 18.133333
| 57
| 0.669118
|
1d25d1ae7cc4cf0e2f5635b97581e55085a8be53
| 10,776
|
py
|
Python
|
src/python_delta_crdt/base.py
|
rtibbles/python-delta-crdts
|
6a0bce501e387567384c8a0495f3bbca4e605102
|
[
"MIT"
] | 2
|
2020-05-13T20:08:41.000Z
|
2020-05-14T01:18:40.000Z
|
src/python_delta_crdt/base.py
|
rtibbles/python-delta-crdts
|
6a0bce501e387567384c8a0495f3bbca4e605102
|
[
"MIT"
] | null | null | null |
src/python_delta_crdt/base.py
|
rtibbles/python-delta-crdts
|
6a0bce501e387567384c8a0495f3bbca4e605102
|
[
"MIT"
] | null | null | null |
from abc import ABCMeta
from abc import abstractmethod
import operator
try:
from collections.abc import Iterable
from collections.abc import Mapping
from collections.abc import MutableMapping
from collections.abc import MutableSet
except ImportError:
from collections import Iterable
from collections import Mapping
from collections import MutableMapping
from collections import MutableSet
from six import with_metaclass
from .object_hash import get_object_key
_type_registry = {}
def get_crdt_type(name):
name = str(name).lower()
try:
return _type_registry[name]
except KeyError:
raise TypeError("Unknown CRDT type: {}".format(name))
class CRDTMeta(ABCMeta):
def __new__(cls, name, bases, attrs):
global _type_registry
new_class = super(CRDTMeta, cls).__new__(cls, name, bases, attrs)
if hasattr(new_class, "type_id"):
name = new_class.type_id
# For consistency with the JS library, we store all identifiers in lower case.
type_id = name.lower()
new_class.type = type_id
_type_registry[type_id] = new_class
return new_class
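# Illustrative registry lookup (the name "GCounter" is hypothetical; only
# classes that define a type_id attribute are actually registered):
#
#     crdt_cls = get_crdt_type("GCounter")  # lookup is case-insensitive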
class CRDT(with_metaclass(CRDTMeta)):
def __init__(self, identifier, state=None):
self.id = identifier
self.state = state or self.initial()
self.state.type = self.type
self._value_cache = self.value(self.state)
# Keep track of applied deltas to allow introspection
# of updates that happen through Python attribute manipulation
self.deltas = []
def __repr__(self):
return repr(self._value_cache)
def __str__(self):
return str(self._value_cache)
def __len__(self):
return len(self._value_cache)
def __le__(self, other):
return self._value_cache <= other
def __lt__(self, other):
return self._value_cache < other
def __eq__(self, other):
return self._value_cache == other
def __ne__(self, other):
return self._value_cache != other
def __gt__(self, other):
return self._value_cache > other
def __ge__(self, other):
return self._value_cache >= other
def __and__(self, other):
return self._value_cache & other
def __or__(self, other):
return self._value_cache | other
def __invert__(self):
return ~self._value_cache
def __add__(self, other):
return self._value_cache + other
def __sub__(self, other):
return self._value_cache - other
def __mul__(self, other):
return self._value_cache * other
    def __div__(self, other):
        return self._value_cache / other
    __truediv__ = __div__  # Python 3 routes the "/" operator to __truediv__
def __floordiv__(self, other):
return self._value_cache // other
def __mod__(self, other):
return self._value_cache % other
def __xor__(self, other):
return self._value_cache ^ other
@classmethod
@abstractmethod
def initial(self):
pass
def apply(self, delta):
new_state = self.join(self.state, delta)
new_state.type = self.type
if hasattr(self, "incremental_value"):
self._value_cache = self.incremental_value(
self.state, new_state, delta, self._value_cache
)
else:
self._value_cache = self.value(new_state)
if getattr(self, "parent", None):
self.parent.propagate_delta(self, delta)
self.state = new_state
self.deltas.append(delta)
return self.state
@classmethod
@abstractmethod
def join(cls, s1, s2):
pass
@classmethod
@abstractmethod
def value(cls, state):
pass
class CRDTSetMixin(MutableSet):
def __iter__(self):
return iter(self._value_cache)
def __contains__(self, item):
return item in self._value_cache
def __len__(self):
return len(self._value_cache)
def discard(self, item):
try:
return self.remove(item)
except KeyError:
pass
class CausalCRDT(CRDT):
parent = None
key = None
class EmbeddableCRDT(CausalCRDT):
@abstractmethod
def propagate_delta(self):
pass
def mutator(func):
def wrapper(self, *args, **kwargs):
delta = func(self, *args, **kwargs)
setattr(delta, "type", self.type)
self.apply(delta)
return delta
setattr(wrapper, "mutator", True)
return wrapper
def has_mutator(crdt_type, mutator_name):
try:
getattr(getattr(crdt_type, mutator_name), "mutator")
except AttributeError:
raise AttributeError(
"{} has no mutator named {}".format(crdt_type.type, mutator_name)
)
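# Sketch of how a concrete CRDT might use @mutator (the class, method and
# delta constructor below are hypothetical, for illustration only):
#
#     class GSet(CausalCRDT):
#         @mutator
#         def add(self, value):
#             return self.make_add_delta(value)  # wrapper then calls apply()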
class StateMeta(ABCMeta):
def __new__(cls, name, bases, attrs):
new_class = super(StateMeta, cls).__new__(cls, name, bases, attrs)
# Set type property here to avoid shadowing Python type global
new_class.type = None
return new_class
class StateContainer(with_metaclass(StateMeta)):
msgpack_code = None
def __repr__(self):
return repr(self._state)
def __str__(self):
return str(self._state)
@classmethod
def factory(cls, init_args):
if isinstance(init_args, Mapping):
return cls(**init_args)
return cls(*init_args)
def copy(self):
return self.factory(self.to_init())
@abstractmethod
def to_init(self):
"""
Should return a representation of the state to a format that can be passed
to the __init__ method of the class, useful for copying and encoding.
"""
pass
class BaseMap(MutableMapping):
def __getitem__(self, key):
return self._state[key]
def __setitem__(self, key, value):
self._state[key] = value
def __delitem__(self, key):
del self._state[key]
def __iter__(self):
return iter(self._state)
def __len__(self):
return len(self._state)
def __contains__(self, item):
return item in self._state
def keys(self):
return self._state.keys()
def values(self):
return self._state.values()
def items(self):
return self._state.items()
def update(self, d):
self._state.update(d)
def get(self, item, default=None):
return self._state.get(item, default)
class Map(BaseMap, StateContainer):
msgpack_code = 64
def __init__(self, *args, **kwargs):
if args:
if len(args) > 1:
raise ValueError("Can only pass a single positional argument to Map")
self._state = dict(args[0])
else:
self._state = dict(kwargs)
@classmethod
def factory(cls, init_args):
return cls(init_args)
def to_init(self):
return self._state.items()
def check_comparator(other):
if not isinstance(other, Iterable):
raise NotImplementedError(
"Can only compare Set to classes that derive from Python Iterable abstract base class"
)
if not isinstance(other, Set):
other = Set(other)
return other
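# check_comparator lets Set compare against any iterable (sketch; values
# are arbitrary):
#
#     Set([1, 2]) == [2, 1]   # True: both wrap to the same object-key set
#     Set([1]) <= (1, 2, 3)   # True: subset test over object keys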
class Set(MutableSet, StateContainer):
msgpack_code = 65
def __init__(self, iterable=None):
self._state = {}
for item in iterable or tuple():
self.add(item)
def __repr__(self):
return repr(self._state.values())
def __str__(self):
return str(self._state.values())
def __iter__(self):
return iter(self._state.values())
def __len__(self):
return len(self._state)
def __contains__(self, item):
item_key = get_object_key(item)
return item_key in self._state
def __le__(self, other):
other = check_comparator(other)
return set(self._state.keys()) <= set(other._state.keys())
def __lt__(self, other):
other = check_comparator(other)
return set(self._state.keys()) < set(other._state.keys())
def __eq__(self, other):
other = check_comparator(other)
return set(self._state.keys()) == set(other._state.keys())
def __ne__(self, other):
other = check_comparator(other)
return set(self._state.keys()) != set(other._state.keys())
def __gt__(self, other):
other = check_comparator(other)
return set(self._state.keys()) > set(other._state.keys())
def __ge__(self, other):
other = check_comparator(other)
return set(self._state.keys()) >= set(other._state.keys())
def __and__(self, other):
other = check_comparator(other)
return set(self._state.keys()) & set(other._state.keys())
def __or__(self, other):
other = check_comparator(other)
return set(self._state.keys()) | set(other._state.keys())
def __invert__(self):
return ~set(self._state.keys())
def __add__(self, other):
other = check_comparator(other)
return set(self._state.keys()) + set(other._state.keys())
def __sub__(self, other):
other = check_comparator(other)
return set(self._state.keys()) - set(other._state.keys())
def __mul__(self, other):
other = check_comparator(other)
return set(self._state.keys()) * set(other._state.keys())
def __div__(self, other):
other = check_comparator(other)
return set(self._state.keys()) / set(other._state.keys())
def __floordiv__(self, other):
other = check_comparator(other)
return set(self._state.keys()) // set(other._state.keys())
def __mod__(self, other):
other = check_comparator(other)
return set(self._state.keys()) % set(other._state.keys())
def __xor__(self, other):
other = check_comparator(other)
return set(self._state.keys()) ^ set(other._state.keys())
def isdisjoint(self, other):
other = check_comparator(other)
return set(self._state.keys()).isdisjoint(set(other._state.keys()))
def add(self, item):
item_key = get_object_key(item)
self._state[item_key] = item
def remove(self, item):
item_key = get_object_key(item)
del self._state[item_key]
def discard(self, item):
try:
return self.remove(item)
except KeyError:
pass
def union(self, other):
return Set(tuple(self) + tuple(other))
def difference(self, other):
return Set((item for item in self if item not in other))
@classmethod
def factory(cls, init_args):
return cls(init_args)
def to_init(self):
return tuple(self)
class Tuple(tuple, StateContainer):
@classmethod
def factory(cls, init_args):
return cls(init_args)
def to_init(self):
return self
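# Round-trip sketch for StateContainer subclasses (values are arbitrary):
#
#     s = Set([1, 2, 3])
#     s2 = s.copy()  # copy() is factory(to_init()), a full round trip
#     assert s == s2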
| 25.779904
| 98
| 0.630568
|
a9c9722c9d2ddd7dd09cfe10d86cf6c10d186e43
| 67,497
|
py
|
Python
|
test/dialect/postgresql/test_compiler.py
|
lelit/sqlalchemy
|
55f930ef3d4e60bed02a2dad16e331fe42cfd12b
|
[
"MIT"
] | null | null | null |
test/dialect/postgresql/test_compiler.py
|
lelit/sqlalchemy
|
55f930ef3d4e60bed02a2dad16e331fe42cfd12b
|
[
"MIT"
] | null | null | null |
test/dialect/postgresql/test_compiler.py
|
lelit/sqlalchemy
|
55f930ef3d4e60bed02a2dad16e331fe42cfd12b
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from sqlalchemy import and_
from sqlalchemy import cast
from sqlalchemy import Column
from sqlalchemy import delete
from sqlalchemy import Enum
from sqlalchemy import exc
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import null
from sqlalchemy import schema
from sqlalchemy import select
from sqlalchemy import Sequence
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import Text
from sqlalchemy import text
from sqlalchemy import types as sqltypes
from sqlalchemy import update
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import aggregate_order_by
from sqlalchemy.dialects.postgresql import ARRAY as PG_ARRAY
from sqlalchemy.dialects.postgresql import array
from sqlalchemy.dialects.postgresql import array_agg as pg_array_agg
from sqlalchemy.dialects.postgresql import ExcludeConstraint
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.dialects.postgresql import TSRANGE
from sqlalchemy.orm import aliased
from sqlalchemy.orm import mapper
from sqlalchemy.orm import Session
from sqlalchemy.sql import column
from sqlalchemy.sql import literal_column
from sqlalchemy.sql import operators
from sqlalchemy.sql import table
from sqlalchemy.sql import util as sql_util
from sqlalchemy.testing import engines
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.assertions import assert_raises
from sqlalchemy.testing.assertions import assert_raises_message
from sqlalchemy.testing.assertions import AssertsCompiledSQL
from sqlalchemy.testing.assertions import expect_warnings
from sqlalchemy.testing.assertions import is_
from sqlalchemy.util import OrderedDict
from sqlalchemy.util import u
class SequenceTest(fixtures.TestBase, AssertsCompiledSQL):
__prefer__ = "postgresql"
def test_format(self):
seq = Sequence("my_seq_no_schema")
dialect = postgresql.dialect()
assert (
dialect.identifier_preparer.format_sequence(seq)
== "my_seq_no_schema"
)
seq = Sequence("my_seq", schema="some_schema")
assert (
dialect.identifier_preparer.format_sequence(seq)
== "some_schema.my_seq"
)
seq = Sequence("My_Seq", schema="Some_Schema")
assert (
dialect.identifier_preparer.format_sequence(seq)
== '"Some_Schema"."My_Seq"'
)
@testing.only_on("postgresql", "foo")
@testing.provide_metadata
def test_reverse_eng_name(self):
metadata = self.metadata
engine = engines.testing_engine(options=dict(implicit_returning=False))
for tname, cname in [
("tb1" * 30, "abc"),
("tb2", "abc" * 30),
("tb3" * 30, "abc" * 30),
("tb4", "abc"),
]:
t = Table(
tname[:57],
metadata,
Column(cname[:57], Integer, primary_key=True),
)
t.create(engine)
r = engine.execute(t.insert())
assert r.inserted_primary_key == [1]
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = postgresql.dialect()
def test_update_returning(self):
dialect = postgresql.dialect()
table1 = table(
"mytable",
column("myid", Integer),
column("name", String(128)),
column("description", String(128)),
)
u = update(table1, values=dict(name="foo")).returning(
table1.c.myid, table1.c.name
)
self.assert_compile(
u,
"UPDATE mytable SET name=%(name)s "
"RETURNING mytable.myid, mytable.name",
dialect=dialect,
)
u = update(table1, values=dict(name="foo")).returning(table1)
self.assert_compile(
u,
"UPDATE mytable SET name=%(name)s "
"RETURNING mytable.myid, mytable.name, "
"mytable.description",
dialect=dialect,
)
u = update(table1, values=dict(name="foo")).returning(
func.length(table1.c.name)
)
self.assert_compile(
u,
"UPDATE mytable SET name=%(name)s "
"RETURNING length(mytable.name) AS length_1",
dialect=dialect,
)
def test_insert_returning(self):
dialect = postgresql.dialect()
table1 = table(
"mytable",
column("myid", Integer),
column("name", String(128)),
column("description", String(128)),
)
i = insert(table1, values=dict(name="foo")).returning(
table1.c.myid, table1.c.name
)
self.assert_compile(
i,
"INSERT INTO mytable (name) VALUES "
"(%(name)s) RETURNING mytable.myid, "
"mytable.name",
dialect=dialect,
)
i = insert(table1, values=dict(name="foo")).returning(table1)
self.assert_compile(
i,
"INSERT INTO mytable (name) VALUES "
"(%(name)s) RETURNING mytable.myid, "
"mytable.name, mytable.description",
dialect=dialect,
)
i = insert(table1, values=dict(name="foo")).returning(
func.length(table1.c.name)
)
self.assert_compile(
i,
"INSERT INTO mytable (name) VALUES "
"(%(name)s) RETURNING length(mytable.name) "
"AS length_1",
dialect=dialect,
)
def test_create_drop_enum(self):
# test escaping and unicode within CREATE TYPE for ENUM
typ = postgresql.ENUM(
"val1", "val2", "val's 3", u("méil"), name="myname"
)
self.assert_compile(
postgresql.CreateEnumType(typ),
u(
"CREATE TYPE myname AS "
"ENUM ('val1', 'val2', 'val''s 3', 'méil')"
),
)
typ = postgresql.ENUM("val1", "val2", "val's 3", name="PleaseQuoteMe")
self.assert_compile(
postgresql.CreateEnumType(typ),
'CREATE TYPE "PleaseQuoteMe" AS ENUM '
"('val1', 'val2', 'val''s 3')",
)
def test_generic_enum(self):
e1 = Enum("x", "y", "z", name="somename")
e2 = Enum("x", "y", "z", name="somename", schema="someschema")
self.assert_compile(
postgresql.CreateEnumType(e1),
"CREATE TYPE somename AS ENUM ('x', 'y', 'z')",
)
self.assert_compile(
postgresql.CreateEnumType(e2),
"CREATE TYPE someschema.somename AS ENUM " "('x', 'y', 'z')",
)
self.assert_compile(postgresql.DropEnumType(e1), "DROP TYPE somename")
self.assert_compile(
postgresql.DropEnumType(e2), "DROP TYPE someschema.somename"
)
t1 = Table("sometable", MetaData(), Column("somecolumn", e1))
self.assert_compile(
schema.CreateTable(t1),
"CREATE TABLE sometable (somecolumn " "somename)",
)
t1 = Table(
"sometable",
MetaData(),
Column("somecolumn", Enum("x", "y", "z", native_enum=False)),
)
self.assert_compile(
schema.CreateTable(t1),
"CREATE TABLE sometable (somecolumn "
"VARCHAR(1), CHECK (somecolumn IN ('x', "
"'y', 'z')))",
)
def test_create_type_schema_translate(self):
e1 = Enum("x", "y", "z", name="somename")
e2 = Enum("x", "y", "z", name="somename", schema="someschema")
schema_translate_map = {None: "foo", "someschema": "bar"}
self.assert_compile(
postgresql.CreateEnumType(e1),
"CREATE TYPE foo.somename AS ENUM ('x', 'y', 'z')",
schema_translate_map=schema_translate_map,
)
self.assert_compile(
postgresql.CreateEnumType(e2),
"CREATE TYPE bar.somename AS ENUM ('x', 'y', 'z')",
schema_translate_map=schema_translate_map,
)
def test_create_table_with_tablespace(self):
m = MetaData()
tbl = Table(
"atable",
m,
Column("id", Integer),
postgresql_tablespace="sometablespace",
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER) TABLESPACE sometablespace",
)
def test_create_table_with_tablespace_quoted(self):
# testing quoting of tablespace name
m = MetaData()
tbl = Table(
"anothertable",
m,
Column("id", Integer),
postgresql_tablespace="table",
)
self.assert_compile(
schema.CreateTable(tbl),
'CREATE TABLE anothertable (id INTEGER) TABLESPACE "table"',
)
def test_create_table_inherits(self):
m = MetaData()
tbl = Table(
"atable", m, Column("id", Integer), postgresql_inherits="i1"
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER) INHERITS ( i1 )",
)
def test_create_table_inherits_tuple(self):
m = MetaData()
tbl = Table(
"atable",
m,
Column("id", Integer),
postgresql_inherits=("i1", "i2"),
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER) INHERITS ( i1, i2 )",
)
def test_create_table_inherits_quoting(self):
m = MetaData()
tbl = Table(
"atable",
m,
Column("id", Integer),
postgresql_inherits=("Quote Me", "quote Me Too"),
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER) INHERITS "
'( "Quote Me", "quote Me Too" )',
)
def test_create_table_partition_by_list(self):
m = MetaData()
tbl = Table(
"atable",
m,
Column("id", Integer),
Column("part_column", Integer),
postgresql_partition_by="LIST (part_column)",
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER, part_column INTEGER) "
"PARTITION BY LIST (part_column)",
)
def test_create_table_partition_by_range(self):
m = MetaData()
tbl = Table(
"atable",
m,
Column("id", Integer),
Column("part_column", Integer),
postgresql_partition_by="RANGE (part_column)",
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER, part_column INTEGER) "
"PARTITION BY RANGE (part_column)",
)
def test_create_table_with_oids(self):
m = MetaData()
tbl = Table(
"atable", m, Column("id", Integer), postgresql_with_oids=True
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER) WITH OIDS",
)
tbl2 = Table(
"anothertable",
m,
Column("id", Integer),
postgresql_with_oids=False,
)
self.assert_compile(
schema.CreateTable(tbl2),
"CREATE TABLE anothertable (id INTEGER) WITHOUT OIDS",
)
def test_create_table_with_oncommit_option(self):
m = MetaData()
tbl = Table(
"atable", m, Column("id", Integer), postgresql_on_commit="drop"
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER) ON COMMIT DROP",
)
def test_create_table_with_multiple_options(self):
m = MetaData()
tbl = Table(
"atable",
m,
Column("id", Integer),
postgresql_tablespace="sometablespace",
postgresql_with_oids=False,
postgresql_on_commit="preserve_rows",
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER) WITHOUT OIDS "
"ON COMMIT PRESERVE ROWS TABLESPACE sometablespace",
)
def test_create_partial_index(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", Integer))
idx = Index(
"test_idx1",
tbl.c.data,
postgresql_where=and_(tbl.c.data > 5, tbl.c.data < 10),
)
idx = Index(
"test_idx1",
tbl.c.data,
postgresql_where=and_(tbl.c.data > 5, tbl.c.data < 10),
)
        # test quoting of string literals in the index WHERE clause
idx2 = Index(
"test_idx2",
tbl.c.data,
postgresql_where=and_(tbl.c.data > "a", tbl.c.data < "b's"),
)
self.assert_compile(
schema.CreateIndex(idx),
"CREATE INDEX test_idx1 ON testtbl (data) "
"WHERE data > 5 AND data < 10",
dialect=postgresql.dialect(),
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX test_idx2 ON testtbl (data) "
"WHERE data > 'a' AND data < 'b''s'",
dialect=postgresql.dialect(),
)
def test_create_index_with_ops(self):
m = MetaData()
tbl = Table(
"testtbl",
m,
Column("data", String),
Column("data2", Integer, key="d2"),
)
idx = Index(
"test_idx1",
tbl.c.data,
postgresql_ops={"data": "text_pattern_ops"},
)
idx2 = Index(
"test_idx2",
tbl.c.data,
tbl.c.d2,
postgresql_ops={"data": "text_pattern_ops", "d2": "int4_ops"},
)
self.assert_compile(
schema.CreateIndex(idx),
"CREATE INDEX test_idx1 ON testtbl " "(data text_pattern_ops)",
dialect=postgresql.dialect(),
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX test_idx2 ON testtbl "
"(data text_pattern_ops, data2 int4_ops)",
dialect=postgresql.dialect(),
)
def test_create_index_with_labeled_ops(self):
m = MetaData()
tbl = Table(
"testtbl",
m,
Column("data", String),
Column("data2", Integer, key="d2"),
)
idx = Index(
"test_idx1",
func.lower(tbl.c.data).label("data_lower"),
postgresql_ops={"data_lower": "text_pattern_ops"},
)
idx2 = Index(
"test_idx2",
(func.xyz(tbl.c.data) + tbl.c.d2).label("bar"),
tbl.c.d2.label("foo"),
postgresql_ops={"bar": "text_pattern_ops", "foo": "int4_ops"},
)
self.assert_compile(
schema.CreateIndex(idx),
"CREATE INDEX test_idx1 ON testtbl "
"(lower(data) text_pattern_ops)",
dialect=postgresql.dialect(),
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX test_idx2 ON testtbl "
"((xyz(data) + data2) text_pattern_ops, "
"data2 int4_ops)",
dialect=postgresql.dialect(),
)
def test_create_index_with_text_or_composite(self):
m = MetaData()
tbl = Table("testtbl", m, Column("d1", String), Column("d2", Integer))
idx = Index("test_idx1", text("x"))
tbl.append_constraint(idx)
idx2 = Index("test_idx2", text("y"), tbl.c.d2)
idx3 = Index(
"test_idx2",
tbl.c.d1,
text("y"),
tbl.c.d2,
postgresql_ops={"d1": "x1", "d2": "x2"},
)
idx4 = Index(
"test_idx2",
tbl.c.d1,
tbl.c.d2 > 5,
text("q"),
postgresql_ops={"d1": "x1", "d2": "x2"},
)
idx5 = Index(
"test_idx2",
tbl.c.d1,
(tbl.c.d2 > 5).label("g"),
text("q"),
postgresql_ops={"d1": "x1", "g": "x2"},
)
self.assert_compile(
schema.CreateIndex(idx), "CREATE INDEX test_idx1 ON testtbl (x)"
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX test_idx2 ON testtbl (y, d2)",
)
self.assert_compile(
schema.CreateIndex(idx3),
"CREATE INDEX test_idx2 ON testtbl (d1 x1, y, d2 x2)",
)
# note that at the moment we do not expect the 'd2' op to
# pick up on the "d2 > 5" expression
self.assert_compile(
schema.CreateIndex(idx4),
"CREATE INDEX test_idx2 ON testtbl (d1 x1, (d2 > 5), q)",
)
# however it does work if we label!
self.assert_compile(
schema.CreateIndex(idx5),
"CREATE INDEX test_idx2 ON testtbl (d1 x1, (d2 > 5) x2, q)",
)
def test_create_index_with_using(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", String))
idx1 = Index("test_idx1", tbl.c.data)
idx2 = Index("test_idx2", tbl.c.data, postgresql_using="btree")
idx3 = Index("test_idx3", tbl.c.data, postgresql_using="hash")
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl " "(data)",
dialect=postgresql.dialect(),
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX test_idx2 ON testtbl " "USING btree (data)",
dialect=postgresql.dialect(),
)
self.assert_compile(
schema.CreateIndex(idx3),
"CREATE INDEX test_idx3 ON testtbl " "USING hash (data)",
dialect=postgresql.dialect(),
)
def test_create_index_with_with(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", String))
idx1 = Index("test_idx1", tbl.c.data)
idx2 = Index(
"test_idx2", tbl.c.data, postgresql_with={"fillfactor": 50}
)
idx3 = Index(
"test_idx3",
tbl.c.data,
postgresql_using="gist",
postgresql_with={"buffering": "off"},
)
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl " "(data)",
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX test_idx2 ON testtbl "
"(data) "
"WITH (fillfactor = 50)",
)
self.assert_compile(
schema.CreateIndex(idx3),
"CREATE INDEX test_idx3 ON testtbl "
"USING gist (data) "
"WITH (buffering = off)",
)
def test_create_index_with_tablespace(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", String))
idx1 = Index("test_idx1", tbl.c.data)
idx2 = Index(
"test_idx2", tbl.c.data, postgresql_tablespace="sometablespace"
)
idx3 = Index(
"test_idx3",
tbl.c.data,
postgresql_tablespace="another table space",
)
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl " "(data)",
dialect=postgresql.dialect(),
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX test_idx2 ON testtbl "
"(data) "
"TABLESPACE sometablespace",
dialect=postgresql.dialect(),
)
self.assert_compile(
schema.CreateIndex(idx3),
"CREATE INDEX test_idx3 ON testtbl "
"(data) "
'TABLESPACE "another table space"',
dialect=postgresql.dialect(),
)
def test_create_index_with_multiple_options(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", String))
idx1 = Index(
"test_idx1",
tbl.c.data,
postgresql_using="btree",
postgresql_tablespace="atablespace",
postgresql_with={"fillfactor": 60},
postgresql_where=and_(tbl.c.data > 5, tbl.c.data < 10),
)
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl "
"USING btree (data) "
"WITH (fillfactor = 60) "
"TABLESPACE atablespace "
"WHERE data > 5 AND data < 10",
dialect=postgresql.dialect(),
)
def test_create_index_expr_gets_parens(self):
m = MetaData()
tbl = Table("testtbl", m, Column("x", Integer), Column("y", Integer))
idx1 = Index("test_idx1", 5 / (tbl.c.x + tbl.c.y))
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl ((5 / (x + y)))",
)
def test_create_index_literals(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", Integer))
idx1 = Index("test_idx1", tbl.c.data + 5)
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl ((data + 5))",
)
def test_create_index_concurrently(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", Integer))
idx1 = Index("test_idx1", tbl.c.data, postgresql_concurrently=True)
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX CONCURRENTLY test_idx1 ON testtbl (data)",
)
dialect_8_1 = postgresql.dialect()
dialect_8_1._supports_create_index_concurrently = False
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl (data)",
dialect=dialect_8_1,
)
def test_drop_index_concurrently(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", Integer))
idx1 = Index("test_idx1", tbl.c.data, postgresql_concurrently=True)
self.assert_compile(
schema.DropIndex(idx1), "DROP INDEX CONCURRENTLY test_idx1"
)
dialect_9_1 = postgresql.dialect()
dialect_9_1._supports_drop_index_concurrently = False
self.assert_compile(
schema.DropIndex(idx1), "DROP INDEX test_idx1", dialect=dialect_9_1
)
def test_exclude_constraint_min(self):
m = MetaData()
tbl = Table("testtbl", m, Column("room", Integer, primary_key=True))
cons = ExcludeConstraint(("room", "="))
tbl.append_constraint(cons)
self.assert_compile(
schema.AddConstraint(cons),
"ALTER TABLE testtbl ADD EXCLUDE USING gist " "(room WITH =)",
dialect=postgresql.dialect(),
)
def test_exclude_constraint_full(self):
m = MetaData()
room = Column("room", Integer, primary_key=True)
tbl = Table("testtbl", m, room, Column("during", TSRANGE))
room = Column("room", Integer, primary_key=True)
cons = ExcludeConstraint(
(room, "="),
("during", "&&"),
name="my_name",
using="gist",
where="room > 100",
deferrable=True,
initially="immediate",
)
tbl.append_constraint(cons)
self.assert_compile(
schema.AddConstraint(cons),
"ALTER TABLE testtbl ADD CONSTRAINT my_name "
"EXCLUDE USING gist "
"(room WITH =, during WITH "
"&&) WHERE "
"(room > 100) DEFERRABLE INITIALLY immediate",
dialect=postgresql.dialect(),
)
def test_exclude_constraint_copy(self):
m = MetaData()
cons = ExcludeConstraint(("room", "="))
tbl = Table(
"testtbl", m, Column("room", Integer, primary_key=True), cons
)
# apparently you can't copy a ColumnCollectionConstraint until
# after it has been bound to a table...
cons_copy = cons.copy()
tbl.append_constraint(cons_copy)
self.assert_compile(
schema.AddConstraint(cons_copy),
"ALTER TABLE testtbl ADD EXCLUDE USING gist " "(room WITH =)",
)
def test_exclude_constraint_copy_where_using(self):
m = MetaData()
tbl = Table("testtbl", m, Column("room", Integer, primary_key=True))
cons = ExcludeConstraint(
(tbl.c.room, "="), where=tbl.c.room > 5, using="foobar"
)
tbl.append_constraint(cons)
self.assert_compile(
schema.AddConstraint(cons),
"ALTER TABLE testtbl ADD EXCLUDE USING foobar "
"(room WITH =) WHERE (testtbl.room > 5)",
)
m2 = MetaData()
tbl2 = tbl.tometadata(m2)
self.assert_compile(
schema.CreateTable(tbl2),
"CREATE TABLE testtbl (room SERIAL NOT NULL, "
"PRIMARY KEY (room), "
"EXCLUDE USING foobar "
"(room WITH =) WHERE (testtbl.room > 5))",
)
def test_exclude_constraint_text(self):
m = MetaData()
cons = ExcludeConstraint((text("room::TEXT"), "="))
Table("testtbl", m, Column("room", String), cons)
self.assert_compile(
schema.AddConstraint(cons),
"ALTER TABLE testtbl ADD EXCLUDE USING gist "
"(room::TEXT WITH =)",
)
def test_exclude_constraint_cast(self):
m = MetaData()
tbl = Table("testtbl", m, Column("room", String))
cons = ExcludeConstraint((cast(tbl.c.room, Text), "="))
tbl.append_constraint(cons)
self.assert_compile(
schema.AddConstraint(cons),
"ALTER TABLE testtbl ADD EXCLUDE USING gist "
"(CAST(room AS TEXT) WITH =)",
)
def test_exclude_constraint_cast_quote(self):
m = MetaData()
tbl = Table("testtbl", m, Column("Room", String))
cons = ExcludeConstraint((cast(tbl.c.Room, Text), "="))
tbl.append_constraint(cons)
self.assert_compile(
schema.AddConstraint(cons),
"ALTER TABLE testtbl ADD EXCLUDE USING gist "
'(CAST("Room" AS TEXT) WITH =)',
)
def test_exclude_constraint_when(self):
m = MetaData()
tbl = Table("testtbl", m, Column("room", String))
cons = ExcludeConstraint(("room", "="), where=tbl.c.room.in_(["12"]))
tbl.append_constraint(cons)
self.assert_compile(
schema.AddConstraint(cons),
"ALTER TABLE testtbl ADD EXCLUDE USING gist "
"(room WITH =) WHERE (testtbl.room IN ('12'))",
dialect=postgresql.dialect(),
)
def test_substring(self):
self.assert_compile(
func.substring("abc", 1, 2),
"SUBSTRING(%(substring_1)s FROM %(substring_2)s "
"FOR %(substring_3)s)",
)
self.assert_compile(
func.substring("abc", 1),
"SUBSTRING(%(substring_1)s FROM %(substring_2)s)",
)
def test_for_update(self):
table1 = table(
"mytable", column("myid"), column("name"), column("description")
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %(myid_1)s FOR UPDATE",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(nowait=True),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %(myid_1)s FOR UPDATE NOWAIT",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(
skip_locked=True
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %(myid_1)s "
"FOR UPDATE SKIP LOCKED",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(read=True),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %(myid_1)s FOR SHARE",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(
read=True, nowait=True
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %(myid_1)s FOR SHARE NOWAIT",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(
read=True, skip_locked=True
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %(myid_1)s "
"FOR SHARE SKIP LOCKED",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(
of=table1.c.myid
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %(myid_1)s "
"FOR UPDATE OF mytable",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(
read=True, nowait=True, of=table1
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %(myid_1)s "
"FOR SHARE OF mytable NOWAIT",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(
read=True, nowait=True, of=table1.c.myid
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %(myid_1)s "
"FOR SHARE OF mytable NOWAIT",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(
read=True, nowait=True, of=[table1.c.myid, table1.c.name]
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %(myid_1)s "
"FOR SHARE OF mytable NOWAIT",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(
read=True, skip_locked=True, of=[table1.c.myid, table1.c.name]
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %(myid_1)s "
"FOR SHARE OF mytable SKIP LOCKED",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(
key_share=True, nowait=True, of=[table1.c.myid, table1.c.name]
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %(myid_1)s "
"FOR NO KEY UPDATE OF mytable NOWAIT",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(
key_share=True,
skip_locked=True,
of=[table1.c.myid, table1.c.name],
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %(myid_1)s "
"FOR NO KEY UPDATE OF mytable SKIP LOCKED",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(
key_share=True, of=[table1.c.myid, table1.c.name]
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %(myid_1)s "
"FOR NO KEY UPDATE OF mytable",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(key_share=True),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %(myid_1)s "
"FOR NO KEY UPDATE",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(
read=True, key_share=True
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %(myid_1)s "
"FOR KEY SHARE",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(
read=True, key_share=True, of=table1
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %(myid_1)s "
"FOR KEY SHARE OF mytable",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(
read=True, key_share=True, skip_locked=True
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %(myid_1)s "
"FOR KEY SHARE SKIP LOCKED",
)
ta = table1.alias()
self.assert_compile(
ta.select(ta.c.myid == 7).with_for_update(
of=[ta.c.myid, ta.c.name]
),
"SELECT mytable_1.myid, mytable_1.name, mytable_1.description "
"FROM mytable AS mytable_1 "
"WHERE mytable_1.myid = %(myid_1)s FOR UPDATE OF mytable_1",
)
def test_for_update_with_schema(self):
m = MetaData()
table1 = Table(
"mytable", m, Column("myid"), Column("name"), schema="testschema"
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(of=table1),
"SELECT testschema.mytable.myid, testschema.mytable.name "
"FROM testschema.mytable "
"WHERE testschema.mytable.myid = %(myid_1)s "
"FOR UPDATE OF mytable",
)
def test_reserved_words(self):
table = Table(
"pg_table",
MetaData(),
Column("col1", Integer),
Column("variadic", Integer),
)
x = select([table.c.col1, table.c.variadic])
self.assert_compile(
x, """SELECT pg_table.col1, pg_table."variadic" FROM pg_table"""
)
def test_array(self):
c = Column("x", postgresql.ARRAY(Integer))
self.assert_compile(
cast(c, postgresql.ARRAY(Integer)), "CAST(x AS INTEGER[])"
)
self.assert_compile(c[5], "x[%(x_1)s]", checkparams={"x_1": 5})
self.assert_compile(
c[5:7], "x[%(x_1)s:%(x_2)s]", checkparams={"x_2": 7, "x_1": 5}
)
self.assert_compile(
c[5:7][2:3],
"x[%(x_1)s:%(x_2)s][%(param_1)s:%(param_2)s]",
checkparams={"x_2": 7, "x_1": 5, "param_1": 2, "param_2": 3},
)
self.assert_compile(
c[5:7][3],
"x[%(x_1)s:%(x_2)s][%(param_1)s]",
checkparams={"x_2": 7, "x_1": 5, "param_1": 3},
)
self.assert_compile(
c.contains([1]), "x @> %(x_1)s", checkparams={"x_1": [1]}
)
self.assert_compile(
c.contained_by([2]), "x <@ %(x_1)s", checkparams={"x_1": [2]}
)
self.assert_compile(
c.overlap([3]), "x && %(x_1)s", checkparams={"x_1": [3]}
)
self.assert_compile(
postgresql.Any(4, c),
"%(param_1)s = ANY (x)",
checkparams={"param_1": 4},
)
self.assert_compile(
c.any(5, operator=operators.ne),
"%(param_1)s != ANY (x)",
checkparams={"param_1": 5},
)
self.assert_compile(
postgresql.All(6, c, operator=operators.gt),
"%(param_1)s > ALL (x)",
checkparams={"param_1": 6},
)
self.assert_compile(
c.all(7, operator=operators.lt),
"%(param_1)s < ALL (x)",
checkparams={"param_1": 7},
)
def _test_array_zero_indexes(self, zero_indexes):
c = Column("x", postgresql.ARRAY(Integer, zero_indexes=zero_indexes))
add_one = 1 if zero_indexes else 0
self.assert_compile(
cast(c, postgresql.ARRAY(Integer, zero_indexes=zero_indexes)),
"CAST(x AS INTEGER[])",
)
self.assert_compile(
c[5], "x[%(x_1)s]", checkparams={"x_1": 5 + add_one}
)
self.assert_compile(
c[5:7],
"x[%(x_1)s:%(x_2)s]",
checkparams={"x_2": 7 + add_one, "x_1": 5 + add_one},
)
self.assert_compile(
c[5:7][2:3],
"x[%(x_1)s:%(x_2)s][%(param_1)s:%(param_2)s]",
checkparams={
"x_2": 7 + add_one,
"x_1": 5 + add_one,
"param_1": 2 + add_one,
"param_2": 3 + add_one,
},
)
self.assert_compile(
c[5:7][3],
"x[%(x_1)s:%(x_2)s][%(param_1)s]",
checkparams={
"x_2": 7 + add_one,
"x_1": 5 + add_one,
"param_1": 3 + add_one,
},
)
def test_array_zero_indexes_true(self):
self._test_array_zero_indexes(True)
def test_array_zero_indexes_false(self):
self._test_array_zero_indexes(False)
def test_array_literal_type(self):
isinstance(postgresql.array([1, 2]).type, postgresql.ARRAY)
is_(postgresql.array([1, 2]).type.item_type._type_affinity, Integer)
is_(
postgresql.array(
[1, 2], type_=String
).type.item_type._type_affinity,
String,
)
def test_array_literal(self):
self.assert_compile(
func.array_dims(
postgresql.array([1, 2]) + postgresql.array([3, 4, 5])
),
"array_dims(ARRAY[%(param_1)s, %(param_2)s] || "
"ARRAY[%(param_3)s, %(param_4)s, %(param_5)s])",
checkparams={
"param_5": 5,
"param_4": 4,
"param_1": 1,
"param_3": 3,
"param_2": 2,
},
)
def test_array_literal_compare(self):
self.assert_compile(
postgresql.array([1, 2]) == [3, 4, 5],
"ARRAY[%(param_1)s, %(param_2)s] = "
"ARRAY[%(param_3)s, %(param_4)s, %(param_5)s]",
checkparams={
"param_5": 5,
"param_4": 4,
"param_1": 1,
"param_3": 3,
"param_2": 2,
},
)
def test_array_literal_insert(self):
m = MetaData()
t = Table("t", m, Column("data", postgresql.ARRAY(Integer)))
self.assert_compile(
t.insert().values(data=array([1, 2, 3])),
"INSERT INTO t (data) VALUES (ARRAY[%(param_1)s, "
"%(param_2)s, %(param_3)s])",
)
def test_update_array_element(self):
m = MetaData()
t = Table("t", m, Column("data", postgresql.ARRAY(Integer)))
self.assert_compile(
t.update().values({t.c.data[5]: 1}),
"UPDATE t SET data[%(data_1)s]=%(param_1)s",
checkparams={"data_1": 5, "param_1": 1},
)
def test_update_array_slice(self):
m = MetaData()
t = Table("t", m, Column("data", postgresql.ARRAY(Integer)))
self.assert_compile(
t.update().values({t.c.data[2:5]: 2}),
"UPDATE t SET data[%(data_1)s:%(data_2)s]=%(param_1)s",
checkparams={"param_1": 2, "data_2": 5, "data_1": 2},
)
def test_from_only(self):
m = MetaData()
tbl1 = Table("testtbl1", m, Column("id", Integer))
tbl2 = Table("testtbl2", m, Column("id", Integer))
stmt = tbl1.select().with_hint(tbl1, "ONLY", "postgresql")
expected = "SELECT testtbl1.id FROM ONLY testtbl1"
self.assert_compile(stmt, expected)
talias1 = tbl1.alias("foo")
stmt = talias1.select().with_hint(talias1, "ONLY", "postgresql")
expected = "SELECT foo.id FROM ONLY testtbl1 AS foo"
self.assert_compile(stmt, expected)
stmt = select([tbl1, tbl2]).with_hint(tbl1, "ONLY", "postgresql")
expected = (
"SELECT testtbl1.id, testtbl2.id FROM ONLY testtbl1, " "testtbl2"
)
self.assert_compile(stmt, expected)
stmt = select([tbl1, tbl2]).with_hint(tbl2, "ONLY", "postgresql")
expected = (
"SELECT testtbl1.id, testtbl2.id FROM testtbl1, ONLY " "testtbl2"
)
self.assert_compile(stmt, expected)
stmt = select([tbl1, tbl2])
stmt = stmt.with_hint(tbl1, "ONLY", "postgresql")
stmt = stmt.with_hint(tbl2, "ONLY", "postgresql")
expected = (
"SELECT testtbl1.id, testtbl2.id FROM ONLY testtbl1, "
"ONLY testtbl2"
)
self.assert_compile(stmt, expected)
stmt = update(tbl1, values=dict(id=1))
stmt = stmt.with_hint("ONLY", dialect_name="postgresql")
expected = "UPDATE ONLY testtbl1 SET id=%(id)s"
self.assert_compile(stmt, expected)
stmt = delete(tbl1).with_hint(
"ONLY", selectable=tbl1, dialect_name="postgresql"
)
expected = "DELETE FROM ONLY testtbl1"
self.assert_compile(stmt, expected)
tbl3 = Table("testtbl3", m, Column("id", Integer), schema="testschema")
stmt = tbl3.select().with_hint(tbl3, "ONLY", "postgresql")
expected = (
"SELECT testschema.testtbl3.id FROM " "ONLY testschema.testtbl3"
)
self.assert_compile(stmt, expected)
assert_raises(
exc.CompileError,
tbl3.select().with_hint(tbl3, "FAKE", "postgresql").compile,
dialect=postgresql.dialect(),
)
def test_aggregate_order_by_one(self):
m = MetaData()
table = Table("table1", m, Column("a", Integer), Column("b", Integer))
expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
stmt = select([expr])
# note this tests that the object exports FROM objects
# correctly
self.assert_compile(
stmt,
"SELECT array_agg(table1.a ORDER BY table1.b DESC) "
"AS array_agg_1 FROM table1",
)
def test_aggregate_order_by_two(self):
m = MetaData()
table = Table("table1", m, Column("a", Integer), Column("b", Integer))
expr = func.string_agg(
table.c.a, aggregate_order_by(literal_column("','"), table.c.a)
)
stmt = select([expr])
self.assert_compile(
stmt,
"SELECT string_agg(table1.a, ',' ORDER BY table1.a) "
"AS string_agg_1 FROM table1",
)
def test_aggregate_order_by_multi_col(self):
m = MetaData()
table = Table("table1", m, Column("a", Integer), Column("b", Integer))
expr = func.string_agg(
table.c.a,
aggregate_order_by(
literal_column("','"), table.c.a, table.c.b.desc()
),
)
stmt = select([expr])
self.assert_compile(
stmt,
"SELECT string_agg(table1.a, "
"',' ORDER BY table1.a, table1.b DESC) "
"AS string_agg_1 FROM table1",
)
    def test_aggregate_order_by_no_arg(self):
assert_raises_message(
TypeError,
"at least one ORDER BY element is required",
aggregate_order_by,
literal_column("','"),
)
def test_pg_array_agg_implicit_pg_array(self):
expr = pg_array_agg(column("data", Integer))
assert isinstance(expr.type, PG_ARRAY)
is_(expr.type.item_type._type_affinity, Integer)
def test_pg_array_agg_uses_base_array(self):
expr = pg_array_agg(column("data", sqltypes.ARRAY(Integer)))
assert isinstance(expr.type, sqltypes.ARRAY)
assert not isinstance(expr.type, PG_ARRAY)
is_(expr.type.item_type._type_affinity, Integer)
def test_pg_array_agg_uses_pg_array(self):
expr = pg_array_agg(column("data", PG_ARRAY(Integer)))
assert isinstance(expr.type, PG_ARRAY)
is_(expr.type.item_type._type_affinity, Integer)
def test_pg_array_agg_explicit_base_array(self):
expr = pg_array_agg(
column("data", sqltypes.ARRAY(Integer)),
type_=sqltypes.ARRAY(Integer),
)
assert isinstance(expr.type, sqltypes.ARRAY)
assert not isinstance(expr.type, PG_ARRAY)
is_(expr.type.item_type._type_affinity, Integer)
def test_pg_array_agg_explicit_pg_array(self):
expr = pg_array_agg(
column("data", sqltypes.ARRAY(Integer)), type_=PG_ARRAY(Integer)
)
assert isinstance(expr.type, PG_ARRAY)
is_(expr.type.item_type._type_affinity, Integer)
def test_aggregate_order_by_adapt(self):
m = MetaData()
table = Table("table1", m, Column("a", Integer), Column("b", Integer))
expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
stmt = select([expr])
a1 = table.alias("foo")
stmt2 = sql_util.ClauseAdapter(a1).traverse(stmt)
self.assert_compile(
stmt2,
"SELECT array_agg(foo.a ORDER BY foo.b DESC) AS array_agg_1 "
"FROM table1 AS foo",
)
def test_delete_extra_froms(self):
t1 = table("t1", column("c1"))
t2 = table("t2", column("c1"))
q = delete(t1).where(t1.c.c1 == t2.c.c1)
self.assert_compile(q, "DELETE FROM t1 USING t2 WHERE t1.c1 = t2.c1")
def test_delete_extra_froms_alias(self):
a1 = table("t1", column("c1")).alias("a1")
t2 = table("t2", column("c1"))
q = delete(a1).where(a1.c.c1 == t2.c.c1)
self.assert_compile(
q, "DELETE FROM t1 AS a1 USING t2 WHERE a1.c1 = t2.c1"
)
class InsertOnConflictTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = postgresql.dialect()
def setup(self):
self.table1 = table1 = table(
"mytable",
column("myid", Integer),
column("name", String(128)),
column("description", String(128)),
)
md = MetaData()
self.table_with_metadata = Table(
"mytable",
md,
Column("myid", Integer, primary_key=True),
Column("name", String(128)),
Column("description", String(128)),
)
self.unique_constr = schema.UniqueConstraint(
table1.c.name, name="uq_name"
)
self.excl_constr = ExcludeConstraint(
(table1.c.name, "="),
(table1.c.description, "&&"),
name="excl_thing",
)
self.excl_constr_anon = ExcludeConstraint(
(self.table_with_metadata.c.name, "="),
(self.table_with_metadata.c.description, "&&"),
where=self.table_with_metadata.c.description != "foo",
)
self.goofy_index = Index(
"goofy_index", table1.c.name, postgresql_where=table1.c.name > "m"
)
def test_do_nothing_no_target(self):
i = insert(
self.table1, values=dict(name="foo")
).on_conflict_do_nothing()
self.assert_compile(
i,
"INSERT INTO mytable (name) VALUES "
"(%(name)s) ON CONFLICT DO NOTHING",
)
def test_do_nothing_index_elements_target(self):
i = insert(
self.table1, values=dict(name="foo")
).on_conflict_do_nothing(index_elements=["myid"])
self.assert_compile(
i,
"INSERT INTO mytable (name) VALUES "
"(%(name)s) ON CONFLICT (myid) DO NOTHING",
)
def test_do_update_set_clause_none(self):
i = insert(self.table_with_metadata).values(myid=1, name="foo")
i = i.on_conflict_do_update(
index_elements=["myid"],
set_=OrderedDict([("name", "I'm a name"), ("description", None)]),
)
self.assert_compile(
i,
"INSERT INTO mytable (myid, name) VALUES "
"(%(myid)s, %(name)s) ON CONFLICT (myid) "
"DO UPDATE SET name = %(param_1)s, "
"description = %(param_2)s",
{
"myid": 1,
"name": "foo",
"param_1": "I'm a name",
"param_2": None,
},
)
def test_do_update_set_clause_literal(self):
i = insert(self.table_with_metadata).values(myid=1, name="foo")
i = i.on_conflict_do_update(
index_elements=["myid"],
set_=OrderedDict(
[("name", "I'm a name"), ("description", null())]
),
)
self.assert_compile(
i,
"INSERT INTO mytable (myid, name) VALUES "
"(%(myid)s, %(name)s) ON CONFLICT (myid) "
"DO UPDATE SET name = %(param_1)s, "
"description = NULL",
{"myid": 1, "name": "foo", "param_1": "I'm a name"},
)
def test_do_update_str_index_elements_target_one(self):
i = insert(self.table_with_metadata).values(myid=1, name="foo")
i = i.on_conflict_do_update(
index_elements=["myid"],
set_=OrderedDict(
[
("name", i.excluded.name),
("description", i.excluded.description),
]
),
)
self.assert_compile(
i,
"INSERT INTO mytable (myid, name) VALUES "
"(%(myid)s, %(name)s) ON CONFLICT (myid) "
"DO UPDATE SET name = excluded.name, "
"description = excluded.description",
)
def test_do_update_str_index_elements_target_two(self):
i = insert(self.table1, values=dict(name="foo"))
i = i.on_conflict_do_update(
index_elements=["myid"], set_=dict(name=i.excluded.name)
)
self.assert_compile(
i,
"INSERT INTO mytable (name) VALUES "
"(%(name)s) ON CONFLICT (myid) "
"DO UPDATE SET name = excluded.name",
)
def test_do_update_col_index_elements_target(self):
i = insert(self.table1, values=dict(name="foo"))
i = i.on_conflict_do_update(
index_elements=[self.table1.c.myid],
set_=dict(name=i.excluded.name),
)
self.assert_compile(
i,
"INSERT INTO mytable (name) VALUES "
"(%(name)s) ON CONFLICT (myid) "
"DO UPDATE SET name = excluded.name",
)
def test_do_update_unnamed_pk_constraint_target(self):
i = insert(self.table_with_metadata, values=dict(myid=1, name="foo"))
i = i.on_conflict_do_update(
constraint=self.table_with_metadata.primary_key,
set_=dict(name=i.excluded.name),
)
self.assert_compile(
i,
"INSERT INTO mytable (myid, name) VALUES "
"(%(myid)s, %(name)s) ON CONFLICT (myid) "
"DO UPDATE SET name = excluded.name",
)
def test_do_update_pk_constraint_index_elements_target(self):
i = insert(self.table_with_metadata, values=dict(myid=1, name="foo"))
i = i.on_conflict_do_update(
index_elements=self.table_with_metadata.primary_key,
set_=dict(name=i.excluded.name),
)
self.assert_compile(
i,
"INSERT INTO mytable (myid, name) VALUES "
"(%(myid)s, %(name)s) ON CONFLICT (myid) "
"DO UPDATE SET name = excluded.name",
)
def test_do_update_named_unique_constraint_target(self):
i = insert(self.table1, values=dict(name="foo"))
i = i.on_conflict_do_update(
constraint=self.unique_constr, set_=dict(myid=i.excluded.myid)
)
self.assert_compile(
i,
"INSERT INTO mytable (name) VALUES "
"(%(name)s) ON CONFLICT ON CONSTRAINT uq_name "
"DO UPDATE SET myid = excluded.myid",
)
def test_do_update_string_constraint_target(self):
i = insert(self.table1, values=dict(name="foo"))
i = i.on_conflict_do_update(
constraint=self.unique_constr.name, set_=dict(myid=i.excluded.myid)
)
self.assert_compile(
i,
"INSERT INTO mytable (name) VALUES "
"(%(name)s) ON CONFLICT ON CONSTRAINT uq_name "
"DO UPDATE SET myid = excluded.myid",
)
def test_do_update_index_elements_where_target(self):
i = insert(self.table1, values=dict(name="foo"))
i = i.on_conflict_do_update(
index_elements=self.goofy_index.expressions,
index_where=self.goofy_index.dialect_options["postgresql"][
"where"
],
set_=dict(name=i.excluded.name),
)
self.assert_compile(
i,
"INSERT INTO mytable (name) VALUES "
"(%(name)s) ON CONFLICT (name) "
"WHERE name > %(name_1)s "
"DO UPDATE SET name = excluded.name",
)
def test_do_update_index_elements_where_target_multivalues(self):
i = insert(
self.table1,
values=[dict(name="foo"), dict(name="bar"), dict(name="bat")],
)
i = i.on_conflict_do_update(
index_elements=self.goofy_index.expressions,
index_where=self.goofy_index.dialect_options["postgresql"][
"where"
],
set_=dict(name=i.excluded.name),
)
self.assert_compile(
i,
"INSERT INTO mytable (name) "
"VALUES (%(name_m0)s), (%(name_m1)s), (%(name_m2)s) "
"ON CONFLICT (name) "
"WHERE name > %(name_1)s "
"DO UPDATE SET name = excluded.name",
checkparams={
"name_1": "m",
"name_m0": "foo",
"name_m1": "bar",
"name_m2": "bat",
},
)
def test_do_update_unnamed_index_target(self):
i = insert(self.table1, values=dict(name="foo"))
unnamed_goofy = Index(
None, self.table1.c.name, postgresql_where=self.table1.c.name > "m"
)
i = i.on_conflict_do_update(
constraint=unnamed_goofy, set_=dict(name=i.excluded.name)
)
self.assert_compile(
i,
"INSERT INTO mytable (name) VALUES "
"(%(name)s) ON CONFLICT (name) "
"WHERE name > %(name_1)s "
"DO UPDATE SET name = excluded.name",
)
def test_do_update_unnamed_exclude_constraint_target(self):
i = insert(self.table1, values=dict(name="foo"))
i = i.on_conflict_do_update(
constraint=self.excl_constr_anon, set_=dict(name=i.excluded.name)
)
self.assert_compile(
i,
"INSERT INTO mytable (name) VALUES "
"(%(name)s) ON CONFLICT (name, description) "
"WHERE description != %(description_1)s "
"DO UPDATE SET name = excluded.name",
)
def test_do_update_add_whereclause(self):
i = insert(self.table1, values=dict(name="foo"))
i = i.on_conflict_do_update(
constraint=self.excl_constr_anon,
set_=dict(name=i.excluded.name),
where=(
(self.table1.c.name != "brah")
& (self.table1.c.description != "brah")
),
)
self.assert_compile(
i,
"INSERT INTO mytable (name) VALUES "
"(%(name)s) ON CONFLICT (name, description) "
"WHERE description != %(description_1)s "
"DO UPDATE SET name = excluded.name "
"WHERE mytable.name != %(name_1)s "
"AND mytable.description != %(description_2)s",
)
def test_do_update_add_whereclause_references_excluded(self):
i = insert(self.table1, values=dict(name="foo"))
i = i.on_conflict_do_update(
constraint=self.excl_constr_anon,
set_=dict(name=i.excluded.name),
where=((self.table1.c.name != i.excluded.name)),
)
self.assert_compile(
i,
"INSERT INTO mytable (name) VALUES "
"(%(name)s) ON CONFLICT (name, description) "
"WHERE description != %(description_1)s "
"DO UPDATE SET name = excluded.name "
"WHERE mytable.name != excluded.name",
)
def test_do_update_additional_colnames(self):
i = insert(self.table1, values=dict(name="bar"))
i = i.on_conflict_do_update(
constraint=self.excl_constr_anon,
set_=dict(name="somename", unknown="unknown"),
)
with expect_warnings(
"Additional column names not matching any "
"column keys in table 'mytable': 'unknown'"
):
self.assert_compile(
i,
"INSERT INTO mytable (name) VALUES "
"(%(name)s) ON CONFLICT (name, description) "
"WHERE description != %(description_1)s "
"DO UPDATE SET name = %(param_1)s, "
"unknown = %(param_2)s",
checkparams={
"name": "bar",
"description_1": "foo",
"param_1": "somename",
"param_2": "unknown",
},
)
def test_on_conflict_as_cte(self):
i = insert(self.table1, values=dict(name="foo"))
i = (
i.on_conflict_do_update(
constraint=self.excl_constr_anon,
set_=dict(name=i.excluded.name),
where=((self.table1.c.name != i.excluded.name)),
)
.returning(literal_column("1"))
.cte("i_upsert")
)
stmt = select([i])
self.assert_compile(
stmt,
"WITH i_upsert AS "
"(INSERT INTO mytable (name) VALUES (%(name)s) "
"ON CONFLICT (name, description) "
"WHERE description != %(description_1)s "
"DO UPDATE SET name = excluded.name "
"WHERE mytable.name != excluded.name RETURNING 1) "
"SELECT i_upsert.1 "
"FROM i_upsert",
)
def test_quote_raw_string_col(self):
t = table("t", column("FancyName"), column("other name"))
stmt = (
insert(t)
.values(FancyName="something new")
.on_conflict_do_update(
index_elements=["FancyName", "other name"],
set_=OrderedDict(
[
("FancyName", "something updated"),
("other name", "something else"),
]
),
)
)
self.assert_compile(
stmt,
'INSERT INTO t ("FancyName") VALUES (%(FancyName)s) '
'ON CONFLICT ("FancyName", "other name") '
'DO UPDATE SET "FancyName" = %(param_1)s, '
'"other name" = %(param_2)s',
{
"param_1": "something updated",
"param_2": "something else",
"FancyName": "something new",
},
)
class DistinctOnTest(fixtures.TestBase, AssertsCompiledSQL):
"""Test 'DISTINCT' with SQL expression language and orm.Query with
an emphasis on PG's 'DISTINCT ON' syntax.
"""
__dialect__ = postgresql.dialect()
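    # For reference, PostgreSQL's DISTINCT ON keeps the first row of each
    # group of rows sharing the listed expressions; a plain-SQL sketch (not
    # itself part of the test suite):
    #   SELECT DISTINCT ON (a) id, a, b FROM t ORDER BY a, id;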
def setup(self):
self.table = Table(
"t",
MetaData(),
Column("id", Integer, primary_key=True),
Column("a", String),
Column("b", String),
)
def test_plain_generative(self):
self.assert_compile(
select([self.table]).distinct(),
"SELECT DISTINCT t.id, t.a, t.b FROM t",
)
def test_on_columns_generative(self):
self.assert_compile(
select([self.table]).distinct(self.table.c.a),
"SELECT DISTINCT ON (t.a) t.id, t.a, t.b FROM t",
)
def test_on_columns_generative_multi_call(self):
self.assert_compile(
select([self.table])
.distinct(self.table.c.a)
.distinct(self.table.c.b),
"SELECT DISTINCT ON (t.a, t.b) t.id, t.a, t.b FROM t",
)
def test_plain_inline(self):
self.assert_compile(
select([self.table], distinct=True),
"SELECT DISTINCT t.id, t.a, t.b FROM t",
)
def test_on_columns_inline_list(self):
self.assert_compile(
select(
[self.table], distinct=[self.table.c.a, self.table.c.b]
).order_by(self.table.c.a, self.table.c.b),
"SELECT DISTINCT ON (t.a, t.b) t.id, "
"t.a, t.b FROM t ORDER BY t.a, t.b",
)
def test_on_columns_inline_scalar(self):
self.assert_compile(
select([self.table], distinct=self.table.c.a),
"SELECT DISTINCT ON (t.a) t.id, t.a, t.b FROM t",
)
def test_literal_binds(self):
self.assert_compile(
select([self.table]).distinct(self.table.c.a == 10),
"SELECT DISTINCT ON (t.a = 10) t.id, t.a, t.b FROM t",
literal_binds=True,
)
def test_query_plain(self):
sess = Session()
self.assert_compile(
sess.query(self.table).distinct(),
"SELECT DISTINCT t.id AS t_id, t.a AS t_a, " "t.b AS t_b FROM t",
)
def test_query_on_columns(self):
sess = Session()
self.assert_compile(
sess.query(self.table).distinct(self.table.c.a),
"SELECT DISTINCT ON (t.a) t.id AS t_id, t.a AS t_a, "
"t.b AS t_b FROM t",
)
def test_query_on_columns_multi_call(self):
sess = Session()
self.assert_compile(
sess.query(self.table)
.distinct(self.table.c.a)
.distinct(self.table.c.b),
"SELECT DISTINCT ON (t.a, t.b) t.id AS t_id, t.a AS t_a, "
"t.b AS t_b FROM t",
)
def test_query_on_columns_subquery(self):
sess = Session()
class Foo(object):
pass
mapper(Foo, self.table)
sess = Session()
self.assert_compile(
sess.query(Foo).from_self().distinct(Foo.a, Foo.b),
"SELECT DISTINCT ON (anon_1.t_a, anon_1.t_b) anon_1.t_id "
"AS anon_1_t_id, anon_1.t_a AS anon_1_t_a, anon_1.t_b "
"AS anon_1_t_b FROM (SELECT t.id AS t_id, t.a AS t_a, "
"t.b AS t_b FROM t) AS anon_1",
)
def test_query_distinct_on_aliased(self):
class Foo(object):
pass
mapper(Foo, self.table)
a1 = aliased(Foo)
sess = Session()
self.assert_compile(
sess.query(a1).distinct(a1.a),
"SELECT DISTINCT ON (t_1.a) t_1.id AS t_1_id, "
"t_1.a AS t_1_a, t_1.b AS t_1_b FROM t AS t_1",
)
def test_distinct_on_subquery_anon(self):
sq = select([self.table]).alias()
q = (
select([self.table.c.id, sq.c.id])
.distinct(sq.c.id)
.where(self.table.c.id == sq.c.id)
)
self.assert_compile(
q,
"SELECT DISTINCT ON (anon_1.id) t.id, anon_1.id "
"FROM t, (SELECT t.id AS id, t.a AS a, t.b "
"AS b FROM t) AS anon_1 WHERE t.id = anon_1.id",
)
def test_distinct_on_subquery_named(self):
sq = select([self.table]).alias("sq")
q = (
select([self.table.c.id, sq.c.id])
.distinct(sq.c.id)
.where(self.table.c.id == sq.c.id)
)
self.assert_compile(
q,
"SELECT DISTINCT ON (sq.id) t.id, sq.id "
"FROM t, (SELECT t.id AS id, t.a AS a, "
"t.b AS b FROM t) AS sq WHERE t.id = sq.id",
)
class FullTextSearchTest(fixtures.TestBase, AssertsCompiledSQL):
"""Tests for full text searching
"""
__dialect__ = postgresql.dialect()
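    # For reference, Column.match() compiles on this dialect to PostgreSQL's
    # text-search operator, as the assertions below show; a plain-SQL
    # illustration (not itself a test):
    #   SELECT id FROM mytable WHERE title @@ to_tsquery('somestring');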
def setup(self):
self.table = Table(
"t",
MetaData(),
Column("id", Integer, primary_key=True),
Column("title", String),
Column("body", String),
)
self.table_alt = table(
"mytable",
column("id", Integer),
column("title", String(128)),
column("body", String(128)),
)
def _raise_query(self, q):
"""
        Useful for debugging; just call:
            self._raise_query(q)
"""
c = q.compile(dialect=postgresql.dialect())
raise ValueError(c)
def test_match_basic(self):
s = select([self.table_alt.c.id]).where(
self.table_alt.c.title.match("somestring")
)
self.assert_compile(
s,
"SELECT mytable.id "
"FROM mytable "
"WHERE mytable.title @@ to_tsquery(%(title_1)s)",
)
def test_match_regconfig(self):
s = select([self.table_alt.c.id]).where(
self.table_alt.c.title.match(
"somestring", postgresql_regconfig="english"
)
)
self.assert_compile(
s,
"SELECT mytable.id "
"FROM mytable "
"""WHERE mytable.title @@ to_tsquery('english', %(title_1)s)""",
)
def test_match_tsvector(self):
s = select([self.table_alt.c.id]).where(
func.to_tsvector(self.table_alt.c.title).match("somestring")
)
self.assert_compile(
s,
"SELECT mytable.id "
"FROM mytable "
"WHERE to_tsvector(mytable.title) "
"@@ to_tsquery(%(to_tsvector_1)s)",
)
def test_match_tsvectorconfig(self):
s = select([self.table_alt.c.id]).where(
func.to_tsvector("english", self.table_alt.c.title).match(
"somestring"
)
)
self.assert_compile(
s,
"SELECT mytable.id "
"FROM mytable "
"WHERE to_tsvector(%(to_tsvector_1)s, mytable.title) @@ "
"to_tsquery(%(to_tsvector_2)s)",
)
def test_match_tsvectorconfig_regconfig(self):
s = select([self.table_alt.c.id]).where(
func.to_tsvector("english", self.table_alt.c.title).match(
"somestring", postgresql_regconfig="english"
)
)
self.assert_compile(
s,
"SELECT mytable.id "
"FROM mytable "
"WHERE to_tsvector(%(to_tsvector_1)s, mytable.title) @@ "
"""to_tsquery('english', %(to_tsvector_2)s)""",
)
| 33.765383
| 79
| 0.541802
|
6b9652bbd9321cead43f0cbe1865831192766535
| 843
|
py
|
Python
|
scripts/data_migration.py
|
wlhong-allo/whitelistPy
|
b5add2e8b22b08cdd36a1a3c646afad35ced1c91
|
[
"MIT"
] | 1
|
2022-03-15T06:14:42.000Z
|
2022-03-15T06:14:42.000Z
|
scripts/data_migration.py
|
wlhong-allo/whitelistPy
|
b5add2e8b22b08cdd36a1a3c646afad35ced1c91
|
[
"MIT"
] | null | null | null |
scripts/data_migration.py
|
wlhong-allo/whitelistPy
|
b5add2e8b22b08cdd36a1a3c646afad35ced1c91
|
[
"MIT"
] | 1
|
2022-02-27T03:12:14.000Z
|
2022-02-27T03:12:14.000Z
|
"""
This file migrates the old json-format of backend data to the new sqlite format.
This is to not lose old data and maintain consistency in the DB
"""
from ..db import DB
import json
NEW_DB_NAME = "new_data.db"
OLD_JSON_FILE = "data.json"
db = DB(NEW_DB_NAME)
with open(OLD_JSON_FILE, 'r') as in_file:
old_data = json.load(in_file)
for server in old_data:
channel = old_data[server]["whitelist_channel"]
role = old_data[server]["whitelist_role"]
db.execute("INSERT INTO discord_server VALUES (?,?,?,?)",
               (int(server), None if channel is None else int(channel), None if role is None else int(role), old_data[server]["blockchain"]))
for user in old_data[server]['data']:
db.execute("INSERT INTO user VALUES (?,?,?)", (int(user), int(server), old_data[server]['data'][user]))
db.commit()
db.close()
| 35.125
| 135
| 0.689205
|
ea33e38be2a0295645abf5636074749b52c902f8
| 824
|
py
|
Python
|
neutron_dynamic_routing/extensions/bgp_4byte_asn.py
|
openstack/neutron-dynamic-routing
|
8a0ddf6051c81b982187bb062b194284398f5703
|
[
"Apache-2.0"
] | 26
|
2016-04-22T10:03:14.000Z
|
2021-04-20T12:23:06.000Z
|
neutron_dynamic_routing/extensions/bgp_4byte_asn.py
|
openstack/neutron-dynamic-routing
|
8a0ddf6051c81b982187bb062b194284398f5703
|
[
"Apache-2.0"
] | 1
|
2021-05-09T06:13:46.000Z
|
2021-05-09T06:13:47.000Z
|
neutron_dynamic_routing/extensions/bgp_4byte_asn.py
|
openstack/neutron-dynamic-routing
|
8a0ddf6051c81b982187bb062b194284398f5703
|
[
"Apache-2.0"
] | 9
|
2016-05-29T16:50:38.000Z
|
2021-09-15T14:19:12.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import bgp_4byte_asn as api_def
from neutron_lib.api import extensions as api_extensions
class Bgp_4byte_asn(api_extensions.APIExtensionDescriptor):
"""Extension class supporting bgp 4-byte AS numbers.
"""
api_definition = api_def
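# For reference: "4-byte" AS numbers (RFC 6793) widen the BGP ASN space from
# the original 16-bit range (0-65535) to the full 32-bit range (0-4294967295);
# this extension descriptor advertises support for that wider range.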
| 37.454545
| 75
| 0.775485
|
b39b1e05fb24ebeff3823a7f82672413663ed841
| 5,854
|
py
|
Python
|
chapter_3/chapter3_equations.py
|
p-giakoumoglou/numerical_analysis
|
1f2e23530972baae00e793b30bb66a5aa75df02b
|
[
"MIT"
] | null | null | null |
chapter_3/chapter3_equations.py
|
p-giakoumoglou/numerical_analysis
|
1f2e23530972baae00e793b30bb66a5aa75df02b
|
[
"MIT"
] | null | null | null |
chapter_3/chapter3_equations.py
|
p-giakoumoglou/numerical_analysis
|
1f2e23530972baae00e793b30bb66a5aa75df02b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Author: Paschalis Giakoumoglou
Date: Thu Jun 17 17:25:00 2021
"""
import matplotlib.pyplot as plt
import numpy as np
import math
# The sign check f(a)*f(b) < 0 below guarantees that a root exists in [a, b]
def partition(f, a, b, error):
""" 3.2 Partition (p. 32) """
if f(a)*f(b) < 0:
print(f"f({a}) = {f(a)}, f({b}) = {f(b)} => f({a})*f({b}) < 0")
print()
k = 1
while abs(b-a) >= error:
m = (a+b)/2
print("k = " , k)
print(f"[a{k},b{k}] = [{a}, {b}]")
print(f"m{k} = (a+b)/2 = ", m)
print(f"f(m{k}) = f({m}) =", f(m))
print(f"f(a{k}) = f({a}) =", f(a))
print(f"f(b{k}) = f({b}) =", f(b))
if f(a)*f(m) <= 0:
print(f"f(a{k})*f(m{k}) < 0")
b = m
else:
print(f"f(m{k})*f(b{k}) < 0")
a = m
k += 1
print(f"a{k} = ", a)
print(f"b{k} = ", b)
print()
else:
print("wrong input...")
def convPart(a, b, error):
h = b-a
    n = math.log10(h/error)/math.log10(2)
print("|r-c_n| = (1/2)^n*(b-a)")
print(f"|r-c_n| <= 10^{math.log10(error)}")
print(f"|r-c_n| = (1/2)^n*{h} = 10^{math.log10(error)}")
print(f"2^n = {h}/{error} => n*log2 = log({h}/10^{math.log10(error)}) = {math.log10(h/error)}")
print(f"n = {n} => n = {math.ceil(n)}")
def string(f, a, b, error):
""" 3.3 String (p. 36) """
if f(a)*f(b) < 0:
print(f"f({a}) = {f(a)}, f({b}) = {f(b)} => f({a})*f({b}) < 0")
print()
k = 1
c = 1
while abs(f(c)) >= error:
c = (a*f(b)-b*f(a)) / (f(b)-f(a))
print("k = " , k)
print(f"[a{k},b{k}] = [{a}, {b}]")
print(f"c{k} = (a{k}*f(b{k})-b*f(a{k})) / (f(b{k})-f(a{k})) = " , c)
print(f"f(c{k}) = ", f(c))
print(f"f(a{k}) = ", f(a))
print(f"f(b{k}) = ", f(b))
if f(a)*f(c) <= 0:
print(f"f(a{k})*f(c{k}) < 0")
b = c
else:
print(f"f(c{k})*f(b{k}) < 0")
a = c
k += 1
print(f"a{k} = ", a)
print(f"b{k} = ", b)
print()
else:
print("wrong input...")
def variable_string(f, a, b, error):
""" 3.4 Partition (p. 39) """
if f(a)*f(b) < 0:
print(f"f({a}) = {f(a)}, f({b}) = {f(b)} => f({a})*f({b}) < 0")
print()
k = 0
L = f(a)
R = f(b)
x = []
x.append(a)
print("R = f(b1) =", R)
print("L = f(a1) =", L)
print(f"x{k+1} = a1 ={x[k]}")
print()
while abs(b-a) >= error:
x.append((a*R-b*L)/(R-L))
print("k = " , k+1)
print(f"[a{k+1},b{k+1}] = [{a}, {b}]")
print(f"x{k+2} = (a{k+1}*R-b{k+1})*L/(R-L) ={x[k+1]}")
print(f"f(x{k+2}) = ", f(x[k+1]))
print(f"f(a{k+1}) = ", f(a))
print(f"f(b{k+1}) = ", f(b))
if f(a)*f(x[k+1]) <= 0:
print(f"f(a{k+1})*f(x{k+2}) < 0")
b = x[k+1]
R = f(x[k+1])
print("New R = ", R)
if f(x[k])*f(x[k+1]) > 0:
print(f"f(x{k+1})*f(x{k+2}) > 0")
L = L/2
print("New L = L/2 =", L)
else:
print(f"f(x{k+2})*f(b{k+1}) < 0")
a = x[k+1]
L = f(x[k+1])
print("New L = ", L)
if f(x[k])*f(x[k+1]) > 0:
print(f"f(x{k+1})*f(x{k+2}) < 0")
R = R/2
print("New R = R/2 = ", R)
k += 1
print(f"a{k+1} = ", a)
print(f"b{k+1} = ", b)
print()
else:
print("wrong input...")
def newton(f, f2, error, x0):
""" 3.5 Newton (p. 41) """
x = [x0]
k = 0
while True:
k+=1
x.append(x[k-1]-f(x[k-1])/f2(x[k-1]))
print("k = ", k)
print(f"f(x{k-1}) = f({x[k-1]}) = {f(x[k-1])}")
print(f"f'(x{k-1})= f'({x[k-1]}) = {f2(x[k-1])}")
print(f"x{k} = x{k-1} - f(x{k-1})/f'(x{k-1}) = {x[k]}")
#print(f"x{k} = {x[k]}")
#print(f"f({x[k]}) = {f(x[k])}")
print()
        if abs(x[k]-x[k-1]) < error:
break
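# Sanity check: for g(x) = x**6 - x - 1 started at x0 = 2 (see __main__
# below), the Newton iteration settles near the root x ~ 1.1347.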
def fixed_point(g, x0, error):
""" 3.6 Fixed Point (p. 44) """
k = 0
x = [x0]
print("k = ", k)
print("x[0] = ", x[k])
print(f"g[{x[k]}] = {g(x[k])}")
print()
while abs(x[k]-g(x[k])) > error:
k+=1
x.append(g(x[k-1]))
        print("k = ", k)
print(f"x[{k}] = {x[k]}")
print(f"g[{x[k]}] = {g(x[k])}")
print()
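# Sanity check: h(x) = sqrt(x + 2) has the fixed point x = 2
# (x = sqrt(x + 2) => x**2 - x - 2 = 0 => x = 2), which the iteration
# approaches from x0 = 0.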
if __name__ == "__main__":
def f1(x):
return x**3+x-1
def g(x):
return x**6-x-1
def g2(x):
return 6*x**5-1
def h(x):
return math.sqrt(x+2)
partition(f1, a=0, b=1, error=10**-3) # Example 3.2 (p.33)
convPart(a=0, b=1, error = 10**-5)
string(f1, a=0, b=1, error=10**-3)
variable_string(f1, a=0, b=1, error=10**-3)
fixed_point(h, x0=0, error=10**-2)
newton(g, g2, error=10**-10, x0=2)
print(g(1.1347))
    # Example 2020
def f(x):
return x**5+x-1
partition(f,0.5,1,1/8)
# Example 2021
def f(x):
return (20-x+math.sqrt(20-x))*(x+math.sqrt(x)) - 155.5
def f2(x):
a1 = (1/(2*math.sqrt(x))+1) * (-x+math.sqrt(20-x)+20)
a2 = (-1/(2*math.sqrt(20-x))-1) * (x+math.sqrt(x))
return a1 + a2
newton(f,f2,0.01,5.265)
| 30.489583
| 100
| 0.340109
|
0a2cd3ecf6d88a3869eb79d27c73d74903c84c97
| 70
|
py
|
Python
|
trimet_freq_vs_ridership/etl.py
|
hackoregon/transportation-systems-data-science
|
758b80eb5e15c7b864409bd0d970360379f42459
|
[
"MIT"
] | 4
|
2018-01-24T04:33:31.000Z
|
2019-10-31T06:33:11.000Z
|
trimet_freq_vs_ridership/etl.py
|
hackoregon/transportation-systems-data-science
|
758b80eb5e15c7b864409bd0d970360379f42459
|
[
"MIT"
] | 1
|
2018-02-07T04:00:12.000Z
|
2018-03-11T06:17:24.000Z
|
trimet_freq_vs_ridership/etl.py
|
hackoregon/transportation-systems-data-science
|
758b80eb5e15c7b864409bd0d970360379f42459
|
[
"MIT"
] | 1
|
2018-03-21T09:22:19.000Z
|
2018-03-21T09:22:19.000Z
|
def trimet():
return ('Trimet Ridership vs. Frequency Analysis.')
| 23.333333
| 55
| 0.7
|
1196ea8042e183eb33f6169da2593b752ac26c66
| 3,459
|
py
|
Python
|
tools/populate_dotenv.py
|
open-edge-insights/eii-azure-bridge
|
346da9d56be78c6e06a470dfbaf808d568427679
|
[
"MIT"
] | null | null | null |
tools/populate_dotenv.py
|
open-edge-insights/eii-azure-bridge
|
346da9d56be78c6e06a470dfbaf808d568427679
|
[
"MIT"
] | null | null | null |
tools/populate_dotenv.py
|
open-edge-insights/eii-azure-bridge
|
346da9d56be78c6e06a470dfbaf808d568427679
|
[
"MIT"
] | 2
|
2022-02-07T09:05:54.000Z
|
2022-03-17T04:32:50.000Z
|
# Copyright (c) 2020 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM,OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""Helper script to populate the .env file from the docker_setup/.env file.
"""
import os
import re
# List of variables which need to be populated from the parent .env file
VARS = [
'EII_VERSION',
'DEV_MODE',
'PROFILING_MODE',
'EII_USER_NAME',
'EII_UID',
'ETCD_HOST',
'ETCD_PREFIX',
'SOCKET_DIR',
'EII_INSTALL_PATH',
'DOCKER_REGISTRY',
'HOST_IP'
]
# Paths to .env files
parent_env_fn = os.path.join('..', 'build', '.env')
this_env_fn = '.env'
# Verify required files exist
assert os.path.exists(parent_env_fn), f'Cannot find {parent_env_fn} file'
assert os.path.exists(this_env_fn), f'Cannot find {this_env_fn}'
# Read .env files
with open(parent_env_fn, 'r') as f:
parent_env = list(f.readlines())
with open(this_env_fn, 'r') as f:
this_env = f.read()
# Parse needed values from the parent_env file
for var in VARS:
    # Default to empty so a variable missing from ../build/.env is set to
    # nothing instead of leaking a stale value from a previous iteration
    value = ''
    for line in parent_env:
        # Make sure the line starts with the variable name so we don't
        # accidentally match a comment in the ../build/.env file
        if line.startswith(var):
            # Get the value of the var
            res = re.findall(f'(?:{var}=)(.+|)', line)
            if len(res) > 0:
                value = res[0]
# Check if the variable already exists in the .env
if re.findall(f'{var}=(.+|)', this_env):
# Place the value in this_env
this_env = re.sub(
f'{var}=(.+|)',
f'{var}={value}',
this_env)
else:
this_env += f'\n{var}={value}'
if var == 'DOCKER_REGISTRY':
# Need to populate special variable for the Azure Container Registry
# which omits the "/" in the DOCKER_REGISTRY variable
# Check if the variable already exists in the .env
if re.findall(f'AZ_CONTAINER_REGISTRY=.+', this_env):
# Place the value in this_env
this_env = re.sub(
f'AZ_CONTAINER_REGISTRY=.+',
f'AZ_CONTAINER_REGISTRY={value[:-1]}',
this_env)
else:
this_env += f'\nAZ_CONTAINER_REGISTRY={value[:-1]}'
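# Illustration (hypothetical registry name): DOCKER_REGISTRY=myacr.azurecr.io/
# yields AZ_CONTAINER_REGISTRY=myacr.azurecr.io, since value[:-1] drops the
# trailing "/".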
# Write the new .env file contents for the Azure Bridge
with open(this_env_fn, 'w') as f:
f.write(this_env)
| 35.295918
| 79
| 0.648453
|
0b0ed46665a1d611ccb387df3307a6fb97296bd9
| 192
|
py
|
Python
|
Python-Hackerrank/Set .discard(), .remove() & .pop().py
|
nihalkhan2810/Data-Structures-Algorithms
|
3224fba18dcdbb1c6cc3d7f7a16df4ed0e0a321d
|
[
"MIT"
] | 10
|
2020-05-02T14:42:15.000Z
|
2021-01-26T16:51:47.000Z
|
Python-Hackerrank/Set .discard(), .remove() & .pop().py
|
nihalkhan2810/Data-Structures-Algorithms
|
3224fba18dcdbb1c6cc3d7f7a16df4ed0e0a321d
|
[
"MIT"
] | null | null | null |
Python-Hackerrank/Set .discard(), .remove() & .pop().py
|
nihalkhan2810/Data-Structures-Algorithms
|
3224fba18dcdbb1c6cc3d7f7a16df4ed0e0a321d
|
[
"MIT"
] | 13
|
2020-03-05T13:31:11.000Z
|
2021-01-29T08:14:26.000Z
|
# HackerRank: read a set of n integers, then apply t set commands
# (e.g. "pop", "remove 9", "discard 9") and print the sum of what remains.
n = int(input())
s = set(map(int, input().split()))
t = int(input())
for i in range(t):
    # Each command line is a method name plus optional integer arguments;
    # dispatch it onto the set via getattr.
    c, *args = input().split()
    getattr(s, c)(*(int(x) for x in args))
print(sum(s))
| 14.769231
| 42
| 0.536458
|
8e2e1cf6d8ca98a62f778e0f37263e4179368a90
| 4,842
|
py
|
Python
|
nabs/utils.py
|
tangkong/nabs
|
06d21428cddaa3f2ec6a31150f9d4758f5a7c2c5
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
nabs/utils.py
|
tangkong/nabs
|
06d21428cddaa3f2ec6a31150f9d4758f5a7c2c5
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
nabs/utils.py
|
tangkong/nabs
|
06d21428cddaa3f2ec6a31150f9d4758f5a7c2c5
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
import inspect
import multiprocessing as mp
import numbers
import traceback
from typing import Any, Callable, Dict, Union
import numpy as np
from ophyd.signal import DerivedSignal, SignalRO
class InvertedSignal(DerivedSignal):
"""
Invert another `ophyd` Signal
Parameters
----------
derived_from: `ophyd.signal.Signal`
Ophyd Signal
"""
def __init__(self, derived_from, *, name=None, **kwargs):
# Create a name if None is given
if not name:
name = derived_from.name + '_inverted'
# Initialize the DerivedSignal
super().__init__(derived_from, name=name, **kwargs)
def forward(self, value):
"""Invert the value"""
return -value
def inverse(self, value):
"""Invert the value"""
return -value
def trigger(self):
return self.derived_from.trigger()
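# Minimal usage sketch for InvertedSignal (the Signal value here is made up):
#
#     from ophyd.signal import Signal
#     raw = Signal(name='raw', value=3.0)
#     inv = InvertedSignal(raw)
#     inv.get()  # -> -3.0, via inverse()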
class ErrorSignal(SignalRO, DerivedSignal):
"""
Signal that reports the absolute error from a provided target
Parameters
----------
derived_from : `ophyd.signal.Signal`
target : float
Position of zero error
"""
def __init__(self, derived_from, target, *, name=None, **kwargs):
# Create a name if None is given
if not name:
name = derived_from.name + '_error'
# Store the target
self.target = target
# Initialize the DerivedSignal
super().__init__(derived_from, name=name, **kwargs)
    def forward(self, value):
        """Forward transform is not supported for this read-only signal"""
        return NotImplemented
    def inverse(self, value):
        """Return the absolute error of the value from the target"""
        return abs(value - self.target)
def trigger(self):
return self.derived_from.trigger()
def add_named_kwargs_to_signature(
func_or_signature: Union[inspect.Signature, Callable],
kwargs: Dict[str, Any]
) -> inspect.Signature:
"""
Add named keyword arguments with default values to a function signature.
Parameters
----------
func_or_signature : inspect.Signature or callable
The function or signature.
kwargs : dict
The dictionary of kwarg_name to default_value.
Returns
-------
modified_signature : inspect.Signature
The modified signature with additional keyword arguments.
"""
if isinstance(func_or_signature, inspect.Signature):
sig = func_or_signature
else:
sig = inspect.signature(func_or_signature)
params = list(sig.parameters.values())
keyword_only_indices = [
idx for idx, param in enumerate(params)
if param.kind == inspect.Parameter.KEYWORD_ONLY
]
if not keyword_only_indices:
start_params, end_params = params, []
else:
insert_at = keyword_only_indices[0]
start_params, end_params = params[:insert_at], params[insert_at:]
wrapper_params = list(
inspect.Parameter(
name, kind=inspect.Parameter.KEYWORD_ONLY, default=value
)
for name, value in kwargs.items()
if name not in sig.parameters
)
return sig.replace(parameters=start_params + wrapper_params + end_params)
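# Minimal usage sketch (hypothetical function):
#
#     def f(a, *, b=1):
#         ...
#
#     add_named_kwargs_to_signature(f, {'c': 2})
#     # -> <Signature (a, *, c=2, b=1)>; names already in the signature
#     # are skipped.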
class Process(mp.Process):
"""
A subclass of multiprocessing.Process that makes exceptions
accessible by the parent process.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._pconn, self._cconn = mp.Pipe()
self._exception = None
def run(self):
try:
super().run()
self._cconn.send(None)
except Exception as e:
tb = traceback.format_exc()
self._cconn.send((e, tb))
def join_and_raise(self):
super().join()
# raise exceptions after process is finished
if self.exception:
raise self.exception[0]
@property
def exception(self):
if self._pconn.poll():
self._exception = self._pconn.recv()
return self._exception
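# Minimal usage sketch (hypothetical target function):
#
#     def work():
#         raise RuntimeError('boom')
#
#     p = Process(target=work)
#     p.start()
#     p.join_and_raise()  # re-raises the child's RuntimeError in the parent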
def orange(start, stop, num, rtol=1.e-5, atol=1.e-7):
"""
Get scan points based on the type of `num`. If `num` is an
integer, interpret as the number of points in a scan. If `num`
is a float, interpret it as a step size.
Modified to include end points.
Parameters
----------
start : int or float
The first point in the scan
    stop : int or float
The last point in the scan
num : int or float
if int, the number of points in the scan.
if float, step size
Returns
-------
list
a list of scan points
"""
moves = []
if isinstance(num, numbers.Integral):
moves = list(np.linspace(start, stop, num))
elif isinstance(num, numbers.Real):
num = np.sign(stop - start) * np.abs(num)
moves = list(np.arange(start, stop + num, num))
return moves
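# Quick check of both branches: orange(0, 1, 5) -> [0.0, 0.25, 0.5, 0.75, 1.0]
# (integer num = point count), while orange(0, 1, 0.5) -> [0.0, 0.5, 1.0]
# (float num = step size, end point included).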
| 26.459016
| 77
| 0.620818
|
e072e9d44ada055d1f7443e09d4c9535b9900b3b
| 20,476
|
py
|
Python
|
test/codes_tests/test_fortran_sockets_implementation.py
|
egpbos/amuse
|
64b3bc5b7fef9496012b023578c4d71cecef92b7
|
[
"Apache-2.0"
] | null | null | null |
test/codes_tests/test_fortran_sockets_implementation.py
|
egpbos/amuse
|
64b3bc5b7fef9496012b023578c4d71cecef92b7
|
[
"Apache-2.0"
] | null | null | null |
test/codes_tests/test_fortran_sockets_implementation.py
|
egpbos/amuse
|
64b3bc5b7fef9496012b023578c4d71cecef92b7
|
[
"Apache-2.0"
] | null | null | null |
from amuse.test.amusetest import TestWithMPI
import subprocess
import os
import time
import shlex
from amuse.units import nbody_system
from amuse.units import units
from amuse import datamodel
from amuse.rfi.tools import create_fortran
from amuse.rfi import channel
from amuse.rfi.core import *
codestring = """
function echo_int(int_in, int_out)
implicit none
integer :: int_in, int_out
integer :: echo_int
int_out = int_in
echo_int = 0
end function
function echo_double(double_in, double_out)
implicit none
DOUBLE PRECISION :: double_in, double_out
integer :: echo_double
double_out = double_in
echo_double = 0
end function
function echo_float(float_in, float_out)
implicit none
REAL(kind=4) :: float_in, float_out
integer :: echo_float
float_out = float_in
echo_float = 0
end function
function echo_string(string_in, string_out)
implicit none
character(len=*) :: string_in, string_out
integer :: echo_string
string_out = string_in
echo_string = 0
end function
function echo_strings(string_inout1, string_inout2)
implicit none
character(len=*) :: string_inout1, string_inout2
integer :: echo_strings
string_inout1(1:1) = 'A'
string_inout2(1:1) = 'B'
echo_strings = 0
end function
function return_string(string_in)
implicit none
character(len=*) :: string_in, return_string
return_string = string_in
end function
function hello_string(string_out)
implicit none
character(len=30) :: string_out
integer :: hello_string
string_out = 'hello'
hello_string = 0
end function
function print_string(string_in)
implicit none
character(len=*) :: string_in
integer :: print_string
write (*,*) string_in
print_string = 0
end function
function print_error_string(string_in)
implicit none
character(len=*) :: string_in
integer :: print_error_string
write (0,*) string_in
print_error_string = 0
end function
function echo_string_fixed_len(string_in, string_out)
implicit none
character(len=30) :: string_in, string_out
integer :: echo_string_fixed_len
string_out = string_in
echo_string_fixed_len = 0
end function
function echo_array_with_result(int_in, int_out, N)
implicit none
integer, intent(in) :: N
integer :: int_in(N), int_out(N)
integer :: echo_array_with_result, i
do i = 1, N
int_out(i) = int_in(i)
end do
echo_array_with_result = -1
end function
function echo_inout_array_with_result(inout, N)
implicit none
integer, intent(in) :: N
integer :: inout(N)
integer :: echo_inout_array_with_result, i
do i = 1, N
inout(i) = inout(i) + 10
end do
echo_inout_array_with_result = 11;
end function
function echo_logical(input, output)
implicit none
logical :: input, output
integer :: echo_logical
output = input
print *, "INPUT=", input
echo_logical = 0
end function
"""
class ForTestingInterface(CodeInterface):
def __init__(self, exefile, **options):
CodeInterface.__init__(self, exefile, **options)
@legacy_function
def echo_int():
function = LegacyFunctionSpecification()
function.addParameter('int_in', dtype='int32', direction=function.IN)
function.addParameter('int_out', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
function.can_handle_array = True
return function
@legacy_function
def echo_double():
function = LegacyFunctionSpecification()
function.addParameter('double_in', dtype='float64', direction=function.IN)
function.addParameter('double_out', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.can_handle_array = True
return function
@legacy_function
def echo_float():
function = LegacyFunctionSpecification()
function.addParameter('float_in', dtype='float32', direction=function.IN)
function.addParameter('float_out', dtype='float32', direction=function.OUT)
function.result_type = 'int32'
function.can_handle_array = True
return function
@legacy_function
def echo_string():
function = LegacyFunctionSpecification()
function.addParameter('string_in', dtype='string', direction=function.IN)
function.addParameter('string_out', dtype='string', direction=function.OUT)
function.result_type = 'int32'
function.can_handle_array = True
return function
@legacy_function
def echo_strings():
function = LegacyFunctionSpecification()
function.addParameter('string_inout1', dtype='string', direction=function.INOUT)
function.addParameter('string_inout2', dtype='string', direction=function.INOUT)
function.result_type = 'int32'
function.can_handle_array = True
return function
@legacy_function
def return_string():
function = LegacyFunctionSpecification()
function.addParameter('string_in', dtype='string', direction=function.IN)
function.result_type = 'string'
function.can_handle_array = True
return function
@legacy_function
def hello_string():
function = LegacyFunctionSpecification()
function.addParameter('string_out', dtype='string', direction=function.OUT)
function.result_type = 'int32'
function.can_handle_array = True
return function
@legacy_function
def echo_string_fixed_len():
function = LegacyFunctionSpecification()
function.addParameter('string_in', dtype='string', direction=function.IN)
function.addParameter('string_out', dtype='string', direction=function.OUT)
function.result_type = 'int32'
function.can_handle_array = True
return function
@legacy_function
def echo_array_with_result():
function = LegacyFunctionSpecification()
function.addParameter('int_in', dtype='int32', direction=function.IN)
function.addParameter('int_out', dtype='int32', direction=function.OUT)
function.addParameter('len', dtype='int32', direction=function.LENGTH)
function.result_type = 'int32'
function.must_handle_array = True
return function
@legacy_function
def echo_inout_array_with_result():
function = LegacyFunctionSpecification()
function.addParameter('in_out', dtype='int32', direction=function.INOUT)
function.addParameter('len', dtype='int32', direction=function.LENGTH)
function.result_type = 'int32'
function.must_handle_array = True
return function
@legacy_function
def echo_logical():
function = LegacyFunctionSpecification()
function.addParameter('input', dtype='bool', direction=function.IN)
function.addParameter('output', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
function.can_handle_array = True
return function
@legacy_function
def print_string():
function = LegacyFunctionSpecification()
function.addParameter('string_in', dtype='string', direction=function.IN)
function.result_type = 'int32'
function.can_handle_array = True
return function
@legacy_function
def print_error_string():
function = LegacyFunctionSpecification()
function.addParameter('string_in', dtype='string', direction=function.IN)
function.result_type = 'int32'
function.can_handle_array = True
return function
class TestInterface(TestWithMPI):
    def is_fortran_version_up_to_date(self):
try:
from amuse import config
is_configured = hasattr(config, 'compilers')
if is_configured:
is_configured = hasattr(config.compilers, 'gfortran_version')
except ImportError:
is_configured = False
if is_configured:
if not config.compilers.gfortran_version:
if not hasattr(config.compilers, 'ifort_version') or not config.compilers.ifort_version:
return True
try:
parts = [int(x) for x in config.compilers.ifort_version.split('.')]
except:
parts = []
return parts[0] > 9
try:
parts = [int(x) for x in config.compilers.gfortran_version.split('.')]
except:
parts = []
if len(parts) < 2:
return True
return parts[0] >= 4 and parts[1] >= 3
else:
return True
def get_mpif90_name(self):
try:
from amuse import config
is_configured = hasattr(config, 'mpi')
except ImportError:
is_configured = False
if is_configured:
return config.mpi.mpif95
else:
return os.environ['MPIFC'] if 'MPIFC' in os.environ else 'mpif90'
def get_mpif90_arguments(self):
name = self.get_mpif90_name()
return list(shlex.split(name))
def wait_for_file(self, filename):
for dt in [0.01, 0.01, 0.02, 0.05]:
if os.path.exists(filename):
return
time.sleep(dt)
def fortran_compile(self, objectname, string):
if os.path.exists(objectname):
os.remove(objectname)
root, ext = os.path.splitext(objectname)
sourcename = root + '.f90'
with open(sourcename, "w") as f:
f.write(string)
arguments = self.get_mpif90_arguments()
arguments.extend(["-g", "-I{0}/lib/forsockets".format(self.get_amuse_root_dir()), "-c", "-o" , objectname, sourcename])
process = subprocess.Popen(
arguments,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE
)
stdout, stderr = process.communicate()
process.wait()
if process.returncode == 0:
self.wait_for_file(objectname)
if process.returncode != 0 or not os.path.exists(objectname):
print "Could not compile {0}, error = {1}".format(objectname, stderr)
raise Exception("Could not compile {0}, error = {1}".format(objectname, stderr))
def fortran_build(self, exename, objectnames):
if os.path.exists(exename):
os.remove(exename)
arguments = self.get_mpif90_arguments()
arguments.extend(objectnames)
arguments.append("-o")
arguments.append(exename)
arguments.append("-L{0}/lib/forsockets".format(self.get_amuse_root_dir()))
arguments.append("-lforsockets")
process = subprocess.Popen(
arguments,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE
)
stdout, stderr = process.communicate()
if process.returncode == 0:
self.wait_for_file(exename)
if process.returncode != 0 or not os.path.exists(exename):
print "Could not build {0}, error = {1}".format(exename, stderr)
raise Exception("Could not build {0}, error = {1}".format(exename, stderr))
def build_worker(self):
path = os.path.abspath(self.get_path_to_results())
codefile = os.path.join(path,"code-sockets.o")
interfacefile = os.path.join(path,"interface-sockets.o")
self.exefile = os.path.join(path,"fortran_worker")
self.fortran_compile(codefile, codestring)
uc = create_fortran.GenerateAFortranSourcecodeStringFromASpecificationClass()
uc.specification_class = ForTestingInterface
string = uc.result
self.fortran_compile(interfacefile, string)
self.fortran_build(self.exefile, [interfacefile, codefile] )
def setUp(self):
super(TestInterface, self).setUp()
print "building"
self.check_can_compile_modules()
self.check_fortran_version()
self.check_not_in_mpiexec()
self.build_worker()
def check_fortran_version(self):
        if not self.is_fortran_version_up_to_date():
self.skip('cannot compile fortran socket modules with old fortran compilers (missing C support)')
def check_not_in_mpiexec(self):
"""
        The tests will fork another process; if the test run is itself an
        MPI process, the tests may fail. Under the hydra process manager
        the tests will fail, so skip the tests if we detect hydra.
"""
if 'HYDI_CONTROL_FD' in os.environ:
return # can run in modern mpiexec.hydra
if 'HYDRA_CONTROL_FD' in os.environ or 'PMI_FD' in os.environ:
self.skip('cannot run the socket tests under hydra process manager')
def test1(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
int_out, error = instance.echo_int(10)
del instance
self.assertEquals(int_out, 10)
self.assertEquals(error, 0)
def test2(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
out, error = instance.echo_double(4.0)
del instance
self.assertEquals(out, 4.0)
self.assertEquals(error, 0)
def test3(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
input = [1,2,3,4]
output, errors = instance.echo_int(input)
del instance
self.assertEquals(len(errors),4)
for actual, expected in zip(output, input):
self.assertEquals(actual, expected)
def test4(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
input = [1.0,2.1,3.3,4.2]
output, errors = instance.echo_double(input)
del instance
self.assertEquals(len(errors),4)
for actual, expected in zip(output, input):
self.assertEquals(actual, expected)
def test5(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
out, error = instance.echo_float(4.0)
del instance
self.assertEquals(out, 4.0)
self.assertEquals(error, 0)
def test6(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
out, error = instance.echo_string("abc")
del instance
self.assertEquals(error, 0)
self.assertEquals(out, "abc")
def test7(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
out, error = instance.echo_string(["abc","def"])
del instance
self.assertEquals(error[0], 0)
self.assertEquals(error[1], 0)
self.assertEquals(out[0], "abc")
self.assertEquals(out[1], "def")
def test8(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
out1, out2, error = instance.echo_strings("abc","def")
del instance
self.assertEquals(error, 0)
self.assertEquals(out1, "Abc")
self.assertEquals(out2, "Bef")
def test9(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
str1_out, str2_out, error = instance.echo_strings(["abc", "def"], ["ghi", "jkl"])
del instance
self.assertEquals(error[0], 0)
self.assertEquals(error[1], 0)
self.assertEquals(str1_out[0], "Abc")
self.assertEquals(str1_out[1], "Aef")
self.assertEquals(str2_out[0], "Bhi")
self.assertEquals(str2_out[1], "Bkl")
def test10(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
out = instance.return_string("abc")
del instance
self.assertEquals(out, "abc")
def test11(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
out, error = instance.hello_string()
del instance
self.assertEquals(out, "hello")
def test12(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
out, error = instance.echo_string_fixed_len("abc")
del instance
self.assertEquals(error, 0)
self.assertEquals(out, "abc")
def test13(self):
instance = ForTestingInterface(self.exefile, debugger="none", channel_type="sockets")
(output_ints, error) = instance.echo_array_with_result([4,5,6])
instance.stop()
        print(output_ints, error)
self.assertEquals(output_ints[0], 4)
self.assertEquals(output_ints[1], 5)
self.assertEquals(output_ints[2], 6)
self.assertEquals(error[0], -1)
self.assertEquals(error[1], -1)
self.assertEquals(error[2], -1)
def test14(self):
for x in range(4):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
int_out, error = instance.echo_int(10)
instance.stop()
self.assertEquals(int_out, 10)
self.assertEquals(error, 0)
def test15(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
(output_ints, error) = instance.echo_inout_array_with_result([4,5,6])
instance.stop()
self.assertEquals(output_ints[0], 14)
self.assertEquals(output_ints[1], 15)
self.assertEquals(output_ints[2], 16)
self.assertEquals(error[0], 11)
self.assertEquals(error[1], 11)
self.assertEquals(error[2], 11)
def test16(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
(output1, error1) = instance.echo_logical(True)
(output2, error2) = instance.echo_logical(False)
instance.stop()
self.assertEquals(error1, 0)
self.assertEquals(error2, 0)
self.assertTrue(output1)
self.assertFalse(output2)
def xtest20(self):
#
# TURNED OFF support for redirection,
# by default output is redirected to /dev/null
# if you need file, use the support from your mpi implementation
#
if os.path.exists("pout.000"):
os.remove("pout.000")
if os.path.exists("perr.000"):
os.remove("perr.000")
x = ForTestingInterface(self.exefile, redirect_stderr_file = 'perr', redirect_stdout_file = 'pout', redirection="file", channel_type="sockets")
x.print_string("abc")
x.print_error_string("exex")
x.stop()
time.sleep(0.2)
self.assertTrue(os.path.exists("pout.000"))
with open("pout.000","r") as f:
content = f.read()
self.assertEquals(content.strip(), "abc")
self.assertTrue(os.path.exists("perr.000"))
with open("perr.000","r") as f:
content = f.read()
self.assertEquals(content.strip(), "exex")
x = ForTestingInterface(self.exefile, redirect_stderr_file = 'perr', redirect_stdout_file = 'pout', redirection="file", channel_type="sockets")
x.print_string("def")
x.print_error_string("exex")
x.stop()
time.sleep(0.2)
self.assertTrue(os.path.exists("pout.000"))
with open("pout.000","r") as f:
content = f.read()
self.assertEquals(content.strip(), "abc\n def")
self.assertTrue(os.path.exists("perr.000"))
with open("perr.000","r") as f:
content = f.read()
self.assertEquals(content.strip(), "exex\n exex")
| 32.144427
| 151
| 0.613694
|
f594af1b5c0755ca007d63d71c2307018c4e3b24
| 38,407
|
py
|
Python
|
Archive/Other tests/Yale pulse frequency analysis.py
|
JohanWinther/cat-state-encoding
|
3fa95c5c9d9d223e4b9fbc38fe5e27a46d0d12ef
|
[
"MIT"
] | 3
|
2020-02-10T01:53:29.000Z
|
2022-01-13T09:23:40.000Z
|
Archive/Other tests/Yale pulse frequency analysis.py
|
JohanWinther/cat-state-encoding
|
3fa95c5c9d9d223e4b9fbc38fe5e27a46d0d12ef
|
[
"MIT"
] | null | null | null |
Archive/Other tests/Yale pulse frequency analysis.py
|
JohanWinther/cat-state-encoding
|
3fa95c5c9d9d223e4b9fbc38fe5e27a46d0d12ef
|
[
"MIT"
] | 1
|
2021-07-31T08:55:43.000Z
|
2021-07-31T08:55:43.000Z
|
# coding: utf-8
# # Fourier analysis of the encoding pulses produced by the [Yale group](https://www.nature.com/articles/nature18949)
# In[1]:
import matplotlib.pyplot as plt
import numpy as np
def normalisePoints(x,y,x_max):
x = x - x[9]
x = x * (x_max/x[-1])
y = y - y[0]
y = y/np.max(y)
return (x,y)
def plotPulsesTimeDomain(pulses, title):
fig1, ax = plt.subplots(1,1, figsize=(15,5))
ax.plot(pulses[0],pulses[1],'r')
ax.plot(pulses[2],pulses[3],'b')
ax.set_xlabel('Time (ns)')
ax.set_ylabel('Amp')
ax.set_title(title)
return fig1, ax
def plotPulsesPhaseSpace(pulses, title):
fig1, ax = plt.subplots(1,1, figsize=(5,5))
ax.plot(pulses[1],pulses[3],'k')
ax.set_xlabel('I')
ax.set_ylabel('Q')
ax.set_title(title)
return fig1, ax
# Resonator pulse
I_x = np.array([33.9192963,34.1278992,34.3365021,34.5451965,34.7537994,34.9624023,35.1710052,35.3796997,35.5883026,35.7969,36.0054932,36.2142,36.4228,36.84,37.0487,37.2573,37.4659,37.6745,37.8832,38.0918,38.3004,38.509,38.7177,38.9263,39.1349,39.3436,39.7608,39.9694,40.1781,40.3867,40.5953,40.8039,41.0126,41.2212,41.4298,41.6384,41.8471,42.0557,42.2643,42.4729,42.8902,43.0988,43.3074,43.5161,43.7247,43.9333,44.142,44.3506,44.5592,44.7678,44.9765,45.1851,45.3937,45.811,46.0196,46.2282,46.4368,46.6455,46.8541,47.0627,47.2713,47.48,47.6886,47.8972,48.1059,48.3145,48.7317,48.9404,49.149,49.3576,49.5662,49.7749,49.9835,50.1921,50.4007,50.6094,50.818,51.0266,51.2352,51.4439,51.8611,52.0697,52.2784,52.487,52.6956,52.9042,53.1129,53.3215,53.5301,53.7388,53.9474,54.156,54.3646,54.7819,54.9905,55.1991,55.4078,55.6164,55.825,56.0336,56.2423,56.4509,56.6595,56.8681,57.0768,57.2854,57.7026,57.9113,58.1199,58.3285,58.5371,58.7458,58.9544,59.163,59.3716,59.5803,59.7889,59.9975,60.2061,60.4148,60.832,61.0406,61.2493,61.4579,61.6665,61.8752,62.0838,62.2924,62.501,62.7097,62.9183,63.1269,63.3355,63.7528,63.9614,64.17,64.3787,64.5873,64.7959,65.0045,65.2132,65.4218,65.6304,65.8391,66.0477,66.2563,66.4649,66.8822,67.0908,67.2994,67.5081,67.7167,67.9253,68.1339,68.3426,68.5512,68.7598,68.9684,69.1771,69.3857,69.8029,70.0116,70.2202,70.4288,70.6375,70.8461,71.0547,71.2633,71.472,71.6806,71.8892,72.0978,72.3065,72.7237,72.9323,73.141,73.3496,73.5582,73.7668,73.9755,74.1841,74.3927,74.6013,74.81,75.0186,75.2272,75.4359,75.8531,76.0617,76.2704,76.479,76.6876,76.8962,77.1049,77.3135,77.5221,77.7307,77.9394,78.148,78.3566,78.7739,78.9825,79.1911,79.3997,79.6084,79.817,80.0256,80.2343,80.4429,80.6515,80.8601,81.0688,81.2774,81.6946,81.9033,82.1119,82.3205,82.5291,82.7378,82.9464,83.155,83.3636,83.5723,83.7809,83.9895,84.1982,84.4068,84.824,85.0327,85.2413,85.4499,85.6585,85.8672,86.0758,86.2844,86.493,86.7017,86.9103,87.1189,87.3275,87.7448,87.9534,88.162,88.3707,88.5793,88.7879,88.9965,89.2052,89.4138,89.6224,89.8311,90.0397,90.2483,90.6656,90.8742,91.0828,91.2914,91.5001,91.7087,91.9173,92.1259,92.3346,92.5432,92.7518,92.9604,93.1691,93.3777,93.7949,94.0036,94.2122,94.4208,94.6294,94.8381,95.0467,95.2553,95.4639,95.6726,95.8812,96.0898,96.2984,96.7157,96.9243,97.1329,97.3416,97.5502,97.7588,97.9675,98.1761,98.3847,98.5933,98.802,99.0106,99.2192,99.4278,99.8451,100.0537,100.2623,100.471,100.6796,100.8882,101.0968,101.3055,101.5141,101.7227,101.9314,102.14,102.3489,102.7661,102.9748,103.1834,103.392,103.6006,103.8093,104.0179,104.2265,104.4352,104.6438,104.8524,105.061,105.2697,105.6869,105.8955,106.1042,106.3128,106.5214,106.73,106.9387,107.1473,107.3559,107.5645,107.7732,107.9818,108.1904,108.399,108.8163,109.0249,109.2336,109.4422,109.6508,109.8594,110.0681,110.2767,110.4853,110.6939,110.9026,111.1112,111.3198,111.7371,111.9457,112.1543,112.3629,112.5716,112.7802,112.9888,113.1974,113.4061,113.6147,113.8233,114.0319,114.2406,114.6578,114.8664,115.0751,115.2837,115.4923,115.7009,115.9096,116.1182,116.3268,116.5354,116.7441,116.9527,117.1613,117.3699,117.7872,117.9958,118.2045,118.4131,118.6217,118.8303,119.039,119.2476,119.4562,119.6648,119.8735,120.0821,120.2907,120.708,120.9166,121.1252,121.3338,121.5425,121.7511,121.9597,122.1684,122.377,122.5856,122.7942,123.0029,123.2115,123.4201,123.8374,124.046,124.2546,124.4632,124.6719,124.8805,125.0891,125.2977,125.5064,125.715,125.9236,126.1322,126.3409,126.7581,126.9668,127.1754,127.384,127.5926,127.8013,128.0099,128.2185,128.4271,128.6357,128.8444,129.053,129.2616,129.6789,129.8875,130.0961,130.3047,130.5134,130.722,130.9306,131.1393,131.3479,131.5565,131.7651,131.9738,132.1824,132.391,132.8083,133.0169,133.2255,133.4341,133.6428,133.8514,134.06,134.2686,134.4773,134.6859,134.8945,135.1032,135.3118,135.729,135.9377,136.1463,136.3549,136.5635,136.7722,136.9808,137.1894,137.398,137.6067,137.8153,138.0239,138.2325,138.6498,138.8584,139.067,139.2757,139.4843,139.6929,139.9016,140.1102,140.3188,140.5274,140.7361,140.9447,141.1533,141.3619,141.7792,141.9878,142.1964,142.4051,142.6137,142.8223,143.0309,143.2396,143.4482,143.6568,143.8654,144.0741,144.2827,144.7,144.9086,145.1172,145.3258,145.5345,145.7431,145.9517,146.1603,146.369,146.5776,146.7862,146.9948,147.2035])
I_y = np.array([351.6132965,351.6132965,351.6132965,351.6132965,351.6132965,351.6132965,351.6132965,351.6132965,352.6564941,354.7427,357.6634979,361.0015,364.1309,367.2603,369.7638,372.0587,373.9363,375.3967,376.2312,376.8571,377.0657,376.8571,376.4398,376.0226,375.1881,374.3536,373.5191,372.6846,371.8501,370.8069,369.9724,369.1379,368.5121,367.6776,367.0517,366.2172,365.5913,364.9654,364.3395,363.7137,363.0878,362.2533,361.6274,361.0015,360.167,359.5411,358.7066,358.0807,357.2462,356.4117,355.5772,354.5341,353.6996,352.8651,352.0306,351.4047,350.5702,349.9441,349.1096,348.4837,347.8578,347.0233,346.3974,345.7715,345.3543,344.937,344.5198,344.1025,343.6853,343.6853,343.4767,343.4767,343.6853,343.8939,344.1025,344.5198,345.1457,345.7715,346.3974,347.0233,347.6492,348.2751,349.1096,349.7354,350.3616,351.1961,351.822,352.2392,352.8651,353.491,353.9082,354.5341,354.9514,355.5772,355.9945,356.4117,356.829,357.0376,357.2462,357.2462,357.2462,357.2462,357.2462,357.0376,356.829,356.4117,355.7859,354.9514,354.3255,353.491,352.4478,351.6133,350.5702,349.5268,348.4837,347.4406,346.606,345.7715,344.937,344.1025,343.268,342.8508,342.4335,342.2249,341.8076,341.599,341.1818,340.9731,341.1818,341.599,342.2249,343.0594,344.1025,345.1457,345.9802,347.0233,347.8578,348.6923,349.3182,349.7354,350.1527,350.3616,350.3616,350.1527,349.9441,349.7354,349.5268,349.1096,348.6923,348.4837,348.2751,348.2751,348.2751,348.2751,348.4837,348.9009,349.3182,349.9441,350.5702,351.1961,352.0306,352.8651,353.6996,354.5341,355.3686,355.9945,356.6204,357.0376,357.4549,357.6635,358.0807,358.2894,358.498,358.9153,358.9153,358.9153,358.9153,358.7066,358.498,358.2894,357.8721,357.4549,357.0376,356.6204,356.2031,355.7859,355.5772,355.16,354.9514,354.9514,354.7427,354.9514,354.9514,355.16,355.3686,355.5772,355.9945,356.2031,356.4117,356.4117,356.4117,356.4117,356.4117,355.9945,355.7859,355.3686,354.9514,354.3255,353.9082,353.491,353.0737,352.6565,352.2392,352.0306,351.822,351.6133,351.6133,351.6133,351.6133,351.6133,351.6133,351.6133,351.6133,351.4047,351.4047,351.1961,350.7788,350.5702,350.3616,350.1527,350.1527,349.9441,349.9441,349.9441,350.1527,350.3616,350.3616,350.5702,350.7788,350.9875,350.9875,350.9875,350.7788,350.5702,350.3616,349.9441,349.5268,349.1096,348.9009,348.4837,348.2751,348.0664,347.8578,347.8578,348.0664,348.2751,348.4837,348.9009,349.3182,349.5268,349.9441,350.3616,350.5702,350.7788,350.9875,350.9875,350.9875,350.9875,350.7788,350.5702,350.3616,349.9441,349.7354,349.5268,349.1096,348.9009,348.6923,348.4837,348.2751,348.2751,348.2751,348.2751,348.4837,348.6923,348.9009,349.3182,349.5268,349.9441,350.1527,350.5702,350.7788,350.9875,351.1961,351.4047,351.4047,351.4047,351.4047,351.4047,351.4047,351.4047,351.1961,351.1961,350.9875,350.7788,350.5702,350.3616,349.9441,349.7354,349.3182,349.1096,348.6923,348.4837,348.2751,348.0664,347.8578,347.6492,347.6492,347.6492,347.6492,347.8578,348.0664,348.2751,348.4837,348.9009,349.1096,349.5268,349.7354,349.9441,349.9441,349.9441,349.7354,349.3182,348.9009,348.2751,347.6492,346.8147,345.9802,345.3543,344.5198,343.6853,343.0594,342.4335,341.8076,341.599,341.1818,341.1818,340.9731,341.1818,341.1818,341.3904,341.8076,342.2249,342.6422,343.0594,343.6853,344.1025,344.7284,345.3543,345.9802,346.3974,346.606,347.0233,347.0233,347.2319,347.4406,347.4406,347.8578,348.0664,348.6923,349.5268,350.5702,352.0306,353.491,355.3686,357.2462,359.5411,361.6274,363.9223,366.2172,368.3034,370.3897,372.0587,373.5191,374.145,374.5622,374.7708,375.3967,376.2312,376.4398,375.814,375.6053,376.6485,378.5261,380.1951,381.6555,382.49,383.1159,383.5331,383.3245,383.3245,382.6986,381.8641,381.0296,380.1951,379.3606,378.3175,376.6485,374.145,370.8069,366.843,361.6274,356.829,354.3255,353.6996,353.9082,353.9082,353.6996,353.2823,353.0737,353.0737,353.491,353.6996,353.9082,353.6996,353.0737,351.6133,349.7354,347.8578,346.1888,344.5198,342.8508,340.7645,339.0955,338.4696,338.4696,338.6783,339.0955,339.5128,339.93,340.5559,340.9731,341.1818,341.1818,341.3904,341.8076,341.8076,340.7645,339.3041,338.261,338.6783,339.93,341.1818,342.8508,345.7715,349.7354,354.3255,358.7066,362.4619,364.7568,364.9654,362.8792,358.9153,353.6996,348.0664,342.6422,338.0524,334.5057,332.0022,330.3332,329.4987,328.8728,328.2469,327.6211,327.6211,327.8297,328.4556,329.2901,330.5418,332.0022,333.6712,335.5489,337.6351,339.7214,342.0163,344.5198,347.4406,350.3616,353.0737,355.9945,358.498,360.7929,362.8792,364.5482,365.7999,366.6344,367.0517,366.843,366.0085,364.7568,363.2964,361.4188,359.3325,357.2462,355.16,353.491,352.2392,351.6133])
Q_x = np.array([33.9192963,34.1278992,34.3365021,34.5451965,34.7537994,34.9624023,35.1710052,35.3796997,35.5883026,35.7969055,36.0054932,36.2142029,36.4228058,36.8399963,37.0487061,37.2572937,37.4658966,37.6744995,37.883194,38.0917969,38.3003998,38.5090027,38.7176971,38.9263,39.134903,39.3435974,39.7608032,39.9694061,40.1781006,40.3867035,40.5953064,40.803894,41.0126038,41.2212067,41.4297943,41.6383972,41.8471069,42.0557,42.2643,42.4729,42.8902,43.0988,43.3074,43.5161,43.7247,43.9333,44.142,44.3506,44.5592,44.7678,44.9765,45.1851,45.3937,45.811,46.0196,46.2282,46.4368,46.6455,46.8541,47.0627,47.2713,47.48,47.6886,47.8972,48.1059,48.3145,48.7317,48.9404,49.149,49.3576,49.5662,49.7749,49.9835,50.1921,50.4007,50.6094,50.818,51.0266,51.2352,51.4439,51.8611,52.0697,52.2784,52.487,52.6956,52.9042,53.1129,53.3215,53.5301,53.7388,53.9474,54.156,54.3646,54.7819,54.9905,55.1991,55.4078,55.6164,55.825,56.0336,56.2423,56.4509,56.6595,56.8681,57.0768,57.2854,57.7026,57.9113,58.1199,58.3285,58.5371,58.7458,58.9544,59.163,59.3716,59.5803,59.7889,59.9975,60.2061,60.4148,60.832,61.0406,61.2493,61.4579,61.6665,61.8752,62.0838,62.2924,62.501,62.7097,62.9183,63.1269,63.3355,63.7528,63.9614,64.17,64.3787,64.5873,64.7959,65.0045,65.2132,65.4218,65.6304,65.8391,66.0477,66.2563,66.4649,66.8822,67.0908,67.2994,67.5081,67.7167,67.9253,68.1339,68.3426,68.5512,68.7598,68.9684,69.1771,69.3857,69.8029,70.0116,70.2202,70.4288,70.6375,70.8461,71.0547,71.2633,71.472,71.6806,71.8892,72.0978,72.3065,72.7237,72.9323,73.141,73.3496,73.5582,73.7668,73.9755,74.1841,74.3927,74.6013,74.81,75.0186,75.2272,75.4359,75.8531,76.0617,76.2704,76.479,76.6876,76.8962,77.1049,77.3135,77.5221,77.7307,77.9394,78.148,78.3566,78.7739,78.9825,79.1911,79.3997,79.6084,79.817,80.0256,80.2343,80.4429,80.6515,80.8601,81.0688,81.2774,81.6946,81.9033,82.1119,82.3205,82.5291,82.7378,82.9464,83.155,83.3636,83.5723,83.7809,83.9895,84.1982,84.4068,84.824,85.0327,85.2413,85.4499,85.6585,85.8672,86.0758,86.2844,86.493,86.7017,86.9103,87.1189,87.3275,87.7448,87.9534,88.162,88.3707,88.5793,88.7879,88.9965,89.2052,89.4138,89.6224,89.8311,90.0397,90.2483,90.6656,90.8742,91.0828,91.2914,91.5001,91.7087,91.9173,92.1259,92.3346,92.5432,92.7518,92.9604,93.1691,93.3777,93.7949,94.0036,94.2122,94.4208,94.6294,94.8381,95.0467,95.2553,95.4639,95.6726,95.8812,96.0898,96.2984,96.7157,96.9243,97.1329,97.3416,97.5502,97.7588,97.9675,98.1761,98.3847,98.5933,98.802,99.0106,99.2192,99.4278,99.8451,100.0537,100.2623,100.471,100.6796,100.8882,101.0968,101.3055,101.5141,101.7227,101.9314,102.14,102.3489,102.7661,102.9748,103.1834,103.392,103.6006,103.8093,104.0179,104.2265,104.4352,104.6438,104.8524,105.061,105.2697,105.6869,105.8955,106.1042,106.3128,106.5214,106.73,106.9387,107.1473,107.3559,107.5645,107.7732,107.9818,108.1904,108.399,108.8163,109.0249,109.2336,109.4422,109.6508,109.8594,110.0681,110.2767,110.4853,110.6939,110.9026,111.1112,111.3198,111.7371,111.9457,112.1543,112.3629,112.5716,112.7802,112.9888,113.1974,113.4061,113.6147,113.8233,114.0319,114.2406,114.6578,114.8664,115.0751,115.2837,115.4923,115.7009,115.9096,116.1182,116.3268,116.5354,116.7441,116.9527,117.1613,117.3699,117.7872,117.9958,118.2045,118.4131,118.6217,118.8303,119.039,119.2476,119.4562,119.6648,119.8735,120.0821,120.2907,120.708,120.9166,121.1252,121.3338,121.5425,121.7511,121.9597,122.1684,122.377,122.5856,122.7942,123.0029,123.2115,123.4201,123.8374,124.046,124.2546,124.4632,124.6719,124.8805,125.0891,125.2977,125.5064,125.715,125.9236,126.1322,126.3409,126.7581,126.9668,127.1754,127.384,127.5926,127.8013,128.0099,128.2185,128.4271,128.6357,128.8444,129.053,129.2616,129.6789,129.8875,130.0961,130.3047,130.5134,130.722,130.9306,131.1393,131.3479,131.5565,131.7651,131.9738,132.1824,132.391,132.8083,133.0169,133.2255,133.4341,133.6428,133.8514,134.06,134.2686,134.4773,134.6859,134.8945,135.1032,135.3118,135.729,135.9377,136.1463,136.3549,136.5635,136.7722,136.9808,137.1894,137.398,137.6067,137.8153,138.0239,138.2325,138.6498,138.8584,139.067,139.2757,139.4843,139.6929,139.9016,140.1102,140.3188,140.5274,140.7361,140.9447,141.1533,141.3619,141.7792,141.9878,142.1964,142.4051,142.6137,142.8223,143.0309,143.2396,143.4482,143.6568,143.8654,144.0741,144.2827,144.7,144.9086,145.1172,145.3258,145.5345,145.7431,145.9517,146.1603,146.369,146.5776,146.7862,146.9948,147.2035])
Q_y = np.array([351.6132965,351.6132965,351.6132965,351.6132965,351.6132965,351.6132965,351.6132965,351.6132965,351.196106,349.7353973,347.6492004,345.5628967,343.4766998,341.598999,339.721405,338.2610016,336.8005981,335.5489044,334.7144012,333.8798981,333.4626007,333.0453033,333.0453033,333.2539978,333.8798981,334.5057068,335.7575073,337.009201,338.678299,340.5558929,342.4335022,344.5198059,346.6060028,348.6923065,350.7787933,352.6564941,354.3255005,355.7859,357.0376,358.0807,358.9153,359.3325,359.7498,359.7498,359.7498,359.3325,358.9153,358.498,357.8721,357.0376,356.2031,355.3686,354.3255,353.2823,352.0306,350.7788,349.5268,348.0664,346.606,345.1457,343.4767,342.0163,340.5559,339.3041,337.8438,336.592,335.5489,334.7144,334.0885,333.4626,333.0453,332.8367,332.8367,332.8367,333.254,333.6712,334.0885,334.7144,335.3402,336.3834,337.2179,338.4696,339.7214,340.9731,342.4335,344.1025,345.9802,347.6492,349.7354,351.6133,353.6996,355.5772,357.6635,359.7498,361.6274,363.505,365.3827,367.2603,368.7207,370.1811,371.6414,372.6846,373.5191,374.145,374.5622,374.7708,374.7708,374.5622,374.145,373.5191,373.1018,372.2673,371.6414,371.0156,370.3897,369.5552,369.1379,368.5121,367.8862,367.2603,366.843,366.2172,365.7999,365.174,364.5482,363.7137,362.6705,361.6274,360.5843,359.5411,358.498,357.4549,356.4117,355.3686,354.1169,353.0737,351.822,350.7788,349.5268,348.4837,347.6492,346.8147,345.9802,345.3543,344.7284,344.3112,343.8939,343.6853,343.268,343.0594,342.8508,342.6422,342.2249,342.0163,341.599,341.3904,340.9731,340.5559,340.1386,339.7214,339.0955,338.6783,338.261,337.6351,337.2179,336.8006,336.1747,335.7575,335.3402,335.1316,335.1316,335.3402,335.7575,336.3834,337.2179,338.4696,339.7214,341.1818,342.6422,344.3112,345.9802,347.4406,349.1096,350.5702,351.822,352.8651,353.6996,354.5341,354.9514,355.3686,355.5772,355.5772,355.3686,355.3686,354.9514,354.7427,354.5341,354.3255,354.1169,353.9082,353.9082,353.9082,354.1169,354.3255,354.7427,355.16,355.7859,356.2031,356.829,357.4549,357.8721,358.2894,358.7066,358.9153,359.1239,359.1239,358.9153,358.7066,358.498,358.0807,357.6635,357.2462,356.829,356.4117,355.9945,355.5772,355.3686,355.16,354.9514,354.7427,354.5341,354.5341,354.5341,354.5341,354.3255,354.3255,354.1169,354.1169,353.9082,353.491,353.2823,352.8651,352.4478,351.822,351.4047,350.7788,350.3616,349.9441,349.3182,349.1096,348.6923,348.4837,348.2751,348.2751,348.2751,348.2751,348.4837,348.6923,348.9009,349.3182,349.5268,349.7354,349.9441,349.9441,349.9441,349.9441,349.7354,349.5268,349.3182,348.9009,348.4837,348.2751,347.8578,347.4406,347.0233,346.8147,346.606,346.606,346.3974,346.3974,346.606,346.606,346.8147,346.8147,347.0233,347.0233,347.0233,347.0233,347.0233,346.8147,346.606,346.1888,345.9802,345.5629,345.3543,345.1457,344.937,344.937,345.1457,345.3543,345.7715,346.3974,347.0233,347.6492,348.4837,349.3182,349.9441,350.3616,350.7788,350.9875,350.9875,350.7788,350.3616,349.5268,348.4837,347.4406,346.1888,344.937,343.6853,342.6422,341.599,340.9731,340.5559,340.3473,340.5559,341.1818,342.0163,343.0594,344.3112,345.5629,347.0233,348.4837,349.9441,351.1961,352.2392,353.0737,353.9082,354.3255,354.7427,354.9514,354.9514,354.9514,354.7427,354.5341,354.5341,354.3255,354.1169,353.9082,353.9082,353.6996,353.9082,354.1169,354.5341,354.9514,355.5772,356.2031,356.829,357.2462,357.4549,357.6635,357.6635,357.6635,357.6635,357.4549,357.4549,357.4549,357.6635,357.8721,358.498,358.9153,359.5411,360.3756,361.0015,361.6274,362.2533,362.6705,363.0878,363.505,363.9223,364.5482,365.174,366.6344,368.5121,371.2242,374.145,376.8571,378.7347,379.7778,380.821,381.8641,381.6555,379.3606,374.145,367.0517,360.167,356.6204,357.0376,358.498,357.0376,353.9082,352.6565,354.1169,355.16,352.6565,348.9009,348.2751,350.5702,353.6996,356.4117,359.1239,361.2101,361.4188,359.9584,357.4549,354.7427,352.6565,351.4047,351.4047,352.0306,353.0737,354.3255,354.9514,354.9514,354.3255,352.8651,350.7788,348.0664,345.1457,342.4335,340.7645,340.5559,342.2249,344.937,347.2319,348.6923,349.3182,349.7354,350.7788,352.4478,354.5341,355.9945,356.4117,355.16,352.4478,349.9441,348.6923,350.1527,354.7427,361.6274,368.5121,372.0587,371.0156,366.843,361.6274,357.6635,354.9514,353.491,352.6565,352.0306,350.7788,348.6923,346.3974,344.3112,343.268,343.4767,344.7284,346.3974,347.8578,348.0664,347.0233,345.5629,344.937,344.1025,342.8508,340.9731,339.0955,337.4265,335.5489,333.8799,332.2108,331.1677,330.5418,330.3332,330.3332,330.5418,331.1677,332.0022,333.0453,334.2971,335.7575,337.2179,338.8869,340.3473,342.0163,343.4767,344.7284,345.9802,347.2319,348.2751,349.1096,349.7354,350.3616,350.7788,351.1961,351.4047,351.6133,351.6133])
(I_x, I_y) = normalisePoints(I_x,I_y,len(I_x))
(Q_x, Q_y) = normalisePoints(Q_x,Q_y,len(I_x))
resonator_pulses = (I_x, I_y, Q_x, Q_y)
# Qubit pulse
Q_x = np.array([159.3841,159.5927,159.8014,160.01,160.2186,160.4272,160.6359,160.8445,161.0531,161.2617,161.4704,161.679,161.8876,162.3049,162.5135,162.7221,162.9308,163.1394,163.348,163.5566,163.7653,163.9739,164.1825,164.3911,164.5998,164.8084,165.2256,165.4343,165.6429,165.8515,166.0602,166.2688,166.4774,166.686,166.8947,167.1033,167.3119,167.5205,167.7292,167.9378,168.355,168.5637,168.7723,168.9809,169.1895,169.3982,169.6068,169.8154,170.024,170.2327,170.4413,170.6499,170.8586,171.2758,171.4844,171.6931,171.9017,172.1103,172.3189,172.5276,172.7362,172.9448,173.1534,173.3621,173.5707,173.7793,174.1966,174.4052,174.6138,174.8224,175.0311,175.2397,175.4483,175.657,175.8656,176.0742,176.2828,176.4915,176.7001,176.9087,177.326,177.5346,177.7432,177.9518,178.1605,178.3691,178.5777,178.7863,178.995,179.2036,179.4122,179.6208,179.8295,180.2467,180.4554,180.664,180.8726,181.0812,181.2899,181.4985,181.7071,181.9157,182.1244,182.333,182.5416,182.7502,183.1675,183.3761,183.5847,183.7934,184.002,184.2106,184.4192,184.6279,184.8365,185.0451,185.2537,185.4624,185.671,185.8796,186.2969,186.5055,186.7141,186.9228,187.1314,187.34,187.5486,187.7573,187.9659,188.1745,188.3831,188.5918,188.8004,189.2176,189.4263,189.6349,189.8435,190.0521,190.2608,190.4694,190.678,190.8866,191.0953,191.3039,191.5125,191.7211,191.9298,192.347,192.5557,192.7643,192.9729,193.1815,193.3902,193.5988,193.8074,194.016,194.2247,194.4333,194.6419,194.8505,195.2678,195.4764,195.685,195.8937,196.1023,196.3109,196.5195,196.7282,196.9368,197.1454,197.354,197.5627,197.7713,198.1886,198.3972,198.6058,198.8144,199.0231,199.2317,199.4403,199.6489,199.8576,200.0662,200.2748,200.4834,200.6921,200.9007,201.3179,201.5266,201.7352,201.9438,202.1525,202.3611,202.5697,202.7783,202.987,203.1956,203.4042,203.6128,203.8215,204.2387,204.4473,204.656,204.8646,205.0732,205.2818,205.4905,205.6991,205.9077,206.1163,206.325,206.5336,206.7422,207.1595,207.3681,207.5767,207.7854,207.994,208.2026,208.4112,208.6199,208.8285,209.0371,209.2457,209.4544,209.663,209.8716,210.2889,210.4975,210.7061,210.9147,211.1234,211.332,211.5406,211.7492,211.9579,212.1665,212.3751,212.5837,212.7924,213.2096,213.4182,213.6269,213.8355,214.0441,214.2527,214.4614,214.67,214.8786,215.0872,215.2959,215.5045,215.7131,216.1304,216.339,216.5476,216.7563,216.9649,217.1735,217.3821,217.5908,217.7994,218.008,218.2166,218.4253,218.6339,218.8425,219.2598,219.4684,219.677,219.8857,220.0943,220.3029,220.5115,220.7202,220.9288,221.1374,221.346,221.5547,221.7633,222.1805,222.3892,222.5978,222.8064,223.015,223.2237,223.4323,223.6409,223.8495,224.0582,224.2668,224.4754,224.6841,224.8927,225.3099,225.5186,225.7272,225.9358,226.1444,226.3531,226.5617,226.7703,226.9789,227.1876,227.3962,227.6048,227.8137,228.231,228.4396,228.6482,228.8569,229.0655,229.2741,229.4827,229.6914,229.9,230.1086,230.3172,230.5259,230.7345,231.1517,231.3604,231.569,231.7776,231.9862,232.1949,232.4035,232.6121,232.8207,233.0294,233.238,233.4466,233.6552,233.8639,234.2811,234.4898,234.6984,234.907,235.1156,235.3243,235.5329,235.7415,235.9501,236.1588,236.3674,236.576,236.7846,237.2019,237.4105,237.6191,237.8278,238.0364,238.245,238.4536,238.6623,238.8709,239.0795,239.2881,239.4968,239.7054,240.1227,240.3313,240.5399,240.7485,240.9572,241.1658,241.3744,241.583,241.7917,242.0003,242.2089,242.4175,242.6262,242.8348,243.252,243.4607,243.6693,243.8779,244.0865,244.2952,244.5038,244.7124,244.9211,245.1297,245.3383,245.5469,245.7556,246.1728,246.3814,246.5901,246.7987,247.0073,247.2159,247.4246,247.6332,247.8418,248.0504,248.2591,24
8.4677,248.6763,248.8849,249.3022,249.5108,249.7195,249.9281,250.1367,250.3453,250.554,250.7626,250.9712,251.1798,251.3885,251.5971,251.8057,252.223,252.4316,252.6402,252.8488,253.0575,253.2661,253.4747,253.6833,253.892,254.1006,254.3092,254.5179,254.7265,255.1437,255.3524,255.561,255.7696,255.9782,256.1869,256.3955,256.6041,256.8127,257.0214,257.23,257.4386,257.6472,257.8559,258.2731,258.4818,258.6904,258.899,259.1076,259.3162,259.5249,259.7335,259.9421,260.1508,260.3594,260.568,260.7766,261.1939,261.4025,261.6111,261.8198,262.0284,262.237,262.4456,262.6543,262.8629,263.0715,263.2802,263.4888,263.6974,264.1146,264.3233,264.5319,264.7405,264.9492,265.1578,265.3664,265.575,265.7836,265.9923,266.2009,266.4095,266.6182,266.8268,267.244,267.4526,267.6613,267.8699,268.0786,268.2872,268.4958,268.7044,268.913,269.1217,269.3303,269.5389,269.7476,270.1648,270.3734,270.582,270.7907,270.9993,271.2079,271.4166,271.6252,271.8338,272.0424,272.251,272.4597,272.6683])
Q_y = np.array([351.6133,351.6133,351.6133,351.6133,351.6133,352.0306,352.4478,353.0737,353.6996,354.3255,354.5341,354.5341,354.1169,353.2823,352.0306,350.3616,348.4837,346.606,344.937,343.4767,342.4335,342.0163,342.2249,343.268,344.937,347.0233,349.7354,352.4478,355.5772,358.2894,360.7929,362.8792,364.3395,365.3827,365.5913,365.174,364.1309,362.6705,360.7929,358.498,356.2031,353.6996,351.1961,348.9009,346.8147,345.1457,343.8939,342.8508,342.2249,341.8076,341.8076,342.0163,342.4335,343.0594,343.8939,344.7284,345.5629,346.8147,347.8578,349.3182,350.7788,352.4478,354.1169,355.9945,357.8721,359.7498,361.2101,362.6705,363.7137,364.3395,364.5482,364.5482,364.1309,363.505,362.6705,361.6274,361.0015,360.3756,359.7498,359.5411,359.5411,359.5411,359.5411,359.5411,359.1239,358.2894,357.0376,355.3686,353.2823,350.7788,348.2751,345.7715,343.4767,341.8076,340.7645,340.5559,341.3904,343.0594,345.3543,348.4837,352.0306,355.5772,359.1239,362.0446,364.5482,366.4258,367.2603,367.2603,366.4258,364.7568,362.4619,359.3325,355.9945,352.4478,348.6923,344.937,341.599,338.6783,336.3834,334.7144,334.0885,334.0885,335.3402,337.4265,340.3473,343.8939,347.8578,351.822,355.7859,358.9153,361.4188,362.8792,363.2964,362.6705,361.0015,358.498,355.3686,352.0306,348.9009,346.1888,344.3112,343.268,343.6853,345.1457,347.6492,350.9875,354.7427,358.7066,362.4619,365.3827,367.6776,368.9293,369.1379,368.3034,366.6344,364.5482,361.836,359.1239,356.829,354.7427,353.2823,352.6565,352.6565,353.491,354.9514,357.0376,359.3325,361.836,364.3395,366.4258,367.8862,368.7207,368.7207,367.6776,366.0085,363.2964,360.167,356.6204,352.8651,349.3182,345.7715,342.8508,340.3473,338.6783,337.8438,337.8438,338.6783,340.1386,342.0163,344.5198,347.2319,349.9441,352.4478,354.7427,356.6204,357.8721,358.498,358.498,358.0807,357.0376,355.5772,353.6996,352.0306,350.3616,348.6923,347.6492,346.8147,346.3974,346.606,347.0233,348.0664,349.1096,350.3616,351.822,353.0737,353.9082,354.7427,354.9514,354.9514,354.3255,353.491,352.2392,350.7788,349.1096,347.4406,345.9802,344.7284,343.6853,343.268,343.0594,343.4767,344.1025,345.1457,346.3974,347.6492,348.9009,350.1527,351.1961,352.0306,352.6565,352.6565,352.4478,352.0306,351.4047,350.5702,349.5268,348.6923,347.8578,347.0233,346.606,346.1888,346.1888,346.606,347.2319,347.8578,348.9009,350.1527,351.1961,352.4478,353.491,354.3255,354.7427,354.9514,354.7427,354.3255,353.2823,352.2392,350.7788,349.3182,347.8578,346.3974,345.1457,344.3112,343.6853,343.6853,344.1025,344.937,346.3974,348.0664,350.1527,352.2392,354.3255,355.9945,357.4549,358.498,358.9153,358.7066,357.8721,356.4117,354.5341,352.4478,350.1527,347.8578,345.7715,344.1025,343.0594,342.6422,342.8508,343.8939,345.7715,348.0664,350.7788,353.491,356.4117,358.9153,361.0015,362.4619,363.0878,363.0878,362.0446,360.3756,357.8721,354.9514,351.6133,348.2751,344.937,342.0163,339.7214,338.0524,337.2179,337.4265,338.4696,340.3473,343.0594,345.9802,349.1096,352.4478,355.3686,357.8721,359.5411,360.7929,361.2101,360.7929,359.9584,358.498,356.829,355.16,353.491,351.822,350.7788,349.7354,349.3182,349.1096,349.1096,349.3182,349.7354,350.1527,350.5702,350.7788,351.1961,351.4047,351.6133,351.822,352.0306,352.2392,352.4478,352.6565,352.8651,353.0737,353.2823,353.2823,353.0737,352.8651,352.4478,352.0306,351.6133,350.9875,350.5702,350.1527,349.9441,349.7354,349.7354,349.9441,350.3616,350.5702,350.9875,351.1961,351.1961,351.1961,350.7788,349.9441,348.9009,347.6492,345.9802,344.5198,343.268,342.2249,341.8076,342.0163,342.6422,343.6853,345.1457,346.606,348.0664,349.3182,349.7354,349.7354,348.9
009,347.4406,345.3543,343.268,341.1818,339.5128,338.4696,338.0524,338.0524,338.4696,338.4696,338.261,337.6351,336.592,335.5489,334.7144,334.2971,334.0885,334.5057,335.3402,336.3834,337.8438,339.5128,341.1818,342.6422,343.8939,344.3112,344.5198,344.1025,343.4767,342.6422,342.0163,341.599,341.3904,341.3904,341.8076,342.2249,343.0594,344.3112,345.7715,347.4406,349.3182,351.4047,353.6996,355.7859,357.8721,359.7498,361.2101,362.0446,362.2533,361.6274,360.167,357.8721,355.16,352.2392,349.3182,346.606,344.3112,342.4335,341.1818,340.5559,340.7645,341.599,343.268,345.3543,348.0664,350.9875,353.9082,356.829,359.7498,362.4619,364.7568,366.843,368.5121,369.5552,370.1811,370.3897,369.9724,369.3466,368.3034,367.2603,366.0085,364.7568,363.505,362.0446,360.7929,359.3325,357.6635,355.7859,353.9082,351.822,349.5268,347.4406,345.3543,343.4767,341.8076,340.5559,339.93,339.7214,340.1386,341.3904,343.0594,345.1457,347.2319,349.5268,350.9875,351.6133,351.6133,351.6133,351.6133])
I_x = np.array([159.3841,159.5927,159.8014,160.01,160.2186,160.4272,160.6359,160.8445,161.0531,161.2617,161.4704,161.679,161.8876,162.3049,162.5135,162.7221,162.9308,163.1394,163.348,163.5566,163.7653,163.9739,164.1825,164.3911,164.5998,164.8084,165.2256,165.4343,165.6429,165.8515,166.0602,166.2688,166.4774,166.686,166.8947,167.1033,167.3119,167.5205,167.7292,167.9378,168.355,168.5637,168.7723,168.9809,169.1895,169.3982,169.6068,169.8154,170.024,170.2327,170.4413,170.6499,170.8586,171.2758,171.4844,171.6931,171.9017,172.1103,172.3189,172.5276,172.7362,172.9448,173.1534,173.3621,173.5707,173.7793,174.1966,174.4052,174.6138,174.8224,175.0311,175.2397,175.4483,175.657,175.8656,176.0742,176.2828,176.4915,176.7001,176.9087,177.326,177.5346,177.7432,177.9518,178.1605,178.3691,178.5777,178.7863,178.995,179.2036,179.4122,179.6208,179.8295,180.2467,180.4554,180.664,180.8726,181.0812,181.2899,181.4985,181.7071,181.9157,182.1244,182.333,182.5416,182.7502,183.1675,183.3761,183.5847,183.7934,184.002,184.2106,184.4192,184.6279,184.8365,185.0451,185.2537,185.4624,185.671,185.8796,186.2969,186.5055,186.7141,186.9228,187.1314,187.34,187.5486,187.7573,187.9659,188.1745,188.3831,188.5918,188.8004,189.2176,189.4263,189.6349,189.8435,190.0521,190.2608,190.4694,190.678,190.8866,191.0953,191.3039,191.5125,191.7211,191.9298,192.347,192.5557,192.7643,192.9729,193.1815,193.3902,193.5988,193.8074,194.016,194.2247,194.4333,194.6419,194.8505,195.2678,195.4764,195.685,195.8937,196.1023,196.3109,196.5195,196.7282,196.9368,197.1454,197.354,197.5627,197.7713,198.1886,198.3972,198.6058,198.8144,199.0231,199.2317,199.4403,199.6489,199.8576,200.0662,200.2748,200.4834,200.6921,200.9007,201.3179,201.5266,201.7352,201.9438,202.1525,202.3611,202.5697,202.7783,202.987,203.1956,203.4042,203.6128,203.8215,204.2387,204.4473,204.656,204.8646,205.0732,205.2818,205.4905,205.6991,205.9077,206.1163,206.325,206.5336,206.7422,207.1595,207.3681,207.5767,207.7854,207.994,208.2026,208.4112,208.6199,208.8285,209.0371,209.2457,209.4544,209.663,209.8716,210.2889,210.4975,210.7061,210.9147,211.1234,211.332,211.5406,211.7492,211.9579,212.1665,212.3751,212.5837,212.7924,213.2096,213.4182,213.6269,213.8355,214.0441,214.2527,214.4614,214.67,214.8786,215.0872,215.2959,215.5045,215.7131,216.1304,216.339,216.5476,216.7563,216.9649,217.1735,217.3821,217.5908,217.7994,218.008,218.2166,218.4253,218.6339,218.8425,219.2598,219.4684,219.677,219.8857,220.0943,220.3029,220.5115,220.7202,220.9288,221.1374,221.346,221.5547,221.7633,222.1805,222.3892,222.5978,222.8064,223.015,223.2237,223.4323,223.6409,223.8495,224.0582,224.2668,224.4754,224.6841,224.8927,225.3099,225.5186,225.7272,225.9358,226.1444,226.3531,226.5617,226.7703,226.9789,227.1876,227.3962,227.6048,227.8137,228.231,228.4396,228.6482,228.8569,229.0655,229.2741,229.4827,229.6914,229.9,230.1086,230.3172,230.5259,230.7345,231.1517,231.3604,231.569,231.7776,231.9862,232.1949,232.4035,232.6121,232.8207,233.0294,233.238,233.4466,233.6552,233.8639,234.2811,234.4898,234.6984,234.907,235.1156,235.3243,235.5329,235.7415,235.9501,236.1588,236.3674,236.576,236.7846,237.2019,237.4105,237.6191,237.8278,238.0364,238.245,238.4536,238.6623,238.8709,239.0795,239.2881,239.4968,239.7054,240.1227,240.3313,240.5399,240.7485,240.9572,241.1658,241.3744,241.583,241.7917,242.0003,242.2089,242.4175,242.6262,242.8348,243.252,243.4607,243.6693,243.8779,244.0865,244.2952,244.5038,244.7124,244.9211,245.1297,245.3383,245.5469,245.7556,246.1728,246.3814,246.5901,246.7987,247.0073,247.2159,247.4246,247.6332,247.8418,248.0504,248.2591,24
8.4677,248.6763,248.8849,249.3022,249.5108,249.7195,249.9281,250.1367,250.3453,250.554,250.7626,250.9712,251.1798,251.3885,251.5971,251.8057,252.223,252.4316,252.6402,252.8488,253.0575,253.2661,253.4747,253.6833,253.892,254.1006,254.3092,254.5179,254.7265,255.1437,255.3524,255.561,255.7696,255.9782,256.1869,256.3955,256.6041,256.8127,257.0214,257.23,257.4386,257.6472,257.8559,258.2731,258.4818,258.6904,258.899,259.1076,259.3162,259.5249,259.7335,259.9421,260.1508,260.3594,260.568,260.7766,261.1939,261.4025,261.6111,261.8198,262.0284,262.237,262.4456,262.6543,262.8629,263.0715,263.2802,263.4888,263.6974,264.1146,264.3233,264.5319,264.7405,264.9492,265.1578,265.3664,265.575,265.7836,265.9923,266.2009,266.4095,266.6182,266.8268,267.244,267.4526,267.6613,267.8699,268.0786,268.2872,268.4958,268.7044,268.913,269.1217,269.3303,269.5389,269.7476,270.1648,270.3734,270.582,270.7907,270.9993,271.2079,271.4166,271.6252,271.8338,272.0424,272.251,272.4597,272.6683])
I_y = np.array([351.6133,351.6133,351.6133,351.6133,351.6133,352.0306,352.6565,353.491,354.5341,355.7859,357.0376,358.0807,359.1239,359.9584,360.3756,360.167,359.5411,358.2894,356.4117,353.9082,351.1961,348.0664,345.1457,342.2249,339.7214,337.8438,336.3834,335.9661,336.1747,337.2179,339.0955,341.1818,343.8939,346.8147,349.9441,352.8651,355.5772,358.0807,359.9584,361.6274,362.4619,362.8792,362.8792,362.4619,361.6274,360.3756,359.1239,357.6635,356.2031,354.7427,353.2823,352.0306,350.9875,349.9441,349.1096,348.2751,347.6492,347.2319,346.8147,346.3974,346.1888,346.1888,346.3974,346.8147,347.4406,348.2751,349.5268,350.7788,352.2392,353.9082,355.16,356.4117,357.4549,358.0807,358.498,358.2894,358.0807,357.4549,357.0376,356.4117,355.9945,355.7859,355.9945,356.4117,356.829,357.6635,358.2894,358.498,358.498,357.8721,356.6204,354.5341,351.822,348.2751,344.5198,340.5559,336.8006,333.254,330.3332,328.2469,327.4124,327.4124,328.6642,330.7505,333.6712,337.2179,340.9731,344.937,348.9009,352.4478,355.5772,358.0807,359.9584,361.0015,361.2101,360.5843,359.3325,357.2462,354.7427,351.6133,348.2751,344.7284,341.599,338.6783,336.592,335.1316,334.923,335.5489,337.2179,339.7214,343.0594,346.606,350.5702,354.1169,357.0376,359.3325,360.3756,360.5843,359.3325,357.4549,354.5341,351.1961,347.6492,344.3112,341.599,339.93,339.3041,339.7214,341.3904,343.8939,347.0233,350.3616,353.9082,357.0376,359.5411,361.2101,362.0446,361.836,361.0015,359.3325,357.2462,354.7427,352.2392,349.9441,348.0664,346.8147,346.1888,346.1888,347.2319,348.9009,350.9875,353.6996,356.6204,359.3325,361.836,363.9223,365.3827,366.0085,365.7999,364.7568,362.8792,360.3756,357.4549,353.9082,350.5702,347.0233,343.8939,341.1818,339.0955,337.4265,336.8006,336.8006,337.4265,338.8869,340.5559,342.6422,344.937,347.2319,349.3182,350.9875,352.2392,353.0737,353.0737,352.8651,352.0306,350.9875,349.7354,348.4837,347.2319,345.9802,345.1457,344.7284,344.7284,345.1457,345.9802,347.2319,348.4837,350.1527,351.822,353.2823,354.7427,355.5772,356.2031,356.4117,356.2031,355.5772,354.5341,353.2823,351.822,350.3616,349.1096,347.8578,347.0233,346.3974,346.3974,346.606,347.0233,347.8578,348.9009,350.1527,351.1961,352.4478,353.2823,354.1169,354.5341,354.7427,354.7427,354.3255,353.6996,353.0737,352.2392,351.4047,350.3616,349.7354,349.1096,348.6923,348.6923,348.9009,349.3182,350.1527,351.1961,352.4478,353.6996,354.9514,356.2031,357.2462,357.8721,358.2894,358.2894,357.8721,357.0376,355.7859,354.3255,352.6565,350.7788,349.1096,347.4406,346.1888,345.3543,344.937,345.1457,345.9802,347.2319,348.9009,350.7788,353.0737,355.16,357.0376,358.7066,359.9584,360.3756,360.167,359.3325,357.8721,355.9945,353.491,350.9875,348.4837,345.9802,344.1025,342.6422,342.0163,342.0163,343.0594,344.5198,346.8147,349.5268,352.4478,355.5772,358.498,361.0015,363.0878,364.3395,364.9654,364.5482,363.505,361.4188,358.9153,355.7859,352.4478,349.3182,346.1888,343.6853,341.8076,340.5559,340.3473,340.9731,342.2249,344.3112,346.606,349.3182,352.0306,354.3255,356.4117,358.0807,359.1239,359.5411,359.5411,358.9153,358.0807,357.0376,355.9945,354.9514,353.9082,353.0737,352.2392,351.6133,351.1961,350.7788,350.5702,350.1527,349.7354,349.3182,349.1096,348.6923,348.4837,348.2751,348.0664,348.0664,348.0664,348.2751,348.2751,348.2751,348.2751,348.2751,348.0664,347.8578,347.6492,347.2319,346.8147,346.606,346.3974,346.3974,346.606,347.0233,347.8578,348.9009,350.3616,351.822,353.491,354.9514,356.2031,357.2462,357.6635,357.6635,357.0376,355.7859,354.1169,352.2392,350.5702,349.1096,348.2751,347.8578,347.8578,348.2751,348.9009,349.7354,35
0.3616,350.7788,350.7788,350.1527,349.1096,347.6492,345.9802,344.5198,343.4767,343.0594,343.0594,343.4767,343.6853,343.8939,343.6853,343.0594,342.2249,341.1818,340.1386,339.3041,338.6783,338.6783,338.8869,339.7214,340.9731,342.6422,344.3112,346.1888,347.8578,349.1096,349.9441,350.3616,350.5702,350.3616,349.9441,349.3182,348.6923,348.0664,347.2319,346.606,346.1888,345.9802,345.9802,346.606,347.6492,349.1096,351.1961,353.6996,356.4117,359.1239,361.6274,363.7137,365.174,365.7999,365.7999,364.9654,363.2964,361.2101,358.7066,355.7859,352.8651,349.9441,347.4406,345.3543,343.6853,342.6422,342.2249,342.2249,343.0594,344.1025,345.9802,348.0664,350.5702,353.491,356.6204,359.5411,362.6705,365.3827,367.8862,369.7638,371.0156,371.4328,371.2242,370.3897,368.9293,366.843,364.5482,361.6274,358.7066,355.16,351.6133,347.8578,343.8939,340.1386,336.3834,333.0453,330.5418,328.6642,328.0383,328.6642,330.3332,333.4626,337.4265,342.0163,346.606,350.1527,351.6133,351.6133,351.6133,351.6133])
(I_x, I_y) = normalisePoints(I_x,I_y,len(I_x))
(Q_x, Q_y) = normalisePoints(Q_x,Q_y,len(I_x))
qubit_pulses = (I_x, I_y, Q_x, Q_y)
# Plot
plotPulsesTimeDomain(resonator_pulses, 'Resonator pulses');
plotPulsesTimeDomain(qubit_pulses, 'Qubit pulses');
plotPulsesPhaseSpace(resonator_pulses, 'Resonator pulse');
plotPulsesPhaseSpace(qubit_pulses, 'Qubit pulse');
# In[ ]:
samples = len(qubit_pulses[1])
power_two = 2**30
signal_resonator = resonator_pulses[1] + 1j*resonator_pulses[3]
signal_resonator = np.pad(signal_resonator, (0, power_two-samples), mode='constant')
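# NB: padding to 2**30 complex128 samples allocates roughly 16 GiB; a smaller
# power of two keeps memory manageable at the cost of frequency resolution.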
samples = power_two
sample_interval = 1e-9 # 1 ns
sample_frequency = 1e9 # 1 GHz
time = np.linspace(0, samples*sample_interval, samples)
signal_spectrum = np.fft.fftshift(np.fft.fft(signal_resonator))
freqs = np.fft.fftshift(np.fft.fftfreq(samples, d=sample_interval))
plt.figure(figsize=(10,5))
plt.plot(freqs / 1e6, np.abs(signal_spectrum)) # in MHz
plt.xlim(0, 125)
plt.title('Resonator pulse spectrum')
plt.xlabel('f (MHz)');
signal_qubit = qubit_pulses[1] + 1j*qubit_pulses[3]
signal_spectrum = np.fft.fftshift(np.fft.fft(signal_qubit))
freqs = np.fft.fftshift(np.fft.fftfreq(samples, d=sample_interval))
plt.figure(figsize=(10,5))
plt.plot(freqs / 1e6, np.abs(signal_spectrum)) # in MHz
plt.xlim(0, 125)
plt.title('Qubit pulse spectrum')
plt.xlabel('f (MHz)');
# Conclusion: the magnitude spectrum alone does not explain the pulse shapes; see the instantaneous-frequency sketch below.
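# In[ ]:
# A complementary diagnostic (a sketch, not part of the original analysis):
# since I + jQ already forms a complex baseband signal, its unwrapped phase
# gives the instantaneous frequency directly, which can reveal chirps or
# sideband modulation that the magnitude spectrum hides.
inst_phase = np.unwrap(np.angle(signal_qubit))
inst_freq = np.diff(inst_phase) / (2 * np.pi * sample_interval)  # in Hz
plt.figure(figsize=(10,5))
plt.plot(inst_freq / 1e6)
plt.title('Qubit pulse instantaneous frequency (sketch)')
plt.xlabel('sample')
plt.ylabel('f (MHz)');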
| 387.949495
| 4,635
| 0.768089
|
1350d5e999ba62082386ca30a0941213478229cf
| 479
|
py
|
Python
|
validate.py
|
fgeek/pyfibot
|
c541cdd18960182b252eeeb184c08e339e8b3e57
|
[
"BSD-3-Clause"
] | 28
|
2015-01-15T21:14:41.000Z
|
2020-05-19T06:16:44.000Z
|
validate.py
|
fgeek/pyfibot
|
c541cdd18960182b252eeeb184c08e339e8b3e57
|
[
"BSD-3-Clause"
] | 214
|
2015-01-17T02:54:37.000Z
|
2021-07-25T02:09:30.000Z
|
validate.py
|
fgeek/pyfibot
|
c541cdd18960182b252eeeb184c08e339e8b3e57
|
[
"BSD-3-Clause"
] | 20
|
2015-01-16T20:41:59.000Z
|
2021-06-04T18:35:33.000Z
|
#!/usr/bin/env python
import yaml
import json
import jsonschema
print("Loading config..")
config = yaml.load(open("config.yml"), Loader=yaml.FullLoader)
print("Loading json schema..")
schema = json.load(open("pyfibot/config_schema.json"))
print("Validating configuration")
v = jsonschema.Draft3Validator(schema)
if not v.is_valid(config):
print("Error(s) in configuration:")
for error in sorted(v.iter_errors(config), key=str):
print(error)
else:
print("config ok")
| 25.210526
| 62
| 0.724426
|
752f14e8205768382a34cee126b0389200e9ecbf
| 455
|
py
|
Python
|
examples/head_touch.py
|
MPI-IS/reactive_pepper
|
079f9b0627bfd6c9e3f2a4466c95ad662002a600
|
[
"BSD-3-Clause"
] | null | null | null |
examples/head_touch.py
|
MPI-IS/reactive_pepper
|
079f9b0627bfd6c9e3f2a4466c95ad662002a600
|
[
"BSD-3-Clause"
] | null | null | null |
examples/head_touch.py
|
MPI-IS/reactive_pepper
|
079f9b0627bfd6c9e3f2a4466c95ad662002a600
|
[
"BSD-3-Clause"
] | null | null | null |
import math,time,random
import pepper_interface
IP = "192.168.0.147"
PORT = 9559
simulation = False
with pepper_interface.get(IP,PORT,simulation) as pepper:
time_start = time.time()
while time.time()-time_start < 10:
data = pepper.head_touch.get()
if data is not None:
            data, time_stamp = data
            print("%s\t%s\t%s" % (data["front"], data["middle"], data["rear"]))
time.sleep(0.2)
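# A hedged variant (same pepper_interface API as above; sensor values assumed
# truthy while touched): report each sensor only on its rising edge, so holding
# a sensor down does not spam the console.
#
# prev = {"front": 0, "middle": 0, "rear": 0}
# while time.time() - time_start < 10:
#     data = pepper.head_touch.get()
#     if data is not None:
#         data, time_stamp = data
#         for key in prev:
#             if data[key] and not prev[key]:
#                 print(key + " touched")
#             prev[key] = data[key]
#     time.sleep(0.2)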
| 16.851852
| 69
| 0.589011
|
89c90db5b7d7c130172cc9d12fac59a9e90d2fc9
| 610
|
py
|
Python
|
aiokraken/tests/test_config.py
|
asmodehn/aiokraken
|
b260bd41d5aa091e6a4f1818328426fbe6f625c0
|
[
"MIT"
] | null | null | null |
aiokraken/tests/test_config.py
|
asmodehn/aiokraken
|
b260bd41d5aa091e6a4f1818328426fbe6f625c0
|
[
"MIT"
] | 82
|
2019-08-30T09:37:49.000Z
|
2022-03-29T14:53:22.000Z
|
aiokraken/tests/test_config.py
|
asmodehn/aiokraken
|
b260bd41d5aa091e6a4f1818328426fbe6f625c0
|
[
"MIT"
] | null | null | null |
import unittest
import parameterized
if not __package__:
from aiokraken.config import load_api_keyfile
else:
    from ..config import load_api_keyfile
# This is a kind of integration test for the user environment...
# Need to test user environment (and not CI)
class TestLoadKeyFile(unittest.TestCase):
@unittest.skipIf(load_api_keyfile() is None,
"keyfile not detected")
def test_load_api_keyfile(self):
r = load_api_keyfile()
assert 'key' in r
assert 'secret' in r
assert len(r.get('key')) == 56
assert len(r.get('secret')) == 88
| 25.416667
| 64
| 0.670492
|
d88a9d99ebef6f134db6e0e3a62a0169b8208174
| 287
|
py
|
Python
|
app/users/urls.py
|
alirezashahali/recipe-api
|
745cb3dd5d2371571a62ce1624bd53828ff4e262
|
[
"MIT"
] | null | null | null |
app/users/urls.py
|
alirezashahali/recipe-api
|
745cb3dd5d2371571a62ce1624bd53828ff4e262
|
[
"MIT"
] | null | null | null |
app/users/urls.py
|
alirezashahali/recipe-api
|
745cb3dd5d2371571a62ce1624bd53828ff4e262
|
[
"MIT"
] | null | null | null |
from django.urls import path
from users import views
app_name = 'users'
urlpatterns = [
    path('create/', views.CreateUserView.as_view(), name='create'),
    path('token/', views.CreateTokenView.as_view(), name='token'),
    path('me/', views.ManageUserView.as_view(), name='me')
]
| 31.888889
| 68
| 0.686411
|
b7adcd82b678603c583f86da175044e4063a0dbb
| 1,913
|
py
|
Python
|
actu psg/cogs/ticket.py
|
Yuhiro-sama/psg-bot
|
ba9a63025a3ce1e85bb62625e36c3caaa5c1ef1f
|
[
"BSD-2-Clause"
] | null | null | null |
actu psg/cogs/ticket.py
|
Yuhiro-sama/psg-bot
|
ba9a63025a3ce1e85bb62625e36c3caaa5c1ef1f
|
[
"BSD-2-Clause"
] | null | null | null |
actu psg/cogs/ticket.py
|
Yuhiro-sama/psg-bot
|
ba9a63025a3ce1e85bb62625e36c3caaa5c1ef1f
|
[
"BSD-2-Clause"
] | null | null | null |
import discord
from discord.ext import commands
from discord.errors import Forbidden
import aiofiles
color = 0xff1100
class Ticket(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.bot.reaction_roles = []
@commands.command()
async def ticket(self, ctx):
        ticket_embed = discord.Embed(title="Ticket", description="React to open a ticket", color=color)
msg = await ctx.send(embed=ticket_embed)
await msg.add_reaction("🎫")
emoji = "🎫"
self.bot.reaction_roles.append((ctx.guild.id, msg.id, str(emoji.encode("utf-8"))))
async with aiofiles.open("reaction_roles.txt", mode="a") as file:
emoji_utf = emoji.encode("utf-8")
await file.write(f"{ctx.guild.id} {msg.id} {emoji_utf}\n")
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
for guild_id, msg_id, emoji in self.bot.reaction_roles:
guild = self.bot.get_guild(guild_id)
if payload.member.name == self.bot.user.name:
return
for channel in guild.text_channels:
memb = str(payload.member.name).lower()
if channel.name == f"ticket-{memb}":
return
if msg_id == payload.message_id and emoji == str(payload.emoji.name.encode("utf-8")):
try:
p = await guild.create_text_channel(f"ticket-{payload.member.name}")
await p.send(f"{payload.member.mention} opened a ticket")
except Forbidden:
await payload.member.send("I don't have the rights to do this, please contact an administrator.")
return
def setup(bot):
bot.add_cog(Ticket(bot))
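# Hedged sketch: self.bot.reaction_roles starts empty on every boot even though
# each ticket message is appended to reaction_roles.txt above. Reloading that
# file in __init__ (same "guild_id msg_id emoji" line format) would restore
# previously created ticket messages:
#
# def _load_reaction_roles(self):
#     try:
#         with open("reaction_roles.txt") as f:
#             for line in f:
#                 guild_id, msg_id, emoji = line.split(maxsplit=2)
#                 self.bot.reaction_roles.append((int(guild_id), int(msg_id), emoji.strip()))
#     except FileNotFoundError:
#         pass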
| 36.09434
| 118
| 0.559331
|
e0966602b3012cbcc27a2249c8882c5a3c354898
| 2,462
|
py
|
Python
|
src/fbsrankings/infrastructure/unit_of_work.py
|
mikee385/fbsrankings
|
2b50e26a302b53c21cd8f5c965943d6fbf0680a1
|
[
"MIT"
] | null | null | null |
src/fbsrankings/infrastructure/unit_of_work.py
|
mikee385/fbsrankings
|
2b50e26a302b53c21cd8f5c965943d6fbf0680a1
|
[
"MIT"
] | null | null | null |
src/fbsrankings/infrastructure/unit_of_work.py
|
mikee385/fbsrankings
|
2b50e26a302b53c21cd8f5c965943d6fbf0680a1
|
[
"MIT"
] | null | null | null |
from types import TracebackType
from typing import ContextManager
from typing import Optional
from typing import Type
from typing_extensions import Literal
from fbsrankings.common import EventBus
from fbsrankings.common import EventRecorder
from fbsrankings.domain import AffiliationRepository
from fbsrankings.domain import GameRankingRepository
from fbsrankings.domain import GameRepository
from fbsrankings.domain import SeasonRepository
from fbsrankings.domain import TeamRankingRepository
from fbsrankings.domain import TeamRecordRepository
from fbsrankings.domain import TeamRepository
from fbsrankings.infrastructure.transaction import TransactionFactory
class UnitOfWork(ContextManager["UnitOfWork"]):
def __init__(self, data_source: TransactionFactory, bus: EventBus) -> None:
self._outer_bus = bus
self._inner_bus = EventRecorder(EventBus())
self._transaction = data_source.transaction(self._inner_bus)
@property
def season(self) -> SeasonRepository:
return self._transaction.season
@property
def team(self) -> TeamRepository:
return self._transaction.team
@property
def affiliation(self) -> AffiliationRepository:
return self._transaction.affiliation
@property
def game(self) -> GameRepository:
return self._transaction.game
@property
def team_record(self) -> TeamRecordRepository:
return self._transaction.team_record
@property
def team_ranking(self) -> TeamRankingRepository:
return self._transaction.team_ranking
@property
def game_ranking(self) -> GameRankingRepository:
return self._transaction.game_ranking
def commit(self) -> None:
self._transaction.commit()
for event in self._inner_bus.events:
self._outer_bus.publish(event)
self._inner_bus.clear()
def rollback(self) -> None:
self._transaction.rollback()
self._inner_bus.clear()
def close(self) -> None:
self._transaction.close()
self._inner_bus.clear()
def __enter__(self) -> "UnitOfWork":
self._transaction.__enter__()
return self
def __exit__(
self,
type_: Optional[Type[BaseException]],
value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> Literal[False]:
self._transaction.__exit__(type_, value, traceback)
self._inner_bus.clear()
return False
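# Hedged usage sketch (data_source and bus are whatever the caller wires in;
# the repository call is hypothetical):
#
# with UnitOfWork(data_source, bus) as unit_of_work:
#     season = unit_of_work.season.get(season_id)
#     ...
#     unit_of_work.commit()  # recorded events reach the outer bus only here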
| 29.662651
| 79
| 0.722177
|
4d01712d9a14b136671e84ba240a0baef931ab41
| 858
|
py
|
Python
|
20160902_HowManyBananas.py
|
lemmingapex/TheRiddler
|
232d550a1b243970977d785ea7d703daa714435a
|
[
"MIT"
] | null | null | null |
20160902_HowManyBananas.py
|
lemmingapex/TheRiddler
|
232d550a1b243970977d785ea7d703daa714435a
|
[
"MIT"
] | null | null | null |
20160902_HowManyBananas.py
|
lemmingapex/TheRiddler
|
232d550a1b243970977d785ea7d703daa714435a
|
[
"MIT"
] | null | null | null |
import math
# inputs
initialNumberOfBananas = 3000
numberOfUnitsToDestination = 1000
camelCapacity = 1000
bananasPerUnitThatCamelConsumes = 1
distanceToTravel = numberOfUnitsToDestination
numberOfBananasRemainingAtDistanceToTravel = initialNumberOfBananas
while distanceToTravel > 0:
    #print("distanceToTravel", distanceToTravel)
    #print("numberOfBananasRemainingAtDistanceToTravel", numberOfBananasRemainingAtDistanceToTravel)
    # moving the pile one unit takes ceil(bananas / capacity) loaded trips,
    # each trip consuming one banana per unit travelled
    decreaseInBananas = math.ceil(numberOfBananasRemainingAtDistanceToTravel / camelCapacity) * bananasPerUnitThatCamelConsumes
    #print("decreaseInBananas", decreaseInBananas)
    numberOfBananasRemainingAtDistanceToTravel = numberOfBananasRemainingAtDistanceToTravel - decreaseInBananas
    distanceToTravel = distanceToTravel - 1
print("numberOfBananasRemainingAtZeroDistanceToTravel", numberOfBananasRemainingAtDistanceToTravel)
| 40.857143
| 125
| 0.881119
|
b293c4e951eab343a95232f50c197cd3ae253ad6
| 126
|
py
|
Python
|
database_email_backend/__init__.py
|
enderlabs/django-database-email-backend
|
aad6bade66d076b5425f772430adc7e77e60f5ce
|
[
"MIT"
] | 1
|
2016-01-15T18:54:59.000Z
|
2016-01-15T18:54:59.000Z
|
database_email_backend/__init__.py
|
enderlabs/django-database-email-backend
|
aad6bade66d076b5425f772430adc7e77e60f5ce
|
[
"MIT"
] | 1
|
2015-11-04T22:19:21.000Z
|
2015-11-04T22:19:21.000Z
|
database_email_backend/__init__.py
|
enderlabs/django-database-email-backend
|
aad6bade66d076b5425f772430adc7e77e60f5ce
|
[
"MIT"
] | 4
|
2015-11-04T20:45:16.000Z
|
2021-03-03T06:28:20.000Z
|
# -*- coding: utf-8 -*-
VERSION = (1, 0, 4)
__version__ = "1.0.4"
__authors__ = ["Stefan Foulis <stefan.foulis@gmail.com>", ]
| 25.2
| 59
| 0.611111
|
5d4275bc2f43bd2dd4636b2f8747e163efe084a6
| 8,815
|
py
|
Python
|
utils/generateCode.py
|
KingMagic/django-RESTfulAPI
|
eb7a2234e606bdcc366a642f5731c64313047a0d
|
[
"MIT"
] | null | null | null |
utils/generateCode.py
|
KingMagic/django-RESTfulAPI
|
eb7a2234e606bdcc366a642f5731c64313047a0d
|
[
"MIT"
] | null | null | null |
utils/generateCode.py
|
KingMagic/django-RESTfulAPI
|
eb7a2234e606bdcc366a642f5731c64313047a0d
|
[
"MIT"
] | null | null | null |
import os
def main(app_list):
try:
for data in app_list:
print('app:',data)
app_name = data.get('name')
models = data.get('models')
            print('All models:', models)
app_path = os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),'apps'),app_name)
if os.path.isdir(app_path):
                # Serializers
MySerializer = """
from rest_framework import serializers
from rest_framework.serializers import SerializerMethodField
from rest_framework.validators import UniqueValidator, UniqueTogetherValidator
from base.serializers import BaseModelSerializer
from rest_framework.utils import model_meta
import threading
from .models import *
import time
import datetime
from django.db.models import F, Q
from django.db import transaction
from decimal import Decimal
from django.conf import settings
from django.core.cache import cache
"""
                # ModelViewSet views
MyViewSet = """
import uuid, os, sys, requests, json, re, time, datetime, random, hashlib, hmac, base64, xml, subprocess, threading
from django.db import transaction
from decimal import Decimal
from django.db.models import F, Q
from rest_framework import serializers, status, generics, mixins, viewsets
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet, GenericViewSet
from rest_framework.response import Response
from rest_framework.filters import SearchFilter, OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
from django.forms.models import model_to_dict
# Official JWT
# from rest_framework_jwt.utils import jwt_payload_handler, jwt_encode_handler ,jwt_response_payload_handler
# from rest_framework_jwt.authentication import JSONWebTokenAuthentication
# Cache configuration
from django.core.cache import cache
# Custom JWT configuration and shared utilities
from utils.utils import jwt_decode_handler,jwt_encode_handler,jwt_payload_handler,jwt_payload_handler,jwt_response_payload_handler,google_otp,VisitThrottle,getDistance,NormalObj
from utils.jwtAuth import JWTAuthentication
from utils.pagination import Pagination
from utils.permissions import JWTAuthPermission, AllowAllPermission, BaseAuthPermission
from .models import *
from .serializers import *
# from .filters import *
from functools import reduce
from urllib.parse import unquote_plus
from django.conf import settings
from django.forms.models import model_to_dict
'''
Commonly used serializer fields:
name = serializers.CharField(required=False, label='描述', max_length=None, min_length=None, allow_blank=False, trim_whitespace=True)
name = serializers.EmailField(max_length=None, min_length=None, allow_blank=False)
name = serializers.FloatField(max_value=None, min_value=None)
name = serializers.IntegerField(max_value=None, min_value=None)
name = serializers.DateTimeField(format=api_settings.DATETIME_FORMAT, input_formats=None)
name = serializers.DateField(format=api_settings.DATE_FORMAT, input_formats=None)
name = serializers.BooleanField()
name = serializers.ListField(child=serializers.IntegerField(min_value=0, max_value=100))
name = serializers.DictField(child=<A_FIELD_INSTANCE>, allow_empty=True) DictField(child=CharField())
name = serializers.DecimalField(default=0, max_digits=15, decimal_places=2, verbose_name='金额')
(mixins.CreateModelMixin, mixins.RetrieveModelMixin, mixins.UpdateModelMixin, mixins.DestroyModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet, generics.GenericAPIView)
Q(name__icontains=keyword)  performs a LIKE-style fuzzy match internally
__gt   greater than
__gte  greater than or equal to
__lt   less than
__lte  less than or equal to
__in   within a given set of values
is null / is not null  empty / not empty
.exclude(age=10)  query rows whose age is not 10
'''
"""
                # Write the viewset imports for urls.py
url_viewsets = ''
for model_item in models:
name = model_item.get('name')
url_viewsets += name + 'Viewset, '
MyUrls = '''from {app_name}.views import {viewsets}'''.format(app_name=app_name,viewsets=url_viewsets)
                # Generate the base serializers file 'serializers.py'
with open(os.path.join(app_path,'serializers.py'),'w',encoding='utf-8') as f:
f.write(MySerializer)
                # Generate the base ViewSet views file 'views.py'
with open(os.path.join(app_path,'views.py'),'w',encoding='utf-8') as f:
f.write(MyViewSet)
                # Generate the base routes file 'urls.py'
with open(os.path.join(app_path,'urls.py'),'w',encoding='utf-8') as f:
f.write(MyUrls)
for model_item in models:
name = model_item.get('name')
verbose = model_item.get('verbose')
searchs = model_item.get('searchs')
filters = model_item.get('filters')
                    # Serializers
MySerializer = """
# Add {verbose} serializer
class Add{name}Serializer(serializers.ModelSerializer):
class Meta:
model = {name}
exclude = ('deleted',) # or fields = '__all__' or fields = ['field01','field01',]
# read_only_fields = ('field01', )
# Update {verbose} serializer
class Update{name}Serializer(serializers.ModelSerializer):
class Meta:
model = {name}
exclude = ('deleted',) # or fields = '__all__' or fields = ['field01','field01',]
# read_only_fields = ('field01', )
# Return {verbose} serializer
class Return{name}Serializer(serializers.ModelSerializer):
class Meta:
model = {name}
exclude = ('deleted',) # or fields = '__all__' or fields = ['field01','field01',]
# read_only_fields = ('field01', )
""".format(name=name, verbose=verbose)
                    # ModelViewSet views
MyViewSet = """
# {verbose} ModelViewSet view
class {name}Viewset(ModelViewSet):
'''
    Partial updates of records are supported.
    create: create a {verbose}
    retrieve: retrieve a single {verbose}
    update: update a {verbose}
    destroy: delete a {verbose}
    list: list {verbose} records
'''
queryset = {name}.objects.all().order_by('-create_time')
authentication_classes = (JWTAuthentication,)
permission_classes = [BaseAuthPermission, ]
throttle_classes = [VisitThrottle]
serializer_class = Return{name}Serializer
filter_backends = (DjangoFilterBackend, SearchFilter, OrderingFilter,)
# search_fields = ({searchs})
# filter_fields = ({filters})
ordering_fields = ('update_time', 'create_time',)
pagination_class = Pagination
def get_serializer_class(self):
if self.action in ['create']:
return Add{name}Serializer
if self.action in ['update', 'partial_update']:
return Update{name}Serializer
return Return{name}Serializer
def get_queryset(self):
if bool(self.request.auth) and self.request.user.group_id == 1:
return {name}.objects.all().order_by('-create_time')
elif bool(self.request.auth):
return {name}.objects.filter(user_id=self.request.user.id).order_by('-create_time')
else:
return {name}.objects.filter(id=0).order_by('-create_time')
""".format(name=name, verbose=verbose, searchs=searchs, filters=filters)
                    # Routes
MyUrl = """
# {verbose} management
router.register(r'{lower}', {name}Viewset, base_name='{verbose} management')""".format(name=name, verbose=verbose, lower=name.lower(), app_name=app_name)
                    # Start auto-generating code
                    # Append the model serializers to 'serializers.py'
with open(os.path.join(app_path,'serializers.py'),'a',encoding='utf-8') as f:
f.write(MySerializer)
                    # Append the model ViewSet to 'views.py'
with open(os.path.join(app_path,'views.py'),'a',encoding='utf-8') as f:
f.write(MyViewSet)
                    # Append the route to 'urls.py'
with open(os.path.join(app_path,'urls.py'),'a',encoding='utf-8') as f:
f.write(MyUrl)
print("%s生成完毕!"%name)
print("app:%s 生成完毕!"%app_name)
else:
                print('app:%s does not exist...' % app_name)
except Exception as e:
print("代码生成过程出错...错误原因:%s" % str(e))
if __name__ == '__main__':
    # Auto-generate code: fill app_list with table definitions in the format below and run this script with Python
    # Holds app names together with their models and table descriptions
    # Example: app_list = [{'name': 'tests','models': [{'name':'Group','verbose':'user group table','searchs':"'field1', ",'filters':"'field1', "},{'name':'User','verbose':'user table'}]}]
app_list = [
{'name': 'tests','models': [
        {'name':'Ftable','verbose':'test parent table','searchs':"",'filters':""},
        {'name':'Stable','verbose':'test child table','searchs':"'field1', ",'filters':"'field1', "},
]
},
]
    app_list = []  # cleared here so running the script is a no-op; remove this line to use the example above
main(app_list)
| 44.075
| 177
| 0.65207
|
2a5ebe76a1dc7f822819a394ba16103c04d383a3
| 11,942
|
py
|
Python
|
toolcraft/error/validation.py
|
SpikingNeurons/_toolcraft_
|
070c79ea9248e4082bd69b5344f7b532e57f7730
|
[
"BSD-3-Clause"
] | null | null | null |
toolcraft/error/validation.py
|
SpikingNeurons/_toolcraft_
|
070c79ea9248e4082bd69b5344f7b532e57f7730
|
[
"BSD-3-Clause"
] | 1
|
2021-09-20T22:22:05.000Z
|
2021-09-20T22:22:05.000Z
|
toolcraft/error/validation.py
|
SpikingNeurons/_toolcraft_
|
070c79ea9248e4082bd69b5344f7b532e57f7730
|
[
"BSD-3-Clause"
] | null | null | null |
"""
todo: check validators library
https://validators.readthedocs.io/en/latest/
"""
import typing as t
import inspect
import dataclasses
import numpy as np
from . import CustomException
from ..logger import MESSAGES_TYPE
class ShouldBeOneOf(CustomException):
def __init__(
self, *,
value: t.Any,
values: t.Union[t.List, t.Tuple],
msgs: MESSAGES_TYPE
):
if value in values:
return
super().__init__(
msgs=[
*msgs,
f"Supplied value `{value}` should be one of: ",
values if bool(values) else "[]",
]
)
class ShouldNotBeOneOf(CustomException):
def __init__(
self, *,
value: t.Any,
values: t.Union[t.List, t.Tuple],
msgs: MESSAGES_TYPE
):
if value not in values:
return
super().__init__(
msgs=[
*msgs,
f"Supplied value `{value}` should not be one of:",
values,
]
)
class ShouldBeEqual(CustomException):
def __init__(
self, *,
value1, value2,
msgs: MESSAGES_TYPE
):
_in_npy_test = isinstance(value1, np.ndarray) and \
isinstance(value2, np.ndarray)
if _in_npy_test:
if np.array_equal(value1, value2):
return
        elif value1 == value2:
            return
if _in_npy_test:
_msgs = [
f"The numpy arrays are not equal:",
{
"shape": f"{value1.shape}, {value2.shape}",
"dtype": f"{value1.dtype}, {value2.dtype}",
"value1": f"{value1}",
"value2": f"{value2}",
}
]
else:
_msgs = [
f"Value {value1} != {value2}",
f"Value types are {type(value1)}, {type(value2)}"
]
super().__init__(msgs=[*msgs, *_msgs])
class ShouldNotBeEqual(CustomException):
def __init__(
self, *,
value1, value2,
msgs: MESSAGES_TYPE
):
if value1 != value2:
return
super().__init__(
msgs=[
*msgs,
f"Value {value1} == {value2}",
f"Value types are {type(value1)}, {type(value2)}"
]
)
class ValueNotAllowed(CustomException):
def __init__(
self, *,
value, not_be_value,
msgs: MESSAGES_TYPE
):
if value != not_be_value:
return
super().__init__(
msgs=[
*msgs,
f"We do not allow value {value!r}.",
]
)
class SliceShouldNotOverlap(CustomException):
def __init__(
self, *,
slice1: slice, slice2: slice,
msgs: MESSAGES_TYPE
):
# step should be always None
if slice1.step is not None or slice2.step is not None:
super().__init__(
msgs=[
*msgs,
f"One of the supplied slices have step==None.",
{
"slice1": slice1,
"slice2": slice2,
}
]
)
        # check overlap: two half-open ranges [start, stop) overlap iff each
        # starts before the other stops (this also catches full containment)
        if slice1.start < slice2.stop and slice2.start < slice1.stop:
            super().__init__(
                msgs=[
                    *msgs,
                    "The supplied slices overlap",
                    {
                        "slice1": slice1,
                        "slice2": slice2,
                    }
                ]
            )
class NotAllowed(CustomException):
def __init__(
self, *,
msgs: MESSAGES_TYPE
):
super().__init__(
msgs=[
"This is Not Allowed !!!",
*msgs
]
)
class OnlyValueAllowed(CustomException):
def __init__(
self, *,
value, to_be_value,
msgs: MESSAGES_TYPE
):
if value == to_be_value:
return
super().__init__(
msgs=[
*msgs,
f"We do not allow value {value!r}. "
f"Only value allowed is {to_be_value!r}.",
]
)
class IsSliceOrListWithin(CustomException):
def __init__(
self, *,
value: t.Union[slice, t.List[int]],
min_value: int,
max_value: int,
msgs: MESSAGES_TYPE
):
_raise = False
if isinstance(value, slice):
if value.start < 0:
msgs += [f"Please do not provide negative `start`, "
f"found start={value.start!r} "]
_raise = True
if value.stop < 0:
msgs += [f"Please do not provide negative `stop`, "
f"found stop={value.stop!r} "]
_raise = True
if value.start > value.stop:
msgs += [f"Slice `stop` should be greater that `start`, "
f"found `start={value.start}` > `stop={value.stop}`"]
_raise = True
if not (min_value <= value.start < max_value):
msgs += [f"We need slice `start` to be between "
f"`{min_value} <= start < {max_value}`, "
f"found start={value.start!r}"]
_raise = True
if not (min_value < value.stop <= max_value):
msgs += [f"We need slice `stop` to be between "
f"`{min_value} < stop <= {max_value}`, "
f"found stop={value.stop!r}"]
_raise = True
elif isinstance(value, list):
for i, index in enumerate(value):
if not(min_value <= index < max_value):
msgs += [
f"The item {i} in the list is not within range i.e. "
f"between {min_value} and {max_value}",
f"Found value {index}"
]
_raise = True
break
else:
msgs += [f"Expected a int, slice or list of int instead found "
f"type {type(value)}"]
_raise = True
if not _raise:
return
super().__init__(
msgs=[
*msgs
]
)
class ShouldBeBetween(CustomException):
def __init__(
self, *,
value: t.Union[int, float],
minimum: t.Union[int, float],
maximum: t.Union[int, float],
msgs: MESSAGES_TYPE
):
if not isinstance(value, (int, float)):
super().__init__(
msgs=[
*msgs,
f"The value supplied is not a int or float. "
f"Found unrecognized type {type(value)}."
]
)
if minimum <= value < maximum:
return
super().__init__(
msgs=[
*msgs,
f"Value should be in the range {minimum} <= value < {maximum}",
f"Instead found value {value!r} which is out of range."
f"{type(value)}."
]
)
class ShouldBeGreaterThan(CustomException):
def __init__(
self, *,
value: t.Union[int, float],
minimum_value: t.Union[int, float],
msgs: MESSAGES_TYPE
):
if value > minimum_value:
return
super().__init__(
msgs=[
*msgs,
f"Value {value} should be greater than {minimum_value}."
]
)
class ShouldBeInstanceOf(CustomException):
def __init__(
self, *,
value: t.Any,
value_types: t.Tuple,
msgs: MESSAGES_TYPE
):
if isinstance(value, value_types):
return
super().__init__(
msgs=[
*msgs,
f"Supplied value type {type(value)!r} is not one of: ",
value_types,
]
)
class ShouldBeSubclassOf(CustomException):
def __init__(
self, *,
value: t.Any,
value_types: t.Tuple,
msgs: MESSAGES_TYPE
):
if issubclass(value, value_types):
return
super().__init__(
msgs=[
*msgs,
f"Supplied class {value!r} is not a subclass of: ",
value_types,
]
)
class ShouldBeClassMethod(CustomException):
def __init__(
self, *,
value: t.Callable,
msgs: MESSAGES_TYPE
):
if inspect.ismethod(value):
return
super().__init__(
msgs=[
*msgs,
f"We expect a method instead found value {value} with type"
f" {type(value)}",
f"Note that this helps us obtain respective instance .__self__"
]
)
class ShouldBeFunction(CustomException):
def __init__(
self, *,
value: t.Callable,
msgs: MESSAGES_TYPE
):
if inspect.isfunction(value):
return
super().__init__(
msgs=[
*msgs,
f"We expect a function instead found value {value} with type"
f" {type(value)}",
]
)
class ShouldBeDataClass(CustomException):
def __init__(
self, *,
obj: t.Callable,
msgs: MESSAGES_TYPE
):
if dataclasses.is_dataclass(obj):
return
super().__init__(
msgs=[
*msgs,
f"We expect a dataclass instead found value {obj} with "
f"type {type(obj)}",
]
)
class ShouldHaveAttribute(CustomException):
def __init__(
self, *,
attr_name: str,
obj: t.Any,
msgs: MESSAGES_TYPE
):
if hasattr(obj, attr_name):
return
super().__init__(
msgs=[
*msgs,
f"We expect an attribute {attr_name!r} in object:",
f"{obj}.",
]
)
class ShouldHaveProperty(CustomException):
def __init__(
self, *,
attr_name: str,
obj: t.Any,
msgs: MESSAGES_TYPE
):
if not hasattr(obj.__class__, attr_name):
super().__init__(
msgs=[
*msgs,
f"We expect class {obj.__class__} to have a property "
f"named {attr_name!r}",
]
)
if not isinstance(
getattr(obj.__class__, attr_name), property
):
super().__init__(
msgs=[
*msgs,
f"The member {attr_name!r} of class {obj.__class__} is "
f"not a property.",
f"Instead, found type "
f"{type(getattr(obj.__class__, attr_name))}"
]
)
return
class ShouldNotHaveAttribute(CustomException):
def __init__(
self, *,
attr_name: str,
obj: t.Any,
msgs: MESSAGES_TYPE
):
if not hasattr(obj, attr_name):
return
super().__init__(
msgs=[
*msgs,
f"We do not expect a attribute {attr_name} in object:",
f"{obj}.",
]
)
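# Hedged usage sketch: each validator is raise-on-construct -- building it with
# a failing guard triggers CustomException, while a passing guard returns early
# and construction is a no-op. `mode` below is a hypothetical variable:
#
# ShouldBeOneOf(
#     value=mode,
#     values=("train", "eval"),
#     msgs=["while configuring the run"],
# )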
| 26.537778
| 79
| 0.448417
|
5880e447bc83bdb6842677fb2975200a030a19da
| 2,126
|
py
|
Python
|
resolwe_bio/tests/processes/test_enrichment.py
|
HudoGriz/resolwe-bio
|
4f7363cfa7d9d5a43f1a70ef36c69be3faed7fea
|
[
"Apache-2.0"
] | null | null | null |
resolwe_bio/tests/processes/test_enrichment.py
|
HudoGriz/resolwe-bio
|
4f7363cfa7d9d5a43f1a70ef36c69be3faed7fea
|
[
"Apache-2.0"
] | null | null | null |
resolwe_bio/tests/processes/test_enrichment.py
|
HudoGriz/resolwe-bio
|
4f7363cfa7d9d5a43f1a70ef36c69be3faed7fea
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=missing-docstring
from resolwe.test import tag_process, with_resolwe_host
from resolwe_bio.utils.test import KBBioProcessTestCase
class EnrichmentProcessorTestCase(KBBioProcessTestCase):
@with_resolwe_host
@tag_process('goenrichment')
def test_go_enrichment_dicty(self):
with self.preparation_stage():
inputs = {'src': 'ontology_dicty_cropped.obo.gz'}
ontology = self.run_process('upload-obo', inputs)
inputs = {'src': 'gaf_dicty_cropped.gz', 'source': 'DICTYBASE', 'species': 'Dictyostelium discoideum'}
annotation = self.run_process('upload-gaf', inputs)
inputs = {
'ontology': ontology.pk,
'gaf': annotation.pk,
'pval_threshold': 1,
'source': 'DICTYBASE',
'species': 'Dictyostelium discoideum',
'genes': ['DDB_G0277589', 'DDB_G0286855', 'DDB_G0267640']
}
enrichment = self.run_process('goenrichment', inputs)
self.assertEqual(len(enrichment.process_warning), 0)
self.assertJSON(enrichment, enrichment.output['terms'], '', 'go_enriched_terms_dicty.json.gz')
@with_resolwe_host
@tag_process('goenrichment')
def test_go_enrichment(self):
with self.preparation_stage():
inputs = {'src': 'ontology_mus_cropped.obo.gz'}
ontology = self.run_process('upload-obo', inputs)
inputs = {'src': 'gaf_mgi_cropped.gz', 'source': 'MGI', 'species': 'Mus musculus'}
gaf = self.run_process('upload-gaf', inputs)
inputs = {
'ontology': ontology.pk,
'gaf': gaf.pk,
'pval_threshold': 1,
'genes': ['193202', '56535'],
'source': 'NCBI',
'species': 'Mus musculus'
}
enrichment = self.run_process('goenrichment', inputs)
self.assertEqual(len(enrichment.process_warning), 1)
self.assertEqual(enrichment.process_warning[0], "Not all features could be mapped.")
self.assertJSON(enrichment, enrichment.output['terms'], '', 'go_enriched_terms_mouse.json.gz')
| 39.37037
| 114
| 0.626529
|
e4b894bf1b40aca3b88e3b9db3fe6390c3009216
| 330
|
py
|
Python
|
ProjectEuler/problem_5.py
|
aaditkamat/competitive-programming
|
d0b8f30d3cb3411d2467b98363c12d75d852e245
|
[
"MIT"
] | null | null | null |
ProjectEuler/problem_5.py
|
aaditkamat/competitive-programming
|
d0b8f30d3cb3411d2467b98363c12d75d852e245
|
[
"MIT"
] | 3
|
2019-02-24T11:42:28.000Z
|
2019-06-03T14:15:46.000Z
|
ProjectEuler/problem_5.py
|
aaditkamat/online-judge-submissions
|
d0b8f30d3cb3411d2467b98363c12d75d852e245
|
[
"MIT"
] | null | null | null |
def gcd(x, y):
arr = []
for i in range(1, min(x, y) + 1):
if x % i == 0 and y % i == 0:
arr.append(i)
return arr[-1]
def lcm(x, y):
return (x * y) // gcd(x, y)
def solution(num):
result = 2
    for i in range(3, num + 1):  # include num itself (divisible by all of 1..num)
result = lcm(result, i)
return result
print(solution(20))
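# Equivalent one-liner using the standard library (a sketch, Python 3.5+):
# from functools import reduce
# from math import gcd
# print(reduce(lambda acc, i: acc * i // gcd(acc, i), range(2, 21), 1))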
| 19.411765
| 37
| 0.484848
|
95a613cbb08ffea1b1bf1702b220e4742dfc6195
| 848
|
py
|
Python
|
test/Keypad.py
|
tratitude/BridgeMaster
|
e3916b077d96f3520d0a8ed9bb548d614465aa2e
|
[
"Apache-2.0"
] | 1
|
2021-01-05T14:40:08.000Z
|
2021-01-05T14:40:08.000Z
|
test/Keypad.py
|
fdmdkw/BridgeMaster
|
e3916b077d96f3520d0a8ed9bb548d614465aa2e
|
[
"Apache-2.0"
] | 1
|
2021-10-19T08:05:06.000Z
|
2021-10-19T08:05:06.000Z
|
test/Keypad.py
|
fdmdkw/BridgeMaster
|
e3916b077d96f3520d0a8ed9bb548d614465aa2e
|
[
"Apache-2.0"
] | 2
|
2019-10-21T15:25:37.000Z
|
2021-03-17T06:59:09.000Z
|
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
#♣♦♥♠
MATRIX = [
['S','H','D','C'],
['NT', 3, 6, 9],
['X', 2, 5, 8],
[ 0, 1, 4, 7]]
ROW = [ 6, 13, 19, 26]
COL = [12, 16, 20, 21]
for j in range(4):
GPIO.setup(COL[j], GPIO.OUT)
GPIO.output(COL[j], 0)
for i in range(4):
GPIO.setup(ROW[i], GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def Read_Key():
Key = -1
while (Key==-1):
for j in range(4):
GPIO.output(COL[j],1)
for i in range(4):
if GPIO.input(ROW[i])==1:
Key = MATRIX[i][j]
#time.sleep(0.01)
GPIO.output(COL[j],0)
return Key
"""
try:
while (True):
print (Read_Key())
time.sleep(0.5)
except KeyboardInterrupt:
GPIO.cleanup()
"""
| 19.272727
| 59
| 0.481132
|
367730f83b8876d266b6f0489a83eaf548c59f28
| 5,826
|
py
|
Python
|
spintronic/examples/spin_torque/stt_01.py
|
ddas-04/fantasi
|
c44dc58d754ecbb12c131f03a4c85d75bb7f6077
|
[
"CC0-1.0"
] | null | null | null |
spintronic/examples/spin_torque/stt_01.py
|
ddas-04/fantasi
|
c44dc58d754ecbb12c131f03a4c85d75bb7f6077
|
[
"CC0-1.0"
] | 1
|
2021-12-13T05:06:38.000Z
|
2021-12-14T02:17:59.000Z
|
spintronic/examples/spin_torque/stt_01.py
|
ddas-04/fantasi
|
c44dc58d754ecbb12c131f03a4c85d75bb7f6077
|
[
"CC0-1.0"
] | 2
|
2021-05-31T03:34:03.000Z
|
2021-11-22T06:44:14.000Z
|
from dolfin import *
from mshr import *
import numpy as np
from matplotlib import cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from gryphon import *
from HFields import *
'''
FEniCS controls
'''
parameters["linear_algebra_backend"] = "PETSc"
parameters["mesh_partitioner"] = "ParMETIS"
'''
Plot control
'''
outputStats = False
'''
FANTASI simulation name
'''
simName = "stt_01"
'''
Mesh
'''
global_normal = Expression(("x[0]", "x[1]", "x[2]"), degree=3)
meshid = "sphere_ico6"
meshfile = "meshes/%s.xml.gz" % meshid
mesh = Mesh(meshfile)
mesh.init_cell_orientations(global_normal)
OutLine = "Mesh dimensions: %d" % mesh.topology().dim()
print(OutLine)
'''
Control parameters for FEM solver
'''
q_degree = 3
absTol = 1e-15
relTol = 1e-5
'''
Control parameters for time stepping and time integration
'''
tscale = 1e-9 # Scale factor to convert simulation time to real-time
num_steps = 31 # total number of steps
dt = 1e-18 / tscale # time step size (in simulation time)
startDt = 1e-4 # First time step to try (time-scaled)
startMaxDt = 1e-2 # Maximum Dt in time-stepper (time-scaled)
startMinDt = 1e-18 # Minimum Dt in time-stepper (time-scaled)
T = [0, 1] # Time range of simulation per stage (time-scaled)
stageCountOuter = 1 # Number of stages to simulate
stageCountInner = 20 # Number of stages to simulate
'''
Constant parameters for LLG problem
'''
kBoltzmann = 1.38064852e-23 # in J/K
mu0 = 4*np.pi * 1.0e-7 # in N/A^2
gamFac = 1.7595e11 # in rad/(s.T)
'''
Free layer description
'''
alpha = 0.0135 # Unitless damping factor
Ms = 450e3 # in A/m
t_FL = 1.0e-9 # thickness of FL
diameter = 40.0e-9 # diameter of FL with circular cross-section
magVolume = t_FL * (diameter**2) * (np.pi/4.0) # in m^3
G = Constant((gamFac*tscale*mu0)/(1+alpha**2)) # Scale factor for LLG equation (time-scaled)
'''
Temperature parameters
'''
Temperature = 300 # in K
D = Constant(alpha * gamFac * tscale * kBoltzmann * Temperature / ((1+alpha**2)*Ms * magVolume)) # per unit time based on that for gamFac
'''
Definitions for uniaxial anisotropy
'''
delta = 44.0
Eb = delta*kBoltzmann*Temperature
Ku2 = Eb/magVolume
H_uni = Constant((2*Ku2)/(mu0*Ms))
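# Sanity check (a sketch, not part of the original run): the barrier height in
# units of kB*T should recover delta, i.e.
# print(Eb / (kBoltzmann * Temperature))  # expected: 44.0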
'''
Definitions for spin torque
'''
P_fix = 0.5
P_free = 0.5
Lambda_fix = 1.0
Lambda_free = 1.0
epsPrime = 0.0
Icurr = 25e-6
mp = np.array([0.0, 0.0, -1.0])
'''
The LLG equation
'''
dmdt = dmdt_huaz(gam_fac=G, alph_damp=alpha, Huaz=H_uni, q_degree=q_degree)
dmdt_stt = dmdt_mp(gam_fac=G, alph_damp=alpha, Pfix=P_fix, Pfree=P_free, LambFree=Lambda_free, LambFix=Lambda_fix, Ms_=Ms, Icurr=Icurr, vol=magVolume, epsPrime=epsPrime, mp=mp, q_degree=q_degree)
'''
Set up variational form of Fokker-Planck equation for initial value problem (IVP)
'''
#### Basis space
V = FunctionSpace(mesh,'CG',q_degree)
V_vec = VectorFunctionSpace(mesh,'CG',degree=q_degree,dim=3)
#### Create time series file to import nodal values
timeseries_rho = TimeSeries('rho_series')
#### Define initial value on the mesh
rho_curr = Function(V)
timeseries_rho.retrieve(rho_curr.vector(),0.0)
#### Set up LLG equation to be solved
velocity_uniaxial = interpolate(dmdt,V_vec)
velocity_stt = interpolate(dmdt_stt,V_vec)
#### Set up variational form
rho_ = TrialFunction(V)
v0 = TestFunction(V)
fpe_rhs = dot((velocity_uniaxial+velocity_stt)*rho_, grad(v0))*dx - D*dot(grad(rho_),grad(v0))*dx
#### Create VTK file for saving solution and save initial value
outDirName = simName+"_results"
vtkfile = File(outDirName+"/solution.pvd")
print('VTK File saved')
vtkfile << (rho_curr, 0)
#### Create time series file to save nodal values
timeseries_rho = TimeSeries(outDirName+"/rho_series")
#### Perform initial integration to get estimated error in the beginning
print('Initial probability:')
print(assemble(rho_curr*dx))
'''
Using Gryphon toolbox to perform time-stepping
'''
#### Start the solver to calculate transient solution
for idx1 in range(0, stageCountOuter):
for idx in range(0, stageCountInner):
#### Get initial Gryphon object
obj = ESDIRK(T, rho_curr, fpe_rhs, bcs=[], tdfBC=[], tdf=[], method="mumps")
#### Set up Gryphon time-stepping control
obj.parameters["timestepping"]["dtmin"] = startMinDt
obj.parameters["timestepping"]["dtmax"] = startMaxDt
obj.parameters["timestepping"]["stepsizeselector"] = "gustafsson" # Time step adaptation scheme
obj.parameters["timestepping"]["convergence_criterion"] = "relative" # Error check scheme
obj.parameters["timestepping"]["dt"] = startDt # First time step to try
#### Set up solver verbosity
obj.parameters["verbose"] = True
#### Save plot of solution at every internal time step
obj.parameters["output"]["plot"] = False
#### Set that the plot of selected step sizes should be saved in jpg.
#### Available choices are jpg, png and eps.
if outputStats:
obj.parameters["output"]["imgformat"] = "jpg"
obj.solve()
rho_curr = obj.u
t = obj.t
vtkfile << (rho_curr, (t*(1+idx) + idx1*T[1]))
print('VTK File saved')
print('Updated probability:')
print(assemble(rho_curr*dx))
startDt = startDt * 10
startMaxDt = startMaxDt * 10
startMinDt = startMinDt * 10
T[1] = 10 * T[1]
#### Save final solution in HDF5 file for easy import by other codes
timeseries_rho.store(rho_curr.vector(), 0.0)
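#### Hedged sketch: a downstream script can reload the stored solution like so,
#### provided it rebuilds the same mesh and FunctionSpace V first:
# rho_loaded = Function(V)
# TimeSeries(outDirName + "/rho_series").retrieve(rho_loaded.vector(), 0.0)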
| 31.322581
| 195
| 0.643323
|
0b30c909b4867793d56befbfc478a5cc191be689
| 1,212
|
py
|
Python
|
src/utils/logger.py
|
ArielXL/web-scrapping
|
5aa24c04ca493ba1aa5b544a585adbedca2afb73
|
[
"MIT"
] | null | null | null |
src/utils/logger.py
|
ArielXL/web-scrapping
|
5aa24c04ca493ba1aa5b544a585adbedca2afb73
|
[
"MIT"
] | null | null | null |
src/utils/logger.py
|
ArielXL/web-scrapping
|
5aa24c04ca493ba1aa5b544a585adbedca2afb73
|
[
"MIT"
] | null | null | null |
import logging
from utils.tools import *
from utils.colors import *
parseLevel = lambda x: getattr(logging, x)
def LoggerFactory(name='root'):
'''
Create a custom logger to use colors in the logs
'''
logging.setLoggerClass(Logger)
logging.basicConfig(format=FORMAT, datefmt=DATE_TIME)
return logging.getLogger(name=name)
class Logger(logging.getLoggerClass()):
def __init__(self, name='root', level=logging.NOTSET):
self.debug_color = BLUEB
self.info_color = YELLOWB
self.error_color = REDB
self.ok_color = GREENB
super().__init__(name, level)
def debug(self, message, method=''):
super().debug(message, extra={'color': self.debug_color, 'method': method})
def info(self, message, method=''):
super().info(message, extra={'color': self.info_color, 'method': method})
def error(self, message, method=''):
super().error(message, extra={'color': self.error_color, 'method': method})
def ok(self, message, method=''):
super().info(message, extra={'color': self.ok_color, 'method': method})
def change_color(self, method, color):
setattr(self, f"{method}_color", color)
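# Minimal usage sketch (hedged: assumes FORMAT, DATE_TIME and the *B color
# constants really come from utils.tools / utils.colors as imported above):
# log = LoggerFactory('scraper')
# log.info('starting crawl', method='main')
# log.change_color('info', GREENB)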
| 31.076923
| 83
| 0.657591
|
c448d8dc1b095968b796b4a330ab1e73141e5550
| 10,192
|
py
|
Python
|
modules/chanlogs.py
|
znuxor/casualbotler
|
faad16a34de8e047f94c4b7e02a4547917990b1a
|
[
"BSD-3-Clause"
] | 1
|
2019-06-07T16:06:16.000Z
|
2019-06-07T16:06:16.000Z
|
modules/chanlogs.py
|
znuxor/casualbotler
|
faad16a34de8e047f94c4b7e02a4547917990b1a
|
[
"BSD-3-Clause"
] | 18
|
2018-06-08T21:53:10.000Z
|
2020-02-24T02:31:38.000Z
|
modules/chanlogs.py
|
znuxor/casualbotler
|
faad16a34de8e047f94c4b7e02a4547917990b1a
|
[
"BSD-3-Clause"
] | 1
|
2019-05-26T00:19:49.000Z
|
2019-05-26T00:19:49.000Z
|
# coding=utf8
"""
Adapted by Znuxor
Based off:
chanlogs.py - Sopel Channel Logger module
Copyright 2014, David Baumgold <david@davidbaumgold.com>
Licensed under the Eiffel Forum License 2
http://sopel.chat
"""
from __future__ import unicode_literals
import os
import os.path
import re
import threading
from datetime import datetime
from collections import defaultdict
try:
from pytz import timezone
import pytz
except ImportError:
pytz = None
import sopel.module
import sopel.tools
from sopel.config.types import StaticSection, ValidatedAttribute, FilenameAttribute
MESSAGE_TPL = "{datetime} {trigger.nick} ({trigger.hostmask}) {message}"
ACTION_TPL = "{datetime} {trigger.nick} ({trigger.hostmask}) * {message}"
MODE_TPL_2 = "{datetime} -- Mode {trigger.sender} ({trigger.args[1]}) " \
"by {trigger.nick} ({trigger.hostmask})"
MODE_TPL_3 = "{datetime} -- Mode {trigger.sender} ({trigger.args[1]} {trigger.args[2]}) " \
"by {trigger.nick} ({trigger.hostmask})"
KICK_TPL = "{datetime} <-- {trigger.nick} ({trigger.hostmask}) " \
"has kicked {trigger.args[1]} ({trigger.args[2]})"
NICK_TPL = "{datetime} -- {trigger.nick} ({trigger.hostmask}) is now known as {trigger.sender}"
JOIN_TPL = "{datetime} --> {trigger.nick} ({trigger.hostmask}) has joined {trigger}"
PART_TPL = "{datetime} <-- {trigger.nick} ({trigger.hostmask}) has left ({trigger})"
QUIT_TPL = "{datetime} *** {trigger.nick} ({trigger.hostmask}) has quit IRC ({trigger.args[0]})"
# Characters that are unsafe in file names on common platforms (per Wikipedia)
BAD_CHARS = re.compile(r'[\/?%*:|"<>. ]')
class ChanlogsSection(StaticSection):
'''A data class containing all the module parameter definitions.'''
dir = FilenameAttribute('dir', directory=True, default='~/chanlogs')
"""Path to channel log storage directory"""
by_day = ValidatedAttribute('by_day', parse=bool, default=True)
"""Split log files by day"""
privmsg = ValidatedAttribute('privmsg', parse=bool, default=True)
"""Record private messages"""
microseconds = ValidatedAttribute('microseconds', parse=bool, default=False)
"""Microsecond precision"""
localtime = ValidatedAttribute('localtime', parse=bool, default=False)
"""Attempt to use preferred timezone instead of UTC"""
message_template = ValidatedAttribute('message_template', default=None)
action_template = ValidatedAttribute('action_template', default=None)
mode_template = ValidatedAttribute('mode_template', default=None)
kick_template = ValidatedAttribute('kick_template', default=None)
join_template = ValidatedAttribute('join_template', default=None)
part_template = ValidatedAttribute('part_template', default=None)
quit_template = ValidatedAttribute('quit_template', default=None)
nick_template = ValidatedAttribute('nick_template', default=None)
def configure(config):
'''Invoked when in configuration mode.'''
config.define_section('chanlogs', ChanlogsSection, validate=False)
config.chanlogs.configure_setting(
'dir',
'Path to channel log storage directory',
)
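# Example of the config stanza these settings map to (sopel .cfg style;
# values shown are illustrative):
# [chanlogs]
# dir = ~/chanlogs
# by_day = True
# privmsg = False
# localtime = False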
def get_datetime(bot):
"""
Returns a datetime object of the current time.
"""
dt_obj = datetime.utcnow()
if pytz:
dt_obj = dt_obj.replace(tzinfo=timezone('UTC'))
if bot.config.chanlogs.localtime:
dt_obj = dt_obj.astimezone(timezone(bot.config.clock.tz))
if not bot.config.chanlogs.microseconds:
dt_obj = dt_obj.replace(microsecond=0)
return dt_obj
def get_fpath(bot, trigger, channel=None):
"""
Returns a string corresponding to the path to the file where the message
currently being handled should be logged.
"""
basedir = bot.config.chanlogs.dir
channel = channel or trigger.sender
channel = channel.lstrip("#")
channel = BAD_CHARS.sub('__', channel)
channel = sopel.tools.Identifier(channel).lower()
dt_obj = get_datetime(bot)
if bot.config.chanlogs.by_day:
fname = "{channel}-{date}.log".format(channel=channel, date=dt_obj.date().isoformat())
else:
fname = "{channel}.log".format(channel=channel)
return os.path.join(basedir, fname)
def _format_template(tpl, bot, trigger, **kwargs):
dt_obj = get_datetime(bot)
formatted = tpl.format(
trigger=trigger, datetime=dt_obj.isoformat(),
date=dt_obj.date().isoformat(), time=dt_obj.time().isoformat(),
**kwargs
) + "\n"
return formatted
def setup(bot):
'''Invoked upon module loading.'''
bot.config.define_section('chanlogs', ChanlogsSection)
# locks for log files
if not bot.memory.contains('chanlog_locks'):
bot.memory['chanlog_locks'] = sopel.tools.SopelMemoryWithDefault(threading.Lock)
# keep track of joins, parts and quits of users so QUIT events are logged correctly
if not bot.memory.contains('channels_of_user'):
bot.memory['channels_of_user'] = defaultdict(list)
@sopel.module.rule('.*')
@sopel.module.unblockable
def log_message(bot, message):
"Log every message in a channel"
# if this is a private message and we're not logging those, return early
if message.sender.is_nick() and not bot.config.chanlogs.privmsg:
return
# determine which template we want, message or action
if message.tags.get('intent') == 'ACTION':
tpl = bot.config.chanlogs.action_template or ACTION_TPL
else:
tpl = bot.config.chanlogs.message_template or MESSAGE_TPL
logline = _format_template(tpl, bot, message, message=message)
fpath = get_fpath(bot, message)
with bot.memory['chanlog_locks'][fpath]:
with open(fpath, "ab") as file_handle:
file_handle.write(logline.encode('utf8'))
# user channels management
if message.sender not in bot.memory['channels_of_user'][message.nick]:
bot.memory['channels_of_user'][message.nick].append(message.sender)
@sopel.module.rule('.*')
@sopel.module.event("MODE")
@sopel.module.unblockable
def log_mode(bot, trigger):
'''Logs a mode change string.'''
if len(trigger.args) == 3:
tpl = bot.config.chanlogs.mode_template or MODE_TPL_3
elif len(trigger.args) == 2:
tpl = bot.config.chanlogs.mode_template or MODE_TPL_2
else:
return
logline = _format_template(tpl, bot, trigger)
fpath = get_fpath(bot, trigger, channel=trigger.sender)
with bot.memory['chanlog_locks'][fpath]:
with open(fpath, "ab") as file_handle:
file_handle.write(logline.encode('utf8'))
@sopel.module.rule('.*')
@sopel.module.event("KICK")
@sopel.module.unblockable
def log_kick(bot, trigger):
'''Logs a kick line.'''
tpl = bot.config.chanlogs.kick_template or KICK_TPL
logline = _format_template(tpl, bot, trigger)
fpath = get_fpath(bot, trigger, channel=trigger.sender)
with bot.memory['chanlog_locks'][fpath]:
with open(fpath, "ab") as file_handle:
file_handle.write(logline.encode('utf8'))
# user channels management
if trigger.sender in bot.memory['channels_of_user'][trigger.nick]:
bot.memory['channels_of_user'][trigger.nick].remove(trigger.sender)
@sopel.module.rule('.*')
@sopel.module.event("JOIN")
@sopel.module.unblockable
def log_join(bot, trigger):
'''logs a join line.'''
tpl = bot.config.chanlogs.join_template or JOIN_TPL
logline = _format_template(tpl, bot, trigger)
fpath = get_fpath(bot, trigger, channel=trigger.sender)
with bot.memory['chanlog_locks'][fpath]:
with open(fpath, "ab") as file_handle:
file_handle.write(logline.encode('utf8'))
# user channels management
bot.memory['channels_of_user'][trigger.nick].append(trigger.sender)
@sopel.module.rule('.*')
@sopel.module.event("PART")
@sopel.module.unblockable
def log_part(bot, trigger):
'''logs a part line.'''
tpl = bot.config.chanlogs.part_template or PART_TPL
logline = _format_template(tpl, bot, trigger=trigger)
fpath = get_fpath(bot, trigger, channel=trigger.sender)
with bot.memory['chanlog_locks'][fpath]:
with open(fpath, "ab") as file_handle:
file_handle.write(logline.encode('utf8'))
# user channels management
if trigger.sender in bot.memory['channels_of_user'][trigger.nick]:
bot.memory['channels_of_user'][trigger.nick].remove(trigger.sender)
@sopel.module.rule('.*')
@sopel.module.event("QUIT")
@sopel.module.unblockable
@sopel.module.thread(False)
@sopel.module.priority('high')
def log_quit(bot, trigger):
'''logs a quit line'''
tpl = bot.config.chanlogs.quit_template or QUIT_TPL
logline = _format_template(tpl, bot, trigger)
# make a copy of bot.privileges that we can safely iterate over
privcopy = list(bot.privileges.items())
# write logline to *all* channels that the user was present in
for channel, _ in privcopy:
if channel in bot.memory['channels_of_user'][trigger.nick]:
fpath = get_fpath(bot, trigger, channel)
with bot.memory['chanlog_locks'][fpath]:
with open(fpath, "ab") as file_handle:
file_handle.write(logline.encode('utf8'))
# user channels management
del bot.memory['channels_of_user'][trigger.nick]
@sopel.module.rule('.*')
@sopel.module.event("NICK")
@sopel.module.unblockable
def log_nick_change(bot, trigger):
'''logs a nick change line.'''
tpl = bot.config.chanlogs.nick_template or NICK_TPL
logline = _format_template(tpl, bot, trigger)
old_nick = trigger.nick
new_nick = trigger.sender
# make a copy of bot.privileges that we can safely iterate over
privcopy = list(bot.privileges.items())
# write logline to *all* channels that the user is present in
for channel, privileges in privcopy:
if old_nick in privileges or new_nick in privileges:
fpath = get_fpath(bot, trigger, channel)
with bot.memory['chanlog_locks'][fpath]:
with open(fpath, "ab") as file_handle:
file_handle.write(logline.encode('utf8'))
# user channels management
bot.memory['channels_of_user'][new_nick].extend(bot.memory['channels_of_user'][old_nick])
del bot.memory['channels_of_user'][old_nick]
| 38.029851
| 96
| 0.691425
|
4e53d2b7ebb662a3f68bfcfd22ea98e1d1633c34
| 2,105
|
py
|
Python
|
metric_M_diagonal.py
|
bobchengyang/SDP_RUN
|
f6976ed24255b026a68438085225293c780b0065
|
[
"MIT"
] | null | null | null |
metric_M_diagonal.py
|
bobchengyang/SDP_RUN
|
f6976ed24255b026a68438085225293c780b0065
|
[
"MIT"
] | null | null | null |
metric_M_diagonal.py
|
bobchengyang/SDP_RUN
|
f6976ed24255b026a68438085225293c780b0065
|
[
"MIT"
] | null | null | null |
import torch
from graph_construction import graph_construction
def metric_M_diagonal(M_normalizer, feature, b_ind, label, n_feature, M_d_in,
n_train, metric_M_step, Q_mask, optimizer_M, M_rec, low_rank_yes_no):
if low_rank_yes_no==0:
# Full-rank case: scatter the learned entries into a lower-triangular
# Cholesky factor and form M0 = U @ U.T (positive semi-definite by construction)
tril_idx=torch.tril_indices(n_feature,n_feature)
Cholesky_U_0=torch.zeros(n_feature,n_feature)
Cholesky_U_0[tril_idx[0,Q_mask],tril_idx[1,Q_mask]]=M_d_in
M0=Cholesky_U_0@Cholesky_U_0.T
else:
# Low-rank case: M0 = M_rec @ M_rec.T
M0=M_rec@M_rec.T
# Rescale so that trace(M) equals the target M_normalizer
factor_for_diag=torch.trace(M0)/M_normalizer
M=M0/factor_for_diag
# v = Variable(M_d_in.reshape(n_feature), requires_grad=True)
# M_0=torch.diag(v)
feature_train=feature[b_ind,:]
L=graph_construction(feature_train, n_train, n_feature, M)
metric_M_obj=torch.matmul(torch.matmul(label[b_ind].reshape(1,n_train),L),\
label[b_ind].reshape(n_train,1))
metric_M_obj.backward()
optimizer_M.step()
# print(metric_M_obj)
# projection
# M_d=F.relu(M_d_in-metric_M_step*v.grad)
# trace(M) <= n_feature
# while M_d.sum()>n_feature:
# try_num=(M_d.sum()-n_feature)/M_d.count_nonzero()
# M_d=F.relu(M_d-try_num)
# M_d_out=M_d.reshape(n_feature)
# M_d_out=torch.multiply(M_d,n_feature/M_d.sum()).reshape(n_feature)
# M=torch.diag(M_d)
# Recompute M from the parameters updated in place by optimizer_M.step()
if low_rank_yes_no==0:
Cholesky_U_0[tril_idx[0,Q_mask],tril_idx[1,Q_mask]]=M_d_in
M0=Cholesky_U_0@Cholesky_U_0.T
else:
M0=M_rec@M_rec.T
factor_for_diag=torch.trace(M0)/M_normalizer
M=M0/factor_for_diag
L_M=graph_construction(feature_train, n_train, n_feature, M)
metric_M_obj_M=torch.matmul(torch.matmul(label[b_ind].reshape(1,n_train),L_M),\
label[b_ind].reshape(n_train,1))
tol_current=torch.norm(metric_M_obj_M-metric_M_obj)
return M_d_in,M,tol_current,M_rec
| 36.293103
| 83
| 0.621378
|
28943c0b4362b8767c7a24e4e9811046c5d5479d
| 6,284
|
py
|
Python
|
fashion_mnist_gan.py
|
marshall4471/fashion_mnist_gan
|
f68c88e01ac6e113758d697957a3d7b0c3583a3d
|
[
"MIT"
] | null | null | null |
fashion_mnist_gan.py
|
marshall4471/fashion_mnist_gan
|
f68c88e01ac6e113758d697957a3d7b0c3583a3d
|
[
"MIT"
] | null | null | null |
fashion_mnist_gan.py
|
marshall4471/fashion_mnist_gan
|
f68c88e01ac6e113758d697957a3d7b0c3583a3d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""fashion_mnist_gan.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/10iLMRTE0rgTJ4qriv633vIzjr5ppKfIz
"""
import tensorflow as tf
from IPython import display
import PIL
import imageio
(train_images, train_labels), (_, _) = tf.keras.datasets.fashion_mnist.load_data()
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
train_images = (train_images - 127.5) / 127.5
BUFFER_SIZE = 60000
BATCH_SIZE = 256
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
import matplotlib.pyplot as plt
import os
import time
def make_generator_model():
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(7*7*256, use_bias=False, input_shape=(100,)))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.LeakyReLU())
model.add(tf.keras.layers.Reshape((7, 7, 256)))
assert model.output_shape == (None, 7, 7, 256)
model.add(tf.keras.layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
assert model.output_shape == (None, 7, 7, 128)
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.LeakyReLU())
model.add(tf.keras.layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
assert model.output_shape == (None, 14, 14, 64)
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.LeakyReLU())
model.add(tf.keras.layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
assert model.output_shape == (None, 28, 28, 1)
return model
generator = make_generator_model()
noise = tf.random.normal([1, 100])
generated_image = generator(noise, training=False)
plt.figure(figsize=(12,12))
plt.imshow(generated_image[0, :, :, 0], cmap='gray')
plt.show()
def make_discriminator_model():
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',
input_shape=[28, 28, 1]))
model.add(tf.keras.layers.LeakyReLU())
model.add(tf.keras.layers.Dropout(0.3))
model.add(tf.keras.layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
model.add(tf.keras.layers.LeakyReLU())
model.add(tf.keras.layers.Dropout(0.3))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(1))
return model
discriminator = make_discriminator_model()
decision = discriminator(generated_image)
print(decision)
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def discriminator_loss(real_output, fake_output):
real_loss = cross_entropy(tf.ones_like(real_output), real_output)
fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
total_loss = real_loss + fake_loss
return total_loss
def generator_loss(fake_output):
return cross_entropy(tf.ones_like(fake_output), fake_output)
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
generator=generator,
discriminator=discriminator)
EPOCHS = 50
noise_dim = 100
num_examples_to_generate = 16
seed = tf.random.normal([num_examples_to_generate, noise_dim])
@tf.function
def train_step(images):
noise = tf.random.normal([BATCH_SIZE, noise_dim])
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
generated_images = generator(noise, training=True)
real_output = discriminator(images, training=True)
fake_output = discriminator(generated_images, training=True)
gen_loss = generator_loss(fake_output)
disc_loss = discriminator_loss(real_output, fake_output)
gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
def generate_and_save_images(model, epoch, test_input):
predictions = model(test_input, training=False)
fig = plt.figure(figsize=(4,4))
for i in range(predictions.shape[0]):
plt.subplot(4, 4, i+1)
plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
plt.axis('off')
plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
plt.show()
def train(dataset, epochs):
for epoch in range(epochs):
start = time.time()
for image_batch in dataset:
train_step(image_batch)
display.clear_output(wait=True)
generate_and_save_images(generator,epoch + 1,seed)
if (epoch + 1) % 5 == 0:
checkpoint.save(file_prefix = checkpoint_prefix)
print('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))
display.clear_output(wait=True)
generate_and_save_images(generator,epochs,seed)
train(train_dataset, EPOCHS)
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
def display_image(epoch_no):
return PIL.Image.open('image_at_epoch_{:04d}.png'.format(epoch_no))
display_image(EPOCHS)
import glob
anim_file = 'dcgan.gif'
with imageio.get_writer(anim_file, mode='I') as writer:
filenames = glob.glob('image*.png')
filenames = sorted(filenames)
last = -1
for i,filename in enumerate(filenames):
frame = 2*(i**0.5)
if round(frame) > round(last):
last = frame
else:
continue
image = imageio.imread(filename)
writer.append_data(image)
image = imageio.imread(filename)
writer.append_data(image)
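# Hedged usage note: inside a notebook the finished GIF can be shown with
# display.Image(filename=anim_file)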
| 37.183432
| 124
| 0.709262
|
c17606a1e3c8d8b22105d5c6bcdf5e6d1a19e5bd
| 12,940
|
py
|
Python
|
plugins/http/cafe/engine/http/client.py
|
rcbops-qa/opencafe
|
4e6d99ce763fd28289cd35cbd56ca5722824f89e
|
[
"Apache-2.0"
] | null | null | null |
plugins/http/cafe/engine/http/client.py
|
rcbops-qa/opencafe
|
4e6d99ce763fd28289cd35cbd56ca5722824f89e
|
[
"Apache-2.0"
] | null | null | null |
plugins/http/cafe/engine/http/client.py
|
rcbops-qa/opencafe
|
4e6d99ce763fd28289cd35cbd56ca5722824f89e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
import six
from time import time
from cafe.common.reporting import cclogging
from cafe.engine.clients.base import BaseClient
from requests.packages import urllib3
urllib3.disable_warnings()
def _log_transaction(log, level=cclogging.logging.DEBUG):
"""Parameterized decorator.
Takes a python Logger object and an optional logging level.
"""
def _safe_decode(text, incoming='utf-8', errors='replace'):
"""Decodes incoming text/bytes string using `incoming`
if they're not already unicode.
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a unicode `incoming` encoded
representation of it.
"""
if isinstance(text, six.text_type):
return text
return text.decode(incoming, errors)
""" Paramaterized decorator
Takes a python Logger object and an optional logging level.
"""
def _decorator(func):
"""Accepts a function and returns wrapped version of that function."""
def _wrapper(*args, **kwargs):
"""Logging wrapper for any method that returns a requests response.
Logs requestslib response objects, and the args and kwargs
sent to the request() method, to the provided log at the provided
log level.
"""
logline = '{0} {1}'.format(args, kwargs)
try:
log.debug(_safe_decode(logline))
except Exception as exception:
# Ignore all exceptions that happen in logging, then log them
log.info(
'Exception occurred while logging signature of calling '
'method in http client')
log.exception(exception)
# Make the request and time it's execution
response = None
elapsed = None
try:
start = time()
response = func(*args, **kwargs)
elapsed = time() - start
except Exception as exception:
log.critical('Call to Requests failed due to exception')
log.exception(exception)
raise exception
# requests lib 1.0.0 renamed body to data in the request object
request_body = ''
if 'body' in dir(response.request):
request_body = response.request.body
elif 'data' in dir(response.request):
request_body = response.request.data
else:
log.info(
"Unable to log request body, neither a 'data' nor a "
"'body' object could be found")
# requests lib 1.0.4 removed params from response.request
request_params = ''
request_url = response.request.url
if 'params' in dir(response.request):
request_params = response.request.params
elif '?' in request_url:
request_url, request_params = request_url.split('?')
logline = ''.join([
'\n{0}\nREQUEST SENT\n{0}\n'.format('-' * 12),
'request method..: {0}\n'.format(response.request.method),
'request url.....: {0}\n'.format(request_url),
'request params..: {0}\n'.format(request_params),
'request headers.: {0}\n'.format(response.request.headers),
'request body....: {0}\n'.format(request_body)])
try:
log.log(level, _safe_decode(logline))
except Exception as exception:
# Ignore all exceptions that happen in logging, then log them
log.log(level, '\n{0}\nREQUEST INFO\n{0}\n'.format('-' * 12))
log.exception(exception)
logline = ''.join([
'\n{0}\nRESPONSE RECEIVED\n{0}\n'.format('-' * 17),
'response status..: {0}\n'.format(response),
'response time....: {0}\n'.format(elapsed),
'response headers.: {0}\n'.format(response.headers),
'response body....: {0}\n'.format(response.content),
'-' * 79])
try:
log.log(level, _safe_decode(logline))
except Exception as exception:
# Ignore all exceptions that happen in logging, then log them
log.log(level, '\n{0}\nRESPONSE INFO\n{0}\n'.format('-' * 13))
log.exception(exception)
return response
return _wrapper
return _decorator
def _inject_exception(exception_handlers):
"""Paramaterized decorator takes a list of exception_handler objects"""
def _decorator(func):
"""Accepts a function and returns wrapped version of that function."""
def _wrapper(*args, **kwargs):
"""Wrapper for any function that returns a Requests response.
Allows exception handlers to raise custom exceptions based on
response object attributes such as status_code.
"""
response = func(*args, **kwargs)
if exception_handlers:
for handler in exception_handlers:
handler.check_for_errors(response)
return response
return _wrapper
return _decorator
class BaseHTTPClient(BaseClient):
"""Re-implementation of Requests' api.py that removes many assumptions.
Adds verbose logging.
Adds support for response-code based exception injection.
(Raising exceptions based on response code)
@see: http://docs.python-requests.org/en/latest/api/#configurations
"""
_exception_handlers = []
_log = cclogging.getLogger(__name__)
def __init__(self):
super(BaseHTTPClient, self).__init__()
@_inject_exception(_exception_handlers)
@_log_transaction(log=_log)
def request(self, method, url, **kwargs):
""" Performs <method> HTTP request to <url> using the requests lib"""
return requests.request(method, url, **kwargs)
def put(self, url, **kwargs):
""" HTTP PUT request """
return self.request('PUT', url, **kwargs)
def copy(self, url, **kwargs):
""" HTTP COPY request """
return self.request('COPY', url, **kwargs)
def post(self, url, data=None, **kwargs):
""" HTTP POST request """
return self.request('POST', url, data=data, **kwargs)
def get(self, url, **kwargs):
""" HTTP GET request """
return self.request('GET', url, **kwargs)
def head(self, url, **kwargs):
""" HTTP HEAD request """
return self.request('HEAD', url, **kwargs)
def delete(self, url, **kwargs):
""" HTTP DELETE request """
return self.request('DELETE', url, **kwargs)
def options(self, url, **kwargs):
""" HTTP OPTIONS request """
return self.request('OPTIONS', url, **kwargs)
def patch(self, url, **kwargs):
""" HTTP PATCH request """
return self.request('PATCH', url, **kwargs)
@classmethod
def add_exception_handler(cls, handler):
"""Adds a specific L{ExceptionHandler} to the HTTP client
@warning: SHOULD ONLY BE CALLED FROM A PROVIDER THROUGH A TEST
FIXTURE
"""
cls._exception_handlers.append(handler)
@classmethod
def delete_exception_handler(cls, handler):
"""Removes a L{ExceptionHandler} from the HTTP client
@warning: SHOULD ONLY BE CALLED FROM A PROVIDER THROUGH A TEST
FIXTURE
"""
if handler in cls._exception_handlers:
cls._exception_handlers.remove(handler)
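# Usage sketch (hypothetical handler; any object exposing a
# check_for_errors(response) method works with the hooks above):
#
# class RaiseOnServerError(object):
#     def check_for_errors(self, response):
#         if response.status_code >= 500:
#             raise Exception('server error: {0}'.format(response.status_code))
#
# BaseHTTPClient.add_exception_handler(RaiseOnServerError())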
class HTTPClient(BaseHTTPClient):
"""
@summary: Allows clients to inherit all requests-defined RESTful
verbs. Redefines request() so that keyword args are passed
through a named dictionary instead of kwargs.
Client methods can then take parameters that may overload
request parameters, which allows client method calls to
override parts of the request with parameters sent directly
to requests, overriding the client method logic either in
part or whole on the fly.
@see: http://docs.python-requests.org/en/latest/api/#configurations
"""
def __init__(self):
super(HTTPClient, self).__init__()
self.default_headers = {}
def request(
self, method, url, headers=None, params=None, data=None,
requestslib_kwargs=None):
# set requestslib_kwargs to an empty dict if None
requestslib_kwargs = requestslib_kwargs if (
requestslib_kwargs is not None) else {}
# Set defaults
params = params if params is not None else {}
verify = False
# If headers are provided by both, headers "wins" over default_headers
headers = dict(self.default_headers, **(headers or {}))
# Override url if present in requestslib_kwargs
if 'url' in list(requestslib_kwargs.keys()):
url = requestslib_kwargs.get('url', None) or url
del requestslib_kwargs['url']
# Override method if present in requestslib_kwargs
if 'method' in list(requestslib_kwargs.keys()):
method = requestslib_kwargs.get('method', None) or method
del requestslib_kwargs['method']
# The requests lib already removes None key/value pairs, but we force
# it here in case that behavior ever changes
for key in list(requestslib_kwargs.keys()):
if requestslib_kwargs[key] is None:
del requestslib_kwargs[key]
# Create the final parameters for the call to the base request()
# Wherever a parameter is provided both by the calling method AND
# the requests_lib kwargs dictionary, requestslib_kwargs "wins"
requestslib_kwargs = dict(
{'headers': headers, 'params': params, 'verify': verify,
'data': data}, **requestslib_kwargs)
# Make the request
return super(HTTPClient, self).request(
method, url, **requestslib_kwargs)
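# Override sketch: per-call requestslib_kwargs win over the client defaults,
# e.g. (URL is illustrative):
# client = HTTPClient()
# client.get('http://example.test',
#            requestslib_kwargs={'timeout': 5, 'verify': True})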
class AutoMarshallingHTTPClient(HTTPClient):
"""@TODO: Turn serialization and deserialization into decorators so
that we can support serialization and deserialization on a per-method
basis"""
def __init__(self, serialize_format=None, deserialize_format=None):
super(AutoMarshallingHTTPClient, self).__init__()
self.serialize_format = serialize_format
self.deserialize_format = deserialize_format or self.serialize_format
self.default_headers = {'Content-Type': 'application/{format}'.format(
format=serialize_format)}
def request(
self, method, url, headers=None, params=None, data=None,
response_entity_type=None, request_entity=None,
requestslib_kwargs=None):
# defaults requestslib_kwargs to a dictionary if it is None
requestslib_kwargs = requestslib_kwargs if (requestslib_kwargs is not
None) else {}
# set the 'data' parameter of the request to either what's already in
# requestslib_kwargs, or the deserialized output of the request_entity
if request_entity is not None:
requestslib_kwargs = dict(
{'data': request_entity.serialize(self.serialize_format)},
**requestslib_kwargs)
# Make the request
response = super(AutoMarshallingHTTPClient, self).request(
method, url, headers=headers, params=params, data=data,
requestslib_kwargs=requestslib_kwargs)
# Append the deserialized data object to the response
response.request.__dict__['entity'] = None
response.__dict__['entity'] = None
# If present, append the serialized request data object to
# response.request
if response.request is not None:
response.request.__dict__['entity'] = request_entity
if response_entity_type is not None:
response.__dict__['entity'] = response_entity_type.deserialize(
response.content,
self.deserialize_format)
return response
| 40.06192
| 79
| 0.613369
|
2a23107049a15dc6eb279ae5f95ad2885d66d6f4
| 640
|
py
|
Python
|
elements/python/13/8/soln.py
|
mmcloughlin/problems
|
6095842ffe007a12ec8c2093850515aa4e046616
|
[
"MIT"
] | 11
|
2019-02-08T06:54:34.000Z
|
2021-08-07T18:57:39.000Z
|
elements/python/13/8/soln.py
|
mmcloughlin/problems
|
6095842ffe007a12ec8c2093850515aa4e046616
|
[
"MIT"
] | 1
|
2019-05-21T08:14:10.000Z
|
2019-05-21T08:14:10.000Z
|
elements/python/13/8/soln.py
|
mmcloughlin/problems
|
6095842ffe007a12ec8c2093850515aa4e046616
|
[
"MIT"
] | null | null | null |
def closest_repeat(words):
last_position = dict()
d = float('inf')
i, j = None, None
for idx, word in enumerate(words):
last_idx = last_position.get(word, None)
if last_idx is not None and idx - last_idx < d:
i, j, d = last_idx, idx, idx - last_idx
last_position[word] = idx
return i, j
def test():
words = 'All work and no play makes for no work no fun and no results'.split()
i, j = closest_repeat(words)
assert i == 7 and j == 9
assert 'no' == words[i]
assert 'no' == words[j]
print('pass')
def main():
test()
if __name__ == '__main__':
main()
| 22.857143
| 82
| 0.579688
|
92736148aba4ea5825c65d27bb5b2e0432abe271
| 4,368
|
py
|
Python
|
IGC/BiFModule/resource_embedder.py
|
lfelipe/intel-graphics-compiler
|
da6c84a62d5d499544b2ae5f70ae7d1cb4d78fbd
|
[
"MIT"
] | null | null | null |
IGC/BiFModule/resource_embedder.py
|
lfelipe/intel-graphics-compiler
|
da6c84a62d5d499544b2ae5f70ae7d1cb4d78fbd
|
[
"MIT"
] | null | null | null |
IGC/BiFModule/resource_embedder.py
|
lfelipe/intel-graphics-compiler
|
da6c84a62d5d499544b2ae5f70ae7d1cb4d78fbd
|
[
"MIT"
] | null | null | null |
#===================== begin_copyright_notice ==================================
#Copyright (c) 2017 Intel Corporation
#Permission is hereby granted, free of charge, to any person obtaining a
#copy of this software and associated documentation files (the
#"Software"), to deal in the Software without restriction, including
#without limitation the rights to use, copy, modify, merge, publish,
#distribute, sublicense, and/or sell copies of the Software, and to
#permit persons to whom the Software is furnished to do so, subject to
#the following conditions:
#The above copyright notice and this permission notice shall be included
#in all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
#OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
#CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
#TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#======================= end_copyright_notice ==================================
import binascii
import os
import re
import sys
def PrintHelp():
sys.stdout.write('Usage: {0} <input file> <output file> <symbol name> <attribute>\n'.format(os.path.basename(__file__)))
sys.stdout.write('\n')
sys.stdout.write(' <input file> - Path to input file which will be embedded.\n')
sys.stdout.write(' <output file> - Path to output .cpp file which embedded data\n')
sys.stdout.write(' will be written to.\n')
sys.stdout.write(' <symbol name> - Base name of symbol which identifies embedded data.\n')
sys.stdout.write(' <attribute> - "visibility" to add visibility attribute, "no_attr" to add no attribute\n')
symRe = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*$')
lineSize = 20
chunkSize = 131072
numArgs = 5
if len(sys.argv) < numArgs:
PrintHelp()
exit(0)
for arg in sys.argv:
if arg == '-h' or arg == '--help':
PrintHelp()
exit(0)
if len(sys.argv) > numArgs:
sys.stderr.write('WARNING: Number of arguments is greater than number of supported arguments.\n')
sys.stderr.write(' All additional arguments will be ignored.\n')
symName = sys.argv[3].strip()
if not symRe.match(symName) or symName.endswith('_size'):
sys.stderr.write('ERROR: Invalid symbol name "{0}".\n'.format(symName))
exit(1)
attrOpt = sys.argv[4].strip()
if attrOpt == "visibility":
attr = '__attribute__((visibility("default")))'
elif attrOpt == "no_attr":
attr = ""
else:
sys.stderr.write('ERROR: Invalid attribute argument: "{0}".\n'.format(attrOpt))
exit(1)
try:
openedFiles = list()
try:
inFile = open(sys.argv[1], 'rb')
except EnvironmentError as ex:
sys.stderr.write('ERROR: Cannot open input file "{0}".\n {1}.\n'.format(sys.argv[1], ex.strerror))
exit(1)
openedFiles.append(inFile)
try:
outFile = open(sys.argv[2], 'w')
except EnvironmentError as ex:
sys.stderr.write('ERROR: Cannot create/open output file "{0}".\n {1}.\n'.format(sys.argv[2], ex.strerror))
exit(1)
openedFiles.append(outFile)
outFile.write('// This file is auto generated by resource_embedder, DO NOT EDIT\n\n')
outFile.write('unsigned char {0} {1}[] = {{'.format(attr, symName))
embeddedSize = 0
readBytes = inFile.read(chunkSize)
while len(readBytes) > 0:
readSize = len(readBytes)
hexBytes = binascii.hexlify(readBytes)
if embeddedSize > 0:
outFile.write(',')
outFile.write(','.join((('\n 0x' if (embeddedSize + i) % lineSize == 0 else ' 0x') + hexBytes[2*i:2*i+2].decode("utf-8")) for i in range(readSize)))
embeddedSize += readSize
readBytes = inFile.read(chunkSize)
outFile.write('\n };\n\n')
outFile.write('unsigned int {0} {1}_size = {2};\n\n'.format(attr, symName, embeddedSize))
except Exception as ex:
sys.stderr.write('ERROR: Unknown error.\n {0}.\n'.format(repr(ex)))
for openedFile in openedFiles:
openedFile.close()
exit(1)
finally:
for openedFile in openedFiles:
openedFile.close()
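# Example invocation (file names are illustrative):
#   python resource_embedder.py logo.png embedded_logo.cpp logo_data no_attr
# which emits embedded_logo.cpp defining `unsigned char logo_data[]` and
# `unsigned int logo_data_size`.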
| 35.512195
| 163
| 0.651786
|
af034edc89b3cf71c5ae087366c4dbf68fef40ae
| 80
|
py
|
Python
|
scripts/class-3/espiral_quadrado.py
|
GabrielMMelo/python4teens
|
287f79ada2f8ded669f6e26210e1407202e8ff80
|
[
"CC-BY-4.0"
] | 2
|
2021-04-15T13:23:16.000Z
|
2022-02-01T18:31:58.000Z
|
scripts/class-3/espiral_quadrado.py
|
GabrielMMelo/python4teens
|
287f79ada2f8ded669f6e26210e1407202e8ff80
|
[
"CC-BY-4.0"
] | null | null | null |
scripts/class-3/espiral_quadrado.py
|
GabrielMMelo/python4teens
|
287f79ada2f8ded669f6e26210e1407202e8ff80
|
[
"CC-BY-4.0"
] | null | null | null |
import turtle
t = turtle.Pen()
for x in range(100):
t.forward(x)
t.left(90)
| 10
| 20
| 0.65
|
3b89138874b0cb15d79ed69a859b55398e1bb325
| 4,680
|
py
|
Python
|
hparams.py
|
wyb330/multi-speaker-tacotron-tensorflow
|
4644d0b2bbce5c351a3f8d3af94ff7461b07a6d6
|
[
"MIT"
] | null | null | null |
hparams.py
|
wyb330/multi-speaker-tacotron-tensorflow
|
4644d0b2bbce5c351a3f8d3af94ff7461b07a6d6
|
[
"MIT"
] | null | null | null |
hparams.py
|
wyb330/multi-speaker-tacotron-tensorflow
|
4644d0b2bbce5c351a3f8d3af94ff7461b07a6d6
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
SCALE_FACTOR = 1
def f(num):
return num // SCALE_FACTOR
basic_params = {
# Comma-separated list of cleaners to run on text prior to training and eval. For non-English
# text, you may want to use "basic_cleaners" or "transliteration_cleaners" See TRAINING_DATA.md.
'cleaners': 'english_cleaners', # originally korean_cleaners
}
basic_params.update({
# Audio
'num_mels': 80,
'num_freq': 1025,
'sample_rate': 24000, # trained as 20000 but need to be 24000
'frame_length_ms': 50,
'frame_shift_ms': 12.5,
'preemphasis': 0.97,
'min_level_db': -100,
'ref_level_db': 20,
})
if True:
basic_params.update({
'sample_rate': 22050, # originally 24000 (krbook), 22050(lj-data), 20000(others)
})
basic_params.update({
# Model
'model_type': 'single', # [single, simple, deepvoice]
'speaker_embedding_size': f(16),
'embedding_size': f(256),
'dropout_prob': 0.5,
# Encoder
'enc_prenet_sizes': [f(256), f(128)],
'enc_bank_size': 16,
'enc_bank_channel_size': f(128),
'enc_maxpool_width': 2,
'enc_highway_depth': 2,
'enc_rnn_size': f(128),
'enc_proj_sizes': [f(128), f(128)],
'enc_proj_width': 3,
# Attention
'attention_type': 'luong_mon', # ntm2-5
'attention_size': f(256),
'attention_state_size': f(256),
# Decoder recurrent network
'dec_layer_num': 2,
'dec_rnn_size': f(256),
# Decoder
'dec_prenet_sizes': [f(256), f(128)],
'post_bank_size': 8,
'post_bank_channel_size': f(256),
'post_maxpool_width': 2,
'post_highway_depth': 2,
'post_rnn_size': f(128),
'post_proj_sizes': [f(256), 80], # num_mels=80
'post_proj_width': 3,
'reduction_factor': 4,
})
if False: # Deep Voice 2 AudioBook Dataset
basic_params.update({
'dropout_prob': 0.8,
'attention_size': f(512),
'dec_prenet_sizes': [f(256), f(128), f(64)],
'post_bank_channel_size': f(512),
'post_rnn_size': f(256),
'reduction_factor': 5, # changed from 4
})
elif False: # Deep Voice 2 VCTK dataset
basic_params.update({
'dropout_prob': 0.8,
# 'attention_size': f(512),
# 'dec_prenet_sizes': [f(256), f(128)],
# 'post_bank_channel_size': f(512),
'post_rnn_size': f(256),
'reduction_factor': 5,
})
elif True: # Single Speaker
basic_params.update({
'dropout_prob': 0.5,
'attention_size': f(256),
'post_bank_channel_size': f(256),
# 'post_rnn_size': f(128),
'reduction_factor': 5, # changed from 4
})
elif False: # Single Speaker with generalization
basic_params.update({
'dropout_prob': 0.8,
'attention_size': f(256),
'dec_prenet_sizes': [f(256), f(128), f(64)],
'post_bank_channel_size': f(128),
'post_rnn_size': f(128),
'reduction_factor': 4,
})
basic_params.update({
# Training
'batch_size': 10,
'adam_beta1': 0.9,
'adam_beta2': 0.999,
'use_fixed_test_inputs': False,
'initial_learning_rate': 0.001,
'decay_learning_rate_mode': 1, # True in deepvoice2 paper
'initial_data_greedy': True,
'initial_phase_step': 8000,
'main_data_greedy_factor': 0,
'main_data': [''],
'prioritize_loss': False,
'recognition_loss_coeff': 0.2,
'ignore_recognition_level': 0, # 0: use all, 1: ignore only unmatched_alignment, 2: fully ignore recognition
# Eval
'min_tokens': 50, # originally 50, 30 is good for korean,
'min_iters': 30,
'max_iters': 200,
'skip_inadequate': False,
'griffin_lim_iters': 60,
'power': 1.5, # Power to raise magnitudes to prior to Griffin-Lim
'max_N': 90, # Maximum number of characters.
'max_T': 180 # Maximum number of mel frames.
})
# Tacotron2 Hyperparameter
basic_params.update({
'embedding_dim': 512,
'encoder_lstm_units': 256, # For each direction
'attention_depth': 128,
'decoder_lstm_units': 1024,
'encoder_conv_layers': 3,
'encoder_conv_width': 5,
'encoder_conv_channels': 512,
'postnet_conv_layers': 5,
'postnet_conv_width': 5,
'postnet_conv_channels': 512,
'expand_conv_layers': 5,
'expand_conv_width': 5,
'expand_conv_channels': 512,
'expand_lstm_units': 256,
'outputs_per_step': 5,
'learning_rate_decay_halflife': 100000,
})
# Default hyperparameters:
hparams = tf.contrib.training.HParams(**basic_params)
def hparams_debug_string():
values = hparams.values()
hp = [' %s: %s' % (name, values[name]) for name in sorted(values)]
return 'Hyperparameters:\n' + '\n'.join(hp)
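# Usage sketch:
# print(hparams_debug_string())  # dumps every hyperparameter, sorted by name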
| 26.145251
| 113
| 0.62906
|
f477fad922daaa31c87be44e449f3b5d4a5e6814
| 1,107
|
py
|
Python
|
checksumming_io/checksumming_sink.py
|
HumanCellAtlas/checksum_reader
|
1bf28c1f20d420d905955db7e27fe2798bae3545
|
[
"Apache-2.0"
] | null | null | null |
checksumming_io/checksumming_sink.py
|
HumanCellAtlas/checksum_reader
|
1bf28c1f20d420d905955db7e27fe2798bae3545
|
[
"Apache-2.0"
] | null | null | null |
checksumming_io/checksumming_sink.py
|
HumanCellAtlas/checksum_reader
|
1bf28c1f20d420d905955db7e27fe2798bae3545
|
[
"Apache-2.0"
] | null | null | null |
"""
A file-like object that computes checksums for the data written to it, discarding the actual data.
"""
import hashlib
import crcmod
from .s3_etag import S3Etag
class ChecksummingSink:
def __init__(self, hash_functions=('crc32c', 'sha1', 'sha256', 's3_etag')):
self._hashers = dict()
for hasher in hash_functions:
if hasher == 'crc32c':
self._hashers['crc32c'] = crcmod.predefined.Crc("crc-32c")
elif hasher == 'sha1':
self._hashers['sha1'] = hashlib.sha1()
elif hasher == 'sha256':
self._hashers['sha256'] = hashlib.sha256()
elif hasher == 's3_etag':
self._hashers['s3_etag'] = S3Etag()
def write(self, data):
for hasher in self._hashers.values():
hasher.update(data)
def get_checksums(self):
checksums = {}
checksums.update({name: hasher.hexdigest() for name, hasher in self._hashers.items()})
return checksums
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
pass
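# Usage sketch:
# with ChecksummingSink(hash_functions=('sha1', 'sha256')) as sink:
#     sink.write(b'hello world')
#     print(sink.get_checksums())  # {'sha1': '...', 'sha256': '...'}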
| 29.131579
| 98
| 0.591689
|
90c7aedd04ecf5a7428c5b3f3da7eba647e7d50d
| 10,865
|
py
|
Python
|
doc/conf.py
|
LucaLJ/PyPSA
|
d69bcdb9eec0c71b485648537c10338c661f906c
|
[
"MIT"
] | 594
|
2017-10-20T19:02:15.000Z
|
2022-03-31T10:16:23.000Z
|
doc/conf.py
|
LucaLJ/PyPSA
|
d69bcdb9eec0c71b485648537c10338c661f906c
|
[
"MIT"
] | 271
|
2017-10-23T15:12:03.000Z
|
2022-03-29T10:20:36.000Z
|
doc/conf.py
|
LucaLJ/PyPSA
|
d69bcdb9eec0c71b485648537c10338c661f906c
|
[
"MIT"
] | 286
|
2017-10-23T09:45:15.000Z
|
2022-03-28T15:23:40.000Z
|
# -*- coding: utf-8 -*-
#
# PyPSA documentation build configuration file, created by
# sphinx-quickstart on Tue Jan 5 10:04:42 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'nbsphinx',
'nbsphinx_link',
# 'sphinx.ext.pngmath',
# 'sphinxcontrib.tikz',
#'rinoh.frontend.sphinx',
'sphinx.ext.imgconverter', # for SVG conversion
]
autodoc_default_flags = ['members']
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyPSA'
copyright = u'2015-2021 PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html'
author = u'PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.18.1'
# The full version, including alpha/beta/rc tags.
release = u'0.18.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'canonical_url': 'https://www.pypsa.org/doc',
'display_version': True,
'sticky_navigation': True,
#'style_nav_header_background': '#009682',
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# These folders are copied to the documentation's HTML output
html_static_path = ["_static"]
# These paths are either relative to html_static_path
# or fully qualified paths (eg. https://...)
html_css_files = ["theme_overrides.css"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyPSAdoc'
# -- Options for nbsphinx -------------------------------------------------
# nbsphinx_kernel_name = 'pypsa'
nbsphinx_prolog = """
{% set docname = env.doc2path(env.docname, base=None).replace("nblink","ipynb").replace("examples/", "examples/notebooks/") %}
.. note::
You can `download <https://github.com/pypsa/pypsa/tree/v{{ env.config.release|e }}/{{ docname }}>`_ this example as a Jupyter notebook
or start it `in interactive mode <https://mybinder.org/v2/gh/PyPSA/pypsa/v{{ env.config.release|e }}?filepath={{ docname|e }}>`_.
"""
nbsphinx_allow_errors = True
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyPSA.tex', u'PyPSA Documentation',
u'PyPSA Developers', 'manual'),
]
#Added for rinoh http://www.mos6581.org/rinohtype/quickstart.html
rinoh_documents = [(master_doc, # top-level file (index.rst)
'PyPSA', # output (target.pdf)
'PyPSA Documentation', # document title
'PyPSA Developers')] # document author
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pypsa', u'PyPSA Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyPSA', u'PyPSA Documentation',
author, 'PyPSA', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| 32.432836
| 138
| 0.706305
|
d2e283912fcdb020be11b2d7a05554fa38646cff
| 7,871
|
py
|
Python
|
mydialogs.py
|
zhj12138/ebook-manager
|
773974c9da3de1e79af4cfffe06ee1c546ef3e86
|
[
"MIT"
] | null | null | null |
mydialogs.py
|
zhj12138/ebook-manager
|
773974c9da3de1e79af4cfffe06ee1c546ef3e86
|
[
"MIT"
] | 1
|
2020-10-05T10:47:04.000Z
|
2020-10-05T10:47:04.000Z
|
mydialogs.py
|
zhj12138/ebook-manager
|
773974c9da3de1e79af4cfffe06ee1c546ef3e86
|
[
"MIT"
] | null | null | null |
import time
from PyQt5.QtGui import QIntValidator
from PyQt5.QtWidgets import *
from PyQt5.QtCore import QDate, pyqtSignal
from basic import strListToString
from classes import Book
from mydatabase import MyDb
from mythreads import convertThread
from fileMethods import *
class EditDataDialog(QDialog):
changeSignal = pyqtSignal(int)
def __init__(self, db: MyDb, book: Book, parent=None):
super(EditDataDialog, self).__init__(parent)
self.db = db
self.book = book
self.form = QFormLayout()
self.nameLabel = QLabel("书名")
self.nameInput = QLineEdit()
if book.name:
self.nameInput.setText(book.name)
self.authorLabel = QLabel("作者")
self.authorInput = QLineEdit()
if book.authors:
self.authorInput.setText(strListToString(book.authors))
self.pub_dateLabel = QLabel("出版日期")
self.pub_dateInput = QDateEdit()
if book.pub_date:
date = QDate()
self.pub_dateInput.setDate(date.fromString(book.pub_date, 'yyyyMMdd'))
self.publisherLabel = QLabel("出版社")
self.publisherInput = QLineEdit()
if book.publisher:
self.publisherInput.setText(book.publisher)
self.isbnLabel = QLabel("ISBN")
self.isbnInput = QLineEdit()
if book.isbn:
self.isbnInput.setText(book.isbn)
self.languageLabel = QLabel("语言")
self.languageInput = QLineEdit()
if book.language:
self.languageInput.setText(book.language)
self.ratingLabel = QLabel("评分")
self.ratingInput = QLineEdit()
self.ratingInput.setValidator(QIntValidator(0, 5))
if book.rating:
self.ratingInput.setText(str(book.rating))
self.tagsLabel = QLabel("标签")
self.tagsInput = QLineEdit()
if book.tags:
self.tagsInput.setText(strListToString(book.tags))
self.booklistLabel = QLabel("书单")
self.booklistInput = QLineEdit()
if book.bookLists:
self.booklistInput.setText(strListToString(book.bookLists))
self.okButton = QPushButton("保存并退出")
self.cancleButton = QPushButton("不保存退出")
self.form.addRow(self.nameLabel, self.nameInput)
self.form.addRow(self.authorLabel, self.authorInput)
self.form.addRow(self.pub_dateLabel, self.pub_dateInput)
self.form.addRow(self.publisherLabel, self.publisherInput)
self.form.addRow(self.isbnLabel, self.isbnInput)
self.form.addRow(self.languageLabel, self.languageInput)
self.form.addRow(self.ratingLabel, self.ratingInput)
self.form.addRow(self.tagsLabel, self.tagsInput)
self.form.addRow(self.booklistLabel, self.booklistInput)
self.form.addRow(self.okButton, self.cancleButton)
self.setLayout(self.form)
self.okButton.clicked.connect(self.onOK)
self.cancleButton.clicked.connect(self.onCancle)
def onOK(self):
self.book.name = self.nameInput.text()
self.book.setAuthors(self.db, parseStrListString(self.authorInput.text()))
self.book.pub_date = self.pub_dateInput.date().toString('yyyyMMdd')
self.book.publisher = self.publisherInput.text()
self.book.isbn = self.isbnInput.text()
self.book.language = self.languageInput.text()
if self.ratingInput.text():
self.book.rating = int(self.ratingInput.text())
else:
self.book.rating = 0
self.book.tags = parseStrListString(self.tagsInput.text())
self.book.setBookLists(self.db, parseStrListString(self.booklistInput.text()))
self.book.updateDB(self.db)
self.changeSignal.emit(self.book.ID)
self.close()
def onCancle(self):
self.close()
class ImportFileEditDialog(QDialog):
changeSignal = pyqtSignal(str, list, str, int)
def __init__(self, name=None, parent=None):
super(ImportFileEditDialog, self).__init__(parent)
self.nameLabel = QLabel("书名")
self.nameInput = QLineEdit()
if name:
self.nameInput.setText(name)
self.authorLabel = QLabel("作者")
self.authorInput = QLineEdit()
self.languageLabel = QLabel("语言")
self.languageInput = QLineEdit()
self.ratingLabel = QLabel("评分")
self.ratingInput = QLineEdit()
self.ratingInput.setValidator(QIntValidator(0, 5))
self.okBtn = QPushButton("确定")
self.okBtn.clicked.connect(self.onClicked)
self.cancleBtn = QPushButton("取消转换")
self.cancleBtn.clicked.connect(self.onCancle)
self.form = QFormLayout()
self.form.addRow(self.nameLabel, self.nameInput)
self.form.addRow(self.authorLabel, self.authorInput)
self.form.addRow(self.languageLabel, self.languageInput)
self.form.addRow(self.ratingLabel, self.ratingInput)
self.form.addRow(self.okBtn, self.cancleBtn)
self.setLayout(self.form)
def onClicked(self):
name = self.nameInput.text()
authors = parseStrListString(self.authorInput.text())
language = self.languageInput.text()
if self.ratingInput.text():
rating = int(self.ratingInput.text())
else:
rating = 0
self.changeSignal.emit(name, authors, language, rating)
self.close()
def onCancle(self):
self.close()
class ImportFileDialog(QDialog):
finishSignal = pyqtSignal(str, str, list, str, int)
def __init__(self, basepath, db, parent=None):
super(ImportFileDialog, self).__init__(parent)
self.basePath = basepath
self.db = db
self.filepath, _ = QFileDialog.getOpenFileName(self, "选择文件", ".", "docx or markdown file(*.docx *.md)")
if self.filepath:
direcPath, file = os.path.split(self.filepath)
            self.filename, self.filesufix = file.rsplit('.', 1)  # rsplit so extra dots stay in the base name
dig = ImportFileEditDialog(self.filename, self)
dig.changeSignal.connect(self.onConvert)
dig.show()
def onConvert(self, name, authors, language, rating):
if not name:
name = self.filename
bookPath, bookFilePath = getFilePath(self.basePath, name, self.db.getID(), self.filepath)
if not bookPath:
return
pdfFilePath = os.path.join(bookPath, name+'.pdf')
if self.filesufix == 'md':
t = convertThread(mdToPdf, (bookFilePath, pdfFilePath))
else: # docx
t = convertThread(docxToPdf, (bookFilePath, pdfFilePath))
t.finishSignal.connect(lambda: self.finishConvert(pdfFilePath, name, authors, language, rating))
t.start()
time.sleep(1)
def finishConvert(self, pdfFilePath, name, authors, language, rating):
self.finishSignal.emit(pdfFilePath, name, authors, language, rating)
class ExportINFODialog(QDialog):
finishSignal = pyqtSignal(str)
def __init__(self, db: MyDb, parent=None):
super(ExportINFODialog, self).__init__(parent)
self.db = db
file_name, _ = QFileDialog.getSaveFileName(self, "保存文件", ".", "csv file(*.csv)")
if file_name:
rows = self.db.getAllBookRows()
headers = ['书名', '作者', '出版日期', '出版社', 'ISBN', '语言', '文件路径', '封面路径', '评分', '标签', '书单']
t = convertThread(toCSV, (file_name, headers, rows))
t.finishSignal.connect(lambda: self.FinishExport(file_name))
t.start()
time.sleep(1)
def FinishExport(self, filename):
self.finishSignal.emit(filename)
class SettingDialog(QDialog):
finishSignal = pyqtSignal()
def __init__(self, parent=None):
super(SettingDialog, self).__init__(parent)
class HighSearchDialog(QDialog):
finishSignal = pyqtSignal()
def __init__(self, parent=None):
super(HighSearchDialog, self).__init__(parent)
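# For orientation (an assumption, not code from this repository): the helpers
# imported from basic above are used as a comma join/split pair, so plausible
# sketches would be
#   def strListToString(items):    # ['a', 'b'] -> 'a,b'
#       return ','.join(items)
#   def parseStrListString(text):  # 'a,b' -> ['a', 'b']
#       return [s for s in text.split(',') if s]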
| 38.773399
| 111
| 0.645407
|
529a98b870c146b2ee8b907314f0caa66749243b
| 2,778
|
py
|
Python
|
utils/spectral_norm.py
|
loayghawji/CPM
|
8d1c1d0e15bba04c0ef06997411a09765f736cfa
|
[
"Apache-2.0"
] | 182
|
2021-04-20T04:53:07.000Z
|
2022-03-30T02:54:24.000Z
|
utils/spectral_norm.py
|
loayghawji/CPM
|
8d1c1d0e15bba04c0ef06997411a09765f736cfa
|
[
"Apache-2.0"
] | 16
|
2021-04-21T10:59:57.000Z
|
2022-03-10T05:27:55.000Z
|
utils/spectral_norm.py
|
loayghawji/CPM
|
8d1c1d0e15bba04c0ef06997411a09765f736cfa
|
[
"Apache-2.0"
] | 30
|
2021-04-26T03:55:41.000Z
|
2022-03-20T02:00:34.000Z
|
import torch
from torch.nn import Parameter
def l2normalize(v, eps=1e-12):
return v / (v.norm() + eps)
class SpectralNorm(object):
def __init__(self):
self.name = "weight"
# print(self.name)
self.power_iterations = 1
def compute_weight(self, module):
u = getattr(module, self.name + "_u")
v = getattr(module, self.name + "_v")
w = getattr(module, self.name + "_bar")
height = w.data.shape[0]
for _ in range(self.power_iterations):
v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data))
u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data))
# sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data))
sigma = u.dot(w.view(height, -1).mv(v))
return w / sigma.expand_as(w)
@staticmethod
def apply(module):
name = "weight"
fn = SpectralNorm()
try:
u = getattr(module, name + "_u")
v = getattr(module, name + "_v")
w = getattr(module, name + "_bar")
except AttributeError:
w = getattr(module, name)
height = w.data.shape[0]
width = w.view(height, -1).data.shape[1]
u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
w_bar = Parameter(w.data)
# del module._parameters[name]
module.register_parameter(name + "_u", u)
module.register_parameter(name + "_v", v)
module.register_parameter(name + "_bar", w_bar)
# remove w from parameter list
del module._parameters[name]
setattr(module, name, fn.compute_weight(module))
# recompute weight before every forward()
module.register_forward_pre_hook(fn)
return fn
def remove(self, module):
weight = self.compute_weight(module)
delattr(module, self.name)
del module._parameters[self.name + "_u"]
del module._parameters[self.name + "_v"]
del module._parameters[self.name + "_bar"]
module.register_parameter(self.name, Parameter(weight.data))
def __call__(self, module, inputs):
setattr(module, self.name, self.compute_weight(module))
def spectral_norm(module):
SpectralNorm.apply(module)
return module
def remove_spectral_norm(module):
name = "weight"
for k, hook in module._forward_pre_hooks.items():
if isinstance(hook, SpectralNorm) and hook.name == name:
hook.remove(module)
del module._forward_pre_hooks[k]
return module
raise ValueError("spectral_norm of '{}' not found in {}".format(name, module))
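# Usage sketch (not part of the original file; the layer shape and demo input
# are assumptions): wrap a layer and run a forward pass, letting the
# registered pre-hook re-normalize the weight on every call, then strip it.
if __name__ == "__main__":
    import torch.nn as nn

    conv = spectral_norm(nn.Conv2d(3, 64, kernel_size=3, padding=1))
    x = torch.rand(1, 3, 32, 32)
    y = conv(x)                   # forward pre-hook recomputes weight / sigma
    remove_spectral_norm(conv)    # restores a plain trainable weight parameter
    print(y.shape)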
| 31.931034
| 84
| 0.601512
|
89cb8e63d4caf0ab21b5cfb9fbbc5e9793d171fc
| 2,045
|
py
|
Python
|
test/test_brand_search_query.py
|
Inch4Tk/amparex_python_api
|
f372c15a1e67293329bdd8bee8ad66624ed1341f
|
[
"Apache-2.0"
] | null | null | null |
test/test_brand_search_query.py
|
Inch4Tk/amparex_python_api
|
f372c15a1e67293329bdd8bee8ad66624ed1341f
|
[
"Apache-2.0"
] | null | null | null |
test/test_brand_search_query.py
|
Inch4Tk/amparex_python_api
|
f372c15a1e67293329bdd8bee8ad66624ed1341f
|
[
"Apache-2.0"
] | null | null | null |
"""
AMPAREX Rest API Documentation
This is the description of the AMPAREX Rest API. All REST calls plus the corresponding data model are described in this documentation. Direct calls to the server are possible over this page.<br/>Following steps are needed to use the API:<br/><br/>1. Get the alias identifier of your login account from AMPAREX Software (Branch office administration) -> Service accounts -> your service account -> copy alias token)<br/>2. Please use the login URL /alias/{alias}/login under section \"Login\" below with your credentials to get a valid bearer token.<br/>3. Copy bearer token from login response<br/>3. Then click \"Authorize\" on the top of this page<br/>4. Insert into the field \"value\": \"Bearer {Your Bearer token}\" (without {}) for example \"Bearer 334d34d3dgh5tz5h5h\"<br/>4. Click Authorize<br/>5. Bearer token will be automatically used in the header for every following API call.<br/>6. Now you are ready to use the API<br/><br/>See also [documentation](https://manual.amparex.com/display/HAN/AMPAREX+API) for help<br/><br/>Documentation of all the used fields and objects is at the bottom of this page called \"Models\" # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import amparex
from amparex.model.search_query_meta_data import SearchQueryMetaData
globals()['SearchQueryMetaData'] = SearchQueryMetaData
from amparex.model.brand_search_query import BrandSearchQuery
class TestBrandSearchQuery(unittest.TestCase):
"""BrandSearchQuery unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testBrandSearchQuery(self):
"""Test BrandSearchQuery"""
# FIXME: construct object with mandatory attributes with example values
# model = BrandSearchQuery() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 53.815789
| 1,241
| 0.733985
|
7949e11430f9258dd2a181a66625fa88cfe610ac
| 1,892
|
py
|
Python
|
url_tldr.py
|
AmanKishore/GPT3-Experiments
|
ea661b66d6cab843e406c65f7a1db1de41c62a57
|
[
"MIT"
] | null | null | null |
url_tldr.py
|
AmanKishore/GPT3-Experiments
|
ea661b66d6cab843e406c65f7a1db1de41c62a57
|
[
"MIT"
] | null | null | null |
url_tldr.py
|
AmanKishore/GPT3-Experiments
|
ea661b66d6cab843e406c65f7a1db1de41c62a57
|
[
"MIT"
] | null | null | null |
import os
import openai
import re
from collections import OrderedDict
import requests, PyPDF2
from io import BytesIO
from WebScraper import scraper
openai.api_key = os.environ["OPENAI_API_KEY"]
def getTLDRfromURL():
# creating a pdf file object
url = input("Enter the pdf url: ")
response = requests.get(url)
my_raw_data = response.content
final_text = ""
with BytesIO(my_raw_data) as data:
try:
read_pdf = PyPDF2.PdfFileReader(data)
# Iterate through pages (max of 1 to save money)
for page in range(min(read_pdf.getNumPages(), 1)):
ai_text = read_pdf.getPage(page).extractText()
response = openai.Completion.create(
engine="davinci",
prompt=ai_text + "\n\ntl;dr:",
temperature=0.3,
max_tokens=60,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
final_text += response["choices"][0]["text"]
except:
data = scraper(url)
response = openai.Completion.create(
engine="davinci",
prompt=data[500:] + "\n\ntl;dr:",
temperature=0.3,
max_tokens=60,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
final_text = response["choices"][0]["text"]
"\n".join(list(OrderedDict.fromkeys(final_text.split("\n"))))
final_text = final_text.replace('Click to expand... ', '')
final_text = final_text.replace('\n\n', '\n')
final_text = re.sub(r'^\\s+|[\\s&&[^\\r\\n]](?=\\s|$)|\\s+\\z', '', final_text)
return final_text
if __name__ == "__main__":
response = getTLDRfromURL()
print(response)
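# Usage sketch (not in the original file): the same "\n\ntl;dr:" completion
# pattern applied to an in-memory string instead of a fetched PDF or page;
# the sampling parameters simply mirror the calls above.
def tldr_from_text(text: str) -> str:
    response = openai.Completion.create(
        engine="davinci",
        prompt=text + "\n\ntl;dr:",
        temperature=0.3,
        max_tokens=60,
        top_p=1.0,
        frequency_penalty=0.0,
        presence_penalty=0.0,
    )
    return response["choices"][0]["text"]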
| 30.031746
| 83
| 0.534355
|
ff2a8fad372895590ec41336c96cbdcd720ea613
| 3,083
|
py
|
Python
|
tax_data_test_task/tax_data/settings.py
|
saibottrenham/online_pajak
|
d123e421a6c629bb0ebdfb1582391eae600cb905
|
[
"MIT"
] | 1
|
2021-11-03T00:11:01.000Z
|
2021-11-03T00:11:01.000Z
|
tax_data_test_task/tax_data/settings.py
|
saibottrenham/online_pajak
|
d123e421a6c629bb0ebdfb1582391eae600cb905
|
[
"MIT"
] | null | null | null |
tax_data_test_task/tax_data/settings.py
|
saibottrenham/online_pajak
|
d123e421a6c629bb0ebdfb1582391eae600cb905
|
[
"MIT"
] | null | null | null |
"""
Django settings for tax_data project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's53=p-tx@h-254*a0*8_es-ew=#2_1xps=ju8b$hfejnj@13n7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'api_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tax_data.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tax_data.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
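# Hardening sketch (not part of the generated settings; the DJANGO_* variable
# names are assumptions): override the insecure defaults from the environment
# when deploying, leaving the development values in place otherwise.
import os
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
DEBUG = os.environ.get('DJANGO_DEBUG', '1') == '1'
if os.environ.get('DJANGO_ALLOWED_HOSTS'):
    ALLOWED_HOSTS = os.environ['DJANGO_ALLOWED_HOSTS'].split(',')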
| 25.270492
| 91
| 0.695751
|
f295c70e4efe272e5d8c262a8d6576a790e1f840
| 2,122
|
py
|
Python
|
tests/test_blob.py
|
Atipriya/cle
|
e0de9fbfde45a446656873e406340b146c401381
|
[
"BSD-2-Clause"
] | 317
|
2015-05-27T10:10:32.000Z
|
2022-03-13T02:56:31.000Z
|
tests/test_blob.py
|
Atipriya/cle
|
e0de9fbfde45a446656873e406340b146c401381
|
[
"BSD-2-Clause"
] | 238
|
2015-05-27T17:50:18.000Z
|
2022-03-24T23:47:27.000Z
|
tests/test_blob.py
|
Atipriya/cle
|
e0de9fbfde45a446656873e406340b146c401381
|
[
"BSD-2-Clause"
] | 134
|
2015-05-29T03:45:11.000Z
|
2022-03-13T02:56:34.000Z
|
import nose
import os
import pickle
import cle
TEST_BASE = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.path.join('..', '..', 'binaries'))
def test_blob_0():
BASE_ADDR = 0x8000000
ENTRYPOINT = 0x8001337
blob_file = os.path.join(TEST_BASE, 'tests', 'i386', 'all')
blob_file_size = os.stat(blob_file).st_size
ld = cle.Loader(blob_file, main_opts={
'backend': 'blob',
'base_addr': BASE_ADDR,
'entry_point': ENTRYPOINT,
'arch': "ARM",
})
nose.tools.assert_equal(ld.main_object.linked_base, BASE_ADDR)
nose.tools.assert_equal(ld.main_object.mapped_base, BASE_ADDR)
nose.tools.assert_equal(ld.main_object.min_addr, BASE_ADDR)
nose.tools.assert_equal(ld.main_object.max_addr, BASE_ADDR + blob_file_size - 1)
nose.tools.assert_equal(ld.main_object.entry, ENTRYPOINT)
nose.tools.assert_true(ld.main_object.contains_addr(BASE_ADDR))
nose.tools.assert_false(ld.main_object.contains_addr(BASE_ADDR - 1))
# ensure that pickling works
ld_pickled = pickle.loads(pickle.dumps(ld))
nose.tools.assert_equal(ld_pickled.main_object.mapped_base, BASE_ADDR)
def test_blob_1():
# Make sure the base address behaves as expected regardless of whether offset is specified or not.
BASE_ADDR = 0x8000000
ENTRYPOINT = 0x8001337
blob_file = os.path.join(TEST_BASE, 'tests', 'i386', 'all')
offset = 0x200
blob_file_size = os.stat(blob_file).st_size - offset
ld = cle.Loader(blob_file, main_opts={
'backend': 'blob',
'base_addr': BASE_ADDR,
'entry_point': ENTRYPOINT,
'arch': "ARM",
'offset': offset,
})
nose.tools.assert_equal(ld.main_object.linked_base, BASE_ADDR)
nose.tools.assert_equal(ld.main_object.mapped_base, BASE_ADDR)
nose.tools.assert_equal(ld.main_object.min_addr, BASE_ADDR)
nose.tools.assert_equal(ld.main_object.max_addr, BASE_ADDR + blob_file_size - 1)
nose.tools.assert_equal(ld.main_object.entry, ENTRYPOINT)
if __name__ == "__main__":
test_blob_0()
test_blob_1()
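# Usage sketch (not part of the tests; 'firmware.bin' and the addresses are
# placeholders): loading a raw image with the blob backend outside the test
# harness takes the same main_opts exercised above.
def example_blob_loader(path='firmware.bin'):
    return cle.Loader(path, main_opts={
        'backend': 'blob',
        'arch': 'ARM',
        'base_addr': 0x8000000,
        'entry_point': 0x8001337,
    })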
| 31.671642
| 102
| 0.690386
|
43c5463313b4fe7d348f9083ad8c51eb72775390
| 728
|
py
|
Python
|
tcp-client.py
|
pedrohenri15/Arquivos-IOT-comentados
|
2406f1a0c6f73e545c1c4b5ee1306e89a2c2f404
|
[
"BSD-2-Clause"
] | null | null | null |
tcp-client.py
|
pedrohenri15/Arquivos-IOT-comentados
|
2406f1a0c6f73e545c1c4b5ee1306e89a2c2f404
|
[
"BSD-2-Clause"
] | null | null | null |
tcp-client.py
|
pedrohenri15/Arquivos-IOT-comentados
|
2406f1a0c6f73e545c1c4b5ee1306e89a2c2f404
|
[
"BSD-2-Clause"
] | null | null | null |
# import socket to establish the connection and transfer the data
import socket
# define the destination host and the access port
HOST = 'localhost'
PORT = 5000
# create the socket object, pairing IPv4 (AF_INET) with TCP (SOCK_STREAM)
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# store the address to connect to
destino = (HOST, PORT)
# open the TCP connection to the server
tcp.connect(destino)
# loop so the client can keep typing messages to send to the server
while(True):
    mensagem = bytes(input("Digite a mensagem: "), encoding='utf-8')
    # send the message
    tcp.send(mensagem)
# close the connection (unreachable while the loop above never breaks)
tcp.close()
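# Companion sketch (not part of the original script): a minimal echo server
# for this client, bound to the same HOST and PORT. Run it first in a
# separate process; the backlog and buffer size are assumptions.
def run_echo_server(host=HOST, port=PORT):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind((host, port))
    srv.listen(1)                 # allow one pending connection
    conn, addr = srv.accept()
    while True:
        data = conn.recv(1024)    # read up to 1024 bytes
        if not data:              # empty read means the client closed
            break
        conn.sendall(data)        # echo the payload back
    conn.close()
    srv.close()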
| 29.12
| 107
| 0.723901
|
9301135f43a200a793061030f624db47790760e1
| 15,112
|
py
|
Python
|
agents/navigation/autonomous_agent.py
|
neetmehta/Autonomous-turning-of-vehicle-and-overtaking-in-CARLA
|
afda62dbf6b75343afaaf4cd49753cfc1e90ac84
|
[
"MIT"
] | 2
|
2021-07-08T07:32:16.000Z
|
2021-07-16T08:45:55.000Z
|
agents/navigation/autonomous_agent.py
|
neetmehta/Autonomous-turning-of-vehicle-and-overtaking-in-CARLA
|
afda62dbf6b75343afaaf4cd49753cfc1e90ac84
|
[
"MIT"
] | 1
|
2021-05-14T05:10:15.000Z
|
2021-07-08T09:05:22.000Z
|
agents/navigation/autonomous_agent.py
|
neetmehta/Autonomous-turning-of-vehicle-and-overtaking-in-CARLA
|
afda62dbf6b75343afaaf4cd49753cfc1e90ac84
|
[
"MIT"
] | 2
|
2021-07-08T09:16:52.000Z
|
2021-09-08T18:10:48.000Z
|
#!/usr/bin/env python
import pygame
import carla
import math
import numpy as np
from agents.navigation.agent import Agent, AgentState
from agents.navigation.local_planner import LocalPlanner, RoadOption
from agents.navigation.global_route_planner import GlobalRoutePlanner
from agents.navigation.global_route_planner_dao import GlobalRoutePlannerDAO
from agents.navigation.lange_change import BezierTurn, BezierOverTake
from agents.tools.misc import transform_to_frame
class AutonomousAgent(Agent):
"""
BasicAgent implements a basic agent that navigates scenes to reach a given
target destination. This agent respects traffic lights and other vehicles.
"""
def __init__(self, world):
"""
:param vehicle: actor to apply to local planner logic onto
"""
super(AutonomousAgent, self).__init__(world.player)
self._world_obj = world
self._THW = 2
self._target_speed = None
# Local plannar
self._local_planner = LocalPlanner(world.player)
self.update_parameters()
# Global plannar
self._proximity_threshold = 10.0 # meter # Distance between waypoints
self._state = AgentState.NAVIGATING
self._hop_resolution = 0.2
self._path_seperation_hop = 2
self._path_seperation_threshold = 0.5
self._grp = None # global route planar
# Behavior planning
self._hazard_detected = False
self._blocked_time = None
self._perform_lane_change = False
self._front_r = []
self._left_front_r = []
self._left_back_r = []
# Turns positions
self.right_positions = None
self.left_positions = None
# Turn flags
self.right_turn = False
self.left_turn = False
self.temp_flag = True
self.left_positions = None
def update_parameters(self):
self._THW = 2
self._target_speed = 30
CONTROLLER_TYPE = 'PID' # options:MPC, PID, STANLEY
args_lateral_dict = {'K_P': 1.0, 'K_I': 0.4, 'K_D': 0.01, 'control_type': CONTROLLER_TYPE}
args_longitudinal_dict = {'K_P': 0.3, 'K_I': 0.2, 'K_D': 0.002}
self._local_planner.init_controller(opt_dict={'target_speed': self._target_speed,
'lateral_control_dict': args_lateral_dict,
'longitudinal_control_dict': args_longitudinal_dict})
# Set global destination and get global waypoints
def set_destination(self, location):
"""
This method creates a list of waypoints from agent's position to destination location
based on the route returned by the global router
"""
start_waypoint = self._map.get_waypoint(self._vehicle.get_location())
end_waypoint = self._map.get_waypoint(carla.Location(location[0], location[1], location[2]))
route_trace = self._trace_route(start_waypoint, end_waypoint)
assert route_trace
self._local_planner.set_global_plan(route_trace)
# Get global waypoints
def _trace_route(self, start_waypoint, end_waypoint):
"""
This method sets up a global router and returns the optimal route
from start_waypoint to end_waypoint
"""
# Setting up global router
if self._grp is None:
dao = GlobalRoutePlannerDAO(self._vehicle.get_world().get_map(), self._hop_resolution)
grp = GlobalRoutePlanner(dao)
grp.setup()
self._grp = grp
# Obtain route plan
route = self._grp.trace_route(start_waypoint.transform.location, end_waypoint.transform.location)
self.turn_positions_getter(route, RoadOption.RIGHT)
self.turn_positions_getter(route, RoadOption.LEFT)
return route
def turn_positions_getter(self, route, state):
"""
Returns list of all Left and right turns waypoints
"""
count_flag = False
temp_list=[]
list_of_turn_waypoints=[]
for i,j in route:
if j==state:
count_flag=True
temp_list.append(i)
continue
if count_flag:
start_waypoint = temp_list[0]
end_waypoint = temp_list[-1]
list_of_turn_waypoints.append((start_waypoint,end_waypoint))
temp_list=[]
count_flag=False
if state == RoadOption.RIGHT:
self.right_positions = list_of_turn_waypoints
else:
self.left_positions = list_of_turn_waypoints
# Get vehicle speed
def _get_speed(self):
v = self._vehicle.get_velocity()
ego_speed = math.sqrt(v.x**2 + v.y**2 + v.z**2)
return ego_speed
# Run step
def run_step(self, debug=False):
"""
Execute one step of navigation.
:return: carla.VehicleControl
"""
## Update Environment ##
# Check all the radars
        try:  # no-op placeholder kept from the original source; it has no effect
            if self._state == AgentState.EMERGENCY_BRAKE:
                pass
            pass
        except:
            pass
if self._world_obj.front_radar.detected:
if abs(self._world_obj.front_radar.rel_pos[1]) < 1:
self._front_r = [pygame.time.get_ticks(), self._world_obj.front_radar.rel_pos,
self._world_obj.front_radar.rel_vel]
self._world_obj.front_radar.detected = False
if self._world_obj.left_front_radar.detected:
if self._world_obj.left_front_radar.rel_pos[1] < -1:
self._left_front_r =[pygame.time.get_ticks(), self._world_obj.left_front_radar.rel_pos,
self._world_obj.left_front_radar.rel_vel]
self._world_obj.left_front_radar.detected = False
if self._world_obj.left_back_radar.detected:
if self._world_obj.left_back_radar.rel_pos[1] < -1:
self._left_back_r = [pygame.time.get_ticks(), self._world_obj.left_back_radar.rel_pos,
self._world_obj.left_back_radar.rel_vel]
self._world_obj.left_back_radar.detected = False
        # Remove stale radar data if not detected again within 5 seconds (5000 ms of ticks)
if self._front_r and (pygame.time.get_ticks() - self._front_r[0] > 5000):
self._front_r = []
if self._left_front_r and (pygame.time.get_ticks() - self._left_front_r[0] > 5000):
self._left_front_r = []
if self._left_back_r and (pygame.time.get_ticks() - self._left_back_r[0] > 5000):
self._left_back_r = []
# Detect vehicles in front
self._hazard_detected = False
if self._front_r and (self._front_r[1][0] < 20.0):
self._hazard_detected = True
# update hazard existing time
if self._hazard_detected:
if self._blocked_time is None:
self._blocked_time = pygame.time.get_ticks()
hazard_time = 0
else:
hazard_time = pygame.time.get_ticks() - self._blocked_time
else:
self._blocked_time = None
# Get a safe_distance
safe_distance = self._THW * self._get_speed()
try:
i=self.right_positions[0][0]
j=self.right_positions[0][1]
loc_start = i.transform.location
loc_start_yaw = i.transform.rotation.yaw
loc = loc_start
loc_end_yaw = j.transform.rotation.yaw
loc_end = j.transform.location
if (abs(loc.x-self._vehicle.get_location().x)+\
abs(loc.y-self._vehicle.get_location().y)+\
abs(loc.z-self._vehicle.get_location().z))<=10:
self.right_turn=True
self.temp_flag = False
except:
pass
try:
i=self.left_positions[0][0]
j=self.left_positions[0][1]
loc2_start = i.transform.location
loc2_start_yaw = i.transform.rotation.yaw
loc2 = loc2_start
loc2_end = j.transform.location
loc2_end_yaw = j.transform.rotation.yaw
if (abs(loc2.x-self._vehicle.get_location().x)+\
abs(loc2.y-self._vehicle.get_location().y)+\
abs(loc2.z-self._vehicle.get_location().z))<=10:
self.left_turn=True
self.temp_flag = False
except:
pass
# Finite State Machine
# 1, Navigating
if self._state == AgentState.NAVIGATING:
if self._hazard_detected:
self._state = AgentState.BLOCKED_BY_VEHICLE
# 2, Blocked by Vehicle
elif self._state == AgentState.BLOCKED_BY_VEHICLE:
if not self._hazard_detected:
self._state = AgentState.NAVIGATING
# The vehicle is driving at a certain speed
# There is enough space
else:
if hazard_time > 5000 and \
190 > self._vehicle.get_location().x > 10 and \
10 > self._vehicle.get_location().y > 7:
self._state = AgentState.PREPARE_LANE_CHANGING
# 4, Prepare Lane Change
elif self._state == AgentState.PREPARE_LANE_CHANGING:
if not (self._front_r and self._front_r[1][0] < safe_distance) and \
not (self._left_front_r and self._left_front_r[1][0] < safe_distance) and \
not (self._left_back_r and self._left_back_r[1][0] > -10):
self._state = AgentState.LANE_CHANGING
self._perform_lane_change = True
# 5, Lane Change
elif self._state == AgentState.LANE_CHANGING:
if abs(self._vehicle.get_velocity().y) < 0.5 and \
self._vehicle.get_location().y < 7.0:
self._state = AgentState.NAVIGATING
# 6, Emergency Brake
emergency_distance = safe_distance *3/5
emergency_front_speed = 1.0
if self._front_r and (self._front_r[1][0] < emergency_distance or
self._front_r[2][0] < emergency_front_speed):
self._state = AgentState.EMERGENCY_BRAKE
# Local Planner Behavior according to states
if self._state == AgentState.NAVIGATING or self._state == AgentState.LANE_CHANGING:
control = self._local_planner.run_step(debug=debug)
elif self._state == AgentState.PREPARE_LANE_CHANGING:
if self._left_front_r and self._left_front_r[1][0] < safe_distance or \
self._front_r and self._front_r[1][0] < safe_distance:
control = self._local_planner.empty_control(debug=debug)
else:
control = self._local_planner.run_step(debug=debug)
elif self._state == AgentState.BLOCKED_BY_VEHICLE:
# ACC
front_dis = self._front_r[1][0]
front_vel = self._front_r[2][0]
ego_speed = self._get_speed()
desired_speed = front_vel - (ego_speed-front_vel)/front_dis
if ego_speed > 1:
desired_speed += 2*(front_dis/ego_speed - self._THW)
control = self._local_planner.run_step(debug=debug, target_speed=desired_speed*3.6)
elif self._state == AgentState.EMERGENCY_BRAKE:
control = self._local_planner.brake()
if self._front_r:
if self._front_r[1][0] >= emergency_distance and \
self._front_r[2][0] > emergency_front_speed:
self._state = AgentState.NAVIGATING
elif self._state == AgentState.BLOCKED_RED_LIGHT:
control = self._local_planner.empty_control(debug=debug)
# When performing a lane change
if self._perform_lane_change:
# Record original destination
destination = self._local_planner.get_global_destination()
# Get lane change start location
ref_location = self._world_obj.player.get_location()
ref_yaw = self._world_obj.player.get_transform().rotation.yaw
if self._local_planner.waypoint_buffer:
waypoint = self._local_planner.waypoint_buffer[-1][0]
ref_location = waypoint.transform.location
wait_dist = 0.0 # need some time to plan
ref = [ref_location.x + wait_dist, ref_location.y, ref_yaw]
# Replace current plan with a lane change plan
overtake = BezierOverTake(self._world_obj)
overtake_plan = overtake.get_waypoints(ref)
self._local_planner.set_local_plan(overtake_plan)
# replan globally with new vehicle position after lane changing
new_start = self._map.get_waypoint(overtake_plan[-1][0].transform.location)
route_trace = self._trace_route(new_start, destination)
assert route_trace
self._local_planner.add_global_plan(route_trace)
self._perform_lane_change = False
print("overtake")
if self.right_turn or self.left_turn:
# Record original destination
destination = self._local_planner.get_global_destination()
# Get lane change start location
ref_location = self._world_obj.player.get_location()
ref_yaw = self._world_obj.player.get_transform().rotation.yaw
if self._local_planner.waypoint_buffer:
waypoint = self._local_planner.waypoint_buffer[-1][0]
ref_location = waypoint.transform.location
if self.right_turn:
ref1 = [loc_start.x , loc_start.y, loc_start_yaw]
ref2 = [loc_end.x, loc_end.y, loc_end_yaw]
turner = BezierTurn(self._world_obj, True)
turn_plan = turner.get_waypoints(ref1,ref2)
self.right_turn = False
print('Right Turn')
elif self.left_turn:
ref1 = [loc2_start.x, loc2_start.y, loc2_start_yaw]
ref2 = [loc2_end.x, loc2_end.y, loc2_end_yaw]
turner = BezierTurn(self._world_obj,False)
turn_plan = turner.get_waypoints(ref1,ref2)
self.left_turn = False
print('Left turn')
self._local_planner.set_local_plan(turn_plan)
# replan globally with new vehicle position after lane changing
new_start = self._map.get_waypoint(turn_plan[-1][0].transform.location)
route_trace = self._trace_route(new_start, destination)
assert route_trace
self._local_planner.add_global_plan(route_trace)
return control
def done(self):
"""
Check whether the agent has reached its destination.
:return bool
"""
return self._local_planner.done()
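# Worked example (not part of the original file): the adaptive-cruise law from
# the BLOCKED_BY_VEHICLE branch above, isolated for clarity. With THW = 2 s,
# a 20 m gap, ego at 10 m/s and the lead car at 8 m/s it yields
# 8 - (10 - 8)/20 + 2*(20/10 - 2) = 7.9 m/s.
def acc_desired_speed(front_dis, front_vel, ego_speed, thw=2.0):
    desired = front_vel - (ego_speed - front_vel) / front_dis
    if ego_speed > 1:
        desired += 2 * (front_dis / ego_speed - thw)
    return desired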
| 39.873351
| 107
| 0.601707
|
9bd5a7a00c7dbbc617851109e37292f934ebf73f
| 1,344
|
py
|
Python
|
Operator/UVIsland_HardEdges.py
|
BreakPointOo/Stitches_BlenderAddon
|
a7d7e200d92ed35225c3b61f82062d6a20a2b6d7
|
[
"Xnet",
"X11"
] | null | null | null |
Operator/UVIsland_HardEdges.py
|
BreakPointOo/Stitches_BlenderAddon
|
a7d7e200d92ed35225c3b61f82062d6a20a2b6d7
|
[
"Xnet",
"X11"
] | null | null | null |
Operator/UVIsland_HardEdges.py
|
BreakPointOo/Stitches_BlenderAddon
|
a7d7e200d92ed35225c3b61f82062d6a20a2b6d7
|
[
"Xnet",
"X11"
] | null | null | null |
'''bl_info = {
"name": "UVs_to_Hard_Edges",
"author": "Ferran M.Clar, 2.80 update NecroFriedChicken",
"version": (0, 1),
"blender": (2, 80, 0),
"location": "3D View -> Tools Panel",
"description": "Sets the object UV islands borders' edges to hard edges and an Edge Split modifier",
"category": "Object"}
'''
import bpy
import bmesh
class UV_OT_Island2HardEdges(bpy.types.Operator):
bl_idname = "uv.islandtohardedges"
bl_label = "UVIslandToHardEdges"
bl_description = "Sets the object UV islands borders' edges to hard edges and an Edge Split modifier"
# bl_options = {'REGISTER', 'UNDO'}
def execute(self,context):
if context.active_object.mode != 'EDIT':
bpy.ops.object.mode_set(mode = 'EDIT')
bpy.ops.uv.seams_from_islands()
bpy.ops.object.mode_set(mode = 'OBJECT')
mesh = bpy.context.object.data
bm = bmesh.new()
bm.from_mesh(mesh)
bpy.ops.object.mode_set(mode = 'EDIT')
bpy.ops.mesh.select_all(action='DESELECT')
        # mark every UV-seam edge as sharp
        for edge in bm.edges:
            if edge.seam:
                edge.smooth = False
bpy.ops.object.mode_set(mode = 'OBJECT')
bm.to_mesh(mesh)
bm.free()
return {'FINISHED'}
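# Registration sketch (not part of the original file; a typical add-on module
# would provide it): hook the operator into Blender so it can be invoked as
# bpy.ops.uv.islandtohardedges().
def register():
    bpy.utils.register_class(UV_OT_Island2HardEdges)


def unregister():
    bpy.utils.unregister_class(UV_OT_Island2HardEdges)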
| 24.888889
| 105
| 0.582589
|
30832d0ff617f11963fe33ee5f31df066fb1907d
| 828
|
py
|
Python
|
common/pyredis.py
|
Allen-lang/FXTest
|
ccbc5e986f4d0f9d3145a857674529380d873719
|
[
"MIT"
] | 1
|
2019-12-31T01:53:04.000Z
|
2019-12-31T01:53:04.000Z
|
common/pyredis.py
|
fuyang123/FXTest
|
ccbc5e986f4d0f9d3145a857674529380d873719
|
[
"MIT"
] | null | null | null |
common/pyredis.py
|
fuyang123/FXTest
|
ccbc5e986f4d0f9d3145a857674529380d873719
|
[
"MIT"
] | null | null | null |
'''
@author: lileilei
@file: pyredis.py
@time: 2018/9/19 10:49
'''
import redis
from config import redis_password,max_connec_redis
class ConRedisOper(object):
def __init__(self,host,port,db):
self.host=host
self.port=port
self.db=db
def connect(self):
pool = redis.ConnectionPool(host=self.host, port=self.port, password=redis_password, db=self.db,
max_connections=max_connec_redis)
coon = redis.Redis(connection_pool=pool)
return coon
def sethase(self,key,value,time=None):
        if time:
            # redis-py's setex takes (name, time, value), not (name, value, time)
            res = self.connect().setex(key, time, value)
else:
res=self.connect().set(key, value)
return res
def getset(self,key):
res=self.connect().get(key)
return res
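# Usage sketch (not in the original module; host, port and key are
# placeholders): set a key with a 60-second expiry, then read it back.
if __name__ == '__main__':
    client = ConRedisOper('127.0.0.1', 6379, db=0)
    client.sethase('greeting', 'hello', time=60)  # SETEX with a TTL
    print(client.getset('greeting'))              # b'hello'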
| 31.846154
| 105
| 0.596618
|
cbbb0a67f971529e10df9a0666033b70de32f01a
| 16,253
|
py
|
Python
|
docs/conf_common.py
|
genichin/esp-idf_for_ebell
|
117bb7805aac9384f6b55d839871c5933882824c
|
[
"Apache-2.0"
] | 1
|
2022-01-05T13:22:24.000Z
|
2022-01-05T13:22:24.000Z
|
docs/conf_common.py
|
AnishChristo96/SmartPocket
|
cef048e31b9d9c14cc9ece6e1cfb37c06ab47b0d
|
[
"Apache-2.0"
] | 2
|
2020-01-03T10:29:38.000Z
|
2020-04-20T10:39:40.000Z
|
docs/conf_common.py
|
BadrBouaddi/esp-idf
|
3b59d9c71042d2e4ee2058c142fa1cc1959091df
|
[
"Apache-2.0"
] | 1
|
2022-01-17T07:07:05.000Z
|
2022-01-17T07:07:05.000Z
|
# -*- coding: utf-8 -*-
#
# Common (non-language-specific) configuration for Read The Docs & Sphinx
#
# Based on a Read the Docs Template documentation build configuration file,
# created by sphinx-quickstart on Tue Aug 26 14:19:49 2014.
#
# This file is imported from a language-specific conf.py (ie en/conf.py or
# zh_CN/conf.py)
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
from __future__ import unicode_literals
import sys
import os
import re
import subprocess
# Note: If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute
from local_util import run_cmd_get_output, copy_if_modified
# build_docs on the CI server sometimes fails under Python3. This is a workaround:
sys.setrecursionlimit(3500)
try:
builddir = os.environ['BUILDDIR']
except KeyError:
builddir = '_build'
# Fill in a default IDF_PATH if it's missing (ie when Read The Docs is building the docs)
try:
idf_path = os.environ['IDF_PATH']
except KeyError:
idf_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..'))
def call_with_python(cmd):
# using sys.executable ensures that the scripts are called with the same Python interpreter
if os.system('{} {}'.format(sys.executable, cmd)) != 0:
raise RuntimeError('{} failed'.format(cmd))
# Call Doxygen to get XML files from the header files
print("Calling Doxygen to generate latest XML files")
if os.system("doxygen ../Doxyfile") != 0:
raise RuntimeError('Doxygen call failed')
# Doxygen has generated XML files in 'xml' directory.
# Copy them to 'xml_in', only touching the files which have changed.
copy_if_modified('xml/', 'xml_in/')
# Generate 'api_name.inc' files using the XML files by Doxygen
call_with_python('../gen-dxd.py')
def find_component_files(parent_dir, target_filename):
parent_dir = os.path.abspath(parent_dir)
result = []
component_files = dict()
for (dirpath, dirnames, filenames) in os.walk(parent_dir):
try:
# note: trimming "examples" dir as MQTT submodule
# has its own examples directory in the submodule, not part of IDF
dirnames.remove("examples")
except ValueError:
pass
if target_filename in filenames:
component_files[os.path.basename(dirpath)] = os.path.join(dirpath, target_filename)
components = sorted(component_files.keys())
for component in components:
result.append(component_files[component])
print("List of %s: %s" % (target_filename, ", ".join(components)))
return result
# Generate 'kconfig.inc' file from components' Kconfig files
print("Generating kconfig.inc from kconfig contents")
kconfig_inc_path = '{}/inc/kconfig.inc'.format(builddir)
temp_sdkconfig_path = '{}/sdkconfig.tmp'.format(builddir)
kconfigs = find_component_files("../../components", "Kconfig")
kconfig_projbuilds = find_component_files("../../components", "Kconfig.projbuild")
sdkconfig_renames = find_component_files("../../components", "sdkconfig.rename")
# trim the esp32s2beta component, until we have proper multi-target support
kconfigs = [k for k in kconfigs if "esp32s2beta" not in k]
kconfig_projbuilds = [k for k in kconfig_projbuilds if "esp32s2beta" not in k]
sdkconfig_renames = [r for r in sdkconfig_renames if "esp32s2beta" not in r]
kconfigs_source_path = '{}/inc/kconfigs_source.in'.format(builddir)
kconfig_projbuilds_source_path = '{}/inc/kconfig_projbuilds_source.in'.format(builddir)
prepare_kconfig_files_args = [sys.executable,
"../../tools/kconfig_new/prepare_kconfig_files.py",
"--env", "COMPONENT_KCONFIGS={}".format(" ".join(kconfigs)),
"--env", "COMPONENT_KCONFIGS_PROJBUILD={}".format(" ".join(kconfig_projbuilds)),
"--env", "COMPONENT_KCONFIGS_SOURCE_FILE={}".format(kconfigs_source_path),
"--env", "COMPONENT_KCONFIGS_PROJBUILD_SOURCE_FILE={}".format(kconfig_projbuilds_source_path),
]
subprocess.check_call(prepare_kconfig_files_args)
confgen_args = [sys.executable,
"../../tools/kconfig_new/confgen.py",
"--kconfig", "../../Kconfig",
"--sdkconfig-rename", "../../sdkconfig.rename",
"--config", temp_sdkconfig_path,
"--env", "COMPONENT_KCONFIGS={}".format(" ".join(kconfigs)),
"--env", "COMPONENT_KCONFIGS_PROJBUILD={}".format(" ".join(kconfig_projbuilds)),
"--env", "COMPONENT_SDKCONFIG_RENAMES={}".format(" ".join(sdkconfig_renames)),
"--env", "COMPONENT_KCONFIGS_SOURCE_FILE={}".format(kconfigs_source_path),
"--env", "COMPONENT_KCONFIGS_PROJBUILD_SOURCE_FILE={}".format(kconfig_projbuilds_source_path),
"--env", "IDF_PATH={}".format(idf_path),
"--output", "docs", kconfig_inc_path + '.in'
]
subprocess.check_call(confgen_args)
copy_if_modified(kconfig_inc_path + '.in', kconfig_inc_path)
# Generate 'esp_err_defs.inc' file with ESP_ERR_ error code definitions
esp_err_inc_path = '{}/inc/esp_err_defs.inc'.format(builddir)
call_with_python('../../tools/gen_esp_err_to_name.py --rst_output ' + esp_err_inc_path + '.in')
copy_if_modified(esp_err_inc_path + '.in', esp_err_inc_path)
# Generate version-related includes
#
# (Note: this is in a function as it needs to access configuration to get the language)
def generate_version_specific_includes(app):
print("Generating version-specific includes...")
version_tmpdir = '{}/version_inc'.format(builddir)
call_with_python('../gen-version-specific-includes.py {} {}'.format(app.config.language, version_tmpdir))
copy_if_modified(version_tmpdir, '{}/inc'.format(builddir))
# Generate toolchain download links
print("Generating toolchain download links")
base_url = 'https://dl.espressif.com/dl/'
toolchain_tmpdir = '{}/toolchain_inc'.format(builddir)
call_with_python('../gen-toolchain-links.py ../../tools/toolchain_versions.mk {} {}'.format(base_url, toolchain_tmpdir))
copy_if_modified(toolchain_tmpdir, '{}/inc'.format(builddir))
# http://stackoverflow.com/questions/12772927/specifying-an-online-image-in-sphinx-restructuredtext-format
#
suppress_warnings = ['image.nonlocal_uri']
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['breathe',
'link-roles',
'sphinxcontrib.blockdiag',
'sphinxcontrib.seqdiag',
'sphinxcontrib.actdiag',
'sphinxcontrib.nwdiag',
'sphinxcontrib.rackdiag',
'sphinxcontrib.packetdiag',
'html_redirects',
'sphinx.ext.todo',
]
# sphinx.ext.todo extension parameters
# If the below parameter is True, the extension
# produces output, else it produces nothing.
todo_include_todos = False
# Enabling this fixes cropping of blockdiag edge labels
seqdiag_antialias = True
# Breathe extension variables
# Doxygen regenerates files in 'xml/' directory every time,
# but we copy files to 'xml_in/' only when they change, to speed up
# incremental builds.
breathe_projects = {"esp32-idf": "xml_in/"}
breathe_default_project = "esp32-idf"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = ['.rst', '.md']
source_parsers = {'.md': 'recommonmark.parser.CommonMarkParser',
}
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Readthedocs largely ignores 'version' and 'release', and displays one of
# 'latest', tag name, or branch name, depending on the build type.
# Still, this is useful for non-RTD builds.
# This is supposed to be "the short X.Y version", but it's the only version
# visible when you open index.html.
# Display full version to make things less confusing.
version = run_cmd_get_output('git describe')
# The full version, including alpha/beta/rc tags.
# If needed, nearest tag is returned by 'git describe --abbrev=0'.
release = version
print('Version: {0} Release: {1}'.format(version, release))
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build','README.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# Custom added feature to allow redirecting old URLs
#
# Redirects should be listed in page_redirects.xt
#
with open("../page_redirects.txt") as f:
lines = [re.sub(" +", " ", line.strip()) for line in f.readlines() if line.strip() != "" and not line.startswith("#")]
for line in lines: # check for well-formed entries
if len(line.split(' ')) != 2:
raise RuntimeError("Invalid line in page_redirects.txt: %s" % line)
html_redirect_pages = [tuple(line.split(' ')) for line in lines]
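# For reference (an illustrative example, not taken from the repository):
# each non-comment line of page_redirects.txt must hold exactly two
# space-separated fields, e.g.
#   api-guides/old-page api-guides/new-page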
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../_static/espressif-logo.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ReadtheDocsTemplatedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
#
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
#
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ReadtheDocsTemplate.tex', u'Read the Docs Template Documentation',
u'Read the Docs', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'readthedocstemplate', u'Read the Docs Template Documentation',
[u'Read the Docs'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ReadtheDocsTemplate', u'Read the Docs Template Documentation',
u'Read the Docs', 'ReadtheDocsTemplate', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Override RTD CSS theme to introduce the theme corrections
# https://github.com/rtfd/sphinx_rtd_theme/pull/432
def setup(app):
app.add_stylesheet('theme_overrides.css')
generate_version_specific_includes(app)
| 36.688488
| 124
| 0.701409
|
3742745d8c02070937e3f9eed8c5cd4fce9b9e16
| 5,351
|
py
|
Python
|
tests/test_serializer.py
|
AbsaOSS/py2k
|
4e28810dd0a370bb5eb31c8cb34f63b888b9c450
|
[
"Apache-2.0"
] | 10
|
2021-03-12T09:01:50.000Z
|
2021-09-08T12:33:46.000Z
|
tests/test_serializer.py
|
AbsaOSS/py2k
|
4e28810dd0a370bb5eb31c8cb34f63b888b9c450
|
[
"Apache-2.0"
] | 91
|
2021-03-12T08:08:06.000Z
|
2022-03-28T00:16:41.000Z
|
tests/test_serializer.py
|
AbsaOSS/py2k
|
4e28810dd0a370bb5eb31c8cb34f63b888b9c450
|
[
"Apache-2.0"
] | 1
|
2021-04-26T02:42:12.000Z
|
2021-04-26T02:42:12.000Z
|
# Copyright 2021 ABSA Group Limited
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pandas as pd
import pytest
import py2k.serializer
from py2k.record import KafkaRecord
from py2k.serializer import KafkaSerializer
class ParamMock:
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def test_schema_string_of_value_serializer(serializer_without_key):
value_serializer = serializer_without_key.value_serializer()
schema = json.loads(value_serializer.kwargs.get('schema_str'))
expected_schema = {
'fields': [{'name': 'Field', 'type': 'string'}],
'name': 'ModelResult',
'namespace': 'python.kafka.modelresult',
'type': 'record'
}
assert schema == expected_schema
def test_key_serializer_none_if_no_key_fields(serializer_without_key):
key_serializer = serializer_without_key.key_serializer()
assert key_serializer is None
def test_schema_string_of_key_serializer(serializer_with_key):
key_serializer = serializer_with_key.key_serializer()
schema = json.loads(key_serializer.kwargs.get('schema_str'))
expected_schema = {
'fields': [{'name': 'Key', 'type': 'string'}],
'name': 'ModelResultKey',
'namespace': 'python.kafka.modelresult',
'type': 'record'
}
assert schema == expected_schema
def test_schema_string_of_multi_key_serializer(serializer_with_multiple_key):
key_serializer = serializer_with_multiple_key.key_serializer()
schema = json.loads(key_serializer.kwargs.get('schema_str'))
expected_schema = {
'fields': [{'name': 'Key1', 'type': 'string'},
{'name': 'Key2', 'type': 'string'}],
'name': 'ModelResultKey',
'namespace': 'python.kafka.modelresult',
'type': 'record'
}
assert schema == expected_schema
def test_value_serializer_without_key_by_default(serializer_with_multiple_key):
value_serializer = serializer_with_multiple_key.value_serializer()
schema = json.loads(value_serializer.kwargs.get('schema_str'))
expected_schema = {
'fields': [{'name': 'Field', 'type': 'string'}],
'name': 'ModelResult',
'namespace': 'python.kafka.modelresult',
'type': 'record'
}
assert schema == expected_schema
def test_value_serializer_with_key_when_specified(serializer_key_included):
value_serializer = serializer_key_included.value_serializer()
schema = json.loads(value_serializer.kwargs.get('schema_str'))
expected_schema = {
'fields': [{'name': 'Field', 'type': 'string'},
{'name': 'Key1', 'type': 'string'},
{'name': 'Key2', 'type': 'string'}],
'name': 'ModelResult',
'namespace': 'python.kafka.modelresult',
'type': 'record'
}
assert schema == expected_schema
@pytest.fixture
def serializer_without_key(monkeypatch, schema_registry_config):
monkeypatch.setattr(py2k.serializer, 'AvroSerializer', ParamMock)
class ModelResult(KafkaRecord):
Field: str
df = pd.DataFrame({'Field': ['field_value']})
record = ModelResult.from_pandas(df)[0]
return KafkaSerializer(record, schema_registry_config)
@pytest.fixture
def serializer_with_key(monkeypatch, schema_registry_config):
monkeypatch.setattr(py2k.serializer, 'AvroSerializer', ParamMock)
class ModelResult(KafkaRecord):
__key_fields__ = {'Key'}
Field: str
Key: str
df = pd.DataFrame({'Field': ['field_value'], 'Key': ['key_value']})
record = ModelResult.from_pandas(df)[0]
return KafkaSerializer(record, schema_registry_config)
@pytest.fixture
def serializer_with_multiple_key(monkeypatch, schema_registry_config):
monkeypatch.setattr(py2k.serializer, 'AvroSerializer', ParamMock)
class ModelResult(KafkaRecord):
__key_fields__ = {'Key1', 'Key2'}
Field: str
Key1: str
Key2: str
df = pd.DataFrame({
'Field': ['field_value'],
'Key1': ['key1_value'],
'Key2': ['key2_value']
})
record = ModelResult.from_pandas(df)[0]
return KafkaSerializer(record, schema_registry_config)
@pytest.fixture
def serializer_key_included(monkeypatch, schema_registry_config):
monkeypatch.setattr(py2k.serializer, 'AvroSerializer', ParamMock)
class ModelResult(KafkaRecord):
__key_fields__ = {'Key1', 'Key2'}
__include_key__ = True
Field: str
Key1: str
Key2: str
df = pd.DataFrame({
'Field': ['field_value'],
'Key1': ['key1_value'],
'Key2': ['key2_value']
})
record = ModelResult.from_pandas(df)[0]
return KafkaSerializer(record, schema_registry_config)
@pytest.fixture
def schema_registry_config():
return {
'url': "http://test.schema.registry"
}
| 29.893855
| 79
| 0.681555
|
0f4e5e728dc36ee6fa2cf6ac7a1350bb8a43a0a8
| 1,740
|
py
|
Python
|
home/Gareth/NoseMoov_butane_sensorMQ2_speech_voice_control.py
|
sola1993/inmoov
|
34e7bb6e214bd9bf3eee808c19f0ab09ec79345f
|
[
"Apache-2.0"
] | 1
|
2021-02-24T17:05:52.000Z
|
2021-02-24T17:05:52.000Z
|
home/Gareth/NoseMoov_butane_sensorMQ2_speech_voice_control.py
|
sola1993/inmoov
|
34e7bb6e214bd9bf3eee808c19f0ab09ec79345f
|
[
"Apache-2.0"
] | null | null | null |
home/Gareth/NoseMoov_butane_sensorMQ2_speech_voice_control.py
|
sola1993/inmoov
|
34e7bb6e214bd9bf3eee808c19f0ab09ec79345f
|
[
"Apache-2.0"
] | 1
|
2020-06-03T20:48:47.000Z
|
2020-06-03T20:48:47.000Z
|
# Gas sensor MQ2
# reads a butane sensor on analog 5
# waits for voice control from user before speaking back the butane level measured
from java.lang import String
from org.myrobotlab.service import Speech
from org.myrobotlab.service import Sphinx
from org.myrobotlab.service import Runtime
ear = Runtime.createAndStart("ear","Sphinx")
mouth = Runtime.createAndStart("mouth","Speech")
ear.startListening("hi james|gas levels please|again")
ear.addListener("recognized", python.name, "heard");
arduino = Runtime.createAndStart("arduino","Arduino")
arduino.connect("COM4")
readAnalogPin = 5 # butane sensor MQ2 is wired to analog 5
arduino.setSampleRate(8000) # make friendly sample rate
butane=0 # butane level variable
global butane
arduino.addListener("publishPin", "python", "publishPin") # add call back route
def heard(data):
global butane
#print "heard ", data
if (data == "hi james"):
#mouth.speak("hi james")
mouth.speak("hi gareth")
mouth.speak("how can i help you")
elif (data == "gas levels please"):
print ("give me gas levels please")
mouth.speak("for sure")
mouth.speak(str(butane))
mouth.speak("parts per million") # p.p.m needs to be calibrated for this
elif (data == "again"):
print ("give me gas levels please")
mouth.speak(str(butane))
mouth.speak("parts per million") # p.p.m needs to be calibrated for this
# my call-back
def publishPin(pin):
global butane
butane= pin.value
print butane
# get data from analog pin for 5 seconds
arduino.analogReadPollingStart(readAnalogPin)
sleep(200)
arduino.analogReadPollingStop(readAnalogPin)
ear.attach("mouth")
| 32.222222
| 84
| 0.690805
|
46998ba68af57803e3abfd950dea19ea7cd8650d
| 9,132
|
py
|
Python
|
test/unittest_base.py
|
CAVED123/Tensorforce
|
823177f77f9047b1e71eccfffc08315ed1636878
|
[
"Apache-2.0"
] | null | null | null |
test/unittest_base.py
|
CAVED123/Tensorforce
|
823177f77f9047b1e71eccfffc08315ed1636878
|
[
"Apache-2.0"
] | null | null | null |
test/unittest_base.py
|
CAVED123/Tensorforce
|
823177f77f9047b1e71eccfffc08315ed1636878
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from copy import deepcopy
from datetime import datetime
import os
import sys
import warnings
from tensorforce import Agent, Environment, Runner, TensorforceError
from tensorforce.core.layers import Layer
from test.unittest_environment import UnittestEnvironment
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class UnittestBase(object):
"""
Unit-test base class.
"""
# Unittest
num_updates = None
num_episodes = None
num_timesteps = None
# Environment
min_timesteps = 1
states = dict(
bool_state=dict(type='bool', shape=(1,)),
int_state=dict(type='int', shape=(2,), num_values=4),
float_state=dict(type='float', shape=(1, 1, 2)),
bounded_state=dict(type='float', shape=(), min_value=-0.5, max_value=0.5)
)
actions = dict(
bool_action=dict(type='bool', shape=(1,)),
int_action=dict(type='int', shape=(2,), num_values=4),
float_action=dict(type='float', shape=(1, 1)),
bounded_action=dict(type='float', shape=(2,), min_value=-0.5, max_value=0.5)
)
max_episode_timesteps = 5
# Exclude action types
exclude_bool_action = False
exclude_int_action = False
exclude_float_action = False
exclude_bounded_action = False
# Agent
agent = dict(
update=4, policy=dict(network=dict(type='auto', size=8, depth=1, internal_rnn=2)),
objective='policy_gradient', reward_estimation=dict(horizon=3)
)
# Tensorforce config
require_observe = False
require_all = False
def setUp(self):
warnings.filterwarnings(
action='ignore',
message='Converting sparse IndexedSlices to a dense Tensor of unknown shape'
)
def start_tests(self, name=None):
"""
Start unit-test method.
"""
if name is None:
sys.stdout.write('\n{} {}: '.format(
datetime.now().strftime('%H:%M:%S'), self.__class__.__name__[4:]
))
else:
sys.stdout.write('\n{} {} ({}): '.format(
datetime.now().strftime('%H:%M:%S'), self.__class__.__name__[4:], name
))
sys.stdout.flush()
def finished_test(self, assertion=None):
"""
Finished unit-test.
"""
if assertion is None:
assertion = True
else:
self.assertTrue(expr=assertion)
if assertion:
sys.stdout.write('.')
sys.stdout.flush()
def environment_spec(
self, max_episode_timesteps=None, min_timesteps=None, states=None, actions=None,
exclude_bool_action=False, exclude_int_action=False, exclude_float_action=False,
exclude_bounded_action=False
):
if states is None:
states = deepcopy(self.__class__.states)
if actions is None:
actions = deepcopy(self.__class__.actions)
if exclude_bool_action or self.__class__.exclude_bool_action:
actions.pop('bool_action')
if exclude_int_action or self.__class__.exclude_int_action:
actions.pop('int_action')
if exclude_float_action or self.__class__.exclude_float_action:
actions.pop('float_action')
if exclude_bounded_action or self.__class__.exclude_bounded_action:
actions.pop('bounded_action')
if min_timesteps is None:
min_timesteps = self.__class__.min_timesteps
if max_episode_timesteps is None:
max_episode_timesteps = self.__class__.max_episode_timesteps
return dict(
environment=UnittestEnvironment, max_episode_timesteps=max_episode_timesteps,
states=states, actions=actions, min_timesteps=min_timesteps
)
def agent_spec(self, require_observe=False, require_all=False, **agent):
for key, value in self.__class__.agent.items():
if key not in agent:
agent[key] = value
if self.__class__.require_all or require_all:
config = None
elif self.__class__.require_observe or require_observe:
config = dict(api_functions=['reset', 'act', 'independent_act', 'observe'])
else:
config = dict(api_functions=['reset', 'act', 'independent_act'])
return dict(agent=agent, config=config)
def prepare(
self,
# general environment
environment=None, max_episode_timesteps=None,
# unit-test environment
min_timesteps=None, states=None, actions=None,
# exclude action types
exclude_bool_action=False, exclude_int_action=False, exclude_float_action=False,
exclude_bounded_action=False,
# agent
require_observe=False, require_all=False, **agent
):
"""
Generic unit-test preparation.
"""
Layer.layers = None
if environment is None:
environment = self.environment_spec(
max_episode_timesteps=max_episode_timesteps, min_timesteps=min_timesteps,
states=states, actions=actions, exclude_bool_action=exclude_bool_action,
exclude_int_action=exclude_int_action, exclude_float_action=exclude_float_action,
exclude_bounded_action=exclude_bounded_action
)
environment.pop('max_episode_timesteps') # given separately below
elif min_timesteps is not None:
raise TensorforceError.unexpected()
if max_episode_timesteps is None:
max_episode_timesteps = self.__class__.max_episode_timesteps
environment = Environment.create(
environment=environment, max_episode_timesteps=max_episode_timesteps
)
agent = self.agent_spec(require_observe=require_observe, require_all=require_all, **agent)
agent = Agent.create(agent=agent, environment=environment)
return agent, environment
def unittest(
self,
# runner
num_updates=None, num_episodes=None, num_timesteps=None,
# general environment
environment=None, max_episode_timesteps=None,
# unit-test environment
min_timesteps=None, states=None, actions=None,
# exclude action types
exclude_bool_action=False, exclude_int_action=False, exclude_float_action=False,
exclude_bounded_action=False,
# agent
require_observe=False, require_all=False, **agent
):
"""
Generic unit-test.
"""
if environment is None:
environment = self.environment_spec(
max_episode_timesteps=max_episode_timesteps, min_timesteps=min_timesteps,
states=states, actions=actions, exclude_bool_action=exclude_bool_action,
exclude_int_action=exclude_int_action, exclude_float_action=exclude_float_action,
exclude_bounded_action=exclude_bounded_action
)
environment.pop('max_episode_timesteps') # given separately below
elif min_timesteps is not None:
raise TensorforceError.unexpected()
if max_episode_timesteps is None:
max_episode_timesteps = self.__class__.max_episode_timesteps
agent = self.agent_spec(require_observe=require_observe, require_all=require_all, **agent)
assert (num_updates is not None) + (num_episodes is not None) + \
(num_timesteps is not None) <= 1
if num_updates is None and num_episodes is None and num_timesteps is None:
num_updates = self.__class__.num_updates
num_episodes = self.__class__.num_episodes
num_timesteps = self.__class__.num_timesteps
if num_updates is None and num_episodes is None and num_timesteps is None:
num_updates = 2
assert (num_updates is not None) + (num_episodes is not None) + \
(num_timesteps is not None) == 1
evaluation = not any([
require_all, require_observe, self.__class__.require_all,
self.__class__.require_observe
])
runner = Runner(
agent=agent, environment=environment, max_episode_timesteps=max_episode_timesteps,
evaluation=evaluation
)
runner.run(
num_episodes=num_episodes, num_timesteps=num_timesteps, num_updates=num_updates,
use_tqdm=False
)
runner.close()
self.finished_test()
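# A minimal usage sketch (not part of the original file): a concrete test case
# subclasses UnittestBase together with unittest.TestCase and calls the generic
# unittest() helper; the episode count here is an illustrative assumption.
import unittest

class TestExampleUsage(UnittestBase, unittest.TestCase):

    def test_example(self):
        self.start_tests(name='example')
        self.unittest(num_episodes=2)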
| 36.38247
| 98
| 0.645313
|
2430c474bf6877abdd2418cf04308406a9bad79f
| 16
|
py
|
Python
|
sonnenblume/inventory/models.py
|
Sylver11/sonnenblume
|
a214bc68e110eb04d2505807e8d468b0d747d534
|
[
"MIT"
] | null | null | null |
sonnenblume/inventory/models.py
|
Sylver11/sonnenblume
|
a214bc68e110eb04d2505807e8d468b0d747d534
|
[
"MIT"
] | null | null | null |
sonnenblume/inventory/models.py
|
Sylver11/sonnenblume
|
a214bc68e110eb04d2505807e8d468b0d747d534
|
[
"MIT"
] | null | null | null |
# product model
| 8
| 15
| 0.75
|
1f443cb8b4a6b7f087da825d43f83e1da173de51
| 1,903
|
py
|
Python
|
igvc_ws/src/igvc_ekf/src/ekf_sensor_spoof.py
|
SoonerRobotics/igvc_software_2022
|
906e6a4fca22d2b0c06ef1b8a4a3a9df7f1d17dd
|
[
"MIT"
] | 4
|
2020-07-07T14:56:56.000Z
|
2021-08-13T23:31:07.000Z
|
igvc_ws/src/igvc_ekf/src/ekf_sensor_spoof.py
|
pradumn203/igvc-winners-2021
|
658233609054eafac59603a77b2a092dc002e145
|
[
"MIT"
] | 13
|
2019-11-12T02:57:54.000Z
|
2020-03-17T17:04:22.000Z
|
igvc_ws/src/igvc_ekf/src/ekf_sensor_spoof.py
|
pradumn203/igvc-winners-2021
|
658233609054eafac59603a77b2a092dc002e145
|
[
"MIT"
] | 3
|
2021-06-29T05:21:18.000Z
|
2021-08-23T05:03:27.000Z
|
#!/usr/bin/env python
from numpy import genfromtxt
from os.path import expanduser
import rospy
from std_msgs.msg import Float64
from geometry_msgs.msg import Vector3
if __name__ == "__main__":
# Get home directory
home = expanduser("~")
# Load data
gps_data = genfromtxt(home + "/gps_log.csv", delimiter=',')
vel_data = genfromtxt(home + "/vel_log.csv", delimiter=',')
accel_data = genfromtxt(home + "/accel_log.csv", delimiter=',')
hdg_data = genfromtxt(home + "/hdg_log.csv", delimiter=',')
# Set the iterator
i = 0
# Initialize the node
rospy.init_node("sensor_spoof_node")
# Set up publishers
gps_pub = rospy.Publisher("/igvc/gps", Vector3, queue_size=10)
vel_pub = rospy.Publisher("/igvc/velocity", Float64, queue_size=10)
accel_pub = rospy.Publisher("/igvc/acceleration", Float64, queue_size=10)
hdg_pub = rospy.Publisher("/igvc/heading", Float64, queue_size=10)
# Define the loop rate
loop_rate = rospy.Rate(60)
# Wait for the EKF to connect
while gps_pub.get_num_connections() == 0:
pass
# Run a loop to publish data to the EKF
while not rospy.is_shutdown():
# GPS
if i < gps_data.shape[0]:
gps_msg = Vector3(gps_data[i, 0], gps_data[i, 1], 0)
gps_pub.publish(gps_msg)
# Velocity
if i < vel_data.shape[0]:
vel_msg = Float64(vel_data[i])
vel_pub.publish(vel_msg)
# Acceleration
if i < accel_data.shape[0]:
accel_msg = Float64(accel_data[i])
accel_pub.publish(accel_msg)
# Heading
if i < hdg_data.shape[0]:
hdg_msg = Float64(hdg_data[i, 0])
hdg_pub.publish(hdg_msg)
# Increment
i = i + 1
# Limit the loop to a certain rate
loop_rate.sleep()
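# Side note (editor's assumption, not in the original script): the CSV logs
# replayed above can be produced from recorded numpy arrays with, e.g.:
#   from numpy import savetxt
#   savetxt(home + "/gps_log.csv", gps_array, delimiter=',')
# where gps_array is a hypothetical array of recorded GPS samples.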
| 27.185714
| 77
| 0.622701
|
7331854c5471e45cafae98d99241b8013c85e04f
| 6,824
|
py
|
Python
|
pysnmp-with-texts/CDR-DS2-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/CDR-DS2-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/CDR-DS2-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module CDR-DS2-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CDR-DS2-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:47:29 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ObjectName, enterprises, TimeTicks, IpAddress, snmpModules, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, NotificationType, ModuleIdentity, Gauge32, Unsigned32, MibIdentifier, ObjectIdentity, Counter64, iso, Counter32, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectName", "enterprises", "TimeTicks", "IpAddress", "snmpModules", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "NotificationType", "ModuleIdentity", "Gauge32", "Unsigned32", "MibIdentifier", "ObjectIdentity", "Counter64", "iso", "Counter32", "Bits")
DisplayString, TextualConvention, TimeStamp, TestAndIncr, TruthValue, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "TimeStamp", "TestAndIncr", "TruthValue", "RowStatus")
lucent = MibIdentifier((1, 3, 6, 1, 4, 1, 1751))
products = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 1))
softSwitch = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 1, 1198))
cdrDeviceServer = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 7))
cdrDS2 = ModuleIdentity((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 7, 2))
if mibBuilder.loadTexts: cdrDS2.setLastUpdated('240701')
if mibBuilder.loadTexts: cdrDS2.setOrganization('Lucent Technologies')
if mibBuilder.loadTexts: cdrDS2.setContactInfo('')
if mibBuilder.loadTexts: cdrDS2.setDescription('The MIB module for entities implementing the xxxx protocol.')
cdrSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 7, 2, 1))
cdrClient = MibScalar((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 7, 2, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 256))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: cdrClient.setStatus('current')
if mibBuilder.loadTexts: cdrClient.setDescription('A newClient name.')
callState = MibScalar((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 7, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: callState.setStatus('current')
if mibBuilder.loadTexts: callState.setDescription('Call state.')
fCAppID = MibScalar((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 7, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: fCAppID.setStatus('current')
if mibBuilder.loadTexts: fCAppID.setDescription("FullCircle Server' Application ID.")
fCAppInstance = MibScalar((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 7, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: fCAppInstance.setStatus('current')
if mibBuilder.loadTexts: fCAppInstance.setDescription("FullCircle Server' Application ID.")
severity = MibScalar((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 7, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: severity.setStatus('current')
if mibBuilder.loadTexts: severity.setDescription('Severity of a Long Duration Call (LDC) alarm. This is configurable, thus had to be a variable.')
originationNumber = MibScalar((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 7, 2, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 256))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: originationNumber.setStatus('current')
if mibBuilder.loadTexts: originationNumber.setDescription('Originator of the LDC.')
destinationNumber = MibScalar((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 7, 2, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 256))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: destinationNumber.setStatus('current')
if mibBuilder.loadTexts: destinationNumber.setDescription('Destination of the LDC.')
callAnswerTime = MibScalar((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 7, 2, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 256))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: callAnswerTime.setStatus('current')
if mibBuilder.loadTexts: callAnswerTime.setDescription("Call AnswerTime of the LDC, start of billing. Formatted as YYYYMMDD:hhmmssms (example '20010506:172335100' means may/06/2000 05:23PM, 35 seconds, 1000milliseconds).")
switchId = MibScalar((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 7, 2, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 256))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: switchId.setStatus('current')
if mibBuilder.loadTexts: switchId.setDescription('SwitchId.')
callId = MibScalar((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 7, 2, 1, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 256))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: callId.setStatus('current')
if mibBuilder.loadTexts: callId.setDescription('CallId, internal call handle may be used for debugging.')
fullPercent = MibScalar((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 7, 2, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: fullPercent.setStatus('current')
if mibBuilder.loadTexts: fullPercent.setDescription('FullPercent, the DiskFull percent value.')
fileSystem = MibScalar((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 7, 2, 1, 12), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 256))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: fileSystem.setStatus('current')
if mibBuilder.loadTexts: fileSystem.setDescription('FileSystem, File System.')
mibBuilder.exportSymbols("CDR-DS2-MIB", cdrDS2=cdrDS2, callState=callState, callId=callId, lucent=lucent, cdrClient=cdrClient, cdrSystem=cdrSystem, PYSNMP_MODULE_ID=cdrDS2, fCAppInstance=fCAppInstance, fileSystem=fileSystem, callAnswerTime=callAnswerTime, originationNumber=originationNumber, destinationNumber=destinationNumber, fullPercent=fullPercent, severity=severity, cdrDeviceServer=cdrDeviceServer, softSwitch=softSwitch, switchId=switchId, products=products, fCAppID=fCAppID)
| 111.868852
| 597
| 0.771688
|
02afa94dbb1df16096d58279a18cef27d17c9435
| 2,093
|
py
|
Python
|
backend/edw/models/sql/datastructures.py
|
MMotionMan/django-edw
|
0f686429d29e0f40409a3b2318664973b2844c08
|
[
"BSD-3-Clause"
] | 4
|
2019-09-18T05:51:12.000Z
|
2020-10-23T08:50:00.000Z
|
backend/edw/models/sql/datastructures.py
|
Vvvnukova/django-edw
|
18397c2e6e2d7ddebad4d83ffee16425e7ac4e9f
|
[
"BSD-3-Clause"
] | 10
|
2020-04-29T11:46:44.000Z
|
2022-03-11T23:38:27.000Z
|
backend/edw/models/sql/datastructures.py
|
Vvvnukova/django-edw
|
18397c2e6e2d7ddebad4d83ffee16425e7ac4e9f
|
[
"BSD-3-Clause"
] | 13
|
2020-04-09T07:49:48.000Z
|
2022-03-02T07:06:28.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db.models.sql.datastructures import Join
class CustomJoin(Join):
def __init__(self, subquery, subquery_params, parent_alias, table_alias, join_type, join_field, nullable):
self.subquery_params = subquery_params
super(CustomJoin, self).__init__(subquery, parent_alias, table_alias, join_type, join_field, nullable)
def as_sql(self, compiler, connection):
"""
Generates the full
LEFT OUTER JOIN (somequery) alias ON alias.somecol = othertable.othercol, params
clause for this join.
"""
params = []
sql = []
alias_str = '' if self.table_alias == self.table_name else (' %s' % self.table_alias)
params.extend(self.subquery_params)
qn = compiler.quote_name_unless_alias
qn2 = connection.ops.quote_name
sql.append('%s (%s)%s ON (' % (self.join_type, self.table_name, alias_str))
for index, (lhs_col, rhs_col) in enumerate(self.join_cols):
if index != 0:
sql.append(' AND ')
sql.append('%s.%s = %s.%s' % (
qn(self.parent_alias),
qn2(lhs_col),
qn(self.table_alias),
qn2(rhs_col),
))
extra_cond = self.join_field.get_extra_restriction(
compiler.query.where_class, self.table_alias, self.parent_alias)
if extra_cond:
extra_sql, extra_params = compiler.compile(extra_cond)
extra_sql = 'AND (%s)' % extra_sql
params.extend(extra_params)
sql.append('%s' % extra_sql)
sql.append(')')
return ' '.join(sql), params
def relabeled_clone(self, change_map):
new_parent_alias = change_map.get(self.parent_alias, self.parent_alias)
new_table_alias = change_map.get(self.table_alias, self.table_alias)
return self.__class__(
self.table_name, self.subquery_params, new_parent_alias, new_table_alias, self.join_type,
self.join_field, self.nullable)
| 41.86
| 110
| 0.627329
|
fd60c2c4387e372223bb0c91645d10c11a66b155
| 3,774
|
py
|
Python
|
sdk/servicefabric/azure-mgmt-servicefabric/azure/mgmt/servicefabric/models/cluster_health_policy_py3.py
|
vchske/azure-sdk-for-python
|
6383ed3676b7355af7be394562b126209961ec13
|
[
"MIT"
] | null | null | null |
sdk/servicefabric/azure-mgmt-servicefabric/azure/mgmt/servicefabric/models/cluster_health_policy_py3.py
|
vchske/azure-sdk-for-python
|
6383ed3676b7355af7be394562b126209961ec13
|
[
"MIT"
] | 1
|
2019-06-04T18:12:16.000Z
|
2019-06-04T18:12:16.000Z
|
sdk/servicefabric/azure-mgmt-servicefabric/azure/mgmt/servicefabric/models/cluster_health_policy_py3.py
|
vchske/azure-sdk-for-python
|
6383ed3676b7355af7be394562b126209961ec13
|
[
"MIT"
] | 1
|
2019-06-17T22:18:23.000Z
|
2019-06-17T22:18:23.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ClusterHealthPolicy(Model):
"""Defines a health policy used to evaluate the health of the cluster or of a
cluster node.
:param max_percent_unhealthy_nodes: The maximum allowed percentage of
unhealthy nodes before reporting an error. For example, to allow 10% of
nodes to be unhealthy, this value would be 10.
The percentage represents the maximum tolerated percentage of nodes that
can be unhealthy before the cluster is considered in error.
If the percentage is respected but there is at least one unhealthy node,
the health is evaluated as Warning.
The percentage is calculated by dividing the number of unhealthy nodes
over the total number of nodes in the cluster.
The computation rounds up to tolerate one failure on small numbers of
nodes. Default percentage is zero.
In large clusters, some nodes will always be down or out for repairs, so
this percentage should be configured to tolerate that.
Default value: 0.
:type max_percent_unhealthy_nodes: int
:param max_percent_unhealthy_applications: The maximum allowed percentage
of unhealthy applications before reporting an error. For example, to allow
10% of applications to be unhealthy, this value would be 10.
The percentage represents the maximum tolerated percentage of applications
that can be unhealthy before the cluster is considered in error.
If the percentage is respected but there is at least one unhealthy
application, the health is evaluated as Warning.
This is calculated by dividing the number of unhealthy applications over
the total number of application instances in the cluster, excluding
applications of application types that are included in the
ApplicationTypeHealthPolicyMap.
The computation rounds up to tolerate one failure on small numbers of
applications. Default percentage is zero.
Default value: 0.
:type max_percent_unhealthy_applications: int
:param application_health_policies: Defines the application health policy
map used to evaluate the health of an application or one of its children
entities.
:type application_health_policies: dict[str,
~azure.mgmt.servicefabric.models.ApplicationHealthPolicy]
"""
_validation = {
'max_percent_unhealthy_nodes': {'maximum': 100, 'minimum': 0},
'max_percent_unhealthy_applications': {'maximum': 100, 'minimum': 0},
}
_attribute_map = {
'max_percent_unhealthy_nodes': {'key': 'maxPercentUnhealthyNodes', 'type': 'int'},
'max_percent_unhealthy_applications': {'key': 'maxPercentUnhealthyApplications', 'type': 'int'},
'application_health_policies': {'key': 'applicationHealthPolicies', 'type': '{ApplicationHealthPolicy}'},
}
def __init__(self, *, max_percent_unhealthy_nodes: int=0, max_percent_unhealthy_applications: int=0, application_health_policies=None, **kwargs) -> None:
super(ClusterHealthPolicy, self).__init__(**kwargs)
self.max_percent_unhealthy_nodes = max_percent_unhealthy_nodes
self.max_percent_unhealthy_applications = max_percent_unhealthy_applications
self.application_health_policies = application_health_policies
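# A minimal construction sketch (not part of the generated file); the values
# are illustrative and follow the 0-100 validation bounds declared above.
if __name__ == '__main__':
    policy = ClusterHealthPolicy(
        max_percent_unhealthy_nodes=10,
        max_percent_unhealthy_applications=10)
    print(policy.max_percent_unhealthy_nodes)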
| 51.69863
| 157
| 0.720191
|
11b8d3a4fd054a75dda0a62c82cba8e23af77861
| 11,584
|
py
|
Python
|
src/main/python/rlbot/training/training.py
|
ericmburgess/RLBot
|
a4ce061bae1a773adf60d99f232f2a228de820a2
|
[
"MIT"
] | 1
|
2019-08-24T11:53:35.000Z
|
2019-08-24T11:53:35.000Z
|
src/main/python/rlbot/training/training.py
|
Acidburn0zzz/RLBot
|
b6365e0c92183563fb7ff67d4ba81b58a31b1ff5
|
[
"MIT"
] | null | null | null |
src/main/python/rlbot/training/training.py
|
Acidburn0zzz/RLBot
|
b6365e0c92183563fb7ff67d4ba81b58a31b1ff5
|
[
"MIT"
] | null | null | null |
from typing import Union, Optional, Iterator, Iterable, Callable
import random
import time
import traceback
from contextlib import contextmanager
from rlbot.training.status_rendering import training_status_renderer_context, Row
from rlbot.matchconfig.match_config import MatchConfig
from rlbot.setup_manager import SetupManager
from rlbot.utils import rate_limiter
from rlbot.utils.game_state_util import GameState
from rlbot.utils.logging_utils import get_logger, DEFAULT_LOGGER
from rlbot.utils.rendering.rendering_manager import RenderingManager
from rlbot.utils.structures.game_data_struct import GameTickPacket
from rlbot.utils.structures.game_interface import GameInterface
from rlbot.matchcomms.client import MatchcommsClient
"""
This file contains a minimal API to implement training.
For a more useful API see: https://github.com/RLBot/RLBotTraining/
"""
# Extend Pass and/or Fail to add your own, more detailed metrics.
class Pass:
""" Indicates that the bot passed the exercise. """
def __repr__(self):
return 'PASS'
class Fail:
""" Indicates that the bot failed the exercise. """
def __repr__(self):
return 'FAIL'
class FailDueToExerciseException(Fail):
""" Indicates that the test code threw an expetion. """
def __init__(self, exception: Exception, traceback_string: str):
self.exception = exception
self.traceback_string = traceback_string
def __repr__(self):
return 'FAIL: Exception raised by Exercise:\n' + self.traceback_string
# Note: not using Grade as an abstract base class for Pass/Fail
# as there should not be Grades which are neither Pass nor Fail.
Grade = Union[Pass, Fail]
class Exercise:
"""
Satisfy this interface to define your test cases.
This class provides a seeded random generator to support variation testing.
The responsibility of detecting timeouts lies with the implementation of
on_tick().
"""
# Creates a matchcomms client connected to the current match.
# Initialized before on_briefing() is called.
_matchcomms_factory: Callable[[], MatchcommsClient] = None
def get_name(self) -> str:
"""
Gets the name to be displayed on screen.
"""
raise NotImplementedError()
def get_match_config(self) -> MatchConfig:
"""
Gets the config with which this exercise should be run.
"""
raise NotImplementedError()
def setup(self, rng: random.Random) -> GameState:
"""
Returns the state in which the game should start in.
The implementing class is responsible for resetting the state after setup is called,
such that the exercise can be run multiple times to get the same result.
:param random: A seeded random number generator. For repeated runs of this
exercise, this parameter and the bots should be the only things which
causes variations between runs.
"""
raise NotImplementedError()
def on_tick(self, game_tick_packet: GameTickPacket) -> Optional[Grade]:
"""
This method is called each tick to allow you to make an assessment of the
performance of the bot(s).
Whether on_tick() is called before or after the bots receive the packet is undefined.
If this method returns None, the run of the exercise will continue.
If this method returns Pass() or Fail() or raises an exception, the run of
the exercise is terminated and any metrics will be returned.
"""
raise NotImplementedError()
def on_briefing(self) -> Optional[Grade]:
"""
This method is called before state-setting so that bots can be "briefed" on the upcoming exercise.
The "briefing" usually uses matchcomms to convey objectives and parameters.
A grade may be returned if the bot's response is already sufficient to pass or fail the exercise
before any on_tick() grading happens.
"""
pass
def set_matchcomms_factory(self, matchcomms_factory: Callable[[], MatchcommsClient]):
self._matchcomms_factory = matchcomms_factory
def render(self, renderer: RenderingManager):
"""
This method is called each tick to render exercise debug information.
This method is called after on_tick().
It is optional to override this method.
"""
pass
class Result:
def __init__(self, input_exercise: Exercise, input_seed: int, grade: Grade):
assert grade
self.seed = input_seed
self.exercise = input_exercise
self.grade = grade
def run_exercises(setup_manager: SetupManager, exercises: Iterable[Exercise], seed: int) -> Iterator[Result]:
"""
It is recommended to use setup_manager_context() to generate your setup_manager.
"""
game_interface = setup_manager.game_interface
names = [exercise.get_name() for exercise in exercises]
with training_status_renderer_context(names, game_interface.renderer) as ren:
for i, exercise in enumerate(exercises):
with safe_matchcomms_factory(setup_manager) as matchcomms_factory:
def update_row(status: str, status_color_func):
nonlocal i
nonlocal exercise
ren.update(i, Row(exercise.get_name(), status, status_color_func))
update_row('config', ren.renderman.white)
# Only reload the match if the config has changed.
new_match_config = exercise.get_match_config()
if new_match_config != setup_manager.match_config:
update_row('match', ren.renderman.white)
_setup_match(new_match_config, setup_manager)
update_row('bots', ren.renderman.white)
_wait_until_bots_ready(setup_manager, new_match_config)
update_row('reload', ren.renderman.white)
setup_manager.reload_all_agents(quiet=True)
# Briefing
update_row('brief', ren.renderman.white)
try:
exercise.set_matchcomms_factory(matchcomms_factory)
early_result = exercise.on_briefing()
except Exception as e:
update_row('brief', ren.renderman.red)
yield Result(exercise, seed, FailDueToExerciseException(e, traceback.format_exc()))
continue
if early_result is not None:
# on_briefing() returns a Grade, not a Result, so inspect the grade
# directly and wrap it in a Result before yielding.
if isinstance(early_result, Pass):
update_row('PASS', ren.renderman.green)
else:
update_row('FAIL', ren.renderman.red)
yield Result(exercise, seed, early_result)
continue
update_row('wait', ren.renderman.white)
_wait_until_good_ticks(game_interface)
update_row('setup', ren.renderman.white)
error_result = _setup_exercise(game_interface, exercise, seed)
if error_result is not None:
update_row('setup', ren.renderman.red)
yield error_result
continue
# Wait for the set_game_state() to propagate before we start running ex.on_tick()
# TODO: wait until the game looks similar.
update_row('sleep', ren.renderman.white)
time.sleep(0.03)
update_row('>>>>', ren.renderman.white)
result = _grade_exercise(game_interface, exercise, seed)
if isinstance(result.grade, Pass):
update_row('PASS', ren.renderman.green)
else:
update_row('FAIL', ren.renderman.red)
yield result
@contextmanager
def safe_matchcomms_factory(setup_manager: SetupManager):
clients = []
has_finished = False
def matchcomms_factory() -> MatchcommsClient:
assert not has_finished
client = MatchcommsClient(setup_manager.matchcomms_server.root_url)
clients.append(client)
return client
try:
yield matchcomms_factory
finally:
has_finished = True
for client in clients:
client.close()
def _wait_until_bots_ready(setup_manager: SetupManager, match_config: MatchConfig):
total_ready = 0
total_ready += setup_manager.try_recieve_agent_metadata()
logger = get_logger(DEFAULT_LOGGER)
expected_metadata_calls = sum(1 for player in match_config.player_configs if player.rlbot_controlled)
while total_ready < expected_metadata_calls:
logger.debug('Waiting on all bots to post their metadata.')
time.sleep(0.1)
total_ready += setup_manager.try_recieve_agent_metadata()
def _wait_until_good_ticks(game_interface: GameInterface, required_new_ticks: int=3):
"""Blocks until we're getting new packets, indicating that the match is ready."""
rate_limit = rate_limiter.RateLimiter(120)
last_tick_game_time = None # What the tick time of the last observed tick was
packet = GameTickPacket() # We want to do a deep copy for game inputs so people don't mess with em
seen_times = 0
while seen_times < required_new_ticks:
game_interface.update_live_data_packet(packet)
def is_good_tick():
if packet.game_info.seconds_elapsed == last_tick_game_time: return False
if not packet.game_info.is_round_active: return False
if any(car.is_demolished for car in packet.game_cars): return False
return True
if is_good_tick():
seen_times += 1
last_tick_game_time = packet.game_info.seconds_elapsed
rate_limit.acquire()
def _setup_match(match_config: MatchConfig, manager: SetupManager):
manager.shut_down(kill_all_pids=True, quiet=True) # To be safe.
manager.load_match_config(match_config)
manager.launch_quick_chat_manager()
manager.start_match()
manager.launch_bot_processes()
def _setup_exercise(game_interface: GameInterface, ex: Exercise, seed: int) -> Optional[Result]:
"""
Set the game state.
Only returns a result if there was an error in ex.setup()
"""
rng = random.Random()
rng.seed(seed)
try:
game_state = ex.setup(rng)
except Exception as e:
return Result(ex, seed, FailDueToExerciseException(e, traceback.format_exc()))
game_interface.set_game_state(game_state)
def _grade_exercise(game_interface: GameInterface, ex: Exercise, seed: int) -> Result:
grade = None
rate_limit = rate_limiter.RateLimiter(120)
last_tick_game_time = None # What the tick time of the last observed tick was
game_tick_packet = GameTickPacket() # We want to do a deep copy for game inputs so people don't mess with em
# Run until the Exercise finishes.
while grade is None:
# Read from game data shared memory
game_interface.update_live_data_packet(game_tick_packet)
# Run ex.on_tick() only if the game_info has updated.
tick_game_time = game_tick_packet.game_info.seconds_elapsed
if tick_game_time != last_tick_game_time:
last_tick_game_time = tick_game_time
try:
grade = ex.on_tick(game_tick_packet)
ex.render(game_interface.renderer)
except Exception as e:
return Result(ex, seed, FailDueToExerciseException(e, traceback.format_exc()))
rate_limit.acquire()
return Result(ex, seed, grade)
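# A minimal sketch (not part of the original file) showing the Exercise
# interface in use; the grading rule and setup state are illustrative
# assumptions, and a real exercise would also override get_match_config().
class AlwaysPassExercise(Exercise):

    def get_name(self) -> str:
        return 'AlwaysPass'

    def setup(self, rng: random.Random) -> GameState:
        return GameState()  # empty state: leave the match as-is

    def on_tick(self, game_tick_packet: GameTickPacket) -> Optional[Grade]:
        return Pass()  # grade immediately on the first tick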
| 39.267797
| 113
| 0.668767
|
df1b0914141fb8c564347696098dd1d2a23570ef
| 356
|
py
|
Python
|
scriptable/api/ast.py
|
c7nw3r/scriptable
|
b285d865da41774c8321ebf51f5a468ef1c92402
|
[
"Apache-2.0"
] | null | null | null |
scriptable/api/ast.py
|
c7nw3r/scriptable
|
b285d865da41774c8321ebf51f5a468ef1c92402
|
[
"Apache-2.0"
] | null | null | null |
scriptable/api/ast.py
|
c7nw3r/scriptable
|
b285d865da41774c8321ebf51f5a468ef1c92402
|
[
"Apache-2.0"
] | null | null | null |
from abc import ABC, abstractmethod
from typing import TypeVar, Generic
from scriptable.api.ast_binding import ASTBinding
T = TypeVar("T")
class AST(ABC, Generic[T]):
@abstractmethod
def execute(self, context: ASTBinding) -> T:
pass
class EmptyAST(AST[None]):
def execute(self, context: ASTBinding) -> None:
return None
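# A minimal sketch (not part of the original module) of a concrete AST node;
# ConstantAST is hypothetical and only illustrates the Generic[T] contract.
class ConstantAST(AST[int]):

    def __init__(self, value: int):
        self.value = value

    def execute(self, context: ASTBinding) -> int:
        return self.value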
| 17.8
| 51
| 0.69382
|
d2afc456846368f66b9f3e5a202d96749fa1c833
| 2,079
|
py
|
Python
|
ww/settings.py
|
hbradleyiii/ww
|
5a537ab6865fcfb43a253cdb8cf2397a53d940a7
|
[
"MIT"
] | null | null | null |
ww/settings.py
|
hbradleyiii/ww
|
5a537ab6865fcfb43a253cdb8cf2397a53d940a7
|
[
"MIT"
] | null | null | null |
ww/settings.py
|
hbradleyiii/ww
|
5a537ab6865fcfb43a253cdb8cf2397a53d940a7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# name: settings.py
# author: Harold Bradley III
# email: harold@bradleystudio.net
# created on: 01/23/2016
#
# description: The settings file for ww
#
from __future__ import absolute_import, print_function
import os
import time
TEMPLATE_PATH = os.path.dirname(os.path.realpath(__file__)) + '/../templates/'
## Change these settings to your heart's content ##
# Site Settings
SITE_ADMIN_EMAIL = 'email@mail.com'
SITE_ERROR_LOG = 'error.log'
SITE_ACCESS_LOG = 'access.log'
WWW_DIR = '/var/www/'
WWW_USR = 'www-data'
WWW_ADMIN = 'admin_usr'
GITIGNORE_TEMPLATE = TEMPLATE_PATH + 'gitignore.template'
HTA_5G_TEMPLATE = TEMPLATE_PATH + '5g-htaccess.template'
VHOST_PATH = '/etc/apache2/sites-available/'
VHOST_TEMPLATE = TEMPLATE_PATH + 'vhost.template'
VHOST_SSL_TEMPLATE = TEMPLATE_PATH + 'vhost-ssl.template'
MYSQL = {
'host' : 'localhost',
'user' : 'username',
'password' : 'password123',
}
# WordPress Settings
WP_LATEST = 'http://wordpress.org/latest.tar.gz'
WP_SETUP_URL = '/wp-admin/setup-config.php?step=2'
WP_INSTALL_URL = '/wp-admin/install.php?step=2'
WP_HTA_TEMPLATE = TEMPLATE_PATH + 'wordpress-htaccess.template'
WP_HTA_HARDENED_TEMPLATE = TEMPLATE_PATH + 'hardened-wordpress-htaccess.template'
WP_CONFIG_TEMPLATE = TEMPLATE_PATH + 'wp-config.php.template'
WP_ADMIN_USER = 'admin'
WP_ADMIN_EMAIL = 'admin@wp.com'
WP_ADMIN_PW = 'password123' # Please change this.
WP_SALT_URL = 'https://api.wordpress.org/secret-key/1.1/salt/'
# Apache commands
CMD_RESTART_APACHE = 'sudo service apache2 reload'
CMD_ENABLE_CONFIG = 'sudo a2ensite ' # run as: {command} domain
CMD_DISABLE_CONFIG = 'sudo a2dissite ' # run as: {command} domain
CMD_CHECK_IF_ENABLED = "apache2ctl -S | grep ' namevhost {0} '" # See if apache is serving domain ({})
# Try to import local settings. This is a temporary workaround.
try:
from .settings_local import *
except ImportError:
print("Can't find settings_local. Using default settings.")
| 28.479452
| 103
| 0.71621
|
39c75d6da777d1bafc4673f875ec9200a461af16
| 9,017
|
py
|
Python
|
photutils/psf/groupstars.py
|
Onoddil/photutils
|
433f3e54d3f53282ae04eadde9e1ddf657944590
|
[
"BSD-3-Clause"
] | null | null | null |
photutils/psf/groupstars.py
|
Onoddil/photutils
|
433f3e54d3f53282ae04eadde9e1ddf657944590
|
[
"BSD-3-Clause"
] | null | null | null |
photutils/psf/groupstars.py
|
Onoddil/photutils
|
433f3e54d3f53282ae04eadde9e1ddf657944590
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides classes to perform grouping of stars.
"""
import abc
from astropy.table import Column
import numpy as np
__all__ = ['DAOGroup', 'DBSCANGroup', 'GroupStarsBase']
class GroupStarsBase(metaclass=abc.ABCMeta):
"""
This base class provides the basic interface for subclasses that
are capable of classifying stars in groups.
"""
def __call__(self, starlist):
"""
Classify stars into groups.
Parameters
----------
starlist : `~astropy.table.Table`
List of star positions. Columns named as ``x_0`` and
``y_0``, which correspond to the centroid coordinates of
the sources, must be provided.
Returns
-------
group_starlist : `~astropy.table.Table`
``starlist`` with an additional column named ``group_id``
whose unique values represent groups of mutually overlapping
stars.
"""
return self.group_stars(starlist)
@abc.abstractmethod
def group_stars(self, starlist):
"""
Classify stars into groups.
Parameters
----------
starlist : `~astropy.table.Table`
List of star positions. Columns named as ``x_0`` and
``y_0``, which correspond to the centroid coordinates of
the sources, must be provided.
Returns
-------
group_starlist : `~astropy.table.Table`
``starlist`` with an additional column named ``group_id``
whose unique values represent groups of mutually overlapping
stars.
"""
raise NotImplementedError('Needs to be implemented in a subclass.')
class DAOGroup(GroupStarsBase):
"""
This class implements the DAOGROUP algorithm presented by
Stetson (1987).
The method ``group_stars`` divides an entire starlist into sets of
distinct, self-contained groups of mutually overlapping stars.
It accepts as input a list of stars and determines which stars are close
enough to adversely influence each other's profile fits.
Parameters
----------
crit_separation : float or int
Distance, in units of pixels, such that any two stars separated by
less than this distance will be placed in the same group.
Notes
-----
Assuming the PSF FWHM is known, ``crit_separation`` may be set to
k*fwhm for some positive real k.
See Also
--------
photutils.detection.DAOStarFinder
References
----------
[1] Stetson, Astronomical Society of the Pacific, Publications,
(ISSN 0004-6280), vol. 99, March 1987, p. 191-222.
Available at:
https://ui.adsabs.harvard.edu/abs/1987PASP...99..191S/abstract
"""
def __init__(self, crit_separation):
self.crit_separation = crit_separation
@property
def crit_separation(self):
return self._crit_separation
@crit_separation.setter
def crit_separation(self, crit_separation):
if not isinstance(crit_separation, (float, int)):
raise ValueError('crit_separation is expected to be either float '
f'or int. Received {type(crit_separation)}.')
elif crit_separation < 0.0:
raise ValueError('crit_separation is expected to be a positive '
f'real number. Got {crit_separation}.')
else:
self._crit_separation = crit_separation
def group_stars(self, starlist):
cstarlist = starlist.copy()
if 'id' not in cstarlist.colnames:
cstarlist.add_column(Column(name='id',
data=np.arange(len(cstarlist)) + 1))
cstarlist.add_column(Column(name='group_id',
data=np.zeros(len(cstarlist),
dtype=int)))
if not np.array_equal(cstarlist['id'], np.arange(len(cstarlist)) + 1):
raise ValueError('id column must be an integer-valued ' +
'sequence starting from 1. ' +
f"Got {cstarlist['id']}")
n = 1
while (cstarlist['group_id'] == 0).sum() > 0:
init_star = cstarlist[np.where(cstarlist['group_id'] == 0)[0][0]]
index = self.find_group(init_star,
cstarlist[cstarlist['group_id'] == 0])
cstarlist['group_id'][index-1] = n
k = 1
K = len(index)
while k < K:
init_star = cstarlist[cstarlist['id'] == index[k]]
tmp_index = self.find_group(
init_star, cstarlist[cstarlist['group_id'] == 0])
if len(tmp_index) > 0:
cstarlist['group_id'][tmp_index-1] = n
index = np.append(index, tmp_index)
K = len(index)
k += 1
n += 1
return cstarlist
def find_group(self, star, starlist):
"""
Find the ids of those stars in ``starlist`` which are at a
distance less than ``crit_separation`` from ``star``.
Parameters
----------
star : `~astropy.table.Row`
Star which will be either the head of a cluster or an
isolated one.
starlist : `~astropy.table.Table`
List of star positions. Columns named as ``x_0`` and
``y_0``, which correspond to the centroid coordinates of
the sources, must be provided.
Returns
-------
Array containing the ids of those stars which are at a distance less
than ``crit_separation`` from ``star``.
"""
star_distance = np.hypot(star['x_0'] - starlist['x_0'],
star['y_0'] - starlist['y_0'])
distance_criteria = star_distance < self.crit_separation
return np.asarray(starlist[distance_criteria]['id'])
class DBSCANGroup(GroupStarsBase):
"""
Class to create star groups according to a distance criteria using
the Density-based Spatial Clustering of Applications with Noise
(DBSCAN) from scikit-learn.
Parameters
----------
crit_separation : float or int
Distance, in units of pixels, such that any two stars separated
by less than this distance will be placed in the same group.
min_samples : int, optional (default=1)
Minimum number of stars necessary to form a group.
metric : string or callable (default='euclidean')
The metric to use when calculating distance between each pair of
stars.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used to actually find nearest neighbors.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree.
References
----------
[1] Scikit Learn DBSCAN.
https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html#sklearn.cluster.DBSCAN
Notes
-----
* The attribute ``crit_separation`` corresponds to ``eps`` in
`sklearn.cluster.DBSCAN
<https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html#sklearn.cluster.DBSCAN>`_.
* This class provides more general algorithms than
`photutils.psf.DAOGroup`. More precisely,
`photutils.psf.DAOGroup` is a special case of
`photutils.psf.DBSCANGroup` when ``min_samples=1`` and
``metric=euclidean``. Additionally, `photutils.psf.DBSCANGroup`
may be faster than `photutils.psf.DAOGroup`.
"""
def __init__(self, crit_separation, min_samples=1, metric='euclidean',
algorithm='auto', leaf_size=30):
self.crit_separation = crit_separation
self.min_samples = min_samples
self.metric = metric
self.algorithm = algorithm
self.leaf_size = leaf_size
def group_stars(self, starlist):
from sklearn.cluster import DBSCAN
cstarlist = starlist.copy()
if 'id' not in cstarlist.colnames:
cstarlist.add_column(Column(name='id',
data=np.arange(len(cstarlist)) + 1))
if not np.array_equal(cstarlist['id'], np.arange(len(cstarlist)) + 1):
raise ValueError('id column must be an integer-valued ' +
'sequence starting from 1. ' +
f"Got {cstarlist['id']}")
pos_stars = np.transpose((cstarlist['x_0'], cstarlist['y_0']))
dbscan = DBSCAN(eps=self.crit_separation,
min_samples=self.min_samples, metric=self.metric,
algorithm=self.algorithm, leaf_size=self.leaf_size)
cstarlist['group_id'] = (dbscan.fit(pos_stars).labels_ +
np.ones(len(cstarlist), dtype=int))
return cstarlist
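# A minimal usage sketch (not part of the original module); the coordinates
# are made up. The first two stars sit within crit_separation of each other,
# so they receive the same group_id.
if __name__ == '__main__':
    from astropy.table import Table
    stars = Table({'x_0': [10.0, 11.0, 50.0], 'y_0': [10.0, 10.5, 50.0]})
    daogroup = DAOGroup(crit_separation=2.0)
    print(daogroup(stars))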
| 36.358871
| 111
| 0.594211
|
5ad5e17120d64e87ca54b8756c0198116d57959d
| 6,857
|
py
|
Python
|
scripts/stateful_utils.py
|
triton-inference-server/stateful_backend
|
d07ead0dfcfb3af78f3f93f7f28d78ac00c49829
|
[
"MIT"
] | 4
|
2021-12-04T17:19:42.000Z
|
2022-01-05T23:14:57.000Z
|
scripts/stateful_utils.py
|
triton-inference-server/stateful_backend
|
d07ead0dfcfb3af78f3f93f7f28d78ac00c49829
|
[
"MIT"
] | null | null | null |
scripts/stateful_utils.py
|
triton-inference-server/stateful_backend
|
d07ead0dfcfb3af78f3f93f7f28d78ac00c49829
|
[
"MIT"
] | 3
|
2021-10-12T22:39:30.000Z
|
2022-03-15T04:48:49.000Z
|
# The MIT License (MIT)
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import docker
from docker.models.containers import Container
from docker.models.images import Image
from docker.types.containers import DeviceRequest, Ulimit
import subprocess
import shlex
from datetime import datetime
import stateful_config
def LogPrint(*args, **kwargs):
now = datetime.now()
print(f"[{now}] ", *args, **kwargs)
return
docker_client = None
def get_docker_client():
global docker_client
if docker_client is None:
docker_client = docker.from_env()
return docker_client
def remove_container(cnt: Container):
try:
cnt.stop()
cnt.remove()
except Exception:
pass  # the container may already be stopped or removed
return
def remove_container_by_name(cnt_name):
dcl = get_docker_client()
cnt: Container
for cnt in dcl.containers.list(all=True, filters={"name": cnt_name}):
if cnt_name == cnt.name:
LogPrint("Removing container: ", cnt_name)
remove_container(cnt)
return
def remove_image_by_name(img_name):
dcl = get_docker_client()
LogPrint("Removing image: ", img_name)
dcl.images.remove(img_name)
return
def remove_image_with_containers(img_name):
dcl = get_docker_client()
cnt: Container
for cnt in dcl.containers.list(all=True):
LogPrint("Found container :", cnt.name, cnt.image.tags)
for tag in cnt.image.tags:
if tag == img_name:
LogPrint("Stopping and removing container :", cnt.name)
remove_container(cnt)
# now that all containers are stopped/removed, remove the image
dcl.images.remove(img_name)
return
def is_image_ready(img_name):
dcl = get_docker_client()
img: Image
for img in dcl.images.list():
for tag in img.tags:
if tag == img_name:
return True
return False
def pull_image(img_name):
dcl = get_docker_client()
img_repo = img_name.split(":")[0]
img_tag = img_name.split(":")[1]
dcl.images.pull(repository=img_repo, tag=img_tag)
return
def is_container_ready(cnt_name:str) -> Container:
dcl = get_docker_client()
cnt: Container
for cnt in dcl.containers.list(all=True, filters={"name": cnt_name}):
if cnt_name == cnt.name:
return cnt
return None
def is_container_running(cnt_name:str) -> Container:
dcl = get_docker_client()
cnt: Container
for cnt in dcl.containers.list(filters={"name": cnt_name, "status":"running"}):
if cnt_name == cnt.name:
return cnt
return None
def get_running_container(cnt_name:str) -> Container:
LogPrint("Looking for running container: ", cnt_name)
dcl = get_docker_client()
cnt: Container
cnt = is_container_ready(cnt_name)
if cnt is None:
return None
# cnt = is_container_running(cnt_name)
if cnt.status != 'running':
cnt.start()
return cnt
def install_default_cmake(ccnt: Container):
print("Installing default cmake ...")
status = ccnt.exec_run(stateful_config.TRITON_CLIENT_DEFAULT_CMAKE_INSTALL_CMD)
# print(status[0], status[1].decode())
assert status[0] == 0
return
def fix_pubkey_issue(ccnt: Container):
print("Fixing pubkey before installing newer cmake ...")
status = ccnt.exec_run(stateful_config.TRITON_CLIENT_PUBKEY_FIX_CMD)
# print(status[0], status[1].decode())
assert status[0] == 0
return
def install_newer_cmake(ccnt: Container):
# This key fix should be temporary until Triton SDK container is updated
fix_pubkey_issue(ccnt)
print("Installing newer cmake ...")
# The following are necessary for 22.04 and newer SDK containers
status = ccnt.exec_run(stateful_config.TRITON_CLIENT_CMAKE_WGET_KEY_CMD)
# print(status[0], status[1].decode())
assert status[0] == 0
status = ccnt.exec_run(stateful_config.TRITON_CLIENT_CMAKE_GPG_KEY_CMD)
# print(status[0], status[1].decode())
assert status[0] == 0
status = ccnt.exec_run(stateful_config.TRITON_CLIENT_CMAKE_ADD_KEY_CMD)
# print(status[0], status[1].decode())
assert status[0] == 0
status = ccnt.exec_run(stateful_config.TRITON_CLIENT_CMAKE_ADD_REPO_CMD)
# print(status[0], status[1].decode())
assert status[0] == 0
status = ccnt.exec_run(stateful_config.TRITON_CLIENT_CMAKE_APT_UPDATE_CMD)
# print(status[0], status[1].decode())
assert status[0] == 0
status = ccnt.exec_run(stateful_config.TRITON_CLIENT_CMAKE_INSTALL_CMD)
# print(status[0], status[1].decode())
assert status[0] == 0
return
def create_container(img_name:str, cnt_name:str=None, auto_remove=True, \
with_gpus=True, ports=None, \
shm_size=None, memlock=None, \
stack_size=None, volumes=None, \
as_root=False):
# set the user parameter
user_param = None
if not as_root:
uid = subprocess.check_output(shlex.split("id -u")).decode().strip()
gid = subprocess.check_output(shlex.split("id -g")).decode().strip()
user_param = uid + ":" + gid
# pull the image if it is missing
if not is_image_ready(img_name):
pull_image(img_name)
LogPrint("Creating new container:{0} from Image: {1}".format(cnt_name, img_name))
dcl = get_docker_client()
devs = []
if with_gpus:
devs.append( DeviceRequest(count=-1, capabilities=[['gpu']]) )
network_mode = "host"
if ports is not None:
network_mode = "" ## TODO?
ulimits = []
if memlock is not None:
ulimits.append( Ulimit(name="memlock", soft=memlock, hard=memlock) )
if stack_size is not None:
ulimits.append( Ulimit(name="stack", soft=stack_size, hard=stack_size) )
cnt = dcl.containers.create(img_name, name=cnt_name, auto_remove=auto_remove, \
tty=True, device_requests=devs, ports=ports, shm_size=shm_size, \
network_mode=network_mode, ulimits=ulimits, volumes=volumes, \
user=user_param)
return cnt
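# A minimal usage sketch (not part of the original module); the image and
# container names are illustrative.
if __name__ == "__main__":
    cnt = create_container("ubuntu:20.04", cnt_name="stateful-utils-demo",
                           auto_remove=False, with_gpus=False, as_root=True)
    cnt.start()
    LogPrint("Running:", is_container_running("stateful-utils-demo") is not None)
    remove_container(cnt)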
| 34.114428
| 83
| 0.72189
|
5a99abd7727c56508aade2aa1c185afd12fbaa0f
| 1,031
|
py
|
Python
|
suzieq/sqobjects/network.py
|
zxiiro/suzieq
|
eca92820201c05bc80081599f69e41cd6b991107
|
[
"Apache-2.0"
] | null | null | null |
suzieq/sqobjects/network.py
|
zxiiro/suzieq
|
eca92820201c05bc80081599f69e41cd6b991107
|
[
"Apache-2.0"
] | null | null | null |
suzieq/sqobjects/network.py
|
zxiiro/suzieq
|
eca92820201c05bc80081599f69e41cd6b991107
|
[
"Apache-2.0"
] | null | null | null |
from pandas import DataFrame
from suzieq.sqobjects.basicobj import SqObject
class NetworkObj(SqObject):
def __init__(self, **kwargs):
super().__init__(table='network', **kwargs)
self._valid_get_args = ['namespace', 'hostname', 'version',
'os', 'model', 'vendor', 'columns', 'query_str']
self._valid_find_args = ['namespace', 'hostname', 'vrf', 'vlan',
'address', 'asn', 'resolve_bond']
def find(self, **kwargs):
addr = kwargs.get('address', '')
asn = kwargs.get('asn', '')
if not self.ctxt.engine:
raise AttributeError('No analysis engine specified')
if not addr and not asn:
raise AttributeError('Must specify address or asn')
try:
self._check_input_for_valid_args(self._valid_find_args, **kwargs)
except Exception as error:
df = DataFrame({'error': [f'{error}']})
return df
return self.engine.find(**kwargs)
| 33.258065
| 80
| 0.5742
|
863259e789e6f153f78d2104c792274ac7f61889
| 490
|
py
|
Python
|
ex016.py
|
ErosMLima/python-server-connection
|
a15706a007a95eff64597fa02e64d95b6b2da6a5
|
[
"MIT"
] | 2
|
2020-07-27T06:33:59.000Z
|
2021-02-02T15:17:56.000Z
|
ex016.py
|
ErosMLima/python-server-connection
|
a15706a007a95eff64597fa02e64d95b6b2da6a5
|
[
"MIT"
] | null | null | null |
ex016.py
|
ErosMLima/python-server-connection
|
a15706a007a95eff64597fa02e64d95b6b2da6a5
|
[
"MIT"
] | null | null | null |
# Exercise 16, Guanabara's Python course
# Write a program that reads any real number from the keyboard and shows its integer portion on screen.
# E.g.: enter a number> 6.127
# The number 6.127 has integer part 6.
'''from math import trunc
num = float(input('Digite um valor: '))
print('O valor digitado foi {} e a sua porção inteira é {}'.format(num, trunc(num)))'''
num = float(input('Digite um valor: '))
print('O valor digitado foi {} e a sua porção inteira é {}'.format(num, int(num)))
| 40.833333
| 101
| 0.693878
|
f6a9d6e6ca69536bd166cd1a295fb0e8dad08994
| 4,933
|
py
|
Python
|
SC101_Projects/Babynames/babynames.py
|
Yu-Hsuan-Lin/SC101-Projects
|
d8c3cca313996e0c7e21acf02a31ed8faf335e20
|
[
"MIT"
] | null | null | null |
SC101_Projects/Babynames/babynames.py
|
Yu-Hsuan-Lin/SC101-Projects
|
d8c3cca313996e0c7e21acf02a31ed8faf335e20
|
[
"MIT"
] | null | null | null |
SC101_Projects/Babynames/babynames.py
|
Yu-Hsuan-Lin/SC101-Projects
|
d8c3cca313996e0c7e21acf02a31ed8faf335e20
|
[
"MIT"
] | null | null | null |
"""
SC101 Baby Names Project
Adapted from Nick Parlante's Baby Names assignment by
Jerry Liao.
Reads baby-name data files into a dict mapping name -> {year: rank} and
supports searching the names for a target substring.
"""
import sys
def add_data_for_name(name_data, year, rank, name):
"""
Adds the given year and rank to the associated name in the name_data dict.
Input:
name_data (dict): dict holding baby name data
year (str): the year of the data entry to add
rank (str): the rank of the data entry to add
name (str): the name of the data entry to add
Output:
This function modifies the name_data dict to store the provided
name, year, and rank. This function does not return any values.
"""
# check if name in name_data
if name in name_data:
# if already has rank in a year, change the rank to a smaller one
if year in name_data[name]:
if int(rank) < int(name_data[name][year]):
name_data[name][year] = rank
# if year is not added, add the year->rank pair
else:
name_data[name][year] = rank
else:
name_data[name] = {}
name_data[name][year] = rank
def add_file(name_data, filename):
"""
Reads the information from the specified file and populates the name_data
dict with the data found in the file.
Input:
name_data (dict): dict holding baby name data
filename (str): name of the file holding baby name data
Output:
This function modifies the name_data dict to store information from
the provided file name. This function does not return any value.
"""
with open(filename, 'r') as f:
# the first line is year
year = f.readline()
year = year.strip()
# start from the second line
for line in f:
name_lst = line.split(',')
rank = name_lst[0].strip()
# male's name
name1 = name_lst[1].strip()
# female's name
name2 = name_lst[2].strip()
add_data_for_name(name_data, year, rank, name1)
add_data_for_name(name_data, year, rank, name2)
def read_files(filenames):
"""
Reads the data from all files specified in the provided list
into a single name_data dict and then returns that dict.
Input:
filenames (List[str]): a list of filenames containing baby name data
Returns:
name_data (dict): the dict storing all baby name data in a structured manner
"""
name_data = {}
for filename in filenames:
add_file(name_data, filename)
return name_data
def search_names(name_data, target):
"""
Given a name_data dict that stores baby name information and a target string,
returns a list of all names in the dict that contain the target string. This
function should be case-insensitive with respect to the target string.
Input:
name_data (dict): a dict containing baby name data organized by name
target (str): a string to look for in the names contained within name_data
Returns:
matching_names (List[str]): a list of all names from name_data that contain
the target string
"""
matching_names = []
for name in name_data:
if case_insensitive(target) in case_insensitive(name):
matching_names.append(name)
return matching_names
def case_insensitive(string):
    """Return a copy of string with all upper-case letters lowered (helper for search)."""
    new_string = ''
    for ch in string:
        if ch.isupper():
            new_string += ch.lower()
        else:
            new_string += ch
    return new_string
def print_names(name_data):
    """
    (provided, DO NOT MODIFY)
    Given a name_data dict, print out all its data, one name per line.
    The names are printed in alphabetical order,
    with the corresponding years data displayed in increasing order.

    Input:
        name_data (dict): a dict containing baby name data organized by name

    Returns:
        This function does not return anything
    """
    for key, value in sorted(name_data.items()):
        print(key, sorted(value.items()))


def main():
    # (provided, DO NOT MODIFY)
    args = sys.argv[1:]
    # Two command line forms
    # 1. file1 file2 file3 ..
    # 2. -search target file1 file2 file3 ..

    # Assume no search, so list of filenames to read
    # is the args list
    filenames = args

    # Check if we are doing search, set target variable
    target = ''
    if len(args) >= 2 and args[0] == '-search':
        target = args[1]
        filenames = args[2:]  # Update filenames to skip first 2

    # Read in all the filenames: baby-1990.txt, baby-2000.txt, ...
    names = read_files(filenames)

    # Either we do a search or just print everything.
    if len(target) > 0:
        search_results = search_names(names, target)
        for name in search_results:
            print(name)
    else:
        print_names(names)


if __name__ == '__main__':
    main()
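A quick illustration of the nested dict these functions build and query, assuming the functions above are in scope; the sample names and ranks below are invented for demonstration:

# Illustrative only: hand-built data in the shape read_files() produces.
name_data = {}
add_data_for_name(name_data, '2000', '19', 'Aaden')
add_data_for_name(name_data, '2010', '1', 'Aaden')
add_data_for_name(name_data, '2000', '3', 'Mary')
add_data_for_name(name_data, '2000', '7', 'Mary')  # 7 > 3, so Mary keeps rank '3'
print(name_data)                       # {'Aaden': {'2000': '19', '2010': '1'}, 'Mary': {'2000': '3'}}
print(search_names(name_data, 'AAD'))  # ['Aaden'] -- matching ignores case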
| 29.538922
| 84
| 0.631056
|
a5185978683785d561191953d442d35cffca17be
| 2,957
|
py
|
Python
|
tests/python/pants_test/backend/graph_info/tasks/test_cloc.py
|
GoingTharn/pants
|
d3f2005dcae0745f3e3bf525fc46e7d28f04f073
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/backend/graph_info/tasks/test_cloc.py
|
GoingTharn/pants
|
d3f2005dcae0745f3e3bf525fc46e7d28f04f073
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/backend/graph_info/tasks/test_cloc.py
|
GoingTharn/pants
|
d3f2005dcae0745f3e3bf525fc46e7d28f04f073
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import absolute_import, division, print_function, unicode_literals

from builtins import filter

from pants.backend.graph_info.tasks.cloc import CountLinesOfCode
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.python.targets.python_library import PythonLibrary
from pants_test.engine.scheduler_test_base import SchedulerTestBase
from pants_test.task_test_base import ConsoleTaskTestBase


class ClocTest(ConsoleTaskTestBase, SchedulerTestBase):
  @classmethod
  def task_type(cls):
    return CountLinesOfCode

  def test_counts(self):
    self.create_file('src/py/foo/foo.py', '# A comment.\n\nprint("some code")\n# Another comment.')
    self.create_file('src/py/foo/bar.py', '# A comment.\n\nprint("some more code")')
    self.create_file('src/py/dep/dep.py', 'print("a dependency")')
    self.create_file('src/java/foo/Foo.java', '// A comment. \n class Foo(){}\n')
    self.create_file('src/java/foo/Bar.java', '// We do not expect this file to appear in counts.')

    dep_py_tgt = self.make_target('src/py/dep', PythonLibrary, sources=['dep.py'])
    py_tgt = self.make_target(
      'src/py/foo',
      PythonLibrary,
      dependencies=[dep_py_tgt],
      sources=['foo.py', 'bar.py'],
    )
    java_tgt = self.make_target('src/java/foo', JavaLibrary, sources=['Foo.java'])

    def assert_counts(res, lang, files, blank, comment, code):
      for line in res:
        fields = line.split()
        if len(fields) >= 5:
          if fields[0] == lang:
            self.assertEquals(files, int(fields[1]))
            self.assertEquals(blank, int(fields[2]))
            self.assertEquals(comment, int(fields[3]))
            self.assertEquals(code, int(fields[4]))
            return
      self.fail('Found no output line for {}'.format(lang))

    res = self.execute_console_task(
      targets=[py_tgt, java_tgt],
      options={'transitive': True},
    )
    assert_counts(res, 'Python', files=3, blank=2, comment=3, code=3)
    assert_counts(res, 'Java', files=1, blank=0, comment=1, code=1)

    res = self.execute_console_task(
      targets=[py_tgt, java_tgt],
      options={'transitive': False},
    )
    assert_counts(res, 'Python', files=2, blank=2, comment=3, code=2)
    assert_counts(res, 'Java', files=1, blank=0, comment=1, code=1)

  def test_ignored(self):
    self.create_file('src/py/foo/foo.py', 'print("some code")')
    self.create_file('src/py/foo/empty.py', '')
    py_tgt = self.make_target('src/py/foo', PythonLibrary, sources=['foo.py', 'empty.py'])

    res = self.execute_console_task(
      targets=[py_tgt],
      options={'ignored': True},
    )
    self.assertEquals(['Ignored the following files:',
                       'src/py/foo/empty.py: zero sized file'],
                      list(filter(None, res))[-2:])
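The assert_counts helper above scans a whitespace-separated summary table for the row naming a given language. A standalone sketch of the same parsing idea follows; the SAMPLE rows and the counts_for helper are made up for illustration, not cloc's guaranteed output format:

# Illustrative only: parsing cloc-style summary lines; the sample rows are invented.
SAMPLE = [
    'Language  files  blank  comment  code',
    'Python        3      2        3     3',
    'Java          1      0        1     1',
]

def counts_for(lines, lang):
    # Return (files, blank, comment, code) for the first row naming lang.
    for line in lines:
        fields = line.split()
        if len(fields) >= 5 and fields[0] == lang:
            return tuple(int(f) for f in fields[1:5])
    raise ValueError('Found no output line for {}'.format(lang))

print(counts_for(SAMPLE, 'Python'))  # (3, 2, 3, 3)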
| 39.959459
| 99
| 0.668583
|