hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2c2176452024a68569a05aa63afc1709de969fb6 | 3,289 | py | Python | src/pyPreprocessing/transform.py | Snijderfrey/pyPreprocessing | 3faab0579aeb3e98b10caeeac3cccb101a48e825 | [
"MIT"
] | 3 | 2021-06-26T10:10:45.000Z | 2021-11-20T14:52:38.000Z | src/pyPreprocessing/transform.py | Snijderfrey/pyPreprocessing | 3faab0579aeb3e98b10caeeac3cccb101a48e825 | [
"MIT"
] | null | null | null | src/pyPreprocessing/transform.py | Snijderfrey/pyPreprocessing | 3faab0579aeb3e98b10caeeac3cccb101a48e825 | [
"MIT"
] | 1 | 2022-03-21T12:41:09.000Z | 2022-03-21T12:41:09.000Z | # -*- coding: utf-8 -*-
"""
Provides functions for data transformation (currently only LLS) and
normalization.
"""
import numpy as np
def transform(raw_data, mode, direction='direct', **kwargs):
    """Apply a mathematical transformation (or its inverse) to data.

    Parameters
    ----------
    raw_data : ndarray
        2D numpy array with shape (N, M): N data rows of M values each.
        For a single data row the shape is (1, M).  Note: in 'direct'
        mode the array is shifted in place before transformation.
    mode : str
        Transformation to apply.  Currently only 'log_log_sqrt' is
        supported: square root first, then the logarithm twice.
    direction : str, optional
        'direct' applies the transformation, 'inverse' undoes it.
        The default is 'direct'.
    **kwargs for the different modes
        mode is 'log_log_sqrt' and direction is 'inverse':
            min_value : float
                Original data minimum (lost during the direct
                transformation). Default is 1.

    Raises
    ------
    ValueError
        If mode or direction is not recognized.

    Returns
    -------
    ndarray
        Transformed data, same shape as raw_data.
    """
    # Modes currently implemented for both directions.
    transform_modes = ['log_log_sqrt']
    # Validate direction first, then mode (same precedence as before).
    if direction not in ('direct', 'inverse'):
        raise ValueError('No valid transform direction entered. Allowed '
                         'directions are [\'direct\', \'inverse\']')
    if mode not in transform_modes:
        raise ValueError('No valid transform mode entered. Allowed modes '
                         'are {0}'.format(transform_modes))
    if direction == 'direct':
        # Shift so the minimum becomes zero (in place), then apply
        # log(log(sqrt(x + 1) + 1) + 1).
        offset = np.min(raw_data)
        raw_data -= offset
        raw_data = np.log(np.log(np.sqrt(raw_data + 1) + 1) + 1)
    else:
        # Exact algebraic inverse of the chain above, followed by
        # restoring the (user-supplied) original minimum.
        offset = kwargs.get('min_value', 1)
        raw_data = (np.exp(np.exp(raw_data) - 1) - 1)**2 - 1
        raw_data += offset
    return raw_data
def normalize(raw_data, mode, factor=1, **kwargs):
    """Normalize data rows.

    Parameters
    ----------
    raw_data : array_like
        2D array with shape (N, M) containing N data rows of M values.
    mode : str
        Normalization method. Currently only 'total_intensity' is
        supported: each row is divided by its trapezoidal integral.
    factor : float, optional
        Value each normalized row is scaled to. The default is 1.
    **kwargs for the different modes
        mode is 'total_intensity':
            x_data : ndarray, optional
                x values used for the integration. Default is
                np.arange(M).

    Raises
    ------
    ValueError
        If the value passed as mode is not understood.

    Returns
    -------
    ndarray
        Normalized data with the same shape as raw_data.
    """
    raw_data = np.asarray(raw_data)
    # list of allowed modes for normalization
    normalize_modes = ['total_intensity']
    if mode == normalize_modes[0]:
        x_data_points = raw_data.shape[1]
        x_data = kwargs.get('x_data', np.arange(x_data_points))
        # np.trapz was renamed to np.trapezoid in NumPy 2.0; prefer the
        # new name when available, fall back to the old one otherwise.
        trapezoid = getattr(np, 'trapezoid', None) or np.trapz
        # Per-row integrals; broadcasting replaces the previous
        # repeat + reshape construction of the conversion factor.
        integrals = trapezoid(raw_data, x=x_data, axis=1)
        normalized_data = raw_data / integrals[:, np.newaxis] * factor
    else:
        raise ValueError('No valid normalization mode entered. Allowed modes '
                         'are {0}'.format(normalize_modes))
    return normalized_data
| 35.365591 | 78 | 0.604743 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,829 | 0.556096 |
2c21c566d2ca466187f4db034e82279f282017b2 | 1,183 | py | Python | Hard/Trapping_Rain_Water.py | dianjiaogit/LeetCode_Python_solution | 390693c839d1be8802c21ea81062443b6d5ea36f | [
"MIT"
] | null | null | null | Hard/Trapping_Rain_Water.py | dianjiaogit/LeetCode_Python_solution | 390693c839d1be8802c21ea81062443b6d5ea36f | [
"MIT"
] | null | null | null | Hard/Trapping_Rain_Water.py | dianjiaogit/LeetCode_Python_solution | 390693c839d1be8802c21ea81062443b6d5ea36f | [
"MIT"
] | null | null | null | # Given n non-negative integers representing an elevation map where the width of each bar is 1, compute how much water it is able to trap after raining.
# The above elevation map is represented by array [0,1,0,2,1,0,1,3,2,1,2,1]. In this case, 6 units of rain water (blue section) are being trapped. Thanks Marcos for contributing this image!
# Example:
# Input: [0,1,0,2,1,0,1,3,2,1,2,1]
# Output: 6
class Solution:
    def trap(self, height):
        """Compute how much rain water an elevation map can trap.

        Uses the standard two-pointer technique: walk inward from both
        ends, always advancing the side with the smaller running
        maximum.  Water above each bar is bounded by that side's
        maximum, so it can be accounted for immediately.

        Replaces the previous recursive implementation, which sliced the
        list repeatedly (O(n^2) time and extra space) and called
        ``Solution.trap(self, ...)`` explicitly.

        :type height: List[int]
        :rtype: int
        """
        if not height:
            return 0
        left, right = 0, len(height) - 1
        left_max, right_max = height[left], height[right]
        total = 0
        while left < right:
            if left_max <= right_max:
                # Left wall is the binding constraint for this column.
                left += 1
                left_max = max(left_max, height[left])
                total += left_max - height[left]
            else:
                # Right wall is the binding constraint for this column.
                right -= 1
                right_max = max(right_max, height[right])
                total += right_max - height[right]
        return total
2c225109d5668fc0aa323264567d8fc0cf77c6dc | 3,172 | py | Python | core/common.py | shenzebang/DMTRPO | 3beb528c5b5aebe35dfa873788c5be3f5977f53f | [
"MIT"
] | 1 | 2019-11-17T11:05:26.000Z | 2019-11-17T11:05:26.000Z | core/common.py | shenzebang/DMTRPO | 3beb528c5b5aebe35dfa873788c5be3f5977f53f | [
"MIT"
] | null | null | null | core/common.py | shenzebang/DMTRPO | 3beb528c5b5aebe35dfa873788c5be3f5977f53f | [
"MIT"
] | null | null | null | import torch
from utils2.torch import to_device
import numpy as np
import multiprocessing as mp
import math
def estimate_advantages(memories, value_net, gamma, tau, device='cpu', dtype=torch.double, queue=None, pid=None, num_agent=None):
    """Compute GAE advantages and value targets for each memory.

    For every memory the sampled trajectory is converted to tensors, the
    critic ``value_net`` predicts state values, and advantages are
    computed backwards through time with generalized advantage
    estimation.  Advantages are standardized per memory.

    :param memories: sequence of replay memories; each must provide
        ``sample()`` returning a batch with ``state``, ``reward``,
        ``action`` and ``mask`` fields (mask is 0 at episode ends).
    :param value_net: critic network mapping states to value estimates.
    :param gamma: discount factor.
    :param tau: GAE smoothing coefficient (lambda).
    :param device: device the resulting tensors are moved to.
    :param dtype: tensor dtype used for the converted batch data.
    :param queue: optional multiprocessing queue; when given, per-memory
        results are put on the queue (tagged with a global index built
        from ``pid`` and ``num_agent``) and nothing is returned.
    :param pid: worker index used to build the global result index.
    :param num_agent: number of memories handled per worker.
    :return: (advantages, returns, states, actions) lists, one entry per
        memory, or ``None`` when ``queue`` is used.
    """
    advantages_list = []
    states_list =[]
    actions_list = []
    returns_list = []
    for memory, memory_index in zip(memories, range(len(memories))):
        batch = memory.sample()
        states = torch.from_numpy(np.stack(batch.state)).to(dtype).to(device)
        rewards = torch.from_numpy(np.stack(batch.reward)).to(dtype).to(device)
        actions = torch.from_numpy(np.stack(batch.action)).to(dtype).to(device)
        masks = torch.from_numpy(np.stack(batch.mask)).to(dtype).to(device)
        # with torch.no_grad():
        values = value_net(states).detach()
        # The backward GAE recursion below runs on CPU tensors.
        rewards, masks, values = to_device(torch.device('cpu'), rewards, masks, values)
        tensor_type = type(rewards)
        deltas = tensor_type(rewards.size(0), 1)
        advantages = tensor_type(rewards.size(0), 1)
        prev_value = 0
        prev_advantage = 0
        # delta_t = r_t + gamma * V(s_{t+1}) * mask_t - V(s_t)
        # A_t = delta_t + gamma * tau * A_{t+1} * mask_t
        for i in reversed(range(rewards.size(0))):
            deltas[i] = rewards[i] + gamma * prev_value * masks[i] - values[i]
            advantages[i] = deltas[i] + gamma * tau * prev_advantage * masks[i]
            prev_value = values[i, 0]
            prev_advantage = advantages[i, 0]
        returns = values + advantages
        # Standardize advantages per memory to stabilize policy updates.
        advantages = (advantages - advantages.mean()) / advantages.std()
        advantages, returns, states, actions = to_device(device, advantages, returns, states, actions)
        states_list.append(states)
        actions_list.append(actions)
        advantages_list.append(advantages)
        returns_list.append(returns)
        if queue is not None:
            queue.put([pid*num_agent+memory_index, advantages, returns, states, actions])
    if queue is None:
        return advantages_list, returns_list, states_list, actions_list
def estimate_advantages_parallel(memories, value_net, gamma, tau, device='cpu', dtype=torch.float64, num_parallel_workers=mp.cpu_count()):
    """Estimate GAE advantages for many memories across worker processes.

    Splits ``memories`` evenly over ``num_parallel_workers`` processes,
    each running :func:`estimate_advantages` and pushing per-memory
    results onto a shared queue tagged with a global index, so the
    results can be reassembled in the original memory order.

    NOTE(review): only ``num_parallel_workers * floor(len(memories) /
    num_parallel_workers)`` memories are handed to workers, yet the
    collection loop waits for ``len(memories)`` results — confirm that
    len(memories) is always a multiple of the worker count, otherwise
    ``queue.get`` will time out.

    :return: (advantages, returns, states, actions) lists ordered like
        ``memories``.
    """
    workers = []
    queue = mp.Queue()
    num_agents = len(memories)
    # Memories per worker; any remainder is not assigned (see note above).
    process_agent_count = int(math.floor(num_agents / num_parallel_workers))
    for i in range(num_parallel_workers):
        worker_args = (memories[i*process_agent_count:(i+1)*process_agent_count], value_net, gamma, tau, device, dtype, queue, i, process_agent_count)
        workers.append(mp.Process(target=estimate_advantages, args=worker_args))
    for worker in workers:
        worker.start()
    # Pre-size the result lists so queue results can be placed by index.
    advantages_list = [None]*len(memories)
    returns_list = [None]*len(memories)
    states_list = [None] * len(memories)
    actions_list = [None] * len(memories)
    for _ in range(len(memories)):
        pid, advantages, returns, states, actions = queue.get(timeout=10)
        # print("pid {}. done".format(pid))
        advantages_list[pid] = advantages
        returns_list[pid] = returns
        states_list[pid] = states
        actions_list[pid] = actions
    queue.close()
    queue.join_thread()
    return advantages_list, returns_list, states_list, actions_list
| 39.160494 | 150 | 0.669294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.023014 |
2c22a320a2928428c08eb5053a706398b340f23f | 2,235 | py | Python | src/predict.py | ashishnegi2000/FinalYr_1 | 14fddaa7463141a19bb6c2a25003115847f63395 | [
"MIT"
] | null | null | null | src/predict.py | ashishnegi2000/FinalYr_1 | 14fddaa7463141a19bb6c2a25003115847f63395 | [
"MIT"
] | null | null | null | src/predict.py | ashishnegi2000/FinalYr_1 | 14fddaa7463141a19bb6c2a25003115847f63395 | [
"MIT"
] | null | null | null | #Predictions performed by this module
#dependencies
import base64
import numpy as np
import io
from PIL import Image
import keras
from keras import backend as K
from keras.models import Sequential
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator, img_to_array
from model import Model, DecoderType
from main import infer2
from flask import request
from flask import jsonify
from flask import Flask
from imageio import imread
app = Flask(__name__)
"""
def get_model():
This function loads the already-built keras model
global model
model = load_model('model.h5')
print("Model loaded!")"""
def preprocess_image(image, target_size):
    """Convert a PIL image into a batched array suitable for model input.

    Ensures RGB mode, resizes to ``target_size``, converts to a numeric
    array and prepends a batch axis so the result has shape
    (1, height, width, channels).
    """
    rgb_image = image if image.mode == "RGB" else image.convert("RGB")
    resized = rgb_image.resize(target_size)
    array = img_to_array(resized)
    # Models expect a batch dimension even for a single image.
    return np.expand_dims(array, axis=0)
"""print(" * Loading Keras model ... ")
get_model()"""
@app.route("/predict", methods=["POST"])
def predict():
"""
whenever something is posted from /predict,
this function will process the info posted through POST http method
message: json from POST method
encoded: key is 'image', value is base64encoded image sent from client
decoded: as it says
image: decoded is bytes in a file, not an actual image,
image.open converts those bytes into PIL file
"""
message = request.get_json(force=True)
encoded = message['image']
encoded = encoded.replace("data:image/jpeg;base64,", "")
print(encoded)
decoded = base64.b64decode(encoded)
image = imread(io.BytesIO(decoded))
"""
processed_image = preprocess_image(image, target_size=(224,224))"""
"""prediction = model.predict(processed_image).tolist()"""
model = Model(list(open("/home/shikhar/Desktop/simpleHTR/SimpleHTR/model/charList.txt").read()), decoder_type=0, must_restore=True, dump=True)
response = infer2(model, image)
response = {
'text': response['text'],
'probability': str(response['probability'])
}
return jsonify(response)
@app.route("/", methods=["GET"])
def hello():
return 'Hello'
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000) | 27.592593 | 146 | 0.694407 | 0 | 0 | 0 | 0 | 1,214 | 0.543177 | 0 | 0 | 977 | 0.437136 |
2c22fe7b5968c4acadcc023580c5ccbb977f7642 | 11,875 | py | Python | fair-data-model/scripts/rdfizer/rdfizer.py | longmanplus/beat-covid | fc5c88b191d7aa1e70cef8a055c25803b6d013a6 | [
"MIT"
] | 1 | 2021-11-09T23:26:49.000Z | 2021-11-09T23:26:49.000Z | fair-data-model/scripts/rdfizer/rdfizer.py | longmanplus/beat-covid | fc5c88b191d7aa1e70cef8a055c25803b6d013a6 | [
"MIT"
] | 1 | 2021-07-08T01:25:55.000Z | 2021-07-08T01:25:55.000Z | fair-data-model/scripts/rdfizer/rdfizer.py | longmanplus/beat-covid | fc5c88b191d7aa1e70cef8a055c25803b6d013a6 | [
"MIT"
] | 4 | 2020-11-16T06:31:58.000Z | 2021-07-14T12:50:23.000Z | # @name: rdfizer.py
# @description: Script to generate RDF data
# @version: 1.0
# @date: 28-04-2021
# @author: Núria Queralt Rosinach
# @email: n.queralt_rosinach@lumc.nl
"""Script to generate RDF data for Beat-COVID cytokine clinical measurements"""
import sys, os
from rdflib import Namespace, Graph, BNode, Literal
from rdflib.namespace import RDF, RDFS, XSD, DCTERMS
# Prefixes
bc = Namespace("https://rdf.biosemantics.org/resources/beat-covid/")
bco = Namespace("http://purl.org/beat-covid/cytokines-semantic-model.owl#")
obo = Namespace("http://purl.obolibrary.org/obo/")
sio = Namespace("http://semanticscience.org/resource/")
efo = Namespace("http://www.ebi.ac.uk/efo/")
prov = Namespace("http://www.w3.org/ns/prov#")
has_output = sio.SIO_000229
has_value = sio.SIO_000300
# Functions
def generate_rdf(variables_dict):
    """Build the RDF graph for one cytokine-measurement record.

    :param variables_dict: mapping from CSV column name to value for a
        single data row.  Keys of the form ``lum_<protein>_<kit>``
        (excluding ``lum_date_*``) are treated as Luminex cytokine
        measurements.  NOTE: the dict is modified in place when
        ``clinical_id`` is empty.
    :return: an rdflib ``Graph`` holding all triples for the record.
    """
    # binds
    rdf = Graph()
    rdf.bind("bc", bc)
    rdf.bind("bco", bco)
    rdf.bind("obo", obo)
    rdf.bind("sio", sio)
    rdf.bind("efo", efo)
    rdf.bind("prov", prov)
    # entries
    # Entity
    # Missing clinical ids are recorded as the placeholder "NA".
    if not variables_dict['clinical_id']: variables_dict['clinical_id'] = "NA"
    person = bc["person/BEATCOVID_" + variables_dict['beat_id'] + "_CLINICAL_" + variables_dict['clinical_id']]
    # LAB MEASUREMENTS (MEASUREMENT PROCESS) MODEL
    # Identifier
    person_study_id = bc["person_study_id/" + variables_dict['beat_id']]
    # Role
    person_study_role = bc["person_study_role/BEATCOVID_" + variables_dict['record_id']
                           + "_" + variables_dict['beat_id']]
    # age
    age = bc["person_age/" + variables_dict['age']]
    # ward
    ward = bc["ward/" + variables_dict['ward']]
    # institute
    institute = bc["institute/" + variables_dict['institute_abbreviation']]
    # measurement process date
    measurement_process_date = bc["lab/measurement_process_date/BEATCOVID_"
                                  + variables_dict['lum_date_meas']]
    # BIOSAMPLES (SAMPLING PROCESS) MODEL
    # Biosample
    biosample = bc["biosample/BEATCOVID_" + variables_dict['record_id']]
    # Process
    sampling_process = bc["biosample/sampling_process/BEATCOVID_"
                          + variables_dict['record_id']]
    # order
    order = bc["biosample/order_" + variables_dict['order']]
    # sampling process date
    sampling_process_date = bc["biosample/sampling_process_date/BEATCOVID_"
                               + variables_dict['date_sampling']]
    # Attribute/object
    organ = "blood_serum"
    biosample_object = bc["object/" + organ]
    # Role
    person_donor_role = bc["person_donor_role/BEATCOVID_" + variables_dict['record_id']]
    # Identifier
    person_donor_id = bc["person_donor_id/" + variables_dict['beat_id']]
    biosample_id = bc["biosample/biosample_id/BEATCOVID_" + variables_dict['record_id']]
    # CLINICAL OBSERVATIONS (EXAMINATION PROCESS) MODEL
    # Identifier
    clinical = bc["clinical/patient_id/" + variables_dict['clinical_id']]
    # Observation
    # observation = bc["clinical/observation/BEATCOVID_" + variables_dict['clinical_observations']]
    # add triples to entry
    # LAB MEASUREMENTS (MEASUREMENT PROCESS) MODEL
    # Entity
    rdf.add((person, RDF.type, sio.SIO_000498))
    rdf.add((person, sio.SIO_000228, person_study_role))
    rdf.add((person, sio.SIO_000228, person_donor_role))
    #rdf.add((person, sio.SIO_000228, person_patient_role))
    #rdf.add((person, sio.SIO_000008, bc.phenotype_))
    # Identifier
    rdf.add((person_study_id, RDF.type, bco.beat_covid_id))
    rdf.add((person_study_id, sio.SIO_000300, Literal(variables_dict['beat_id'], datatype=XSD.string)))
    rdf.add((person_study_id, obo.IAO_0000219, person_study_role))
    # age
    rdf.add((age, RDF.type, sio.SIO_001013))
    rdf.add((age, sio.SIO_000300, Literal(variables_dict['age'], datatype=XSD.integer)))
    rdf.add((age, sio.SIO_000001, person_study_role))
    # ward
    rdf.add((ward, RDF.type, obo.NCIT_C21541))
    rdf.add((ward, sio.SIO_000300, Literal(variables_dict['ward'], datatype=XSD.string)))
    rdf.add((ward, RDFS.label, Literal(variables_dict['ward'], lang='en')))
    rdf.add((ward, obo.BFO_0000050, institute))
    # institute
    rdf.add((institute, RDF.type, sio.SIO_000688))
    rdf.add((institute, sio.SIO_000300, Literal(variables_dict['institute_abbreviation'], datatype=XSD.string)))
    rdf.add((institute, RDFS.label, Literal(variables_dict['institute_abbreviation'], lang='en')))
    # Role
    rdf.add((person_study_role, RDF.type, sio.SIO_000883))
    rdf.add((person_study_role, obo.RO_0001025, ward))
    rdf.add((person_study_role, obo.RO_0001025, institute))
    # measurement process date
    rdf.add((measurement_process_date, RDF.type, obo.NCIT_C25164))
    rdf.add((measurement_process_date, DCTERMS.date, Literal(variables_dict['lum_date_meas'], datatype=XSD.date)))
    # BIOSAMPLES (SAMPLING PROCESS) MODEL
    # Process
    rdf.add((sampling_process, RDF.type, sio.SIO_001049))
    rdf.add((sampling_process, sio.SIO_000291, biosample_object))
    rdf.add((sampling_process, sio.SIO_000230, person))
    rdf.add((sampling_process, sio.SIO_000229, biosample))
    rdf.add((sampling_process, obo.RO_0002091, order))
    rdf.add((sampling_process, sio.SIO_000008, sampling_process_date))
    # Biosample
    rdf.add((biosample, RDF.type, sio.SIO_001050))
    rdf.add((biosample, sio.SIO_000628, biosample_object))
    # Attribute/object
    rdf.add((biosample_object, RDF.type, sio.SIO_010003))
    rdf.add((biosample_object, obo.BFO_0000050, person))
    # order
    rdf.add((order, RDF.type, obo.NCIT_C48906))
    rdf.add((order, sio.SIO_000300, Literal(variables_dict['order'], datatype=XSD.string)))
    # sampling process date
    rdf.add((sampling_process_date, RDF.type, obo.NCIT_C25164))
    rdf.add((sampling_process_date, DCTERMS.date, Literal(variables_dict['date_sampling'], datatype=XSD.date)))
    # Role
    rdf.add((person_donor_role, RDF.type, obo.OBI_1110087))
    rdf.add((person_donor_role, sio.SIO_000356, sampling_process))
    # Identifier
    # biosample
    rdf.add((biosample_id, RDF.type, bco.record_id))
    rdf.add((biosample_id, sio.SIO_000300, Literal(variables_dict['record_id'], datatype=XSD.string)))
    rdf.add((biosample_id, sio.SIO_000672, biosample))
    # person_donor
    rdf.add((person_donor_id, RDF.type, obo.NCIT_C164796))
    rdf.add((person_donor_id, obo.IAO_0000219, person_donor_role))
    # CLINICAL OBSERVATIONS (EXAMINATION PROCESS) MODEL
    # Identifier
    rdf.add((clinical, RDF.type, bco.clinical_id))
    # Observation
    # observation = bc["clinical/observation/BEATCOVID_" + variables_dict['clinical_observations']]
    # Lab measurement information
    # One measurement per lum_* column (lum_date_* columns are dates,
    # not measurements, and are skipped).
    measurement_number = 0
    for measurement in variables_dict.keys():
        if "lum_date_" in measurement:
            continue
        if "lum" in measurement:
            measurement_number += 1
            # Column name encodes device, protein and kit: lum_<protein>_<kit>.
            device_string, protein_string, kit_string = measurement.split("_")
            # kit
            kit = bc["lab/kit_" + kit_string]
            rdf.add((kit, RDF.type, obo.OBI_0000272))
            rdf.add((kit, sio.SIO_000300, Literal(kit_string, datatype=XSD.string)))
            rdf.add((kit, RDFS.label, Literal(f"Kit {kit_string}", lang='en')))
            # device
            device = bc["lab/device_" + device_string]
            rdf.add((device, RDF.type, obo.OBI_0000968))
            rdf.add((device, sio.SIO_000300, Literal(device_string, datatype=XSD.string)))
            if device_string == "lum":
                rdf.add((device, RDFS.label, Literal("Luminex", lang='en')))
            # Attribute/object
            trait = bc["trait/" + protein_string]
            rdf.add((trait, RDF.type, sio.SIO_010043))
            rdf.add((trait, sio.SIO_000300, Literal(protein_string, datatype=XSD.string)))
            rdf.add((trait, obo.BFO_0000050, person))
            # cytokine gene
            gene = bc["gene/" + protein_string]
            rdf.add((gene, RDF.type, sio.SIO_010035))
            rdf.add((trait, sio.SIO_010079, gene))
            # Measurement
            quantitative_trait = bc["lab/quantitative_trait/BEATCOVID_" + variables_dict['record_id']
                                    + "_" + measurement + "_" + str(measurement_number)]
            rdf.add((quantitative_trait, RDF.type, obo.IAO_0000109))
            rdf.add((quantitative_trait, RDFS.label, Literal(measurement, datatype=XSD.string)))
            rdf.add((quantitative_trait, sio.SIO_000221, efo.EFO_0004385))
            # Out-of-range readings are kept as strings; numeric readings
            # are typed as xsd:float.
            if variables_dict[measurement] == 'OOR <' or variables_dict[measurement] == 'OOR >':
                rdf.add((quantitative_trait, sio.SIO_000300, Literal(variables_dict[measurement], datatype=XSD.string)))
            else:
                rdf.add((quantitative_trait, sio.SIO_000300, Literal(variables_dict[measurement], datatype=XSD.float)))
            rdf.add((quantitative_trait, sio.SIO_000628, trait))
            #rdf.add((trait, sio.SIO_000216, quantitative_trait))
            # unit
            unit = bc["lab/measurement_unit/pg_ml"]
            rdf.add((unit, RDF.type, obo.IAO_0000003))
            rdf.add((unit, RDFS.label, Literal("pg/ml", datatype=XSD.string)))
            # print(measurement_number, measurement, device, protein, kit)
            # Process
            lab_meas_process = bc["lab/measurement_process/BEATCOVID_" + variables_dict['record_id']
                                  + measurement]
            rdf.add((lab_meas_process, RDF.type, obo.OBI_0000070))
            rdf.add((lab_meas_process, sio.SIO_000291, trait))
            rdf.add((lab_meas_process, sio.SIO_000230, biosample))
            rdf.add((lab_meas_process, sio.SIO_000229, quantitative_trait))
            rdf.add((lab_meas_process, sio.SIO_000008, measurement_process_date))
            rdf.add((lab_meas_process, DCTERMS.conformsTo, kit))
            rdf.add((lab_meas_process, sio.SIO_000132, device))
            rdf.add((lab_meas_process, sio.SIO_000628, clinical))
            rdf.add((lab_meas_process, prov.wasInformedBy, sampling_process))
            # role
            rdf.add((person_study_role, sio.SIO_000356, lab_meas_process))
    return rdf
if __name__ == "__main__":
# # args
# if len(sys.argv) < 3:
# print("Missing input parameters. Usage:")
# print(f"\tpython {sys.arv[0] cytokine_csv_file_path rdf_dir_path}")
# exit(1)
#
# # output
# out_path = sys.argv[2]
# if not os.path.isdir(out_path): os.makedirs(out_path)
#
# # rdf
# with open(sys.argv[1]) as file:
# # skip header
# next(file)
#
# for line in file:
# values_tuple = line.rstrip().split(",")
# rdf = generate_rdf(values_tuple)
# rdf.serialize(f"{out_path}/{values_tuple[0].zfill(5)}.ttl", format="turtle")
out_path = "/home/nur/workspace/beat-covid/fair-data-model/rdf"
if not os.path.isdir(out_path): os.makedirs(out_path)
header = 1
rows_list = list()
for line in open("/home/nur/workspace/beat-covid/fair-data-model/cytokine/synthetic-data/"
"BEAT-COVID1_excel_export_2020-05-28_Luminex_synthetic-data.csv"):
if header:
header_tuple = line.rstrip().split("\t")
header = 0
continue
values_tuple = line.rstrip().split("\t")
raw_data_dict = dict(zip(header_tuple,values_tuple))
rows_list.append(raw_data_dict)
for row in rows_list:
crf = generate_rdf(row)
crf.serialize(f"{out_path}/{row['record_id'].zfill(5)}.ttl", format="turtle")
# print(f"row: {row}\nheader: {header_tuple}\nvalues: {values_tuple}")
print(f"row: {row}")
| 40.52901 | 120 | 0.655747 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,749 | 0.315679 |
2c23038de9f75723e8fc2c45f00ef78866fa1bc7 | 793 | py | Python | fastapi/app/api/api_v1/endpoints/utils.py | Kitware/adios-dashboard | a21cf290a31897f77e5bc7d2037b2bbcc97944b5 | [
"BSD-3-Clause"
] | null | null | null | fastapi/app/api/api_v1/endpoints/utils.py | Kitware/adios-dashboard | a21cf290a31897f77e5bc7d2037b2bbcc97944b5 | [
"BSD-3-Clause"
] | 19 | 2019-02-25T20:40:41.000Z | 2020-04-20T17:25:09.000Z | fastapi/app/api/api_v1/endpoints/utils.py | Kitware/adios-dashboard | a21cf290a31897f77e5bc7d2037b2bbcc97944b5 | [
"BSD-3-Clause"
] | 2 | 2019-06-15T11:13:45.000Z | 2019-10-17T17:46:49.000Z | from typing import Any
import msgpack
from app.core.config import settings
from girder_client import GirderClient
from fastapi import HTTPException
from fastapi import Response
cache_settings = {
"directory": "/tmp/cache",
"eviction_policy": "least-frequently-used",
"size_limit": 2**20, # 1g
}
_gc = None
def get_girder_client(girder_token):
    """Return the shared GirderClient, authenticated with *girder_token*.

    The client is created lazily on first use and cached in the module
    global ``_gc``; only the token is refreshed on subsequent calls.  A
    missing token is rejected with an HTTP 400 error.
    """
    global _gc
    if girder_token is None:
        raise HTTPException(status_code=400, detail="Invalid token.")
    client = _gc
    if client is None:
        # First call: construct the singleton client and remember it.
        client = GirderClient(apiUrl=settings.GIRDER_API_URL, cacheSettings=cache_settings)
        _gc = client
    client.setToken(girder_token)
    return client
class MsgpackResponse(Response):
    # FastAPI/Starlette response class that serializes the body with
    # msgpack instead of the default JSON.
    media_type = "application/msgpack"
    def render(self, content: Any) -> bytes:
        """Serialize *content* to msgpack bytes for the HTTP body."""
        return msgpack.packb(content)
2c241102162d05e25463bf9c3e17c6e7e04a0fc0 | 8,838 | py | Python | src/data/preprocess/git_data_preparation.py | SpirinEgor/gnn_pretraining | 116017cfad3548a1f36d1c2ffd1e1881c99d4768 | [
"MIT"
] | 2 | 2021-06-11T09:22:18.000Z | 2021-07-14T03:16:54.000Z | src/data/preprocess/git_data_preparation.py | SpirinEgor/gnn_pretraining | 116017cfad3548a1f36d1c2ffd1e1881c99d4768 | [
"MIT"
] | null | null | null | src/data/preprocess/git_data_preparation.py | SpirinEgor/gnn_pretraining | 116017cfad3548a1f36d1c2ffd1e1881c99d4768 | [
"MIT"
] | 1 | 2021-07-30T04:02:55.000Z | 2021-07-30T04:02:55.000Z | import collections
import glob
import json
import os
import random
import re
from typing import Tuple, Iterator, List, Dict, Optional
from src.data.preprocess.example import Example
_DEFAULT_STATS_BOUNDARIES = {
"Python": {"max_line_len": (37, 741), "content_len": (111, 42476)},
"Java": {"max_line_len": (56, 177), "content_len": (305, 48661)},
"Kotlin": {"max_line_len": (25, 158), "content_len": (69, 20402)},
}
_BAD_TEXT_REGEX = re.compile(r"auto[- ]?generated file", flags=re.IGNORECASE)
_BUCKET_SIZE = 1_000_000
class GitProjectExtractor:
    """Reads a dataset of per-language source files grouped by GitHub
    project, splits the projects into train/val/test holdouts, and
    yields :class:`Example` objects (language, project, original file
    path, file content) with basic quality filtering applied.
    """
    def __init__(
        self,
        raw_data_path: str,
        random_seed: int,
        val_part: Optional[float],
        test_part: Optional[float],
        languages: Tuple[str] = ("Python",),
    ):
        """Collect project metainfo and compute the holdout split.

        :param raw_data_path: dataset root containing ``languages/`` and
            ``repositories/`` subtrees.
        :param random_seed: seed for the project shuffle, so splits are
            reproducible.
        :param val_part: fraction of projects for validation (None -> 0).
        :param test_part: fraction of projects for testing (None -> 0).
        :param languages: languages to read files for.
        """
        self._path: str = raw_data_path
        self._rng: random.Random = random.Random(random_seed)
        self._found_files_amount: Optional[int] = None
        self._holdout_sizes: Dict[str, float] = dict()
        self._holdout_sizes["val"] = val_part if val_part is not None else 0.0
        self._holdout_sizes["test"] = test_part if test_part is not None else 0.0
        assert self._holdout_sizes["val"] + self._holdout_sizes["test"] <= 1.0
        self._holdout_sizes["train"] = 1.0 - self._holdout_sizes["val"] - self._holdout_sizes["test"]
        # holdout name -> list of projects; each project is a list of
        # (language, project_name, original_filename, dataset_path) tuples.
        self._processed_projects: Optional[Dict[str, List[List[Tuple[str, str, str, str]]]]] = None
        print(f"Extracting projects metainfo...")
        self._extract_projects(languages)
    def get_num_examples(self, holdout: str) -> int:
        """Approximate number of files in *holdout* (total files scaled
        by the holdout fraction)."""
        assert self._found_files_amount is not None
        return int(self._found_files_amount * self._holdout_sizes[holdout])
    # Main method
    def get_examples(self, holdout: str) -> Iterator[Example]:
        """Read all files in specified language from dataset and return a project iterator.
        :param holdout: which holdout to return. Can be either "train", "val" and "test"
        :return: Iterator, which returns projects - Lists of Tuples, each of which represent project's files
        """
        return self._generate_examples_iter(holdout)
    # -------------------------------------- Stage methods -------------------------------------- #
    def _extract_projects(self, languages: Tuple[str]):
        """Discover files, group them by project, and split projects into
        the train/val/test holdouts according to the configured sizes."""
        lang_files = self._get_lang_files(languages)
        projects = self._get_files_projects(lang_files)
        found_projects_amount = len(projects)
        (
            processed_projects,
            skipped_projects,
            self._found_files_amount,
        ) = self._process_projects(projects)
        self._processed_projects = dict()
        # Shuffle before splitting so holdouts are random but reproducible.
        self._rng.shuffle(processed_projects)
        train_projects_amount = int(self._holdout_sizes["train"] * len(processed_projects))
        val_projects_amount = int(self._holdout_sizes["val"] * len(processed_projects))
        self._processed_projects["train"] = processed_projects[:train_projects_amount]
        self._processed_projects["val"] = processed_projects[
            train_projects_amount : train_projects_amount + val_projects_amount
        ]
        self._processed_projects["test"] = processed_projects[train_projects_amount + val_projects_amount :]
        print(
            f"Found {found_projects_amount} projects with {self._found_files_amount} files, "
            f"skipped {len(skipped_projects)} projects\n"
        )
        if len(skipped_projects) != 0:
            print(f"Skipped projects: {skipped_projects}\n")
    def _generate_examples_iter(self, holdout: str) -> Iterator[Example]:
        """Yield all project files, one project at a time"""
        def read_file(path):
            with open(path, "rt", encoding="utf-8", errors="ignore") as f:
                return f.read()
        # Examples are buffered and shuffled in buckets so the output
        # stream is not ordered strictly by project.
        bucket_to_shuffle: List[Example] = []
        assert self._processed_projects is not None
        for project in self._processed_projects[holdout]:
            examples = (
                Example(language, proj_name, filename, read_file(path))
                for language, proj_name, filename, path in project
            )
            bucket_to_shuffle.extend(
                example
                for example in examples
                if GitProjectExtractor._is_good_example(example.language, example.file_name, example.source_code)
            )
            if len(bucket_to_shuffle) > _BUCKET_SIZE:
                self._rng.shuffle(bucket_to_shuffle)
                yield from bucket_to_shuffle
                bucket_to_shuffle = []
        yield from bucket_to_shuffle
    @staticmethod
    def _is_good_example(language: str, filename: str, source_code: str) -> bool:
        """Filter out empty, out-of-range (length stats) and
        auto-generated-looking files."""
        if not filename or not source_code:
            return False
        # Check stats
        if not (
            _DEFAULT_STATS_BOUNDARIES[language]["content_len"][0]
            <= len(source_code)
            <= _DEFAULT_STATS_BOUNDARIES[language]["content_len"][1]
            and _DEFAULT_STATS_BOUNDARIES[language]["max_line_len"][0]
            <= max(len(line) for line in source_code.split("\n"))
            <= _DEFAULT_STATS_BOUNDARIES[language]["max_line_len"][1]
        ):
            return False
        # Regex check
        if re.search(_BAD_TEXT_REGEX, source_code):
            return False
        return True
    # --------------------------------- Paths processing methods -------------------------------- #
    def _get_lang_files(self, languages: Tuple[str]) -> List[Tuple[str, str]]:
        """Glob all dataset files for each language; returns
        (path, language) pairs."""
        res: List[Tuple[str, str]] = []
        for language in languages:
            lang_files = glob.glob(
                os.path.join(
                    self._path,
                    "languages",
                    language,
                    ".*",
                    "*",
                    "*",
                    "**",
                    "*.*",
                ),
                recursive=True,
            )
            assert lang_files, f"There are no files in {self._path} with language {language}"
            print(f"Found {len(lang_files)} files' metainfos for {language} lang")
            res.extend((lang_file, language) for lang_file in lang_files)
        return res
    @staticmethod
    def _get_files_projects(lang_files: List[Tuple[str, str]]) -> List[Tuple[str, List[Tuple[str, str]]]]:
        """Group all files by projects"""
        projects = collections.defaultdict(list)
        for (file, lang) in lang_files:
            if os.path.isfile(file):
                # Project name is "<author>/<repo>" taken from the path.
                project_name = os.sep.join(file.split(os.sep)[-3:-1])
                projects[project_name].append((file, lang))
        return list(projects.items())
    def _process_projects(
        self, projects: List[Tuple[str, List[Tuple[str, str]]]]
    ) -> Tuple[List[List[Tuple[str, str, str, str]]], List[str], int]:
        """Search for projects, extract real project names from dataset
        :param projects: output of _get_files_projects.
        :return: a Tuple,
            first item of which is a List, each item of which represents a single GitHub project
                and is itself a List, each item of which represents a single file in the project
                which is written in the specified language
                and is itself a Tuple, first item of which is the path to a file in the project structure,
                the second one is the path to the file in our dataset structure
                the third one is the language of the file.
            second item is the length of projects list.
        """
        processed_projects = []
        skipped_projects = []
        files_amount = 0
        for project_name, files in projects:
            author, repo, branch, filename = files[0][0].split(os.sep)[-4:]
            # paths.json maps dataset file names back to original
            # in-repository paths; projects without it are skipped.
            paths_dict_path = os.path.join(
                self._path,
                "repositories",
                author,
                repo,
                branch,
                "paths.json",
            )
            if os.path.exists(paths_dict_path):
                with open(paths_dict_path, "rt") as f:
                    paths_dict = json.load(f)
                names_and_paths = []
                for (file, lang) in files:
                    if os.path.basename(file) in paths_dict:
                        names_and_paths.append(
                            (
                                lang,
                                project_name,
                                paths_dict[os.path.basename(file)],
                                file,
                            )
                        )
                processed_projects.append(names_and_paths)
                files_amount += len(names_and_paths)
            else:
                skipped_projects.append(f"{author}/{repo}")
        return processed_projects, skipped_projects, files_amount
2c257b998065928806b35178c7ceda8c16c41579 | 508 | py | Python | Stack/1249. Minimum Remove to Make Valid Parentheses.py | Into-Y0u/Github-Baby | 5e4e6b02f49c2c99533289be9d49911006cad919 | [
"MIT"
] | 2 | 2022-01-25T04:30:26.000Z | 2022-01-25T10:36:15.000Z | Stack/1249. Minimum Remove to Make Valid Parentheses.py | Into-Y0u/Leetcode-Baby | 681ad4df01ee908f76d888aa4ccc10f04c03c56f | [
"MIT"
] | null | null | null | Stack/1249. Minimum Remove to Make Valid Parentheses.py | Into-Y0u/Leetcode-Baby | 681ad4df01ee908f76d888aa4ccc10f04c03c56f | [
"MIT"
] | null | null | null | class Solution:
def minRemoveToMakeValid(self, s: str) -> str:
if not s :
return ""
s = list(s)
st = []
for i,n in enumerate(s):
if n == "(":
st.append(i)
elif n == ")" :
if st :
st.pop()
else :
s[i] = ""
while st:
s[st.pop()] = ""
return "".join(s)
| 21.166667 | 50 | 0.275591 | 459 | 0.903543 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.027559 |
2c268ab3d2dfe53839294a3c79fe0f16232d5339 | 4,021 | py | Python | src/docs.py | techouse/alfred-nova-docs | 6874eff74f68661de9707391bf00e9c0a037edfb | [
"MIT"
] | 4 | 2019-01-15T12:10:40.000Z | 2021-01-17T22:11:14.000Z | src/docs.py | techouse/alfred-nova-docs | 6874eff74f68661de9707391bf00e9c0a037edfb | [
"MIT"
] | 1 | 2020-03-31T23:46:00.000Z | 2020-04-01T12:51:18.000Z | src/docs.py | techouse/alfred-nova-docs | 6874eff74f68661de9707391bf00e9c0a037edfb | [
"MIT"
] | 1 | 2019-07-04T03:49:59.000Z | 2019-07-04T03:49:59.000Z | #!/usr/bin/python
# encoding: utf-8
from __future__ import print_function, unicode_literals, absolute_import
import functools
import re
import sys
from textwrap import wrap
from urllib import quote_plus
from algoliasearch.search_client import SearchClient
from config import Config
from workflow import Workflow3, ICON_INFO
# Algolia client
client = SearchClient.create(Config.ALGOLIA_APP_ID, Config.ALGOLIA_SEARCH_ONLY_API_KEY)
index = client.init_index(Config.ALGOLIA_SEARCH_INDEX)
# log
log = None
def cache_key(query, version=Config.DEFAULT_NOVA_VERSION):
"""Make filesystem-friendly cache key"""
key = "{}_{}".format(query, version)
key = key.lower()
key = re.sub(r"[^a-z0-9-_;.]", "-", key)
key = re.sub(r"-+", "-", key)
# log.debug("Cache key : {!r} {!r} -> {!r}".format(query, version, key))
return key
def handle_result(api_dict):
"""Extract relevant info from API result"""
result = {}
for key in {
"objectID",
"version",
"title",
"id",
"permalink",
"content",
"categories",
}:
result[key] = api_dict[key]
return result
def search(query=None, version=Config.DEFAULT_NOVA_VERSION, limit=Config.RESULT_COUNT):
if query:
results = index.search(
query,
{
"facetFilters": ["version:{}".format(version)],
"page": 0,
"hitsPerPage": limit,
},
)
if results is not None and "hits" in results:
return results["hits"]
return []
def main(wf):
if wf.update_available:
# Add a notification to top of Script Filter results
wf.add_item(
"New version available",
"Action this item to install the update",
autocomplete="workflow:update",
icon=ICON_INFO,
)
query = wf.args[0].strip()
# Tag prefix only. Treat as blank query
if query == "v":
query = ""
if not query:
wf.add_item("Search the Nova docs...")
wf.send_feedback()
return 0
# Parse query into query string and tags
words = query.split(" ")
query = []
version = Config.DEFAULT_NOVA_VERSION
for word in words:
if word in Config.SUPPORTED_NOVA_VERSIONS:
version = word.replace("v", "")
else:
query.append(word)
query = " ".join(query)
# log.debug("version: {!r}".format(version))
# log.debug("query: {!r}".format(query))
key = cache_key(query, version)
results = [
handle_result(result)
for result in wf.cached_data(
key, functools.partial(search, query, version), max_age=Config.CACHE_MAX_AGE
)
]
# log.debug("{} results for {!r}, version {!r}".format(len(results), query, version))
# Show results
if not results:
url = "https://www.google.com/search?q={}".format(
quote_plus('"Laravel Nova" {}'.format(query))
)
wf.add_item(
"No matching answers found",
"Shall I try and search Google?",
valid=True,
arg=url,
copytext=url,
quicklookurl=url,
icon=Config.GOOGLE_ICON,
)
for result in results:
subtitle = wrap(result["content"], width=75)[0]
if len(result["content"]) > 75:
subtitle += " ..."
wf.add_item(
uid=result["objectID"],
title=result["title"],
subtitle=subtitle,
arg=result["permalink"],
valid=True,
largetext=result["content"],
copytext=result["permalink"],
quicklookurl=result["permalink"],
icon=Config.NOVA_ICON,
)
# log.debug(result)
wf.send_feedback()
if __name__ == "__main__":
wf = Workflow3(
update_settings={"github_slug": "techouse/alfred-nova-docs", "frequency": 7}
)
log = wf.logger
sys.exit(wf.run(main))
| 25.289308 | 89 | 0.573489 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,075 | 0.267346 |
2c277cfbbfaebb1229ffb7154b1e6cc7851d5fad | 317 | py | Python | lambdataeedwardsa/example_module.py | EEdwardsA/lambdata | 11267522408266b5556d5607a7c875f237aea348 | [
"MIT"
] | null | null | null | lambdataeedwardsa/example_module.py | EEdwardsA/lambdata | 11267522408266b5556d5607a7c875f237aea348 | [
"MIT"
] | null | null | null | lambdataeedwardsa/example_module.py | EEdwardsA/lambdata | 11267522408266b5556d5607a7c875f237aea348 | [
"MIT"
] | 1 | 2020-10-27T22:08:51.000Z | 2020-10-27T22:08:51.000Z | """Lambdata - a collection of Data Science helper functions"""
# import pandas as pd
import numpy as np
fav_numbers = [7,22,4.14]
colors = ['purple','cyan','dark blue','crimson']
def df_cleaner(df):
"""Cleans a dataframe"""
# TODO - implement df_cleaner
pass
def increment(x):
return x + 1
| 15.85 | 62 | 0.646688 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 171 | 0.539432 |
2c2a539bab202c4d327932ae0ff14bcd68090a01 | 3,335 | py | Python | nlptk/topicmodeling/utils.py | GarryGaller/nlp_toolkit | df98ee25f8a1f4379e751fdd4fd9f5389ffbfd1b | [
"MIT"
] | null | null | null | nlptk/topicmodeling/utils.py | GarryGaller/nlp_toolkit | df98ee25f8a1f4379e751fdd4fd9f5389ffbfd1b | [
"MIT"
] | null | null | null | nlptk/topicmodeling/utils.py | GarryGaller/nlp_toolkit | df98ee25f8a1f4379e751fdd4fd9f5389ffbfd1b | [
"MIT"
] | null | null | null | import glob,os,sys
class Path():
'''
>>> paths = Path(source,"*.txt")
>>> for path in paths:
lines = Stream(path)
for line in lines:
print(line)
'''
def __init__(self, source, pattern):
self.source = source
self.pattern = pattern
def __getpaths__(self):
source = os.path.join(self.source, self.pattern)
files = glob.glob(source)
for filename in files:
yield os.path.join(source, filename)
def __iter__(self):
return self.__getpaths__()
class Stream():
'''
>>> lines = Stream(path)
>>> for line in lines:
print(line)
'''
def __init__(self,
encoding=None,
sentencizer=None,
text_filters=[]
):
self.encoding = encoding
self.__sentencizer = sentencizer
self.__text_filters = text_filters
def __call__(self,path):
"""Read lines from filepath."""
with open(path,'r',
encoding = (
self.encoding(path)
if callable(self.encoding)
else self.encoding)
) as fd:
# обрабатываем либо по предложению
if self.__sentencizer:
text = self.preprocess_text(fd.read())
for sentence in self.__sentencizer(text):
yield sentence
# либо по строке
else:
for line in fd:
yield line
def preprocess_text(self,text):
for text_filter in self.__text_filters:
text = text_filter(text)
return text
class Lemmatizer():
def __init__(self, lemmatizer=None,
allowed_tags=set(), disallowed_tags=set()):
self.lemmatize = lemmatizer
self.allowed_tags = set(allowed_tags) - set(disallowed_tags)
def __call__(self,data):
if isinstance(data,(str)):
data = [data]
self.allowed_tags
for lemma,pos in self.lemmatize(data,pos=True):
if self.allowed_tags:
if (self.allowed_tags) and (pos in self.allowed_tags):
yield lemma
else:
yield lemma
class Tokenizer():
def __init__(self,tokenizer=None):
self.tokenize = tokenizer
def __call__(self,data):
return self.tokenize(data)
class CharCleaner():
def __init__(self,cleaners=None):
self.cleaners = cleaners
def __call__(self,data):
for cleaner in self.cleaners:
data = cleaner(data)
return data
class TokenCleaner():
def __init__(self,cleaners=None):
self.cleaners = cleaners
def __call__(self,data):
for cleaner in self.cleaners:
data = cleaner(data)
return data
class LemmaCleaner():
def __init__(self,cleaners=None):
self.cleaners = cleaners
def __call__(self,data):
for cleaner in self.cleaners:
data = cleaner(data)
return data
| 24.703704 | 70 | 0.508246 | 3,283 | 0.972453 | 1,242 | 0.367891 | 0 | 0 | 0 | 0 | 395 | 0.117002 |
2c2b2054f72388db3047d42a5a6ed152993d2b96 | 1,188 | py | Python | opbank/test_opbank.py | Meeshkan/meeshkan-examples | b5a179d380f60ba72b06144c05b232accccc74bd | [
"MIT"
] | 1 | 2020-03-26T11:22:37.000Z | 2020-03-26T11:22:37.000Z | opbank/test_opbank.py | Meeshkan/meeshkan-examples | b5a179d380f60ba72b06144c05b232accccc74bd | [
"MIT"
] | 3 | 2020-03-09T09:30:43.000Z | 2020-05-04T14:42:02.000Z | opbank/test_opbank.py | Meeshkan/meeshkan-examples | b5a179d380f60ba72b06144c05b232accccc74bd | [
"MIT"
] | 1 | 2020-09-17T11:53:16.000Z | 2020-09-17T11:53:16.000Z | import requests
from opbank.opbank_client import OPBankClient
def test_opbank():
requests.delete("http://localhost:8888/admin/storage")
client = OPBankClient()
client.API_URL = 'http://localhost:8000/https://sandbox.apis.op-palvelut.fi/'
payer_iban = 'FI3959986920207073'
receiver_iban = 'FI2350009421535899'
amount = 5
accounts = client.get_accounts()
print('Account list before payment: {}'.format(accounts))
assert 2215.81 == accounts[payer_iban]['balance']
assert 0 == accounts[receiver_iban]['balance']
payment = client.init_payment(payer_iban, receiver_iban, amount)
payment_id = payment['paymentId']
print("Created payment {}".format(payment))
accounts = client.get_accounts()
print('Account list before confirmation: {}'.format(accounts))
assert 2215.81 == accounts[payer_iban]['balance']
assert 0 == accounts[receiver_iban]['balance']
confirmation = client.confirm_payment(payment_id)
accounts = client.get_accounts()
print('Account list after confirmation: {}'.format(accounts))
assert 2210.81 == accounts[payer_iban]['balance']
assert 5 == accounts[receiver_iban]['balance']
| 29.7 | 81 | 0.707912 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 330 | 0.277778 |
2c2c960a6de32452ffb7ccc18e589bd83c133e7c | 12,816 | py | Python | probnmn/trainers/_trainer.py | kdexd/probnmn-clevr | 9c1b2286cf30e9fb045370153c9242a39760e02e | [
"MIT"
] | 69 | 2019-05-14T06:34:25.000Z | 2022-03-07T17:19:40.000Z | probnmn/trainers/_trainer.py | kdexd/probnmn-clevr | 9c1b2286cf30e9fb045370153c9242a39760e02e | [
"MIT"
] | 5 | 2019-07-21T23:00:55.000Z | 2020-11-16T12:38:04.000Z | probnmn/trainers/_trainer.py | kdexd/probnmn-clevr | 9c1b2286cf30e9fb045370153c9242a39760e02e | [
"MIT"
] | 11 | 2019-05-28T03:03:26.000Z | 2021-01-30T12:04:58.000Z | from typing import Any, Dict, Generator, List, Optional
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from probnmn.config import Config
from probnmn.utils.checkpointing import CheckpointManager
class _Trainer(object):
r"""
A base class for generic training of models. This class can have multiple models interacting
with each other, rather than a single model, which is suitable to our use-case (for example,
``module_training`` phase has two models:
:class:`~probnmn.models.program_generator.ProgramGenerator` and
:class:`~probnmn.models.nmn.NeuralModuleNetwork`). It offers full flexibility, with sensible
defaults which may be changed (or disabled) while extending this class.
Extended Summary
----------------
1. Default :class:`~torch.optim.Adam` Optimizer, updates parameters of all models in this
trainer. Learning rate and weight decay for this optimizer are picked up from the provided
config.
2. Default :class:`~torch.optim.lr_scheduler.ReduceLROnPlateau` learning rate scheduler. Gamma
and patience arguments are picked up from the provided config. Observed metric is assumed
to be of type "higher is better". For 'lower is better" metrics, make sure to reciprocate.
3. Tensorboard logging of loss curves, metrics etc.
4. Serialization of models and optimizer as checkpoint (.pth) files after every validation.
The observed metric for keeping track of best checkpoint is of type "higher is better",
follow (2) above if the observed metric is of type "lower is better".
Extend this class and override suitable methods as per requirements, some important ones are:
1. :meth:`step`, provides complete customization, this is the method which comprises of one
full training iteration, and internally calls (in order) - :meth:`_before_iteration`,
:meth:`_do_iteration` and :meth:`_after_iteration`. Most of the times you may not require
overriding this method, instead one of the mentioned three methods called by `:meth:`step`.
2. :meth:`_do_iteration`, with core training loop - what happens every iteration, given a
``batch`` from the dataloader this class holds.
3. :meth:`_before_iteration` and :meth:`_after_iteration`, for any pre- or post-processing
steps. Default behaviour:
* :meth:`_before_iteration` - call ``optimizer.zero_grad()``
* :meth:`_after_iteration` - call ``optimizer.step()`` and do tensorboard logging.
4. :meth:`after_validation`, to specify any steps after evaluation. Default behaviour is to
do learning rate scheduling and log validation metrics on tensorboard.
Notes
-----
All models are `passed by assignment`, so they could be shared with an external evaluator.
Do not set ``self._models = ...`` anywhere while extending this class.
Parameters
----------
config: Config
A :class:`~probnmn.Config` object with all the relevant configuration parameters.
dataloader: torch.utils.data.DataLoader
A :class:`~torch.utils.data.DataLoader` which provides batches of training examples. It
wraps one of :mod:`probnmn.data.datasets` depending on the evaluation phase.
models: Dict[str, Type[nn.Module]]
All the models which interact with each other during training. These are one or more from
:mod:`probnmn.models` depending on the training phase.
serialization_dir: str
Path to a directory for tensorboard logging and serializing checkpoints.
gpu_ids: List[int], optional (default=[0])
List of GPU IDs to use or evaluation, ``[-1]`` - use CPU.
"""
def __init__(
self,
config: Config,
dataloader: DataLoader,
models: Dict[str, nn.Module],
serialization_dir: str,
gpu_ids: List[int] = [0],
):
self._C = config
# Make dataloader cyclic for sampling batches perpetually.
self._dataloader = self._cycle(dataloader)
self._models = models
# Set device according to specified GPU ids.
self._device = torch.device(f"cuda:{gpu_ids[0]}" if gpu_ids[0] >= 0 else "cpu")
# Shift models to device, and wrap in DataParallel for Multi-GPU execution (if needed).
for model_name in self._models:
self._models[model_name] = self._models[model_name].to(self._device)
if len(gpu_ids) > 1 and -1 not in gpu_ids:
# Don't wrap to DataParallel if single GPU ID or -1 (CPU) is provided.
self._models[model_name] = nn.DataParallel(self._models[model_name], gpu_ids)
# Accumulate parameters of all models to construct Adam Optimizer.
all_parameters: List[Any] = []
for model_name in self._models:
all_parameters.extend(list(self._models[model_name].parameters()))
self._optimizer = optim.Adam(
all_parameters, lr=self._C.OPTIM.LR_INITIAL, weight_decay=self._C.OPTIM.WEIGHT_DECAY
)
# Default learning rate scheduler: (lr *= gamma) when observed metric plateaus for
# "patience" number of validation steps.
self._lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
self._optimizer,
mode="max",
factor=self._C.OPTIM.LR_GAMMA,
patience=self._C.OPTIM.LR_PATIENCE,
threshold=1e-3,
)
# Tensorboard summary writer for logging losses and metrics.
self._tensorboard_writer = SummaryWriter(log_dir=serialization_dir)
# Checkpoint manager to serialize model, optimizer and lr scheduler periodically.
self._checkpoint_manager = CheckpointManager(
serialization_dir=serialization_dir,
keep_recent=100,
optimizer=self._optimizer,
scheduler=self._lr_scheduler,
**models,
)
# Initialize a counter to keep track of the iteration number.
# This increments everytime ``step`` is called.
self._iteration: int = -1
def step(self, iteration: Optional[int] = None):
r"""
Perform one iteration of training.
Parameters
----------
iteration: int, optional (default = None)
Iteration number (useful to hard set to any number when loading checkpoint).
If ``None``, use the internal :attr:`self._iteration` counter.
"""
self._before_iteration()
batch = next(self._dataloader)
output_dict = self._do_iteration(batch)
self._after_iteration(output_dict)
self._iteration = iteration or self._iteration + 1
def _before_iteration(self):
r"""
Steps to do before doing the forward pass of iteration. Default behavior is to simply
call :meth:`zero_grad` for optimizer. Called inside :meth:`step`.
"""
self._optimizer.zero_grad()
def _do_iteration(self, batch: Dict[str, Any]) -> Dict[str, Any]:
r"""
Forward and backward passes on models, given a batch sampled from dataloader.
Parameters
----------
batch: Dict[str, Any]
A batch of training examples sampled from dataloader. See :func:`step` and
:meth:`_cycle` on how this batch is sampled.
Returns
-------
Dict[str, Any]
An output dictionary typically returned by the models. This would be passed to
:meth:`_after_iteration` for tensorboard logging.
"""
# What a single iteration usually would look like.
iteration_output_dict = self._models["model"](batch)
batch_loss = iteration_output_dict["loss"].mean()
batch_loss.backward()
return {"loss": batch_loss}
def _after_iteration(self, output_dict: Dict[str, Any]):
r"""
Steps to do after doing the forward pass of iteration. Default behavior is to simply
do gradient update through ``optimizer.step()``, and log metrics to tensorboard.
Parameters
----------
output_dict: Dict[str, Any]
This is exactly the object returned by :meth:_do_iteration`, which would contain all
the required losses for tensorboard logging.
"""
self._optimizer.step()
# keys: {"loss"} + ... {other keys such as "elbo"}
for key in output_dict:
if isinstance(output_dict[key], dict):
# Use ``add_scalars`` for dicts in a nested ``output_dict``.
self._tensorboard_writer.add_scalars(
f"train/{key}", output_dict[key], self._iteration
)
else:
# Use ``add_scalar`` for floats / zero-dim tensors in ``output_dict``.
self._tensorboard_writer.add_scalar(
f"train/{key}", output_dict[key], self._iteration
)
def after_validation(self, val_metrics: Dict[str, Any], iteration: Optional[int] = None):
r"""
Steps to do after an external :class:`~probnmn.evaluators._evaluator._Evaluator` performs
evaluation. This is not called by :meth:`step`, call it from outside at appropriate time.
Default behavior is to perform learning rate scheduling, serializaing checkpoint and to
log validation metrics to tensorboard.
Since this implementation assumes a key ``"metric"`` in ``val_metrics``, it is convenient
to set this key while overriding this method, when there are multiple models and multiple
metrics and there is one metric which decides best checkpoint.
Parameters
----------
val_metrics: Dict[str, Any]
Validation metrics for all the models. Returned by ``evaluate`` method of
:class:`~probnmn.evaluators._evaluator._Evaluator` (or its extended class).
iteration: int, optional (default = None)
Iteration number. If ``None``, use the internal :attr:`self._iteration` counter.
"""
if iteration is not None:
self._iteration = iteration
# Serialize model and optimizer and keep track of best checkpoint.
self._checkpoint_manager.step(self._iteration, val_metrics["metric"])
# Perform learning rate scheduling based on validation perplexity.
self._lr_scheduler.step(val_metrics["metric"])
# Log learning rate after scheduling.
self._tensorboard_writer.add_scalar(
"train/lr", self._optimizer.param_groups[0]["lr"], self._iteration
)
# Log all validation metrics to tensorboard (pop the "metric" key, which was only relevant
# to learning rate scheduling and checkpointing).
val_metrics.pop("metric")
for model_name in val_metrics:
for metric_name in val_metrics[model_name]:
self._tensorboard_writer.add_scalar(
f"val/metrics/{model_name}/{metric_name}",
val_metrics[model_name][metric_name],
self._iteration,
)
def load_checkpoint(self, checkpoint_path: str, iteration: Optional[int] = None):
r"""
Load a checkpoint to continue training from. The iteration when this checkpoint was
serialized, is inferred from its name (so do not rename after serialization).
Parameters
----------
checkpoint_path: str
Path to a checkpoint containing models and optimizers of the phase which is being
trained on.
iteration: int, optional (default = None)
Iteration number. If ``None``, get it from the checkpoint.
"""
_iteration = self._checkpoint_manager.load(checkpoint_path)
# By default, the provided iteration overrides what is found in checkpoint.
iteration = iteration or _iteration
self._iteration = iteration
def _cycle(self, dataloader: DataLoader) -> Generator[Dict[str, torch.Tensor], None, None]:
r"""
A generator which yields a random batch from dataloader perpetually. This generator is
used in the constructor.
Extended Summary
----------------
This is done so because we train for a fixed number of iterations, and do not have the
notion of 'epochs'. Using ``itertools.cycle`` with dataloader is harmful and may cause
unexpeced memory leaks.
"""
while True:
for batch in dataloader:
for key in batch:
batch[key] = batch[key].to(self._device)
yield batch
@property
def iteration(self):
return self._iteration
@property
def models(self):
return self._models
| 43.297297 | 98 | 0.650983 | 12,543 | 0.978699 | 697 | 0.054385 | 124 | 0.009675 | 0 | 0 | 8,287 | 0.646614 |
2c2cbc36c0799379271e1626bb734e2b62052e7b | 3,334 | py | Python | testplan/common/utils/exceptions.py | dobragab/testplan | 407ac1dfd33d19753e41235a1f576aeb06118840 | [
"Apache-2.0"
] | null | null | null | testplan/common/utils/exceptions.py | dobragab/testplan | 407ac1dfd33d19753e41235a1f576aeb06118840 | [
"Apache-2.0"
] | 64 | 2019-04-15T20:56:40.000Z | 2021-03-23T01:00:30.000Z | testplan/common/utils/exceptions.py | dobragab/testplan | 407ac1dfd33d19753e41235a1f576aeb06118840 | [
"Apache-2.0"
] | 1 | 2019-09-11T09:13:18.000Z | 2019-09-11T09:13:18.000Z | """Utilities for exception handling."""
import logging
import functools
import inspect
LOGGER = logging.getLogger(__name__)
def should_raise(exception, item, args=None, kwargs=None, pattern=None):
"""
"Utility that validates callable should raise specific exception.
:param exception: Exception should be raised.
:type exception: ``Exception``
:param item: Callable that should raise.
:type item: ``callable``
:param args: Callable args.
:type args: ``tuple``
:param kwargs: Callable kwargs.
:type kwargs: ``dict``
:param pattern: Compiled regex pattern that needs to match the exception.
:type pattern: Compiled ``regex``
"""
try:
item(*args or tuple(), **kwargs or {})
except Exception as exc:
assert isinstance(exc, exception)
if pattern:
if not pattern.match(str(exc)):
raise Exception('Exception msg incorrect - {}'.format(exc))
else:
raise Exception('Should raise {} exception.'.format(exception))
def suppress_exception(logger=LOGGER):
"""
Suppress & log exceptions for the given function.
This is mostly used during exporters steps, as we would like to
return the original retcode from testplan run, without raising any
non-test-related errors.
"""
def _wrapper(func):
@functools.wraps(func)
def _inner(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as exc:
logger.exception(exc)
return _inner
return _wrapper
def _safe_repr(obj):
"""
Exception safe repr()
"""
try:
return repr(obj)
except Exception:
return '<?>'
def _format_args(args):
"""
Format function arguments for a stack trace
"""
def _format_line(line):
"""
Format a single line
"""
if len(line) > 120:
return ' {}...'.format(line[:113])
else:
return ' {}'.format(line)
rargs = [_safe_repr(arg) for arg in args]
rargs_size = sum(len(rarg) for rarg in rargs)
if rargs_size > 80:
return '(\n{})'.format(',\n'.join(
[_format_line(rarg) for rarg in rargs]))
else:
return '({})'.format(', '.join(rargs))
def format_trace(trace, exception=None):
"""
Return a string containing a stack trace, including an attempt
at displaying argument values, within a certain size.
Example::
>>> format_trace(inspect.trace(), exception)
"""
output = ['Traceback (most recent call last):\n']
for frame, fpath, line, parent, code, _ in trace:
fmtted_args = ''
try:
args = inspect.getargvalues(frame)
args = [args.locals[name] for name in args.args]
fmtted_args = _format_args(args)
except Exception:
pass
finally:
del frame
output.append(
' File "{}", line {}, in {}{}\n'.format(
fpath, line, parent, fmtted_args))
for line in code or []:
output.append(' {}\n'.format(line.strip()))
if exception is not None:
output.append('{}: {}'.format(
getattr(type(exception), '__name__', '<Unknown>'), exception))
return ''.join(output)
| 28.741379 | 77 | 0.586383 | 0 | 0 | 0 | 0 | 196 | 0.058788 | 0 | 0 | 1,321 | 0.396221 |
2c2e48e936899f7a8f86e2d400517f71ba883959 | 941 | py | Python | models.py | jarbaugh5/laundry_stats | 39e79854728e1c8012b2876bb3e3f93f48b42e04 | [
"Apache-2.0"
] | null | null | null | models.py | jarbaugh5/laundry_stats | 39e79854728e1c8012b2876bb3e3f93f48b42e04 | [
"Apache-2.0"
] | null | null | null | models.py | jarbaugh5/laundry_stats | 39e79854728e1c8012b2876bb3e3f93f48b42e04 | [
"Apache-2.0"
] | null | null | null | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.types import DateTime
Base = declarative_base()
class Building(Base):
__tablename__ = "buildings"
id = Column(Integer, primary_key=True)
name = Column(String)
class MachineType(Base):
__tablename__ = "machinetypes"
id = Column(Integer, primary_key=True)
name = Column(String)
class Machine(Base):
__tablename__ = "machines"
id = Column(Integer, primary_key=True)
tufts_id = Column(Integer)
room = Column(Integer, ForeignKey("buildings.id"))
type = Column(Integer, ForeignKey("machinetypes.id"))
class UsageRecord(Base):
__tablename__ = "usagerecords"
id = Column(Integer, primary_key=True)
machine = Column(Integer, ForeignKey("machines.id"))
available = Column(Integer)
time_remaining = Column(Integer)
timestamp = Column(DateTime) | 26.885714 | 58 | 0.724761 | 753 | 0.800213 | 0 | 0 | 0 | 0 | 0 | 0 | 93 | 0.098831 |
2c2e65f16dfdb4e288612747e4ae2bfab58d0f33 | 1,093 | py | Python | 35_search_insert_position.py | sreedharg/leetcode | 28c162c0f6923e4feaa0d6279c00dfe8237c726d | [
"MIT"
] | null | null | null | 35_search_insert_position.py | sreedharg/leetcode | 28c162c0f6923e4feaa0d6279c00dfe8237c726d | [
"MIT"
] | null | null | null | 35_search_insert_position.py | sreedharg/leetcode | 28c162c0f6923e4feaa0d6279c00dfe8237c726d | [
"MIT"
] | null | null | null | #Given a sorted array and a target value, return the index if the target is found. If not, return the index
#where it would be if it were inserted in order.
#You may assume no duplicates in the array.
class Solution(object):
def searchInsert(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
self._nums = nums
self._target = target
if len(nums) == 0:
return 0
return self.findPos(0, len(nums) - 1)
def findPos(self, low, high):
#print(low, high, nums[low:high + 1])
if low >= high:
if self._target <= self._nums[low]:
return low
else:
return low + 1
mid = (low + high) // 2
if self._target == self._nums[mid]:
return mid
elif self._target < self._nums[mid]:
return self.findPos(low, mid - 1)
else:
return self.findPos(mid + 1, high)
sol = Solution()
res = sol.searchInsert([1,3], 0)
print(res) | 28.025641 | 108 | 0.52882 | 799 | 0.731016 | 0 | 0 | 0 | 0 | 0 | 0 | 327 | 0.299177 |
2c2e6d621114acc6ba86ff17d3529ddf9840b59d | 104,760 | py | Python | Examples/ApiExamples/ex_document.py | alex-dudin/Aspose.Words-for-Python-via-.NET | 02b257df8da9892fcce671c473c2ef27b68b5087 | [
"MIT"
] | 3 | 2021-12-04T22:17:28.000Z | 2022-02-22T03:30:01.000Z | Examples/ApiExamples/ex_document.py | alex-dudin/Aspose.Words-for-Python-via-.NET | 02b257df8da9892fcce671c473c2ef27b68b5087 | [
"MIT"
] | 4 | 2021-11-26T10:01:06.000Z | 2021-12-14T15:01:11.000Z | Examples/ApiExamples/ex_document.py | alex-dudin/Aspose.Words-for-Python-via-.NET | 02b257df8da9892fcce671c473c2ef27b68b5087 | [
"MIT"
] | 2 | 2021-10-20T18:06:22.000Z | 2021-10-29T20:59:18.000Z | # Copyright (c) 2001-2022 Aspose Pty Ltd. All Rights Reserved.
#
# This file is part of Aspose.Words. The source code in this file
# is only intended as a supplement to the documentation, and is provided
# "as is", without warranty of any kind, either expressed or implied.
import unittest
import io
import os
import glob
from urllib.request import urlopen
from datetime import datetime, timedelta, timezone
import aspose.words as aw
import aspose.pydrawing as drawing
from api_example_base import ApiExampleBase, MY_DIR, ARTIFACTS_DIR, IMAGE_DIR, FONTS_DIR, GOLDS_DIR
from document_helper import DocumentHelper
class ExDocument(ApiExampleBase):
def test_constructor(self):
#ExStart
#ExFor:Document.__init__()
#ExFor:Document.__init__(str,LoadOptions)
#ExSummary:Shows how to create and load documents.
# There are two ways of creating a Document object using Aspose.Words.
# 1 - Create a blank document:
doc = aw.Document()
# New Document objects by default come with the minimal set of nodes
# required to begin adding content such as text and shapes: a Section, a Body, and a Paragraph.
doc.first_section.body.first_paragraph.append_child(aw.Run(doc, "Hello world!"))
# 2 - Load a document that exists in the local file system:
doc = aw.Document(MY_DIR + "Document.docx")
# Loaded documents will have contents that we can access and edit.
self.assertEqual("Hello World!", doc.first_section.body.first_paragraph.get_text().strip())
# Some operations that need to occur during loading, such as using a password to decrypt a document,
# can be done by passing a LoadOptions object when loading the document.
doc = aw.Document(MY_DIR + "Encrypted.docx", aw.loading.LoadOptions("docPassword"))
self.assertEqual("Test encrypted document.", doc.first_section.body.first_paragraph.get_text().strip())
#ExEnd
def test_load_from_stream(self):
#ExStart
#ExFor:Document.__init__(BytesIO)
#ExSummary:Shows how to load a document using a stream.
with open(MY_DIR + "Document.docx", "rb") as stream:
doc = aw.Document(stream)
self.assertEqual("Hello World!\r\rHello Word!\r\r\rHello World!", doc.get_text().strip())
#ExEnd
def test_load_from_web(self):
#ExStart
#ExFor:Document.__init__(BytesIO)
#ExSummary:Shows how to load a document from a URL.
# Create a URL that points to a Microsoft Word document.
url = "https://omextemplates.content.office.net/support/templates/en-us/tf16402488.dotx"
# Download the document into a byte array, then load that array into a document using a memory stream.
data_bytes = urlopen(url).read()
with io.BytesIO(data_bytes) as byte_stream:
doc = aw.Document(byte_stream)
# At this stage, we can read and edit the document's contents and then save it to the local file system.
self.assertEqual("Use this section to highlight your relevant passions, activities, and how you like to give back. " +
"It’s good to include Leadership and volunteer experiences here. " +
"Or show off important extras like publications, certifications, languages and more.",
doc.first_section.body.paragraphs[4].get_text().strip())
doc.save(ARTIFACTS_DIR + "Document.load_from_web.docx")
#ExEnd
self.verify_web_response_status_code(200, url)
def test_convert_to_pdf(self):
#ExStart
#ExFor:Document.__init__(str)
#ExFor:Document.save(str)
#ExSummary:Shows how to open a document and convert it to .PDF.
doc = aw.Document(MY_DIR + "Document.docx")
doc.save(ARTIFACTS_DIR + "Document.convert_to_pdf.pdf")
#ExEnd
def test_save_to_image_stream(self):
#ExStart
#ExFor:Document.save(BytesIO,SaveFormat)
#ExSummary:Shows how to save a document to an image via stream, and then read the image from that stream.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.font.name = "Times New Roman"
builder.font.size = 24
builder.writeln("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.")
builder.insert_image(IMAGE_DIR + "Logo.jpg")
with io.BytesIO() as stream:
doc.save(stream, aw.SaveFormat.BMP)
stream.seek(0, os.SEEK_SET)
# Read the stream back into an image.
with drawing.Image.from_stream(stream) as image:
self.assertEqual(drawing.imaging.ImageFormat.bmp, image.raw_format)
self.assertEqual(816, image.width)
self.assertEqual(1056, image.height)
#ExEnd
#def test_open_type(self):
# #ExStart
# #ExFor:LayoutOptions.text_shaper_factory
# #ExSummary:Shows how to support OpenType features using the HarfBuzz text shaping engine.
# doc = aw.Document(MY_DIR + "OpenType text shaping.docx")
# # Aspose.Words can use externally provided text shaper objects,
# # which represent fonts and compute shaping information for text.
# # A text shaper factory is necessary for documents that use multiple fonts.
# # When the text shaper factory is set, the layout uses OpenType features.
# # An Instance property returns a static BasicTextShaperCache object wrapping HarfBuzzTextShaperFactory.
# doc.layout_options.text_shaper_factory = aw.shaping.harfbuzz.HarfBuzzTextShaperFactory.instance
# # Currently, text shaping is performing when exporting to PDF or XPS formats.
# doc.save(ARTIFACTS_DIR + "Document.open_type.pdf")
# #ExEnd
def test_detect_pdf_document_format(self):
    """Verify that file-format detection recognizes a PDF."""
    file_info = aw.FileFormatUtil.detect_file_format(MY_DIR + "Pdf Document.pdf")
    self.assertEqual(file_info.load_format, aw.LoadFormat.PDF)
def test_open_pdf_document(self):
    """Load a PDF and check that its full text round-trips."""
    pdf_doc = aw.Document(MY_DIR + "Pdf Document.pdf")
    expected_text = (
        "Heading 1\rHeading 1.1.1.1 Heading 1.1.1.2\rHeading 1.1.1.1.1.1.1.1.1 Heading 1.1.1.1.1.1.1.1.2\u000c")
    self.assertEqual(expected_text, pdf_doc.range.text)
def test_open_protected_pdf_document(self):
    """Save an encrypted PDF, then reopen it with the matching password."""
    source_doc = aw.Document(MY_DIR + "Pdf Document.pdf")
    # Encrypt the output with an owner/user password.
    pdf_save_options = aw.saving.PdfSaveOptions()
    pdf_save_options.encryption_details = aw.saving.PdfEncryptionDetails("Aspose", None)
    source_doc.save(ARTIFACTS_DIR + "Document.open_protected_pdf_document.pdf", pdf_save_options)
    # Supply the same password when loading the protected file back.
    pdf_load_options = aw.loading.PdfLoadOptions()
    pdf_load_options.password = "Aspose"
    pdf_load_options.load_format = aw.LoadFormat.PDF
    source_doc = aw.Document(ARTIFACTS_DIR + "Document.open_protected_pdf_document.pdf", pdf_load_options)
def test_open_from_stream_with_base_uri(self):
    """Load an HTML stream whose relative image URIs resolve against a base folder."""
    #ExStart
    #ExFor:Document.__init__(BytesIO,LoadOptions)
    #ExFor:LoadOptions.__init__()
    #ExFor:LoadOptions.base_uri
    #ExSummary:Shows how to open an HTML document with images from a stream using a base URI.
    with open(MY_DIR + "Document.html", "rb") as html_stream:
        # Pass the URI of the base folder while loading it
        # so that any images with relative URIs in the HTML document can be found.
        opts = aw.loading.LoadOptions()
        opts.base_uri = IMAGE_DIR
        doc = aw.Document(html_stream, opts)
    # Verify that the first shape of the document contains a valid image.
    first_shape = doc.get_child(aw.NodeType.SHAPE, 0, True).as_shape()
    self.assertTrue(first_shape.is_image)
    self.assertIsNotNone(first_shape.image_data.image_bytes)
    self.assertAlmostEqual(32.0, aw.ConvertUtil.point_to_pixel(first_shape.width), delta=0.01)
    self.assertAlmostEqual(32.0, aw.ConvertUtil.point_to_pixel(first_shape.height), delta=0.01)
    #ExEnd
@unittest.skip("Need to rework.")
def test_insert_html_from_web_page(self):
    """Download a web page and save it as a .docx file.

    Currently skipped: depends on a `WebClient` helper that needs reworking.
    """
    #ExStart
    #ExFor:Document.__init__(BytesIO,LoadOptions)
    #ExFor:LoadOptions.__init__(LoadFormat,str,str)
    #ExFor:LoadFormat
    #ExSummary:Shows how save a web page as a .docx file.
    url = "http://www.aspose.com/"
    client = WebClient()
    with io.BytesIO(client.download_data(url)) as stream:
        # The URL is used again as a "base_uri" to ensure that any relative image paths are retrieved correctly.
        options = aw.loading.LoadOptions(aw.LoadFormat.HTML, "", url)
        # Load the HTML document from stream and pass the LoadOptions object.
        doc = aw.Document(stream, options)
        # At this stage, we can read and edit the document's contents and then save it to the local file system.
        self.assertEqual("File Format APIs", doc.first_section.body.paragraphs[1].runs[0].get_text().strip()) #ExSkip
        doc.save(ARTIFACTS_DIR + "Document.insert_html_from_web_page.docx")
    #ExEnd
    self.verify_web_response_status_code(200, url)
def test_load_encrypted(self):
    """Open a password-protected document by filename and by stream."""
    #ExStart
    #ExFor:Document.__init__(BytesIO,LoadOptions)
    #ExFor:Document.__init__(str,LoadOptions)
    #ExFor:LoadOptions
    #ExFor:LoadOptions.__init__(str)
    #ExSummary:Shows how to load an encrypted Microsoft Word document.
    # Aspose.Words throw an exception if we try to open an encrypted document without its password.
    with self.assertRaises(Exception):
        doc = aw.Document(MY_DIR + "Encrypted.docx")
    # When loading such a document, the password is passed to the document's constructor using a LoadOptions object.
    load_options = aw.loading.LoadOptions("docPassword")
    # There are two ways of loading an encrypted document with a LoadOptions object.
    # 1 - Load the document from the local file system by filename:
    doc = aw.Document(MY_DIR + "Encrypted.docx", load_options)
    self.assertEqual("Test encrypted document.", doc.get_text().strip()) #ExSkip
    # 2 - Load the document from a stream:
    with open(MY_DIR + "Encrypted.docx", "rb") as doc_stream:
        doc = aw.Document(doc_stream, load_options)
        self.assertEqual("Test encrypted document.", doc.get_text().strip()) #ExSkip
    #ExEnd
def test_temp_folder(self):
    """Load a document while spilling intermediate data to a temp directory."""
    #ExStart
    #ExFor:LoadOptions.temp_folder
    #ExSummary:Shows how to load a document using temporary files.
    # Note that such an approach can reduce memory usage but degrades speed
    opts = aw.loading.LoadOptions()
    opts.temp_folder = "C:\\TempFolder\\"
    # Ensure that the directory exists and load
    os.makedirs(opts.temp_folder, exist_ok=True)
    doc = aw.Document(MY_DIR + "Document.docx", opts)
    #ExEnd
def test_convert_to_html(self):
    """Convert a DOCX document to HTML with an explicit save format."""
    #ExStart
    #ExFor:Document.save(str,SaveFormat)
    #ExFor:SaveFormat
    #ExSummary:Shows how to convert from DOCX to HTML format.
    document = aw.Document(MY_DIR + "Document.docx")
    document.save(ARTIFACTS_DIR + "Document.convert_to_html.html", aw.SaveFormat.HTML)
    #ExEnd
def test_convert_to_mhtml(self):
    """Convert a DOCX document to MHTML (format inferred from the extension)."""
    document = aw.Document(MY_DIR + "Document.docx")
    document.save(ARTIFACTS_DIR + "Document.convert_to_mhtml.mht")
def test_convert_to_txt(self):
    """Convert a DOCX document to plain text (format inferred from the extension)."""
    document = aw.Document(MY_DIR + "Document.docx")
    document.save(ARTIFACTS_DIR + "Document.convert_to_txt.txt")
def test_convert_to_epub(self):
    """Convert a DOCX document to EPUB (format inferred from the extension)."""
    document = aw.Document(MY_DIR + "Rendering.docx")
    document.save(ARTIFACTS_DIR + "Document.convert_to_epub.epub")
def test_save_to_stream(self):
    """Save a document into an in-memory stream and reload it from there."""
    #ExStart
    #ExFor:Document.save(BytesIO,SaveFormat)
    #ExSummary:Shows how to save a document to a stream.
    doc = aw.Document(MY_DIR + "Document.docx")
    with io.BytesIO() as out_stream:
        doc.save(out_stream, aw.SaveFormat.DOCX)
        # Verify that the stream contains the document.
        self.assertEqual("Hello World!\r\rHello Word!\r\r\rHello World!",
                         aw.Document(out_stream).get_text().strip())
    #ExEnd
##ExStart
##ExFor:INodeChangingCallback
##ExFor:INodeChangingCallback.node_inserting
##ExFor:INodeChangingCallback.node_inserted
##ExFor:INodeChangingCallback.node_removing
##ExFor:INodeChangingCallback.node_removed
##ExFor:NodeChangingArgs
##ExFor:NodeChangingArgs.node
##ExFor:DocumentBase.node_changing_callback
##ExSummary:Shows how customize node changing with a callback.
#def test_font_change_via_callback(self):
# doc = aw.Document()
# builder = aw.DocumentBuilder(doc)
# # Set the node changing callback to custom implementation,
# # then add/remove nodes to get it to generate a log.
# callback = ExDocument.HandleNodeChangingFontChanger()
# doc.node_changing_callback = callback
# builder.writeln("Hello world!")
# builder.writeln("Hello again!")
# builder.insert_field(" HYPERLINK \"https://www.google.com/\" ")
# builder.insert_shape(aw.drawing.ShapeType.RECTANGLE, 300, 300)
# doc.range.fields[0].remove()
# print(callback.get_log())
# self._test_font_change_via_callback(callback.get_log()) #ExSkip
#class HandleNodeChangingFontChanger(aw.INodeChangingCallback):
# """Logs the date and time of each node insertion and removal.
# Sets a custom font name/size for the text contents of Run nodes."""
# def __init__(self):
# self.log = io.StringIO()
# def node_inserted(self, args: aw.NodeChangingArgs):
# self.log.write(f"\tType:\t{args.node.node_type}\n")
# self.log.write(f"\tHash:\t{args.node.get_hash_code()}\n")
# if args.node.node_type == aw.NodeType.RUN:
# font = args.node.as_run().font
# self.log.write(f"\tFont:\tChanged from \"{font.Name}\" {font.Size}pt")
# font.size = 24
# font.name = "Arial"
# self.log.write(f" to \"{font.Name}\" {font.Size}pt\n")
# self.log.write(f"\tContents:\n\t\t\"{args.node.get_text()}\"\n")
# def node_inserting(self, args: aw.NodeChangingArgs):
# self.log.write(f"\n{datetime.now().strftime('%d/%m/%Y %H:%M:%S')}\tNode insertion:\n")
# def node_removed(self, args: aw.NodeChangingArgs):
# self.log.write(f"\tType:\t{args.node.node_type}\n")
# self.log.write(f"\tHash code:\t{hash(args.node)}\n")
# def node_removing(self, args: aw.NodeChangingArgs):
# self.log.write(f"\n{datetime.now().strftime('%d/%m/%Y %H:%M:%S')}\tNode removal:\n")
# def get_log(self) -> str:
# return self.log.getvalue()
##ExEnd
#def _test_font_change_via_callback(self, log: str):
# self.assertEqual(10, log.count("insertion"))
# self.assertEqual(5, log.count("removal"))
def test_append_document(self):
    """Append one document to another while keeping source formatting."""
    #ExStart
    #ExFor:Document.append_document(Document,ImportFormatMode)
    #ExSummary:Shows how to append a document to the end of another document.
    src_doc = aw.Document()
    src_doc.first_section.body.append_paragraph("Source document text. ")
    dst_doc = aw.Document()
    dst_doc.first_section.body.append_paragraph("Destination document text. ")
    # Append the source document to the destination document while preserving its formatting,
    # then save the source document to the local file system.
    dst_doc.append_document(src_doc, aw.ImportFormatMode.KEEP_SOURCE_FORMATTING)
    self.assertEqual(2, dst_doc.sections.count) #ExSkip
    dst_doc.save(ARTIFACTS_DIR + "Document.append_document.docx")
    #ExEnd
    # The saved file should begin with the destination text and end with the source text.
    saved_text = aw.Document(ARTIFACTS_DIR + "Document.append_document.docx").get_text()
    self.assertTrue(saved_text.startswith(dst_doc.get_text()))
    self.assertTrue(saved_text.endswith(src_doc.get_text()))
# The file path used below does not point to an existing file.
def test_append_document_from_automation(self):
    """Mimic an Office-automation style append loop, tolerating missing source files.

    The nonexistent "C:\\DetailsList.doc" path makes each load raise, so every
    iteration appends an empty document; the assertRaises blocks capture the
    expected failures rather than aborting the loop.
    """
    doc = aw.Document()
    # We should call this method to clear this document of any existing content.
    doc.remove_all_children()
    record_count = 5
    for i in range(1, record_count + 1):
        src_doc = aw.Document()
        # Loading from the nonexistent path is expected to fail.
        with self.assertRaises(Exception):
            src_doc = aw.Document("C:\\DetailsList.doc")
        # Append the source document at the end of the destination document.
        doc.append_document(src_doc, aw.ImportFormatMode.USE_DESTINATION_STYLES)
        # Automation required you to insert a new section break at this point, however, in Aspose.Words we
        # do not need to do anything here as the appended document is imported as separate sections already
        # Unlink all headers/footers in this section from the previous section headers/footers
        # if this is the second document or above being appended.
        if i > 1:
            # The appended documents were empty, so indexing sections[i] raises.
            with self.assertRaises(Exception):
                doc.sections[i].headers_footers.link_to_previous(False)
def test_import_list(self):
    """Append a document with numbered lists, toggling source-numbering retention."""
    for is_keep_source_numbering in (True, False):
        with self.subTest(is_keep_source_numbering=is_keep_source_numbering):
            #ExStart
            #ExFor:ImportFormatOptions.keep_source_numbering
            #ExSummary:Shows how to import a document with numbered lists.
            src_doc = aw.Document(MY_DIR + "List source.docx")
            dst_doc = aw.Document(MY_DIR + "List destination.docx")
            self.assertEqual(4, dst_doc.lists.count)
            import_options = aw.ImportFormatOptions()
            # If there is a clash of list styles, apply the list format of the source document.
            # Set the "keep_source_numbering" property to "False" to not import any list numbers into the destination document.
            # Set the "keep_source_numbering" property to "True" import all clashing
            # list style numbering with the same appearance that it had in the source document.
            import_options.keep_source_numbering = is_keep_source_numbering
            dst_doc.append_document(src_doc, aw.ImportFormatMode.KEEP_SOURCE_FORMATTING, import_options)
            dst_doc.update_list_labels()
            # Keeping source numbering imports the clashing list as a fifth list.
            expected_list_count = 5 if is_keep_source_numbering else 4
            self.assertEqual(expected_list_count, dst_doc.lists.count)
            #ExEnd
def test_keep_source_numbering_same_list_ids(self):
    """Resolve clashing list-definition identifiers when appending documents."""
    #ExStart
    #ExFor:ImportFormatOptions.keep_source_numbering
    #ExFor:NodeImporter.__init__(DocumentBase,DocumentBase,ImportFormatMode,ImportFormatOptions)
    #ExSummary:Shows how resolve a clash when importing documents that have lists with the same list definition identifier.
    src_doc = aw.Document(MY_DIR + "List with the same definition identifier - source.docx")
    dst_doc = aw.Document(MY_DIR + "List with the same definition identifier - destination.docx")
    # Set the "keep_source_numbering" property to "True" to apply a different list definition ID
    # to identical styles as Aspose.Words imports them into destination documents.
    fmt_options = aw.ImportFormatOptions()
    fmt_options.keep_source_numbering = True
    dst_doc.append_document(src_doc, aw.ImportFormatMode.USE_DESTINATION_STYLES, fmt_options)
    dst_doc.update_list_labels()
    #ExEnd
    last_para = dst_doc.sections[1].body.last_paragraph
    self.assertTrue(last_para.get_text().startswith("13->13"))
    self.assertEqual("1.", last_para.list_label.label_string)
def test_merge_pasted_lists(self):
    """Merge pasted lists into the surrounding lists during an append."""
    #ExStart
    #ExFor:ImportFormatOptions.merge_pasted_lists
    #ExSummary:Shows how to merge lists from a documents.
    src_doc = aw.Document(MY_DIR + "List item.docx")
    dst_doc = aw.Document(MY_DIR + "List destination.docx")
    merge_options = aw.ImportFormatOptions()
    # Set the "merge_pasted_lists" property to "True" pasted lists will be merged with surrounding lists.
    merge_options.merge_pasted_lists = True
    dst_doc.append_document(src_doc, aw.ImportFormatMode.USE_DESTINATION_STYLES, merge_options)
    dst_doc.save(ARTIFACTS_DIR + "Document.merge_pasted_lists.docx")
    #ExEnd
def test_validate_individual_document_signatures(self):
    """Validate each digital signature in a signed document and print its details."""
    #ExStart
    #ExFor:CertificateHolder.certificate
    #ExFor:Document.digital_signatures
    #ExFor:DigitalSignature
    #ExFor:DigitalSignatureCollection
    #ExFor:DigitalSignature.is_valid
    #ExFor:DigitalSignature.comments
    #ExFor:DigitalSignature.sign_time
    #ExFor:DigitalSignature.signature_type
    #ExSummary:Shows how to validate and display information about each signature in a document.
    doc = aw.Document(MY_DIR + "Digitally signed.docx")
    for signature in doc.digital_signatures:
        print(f"\n{'Valid' if signature.is_valid else 'Invalid'} signature: ")
        print(f"\tReason:\t{signature.comments}")
        print(f"\tType:\t{signature.signature_type}")
        print(f"\tSign time:\t{signature.sign_time}")
        # System.Security.Cryptography.X509Certificates.X509Certificate2 is not supported. That is why the following information is not accessible.
        #print(f"\tSubject name:\t{signature.certificate_holder.certificate.subject_name}")
        #print(f"\tIssuer name:\t{signature.certificate_holder.certificate.issuer_name.name}")
        print()
    #ExEnd
    # The sample document is expected to carry exactly one valid XML-DSig signature.
    self.assertEqual(1, doc.digital_signatures.count)
    digital_sig = doc.digital_signatures[0]
    self.assertTrue(digital_sig.is_valid)
    self.assertEqual("Test Sign", digital_sig.comments)
    self.assertEqual(aw.digitalsignatures.DigitalSignatureType.XML_DSIG, digital_sig.signature_type)
    # System.Security.Cryptography.X509Certificates.X509Certificate2 is not supported. That is why the following information is not accessible.
    #self.assertTrue(digital_sig.certificate_holder.certificate.subject.contains("Aspose Pty Ltd"))
    #self.assertIsNotNone(digital_sig.certificate_holder.certificate.issuer_name.name is not None)
    #self.assertIn("VeriSign", digital_sig.certificate_holder.certificate.issuer_name.name)
def test_digital_signature(self):
    """Sign a document with an X.509 certificate via file paths and via streams, then verify."""
    #ExStart
    #ExFor:DigitalSignature.certificate_holder
    #ExFor:DigitalSignature.issuer_name
    #ExFor:DigitalSignature.subject_name
    #ExFor:DigitalSignatureCollection
    #ExFor:DigitalSignatureCollection.is_valid
    #ExFor:DigitalSignatureCollection.count
    #ExFor:DigitalSignatureCollection.__getitem__(int)
    #ExFor:DigitalSignatureUtil.sign(BytesIO,BytesIO,CertificateHolder)
    #ExFor:DigitalSignatureUtil.sign(str,str,CertificateHolder)
    #ExFor:DigitalSignatureType
    #ExFor:Document.digital_signatures
    #ExSummary:Shows how to sign documents with X.509 certificates.
    # Verify that a document is not signed.
    self.assertFalse(aw.FileFormatUtil.detect_file_format(MY_DIR + "Document.docx").has_digital_signature)
    # Create a CertificateHolder object from a PKCS12 file, which we will use to sign the document.
    certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + "morzal.pfx", "aw", None)
    # There are two ways of saving a signed copy of a document to the local file system:
    # 1 - Designate a document by a local system filename and save a signed copy at a location specified by another filename.
    sign_options = aw.digitalsignatures.SignOptions()
    sign_options.sign_time = datetime.utcnow()
    aw.digitalsignatures.DigitalSignatureUtil.sign(
        MY_DIR + "Document.docx", ARTIFACTS_DIR + "Document.digital_signature.docx",
        certificate_holder, sign_options)
    self.assertTrue(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + "Document.digital_signature.docx").has_digital_signature)
    # 2 - Take a document from a stream and save a signed copy to another stream.
    with open(MY_DIR + "Document.docx", "rb") as in_doc:
        with open(ARTIFACTS_DIR + "Document.digital_signature.docx", "wb") as out_doc:
            aw.digitalsignatures.DigitalSignatureUtil.sign(in_doc, out_doc, certificate_holder)
    self.assertTrue(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + "Document.digital_signature.docx").has_digital_signature)
    # Please verify that all of the document's digital signatures are valid and check their details.
    signed_doc = aw.Document(ARTIFACTS_DIR + "Document.digital_signature.docx")
    digital_signature_collection = signed_doc.digital_signatures
    self.assertTrue(digital_signature_collection.is_valid)
    self.assertEqual(1, digital_signature_collection.count)
    self.assertEqual(aw.digitalsignatures.DigitalSignatureType.XML_DSIG, digital_signature_collection[0].signature_type)
    self.assertEqual("CN=Morzal.Me", signed_doc.digital_signatures[0].issuer_name)
    self.assertEqual("CN=Morzal.Me", signed_doc.digital_signatures[0].subject_name)
    #ExEnd
def test_append_all_documents_in_folder(self):
    """Append every unencrypted .doc file from a folder to a template document."""
    #ExStart
    #ExFor:Document.append_document(Document,ImportFormatMode)
    #ExSummary:Shows how to append all the documents in a folder to the end of a template document.
    dst_doc = aw.Document()
    builder = aw.DocumentBuilder(dst_doc)
    builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1
    builder.writeln("Template Document")
    builder.paragraph_format.style_identifier = aw.StyleIdentifier.NORMAL
    builder.writeln("Some content here")
    self.assertEqual(5, dst_doc.styles.count) #ExSkip
    self.assertEqual(1, dst_doc.sections.count) #ExSkip
    # Append all unencrypted documents with the .doc extension
    # from our local file system directory to the base document.
    for candidate_path in glob.glob(MY_DIR + "*.doc"):
        format_info = aw.FileFormatUtil.detect_file_format(candidate_path)
        if format_info.is_encrypted:
            continue
        dst_doc.append_document(aw.Document(candidate_path), aw.ImportFormatMode.USE_DESTINATION_STYLES)
    dst_doc.save(ARTIFACTS_DIR + "Document.append_all_documents_in_folder.doc")
    #ExEnd
    self.assertEqual(7, dst_doc.styles.count)
    self.assertEqual(9, dst_doc.sections.count)
def test_join_runs_with_same_formatting(self):
    """Merge adjacent identically-formatted runs and check the bookkeeping."""
    #ExStart
    #ExFor:Document.join_runs_with_same_formatting
    #ExSummary:Shows how to join runs in a document to reduce unneeded runs.
    # Open a document that contains adjacent runs of text with identical formatting,
    # which commonly occurs if we edit the same paragraph multiple times in Microsoft Word.
    document = aw.Document(MY_DIR + "Rendering.docx")
    # If any number of these runs are adjacent with identical formatting,
    # then the document may be simplified.
    self.assertEqual(317, document.get_child_nodes(aw.NodeType.RUN, True).count)
    # Combine such runs with this method and verify the number of run joins that will take place.
    self.assertEqual(121, document.join_runs_with_same_formatting())
    # The number of joins and the number of runs we have after the join
    # should add up the number of runs we had initially.
    self.assertEqual(196, document.get_child_nodes(aw.NodeType.RUN, True).count)
    #ExEnd
def test_default_tab_stop(self):
    """Set a custom default tab-stop interval and verify it survives a round trip."""
    #ExStart
    #ExFor:Document.default_tab_stop
    #ExFor:ControlChar.TAB
    #ExFor:ControlChar.TAB_CHAR
    #ExSummary:Shows how to set a custom interval for tab stop positions.
    document = aw.Document()
    doc_builder = aw.DocumentBuilder(document)
    # Set tab stops to appear every 72 points (1 inch).
    doc_builder.document.default_tab_stop = 72
    # Each tab character snaps the text after it to the next closest tab stop position.
    doc_builder.writeln("Hello" + aw.ControlChar.TAB + "World!")
    #ExEnd
    document = DocumentHelper.save_open(document)
    self.assertEqual(72, document.default_tab_stop)
def test_clone_document(self):
    """Deep-clone a document: same content, distinct node objects."""
    #ExStart
    #ExFor:Document.clone()
    #ExSummary:Shows how to deep clone a document.
    doc = aw.Document()
    aw.DocumentBuilder(doc).write("Hello world!")
    # Cloning will produce a new document with the same contents as the original,
    # but with a unique copy of each of the original document's nodes.
    cloned = doc.clone()
    original_run = doc.first_section.body.first_paragraph.runs[0]
    cloned_run = cloned.first_section.body.first_paragraph.runs[0]
    self.assertEqual(original_run.get_text(), cloned_run.text)
    self.assertIsNot(original_run, cloned_run)
    #ExEnd
def test_document_get_text_to_string(self):
    """Contrast raw get_text output with the rendered to_string output."""
    #ExStart
    #ExFor:CompositeNode.get_text
    #ExFor:Node.to_string(SaveFormat)
    #ExSummary:Shows the difference between calling the get_text and to_string methods on a node.
    document = aw.Document()
    doc_builder = aw.DocumentBuilder(document)
    doc_builder.insert_field("MERGEFIELD Field")
    # get_text will retrieve the visible text as well as field codes and special characters.
    self.assertEqual("\u0013MERGEFIELD Field\u0014«Field»\u0015\u000c", document.get_text())
    # to_string will give us the document's appearance if saved to a passed save format.
    self.assertEqual("«Field»\r\n", document.to_string(aw.SaveFormat.TEXT))
    #ExEnd
def test_document_byte_array(self):
    """Round-trip a document through an in-memory byte buffer."""
    original = aw.Document(MY_DIR + "Document.docx")
    # Serialize the document to bytes...
    buffer = io.BytesIO()
    original.save(buffer, aw.SaveFormat.DOCX)
    # ...then deserialize those bytes back into a second document.
    reloaded = aw.Document(io.BytesIO(buffer.getvalue()))
    self.assertEqual(original.get_text(), reloaded.get_text())
def test_protect_unprotect(self):
    """Apply, bypass, and remove write protection on a document.

    Note: this protection only restricts editing in Microsoft Word; the file
    itself is not encrypted and remains fully editable programmatically.
    """
    #ExStart
    #ExFor:Document.protect(ProtectionType,str)
    #ExFor:Document.protection_type
    #ExFor:Document.unprotect()
    #ExFor:Document.unprotect(str)
    #ExSummary:Shows how to protect and unprotect a document.
    doc = aw.Document()
    doc.protect(aw.ProtectionType.READ_ONLY, "password")
    self.assertEqual(aw.ProtectionType.READ_ONLY, doc.protection_type)
    # If we open this document with Microsoft Word intending to edit it,
    # we will need to apply the password to get through the protection.
    doc.save(ARTIFACTS_DIR + "Document.protect_unprotect.docx")
    # Note that the protection only applies to Microsoft Word users opening our document.
    # We have not encrypted the document in any way, and we do not need the password to open and edit it programmatically.
    protected_doc = aw.Document(ARTIFACTS_DIR + "Document.protect_unprotect.docx")
    self.assertEqual(aw.ProtectionType.READ_ONLY, protected_doc.protection_type)
    builder = aw.DocumentBuilder(protected_doc)
    builder.writeln("Text added to a protected document.")
    self.assertEqual("Text added to a protected document.", protected_doc.range.text.strip()) #ExSkip
    # There are two ways of removing protection from a document.
    # 1 - With no password:
    doc.unprotect()
    self.assertEqual(aw.ProtectionType.NO_PROTECTION, doc.protection_type)
    doc.protect(aw.ProtectionType.READ_ONLY, "NewPassword")
    self.assertEqual(aw.ProtectionType.READ_ONLY, doc.protection_type)
    # A wrong password leaves the protection in place rather than raising.
    doc.unprotect("WrongPassword")
    self.assertEqual(aw.ProtectionType.READ_ONLY, doc.protection_type)
    # 2 - With the correct password:
    doc.unprotect("NewPassword")
    self.assertEqual(aw.ProtectionType.NO_PROTECTION, doc.protection_type)
    #ExEnd
def test_document_ensure_minimum(self):
    """Show the minimal Section/Body/Paragraph node set required for editing.

    `nodes` is obtained before remove_all_children() and is indexed again
    after ensure_minimum() — presumably the collection is live and reflects
    the recreated nodes; TODO confirm against the NodeCollection docs.
    """
    #ExStart
    #ExFor:Document.ensure_minimum
    #ExSummary:Shows how to ensure that a document contains the minimal set of nodes required for editing its contents.
    # A newly created document contains one child Section, which includes one child Body and one child Paragraph.
    # We can edit the document body's contents by adding nodes such as Runs or inline Shapes to that paragraph.
    doc = aw.Document()
    nodes = doc.get_child_nodes(aw.NodeType.ANY, True)
    self.assertEqual(aw.NodeType.SECTION, nodes[0].node_type)
    self.assertEqual(doc, nodes[0].parent_node)
    self.assertEqual(aw.NodeType.BODY, nodes[1].node_type)
    self.assertEqual(nodes[0], nodes[1].parent_node)
    self.assertEqual(aw.NodeType.PARAGRAPH, nodes[2].node_type)
    self.assertEqual(nodes[1], nodes[2].parent_node)
    # This is the minimal set of nodes that we need to be able to edit the document.
    # We will no longer be able to edit the document if we remove any of them.
    doc.remove_all_children()
    self.assertEqual(0, doc.get_child_nodes(aw.NodeType.ANY, True).count)
    # Call this method to make sure that the document has at least those three nodes so we can edit it again.
    doc.ensure_minimum()
    self.assertEqual(aw.NodeType.SECTION, nodes[0].node_type)
    self.assertEqual(aw.NodeType.BODY, nodes[1].node_type)
    self.assertEqual(aw.NodeType.PARAGRAPH, nodes[2].node_type)
    nodes[2].as_paragraph().runs.add(aw.Run(doc, "Hello world!"))
    #ExEnd
    self.assertEqual("Hello world!", doc.get_text().strip())
def test_remove_macros_from_document(self):
    """Strip the VBA project (and all macros) from a macro-enabled document."""
    #ExStart
    #ExFor:Document.remove_macros
    #ExSummary:Shows how to remove all macros from a document.
    macro_doc = aw.Document(MY_DIR + "Macro.docm")
    self.assertTrue(macro_doc.has_macros)
    self.assertEqual("Project", macro_doc.vba_project.name)
    # Remove the document's VBA project, along with all its macros.
    macro_doc.remove_macros()
    self.assertFalse(macro_doc.has_macros)
    self.assertIsNone(macro_doc.vba_project)
    #ExEnd
def test_get_page_count(self):
    """Count pages in a three-page document built with explicit page breaks."""
    #ExStart
    #ExFor:Document.page_count
    #ExSummary:Shows how to count the number of pages in the document.
    document = aw.Document()
    doc_builder = aw.DocumentBuilder(document)
    # Write three pages separated by explicit page breaks.
    for page_label in ("Page 1", "Page 2"):
        doc_builder.write(page_label)
        doc_builder.insert_break(aw.BreakType.PAGE_BREAK)
    doc_builder.write("Page 3")
    # Verify the expected page count of the document.
    self.assertEqual(3, document.page_count)
    # Getting the page_count property invoked the document's page layout to calculate the value.
    # This operation will not need to be re-done when rendering the document to a fixed page save format,
    # such as .pdf. So you can save some time, especially with more complex documents.
    document.save(ARTIFACTS_DIR + "Document.get_page_count.pdf")
    #ExEnd
def test_get_updated_page_properties(self):
    """Update the word/character/paragraph/line counts stored in document properties."""
    #ExStart
    #ExFor:Document.update_word_count()
    #ExFor:Document.update_word_count(bool)
    #ExFor:BuiltInDocumentProperties.characters
    #ExFor:BuiltInDocumentProperties.words
    #ExFor:BuiltInDocumentProperties.paragraphs
    #ExFor:BuiltInDocumentProperties.lines
    #ExSummary:Shows how to update all list labels in a document.
    doc = aw.Document()
    builder = aw.DocumentBuilder(doc)
    builder.writeln("Lorem ipsum dolor sit amet, consectetur adipiscing elit, " +
                    "sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.")
    builder.write("Ut enim ad minim veniam, " +
                  "quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.")
    # Aspose.Words does not track document metrics like these in real time.
    self.assertEqual(0, doc.built_in_document_properties.characters)
    self.assertEqual(0, doc.built_in_document_properties.words)
    self.assertEqual(1, doc.built_in_document_properties.paragraphs)
    self.assertEqual(1, doc.built_in_document_properties.lines)
    # To get accurate values for three of these properties, we will need to update them manually.
    doc.update_word_count()
    self.assertEqual(196, doc.built_in_document_properties.characters)
    self.assertEqual(36, doc.built_in_document_properties.words)
    self.assertEqual(2, doc.built_in_document_properties.paragraphs)
    # For the line count, we will need to call a specific overload of the updating method.
    self.assertEqual(1, doc.built_in_document_properties.lines)
    doc.update_word_count(True)
    self.assertEqual(4, doc.built_in_document_properties.lines)
    #ExEnd
def test_table_style_to_direct_formatting(self):
    """Expand a table style's properties into direct formatting on table elements,
    then verify the resulting OOXML markup in the saved package."""
    #ExStart
    #ExFor:CompositeNode.get_child
    #ExFor:Document.expand_table_styles_to_direct_formatting
    #ExSummary:Shows how to apply the properties of a table's style directly to the table's elements.
    doc = aw.Document()
    builder = aw.DocumentBuilder(doc)
    table = builder.start_table()
    builder.insert_cell()
    builder.write("Hello world!")
    builder.end_table()
    # Define a custom table style and apply it to the table.
    table_style = doc.styles.add(aw.StyleType.TABLE, "MyTableStyle1").as_table_style()
    table_style.row_stripe = 3
    table_style.cell_spacing = 5
    table_style.shading.background_pattern_color = drawing.Color.antique_white
    table_style.borders.color = drawing.Color.blue
    table_style.borders.line_style = aw.LineStyle.DOT_DASH
    table.style = table_style
    # This method concerns table style properties such as the ones we set above.
    doc.expand_table_styles_to_direct_formatting()
    doc.save(ARTIFACTS_DIR + "Document.table_style_to_direct_formatting.docx")
    #ExEnd
    # The expanded style attributes should now appear directly in word/document.xml.
    self.verify_doc_package_file_contains_string("<w:tblStyleRowBandSize w:val=\"3\" />",
        ARTIFACTS_DIR + "Document.table_style_to_direct_formatting.docx", "word/document.xml")
    self.verify_doc_package_file_contains_string("<w:tblCellSpacing w:w=\"100\" w:type=\"dxa\" />",
        ARTIFACTS_DIR + "Document.table_style_to_direct_formatting.docx", "word/document.xml")
    self.verify_doc_package_file_contains_string("<w:tblBorders><w:top w:val=\"dotDash\" w:sz=\"2\" w:space=\"0\" w:color=\"0000FF\" /><w:left w:val=\"dotDash\" w:sz=\"2\" w:space=\"0\" w:color=\"0000FF\" /><w:bottom w:val=\"dotDash\" w:sz=\"2\" w:space=\"0\" w:color=\"0000FF\" /><w:right w:val=\"dotDash\" w:sz=\"2\" w:space=\"0\" w:color=\"0000FF\" /><w:insideH w:val=\"dotDash\" w:sz=\"2\" w:space=\"0\" w:color=\"0000FF\" /><w:insideV w:val=\"dotDash\" w:sz=\"2\" w:space=\"0\" w:color=\"0000FF\" /></w:tblBorders>",
        ARTIFACTS_DIR + "Document.table_style_to_direct_formatting.docx", "word/document.xml")
def test_get_original_file_info(self):
    """Check the filename and load format recorded by a load operation."""
    #ExStart
    #ExFor:Document.original_file_name
    #ExFor:Document.original_load_format
    #ExSummary:Shows how to retrieve details of a document's load operation.
    loaded = aw.Document(MY_DIR + "Document.docx")
    self.assertEqual(MY_DIR + "Document.docx", loaded.original_file_name)
    self.assertEqual(aw.LoadFormat.DOCX, loaded.original_load_format)
    #ExEnd
# WORDSNET-16099
def test_footnote_columns(self):
    """Split the footnote section into two columns and verify after a reload."""
    #ExStart
    #ExFor:FootnoteOptions
    #ExFor:FootnoteOptions.columns
    #ExSummary:Shows how to split the footnote section into a given number of columns.
    document = aw.Document(MY_DIR + "Footnotes and endnotes.docx")
    self.assertEqual(0, document.footnote_options.columns) #ExSkip
    document.footnote_options.columns = 2
    document.save(ARTIFACTS_DIR + "Document.footnote_columns.docx")
    #ExEnd
    document = aw.Document(ARTIFACTS_DIR + "Document.footnote_columns.docx")
    self.assertEqual(2, document.first_section.page_setup.footnote_options.columns)
def test_compare(self):
    """Compare two documents, inspect the generated revisions, and accept them all."""
    #ExStart
    #ExFor:Document.compare(Document,str,datetime)
    #ExFor:RevisionCollection.accept_all
    #ExSummary:Shows how to compare documents.
    doc_original = aw.Document()
    builder = aw.DocumentBuilder(doc_original)
    builder.writeln("This is the original document.")
    doc_edited = aw.Document()
    builder = aw.DocumentBuilder(doc_edited)
    builder.writeln("This is the edited document.")
    # Comparing documents with revisions will throw an exception.
    if doc_original.revisions.count == 0 and doc_edited.revisions.count == 0:
        doc_original.compare(doc_edited, "authorName", datetime.now())
    # After the comparison, the original document will gain a new revision
    # for every element that is different in the edited document.
    self.assertEqual(2, doc_original.revisions.count) #ExSkip
    for revision in doc_original.revisions:
        print(f"Revision type: {revision.revision_type}, on a node of type \"{revision.parent_node.node_type}\"")
        print(f"\tChanged text: \"{revision.parent_node.get_text()}\"")
    # Accepting these revisions will transform the original document into the edited document.
    doc_original.revisions.accept_all()
    self.assertEqual(doc_original.get_text(), doc_edited.get_text())
    #ExEnd
    # After a save/reopen round trip, no revisions should remain.
    doc_original = DocumentHelper.save_open(doc_original)
    self.assertEqual(0, doc_original.revisions.count)
def test_compare_document_with_revisions(self):
    """Verifies that comparing a document that already contains tracked
    revisions raises an exception."""
    doc1 = aw.Document()
    builder = aw.DocumentBuilder(doc1)
    builder.writeln("Hello world! This text is not a revision.")
    doc_with_revision = aw.Document()
    builder = aw.DocumentBuilder(doc_with_revision)
    doc_with_revision.start_track_revisions("John Doe")
    builder.writeln("This is a revision.")
    with self.assertRaises(Exception):
        doc_with_revision.compare(doc1, "John Doe", datetime.now())
def test_compare_options(self):
    """Exercises every CompareOptions 'ignore_*' flag by comparing a document
    against an edited clone of itself."""
    #ExStart
    #ExFor:CompareOptions
    #ExFor:CompareOptions.ignore_formatting
    #ExFor:CompareOptions.ignore_case_changes
    #ExFor:CompareOptions.ignore_comments
    #ExFor:CompareOptions.ignore_tables
    #ExFor:CompareOptions.ignore_fields
    #ExFor:CompareOptions.ignore_footnotes
    #ExFor:CompareOptions.ignore_textboxes
    #ExFor:CompareOptions.ignore_headers_and_footers
    #ExFor:CompareOptions.target
    #ExFor:ComparisonTargetType
    #ExFor:Document.compare(Document,str,datetime,CompareOptions)
    #ExSummary:Shows how to filter specific types of document elements when making a comparison.
    # Create the original document and populate it with various kinds of elements.
    doc_original = aw.Document()
    builder = aw.DocumentBuilder(doc_original)
    # Paragraph text referenced with an endnote:
    builder.writeln("Hello world! This is the first paragraph.")
    builder.insert_footnote(aw.notes.FootnoteType.ENDNOTE, "Original endnote text.")
    # Table:
    builder.start_table()
    builder.insert_cell()
    builder.write("Original cell 1 text")
    builder.insert_cell()
    builder.write("Original cell 2 text")
    builder.end_table()
    # Textbox:
    text_box = builder.insert_shape(aw.drawing.ShapeType.TEXT_BOX, 150, 20)
    builder.move_to(text_box.first_paragraph)
    builder.write("Original textbox contents")
    # DATE field:
    builder.move_to(doc_original.first_section.body.append_paragraph(""))
    builder.insert_field(" DATE ")
    # Comment:
    new_comment = aw.Comment(doc_original, "John Doe", "J.D.", datetime.now())
    new_comment.set_text("Original comment.")
    builder.current_paragraph.append_child(new_comment)
    # Header:
    builder.move_to_header_footer(aw.HeaderFooterType.HEADER_PRIMARY)
    builder.writeln("Original header contents.")
    # Create a clone of our document and perform a quick edit on each of the cloned document's elements.
    doc_edited = doc_original.clone(True).as_document()
    first_paragraph = doc_edited.first_section.body.first_paragraph
    first_paragraph.runs[0].text = "hello world! this is the first paragraph, after editing."
    first_paragraph.paragraph_format.style = doc_edited.styles[aw.StyleIdentifier.HEADING1]
    doc_edited.get_child(aw.NodeType.FOOTNOTE, 0, True).as_footnote().first_paragraph.runs[1].text = "Edited endnote text."
    doc_edited.get_child(aw.NodeType.TABLE, 0, True).as_table().first_row.cells[1].first_paragraph.runs[0].text = "Edited Cell 2 contents"
    doc_edited.get_child(aw.NodeType.SHAPE, 0, True).as_shape().first_paragraph.runs[0].text = "Edited textbox contents"
    doc_edited.range.fields[0].as_field_date().use_lunar_calendar = True
    doc_edited.get_child(aw.NodeType.COMMENT, 0, True).as_comment().first_paragraph.runs[0].text = "Edited comment."
    doc_edited.first_section.headers_footers[aw.HeaderFooterType.HEADER_PRIMARY].first_paragraph.runs[0].text = "Edited header contents."
    # Comparing documents creates a revision for every edit in the edited document.
    # A CompareOptions object has a series of flags that can suppress revisions
    # on each respective type of element, effectively ignoring their change.
    compare_options = aw.comparing.CompareOptions()
    compare_options.ignore_formatting = False
    compare_options.ignore_case_changes = False
    compare_options.ignore_comments = False
    compare_options.ignore_tables = False
    compare_options.ignore_fields = False
    compare_options.ignore_footnotes = False
    compare_options.ignore_textboxes = False
    compare_options.ignore_headers_and_footers = False
    compare_options.target = aw.comparing.ComparisonTargetType.NEW
    doc_original.compare(doc_edited, "John Doe", datetime.now(), compare_options)
    doc_original.save(ARTIFACTS_DIR + "Document.compare_options.docx")
    #ExEnd
    doc_original = aw.Document(ARTIFACTS_DIR + "Document.compare_options.docx")
    self.verify_footnote(aw.notes.FootnoteType.ENDNOTE, True, "",
        "OriginalEdited endnote text.", doc_original.get_child(aw.NodeType.FOOTNOTE, 0, True).as_footnote())
def test_ignore_dml_unique_id(self):
    """Verifies the revision count with and without ignoring DrawingML unique IDs."""
    for is_ignore_dml_unique_id in (False, True):
        with self.subTest(is_ignore_dml_unique_id=is_ignore_dml_unique_id):
            #ExStart
            #ExFor:CompareOptions.ignore_dml_unique_id
            #ExSummary:Shows how to compare documents ignoring DML unique ID.
            doc_a = aw.Document(MY_DIR + "DML unique ID original.docx")
            doc_b = aw.Document(MY_DIR + "DML unique ID compare.docx")
            # By default, Aspose.Words does not ignore DML unique IDs, and the comparison yields 2 revisions.
            # When DML unique IDs are ignored, the comparison yields 0 revisions.
            compare_options = aw.comparing.CompareOptions()
            compare_options.ignore_dml_unique_id = is_ignore_dml_unique_id
            doc_a.compare(doc_b, "Aspose.Words", datetime.now(), compare_options)
            self.assertEqual(0 if is_ignore_dml_unique_id else 2, doc_a.revisions.count)
            #ExEnd
def test_remove_external_schema_references(self):
    """Smoke test: removing external XML schema references completes without error."""
    #ExStart
    #ExFor:Document.remove_external_schema_references
    #ExSummary:Shows how to remove all external XML schema references from a document.
    doc = aw.Document(MY_DIR + "External XML schema.docx")
    doc.remove_external_schema_references()
    #ExEnd
def test_track_revisions(self):
    """Verifies that edits count as revisions only while tracking is active,
    and that the revision timestamp can be suppressed with datetime.min."""
    #ExStart
    #ExFor:Document.start_track_revisions(str)
    #ExFor:Document.start_track_revisions(str,datetime)
    #ExFor:Document.stop_track_revisions
    #ExSummary:Shows how to track revisions while editing a document.
    doc = aw.Document()
    builder = aw.DocumentBuilder(doc)
    # Editing a document usually does not count as a revision until we begin tracking them.
    builder.write("Hello world! ")
    self.assertEqual(0, doc.revisions.count)
    self.assertFalse(doc.first_section.body.paragraphs[0].runs[0].is_insert_revision)
    doc.start_track_revisions("John Doe")
    builder.write("Hello again! ")
    self.assertEqual(1, doc.revisions.count)
    self.assertTrue(doc.first_section.body.paragraphs[0].runs[1].is_insert_revision)
    self.assertEqual("John Doe", doc.revisions[0].author)
    self.assertAlmostEqual(doc.revisions[0].date_time, datetime.now(tz=timezone.utc), delta=timedelta(seconds=1))
    # Stop tracking revisions to not count any future edits as revisions.
    doc.stop_track_revisions()
    builder.write("Hello again! ")
    self.assertEqual(1, doc.revisions.count)
    self.assertFalse(doc.first_section.body.paragraphs[0].runs[2].is_insert_revision)
    # Creating revisions gives them a date and time of the operation.
    # We can disable this by passing "datetime.min" when we start tracking revisions.
    doc.start_track_revisions("John Doe", datetime.min)
    builder.write("Hello again! ")
    self.assertEqual(2, doc.revisions.count)
    self.assertEqual("John Doe", doc.revisions[1].author)
    self.assertEqual(datetime.min, doc.revisions[1].date_time)
    # We can accept/reject these revisions programmatically
    # by calling methods such as "Document.accept_all_revisions", or each revision's "accept" method.
    # In Microsoft Word, we can process them manually via "Review" -> "Changes".
    doc.save(ARTIFACTS_DIR + "Document.track_revisions.docx")
    #ExEnd
def test_accept_all_revisions(self):
    """Verifies that accept_all_revisions clears the revision collection and
    merges all tracked edits into the document text."""
    #ExStart
    #ExFor:Document.accept_all_revisions
    #ExSummary:Shows how to accept all tracking changes in the document.
    doc = aw.Document()
    builder = aw.DocumentBuilder(doc)
    # Edit the document while tracking changes to create a few revisions.
    doc.start_track_revisions("John Doe")
    builder.write("Hello world! ")
    builder.write("Hello again! ")
    builder.write("This is another revision.")
    doc.stop_track_revisions()
    self.assertEqual(3, doc.revisions.count)
    # We can iterate through every revision and accept/reject it as a part of our document.
    # If we know we wish to accept every revision, we can do it more straightforwardly so by calling this method.
    doc.accept_all_revisions()
    self.assertEqual(0, doc.revisions.count)
    self.assertEqual("Hello world! Hello again! This is another revision.", doc.get_text().strip())
    #ExEnd
def test_get_revised_properties_of_list(self):
    """Verifies that switching RevisionsView changes which list labels are visible."""
    #ExStart
    #ExFor:RevisionsView
    #ExFor:Document.revisions_view
    #ExSummary:Shows how to switch between the revised and the original view of a document.
    doc = aw.Document(MY_DIR + "Revisions at list levels.docx")
    doc.update_list_labels()
    paragraphs = doc.first_section.body.paragraphs
    self.assertEqual("1.", paragraphs[0].list_label.label_string)
    self.assertEqual("a.", paragraphs[1].list_label.label_string)
    self.assertEqual("", paragraphs[2].list_label.label_string)
    # View the document object as if all the revisions are accepted. Currently supports list labels.
    doc.revisions_view = aw.RevisionsView.FINAL
    self.assertEqual("", paragraphs[0].list_label.label_string)
    self.assertEqual("1.", paragraphs[1].list_label.label_string)
    self.assertEqual("a.", paragraphs[2].list_label.label_string)
    #ExEnd
    doc.revisions_view = aw.RevisionsView.ORIGINAL
    doc.accept_all_revisions()
    self.assertEqual("a.", paragraphs[0].list_label.label_string)
    self.assertEqual("", paragraphs[1].list_label.label_string)
    self.assertEqual("b.", paragraphs[2].list_label.label_string)
def test_update_thumbnail(self):
    """Generates an EPUB thumbnail from the first page and from the first image."""
    #ExStart
    #ExFor:Document.update_thumbnail()
    #ExFor:Document.update_thumbnail(ThumbnailGeneratingOptions)
    #ExFor:ThumbnailGeneratingOptions
    #ExFor:ThumbnailGeneratingOptions.generate_from_first_page
    #ExFor:ThumbnailGeneratingOptions.thumbnail_size
    #ExSummary:Shows how to update a document's thumbnail.
    doc = aw.Document()
    builder = aw.DocumentBuilder(doc)
    builder.writeln("Hello world!")
    builder.insert_image(IMAGE_DIR + "Logo.jpg")
    # There are two ways of setting a thumbnail image when saving a document to .epub.
    # 1 - Use the document's first page:
    doc.update_thumbnail()
    doc.save(ARTIFACTS_DIR + "Document.update_thumbnail.first_page.epub")
    # 2 - Use the first image found in the document:
    options = aw.rendering.ThumbnailGeneratingOptions()
    self.assertEqual(drawing.Size(600, 900), options.thumbnail_size) #ExSKip
    self.assertTrue(options.generate_from_first_page) #ExSkip
    options.thumbnail_size = drawing.Size(400, 400)
    options.generate_from_first_page = False
    doc.update_thumbnail(options)
    doc.save(ARTIFACTS_DIR + "Document.update_thumbnail.first_image.epub")
    #ExEnd
def test_hyphenation_options(self):
    """Configures automatic hyphenation and verifies the saved output against a gold file."""
    #ExStart
    #ExFor:Document.hyphenation_options
    #ExFor:HyphenationOptions
    #ExFor:HyphenationOptions.auto_hyphenation
    #ExFor:HyphenationOptions.consecutive_hyphen_limit
    #ExFor:HyphenationOptions.hyphenation_zone
    #ExFor:HyphenationOptions.hyphenate_caps
    #ExSummary:Shows how to configure automatic hyphenation.
    doc = aw.Document()
    builder = aw.DocumentBuilder(doc)
    builder.font.size = 24
    builder.writeln("Lorem ipsum dolor sit amet, consectetur adipiscing elit, " +
                    "sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.")
    doc.hyphenation_options.auto_hyphenation = True
    doc.hyphenation_options.consecutive_hyphen_limit = 2
    doc.hyphenation_options.hyphenation_zone = 720
    doc.hyphenation_options.hyphenate_caps = True
    doc.save(ARTIFACTS_DIR + "Document.hyphenation_options.docx")
    #ExEnd
    self.assertTrue(doc.hyphenation_options.auto_hyphenation)
    self.assertEqual(2, doc.hyphenation_options.consecutive_hyphen_limit)
    self.assertEqual(720, doc.hyphenation_options.hyphenation_zone)
    self.assertTrue(doc.hyphenation_options.hyphenate_caps)
    self.assertTrue(DocumentHelper.compare_docs(ARTIFACTS_DIR + "Document.hyphenation_options.docx",
        GOLDS_DIR + "Document.HyphenationOptions Gold.docx"))
def test_hyphenation_options_default_values(self):
    """Verifies the default HyphenationOptions values after a save/open round trip."""
    doc = aw.Document()
    doc = DocumentHelper.save_open(doc)
    self.assertEqual(False, doc.hyphenation_options.auto_hyphenation)
    self.assertEqual(0, doc.hyphenation_options.consecutive_hyphen_limit)
    self.assertEqual(360, doc.hyphenation_options.hyphenation_zone) # 360 twips = 0.25 inch
    self.assertTrue(doc.hyphenation_options.hyphenate_caps)
def test_hyphenation_options_exceptions(self):
    """Verifies which out-of-range HyphenationOptions values raise exceptions.

    NOTE(review): a consecutive_hyphen_limit of 0 is accepted, while a
    hyphenation_zone of 0 and a negative hyphen limit both raise.
    """
    doc = aw.Document()
    doc.hyphenation_options.consecutive_hyphen_limit = 0
    with self.assertRaises(Exception):
        doc.hyphenation_options.hyphenation_zone = 0
    with self.assertRaises(Exception):
        doc.hyphenation_options.consecutive_hyphen_limit = -1
    doc.hyphenation_options.hyphenation_zone = 360
def test_ooxml_compliance_version(self):
    """Checks the OOXML compliance version reported for .doc versus .docx input."""
    #ExStart
    #ExFor:Document.compliance
    #ExSummary:Shows how to read a loaded document's Open Office XML compliance version.
    # The compliance version varies between documents created by different versions of Microsoft Word.
    doc = aw.Document(MY_DIR + "Document.doc")
    self.assertEqual(doc.compliance, aw.saving.OoxmlCompliance.ECMA376_2006)
    doc = aw.Document(MY_DIR + "Document.docx")
    self.assertEqual(doc.compliance, aw.saving.OoxmlCompliance.ISO29500_2008_TRANSITIONAL)
    #ExEnd
@unittest.skip("WORDSNET-20342")
def test_image_save_options(self):
    """Renders a document to JPEG with and without anti-aliasing/high-quality
    rendering and verifies the image dimensions. Skipped pending WORDSNET-20342."""
    #ExStart
    #ExFor:Document.save(str,SaveOptions)
    #ExFor:SaveOptions.use_anti_aliasing
    #ExFor:SaveOptions.use_high_quality_rendering
    #ExSummary:Shows how to improve the quality of a rendered document with SaveOptions.
    doc = aw.Document(MY_DIR + "Rendering.docx")
    builder = aw.DocumentBuilder(doc)
    builder.font.size = 60
    builder.writeln("Some text.")
    options = aw.saving.ImageSaveOptions(aw.SaveFormat.JPEG)
    self.assertFalse(options.use_anti_aliasing) #ExSkip
    self.assertFalse(options.use_high_quality_rendering) #ExSkip
    doc.save(ARTIFACTS_DIR + "Document.image_save_options.default.jpg", options)
    options.use_anti_aliasing = True
    options.use_high_quality_rendering = True
    doc.save(ARTIFACTS_DIR + "Document.image_save_options.high_quality.jpg", options)
    #ExEnd
    self.verify_image(794, 1122, ARTIFACTS_DIR + "Document.image_save_options.default.jpg")
    self.verify_image(794, 1122, ARTIFACTS_DIR + "Document.image_save_options.high_quality.jpg")
def test_cleanup(self):
    """Verifies that Document.cleanup removes only unused custom styles."""
    #ExStart
    #ExFor:Document.cleanup()
    #ExSummary:Shows how to remove unused custom styles from a document.
    doc = aw.Document()
    doc.styles.add(aw.StyleType.LIST, "MyListStyle1")
    doc.styles.add(aw.StyleType.LIST, "MyListStyle2")
    doc.styles.add(aw.StyleType.CHARACTER, "MyParagraphStyle1")
    doc.styles.add(aw.StyleType.CHARACTER, "MyParagraphStyle2")
    # Combined with the built-in styles, the document now has eight styles.
    # A custom style counts as "used" while applied to some part of the document,
    # which means that the four styles we added are currently unused.
    self.assertEqual(8, doc.styles.count)
    # Apply a custom character style, and then a custom list style. Doing so will mark the styles as "used".
    builder = aw.DocumentBuilder(doc)
    builder.font.style = doc.styles.get_by_name("MyParagraphStyle1")
    builder.writeln("Hello world!")
    builder.list_format.list = doc.lists.add(doc.styles.get_by_name("MyListStyle1"))
    builder.writeln("Item 1")
    builder.writeln("Item 2")
    doc.cleanup()
    self.assertEqual(6, doc.styles.count)
    # Removing every node that a custom style is applied to marks it as "unused" again.
    # Run the "cleanup" method again to remove them.
    doc.first_section.body.remove_all_children()
    doc.cleanup()
    self.assertEqual(4, doc.styles.count)
    #ExEnd
def test_automatically_update_styles(self):
    """Attaches a template and verifies the attachment persists through a save."""
    #ExStart
    #ExFor:Document.automatically_update_styles
    #ExSummary:Shows how to attach a template to a document.
    doc = aw.Document()
    # Microsoft Word documents by default come with an attached template called "Normal.dotm".
    # There is no default template for blank Aspose.Words documents.
    self.assertEqual("", doc.attached_template)
    # Attach a template, then set the flag to apply style changes
    # within the template to styles in our document.
    doc.attached_template = MY_DIR + "Business brochure.dotx"
    doc.automatically_update_styles = True
    doc.save(ARTIFACTS_DIR + "Document.automatically_update_styles.docx")
    #ExEnd
    doc = aw.Document(ARTIFACTS_DIR + "Document.automatically_update_styles.docx")
    self.assertTrue(doc.automatically_update_styles)
    self.assertEqual(MY_DIR + "Business brochure.dotx", doc.attached_template)
    self.assertTrue(os.path.exists(doc.attached_template))
def test_default_template(self):
    """Verifies SaveOptions.default_template supplies a template to documents without one."""
    #ExStart
    #ExFor:Document.attached_template
    #ExFor:Document.automatically_update_styles
    #ExFor:SaveOptions.create_save_options(str)
    #ExFor:SaveOptions.default_template
    #ExSummary:Shows how to set a default template for documents that do not have attached templates.
    doc = aw.Document()
    # Enable automatic style updating, but do not attach a template document.
    doc.automatically_update_styles = True
    self.assertEqual("", doc.attached_template)
    # Since there is no template document, the document had nowhere to track style changes.
    # Use a SaveOptions object to automatically set a template
    # if a document that we are saving does not have one.
    options = aw.saving.SaveOptions.create_save_options("Document.default_template.docx")
    options.default_template = MY_DIR + "Business brochure.dotx"
    doc.save(ARTIFACTS_DIR + "Document.default_template.docx", options)
    #ExEnd
    self.assertTrue(os.path.exists(options.default_template))
def test_use_substitutions(self):
    """Verifies that $1/$2 substitutions are applied in a regex replacement
    when FindReplaceOptions.use_substitutions is enabled."""
    #ExStart
    #ExFor:FindReplaceOptions.use_substitutions
    #ExFor:FindReplaceOptions.legacy_mode
    #ExSummary:Shows how to recognize and use substitutions within replacement patterns.
    doc = aw.Document()
    builder = aw.DocumentBuilder(doc)
    builder.write("Jason gave money to Paul.")
    options = aw.replacing.FindReplaceOptions()
    options.use_substitutions = True
    # Using legacy mode does not support many advanced features, so we need to set it to 'False'.
    options.legacy_mode = False
    # Note: "[A-Za-z]" matches letters only; the previous "[A-z]" character class
    # also matched the non-letter characters between 'Z' and 'a' ("[", "\", "]", "^", "_", "`").
    doc.range.replace_regex(r"([A-Za-z]+) gave money to ([A-Za-z]+)", r"$2 took money from $1", options)
    self.assertEqual(doc.get_text(), "Paul took money from Jason.\f")
    #ExEnd
def test_set_invalidate_field_types(self):
    """Verifies that normalize_field_types re-synchronizes a field's reported
    type after its field code text is changed directly."""
    #ExStart
    #ExFor:Document.normalize_field_types
    #ExSummary:Shows how to get the keep a field's type up to date with its field code.
    doc = aw.Document()
    builder = aw.DocumentBuilder(doc)
    field = builder.insert_field("DATE", None)
    # Aspose.Words automatically detects field types based on field codes.
    self.assertEqual(aw.fields.FieldType.FIELD_DATE, field.type)
    # Manually change the raw text of the field, which determines the field code.
    field_text = doc.first_section.body.first_paragraph.get_child_nodes(aw.NodeType.RUN, True)[0].as_run()
    self.assertEqual("DATE", field_text.text) #ExSkip
    field_text.text = "PAGE"
    # Changing the field code has changed this field to one of a different type,
    # but the field's type properties still display the old type.
    self.assertEqual("PAGE", field.get_field_code())
    self.assertEqual(aw.fields.FieldType.FIELD_DATE, field.type)
    self.assertEqual(aw.fields.FieldType.FIELD_DATE, field.start.field_type)
    self.assertEqual(aw.fields.FieldType.FIELD_DATE, field.separator.field_type)
    self.assertEqual(aw.fields.FieldType.FIELD_DATE, field.end.field_type)
    # Update those properties with this method to display current value.
    doc.normalize_field_types()
    self.assertEqual(aw.fields.FieldType.FIELD_PAGE, field.type)
    self.assertEqual(aw.fields.FieldType.FIELD_PAGE, field.start.field_type)
    self.assertEqual(aw.fields.FieldType.FIELD_PAGE, field.separator.field_type)
    self.assertEqual(aw.fields.FieldType.FIELD_PAGE, field.end.field_type)
    #ExEnd
def test_layout_options_revisions(self):
    """Renders a PDF with a custom revision color and revision bars disabled."""
    #ExStart
    #ExFor:Document.layout_options
    #ExFor:LayoutOptions
    #ExFor:LayoutOptions.revision_options
    #ExFor:RevisionColor
    #ExFor:RevisionOptions
    #ExFor:RevisionOptions.inserted_text_color
    #ExFor:RevisionOptions.show_revision_bars
    #ExSummary:Shows how to alter the appearance of revisions in a rendered output document.
    doc = aw.Document()
    builder = aw.DocumentBuilder(doc)
    # Insert a revision, then change the color of all revisions to green.
    builder.writeln("This is not a revision.")
    doc.start_track_revisions("John Doe", datetime.now())
    self.assertEqual(aw.layout.RevisionColor.BY_AUTHOR, doc.layout_options.revision_options.inserted_text_color) #ExSkip
    self.assertTrue(doc.layout_options.revision_options.show_revision_bars) #ExSkip
    builder.writeln("This is a revision.")
    doc.stop_track_revisions()
    builder.writeln("This is not a revision.")
    # Remove the bar that appears to the left of every revised line.
    doc.layout_options.revision_options.inserted_text_color = aw.layout.RevisionColor.BRIGHT_GREEN
    doc.layout_options.revision_options.show_revision_bars = False
    doc.save(ARTIFACTS_DIR + "Document.layout_options_revisions.pdf")
    #ExEnd
def test_layout_options_hidden_text(self):
    """Renders a PDF with hidden text shown and omitted (PDF text verification
    is commented out pending an available PDF parsing dependency)."""
    for show_hidden_text in (False, True):
        with self.subTest(show_hidden_text=show_hidden_text):
            #ExStart
            #ExFor:Document.layout_options
            #ExFor:LayoutOptions
            #ExFor:LayoutOptions.show_hidden_text
            #ExSummary:Shows how to hide text in a rendered output document.
            doc = aw.Document()
            builder = aw.DocumentBuilder(doc)
            self.assertFalse(doc.layout_options.show_hidden_text) #ExSkip
            # Insert hidden text, then specify whether we wish to omit it from a rendered document.
            builder.writeln("This text is not hidden.")
            builder.font.hidden = True
            builder.writeln("This text is hidden.")
            doc.layout_options.show_hidden_text = show_hidden_text
            doc.save(ARTIFACTS_DIR + "Document.layout_options_hidden_text.pdf")
            #ExEnd
            #pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR + "Document.layout_options_hidden_text.pdf")
            #text_absorber = aspose.pdf.text.TextAbsorber()
            #text_absorber.visit(pdf_doc)
            #if show_hidden_text:
            #    self.assertEqual("This text is not hidden.\nThis text is hidden.", text_absorber.text)
            #else:
            #    self.assertEqual("This text is not hidden.", text_absorber.text)
def test_layout_options_paragraph_marks(self):
    """Renders a PDF with and without pilcrow paragraph marks (PDF text
    verification is commented out pending an available PDF parsing dependency)."""
    for show_paragraph_marks in (False, True):
        with self.subTest(show_paragraph_marks=show_paragraph_marks):
            #ExStart
            #ExFor:Document.layout_options
            #ExFor:LayoutOptions
            #ExFor:LayoutOptions.show_paragraph_marks
            #ExSummary:Shows how to show paragraph marks in a rendered output document.
            doc = aw.Document()
            builder = aw.DocumentBuilder(doc)
            self.assertFalse(doc.layout_options.show_paragraph_marks) #ExSkip
            # Add some paragraphs, then enable paragraph marks to show the ends of paragraphs
            # with a pilcrow (¶) symbol when we render the document.
            builder.writeln("Hello world!")
            builder.writeln("Hello again!")
            doc.layout_options.show_paragraph_marks = show_paragraph_marks
            doc.save(ARTIFACTS_DIR + "Document.layout_options_paragraph_marks.pdf")
            #ExEnd
            #pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR + "Document.layout_options_paragraph_marks.pdf")
            #text_absorber = aspose.pdf.text.TextAbsorber()
            #text_absorber.visit(pdf_doc)
            #self.assertEqual("Hello world!¶\nHello again!¶\n¶" if show_paragraph_marks else "Hello world!\nHello again!", text_absorber.text)
def test_update_page_layout(self):
    """Demonstrates that the cached page layout must be rebuilt manually
    after modifying the document."""
    #ExStart
    #ExFor:StyleCollection.__getitem__(str)
    #ExFor:SectionCollection.__getitem__(int)
    #ExFor:Document.update_page_layout
    #ExSummary:Shows when to recalculate the page layout of the document.
    doc = aw.Document(MY_DIR + "Rendering.docx")
    # Saving a document to PDF, to an image, or printing for the first time will automatically
    # cache the layout of the document within its pages.
    doc.save(ARTIFACTS_DIR + "Document.update_page_layout.1.pdf")
    # Modify the document in some way.
    doc.styles.get_by_name("Normal").font.size = 6
    doc.sections[0].page_setup.orientation = aw.Orientation.LANDSCAPE
    # In the current version of Aspose.Words, modifying the document does not automatically rebuild
    # the cached page layout. If we wish for the cached layout
    # to stay up to date, we will need to update it manually.
    doc.update_page_layout()
    doc.save(ARTIFACTS_DIR + "Document.update_page_layout.2.pdf")
    #ExEnd
def test_doc_package_custom_parts(self):
    """Exercises the CustomPartCollection API: clone, add, enumerate, remove, clear."""
    #ExStart
    #ExFor:CustomPart
    #ExFor:CustomPart.content_type
    #ExFor:CustomPart.relationship_type
    #ExFor:CustomPart.is_external
    #ExFor:CustomPart.data
    #ExFor:CustomPart.name
    #ExFor:CustomPart.clone
    #ExFor:CustomPartCollection
    #ExFor:CustomPartCollection.add(CustomPart)
    #ExFor:CustomPartCollection.clear
    #ExFor:CustomPartCollection.clone
    #ExFor:CustomPartCollection.count
    #ExFor:CustomPartCollection.__iter__
    #ExFor:CustomPartCollection.__getitem__(int)
    #ExFor:CustomPartCollection.remove_at(int)
    #ExFor:Document.package_custom_parts
    #ExSummary:Shows how to access a document's arbitrary custom parts collection.
    doc = aw.Document(MY_DIR + "Custom parts OOXML package.docx")
    self.assertEqual(2, doc.package_custom_parts.count)
    # Clone the second part, then add the clone to the collection.
    cloned_part = doc.package_custom_parts[1].clone()
    doc.package_custom_parts.add(cloned_part)
    self._test_doc_package_custom_parts(doc.package_custom_parts) #ExSkip
    self.assertEqual(3, doc.package_custom_parts.count)
    # Enumerate over the collection and print every part.
    for index, part in enumerate(doc.package_custom_parts):
        print(f"Part index {index}:")
        print(f"\tName:\t\t\t\t{part.name}")
        print(f"\tContent type:\t\t{part.content_type}")
        print(f"\tRelationship type:\t{part.relationship_type}")
        if part.is_external:
            print("\tSourced from outside the document")
        else:
            print(f"\tStored within the document, length: {len(part.data)} bytes")
    # We can remove elements from this collection individually, or all at once.
    doc.package_custom_parts.remove_at(2)
    self.assertEqual(2, doc.package_custom_parts.count)
    doc.package_custom_parts.clear()
    self.assertEqual(0, doc.package_custom_parts.count)
    #ExEnd
def _test_doc_package_custom_parts(self, parts: aw.markup.CustomPartCollection) -> None:
    """Helper for test_doc_package_custom_parts: asserts the expected contents
    of the three-part collection (one internal part plus two identical
    external parts, the second being a clone of the first external one)."""
    self.assertEqual(3, parts.count)
    # Internal payload part: stored inside the package, with data.
    self.assertEqual("/payload/payload_on_package.test", parts[0].name)
    self.assertEqual("mytest/somedata", parts[0].content_type)
    self.assertEqual("http://mytest.payload.internal", parts[0].relationship_type)
    self.assertEqual(False, parts[0].is_external)
    self.assertEqual(18, len(parts[0].data))
    # External part: referenced by URL, so no content type or data.
    self.assertEqual("http://www.aspose.com/Images/aspose-logo.jpg", parts[1].name)
    self.assertEqual("", parts[1].content_type)
    self.assertEqual("http://mytest.payload.external", parts[1].relationship_type)
    self.assertTrue(parts[1].is_external)
    self.assertEqual(0, len(parts[1].data))
    # Clone of the external part added by the caller — identical properties.
    self.assertEqual("http://www.aspose.com/Images/aspose-logo.jpg", parts[2].name)
    self.assertEqual("", parts[2].content_type)
    self.assertEqual("http://mytest.payload.external", parts[2].relationship_type)
    self.assertTrue(parts[2].is_external)
    self.assertEqual(0, len(parts[2].data))
def test_shade_form_data(self):
    """Saves a document with form-field shading toggled on and off."""
    for use_grey_shading in (False, True):
        with self.subTest(use_grey_shading=use_grey_shading):
            #ExStart
            #ExFor:Document.shade_form_data
            #ExSummary:Shows how to apply gray shading to form fields.
            doc = aw.Document()
            builder = aw.DocumentBuilder(doc)
            self.assertTrue(doc.shade_form_data) #ExSkip
            builder.write("Hello world! ")
            builder.insert_text_input("My form field", aw.fields.TextFormFieldType.REGULAR, "",
                "Text contents of form field, which are shaded in grey by default.", 0)
            # We can turn the grey shading off, so the bookmarked text will blend in with the other text.
            doc.shade_form_data = use_grey_shading
            doc.save(ARTIFACTS_DIR + "Document.shade_form_data.docx")
            #ExEnd
def test_versions_count(self):
    """Verifies that versions_count is readable but not preserved on save."""
    #ExStart
    #ExFor:Document.versions_count
    #ExSummary:Shows how to work with the versions count feature of older Microsoft Word documents.
    doc = aw.Document(MY_DIR + "Versions.doc")
    # We can read this property of a document, but we cannot preserve it while saving.
    self.assertEqual(4, doc.versions_count)
    doc.save(ARTIFACTS_DIR + "Document.versions_count.doc")
    doc = aw.Document(ARTIFACTS_DIR + "Document.versions_count.doc")
    self.assertEqual(0, doc.versions_count)
    #ExEnd
def test_write_protection(self):
    """Applies write protection with a password and verifies it persists,
    while programmatic editing remains possible."""
    #ExStart
    #ExFor:Document.write_protection
    #ExFor:WriteProtection
    #ExFor:WriteProtection.is_write_protected
    #ExFor:WriteProtection.read_only_recommended
    #ExFor:WriteProtection.set_password(str)
    #ExFor:WriteProtection.validate_password(str)
    #ExSummary:Shows how to protect a document with a password.
    doc = aw.Document()
    builder = aw.DocumentBuilder(doc)
    builder.writeln("Hello world! This document is protected.")
    self.assertFalse(doc.write_protection.is_write_protected) #ExSkip
    self.assertFalse(doc.write_protection.read_only_recommended) #ExSkip
    # Enter a password up to 15 characters in length, and then verify the document's protection status.
    doc.write_protection.set_password("MyPassword")
    doc.write_protection.read_only_recommended = True
    self.assertTrue(doc.write_protection.is_write_protected)
    self.assertTrue(doc.write_protection.validate_password("MyPassword"))
    # Protection does not prevent the document from being edited programmatically, nor does it encrypt the contents.
    doc.save(ARTIFACTS_DIR + "Document.write_protection.docx")
    doc = aw.Document(ARTIFACTS_DIR + "Document.write_protection.docx")
    self.assertTrue(doc.write_protection.is_write_protected)
    builder = aw.DocumentBuilder(doc)
    builder.move_to_document_end()
    builder.writeln("Writing text in a protected document.")
    self.assertEqual("Hello world! This document is protected." +
                     "\rWriting text in a protected document.", doc.get_text().strip())
    #ExEnd
    self.assertTrue(doc.write_protection.read_only_recommended)
    self.assertTrue(doc.write_protection.validate_password("MyPassword"))
    self.assertFalse(doc.write_protection.validate_password("wrongpassword"))
def test_remove_personal_information(self):
    """Verifies the remove_personal_information flag round-trips, and that
    Aspose.Words itself does not strip the personal data on save."""
    for save_without_personal_info in (False, True):
        with self.subTest(save_without_personal_info=save_without_personal_info):
            #ExStart
            #ExFor:Document.remove_personal_information
            #ExSummary:Shows how to enable the removal of personal information during a manual save.
            doc = aw.Document()
            builder = aw.DocumentBuilder(doc)
            # Insert some content with personal information.
            doc.built_in_document_properties.author = "John Doe"
            doc.built_in_document_properties.company = "Placeholder Inc."
            doc.start_track_revisions(doc.built_in_document_properties.author, datetime.now())
            builder.write("Hello world!")
            doc.stop_track_revisions()
            # This flag is equivalent to File -> Options -> Trust Center -> Trust Center Settings... ->
            # Privacy Options -> "Remove personal information from file properties on save" in Microsoft Word.
            doc.remove_personal_information = save_without_personal_info
            # This option will not take effect during a save operation made using Aspose.Words.
            # Personal data will be removed from our document with the flag set when we save it manually using Microsoft Word.
            doc.save(ARTIFACTS_DIR + "Document.remove_personal_information.docx")
            doc = aw.Document(ARTIFACTS_DIR + "Document.remove_personal_information.docx")
            self.assertEqual(save_without_personal_info, doc.remove_personal_information)
            self.assertEqual("John Doe", doc.built_in_document_properties.author)
            self.assertEqual("Placeholder Inc.", doc.built_in_document_properties.company)
            self.assertEqual("John Doe", doc.revisions[0].author)
            #ExEnd
def test_show_comments(self):
    """Renders comments to PDF in annotation and balloon display modes
    (PDF text verification is commented out pending a PDF parsing dependency)."""
    #ExStart
    #ExFor:LayoutOptions.comment_display_mode
    #ExFor:CommentDisplayMode
    #ExSummary:Shows how to show comments when saving a document to a rendered format.
    doc = aw.Document()
    builder = aw.DocumentBuilder(doc)
    builder.write("Hello world!")
    comment = aw.Comment(doc, "John Doe", "J.D.", datetime.now())
    comment.set_text("My comment.")
    builder.current_paragraph.append_child(comment)
    # SHOW_IN_ANNOTATIONS is only available in Pdf1.7 and Pdf1.5 formats.
    # In other formats, it will work similarly to Hide.
    doc.layout_options.comment_display_mode = aw.layout.CommentDisplayMode.SHOW_IN_ANNOTATIONS
    doc.save(ARTIFACTS_DIR + "Document.show_comments_in_annotations.pdf")
    # Note that it's required to rebuild the document page layout (via Document.update_page_layout() method)
    # after changing the "Document.layout_options" values.
    doc.layout_options.comment_display_mode = aw.layout.CommentDisplayMode.SHOW_IN_BALLOONS
    doc.update_page_layout()
    doc.save(ARTIFACTS_DIR + "Document.show_comments_in_balloons.pdf")
    #ExEnd
    #pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR + "Document.show_comments_in_balloons.pdf")
    #text_absorber = aspose.pdf.text.TextAbsorber()
    #text_absorber.visit(pdf_doc)
    #self.assertEqual(
    #    "Hello world! Commented [J.D.1]: My comment.",
    #    text_absorber.text)
def test_copy_template_styles_via_document(self):
    """Copies styles from a template document and checks the resulting style count."""
    #ExStart
    #ExFor:Document.copy_styles_from_template(Document)
    #ExSummary:Shows how to copies styles from the template to a document via Document.
    template = aw.Document(MY_DIR + "Rendering.docx")
    target = aw.Document(MY_DIR + "Document.docx")
    self.assertEqual(18, template.styles.count) #ExSkip
    self.assertEqual(12, target.styles.count) #ExSkip
    target.copy_styles_from_template(template)
    self.assertEqual(22, target.styles.count) #ExSkip
    #ExEnd
    def test_copy_template_styles_via_document_new(self):
        """Copy styles from a template by object and by filename, overwriting name clashes."""
        #ExStart
        #ExFor:Document.copy_styles_from_template(Document)
        #ExFor:Document.copy_styles_from_template(str)
        #ExSummary:Shows how to copy styles from one document to another.
        # Create a document, and then add styles that we will copy to another document.
        template = aw.Document()
        style = template.styles.add(aw.StyleType.PARAGRAPH, "TemplateStyle1")
        style.font.name = "Times New Roman"
        style.font.color = drawing.Color.navy
        style = template.styles.add(aw.StyleType.PARAGRAPH, "TemplateStyle2")
        style.font.name = "Arial"
        style.font.color = drawing.Color.deep_sky_blue
        style = template.styles.add(aw.StyleType.PARAGRAPH, "TemplateStyle3")
        style.font.name = "Courier New"
        style.font.color = drawing.Color.royal_blue
        self.assertEqual(7, template.styles.count)
        # Create a document which we will copy the styles to.
        target = aw.Document()
        # Create a style with the same name as a style from the template document and add it to the target document.
        style = target.styles.add(aw.StyleType.PARAGRAPH, "TemplateStyle3")
        style.font.name = "Calibri"
        style.font.color = drawing.Color.orange
        self.assertEqual(5, target.styles.count)
        # There are two ways of calling the method to copy all the styles from one document to another.
        # 1 - Passing the template document object:
        target.copy_styles_from_template(template)
        # Copying styles adds all styles from the template document to the target
        # and overwrites existing styles with the same name.
        self.assertEqual(7, target.styles.count)
        self.assertEqual("Courier New", target.styles.get_by_name("TemplateStyle3").font.name)
        self.assertEqual(drawing.Color.royal_blue.to_argb(), target.styles.get_by_name("TemplateStyle3").font.color.to_argb())
        # 2 - Passing the local system filename of a template document:
        target.copy_styles_from_template(MY_DIR + "Rendering.docx")
        self.assertEqual(21, target.styles.count)
        #ExEnd
    def test_read_macros_from_existing_document(self):
        """Inspect, edit and remove VBA modules embedded in a .docm document."""
        #ExStart
        #ExFor:Document.vba_project
        #ExFor:VbaModuleCollection
        #ExFor:VbaModuleCollection.count
        #ExFor:VbaModuleCollection.__getitem__(int)
        #ExFor:VbaModuleCollection.__getitem__(string)
        #ExFor:VbaModuleCollection.remove
        #ExFor:VbaModule
        #ExFor:VbaModule.name
        #ExFor:VbaModule.source_code
        #ExFor:VbaProject
        #ExFor:VbaProject.name
        #ExFor:VbaProject.modules
        #ExFor:VbaProject.code_page
        #ExFor:VbaProject.is_signed
        #ExSummary:Shows how to access a document's VBA project information.
        doc = aw.Document(MY_DIR + "VBA project.docm")
        # A VBA project contains a collection of VBA modules.
        vba_project = doc.vba_project
        self.assertTrue(vba_project.is_signed) #ExSkip
        if vba_project.is_signed:
            print(f"Project name: {vba_project.name} signed; Project code page: {vba_project.code_page}; Modules count: {vba_project.modules.count}\n")
        else:
            print(f"Project name: {vba_project.name} not signed; Project code page: {vba_project.code_page}; Modules count: {vba_project.modules.count}\n")
        vba_modules = doc.vba_project.modules
        self.assertEqual(vba_modules.count, 3)
        for module in vba_modules:
            print(f"Module name: {module.name};\nModule code:\n{module.source_code}\n")
        # Set new source code for VBA module. You can access VBA modules in the collection either by index or by name.
        vba_modules[0].source_code = "Your VBA code..."
        vba_modules.get_by_name("Module1").source_code = "Your VBA code..."
        # Remove a module from the collection.
        vba_modules.remove(vba_modules[2])
        #ExEnd
        self.assertEqual("AsposeVBAtest", vba_project.name)
        self.assertEqual(2, vba_project.modules.count)
        self.assertEqual(1251, vba_project.code_page)
        self.assertFalse(vba_project.is_signed)
        self.assertEqual("ThisDocument", vba_modules[0].name)
        self.assertEqual("Your VBA code...", vba_modules[0].source_code)
        self.assertEqual("Module1", vba_modules[1].name)
        self.assertEqual("Your VBA code...", vba_modules[1].source_code)
    def test_save_output_parameters(self):
        """Check the MIME type reported after saving to DOC and to PDF."""
        #ExStart
        #ExFor:SaveOutputParameters
        #ExFor:SaveOutputParameters.content_type
        #ExSummary:Shows how to access output parameters of a document's save operation.
        doc = aw.Document()
        builder = aw.DocumentBuilder(doc)
        builder.writeln("Hello world!")
        # After we save a document, we can access the Internet Media Type (MIME type) of the newly created output document.
        parameters = doc.save(ARTIFACTS_DIR + "Document.save_output_parameters.doc")
        self.assertEqual("application/msword", parameters.content_type)
        # This property changes depending on the save format.
        parameters = doc.save(ARTIFACTS_DIR + "Document.save_output_parameters.pdf")
        self.assertEqual("application/pdf", parameters.content_type)
        #ExEnd
    def test_sub_document(self):
        """Access the SubDocument reference node of a master document."""
        #ExStart
        #ExFor:SubDocument
        #ExFor:SubDocument.node_type
        #ExSummary:Shows how to access a master document's subdocument.
        doc = aw.Document(MY_DIR + "Master document.docx")
        sub_documents = doc.get_child_nodes(aw.NodeType.SUB_DOCUMENT, True)
        self.assertEqual(1, sub_documents.count) #ExSkip
        # This node serves as a reference to an external document, and its contents cannot be accessed.
        sub_document = sub_documents[0].as_sub_document()
        self.assertFalse(sub_document.is_composite)
        #ExEnd
    def test_create_web_extension(self):
        """Attach a web-extension task pane to a document and verify it round-trips."""
        #ExStart
        #ExFor:BaseWebExtensionCollection.add()
        #ExFor:BaseWebExtensionCollection.clear
        #ExFor:TaskPane
        #ExFor:TaskPane.dock_state
        #ExFor:TaskPane.is_visible
        #ExFor:TaskPane.width
        #ExFor:TaskPane.is_locked
        #ExFor:TaskPane.web_extension
        #ExFor:TaskPane.row
        #ExFor:WebExtension
        #ExFor:WebExtension.reference
        #ExFor:WebExtension.properties
        #ExFor:WebExtension.bindings
        #ExFor:WebExtension.is_frozen
        #ExFor:WebExtensionReference.id
        #ExFor:WebExtensionReference.version
        #ExFor:WebExtensionReference.store_type
        #ExFor:WebExtensionReference.store
        #ExFor:WebExtensionPropertyCollection
        #ExFor:WebExtensionBindingCollection
        #ExFor:WebExtensionProperty.__init__(str,str)
        #ExFor:WebExtensionBinding.__init__(str,WebExtensionBindingType,str)
        #ExFor:WebExtensionStoreType
        #ExFor:WebExtensionBindingType
        #ExFor:TaskPaneDockState
        #ExFor:TaskPaneCollection
        #ExSummary:Shows how to add a web extension to a document.
        doc = aw.Document()
        # Create task pane with "MyScript" add-in, which will be used by the document,
        # then set its default location.
        my_script_task_pane = aw.webextensions.TaskPane()
        doc.web_extension_task_panes.add(my_script_task_pane)
        my_script_task_pane.dock_state = aw.webextensions.TaskPaneDockState.RIGHT
        my_script_task_pane.is_visible = True
        my_script_task_pane.width = 300
        my_script_task_pane.is_locked = True
        # If there are multiple task panes in the same docking location, we can set this index to arrange them.
        my_script_task_pane.row = 1
        # Create an add-in called "MyScript Math Sample", which the task pane will display within.
        web_extension = my_script_task_pane.web_extension
        # Set application store reference parameters for our add-in, such as the ID.
        web_extension.reference.id = "WA104380646"
        web_extension.reference.version = "1.0.0.0"
        web_extension.reference.store_type = aw.webextensions.WebExtensionStoreType.OMEX
        web_extension.reference.store = "en-US"
        web_extension.properties.add(aw.webextensions.WebExtensionProperty("MyScript", "MyScript Math Sample"))
        web_extension.bindings.add(aw.webextensions.WebExtensionBinding("MyScript", aw.webextensions.WebExtensionBindingType.TEXT, "104380646"))
        # Allow the user to interact with the add-in.
        web_extension.is_frozen = False
        # We can access the web extension in Microsoft Word via Developer -> Add-ins.
        doc.save(ARTIFACTS_DIR + "Document.create_web_extension.docx")
        # Remove all web extension task panes at once like this.
        doc.web_extension_task_panes.clear()
        self.assertEqual(0, doc.web_extension_task_panes.count)
        #ExEnd
        doc = aw.Document(ARTIFACTS_DIR + "Document.create_web_extension.docx")
        my_script_task_pane = doc.web_extension_task_panes[0]
        self.assertEqual(aw.webextensions.TaskPaneDockState.RIGHT, my_script_task_pane.dock_state)
        self.assertTrue(my_script_task_pane.is_visible)
        self.assertEqual(300.0, my_script_task_pane.width)
        self.assertTrue(my_script_task_pane.is_locked)
        self.assertEqual(1, my_script_task_pane.row)
        web_extension = my_script_task_pane.web_extension
        self.assertEqual("WA104380646", web_extension.reference.id)
        self.assertEqual("1.0.0.0", web_extension.reference.version)
        self.assertEqual(aw.webextensions.WebExtensionStoreType.OMEX, web_extension.reference.store_type)
        self.assertEqual("en-US", web_extension.reference.store)
        self.assertEqual("MyScript", web_extension.properties[0].name)
        self.assertEqual("MyScript Math Sample", web_extension.properties[0].value)
        self.assertEqual("MyScript", web_extension.bindings[0].id)
        self.assertEqual(aw.webextensions.WebExtensionBindingType.TEXT, web_extension.bindings[0].binding_type)
        self.assertEqual("104380646", web_extension.bindings[0].app_ref)
        self.assertFalse(web_extension.is_frozen)
    def test_get_web_extension_info(self):
        """Enumerate and remove a document's web-extension task panes."""
        #ExStart
        #ExFor:BaseWebExtensionCollection
        #ExFor:BaseWebExtensionCollection.__iter__
        #ExFor:BaseWebExtensionCollection.remove(int)
        #ExFor:BaseWebExtensionCollection.count
        #ExFor:BaseWebExtensionCollection.__getitem__(int)
        #ExSummary:Shows how to work with a document's collection of web extensions.
        doc = aw.Document(MY_DIR + "Web extension.docx")
        self.assertEqual(1, doc.web_extension_task_panes.count)
        # Print all properties of the document's web extension.
        web_extension_property_collection = doc.web_extension_task_panes[0].web_extension.properties
        for web_extension_property in web_extension_property_collection:
            print(f"Binding name: {web_extension_property.name}; Binding value: {web_extension_property.value}")
        # Remove the web extension.
        doc.web_extension_task_panes.remove(0)
        self.assertEqual(0, doc.web_extension_task_panes.count)
        #ExEnd
    def test_epub_cover(self):
        """Set document properties and a thumbnail that become .epub metadata/cover."""
        doc = aw.Document()
        builder = aw.DocumentBuilder(doc)
        builder.writeln("Hello world!")
        # When saving to .epub, some Microsoft Word document properties convert to .epub metadata.
        doc.built_in_document_properties.author = "John Doe"
        doc.built_in_document_properties.title = "My Book Title"
        # The thumbnail we specify here can become the cover image.
        with open(IMAGE_DIR + "Transparent background logo.png", "rb") as file:
            image = file.read()
        doc.built_in_document_properties.thumbnail = image
        doc.save(ARTIFACTS_DIR + "Document.epub_cover.epub")
    def test_text_watermark(self):
        """Create, customize and remove a text watermark."""
        #ExStart
        #ExFor:Watermark.set_text(str)
        #ExFor:Watermark.set_text(str,TextWatermarkOptions)
        #ExFor:Watermark.remove
        #ExFor:TextWatermarkOptions.font_family
        #ExFor:TextWatermarkOptions.font_size
        #ExFor:TextWatermarkOptions.color
        #ExFor:TextWatermarkOptions.layout
        #ExFor:TextWatermarkOptions.is_semitrasparent
        #ExFor:WatermarkLayout
        #ExFor:WatermarkType
        #ExSummary:Shows how to create a text watermark.
        doc = aw.Document()
        # Add a plain text watermark.
        doc.watermark.set_text("Aspose Watermark")
        # If we wish to edit the text formatting using it as a watermark,
        # we can do so by passing a TextWatermarkOptions object when creating the watermark.
        text_watermark_options = aw.TextWatermarkOptions()
        text_watermark_options.font_family = "Arial"
        text_watermark_options.font_size = 36
        text_watermark_options.color = drawing.Color.black
        text_watermark_options.layout = aw.WatermarkLayout.DIAGONAL
        text_watermark_options.is_semitrasparent = False
        doc.watermark.set_text("Aspose Watermark", text_watermark_options)
        doc.save(ARTIFACTS_DIR + "Document.text_watermark.docx")
        # We can remove a watermark from a document like this.
        if doc.watermark.type == aw.WatermarkType.TEXT:
            doc.watermark.remove()
        #ExEnd
        doc = aw.Document(ARTIFACTS_DIR + "Document.text_watermark.docx")
        self.assertEqual(aw.WatermarkType.TEXT, doc.watermark.type)
    def test_image_watermark(self):
        """Create an image watermark with custom scale/washout options."""
        #ExStart
        #ExFor:Watermark.set_image(Image,ImageWatermarkOptions)
        #ExFor:ImageWatermarkOptions.scale
        #ExFor:ImageWatermarkOptions.is_washout
        #ExSummary:Shows how to create a watermark from an image in the local file system.
        doc = aw.Document()
        # Modify the image watermark's appearance with an ImageWatermarkOptions object,
        # then pass it while creating a watermark from an image file.
        image_watermark_options = aw.ImageWatermarkOptions()
        image_watermark_options.scale = 5
        image_watermark_options.is_washout = False
        doc.watermark.set_image(drawing.Image.from_file(IMAGE_DIR + "Logo.jpg"), image_watermark_options)
        doc.save(ARTIFACTS_DIR + "Document.image_watermark.docx")
        #ExEnd
        doc = aw.Document(ARTIFACTS_DIR + "Document.image_watermark.docx")
        self.assertEqual(aw.WatermarkType.IMAGE, doc.watermark.type)
    def test_spelling_and_grammar_errors(self):
        """Toggle visibility of spelling/grammar error marks and verify round-trip."""
        for show_errors in (False, True):
            with self.subTest(show_errors=show_errors):
                #ExStart
                #ExFor:Document.show_grammatical_errors
                #ExFor:Document.show_spelling_errors
                #ExSummary:Shows how to show/hide errors in the document.
                doc = aw.Document()
                builder = aw.DocumentBuilder(doc)
                # Insert two sentences with mistakes that would be picked up
                # by the spelling and grammar checkers in Microsoft Word.
                builder.writeln("There is a speling error in this sentence.")
                builder.writeln("Their is a grammatical error in this sentence.")
                # If these options are enabled, then spelling errors will be underlined
                # in the output document by a jagged red line, and a double blue line will highlight grammatical mistakes.
                doc.show_grammatical_errors = show_errors
                doc.show_spelling_errors = show_errors
                doc.save(ARTIFACTS_DIR + "Document.spelling_and_grammar_errors.docx")
                #ExEnd
                doc = aw.Document(ARTIFACTS_DIR + "Document.spelling_and_grammar_errors.docx")
                self.assertEqual(show_errors, doc.show_grammatical_errors)
                self.assertEqual(show_errors, doc.show_spelling_errors)
    def test_granularity_compare_option(self):
        """Compare two documents at character vs word granularity and check revision groups."""
        for granularity in (aw.comparing.Granularity.CHAR_LEVEL,
                            aw.comparing.Granularity.WORD_LEVEL):
            with self.subTest(granularity=granularity):
                #ExStart
                #ExFor:CompareOptions.granularity
                #ExFor:Granularity
                #ExSummary:Shows to specify a granularity while comparing documents.
                doc_a = aw.Document()
                builder_a = aw.DocumentBuilder(doc_a)
                builder_a.writeln("Alpha Lorem ipsum dolor sit amet, consectetur adipiscing elit")
                doc_b = aw.Document()
                builder_b = aw.DocumentBuilder(doc_b)
                builder_b.writeln("Lorems ipsum dolor sit amet consectetur - \"adipiscing\" elit")
                # Specify whether changes are tracking
                # by character ('Granularity.CHAR_LEVEL'), or by word ('Granularity.WORD_LEVEL').
                compare_options = aw.comparing.CompareOptions()
                compare_options.granularity = granularity
                doc_a.compare(doc_b, "author", datetime.now(), compare_options)
                # The first document's collection of revision groups contains all the differences between documents.
                groups = doc_a.revisions.groups
                self.assertEqual(5, groups.count)
                #ExEnd
                if granularity == aw.comparing.Granularity.CHAR_LEVEL:
                    self.assertEqual(aw.RevisionType.DELETION, groups[0].revision_type)
                    self.assertEqual("Alpha ", groups[0].text)
                    self.assertEqual(aw.RevisionType.DELETION, groups[1].revision_type)
                    self.assertEqual(",", groups[1].text)
                    self.assertEqual(aw.RevisionType.INSERTION, groups[2].revision_type)
                    self.assertEqual("s", groups[2].text)
                    self.assertEqual(aw.RevisionType.INSERTION, groups[3].revision_type)
                    self.assertEqual("- \"", groups[3].text)
                    self.assertEqual(aw.RevisionType.INSERTION, groups[4].revision_type)
                    self.assertEqual("\"", groups[4].text)
                else:
                    self.assertEqual(aw.RevisionType.DELETION, groups[0].revision_type)
                    self.assertEqual("Alpha Lorem", groups[0].text)
                    self.assertEqual(aw.RevisionType.DELETION, groups[1].revision_type)
                    self.assertEqual(",", groups[1].text)
                    self.assertEqual(aw.RevisionType.INSERTION, groups[2].revision_type)
                    self.assertEqual("Lorems", groups[2].text)
                    self.assertEqual(aw.RevisionType.INSERTION, groups[3].revision_type)
                    self.assertEqual("- \"", groups[3].text)
                    self.assertEqual(aw.RevisionType.INSERTION, groups[4].revision_type)
                    self.assertEqual("\"", groups[4].text)
    def test_ignore_printer_metrics(self):
        """Disable the 'use printer metrics to lay out document' layout option."""
        #ExStart
        #ExFor:LayoutOptions.ignore_printer_metrics
        #ExSummary:Shows how to ignore 'Use printer metrics to lay out document' option.
        doc = aw.Document(MY_DIR + "Rendering.docx")
        doc.layout_options.ignore_printer_metrics = False
        doc.save(ARTIFACTS_DIR + "Document.ignore_printer_metrics.docx")
        #ExEnd
    def test_extract_pages(self):
        """Extract a page range of a document into a new document."""
        #ExStart
        #ExFor:Document.extract_pages
        #ExSummary:Shows how to get specified range of pages from the document.
        doc = aw.Document(MY_DIR + "Layout entities.docx")
        doc = doc.extract_pages(0, 2)
        doc.save(ARTIFACTS_DIR + "Document.extract_pages.docx")
        #ExEnd
        doc = aw.Document(ARTIFACTS_DIR + "Document.extract_pages.docx")
        self.assertEqual(doc.page_count, 2)
    def test_spelling_or_grammar(self):
        """Flag a document so Microsoft Word re-runs spelling/grammar verification."""
        for check_spelling_grammar in (True, False):
            with self.subTest(check_spelling_grammar=check_spelling_grammar):
                #ExStart
                #ExFor:Document.spelling_checked
                #ExFor:Document.grammar_checked
                #ExSummary:Shows how to set spelling or grammar verifying.
                doc = aw.Document()
                # The string with spelling errors.
                doc.first_section.body.first_paragraph.runs.add(aw.Run(doc, "The speeling in this documentz is all broked."))
                # Spelling/Grammar check start if we set properties to False.
                # We can see all errors in Microsoft Word via Review -> Spelling & Grammar.
                # Note that Microsoft Word does not start grammar/spell check automatically for DOC and RTF document format.
                doc.spelling_checked = check_spelling_grammar
                doc.grammar_checked = check_spelling_grammar
                doc.save(ARTIFACTS_DIR + "Document.spelling_or_grammar.docx")
                #ExEnd
    def test_allow_embedding_post_script_fonts(self):
        """Allow embedding PostScript fonts when TrueType embedding is enabled."""
        #ExStart
        #ExFor:SaveOptions.allow_embedding_post_script_fonts
        #ExSummary:Shows how to save the document with PostScript font.
        doc = aw.Document()
        builder = aw.DocumentBuilder(doc)
        builder.font.name = "PostScriptFont"
        builder.writeln("Some text with PostScript font.")
        # Load the font with PostScript to use in the document.
        with open(FONTS_DIR + "AllegroOpen.otf", "rb") as file:
            otf = aw.fonts.MemoryFontSource(file.read())
        doc.font_settings = aw.fonts.FontSettings()
        doc.font_settings.set_fonts_sources([ otf ])
        # Embed TrueType fonts.
        doc.font_infos.embed_true_type_fonts = True
        # Allow embedding PostScript fonts while embedding TrueType fonts.
        # Microsoft Word does not embed PostScript fonts, but can open documents with embedded fonts of this type.
        save_options = aw.saving.SaveOptions.create_save_options(aw.SaveFormat.DOCX)
        save_options.allow_embedding_post_script_fonts = True
        doc.save(ARTIFACTS_DIR + "Document.allow_embedding_post_script_fonts.docx", save_options)
        #ExEnd
    def test_frameset(self):
        """Read and modify frameset frame links, then verify after a save/open round-trip."""
        #ExStart
        #ExFor:Document.frameset
        #ExFor:Frameset
        #ExFor:Frameset.frame_default_url
        #ExFor:Frameset.is_frame_link_to_file
        #ExFor:Frameset.child_framesets
        #ExSummary:Shows how to access frames on-page.
        # Document contains several frames with links to other documents.
        doc = aw.Document(MY_DIR + "Frameset.docx")
        # We can check the default URL (a web page URL or local document) or if the frame is an external resource.
        self.assertEqual("https://file-examples-com.github.io/uploads/2017/02/file-sample_100kB.docx",
            doc.frameset.child_framesets[0].child_framesets[0].frame_default_url)
        self.assertTrue(doc.frameset.child_framesets[0].child_framesets[0].is_frame_link_to_file)
        self.assertEqual("Document.docx", doc.frameset.child_framesets[1].frame_default_url)
        self.assertFalse(doc.frameset.child_framesets[1].is_frame_link_to_file)
        # Change properties for one of our frames.
        doc.frameset.child_framesets[0].child_framesets[0].frame_default_url = "https://github.com/aspose-words/Aspose.Words-for-.NET/blob/master/Examples/Data/Absolute%20position%20tab.docx"
        doc.frameset.child_framesets[0].child_framesets[0].is_frame_link_to_file = False
        #ExEnd
        doc = DocumentHelper.save_open(doc)
        self.assertEqual(
            "https://github.com/aspose-words/Aspose.Words-for-.NET/blob/master/Examples/Data/Absolute%20position%20tab.docx",
            doc.frameset.child_framesets[0].child_framesets[0].frame_default_url)
        self.assertFalse(doc.frameset.child_framesets[0].child_framesets[0].is_frame_link_to_file)
| 43.979849 | 525 | 0.682379 | 104,153 | 0.994111 | 0 | 0 | 2,311 | 0.022058 | 0 | 0 | 46,083 | 0.439849 |
2c2f817fd0220fe68a499dd03c634d8f2853d57a | 2,865 | py | Python | code/tests/functional/tests/test_refer.py | CiscoSecurity/tr-05-serverless-pulsedive | 00d25695ad5d904a30390151e7c695ac5e64476c | [
"MIT"
] | 3 | 2020-03-08T20:30:55.000Z | 2021-11-30T06:01:15.000Z | code/tests/functional/tests/test_refer.py | CiscoSecurity/tr-05-serverless-pulsedive | 00d25695ad5d904a30390151e7c695ac5e64476c | [
"MIT"
] | 6 | 2020-04-02T16:11:42.000Z | 2020-12-17T11:06:06.000Z | code/tests/functional/tests/test_refer.py | CiscoSecurity/tr-05-serverless-pulsedive | 00d25695ad5d904a30390151e7c695ac5e64476c | [
"MIT"
] | 1 | 2020-10-12T18:10:36.000Z | 2020-10-12T18:10:36.000Z | import pytest
from ctrlibrary.core.utils import get_observables
from ctrlibrary.threatresponse.enrich import enrich_refer_observables
from tests.functional.tests.constants import (
MODULE_NAME,
PULSEDIVE_URL,
OBSERVABLE_HUMAN_READABLE_NAME
)
from urllib.parse import quote
@pytest.mark.parametrize(
    'observable,observable_type',
    (
        ('1.1.1.1', 'ip'),
        ('brehmen.com', 'domain'),
        ('2a01:238:20a:202:1159::', 'ipv6'),
        ('http://juanthradio.com/Script/DOC/', 'url'),
    )
)
def test_positive_refer_observable(module_headers, observable,
                                   observable_type):
    """Perform testing for enrich refer observables endpoint to get
    data for observable from Pulsedive
    ID: CCTRI-1007-e6401994-dbef-4467-9792-72f80fd2faa1
    Steps:
        1. Send request to enrich refer observable endpoint
    Expected results:
        1. Check that data in response body contains expected refer field for
            observable from Pulsedive
    Importance: Critical
    """
    observables = [{'type': observable_type, 'value': observable}]
    response_from_all_modules = enrich_refer_observables(
        payload=observables,
        **{'headers': module_headers}
    )
    references = get_observables(response_from_all_modules, MODULE_NAME)
    # Pulsedive is expected to return two reference entities per observable:
    # a "Search ..." link and a "Browse ..." link.
    assert len(references) == 2, (
        'Expected exactly 2 reference entities from Pulsedive')
    for reference in references:
        assert reference['id'].startswith('ref-pulsedive') and (
            reference['id'].endswith(
                f'{observable_type}-{quote(observable, safe="")}'))
        assert reference['module'] == MODULE_NAME
        assert reference['module_instance_id']
        assert reference['module_type_id']
        if reference['title'].startswith('Search'):
            assert reference['title'] == (
                'Search for this '
                f'{OBSERVABLE_HUMAN_READABLE_NAME[observable_type]}')
            assert reference['description'] == (
                'Lookup this '
                f'{OBSERVABLE_HUMAN_READABLE_NAME[observable_type]} '
                f'on {MODULE_NAME}')
            assert reference['categories'] == [MODULE_NAME, 'Search']
            assert reference['url'].startswith(f'{PULSEDIVE_URL}/browse/')
        elif reference['title'].startswith('Browse'):
            assert reference['title'] == (
                f'Browse {OBSERVABLE_HUMAN_READABLE_NAME[observable_type]}')
            assert reference['description'] == (
                'Browse this '
                f'{OBSERVABLE_HUMAN_READABLE_NAME[observable_type]}'
                f' on {MODULE_NAME}')
            assert reference['categories'] == [MODULE_NAME, 'Browse']
            assert reference['url'].startswith(f'{PULSEDIVE_URL}/indicator/')
        else:
            raise AssertionError(f'Unknown reference: {reference["title"]!r}.')
2c2f9b90ebf5604014575bad5dad0fa9a8c11c47 | 857 | py | Python | dsvfile/Models/FactorySystem/AssemblerComponent.py | phoenixx-666/dsvread | 8a073c12343b2f0d34f9b728282dfefe10999f24 | [
"MIT"
] | 2 | 2021-03-01T19:57:20.000Z | 2021-08-02T20:54:48.000Z | dsvfile/Models/FactorySystem/AssemblerComponent.py | phoenixx-666/dsvread | 8a073c12343b2f0d34f9b728282dfefe10999f24 | [
"MIT"
] | null | null | null | dsvfile/Models/FactorySystem/AssemblerComponent.py | phoenixx-666/dsvread | 8a073c12343b2f0d34f9b728282dfefe10999f24 | [
"MIT"
] | null | null | null | from ...Fields import BoolField, ConditionalBlockStart
from ...Fields.Enums import ERecipe, ERecipeType
from ...Func import g
from . import Model, Int32Field, ArrayField
class AssemblerComponent(Model):
version = Int32Field()
id = Int32Field()
entityId = Int32Field()
pcId = Int32Field()
replicating = BoolField()
outputing = BoolField()
speed = Int32Field()
time = Int32Field()
recipeId = ERecipe()
recipeIdChecker = ConditionalBlockStart(arg_fields='recipeId', condition_func=g(0))
recipeType = ERecipeType()
timeSpend = Int32Field()
requires = ArrayField(Int32Field)
requireCounts = ArrayField(Int32Field)
served = ArrayField(Int32Field)
needs = ArrayField(Int32Field)
products = ArrayField(Int32Field)
productCounts = ArrayField(Int32Field)
produced = ArrayField(Int32Field)
| 31.740741 | 87 | 0.719953 | 684 | 0.798133 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.011669 |
2c31499cb4bfcf205bb5c15d2566ed72e48c35b6 | 13,612 | py | Python | src/functions.py | kkFlashK1/Redis-Clone | e2754a4367d6f78421ab8bc32277eec7306ccdbc | [
"MIT"
] | null | null | null | src/functions.py | kkFlashK1/Redis-Clone | e2754a4367d6f78421ab8bc32277eec7306ccdbc | [
"MIT"
] | null | null | null | src/functions.py | kkFlashK1/Redis-Clone | e2754a4367d6f78421ab8bc32277eec7306ccdbc | [
"MIT"
] | 4 | 2020-04-20T06:33:05.000Z | 2020-05-04T06:18:50.000Z | import time
from sortedcontainers import SortedSet
from .exception import *
import traceback
import random
class Redis:
def __init__(self):
self.hash_table = {}
self.expire = {}
self.exp_queue = SortedSet()
self.changed = {}
self.z_map = {}
def __repr__(self):
return f'curr_time : {time.time()}\n\n' \
f'hash_table :\n{self.hash_table}\n\n' \
f'expire :\n{self.expire}\n\n' \
f'changed :\n{self.changed}\n\n' \
f'z_map :\n{self.z_map}'
def log(self, curr_time, func, *args):
command = [str(curr_time), func]
for i in args:
command.append(i)
command = ' '.join(command)
command = command.strip(' ')
command += '\n'
with open("log.txt", "a") as logfile:
logfile.write(command)
def search_util(self, item):
low, high = 0, len(self.exp_queue)
while low <= high:
mid = (low + high) // 2
if self.exp_queue[mid][0] == item:
self.exp_queue.pop(mid)
return True
elif self.exp_queue[mid][0] < item[0]:
low = mid + 1
else:
high = mid - 1
return False
def search_index_util(self, sorted_set, item):
low, high = 0, len(sorted_set)-1
while low <= high:
mid = (low + high) // 2
if sorted_set[mid][0] == item:
return mid
elif sorted_set[mid][0] < item:
low = mid + 1
else:
high = mid - 1
# return str(None)
return 'Index Not Found'
def destroy_util(self, curr_time, hash_key):
"""
:param param:
:return:
"""
if hash_key in self.hash_table and self.expire[hash_key] is not None and self.expire[hash_key] <= round(time.time(), 4):
self.DELETE(curr_time, hash_key)
return True
else:
return False
def auto_clean_util(self, curr_time):
count = 0
while count < 10 and len(self.hash_table) > 100:
sample = random.sample(self.hash_table.keys(), 10)
for key in sample:
self.destroy_util(curr_time, key)
count += 1
def START(self):
pass
def GET(self, curr_time, hash_key):
"""
:param param:
:return:
"""
self.destroy_util(curr_time, hash_key)
if hash_key in self.hash_table:
if isinstance(self.hash_table[hash_key], str):
# Element Found
return self.hash_table[hash_key]
else:
# Throw Error -> Element found but not string
raise NoStringValueError('NoStringValueError')
else:
# Element not found
return str(None)
def EXPIRE(self, curr_time, hash_key, seconds):
"""
:param hash_key:
:param seconds:
:return:
"""
if hash_key in self.hash_table:
exp_time = round(curr_time, 4) + float(seconds)
self.expire[hash_key] = exp_time
if self.expire[hash_key] is None:
self.exp_queue.add((exp_time, hash_key))
else:
# old_exp = self.expire[hash_key]
# self.search_util((old_exp, hash_key))
self.exp_queue.add((exp_time, hash_key))
self.log(curr_time, "EXPIRE", hash_key, seconds)
return '1'
else:
return '0'
def TTL(self, curr_time, hash_key):
self.destroy_util(curr_time, hash_key)
if hash_key in self.hash_table and self.expire[hash_key] is not None:
return str(time.time() - self.expire[hash_key])
else:
return str(None)
def ping(self):
return 'PONG'
def DELETE(self, curr_time, hash_key):
"""
:param param:
:return:
"""
if hash_key in self.hash_table:
del self.hash_table[hash_key]
del self.expire[hash_key]
if hash_key in self.changed:
del self.changed[hash_key]
if hash_key in self.z_map:
del self.z_map[hash_key]
self.log(curr_time, "DELETE", hash_key)
return '1'
else:
return '0'
def SET(self, curr_time, hash_key, hash_val, *args):
self.destroy_util(curr_time, hash_key)
x = hash_key in self.hash_table
ex, px, xx, nx, keepttl = [False] * 5
kwargs = {}
for i in range(len(args)):
if args[i].lower() == 'ex':
ex = True
try:
kwargs['EX'] = int(args[i + 1])
except:
raise InvalidRequest('InvalidRequest')
elif args[i].lower() == 'px':
px = True
try:
kwargs['PX'] = int(args[i + 1])
except:
raise InvalidRequest('InvalidRequest')
elif args[i].lower() == 'xx':
xx = True
elif args[i].lower() == 'nx':
nx = True
elif args[i].lower() == 'keepttl':
keepttl = True
if (x is False and xx) or (x is True and nx):
return str(None)
# Make entry in both expiry table and hash table
if (ex and px) or (nx and xx):
raise InvalidRequest('InvalidRequest')
if keepttl:
if ex or nx:
raise InvalidRequest('InvalidRequest')
else:
if hash_key in self.hash_table and self.expire[hash_key] is not None:
self.hash_table[hash_key] = str(hash_val)
else:
self.hash_table[hash_key] = str(hash_val)
self.expire[hash_key] = None
self.log(curr_time, 'SET', hash_key, hash_val, *args)
return 'OK'
else:
self.hash_table[hash_key] = str(hash_val)
old_exp = None
if hash_key in self.expire:
old_exp = self.expire[hash_key]
self.expire[hash_key] = None
if old_exp is not None:
self.search_util((old_exp, hash_key))
if ex:
exp_time = round(curr_time, 4) + kwargs['EX']
self.expire[hash_key] = exp_time
# Remove then Add in Queue
self.exp_queue.add((exp_time, hash_key))
elif px:
seconds = round((kwargs['PX'] / 1000), 4)
exp_time = round(curr_time, 4) + seconds
self.expire[hash_key] = exp_time
# Remove and Add in Queue
self.exp_queue.add((exp_time, hash_key))
self.log(curr_time, 'SET', hash_key, hash_val, *args)
return 'OK'
    def ZINCRBY(self, hash_key, score, key):
        """Increment the score of ``key`` in the sorted set at ``hash_key``.

        TODO: not implemented yet -- currently a no-op placeholder.
        """
        # if hash_key in self.hash_table and type(self.hash_table[hash_key]) == SortedSet:
        pass
    def ZADD(self, curr_time, hash_key, score, key, *args):
        """Add (score, member) pairs to the sorted set at ``hash_key``.

        ``args`` may carry extra score/member pairs followed by optional
        flags (case-insensitive): NX (only create), XX (only update), CH
        (return the change counter instead of the cardinality), INCR.
        Returns a stringified count, or raises SyntaxError on malformed
        input and InvalidDataType when the key holds a non-zset value.

        NOTE(review): ``pairs`` always contains at least the leading
        (score, key) pair, so the ``incr and len(pairs) > 0`` guard makes
        the INCR flag unconditionally raise SyntaxError -- INCR is
        effectively unsupported.  Re-adding an existing member with a new
        score also inserts a second (score, member) pair into the SortedSet
        rather than replacing the old one.
        """
        self.destroy_util(curr_time, hash_key)
        # ``index`` ends up as the position of the first keyword flag; the
        # args before it are extra score/member pairs.
        nx, xx, ch, incr, index = False, False, False, False, len(args)
        ind = 0
        for arg in args:
            if arg.lower() == 'nx':
                nx = True
                index = min(ind, index)
            elif arg.lower() == 'xx':
                xx = True
                index = min(ind, index)
            elif arg.lower() == 'ch':
                ch = True
                index = min(ind, index)
            elif arg.lower() == 'incr':
                incr = True
                index = min(ind, index)
            ind += 1
        # NOTE(review): ``keyword`` is never used.
        keyword = ['NX', 'XX', 'CH', 'INCR']
        if index % 2 != 0 or (nx and xx):
            raise SyntaxError('SyntaxError')
        pairs = [(int(score), key)]
        # self.z_map[hash_key] = {key: int(score)}
        for i in range(0, index, 2):
            score1 = args[i]
            key1 = args[i + 1]
            pairs.append((int(score1), key1))
        if incr and len(pairs) > 0:
            raise SyntaxError('SyntaxError')
        # handle INCR Case
        # if incr:
        x = hash_key in self.hash_table
        if (x is False and xx) or (x is True and nx):
            # NX/XX precondition failed: report current state without writing.
            if ch:
                if x:
                    return str(self.changed[hash_key])
                else:
                    return '0'
            else:
                if x:
                    return str(len(self.hash_table[hash_key]))
                else:
                    return '0'
        if hash_key not in self.hash_table:
            # First write to this key: create the zset and its bookkeeping.
            self.hash_table[hash_key] = SortedSet()
            self.changed[hash_key] = 0
            self.expire[hash_key] = None
            self.z_map[hash_key] = {}
        elif hash_key in self.hash_table and type(self.hash_table[hash_key]) != SortedSet:
            raise InvalidDataType('InvalidDataType')
        for score1, key1 in pairs:
            self.hash_table[hash_key].add((score1, key1))
            self.changed[hash_key] += 1
            self.z_map[hash_key][key1] = score1
        if ch:
            self.log(curr_time, 'ZADD', hash_key, str(score), key, *args)
            return str(self.changed[hash_key])
        else:
            self.log(curr_time, 'ZADD', hash_key, str(score), key, *args)
            return str(len(self.hash_table[hash_key]))
def ZRANK(self, curr_time, hash_key, value):
self.destroy_util(curr_time, hash_key)
if hash_key in self.hash_table and value in self.z_map[hash_key]:
x = self.hash_table[hash_key]
if type(x) == SortedSet:
index = self.search_index_util(x, self.z_map[hash_key][value])
return str(index)
else:
raise InvalidDataType('WrongDataType')
else:
return str(None)
def ZREVRANK(self, curr_time, hash_key, value):
rank = self.ZRANK(curr_time, hash_key, value)
if rank == 'None':
return str(None)
else:
rank = int(rank)
rev_rank = len(hash_key[value]) - rank - 1
return str(rev_rank)
def ZRANGE(self, curr_time, hash_key, start_index, end_index, *args):
self.destroy_util(curr_time, hash_key)
if hash_key in self.hash_table:
ss = self.hash_table[hash_key]
if type(ss) == SortedSet:
if int(start_index) < 0:
temp = abs(int(start_index))
start_index = len(ss) - temp
if int(end_index) < 0:
temp = abs(int(end_index))
end_index = len(ss) - temp
if int(start_index) <= int(end_index) <= len(ss) - 1:
ans = []
s = ''
if len(args) == 1 and args[0].lower() == 'withscores':
for i in range(int(start_index), int(end_index) + 1):
ans.append(' '.join(map(str, ss[i])))
# for a, b in ans:
# s += str(a) + " " + str(b) + ", "
s = ', '.join(ans)
elif len(args) == 0:
for i in range(int(start_index), int(end_index) + 1):
ans.append(ss[i][1])
# for a in ans:
# s += str(a) + ", "
s = ', '.join(ans)
else:
raise SyntaxError('SyntaxError')
return s
else:
raise InvalidFormat('WrongFormat')
else:
raise InvalidDataType('WrongDataType')
else:
return str(None)
def ZREVRANGE(self, curr_time, hash_key, start_index, end_index, *args):
self.destroy_util(curr_time, hash_key)
if hash_key in self.hash_table:
ss = self.hash_table[hash_key]
if type(ss) == SortedSet:
if int(start_index) < 0:
temp = abs(int(start_index))
start_index = len(ss) - temp
if int(end_index) < 0:
temp = abs(int(end_index))
end_index = len(ss) - temp
if int(start_index) <= int(end_index) <= len(ss) - 1:
ans = []
s = ''
n = len(ss)
start, stop = n - int(start_index) - 1, n - int(end_index) - 1
if len(args) == 1 and args[0].lower() == 'withscores':
for i in range(start, stop - 1, -1):
ans.append(' '.join(map(str, ss[i])))
# for a, b in ans:
# s += str(a) + " " + str(b) + ", "
s = ', '.join(ans)
elif len(args) == 0:
for i in range(start, stop - 1, -1):
ans.append(ss[i][1])
# for a in ans:
# s += str(a) + ", "
s = ', '.join(ans)
else:
raise SyntaxError('SyntaxError')
return s
else:
raise InvalidFormat('WrongFormat')
else:
raise InvalidDataType('WrongDataType')
else:
return str(None)
| 36.010582 | 128 | 0.475169 | 13,503 | 0.991992 | 0 | 0 | 0 | 0 | 0 | 0 | 1,477 | 0.108507 |
2c31a024dccd00a7e17a289a6b5ec9699ad4bb7a | 4,749 | py | Python | code/gamescene.py | prake71/blackandwhite | 2f23f4b3dc57080d230a4c423332000fe02b8024 | [
"MIT"
] | null | null | null | code/gamescene.py | prake71/blackandwhite | 2f23f4b3dc57080d230a4c423332000fe02b8024 | [
"MIT"
] | null | null | null | code/gamescene.py | prake71/blackandwhite | 2f23f4b3dc57080d230a4c423332000fe02b8024 | [
"MIT"
] | null | null | null | import pygame
import constants
from player import *
from scene import *
from level01 import *
from level03 import *
from level02 import *
from customscene import *
import titlescene
class GameScene(Scene):
scr_w = constants.SCREENWIDTH
scr_h = constants.SCREENHEIGHT
def __init__(self, levelno):
super(GameScene, self).__init__()
# Create the player
self.player = Player()
self.player.inlevelno = levelno
# Create all the levels
self.level_list = []
self.level_list.append(Level_01(self.player))
self.level_list.append(Level_03(self.player))
# Set the current level
self.current_level_no = levelno
self.current_level = self.level_list[self.current_level_no]
self.player.level = self.current_level
self.active_sprite_list = pygame.sprite.Group()
self.set_player_pos()
# music
pygame.mixer.init()
self.music = pygame.mixer.music.load("music/jumpandrun.ogg")
pygame.mixer.music.play(-1)
def set_player_pos(self):
if self.current_level_no == 0:
self.player.rect.x = 0
self.player.rect.y = self.scr_h - self.player.rect.height
self.active_sprite_list.add(self.player)
else:
print("in player mirror")
self.player.rect.x = constants.SCREENWIDTH - 20
self.player.rect.y = 0
self.active_sprite_list.add(self.player)
def render(self, screen):
# ALL CODE TO DRAW SHOULD GO BELOW THIS COMMENT
self.current_level.draw(screen)
self.active_sprite_list.draw(screen)
# ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT
def update(self):
# Update the player.
self.active_sprite_list.update()
# Update items in the level
self.current_level.update()
# If the player gets near the right side, shift the world left (-x)
if self.player.rect.right > self.scr_w:
self.player.rect.right = self.scr_w
# If the player gets near the left side, shift the world right (+x)
if self.player.rect.left < 0:
self.player.rect.left = 0
if self.player.level_completed():
self.player.goal_reached = False
self.current_level_no += 1
if self.current_level_no > len(self.level_list) - 1:
self.exit()
else:
self.current_level = self.level_list[self.current_level_no]
self.manager.go_to(GameScene(self.current_level_no))
def exit(self):
self.manager.go_to(CustomScene("You Won!"))
def die(self):
self.manager.go_to(CustomScene("You lose!"))
def handle_events(self, events):
if not self.current_level_no % 2:
for e in events:
if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:
self.manager.go_to(titlescene.TitleScene())
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_LEFT:
self.player.go_left()
if e.key == pygame.K_RIGHT:
self.player.go_right()
if e.key == pygame.K_SPACE:
self.player.jump()
if e.type == pygame.KEYUP:
if e.key == pygame.K_LEFT and self.player.change_x < 0:
self.player.stop()
if e.key == pygame.K_RIGHT and self.player.change_x > 0:
self.player.stop()
if e.key == pygame.K_r:
self.set_player_pos()
# skip level (for testing)
if e.key == pygame.K_s:
self.manager.go_to(GameScene(1))
else:
for e in events:
if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:
self.manager.go_to(titlescene.TitleScene())
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_LEFT:
self.player.go_right()
if e.key == pygame.K_RIGHT:
self.player.go_left()
if e.key == pygame.K_SPACE:
self.player.jump_mirror()
if e.type == pygame.KEYUP:
if e.key == pygame.K_LEFT and self.player.change_x > 0:
self.player.stop()
if e.key == pygame.K_RIGHT and self.player.change_x < 0:
self.player.stop()
if e.key == pygame.K_r:
self.set_player_pos()
#self.current_level.check_keys()
| 35.706767 | 76 | 0.549379 | 4,565 | 0.961255 | 0 | 0 | 0 | 0 | 0 | 0 | 466 | 0.098126 |
2c31b0fdfb0fd6536f8cea2947f69df4b6872d46 | 2,336 | py | Python | migrations/versions/6245d75fa12_exceptions_table.py | boladmin/security_monkey | c28592ffd518fa399527d26262683fc860c30eef | [
"Apache-2.0"
] | 4,258 | 2015-01-04T22:06:10.000Z | 2022-03-31T23:40:27.000Z | migrations/versions/6245d75fa12_exceptions_table.py | boladmin/security_monkey | c28592ffd518fa399527d26262683fc860c30eef | [
"Apache-2.0"
] | 1,013 | 2015-01-12T02:31:03.000Z | 2021-09-16T19:09:03.000Z | migrations/versions/6245d75fa12_exceptions_table.py | boladmin/security_monkey | c28592ffd518fa399527d26262683fc860c30eef | [
"Apache-2.0"
] | 965 | 2015-01-11T21:06:07.000Z | 2022-03-17T16:53:57.000Z | """Exceptions Table
Revision ID: 6245d75fa12
Revises: e0a6af364a3f
Create Date: 2016-08-16 11:35:38.575026
"""
# revision identifiers, used by Alembic.
revision = '6245d75fa12'
down_revision = 'e0a6af364a3f'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('exceptions',
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('source', sa.String(length=256), nullable=False),
sa.Column('occurred', sa.DateTime(), nullable=False),
sa.Column('ttl', sa.DateTime(), nullable=False),
sa.Column('type', sa.String(length=256), nullable=False),
sa.Column('message', sa.String(length=512), nullable=True),
sa.Column('stacktrace', sa.Text(), nullable=True),
sa.Column('region', sa.String(length=32), nullable=True),
sa.Column('tech_id', sa.Integer(), nullable=True),
sa.Column('item_id', sa.Integer(), nullable=True),
sa.Column('account_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['account_id'], ['account.id'], ),
sa.ForeignKeyConstraint(['item_id'], ['item.id'], ),
sa.ForeignKeyConstraint(['tech_id'], ['technology.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_exceptions_account_id', 'exceptions', ['account_id'], unique=False)
op.create_index('ix_exceptions_item_id', 'exceptions', ['item_id'], unique=False)
op.create_index('ix_exceptions_region', 'exceptions', ['region'], unique=False)
op.create_index('ix_exceptions_source', 'exceptions', ['source'], unique=False)
op.create_index('ix_exceptions_tech_id', 'exceptions', ['tech_id'], unique=False)
op.create_index('ix_exceptions_type', 'exceptions', ['type'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_exceptions_type', table_name='exceptions')
op.drop_index('ix_exceptions_tech_id', table_name='exceptions')
op.drop_index('ix_exceptions_source', table_name='exceptions')
op.drop_index('ix_exceptions_region', table_name='exceptions')
op.drop_index('ix_exceptions_item_id', table_name='exceptions')
op.drop_index('ix_exceptions_account_id', table_name='exceptions')
op.drop_table('exceptions')
### end Alembic commands ###
| 42.472727 | 91 | 0.704623 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,007 | 0.431079 |
2c32d82c9a78a282495fbc7831ecf41937c490f8 | 4,422 | py | Python | DigitalBiomarkers-Preprocessing/Signal-Alignment/sdtw/test.py | jessilyn/DBDP-1 | 49308994853d36fa8b24066222851133f6bbf4c7 | [
"Apache-2.0"
] | 20 | 2020-01-27T16:32:25.000Z | 2021-05-27T15:06:29.000Z | DigitalBiomarkers-Preprocessing/Signal-Alignment/sdtw/test.py | chopeter27/DBDP | 99357dac197ceeb8c240ead804dd2c8bd3e3fc93 | [
"Apache-2.0"
] | 11 | 2020-01-27T16:22:09.000Z | 2020-07-29T20:11:22.000Z | DigitalBiomarkers-Preprocessing/Signal-Alignment/sdtw/test.py | chopeter27/DBDP | 99357dac197ceeb8c240ead804dd2c8bd3e3fc93 | [
"Apache-2.0"
] | 16 | 2019-04-05T15:01:46.000Z | 2021-07-07T05:42:27.000Z | import pandas as pd
import math
import linecache
import numpy as np
from scipy import stats
from parameter_cal import cf
from dtw import dtw
from scipy.misc import *
from sdtw.config import sub_len, nBlocks
from sdtw.utils import cal_descriptor, samplingSequences, norm, get_link_graph
from parameter_cal.utils import get_fact_align, get_reverse_dict, get_SS2, get_SS1
from parameter_cal.cf import ds_time
from downsample.utils import get_true_aligned, get_group_number, get_k_accuracy
import matplotlib.pyplot as plt
# when x = 0, sigmoid's derivative value is 1/4a
def sigmoid0(x):
return (4 * cf.warp_width) / (1 + math.exp(-x / cf.warp_width))
def gaussian_bump(x, a=1):
return math.exp(1 / (pow((x / a), 2) - 1))
sigmoid = np.vectorize(sigmoid0)
# generate warped signal
y = linecache.getline('data/Beef_TRAIN', 1)
y_list = y.split(',')
# delete the index
y_list.pop(0)
y_list = [float(item) for item in y_list]
reference = pd.DataFrame(y_list)
reference['t'] = [i for i in range(0, len(reference))]
reference.columns = ['q', 't']
anchor_index = 220
anchor_shift = 10
reference['shift'] = [derivative(sigmoid, math.fabs(anchor_index - i), dx=1e-6) * anchor_shift for i in reference['t']]
query = pd.DataFrame(reference)
query.columns = ['q', 't', 'shift']
query['t2'] = 0.1
temp = []
for i, j in zip(query['t'].values, query['shift'].values):
temp.append(i - j)
query['t2'] = temp
# add gaussian bump
range_of_gaussian = 40
height_of_gaussian = 1.2
temp = query[(query['t'] < anchor_index + 40) & (query['t'] > anchor_index - 40)].index
for i in temp:
query.loc[i, 'q'] = query.loc[i, 'q'] + height_of_gaussian * gaussian_bump(i - anchor_index, range_of_gaussian)
# plot warped signal
_, ax = plt.subplots(1, 1, figsize=(20, 10))
ax.scatter(x=query['t'], y=reference['q'], c='b', marker='.', label='before warp')
ax.scatter(x=query['t2'], y=query['q'], c='r', marker='.', label='after warp')
xvals = np.linspace(0, len(query['t']) - 1, math.floor(len(query['t']) / cf.ds_time))
x = query['t2']
y = query['q']
yinterp = np.array(np.interp(xvals, x, y))
xvals = np.array(xvals)
ax.scatter(x=xvals, y=yinterp, marker='.', c='g', label='after interp')
ax.legend(fontsize='30')
# normalize the signal
reference_norm = stats.zscore(reference['q'])
yinterp_norm = stats.zscore(yinterp)
# store the corresponding point pair
query.drop('shift', axis=1)
query.drop('t', axis=1)
query2 = pd.DataFrame({'t': xvals, 'q2': yinterp})
query2['close_index'] = 0
true_align_dict = get_true_aligned(cf.ds_time, query, query2)
group_num_dict = get_group_number(true_align_dict, query)
query2.loc[len(query2) - 1, 'close_index'] = len(query) - 1
for i in range(len(query2) - 1):
for j in range(len(query['t2']) - 1):
if query['t2'][j] <= query2['t'][i] < query['t2'][j + 1]:
if abs(query2['q2'][i] - query['q'][j]) < abs(query2['q2'][i] - query['q'][j + 1]):
query2.loc[i, 'close_index'] = j
else:
query2.loc[i, 'close_index'] = j + 1
if sub_len % 2 == 0:
raise Exception("Sub_len must be odd number!")
refer_subsequences = samplingSequences(reference_norm, sub_len)
query_subsequences = samplingSequences(yinterp_norm, int(sub_len/cf.ds_time))
refer_descriptors = np.zeros((len(refer_subsequences), nBlocks * 8))
query_descriptors = np.zeros((len(query_subsequences), nBlocks * 8))
refer_nsubsequences = len(refer_subsequences)
query_nsubsequences = len(query_subsequences)
for i in range(refer_nsubsequences):
sub_seq = refer_subsequences[i]
refer_descriptors[i] = cal_descriptor(sub_seq, sub_len)
for i in range(query_nsubsequences):
sub_seq = query_subsequences[i]
query_descriptors[i] = cal_descriptor(sub_seq, int(sub_len/cf.ds_time))
d, cost_matrix, acc_cost_matrix, path = dtw(refer_descriptors, query_descriptors, dist=norm)
query2.columns = ['t2', 'q', 'close_index'] # adapt to the get_link_graph
get_link_graph(reference, query2, path, -3, 'downsampled shapedtw')
fact_align_dict = get_fact_align(path)
reverse_dict = get_reverse_dict(path)
print("error rate of shapedtw is " + str(get_k_accuracy(true_align_dict, fact_align_dict, group_num_dict)))
print("SS1 of shapedtw is " + str(get_SS1(path, cf.ds_time)))
print("SS2 of shapedtw is " + str(get_SS2(fact_align_dict, reverse_dict, ds_time)))
| 39.482143 | 120 | 0.688602 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 635 | 0.1436 |
2574c91dd0017e03291fbe071a3fc02152437d2a | 193 | py | Python | base_model/admin.py | kriwil/django-base-model | e6e989fce282200df3f6d114af27cfa4a618203f | [
"0BSD"
] | null | null | null | base_model/admin.py | kriwil/django-base-model | e6e989fce282200df3f6d114af27cfa4a618203f | [
"0BSD"
] | null | null | null | base_model/admin.py | kriwil/django-base-model | e6e989fce282200df3f6d114af27cfa4a618203f | [
"0BSD"
] | null | null | null | from django.contrib import admin
class BaseModelAdmin(admin.ModelAdmin):
exclude = (
'created_time',
'modified_time',
'is_removed',
'removed_time',
)
| 16.083333 | 39 | 0.601036 | 157 | 0.813472 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.284974 |
257570ef08bf6f96adf3ca076eab3e37b42bac17 | 6,083 | py | Python | results/migrations/0001_initial.py | lilbex/bitcom | c0d09155b655de3ebe84851f24e5c07ef60da611 | [
"MIT"
] | null | null | null | results/migrations/0001_initial.py | lilbex/bitcom | c0d09155b655de3ebe84851f24e5c07ef60da611 | [
"MIT"
] | null | null | null | results/migrations/0001_initial.py | lilbex/bitcom | c0d09155b655de3ebe84851f24e5c07ef60da611 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.6 on 2021-08-24 18:31
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='agentname',
fields=[
('name_id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('firstname', models.CharField(max_length=200)),
('lastname', models.CharField(max_length=200)),
('email', models.CharField(max_length=200)),
('phone', models.CharField(max_length=200)),
('pollingunit_uniqueid', models.IntegerField()),
],
),
migrations.CreateModel(
name='announced_lga_results',
fields=[
('result_id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('lga_name', models.CharField(max_length=200)),
('party_abbreviation', models.CharField(max_length=50)),
('party_score', models.IntegerField()),
('entered_by_user', models.CharField(max_length=200)),
('date_entered', models.DateTimeField()),
('user_ip_address', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='announced_pu_results',
fields=[
('result_id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('polling_unit_uniqueid', models.CharField(max_length=200)),
('party_abbreviation', models.CharField(max_length=50)),
('party_score', models.IntegerField()),
('entered_by_user', models.CharField(max_length=7)),
('date_entered', models.DateTimeField()),
('user_ip_address', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='announced_state_results',
fields=[
('result_id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('state_name', models.CharField(max_length=200)),
('party_abbreviation', models.CharField(max_length=50)),
('party_score', models.IntegerField()),
('entered_by_user', models.CharField(max_length=200)),
('date_entered', models.DateTimeField()),
('user_ip_address', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='announced_ward_results',
fields=[
('result_id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('ward_name', models.CharField(max_length=200)),
('party_abbreviation', models.CharField(max_length=50)),
('party_score', models.IntegerField()),
('entered_by_user', models.CharField(max_length=200)),
('date_entered', models.DateTimeField()),
('user_ip_address', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='lga',
fields=[
('uniqueid', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('lga_id', models.IntegerField()),
('lga_name', models.CharField(max_length=200)),
('state_id', models.IntegerField()),
('lga_description', models.TextField()),
('entered_by_user', models.CharField(max_length=200)),
('date_entered', models.DateTimeField(max_length=200)),
('user_ip_address', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='party',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('partyid', models.CharField(max_length=200)),
('partyname', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='polling_unit',
fields=[
('uniqueid', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('polling_unit_id', models.IntegerField()),
('ward_id', models.IntegerField()),
('lga_id', models.IntegerField()),
('uniquewardid', models.IntegerField()),
('polling_unit_number', models.CharField(max_length=200)),
('polling_unit_name', models.CharField(max_length=200)),
('polling_unit_description', models.TextField()),
('lat', models.CharField(max_length=50)),
('long', models.CharField(max_length=200)),
('entered_by_user', models.CharField(max_length=200)),
('date_entered', models.DateTimeField()),
('user_ip_address', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='states',
fields=[
('state_id', models.IntegerField(editable=False, primary_key=True, serialize=False, unique=True)),
('state_name', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='ward',
fields=[
('uniqueid', models.IntegerField(editable=False, primary_key=True, serialize=False, unique=True)),
('ward_id', models.IntegerField()),
('ward_name', models.CharField(max_length=50)),
('lga_id', models.IntegerField()),
('ward_description', models.TextField()),
('entered_by_user', models.CharField(max_length=200)),
('date_entered', models.DateTimeField()),
('user_ip_address', models.CharField(max_length=50)),
],
),
]
| 45.736842 | 114 | 0.554661 | 5,990 | 0.984711 | 0 | 0 | 0 | 0 | 0 | 0 | 1,118 | 0.183791 |
2575c1d96c57160a201fba4b65403230c9c3cfc4 | 12,137 | py | Python | quantization/Quantizelayer.py | fengxiaoshuai/CNN_model_optimizer | 4c48420989ffe31a4075d36a5133fee0d999466a | [
"Apache-2.0"
] | null | null | null | quantization/Quantizelayer.py | fengxiaoshuai/CNN_model_optimizer | 4c48420989ffe31a4075d36a5133fee0d999466a | [
"Apache-2.0"
] | 1 | 2021-01-05T10:41:24.000Z | 2021-01-05T10:41:24.000Z | quantization/Quantizelayer.py | fengxiaoshuai/CNN_model_optimizer | 4c48420989ffe31a4075d36a5133fee0d999466a | [
"Apache-2.0"
] | 1 | 2020-08-07T02:56:20.000Z | 2020-08-07T02:56:20.000Z | from __future__ import division
from __future__ import print_function
import numpy as np
import copy
from scipy import stats
class QuantizeLayer:
def __init__(self, name="None", num_bin=2001):
self.name = name
self.min = 0.0
self.max = 0.0
self.edge = 0.0
self.num_bins = num_bin
self.distribution_interval = 0.0
self.data_distribution = []
@staticmethod
def get_max_min_edge(blob_data):
max_val = np.max(blob_data)
min_val = np.min(blob_data)
data_edge = max(abs(max_val), abs(min_val))
return max_val, min_val, data_edge
def initial_histograms(self, blob_data):
max_val, min_val, data_edge = self.get_max_min_edge(blob_data)
hist, hist_edges = np.histogram(blob_data, bins=self.num_bins, range=(-data_edge, data_edge))
self.distribution_interval = 2 * data_edge / len(hist)
self.data_distribution = hist
self.edge = data_edge
self.min = min_val
self.max = max_val
def combine_histograms(self, blob_data):
"""
:param blob_data:
:return:
"""
# hist is the num of each bin, the edge of each bin is [)
max_val, min_val, data_edge = self.get_max_min_edge(blob_data)
if data_edge <= self.edge:
hist, _ = np.histogram(blob_data, bins=len(self.data_distribution), range=(-self.edge, self.edge))
self.data_distribution += hist
else:
old_num_bins = len(self.data_distribution)
old_step = 2 * self.edge / old_num_bins
half_increased_bins = int((data_edge - self.edge) // old_step + 1)
new_num_bins = half_increased_bins * 2 + old_num_bins
data_edge = half_increased_bins * old_step + self.edge
hist, hist_edges = np.histogram(blob_data, bins=new_num_bins, range=(-data_edge, data_edge))
hist[half_increased_bins:new_num_bins - half_increased_bins] += self.data_distribution
self.data_distribution = hist
self.edge = data_edge
self.min = min(min_val, self.min)
self.max = max(max_val, self.max)
self.distribution_interval = 2 * self.edge / len(self.data_distribution)
@staticmethod
def smooth_distribution(p, eps=0.0001):
is_zeros = (p == 0).astype(np.float32)
is_nonzeros = (p != 0).astype(np.float32)
n_zeros = is_zeros.sum()
n_nonzeros = p.size - n_zeros
if not n_nonzeros:
raise ValueError('The discrete probability distribution is malformed. All entries are 0.')
eps1 = eps * float(n_zeros) / float(n_nonzeros)
assert eps1 < 1.0, 'n_zeros=%d, n_nonzeros=%d, eps1=%f' % (n_zeros, n_nonzeros, eps1)
hist = p.astype(np.float32)
hist += eps * is_zeros + (-eps1) * is_nonzeros
assert (hist <= 0).sum() == 0
return hist
@property
def threshold_distribution(self, target_bin=256):
"""
:param quantized_dtype:
:param target_bin:
:return:
"""
num_bins = len(self.data_distribution)
distribution = self.data_distribution
assert (num_bins % 2 == 1)
# if min_val >= 0 and quantized_dtype in ['auto', 'uint8']:
# target_bin = 128
threshold_sum = sum(distribution[target_bin:])
kl_divergence = np.zeros(num_bins - target_bin)
for threshold in range(target_bin, num_bins):
sliced_nd_hist = copy.deepcopy(distribution[:threshold])
# generate reference distribution p
p = sliced_nd_hist.copy()
p[threshold - 1] += threshold_sum
threshold_sum = threshold_sum - distribution[threshold]
# is_nonzeros[k] indicates whether hist[k] is nonzero
p = np.array(p)
nonzero_loc = (p != 0).astype(np.int64)
#
quantized_bins = np.zeros(target_bin, dtype=np.int64)
# calculate how many bins should be merged to generate quantized distribution q
num_merged_bins = len(sliced_nd_hist) // target_bin
# merge hist into num_quantized_bins bins
for j in range(target_bin):
start = j * num_merged_bins
stop = start + num_merged_bins
quantized_bins[j] = sliced_nd_hist[start:stop].sum()
quantized_bins[-1] += sliced_nd_hist[target_bin * num_merged_bins:].sum()
# expand quantized_bins into p.size bins
q = np.zeros(sliced_nd_hist.size, dtype=np.float64)
for j in range(target_bin):
start = j * num_merged_bins
if j == target_bin - 1:
stop = -1
else:
stop = start + num_merged_bins
norm = nonzero_loc[start:stop].sum()
if norm != 0:
q[start:stop] = quantized_bins[j] / norm
q[p == 0] = 0.0001
p = self.smooth_distribution(p)
# calculate kl_divergence between q and p
kl_divergence[threshold - target_bin] = stats.entropy(p, q)
min_kl_divergence = np.argmin(kl_divergence)
threshold_bin = min_kl_divergence + target_bin
threshold_value = (threshold_bin + 0.5) * self.distribution_interval + (-self.edge)
return threshold_value
@staticmethod
def max_slide_window(seq, m):
num = len(seq)
seq = seq.tolist()
assert isinstance(seq, (list, tuple, set)) and isinstance(m, int), "seq array"
assert len(seq) > m, "len(seq) must >m"
max_seq = 0
loc = 0
for i in range(0, num):
if (i + m) <= num:
temp_seq = seq[i:i + m]
temp_sum = sum(temp_seq)
if max_seq <= temp_sum:
max_seq = temp_sum
loc = i
else:
return max_seq, loc
@property
def distribution_min_max(self, target_bin=256):
num_bins = len(self.data_distribution)
distribution = self.data_distribution
assert (num_bins % 2 == 1)
kl_divergence = np.zeros(num_bins - target_bin)
kl_loc = np.zeros(num_bins - target_bin)
for threshold in range(target_bin, num_bins):
#print("num:", threshold)
_, loc = self.max_slide_window(distribution, threshold)
sliced_nd_hist = copy.deepcopy(distribution[loc:loc + threshold])
# generate reference distribution p
p = sliced_nd_hist.copy()
right_sum = sum(distribution[loc + threshold:])
left_sum = sum(distribution[:loc])
p[threshold - 1] += right_sum
p[0] += left_sum
# is_nonzeros[k] indicates whether hist[k] is nonzero
p = np.array(p)
nonzero_loc = (p != 0).astype(np.int64)
#
quantized_bins = np.zeros(target_bin, dtype=np.int64)
# calculate how many bins should be merged to generate quantized distribution q
num_merged_bins = len(sliced_nd_hist) // target_bin
# merge hist into num_quantized_bins bins
for j in range(target_bin):
start = j * num_merged_bins
stop = start + num_merged_bins
quantized_bins[j] = sliced_nd_hist[start:stop].sum()
quantized_bins[-1] += sliced_nd_hist[target_bin * num_merged_bins:].sum()
# expand quantized_bins into p.size bins
q = np.zeros(sliced_nd_hist.size, dtype=np.float64)
for j in range(target_bin):
start = j * num_merged_bins
if j == target_bin - 1:
stop = -1
else:
stop = start + num_merged_bins
norm = nonzero_loc[start:stop].sum()
if norm != 0:
q[start:stop] = quantized_bins[j] / norm
q[p == 0] = 0.0001
p = self.smooth_distribution(p)
# calculate kl_divergence between q and p
kl_divergence[threshold - target_bin] = stats.entropy(p, q)
kl_loc[threshold - target_bin] = loc
min_kl_divergence = np.argmin(kl_divergence)
min = kl_loc[min_kl_divergence]
max = min + target_bin + min_kl_divergence
min = (min + 0.5) * self.distribution_interval + (-self.edge)
max = (max + 0.5) * self.distribution_interval + (-self.edge)
return min, max
@property
def distribution_test(self, target_bin=256):
num_bins = len(self.data_distribution)
distribution = self.data_distribution
assert (num_bins % 2 == 1)
kl_divergence = np.zeros(num_bins - target_bin)
kl_loc = np.zeros(num_bins - target_bin)
for threshold in range(target_bin, num_bins):
#print("num:", threshold)
_, loc = self.max_slide_window(distribution, threshold)
sliced_nd_hist = copy.deepcopy(distribution[loc:loc + threshold])
# generate reference distribution p
p = sliced_nd_hist.copy()
right_sum = sum(distribution[loc + threshold:])
left_sum = sum(distribution[:loc])
p[threshold - 1] += right_sum
p[0] += left_sum
# is_nonzeros[k] indicates whether hist[k] is nonzero
p = np.array(p)
nonzero_loc = (p != 0).astype(np.int64)
#
quantized_bins = np.zeros(target_bin, dtype=np.int64)
# calculate how many bins should be merged to generate quantized distribution q
num_merged_bins = len(sliced_nd_hist) // target_bin
# merge hist into num_quantized_bins bins
for j in range(target_bin):
start = j * num_merged_bins
stop = start + num_merged_bins
quantized_bins[j] = sliced_nd_hist[start:stop].sum()
quantized_bins[-1] += sliced_nd_hist[target_bin * num_merged_bins:].sum()
# expand quantized_bins into p.size bins
q = np.zeros(sliced_nd_hist.size, dtype=np.float64)
for j in range(target_bin):
start = j * num_merged_bins
if j == target_bin - 1:
stop = -1
else:
stop = start + num_merged_bins
norm = nonzero_loc[start:stop].sum()
if norm != 0:
q[start:stop] = quantized_bins[j] / norm
q[p == 0] = 0.0001
p = self.smooth_distribution(p)
# calculate kl_divergence between q and p
kl_divergence[threshold - target_bin] = stats.wasserstein_distance(p, q)
kl_loc[threshold - target_bin] = loc
min_kl_divergence = np.argmin(kl_divergence)
min = kl_loc[min_kl_divergence]
max = min + target_bin + min_kl_divergence
min = (min + 0.5) * self.distribution_interval + (-self.edge)
max = (max + 0.5) * self.distribution_interval + (-self.edge)
return min, max
data = np.random.randn(10000,)
print(data)
layer = QuantizeLayer(name="con_1")
layer.initial_histograms(data)
print("min:", layer.min)
print("max:", layer.max)
print("edge:", layer.edge)
print("distribution_interval:", layer.distribution_interval)
print("bins:", len(layer.data_distribution))
data = np.random.randn(10000,).astype()
layer.combine_histograms(data)
print("min:", layer.min)
print("max:", layer.max)
print("edge:", layer.edge)
print("distribution_interval:", layer.distribution_interval)
print("bins:", len(layer.data_distribution))
data = np.random.randn(10000,)
data[9999] = 20
layer.combine_histograms(data)
print("min:", layer.min)
print("max:", layer.max)
print("edge:", layer.edge)
print("distribution_interval:", layer.distribution_interval)
print("bins:", len(layer.data_distribution))
import matplotlib.pyplot as plt
plt.plot(layer.data_distribution)
plt.show()
print(layer.threshold_distribution)
print(layer.distribution_min_max)
#print(layer.distribution_test) | 37.928125 | 110 | 0.596853 | 11,020 | 0.907967 | 0 | 0 | 9,087 | 0.748702 | 0 | 0 | 1,539 | 0.126802 |
2576626ceba771ec76431abf1f815e63a7c7ef75 | 5,484 | py | Python | SRFlow_seungjae/code/test.py | scey26/srdualglow | 23dbd0ce94f12abc6885c9a3ed3b7ca6f6f15170 | [
"MIT"
] | 1 | 2021-06-29T06:39:42.000Z | 2021-06-29T06:39:42.000Z | SRFlow_seungjae/code/test.py | scey26/srdualglow | 23dbd0ce94f12abc6885c9a3ed3b7ca6f6f15170 | [
"MIT"
] | null | null | null | SRFlow_seungjae/code/test.py | scey26/srdualglow | 23dbd0ce94f12abc6885c9a3ed3b7ca6f6f15170 | [
"MIT"
] | null | null | null | # Copyright (c) 2020 Huawei Technologies Co., Ltd.
# Licensed under CC BY-NC-SA 4.0 (Attribution-NonCommercial-ShareAlike 4.0 International) (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
#
# The code is released for academic research use only. For commercial use, please contact Huawei Technologies Co., Ltd.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file contains content licensed by https://github.com/xinntao/BasicSR/blob/master/LICENSE/LICENSE
import glob
import sys
from collections import OrderedDict
from natsort import natsort
import options.options as option
from Measure import Measure, psnr
from imresize import imresize
from models import create_model
import torch
from utils.util import opt_get
import numpy as np
import pandas as pd
import os
import cv2
def fiFindByWildcard(wildcard):
    """Return every path matching `wildcard` (recursive glob), naturally sorted."""
    matches = glob.glob(wildcard, recursive=True)
    return natsort.natsorted(matches)
def load_model(conf_path):
    """Build a SRFlow model from a YAML config and restore its generator weights.

    :param conf_path: path to the options .yml file.
    :return: (model, opt) — the model with netG loaded from opt['model_path'],
        and the parsed option dict.
    """
    opt = option.parse(conf_path, is_train=False)
    opt['gpu_ids'] = None  # presumably forces CPU inference — confirm against options parser
    opt = option.dict_to_nonedict(opt)
    model = create_model(opt)
    model_path = opt_get(opt, ['model_path'], None)
    model.load_network(load_path=model_path, network=model.netG)
    return model, opt
def predict(model, lr):
    """Run one super-resolution pass on a low-resolution HxWxC uint8 image.

    :param model: a loaded SRFlow model (see load_model).
    :param lr: low-resolution image array; converted to a 1xCxHxW tensor via t().
    :return: the SR output tensor from the model's visuals.
    """
    model.feed_data({"LQ": t(lr)}, need_GT=False)
    model.test()
    visuals = model.get_current_visuals(need_GT=False)
    # Key name varies ('rlt' vs 'SR') — presumably across model/BasicSR versions.
    return visuals.get('rlt', visuals.get("SR"))
def t(array):
    """Convert an HxWxC image array into a 1xCxHxW float32 tensor scaled by 1/255."""
    chw = array.transpose([2, 0, 1]).astype(np.float32)
    return torch.Tensor(chw[None, ...]) / 255
def rgb(t):
    """Convert a CxHxW (or 1xCxHxW) float tensor in [0, 1] into an HxWxC uint8 image."""
    chw = t[0] if len(t.shape) == 4 else t
    hwc = chw.detach().cpu().numpy().transpose([1, 2, 0])
    return (np.clip(hwc, 0, 1) * 255).astype(np.uint8)
def imread(path):
    """Read an image from `path` with OpenCV and return it as RGB (cv2 loads BGR)."""
    return cv2.imread(path)[:, :, [2, 1, 0]]
def imwrite(path, img):
    """Write an RGB image to `path` via OpenCV, creating parent directories as needed."""
    os.makedirs(os.path.dirname(path), exist_ok=True)
    cv2.imwrite(path, img[:, :, [2, 1, 0]])  # swap back to BGR for cv2
def imCropCenter(img, size):
    """Crop a size x size window centered in `img` (HxWxC), clipped to the image bounds.

    If `size` exceeds an image dimension, the full extent of that dimension is kept.
    """
    height, width, _ = img.shape

    def span(extent):
        # start of the centered window, clamped to the valid range
        start = max(extent // 2 - size // 2, 0)
        return start, min(start + size, extent)

    top, bottom = span(height)
    left, right = span(width)
    return img[top:bottom, left:right]
def impad(img, top=0, bottom=0, left=0, right=0, color=255):
    """Reflect-pad an HxWxC image by the given per-side margins.

    NOTE(review): `color` is accepted but never used — padding is always
    mode 'reflect', never a constant fill; confirm whether constant padding
    was intended.
    """
    margins = [(top, bottom), (left, right), (0, 0)]
    return np.pad(img, margins, 'reflect')
def main():
    """Evaluate a SRFlow model over a dataset and accumulate per-image metrics.

    Usage: python test.py <conf.yml>.  Reads LR/HR image pairs from the dirs
    named in the config, super-resolves each LR image, writes the SR output
    under ../results/<conf>/, and appends PSNR/SSIM/LPIPS plus an LR-consistency
    PSNR per image to measure_full.csv.  Already-measured images (matched by
    heat and index in an existing CSV) are skipped, so runs are resumable.
    """
    conf_path = sys.argv[1]
    conf = conf_path.split('/')[-1].replace('.yml', '')
    model, opt = load_model(conf_path)

    lr_dir = opt['dataroot_LR']
    hr_dir = opt['dataroot_GT']

    lr_paths = fiFindByWildcard(os.path.join(lr_dir, '*.png'))
    hr_paths = fiFindByWildcard(os.path.join(hr_dir, '*.png'))

    this_dir = os.path.dirname(os.path.realpath(__file__))
    test_dir = os.path.join(this_dir, '..', 'results', conf)
    print(f"Out dir: {test_dir}")

    measure = Measure(use_gpu=False)

    fname = f'measure_full.csv'
    fname_tmp = fname + "_"
    path_out_measures = os.path.join(test_dir, fname_tmp)
    path_out_measures_final = os.path.join(test_dir, fname)

    # Resume from a finished or in-progress CSV when one exists.
    if os.path.isfile(path_out_measures_final):
        df = pd.read_csv(path_out_measures_final)
    elif os.path.isfile(path_out_measures):
        df = pd.read_csv(path_out_measures)
    else:
        df = None

    scale = opt['scale']
    pad_factor = 2

    for lr_path, hr_path, idx_test in zip(lr_paths, hr_paths, range(len(lr_paths))):

        lr = imread(lr_path)
        hr = imread(hr_path)

        # Pad image to be % 2 (reflect-pad LR so both dims are multiples of pad_factor)
        h, w, c = lr.shape
        lq_orig = lr.copy()
        lr = impad(lr, bottom=int(np.ceil(h / pad_factor) * pad_factor - h),
                   right=int(np.ceil(w / pad_factor) * pad_factor - w))

        lr_t = t(lr)

        heat = opt['heat']

        # Skip images already measured at this heat (resume support).
        if df is not None and len(df[(df['heat'] == heat) & (df['name'] == idx_test)]) == 1:
            continue

        sr_t = model.get_sr(lq=lr_t, heat=heat)

        sr = rgb(torch.clamp(sr_t, 0, 1))
        sr = sr[:h * scale, :w * scale]  # crop the padding back off

        path_out_sr = os.path.join(test_dir, "{:0.2f}".format(heat).replace('.', ''), "{:06d}.png".format(idx_test))
        imwrite(path_out_sr, sr)

        meas = OrderedDict(conf=conf, heat=heat, name=idx_test)
        meas['PSNR'], meas['SSIM'], meas['LPIPS'] = measure.measure(sr, hr)

        # LR-consistency: downscale the SR output and compare against the original LR.
        lr_reconstruct_rgb = imresize(sr, 1 / opt['scale'])
        meas['LRC PSNR'] = psnr(lq_orig, lr_reconstruct_rgb)

        str_out = format_measurements(meas)
        print(str_out)

        df = pd.DataFrame([meas]) if df is None else pd.concat([pd.DataFrame([meas]), df])

        # Write to a temp file then rename, so a crash never leaves a truncated CSV.
        df.to_csv(path_out_measures + "_", index=False)
        os.rename(path_out_measures + "_", path_out_measures)

    df.to_csv(path_out_measures, index=False)
    os.rename(path_out_measures, path_out_measures_final)

    # NOTE(review): df.mean() is applied to the whole frame including the
    # non-numeric 'conf' column — presumably relies on pandas dropping it;
    # confirm against the pinned pandas version.
    str_out = format_measurements(df.mean())
    print(f"Results in: {path_out_measures_final}")
    print('Mean: ' + str_out)
def format_measurements(meas):
    """Render a mapping of measurement names to values as 'k: v, k: v, ...'.

    Floats are formatted with two decimal places; every other value keeps
    its default string form.
    """
    def render(value):
        return f"{value:0.2f}" if isinstance(value, float) else value

    return ", ".join(f"{key}: {render(value)}" for key, value in meas.items())
# Entry point — run as: python test.py <path/to/conf.yml>
if __name__ == "__main__":
    main()
| 29.967213 | 119 | 0.654632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,213 | 0.221189 |
25774050e4ebc5c3ab6263fbb98ab241ce4786a3 | 2,620 | py | Python | wit/dep/triplet-learning.py | bkj/what-is-this | 49c6e4f9809623d8580433baf00e507faacb04f0 | [
"Apache-2.0"
] | 21 | 2016-01-12T05:20:29.000Z | 2022-01-05T18:14:40.000Z | wit/dep/triplet-learning.py | bkj/what-is-this | 49c6e4f9809623d8580433baf00e507faacb04f0 | [
"Apache-2.0"
] | null | null | null | wit/dep/triplet-learning.py | bkj/what-is-this | 49c6e4f9809623d8580433baf00e507faacb04f0 | [
"Apache-2.0"
] | 38 | 2016-11-08T04:49:17.000Z | 2019-01-10T04:47:52.000Z | import keras
import pandas as pd
import urllib2
from bs4 import BeautifulSoup
from pprint import pprint
from matplotlib import pyplot as plt
import sys
sys.path.append('/Users/BenJohnson/projects/what-is-this/wit/')
from wit import *
# NOTE(review): Python-2-era script (urllib2 imported above; map() results
# consumed as lists below) with an old keras/pandas API surface
# (nb_epoch, DataFrame.sort) — kept byte-identical, comments only.
pd.set_option('display.max_rows', 50)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 120)
np.set_printoptions(linewidth=100)
# --
# Config + Init
num_features = 75 # Character
# max_len = 100 # Character
max_len = 350
formatter = KerasFormatter(num_features, max_len)
# --
# Load and format data
# Hard-coded local HDF5 store of scraped gun-forum leaves.
in_store = pd.HDFStore(
    '/Users/BenJohnson/projects/what-is-this/qpr/gun_leaves_20151118_v2.h5',
    complevel = 9,
    complib = 'bzip2'
)
source = in_store.keys()[3]
df = in_store[source]
in_store.close()
# Subset to frequent paths (keep hashes with more than 100 unique objects)
chash = df.groupby('hash').apply(lambda x: len(x.obj.unique()))
keep = list(chash[chash > 100].index)
df = df[df.hash.apply(lambda x: x in keep)]
# Strip HTML markup; keep the visible text only.
df['content'] = df.obj.apply(lambda x: BeautifulSoup(x).text.encode('utf8'))
# --
# Make all pairs
train = make_triplet_train(df, N = 600)
pd.crosstab(train.doc, train.hash)
trn, _ = formatter.format(train, ['content'], 'hash')
# Test set of all unique points
unq = df.copy()
del unq['id']
unq = unq.drop_duplicates()
awl, _ = formatter.format(unq, ['content'], 'hash')
# --
# Defining model: char embedding -> LSTM -> dense projection, unit-normalised
# so the triplet loss operates on the unit hypersphere.
recurrent_size = 32
dense_size = 5
model = Sequential()
model.add(Embedding(num_features, recurrent_size))
model.add(LSTM(recurrent_size))
model.add(Dense(dense_size))
model.add(Activation('unit_norm'))
model.compile(loss = 'triplet_euclidean', optimizer = 'adam')
# --
# Training model
# Shuffles while maintaining groups (triplets must stay contiguous)
ms = modsel(train.shape[0], N = 3)
_ = model.fit(
    trn['x'][0][ms], trn['x'][0][ms],
    nb_epoch = 1,
    batch_size = 3 * 250,
    shuffle = False
)
preds = model.predict(awl['x'][0], verbose = True)
# Visualise the first two embedding dimensions, coloured by true hash label.
colors = awl['y'].argmax(1)
plt.scatter(preds[:,0], preds[:,1], c = colors)
plt.show()
# --
# Clustering results
#
# Could do better -- actually may want some kind of metric for "projection overlap"
from sklearn.cluster import DBSCAN
db = DBSCAN(eps = .1, min_samples = 50).fit(preds)
res = unq.hash.groupby(db.labels_).apply(lambda x: x.value_counts()).reset_index()
res.columns = ('cluster', 'hash', 'cnt')
# NOTE(review): DataFrame.sort was removed in modern pandas (sort_values is
# the successor) — this pins the script to an old pandas release.
res = res.sort('hash')
good_res = res[(res.cnt > 50) & (res.cluster > -1)]
good_res
sorted(res.hash.unique())
sorted(good_res.hash.unique())
# Deduplicate equivalence classes via their string form.
# NOTE(review): eval() on stringified lists — data is locally generated here,
# but this pattern is unsafe on any external input.
eqv = list(good_res.groupby('cluster').hash.apply(lambda x: list(x)))
eqv = map(eval, np.unique(map(str, eqv)))
print_eqv(eqv, df)
| 22.782609 | 90 | 0.684351 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 665 | 0.253817 |
257953b2686a30760d1c8853c684474288fcc931 | 3,826 | py | Python | selfdrive/dragonpilot/gpxd.py | lijunhao731/dragonpilot | 460890bc2149fb0f344943683052278afd359f2d | [
"MIT"
] | 8 | 2020-11-07T17:01:27.000Z | 2022-01-08T04:54:08.000Z | selfdrive/dragonpilot/gpxd.py | lijunhao731/dragonpilot | 460890bc2149fb0f344943683052278afd359f2d | [
"MIT"
] | 2 | 2021-03-21T10:24:12.000Z | 2021-07-26T11:50:56.000Z | selfdrive/dragonpilot/gpxd.py | lijunhao731/dragonpilot | 460890bc2149fb0f344943683052278afd359f2d | [
"MIT"
] | 5 | 2020-10-16T11:29:20.000Z | 2021-09-22T04:23:54.000Z | #!/usr/bin/env python3
import cereal.messaging as messaging
import os
import datetime
import signal
import threading
from common.realtime import Ratekeeper
# customisable values
GPX_LOG_PATH = '/data/media/0/gpx_logs/'  # flushed GPX track files land here
LOG_HERTZ = 10 # 10 hz = 0.1 sec, higher for higher accuracy, 10hz seems fine
LOG_LENGTH = 10 # mins, higher means it keeps more data in the memory, will take more time to write into a file too.
LOST_SIGNAL_COUNT_LENGTH = 30 # secs, output log file if we lost signal for this long

# do not change (derived from the values above)
LOST_SIGNAL_COUNT_MAX = LOST_SIGNAL_COUNT_LENGTH * LOG_HERTZ # samples (30 s worth at LOG_HERTZ)
LOGS_PER_FILE = LOG_LENGTH * 60 * LOG_HERTZ # e.g. 10 * 60 * 10 = 6000 points per file
class WaitTimeHelper:
  """Installs TERM/INT/HUP handlers and records that a shutdown was requested."""
  ready_event = threading.Event()
  shutdown = False

  def __init__(self):
    # Route all three termination signals through the same handler.
    for sig in (signal.SIGTERM, signal.SIGINT, signal.SIGHUP):
      signal.signal(sig, self.graceful_shutdown)

  def graceful_shutdown(self, signum, frame):
    """Signal handler: flag shutdown and wake any thread waiting on ready_event."""
    self.shutdown = True
    self.ready_event.set()
class GpxD():
  """Buffers GPS fixes in memory and periodically flushes them as GPX track files."""

  def __init__(self):
    self.log_count = 0          # points accumulated since the last flush
    self.logs = list()          # per accepted fix: [iso_time, lat, lon, alt] (all strings)
    self.lost_signal_count = 0  # consecutive rejected/absent fixes
    self.wait_helper = WaitTimeHelper()
    self.started_time = datetime.datetime.utcnow().isoformat()

  def log(self, sm):
    """Record the current gpsLocationExternal fix if it is usable.

    A fix is rejected when there is no lock (flags LSB clear) or accuracy is
    worse than 5 m; rejections only count toward lost_signal_count once at
    least one point has been buffered.
    """
    gps = sm['gpsLocationExternal']
    # do not log when no fix or accuracy is too low, add lost_signal_count
    if gps.flags % 2 == 0 or gps.accuracy > 5.:
      if self.log_count > 0:
        self.lost_signal_count += 1
    else:
      self.logs.append([datetime.datetime.utcfromtimestamp(gps.timestamp*0.001).isoformat(), str(gps.latitude), str(gps.longitude), str(gps.altitude)])
      self.log_count += 1
      self.lost_signal_count = 0

  def write_log(self, force = False):
    """Flush buffered points to disk when a file is full, signal is lost, or force=True."""
    if self.log_count == 0:
      return

    if force or (self.log_count >= LOGS_PER_FILE or self.lost_signal_count >= LOST_SIGNAL_COUNT_MAX):
      self._write_gpx()
      self.lost_signal_count = 0
      self.log_count = 0
      self.logs.clear()
      self.started_time = datetime.datetime.utcnow().isoformat()

  def _write_gpx(self):
    """Serialise the buffered points as a GPX 1.1 file named after started_time."""
    if not self.logs:
      return
    if not os.path.exists(GPX_LOG_PATH):
      os.makedirs(GPX_LOG_PATH)
    # ':' is not valid in FAT/exFAT file names, so sanitise the timestamp.
    filename = self.started_time.replace(':','-')
    header = (
        "<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"yes\"?>\n"
        "<gpx version=\"1.1\" creator=\"dragonpilot https://github.com/dragonpilot-community/dragonpilot\" xmlns=\"http://www.topografix.com/GPX/1/1\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd\">\n"
        "<trk>\n"
        " <name>" + self.started_time + "</name>"
        " <trkseg>\n"
    )
    # Build the body with join instead of repeated += (linear, not quadratic).
    body = "".join(self._trkpt_template(p[1], p[2], p[3], p[0]) for p in self.logs)
    footer = " </trkseg>\n</trk>\n</gpx>\n"
    try:
      # Best-effort write: losing one GPX file must not crash the daemon.
      with open('%s%sZ.gpx' % (GPX_LOG_PATH, filename), 'w') as f:
        f.write(header + body + footer)
    except OSError:
      pass

  def _trkpt_template(self, lat, lon, ele, time):
    """Return one GPX <trkpt> element for a single fix (all arguments are strings)."""
    return (" <trkpt lat=\"" + lat + "\" lon=\"" + lon + "\">\n"
            " <ele>" + ele + "</ele>\n"
            " <time>" + time + "</time>\n"
            " </trkpt>\n")
def gpxd_thread(sm=None, pm=None):
  """Main loop: sample GPS at LOG_HERTZ, buffer fixes, and flush GPX files.

  Runs until a TERM/INT/HUP signal is observed, then forces a final flush.
  `pm` is accepted but unused here.
  """
  if sm is None:
    sm = messaging.SubMaster(['gpsLocationExternal'])

  wait_helper = WaitTimeHelper()
  gpxd = GpxD()
  rk = Ratekeeper(LOG_HERTZ, print_delay_threshold=None)

  while True:
    sm.update(0)  # non-blocking poll of the subscribed sockets
    gpxd.log(sm)
    gpxd.write_log()
    if wait_helper.shutdown:
      gpxd.write_log(True)  # force the final flush before exiting
      break
    rk.keep_time()
def main(sm=None, pm=None):
  """Entry point; delegates straight to the gpxd logging loop."""
  gpxd_thread(sm, pm)

if __name__ == "__main__":
  main()
| 32.982759 | 314 | 0.646106 | 2,689 | 0.702823 | 0 | 0 | 0 | 0 | 0 | 0 | 1,039 | 0.271563 |
25796623b047f49a864e729351b380a30f77c0c5 | 26 | py | Python | Unlock_webapp/libs/grpc/_grpcio_metadata.py | maxathon2020/Enk | 1bcf9c788e8b0ab5d21821b694292a3c110eb526 | [
"MIT"
] | 1 | 2021-01-15T18:00:01.000Z | 2021-01-15T18:00:01.000Z | Unlock_webapp/libs/grpc/_grpcio_metadata.py | maxathon2020/Enk | 1bcf9c788e8b0ab5d21821b694292a3c110eb526 | [
"MIT"
] | null | null | null | Unlock_webapp/libs/grpc/_grpcio_metadata.py | maxathon2020/Enk | 1bcf9c788e8b0ab5d21821b694292a3c110eb526 | [
"MIT"
] | null | null | null | __version__ = """1.32.0""" | 26 | 26 | 0.576923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.461538 |
2579c36b1d400b2989548b5ef20920bc5aa3d5ac | 17,767 | py | Python | ASV/ASV/nodes/averager.py | Southampton-Maritime-Robotics/Autonomous-Ship-and-Wavebuoys | bea27ac87b0e2991096da7f1b1c2197f1d620a51 | [
"MIT"
] | 4 | 2017-11-09T12:05:14.000Z | 2021-06-25T05:59:15.000Z | ASV/ASV/nodes/averager.py | Southampton-Maritime-Robotics/Autonomous-Ship-and-Wavebuoys | bea27ac87b0e2991096da7f1b1c2197f1d620a51 | [
"MIT"
] | null | null | null | ASV/ASV/nodes/averager.py | Southampton-Maritime-Robotics/Autonomous-Ship-and-Wavebuoys | bea27ac87b0e2991096da7f1b1c2197f1d620a51 | [
"MIT"
] | 1 | 2021-05-08T20:09:50.000Z | 2021-05-08T20:09:50.000Z | #!/usr/bin/python
##############################################################################
#averager.py
#
#This code has been created by Enrico Anderlini (ea3g09@soton.ac.uk) for
#averaging the main readings required during the QinetiQ tests. These values
#averaged over one minute will be published to an external logfile.
#
#Modifications to code
#16/02/2013 code created
#17/02/2013 removal of the calls to library_highlevel.py because whenever
# one of the nodes was not being published the node exited with
# errors.
#
##############################################################################
#Notes
#
#At the moment this file publishes to an external log file the values for the
#motor demand (rpm, voltage or power), the propeller rpm, the motor voltage or
#power, the battery voltage and the case temperature (hence, 4 values in total
#plus the time at which they have been sampled). Other variables may be added
#as required.
#
##############################################################################
import roslib; roslib.load_manifest('ASV')
import rospy
import time
import csv
import os
import numpy
from datetime import datetime
from std_msgs.msg import Float32
from std_msgs.msg import Int8
from std_msgs.msg import String
from ASV.msg import status
# Defining global variables
# NOTE(review): `global` statements at module scope are no-ops in Python --
# they neither declare nor initialise these names.  The names below are
# actually created by the assignments in the __main__ block and in the
# subscriber callbacks; this list only documents which globals exist.
global time_zero
global counter
global Motor_setting
global Motor_target
global total_motor
global Prop_rpm
global total_rpm
global avg_rpm
global Voltage
global total_voltage
global avg_voltage
global Motor_current
global total_current
global avg_current
global Power
global total_power
global avg_power
global battery_voltage
global total_BatteryVoltage
global avg_BatteryVoltage
global Temperature
global total_temperature
global avg_temperature
global Thrust
global total_thrust
global avg_thrust
###############################################################
#The following functions write the values this node subscribes to into different
#log files in .cvs format within the folder ~/logFiles created within the main
#function.
###############################################################
def printer(setting, target, rpm, voltage, current, power, BatteryVoltage, temperature, thrust):
    """Append one time-stamped row of averaged readings to <dirname>/averageLog.csv.

    `dirname` and `time_zero` are module-level names created in the __main__
    block.  NOTE(review): the header row is re-written before every data row,
    so the CSV contains a repeated title line per sample -- confirm whether a
    single header was intended.
    """
    #The stringtime variable is used in all these functions to store the time of
    #the reading (starting from the time of the start-up (zero))-expressed in seconds.
    stringtime = time.time()-time_zero
    averageList = [stringtime, setting, target, rpm, voltage, current, power, BatteryVoltage, temperature, thrust]
    title = ['time', 'setting', 'target', 'rpm', 'volt', 'current', 'power', 'battery', 'temp', 'thrust']
    print title
    print averageList
    with open('%s/averageLog.csv' %(dirname), "a") as f:
        try:
            Writer = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            Writer.writerow(title)
            Writer.writerow(averageList)
        except ValueError:
            print 'writerow error'
########################## Callback Functions #################################
def motor_setting_cb(Motor_setting):
    """ROS callback ('setMotorTargetMethod', Int8): latch the latest value into a global."""
    global motor_setting
    motor_setting = Motor_setting.data
def motor_target_cb(Motor_target):
    """ROS callback ('setMotorTarget', Float32): latch the latest value into a global."""
    global motor_target
    motor_target = Motor_target.data
def prop_rpm_cb(Prop_rpm):
    """ROS callback ('prop_rpm', Float32): latch the latest propeller rpm into a global."""
    global prop_rpm
    prop_rpm = Prop_rpm.data
def motor_voltage_cb(Voltage):
    """ROS callback ('motor_voltage', Float32): latch the latest motor voltage into a global."""
    global voltage
    voltage = Voltage.data
def motor_current_cb(Motor_current):
    """ROS callback ('motor_current', Float32): latch the latest motor current into a global."""
    global motor_current
    motor_current = Motor_current.data
def motor_power_cb(Motor_power):
    """ROS callback ('motor_power', Float32): latch the latest motor power into a global."""
    global motor_power
    motor_power = Motor_power.data
def thrust_cb(Thrust):
    """ROS callback ('thrust', Float32): latch the latest thrust reading into a global."""
    global thrust
    thrust = Thrust.data
def battery_voltage_cb(battery_voltage):
    """ROS callback ('battery_voltage', Float32): latch the latest battery voltage into a global."""
    global BatteryVoltage
    BatteryVoltage = battery_voltage.data
def temperature_cb(Temperature):
    """ROS callback ('CaseTemperature', Float32): latch the latest case temperature into a global."""
    global temperature
    temperature = Temperature.data
##############################################################
#def shutdown():
#shutdown behaviour - close all files
#print 'shutting down'
# with open('%s/path.kml' %(dirname), "a") as f:
# try:
# f.write('</coordinates>\n </LineString>\n </Placemark>\n </kml>\n')
# except ValueError:
# print 'write error'
################################## MAIN FUNCTION ###############################
if __name__ == '__main__':
    # ---- node start-up ----------------------------------------------------
    rospy.init_node('averager')

    stringtime = datetime.now()
    stringtime = stringtime.strftime('%Y-%m-%d_%H-%M-%S')
    rospy.loginfo('Logger started at %s.'%(stringtime))
    pub_folder = rospy.Publisher('folder', String)

    # ---- output folders ---------------------------------------------------
    # One sub-folder per run, named after the start-up timestamp; the folder
    # name is published on 'folder' so other nodes can log into the same place.
    logfolder = 'AverageValues'
    dirname = logfolder + '/' + stringtime
    if not os.path.isdir(logfolder):
        print('made logfolder')
        os.mkdir(logfolder)
    if not os.path.isdir(dirname):
        print('made test folder')
        os.mkdir(dirname)
    time.sleep(5)  # give subscribers of 'folder' time to connect
    pub_folder.publish(dirname)

    # ---- timing reference and accumulators --------------------------------
    time_zero = time.time()
    counter = 0
    # latest sampled values (updated asynchronously by the callbacks above)
    motor_setting = 0
    motor_target = 0
    prop_rpm = 0
    voltage = 0
    motor_current = 0
    motor_power = 0
    BatteryVoltage = 0
    temperature = 0
    thrust = 0
    # running totals and averages
    total_motor = 0
    avg_motor = 0
    total_rpm = 0
    avg_rpm = 0
    total_voltage = 0
    avg_voltage = 0
    total_current = 0
    avg_current = 0
    total_power = 0
    avg_power = 0
    total_BatteryVoltage = 0
    avg_BatteryVoltage = 0
    total_temperature = 0
    avg_temperature = 0
    total_thrust = 0
    avg_thrust = 0

    # ---- subscribers ------------------------------------------------------
    rospy.Subscriber('setMotorTargetMethod', Int8, motor_setting_cb)
    rospy.Subscriber('setMotorTarget', Float32, motor_target_cb)
    rospy.Subscriber('prop_rpm', Float32, prop_rpm_cb)
    rospy.Subscriber('motor_voltage', Float32, motor_voltage_cb)
    rospy.Subscriber('motor_current', Float32, motor_current_cb)
    rospy.Subscriber('motor_power', Float32, motor_power_cb)
    rospy.Subscriber('thrust', Float32, thrust_cb)
    rospy.Subscriber('battery_voltage', Float32, battery_voltage_cb)
    rospy.Subscriber('CaseTemperature', Float32, temperature_cb)

    # ---- sampling loop ----------------------------------------------------
    # The original code repeated the same accumulate/average/print body nine
    # times, once per 20 s window up to 180 s.  The loop below is the exact
    # de-duplicated equivalent: each window's while-loop is only entered after
    # the previous one has finished, so the original lower time bound
    # (e.g. ">20 and <=40") is implicit.
    #
    # NOTE(review): behaviours preserved from the original, to be confirmed:
    #  * counter and the totals are never reset between windows, so each
    #    printed "average" is cumulative since start-up, not per-window;
    #  * total_BatteryVoltage is assigned (not summed), so its "average"
    #    divides the latest reading by the cumulative sample count.
    for window_end in range(20, 181, 20):
        while (time.time() - time_zero) <= window_end:
            counter = counter + 1
            total_rpm = prop_rpm + total_rpm
            total_voltage = voltage + total_voltage
            total_current = motor_current + total_current
            total_power = motor_power + total_power
            total_BatteryVoltage = BatteryVoltage
            total_temperature = temperature + total_temperature
            total_thrust = thrust + total_thrust
            rospy.sleep(0.1)  # sample at roughly 10 Hz
        avg_rpm = total_rpm / (counter+1)
        avg_voltage = total_voltage / (counter+1)
        avg_current = total_current / (counter+1)
        avg_power = total_power / (counter+1)
        avg_BatteryVoltage = total_BatteryVoltage / (counter+1)
        avg_temperature = total_temperature / (counter+1)
        avg_thrust = total_thrust / (counter+1)
        printer(motor_setting, motor_target, avg_rpm, avg_voltage, avg_current, avg_power, avg_BatteryVoltage, avg_temperature, avg_thrust)
| 39.394678 | 139 | 0.585017 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,336 | 0.187764 |
257a8339f2250ee65e5f7d0aa4d5a83f1b6f928b | 12,827 | py | Python | joulescope_ui/oscilloscope/marker.py | Axel-Jacobsen/pyjoulescope_ui | 7d296b1ead0d36c6524dc399372f7888a340e9fa | [
"Apache-2.0"
] | 1 | 2019-08-08T21:10:26.000Z | 2019-08-08T21:10:26.000Z | joulescope_ui/oscilloscope/marker.py | Axel-Jacobsen/pyjoulescope_ui | 7d296b1ead0d36c6524dc399372f7888a340e9fa | [
"Apache-2.0"
] | null | null | null | joulescope_ui/oscilloscope/marker.py | Axel-Jacobsen/pyjoulescope_ui | 7d296b1ead0d36c6524dc399372f7888a340e9fa | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import weakref
from .signal import Signal
from .signal_statistics import si_format, html_format
import logging
TIME_STYLE_DEFAULT = 'color: #FFF; background-color: #000; font-size: 8pt'
class Marker(pg.GraphicsObject):
"""A vertical x-axis marker for display on the oscilloscope.
:param x_axis: The x-axis :class:`pg.AxisItem` instance.
:param color: The [R,G,B] or [R,G,B,A] color for the marker.
:param shape: The marker flag shape which is one of:
['full', 'left', 'right', 'none'].
"""
sigUpdateRequest = QtCore.Signal(object)
"""Request a value update when x-axis position changes.
:param marker: The :class:`Marker` instance requesting the update.
For dual markers, the marker that moved will signal.
"""
sigRemoveRequest = QtCore.Signal(object)
"""Indicate that the user has requested to remove this marker
:param marker: The marker instance to remove.
"""
sigExportDataRequest = QtCore.Signal(float, float)
"""Indicate that the user has requested to export data.
:param x_start: The starting position in x-axis units.
:param x_stop: The stopping position in x-axis units.
Export is only triggered for dual markers. Exporting data for a single
marker is not supported.
"""
def __init__(self, name, x_axis: pg.AxisItem, color=None, shape=None):
pg.GraphicsObject.__init__(self)
self.log = logging.getLogger('%s.%s' % (__name__, name))
self._name = name
self._axis = weakref.ref(x_axis)
self.color = (64, 255, 64, 255) if color is None else color
self._boundingRect = None
self.picture = None
self._shape = shape
self.setPos(pg.Point(0, 0))
self._x = 0.0 # in self._axis coordinates
# self.setZValue(2000000)
self._pair = None
self.moving = False
self._marker_time_text = pg.TextItem("t=0.00")
self._delta_time_text = pg.TextItem("Δt=0.00")
self._delta_time_text.setAnchor([0.5, 0])
self.graphic_items = [self._marker_time_text, self._delta_time_text]
self.text = {} #: Dict[str, List[weakref.ReferenceType[Signal], TextItem]]
def __str__(self):
return f'Marker({self.name})'
@property
def name(self):
return self._name
@property
def is_right(self):
return self._pair is not None and self.name[1] == '2'
@property
def is_left(self):
return self._pair is not None and self.name[1] == '1'
@property
def pair(self):
return self._pair
@pair.setter
def pair(self, value):
self._pair = value
if self.is_left:
self._marker_time_text.setAnchor([1, 0])
self._delta_time_text.setVisible(True)
else:
self._marker_time_text.setAnchor([0, 0])
self._delta_time_text.setVisible(False)
def signal_add(self, signal: Signal):
txt = pg.TextItem()
self.text[signal.name] = [weakref.ref(signal), txt]
signal.vb.addItem(txt)
txt.setVisible(True)
def signal_update(self, signal: Signal):
if signal.name not in self.text:
self.signal_add(signal)
_, txt = self.text[signal.name]
xv = self.get_pos()
labels = signal.statistics_at(xv)
if len(labels):
txt_result = si_format(labels, units=signal.units)
html = html_format(txt_result, x=xv)
self.html_set(signal.name, html)
def signal_update_all(self):
for signal_ref, _ in self.text.values():
s = signal_ref()
if s is not None:
self.signal_update(s)
def signal_remove(self, name):
if isinstance(name, Signal):
name = name.name
if name not in self.text:
self.log.warning('signal_remove(%s) but not found', name)
return
signal_ref, txt = self.text.pop(name)
signal = signal_ref()
if signal is not None:
signal.vb.scene().removeItem(txt)
def signal_remove_all(self):
for name in list(self.text.keys()):
self.signal_remove(name)
def html_set(self, signal_name, html):
if signal_name not in self.text:
self.log.debug('html_set(%s) but does not exist', signal_name)
return
signal_ref, txt = self.text[signal_name]
signal = signal_ref()
if signal is None:
self.log.warning('html_set(%s) but signal ref not valid', signal_name)
return
xv = self.get_pos()
vb = signal.vb
ys = vb.geometry().top()
yv = vb.mapSceneToView(pg.Point(0.0, ys)).y()
txt.setPos(pg.Point(xv, yv))
txt.setHtml(html)
def _endpoints(self):
"""Get the endpoints in the scene's (parent) coordinates.
:return: (top, bottom) pg.Point instances
"""
axis = self._axis()
if axis is None:
return None, None
vb = axis.linkedView()
if vb is None:
return None, None
bounds = axis.geometry()
tickBounds = vb.geometry()
point = pg.Point(self._x, 0.0)
x = vb.mapViewToScene(point).x()
p1 = pg.Point(x, bounds.bottom())
p2 = pg.Point(x, tickBounds.bottom())
return p1, p2
def boundingRect(self):
    """Compute (and cache) the marker's bounding rectangle.

    The rectangle spans from the top of the axis down to the bottom
    endpoint of the marker line, widened by half the axis height on each
    side so the flag drawn by paint_flag is fully contained.

    :return: the cached QtCore.QRectF, or an empty rect when the axis or
        the endpoints are unavailable.
    """
    cached = self._boundingRect
    if cached is not None:  # use cache
        return cached
    axis = self._axis()
    if axis is None:
        return QtCore.QRectF()
    top = axis.geometry().top()
    h = axis.geometry().height()
    w = h // 2 + 1  # half-width: flag half-width plus the 1 px line
    # Only the bottom endpoint is needed; the original bound an unused p1.
    _, p2 = self._endpoints()
    if p2 is None:
        return QtCore.QRectF()
    x = p2.x()
    bottom = p2.y()
    self._boundingRect = QtCore.QRectF(x - w, top, 2 * w, bottom - top)
    self.log.debug('boundingRect: %s => %s', self._x, str(self._boundingRect))
    return self._boundingRect
def paint_flag(self, painter, p1):
    """Draw the marker's flag polygon onto *painter*, anchored at *p1*.

    The flag's horizontal extent depends on self._shape: 'right' extends
    left of the line, 'left' extends right, any other value is two-sided,
    and None/'none' draws nothing.
    """
    axis = self._axis()
    if axis is None:
        return
    h = axis.geometry().height()
    he = h // 3   # vertical extent of the pointed notch at the flag bottom
    w2 = h // 2   # half-width of the flag
    if self._shape in [None, 'none']:
        return
    if self._shape in ['right']:
        wl, wr = -w2, 0
    elif self._shape in ['left']:
        wl, wr = 0, w2
    else:
        wl, wr = -w2, w2
    brush = pg.mkBrush(self.color)
    painter.setBrush(brush)
    # NOTE(review): setPen(None) assumes the Qt binding accepts None as
    # "no pen" — confirm for the binding in use (PyQt rejects it in some versions).
    painter.setPen(None)
    painter.resetTransform()
    painter.translate(p1)
    # Polygon points are relative to p1; negative y extends upward.
    painter.drawConvexPolygon([
        pg.Point(0, 0),
        pg.Point(wl, -he),
        pg.Point(wl, -h),
        pg.Point(wr, -h),
        pg.Point(wr, -he)
    ])
def paint(self, p, opt, widget):
    """Render the marker line and flag, caching draw commands in a QPicture.

    :param p: active QPainter supplied by the scene.
    :param opt: style options (unused; Qt paint signature).
    :param widget: target widget (unused; Qt paint signature).
    """
    profiler = pg.debug.Profiler()
    axis = self._axis()
    if axis is None or axis.linkedView() is None:
        return
    if self.picture is None:
        p.resetTransform()
        # Create the painter BEFORE the try block: in the original code an
        # exception raised before `painter` was bound made the finally
        # clause fail with NameError, masking the real error.
        picture = QtGui.QPicture()
        painter = QtGui.QPainter(picture)
        try:
            pen = pg.mkPen(self.color)
            pen.setWidth(1)
            painter.setPen(pen)
            p1, p2 = self._endpoints()
            painter.drawLine(p1, p2)
            self.paint_flag(painter, p1)
            profiler('draw picture')
        finally:
            painter.end()
        self.picture = picture
    self.picture.play(p)
def _redraw(self):
    """Invalidate cached rendering state and schedule a repaint."""
    self.picture = None          # force paint() to rebuild the QPicture
    self._boundingRect = None    # force boundingRect() to recompute
    self._update_marker_text()
    self.prepareGeometryChange()
    self.update()
def resizeEvent(self, ev=None):
    # Widget resized: cached geometry/picture are stale.
    self._redraw()

def viewRangeChanged(self):
    # Visible data range changed: marker scene position must be recomputed.
    self._redraw()

def viewTransformChanged(self):
    # View-to-scene transform changed: same invalidation as a range change.
    self._redraw()

def linkedViewChanged(self, view, newRange=None):
    # The axis was relinked to a different view; arguments are unused here.
    self._redraw()
def set_pos(self, x, no_emit=None):
    """Set the x-axis position for the marker.

    Clears every tracked signal text (it is stale at the new position) and
    repositions it; fresh statistics arrive via a later signal_update.

    :param x: The new x-axis position in Axis coordinates.
    :param no_emit: When True, do not emit any updates.
        When False or None (default) emit updates.
    """
    self._x = x
    for signal_ref, text in self.text.values():
        text.setText('')  # better to have nothing than be wrong
        s = signal_ref()
        if s is not None:
            # Pin the (now empty) text to the top of the signal's viewbox
            # at the new marker position.
            vby = s.vb.geometry().top()
            px = s.vb.mapViewToScene(pg.Point(x, 0.0)).x()
            text.setPos(px, vby)
    # signal the update request as necessary
    if not bool(no_emit):
        self.sigUpdateRequest.emit(self)
    self._redraw()
def _update_marker_text(self):
    """Refresh the "t=..." label and, for dual markers, the Δt label."""
    x = self._x
    style = TIME_STYLE_DEFAULT
    self._marker_time_text.setHtml(f'<div><span style="{style}">t={x:.6f}</span></div>')
    axis = self._axis()
    if axis is None:
        return
    vb = axis.linkedView()
    if vb is None:
        return
    g = axis.geometry()
    axis_top = g.top()
    axis_height = axis.geometry().height()
    text_offset = axis_height // 2
    x_scene = vb.mapViewToScene(pg.Point(x, 0.0)).x()
    if self._pair is None:
        # Single marker: offset the label so it does not sit on the line.
        self._marker_time_text.setPos(x_scene + text_offset, axis_top)
    elif self.is_left:
        self._marker_time_text.setPos(x_scene, axis_top)
        self._update_delta_time()
    else:
        # Right marker of a pair: the left marker owns the Δt label,
        # so forward the delta update to the pair.
        self._marker_time_text.setPos(x_scene, axis_top)
        self._pair._update_delta_time()
def _update_delta_time(self):
    """Update the Δt label shown between a pair of markers.

    Only the left marker renders the label; a call on the right marker is
    forwarded to its pair.
    """
    if self.is_left:
        style = TIME_STYLE_DEFAULT
        axis = self._axis()
        if axis is None:
            return
        axis_top = axis.geometry().top()
        vb = axis.linkedView()
        if vb is None:
            return
        x_left = self._x
        x_right = self._pair._x
        dx = abs(x_right - x_left)
        x_center = (x_left + x_right) / 2  # label is centered between the markers
        x_scene = vb.mapViewToScene(pg.Point(x_center, 0.0)).x()
        self._delta_time_text.setHtml(f'<div><span style="{style}">Δt={dx:.3g}</span></div>')
        self._delta_time_text.setPos(x_scene, axis_top)
    elif self.is_right:
        self._pair._update_delta_time()
def get_pos(self):
    """Get the current x-axis position for the marker.

    :return: The current x-axis position in the Axis coordinates (float).
    """
    return self._x
def on_xChangeSignal(self, x_min, x_max, x_count):
    """Redraw when the x-axis range changes; the arguments are unused here."""
    self._redraw()
def mouseClickEvent(self, ev):
    """Toggle drag mode on left click; open the context menu on right click.

    A first left click sets self.moving so the marker follows the mouse;
    a second left click drops it in place.  The event is always accepted.
    """
    self.log.info('mouseClickEvent(%s)', ev)
    ev.accept()
    if not self.moving:
        if ev.button() == QtCore.Qt.LeftButton:
            self.moving = True  # start following the mouse
        elif ev.button() == QtCore.Qt.RightButton:
            pos = ev.screenPos().toPoint()
            self.menu_exec(pos)
    else:
        if ev.button() == QtCore.Qt.LeftButton:
            self.moving = False  # drop the marker at the current position
        elif ev.button() == QtCore.Qt.RightButton:
            pass  # todo restore original position
@QtCore.Slot()
def export_data(self):
    """Emit sigExportDataRequest for the span between the paired markers.

    :raise RuntimeError: when this marker has no pair (single marker).
    """
    if self._pair is None:
        raise RuntimeError('export_data only available on dual markers')
    x_a = self.get_pos()
    x_b = self._pair.get_pos()
    self.sigExportDataRequest.emit(min(x_a, x_b), max(x_a, x_b))
def menu_exec(self, pos):
    """Show the marker context menu at screen position *pos* (blocking).

    Dual markers additionally offer an "Export data" action; every marker
    offers "Remove", which emits sigRemoveRequest with this marker.
    """
    menu = QtGui.QMenu()
    menu.setToolTipsVisible(True)
    if self._pair is not None:
        export_data = QtGui.QAction('&Export data', self)
        export_data.triggered.connect(self.export_data)
        menu.addAction(export_data)
    marker_remove = QtGui.QAction('&Remove', self)
    marker_remove.triggered.connect(lambda: self.sigRemoveRequest.emit(self))
    menu.addAction(marker_remove)
    menu.exec_(pos)
def setVisible(self, visible):
    """Show or hide the marker together with all attached graphic items."""
    super().setVisible(visible)
    for graphic_item in self.graphic_items:
        graphic_item.setVisible(visible)
| 33.23057 | 97 | 0.580416 | 11,999 | 0.935303 | 0 | 0 | 939 | 0.073194 | 0 | 0 | 2,656 | 0.207031 |
257b5ca1436a1b26014145a5d43f0c4a06c06313 | 2,267 | py | Python | usb-camera-gst.py | jetsonhacks/USB-Camera | 53d34b974428bfc15469cd52c92ceac60feb60fe | [
"MIT"
] | 14 | 2022-01-21T23:56:58.000Z | 2022-03-10T02:46:10.000Z | usb-camera-gst.py | JetsonHacksNano/USB-Camera | 53d34b974428bfc15469cd52c92ceac60feb60fe | [
"MIT"
] | null | null | null | usb-camera-gst.py | JetsonHacksNano/USB-Camera | 53d34b974428bfc15469cd52c92ceac60feb60fe | [
"MIT"
] | 4 | 2022-01-28T18:52:31.000Z | 2022-03-05T20:03:55.000Z | #!/usr/bin/env python3
#
# USB Camera - Simple
#
# Copyright (C) 2021-22 JetsonHacks (info@jetsonhacks.com)
#
# MIT License
#
import sys
import cv2
# Title used for the OpenCV display window (and for window-property queries).
window_title = "USB Camera"

# ASSIGN CAMERA ADDRESS to DEVICE HERE!
# Raw-capture GStreamer pipeline: V4L2 source -> 640x480@30fps -> BGR appsink.
pipeline = " ! ".join(["v4l2src device=/dev/video0",
                       "video/x-raw, width=640, height=480, framerate=30/1",
                       "videoconvert",
                       "video/x-raw, format=(string)BGR",
                       "appsink"
                       ])

# Sample pipeline for H.264 video, tested on Logitech C920
# (decodes the camera's hardware H.264 stream before converting to BGR).
h264_pipeline = " ! ".join(["v4l2src device=/dev/video0",
                            "video/x-h264, width=1280, height=720, framerate=30/1, format=H264",
                            "avdec_h264",
                            "videoconvert",
                            "video/x-raw, format=(string)BGR",
                            "appsink sync=false"
                            ])
def show_camera():
    """Stream frames from the GStreamer pipeline into an OpenCV window.

    Runs until the user closes the window or presses ESC or 'q'.  Prints an
    error message when the camera cannot be opened.
    """
    # Full list of Video Capture APIs (video backends):
    # https://docs.opencv.org/3.4/d4/d15/group__videoio__flags__base.html
    # For webcams, we use V4L2 via the GStreamer backend.
    video_capture = cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)
    if not video_capture.isOpened():
        print("Error: Unable to open camera")
        return
    try:
        # The original bound the (unused) return value to window_handle.
        cv2.namedWindow(window_title, cv2.WINDOW_AUTOSIZE)
        while True:
            ret_val, frame = video_capture.read()
            # Check to see if the user closed the window.
            # Under GTK+ (Jetson default), WND_PROP_VISIBLE does not work
            # correctly; under Qt it does.  GTK - substitute
            # WND_PROP_AUTOSIZE to detect if the window has been closed.
            if cv2.getWindowProperty(window_title, cv2.WND_PROP_AUTOSIZE) < 0:
                break
            cv2.imshow(window_title, frame)
            key_code = cv2.waitKey(10) & 0xFF
            # Stop the program on the ESC key or 'q'.
            if key_code in (27, ord('q')):
                break
    finally:
        video_capture.release()
        cv2.destroyAllWindows()


if __name__ == "__main__":
    show_camera()
| 33.338235 | 123 | 0.538597 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 998 | 0.440229 |
257c7887bbadadf7ad524f6cc017c11ec7078dfe | 219 | py | Python | louis/webdev/myblog/blog/admin.py | GadinganJayHarley06/intern-blog | b442c6f307da63d8687773df7bcbf28ceab3e6a9 | [
"MIT"
] | null | null | null | louis/webdev/myblog/blog/admin.py | GadinganJayHarley06/intern-blog | b442c6f307da63d8687773df7bcbf28ceab3e6a9 | [
"MIT"
] | null | null | null | louis/webdev/myblog/blog/admin.py | GadinganJayHarley06/intern-blog | b442c6f307da63d8687773df7bcbf28ceab3e6a9 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Index, Tag, Category, Post
# Register the blog models with the default Django admin site so they can
# be created/edited through the admin interface.
admin.site.register(Index)
admin.site.register(Tag)
admin.site.register(Category)
admin.site.register(Post)
257cae7ff7dac86cc983c3e6d70faff1d9548ba3 | 588 | py | Python | 035.py | canis/euler-py | 56e500fcaebd06dc59c85e2b70d095fc9f268ce0 | [
"MIT"
] | null | null | null | 035.py | canis/euler-py | 56e500fcaebd06dc59c85e2b70d095fc9f268ce0 | [
"MIT"
] | null | null | null | 035.py | canis/euler-py | 56e500fcaebd06dc59c85e2b70d095fc9f268ce0 | [
"MIT"
] | null | null | null | from utils.math import primes
def rotate(num):
    """Return the frozenset of all digit rotations of *num*.

    For multi-digit numbers, returns None as soon as any rotation ends in
    an even digit or 0/5 — such a rotation cannot be prime, so the number
    cannot be a circular prime (Project Euler 35 pruning).
    """
    digits = str(num)
    rotations = set()
    for _ in range(len(digits)):
        digits = digits[1:] + digits[0]
        if num > 9 and int(digits[-1]) in {2, 4, 5, 6, 8, 0}:
            return None
        rotations.add(int(digits))
    return frozenset(rotations)
# Collect the rotation cycles of the primes below 100, then count the
# cycles in which every rotation is itself prime (circular-prime cycles).
# NOTE(review): `primes` comes from utils.math — presumably returns the
# primes below its argument; confirm.
p = primes(100)
print (p)
r = set()
for num in p:
    rot = rotate(num)
    if rot:
        r.add(rot)
# s starts as the number of candidate cycles; each cycle containing a
# non-prime rotation is subtracted below.
s = len(r)
for fs in r:
    print(fs)
    for num in fs:
        print(num)
        if num not in p:
            print ('---brk')
            s -= 1
            break
print(s)
| 15.473684 | 56 | 0.440476 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.013605 |
257e3291467a365d825dce2ce9baa7043728edeb | 10,227 | py | Python | results.py | MariusWirtz/optimus-py | 09378f6daf759ee0ff8a0e83d614ffba1720b956 | [
"MIT"
] | 1 | 2020-07-03T22:15:45.000Z | 2020-07-03T22:15:45.000Z | results.py | MariusWirtz/tm1-optipyzer | 09378f6daf759ee0ff8a0e83d614ffba1720b956 | [
"MIT"
] | 3 | 2020-02-17T23:28:47.000Z | 2020-05-06T20:21:56.000Z | results.py | MariusWirtz/optimus-py | 09378f6daf759ee0ff8a0e83d614ffba1720b956 | [
"MIT"
] | null | null | null | import itertools
import logging
import os
import statistics
from pathlib import WindowsPath
from typing import List, Union
import seaborn as sns
sns.set_theme()
import matplotlib.pyplot as plt
import pandas as pd
# Field separator used when serialising results to CSV.
SEPARATOR = ","
# Column headers for the result rows produced by PermutationResult.to_row.
# NOTE(review): the "Mean ..." labels are populated with *median* values
# (statistics.median) by PermutationResult — confirm intended wording.
HEADER = ["ID", "Mode", "Is Best", "Mean Query Time", "Query Ratio", "Mean Process Time", "Process Ratio", "RAM",
          "RAM in GB"]
class PermutationResult:
    """Benchmark result for one dimension-order permutation of a cube.

    Class-level state links successive results: ``counter`` numbers the
    permutations, and ``current_ram`` carries the last absolute RAM value so
    that results reported only as a percentage change can be resolved.
    """

    # Running permutation id; reset via reset_counter=True in __init__.
    counter = 1
    # Absolute RAM of the most recently constructed result (bytes).
    current_ram = None

    def __init__(self, mode: str, cube_name: str, view_names: list, process_name: str, dimension_order: list,
                 query_times_by_view: dict, process_times_by_process: dict, ram_usage: float = None,
                 ram_percentage_change: float = None,
                 reset_counter: bool = False):
        """Store the measurements for one permutation.

        Exactly one of ``ram_usage`` (absolute, for the original order) or
        ``ram_percentage_change`` (relative to the previous result) must be
        given; otherwise a RuntimeError is raised.
        """
        # Imported lazily to avoid a circular import with optimuspy.
        from optimuspy import ExecutionMode
        self.mode = ExecutionMode(mode)
        self.cube_name = cube_name
        self.view_names = view_names
        self.process_name = process_name
        self.dimension_order = dimension_order
        self.query_times_by_view = query_times_by_view
        self.process_times_by_process = process_times_by_process
        self.is_best = False
        # Process timings are optional; absence switches reports to query-only.
        if process_name is None:
            self.include_process = False
        else:
            self.include_process = True
        # from original dimension order
        if ram_usage:
            self.ram_usage = ram_usage
        # from all other dimension orders
        elif ram_percentage_change is not None:
            self.ram_usage = PermutationResult.current_ram + (
                    PermutationResult.current_ram * ram_percentage_change / 100)
        else:
            raise RuntimeError("Either 'ram_usage' or 'ram_percentage_change' must be provided")
        PermutationResult.current_ram = self.ram_usage
        self.ram_percentage_change = ram_percentage_change or 0
        if reset_counter:
            PermutationResult.counter = 1
        self.permutation_id = PermutationResult.counter
        PermutationResult.counter += 1

    def median_query_time(self, view_name: str = None) -> float:
        """Median query time for *view_name* (default: first view).

        :raise RuntimeError: when the median is falsy (0), i.e. the view is
            too small to produce a measurable query time.
        """
        view_name = view_name or self.view_names[0]
        median = statistics.median(self.query_times_by_view[view_name])
        if not median:
            raise RuntimeError(f"view '{view_name}' in cube '{self.cube_name}' is too small")
        return median

    def median_process_time(self, process_name: str = None) -> float:
        """Median process execution time for *process_name* (default: own process)."""
        process_name = process_name or self.process_name
        median = statistics.median(self.process_times_by_process[process_name])
        return median

    def build_header(self) -> list:
        """Return HEADER extended with one 'DimensionN' column per dimension."""
        dimensions = []
        for d in range(1, len(self.dimension_order) + 1):
            dimensions.append("Dimension" + str(d))
        header = HEADER + dimensions
        return header

    def build_csv_header(self) -> str:
        """Return the header as a CSV line (terminated with a newline)."""
        return SEPARATOR.join(self.build_header()) + "\n"

    def to_row(self, view_name: str, process_name: str, original_order_result: 'PermutationResult') -> List[str]:
        """Build one report row; ratios are relative to the original order.

        When *process_name* is None the process columns are filled with 0.
        """
        # Imported lazily to avoid a circular import with optimuspy.
        from optimuspy import LABEL_MAP
        median_query_time = float(self.median_query_time(view_name))
        original_median_query_time = float(original_order_result.median_query_time(view_name))
        query_time_ratio = median_query_time / original_median_query_time - 1
        row = [
            str(self.permutation_id),
            LABEL_MAP[self.mode],
            str(self.is_best),
            median_query_time,
            query_time_ratio]
        if process_name is not None:
            median_process_time = float(self.median_process_time(process_name))
            original_median_process_time = float(original_order_result.median_process_time(process_name))
            process_time_ratio = median_process_time / original_median_process_time - 1
            row += [median_process_time, process_time_ratio]
        else:
            row += [0, 0]
        ram_in_gb = float(self.ram_usage) / (1024 ** 3)
        row += [self.ram_usage, ram_in_gb] + list(self.dimension_order)
        return row

    def to_csv_row(self, view_name: str, process_name: str, original_order_result: 'PermutationResult') -> str:
        """Return to_row serialised as a CSV line (terminated with a newline)."""
        row = [str(i) for i in self.to_row(view_name, process_name, original_order_result)]
        return SEPARATOR.join(row) + "\n"
class OptimusResult:
    """Aggregates the PermutationResults of one cube and reports on them.

    On construction, determines the best permutation (balancing RAM, query
    time and, when available, process time) and flags it via ``is_best``.
    """

    TEXT_FONT_SIZE = 5

    def __init__(self, cube_name: str, permutation_results: List[PermutationResult]):
        self.cube_name = cube_name
        self.permutation_results = permutation_results
        if len(permutation_results) == 0:
            raise RuntimeError("Number of permutation results can not be 0")
        self.include_process = permutation_results[0].include_process
        self.best_result = self.determine_best_result()
        if self.best_result:
            # Flag the winning permutation so reports can highlight it.
            for permutation_result in permutation_results:
                if permutation_result.permutation_id == self.best_result.permutation_id:
                    permutation_result.is_best = True

    def to_dataframe(self, view_name: str, process_name: str) -> pd.DataFrame:
        """Return a DataFrame with one row per permutation result."""
        header = self.permutation_results[0].build_header()
        rows = []
        for result in self.permutation_results:
            rows.append(result.to_row(view_name, process_name, self.original_order_result))
        return pd.DataFrame(rows, columns=header)

    def to_lines(self, view_name: str, process_name: str) -> List[str]:
        """Return the CSV lines: header plus one row per permutation result."""
        lines = itertools.chain(
            [self.permutation_results[0].build_csv_header()],
            [result.to_csv_row(view_name, process_name, self.original_order_result) for result in
             self.permutation_results])
        return list(lines)

    def to_csv(self, view_name: str, process_name: str, file_name: 'WindowsPath'):
        """Write the results to *file_name* as CSV, creating parent folders."""
        lines = self.to_lines(view_name, process_name)
        os.makedirs(os.path.dirname(str(file_name)), exist_ok=True)
        with open(str(file_name), "w") as file:
            file.writelines(lines)

    def to_xlsx(self, view_name: str, process_name: str, file_name: 'WindowsPath'):
        """Write the results to *file_name* as XLSX.

        Falls back to CSV (same name, .csv suffix) when xlsxwriter is not
        installed.
        """
        try:
            import xlsxwriter
            # Create a workbook and add a worksheet.
            workbook = xlsxwriter.Workbook(file_name)
            worksheet = workbook.add_worksheet()
            # Iterate over the data and write it out row by row.
            for row, line in enumerate(self.to_lines(view_name, process_name)):
                for col, item in enumerate(line.split(SEPARATOR)):
                    worksheet.write(row, col, item)
            workbook.close()
        except ImportError:
            logging.warning("Failed to import xlsxwriter. Writing to csv instead")
            file_name = file_name.with_suffix(".csv")
            return self.to_csv(view_name, process_name, file_name)

    # create scatter plot ram vs. performance
    def to_png(self, view_name: str, process_name: str, file_name: str):
        """Save a RAM-vs-query-time scatter plot of all permutations.

        Point size encodes process time (when a process is included), hue
        encodes the execution mode, and each point is annotated with its
        permutation ID.
        """
        df = self.to_dataframe(view_name, process_name)
        plt.figure(figsize=(8, 8))
        sns.set_style("ticks")
        p = sns.scatterplot(
            data=df,
            x="RAM in GB",
            y="Query Ratio",
            size="Mean Process Time" if process_name is not None else None,
            hue="Mode",
            palette="viridis",
            edgecolors="black",
            legend=True,
            alpha=0.4,
            sizes=(20, 500) if process_name is not None else None)
        # Annotate each point with the permutation ID.
        for index, row in df.iterrows():
            p.text(row["RAM in GB"],
                   row["Query Ratio"],
                   row["ID"],
                   color='black')
        sns.despine(trim=True, offset=2)
        p.set_xlabel("RAM (GB)")
        p.set_ylabel("Query Time Compared to Original Order")
        p.legend(title='Legend', loc='best')
        plt.grid()
        plt.tight_layout()
        os.makedirs(os.path.dirname(str(file_name)), exist_ok=True)
        plt.savefig(file_name, dpi=400)
        plt.clf()

    @property
    def original_order_result(self) -> PermutationResult:
        """The result measured with the cube's original dimension order."""
        # Imported lazily to avoid a circular import with optimuspy.
        from optimuspy import ExecutionMode
        for result in self.permutation_results:
            if result.mode == ExecutionMode.ORIGINAL_ORDER:
                return result

    def determine_best_result(self) -> Union[PermutationResult, None]:
        """Pick a permutation close to the minimum on all measured metrics.

        Widening tolerance bands (1%, 2.5%, 5% of each metric's range) are
        tried in turn; the first permutation within the band for RAM, query
        time and (when included) process time wins.  Returns None when no
        permutation falls into any band.
        """
        ram_range = [result.ram_usage for result in self.permutation_results]
        min_ram, max_ram = min(ram_range), max(ram_range)
        query_speed_range = [result.median_query_time() for result in self.permutation_results]
        min_query_speed, max_query_speed = min(query_speed_range), max(query_speed_range)
        if self.include_process:
            process_speed_range = [result.median_process_time(result.process_name)
                                   for result
                                   in self.permutation_results]
            min_process_execution, max_process_execution = min(process_speed_range), max(process_speed_range)
        else:
            min_process_execution = max_process_execution = 1
        # find a good balance between speed and ram and process speed
        for value in (0.01, 0.025, 0.05):
            ram_threshold = min_ram + value * (max_ram - min_ram)
            query_speed_threshold = min_query_speed + value * (max_query_speed - min_query_speed)
            if self.include_process:
                process_speed_threshold = min_process_execution + value * (
                        max_process_execution - min_process_execution)
                for permutation_result in self.permutation_results:
                    if all([permutation_result.ram_usage <= ram_threshold,
                            permutation_result.median_query_time() <= query_speed_threshold,
                            permutation_result.median_process_time() <= process_speed_threshold]):
                        return permutation_result
            else:
                for permutation_result in self.permutation_results:
                    if all([permutation_result.ram_usage <= ram_threshold,
                            permutation_result.median_query_time() <= query_speed_threshold]):
                        return permutation_result
        # no dimension order falls in sweet spot
        return None
| 39.183908 | 113 | 0.641048 | 9,852 | 0.963332 | 0 | 0 | 250 | 0.024445 | 0 | 0 | 884 | 0.086438 |
257fa21be7c52550321debef98c5629ed602cf83 | 3,412 | py | Python | model.py | shivam13verma/han-chainer | ca1e34b1dcd8ecfdf55690de62b89c59c3699f82 | [
"MIT"
] | null | null | null | model.py | shivam13verma/han-chainer | ca1e34b1dcd8ecfdf55690de62b89c59c3699f82 | [
"MIT"
] | null | null | null | model.py | shivam13verma/han-chainer | ca1e34b1dcd8ecfdf55690de62b89c59c3699f82 | [
"MIT"
] | null | null | null | from keras.models import Sequential
from keras.layers import Dense, Activation
# NOTE(review): this module only imports Sequential, Dense and Activation —
# Embedding, GRU, Dropout and TimeDistributedDense are not imported here, and
# vocabulary_size/embedding_dim/vocab_size/max_caption_len are not defined in
# this file; presumably supplied elsewhere or this block is dead scratch code
# — confirm before relying on it.
model = Sequential()
model.add(Embedding(vocabulary_size, embedding_dim, input_shape=(90582, 517)))
model.add(GRU(512, return_sequences=True))
model.add(Dropout(0.2))
model.add(GRU(512, return_sequences=True))
model.add(Dropout(0.2))
model.add(TimeDistributedDense(1))
model.add(Activation('softmax'))

#word-gru layer
language_model = Sequential()
language_model.add(Embedding(vocab_size, 256, input_length=max_caption_len))
language_model.add(GRU(output_dim=128, return_sequences=True))

#word-attention
model = Sequential()
model.add(Dense(50, input_dim=100, init='uniform'))
model.add(Activation('tanh'))

#sentence-gru layer
#sentence-attention
def build_model(opts, verbose=False):
k = 2 * opts.lstm_units # 300
L = opts.xmaxlen # 20
N = opts.xmaxlen + opts.ymaxlen + 1 # for delim
print "x len", L, "total len", N
print "k", k, "L", L
main_input = Input(shape=(N,), dtype='int32', name='main_input')
x = Embedding(output_dim=opts.emb, input_dim=opts.max_features, input_length=N, name='x')(main_input)
drop_out = Dropout(0.1, name='dropout')(x)
lstm_fwd = LSTM(opts.lstm_units, return_sequences=True, name='lstm_fwd')(drop_out)
lstm_bwd = LSTM(opts.lstm_units, return_sequences=True, go_backwards=True, name='lstm_bwd')(drop_out)
bilstm = merge([lstm_fwd, lstm_bwd], name='bilstm', mode='concat')
drop_out = Dropout(0.1)(bilstm)
h_n = Lambda(get_H_n, output_shape=(k,), name="h_n")(drop_out)
Y = Lambda(get_Y, arguments={"xmaxlen": L}, name="Y", output_shape=(L, k))(drop_out)
Whn = Dense(k, W_regularizer=l2(0.01), name="Wh_n")(h_n)
Whn_x_e = RepeatVector(L, name="Wh_n_x_e")(Whn)
WY = TimeDistributed(Dense(k, W_regularizer=l2(0.01)), name="WY")(Y)
merged = merge([Whn_x_e, WY], name="merged", mode='sum')
M = Activation('tanh', name="M")(merged)
alpha_ = TimeDistributed(Dense(1, activation='linear'), name="alpha_")(M)
flat_alpha = Flatten(name="flat_alpha")(alpha_)
alpha = Dense(L, activation='softmax', name="alpha")(flat_alpha)
Y_trans = Permute((2, 1), name="y_trans")(Y) # of shape (None,300,20)
r_ = merge([Y_trans, alpha], output_shape=(k, 1), name="r_", mode=get_R)
r = Reshape((k,), name="r")(r_)
Wr = Dense(k, W_regularizer=l2(0.01))(r)
Wh = Dense(k, W_regularizer=l2(0.01))(h_n)
merged = merge([Wr, Wh], mode='sum')
h_star = Activation('tanh')(merged)
out = Dense(3, activation='softmax')(h_star)
output = out
model = Model(input=[main_input], output=output)
if verbose:
model.summary()
# plot(model, 'model.png')
# # model.compile(loss={'output':'binary_crossentropy'}, optimizer=Adam())
# model.compile(loss={'output':'categorical_crossentropy'}, optimizer=Adam(options.lr))
model.compile(loss='categorical_crossentropy',optimizer=Adam(options.lr))
return model
def compute_acc(X, Y, vocab, model, opts):
    """Compute accuracy of *model* predictions against one-hot labels *Y*.

    :param X: model input batch.
    :param Y: one-hot ground-truth labels, shape (n_samples, n_classes).
    :param vocab: unused here (kept for interface compatibility).
    :param model: Keras model providing predict().
    :param opts: options object providing batch_size.
    :return: (acc, acc) — the accuracy, duplicated.
    """
    # BUG FIX: the original used options.batch_size, referencing an
    # undefined global `options` instead of the `opts` parameter.
    scores = model.predict(X, batch_size=opts.batch_size)
    # Turn class scores into one-hot predictions via argmax.
    prediction = np.zeros(scores.shape)
    for i in range(scores.shape[0]):
        l = np.argmax(scores[i])
        prediction[i][l] = 1.0
    # Sanity check: exactly one predicted class per sample.
    assert np.array_equal(np.ones(prediction.shape[0]), np.sum(prediction, axis=1))
    plabels = np.argmax(prediction, axis=1)
    tlabels = np.argmax(Y, axis=1)
    # NOTE(review): `accuracy` is not defined in this file — presumably
    # imported elsewhere (e.g. sklearn accuracy_score alias); confirm.
    acc = accuracy(tlabels, plabels)
    return acc, acc
| 37.086957 | 105 | 0.681125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 579 | 0.169695 |
2581eb1b55c042ee77c302c7a30814d99058fc85 | 2,901 | py | Python | chaospy/distributions/collection/gamma.py | lblonk/chaospy | 1759a4307c6134b74ce63ff44973195f1e185f94 | [
"MIT"
] | 1 | 2020-04-29T20:53:25.000Z | 2020-04-29T20:53:25.000Z | chaospy/distributions/collection/gamma.py | agonzs11/Polinomio-del-caos | 5a415ece07e6535488174bac69a6c0fcc2ca272d | [
"MIT"
] | null | null | null | chaospy/distributions/collection/gamma.py | agonzs11/Polinomio-del-caos | 5a415ece07e6535488174bac69a6c0fcc2ca272d | [
"MIT"
] | null | null | null | """Gamma distribution."""
import numpy
from scipy import special
from ..baseclass import Dist
from ..operators.addition import Add
class gamma(Dist):
    """Raw gamma distribution kernel with shape parameter ``a``."""

    def __init__(self, a=1):
        Dist.__init__(self, a=a)

    def _pdf(self, x, a):
        # Density: x**(a-1) * exp(-x) / Gamma(a)
        return x**(a-1)*numpy.e**(-x) / special.gamma(a)

    def _cdf(self, x, a):
        # Regularized lower incomplete gamma function.
        return special.gammainc(a, x)

    def _ppf(self, q, a):
        # Inverse of the regularized lower incomplete gamma function.
        return special.gammaincinv(a, q)

    def _mom(self, k, a):
        # Raw moment of order k: Gamma(a+k) / Gamma(a)
        return special.gamma(a+k)/special.gamma(a)

    def _ttr(self, n, a):
        # Three-term recurrence coefficients for the orthogonal polynomials.
        return 2.*n+a, n*n+n*(a-1)

    def _lower(self, a):
        # Support starts at zero.
        return 0.

    def _upper(self, a):
        # Practical (finite) upper bound used for numerical work.
        return 40+2*a
class Gamma(Add):
    """
    Gamma distribution.

    Also an Erlang distribution when shape=k and scale=1./lamb.

    Args:
        shape (float, Dist):
            Shape parameter. a>0.
        scale (float, Dist):
            Scale parameter. scale!=0
        shift (float, Dist):
            Location of the lower bound.

    Examples:
        >>> distribution = chaospy.Gamma(1, 1, 1)
        >>> distribution
        Gamma(scale=1, shape=1, shift=1)
        >>> q = numpy.linspace(0,1,6)[1:-1]
        >>> distribution.inv(q).round(4)
        array([1.2231, 1.5108, 1.9163, 2.6094])
        >>> distribution.fwd(distribution.inv(q)).round(4)
        array([0.2, 0.4, 0.6, 0.8])
        >>> distribution.pdf(distribution.inv(q)).round(4)
        array([0.8, 0.6, 0.4, 0.2])
        >>> distribution.sample(4).round(4)
        array([2.0601, 1.1222, 4.0014, 1.6581])
        >>> distribution.mom(1)
        array(2.)
        >>> distribution.ttr([1, 2, 3]).round(4)
        array([[4., 6., 8.],
               [1., 4., 9.]])
    """

    def __init__(self, shape=1, scale=1, shift=0):
        self._repr = {"shape": shape, "scale": scale, "shift": shift}
        # Scale-and-shift the raw gamma kernel: X = gamma(shape)*scale + shift
        Add.__init__(self, left=gamma(shape)*scale, right=shift)
class Exponential(Add):
    R"""
    Exponential Probability Distribution

    Special case of the gamma distribution with shape fixed to 1.

    Args:
        scale (float, Dist):
            Scale parameter. scale!=0
        shift (float, Dist):
            Location of the lower bound.

    Examples;:
        >>> distribution = chaospy.Exponential(2, 3)
        >>> distribution
        Exponential(scale=2, shift=3)
        >>> q = numpy.linspace(0,1,6)[1:-1]
        >>> distribution.inv(q).round(4)
        array([3.4463, 4.0217, 4.8326, 6.2189])
        >>> distribution.fwd(distribution.inv(q)).round(4)
        array([0.2, 0.4, 0.6, 0.8])
        >>> distribution.sample(4).round(4)
        array([5.1203, 3.2444, 9.0028, 4.3163])
        >>> distribution.mom(1).round(4)
        5.0
        >>> distribution.ttr([1, 2, 3]).round(4)
        array([[ 9., 13., 17.],
               [ 4., 16., 36.]])
    """

    def __init__(self, scale=1, shift=0):
        self._repr = {"scale": scale, "shift": shift}
        # Exponential == gamma with shape 1, scaled and shifted.
        Add.__init__(self, left=gamma(1)*scale, right=shift)
| 27.367925 | 69 | 0.536711 | 2,760 | 0.951396 | 0 | 0 | 0 | 0 | 0 | 0 | 1,895 | 0.653223 |
258221159670850092053b3b46e03afa8f767d41 | 7,449 | py | Python | simplivity/resources/external_stores.py | HewlettPackard/simplivity-python-sdk | 03d8e92a02fe66e878ed22b37944e5a6ce991ef1 | [
"Apache-2.0"
] | 7 | 2020-02-28T09:03:09.000Z | 2022-03-28T15:52:23.000Z | simplivity/resources/external_stores.py | HewlettPackard/simplivity-python-sdk | 03d8e92a02fe66e878ed22b37944e5a6ce991ef1 | [
"Apache-2.0"
] | 47 | 2020-01-16T20:32:19.000Z | 2020-08-27T04:43:00.000Z | simplivity/resources/external_stores.py | HewlettPackard/simplivity-python-sdk | 03d8e92a02fe66e878ed22b37944e5a6ce991ef1 | [
"Apache-2.0"
] | 16 | 2020-01-10T14:15:17.000Z | 2021-04-06T13:31:01.000Z | ###
# (C) Copyright [2019-2020] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from simplivity.resources.resource import ResourceBase
from simplivity.resources import omnistack_clusters
# REST endpoint for external-store resources.
URL = '/external_stores'
# Key under which the API returns the list of external-store members.
DATA_FIELD = 'external_stores'
class ExternalStores(ResourceBase):
    """Implements features available for SimpliVity External store resources."""

    def __init__(self, connection):
        super(ExternalStores, self).__init__(connection)

    def get_all(self, pagination=False, page_size=0, limit=500, offset=0,
                sort=None, order='descending', filters=None, fields=None,
                case_sensitive=True):
        """
        Get all external stores

        Args:
            pagination: True if need pagination
            page_size: Size of the page (Required when pagination is on)
            limit: A positive integer that represents the maximum number of results to return
            offset: A positive integer that directs the service to start returning
              the <offset value> instance, up to the limit.
            sort: The name of the field where the sort occurs
            order: The sort order preference. Valid values: ascending or descending.
            filters: Dictionary with filter values. Example: {'name': 'name'}
              name: The name of the external_stores to return.
                Accepts: Single value, comma-separated list, pattern using one or more asterisk characters as a wildcard.
              omnistack_cluster_id: The name of the omnistack_cluster that is associated with the instances to return
              cluster_group_id:The unique identifiers (UIDs) of the cluster_groups associated with the external stores to return
                Accepts: Single value, comma-separated list
              management_ip: The IP address of the external store
                Accepts: Single value, comma-separated list, pattern using one or more asterisk characters as a wildcard
              type: The type of external store
                Default: StoreOnceOnPrem

        Returns:
            list: list of resources
        """
        return self._client.get_all(URL,
                                    members_field=DATA_FIELD,
                                    pagination=pagination,
                                    page_size=page_size,
                                    limit=limit,
                                    offset=offset,
                                    sort=sort,
                                    order=order,
                                    filters=filters,
                                    fields=fields,
                                    case_sensitive=case_sensitive)

    def get_by_data(self, data):
        """Gets ExternalStore object from data.

        Args:
            data: ExternalStore data

        Returns:
            object: ExternalStore object.
        """
        return ExternalStore(self._connection, self._client, data)

    def register_external_store(self, management_ip, name, cluster, username, password, management_port=9387,
                                storage_port=9388, external_store_type='StoreOnceOnPrem', timeout=-1):
        """ Register the external store.

        Args:
            management_ip: The IP address of the external store
            name: The name of the external_store
            cluster: Destination OmnistackCluster object/name.
            username: The client name of the external store
            password: The client password of the external store
            management_port: The management IP port of the external store. Default: 9387
            storage_port: The storage IP port of the external store. Default: 9388
            external_store_type: The type of external store. Default: StoreOnceOnPrem
            timeout: Time out for the request in seconds.

        Returns:
            object: External store object.
        """
        data = {'management_ip': management_ip, 'management_port': management_port, 'name': name,
                'username': username, 'password': password, 'storage_port': storage_port,
                'type': external_store_type}

        if not isinstance(cluster, omnistack_clusters.OmnistackCluster):
            # if passed name of the cluster
            clusters_obj = omnistack_clusters.OmnistackClusters(self._connection)
            cluster = clusters_obj.get_by_name(cluster)

        data['omnistack_cluster_id'] = cluster.data['id']
        custom_headers = {'Content-type': 'application/vnd.simplivity.v1.11+json'}
        self._client.do_post(URL, data, timeout, custom_headers)

        # The POST response does not carry the store; fetch it by name.
        return self.get_by_name(name)

    def update_credentials(self, name, username, password, management_ip=None, timeout=-1):
        """Update the IP address or credentials that HPE SimpliVity uses to access the external stores

        Args:
            name: The name of the external_store
            username: The client name of the external store
            password: The client password of the external store
            management_ip: The IP address of the external store
            timeout: Time out for the request in seconds.

        Returns:
            None
        """
        resource_uri = "{}/update_credentials".format(URL)
        data = {'name': name, 'username': username, 'password': password}
        if management_ip:
            data['management_ip'] = management_ip

        custom_headers = {'Content-type': 'application/vnd.simplivity.v1.15+json'}
        self._client.do_post(resource_uri, data, timeout, custom_headers)
class ExternalStore(object):
    """Implements features available for a single External store resources."""

    def __init__(self, connection, resource_client, data):
        # data: dict representation of the external store (must contain 'name').
        self.data = data
        self._connection = connection
        self._client = resource_client

    def unregister_external_store(self, cluster, timeout=-1):
        """ Removes the external store as a backup destination for the cluster.

        Backups remain on the external store,but they can no longer be managed by HPE SimpliVity.

        Args:
            cluster: Destination OmnistackCluster object/name.
            timeout: Time out for the request in seconds.

        Returns:
            None
        """
        resource_uri = "{}/unregister".format(URL)
        data = {'name': self.data["name"]}

        if not isinstance(cluster, omnistack_clusters.OmnistackCluster):
            # if passed name of the cluster
            clusters_obj = omnistack_clusters.OmnistackClusters(self._connection)
            cluster = clusters_obj.get_by_name(cluster)

        data['omnistack_cluster_id'] = cluster.data['id']
        custom_headers = {'Content-type': 'application/vnd.simplivity.v1.15+json'}
        self._client.do_post(resource_uri, data, timeout, custom_headers)
| 44.873494 | 130 | 0.638475 | 6,657 | 0.893677 | 0 | 0 | 0 | 0 | 0 | 0 | 4,480 | 0.601423 |
25823a8b6363471624240c48f9dff5e83c6d7d22 | 12,775 | py | Python | items/migrations/0001_initial.py | LluisoCP/BooksAndFilms | a972091e920cb94313ab1f005a01bd72df538891 | [
"MIT"
] | null | null | null | items/migrations/0001_initial.py | LluisoCP/BooksAndFilms | a972091e920cb94313ab1f005a01bd72df538891 | [
"MIT"
] | null | null | null | items/migrations/0001_initial.py | LluisoCP/BooksAndFilms | a972091e920cb94313ab1f005a01bd72df538891 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.7 on 2019-09-17 12:09
from django.db import migrations, models
import django.db.models.deletion
import items.models
class Migration(migrations.Migration):
    """Initial schema for the items app.

    Creates Author, Book, Comment, Contact, Film and Genre, then wires up
    the cross-model relations (film.genres, comment.film, book.genres).
    """

    # Choice lists shared by several fields, generated once instead of the
    # hundreds of repeated literal tuples the autogenerated file contained.
    # The resulting lists are element-for-element identical, so the
    # recorded migration state does not change.
    _YEAR_CHOICES = [(year, year) for year in range(1800, 2020)]
    _GRADE_CHOICES = [(grade, grade) for grade in range(0, 11)]
    # 'Languange'/'Rusian' (sic) kept byte-identical: these labels are part
    # of the recorded migration state.
    _LANGUAGE_CHOICES = [
        ('', 'Choose Languange'), ('EN', 'English'), ('FR', 'French'),
        ('ES', 'Spanish'), ('CA', 'Catalan'), ('IT', 'Italian'),
        ('PT', 'Portuguese'), ('GK', 'Greek'), ('GM', 'German'),
        ('AR', 'Arabic'), ('RU', 'Rusian'), ('JP', 'Japanese'),
        ('CH', 'Chinese'), ('TK', 'Turkish'), ('DN', 'Danish'),
        ('SW', 'Swedish'), ('NW', 'Norwegian'),
    ]

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=100)),
                ('last_name', models.CharField(max_length=100)),
                ('date_of_birth', models.DateField(blank=True, null=True, verbose_name='Born')),
                ('date_of_death', models.DateField(blank=True, null=True, verbose_name='Died')),
                ('genre', models.CharField(choices=[('', "Select the author's genre"), ('M', 'Male'), ('F', 'Female'), ('X', 'Other')], max_length=1)),
                ('short_bio', models.CharField(blank=True, default='No biography has been set for this author', max_length=255)),
                ('role', models.CharField(choices=[('Writter', 'Writter'), ('Director', 'Director')], max_length=8)),
            ],
        ),
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateField(auto_now_add=True)),
                ('title', models.CharField(max_length=32, unique=True)),
                ('description', models.CharField(blank=True, max_length=400)),
                ('phrase', models.CharField(blank=True, max_length=100)),
                ('release_year', models.SmallIntegerField(choices=_YEAR_CHOICES, verbose_name='Year')),
                ('art', models.CharField(editable=False, max_length=32)),
                ('language', models.CharField(blank=True, choices=_LANGUAGE_CHOICES, max_length=2, verbose_name='Original Language')),
                ('image', models.ImageField(blank=True, null=True, upload_to=items.models.b_img_directory_path)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Author')),
            ],
            options={
                'ordering': ['title'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user', models.CharField(max_length=64)),
                ('content', models.CharField(max_length=1000)),
                ('commented_at', models.DateTimeField(auto_now_add=True)),
                ('grade', models.SmallIntegerField(choices=_GRADE_CHOICES, default=5)),
                ('book', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='comments', related_query_name='whose_comments', to='items.Book')),
            ],
            options={
                'ordering': ['-commented_at'],
            },
        ),
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=31)),
                ('last_name', models.CharField(max_length=31)),
                ('organisation', models.CharField(max_length=31)),
                ('content', models.CharField(max_length=511)),
            ],
        ),
        migrations.CreateModel(
            name='Film',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateField(auto_now_add=True)),
                ('title', models.CharField(max_length=32, unique=True)),
                ('description', models.CharField(blank=True, max_length=400)),
                ('phrase', models.CharField(blank=True, max_length=100)),
                ('release_year', models.SmallIntegerField(choices=_YEAR_CHOICES, verbose_name='Year')),
                ('art', models.CharField(editable=False, max_length=32)),
                ('language', models.CharField(blank=True, choices=_LANGUAGE_CHOICES, max_length=2, verbose_name='Original Language')),
                ('image', models.ImageField(blank=True, null=True, upload_to=items.models.f_img_directory_path)),
                ('director', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Author')),
            ],
            options={
                'ordering': ['title'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Genre',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='Enter a genre', max_length=32, unique=True)),
            ],
        ),
        migrations.AddField(
            model_name='film',
            name='genres',
            field=models.ManyToManyField(help_text='Select the genres for this artpiece', related_name='films_related', related_query_name='whose_films', to='items.Genre'),
        ),
        migrations.AddField(
            model_name='comment',
            name='film',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='comments', related_query_name='whose_comments', to='items.Film'),
        ),
        migrations.AddField(
            model_name='book',
            name='genres',
            field=models.ManyToManyField(help_text='Select the genres for this artpiece', related_name='books_related', related_query_name='whose_books', to='items.Genre'),
        ),
    ]
| 112.061404 | 3,170 | 0.56955 | 12,629 | 0.988571 | 0 | 0 | 0 | 0 | 0 | 0 | 1,534 | 0.120078 |
25825cd19c25741651ec1092171b226484d87889 | 2,039 | py | Python | init.py | lanvstn/workshops-api | e52461f78b7032cd5377aa5d5a05246d1b827bdd | [
"MIT"
] | null | null | null | init.py | lanvstn/workshops-api | e52461f78b7032cd5377aa5d5a05246d1b827bdd | [
"MIT"
] | 1 | 2019-10-15T18:52:45.000Z | 2019-10-15T18:52:45.000Z | init.py | lanvstn/workshops-api | e52461f78b7032cd5377aa5d5a05246d1b827bdd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import sqlalchemy.exc
from workshops_api.app import app
from workshops_api.database import db
from workshops_api.auth import ACCESS_LEVELS
from workshops_api.models.user import UserModel
from workshops_api.config import config
class Setup():
    """Creates the database schema and seeds it with the initial admin user."""

    def __init__(self, config):
        """Keep only the ``init`` section of the application configuration."""
        self.settings = config["init"]

    def run(self):
        """Create all tables, insert the admin and (optionally) test data."""
        with app.app_context():
            db.create_all()
            db.session.commit()
            print("Initialized database")

            try:
                # Fixed admin account; id=1 makes a rerun raise IntegrityError.
                admin = UserModel(
                    id=1,
                    full_name='admin',
                    identity=self.settings["admin_identity"],
                    permission=ACCESS_LEVELS["admin"],
                    event_id=-1
                )
                db.session.add(admin)
                db.session.commit()
                print("Created admin")

                if self.settings["add_testdata"]:
                    # Imported lazily so normal runs never load fixtures.
                    from workshops_api.testdata import testdata
                    db.session.add_all(testdata)
                    db.session.commit()
                    print("Added test data")
            except sqlalchemy.exc.IntegrityError:
                print("Error inserting initial data into database, it already exists. "
                      "Run with --force to delete. Quitting.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--force",
help="Force initialization, DELETE existing database",
action="store_true")
args = parser.parse_args()
config.load_config('.')
setup = Setup(config.config)
if args.force:
print("=== WARNING: YOU ARE ABOUT TO DELETE ALL TABLES IN THE DATABASE!!! ===")
input(' Enter to continue, Ctrl-C to quit > ')
with app.app_context():
db.drop_all()
print("Deleted database.")
setup.run()
| 30.893939 | 87 | 0.557626 | 1,161 | 0.569397 | 0 | 0 | 0 | 0 | 0 | 0 | 493 | 0.241785 |
25858779947cdd6eff807639ff98f34b7425aeeb | 1,285 | py | Python | setup.py | bentettmar/nertivia4py | a9b758000632e40306bc610a6966cb8d0a643c20 | [
"MIT"
] | 3 | 2022-01-24T16:31:20.000Z | 2022-02-03T22:44:51.000Z | setup.py | bentettmar/nertivia4py | a9b758000632e40306bc610a6966cb8d0a643c20 | [
"MIT"
] | 9 | 2022-03-05T19:01:48.000Z | 2022-03-06T11:38:53.000Z | setup.py | bentettmar/nertivia4py | a9b758000632e40306bc610a6966cb8d0a643c20 | [
"MIT"
] | null | null | null | from distutils.core import setup
readme = """
# Nertivia4PY
A Python wrapper for the Nertivia API.
Support Nertivia server : https://nertivia.net/i/nertivia4py
> ### Install
> ```
> pip install nertivia4py
> ```
> ### Example
> ```python
> import nertivia4py
>
> token = "TOKEN_HERE"
> prefix = "!"
>
> bot = nertivia4py.Bot(prefix)
>
> @bot.event
> def on_success(event):
> print("Connected!")
>
> @bot.command(name="ping", description="Ping command.")
> def ping_command(message, args):
> message.reply("Pong!")
>
> bot.run(token)
> ```
>
> For more examples, take a look at the examples folder in the github repo.
"""
setup(
name='nertivia4py',
packages=['nertivia4py', 'nertivia4py.gateway', 'nertivia4py.utils', 'nertivia4py.commands'],
version='1.0.8',
license='MIT',
description='A Python wrapper for the Nertivia API',
long_description_content_type="text/markdown",
long_description=readme,
author='Ben Tettmar',
author_email='hello@benny.fun',
url='https://github.com/bentettmar/nertivia4py',
keywords=["nertivia", "api", "wrapper", "python",
"bot", "nertivia.py", "nertivia4py"],
install_requires=["requests", 'python-socketio[client]'],
)
| 25.196078 | 98 | 0.633463 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 955 | 0.743191 |
258626b69bb2d19543b8603f01ee4f2de96f5e1d | 7,867 | py | Python | scheduler/args.py | jian-yu/autotx | eed17a8881b6c3ee80d93d044abd2c67b150ccf1 | [
"Apache-2.0"
] | 1 | 2019-10-14T04:58:13.000Z | 2019-10-14T04:58:13.000Z | scheduler/args.py | jian-yu/autotx | eed17a8881b6c3ee80d93d044abd2c67b150ccf1 | [
"Apache-2.0"
] | 1 | 2021-06-02T00:30:31.000Z | 2021-06-02T00:30:31.000Z | scheduler/args.py | jian-yu/autotx | eed17a8881b6c3ee80d93d044abd2c67b150ccf1 | [
"Apache-2.0"
] | 1 | 2020-08-11T02:48:38.000Z | 2020-08-11T02:48:38.000Z | class PoolArgs:
    def __init__(self, bankerBufCap, bankerMaxBufNumber, signerBufCap, signerBufMaxNumber, broadcasterBufCap, broadcasterMaxNumber, stakingBufCap, stakingMaxNumber, distributionBufCap, distributionMaxNumber, errorBufCap, errorMaxNumber):
        """Collect buffer capacities and max buffer counts for every pool.

        Each (capacity, max-number) pair configures one worker pool:
        banker, signer, broadcaster, staking, distribution and error.
        All values are validated (non-zero) by Check().
        """
        self.BankerBufCap = bankerBufCap
        self.BankerMaxBufNumber = bankerMaxBufNumber
        self.SignerBufCap = signerBufCap
        self.SignerBufMaxNumber = signerBufMaxNumber
        self.BroadcasterBufCap = broadcasterBufCap
        self.BroadcasterMaxNumber = broadcasterMaxNumber
        self.StakingBufCap = stakingBufCap
        self.StakingMaxNumber = stakingMaxNumber
        self.DistributionBufCap = distributionBufCap
        self.DistributionMaxNumber = distributionMaxNumber
        self.ErrorBufCap = errorBufCap
        self.ErrorMaxNumber = errorMaxNumber
def Check(self):
if self.BankerBufCap == 0:
return PoolArgsError('zero banker buffer capacity')
if self.BankerMaxBufNumber == 0:
return PoolArgsError('zero banker max buffer number')
if self.SignerBufCap == 0:
return PoolArgsError('zero signer buffer capacity')
if self.SignerBufMaxNumber == 0:
return PoolArgsError('zero signer max buffer number')
if self.BroadcasterBufCap == 0:
return PoolArgsError('zero broadcaster buffer capacity')
if self.BroadcasterMaxNumber == 0:
return PoolArgsError('zero broadcaster max buffer number')
if self.StakingBufCap == 0:
return PoolArgsError('zero staking buffer capacity')
if self.StakingMaxNumber == 0:
return PoolArgsError('zero staking max buffer number')
if self.DistributionBufCap == 0:
return PoolArgsError('zero distribution buffer capacity')
if self.DistributionMaxNumber == 0:
return PoolArgsError('zero distribution max buffer number')
if self.ErrorBufCap == 0:
return PoolArgsError('zero error buffer capacity')
if self.ErrorMaxNumber == 0:
return PoolArgsError('zero error max buffer number')
return None
class PoolArgsError(Exception):
    """Validation error for PoolArgs; returned (not raised) by Check()."""

    def __init__(self, msg):
        # Forward to Exception so ``args``/``repr`` behave like a normal
        # exception (they were empty before); keep the legacy ``msg``
        # attribute for existing callers.
        super().__init__(msg)
        self.msg = msg

    def __str__(self):
        return self.msg
class ModuleArgs:
    """Worker collections for each scheduler module."""

    def __init__(self, bankers, signers, broadcasters, stakings, distributors):
        """Store one worker list per module role."""
        self.Bankers = bankers
        self.Signers = signers
        self.Broadcasters = broadcasters
        self.Stakings = stakings
        self.Distributors = distributors

    def Check(self):
        """Return None when every list is non-empty, else a ModuleArgsError.

        The first empty list (in banker, signer, broadcaster, staking,
        distributor order) determines the message, matching the original
        if-chain.
        """
        checks = (
            (self.Bankers, 'empty banker list'),
            (self.Signers, 'empty signer list'),
            (self.Broadcasters, 'empty broadcaster list'),
            (self.Stakings, 'empty stakinger list'),  # (sic) original message
            (self.Distributors, 'empty distributor list'),
        )
        for workers, message in checks:
            if len(workers) == 0:
                return ModuleArgsError(message)
        return None


class ModuleArgsError(Exception):
    """Validation error for ModuleArgs; returned (not raised) by Check()."""

    def __init__(self, msg):
        # Forward to Exception so ``args``/``repr`` behave like a normal
        # exception; keep the legacy ``msg`` attribute.
        super().__init__(msg)
        self.msg = msg

    def __str__(self):
        return self.msg
class SendCoinArgs:
    """Arguments for a coin-transfer transaction."""

    def __init__(self, srcAccount, dstAccount, coins, fees, gas, gasAdjust):
        """Store transfer parameters; accounts must expose getAddress()."""
        self.srcAccount = srcAccount
        self.dstAccount = dstAccount
        self.coins = coins
        self.fees = fees
        self.gas = gas
        self.gasAdjust = gasAdjust

    def Check(self):
        """Return None when the arguments are valid, else a SendCoinArgsError.

        Conditions are evaluated in order and short-circuit, so account
        addresses are only queried when the account object is present.
        """
        if self.srcAccount is None or self.srcAccount.getAddress() == '':
            return SendCoinArgsError('srcAccount is invalid')
        if self.dstAccount is None or self.dstAccount.getAddress() == '':
            return SendCoinArgsError('dstAccount is invalid')
        if self.coins is None or len(self.coins) == 0:
            return SendCoinArgsError('empty coins')
        if self.fees is None or len(self.fees) == 0:
            return SendCoinArgsError('empty fess')  # (sic) original message
        if self.gas is None:
            return SendCoinArgsError('empty gas')
        if self.gasAdjust is None:
            return SendCoinArgsError('empty gasAdjust')
        return None


class SendCoinArgsError(Exception):
    """Validation error for SendCoinArgs; returned (not raised) by Check()."""

    def __init__(self, msg):
        # Forward to Exception so ``args``/``repr`` behave like a normal
        # exception; keep the legacy ``msg`` attribute.
        super().__init__(msg)
        self.msg = msg

    def __str__(self):
        return self.msg
class SendSignArgs:
    """Arguments for signing a previously generated transaction file."""

    def __init__(self, srcAccount, sendedJsonFilePath, node):
        """Store the signing account, unsigned-tx JSON path and node."""
        self.srcAccount = srcAccount
        self.sendedJsonFilePath = sendedJsonFilePath
        self.node = node

    def Check(self):
        """Return None when the arguments are valid, else a SendSignArgsError."""
        if self.srcAccount is None or self.srcAccount.getAddress() == '':
            return SendSignArgsError('srcAccount is invalid')
        if self.sendedJsonFilePath is None:
            return SendSignArgsError('empty sendedJsonFilePath')
        if self.node is None:
            return SendSignArgsError('empty node')
        return None


class SendSignArgsError(Exception):
    """Validation error for SendSignArgs; returned (not raised) by Check()."""

    def __init__(self, msg):
        # Forward to Exception so ``args``/``repr`` behave like a normal
        # exception; keep the legacy ``msg`` attribute.
        super().__init__(msg)
        self.msg = msg

    def __str__(self):
        return self.msg
class SendBroadcastArgs:
    """Arguments for broadcasting a signed transaction."""

    def __init__(self, srcAccount, body, mode='sync'):
        """Store the source account, signed-tx body and broadcast mode."""
        self.srcAccount = srcAccount
        self.body = body
        self.mode = mode

    def Check(self):
        """Return None when valid, else a SendBroadcastArgsError.

        The body is checked before the account, matching the original order.
        """
        if self.body is None:
            return SendBroadcastArgsError('empty broadcast body')
        if self.srcAccount is None:
            return SendBroadcastArgsError('unknown tx src account')
        return None


class SendBroadcastArgsError(Exception):
    """Validation error for SendBroadcastArgs; returned by Check()."""

    def __init__(self, msg):
        # Forward to Exception so ``args``/``repr`` behave like a normal
        # exception; keep the legacy ``msg`` attribute.
        super().__init__(msg)
        self.msg = msg

    def __str__(self):
        return self.msg
class DelegateArgs():
    """Arguments for a staking delegation transaction.

    Check() returns a DelegateArgsError instance (it does not raise),
    matching the error-return convention used throughout this module.
    """
    def __init__(self, delegator, validator, coin, fees, gas, gasAdjust):
        """Store delegation parameters.

        Args:
            delegator: account object exposing getAddress().
            validator: validator the stake is delegated to.
            coin: the coin amount to delegate.
            fees: list of fee coins.
            gas: gas limit for the transaction.
            gasAdjust: gas adjustment factor.
        """
        self.delegator = delegator
        self.validator = validator
        self.coin = coin
        self.fees = fees
        self.gas = gas
        self.gasAdjust = gasAdjust
    def Check(self):
        """Return None when valid, else a DelegateArgsError.

        Conditions short-circuit: getAddress() is only called when the
        delegator object is present.
        """
        if self.delegator is None or self.delegator.getAddress() == '':
            return DelegateArgsError('delegator is invalid')
        if self.validator is None:
            return DelegateArgsError('validator is invalid')
        if self.coin is None:
            # Message says "coins" although a single coin is stored.
            return DelegateArgsError('empty coins')
        if self.fees is None or len(self.fees) == 0:
            return DelegateArgsError('empty fess')
        if self.gas is None:
            return DelegateArgsError('empty gas')
        if self.gasAdjust is None:
            return DelegateArgsError('empty gasAdjust')
        return None
class StakingArgs():
    """Wrapper pairing a staking operation type with its payload."""
    def __init__(self, _type, data):
        """Store the operation type tag and its argument object."""
        self._type = _type
        self.data = data
    def getType(self):
        """Return the staking operation type tag."""
        return self._type
    def getData(self):
        """Return the argument object associated with the operation."""
        return self.data
class WithdrawDelegatorOneRewardArgs():
    """Arguments for withdrawing one delegator reward from a validator.

    NOTE(review): Check() reuses DelegateArgsError rather than a dedicated
    error type — presumably intentional, but worth confirming.
    """
    def __init__(self, delegator, validator, fees, gas, gasAdjust):
        """Store withdrawal parameters (delegator exposes getAddress())."""
        self.delegator = delegator
        self.validator = validator
        self.fees = fees
        self.gas = gas
        self.gasAdjust = gasAdjust
    def Check(self):
        """Return None when valid, else a DelegateArgsError.

        Conditions short-circuit: getAddress() is only called when the
        delegator object is present.
        """
        if self.delegator is None or self.delegator.getAddress() == '':
            return DelegateArgsError('delegator is invalid')
        if self.validator is None:
            return DelegateArgsError('validator is invalid')
        if self.fees is None or len(self.fees) == 0:
            return DelegateArgsError('empty fess')
        if self.gas is None:
            return DelegateArgsError('empty gas')
        if self.gasAdjust is None:
            return DelegateArgsError('empty gasAdjust')
        return None
class DistributionArgs():
    """Wrapper pairing a distribution operation type with its payload."""
    def __init__(self, _type, data):
        """Store the operation type tag and its argument object."""
        self._type = _type
        self.data = data
    def getType(self):
        """Return the distribution operation type tag."""
        return self._type
    def getData(self):
        """Return the argument object associated with the operation."""
        return self.data
class DelegateArgsError(Exception):
    """Validation error for delegation/withdrawal args; returned by Check()."""

    def __init__(self, msg):
        # Forward to Exception so ``args``/``repr`` behave like a normal
        # exception (they were empty before); keep the legacy ``msg``
        # attribute for existing callers.
        super().__init__(msg)
        self.msg = msg

    def __str__(self):
        return self.msg
| 33.054622 | 237 | 0.643702 | 7,824 | 0.994534 | 0 | 0 | 0 | 0 | 0 | 0 | 893 | 0.113512 |
258642582e345c59fc75a59b993ff30a2400aec6 | 3,030 | py | Python | generative/stylclip/operators.py | armaank/archlectures | ec4caec6efa1bf419dec24ad850e38ad6c123905 | [
"BSD-3-Clause"
] | 1 | 2021-04-09T16:10:55.000Z | 2021-04-09T16:10:55.000Z | generative/stylclip/operators.py | armaank/archlectures | ec4caec6efa1bf419dec24ad850e38ad6c123905 | [
"BSD-3-Clause"
] | null | null | null | generative/stylclip/operators.py | armaank/archlectures | ec4caec6efa1bf419dec24ad850e38ad6c123905 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import torch
from scipy.stats import truncnorm
from pymoo.factory import get_sampling, get_crossover, get_mutation
from pymoo.operators.mixed_variable_operator import MixedVariableSampling, MixedVariableMutation, MixedVariableCrossover
from pymoo.model.sampling import Sampling
class TruncatedNormalRandomSampling(Sampling):
    """pymoo sampling: every gene drawn from N(0, 1) truncated to [-2, 2]."""

    def __init__(self, var_type=float):
        # ``np.float`` was a deprecated alias of the builtin ``float`` and
        # was removed in NumPy 1.24; the builtin is the drop-in equivalent.
        super().__init__()
        self.var_type = var_type

    def _do(self, problem, n_samples, **kwargs):
        """Return an (n_samples, problem.n_var) float32 sample matrix."""
        return truncnorm.rvs(-2, 2, size=(n_samples, problem.n_var)).astype(np.float32)
class NormalRandomSampling(Sampling):
    """pymoo sampling: every gene drawn independently from N(mu, std)."""

    def __init__(self, mu=0, std=1, var_type=float):
        # ``np.float`` was a deprecated alias of the builtin ``float`` and
        # was removed in NumPy 1.24; the builtin is the drop-in equivalent.
        super().__init__()
        self.mu = mu
        self.std = std
        self.var_type = var_type

    def _do(self, problem, n_samples, **kwargs):
        """Return an (n_samples, problem.n_var) normal sample matrix."""
        return np.random.normal(self.mu, self.std, size=(n_samples, problem.n_var))
class BinaryRandomSampling(Sampling):
    """pymoo sampling: every gene is True with probability ``prob``."""

    def __init__(self, prob=0.5):
        super().__init__()
        self.prob = prob

    def _do(self, problem, n_samples, **kwargs):
        """Return an (n_samples, problem.n_var) boolean sample matrix."""
        val = np.random.random((n_samples, problem.n_var))
        # ``np.bool`` was removed in NumPy 1.24; the builtin ``bool`` it
        # aliased yields the identical dtype.
        return (val < self.prob).astype(bool)
def get_operators(config):
    """Build the pymoo sampling/crossover/mutation operators for a config.

    Args:
        config: namespace whose ``config`` attribute names the setup; the
            BigGAN configs additionally provide ``dim_z`` and ``num_classes``.

    Returns:
        dict with 'sampling', 'crossover' and 'mutation' entries.

    Raises:
        Exception: for an unrecognised config name.
    """
    name = config.config
    if name == "DeepMindBigGAN256" or name == "DeepMindBigGAN512":
        # Mixed genome: a real-valued latent z followed by a boolean
        # class-selection mask. (The original duplicated this condition
        # three times; the redundant re-checks are removed.)
        mask = ["real"] * config.dim_z + ["bool"] * config.num_classes
        sampling = MixedVariableSampling(mask, {
            "real": TruncatedNormalRandomSampling(),
            "bool": BinaryRandomSampling(prob=5/1000)
        })
        crossover = MixedVariableCrossover(mask, {
            "real": get_crossover("real_sbx", prob=1.0, eta=3.0),
            "bool": get_crossover("bin_hux", prob=0.2)
        })
        mutation = MixedVariableMutation(mask, {
            "real": get_mutation("real_pm", prob=0.5, eta=3.0),
            "bool": get_mutation("bin_bitflip", prob=10/1000)
        })
        return dict(
            sampling=sampling,
            crossover=crossover,
            mutation=mutation
        )
    elif name.split("_")[0] in ("StyleGAN2", "Adaily"):
        # Both families use an all-real genome with identical operator
        # settings, so the two previously duplicated branches are merged.
        return dict(
            sampling=NormalRandomSampling(),
            crossover=get_crossover("real_sbx", prob=1.0, eta=3.0),
            mutation=get_mutation("real_pm", prob=0.5, eta=3.0)
        )
    elif name == "GPT2":
        return dict(
            sampling=get_sampling("int_random"),
            crossover=get_crossover("int_sbx", prob=1.0, eta=3.0),
            mutation=get_mutation("int_pm", prob=0.5, eta=3.0)
        )
    else:
        raise Exception("Unknown config")
| 33.666667 | 120 | 0.628053 | 899 | 0.2967 | 0 | 0 | 0 | 0 | 0 | 0 | 279 | 0.092079 |
2586cc665c211863710efc72964860d0e8a2e6e2 | 723 | py | Python | setup.py | VENULLLC/Monoclock | e393d94f89b39f9613c63167d4c911ab4c9d21a9 | [
"MIT"
] | 2 | 2015-11-04T19:16:00.000Z | 2016-03-03T20:51:32.000Z | setup.py | VENULLLC/Monoclock | e393d94f89b39f9613c63167d4c911ab4c9d21a9 | [
"MIT"
] | 1 | 2017-08-08T16:56:35.000Z | 2017-08-08T16:56:35.000Z | setup.py | VENULLLC/Monoclock | e393d94f89b39f9613c63167d4c911ab4c9d21a9 | [
"MIT"
] | 2 | 2020-12-28T20:13:38.000Z | 2021-07-26T16:39:31.000Z | #!/usr/bin/env python
import sys
from distutils.core import setup
from distutils.extension import Extension
# clock_gettime() is provided by librt on Linux; on macOS it lives in the
# system libc, so no extra link library is needed there.
if sys.platform == 'darwin':
	monoclock_libraries = []
else:
	monoclock_libraries = ['rt']
# Metadata and build configuration for the C extension module.
setup(
	name='Monoclock',
	version='14.4.18',
	description="Monotonic clock access for Python",
	url="https://github.com/ludios/Monoclock",
	author="Ivan Kozik",
	author_email="ivan@ludios.org",
	classifiers=[
		'Development Status :: 5 - Production/Stable',
		'Operating System :: POSIX :: Linux',
		'Operating System :: MacOS :: MacOS X',
		'Intended Audience :: Developers',
		'License :: OSI Approved :: MIT License',
	],
	ext_modules=[
		Extension("monoclock", ["monoclock.c"], libraries=monoclock_libraries),
	],
)
| 24.1 | 73 | 0.706777 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 370 | 0.511757 |
2586e81e4e9946ad1a836db00f8a88e5409f7e9b | 871 | py | Python | models/user.py | NeonWizard/php-mood-tracker | 51f7945412d3077b81af29a229a9dbe66d2abdc2 | [
"MIT"
] | null | null | null | models/user.py | NeonWizard/php-mood-tracker | 51f7945412d3077b81af29a229a9dbe66d2abdc2 | [
"MIT"
] | null | null | null | models/user.py | NeonWizard/php-mood-tracker | 51f7945412d3077b81af29a229a9dbe66d2abdc2 | [
"MIT"
] | null | null | null | class UserModel(Table):
	def __init__(self):
		"""Configure table name plus required/optional column lists."""
		self.tableName = "User"
		self.requiredFields = ['firstName', 'lastName', 'username', 'password']
		self.optionalFields = ['email']
def check(self, data):
for req in self.requiredFields:
if req not in data:
return False
for opt in self.optionalFields:
if opt not in data:
data[opt] = ""
return data
def getById(self, id):
rows = self.select([
"id LIKE {}".format(id)
])
if rows:
return rows[0]
else:
None
def getByUsername(self, username):
rows = self.select([
"username LIKE '{}'".format(username)
])
if rows:
return rows[0]
else:
None
	def add(self, data):
		"""Validate *data*, bcrypt-hash its password, and insert the row.

		Returns False when a required field is missing; otherwise inserts
		the row (the insert's return value is discarded).
		"""
		# Imported lazily so bcrypt is only required when users are created.
		import bcrypt
		data = self.check(data)
		if not data:
			return False
		# Store the salted bcrypt hash, never the plaintext password.
		data['password'] = bcrypt.hashpw(data['password'].encode("utf-8"), bcrypt.gensalt()).decode("utf-8")
		self.insert(data)
| 17.77551 | 102 | 0.639495 | 870 | 0.998852 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.140069 |
25873fb3ea0c2524e1adfd5dff21649fc4c13e56 | 9,844 | py | Python | mrobpy/examples/vis_utils.py | miloserdova-l/mrob | 48bef772ba3158d2122991069196d6efd4a39f8c | [
"Apache-2.0"
] | 12 | 2020-09-22T15:33:48.000Z | 2022-03-02T17:27:39.000Z | mrobpy/examples/vis_utils.py | MobileRoboticsSkoltech/mrob | 7668a3ee35345c4878aa86fff082cc017992d205 | [
"Apache-2.0"
] | 46 | 2020-09-22T15:47:08.000Z | 2022-01-22T10:56:44.000Z | mrobpy/examples/vis_utils.py | MobileRoboticsSkoltech/mrob | 7668a3ee35345c4878aa86fff082cc017992d205 | [
"Apache-2.0"
] | 9 | 2020-09-22T15:59:33.000Z | 2021-12-20T20:15:16.000Z | import numpy as np
import pandas as pd
import mrob
from test_utils import get_mc
from sys import platform
import matplotlib
# The backend must be chosen before pyplot is imported to take effect;
# 'PS' is a non-interactive backend used here on macOS.
if platform == "darwin":
    matplotlib.use('PS')
import matplotlib.pyplot as plt
# Cholesky decomposition for a singular (rank-deficient) covariance matrix.
def cholesky(sigma):
    """Cholesky-like factor of a possibly singular covariance matrix.

    Rows/columns of ``sigma`` that are entirely zero are removed, a regular
    Cholesky decomposition is applied to the remaining dense block, and the
    factor is scattered back to the original positions, so ``L @ L.T``
    reproduces ``sigma`` even when it is singular in this
    all-zero-row/column sense.

    Generalized from the original hard-coded 6x6 scatter loop to any
    square matrix, and vectorized with ``np.ix_``.

    Args:
        sigma: (n, n) symmetric positive semi-definite array whose singular
            directions appear as all-zero rows and columns.

    Returns:
        (n, n) array L with the block factor embedded and zeros elsewhere.
    """
    sigma = np.asarray(sigma)
    # Keep indices whose row or column is not identically zero.
    condition = ~(np.all(sigma == 0, axis=1) & np.all(sigma == 0, axis=0))
    keep = np.flatnonzero(condition)
    # Factor the dense sub-block obtained by dropping zero rows/columns.
    block = sigma[np.ix_(keep, keep)]
    factor = np.linalg.cholesky(block)
    # Scatter the block factor back into a full-size zero matrix.
    full = np.zeros_like(sigma)
    full[np.ix_(keep, keep)] = factor
    return full
def get_axis_points(T, sigma, N, K=1, index=-1, A=None):
    """Propagate N points along one Lie-algebra axis through pose T.

    N perturbation vectors spanning [-K, K] on axis ``index`` are mapped
    through the Cholesky factor of ``sigma`` and applied (left-multiplied)
    to pose ``T``. The resulting translations are returned as an
    (N + 1, 3) array whose final NaN row lets several polylines be
    concatenated and plotted as a single artist with gaps.
    """
    if A is None:
        A = cholesky(sigma)
    xi = np.zeros((N, 6))
    xi[:, index] = np.linspace(-K, K, num=N)
    perturbations = (A @ xi.T).T
    translations = []
    for perturbation in perturbations:
        pose = mrob.geometry.SE3(T)
        pose.update_lhs(perturbation)
        translations.append(pose.t())
    pts = np.array(translations).reshape((-1, 3))
    return np.vstack((pts, np.full((1, 3), np.nan)))
def get_circumference(T, sigma, N, K=1, index_1=-1, index_2=-1, A=None):
    """Trace a K-sigma circle in the plane of two Lie-algebra axes.

    N perturbation vectors on the circle spanned by axes ``index_1`` and
    ``index_2`` are mapped through the Cholesky factor of ``sigma`` and
    applied (left-multiplied) to pose ``T``. The resulting translations
    are returned as an (N + 1, 3) array whose final NaN row lets several
    curves be concatenated and plotted as a single artist with gaps.
    """
    if A is None:
        A = cholesky(sigma)
    angles = np.linspace(0, 2 * np.pi, num=N)
    xi = np.zeros((N, 6))
    xi[:, index_1] = K * np.cos(angles)
    xi[:, index_2] = K * np.sin(angles)
    perturbations = (A @ xi.T).T
    translations = []
    for perturbation in perturbations:
        pose = mrob.geometry.SE3(T)
        pose.update_lhs(perturbation)
        translations.append(pose.t())
    pts = np.array(translations).reshape((-1, 3))
    return np.vstack((pts, np.full((1, 3), np.nan)))
def sigma_visualize_3d(T, sigma, N=100, K=1):
    """Sample the K-sigma uncertainty envelope of pose T in 3D.

    Bug fixed: the body used to execute ``N = 100`` unconditionally, so
    the ``N`` parameter was silently ignored; it is now honoured. The
    unused ``colors`` list was also removed.

    Args:
        T: mrob.geometry.SE3 mean pose.
        sigma: 6x6 covariance in Lie-algebra order (yaw, pitch, roll, x, y, z).
        N: number of sample points per axis / circumference.
        K: sigma multiplier scaling the envelope.

    Returns:
        (axes, circumferences): dicts mapping a label to an (N + 1, 3)
        array of 3D points, each terminated by a NaN row for plotting.
    """
    # Factor once and share it across every axis/circumference call.
    A = cholesky(sigma)
    axes = {
        'yaw': get_axis_points(T, sigma, N, K, 0, A),
        'pitch': get_axis_points(T, sigma, N, K, 1, A),
        'roll': get_axis_points(T, sigma, N, K, 2, A),
        'x': get_axis_points(T, sigma, N, K, 3, A),
        'y': get_axis_points(T, sigma, N, K, 4, A),
        'z': get_axis_points(T, sigma, N, K, 5, A)
    }
    circumferences = {
        'yaw vs pitch': get_circumference(T, sigma, N, K, 0, 1, A),
        'yaw vs roll': get_circumference(T, sigma, N, K, 0, 2, A),
        'yaw vs x': get_circumference(T, sigma, N, K, 0, 3, A),
        'yaw vs y': get_circumference(T, sigma, N, K, 0, 4, A),
        'yaw vs z': get_circumference(T, sigma, N, K, 0, 5, A),
        'pitch vs roll': get_circumference(T, sigma, N, K, 1, 2, A),
        'pitch vs x': get_circumference(T, sigma, N, K, 1, 3, A),
        'pitch vs y': get_circumference(T, sigma, N, K, 1, 4, A),
        'pitch vs z': get_circumference(T, sigma, N, K, 1, 5, A),
        'roll vs x': get_circumference(T, sigma, N, K, 2, 3, A),
        'roll vs y': get_circumference(T, sigma, N, K, 2, 4, A),
        'roll vs z': get_circumference(T, sigma, N, K, 2, 5, A),
        'x vs y': get_circumference(T, sigma, N, K, 3, 4, A),
        'x vs z': get_circumference(T, sigma, N, K, 3, 5, A),
        'y vs z': get_circumference(T, sigma, N, K, 4, 5, A),
    }
    return axes, circumferences
def sigma_visualize(T, sigma, N=100, K=(1, 1), label="", color=None, ax=None):
    """Plot 2D (x-y plane) projections of the pose covariance of T.

    For every sigma level k in ``set(K)`` this draws three ellipse sections
    (the yaw&x, x&y and yaw&y planes of the 6D tangent space) plus the yaw,
    x and y principal axes, all mapped through A = cholesky(sigma) and the
    pose T.

    T     : mrob.geometry.SE3 pose whose uncertainty is visualized.
    sigma : 6x6 covariance in the tangent space of T.
    N     : samples per curve. Bug fix: previously overwritten with a
            hard-coded 100, so the parameter was ignored.
    K     : iterable of sigma levels (duplicates collapsed via set()).
            Default changed from the mutable [1, 1] to the equivalent
            tuple (1, 1).
    label : text annotation placed at the translation of T.
    color : matplotlib color; a random CSS4 color is chosen when None.
    ax    : axes-like object to draw on; defaults to matplotlib.pyplot.
    """
    colors = list(matplotlib.colors.CSS4_COLORS.keys())
    if color is None:
        color = colors[np.random.randint(0, len(colors))]
    if ax is None:
        ax = matplotlib.pyplot
    # Mark and annotate the mean pose.
    ax.plot(T.t()[0], T.t()[1], 'x', color=color)
    ax.annotate(label, (T.t()[0], T.t()[1]))
    A = cholesky(sigma)

    def propagate(points):
        # Map tangent-space samples through A, then through the pose T
        # (left-hand-side update); return the resulting 3D translations.
        points = np.array(points)
        points_img = np.dot(A, points.transpose()).transpose()
        propagated = []
        for xi in points_img:
            tmp = mrob.geometry.SE3(T)
            tmp.update_lhs(xi)
            propagated.append(tmp)
        return np.array([x.t() for x in propagated]).reshape((-1, 3))

    def unit(idx, value):
        # 6D tangent vector with a single non-zero coordinate.
        p = [0.0] * 6
        p[idx] = value
        return p

    for k in set(K):
        # Ellipse sections; tangent-space indices: 2=yaw, 3=x, 4=y.
        for i1, i2, name in ((2, 3, 'yaw & x'), (3, 4, 'x & y'), (2, 4, 'yaw & y')):
            # Four axis-end markers (+i1, -i1, +i2, -i2) followed by the
            # N+1 circle samples. Bug fix: the '-y' end point of the yaw&y
            # section previously duplicated the '+y' point (used +k
            # instead of -k).
            pts = [unit(i1, k), unit(i1, -k), unit(i2, k), unit(i2, -k)]
            for i in range(N + 1):
                p = [0.0] * 6
                p[i1] = k * np.cos(2 * np.pi / N * i)
                p[i2] = k * np.sin(2 * np.pi / N * i)
                pts.append(p)
            poses = propagate(pts)
            ax.plot(poses[4:, 0], poses[4:, 1], label="{}-sigma {}".format(k, name), color=color)
            for j in range(4):
                ax.plot(poses[j, 0], poses[j, 1], 'x', color=color)
        # Principal axes of the ellipsoid (yaw, x, y): a straight line of
        # samples from +k to -k along each coordinate.
        for idx in (2, 3, 4):
            pts = [unit(idx, k - i * (2 * k) / N) for i in range(N + 1)]
            poses = propagate(pts)
            ax.plot(poses[:, 0], poses[:, 1], color=color)
def ellipsoid_wireframe_df(T,sigma,N = 100, K = 1):
    """Gather the 3D sigma-ellipsoid wireframe of pose T into one DataFrame.

    Every trace returned by sigma_visualize_3d (axis lines and ellipse
    circumferences) becomes a group of rows with columns x, y, z plus a
    'label' column naming the trace.
    """
    axes, circumferences = sigma_visualize_3d(T=T, sigma=sigma, N=N, K=K)
    frames = [pd.DataFrame(columns=['x', 'y', 'z'])]
    # Axes first, then circumferences, preserving the original row order.
    for name, trace in list(axes.items()) + list(circumferences.items()):
        block = pd.DataFrame(trace, columns=['x', 'y', 'z'])
        block['label'] = name
        frames.append(block)
    return pd.concat(frames)
def mc_pointcloud_df(T, sigma, mean=None, N=100):
    """Monte-Carlo sample N poses around T and return their translations.

    T     : mrob.geometry.SE3 mean pose.
    sigma : 6x6 covariance in the tangent space of T.
    mean  : optional 6-vector offset in the tangent space; defaults to
            zeros. Bug fix: the default used to be a single shared mutable
            np.zeros(6) instance created at import time.
    N     : number of samples.

    Returns a DataFrame with columns x, y, z (one row per sample).
    """
    if mean is None:
        mean = np.zeros(6)
    poses, _ = get_mc(T=T, sigma=sigma, mean=mean, N=N)
    return pd.DataFrame(poses, columns=['x', 'y', 'z'])
| 29.650602 | 109 | 0.548151 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,161 | 0.11794 |
2589eb77b53e81f30f42249e4a59f220a70d8f3e | 573 | py | Python | api/tf_auth/migrations/0009_populate_username.py | prattl/teamfinder-web | 85ded666879c2ee4b51cb59ffdedc2dedbfd9c7e | [
"Apache-2.0"
] | 9 | 2017-04-10T09:40:01.000Z | 2020-01-31T17:15:41.000Z | api/tf_auth/migrations/0009_populate_username.py | prattl/teamfinder-web | 85ded666879c2ee4b51cb59ffdedc2dedbfd9c7e | [
"Apache-2.0"
] | 19 | 2017-02-22T16:26:02.000Z | 2020-02-04T16:08:16.000Z | api/tf_auth/migrations/0009_populate_username.py | prattl/teamfinder-web | 85ded666879c2ee4b51cb59ffdedc2dedbfd9c7e | [
"Apache-2.0"
] | 3 | 2017-04-17T06:37:58.000Z | 2021-04-09T18:16:28.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-04-17 00:30
from __future__ import unicode_literals
from django.db import migrations
def forwards(apps, schema_editor):
    """Populate TFUser.username from the related player's username.

    Uses the historical model via ``apps`` as required in data migrations.
    """
    TFUser = apps.get_model('tf_auth.TFUser')
    for user in TFUser.objects.all():
        try:
            user.username = user.player.username
            # Bug fix: the original version assigned the attribute but
            # never called save(), so no username was actually persisted.
            user.save()
        except Exception:
            # Users without a related player are skipped. Narrowed from a
            # bare `except:` so KeyboardInterrupt/SystemExit still propagate.
            pass
class Migration(migrations.Migration):
    """Data migration: copy each related player's username onto TFUser.username."""

    dependencies = [
        ('tf_auth', '0008_auto_20170417_0012'),
    ]

    operations = [
        # Forward pass populates usernames; reverse is a deliberate no-op
        # because the pre-migration values are not recoverable.
        migrations.RunPython(forwards, migrations.RunPython.noop)
    ]
| 21.222222 | 65 | 0.649215 | 206 | 0.359511 | 0 | 0 | 0 | 0 | 0 | 0 | 121 | 0.211169 |
258dfd4cf3f12a84a78632665686b4481a1602df | 1,947 | py | Python | emotion_detection/f_emotion_detection.py | juan-csv/Architecture-for-real-time-video-streaming-analytics | bf4f03553fa856aa5f704ea25666d163b90cd5e9 | [
"MIT"
] | 16 | 2020-09-14T07:46:45.000Z | 2022-03-29T02:53:40.000Z | emotion_detection/f_emotion_detection.py | juan-csv/Architecture-for-real-time-video-streaming-analytics | bf4f03553fa856aa5f704ea25666d163b90cd5e9 | [
"MIT"
] | null | null | null | emotion_detection/f_emotion_detection.py | juan-csv/Architecture-for-real-time-video-streaming-analytics | bf4f03553fa856aa5f704ea25666d163b90cd5e9 | [
"MIT"
] | 4 | 2020-09-16T02:23:14.000Z | 2022-01-12T06:49:43.000Z | import config as cfg
import cv2
import numpy as np
from keras.models import load_model
from keras.preprocessing.image import img_to_array
from keras import backend as K
import tensorflow as tf
import keras
'''
esto es necesario para que no haya errores a la hora de exponer el servicio con flask
info --> https://github.com/tensorflow/tensorflow/issues/28287#issuecomment-495005162
'''
from keras.backend import set_session
# Create the TF1.x session/graph once at import time and register them with
# Keras. Per the note above, this is needed so the model can be served from
# Flask: request handlers re-bind this graph/session before predict().
sess = tf.Session()
graph = tf.get_default_graph()
set_session(sess)
# Load the emotion-classification model once; path comes from config.
model_emotions = load_model(cfg.path_model)
class predict_emotions():
    """Facial-emotion classifier wrapping the module-level Keras model.

    The model, TF session and graph are created once at import time; this
    class only preprocesses face crops and runs inference on them.
    (Removed: a block of commented-out __init__ code that was accidentally
    stored as the class docstring.)
    """

    def preprocess_img(self, face_image, rgb=True, w=48, h=48):
        """Convert a face crop into the (1, h, w, C) float tensor the model expects.

        face_image : numpy image containing a cropped face.
        rgb        : when False the crop is converted BGR -> grayscale first.
        w, h       : target input size of the network.
        """
        face_image = cv2.resize(face_image, (w, h))
        if not rgb:  # idiom fix: was `if rgb == False`
            face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
        face_image = face_image.astype("float") / 255.0  # normalize to [0, 1]
        face_image = img_to_array(face_image)
        return np.expand_dims(face_image, axis=0)  # add the batch dimension

    def get_emotion(self, img, boxes_face):
        """Predict one emotion label per detected face box.

        img        : full frame the boxes refer to.
        boxes_face : iterable of (y0, x0, y1, x1) face boxes.

        Returns (boxes_face, emotions); both are empty lists when no boxes
        were supplied.
        """
        emotions = []
        for box in boxes_face:
            y0, x0, y1, x1 = box
            face_image = img[x0:x1, y0:y1]
            face_image = self.preprocess_img(face_image, cfg.rgb, cfg.w, cfg.h)
            # Keras/TF 1.x: re-bind the import-time graph and session so
            # predict() also works when called from a Flask worker thread.
            # (The `global` declarations were dropped: reads don't need them.)
            with graph.as_default():
                set_session(sess)
                prediction = model_emotions.predict(face_image)
            emotions.append(cfg.labels[prediction.argmax()])
        if not emotions:
            boxes_face = []
        return boxes_face, emotions
| 31.918033 | 85 | 0.625578 | 1,404 | 0.721109 | 0 | 0 | 0 | 0 | 0 | 0 | 427 | 0.219312 |
259148bec81d808f337cab94d5e80017025d5d2d | 455 | py | Python | src/gui/migrations/0014_feedback_childprotection.py | digitalfabrik/ish-goalkeeper | a500c7a628ef66897941dadc0addb0be01658e02 | [
"MIT"
] | 12 | 2021-10-30T12:57:26.000Z | 2021-10-31T11:33:20.000Z | src/gui/migrations/0014_feedback_childprotection.py | digitalfabrik/ish-goalkeeper | a500c7a628ef66897941dadc0addb0be01658e02 | [
"MIT"
] | 53 | 2019-07-31T12:44:44.000Z | 2021-10-21T12:40:29.000Z | src/gui/migrations/0014_feedback_childprotection.py | digitalfabrik/ish-goalkeeper | a500c7a628ef66897941dadc0addb0be01658e02 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.1 on 2020-03-10 18:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gui', '0013_auto_20200310_1742'),
]
operations = [
migrations.AddField(
model_name='feedback',
name='childprotection',
field=models.TextField(blank=True, max_length=1000, verbose_name='Kinderschutzrelevante Information'),
),
]
| 23.947368 | 114 | 0.63956 | 362 | 0.795604 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.305495 |
2592664d08f4a0c9eb9bf2b57177c0908bfc0dd1 | 45,764 | py | Python | assignment/assignment2/test/ASTGenSuite.py | khoidohpc/ppl-course | 3bcff3eeeeebc24f0fc9e3f844779f439aa97544 | [
"MIT"
] | 2 | 2020-10-21T13:04:18.000Z | 2022-01-12T11:06:31.000Z | assignment/assignment2/test/ASTGenSuite.py | khoidohpc/ppl-course | 3bcff3eeeeebc24f0fc9e3f844779f439aa97544 | [
"MIT"
] | null | null | null | assignment/assignment2/test/ASTGenSuite.py | khoidohpc/ppl-course | 3bcff3eeeeebc24f0fc9e3f844779f439aa97544 | [
"MIT"
] | 1 | 2022-01-12T11:06:45.000Z | 2022-01-12T11:06:45.000Z | import unittest
from TestUtils import TestAST
from AST import *
class ASTGenSuite(unittest.TestCase):
# Test variable declaration
def test_single_variable_declaration(self):
input = """int a;"""
expect = "Program([VarDecl(a,IntType)])"
self.assertTrue(TestAST.checkASTGen(input,expect,300))
def test_single_array_declaration_float(self):
input = """float a[5];"""
expect = "Program([VarDecl(a,ArrayType(FloatType,5))])"
self.assertTrue(TestAST.checkASTGen(input,expect,301))
def test_single_array_declaration_int(self):
input = """int a[5];"""
expect = "Program([VarDecl(a,ArrayType(IntType,5))])"
self.assertTrue(TestAST.checkASTGen(input,expect,302))
def test_multi_variable_declaration(self):
input = """boolean a,b;"""
expect = "Program([VarDecl(a,BoolType),VarDecl(b,BoolType)])"
self.assertTrue(TestAST.checkASTGen(input,expect,303))
def test_multi_array_declaration(self):
input = """boolean a[10],b[5];"""
expect = "Program([VarDecl(a,ArrayType(BoolType,10)),VarDecl(b,ArrayType(BoolType,5))])"
self.assertTrue(TestAST.checkASTGen(input,expect,304))
def test_mix_variable_declaration(self):
input = """boolean a,b[5];"""
expect = "Program([VarDecl(a,BoolType),VarDecl(b,ArrayType(BoolType,5))])"
self.assertTrue(TestAST.checkASTGen(input,expect,305))
# Test program with variable declaration
def test_program_multi_variable_declaration(self):
input = """int a; float b; boolean c;"""
expect = "Program([VarDecl(a,IntType),VarDecl(b,FloatType),VarDecl(c,BoolType)])"
self.assertTrue(TestAST.checkASTGen(input,expect,306))
def test_program_multi_array_declaration(self):
input = """int a[5]; float b[6]; boolean c[7];"""
expect = "Program([VarDecl(a,ArrayType(IntType,5)),VarDecl(b,ArrayType(FloatType,6)),VarDecl(c,ArrayType(BoolType,7))])"
self.assertTrue(TestAST.checkASTGen(input,expect,307))
def test_program_multi_variable_array_declaration(self):
input = """int a; float b; string a[10]; boolean d[5];"""
expect = "Program([VarDecl(a,IntType),VarDecl(b,FloatType),VarDecl(a,ArrayType(StringType,10)),VarDecl(d,ArrayType(BoolType,5))])"
self.assertTrue(TestAST.checkASTGen(input,expect,308))
def test_program_multi_variable_array_declaration_hard(self):
input = """int a,x; float b,z; string a[10], l[6], r; boolean d[5];"""
expect = "Program([VarDecl(a,IntType),VarDecl(x,IntType),VarDecl(b,FloatType),VarDecl(z,FloatType),VarDecl(a,ArrayType(StringType,10)),VarDecl(l,ArrayType(StringType,6)),VarDecl(r,StringType),VarDecl(d,ArrayType(BoolType,5))])"
self.assertTrue(TestAST.checkASTGen(input,expect,309))
def test_expr_recursion(self):
input = """void main() {(((1)));}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([IntLiteral(1)]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,310))
# Test function parameter
def test_function_para_empty(self):
input = """int main() {}"""
expect = "Program([FuncDecl(Id(main),[],IntType,Block([]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,311))
def test_function_single_para(self):
input = """int main(int a) {}"""
expect = "Program([FuncDecl(Id(main),[VarDecl(a,IntType)],IntType,Block([]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,312))
def test_function_multi_para(self):
input = """void main(boolean a, float b) {}"""
expect = "Program([FuncDecl(Id(main),[VarDecl(a,BoolType),VarDecl(b,FloatType)],VoidType,Block([]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,313))
def test_function_single_array(self):
input = """void main(boolean a[]) {}"""
expect = "Program([FuncDecl(Id(main),[VarDecl(a,ArrayTypePointer(BoolType))],VoidType,Block([]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,314))
def test_function_multi_array(self):
input = """void main(boolean a[], float b[]) {}"""
expect = "Program([FuncDecl(Id(main),[VarDecl(a,ArrayTypePointer(BoolType)),VarDecl(b,ArrayTypePointer(FloatType))],VoidType,Block([]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,315))
# Test program with function parameter
def test_function_type_array(self):
input = """int[] main() {}"""
expect = "Program([FuncDecl(Id(main),[],ArrayTypePointer(IntType),Block([]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,316))
def test_function_void_type_array(self):
input = """void main() {}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,317))
def test_program_multi_function(self):
input = """int[] main() {} float[] pow() {}"""
expect = "Program([FuncDecl(Id(main),[],ArrayTypePointer(IntType),Block([])),FuncDecl(Id(pow),[],ArrayTypePointer(FloatType),Block([]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,318))
def test_program_multi_function_variable(self):
input = """int[] main() {} float[] pow() {} int a,b,c;"""
expect = "Program([FuncDecl(Id(main),[],ArrayTypePointer(IntType),Block([])),FuncDecl(Id(pow),[],ArrayTypePointer(FloatType),Block([])),VarDecl(a,IntType),VarDecl(b,IntType),VarDecl(c,IntType)])"
self.assertTrue(TestAST.checkASTGen(input,expect,319))
def test_program_multi_function_array(self):
input = """int[] main() {} float[] pow() {} int a[5],b[5],c[5];"""
expect = "Program([FuncDecl(Id(main),[],ArrayTypePointer(IntType),Block([])),FuncDecl(Id(pow),[],ArrayTypePointer(FloatType),Block([])),VarDecl(a,ArrayType(IntType,5)),VarDecl(b,ArrayType(IntType,5)),VarDecl(c,ArrayType(IntType,5))])"
self.assertTrue(TestAST.checkASTGen(input,expect,320))
# Test if statement
def test_if_stmt_base(self):
input = """void main() {if(true) print("hello");}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([If(BooleanLiteral(true),CallExpr(Id(print),[StringLiteral(hello)]))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,321))
def test_if__else_stmt_base(self):
input = """void main() {if(true) print("hello"); else put(5);}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([If(BooleanLiteral(true),CallExpr(Id(print),[StringLiteral(hello)]),CallExpr(Id(put),[IntLiteral(5)]))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,322))
def test_if_stmt_hard(self):
input = """void main() {if(true) {
print("hello");
int a;
a = a + 100;
a = a || 100;
}
}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([If(BooleanLiteral(true),Block([CallExpr(Id(print),[StringLiteral(hello)]),VarDecl(a,IntType),BinaryOp(=,Id(a),BinaryOp(+,Id(a),IntLiteral(100))),BinaryOp(=,Id(a),BinaryOp(||,Id(a),IntLiteral(100)))]))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,323))
def test_if_else_stmt_hard(self):
input = """void main() {if(a && b || c) print("hello"); else put(5);}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([If(BinaryOp(||,BinaryOp(&&,Id(a),Id(b)),Id(c)),CallExpr(Id(print),[StringLiteral(hello)]),CallExpr(Id(put),[IntLiteral(5)]))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,324))
def test_if_else_stmt_mix(self):
input = """void main() {
if(a && b || c) {
print("hello");
int a[5];
string b[10];
}
else {
HPC(kafka);
}
}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([If(BinaryOp(||,BinaryOp(&&,Id(a),Id(b)),Id(c)),Block([CallExpr(Id(print),[StringLiteral(hello)]),VarDecl(a,ArrayType(IntType,5)),VarDecl(b,ArrayType(StringType,10))]),Block([CallExpr(Id(HPC),[Id(kafka)])]))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,325))
# Test do while statement
def test_do_while_base(self):
input = """void main() {do print(); while(a==1);}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([Dowhile([CallExpr(Id(print),[])],BinaryOp(==,Id(a),IntLiteral(1)))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,326))
def test_do_while_medium(self):
input = """void main() {do {int a[5]; print();} while(a==1&&2);}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([Dowhile([Block([VarDecl(a,ArrayType(IntType,5)),CallExpr(Id(print),[])])],BinaryOp(&&,BinaryOp(==,Id(a),IntLiteral(1)),IntLiteral(2)))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,327))
def test_do_while_hard(self):
input = """void main() {do {if(true) print();} while(func(a[5]));}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([Dowhile([Block([If(BooleanLiteral(true),CallExpr(Id(print),[]))])],CallExpr(Id(func),[ArrayCell(Id(a),IntLiteral(5))]))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,328))
# Test for statemet
def test_for_base(self):
input = """void main() {for(i=0;i<10;i=i+1) print();}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([For(BinaryOp(=,Id(i),IntLiteral(0));BinaryOp(<,Id(i),IntLiteral(10));BinaryOp(=,Id(i),BinaryOp(+,Id(i),IntLiteral(1)));CallExpr(Id(print),[]))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,329))
def test_for_medium(self):
input = """void main() {for(i=0;i && 1;i=i+1) {print();}}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([For(BinaryOp(=,Id(i),IntLiteral(0));BinaryOp(&&,Id(i),IntLiteral(1));BinaryOp(=,Id(i),BinaryOp(+,Id(i),IntLiteral(1)));Block([CallExpr(Id(print),[])]))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,330))
def test_for_hard(self):
input = """void main() {for(i = 1+a[6];a[i]>=(2/5);i = 10/i) print();}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([For(BinaryOp(=,Id(i),BinaryOp(+,IntLiteral(1),ArrayCell(Id(a),IntLiteral(6))));BinaryOp(>=,ArrayCell(Id(a),Id(i)),BinaryOp(/,IntLiteral(2),IntLiteral(5)));BinaryOp(=,Id(i),BinaryOp(/,IntLiteral(10),Id(i)));CallExpr(Id(print),[]))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,331))
# Test break and continue
def test_break(self):
input = """void main() {for(i = 1+a[6];a[i]>=(2/5);i = 10/i) break;}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([For(BinaryOp(=,Id(i),BinaryOp(+,IntLiteral(1),ArrayCell(Id(a),IntLiteral(6))));BinaryOp(>=,ArrayCell(Id(a),Id(i)),BinaryOp(/,IntLiteral(2),IntLiteral(5)));BinaryOp(=,Id(i),BinaryOp(/,IntLiteral(10),Id(i)));Break())]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,332))
def test_continue(self):
input = """void main() {for(i = 1+a[6];a[i]>=(2/5);i = 10/i) continue;}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([For(BinaryOp(=,Id(i),BinaryOp(+,IntLiteral(1),ArrayCell(Id(a),IntLiteral(6))));BinaryOp(>=,ArrayCell(Id(a),Id(i)),BinaryOp(/,IntLiteral(2),IntLiteral(5)));BinaryOp(=,Id(i),BinaryOp(/,IntLiteral(10),Id(i)));Continue())]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,333))
def test_assign(self):
input = """void main() {a = 5;}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([BinaryOp(=,Id(a),IntLiteral(5))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,334))
def test_or(self):
input = """void main() {a = a || 0;}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([BinaryOp(=,Id(a),BinaryOp(||,Id(a),IntLiteral(0)))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,335))
def test_and(self):
input = """void main() {a = a && 0;}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([BinaryOp(=,Id(a),BinaryOp(&&,Id(a),IntLiteral(0)))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,336))
def test_equal(self):
input = """void main() {a = a == 0;}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([BinaryOp(=,Id(a),BinaryOp(==,Id(a),IntLiteral(0)))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,337))
def test_not_equal(self):
input = """void main() {a = a != 0;}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([BinaryOp(=,Id(a),BinaryOp(!=,Id(a),IntLiteral(0)))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,338))
def test_less_than(self):
input = """void main() {a = a < 0;}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([BinaryOp(=,Id(a),BinaryOp(<,Id(a),IntLiteral(0)))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,339))
def test_less_equal(self):
input = """void main() {a = a <= 0;}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([BinaryOp(=,Id(a),BinaryOp(<=,Id(a),IntLiteral(0)))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,340))
def test_great_than(self):
input = """void main() {a = a > 0;}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([BinaryOp(=,Id(a),BinaryOp(>,Id(a),IntLiteral(0)))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,341))
def test_great_equal(self):
input = """void main() {a = a >= 0;}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([BinaryOp(=,Id(a),BinaryOp(>=,Id(a),IntLiteral(0)))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,342))
def test_add(self):
input = """void main() {a = a + 0;}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([BinaryOp(=,Id(a),BinaryOp(+,Id(a),IntLiteral(0)))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,343))
def test_subtract(self):
input = """void main() {a = a - 0;}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([BinaryOp(=,Id(a),BinaryOp(-,Id(a),IntLiteral(0)))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,344))
def test_mul(self):
input = """void main() {a = a * 0;}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([BinaryOp(=,Id(a),BinaryOp(*,Id(a),IntLiteral(0)))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,345))
def test_div(self):
input = """void main() {a = a / 0;}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([BinaryOp(=,Id(a),BinaryOp(/,Id(a),IntLiteral(0)))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,346))
def test_mod(self):
input = """void main() {a = a % 0;}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([BinaryOp(=,Id(a),BinaryOp(%,Id(a),IntLiteral(0)))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,347))
def test_not(self):
input = """void main() {a = !a;}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([BinaryOp(=,Id(a),UnaryOp(!,Id(a)))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,348))
def test_sub(self):
input = """void main() {a = -a;}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([BinaryOp(=,Id(a),UnaryOp(-,Id(a)))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,349))
# Test array cell
def test_array_cell_base(self):
input = """void main() {a[5];}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([ArrayCell(Id(a),IntLiteral(5))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,350))
def test_array_cell_medium(self):
input = """void main() {a[5+x];}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([ArrayCell(Id(a),BinaryOp(+,IntLiteral(5),Id(x)))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,351))
def test_array_cell_hard(self):
input = """void main() {a[pow(2,3)];}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([ArrayCell(Id(a),CallExpr(Id(pow),[IntLiteral(2),IntLiteral(3)]))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,352))
def test_array_cell_expr_base(self):
input = """void main() {(5+x)[a];}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([ArrayCell(BinaryOp(+,IntLiteral(5),Id(x)),Id(a))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,353))
def test_array_cell_expr_medium(self):
input = """void main() {foo()[5];}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([ArrayCell(CallExpr(Id(foo),[]),IntLiteral(5))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,354))
# TODO : Find bug !
def test_array_cell_expr_hard(self):
input = """void main() {(foo()[5])[5];}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([ArrayCell(ArrayCell(CallExpr(Id(foo),[]),IntLiteral(5)),IntLiteral(5))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,355))
def test_array_cell_expr_super_hard(self):
input = """void main() {foo(foo(foo(foo()[5])[5])[5])[5];}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([ArrayCell(CallExpr(Id(foo),[ArrayCell(CallExpr(Id(foo),[ArrayCell(CallExpr(Id(foo),[ArrayCell(CallExpr(Id(foo),[]),IntLiteral(5))]),IntLiteral(5))]),IntLiteral(5))]),IntLiteral(5))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,356))
def test_cal_func_base(self):
input = """void main() {foo();}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([CallExpr(Id(foo),[])]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,357))
def test_cal_func_single_variable(self):
input = """void main() {foo(a);}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([CallExpr(Id(foo),[Id(a)])]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,358))
def test_cal_func_single_array(self):
input = """void main() {foo(a[5]);}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([CallExpr(Id(foo),[ArrayCell(Id(a),IntLiteral(5))])]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,359))
def test_cal_func_multi_variable(self):
input = """void main() {foo(a, b, c);}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([CallExpr(Id(foo),[Id(a),Id(b),Id(c)])]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,360))
def test_cal_func_multi_array(self):
input = """void main() {a[5]; b[5];}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([ArrayCell(Id(a),IntLiteral(5)),ArrayCell(Id(b),IntLiteral(5))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,361))
def test_cal_func_multi_variable_array(self):
input = """void main() {foo(a, b[6], c);}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([CallExpr(Id(foo),[Id(a),ArrayCell(Id(b),IntLiteral(6)),Id(c)])]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,362))
# # Test program with multi function declaration
def test_multi_function_declaration(self):
input = """void main() {} void main() {} void main() {}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([])),FuncDecl(Id(main),[],VoidType,Block([])),FuncDecl(Id(main),[],VoidType,Block([]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,363))
def test_multi_function_variable_declaration(self):
input = """void main() {} int a; void main() {} float a[5];"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([])),VarDecl(a,IntType),FuncDecl(Id(main),[],VoidType,Block([])),VarDecl(a,ArrayType(FloatType,5))])"
self.assertTrue(TestAST.checkASTGen(input,expect,364))
# Test long program
def test_long_program_1(self):
input = r"""void main(){
for (a=1;a<10;a=a*2){
for(b=2;b==10;b=b*2){
int a;
string b;
b = a + 1;
}
}
for(d=1;d!=1;d=d+1){
int e;
e = d;
}
for(c=100;c!=0;c=c%2){
for(d=1000;d>0;d=d%10){
int e;
e = d;
string d;
d = e;
}
}
}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([For(BinaryOp(=,Id(a),IntLiteral(1));BinaryOp(<,Id(a),IntLiteral(10));BinaryOp(=,Id(a),BinaryOp(*,Id(a),IntLiteral(2)));Block([For(BinaryOp(=,Id(b),IntLiteral(2));BinaryOp(==,Id(b),IntLiteral(10));BinaryOp(=,Id(b),BinaryOp(*,Id(b),IntLiteral(2)));Block([VarDecl(a,IntType),VarDecl(b,StringType),BinaryOp(=,Id(b),BinaryOp(+,Id(a),IntLiteral(1)))]))])),For(BinaryOp(=,Id(d),IntLiteral(1));BinaryOp(!=,Id(d),IntLiteral(1));BinaryOp(=,Id(d),BinaryOp(+,Id(d),IntLiteral(1)));Block([VarDecl(e,IntType),BinaryOp(=,Id(e),Id(d))])),For(BinaryOp(=,Id(c),IntLiteral(100));BinaryOp(!=,Id(c),IntLiteral(0));BinaryOp(=,Id(c),BinaryOp(%,Id(c),IntLiteral(2)));Block([For(BinaryOp(=,Id(d),IntLiteral(1000));BinaryOp(>,Id(d),IntLiteral(0));BinaryOp(=,Id(d),BinaryOp(%,Id(d),IntLiteral(10)));Block([VarDecl(e,IntType),BinaryOp(=,Id(e),Id(d)),VarDecl(d,StringType),BinaryOp(=,Id(d),Id(e))]))]))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,365))
def test_long_program_2(self):
input = r"""void main(){
for (a=1;a<10;a=a*2){
for(b=2;b==10;b=b*2){
for(c=100;c!=0;c=c%2){
for(d=1000;d>0;d=d%10){
int e;
e = d;
string d;
d = e;
}
}
}
}
}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([For(BinaryOp(=,Id(a),IntLiteral(1));BinaryOp(<,Id(a),IntLiteral(10));BinaryOp(=,Id(a),BinaryOp(*,Id(a),IntLiteral(2)));Block([For(BinaryOp(=,Id(b),IntLiteral(2));BinaryOp(==,Id(b),IntLiteral(10));BinaryOp(=,Id(b),BinaryOp(*,Id(b),IntLiteral(2)));Block([For(BinaryOp(=,Id(c),IntLiteral(100));BinaryOp(!=,Id(c),IntLiteral(0));BinaryOp(=,Id(c),BinaryOp(%,Id(c),IntLiteral(2)));Block([For(BinaryOp(=,Id(d),IntLiteral(1000));BinaryOp(>,Id(d),IntLiteral(0));BinaryOp(=,Id(d),BinaryOp(%,Id(d),IntLiteral(10)));Block([VarDecl(e,IntType),BinaryOp(=,Id(e),Id(d)),VarDecl(d,StringType),BinaryOp(=,Id(d),Id(e))]))]))]))]))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,366))
def test_long_program_3(self):
input = r"""void main(){
int a,b,c;
for (a=1;a<100;a=a+1){
for(b=1;b<10;b=b+1){
for(c=1;c<50;c=c+1){
if (c){
string rlt;
rlt = c;
}
}
}
}
}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([VarDecl(a,IntType),VarDecl(b,IntType),VarDecl(c,IntType),For(BinaryOp(=,Id(a),IntLiteral(1));BinaryOp(<,Id(a),IntLiteral(100));BinaryOp(=,Id(a),BinaryOp(+,Id(a),IntLiteral(1)));Block([For(BinaryOp(=,Id(b),IntLiteral(1));BinaryOp(<,Id(b),IntLiteral(10));BinaryOp(=,Id(b),BinaryOp(+,Id(b),IntLiteral(1)));Block([For(BinaryOp(=,Id(c),IntLiteral(1));BinaryOp(<,Id(c),IntLiteral(50));BinaryOp(=,Id(c),BinaryOp(+,Id(c),IntLiteral(1)));Block([If(Id(c),Block([VarDecl(rlt,StringType),BinaryOp(=,Id(rlt),Id(c))]))]))]))]))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,367))
def test_long_program_4(self):
input = r"""void main(){
int a;
float b;
string c;
for (a=1; a % 10 == 0; a=a+1){
if (a % 2 == 0){
for (b=0; b != 1;b=b+2){
int a;
float b;
b = a;
for (b=1;b==10;b=b+1){
string c;
c = b;
if (c){
float a;
string d;
d = c;
}
}
}
}
}
}"""
expect = "Program([FuncDecl(Id(main),[],VoidType,Block([VarDecl(a,IntType),VarDecl(b,FloatType),VarDecl(c,StringType),For(BinaryOp(=,Id(a),IntLiteral(1));BinaryOp(==,BinaryOp(%,Id(a),IntLiteral(10)),IntLiteral(0));BinaryOp(=,Id(a),BinaryOp(+,Id(a),IntLiteral(1)));Block([If(BinaryOp(==,BinaryOp(%,Id(a),IntLiteral(2)),IntLiteral(0)),Block([For(BinaryOp(=,Id(b),IntLiteral(0));BinaryOp(!=,Id(b),IntLiteral(1));BinaryOp(=,Id(b),BinaryOp(+,Id(b),IntLiteral(2)));Block([VarDecl(a,IntType),VarDecl(b,FloatType),BinaryOp(=,Id(b),Id(a)),For(BinaryOp(=,Id(b),IntLiteral(1));BinaryOp(==,Id(b),IntLiteral(10));BinaryOp(=,Id(b),BinaryOp(+,Id(b),IntLiteral(1)));Block([VarDecl(c,StringType),BinaryOp(=,Id(c),Id(b)),If(Id(c),Block([VarDecl(a,FloatType),VarDecl(d,StringType),BinaryOp(=,Id(d),Id(c))]))]))]))]))]))]))])"
self.assertTrue(TestAST.checkASTGen(input,expect,368))
def test_long_program_5(self):
    """AST for a for-loop containing an if whose body holds another for with shadowed locals."""
    input = r"""void main(){
int a;
float b;
string c;
for (a=1; a % 10 == 0; a=a+1){
if (a % 2 == 0){
for (b=0; b != 1;b=b+2){
int a;
float b;
b = a;
}
}
}
}"""
    expect = "Program([FuncDecl(Id(main),[],VoidType,Block([VarDecl(a,IntType),VarDecl(b,FloatType),VarDecl(c,StringType),For(BinaryOp(=,Id(a),IntLiteral(1));BinaryOp(==,BinaryOp(%,Id(a),IntLiteral(10)),IntLiteral(0));BinaryOp(=,Id(a),BinaryOp(+,Id(a),IntLiteral(1)));Block([If(BinaryOp(==,BinaryOp(%,Id(a),IntLiteral(2)),IntLiteral(0)),Block([For(BinaryOp(=,Id(b),IntLiteral(0));BinaryOp(!=,Id(b),IntLiteral(1));BinaryOp(=,Id(b),BinaryOp(+,Id(b),IntLiteral(2)));Block([VarDecl(a,IntType),VarDecl(b,FloatType),BinaryOp(=,Id(b),Id(a))]))]))]))]))])"
    self.assertTrue(TestAST.checkASTGen(input,expect,369))
def test_long_program_6(self):
    """AST for two nested for-loops; the inner body declares locals and cross-assigns them."""
    input = r"""void main(){
for (a=1;a<10;a=a+1){
for(b=2;b%10==0;b=b+1){
int c;
float d;
c = b;
d = a;
}
}
}"""
    expect = "Program([FuncDecl(Id(main),[],VoidType,Block([For(BinaryOp(=,Id(a),IntLiteral(1));BinaryOp(<,Id(a),IntLiteral(10));BinaryOp(=,Id(a),BinaryOp(+,Id(a),IntLiteral(1)));Block([For(BinaryOp(=,Id(b),IntLiteral(2));BinaryOp(==,BinaryOp(%,Id(b),IntLiteral(10)),IntLiteral(0));BinaryOp(=,Id(b),BinaryOp(+,Id(b),IntLiteral(1)));Block([VarDecl(c,IntType),VarDecl(d,FloatType),BinaryOp(=,Id(c),Id(b)),BinaryOp(=,Id(d),Id(a))]))]))]))])"
    self.assertTrue(TestAST.checkASTGen(input,expect,370))
def test_long_program_7(self):
    """AST for a nested for-loop followed by a statement in the outer loop body."""
    input = r"""void main(){
int a;
int b;
boolean c;
for (a=1; a < 10; a=a+1){
for (b=0; b != 10; b=b+1){
c = b;
}
b = a + 1;
}
}"""
    expect = "Program([FuncDecl(Id(main),[],VoidType,Block([VarDecl(a,IntType),VarDecl(b,IntType),VarDecl(c,BoolType),For(BinaryOp(=,Id(a),IntLiteral(1));BinaryOp(<,Id(a),IntLiteral(10));BinaryOp(=,Id(a),BinaryOp(+,Id(a),IntLiteral(1)));Block([For(BinaryOp(=,Id(b),IntLiteral(0));BinaryOp(!=,Id(b),IntLiteral(10));BinaryOp(=,Id(b),BinaryOp(+,Id(b),IntLiteral(1)));Block([BinaryOp(=,Id(c),Id(b))])),BinaryOp(=,Id(b),BinaryOp(+,Id(a),IntLiteral(1)))]))]))])"
    self.assertTrue(TestAST.checkASTGen(input,expect,371))
def test_long_program_8(self):
    """AST for an if inside a for-loop using a boolean literal assignment."""
    input = r"""void main(){
int a;
int b;
boolean c;
for (a=1; a < 10; a=a+1){
if (a % 2 == 0){
c = false;
b = b + 1;
}
}
}"""
    expect = "Program([FuncDecl(Id(main),[],VoidType,Block([VarDecl(a,IntType),VarDecl(b,IntType),VarDecl(c,BoolType),For(BinaryOp(=,Id(a),IntLiteral(1));BinaryOp(<,Id(a),IntLiteral(10));BinaryOp(=,Id(a),BinaryOp(+,Id(a),IntLiteral(1)));Block([If(BinaryOp(==,BinaryOp(%,Id(a),IntLiteral(2)),IntLiteral(0)),Block([BinaryOp(=,Id(c),BooleanLiteral(false)),BinaryOp(=,Id(b),BinaryOp(+,Id(b),IntLiteral(1)))]))]))]))])"
    self.assertTrue(TestAST.checkASTGen(input,expect,372))
def test_long_program_9(self):
    """AST for deeply nested if/else chains with unary !, &&, || and shadowed declarations."""
    input = r"""void main(){
int a;
a = true;
if (true){
if (a == true){
if (!a){
a = false;
string b;
b = a;
if (b){
boolean c;
c = b;
if (!c){
int d;
d = c;
if (d == c || !c){
string e;
e = d;
}
else{
string e;
e = d;
}
}
else{
int d;
d = e;
boolean t;
t = e;
if (d && !e){
string t;
t = d;
}
}
}
}
else{
if ((a == b || c != b) && a > b){
int e;
e = a;
}
}
}
}
}"""
    expect = "Program([FuncDecl(Id(main),[],VoidType,Block([VarDecl(a,IntType),BinaryOp(=,Id(a),BooleanLiteral(true)),If(BooleanLiteral(true),Block([If(BinaryOp(==,Id(a),BooleanLiteral(true)),Block([If(UnaryOp(!,Id(a)),Block([BinaryOp(=,Id(a),BooleanLiteral(false)),VarDecl(b,StringType),BinaryOp(=,Id(b),Id(a)),If(Id(b),Block([VarDecl(c,BoolType),BinaryOp(=,Id(c),Id(b)),If(UnaryOp(!,Id(c)),Block([VarDecl(d,IntType),BinaryOp(=,Id(d),Id(c)),If(BinaryOp(||,BinaryOp(==,Id(d),Id(c)),UnaryOp(!,Id(c))),Block([VarDecl(e,StringType),BinaryOp(=,Id(e),Id(d))]),Block([VarDecl(e,StringType),BinaryOp(=,Id(e),Id(d))]))]),Block([VarDecl(d,IntType),BinaryOp(=,Id(d),Id(e)),VarDecl(t,BoolType),BinaryOp(=,Id(t),Id(e)),If(BinaryOp(&&,Id(d),UnaryOp(!,Id(e))),Block([VarDecl(t,StringType),BinaryOp(=,Id(t),Id(d))]))]))]))]),Block([If(BinaryOp(&&,BinaryOp(||,BinaryOp(==,Id(a),Id(b)),BinaryOp(!=,Id(c),Id(b))),BinaryOp(>,Id(a),Id(b))),Block([VarDecl(e,IntType),BinaryOp(=,Id(e),Id(a))]))]))]))]))]))])"
    self.assertTrue(TestAST.checkASTGen(input,expect,373))
def test_long_program_10(self):
    """AST for three nested ifs where the innermost has both then- and else-blocks."""
    input = r"""void main(){
int a;
a = true;
if (true){
if (a == true){
if (!a){
a = false;
string b;
b = a;
}
else{
string b;
b = a;
}
}
else{
a = false;
}
}
}"""
    expect = "Program([FuncDecl(Id(main),[],VoidType,Block([VarDecl(a,IntType),BinaryOp(=,Id(a),BooleanLiteral(true)),If(BooleanLiteral(true),Block([If(BinaryOp(==,Id(a),BooleanLiteral(true)),Block([If(UnaryOp(!,Id(a)),Block([BinaryOp(=,Id(a),BooleanLiteral(false)),VarDecl(b,StringType),BinaryOp(=,Id(b),Id(a))]),Block([VarDecl(b,StringType),BinaryOp(=,Id(b),Id(a))]))]),Block([BinaryOp(=,Id(a),BooleanLiteral(false))]))]))]))])"
    self.assertTrue(TestAST.checkASTGen(input,expect,374))
def test_more_simple_program(self):
    """A main() whose body is a single built-in call with one int argument."""
    src = """int main () {
putIntLn(4);
}"""
    ast = "Program([FuncDecl(Id(main),[],IntType,Block([CallExpr(Id(putIntLn),[IntLiteral(4)])]))])"
    self.assertTrue(TestAST.checkASTGen(src, ast, 375))
def test_call_without_parameter(self):
    """A call expression with an empty argument list."""
    src = """int main () {
getIntLn();
}"""
    ast = "Program([FuncDecl(Id(main),[],IntType,Block([CallExpr(Id(getIntLn),[])]))])"
    self.assertTrue(TestAST.checkASTGen(src, ast, 376))
def test_true_and_false(self):
    """Boolean literals combined with &&, || and a parenthesised comparison."""
    src = """void f(int a,float b, float c){
true && false || (2 > 3/5);
}"""
    ast = "Program([FuncDecl(Id(f),[VarDecl(a,IntType),VarDecl(b,FloatType),VarDecl(c,FloatType)],VoidType,Block([BinaryOp(||,BinaryOp(&&,BooleanLiteral(true),BooleanLiteral(false)),BinaryOp(>,IntLiteral(2),BinaryOp(/,IntLiteral(3),IntLiteral(5))))]))])"
    self.assertTrue(TestAST.checkASTGen(src, ast, 377))
def test_more_call_function(self):
    """AST for a mix of a call, array-cell expressions, break and continue statements."""
    input = """int main () {
putIntLn(4);
ar[12];
foo(a[10],r);
break;continue;
}"""
    expect = "Program([FuncDecl(Id(main),[],IntType,Block([CallExpr(Id(putIntLn),[IntLiteral(4)]),ArrayCell(Id(ar),IntLiteral(12)),CallExpr(Id(foo),[ArrayCell(Id(a),IntLiteral(10)),Id(r)]),Break(),Continue()]))])"
    self.assertTrue(TestAST.checkASTGen(input,expect,378))
def test_if_and_have_not_semiconlon(self):
    """An if whose condition is a parenthesised comparison used inside another comparison."""
    src = """int main () {
if( (c > x) < d){
int a,b;
}
}"""
    ast = "Program([FuncDecl(Id(main),[],IntType,Block([If(BinaryOp(<,BinaryOp(>,Id(c),Id(x)),Id(d)),Block([VarDecl(a,IntType),VarDecl(b,IntType)]))]))])"
    self.assertTrue(TestAST.checkASTGen(src, ast, 379))
def test_if_in_if(self):
    """AST for an if inside quadruply nested blocks, with an else branch indexing a call result."""
    input = """int foo () {
if (a+1) {{{{if(b+a) foo();}}}} else {if (c+d) t+a; else func(a(b(c)))[f+6*d()];}
}"""
    expect = "Program([FuncDecl(Id(foo),[],IntType,Block([If(BinaryOp(+,Id(a),IntLiteral(1)),Block([Block([Block([Block([If(BinaryOp(+,Id(b),Id(a)),CallExpr(Id(foo),[]))])])])]),Block([If(BinaryOp(+,Id(c),Id(d)),BinaryOp(+,Id(t),Id(a)),ArrayCell(CallExpr(Id(func),[CallExpr(Id(a),[CallExpr(Id(b),[Id(c)])])]),BinaryOp(+,Id(f),BinaryOp(*,IntLiteral(6),CallExpr(Id(d),[])))))]))]))])"
    self.assertTrue(TestAST.checkASTGen(input,expect,380))
def test_array_type_and_invol(self):
    """Array-pointer parameter types, an array return type and a bare return."""
    src = """int[] ham(int a[], float b[]) {
return;
}"""
    ast = "Program([FuncDecl(Id(ham),[VarDecl(a,ArrayTypePointer(IntType)),VarDecl(b,ArrayTypePointer(FloatType))],ArrayTypePointer(IntType),Block([Return()]))])"
    self.assertTrue(TestAST.checkASTGen(src, ast, 381))
def test_do_while(self):
    """AST for a do-while whose body is a chain of deeply nested call expressions."""
    input = """void fo() {
do{ f(foo(fr(aaa(e(r()))))); } while a>d;
}"""
    expect = "Program([FuncDecl(Id(fo),[],VoidType,Block([Dowhile([Block([CallExpr(Id(f),[CallExpr(Id(foo),[CallExpr(Id(fr),[CallExpr(Id(aaa),[CallExpr(Id(e),[CallExpr(Id(r),[])])])])])])])],BinaryOp(>,Id(a),Id(d)))]))])"
    self.assertTrue(TestAST.checkASTGen(input,expect,382))
def test_bool_in_do(self):
    """A boolean literal as an expression statement inside a do-while body."""
    src = """int main () {
do{ true;} while d>a;
}"""
    ast = "Program([FuncDecl(Id(main),[],IntType,Block([Dowhile([Block([BooleanLiteral(true)])],BinaryOp(>,Id(d),Id(a)))]))])"
    self.assertTrue(TestAST.checkASTGen(src, ast, 383))
def test_if_in_do_while(self):
    """AST for a do-while whose single statement is a chain of nested ifs over all comparison ops."""
    input = """float d () {
do if (a==s) {if (t>a) if (d>=e) if (a<y) if (r<=o) {x+1;}} while foo();
}"""
    expect = "Program([FuncDecl(Id(d),[],FloatType,Block([Dowhile([If(BinaryOp(==,Id(a),Id(s)),Block([If(BinaryOp(>,Id(t),Id(a)),If(BinaryOp(>=,Id(d),Id(e)),If(BinaryOp(<,Id(a),Id(y)),If(BinaryOp(<=,Id(r),Id(o)),Block([BinaryOp(+,Id(x),IntLiteral(1))])))))]))],CallExpr(Id(foo),[]))]))])"
    self.assertTrue(TestAST.checkASTGen(input,expect,384))
def test_func_decl_for_if(self):
    """AST for a for-loop containing an if/else where both branches are single assignments."""
    input = """int foo(int a){
for(i = 0;i!= 100; i=i+1){
if(i%2==0) i=i*2;
else i = i -1;
}
}"""
    expect = "Program([FuncDecl(Id(foo),[VarDecl(a,IntType)],IntType,Block([For(BinaryOp(=,Id(i),IntLiteral(0));BinaryOp(!=,Id(i),IntLiteral(100));BinaryOp(=,Id(i),BinaryOp(+,Id(i),IntLiteral(1)));Block([If(BinaryOp(==,BinaryOp(%,Id(i),IntLiteral(2)),IntLiteral(0)),BinaryOp(=,Id(i),BinaryOp(*,Id(i),IntLiteral(2))),BinaryOp(=,Id(i),BinaryOp(-,Id(i),IntLiteral(1))))]))]))])"
    self.assertTrue(TestAST.checkASTGen(input,expect,385))
def test_func_decl_if_var(self):
    """An if with a boolean-literal condition and a single assignment body."""
    src = """int main() {
if(true) a=10;
}"""
    ast = "Program([FuncDecl(Id(main),[],IntType,Block([If(BooleanLiteral(true),BinaryOp(=,Id(a),IntLiteral(10)))]))])"
    self.assertTrue(TestAST.checkASTGen(src, ast, 386))
def test_var_many_decl(self):
    """Multiple top-level declarations of every primitive type, including an array dimension."""
    src = """int a; float b,c,d[3]; boolean e; string s; """
    ast = "Program([VarDecl(a,IntType),VarDecl(b,FloatType),VarDecl(c,FloatType),VarDecl(d,ArrayType(FloatType,3)),VarDecl(e,BoolType),VarDecl(s,StringType)])"
    self.assertTrue(TestAST.checkASTGen(src, ast, 387))
def test_func_println(self):
    """AST for a function body that re-declares names inside an inner anonymous block."""
    input = """void b(int a[],int b){
int a;a=1;println(a);
{
int b;b=1;
println(b);
}
}"""
    expect = "Program([FuncDecl(Id(b),[VarDecl(a,ArrayTypePointer(IntType)),VarDecl(b,IntType)],VoidType,Block([VarDecl(a,IntType),BinaryOp(=,Id(a),IntLiteral(1)),CallExpr(Id(println),[Id(a)]),Block([VarDecl(b,IntType),BinaryOp(=,Id(b),IntLiteral(1)),CallExpr(Id(println),[Id(b)])])]))])"
    self.assertTrue(TestAST.checkASTGen(input,expect,388))
def test_func_empty(self):
    """A void function with a completely empty body."""
    src = """void Calculate(){}"""
    ast = "Program([FuncDecl(Id(Calculate),[],VoidType,Block([]))])"
    self.assertTrue(TestAST.checkASTGen(src, ast, 389))
def test_func_decl_foo(self):
    """AST for unary ! on a boolean variable compared against false, with a string-literal call."""
    input = """void foo(){
boolean b ;
b = true;
if( !b == false)
println(" b is true");
}"""
    expect = "Program([FuncDecl(Id(foo),[],VoidType,Block([VarDecl(b,BoolType),BinaryOp(=,Id(b),BooleanLiteral(true)),If(BinaryOp(==,UnaryOp(!,Id(b)),BooleanLiteral(false)),CallExpr(Id(println),[StringLiteral( b is true)]))]))])"
    self.assertTrue(TestAST.checkASTGen(input,expect,390))
def test_func_many_func(self):
    """AST for chained assignment, a single-statement for body, and array-cell accumulation."""
    input = """
void main(){
int oddSum, evenSum,arr[10],i;
oddSum = evenSum =0;
for(i=0;i<10;i=i+1)
arr[i]=i;
for(i=0;i<10;i=i+1){
if(arr[i]%2==0)
evenSum = evenSum + arr[i];
else
oddSum = oddSum + arr[i];
}
}"""
    expect = "Program([FuncDecl(Id(main),[],VoidType,Block([VarDecl(oddSum,IntType),VarDecl(evenSum,IntType),VarDecl(arr,ArrayType(IntType,10)),VarDecl(i,IntType),BinaryOp(=,Id(oddSum),BinaryOp(=,Id(evenSum),IntLiteral(0))),For(BinaryOp(=,Id(i),IntLiteral(0));BinaryOp(<,Id(i),IntLiteral(10));BinaryOp(=,Id(i),BinaryOp(+,Id(i),IntLiteral(1)));BinaryOp(=,ArrayCell(Id(arr),Id(i)),Id(i))),For(BinaryOp(=,Id(i),IntLiteral(0));BinaryOp(<,Id(i),IntLiteral(10));BinaryOp(=,Id(i),BinaryOp(+,Id(i),IntLiteral(1)));Block([If(BinaryOp(==,BinaryOp(%,ArrayCell(Id(arr),Id(i)),IntLiteral(2)),IntLiteral(0)),BinaryOp(=,Id(evenSum),BinaryOp(+,Id(evenSum),ArrayCell(Id(arr),Id(i)))),BinaryOp(=,Id(oddSum),BinaryOp(+,Id(oddSum),ArrayCell(Id(arr),Id(i)))))]))]))])"
    self.assertTrue(TestAST.checkASTGen(input,expect,391))
def test_func_switch_case(self):
    """AST for two function declarations; the second holds an if/else-if/else ladder."""
    input = """
void main(){
int mark;
}
void result(int mark){
if(mark<5)
println("Trung binh");
else if (5<=mark&&mark<8)
println("Kha");
else
println("Gioi");
}
"""
    expect = "Program([FuncDecl(Id(main),[],VoidType,Block([VarDecl(mark,IntType)])),FuncDecl(Id(result),[VarDecl(mark,IntType)],VoidType,Block([If(BinaryOp(<,Id(mark),IntLiteral(5)),CallExpr(Id(println),[StringLiteral(Trung binh)]),If(BinaryOp(&&,BinaryOp(<=,IntLiteral(5),Id(mark)),BinaryOp(<,Id(mark),IntLiteral(8))),CallExpr(Id(println),[StringLiteral(Kha)]),CallExpr(Id(println),[StringLiteral(Gioi)])))]))])"
    self.assertTrue(TestAST.checkASTGen(input,expect,392))
def test_func_break_continue(self):
    """AST for continue and break guarded by separate ifs inside a for-loop."""
    input = """
void main(){
int i;
for(i=0;i<10;i=i+1)
{
println(i);
if(i == 5)
continue;
if(i==9)
break;
}
}
"""
    expect = "Program([FuncDecl(Id(main),[],VoidType,Block([VarDecl(i,IntType),For(BinaryOp(=,Id(i),IntLiteral(0));BinaryOp(<,Id(i),IntLiteral(10));BinaryOp(=,Id(i),BinaryOp(+,Id(i),IntLiteral(1)));Block([CallExpr(Id(println),[Id(i)]),If(BinaryOp(==,Id(i),IntLiteral(5)),Continue()),If(BinaryOp(==,Id(i),IntLiteral(9)),Break())]))]))])"
    self.assertTrue(TestAST.checkASTGen(input,expect,393))
def test_func_do_while(self):
    """AST for an unbraced do-while that collects several statements before its while clause."""
    input = """
void main(){
int i;
i = 0;
do
println(i);
i=i+1;
if(i==9)
break;
while(i<10);
}
"""
    expect = "Program([FuncDecl(Id(main),[],VoidType,Block([VarDecl(i,IntType),BinaryOp(=,Id(i),IntLiteral(0)),Dowhile([CallExpr(Id(println),[Id(i)]),BinaryOp(=,Id(i),BinaryOp(+,Id(i),IntLiteral(1))),If(BinaryOp(==,Id(i),IntLiteral(9)),Break())],BinaryOp(<,Id(i),IntLiteral(10)))]))])"
    self.assertTrue(TestAST.checkASTGen(input,expect,394))
def test_maximum_recursion(self):
    """AST for an expression wrapped in very many redundant parentheses (parser recursion depth)."""
    input = """
void main() {n = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((n))))))))))))))))))))))))))))))))))))))))))))))))/2))))))))));}
"""
    # All the parentheses collapse: the AST keeps only the division by 2.
    expect = "Program([FuncDecl(Id(main),[],VoidType,Block([BinaryOp(=,Id(n),BinaryOp(/,Id(n),IntLiteral(2)))]))])"
    self.assertTrue(TestAST.checkASTGen(input,expect,395))
def test_easy_recursion(self):
    """A few layers of redundant parentheses collapse to a single division node."""
    src = """
void main() {n = ((((((n))/2))));}
"""
    ast = "Program([FuncDecl(Id(main),[],VoidType,Block([BinaryOp(=,Id(n),BinaryOp(/,Id(n),IntLiteral(2)))]))])"
    self.assertTrue(TestAST.checkASTGen(src, ast, 396))
def test_func_recursion(self):
    """AST for a call statement followed by nine sibling single-statement blocks."""
    input = """
void main() {print();{print();}{print();}{print();}{print();}{print();}{print();}{print();}{print();}{print();}}
"""
    expect = "Program([FuncDecl(Id(main),[],VoidType,Block([CallExpr(Id(print),[]),Block([CallExpr(Id(print),[])]),Block([CallExpr(Id(print),[])]),Block([CallExpr(Id(print),[])]),Block([CallExpr(Id(print),[])]),Block([CallExpr(Id(print),[])]),Block([CallExpr(Id(print),[])]),Block([CallExpr(Id(print),[])]),Block([CallExpr(Id(print),[])]),Block([CallExpr(Id(print),[])])]))])"
    self.assertTrue(TestAST.checkASTGen(input,expect,397))
def test_for_recursion(self):
    """AST for eight unbraced for-loops nested directly inside one another."""
    input = """
void main() {for(1;1;1) for(1;1;1) for(1;1;1) for(1;1;1) for(1;1;1) for(1;1;1) for(1;1;1) for(1;1;1) print();}
"""
    expect = "Program([FuncDecl(Id(main),[],VoidType,Block([For(IntLiteral(1);IntLiteral(1);IntLiteral(1);For(IntLiteral(1);IntLiteral(1);IntLiteral(1);For(IntLiteral(1);IntLiteral(1);IntLiteral(1);For(IntLiteral(1);IntLiteral(1);IntLiteral(1);For(IntLiteral(1);IntLiteral(1);IntLiteral(1);For(IntLiteral(1);IntLiteral(1);IntLiteral(1);For(IntLiteral(1);IntLiteral(1);IntLiteral(1);For(IntLiteral(1);IntLiteral(1);IntLiteral(1);CallExpr(Id(print),[])))))))))]))])"
    self.assertTrue(TestAST.checkASTGen(input,expect,398))
def test_do_while_recursion(self):
    """AST for six do-while statements nested directly inside one another."""
    input = """
void main() {do do do do do do print(); while true; while true; while true; while true; while true; while true;}
"""
    expect = "Program([FuncDecl(Id(main),[],VoidType,Block([Dowhile([Dowhile([Dowhile([Dowhile([Dowhile([Dowhile([CallExpr(Id(print),[])],BooleanLiteral(true))],BooleanLiteral(true))],BooleanLiteral(true))],BooleanLiteral(true))],BooleanLiteral(true))],BooleanLiteral(true))]))])"
    self.assertTrue(TestAST.checkASTGen(input,expect,399))
259322df87478be7b4ebcda05e307a4d2631a759 | 7,966 | py | Python | lib/reinteract/notebook_window.py | rschroll/reinteract | d74b679fed00c9eb43a1cd167bb6abd9e389da1a | [
"BSD-2-Clause"
] | 1 | 2015-11-05T06:50:03.000Z | 2015-11-05T06:50:03.000Z | lib/reinteract/notebook_window.py | rschroll/reinteract | d74b679fed00c9eb43a1cd167bb6abd9e389da1a | [
"BSD-2-Clause"
] | null | null | null | lib/reinteract/notebook_window.py | rschroll/reinteract | d74b679fed00c9eb43a1cd167bb6abd9e389da1a | [
"BSD-2-Clause"
] | null | null | null | # Copyright 2008-2009 Owen Taylor
#
# This file is part of Reinteract and distributed under the terms
# of the BSD license. See the file COPYING in the Reinteract
# distribution for full details.
#
########################################################################
import gtk
import os
from base_notebook_window import BaseNotebookWindow
from file_list import FileList
from format_escaped import format_escaped
from notebook import NotebookFile, WorksheetFile, LibraryFile
from save_file import SaveFileBuilder
# Register a zero-padding RC style for widgets named "notebook-close-button"
# (the per-tab close buttons created in NotebookWindow._add_editor), so each
# button shrinks to the size of its close icon.
gtk.rc_parse_string(
    """
style "notebook-close-button" {
GtkWidget::focus-line-width = 0
GtkWidget::focus-padding = 0
GtkButton::inner-border = { 0, 0, 0, 0 }
}
widget "*.notebook-close-button" style : highest "notebook-close-button"
""")
class NotebookWindow(BaseNotebookWindow):
    """Main notebook window: a file list pane on the left and a tabbed
    editor area on the right, built on top of BaseNotebookWindow.
    """

    # GtkUIManager markup for the menubar and toolbar; the action names are
    # expected to be registered by BaseNotebookWindow.
    UI_STRING="""
<ui>
<menubar name="TopMenu">
<menu action="file">
<menuitem action="new-notebook"/>
<menuitem action="open-notebook"/>
<menuitem action="notebook-properties"/>
<separator/>
<menuitem action="new-worksheet"/>
<menuitem action="new-library"/>
<menuitem action="open"/>
<menuitem action="save"/>
<menuitem action="rename"/>
<menuitem action="close"/>
<separator/>
<menuitem action="quit"/>
</menu>
<menu action="edit">
<menuitem action="cut"/>
<menuitem action="copy"/>
<menuitem action="copy-as-doctests"/>
<menuitem action="paste"/>
<menuitem action="delete"/>
<separator/>
<menuitem action="calculate"/>
<menuitem action="calculate-to-line"/>
<menuitem action="break"/>
<separator/>
<menuitem action="calculate-all"/>
<separator/>
<menuitem action="preferences"/>
</menu>
<menu action="help">
<menuitem action="about"/>
</menu>
</menubar>
<toolbar name="ToolBar">
<toolitem action="save"/>
<separator/>
<toolitem action="calculate"/>
<toolitem action="break"/>
</toolbar>
</ui>
"""

    def __init__(self, notebook):
        """Create the window for *notebook* and give it a default size."""
        BaseNotebookWindow.__init__(self, notebook)
        self.window.set_default_size(800, 800)

    #######################################################
    # Overrides
    #######################################################

    def _fill_content(self):
        """Build the window content: an HPaned with the file list on the
        left and the editor notebook widget on the right.  The pane
        position is restored from saved state (-1 means "no saved value").
        """
        hpaned = gtk.HPaned()
        position = self.state.get_pane_position()
        if position == -1:
            # No persisted position yet; start with a 200px file-list pane.
            hpaned.set_position(200)
        else:
            hpaned.set_position(position)
        # Persist the divider position whenever the user drags it.
        hpaned.connect('notify::position', self.on_hpaned_notify_position)
        self.main_vbox.pack_start(hpaned, expand=True, fill=True)
        scrolled_window = gtk.ScrolledWindow()
        scrolled_window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        hpaned.pack1(scrolled_window, resize=False)
        self.__file_list = FileList(self.notebook)
        scrolled_window.add(self.__file_list)
        self.__file_list.connect('open-file', self.on_file_list_open_file)
        self.__file_list.connect('close-file', self.on_file_list_close_file)
        self.__file_list.connect('rename-file', self.on_file_list_rename_file)
        self.__file_list.connect('delete-file', self.on_file_list_delete_file)
        hpaned.pack2(self.nb_widget, resize=True)
        self.nb_widget.set_scrollable(True)

    def _add_editor(self, editor):
        """Add *editor* as a new tab with a status icon, title label and a
        close button in the tab header.
        """
        # Set first since _add_editor() calls _update_editor_title()
        editor._notebook_tab_label = gtk.Label()
        editor._notebook_tab_status = gtk.Image()
        editor._notebook_tab_status.props.icon_size = gtk.ICON_SIZE_MENU
        BaseNotebookWindow._add_editor(self, editor)
        label_widget = gtk.HBox(False, 4)
        label_widget.pack_start(editor._notebook_tab_status, True, True, 0)
        label_widget.pack_start(editor._notebook_tab_label, True, True, 0)
        tab_button = gtk.Button()
        # Name matches the rc style registered at module load, which strips
        # the button's focus/border padding.
        tab_button.set_name('notebook-close-button')
        tab_button.set_relief(gtk.RELIEF_NONE)
        tab_button.props.can_focus = False
        tab_button.connect('clicked', lambda *args: self.on_tab_close_button_clicked(editor))
        label_widget.pack_start(tab_button, False, False, 0)
        close = gtk.image_new_from_stock('gtk-close', gtk.ICON_SIZE_MENU)
        tab_button.add(close)
        label_widget.show_all()
        self.nb_widget.set_tab_label(editor.widget, label_widget)
        self.nb_widget.set_tab_reorderable(editor.widget, True)

    def _update_editor_title(self, editor):
        """Refresh the tab label text from the editor's current title."""
        BaseNotebookWindow._update_editor_title(self, editor)
        editor._notebook_tab_label.set_text(editor.title)

    def _update_editor_state(self, editor):
        """Refresh the tab status icon from the editor's current state."""
        BaseNotebookWindow._update_editor_state(self, editor)
        editor._notebook_tab_status.props.stock = NotebookFile.stock_id_for_state(editor.state)

    #######################################################
    # Callbacks
    #######################################################

    def on_tab_close_button_clicked(self, editor):
        self._close_editor(editor)

    def on_file_list_open_file(self, file_list, file):
        self.open_file(file)

    def on_file_list_close_file(self, file_list, file):
        # NOTE(review): iterates self.editors while _close_editor presumably
        # removes entries from it — confirm the base class tolerates this.
        for editor in self.editors:
            if editor.file == file:
                self._close_editor(editor)

    def on_file_list_rename_file(self, file_list, file):
        """Rename *file*; open files go through their editor, closed files
        are renamed directly on disk and the notebook refreshed.
        """
        if file.active:
            # If we have the file open, we need to rename via the editor
            for editor in self.editors:
                if editor.file == file:
                    editor.rename()
                    # Reselect the new item in the list
                    new_file = self.notebook.file_for_absolute_path(editor.filename)
                    file_list.select_file(new_file)
        else:
            # Otherwise do it directly
            def check_name(name):
                # Reject the empty string and an unchanged name.
                return name != "" and name != file.path
            def do_rename(new_path):
                old_path = os.path.join(self.notebook.folder, file.path)
                os.rename(old_path, new_path)
                self.notebook.refresh()
                # Reselect the new item in the list
                new_file = self.notebook.file_for_absolute_path(new_path)
                file_list.select_file(new_file)
            title = "Rename '%s'" % file.path
            builder = SaveFileBuilder(title, file.path, "Rename", check_name)
            builder.dialog.set_transient_for(self.window)
            builder.name_entry.set_text(file.path)
            # Pick the extension for the save dialog from the file kind.
            if isinstance(file, WorksheetFile):
                extension = "rws"
            elif isinstance(file, LibraryFile):
                extension = "py"
            else:
                extension = ""
            builder.prompt_for_name(self.notebook.folder, extension, do_rename)
            builder.dialog.destroy()

    def on_file_list_delete_file(self, file_list, file):
        """Confirm with the user, close any editor for *file*, then delete
        it from disk and refresh the notebook.
        """
        dialog = gtk.MessageDialog(parent=self.window, buttons=gtk.BUTTONS_NONE,
                                   type=gtk.MESSAGE_WARNING)
        message = format_escaped("<big><b>Really delete '%s'?</b></big>", file.path)
        dialog.set_markup(message)
        dialog.add_buttons(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                           gtk.STOCK_DELETE, gtk.RESPONSE_OK)
        dialog.set_default_response(gtk.RESPONSE_CANCEL)
        response = dialog.run()
        dialog.destroy()
        if response != gtk.RESPONSE_OK:
            return
        for editor in self.editors:
            if editor.file == file:
                self._close_editor(editor)
        abspath = os.path.join(self.notebook.folder, file.path)
        os.remove(abspath)
        self.notebook.refresh()

    def on_hpaned_notify_position(self, pane, gparamspec):
        # Persist the divider position so _fill_content can restore it.
        self.state.set_pane_position(pane.get_property('position'))
| 36.709677 | 95 | 0.614863 | 7,163 | 0.899197 | 0 | 0 | 0 | 0 | 0 | 0 | 2,455 | 0.308185 |
2594fb6de6dffcce3372d066529fcf8255ec3b49 | 573 | py | Python | regulation/settings.py | cfpb/regulations-xml-parser | e3bcbd9025f6fb6fa9ef2671fb8ed061c8de3e88 | [
"CC0-1.0"
] | 4 | 2016-01-02T21:04:42.000Z | 2019-08-17T06:30:36.000Z | regulation/settings.py | DalavanCloud/regulations-xml-parser | e3bcbd9025f6fb6fa9ef2671fb8ed061c8de3e88 | [
"CC0-1.0"
] | 49 | 2016-01-25T15:19:04.000Z | 2017-12-06T20:02:09.000Z | regulation/settings.py | DalavanCloud/regulations-xml-parser | e3bcbd9025f6fb6fa9ef2671fb8ed061c8de3e88 | [
"CC0-1.0"
] | 9 | 2016-01-21T19:25:30.000Z | 2021-02-20T10:53:47.000Z | #!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
import importlib
import os
import sys
# Try to load the settings module
try:
local_settings = importlib.import_module(
os.environ.get('REGML_SETTINGS_FILE', 'settings'))
globals().update(local_settings.__dict__)
except ImportError:
logger.error("Unable to import settings module. "
"Please double-check your REGML_SETTINGS_FILE "
"environment variable")
sys.exit(1)
globals().update(local_settings.__dict__)
| 27.285714 | 64 | 0.722513 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.331588 |
2596f72f06a517f88b80e5187a646537bcd3ae06 | 16,968 | py | Python | src/ui/ui_send_payout_dlg.py | muteio/ghostnode-tool | c42868ed6c009c47482d23ebac0d101adbd8c103 | [
"MIT"
] | 1 | 2019-11-02T01:39:52.000Z | 2019-11-02T01:39:52.000Z | src/ui/ui_send_payout_dlg.py | NixPlatform/ghostnode-tool | c42868ed6c009c47482d23ebac0d101adbd8c103 | [
"MIT"
] | null | null | null | src/ui/ui_send_payout_dlg.py | NixPlatform/ghostnode-tool | c42868ed6c009c47482d23ebac0d101adbd8c103 | [
"MIT"
] | 1 | 2019-09-21T15:08:36.000Z | 2019-09-21T15:08:36.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_send_payout_dlg.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_SendPayoutDlg(object):
def setupUi(self, SendPayoutDlg):
SendPayoutDlg.setObjectName("SendPayoutDlg")
SendPayoutDlg.resize(832, 507)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(SendPayoutDlg.sizePolicy().hasHeightForWidth())
SendPayoutDlg.setSizePolicy(sizePolicy)
SendPayoutDlg.setSizeGripEnabled(True)
SendPayoutDlg.setModal(True)
self.verticalLayout = QtWidgets.QVBoxLayout(SendPayoutDlg)
self.verticalLayout.setObjectName("verticalLayout")
self.pnl_input = QtWidgets.QWidget(SendPayoutDlg)
self.pnl_input.setObjectName("pnl_input")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.pnl_input)
self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_4.setSpacing(0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.lay_input = QtWidgets.QHBoxLayout()
self.lay_input.setSpacing(8)
self.lay_input.setObjectName("lay_input")
self.label_3 = QtWidgets.QLabel(self.pnl_input)
self.label_3.setObjectName("label_3")
self.lay_input.addWidget(self.label_3)
self.cbo_address_source_mode = QtWidgets.QComboBox(self.pnl_input)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cbo_address_source_mode.sizePolicy().hasHeightForWidth())
self.cbo_address_source_mode.setSizePolicy(sizePolicy)
self.cbo_address_source_mode.setMinimumSize(QtCore.QSize(0, 0))
self.cbo_address_source_mode.setMaximumSize(QtCore.QSize(160, 16777215))
self.cbo_address_source_mode.setObjectName("cbo_address_source_mode")
self.cbo_address_source_mode.addItem("")
self.cbo_address_source_mode.addItem("")
self.cbo_address_source_mode.addItem("")
self.lay_input.addWidget(self.cbo_address_source_mode)
self.sw_address_source = QtWidgets.QStackedWidget(self.pnl_input)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.sw_address_source.sizePolicy().hasHeightForWidth())
self.sw_address_source.setSizePolicy(sizePolicy)
self.sw_address_source.setObjectName("sw_address_source")
self.wdg_address_source_1 = QtWidgets.QWidget()
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.wdg_address_source_1.sizePolicy().hasHeightForWidth())
self.wdg_address_source_1.setSizePolicy(sizePolicy)
self.wdg_address_source_1.setObjectName("wdg_address_source_1")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.wdg_address_source_1)
self.horizontalLayout_6.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_6.setSpacing(1)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.lbl_account = QtWidgets.QLabel(self.wdg_address_source_1)
self.lbl_account.setObjectName("lbl_account")
self.horizontalLayout_6.addWidget(self.lbl_account)
self.cbo_hw_account_nr = QtWidgets.QComboBox(self.wdg_address_source_1)
self.cbo_hw_account_nr.setObjectName("cbo_hw_account_nr")
self.horizontalLayout_6.addWidget(self.cbo_hw_account_nr)
self.btn_add_hw_account_nr = QtWidgets.QToolButton(self.wdg_address_source_1)
self.btn_add_hw_account_nr.setObjectName("btn_add_hw_account_nr")
self.horizontalLayout_6.addWidget(self.btn_add_hw_account_nr)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem)
self.lbl_hw_account_base_path = QtWidgets.QLabel(self.wdg_address_source_1)
self.lbl_hw_account_base_path.setObjectName("lbl_hw_account_base_path")
self.horizontalLayout_6.addWidget(self.lbl_hw_account_base_path)
self.sw_address_source.addWidget(self.wdg_address_source_1)
self.wdg_address_source_2 = QtWidgets.QWidget()
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.wdg_address_source_2.sizePolicy().hasHeightForWidth())
self.wdg_address_source_2.setSizePolicy(sizePolicy)
self.wdg_address_source_2.setObjectName("wdg_address_source_2")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.wdg_address_source_2)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.lblSourceBip32Path = QtWidgets.QLabel(self.wdg_address_source_2)
self.lblSourceBip32Path.setObjectName("lblSourceBip32Path")
self.horizontalLayout_2.addWidget(self.lblSourceBip32Path)
self.edt_src_bip32_path = QtWidgets.QLineEdit(self.wdg_address_source_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.edt_src_bip32_path.sizePolicy().hasHeightForWidth())
self.edt_src_bip32_path.setSizePolicy(sizePolicy)
self.edt_src_bip32_path.setMaximumSize(QtCore.QSize(100, 16777215))
self.edt_src_bip32_path.setStyleSheet("background-color: lightgray;")
self.edt_src_bip32_path.setReadOnly(True)
self.edt_src_bip32_path.setObjectName("edt_src_bip32_path")
self.horizontalLayout_2.addWidget(self.edt_src_bip32_path)
self.btn_src_bip32_path = QtWidgets.QToolButton(self.wdg_address_source_2)
self.btn_src_bip32_path.setObjectName("btn_src_bip32_path")
self.horizontalLayout_2.addWidget(self.btn_src_bip32_path)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.sw_address_source.addWidget(self.wdg_address_source_2)
self.wdg_address_source_3 = QtWidgets.QWidget()
self.wdg_address_source_3.setObjectName("wdg_address_source_3")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.wdg_address_source_3)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.lbl_src_masternode = QtWidgets.QLabel(self.wdg_address_source_3)
self.lbl_src_masternode.setObjectName("lbl_src_masternode")
self.horizontalLayout.addWidget(self.lbl_src_masternode)
self.cbo_src_masternodes = QtWidgets.QComboBox(self.wdg_address_source_3)
self.cbo_src_masternodes.setObjectName("cbo_src_masternodes")
self.horizontalLayout.addWidget(self.cbo_src_masternodes)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem2)
self.sw_address_source.addWidget(self.wdg_address_source_3)
self.lay_input.addWidget(self.sw_address_source)
self.btnLoadTransactions = QtWidgets.QPushButton(self.pnl_input)
self.btnLoadTransactions.setAutoDefault(False)
self.btnLoadTransactions.setObjectName("btnLoadTransactions")
self.lay_input.addWidget(self.btnLoadTransactions)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.lay_input.addItem(spacerItem3)
self.verticalLayout_4.addLayout(self.lay_input)
self.verticalLayout.addWidget(self.pnl_input)
self.splitter = QtWidgets.QSplitter(SendPayoutDlg)
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setObjectName("splitter")
self.main_widget = QtWidgets.QWidget(self.splitter)
self.main_widget.setObjectName("main_widget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.main_widget)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setSpacing(2)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.lbl_message_2 = QtWidgets.QLabel(self.main_widget)
self.lbl_message_2.setText("")
self.lbl_message_2.setOpenExternalLinks(True)
self.lbl_message_2.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse|QtCore.Qt.TextSelectableByMouse)
self.lbl_message_2.setObjectName("lbl_message_2")
self.verticalLayout_2.addWidget(self.lbl_message_2)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setContentsMargins(-1, 8, -1, -1)
self.horizontalLayout_4.setSpacing(6)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.btnCheckAll = QtWidgets.QToolButton(self.main_widget)
self.btnCheckAll.setToolTip("")
self.btnCheckAll.setIconSize(QtCore.QSize(12, 12))
self.btnCheckAll.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.btnCheckAll.setObjectName("btnCheckAll")
self.horizontalLayout_4.addWidget(self.btnCheckAll)
self.btnUncheckAll = QtWidgets.QToolButton(self.main_widget)
self.btnUncheckAll.setToolTip("")
self.btnUncheckAll.setIconSize(QtCore.QSize(12, 12))
self.btnUncheckAll.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.btnUncheckAll.setObjectName("btnUncheckAll")
self.horizontalLayout_4.addWidget(self.btnUncheckAll)
self.chbHideCollateralTx = QtWidgets.QCheckBox(self.main_widget)
self.chbHideCollateralTx.setStyleSheet("")
self.chbHideCollateralTx.setObjectName("chbHideCollateralTx")
self.horizontalLayout_4.addWidget(self.chbHideCollateralTx)
self.lbl_message = QtWidgets.QLabel(self.main_widget)
self.lbl_message.setStyleSheet("margin-left:20px;\n"
"font-size:11px;\n"
"background-color: rgb(56, 181, 255);\n"
"color: rgb(255, 255, 255);")
self.lbl_message.setWordWrap(False)
self.lbl_message.setOpenExternalLinks(True)
self.lbl_message.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse|QtCore.Qt.TextSelectableByMouse)
self.lbl_message.setObjectName("lbl_message")
self.horizontalLayout_4.addWidget(self.lbl_message)
spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem4)
self.verticalLayout_2.addLayout(self.horizontalLayout_4)
self.tableView = QtWidgets.QTableView(self.main_widget)
self.tableView.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContentsOnFirstShow)
self.tableView.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tableView.setShowGrid(True)
self.tableView.setSortingEnabled(False)
self.tableView.setObjectName("tableView")
self.tableView.verticalHeader().setVisible(False)
self.tableView.verticalHeader().setCascadingSectionResizes(True)
self.tableView.verticalHeader().setHighlightSections(False)
self.verticalLayout_2.addWidget(self.tableView)
self.dest_widget1 = QtWidgets.QWidget(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.dest_widget1.sizePolicy().hasHeightForWidth())
self.dest_widget1.setSizePolicy(sizePolicy)
self.dest_widget1.setObjectName("dest_widget1")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.dest_widget1)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.dest_widget = QtWidgets.QFrame(self.dest_widget1)
self.dest_widget.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.dest_widget.setObjectName("dest_widget")
self.verticalLayout_3.addWidget(self.dest_widget)
self.verticalLayout.addWidget(self.splitter)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem5)
self.btnSend = QtWidgets.QPushButton(SendPayoutDlg)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnSend.sizePolicy().hasHeightForWidth())
self.btnSend.setSizePolicy(sizePolicy)
self.btnSend.setMinimumSize(QtCore.QSize(200, 0))
self.btnSend.setMaximumSize(QtCore.QSize(200, 16777215))
self.btnSend.setAutoDefault(False)
self.btnSend.setObjectName("btnSend")
self.horizontalLayout_3.addWidget(self.btnSend)
spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem6)
self.btnClose = QtWidgets.QPushButton(SendPayoutDlg)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnClose.sizePolicy().hasHeightForWidth())
self.btnClose.setSizePolicy(sizePolicy)
self.btnClose.setMinimumSize(QtCore.QSize(0, 0))
self.btnClose.setLayoutDirection(QtCore.Qt.LeftToRight)
self.btnClose.setAutoDefault(False)
self.btnClose.setObjectName("btnClose")
self.horizontalLayout_3.addWidget(self.btnClose, 0, QtCore.Qt.AlignRight)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.retranslateUi(SendPayoutDlg)
self.sw_address_source.setCurrentIndex(2)
QtCore.QMetaObject.connectSlotsByName(SendPayoutDlg)
def retranslateUi(self, SendPayoutDlg):
_translate = QtCore.QCoreApplication.translate
SendPayoutDlg.setWindowTitle(_translate("SendPayoutDlg", "Dialog"))
self.label_3.setText(_translate("SendPayoutDlg", "View as"))
self.cbo_address_source_mode.setItemText(0, _translate("SendPayoutDlg", "Wallet Account"))
self.cbo_address_source_mode.setItemText(1, _translate("SendPayoutDlg", "BIP32 Path"))
self.cbo_address_source_mode.setItemText(2, _translate("SendPayoutDlg", "Ghostnode Address"))
self.lbl_account.setText(_translate("SendPayoutDlg", "Account "))
self.btn_add_hw_account_nr.setToolTip(_translate("SendPayoutDlg", "Add new account number"))
self.btn_add_hw_account_nr.setText(_translate("SendPayoutDlg", "."))
self.lbl_hw_account_base_path.setText(_translate("SendPayoutDlg", "..."))
self.lblSourceBip32Path.setText(_translate("SendPayoutDlg", "BIP32 path"))
self.btn_src_bip32_path.setToolTip(_translate("SendPayoutDlg", "Change BIP32 path"))
self.btn_src_bip32_path.setText(_translate("SendPayoutDlg", "..."))
self.lbl_src_masternode.setText(_translate("SendPayoutDlg", "Ghostnode"))
self.btnLoadTransactions.setText(_translate("SendPayoutDlg", "Reload"))
self.btnCheckAll.setText(_translate("SendPayoutDlg", "Select All"))
self.btnUncheckAll.setText(_translate("SendPayoutDlg", "Unselect All"))
self.chbHideCollateralTx.setText(_translate("SendPayoutDlg", "Hide collateral utxos"))
self.lbl_message.setText(_translate("SendPayoutDlg", "...."))
self.btnSend.setText(_translate("SendPayoutDlg", "Prepare Transaction"))
self.btnClose.setText(_translate("SendPayoutDlg", "Close"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
SendPayoutDlg = QtWidgets.QDialog()
ui = Ui_SendPayoutDlg()
ui.setupUi(SendPayoutDlg)
SendPayoutDlg.show()
sys.exit(app.exec_())
| 61.701818 | 116 | 0.753713 | 16,480 | 0.97124 | 0 | 0 | 0 | 0 | 0 | 0 | 1,589 | 0.093647 |
25976939b96e834c2bfbe96de1759d0543a5294c | 254 | py | Python | server/src/models/embedded_models/method_step_embedded_model.py | minhman727/miao-nutrition-assistant | c0f999756ae1a03371975be03ec7b7470f4b39e2 | [
"MIT"
] | null | null | null | server/src/models/embedded_models/method_step_embedded_model.py | minhman727/miao-nutrition-assistant | c0f999756ae1a03371975be03ec7b7470f4b39e2 | [
"MIT"
] | null | null | null | server/src/models/embedded_models/method_step_embedded_model.py | minhman727/miao-nutrition-assistant | c0f999756ae1a03371975be03ec7b7470f4b39e2 | [
"MIT"
] | null | null | null | from mongoengine import *
class MethodStep(EmbeddedDocument):
description = StringField(require=True)
images = ListField(ImageField(db_alias="miao"), null=True)
meta = {
"db_alias": "miao",
"collection": "method_steps"
} | 25.4 | 62 | 0.661417 | 227 | 0.893701 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.188976 |
25978d1f7d69811ce75ba04968a1787f942af8b3 | 1,579 | py | Python | silot/experiments/moving_mnist/silot_run.py | e2crawfo/silot | d49a41dde74db62d62bdd9ba5d35ff54c07fd9bc | [
"MIT"
] | 16 | 2019-11-27T18:25:55.000Z | 2021-05-28T06:15:13.000Z | silot/experiments/moving_mnist/silot_run.py | e2crawfo/silot | d49a41dde74db62d62bdd9ba5d35ff54c07fd9bc | [
"MIT"
] | 5 | 2020-08-03T16:17:53.000Z | 2022-02-09T23:38:55.000Z | silot/experiments/moving_mnist/silot_run.py | e2crawfo/silot | d49a41dde74db62d62bdd9ba5d35ff54c07fd9bc | [
"MIT"
] | 1 | 2020-11-20T10:37:57.000Z | 2020-11-20T10:37:57.000Z | from dps.hyper import run_experiment
from dps.utils import copy_update
from dps.tf.updater import DummyUpdater
from silot.run import basic_config, alg_configs, env_configs
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--max-digits', type=int, choices=[6, 12], required=True)
args, _ = parser.parse_known_args()
readme = "Running SILOT experiment on moving_mnist."
run_kwargs = dict(
max_hosts=1, ppn=6, cpp=2, gpu_set="0,1", pmem=10000, project="rpp-bengioy",
wall_time="96hours", cleanup_time="5mins", slack_time="5mins", n_repeats=6,
copy_locally=True, config=dict(render_step=1000000)
)
durations = dict(
long=copy_update(run_kwargs),
short=dict(
wall_time="180mins", gpu_set="0", ppn=4, n_repeats=4, distributions=None,
config=dict(max_steps=3000, render_step=500, eval_step=100, display_step=100, stage_steps=600, curriculum=[dict()]),
),
build=dict(
ppn=1, cpp=1, gpu_set="0", wall_time="180mins", n_repeats=1, distributions=None,
config=dict(
do_train=False, get_updater=DummyUpdater, render_hook=None,
curriculum=[dict()] + [dict(max_digits=i, n_train=100, n_val=1000) for i in range(1, 13)]
)
),
)
config = basic_config.copy()
config.update(env_configs['moving_mnist'])
config.update(alg_configs['silot'], max_digits=args.max_digits)
config.update(final_count_prior_log_odds=0.0125, stage_steps=40000)
run_experiment(
"moving_mnist_silot",
config, "silot on moving_mnist.",
name_variables="max_digits",
durations=durations
)
| 34.326087 | 124 | 0.718809 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 199 | 0.126029 |
2597942237584a092a777b8ebc52564660ff2499 | 251 | py | Python | evo_mwc/__init__.py | mrazomej/evo_mwc | b69c800c5518d906cd2c65334c6feffdbab5acf1 | [
"MIT"
] | null | null | null | evo_mwc/__init__.py | mrazomej/evo_mwc | b69c800c5518d906cd2c65334c6feffdbab5acf1 | [
"MIT"
] | 2 | 2020-06-01T22:36:08.000Z | 2020-07-01T23:32:06.000Z | evo_mwc/__init__.py | mrazomej/evo_mwc | b69c800c5518d906cd2c65334c6feffdbab5acf1 | [
"MIT"
] | 1 | 2019-07-09T21:18:52.000Z | 2019-07-09T21:18:52.000Z | # -*- coding: utf-8 -*-
"""Top level package for evo_utils utilities"""
from . import viz
from . import fitderiv
from . import model
__author__ = """Manuel Razo"""
__email__ = """mrazomej {at} caltech.edu"""
__version__ = '0.0.1'
name = 'evo_mwc'
| 17.928571 | 47 | 0.661355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 134 | 0.533865 |
2597d104fd536046cfa0f78ca2aa1bd236dccb01 | 112 | py | Python | arclet/alconna/__main__.py | SocialSisterYi/Alconna | 3e1d986ca5486dfd3c7bd80118a75364ab6831b8 | [
"MIT"
] | null | null | null | arclet/alconna/__main__.py | SocialSisterYi/Alconna | 3e1d986ca5486dfd3c7bd80118a75364ab6831b8 | [
"MIT"
] | null | null | null | arclet/alconna/__main__.py | SocialSisterYi/Alconna | 3e1d986ca5486dfd3c7bd80118a75364ab6831b8 | [
"MIT"
] | null | null | null | """Alconna 命令行入口"""
from arclet.alconna.builtin.commandline import main
if __name__ == "__main__":
main()
| 16 | 51 | 0.705357 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.319672 |
2597ff84931dd28d221994ec4d847762df2128d6 | 2,427 | py | Python | profiles/viewsets.py | Hacksbr/resume | 2568bb1f4a75af9d72ed6f3013c9112b93001b43 | [
"MIT"
] | null | null | null | profiles/viewsets.py | Hacksbr/resume | 2568bb1f4a75af9d72ed6f3013c9112b93001b43 | [
"MIT"
] | 11 | 2020-10-25T04:11:08.000Z | 2021-10-03T05:38:11.000Z | profiles/viewsets.py | Hacksbr/resume-api | 2568bb1f4a75af9d72ed6f3013c9112b93001b43 | [
"MIT"
] | 1 | 2020-10-30T16:55:08.000Z | 2020-10-30T16:55:08.000Z | from django.contrib.auth import get_user_model
from rest_framework import viewsets, status, permissions
from rest_framework.response import Response
from profiles.models import Profile
from profiles.permissions import IsUserProfileOrAdmin
from profiles import serializers
User = get_user_model()
class ProfileViewSet(viewsets.ModelViewSet):
queryset = Profile.objects.all()
lookup_field = 'uuid'
permission_classes = []
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
instance = self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
instance_serializer = serializers.ProfileSerializer(instance)
return Response(instance_serializer.data, status=status.HTTP_201_CREATED, headers=headers)
def perform_create(self, serializer):
return serializer.save()
def update(self, request, *args, **kwargs):
if not request.data.get('user'):
return Response(dict(error='Attribute \'user\' is missing.'), status=status.HTTP_400_BAD_REQUEST)
if not request.data.get('social_link'):
return Response(dict(error='Attribute \'social_link\' is missing.'), status=status.HTTP_400_BAD_REQUEST)
return super().update(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
user = instance.user
social_link = instance.social_link
social_link.delete()
self.perform_destroy(instance)
user.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
def get_serializer_class(self):
if self.action == 'create':
return serializers.ProfileCreateSerializer
if self.action in ['update', 'partial_update']:
return serializers.ProfileUpdateSerializer
return serializers.ProfileSerializer
def get_permissions(self):
if self.action in ['list']:
self.permission_classes = (
permissions.IsAuthenticated,
permissions.IsAdminUser
)
if self.action in ['update', 'partial_update', 'destroy']:
self.permission_classes = (
permissions.IsAuthenticated,
IsUserProfileOrAdmin
)
return super().get_permissions()
| 32.36 | 116 | 0.677379 | 2,125 | 0.875567 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.068809 |
259eedab428d052f5de5cef2f33e8a5144b57d54 | 1,180 | py | Python | setup.py | transientlunatic/gravitic | 3f818b5b52dafd8db0cef8f7da930996c84125be | [
"BSD-3-Clause"
] | 2 | 2021-04-12T10:38:58.000Z | 2021-04-12T13:53:16.000Z | setup.py | transientlunatic/gravitic | 3f818b5b52dafd8db0cef8f7da930996c84125be | [
"BSD-3-Clause"
] | null | null | null | setup.py | transientlunatic/gravitic | 3f818b5b52dafd8db0cef8f7da930996c84125be | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup
# with open('README.rst') as readme_file:
# readme = readme_file.read()
# with open('HISTORY.rst') as history_file:
# history = history_file.read()
with open("requirements.txt") as requires_file:
requirements = requires_file.read().split("\n")
requirements = [requirement for requirement in requirements if not ("+" in requirement)]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='gravitic',
use_scm_version=True,
setup_requires=['setuptools_scm'],
description="""An abstract gravitational wave pipeline constructor.""",
#long_description=readme + '\n\n' + history,
author="Daniel Williams",
author_email='daniel.williams@ligo.org',
url='https://github.com/transientlunatic/gravitic',
packages=['gravitic'],
package_dir={'gravitic': 'gravitic'},
entry_points={
'console_scripts': [
'gravitic=gravitic.cli:gravitic'
]
},
include_package_data=True,
# install_requires=requirements,
zip_safe=True,
# keywords='supervisor, pe, ligo, asimov',
test_suite='tests',
tests_require=test_requirements,
)
| 28.780488 | 88 | 0.683051 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 596 | 0.505085 |
259fbbdca0af61061f255ad37e3048c4085965f8 | 307 | py | Python | src/briefcase/apps/spreadsheet/events.py | Briefcase/Briefcase | 34403c69c19cee1e682293a2c3c3f17c631b9246 | [
"BSD-2-Clause"
] | 2 | 2017-10-19T15:39:31.000Z | 2022-02-09T02:59:27.000Z | src/briefcase/apps/spreadsheet/events.py | Briefcase/Briefcase | 34403c69c19cee1e682293a2c3c3f17c631b9246 | [
"BSD-2-Clause"
] | 2 | 2021-06-16T02:08:42.000Z | 2021-12-06T07:43:32.000Z | src/briefcase/apps/spreadsheet/events.py | Briefcase/Briefcase | 34403c69c19cee1e682293a2c3c3f17c631b9246 | [
"BSD-2-Clause"
] | 2 | 2016-05-25T07:28:13.000Z | 2021-04-02T03:55:08.000Z | def onMessage(request, message, socket):
print "ON MESSAGE! :" + message
socket.sendToMe("Hello Back!")
pass
def onConnect(requestData):
#socket.sendToAll("Test All")
#socket.sendToAllButMe("Test Not Me")
#socket.sendToMe("Test Me")
return True
def onDisconnect():
pass | 23.615385 | 41 | 0.661238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 121 | 0.394137 |
25a0391cf57456757d52425e8939034685150c52 | 6,278 | py | Python | hal/vanille_hal.py | ThotAlion/vanille | b5b43fce39110b44f11f128e123611c988ac7bfc | [
"MIT"
] | 1 | 2021-04-03T09:36:40.000Z | 2021-04-03T09:36:40.000Z | hal/vanille_hal.py | ThotAlion/vanille_hal | b5b43fce39110b44f11f128e123611c988ac7bfc | [
"MIT"
] | null | null | null | hal/vanille_hal.py | ThotAlion/vanille_hal | b5b43fce39110b44f11f128e123611c988ac7bfc | [
"MIT"
] | null | null | null | import rclpy
import json,numpy
from numpy import clip
from rclpy.node import Node
from std_msgs.msg import Float64MultiArray
from sensor_msgs.msg import JointState
from diagnostic_msgs.msg import DiagnosticStatus, KeyValue
import can
from tinymovr import Tinymovr
from tinymovr.iface.can import CAN
from tinymovr.units import get_registry
from math import pi
ureg = get_registry()
amps = ureg.ampere
s = ureg.second
minute = ureg.minute
tick = ureg.tick
rad = ureg.radian
turn = ureg.turn
deg = ureg.degree
class HardwareAbstractionLayer(Node):
def __init__(self):
super().__init__('HardwareAbstractionLayer')
# Lecture du fichier de configuration des moteurs
f = open("/home/vanille/ros2_ws/src/hal/config.json","r")
self.config = json.load(f)
f.close()
self.can_bus = can.Bus(bustype='slcan',channel='/dev/ttyACM0',bitrate=1000000)
self.iface = CAN(self.can_bus)
for kmotor,motor in self.config['motors'].items():
if "id_can" in motor :
motor["tm"]=Tinymovr(node_id=int(motor["id_can"]), iface=self.iface)
assert(motor["tm"].motor_config.flags == 1)
motor["offset"] = motor["tm"].encoder_estimates.position
self.declare_parameter(kmotor+"_max_speed",motor["max_speed"])
self.declare_parameter(kmotor+"_max_current",motor["max_current"])
motor["tm"].set_limits(motor["max_speed"]*turn/minute,motor["max_current"]*amps)
self.declare_parameter(kmotor+"_gain_integrator",motor["gain_integrator"])
motor["tm"].set_integrator_gains(motor["gain_integrator"])
self.publisherJoint_ = self.create_publisher(JointState, '/vanille/joint_states', 1)
self.publisherDiag_ = self.create_publisher(DiagnosticStatus, 'diagnostic',1)
self.subscription = self.create_subscription(
JointState,
'/vanille/joint_position_cmd',
self.update_position_cmd,
1)
timer_period = 0.01 # seconds
timer_period_diag = 2 # seconds
self.timer = self.create_timer(timer_period, self.routine)
self.timerDiag = self.create_timer(timer_period_diag, self.updateDiagnostic)
def update_position_cmd(self, msg : JointState):
for imotor in range(len(msg.name)):
kmotor = msg.name[imotor]
if kmotor in self.config['motors']:
motor = self.config['motors'][kmotor]
position_target = msg.position[imotor]*rad
if numpy.isnan(position_target) :
motor["tm"].current_control()
motor["tm"].set_cur_setpoint(0.0*amps)
else:
position_target = clip(position_target,motor["limit_lower"]*deg, motor["limit_upper"]*deg)
if motor["orientation"] == "direct":
motor["tm"].position_control()
# motor["tm"].set_pos_setpoint(motor["offset"]+position_target*float(motor["ratio"]))
motor["tm"].set_pos_setpoint(motor["offset"]+position_target*motor["ratio"])
elif motor["orientation"] == "indirect":
motor["tm"].position_control()
# motor["tm"].set_pos_setpoint(motor["offset"]-position_target*float(motor["ratio"]))
motor["tm"].set_pos_setpoint(motor["offset"]-position_target*motor["ratio"])
def read_positions(self):
msg = JointState()
msg.header.stamp = super().get_clock().now().to_msg()
msg.name = []
msg.position = []
msg.velocity = []
msg.effort = []
for kmotor,motor in self.config['motors'].items():
msg.name.append(motor["joint_name"])
if motor["orientation"] == "direct":
msg.position.append(float((motor["tm"].encoder_estimates.position-motor["offset"])/float(motor["ratio"])))
msg.velocity.append(motor["tm"].encoder_estimates.velocity.to(rad/s).m/float(motor["ratio"]))
msg.effort.append(motor["tm"].Iq.estimate.m*float(motor["ratio"]))
elif motor["orientation"] == "indirect":
msg.position.append(float(-(motor["tm"].encoder_estimates.position-motor["offset"])/float(motor["ratio"])))
msg.velocity.append(-motor["tm"].encoder_estimates.velocity.to(rad/s).m/float(motor["ratio"]))
msg.effort.append(-motor["tm"].Iq.estimate.m*float(motor["ratio"]))
self.publisherJoint_.publish(msg)
def updateDiagnostic(self):
# tmx.device_info = {"device_id": 99999, "fw_major": 0, "fw_minor": 7, "fw_patch": 1, "temp": 45}
# tmx.motor_config = {"flags": 1, "R": 200, "pole_pairs": 11, "L": 100}
msg = DiagnosticStatus()
msg1 = KeyValue()
for kmotor,motor in self.config['motors'].items():
msg.values= []
msg.hardware_id = kmotor
msg.name = kmotor
msg.message = "device_info motor_config"
for kinfo,info in motor["tm"].device_info.items():
msg1 = KeyValue()
msg1.key=kinfo
msg1.value=str(info)
msg.values.append(msg1)
for kinfo,info in motor["tm"].motor_config.items():
msg1 = KeyValue()
msg1.key=kinfo
msg1.value=str(info)
msg.values.append(msg1)
self.publisherDiag_.publish(msg)
def routine(self):
self.read_positions()
def stop(self):
self.get_logger().info(f'Stopping HAL Node')
for kmotor,motor in self.config['motors'].items():
motor["tm"].idle()
def main(args=None):
print('Hi from hal.')
rclpy.init(args=args)
hal_node = HardwareAbstractionLayer()
try:
rclpy.spin(hal_node)
except KeyboardInterrupt:
pass
hal_node.stop()
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
hal_node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main() | 41.03268 | 123 | 0.600988 | 5,309 | 0.845651 | 0 | 0 | 0 | 0 | 0 | 0 | 1,260 | 0.200701 |
25a13c41bb4dc3ac0459cd775b2eec9b971b28e3 | 383 | py | Python | pretrained-model/speaker-embedding/vggvox/constants.py | dtx525942103/malaya-speech | 212c4e890d0cbcbbca0037c89a698b68b05db393 | [
"MIT"
] | null | null | null | pretrained-model/speaker-embedding/vggvox/constants.py | dtx525942103/malaya-speech | 212c4e890d0cbcbbca0037c89a698b68b05db393 | [
"MIT"
] | null | null | null | pretrained-model/speaker-embedding/vggvox/constants.py | dtx525942103/malaya-speech | 212c4e890d0cbcbbca0037c89a698b68b05db393 | [
"MIT"
] | 1 | 2021-08-19T02:34:41.000Z | 2021-08-19T02:34:41.000Z | # Signal processing
SAMPLE_RATE = 16000
PREEMPHASIS_ALPHA = 0.97
FRAME_LEN = 0.025
FRAME_STEP = 0.01
NUM_FFT = 512
BUCKET_STEP = 1
MAX_SEC = 10
# Model
WEIGHTS_FILE = "data/model/weights.h5"
COST_METRIC = "cosine" # euclidean or cosine
INPUT_SHAPE=(NUM_FFT,None,1)
# IO
ENROLL_LIST_FILE = "cfg/enroll_list.csv"
TEST_LIST_FILE = "cfg/test_list.csv"
RESULT_FILE = "res/results.csv"
| 20.157895 | 45 | 0.754569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.362924 |
25a3cd2e397d3a4fa1405f0e90b2284ec1f6c787 | 6,351 | py | Python | pyramid/tests/test_scripts/test_proutes.py | danielpronych/pyramid-doxygen | ad95a8c151c2c4e029e03aed2feda2993380f36f | [
"BSD-2-Clause"
] | null | null | null | pyramid/tests/test_scripts/test_proutes.py | danielpronych/pyramid-doxygen | ad95a8c151c2c4e029e03aed2feda2993380f36f | [
"BSD-2-Clause"
] | null | null | null | pyramid/tests/test_scripts/test_proutes.py | danielpronych/pyramid-doxygen | ad95a8c151c2c4e029e03aed2feda2993380f36f | [
"BSD-2-Clause"
] | null | null | null | import unittest
from pyramid.tests.test_scripts import dummy
class TestPRoutesCommand(unittest.TestCase):
def _getTargetClass(self):
from pyramid.scripts.proutes import PRoutesCommand
return PRoutesCommand
def _makeOne(self):
cmd = self._getTargetClass()([])
cmd.bootstrap = (dummy.DummyBootstrap(),)
cmd.args = ('/foo/bar/myapp.ini#myapp',)
return cmd
def test_good_args(self):
cmd = self._getTargetClass()([])
cmd.bootstrap = (dummy.DummyBootstrap(),)
cmd.args = ('/foo/bar/myapp.ini#myapp', 'a=1')
route = dummy.DummyRoute('a', '/a')
mapper = dummy.DummyMapper(route)
cmd._get_mapper = lambda *arg: mapper
L = []
cmd.out = lambda msg: L.append(msg)
cmd.run()
self.assertTrue('<unknown>' in ''.join(L))
def test_bad_args(self):
cmd = self._getTargetClass()([])
cmd.bootstrap = (dummy.DummyBootstrap(),)
cmd.args = ('/foo/bar/myapp.ini#myapp', 'a')
route = dummy.DummyRoute('a', '/a')
mapper = dummy.DummyMapper(route)
cmd._get_mapper = lambda *arg: mapper
self.assertRaises(ValueError, cmd.run)
def test_no_routes(self):
command = self._makeOne()
mapper = dummy.DummyMapper()
command._get_mapper = lambda *arg: mapper
L = []
command.out = L.append
result = command.run()
self.assertEqual(result, 0)
self.assertEqual(L, [])
def test_no_mapper(self):
command = self._makeOne()
command._get_mapper = lambda *arg:None
L = []
command.out = L.append
result = command.run()
self.assertEqual(result, 0)
self.assertEqual(L, [])
def test_single_route_no_route_registered(self):
command = self._makeOne()
route = dummy.DummyRoute('a', '/a')
mapper = dummy.DummyMapper(route)
command._get_mapper = lambda *arg: mapper
L = []
command.out = L.append
result = command.run()
self.assertEqual(result, 0)
self.assertEqual(len(L), 3)
self.assertEqual(L[-1].split(), ['a', '/a', '<unknown>'])
def test_route_with_no_slash_prefix(self):
command = self._makeOne()
route = dummy.DummyRoute('a', 'a')
mapper = dummy.DummyMapper(route)
command._get_mapper = lambda *arg: mapper
L = []
command.out = L.append
result = command.run()
self.assertEqual(result, 0)
self.assertEqual(len(L), 3)
self.assertEqual(L[-1].split(), ['a', '/a', '<unknown>'])
def test_single_route_no_views_registered(self):
from zope.interface import Interface
from pyramid.registry import Registry
from pyramid.interfaces import IRouteRequest
registry = Registry()
def view():pass
class IMyRoute(Interface):
pass
registry.registerUtility(IMyRoute, IRouteRequest, name='a')
command = self._makeOne()
route = dummy.DummyRoute('a', '/a')
mapper = dummy.DummyMapper(route)
command._get_mapper = lambda *arg: mapper
L = []
command.out = L.append
command.bootstrap = (dummy.DummyBootstrap(registry=registry),)
result = command.run()
self.assertEqual(result, 0)
self.assertEqual(len(L), 3)
self.assertEqual(L[-1].split()[:3], ['a', '/a', 'None'])
def test_single_route_one_view_registered(self):
from zope.interface import Interface
from pyramid.registry import Registry
from pyramid.interfaces import IRouteRequest
from pyramid.interfaces import IViewClassifier
from pyramid.interfaces import IView
registry = Registry()
def view():pass
class IMyRoute(Interface):
pass
registry.registerAdapter(view,
(IViewClassifier, IMyRoute, Interface),
IView, '')
registry.registerUtility(IMyRoute, IRouteRequest, name='a')
command = self._makeOne()
route = dummy.DummyRoute('a', '/a')
mapper = dummy.DummyMapper(route)
command._get_mapper = lambda *arg: mapper
L = []
command.out = L.append
command.bootstrap = (dummy.DummyBootstrap(registry=registry),)
result = command.run()
self.assertEqual(result, 0)
self.assertEqual(len(L), 3)
compare_to = L[-1].split()[:3]
self.assertEqual(compare_to, ['a', '/a', '<function'])
def test_single_route_one_view_registered_with_factory(self):
from zope.interface import Interface
from pyramid.registry import Registry
from pyramid.interfaces import IRouteRequest
from pyramid.interfaces import IViewClassifier
from pyramid.interfaces import IView
registry = Registry()
def view():pass
class IMyRoot(Interface):
pass
class IMyRoute(Interface):
pass
registry.registerAdapter(view,
(IViewClassifier, IMyRoute, IMyRoot),
IView, '')
registry.registerUtility(IMyRoute, IRouteRequest, name='a')
command = self._makeOne()
def factory(request): pass
route = dummy.DummyRoute('a', '/a', factory=factory)
mapper = dummy.DummyMapper(route)
command._get_mapper = lambda *arg: mapper
L = []
command.out = L.append
command.bootstrap = (dummy.DummyBootstrap(registry=registry),)
result = command.run()
self.assertEqual(result, 0)
self.assertEqual(len(L), 3)
self.assertEqual(L[-1].split()[:3], ['a', '/a', '<unknown>'])
def test__get_mapper(self):
from pyramid.registry import Registry
from pyramid.urldispatch import RoutesMapper
command = self._makeOne()
registry = Registry()
result = command._get_mapper(registry)
self.assertEqual(result.__class__, RoutesMapper)
class Test_main(unittest.TestCase):
def _callFUT(self, argv):
from pyramid.scripts.proutes import main
return main(argv, quiet=True)
def test_it(self):
result = self._callFUT(['proutes'])
self.assertEqual(result, 2)
| 36.5 | 72 | 0.600063 | 6,277 | 0.988348 | 0 | 0 | 0 | 0 | 0 | 0 | 254 | 0.039994 |
25a4112bd4c2cf802cc07a430e00d86257820ce8 | 2,120 | py | Python | pystatreduce/doc/plot/plot_quadratic-distribution_overlay.py | OptimalDesignLab/pyStatReduce | 9ea128409b91dd582e574e2e1cc153572b6c60a4 | [
"MIT"
] | null | null | null | pystatreduce/doc/plot/plot_quadratic-distribution_overlay.py | OptimalDesignLab/pyStatReduce | 9ea128409b91dd582e574e2e1cc153572b6c60a4 | [
"MIT"
] | null | null | null | pystatreduce/doc/plot/plot_quadratic-distribution_overlay.py | OptimalDesignLab/pyStatReduce | 9ea128409b91dd582e574e2e1cc153572b6c60a4 | [
"MIT"
] | null | null | null | # plot normal quadratic distribution overlay
# Plots the effects of theta in the 2D quadratic by showing the normal
# distribution on top of the regular quadratic
import numpy as np
from mpl_toolkits import mplot3d
import matplotlib
import matplotlib.pyplot as plt
import os
import sys
import errno
sys.path.insert(0, '../../src')
import numpy as np
import chaospy as cp
from stochastic_collocation import StochasticCollocation
from quantity_of_interest import QuantityOfInterest
from dimension_reduction import DimensionReduction
from stochastic_arnoldi.arnoldi_sample import ArnoldiSampling
import examples
# Quadratic object
mu = np.zeros(2)
sigma = np.ones(2) # np.array([2, 0.1])
theta = 0.0 # np.pi/3
tuple = (theta,)
jdist = cp.MvNormal(mu, np.diag(sigma))
QoI = examples.Paraboloid2D(2, tuple)
# Plot the normal distribution
nx = 100
xi_1 = np.linspace(-3,3,nx)
xi_2 = np.linspace(-3,3,nx)
xi = np.zeros(2)
probability_density = np.zeros([nx,nx])
J_xi = np.zeros([nx, nx])
for i in xrange(0, nx):
for j in xrange(0,nx):
xi[:] = np.array([xi_1[i], xi_2[j]])
probability_density[j,i] = jdist.pdf(xi)
J_xi[j,i] = QoI.eval_QoI(mu, xi)
# Plot the distribution
if theta == 0.0:
fname = "./pdfs/stadard_quadratic-distribution_0.pdf"
elif theta == np.pi/3:
fname = "./pdfs/2_01_quadratic-distribution_60.pdf"
plt.rc('text', usetex=True)
matplotlib.rcParams['mathtext.fontset'] = 'cm'
fig = plt.figure("probability_distribution", figsize=(6,6))
ax = plt.axes()
cp1 = ax.contour(xi_1, xi_2, probability_density, colors="red", linestyles='dashed', linewidths=1.0)
cp2 = ax.contour(xi_1, xi_2, J_xi, levels=[2,4,8,16,32,64,128,256, 512], colors="black", linewidths=1.0)
# ax.clabel(cp, inline=1, fmt='%1.1f', fontsize=8)
lines = [cp1.collections[0], cp2.collections[0]]
labels = [r'Probability density, $P_{\Xi} (\xi)$', r'Quantity of Interest, $J$']
ax.set_xlabel(r'$\xi_1$', fontsize=16)
ax.set_ylabel(r'$\xi_2$', fontsize=16)
ax.tick_params(axis='both', labelsize=16)
plt.legend(lines, labels, fontsize=16)
plt.tight_layout()
# plt.show()
fig.savefig(fname, format="pdf")
| 31.176471 | 104 | 0.718868 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 593 | 0.279717 |
25a41d20123f1a8a57294e429d62e5549539156f | 518 | py | Python | utils/sqlite.py | ztytotoro/python-scripts | a337abd6c2705483d3167f611d671a2b7ee517f7 | [
"MIT"
] | null | null | null | utils/sqlite.py | ztytotoro/python-scripts | a337abd6c2705483d3167f611d671a2b7ee517f7 | [
"MIT"
] | null | null | null | utils/sqlite.py | ztytotoro/python-scripts | a337abd6c2705483d3167f611d671a2b7ee517f7 | [
"MIT"
] | null | null | null | # 导入:
from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# Create the declarative base class for ORM models:
Base = declarative_base()
# Define the Project model:
class Project(Base):
    """ORM model for the `project` table: a primary-key id plus a name."""
    # Table name:
    __tablename__ = 'project'
    # Table columns (both limited to 20 characters):
    id = Column(String(20), primary_key=True)
    name = Column(String(20))
# Initialize the database connection:
engine = create_engine(
'mysql+mysqlconnector://root:password@localhost:3306/test')
# Create the DBSession session factory:
DBSession = sessionmaker(bind=engine) | 21.583333 | 63 | 0.737452 | 167 | 0.285959 | 0 | 0 | 0 | 0 | 0 | 0 | 203 | 0.347603 |
25a44b7c5e0019b3e4d2a53a51bc6e03d4f6513c | 4,753 | py | Python | cracking-the-coding-interview/1-chapter3.py | tranquan/coding-dojo | 538a1bdab2bae2df2a68ca2b4fb4ad11070c6049 | [
"MIT"
] | 1 | 2019-02-24T18:51:25.000Z | 2019-02-24T18:51:25.000Z | cracking-the-coding-interview/1-chapter3.py | tranquan/coding-dojo | 538a1bdab2bae2df2a68ca2b4fb4ad11070c6049 | [
"MIT"
] | null | null | null | cracking-the-coding-interview/1-chapter3.py | tranquan/coding-dojo | 538a1bdab2bae2df2a68ca2b4fb4ad11070c6049 | [
"MIT"
] | 1 | 2020-07-02T13:50:21.000Z | 2020-07-02T13:50:21.000Z | import os
import sys
import math
# 3.1 using single array to implement 3 stacks
# 1) - using 3 indexes to for each stack pointers, and 3 size to set maximum of stack
# 2) - using 3 indexes, 1 begin at the first index and increase, 1 begin at the bottom and decrease, 1 at the bottom and re-balanced every time
class ArrayStack:
    """Three fixed-capacity stacks multiplexed onto one backing array.

    Stack k (k in 0..2) owns slots [k*stack_size, (k+1)*stack_size) of
    self.array; i1/i2/i3 hold each stack's element count, i.e. the offset
    of the next free slot within that stack's region.
    """

    def __init__(self, size):
        # Per-instance state (the original kept these as shared, mutable
        # class attributes, which every instance would have aliased).
        self.stack_size = size
        self.i1 = 0
        self.i2 = 0
        self.i3 = 0
        self.array = [0] * (self.stack_size * 3)

    def dump(self):
        """Print the raw backing array (debug helper)."""
        print(self.array)

    def get_stack_index(self, stack):
        """Return the next-free-slot offset of `stack`, or -1 if invalid."""
        if stack == 0:
            return self.i1
        elif stack == 1:
            return self.i2
        elif stack == 2:
            return self.i3
        return -1

    def move_stack_pointer(self, stack, offset):
        """Shift the pointer of `stack` by `offset` slots."""
        if stack == 0:
            self.i1 += offset
        elif stack == 1:
            self.i2 += offset
        elif stack == 2:
            self.i3 += offset

    def push(self, stack, value):
        """Push `value` onto stack 0-2; return False when invalid or full."""
        if stack < 0 or stack > 2:
            return False
        index = self.get_stack_index(stack)
        if index < 0 or index >= self.stack_size:  # full stack
            return False
        array_index = stack * self.stack_size + index
        self.array[array_index] = value
        self.move_stack_pointer(stack, 1)

    def pop(self, stack):
        """Pop and return the top of stack 0-2; return False when invalid or empty.

        Bug fixes vs. the original: read self.array (the original referenced
        an undefined global `array`), read the slot *below* the next-free
        pointer, and reject empty stacks instead of full ones.
        """
        if stack < 0 or stack > 2:
            return False
        index = self.get_stack_index(stack)
        if index <= 0:  # empty stack
            return False
        array_index = stack * self.stack_size + index - 1
        value = self.array[array_index]
        self.move_stack_pointer(stack, -1)
        return value
# Test
# stack = ArrayStack(10)
# stack.push(1, 10)
# stack.dump()
# 3.2 implement stack that support get min in O(1)
# -> stack still operate normally,
# -> every node will have a next_min to point to another min when it removed
class MinNode:
    """Linked-list node for MinStack: a stack link plus a min-chain link."""
    # Class-level defaults (instances overwrite these in __init__).
    value = 0
    next_node = None
    next_min_node = None
    def __init__(self, value):
        self.value = value
        self.next_node = None      # next element beneath this one on the stack
        self.next_min_node = None  # minimum to restore when this (min) node is popped
class MinStack:
    """Stack supporting min() in O(1): every new minimum remembers, via
    next_min_node, the minimum it displaced, so pop() can restore it."""
    root = None      # top of the stack
    min_node = None  # node holding the current minimum

    def __init__(self):
        self.root = None

    def push(self, value):
        """Push value; extend the minimum chain when it is a new minimum."""
        node = MinNode(value)
        if self.root is None:
            self.root = node
            self.min_node = node
            return
        node.next_node = self.root
        self.root = node
        if node.value < self.min_node.value:
            node.next_min_node = self.min_node
            self.min_node = node

    def pop(self):
        """Pop and return the top value; restore the previous minimum if the
        popped node was the current minimum. Returns None when empty."""
        if self.root is None:
            return None
        node = self.root
        self.root = node.next_node
        if node is self.min_node:
            self.min_node = node.next_min_node
        return node.value

    def min(self):
        """Return the current minimum value (stack must be non-empty)."""
        return self.min_node.value
# Test
# min_stack = MinStack()
# min_stack.push(10)
# min_stack.push(5)
# min_stack.push(12)
# min_stack.push(1)
# print(min_stack.min())
# min_stack.pop()
# print(min_stack.min())
# 3.3 Stack of Plates: combine many stack to create unlimitted stack
class SetStack:
    """Stack of bounded sub-stacks ("stack of plates"): when the active
    sub-stack reaches stack_size, pushes roll over to a fresh one."""

    def __init__(self, stack_size):
        # Per-instance state (the original also declared these as shared,
        # mutable class attributes). `stack` is the sub-stack taking pushes
        # and is always the last entry of `stacks`.
        self.stacks = list()
        self.stack_size = stack_size
        self.stack = list()
        self.stacks.append(self.stack)

    def dumpStack(self):
        """Print every sub-stack, oldest first (debug helper)."""
        for stack in self.stacks:
            print(stack)

    def push(self, value):
        """Push value, starting a new sub-stack when the active one is full."""
        if len(self.stack) >= self.stack_size:
            self.stack = list()
            self.stacks.append(self.stack)
        self.stack.append(value)

    def pop(self):
        """Pop the most recent value, or None when everything is empty.

        Bug fix: the original popped the still-registered empty active
        sub-stack off self.stacks and then popped from that empty list,
        raising IndexError whenever a pop crossed a sub-stack boundary.
        """
        # Discard exhausted trailing sub-stacks first.
        while self.stacks and len(self.stacks[-1]) == 0:
            self.stacks.pop()
        if not self.stacks:
            # Fully consumed: keep one empty sub-stack ready for pushes.
            self.stack = list()
            self.stacks.append(self.stack)
            return None
        self.stack = self.stacks[-1]
        return self.stack.pop()

    def popAt(self, stack_index):
        """Pop from the sub-stack at stack_index, or None if out of range."""
        if stack_index < len(self.stacks):
            stack = self.stacks[stack_index]
            return stack.pop()
        else:
            return None
# Test
# stack = SetStack(3)
# stack.push(1)
# stack.push(2)
# stack.push(3)
# stack.push(4)
# stack.push(5)
# stack.dumpStack()
# t = stack.popAt(0)
# print(t)
# 3.4 Using two queue to create a stack
# -> using list1 to push and list2 to pop
# -> when list2 is empty, get all data from list1 and push to iter
class MyQueue:
    """FIFO queue built from two LIFO lists (the classic two-stack queue)."""
    lst1 = None  # in-box: receives newly added items
    lst2 = None  # out-box: holds items in reversed (dequeue) order

    def __init__(self):
        self.lst1 = list()
        self.lst2 = list()

    def add(self, value):
        """Enqueue value."""
        self.lst1.append(value)

    def remove(self):
        """Dequeue and return the oldest value, or None when empty."""
        if not self.lst2:
            # Refill the out-box by draining (and thereby reversing) the in-box.
            while self.lst1:
                self.lst2.append(self.lst1.pop())
        return self.lst2.pop() if self.lst2 else None
# Test
# queue = MyQueue()
# queue.add(1)
# queue.add(2)
# queue.add(3)
# print(queue.remove())
# print(queue.remove())
# queue.add(4)
# queue.add(5)
# print(queue.remove())
# print(queue.remove()) | 21.903226 | 143 | 0.627814 | 3,432 | 0.72207 | 0 | 0 | 0 | 0 | 0 | 0 | 1,259 | 0.264885 |
25a7128988ab4fbad08c8732aefc9716accd10af | 531 | py | Python | Network-communicator-client.py | TomFang1/Network-Communicator | 8036c210412621ec3b253a446238ba6bf5601a9b | [
"MIT"
] | null | null | null | Network-communicator-client.py | TomFang1/Network-Communicator | 8036c210412621ec3b253a446238ba6bf5601a9b | [
"MIT"
] | null | null | null | Network-communicator-client.py | TomFang1/Network-Communicator | 8036c210412621ec3b253a446238ba6bf5601a9b | [
"MIT"
] | null | null | null | import socket
HOST = ""
PORT = ""
def address():
    """Prompt the user for the server's IP and port, then open the connection.

    Stores the answers in the module-level HOST/PORT globals and hands off
    to connector(). PORT is converted to int, so a non-numeric answer
    raises ValueError.
    """
    global HOST
    print("What is the IP of the computer you want to connect to? ")
    HOST = input(":")
    global PORT
    print("What is the PORT of the computer you want to connect to? ")
    PORT = int(input(":"))
    connector()
def connector():
    """Open a TCP connection to (HOST, PORT), send a test message, and
    print the (up to 1024-byte) reply. The socket is closed by the
    `with` block on exit."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((HOST, PORT))
        s.sendall(b"test")
        data = s.recv(1024)
        print(f"Received {data!r}")
address()
| 22.125 | 71 | 0.574388 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.288136 |
25a949c22437a642b924c26eebc537b68bb644d1 | 1,789 | py | Python | pipelines/head-pose-dataset-pipeline/data-chunk-spliter/main.py | tonouchi510/kfp-project | 67b78ae53cc3de594b8254999a4f553a8d5cec27 | [
"MIT"
] | null | null | null | pipelines/head-pose-dataset-pipeline/data-chunk-spliter/main.py | tonouchi510/kfp-project | 67b78ae53cc3de594b8254999a4f553a8d5cec27 | [
"MIT"
] | null | null | null | pipelines/head-pose-dataset-pipeline/data-chunk-spliter/main.py | tonouchi510/kfp-project | 67b78ae53cc3de594b8254999a4f553a8d5cec27 | [
"MIT"
] | null | null | null | import json
import hashlib
from absl import app
from absl import flags
from google.cloud import storage
# Command-line flags, parsed by absl.app at startup and read in main().
FLAGS = flags.FLAGS
flags.DEFINE_string(
    "pipeline", None,
    "Name of pipeline")
flags.DEFINE_string(
    "job_id", "test",
    "ID for job management.")
flags.DEFINE_string(
    "bucket_name", "",
    "GCS bucket name")
flags.DEFINE_string(
    "dataset", None,
    "Directory where dataset is stored.")
flags.DEFINE_integer(
    "chunk_size", 1000,
    "Num of files by tfrecord.")
def main(argv):
    """Split a GCS dataset listing into fixed-size chunk files.

    Lists every object under FLAGS.dataset in FLAGS.bucket_name, writes the
    object names in groups of FLAGS.chunk_size to chunk_<md5>.txt files,
    uploads them under tmp/<pipeline>/<job_id>/data-chunk-spliter/, and
    records the uploaded chunk paths in /tmp/out.json for the next
    pipeline step.
    """
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')
    if not FLAGS.dataset:
        raise ValueError("dataset param is not nullable.")
    job_dir = f"tmp/{FLAGS.pipeline}/{FLAGS.job_id}/data-chunk-spliter"
    client = storage.Client()
    bucket = client.bucket(FLAGS.bucket_name)
    # FLAGS.dataset is a gs:// URL; strip the bucket part to get the prefix.
    prefix = FLAGS.dataset.replace(f"gs://{FLAGS.bucket_name}/", "")
    blobs = bucket.list_blobs(prefix=prefix)
    files = []
    for b in blobs:
        files.append(b.name)
    n_files = len(files)
    i = 0
    chunk_files = []
    while i < n_files:
        # Collect up to chunk_size names for this chunk.
        targets = []
        j = 0
        while (i + j) < n_files and j < FLAGS.chunk_size:
            targets.append(files[i + j])
            j += 1
        # Chunk filename derived from the first few entries, so the same
        # input produces the same (stable) chunk names.
        f_number: str = hashlib.md5(str(targets[0:5]).encode()).hexdigest()
        chunk_file = f"chunk_{f_number}.txt"
        with open(chunk_file, "w") as f:
            for id in targets:
                f.write("%s\n" % id)
        blob = bucket.blob(f"{job_dir}/{chunk_file}")
        blob.upload_from_filename(chunk_file)
        chunk_files.append(f"{job_dir}/{chunk_file}")
        i += j
    # Manifest consumed by the downstream pipeline component.
    with open("/tmp/out.json", "w") as f:
        json.dump(chunk_files, f, indent=4)
if __name__ == '__main__':
app.run(main)
| 24.847222 | 75 | 0.608161 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 445 | 0.248742 |
25ab42f8ae212d4baa50016ccf09ca3732826f64 | 10,236 | py | Python | book_parser_alternate_approach.py | shankze/python_book_parser | 66a93466c0978f707276b088721b3e65ae5c863c | [
"MIT"
] | null | null | null | book_parser_alternate_approach.py | shankze/python_book_parser | 66a93466c0978f707276b088721b3e65ae5c863c | [
"MIT"
] | null | null | null | book_parser_alternate_approach.py | shankze/python_book_parser | 66a93466c0978f707276b088721b3e65ae5c863c | [
"MIT"
] | null | null | null | import re
import _pickle as cPickle
import logging
import argparse
#This script is not dependant on table of contents. It detects books and chapters based their titles
# Dictionary containing key and regex pattern to match the keys
pattern_dict = {
    'blank_line': re.compile(r'^\s*$'),
    'book_number': re.compile(r'(BOOK\s\w+):?\s?(.+)?'),
    'chapter_number': re.compile(r'CHAPTER\s(\w+)'),
    'epilogue_number': re.compile(r'([A-Za-z]+\sEPILOGUE):?\s?(.+)?')
}
BODY_START_CONSEC_BLANK_LINE_COUNT = 9 #Number of blank lines between table of contents and chapter 1
FOOTER_START_CONSEC_BLANK_LINE_COUNT = 9 #Number of blank lines between end of last chapter and start of footer
END_OF_CHAPTER_CONSEC_BLANK_LINE_COUNT = 4 #Number of blank lines marking the end of a chapter
class Book(object):
    """One book of the source text: index label, year label, and its chapters."""

    def __init__(self, bk_number, bk_year, chapter_list):
        # Attribute names are part of the serialized structure; keep them.
        self.bk_number, self.bk_year, self.chapter_list = \
            bk_number, bk_year, chapter_list
        logging.info('Created book: {}'.format(self.bk_number))
class Chapter(object):
    """One chapter: its index label and the list of Paragraph objects in it."""
    def __init__(self, ch_index, paragraph_list):
        self.ch_index = ch_index
        self.paragraph_list = paragraph_list
class Paragraph(object):
    """One paragraph: its 1-based index within the chapter and its sentences."""
    def __init__(self, p_index, sentence_list):
        self.p_index = p_index
        self.sentence_list = sentence_list
class Sentence(object):
    """One sentence: its 1-based index within the paragraph and its Word objects."""
    def __init__(self, s_index,wordObj_list):
        self.s_index = s_index
        self.wordObj_list = wordObj_list
class Word(object):
    """One word: its 1-based index within the sentence and the word text."""
    def __init__(self, w_index, word):
        self.w_index = w_index
        self.word = word
def parse_line(line):
    """
    Match `line` against the regexes in pattern_dict and return
    (key, match) for the first pattern that hits, or (None, None)
    when nothing matches.
    """
    for key, rx in pattern_dict.items():
        match = rx.search(line)
        if match is not None:
            return key, match
    return None, None
def obj_dict(obj):
    """`default=` hook for json.dump: serialize an object as its attribute dict."""
    return vars(obj)
def process_file(filepath):
    """
    Process file line by line, detecting books/chapters from their title
    lines (not from a table of contents) and counting consecutive blank
    lines to find section boundaries.
    Input:
        filepath: location of the file to be processed
    Return:
        book_list: A list of Book objects containing chapters, paragraphs, sentences and words
    """
    book_list = []
    try:
        with open(filepath, encoding="utf8", mode='r') as file: # open file
            header_end_found = False # True if active line is in the body section of the file(and not header)
            prev_key,book_index,chapter_index = '','',''
            paragraph_index,sentence_index,word_index = 1,1,1
            # temporary lists to store the lower level objects before adding to the higher level object
            sentence_list,paragraph_list,chapter_list,word_list = [],[],[],[]
            # I am assuming that the whole book may not be available at once. So I am going with the safe option of
            # reading a line at once. Does not load the whole file in memory
            for line in file:
                key, match = parse_line(line) # evaluates the line against regex expressions in pattern_dict
                if key == 'blank_line' and prev_key == 'blank_line':
                    consec_empty_line_count += 1 # found consecutive blank lines, increment counter
                else:
                    consec_empty_line_count = 0 # did not find consecutive blank line, so reset it to 0
                if not header_end_found: # continue till end of header is found. no processing requirements in header
                    if consec_empty_line_count == BODY_START_CONSEC_BLANK_LINE_COUNT:
                        header_end_found = True
                else: # in book body
                    if key == 'book_number' or key == 'epilogue_number': # current line is beginning of a book
                        if chapter_list: # also, end of previous book and its last chapter (not true for first book)
                            book_ob = Book(book_index,book_year,chapter_list)
                            # create a book object to store previous book, set its index,
                            # year and chapter list and clear chapters list
                            book_list.append(book_ob)
                            chapter_list = []
                        # get the name and index of the new book
                        book_index = match.group(1)
                        book_year = match.group(2)
                    elif key == 'chapter_number': # current line is beginning of a new chapter
                        # get chapter name
                        chapter_index = match.group(1)
                        # reset paragraph, sentence and word indices
                        paragraph_index = 1
                        sentence_index = 1
                        word_index = 1
                    elif key == 'blank_line': # current line is blank line
                        if consec_empty_line_count == FOOTER_START_CONSEC_BLANK_LINE_COUNT:
                            # enough consecutive blank lines: end of the last book
                            book_ob = Book(book_index, book_year, chapter_list) # create book object for last book
                            book_list.append(book_ob) # append it to books list
                            break # exiting the loop as processing of footer is not required
                        if word_list: # paragraph ended without a .? or ! (could be a paragraph ending with:)
                            # end the sentence and add it to the sentence list
                            sen_ob = Sentence(sentence_index, word_list)
                            sentence_list.append(sen_ob)
                            word_list = []
                        if consec_empty_line_count == END_OF_CHAPTER_CONSEC_BLANK_LINE_COUNT and paragraph_list:
                            # end of chapter. Create chapter object and save the chapter
                            chap_ob = Chapter(chapter_index,paragraph_list)
                            chapter_list.append(chap_ob)
                            paragraph_list = []
                        elif sentence_list:
                            #end of paragraph. add paragraph to paragraph list
                            par_ob = Paragraph(paragraph_index,sentence_list)
                            sentence_list = []
                            paragraph_list.append(par_ob)
                            paragraph_index += 1
                            sentence_index = 1
                            word_index = 1
                    else: # line with content
                        line = line.replace("’","") # remove apostrophes from line
                        # split lines into sentences on . ! ? not preceded by 'St'
                        # (presumably to keep abbreviations like 'St.' intact -- confirm)
                        sen_in_line = re.split(r'(?<!St)[.!?]', line)
                        if sen_in_line.__len__() == 1: #line without sentence endings
                            words_in_line = re.findall(r'[\w]+',line)
                            # find words and add them to the list
                            for word in words_in_line:
                                word_index = add_word_to_list(word, word_index, word_list)
                        else: #line containing sentence endings
                            for idx, split in enumerate(sen_in_line):
                                if split: #check to exclude multiple consecutive periods (...)
                                    words_in_line = re.findall(r'[\w]+', split)
                                    # find words and add them to the list
                                    for word in words_in_line:
                                        word_index = add_word_to_list(word, word_index, word_list)
                                    if (idx+1) < sen_in_line.__len__():
                                        # line contains end of sentence. add sentence to sentence list
                                        sen_ob = Sentence(sentence_index,word_list)
                                        sentence_list.append(sen_ob)
                                        word_list = []
                                        sentence_index += 1
                                        word_index = 1
                prev_key = key
            if not header_end_found:
                logging.error("Header end not defined")
    except FileNotFoundError as ex:
        print(ex)
    except IOError as ex:
        print(ex)
    except Exception as ex:
        print(ex)
    return book_list
def add_word_to_list(word, word_index, word_list):
    """
    Append `word`, wrapped in a Word object carrying `word_index`, to
    `word_list`.
    Input:
        word: word text to be added
        word_index: 1-based index of the word within its sentence
        word_list: list of Word objects to append to
    Return:
        the index to use for the next word
    """
    word_list.append(Word(word_index, word))
    return word_index + 1
logging.basicConfig(level=logging.DEBUG,format='%(asctime)s:%(levelname)s:%(message)s')
def main_wrapper(args):
    """
    Parse the input book file and pickle the resulting Book list.

    :param args: argparse Namespace with input_file_path and output_file_path
    :return: None; writes the serialized book list to output_file_path,
        or prints 'No books found' when parsing yielded nothing
    """
    inp_filepath = args.input_file_path
    out_filepath = args.output_file_path
    logging.info('Working on book: {}'.format(inp_filepath))
    book_list = process_file(inp_filepath)
    if book_list:
        try:
            with open(out_filepath,mode='wb') as cpickle_file:
                cPickle.dump(book_list,cpickle_file)
        except Exception as ex:
            print(ex)
    else:
        print('No books found')
def args_parser():
    """
    Define and parse the command-line interface, then dispatch to main_wrapper.
    Required arguments: -inp (file to parse) and -out (destination path for
    the pickled book structure).
    """
    parser = argparse.ArgumentParser(description="Parses files containing books and serializes the structure")
    parser.add_argument("-inp",help="full path of the file to parse",dest = "input_file_path",type=str,required=True)
    parser.add_argument("-out", help="output path to the serialized file", dest="output_file_path", type=str, required=True)
    parser.set_defaults(func=main_wrapper)
    args = parser.parse_args()
    args.func(args)
if __name__ == '__main__':
args_parser() | 45.493333 | 127 | 0.57288 | 800 | 0.07814 | 0 | 0 | 0 | 0 | 0 | 0 | 3,526 | 0.344403 |
25ae6c1ba93e797d3822a5f53bd87f019d8ffea6 | 1,420 | py | Python | modules/Registry/lv1_os_win_reg_mac_address.py | naaya17/carpe | fa2e3cfebe20f8839c985e5b9b78b538800172a1 | [
"Apache-2.0"
] | null | null | null | modules/Registry/lv1_os_win_reg_mac_address.py | naaya17/carpe | fa2e3cfebe20f8839c985e5b9b78b538800172a1 | [
"Apache-2.0"
] | null | null | null | modules/Registry/lv1_os_win_reg_mac_address.py | naaya17/carpe | fa2e3cfebe20f8839c985e5b9b78b538800172a1 | [
"Apache-2.0"
] | null | null | null |
class Mac_Address_Information:
    """Record for one network adapter's MAC address recovered from the registry."""
    # NOTE(review): these are shared class-level defaults; MACADDRESS()
    # assigns instance attributes of the same names on each record.
    par_id = ''
    case_id = ''
    evd_id = ''
    mac_address = ''
    description = ''
    backup_flag = ''
    source_location = []
def MACADDRESS(reg_system):
    """Collect MAC addresses of network adapters from a SYSTEM registry hive.

    Walks ControlSet001\\Control\\Class\\{4d36e972-...} (the network adapter
    class key) and, for subkeys whose DeviceInstanceID contains 'FFFF',
    reconstructs the MAC address from the instance ID's last path component
    and records the driver description.

    :param reg_system: opened SYSTEM hive exposing find_key()/subkeys()
    :return: list of Mac_Address_Information records (returned by the
        trailing `return` statement following this block)
    """
    mac_address_list = []
    mac_address_count = 0
    reg_key = reg_system.find_key(r"ControlSet001\Control\Class\{4d36e972-e325-11ce-bfc1-08002be10318}")
    for reg_subkey in reg_key.subkeys():
        try:
            for reg_subkey_value in reg_subkey.values():
                if reg_subkey_value.name() == 'DeviceInstanceID':
                    if 'FFFF' in reg_subkey_value.data():
                        mac_address_information = Mac_Address_Information()
                        mac_address_list.append(mac_address_information)
                        mac_address_list[mac_address_count].source_location = []
                        mac_address_list[mac_address_count].source_location.append('SYSTEM-ControlSet001/Control/Class/{4d36e972-e325-11ce-bfc1-08002be10318}')
                        mac_address_list[mac_address_count].mac_address = reg_subkey_value.data().split('\\')[-1][0:6] + reg_subkey_value.data().split('\\')[-1][10:16]
                        mac_address_list[mac_address_count].description = reg_subkey.value(name='DriverDesc').data()
                        mac_address_count = mac_address_count + 1
        # Bug fix: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt. Malformed subkeys are skipped with a notice.
        except Exception:
            print('-----MAC Address Error')
return mac_address_list | 45.806452 | 167 | 0.625352 | 167 | 0.117606 | 0 | 0 | 0 | 0 | 0 | 0 | 224 | 0.157746 |
25aeac09d273a2e17e46546268aae95b302c15ff | 4,819 | py | Python | src/audio/extract_audio.py | usc-sail/tiles-2019-dataset | c91c2da3a282757f67981c5e10aa93faab7b414b | [
"MIT"
] | null | null | null | src/audio/extract_audio.py | usc-sail/tiles-2019-dataset | c91c2da3a282757f67981c5e10aa93faab7b414b | [
"MIT"
] | null | null | null | src/audio/extract_audio.py | usc-sail/tiles-2019-dataset | c91c2da3a282757f67981c5e10aa93faab7b414b | [
"MIT"
] | null | null | null | from __future__ import print_function
from fileinput import filename
import os
import pandas as pd
import pdb
from datetime import timedelta
import datetime
import shutil
date_time_format = '%Y-%m-%dT%H:%M:%S.%f'
date_format = '%Y-%m-%d'
def make_dir(data_path):
    """Create directory `data_path` unless it already exists.

    Generalized from os.mkdir to os.makedirs so that missing parent
    directories are created as well; existing paths are left untouched.
    """
    if not os.path.exists(data_path):
        os.makedirs(data_path)
def check_micu_data_valid(data_time, start_date1, end_date1, start_date2, end_date2):
    """Return True when data_time falls inside MICU period 1 or the optional
    period 2 (inclusive at both ends).

    Dates are anything pandas.to_datetime accepts; period 2 is skipped when
    start_date2 is the literal string 'nan'.
    """
    stamp = pd.to_datetime(data_time)

    def _within(start, end):
        # Inclusive containment test expressed via second offsets.
        return ((stamp - pd.to_datetime(start)).total_seconds() >= 0 and
                (pd.to_datetime(end) - stamp).total_seconds() >= 0)

    if _within(start_date1, end_date1):
        return True
    return start_date2 != 'nan' and _within(start_date2, end_date2)
if __name__ == '__main__':
    # Copy consented participants' in-MICU audio features (and foreground
    # predictions, when present) from the processed tree into the
    # open-dataset tree. Only files recorded inside a participant's MICU
    # schedule window(s) are copied.
    # Read data root path
    root_dir = '/media/data/tiles-processed/tiles-phase2-delivery'
    output_dir = '/media/data/tiles-opendataset/tiles-phase2-opendataset-audio'
    delevery_root_path = os.path.abspath(os.path.join(root_dir, 'delivery_data'))
    setup_root_path = os.path.abspath(os.path.join(root_dir, 'setup_data'))
    participant_info_path = os.path.abspath(os.path.join(root_dir, 'participant-info'))
    # read study period data frame
    consent_df = pd.read_csv(os.path.join(root_dir, 'consents.csv'), index_col=5)
    study_period = pd.read_csv(os.path.join(participant_info_path, 'study-periods.csv'), index_col=0)
    micu_df = pd.read_csv(os.path.join(participant_info_path, 'p2_micuschedules_public_5.21.csv'), index_col=0)
    micu_df = micu_df.dropna(subset=['MICU Start Date 1'])
    participant_list = list(study_period.index)
    consent_participant_list = list(consent_df.index)
    participant_list.sort()
    for id in participant_list:
        # if no consent
        if id not in consent_participant_list:
            continue
        print(id, consent_df.loc[id, 'audio_future'])
        if consent_df.loc[id, 'audio_future'] is False:
            continue
        # if no data, continue
        audio_data_path = os.path.join('/media/data/tiles-processed', 'tiles-phase2-opendataset-audio', 'raw-features', id)
        if os.path.exists(audio_data_path) is False:
            continue
        # First MICU window; end date is extended to 23:59 of the end day.
        micu_start1 = pd.to_datetime(micu_df.loc[id, 'MICU Start Date 1']).strftime(date_time_format)[:-3]
        micu_start2 = str(micu_df.loc[id, 'MICU Start Date 2'])
        micu_end1 = (pd.to_datetime(micu_df.loc[id, 'MICU End Date 1'])+timedelta(days=1, minutes=-1)).strftime(date_time_format)[:-3]
        micu_end2 = str(micu_df.loc[id, 'MICU End Date 2'])
        if str(micu_start2) != 'nan':
            # The two windows together are capped at 21 days; window 2 gets
            # whatever days window 1 did not use.
            number_of_days1 = int((pd.to_datetime(micu_end1) - pd.to_datetime(micu_start1)).total_seconds() / (24 * 3600)) + 1
            left_days = 21 - number_of_days1
            if left_days:
                micu_end2 = (pd.to_datetime(micu_start2) + timedelta(days=left_days, minutes=-1)).strftime(date_time_format)[:-3]
            else:
                micu_start2, micu_end2 = 'nan', 'nan'
        file_list = os.listdir(audio_data_path)
        for file_name in file_list:
            if 'RawFeatures' in file_name:
                continue
            # File names are <unix-timestamp>.csv.gz; the timestamp dates the recording.
            time = file_name.split('.csv.gz')[0]
            date_time = datetime.datetime.fromtimestamp(int(time)).strftime(date_format)
            if check_micu_data_valid(date_time, micu_start1, micu_end1, micu_start2, micu_end2) is True:
                make_dir(output_dir)
                make_dir(os.path.join(output_dir, 'raw-features'))
                make_dir(os.path.join(output_dir, 'raw-features', id))
                make_dir(os.path.join(output_dir, 'fg-predictions'))
                make_dir(os.path.join(output_dir, 'fg-predictions', id))
                # destination paths in the open-dataset tree
                raw_feature_output_path = os.path.join(output_dir, 'raw-features', id, file_name)
                fg_predictions_output_path = os.path.join(output_dir, 'fg-predictions', id, str(time)+'.npy')
                # source paths in the processed tree
                raw_feature_path = os.path.join(audio_data_path, file_name)
                fg_predictions_path = os.path.join('/media/data/tiles-processed', 'tiles-phase2-opendataset-audio', 'fg-predictions', id, str(time)+'.npy')
                shutil.copy(raw_feature_path, raw_feature_output_path)
                if os.path.exists(fg_predictions_path) is True:
                    shutil.copy(fg_predictions_path, fg_predictions_output_path)
                print('save %s, %s' % (id, raw_feature_path))
| 45.037383 | 155 | 0.646192 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 811 | 0.168292 |
25afe3c2688bb5a8a98ef07920c42001d054b61e | 6,737 | py | Python | Python_files/extract_tmc_links.py | jingzbu/InverseVITraffic | c0d33d91bdd3c014147d58866c1a2b99fb8a9608 | [
"MIT"
] | null | null | null | Python_files/extract_tmc_links.py | jingzbu/InverseVITraffic | c0d33d91bdd3c014147d58866c1a2b99fb8a9608 | [
"MIT"
] | null | null | null | Python_files/extract_tmc_links.py | jingzbu/InverseVITraffic | c0d33d91bdd3c014147d58866c1a2b99fb8a9608 | [
"MIT"
] | null | null | null | from util_data_storage_and_load import *
import openpyxl

# Folder holding the filtered INRIX attribute tables (one workbook per link).
data_folder = '/home/jzh/Dropbox/Research/\
Data-driven_estimation_inverse_optimization/INRIX/Raw_data/'

NUM_LINKS = 12


def extract_tmc_list(link_idx):
    """Return the TMC codes for one link.

    Reads filtered_INRIX_attribute_table_link_<link_idx>.xlsx, opens its
    first sheet, and collects the utf-8 encoded values of column 2 from
    row 2 (the header row is skipped) down to the last row.
    """
    workbook = openpyxl.load_workbook(
        data_folder + 'filtered_INRIX_attribute_table_link_%d.xlsx' % link_idx)
    sheet_name = workbook.sheetnames[0].encode('utf-8')
    sheet = workbook.get_sheet_by_name(sheet_name)
    return [sheet.cell(row=i, column=2).value.encode('utf-8')
            for i in xrange(2, 1 + sheet.max_row)]


# One TMC list per link, in link order; this loop replaces twelve
# copy-pasted, otherwise identical extraction sections.
tmc_list_links = [extract_tmc_list(idx) for idx in xrange(1, NUM_LINKS + 1)]
zdump(tmc_list_links, '../temp_files/tmc_list_links.pkz')
| 34.372449 | 96 | 0.77824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,560 | 0.379991 |
25b0b90b288bd6174770a4217e3b26a9e55a4cbb | 26,777 | py | Python | python/analysis/fp_fit/fp_file.py | ACTCollaboration/moby2 | b0f6bd6add7170999eb964d18f16d795520426e9 | [
"BSD-2-Clause"
] | 3 | 2020-06-23T15:59:37.000Z | 2022-03-29T16:04:35.000Z | python/analysis/fp_fit/fp_file.py | ACTCollaboration/moby2 | b0f6bd6add7170999eb964d18f16d795520426e9 | [
"BSD-2-Clause"
] | 1 | 2020-04-08T15:10:46.000Z | 2020-04-08T15:10:46.000Z | python/analysis/fp_fit/fp_file.py | ACTCollaboration/moby2 | b0f6bd6add7170999eb964d18f16d795520426e9 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python
from __future__ import print_function
from __future__ import absolute_import
from past.builtins import basestring
import sys
import numpy as np
import moby2
trace = moby2.util.log.logger.trace
# transitional...
# Per-column printf-style output format used when writing fit tables.
_fp_formats = {
    'det_uid': '%4d',
    'ok': '%1d',
    'x0': '%9.6f',
    'x0_err': '%9.6f',
    'y0': '%9.6f',
    'y0_err': '%9.6f',
    'tau': '%8.5f',
    'tau_err': '%8.5f',
    'h': '%.4e',
    'w': '%9.6f',
    'sn': '%9.1f',
    'base': '%.5e',
    'n_obs': '%3d',
}
# Column order of the per-detector fields (det_uid is handled separately).
_fp_fields = ['ok', 'x0', 'x0_err', 'y0', 'y0_err', 'tau', 'tau_err',
              'h', 'w', 'sn', 'base', 'n_obs']
# str.format template for one text row: each field rendered with its
# _fp_formats spec (leading '%' stripped).
_fp_columns_format_str = ' '.join(['{%s:%s}'%(k, _fp_formats[k][1:])
                                   for k in _fp_fields]) + '\n'
class FPFitFile(moby2.detectors._SimpleDetData):
    # Per-detector focal-plane fit results, one entry per det_uid.
    fields = _fp_fields
    dtypes = {'ok': bool, 'n_obs': int}
    columns_format_str = _fp_columns_format_str
    # NOTE(review): xcfs/header look like a legacy fixed text row format and
    # its column header; xcfs is not referenced in this class body -- confirm.
    xcfs = '{det_uid:4d} {ok:1d} '\
        '{x0:9.6f} {x0_err:9.6f} {y0:9.6f} {y0_err:9.6f} '\
        '{tau:8.5f} {tau_err:8.5f} '\
        '{h:.4e} {w:9.6f} {sn:9.1f} {n_obs:3d}\n'
    header = '# det_uid ok x0 x0_err y0 y0_err '\
        'tau tau_err h w sn n_obs'
def __init__(self, det_uid=None):
if det_uid is not None:
self.det_uid = np.array(det_uid, dtype='int64')
n = len(det_uid)
for f in self.fields:
setattr(self, f, np.zeros(n, self.dtypes.get(f, 'float64')))
def __repr__(self):
name = repr(self.__class__)
return '%s with %i det_uid for fields ' % (name, len(self.det_uid)) + \
','.join(self.fields)
def update_row(self, row, data):
for k in self.fields:
if k in data:
getattr(self, k)[row] = data[k]
@classmethod
def from_columns_file(cls, filename):
data = np.loadtxt(filename, unpack=1)
det_uid = data[0].astype('int')
self = cls(det_uid)
self.ok = data[1].astype('int').astype('bool')
if len(data[2:]) == 11:
self.x0, self.x0_err, self.y0, self.y0_err, self.tau, self.tau_err, self.h, self.w, self.sn, self.base, self.n_obs = data[2:]
elif len(data[2:-1]) == 9:
self.x0, self.x0_err, self.y0, self.y0_err, self.tau, self.tau_err, self.h, self.w, self.sn = data[2:-1]
self.base = 0 * self.w
elif len(data[2:-1]) == 8:
self.x0, self.x0_err, self.y0, self.y0_err, self.tau, self.tau_err, self.h, self.sn = data[2:-1]
self.w = 0 * self.x0
self.base = 0 * self.x0
elif len(data[2:-1]) == 4:
self.x0, self.x0_err, self.y0, self.y0_err = data[2:-1]
self.base = 0
else:
raise ValueError("Strange number of columns in %s" % filename)
self.n_obs = data[-1].astype('int')
return self
@classmethod
def from_file(cls, filename):
if filename.endswith('fits') or filename.endswith('fits.gz'):
return cls.from_fits_table(filename)
return cls.from_columns_file(filename)
# This supercedes _SimpleDetData.write
def write(self, filename, format=None):
if format is None:
if filename.endswith('fits') or filename.endswith('fits.gz'):
format = 'fits'
else:
format = 'txt'
data = [('det_uid', self.det_uid)]
for k in self.fields:
v = getattr(self, k)
if v.dtype == bool:
v = v.astype('int8')
data.append((k, v))
odb = moby2.util.StructDB.from_data(data,formats=_fp_formats)
if format == 'fits':
odb.to_fits_table(filename)
elif format == 'txt':
odb.to_column_file(filename)
else:
raise ValueError("Unknown format request, %s." % format)
def write_reduced(self, filename, scale_amp=1.):
format = 'txt'
if filename.endswith('.fits') or filename.endswith('.fits.gz'):
format = 'fits'
s = self.ok.astype(bool)
# det_uid peak_DAC SN tau
data = [('det_uid', self.det_uid[s]),
('peak_dac', self.h[s] * scale_amp),
('time_const', self.tau[s]),
('sn', self.sn[s]),
]
odb = moby2.util.StructDB.from_data(
data, formats={'peak_dac': '%12.3f',
'time_const': '%12.5f',
'sn': '%12.3f'})
if format == 'txt':
odb.to_column_file(filename)
elif format == 'fits':
odb.to_fits_table(filename)
@classmethod
def from_focal_plane(cls, fp):
"""
Initialize from a FocalPlane object.
"""
self = cls(fp.det_uid)
self.x0 = fp.x.copy()
self.y0 = fp.y.copy()
self.ok = fp.mask.copy()
zeros = np.zeros(self.ok.shape)
self.tau, self.h, self.w = zeros.copy(), zeros.copy(), zeros.copy()
self.base = zeros
return self
@classmethod
def combine_fits(cls, fits, template=None, params={}):
"""
Combine fits by shifting each one to match a template, and
averaging the good fits for each detector.
If a template is not provided, match to the first one.
"""
trace(1, 'Fitting and averaging %i fits' % len(fits))
if template is None:
template = fits[0]
# Start by shifting each fit to match the template.
orig_fits, fits = fits, []
fitter = FPTemplateFitter()
fitter.set_template(template)
fit_params = {'shift': True,
'rotation': False}
fit_params.update(params)
fit_results = [None for fi in range(len(orig_fits))]
for fi,f0 in enumerate(orig_fits):
if f0.ok.sum() < params.get('min_dets', 50):
trace(2, 'Discarding fit with only %i good fits' % f0.ok.sum())
continue
ok, result = fitter.fit(f0, fit_params)
if not ok:
trace(2, 'Discarding fit due to failed template match')
continue
f1 = f0.copy()
f1.x0 += result[0]
f1.y0 += result[1]
fits.append(f1)
fit_results[fi] = result
trace(1, 'Cut %i of %i fits (increase verbosity to see why).' % \
(len(orig_fits) - len(fits), len(orig_fits)))
if len(fits) == 0:
return None, None
print([len(f.det_uid) for f in fits])
n_det_uid = max([f.det_uid.max() for f in fits]) + 1
output = cls(np.arange(n_det_uid))
output.ok[:] = False
ARCMIN = np.pi/180/60
trace(1, 'Combining data for %i detectors' % n_det_uid)
for uid in output.det_uid:
ok = np.array([f.get_property('ok', det_uid=uid)[1]
for f in fits])
x, y, tau = np.transpose([f.get_property(['x0','y0','tau'], det_uid=uid)[1]
for f in fits])
for _x in [x, y, tau]:
# Yes, this happens...
ok *= ~np.isnan(_x) * ~np.isinf(_x)
x, y, tau = [_x[ok] for _x in [x,y,tau]]
if ok.sum() < params.get('min_obs', 1):
trace(2, 'Discarding det_uid=%i due to only %i contributors'
% (uid, ok.sum()))
continue
# Majority rules.
x0, y0 = np.median(x), np.median(y)
for iteration in [0,1,2]:
d0 = ((x - x0)**2 + (y-y0)**2)**.5
s0 = d0 < params.get('max_separation', 1)*ARCMIN
if s0.sum() == 0:
break
x0, y0 = x[s0].mean(), y[s0].mean()
if s0.sum() <= 0:
trace(2, 'Discarding det_uid=%i due to only %i items in '\
' combination' % (uid, s0.sum()))
continue
vals = {
'x0': x0, 'y0': y0,
'x0_err': x[s0].std(),
'y0_err': y[s0].std(),
'tau': tau[s0].mean(),
'tau_err': tau[s0].std(),
'n_obs': s0.sum(),
'ok': s0.sum() >= params.get('min_obs', 1) }
output.update_row(uid, vals)
trace(2, 'Result for det_uid=%i' % uid)
for k in ['x0', 'y0', 'tau']:
trace(2, ' %s = %10.5f +- %10.5f' % (k, vals[k], vals[k+'_err']))
return output, fit_results
def plot_positions(self, filename, auto_zoom=True, params={},
title='', fig=None):
import pylab as pl
if fig is None:
pl.figure()
pl.gcf().set_size_inches(6., 6.)
else:
pl.figure(fig.number)
s = self.ok
if s.sum() == 0:
pl.title(title + ' - no good fits')
pl.savefig(filename)
pl.clf()
units = params.get('units', 'deg')
scale = {'rad': 1., 'deg': 180/np.pi, 'arcmin': 60*180/np.pi}[units]
x, y = self.x0[s]*scale, self.y0[s]*scale
x0, y0 = np.median(x), np.median(y)
r = ((x-x0)**2 + (y-y0)**2)**.5
window = np.median(r)*3
inside = r < params.get('zoom', scale*window)
pl.scatter(x, y, alpha=0.5)
if params.get('limits') is None:
if np.any(inside):
for vect,limiter in [(x,pl.xlim), (y,pl.ylim)]:
lo, hi = limiter()
lo = min(lo, vect[inside].min())
hi = max(hi, vect[inside].max())
limiter(lo, hi)
else:
xlims, ylims = params['limits']
pl.xlim(*xlims), pl.ylim(*ylims)
pl.title(title + ' - %i dets outside window' % (~inside).sum())
pl.xlabel('X (%s)' % units)
pl.ylabel('Y (%s)' % units)
def smart_locate(ax, n_max, bases=[1,2,5]):
x0, x1 = ax.get_view_interval()
if x1 == x0:
return
delta = (x1-x0) / (n_max-1)
# Find smallest base and p such delta < base*10^p
log_spacing = min([
np.ceil(np.log10(delta) - np.log10(b)) + np.log10(b)
for b in bases])
loc = pl.MultipleLocator(10**log_spacing)
ax.set_major_locator(loc)
smart_locate(pl.gca().xaxis, 6)
smart_locate(pl.gca().yaxis, 9)
pl.savefig(filename)
pl.clf()
pl.figure()
def plot_rowcol_summaries(self, filename, array_data):
import pylab as pl
def x_eyes(bads=None):
# Mark bad fits with an x.
if bads is None:
bads = ~s
pl.scatter(cols[bads], rows[bads], marker='x', edgecolor='gray')
def limit_args(data, kw={}):
lo, hi = data.min(), data.max()
if s.sum() > 1:
lo, hi = data[s].min(), data[s].max()
if hi == lo:
hi = lo + 1
kw.update({'vmin': lo, 'vmax': hi})
return kw
def bin(data, dtype='float'):
out = np.zeros((n_rows, n_cols), dtype)
out[rows, cols] = data
return out
def imshow_reformat():
# Tighten boundaries, add labels...
pl.xlabel('Column')
pl.ylabel('Row')
pl.xlim(-0.5, n_cols-0.5)
pl.ylim(-0.5, n_rows-0.5)
s = self.ok
rows, cols = array_data.get_property(['row', 'col'], det_uid=self.det_uid)
n_rows, n_cols = rows.max()+1, cols.max()+1
# Init plotting
pl.figure()
pl.gcf().set_size_inches(6., 6.)
pl.subplots_adjust(left=.1, right=.95, top=.95, bottom=.1,
hspace=.2, wspace=.3)
title_fs = 12
# Time constants...
#
pl.subplot(2,2,1)
z = self.tau * 1e3
pl.imshow(bin(z), interpolation='nearest', **limit_args(z))
pl.colorbar()
x_eyes()
pl.title('Time constants (ms)', fontsize=title_fs)
imshow_reformat()
pl.subplot(2,2,2)
z = self.tau_err * 1e3
pl.imshow(bin(z), interpolation='nearest', **limit_args(z))
pl.colorbar()
x_eyes()
pl.title('Time constant errors (ms)', fontsize=title_fs)
imshow_reformat()
if self.ok.sum() > 10:
pl.subplot(2,2,3)
pl.hist(self.tau[self.ok]*1e3, bins=20) #min(20,self.ok.sum()//10)
pl.xlabel('Time constant (ms)')
pl.ylabel('N_dets')
pl.subplot(2,2,4)
pl.hist(self.tau_err[self.ok]*1e3, bins=self.ok.sum()//10)
pl.xlabel('Time constant errors (ms)')
pl.ylabel('N_dets')
pl.savefig(filename+'time_const.png')
pl.clf()
# Positions and stuff
#
for i in [0,1]:
pl.subplot(2,2,1+i)
z = {0: self.x0_err, 1:self.y0_err}[i]
z = z * 180*3600/np.pi # to arcseconds
pl.imshow(bin(z), interpolation='nearest', **limit_args(z))
pl.colorbar()
x_eyes()
imshow_reformat()
pl.title('%s position RMS' % {0: 'X', 1: 'Y'}[i],
fontsize=title_fs)
pl.subplot(2,2,3)
z = self.n_obs
pl.imshow(bin(z), interpolation='nearest')
pl.colorbar()
imshow_reformat()
pl.title('N_obs', fontsize=title_fs)
pl.savefig(filename+'positions.png')
pl.clf()
# Destroy our subplot adjustments
pl.figure()
class FPTemplateFitter:
    """
    Class for shift/rotate/shearing a template FPFitFile to match a
    target FPFitFile.
    After initializing, set the template to use:

        fitter = FPTemplateFitter()
        fitter.set_template(my_template_fp)
        ok, params = fitter.fit(my_target_fp)

    Those params are stored internally, so you can get the model FP:

        model_for_target = fitter.get_modeled(my_target_fp)
    """
    # Order of the transform parameters as stored in self.result[1].
    param_names = ['dx', 'dy', 'theta', 'scale', 'shear_theta', 'shear_scale']
    formats = {'dx': '%9.6f',
               'dy': '%9.6f',
               'scale': '%11.4e',
               'n_dets': '%4i',
               'theta': '%9.6f',
               'shear_scale': '%11.4e',
               'shear_theta': '%9.6f',
               }
    @classmethod
    def from_params(cls, opts, tod_info=None):
        """Construct a fitter with its template loaded from a column file
        described by the ``opts`` dict (optionally resolved through an
        execcfg and/or a depot)."""
        if '_execcfg' in opts:
            # Merge in defaults from the exec config without overriding
            # explicitly supplied options.
            tod_id = moby2.scripting.products.get_tod_id(tod_info=tod_info)
            ic = moby2.scripting.execcfg.InputChooser()
            opts1 = ic.get_config(opts['_execcfg'], tod_id=tod_id)
            for k,v in list(opts1.items()):
                if not k in opts:
                    opts[k] = v
        if 'depot' in opts:
            depot = moby2.scripting.get_depot(opts['depot'])
            if not 'structure' in opts:
                opts['structure'] = '{tag}'
            filename = depot.get_full_path(**opts)
        else:
            filename = opts['filename']
        trace(2, 'Loading as template: %s' % filename)
        load_args = opts['column_def']
        pos_data = moby2.util.StructDB.from_column_file(filename, load_args)
        # Optional (x, y) rescaling of the template positions.
        r = opts.get('template_rescale', (1.,1.))
        if 'ok' in pos_data.dtype.names:
            mask = (pos_data['ok'].astype(int) != 0)
        else:
            mask = np.ones(pos_data['x'].shape, bool)
        template_fits = FPFitFile(det_uid=pos_data['det_uid'][mask])
        template_fits.x0[:] = pos_data['x'][mask] * r[0]
        template_fits.y0[:] = pos_data['y'][mask] * r[1]
        template_fits.ok[:] = True
        self = cls()
        self.set_template(template_fits)
        return self
    def set_template(self, template):
        """Store the template and its pivot point (the mean position of
        the template's good detectors)."""
        self.template = template
        self.pivot = self.template.x0[self.template.ok].mean(), \
                     self.template.y0[self.template.ok].mean()
    @staticmethod
    def _rotate(theta, x, y):
        # Rotate (x, y) by angle theta (radians), counter-clockwise.
        c, s = np.cos(theta), np.sin(theta)
        return x*c - y*s, y*c + x*s
    def model(self, params, x=None, y=None):
        """
        Shift, rotate, shear the current template according to the
        ``params`` sequence (see param_names).  Return the resulting
        offsets (x, y).
        """
        dx, dy, theta, scale, sh_theta, sh_scale = params
        # Scale parameters are stored as logs; exponentiate before use.
        scale, sh_scale = np.exp(scale), np.exp(sh_scale)
        # Shift away array center and rescale
        if x is None:
            tp = self.template
            x, y = tp.x0, tp.y0
        out_x, out_y = scale*(x - self.pivot[0]), scale*(y - self.pivot[1])
        # Shear
        out_x, out_y = self._rotate(+sh_theta, out_x, out_y)
        out_x *= sh_scale
        out_x, out_y = self._rotate(-sh_theta, out_x, out_y)
        # Rotate
        out_x, out_y = self._rotate(theta, out_x, out_y)
        # Restore array center and apply additional shift.
        return out_x + self.pivot[0] - dx, out_y + self.pivot[1] - dy
    def model_inverse(self, params, out_x, out_y):
        """
        Inverse of self.model.  Keep it up to date!
        """
        dx, dy, theta, scale, sh_theta, sh_scale = params
        scale, sh_scale = np.exp(scale), np.exp(sh_scale)
        # Remove additional shift.
        x, y = out_x - self.pivot[0] + dx, out_y - self.pivot[1] + dy
        # Unrotate
        x, y = self._rotate(-theta, x, y)
        # Unshear
        x, y = self._rotate(+sh_theta, x, y)
        x /= sh_scale
        x, y = self._rotate(-sh_theta, x, y)
        x, y = x/scale + self.pivot[0], y/scale + self.pivot[1]
        return x, y
    def fit(self, fp, params, trace_level=0):
        """
        Fit positions to a template, which is also an FPFitFile but
        may represent different det_uid.  'params' should be a dict
        like this one:

            params = {
                'shift': True,
                'rotation': True,
                'scale': True,
                'shear': True,
            }

        Returns (ok, params).  The fitted_template has the same
        det_uid as self.
        """
        template = self.template
        # Get mask of items that are ok in both the template and fits
        fp_ok = fp.ok.astype('bool').copy()
        _, temp_ok = template.get_property('ok', fp.det_uid)
        fp_ok *= temp_ok
        # Get the template and fits positions for those ok items
        _, x0 = template.get_property('x0', fp.det_uid[fp_ok])
        _, y0 = template.get_property('y0', fp.det_uid[fp_ok])
        x1, y1 = fp.x0[fp_ok], fp.y0[fp_ok]
        self.A = x0,y0
        self.B = x1,y1
        # Identify parameters we want to vary
        free_params = [params.get('shift', True)]*2
        free_params.append(params.get('rotation', True))
        free_params.append(params.get('scale', False))
        free_params.extend([params.get('shear', False)]*2)
        # NOTE(review): this tests fp.ok rather than the combined mask
        # fp_ok; if the template mask zeroes everything while fp.ok is
        # non-empty, x1.mean() below operates on empty arrays -- confirm.
        if fp.ok.sum() == 0:
            trace(trace_level+0, 'No items for template fit')
            self.result = False, [0. for f in free_params]
            return self.result
        trace(trace_level+0, 'Fitting template using %i items' % fp_ok.sum())
        # Start fit with shift based on mean displacement
        params0 = [x1.mean()-self.pivot[0], y1.mean()-self.pivot[1],
                   0., 0., 0., 0.]
        trace(trace_level+1, 'Starting parameters: %s' % str(params0))
        trace(trace_level+1, 'Free parameters: %s' % str(free_params))
        def fit_chi2(params):
            # Robustified sum of squared residuals between target and model.
            x_model, y_model = self.model(params, x0, y0)
            var = (x1 - x_model)**2 + (y1 - y_model)**2
            #return var.sum()
            # Attenuate contribution of outliers? Not clear this works...
            mvar = np.median(var)
            var_roll = var * (10*mvar / (10*mvar + var))
            return var_roll.sum()
        # Minimize... start with position or all is lost.
        params1 = params0
        for iters in [0,1]:
            for free_mask in [
                # Fit position only...
                [True , True , False, False, False, False],
                # Fit rotation and scale
                [False, False, True , True , False, False],
                # Fit skew
                [False, False, False, False, True , True ],
                # Fit skew and position
                [True , True , False, False, True , True ],
                # Let everything float
                [True , True , True , True , True , True ]]:
                # Only free the parameters the caller enabled.
                free = np.array(free_params) * free_mask
                if free.sum() > 0:
                    params1 = moby2.util.fitting.multi_fmin(
                        fit_chi2, params1, free=free, disp=0,
                        xtol=1e-6, ftol=1e-6)
            trace(trace_level+2, 'params snapshot: %s' % str(params1))
        trace(trace_level+1, 'Final parameters: %s' % str(params1))
        self.result = True, params1
        return self.result
    def check_result(self, opts):
        """
        Check self.result against ranges passed in by user.  User
        passes in a dict with keys like "<name>_range", where <name>
        is one of self.param_names.  The values are the range (lo, hi) of
        acceptable values.  If any range checks fail, the function
        returns false.
        """
        ok, params = self.result
        if not ok:
            return False
        for k, v in zip(self.param_names, params):
            k = '%s_range' % k
            if not k in opts: continue
            if not ((opts[k][0] <= v) and (v < opts[k][1])):
                return False
        return True
    def get_modeled(self, det_uid=None):
        """
        Return a FPFitFile with the modeled detector positions.  Pass
        in the desired det_uid, or the template det_uid will be
        used.
        """
        if det_uid is None:
            # NOTE(review): this class never sets self.det_uid, so the
            # no-argument call would raise AttributeError; presumably
            # self.template.det_uid was intended -- confirm before use.
            det_uid = self.det_uid
        matched = FPFitFile(det_uid=det_uid)
        _, ok = self.template.get_property('ok', matched.det_uid)
        _, x0 = self.template.get_property('x0', matched.det_uid)
        _, y0 = self.template.get_property('y0', matched.det_uid)
        matched.ok = ok
        params = self.result[1]
        matched.x0, matched.y0 = self.model(params, x0, y0)
        return matched
    def make_plots(self, fp, modeled, plot_prefix='./',
                   title=None):
        """
        Show fit quality in a few plots.
        """
        import pylab as pl
        def sane_axes():
            fig.gca().xaxis.set_major_locator(pl.MaxNLocator(4))
            fig.gca().yaxis.set_major_locator(pl.MaxNLocator(5))
            fig.gca().set_aspect('equal', 'datalim')
        DEG = 180./np.pi
        fig = pl.figure()
        fig.set_size_inches(8., 4.)
        pl.subplots_adjust(left=.1, right=.98, top=.85, bottom=.1,
                           hspace=.2, wspace=.3)
        pl.subplot(121)
        tp = self.template
        s, x, y = tp.ok, tp.x0, tp.y0
        pl.scatter(x[s], y[s], marker='o', s=4, alpha=.5)
        pl.xlabel('X')
        pl.ylabel('Y')
        pl.title('Input template')
        sane_axes()
        # The model positions
        pl.subplot(122)
        s, x, y = modeled.ok, modeled.x0 * DEG, modeled.y0 * DEG
        pl.scatter(x[s], y[s], alpha=.2)
        # And the fit positions
        s, x, y = fp.ok, fp.x0 * DEG, fp.y0 * DEG
        pl.scatter(x[s], y[s], marker='x')
        # Now connect them with lines...
        u = fp.det_uid[s]
        ok1, (x1, y1) = modeled.get_property(['x0','y0'], det_uid=u)
        x, y = x[s], y[s]
        for i in ok1.nonzero()[0]:
            pl.plot([x1[i]*DEG, x[i]], [y1[i]*DEG, y[i]], color='k', alpha=.4)
        pl.xlabel('X (deg)')
        pl.ylabel('Y (deg)')
        pl.title('Fitted result')
        sane_axes()
        if title != None:
            pl.figtext(0.5, 0.93, title, va='bottom', ha='center')
        pl.savefig(plot_prefix + 'fit.png')
        pl.figure() # destroy our settings...
    def old_make_plots(self, fp, modeled, plot_prefix='./',
                       title=None):
        """
        Show fit quality in a few plots.  (Older variant that writes three
        separate images instead of one two-panel figure.)
        """
        import pylab as pl
        DEG = 180./np.pi
        pl.figure()
        pl.gcf().set_size_inches(6., 6.)
        pl.subplots_adjust(left=.15, right=.95, top=.90, bottom=.1,
                           hspace=.2, wspace=.3)
        tp = self.template
        s, x, y = tp.ok, tp.x0, tp.y0
        pl.scatter(x[s], y[s], marker='x')
        pl.savefig(plot_prefix + '0template.png')
        pl.clf()
        s, x, y = modeled.ok, modeled.x0 * DEG, modeled.y0 * DEG
        pl.scatter(x[s], y[s], alpha=.2)
        pl.xlabel('X (deg)')
        pl.ylabel('Y (deg)')
        pl.savefig(plot_prefix + '1model.png')
        pl.clf()
        # The model positions
        s, x, y = modeled.ok, modeled.x0 * DEG, modeled.y0 * DEG
        pl.scatter(x[s], y[s], alpha=.2)
        # And the fit positions
        s, x, y = fp.ok, fp.x0 * DEG, fp.y0 * DEG
        pl.scatter(x[s], y[s], marker='x')
        # Now connect them with lines...
        u = fp.det_uid[s]
        ok1, (x1, y1) = modeled.get_property(['x0','y0'], det_uid=u)
        x, y = x[s], y[s]
        for i in ok1.nonzero()[0]:
            pl.plot([x1[i]*DEG, x[i]], [y1[i]*DEG, y[i]], color='k', alpha=.4)
        pl.xlabel('X (deg)')
        pl.ylabel('Y (deg)')
        if title is not None:
            pl.title(title)
        pl.savefig(plot_prefix + '2fit.png')
        pl.figure() # destroy our settings...
    # Formatted output...
    def get_ascii(self, names=None, params=None):
        """Format the given (or most recent) fit parameters as a single
        whitespace-separated text line."""
        if names is None:
            names = self.param_names
        if params is None:
            params = self.result[1]
        idx = [self.param_names.index(f) for f in names]
        text = [ self.formats.get(n, '%11.4e') % params[i]
                 for n,i in zip(names,idx) ]
        return ' '.join(text)
    @staticmethod
    def write_fit_list(filename, keys, fits, format=None):
        """Write one row per (key, fitter) pair.  With format='fits' a FITS
        table is written; otherwise ascii columns (``filename`` may be a
        path or an already-open file object)."""
        if format == 'fits':
            columns = list(zip(*[f.result[1] for f in fits]))
            col_defs = ([('id', keys), ('ok', [int(f.result[0]) for f in fits])] +
                        list(zip(fits[0].param_names, columns)))
            db_out = moby2.util.StructDB.from_data(
                col_defs, formats=fits[0].formats)
            db_out.to_fits_table(filename)
        else:
            if isinstance(filename, basestring):
                filename = open(filename, 'w')
            names = fits[0].param_names
            filename.write('# %s\n' % ' '.join(names))
            for key, fit in zip(keys, fits):
                text = fit.get_ascii(names=names)
                filename.write('%s %s\n' % (key, text))
| 36.283198 | 137 | 0.51182 | 25,991 | 0.970646 | 0 | 0 | 7,491 | 0.279755 | 0 | 0 | 5,594 | 0.208911 |
25b1a19eb6d8e239df7d680bb083dcaf01ffaddb | 2,864 | py | Python | apps/exporter/models.py | mjj55409/cpq-exporter | ae46c1580a1c7d228a352a88a61164d9b3c2490c | [
"MIT"
] | null | null | null | apps/exporter/models.py | mjj55409/cpq-exporter | ae46c1580a1c7d228a352a88a61164d9b3c2490c | [
"MIT"
] | null | null | null | apps/exporter/models.py | mjj55409/cpq-exporter | ae46c1580a1c7d228a352a88a61164d9b3c2490c | [
"MIT"
] | null | null | null | from django.conf import settings
from django.db import models
# from django.template.defaultfilters import slugify
from django.utils.translation import ugettext_lazy as _
class KB (models.Model):
    """A knowledge base, identified by a unique name and backed by a
    repository URL."""
    name = models.CharField(max_length=30, unique=True)
    repository_url = models.CharField(max_length=100, blank=False)
    def __str__(self):
        return self.name
class Destination (models.Model):
    """An export target; concrete connection details live in the related
    DatabaseDestination / SAPDestination record."""
    TYPE_DB = 0
    TYPE_ECC = 1
    TYPE_CRM = 2
    TYPE_CHOICES = (
        (TYPE_DB, _('Database')),
        (TYPE_ECC, _('ECC System')),
        (TYPE_CRM, _('CRM System'))
    )
    name = models.CharField(max_length=30)
    destination_type = models.SmallIntegerField(choices=TYPE_CHOICES, default=0)
    # SAP-style three-digit client number.
    client = models.CharField(max_length=3, default='000', blank=False)
    def __str__(self):
        return self.name
class DatabaseDestination (models.Model):
    """Database-specific connection details for a Destination (one-to-one;
    shares the Destination's primary key)."""
    TYPE_MSSQL = 0
    TYPE_MYSQL = 1
    TYPE_JDBC = 2
    TYPE_CHOICES = (
        (TYPE_MSSQL, _('Microsoft SQL')),
        (TYPE_MYSQL, _('MYSQL')),
        (TYPE_JDBC, _('Java Connector'))
    )
    destination = models.OneToOneField(Destination, on_delete=models.CASCADE, primary_key=True)
    database_type = models.SmallIntegerField(choices=TYPE_CHOICES, default=0)
    host = models.CharField(max_length=100, blank=True)
    port = models.CharField(max_length=7, blank=True)
    database_name = models.CharField(max_length=100, blank=False)
    def __str__(self):
        # e.g. "mydb@db.example.com:1433"
        return self.database_name + '@' + self.host + ':' + self.port
class SAPDestination (models.Model):
    """SAP-system connection details for a Destination (one-to-one;
    shares the Destination's primary key)."""
    destination = models.OneToOneField(Destination, on_delete=models.CASCADE, primary_key=True)
    host = models.CharField(max_length=100, blank=False)
    # SAP system identifier (SID).
    sid = models.CharField(max_length=4, blank=False)
class Project (models.Model):
    """A named export project composed of ordered ProjectSteps."""
    name = models.CharField(max_length=40, unique=True)
    description = models.TextField(blank=True)
    def __str__(self):
        return self.name
class ProjectStep (models.Model):
    """One ordered step of a Project, exporting a single KB."""
    project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='steps')
    step_number = models.PositiveSmallIntegerField(null=False, default=1)
    name = models.CharField(max_length=40, blank=True)
    # on_delete is mandatory from Django 2.0 on; CASCADE matches the
    # implicit default this field relied on under Django < 2.0.
    kb = models.ForeignKey(KB, on_delete=models.CASCADE)
    def __str__(self):
        # e.g. "MyProject.SomeKB"
        return self.project.name + '.' + self.kb.name
class Execution (models.Model):
    """One run of a Project's export, with timing and overall status."""
    # on_delete is mandatory from Django 2.0 on; CASCADE matches the
    # implicit default this field relied on under Django < 2.0.
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    time_start = models.DateTimeField(null=True, blank=True)
    time_end = models.DateTimeField(null=True, blank=True)
    duration = models.DurationField(blank=True, null=True)
    export_status = models.BooleanField(blank=True)
class ExecutionStep (models.Model):
    """Timing and status of a single ProjectStep within an Execution."""
    # on_delete is mandatory from Django 2.0 on; CASCADE matches the
    # implicit default these fields relied on under Django < 2.0.
    execution = models.ForeignKey(Execution, on_delete=models.CASCADE)
    step = models.ForeignKey(ProjectStep, on_delete=models.CASCADE)
    time_start = models.DateTimeField()
    time_end = models.DateTimeField()
    status = models.BooleanField()
| 30.795699 | 95 | 0.706355 | 2,669 | 0.931913 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.050628 |
25b1ffdcb5ac20b95e9d2af8defd2ffc485591b9 | 43 | py | Python | export_as_bookmark/migrations/__init__.py | 10sr/webtools | 8dd7fecf5d3df9094f32d044f11e983ab3095287 | [
"Apache-2.0"
] | null | null | null | export_as_bookmark/migrations/__init__.py | 10sr/webtools | 8dd7fecf5d3df9094f32d044f11e983ab3095287 | [
"Apache-2.0"
] | 111 | 2019-05-15T05:20:49.000Z | 2021-10-16T14:43:34.000Z | export_as_bookmark/migrations/__init__.py | 10sr/webtools | 8dd7fecf5d3df9094f32d044f11e983ab3095287 | [
"Apache-2.0"
] | null | null | null | """export_as_bookmark migration script."""
| 21.5 | 42 | 0.767442 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.976744 |
25b338ff4a0aea87eb7ecc2cc0784ffa15f6582b | 1,915 | py | Python | management/utils.py | jonathanverner/brython-jinja2 | cec6e16de1750203a858d0acf590f230fc3bf848 | [
"BSD-3-Clause"
] | 2 | 2020-09-13T17:51:55.000Z | 2020-11-25T18:47:12.000Z | management/utils.py | jonathanverner/brython-jinja2 | cec6e16de1750203a858d0acf590f230fc3bf848 | [
"BSD-3-Clause"
] | 2 | 2020-11-25T19:18:15.000Z | 2021-06-01T21:48:12.000Z | management/utils.py | jonathanverner/brython-jinja2 | cec6e16de1750203a858d0acf590f230fc3bf848 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
from plumbum import cli
class M(cli.Application):
    """Root plumbum CLI application supporting registration of nested
    subcommands via the @M.command("a.b.c") decorator."""
    # Registry of subcommands: name -> (application class, nested registry).
    # Class-level, so it is shared by all uses of this application class.
    subcommands = {}
    @classmethod
    def print_commands(cls, root=None, indent=0):
        """Recursively print the registered subcommand tree."""
        if root is None:
            root = cls.subcommands
        for name, (app, sub_cmds) in root.items():
            print(" "*indent, "Name:", name, "App:", app._NAME)
            cls.print_commands(root=sub_cmds, indent=indent+2)
    @classmethod
    def command(cls, name=None):
        """Decorator factory that registers a function as a (possibly
        nested) subcommand.

        The command path is the function's module path (any leading
        'management.' prefix stripped; '__main__' ignored) joined with
        ``name`` or the function's own name.  Intermediate subcommand
        application classes are created on demand."""
        postfix = name
        def decorator(method):
            if postfix is None:
                name = method.__name__
            else:
                name = postfix
            mod = method.__module__
            if mod.startswith('management'):
                mod = mod[len('management'):]
                mod = mod.lstrip('.')
            if mod == '__main__':
                full_name = name
            else:
                full_name = mod+'.'+name
            #print("Registering command", full_name)
            app = cls
            subcmds = cls.subcommands
            # Walk (and create as needed) the chain of parent subcommands.
            for sub in full_name.split('.')[:-1]:
                if sub not in subcmds:
                    #print("  Defining subcommand", sub)
                    sub_app = type(sub+'App', (cli.Application,),{})
                    sub_app = app.subcommand(sub)(sub_app)
                    subcmds[sub] = (sub_app, {})
                else:
                    #print("  Subcommand defined", sub)
                    pass
                app, subcmds = subcmds[sub]
            #print("* Defining subcommand", name)
            def main(self, *args):
                # Leaf application entry point: forward CLI arguments to
                # the wrapped function (its return value is discarded).
                method(*args)
            newclass = type(name+'App', (cli.Application,),{"main": main})
            newclass = app.subcommand(name)(newclass)
            return method
        return decorator
25b3746fa875224a25a61f388c3fd0485d927148 | 4,523 | py | Python | ml/gan_test.py | Ryoich/python_zero | fe4a5fd8b11c8c059d82b797cd1668f96d54e541 | [
"CC-BY-4.0"
] | 203 | 2018-12-14T10:16:33.000Z | 2022-03-10T07:23:34.000Z | ml/gan_test.py | Ryoich/python_zero | fe4a5fd8b11c8c059d82b797cd1668f96d54e541 | [
"CC-BY-4.0"
] | 39 | 2019-06-21T12:28:03.000Z | 2022-01-17T10:41:53.000Z | ml/gan_test.py | Ryoich/python_zero | fe4a5fd8b11c8c059d82b797cd1668f96d54e541 | [
"CC-BY-4.0"
] | 29 | 2018-12-30T06:48:59.000Z | 2022-03-10T07:43:42.000Z | import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
# Silence TensorFlow's INFO/WARNING chatter; only errors are reported.
tf.logging.set_verbosity(tf.logging.ERROR)
def generator_fn(noise, weight_decay=2.5e-5, is_training=True):
    """DCGAN-style generator mapping a noise batch to 28x28x1 images.

    Two fully-connected layers feed a 7x7x256 feature map that is
    upsampled by two stride-2 transposed convolutions (7->14->28); the
    final conv uses tanh, so pixel values land in [-1, 1].

    noise: 2-D float tensor, (batch, noise_dim).
    weight_decay: L2 regularization weight on FC/conv kernels.
    is_training: controls batch-norm statistics updates.
    """
    layers = tf.contrib.layers
    framework = tf.contrib.framework
    # Shared argument scopes: ReLU + batch norm + L2 on the main layers.
    f1 = framework.arg_scope(
        [layers.fully_connected, layers.conv2d_transpose],
        activation_fn=tf.nn.relu,
        normalizer_fn=layers.batch_norm,
        weights_regularizer=layers.l2_regularizer(weight_decay))
    f2 = framework.arg_scope(
        [layers.batch_norm],
        is_training=is_training,
        zero_debias_moving_mean=True)
    with f1, f2:
        net = layers.fully_connected(noise, 1024)
        net = layers.fully_connected(net, 7 * 7 * 256)
        net = tf.reshape(net, [-1, 7, 7, 256])
        net = layers.conv2d_transpose(net, 64, [4, 4], stride=2)
        net = layers.conv2d_transpose(net, 32, [4, 4], stride=2)
        # Plain conv (no batch norm scope applies) with tanh output.
        net = layers.conv2d(net, 1, 4, activation_fn=tf.tanh)
        return net
def discriminator_fn(img, _, weight_decay=2.5e-5, is_training=True):
    """Convolutional discriminator producing one unbounded logit per image.

    The second positional argument (TFGAN passes the generator inputs /
    conditioning here) is deliberately unused.

    img: 4-D image batch tensor.
    weight_decay: L2 regularization on conv/FC weights and biases.
    is_training: controls batch-norm statistics on the FC layer.
    """
    layers = tf.contrib.layers
    framework = tf.contrib.framework
    # Leaky-ReLU activations with L2 regularization on all conv/FC layers.
    with framework.arg_scope(
        [layers.conv2d, layers.fully_connected],
        activation_fn=(lambda n: tf.nn.leaky_relu(n, alpha=0.01)),
        weights_regularizer=layers.l2_regularizer(weight_decay),
        biases_regularizer=layers.l2_regularizer(weight_decay)):
        net = layers.conv2d(img, 64, [4, 4], stride=2)
        net = layers.conv2d(net, 128, [4, 4], stride=2)
        net = layers.flatten(net)
        with framework.arg_scope([layers.batch_norm], is_training=is_training):
            net = layers.fully_connected(
                net, 1024, normalizer_fn=layers.batch_norm)
        # Linear output: a single real-valued logit per example.
        return layers.linear(net, 1)
def provide_data(source, batch_size):
    """Build an input pipeline over a TFRecord file of 28x28x1 images.

    Counts the records (one full pass over the file), decodes each
    image, rescales pixels from [0, 255] to roughly [-1, 1], and
    returns a shuffled batch tensor of shape (batch_size, 28, 28, 1).
    """
    slim = tf.contrib.slim
    keys_to_features = {
        'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
        'image/format': tf.FixedLenFeature((), tf.string, default_value='raw'),
    }
    # One pass just to count records; can be slow for large files.
    datanum = sum(1 for _ in tf.python_io.tf_record_iterator(source))
    items_to_handlers = {
        'image': slim.tfexample_decoder.Image(shape=[28, 28, 1], channels=1),
    }
    decoder = slim.tfexample_decoder.TFExampleDecoder(
        keys_to_features, items_to_handlers)
    dataprovider = slim.dataset_data_provider.DatasetDataProvider
    reader = tf.TFRecordReader
    dataset = slim.dataset.Dataset(source, reader, decoder, datanum, None)
    provider = dataprovider(dataset, shuffle=True)
    image, = provider.get(['image'])
    # Map uint8 [0, 255] to float [-1, 1) to match the generator's tanh range.
    image = (tf.cast(image, tf.float32) - 128.0) / 128.0
    images = tf.train.batch([image], batch_size=batch_size)
    return images
def run_gan(TRAIN_DATA, TOTAL_STEPS=400):
    """Train a small WGAN (gradient-penalty loss) on the TFRecord file
    TRAIN_DATA, displaying a 2x10 grid of generated samples via
    matplotlib every 25 steps."""
    BATCH_SIZE = 32
    # +1 so the final iteration coincides with a plotting interval.
    TOTAL_STEPS += 1
    tfgan = tf.contrib.gan
    tf.reset_default_graph()
    # Keep the input pipeline on CPU.
    with tf.device('/cpu:0'):
        real_images = provide_data(TRAIN_DATA, BATCH_SIZE)
    gan_model = tfgan.gan_model(
        generator_fn,
        discriminator_fn,
        real_data=real_images,
        generator_inputs=tf.random_normal([BATCH_SIZE, 64]))
    improved_wgan_loss = tfgan.gan_loss(
        gan_model,
        generator_loss_fn=tfgan.losses.wasserstein_generator_loss,
        discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
        gradient_penalty_weight=1.0)
    generator_optimizer = tf.train.AdamOptimizer(0.001, beta1=0.5)
    discriminator_optimizer = tf.train.AdamOptimizer(0.0001, beta1=0.5)
    gan_train_ops = tfgan.gan_train_ops(
        gan_model,
        improved_wgan_loss,
        generator_optimizer,
        discriminator_optimizer)
    # Evaluation branch reusing the trained generator weights.
    with tf.variable_scope('Generator', reuse=True):
        eval_images = gan_model.generator_fn(
            tf.random_normal([500, 64]),
            is_training=False)
    visualizer = tfgan.eval.image_reshaper(eval_images[:20, ...], num_cols=10)
    train_step_fn = tfgan.get_sequential_train_steps()
    global_step = tf.train.get_or_create_global_step()
    INTERVAL = 25
    with tf.train.SingularMonitoredSession() as sess:
        for i in range(TOTAL_STEPS):
            train_step_fn(sess, gan_train_ops, global_step,
                          train_step_kwargs={})
            if i % INTERVAL == 0:
                # Render the current generator output inline.
                digits_np = sess.run([visualizer])
                plt.axis('off')
                plt.imshow(np.squeeze(digits_np), cmap='gray')
                plt.show()
#filename = "mnist.tfrecord"
#filename = "hiragana.tfrecord"
# run_gan(filename)
| 37.07377 | 79 | 0.666372 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 158 | 0.034933 |
25b3918a371821cdc9c995f47c968bb1c9ab06ab | 6,312 | py | Python | src/forms.py | Afsharov/observer-frontend | bd93fd1d7fa1a63ca650640995e1f10b0c99df44 | [
"BSD-3-Clause"
] | null | null | null | src/forms.py | Afsharov/observer-frontend | bd93fd1d7fa1a63ca650640995e1f10b0c99df44 | [
"BSD-3-Clause"
] | null | null | null | src/forms.py | Afsharov/observer-frontend | bd93fd1d7fa1a63ca650640995e1f10b0c99df44 | [
"BSD-3-Clause"
] | 1 | 2021-04-23T08:25:55.000Z | 2021-04-23T08:25:55.000Z | """This module contains all forms used by the Observer-Hive frontend.
"""
import os
import json
import logging
from bcrypt import checkpw
from flask_wtf import FlaskForm
from flask_login import current_user
from wtforms import StringField, PasswordField
from wtforms.validators import InputRequired, EqualTo, Length
# Module-level logger shared by all form validators in this package.
logger = logging.getLogger('src')
def get_users():
    """Retrieve all registered users and their stored password hashes.

    Reads ``users.json`` located in the same directory as this module.
    Using os.path.join (instead of string concatenation with '/') keeps
    the path portable, and JSON files are UTF-8 by specification.

    :return: dictionary mapping username to bcrypt password hash
    """
    here = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(here, 'users.json'), encoding='utf-8') as registered_users:
        users = json.load(registered_users)
    return users
class LoginForm(FlaskForm):
    """Login form with two fields: username and password.

    Validation checks the username against the registered-users store
    and verifies the password with bcrypt.
    """
    username = StringField(
        'username',
        validators=[InputRequired(message="Please enter a Username.")])
    password = PasswordField(
        'password',
        validators=[InputRequired(message="Please enter your Password.")])
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def validate(self):
        """Custom validator for the login form.

        Runs the standard field validators first, then confirms the
        username is known and that the supplied password matches the
        stored bcrypt hash.

        NOTE(review): distinct 'Unknown username' / 'Invalid password'
        messages allow account enumeration -- confirm this is acceptable.

        :return: True if all checks have been passed
        """
        if not FlaskForm.validate(self):
            return False
        known_users = get_users()
        name = self.username.data
        if name not in known_users:
            self.username.errors.append('Unknown username')
            logger.info(name + ' unknown.')
            return False
        supplied = self.password.data.encode('utf-8')
        stored = known_users[name].encode('utf-8')
        if not checkpw(supplied, stored):
            self.password.errors.append('Invalid password')
            logger.info('Denied access to '
                        + name
                        + ' due to wrong password.')
            return False
        return True
class ChangeCredentialsForm(FlaskForm):
    """Form for changing the current user's password.

    Requires the current password plus the new password entered twice
    (the second entry is used for verification).
    """
    username = StringField(
        'username',
        validators=[InputRequired(message="Please enter a Username.")])
    currentPassword = PasswordField(
        'currentPassword',
        validators=[
            InputRequired(message="Please enter your current Password.")])
    newPassword1 = PasswordField(
        'newPassword1',
        validators=[
            InputRequired(message="Please enter your new Password."),
            Length(min=4,
                   message="Your password must contain at least 4 characters.")])
    newPassword2 = PasswordField(
        'newPassword2',
        validators=[
            InputRequired(message="Please enter your new Password again."),
            EqualTo('newPassword1', message='Passwords must match')])
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def validate(self):
        """Custom validator for credential changes.

        Field validators already enforce presence, minimum length and
        equality of the two new passwords; here the supplied current
        password is checked against the logged-in user's stored hash.

        :return: True if all checks have been passed.
        """
        if not FlaskForm.validate(self):
            return False
        stored = get_users()[current_user.id].encode('utf-8')
        supplied = self.currentPassword.data.encode('utf-8')
        if checkpw(supplied, stored):
            return True
        self.currentPassword.errors.append('Invalid password')
        logger.info('Attempt to change password of '
                    + current_user.id
                    + ' failed due to wrong current password.')
        return False
class RegisterForm(FlaskForm):
    """Registration form for new users.

    Fields:
        username: the desired account name.
        password1 / password2: the chosen password, entered twice for
            verification (must match, minimum 4 characters).
    """
    username = StringField('username',
                           validators=[InputRequired(
                               message="Please enter a Username.")])
    password1 = PasswordField('password1',
                              validators=[
                                  InputRequired(
                                      message="Please enter your new Password."),
                                  Length(min=4,
                                         message="Your password must contain at least 4 characters.")])
    password2 = PasswordField('password2',
                              validators=[
                                  InputRequired(message=
                                                "Please enter your new Password again."),
                                  EqualTo('password1',
                                          message=
                                          'Passwords must match')])

    def __init__(self, *args, **kwargs):
        FlaskForm.__init__(self, *args, **kwargs)

    def validate(self):
        """Run the standard field validators (length and equality).

        :return: True if all checks have been passed.
        """
        return bool(FlaskForm.validate(self))
| 35.460674 | 106 | 0.53628 | 5,652 | 0.895437 | 0 | 0 | 0 | 0 | 0 | 0 | 2,319 | 0.367395 |
25b3c69e50bea2e1fc0a8c23aa497075c8c18426 | 7,151 | py | Python | lido/validate_keys.py | lidofinance/lido-python | 6cf74b17c47e54cfead891d445c6071c25d0478d | [
"MIT"
] | 3 | 2021-02-04T11:35:48.000Z | 2021-09-12T22:19:13.000Z | lido/validate_keys.py | lidofinance/lido-python | 6cf74b17c47e54cfead891d445c6071c25d0478d | [
"MIT"
] | 2 | 2021-08-30T08:35:04.000Z | 2021-09-02T17:17:08.000Z | lido/validate_keys.py | lidofinance/lido-python | 6cf74b17c47e54cfead891d445c6071c25d0478d | [
"MIT"
] | 3 | 2021-05-24T19:08:06.000Z | 2021-09-12T22:19:20.000Z | import typing as t
import concurrent
import concurrent.futures

from py_ecc.bls import G2ProofOfPossession as bls

from lido.eth2deposit.utils.ssz import (
    DepositMessage,
    compute_deposit_domain,
    compute_signing_root,
)
from lido.eth2deposit.settings import get_chain_setting
from lido.constants.chains import get_chain_name, get_eth2_chain_name
from lido.constants.withdrawal_credentials import get_withdrawal_credentials
from lido.contracts.w3_contracts import get_contract
def gen_possible_withdrawal_credentials(live_withdrawal_credentials, chain_id):
    """Return the de-duplicated list of withdrawal credentials to check:
    the live credentials plus every historical one known for the chain.
    """
    chain_name = get_chain_name(chain_id)
    candidates = {live_withdrawal_credentials}
    candidates.update(get_withdrawal_credentials(chain_name))
    return list(candidates)
def validate_key(data: t.Dict) -> t.Optional[bool]:
    """Run BLS deposit-signature validation on a single key.

    ``data`` is a dict (packed this way so it can be sent through
    ``executor.map``) with:
        key: the key record; must have "key" (pubkey) and
            "depositSignature" as hex strings or raw bytes.
        chain_id: numeric chain id used to derive the fork version.
        live_withdrawal_credentials: the currently active credentials.
        possible_withdrawal_credentials: live + historical credentials.
        strict: if True, only the live credentials are accepted.

    Returns True/False for the verification result, or None if the key
    was already validated (has a "valid_signature" field).
    """
    key = data["key"]
    chain_id = data["chain_id"]
    live_withdrawal_credentials = data["live_withdrawal_credentials"]
    possible_withdrawal_credentials = data["possible_withdrawal_credentials"]
    strict = data["strict"]
    # Is this key already validated?
    if "valid_signature" in key.keys():
        return None
    # Accept both hex-string and raw-bytes representations.
    pubkey = bytes.fromhex(key["key"]) if type(key["key"]) is str else key["key"]
    signature = (
        bytes.fromhex(key["depositSignature"])
        if type(key["depositSignature"]) is str
        else key["depositSignature"]
    )
    # The signing domain depends on the chain's genesis fork version.
    fork_version = get_chain_setting(get_eth2_chain_name(chain_id)).GENESIS_FORK_VERSION
    domain = compute_deposit_domain(fork_version=fork_version)
    # Minimum staking requirement of 32 ETH per validator
    REQUIRED_DEPOSIT_ETH = 32
    ETH2GWEI = 10 ** 9
    amount = REQUIRED_DEPOSIT_ETH * ETH2GWEI
    # If strict, not using any previous withdrawal credentials
    # Checking only actual live withdrawal credentials for unused keys
    if strict or ("used" in key and key["used"] is False):
        deposit_message = DepositMessage(
            pubkey=pubkey,
            withdrawal_credentials=live_withdrawal_credentials,
            amount=amount,
        )
        signing_root = compute_signing_root(deposit_message, domain)
        return bls.Verify(pubkey, signing_root, signature)
    # If a key has been used already or in loose mode, checking both new and any olds withdrawal creds
    for wc in possible_withdrawal_credentials:
        deposit_message = DepositMessage(
            pubkey=pubkey,
            withdrawal_credentials=wc,
            amount=amount,
        )
        signing_root = compute_signing_root(deposit_message, domain)
        verified = bls.Verify(pubkey, signing_root, signature)
        # Early exit when any key succeeds validation
        if verified is True:
            return True
    # Exit with False if none of the withdrawal creds combination were valid
    return False
def validate_keys_mono(
    w3, operators: t.List[t.Dict], lido_address: str, lido_abi_path: str, strict: bool
) -> t.List[t.Dict]:
    """Single-process key validation over every operator's keys.

    Modifies the input! Adds a "valid_signature" field to every key item
    that does not already have one, and returns the same ``operators``
    list.

    :param w3: connected web3 instance (used for chain id and contract calls)
    :param operators: operator dicts, each with a "keys" list
    :param lido_address: address of the Lido contract
    :param lido_abi_path: path to the contract ABI file
    :param strict: if True, only the live withdrawal credentials are accepted
    """
    # Prepare network vars
    lido = get_contract(w3, address=lido_address, path=lido_abi_path)
    chain_id = w3.eth.chainId
    live_withdrawal_credentials = lido.functions.getWithdrawalCredentials().call()
    possible_withdrawal_credentials = gen_possible_withdrawal_credentials(
        live_withdrawal_credentials, chain_id
    )
    for op_i, op in enumerate(operators):
        for key_i, key in enumerate(op["keys"]):
            # Is this key already validated?
            if "valid_signature" not in key.keys():
                operators[op_i]["keys"][key_i]["valid_signature"] = validate_key(
                    {
                        "chain_id": chain_id,
                        "key": key,
                        "live_withdrawal_credentials": live_withdrawal_credentials,
                        "possible_withdrawal_credentials": possible_withdrawal_credentials,
                        "strict": strict,
                    }
                )
    return operators
def validate_keys_multi(
    w3, operators: t.List[t.Dict], lido_address: str, lido_abi_path: str, strict: bool
) -> t.List[t.Dict]:
    """Multi-process key validation over every operator's keys.

    Modifies the input! Adds a "valid_signature" field to every key item
    (keys already validated are left untouched) and returns the same
    ``operators`` list. One worker process per CPU thread is spawned by
    the default ``ProcessPoolExecutor``.

    NOTE(review): this relies on ``concurrent.futures`` being importable
    as an attribute of ``concurrent``; a bare ``import concurrent`` does
    not load the ``futures`` submodule — confirm the module-level import.
    """
    # Prepare network vars
    lido = get_contract(w3, address=lido_address, path=lido_abi_path)
    chain_id = w3.eth.chainId
    live_withdrawal_credentials = lido.functions.getWithdrawalCredentials().call()
    possible_withdrawal_credentials = gen_possible_withdrawal_credentials(
        live_withdrawal_credentials, chain_id
    )
    with concurrent.futures.ProcessPoolExecutor() as executor:
        for op_i, op in enumerate(operators):
            # Pass {} to overcome 1-arg limit of concurrency.map()
            arguments = [
                {
                    "chain_id": chain_id,
                    "key": key,
                    "live_withdrawal_credentials": live_withdrawal_credentials,
                    "possible_withdrawal_credentials": possible_withdrawal_credentials,
                    "strict": strict,
                }
                for key in op["keys"]
            ]
            validate_key_results = executor.map(validate_key, arguments)
            for key_index, validate_key_result in enumerate(validate_key_results):
                # Is this key already validated?
                if validate_key_result is not None:
                    operators[op_i]["keys"][key_index]["valid_signature"] = validate_key_result
    return operators
def validate_key_list_multi(
    w3, input: t.List[t.Dict], lido_address: str, lido_abi_path: str, strict: bool
) -> t.List[t.Dict]:
    """Multi-process validation of a flat key list.

    Unlike the other validators, this returns only the keys that FAILED
    validation instead of the whole operator data. Keys that were
    already validated (result ``None``) are skipped.

    :param input: list of key records (see ``validate_key``)
    :return: the subset of ``input`` whose signatures did not verify
    """
    # Prepare network
    lido = get_contract(w3, address=lido_address, path=lido_abi_path)
    chain_id = w3.eth.chainId
    live_withdrawal_credentials = lido.functions.getWithdrawalCredentials().call()
    possible_withdrawal_credentials = gen_possible_withdrawal_credentials(
        live_withdrawal_credentials, chain_id
    )
    invalid = []
    with concurrent.futures.ProcessPoolExecutor() as executor:
        # Pass {} to overcome 1-arg limit of concurrency.map()
        arguments = [
            {
                "chain_id": chain_id,
                "key": key,
                "live_withdrawal_credentials": live_withdrawal_credentials,
                "possible_withdrawal_credentials": possible_withdrawal_credentials,
                "strict": strict,
            }
            for key in input
        ]
        validate_key_results = executor.map(validate_key, arguments)
        for key_index, validate_key_result in enumerate(validate_key_results):
            # Is this key already validated?
            if validate_key_result is not None and validate_key_result is False:
                invalid.append(input[key_index])
    return invalid
| 35.577114 | 102 | 0.669277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,750 | 0.244721 |
25b4d5eeead3f970706a1374c6a7f40b94ccf5d5 | 1,167 | py | Python | app/editor/animation_editor/animation_tab.py | zerorock1312/lt-maker-master | 82f733683f9dba763a5de8567c41fd7cbcfb0173 | [
"MIT"
] | null | null | null | app/editor/animation_editor/animation_tab.py | zerorock1312/lt-maker-master | 82f733683f9dba763a5de8567c41fd7cbcfb0173 | [
"MIT"
] | null | null | null | app/editor/animation_editor/animation_tab.py | zerorock1312/lt-maker-master | 82f733683f9dba763a5de8567c41fd7cbcfb0173 | [
"MIT"
] | null | null | null | from app.resources.resources import RESOURCES
from app.extensions.custom_gui import ResourceListView
from app.editor.data_editor import SingleResourceEditor
from app.editor.base_database_gui import DatabaseTab
from app.editor.animation_editor import animation_model, animation_properties
class AnimationDatabase(DatabaseTab):
@classmethod
def create(cls, parent=None):
data = RESOURCES.animations
title = "Map Animation"
right_frame = animation_properties.AnimationProperties
collection_model = animation_model.AnimationModel
deletion_criteria = None
dialog = cls(data, title, right_frame, deletion_criteria,
collection_model, parent, button_text="Add New %s...",
view_type=ResourceListView)
return dialog
# Testing
# Run "python -m app.editor.animation_editor.animation_tab" from main directory
if __name__ == '__main__':
    # Manual smoke test: open the animation editor tab standalone.
    import sys
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    # Load the default project's resources before opening the editor.
    RESOURCES.load('default.ltproj')
    window = SingleResourceEditor(AnimationDatabase, ['animations'])
    window.show()
    app.exec_()
| 35.363636 | 79 | 0.732648 | 524 | 0.449015 | 0 | 0 | 482 | 0.413025 | 0 | 0 | 156 | 0.133676 |
25b55d88dcab9d2dffb59f59c615572e4c007d1c | 98 | py | Python | app/views/dashboard.py | aviago/aviago | 6812f27a6fe1472752b274c9497487eed8d63abd | [
"Apache-2.0"
] | null | null | null | app/views/dashboard.py | aviago/aviago | 6812f27a6fe1472752b274c9497487eed8d63abd | [
"Apache-2.0"
] | null | null | null | app/views/dashboard.py | aviago/aviago | 6812f27a6fe1472752b274c9497487eed8d63abd | [
"Apache-2.0"
] | null | null | null | from .base import BaseUserView
class Dashboard(BaseUserView):
def index(self):
pass
| 14 | 30 | 0.693878 | 64 | 0.653061 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
25bd9069c5a294404fe7ae509383c7f176278cbc | 12,070 | py | Python | ikabot/function/sellResources.py | adaamz/ikabot | d243e612ba083a39f6efce15012d173aad693dc6 | [
"MIT"
] | null | null | null | ikabot/function/sellResources.py | adaamz/ikabot | d243e612ba083a39f6efce15012d173aad693dc6 | [
"MIT"
] | null | null | null | ikabot/function/sellResources.py | adaamz/ikabot | d243e612ba083a39f6efce15012d173aad693dc6 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
import time
import math
import json
import gettext
import traceback
from decimal import *
from ikabot.config import *
from ikabot.helpers.gui import *
from ikabot.helpers.market import *
from ikabot.helpers.botComm import *
from ikabot.helpers.varios import addDot, wait
from ikabot.helpers.pedirInfo import read
from ikabot.helpers.signals import setInfoSignal
from ikabot.helpers.process import set_child_mode
from ikabot.helpers.planRoutes import waitForArrival
t = gettext.translation('sellResources',
localedir,
languages=languages,
fallback=True)
_ = t.gettext
def chooseCommercialCity(commercial_cities):
    """Ask the user to pick one of his commercial cities.

    Parameters
    ----------
    commercial_cities : list[dict]
        Cities that have a branch office built.

    Returns
    -------
    commercial_city : dict
        The city the user selected.
    """
    print(_('In which city do you want to sell resources?\n'))
    for number, city in enumerate(commercial_cities, start=1):
        print('({:d}) {}'.format(number, city['name']))
    choice = read(min=1, max=len(commercial_cities))
    return commercial_cities[choice - 1]
def getMarketInfo(session, city):
    """Fetch the branch-office "own offers" view for *city*.

    Parameters
    ----------
    session : ikabot.web.session.Session
    city : dict

    Returns
    -------
    response : dict
        The third nested element of the game's JSON reply (the HTML
        fragment of the own-offers tab).
    """
    payload = {
        'view': 'branchOfficeOwnOffers',
        'activeTab': 'tab_branchOfficeOwnOffers',
        'cityId': city['id'],
        'position': city['pos'],
        'backgroundView': 'city',
        'currentCityId': city['id'],
        'templateView': 'branchOfficeOwnOffers',
        'currentTab': 'tab_branchOfficeOwnOffers',
        'actionRequest': actionRequest,
        'ajax': '1',
    }
    answer = session.post(params=payload, noIndex=True)
    return json.loads(answer, strict=False)[1][1][1]
def getOffers(session, my_market_city, resource_type):
    """Scrape the bargain tab for offers buying the given resource.

    Parameters
    ----------
    session : ikabot.web.session.Session
    my_market_city : dict
        City whose branch office performs the search (its trade range
        ``rango`` limits the results).
    resource_type : int
        Index into ``materials_names``; 0 is sent to the server as the
        literal string 'resource', the rest as their numeric string.

    Returns
    -------
    offers : list
        Tuples ``(city_name, player_name, amount, price, distance,
        destination_city_id)`` extracted from the offer table HTML.
    """
    if resource_type == 0:
        resource_type = 'resource'
    else:
        resource_type = str(resource_type)
    data = {'cityId': my_market_city['id'], 'position': my_market_city['pos'], 'view': 'branchOffice', 'activeTab': 'bargain', 'type': '333', 'searchResource': resource_type, 'range': my_market_city['rango'], 'backgroundView': 'city', 'currentCityId': my_market_city['id'], 'templateView': 'branchOffice', 'currentTab': 'bargain', 'actionRequest': actionRequest, 'ajax': '1'}
    resp = session.post(payloadPost=data)
    html = json.loads(resp, strict=False)[1][1][1]
    # Each regex match captures: city name, player name, amount, price,
    # distance, destination city id — in that order.
    return re.findall(r'<td class=".*?">(.*?)<br/>\((.*?)\)\s*</td>\s*<td>(.*?)</td>\s*<td><img src=".*?"\s*alt=".*?"\s*title=".*?"/></td>\s*<td style="white-space:nowrap;">(\d+)\s*<img src=".*?"\s*class=".*?"/>.*?</td>\s*<td>(\d+)</td>\s*<td><a onclick="ajaxHandlerCall\(this\.href\);return false;"\s*href="\?view=takeOffer&destinationCityId=(\d+)&', html)
def sellToOffers(session, city_to_buy_from, resource_type, event):
    """Interactively sell a resource into existing buy offers.

    Asks the user which offers to accept and how much to sell, then
    detaches into background mode and ships the goods via ``do_it1``.

    Parameters
    ----------
    session : ikabot.web.session.Session
    city_to_buy_from : dict
        The user's commercial city whose stock is sold.
    resource_type : int
        Index into ``materials_names`` of the resource to sell.
    event : multiprocessing.Event
        Set once the interactive part is done so the caller can resume.
    """
    banner()
    offers = getOffers(session, city_to_buy_from, resource_type)
    if len(offers) == 0:
        print(_('No offers available.'))
        enter()
        event.set()
        return
    print(_('Which offers do you want to sell to?\n'))
    # First pass: let the user pick offers and tally how much could be sold.
    chosen_offers = []
    total_amount = 0
    profit = 0
    for offer in offers:
        cityname, username, amount, price, dist, destination_city_id = offer
        cityname = cityname.strip()
        # Amounts are scraped as localized strings ("1.234" / "1,234").
        amount = amount.replace(',', '').replace('.', '')
        amount = int(amount)
        price = int(price)
        msg = _('{} ({}): {} at {:d} each ({} in total) [Y/n]').format(cityname, username, addDot(amount), price, addDot(price*amount))
        rta = read(msg=msg, values=['y', 'Y', 'n', 'N', ''])
        if rta.lower() == 'n':
            continue
        chosen_offers.append(offer)
        total_amount += amount
        profit += amount * price
    if len(chosen_offers) == 0:
        event.set()
        return
    # Cap the sale at what the city actually has in stock.
    available = city_to_buy_from['recursos'][resource_type]
    amount_to_sell = min(available, total_amount)
    banner()
    print(_('\nHow much do you want to sell? [max = {}]').format(addDot(amount_to_sell)))
    amount_to_sell = read(min=0, max=amount_to_sell)
    if amount_to_sell == 0:
        event.set()
        return
    # Second pass: recompute the profit for the amount actually chosen,
    # filling the selected offers in order.
    left_to_sell = amount_to_sell
    profit = 0
    for offer in chosen_offers:
        cityname, username, amount, price, dist, destination_city_id = offer
        cityname = cityname.strip()
        amount = amount.replace(',', '').replace('.', '')
        amount = int(amount)
        price = int(price)
        sell = min(amount, left_to_sell)
        left_to_sell -= sell
        profit += sell * price
    print(_('\nSell {} of {} for a total of {}? [Y/n]').format(addDot(amount_to_sell), materials_names[resource_type], addDot(profit)))
    rta = read(values=['y', 'Y', 'n', 'N', ''])
    if rta.lower() == 'n':
        event.set()
        return
    # Detach: the actual shipping runs as a background child process.
    set_child_mode(session)
    event.set()
    info = _('\nI sell {} of {} in {}\n').format(addDot(amount_to_sell), materials_names[resource_type], city_to_buy_from['name'])
    setInfoSignal(session, info)
    try:
        do_it1(session, amount_to_sell, chosen_offers, resource_type, city_to_buy_from)
    except:
        # NOTE(review): bare except also traps KeyboardInterrupt/SystemExit.
        msg = _('Error in:\n{}\nCause:\n{}').format(info, traceback.format_exc())
        sendToBot(session, msg)
    finally:
        session.logout()
def createOffer(session, my_offering_market_city, resource_type, event):
    """Interactively create a sell offer in the user's own branch office.

    Asks the user for an amount and a unit price (bounded by the
    market's allowed price range), then detaches into background mode
    and maintains the offer via ``do_it2``.

    Parameters
    ----------
    session : ikabot.web.session.Session
    my_offering_market_city : dict
        City whose branch office hosts the offer.
    resource_type : int
        Index into ``materials_names`` of the resource to offer.
    event : multiprocessing.Event
        Set once the interactive part is done so the caller can resume.
    """
    banner()
    html = getMarketInfo(session, my_offering_market_city)
    sell_market_capacity = storageCapacityOfMarket(html)
    total_available_amount_of_resource = my_offering_market_city['recursos'][resource_type]
    print(_('How much do you want to sell? [max = {}]').format(addDot(total_available_amount_of_resource)))
    amount_to_sell = read(min=0, max=total_available_amount_of_resource)
    if amount_to_sell == 0:
        event.set()
        return
    # The allowed price band is embedded in the market page's JS config.
    price_max, price_min = re.findall(r'\'upper\': (\d+),\s*\'lower\': (\d+)', html)[resource_type]
    price_max = int(price_max)
    price_min = int(price_min)
    print(_('\nAt what price? [min = {:d}, max = {:d}]').format(price_min, price_max))
    price = read(min=price_min, max=price_max)
    print(_('\nI will sell {} of {} at {}: {}').format(addDot(amount_to_sell), materials_names[resource_type], addDot(price), addDot(price * amount_to_sell)))
    print(_('\nProceed? [Y/n]'))
    rta = read(values=['y', 'Y', 'n', 'N', ''])
    if rta.lower() == 'n':
        event.set()
        return
    # Detach: keeping the offer filled runs as a background child process.
    set_child_mode(session)
    event.set()
    info = _('\nI sell {} of {} in {}\n').format(addDot(amount_to_sell), materials_names[resource_type], my_offering_market_city['name'])
    setInfoSignal(session, info)
    try:
        do_it2(session, amount_to_sell, price, resource_type, sell_market_capacity, my_offering_market_city)
    except:
        # NOTE(review): bare except also traps KeyboardInterrupt/SystemExit.
        msg = _('Error in:\n{}\nCause:\n{}').format(info, traceback.format_exc())
        sendToBot(session, msg)
    finally:
        session.logout()
def sellResources(session, event, stdin_fd):
    """Entry point: interactive menu to sell resources.

    Lets the user pick a commercial city, a resource, and whether to
    sell into existing offers or publish his own offer, then delegates
    to ``sellToOffers`` or ``createOffer``.

    Parameters
    ----------
    session : ikabot.web.session.Session
    event : multiprocessing.Event
        Set by the delegated function when user interaction finishes.
    stdin_fd: int
        File descriptor of the parent's stdin, reopened here because
        this runs in a forked child process.
    """
    sys.stdin = os.fdopen(stdin_fd)
    try:
        banner()
        commercial_cities = getCommercialCities(session)
        if len(commercial_cities) == 0:
            print(_('There is no store built'))
            enter()
            event.set()
            return
        # With a single commercial city there is nothing to choose.
        if len(commercial_cities) == 1:
            city = commercial_cities[0]
        else:
            city = chooseCommercialCity(commercial_cities)
            banner()
        print(_('What resource do you want to sell?'))
        for index, material_name in enumerate(materials_names):
            print('({:d}) {}'.format(index+1, material_name))
        selected_material = read(min=1, max=len(materials_names))
        resource = selected_material - 1
        banner()
        print(_('Do you want to sell to existing offers (1) or do you want to make your own offer (2)?'))
        selected = read(min=1, max=2)
        # Dispatch to the chosen mode (1-indexed menu -> 0-indexed list).
        [sellToOffers, createOffer][selected - 1](session, city, resource, event)
    except KeyboardInterrupt:
        event.set()
        return
def do_it1(session, left_to_sell, offers, resource_type, city_to_buy_from):
    """Background worker: ship the sold resource to each accepted offer.

    Fills the offers in order, sending as many loads as the free
    transport ships allow (500 units per ship), looping until either
    everything is sold or the current offer is satisfied.

    Parameters
    ----------
    session : ikabot.web.session.Session
    left_to_sell : int
        Total units still to sell across all offers.
    offers : list[dict]
        Offers accepted by the user (scraped tuples, see ``getOffers``).
    resource_type : int
        Index into ``materials_names``.
    city_to_buy_from : dict
        Origin city providing the goods.
    """
    for offer in offers:
        cityname, username, amount, precio, dist, destination_city_id = offer
        cityname = cityname.strip()
        # Amounts are scraped as localized strings ("1.234" / "1,234").
        amount_to_buy = amount.replace(',', '').replace('.', '')
        amount_to_buy = int(amount_to_buy)
        while True:
            amount_to_sell = min(amount_to_buy, left_to_sell)
            # Block until at least one transport ship is free.
            ships_available = waitForArrival(session)
            # Each transport ship carries 500 units.
            ships_needed = math.ceil((Decimal(amount_to_sell) / Decimal(500)))
            ships_used = min(ships_available, ships_needed)
            if ships_needed > ships_used:
                # Not enough ships for the whole load: send a partial
                # shipment now and loop again for the remainder.
                amount_to_sell = ships_used * 500
            left_to_sell -= amount_to_sell
            amount_to_buy -= amount_to_sell
            data = {'action': 'transportOperations', 'function': 'sellGoodsAtAnotherBranchOffice', 'cityId': city_to_buy_from['id'], 'destinationCityId': destination_city_id, 'oldView': 'branchOffice', 'position': city_to_buy_from['pos'], 'avatar2Name': username, 'city2Name': cityname, 'type': '333', 'activeTab': 'bargain', 'transportDisplayPrice': '0', 'premiumTransporter': '0', 'capacity': '5', 'max_capacity': '5', 'jetPropulsion': '0', 'transporters': str(ships_used), 'backgroundView': 'city', 'currentCityId': city_to_buy_from['id'], 'templateView': 'takeOffer', 'currentTab': 'bargain', 'actionRequest': actionRequest, 'ajax': '1'}
            # Wood ("resource") uses different form fields than the
            # numbered luxury goods.
            if resource_type == 0:
                data['cargo_resource'] = amount_to_sell
                data['resourcePrice'] = precio
            else:
                data['tradegood{:d}Price'.format(resource_type)] = precio
                data['cargo_tradegood{:d}'.format(resource_type)] = amount_to_sell
            session.get(city_url + city_to_buy_from['id'], noIndex=True)
            session.post(payloadPost=data)
            if left_to_sell == 0:
                return
            if amount_to_buy == 0:
                break
def do_it2(session, amount_to_sell, price, resource_type, sell_market_capacity, city):
    """Background worker: keep the user's own sell offer topped up.

    Every two hours, tops the offer up to the market's free capacity
    until the requested amount is fully listed, then waits until the
    offer is bought down and notifies the user via the bot.

    Parameters
    ----------
    session : ikabot.web.session.Session
    amount_to_sell : int
        Total units to list over time.
    price : int
        Unit price of the offer.
    resource_type : int
        Index into ``materials_names``.
    sell_market_capacity : int
        Market storage capacity measured when the offer was created.
    city : dict
        City whose branch office hosts the offer.
    """
    initial_amount_to_sell = amount_to_sell
    html = getMarketInfo(session, city)
    previous_on_sell = onSellInMarket(html)[resource_type]
    while True:
        html = getMarketInfo(session, city)
        currently_on_sell = onSellInMarket(html)[resource_type]
        # if there is space in the store
        if currently_on_sell < storageCapacityOfMarket(html):
            # add our new offer to the free space
            # NOTE(review): free space is computed from the capacity
            # snapshot taken at creation time, while the guard above uses
            # the freshly scraped capacity — confirm these cannot diverge.
            free_space = sell_market_capacity - currently_on_sell
            offer = min(amount_to_sell, free_space)
            amount_to_sell -= offer
            new_offer = currently_on_sell + offer
            payloadPost = {'cityId': city['id'], 'position': city['pos'], 'action': 'CityScreen', 'function': 'updateOffers', 'resourceTradeType': '444', 'resource': '0', 'resourcePrice': '10', 'tradegood1TradeType': '444', 'tradegood1': '0', 'tradegood1Price': '11', 'tradegood2TradeType': '444', 'tradegood2': '0', 'tradegood2Price': '12', 'tradegood3TradeType': '444', 'tradegood3': '0', 'tradegood3Price': '17', 'tradegood4TradeType': '444', 'tradegood4': '0', 'tradegood4Price': '5', 'backgroundView': 'city', 'currentCityId': city['id'], 'templateView': 'branchOfficeOwnOffers', 'currentTab': 'tab_branchOfficeOwnOffers', 'actionRequest': actionRequest, 'ajax': '1'}
            if resource_type == 0:
                payloadPost['resource'] = new_offer
                payloadPost['resourcePrice'] = price
            else:
                payloadPost['tradegood{:d}'.format(resource_type)] = new_offer
                payloadPost['tradegood{:d}Price'.format(resource_type)] = price
            session.post(payloadPost=payloadPost)
            # if we don't have any more to add to the offer, leave the loop
            if amount_to_sell == 0:
                break
        # sleep for 2 hours
        wait(60 * 60 * 2)
    # wait until the last of our offer is actualy bought, and let the user know
    while True:
        html = getMarketInfo(session, city)
        currently_on_sell = onSellInMarket(html)[resource_type]
        if currently_on_sell <= previous_on_sell:
            msg = _('{} of {} was sold at {:d}').format(addDot(initial_amount_to_sell), materials_names[resource_type], price)
            sendToBot(session, msg)
            return
        # sleep for 2 hours
        wait(60 * 60 * 2)
| 35.60472 | 663 | 0.69536 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,126 | 0.341839 |
25bda1617f91f8624a17b3c445635a9f9cb1d1dd | 2,576 | py | Python | openclean/function/value/text.py | remram44/openclean-core | 8c09c8302cadbb3bb02c959907f91a3ae343f939 | [
"BSD-3-Clause"
] | 4 | 2021-04-20T09:06:26.000Z | 2021-11-20T20:31:28.000Z | openclean/function/value/text.py | remram44/openclean-core | 8c09c8302cadbb3bb02c959907f91a3ae343f939 | [
"BSD-3-Clause"
] | 14 | 2021-01-19T19:23:16.000Z | 2021-04-28T14:31:03.000Z | openclean/function/value/text.py | remram44/openclean-core | 8c09c8302cadbb3bb02c959907f91a3ae343f939 | [
"BSD-3-Clause"
] | 5 | 2021-08-24T11:57:21.000Z | 2022-03-17T04:39:04.000Z | # This file is part of the Data Cleaning Library (openclean).
#
# Copyright (C) 2018-2021 New York University.
#
# openclean is released under the Revised BSD License. See file LICENSE for
# full license details.
"""Helper functions for strings."""
from typing import Any
from openclean.data.types import Value
from openclean.function.value.base import PreparedFunction
class AlphaNumeric(PreparedFunction):
    """Predicate to test whether a given string contains only alpha-numeric
    characters.
    """
    def eval(self, value: Value) -> bool:
        """Returns True if the string representation of a given value
        contains only alpha-numeric characters (``str.isalpha`` or
        ``str.isdigit`` for every character).

        Parameters
        ----------
        value: scalar or tuple
            Value from the list that was used to prepare the function.

        Returns
        -------
        bool
        """
        # Ensure that the given value is a string.
        text = value if isinstance(value, str) else str(value)
        return all(c.isalpha() or c.isdigit() for c in text)
# -- Helper Functions ---------------------------------------------------------
def to_len(value: Any) -> int:
    """Get the length of a value.

    The value is converted to its string representation and the number
    of characters in that string is returned.

    Parameters
    ----------
    value: any
        Value whose length is returned.

    Returns
    -------
    int
    """
    text = str(value)
    return len(text)
def to_lower(value: Any) -> str:
    """Convert value to a lower case string.

    Non-string values are returned unchanged.

    Parameters
    ----------
    value: any
        Value that is converted to lower case string.

    Returns
    -------
    string
    """
    if isinstance(value, str):
        return str.lower(value)
    return value
def to_upper(value: Any) -> str:
    """Convert value to an upper case string.

    Non-string values are returned unchanged.

    Parameters
    ----------
    value: any
        Value that is converted to upper case string.

    Returns
    -------
    string
    """
    if isinstance(value, str):
        return str.upper(value)
    return value
def to_title(value: Any) -> str:
    """Convert value to a title case string.

    Non-string values are returned unchanged.

    Parameters
    ----------
    value: any
        Value that is converted to title case string.

    Returns
    -------
    string
    """
    if isinstance(value, str):
        return str.title(value)
    return value
| 22.596491 | 79 | 0.593168 | 736 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0 | 1,712 | 0.664596 |
25beeed839f274d72d91b74c8a4940ac9efd14ae | 1,570 | py | Python | eddiebot_apps/eddiebot_ssl/scripts/model_controller.py | TooSchoolForCool/EddieBot-ROS | 5dad6d5a6eb974135b7c9587abc0ae17d1ec6760 | [
"Apache-2.0"
] | 5 | 2019-05-15T19:31:47.000Z | 2019-08-31T01:12:35.000Z | eddiebot_apps/eddiebot_ssl/scripts/model_controller.py | TooSchoolForCool/EddieBot-ROS | 5dad6d5a6eb974135b7c9587abc0ae17d1ec6760 | [
"Apache-2.0"
] | null | null | null | eddiebot_apps/eddiebot_ssl/scripts/model_controller.py | TooSchoolForCool/EddieBot-ROS | 5dad6d5a6eb974135b7c9587abc0ae17d1ec6760 | [
"Apache-2.0"
] | 4 | 2019-06-03T12:21:44.000Z | 2019-12-25T08:57:46.000Z | #!/usr/bin/env python
import rospy
import tf
from gazebo_msgs.srv import SetModelState, DeleteModel, SpawnModel
from gazebo_msgs.msg import ModelState
from geometry_msgs.msg import Pose, Point, Quaternion
class ModelController(object):
    """Thin wrapper around Gazebo's model services.

    Provides helpers to teleport, spawn and delete models in the
    simulation via the ``/gazebo`` ROS services.
    """

    def __init__(self):
        # Block until the Gazebo services are advertised so later calls
        # cannot race the simulator's startup.
        rospy.wait_for_service("gazebo/delete_model")
        rospy.wait_for_service("gazebo/spawn_sdf_model")
        rospy.wait_for_service("gazebo/set_model_state")

        self.set_state_srv_ = rospy.ServiceProxy("/gazebo/set_model_state", SetModelState)
        self.spawn_model_srv_ = rospy.ServiceProxy("/gazebo/spawn_sdf_model", SpawnModel)
        self.delete_model_srv_ = rospy.ServiceProxy("/gazebo/delete_model", DeleteModel)

    def goto(self, model, x, y, yaw):
        """Teleport *model* to pose (x, y) with heading *yaw* (radians),
        keeping it on the ground plane (z = 0)."""
        quaternion = tf.transformations.quaternion_from_euler(0, 0, yaw)

        pose = Pose()
        pose.position.x = x
        pose.position.y = y
        pose.position.z = 0
        pose.orientation.x = quaternion[0]
        pose.orientation.y = quaternion[1]
        pose.orientation.z = quaternion[2]
        pose.orientation.w = quaternion[3]

        state = ModelState()
        state.model_name = model
        state.pose = pose

        try:
            self.set_state_srv_(state)
        # Fixed: the original used the Python-2-only ``except Exception, e``
        # syntax, which is a SyntaxError on Python 3.
        except Exception as e:
            rospy.logerr('Error on calling service: %s', str(e))

    def spawn_model(self, model_name, model_sdf, x, y, z):
        """Spawn *model_sdf* (SDF XML string) at (x, y, z) in the world
        frame under the name *model_name*."""
        pose = Pose()
        pose.position.x = x
        pose.position.y = y
        pose.position.z = z
        pose.orientation.w = 1

        self.spawn_model_srv_(model_name, model_sdf, "", pose, "world")

    def delete_model(self, model_name):
        """Remove the model named *model_name* from the simulation."""
        self.delete_model_srv_(model_name)
25bef1982e64b0ee23f81d6d159d335e97c5baff | 604 | py | Python | lang/jquery_python/read/redis_python_read.py | ekzemplaro/data_base_language | e77030367ffc595f1fac8583f03f9a3ce5eb1611 | [
"MIT",
"Unlicense"
] | 3 | 2015-05-12T16:44:27.000Z | 2021-02-09T00:39:24.000Z | lang/jquery_python/read/redis_python_read.py | ekzemplaro/data_base_language | e77030367ffc595f1fac8583f03f9a3ce5eb1611 | [
"MIT",
"Unlicense"
] | null | null | null | lang/jquery_python/read/redis_python_read.py | ekzemplaro/data_base_language | e77030367ffc595f1fac8583f03f9a3ce5eb1611 | [
"MIT",
"Unlicense"
] | null | null | null | #! /usr/bin/python3
# -*- coding: utf-8 -*-
#
# redis_python_read.py
#
# Dec/09/2014
#
#
import sys
import json
import redis
#
# ----------------------------------------------------------------
# Connect to the Redis server holding the records.
client = redis.Redis(host='host_dbase', port=6379, db=0)
#
# Collect every record whose key starts with 't', keyed by the decoded
# Redis key, visiting keys in sorted order.
records = {
    raw_key.decode(): json.loads(client.get(raw_key).decode())
    for raw_key in sorted(client.keys('t*'))
}
#
payload = json.dumps(records)
#
# CGI-style header followed by the JSON body.
print("Content-type: text/json\n\n")
#
print(payload)
#
# ----------------------------------------------------------------
| 18.875 | 66 | 0.504967 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 269 | 0.445364 |
25c1397a1fdd090ba6d14772fc3b4d4aa93b1d54 | 1,015 | py | Python | Hash and Counting/SpecialKeyboard.py | haaris272k/Problem-Solving-Collection | 5c8d0c36aff0d525ffec880115f5e123d0f3092b | [
"MIT"
] | 1 | 2022-02-28T06:49:25.000Z | 2022-02-28T06:49:25.000Z | Hash and Counting/SpecialKeyboard.py | haaris272k/Problem-Solving-Collection | 5c8d0c36aff0d525ffec880115f5e123d0f3092b | [
"MIT"
] | null | null | null | Hash and Counting/SpecialKeyboard.py | haaris272k/Problem-Solving-Collection | 5c8d0c36aff0d525ffec880115f5e123d0f3092b | [
"MIT"
] | null | null | null | """Imagine you have a special keyboard with all keys in a single row. The layout of characters on a keyboard is denoted by a string S1 of length 26. S1 is indexed from 0 to 25. Initially, your finger is at index 0.
To type a character, you have to move your finger to the index of the desired character. The time taken to move your finger from index i to index j is |j-i|, where || denotes absolute value.Find the time taken to type the string S2 with the given keyboard layout.
Example 1:
Input:
S1 = "abcdefghijklmnopqrstuvwxyz"
S2 = "cba"
Output:
4
Explanation:
Initially we are at index 0. To type 'c',
it will take a time of abs(0-2) = 2. To, type
'b' next, it will take abs(2-1) = 1. And, for
'a', it will take abs(1-0) = 1 unit time.
So, total time = 2+1+1 = 4."""
S1 = "abcdefghijklmnopqrstuvwxyz"
S2 = "cba"

# Map each character of the keyboard layout to its index.  Named
# char_index (the original shadowed the builtin ``map``) and built with
# enumerate so any layout length works, not just 26.
char_index = {char: idx for idx, char in enumerate(S1)}

# Walk the target string, recording the finger-travel distance between
# consecutive key positions.  The finger starts at index 0.
finger = 0
distance = []
for char in S2:
    distance.append(abs(char_index[char] - finger))
    finger = char_index[char]

# Total typing time is the sum of all individual moves (4 for "cba").
total_time = sum(distance)
print(total_time)
| 30.757576 | 263 | 0.692611 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 809 | 0.797044 |
25c1cb3bad57e1815eb464351a2379c671232a0c | 334 | py | Python | en_fila/apps/management/migrations/0004_remove_employee_employee_passcode.py | puertoricanDev/En_fila | cf2551190be61f6a6ce705cccf35734edc682eef | [
"MIT"
] | null | null | null | en_fila/apps/management/migrations/0004_remove_employee_employee_passcode.py | puertoricanDev/En_fila | cf2551190be61f6a6ce705cccf35734edc682eef | [
"MIT"
] | null | null | null | en_fila/apps/management/migrations/0004_remove_employee_employee_passcode.py | puertoricanDev/En_fila | cf2551190be61f6a6ce705cccf35734edc682eef | [
"MIT"
] | null | null | null | # Generated by Django 3.2.6 on 2021-11-16 20:36
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('management', '0003_employee'),
]
operations = [
migrations.RemoveField(
model_name='employee',
name='employee_passcode',
),
]
| 18.555556 | 47 | 0.598802 | 249 | 0.745509 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.308383 |
25c3b7815ccdc3dde3489a68e92e0d683b1cb9a9 | 8,933 | py | Python | backend/app/app/api/api_v1/system/user.py | lianjy357/vue-element-admin-fastapi | 70f697af33ca747a154d0de129f4cbd7f9d03f7b | [
"MIT"
] | 10 | 2020-12-16T07:31:29.000Z | 2022-01-27T08:01:22.000Z | backend/app/app/api/api_v1/system/user.py | lianjy357/vue-element-admin-fastapi | 70f697af33ca747a154d0de129f4cbd7f9d03f7b | [
"MIT"
] | null | null | null | backend/app/app/api/api_v1/system/user.py | lianjy357/vue-element-admin-fastapi | 70f697af33ca747a154d0de129f4cbd7f9d03f7b | [
"MIT"
] | 3 | 2021-03-18T11:38:21.000Z | 2021-09-02T06:23:15.000Z | from typing import Any, Dict, Optional,List
from fastapi import APIRouter, Depends, HTTPException,Body,Request
from sqlalchemy.orm import Session,joinedload_all,contains_eager,Load
from fastapi.encoders import jsonable_encoder
from app import crud, models, schemas
from app.api import deps
from app.core.security import get_password_hash
from app.utils import list_to_tree,get_list_id_by_tree
router = APIRouter()
@router.get("/me", response_model=schemas.Response)
def read_user_me(
    db: Session = Depends(deps.get_db),
    current_user: models.User = Depends(deps.get_current_active_user),
) -> Any:
    """Return the currently authenticated user's profile.

    The user's role keys are flattened into a ``roles`` list so the
    frontend can consume them directly.
    """
    profile = current_user.dict()
    profile['roles'] = [assignment.role.key for assignment in current_user.roles]
    return {"code": 20000, "data": profile, "message": ""}
@router.get("/list", response_model=schemas.Response)
def read_routes(*,
                db: Session = Depends(deps.get_db),
                limit: int, page: int,
                deptId: Optional[int] = None, username: Optional[str] = None,
                nickname: Optional[str] = None, status: Optional[str] = None,
                current_user: models.User = Depends(deps.get_current_active_user)) -> Any:
    """Return a paginated user list, joined with each user's department.

    Optional filters: username / nickname (substring match), status
    (exact), and deptId (includes all descendant departments).
    """
    query = db.query(models.User, models.Department
        ).outerjoin(models.User_Department, models.User_Department.user_id == models.User.id
        ).outerjoin(models.Department, models.Department.id == models.User_Department.department_id)
    if username:
        query = query.filter(models.User.username.like("%" + username + "%"))
    if nickname:
        # BUG FIX: this filter previously interpolated ``username`` instead of
        # ``nickname``, so the nickname filter searched the wrong value (and
        # raised TypeError when ``username`` was None).
        query = query.filter(models.User.nickname.like("%" + nickname + "%"))
    if status:
        query = query.filter(models.User.status == status)
    if deptId:
        # Expand the requested department into itself plus all descendants so
        # users attached to sub-departments are included.
        # NOTE(review): filter(models.Department.id, ...) passes a bare column
        # as a criterion — looks like a leftover; confirm intent.
        depts = db.query(models.Department).filter(models.Department.id, models.Department.status == 1).all()
        tree = list_to_tree([dep.dict() for dep in depts], deptId)
        tree_ids = get_list_id_by_tree(tree)
        query = query.filter(models.User_Department.department_id.in_(tree_ids))
    total = query.count()
    items = query.limit(limit).offset((page - 1) * limit).all()
    user_list = []
    for user_row, dept_row in items:
        user = user_row.dict()
        user["dept"] = dept_row
        user_list.append(user)
    return {
        "code": 20000,
        "data": {
            "items": user_list,
            "total": total,
        },
        # NOTE(review): message says "修改成功" ("update succeeded") on a read
        # endpoint — confirm the frontend ignores it before changing.
        "message": "修改成功",
    }
@router.get("/", response_model=schemas.Response)
def read_user(*, db: Session = Depends(deps.get_db), current_user: models.User = Depends(deps.get_current_active_user)) -> Any:
    """Return the option lists (roles and posts) used by the user form."""
    roleOptions = db.query(models.Role).all()
    postOptions = (
        db.query(models.Dict_Data)
        .join(models.Dict_Type, models.Dict_Type.id == models.Dict_Data.type_id)
        .filter(models.Dict_Type.code == "post")
        .all()
    )
    return {
        "code": 20000,
        "data": {"roleOptions": roleOptions, "postOptions": postOptions},
        "message": "修改成功",
    }
@router.get("/{id}", response_model=schemas.Response)
def read_user_by_id(*, db: Session = Depends(deps.get_db), id: int, current_user: models.User = Depends(deps.get_current_active_user)) -> Any:
    """Return one user's editable detail plus the role/post option lists.

    Renamed from ``read_user``: the module previously defined two handlers
    with that name, so the second definition shadowed the first in the
    module namespace (flake8 F811).  FastAPI registers routes at decoration
    time, so the HTTP interface is unchanged by the rename.
    """
    roleOptions = db.query(models.Role).all()
    postOptions = db.query(models.Dict_Data).join(models.Dict_Type, models.Dict_Type.id == models.Dict_Data.type_id).filter(models.Dict_Type.code == "post").all()
    # .one() raises if the id does not exist (surfaces as a 500).
    user = db.query(models.User).filter(models.User.id == id).one()
    user_department = db.query(models.User_Department).filter(models.User_Department.user_id == id).first()
    user_role = db.query(models.User_Role).filter(models.User_Role.user_id == id).all()
    # Posts are dictionary entries of type "post" linked through User_Dict.
    user_post = db.query(models.User_Dict
        ).outerjoin(models.Dict_Data, models.Dict_Data.id == models.User_Dict.dict_id
        ).outerjoin(models.Dict_Type, models.Dict_Type.id == models.Dict_Data.type_id
        ).filter(models.Dict_Type.code == "post", models.User_Dict.user_id == id).all()
    user = user.dict()
    # Robustness fix: previously raised AttributeError (HTTP 500) for a user
    # with no department row; now returns deptId = None instead.
    user["deptId"] = user_department.department_id if user_department else None
    user["roleIds"] = [r.role.id for r in user_role]
    user["postIds"] = [up.dict_id for up in user_post]
    return {
        "code": 20000,
        "data": {
            "user": user,
            "roleOptions": roleOptions,
            "postOptions": postOptions,
        },
        "message": "修改成功",
    }
@router.put("/", response_model=schemas.Response)
def update_user(*, db: Session = Depends(deps.get_db), user: schemas.UserUpdate, current_user: models.User = Depends(deps.get_current_active_user)) -> Any:
    """Update a user's profile and replace their department, post and role
    associations.

    The association tables are rebuilt with delete-then-insert; ``flush``
    is called between steps so each stage sees the previous one's effect.
    """
    user_id = user.id
    user_data = {
        "username": user.username,
        "nickname": user.nickname,
        "identity_card": user.identity_card,
        "phone": user.phone,
        "address": user.address,
        "sex": user.sex,
        "work_start": user.work_start,
        "avatar": user.avatar,
        "introduction": user.introduction,
        "is_active": user.is_active,
        "is_superuser": user.is_superuser,
        "status": user.status,
    }
    deptId = user.deptId
    postIds = user.postIds
    roleIds = user.roleIds
    # Basic profile fields.
    db.query(models.User).filter(models.User.id == user_id).update(user_data)
    db.flush()
    # Department link (one per user): delete old, insert new.
    db.query(models.User_Department).filter(models.User_Department.user_id == user_id).delete()
    user_department = {
        "user_id": user_id,
        "department_id": deptId,
    }
    db.add(models.User_Department(**user_department))
    db.flush()
    # Post links (dictionary entries).  The original built
    # ``user_dict = user_post + []`` — a no-op copy, removed.
    db.query(models.User_Dict).filter(models.User_Dict.user_id == user_id).delete()
    user_post = [{"user_id": user_id, "dict_id": i} for i in postIds]
    db.bulk_insert_mappings(models.User_Dict, user_post)
    db.flush()
    # Role links.
    db.query(models.User_Role).filter(models.User_Role.user_id == user_id).delete()
    user_roles = [{"user_id": user_id, "role_id": i} for i in roleIds]
    db.bulk_insert_mappings(models.User_Role, user_roles)
    db.flush()
    return {
        "code": 20000,
        "data": "",
        "message": "修改成功",
    }
@router.post("/", response_model=schemas.Response)
def add_user(*, db: Session = Depends(deps.get_db), user: schemas.UserCreate,
             current_user: models.User = Depends(deps.get_current_active_user)
             ) -> Any:
    """Create a new user and attach department, post and role links.

    The user row is flushed first so ``add_user.id`` is populated before
    the association rows are inserted.
    """
    user_data = {
        "username":user.username,
        "nickname":user.nickname,
        "identity_card":user.identity_card,
        "phone":user.phone,
        "address":user.address,
        "sex":user.sex,
        # WARNING(security): every new account starts with the hard-coded
        # password "qwe123" — confirm this default is intended.
        "hashed_password":get_password_hash("qwe123"),
        "introduction":user.introduction,
        "is_active":True,
        "is_superuser":False,
        "status":user.status,
    }
    add_user = models.User(**user_data)
    db.add(add_user)
    # Flush to assign the primary key used by the link rows below.
    db.flush()
    deptId = user.deptId
    postIds = user.postIds
    roleIds = user.roleIds
    # Department link (single department per user).
    user_department = {
        "user_id":add_user.id,
        "department_id":deptId,
    }
    db.add(models.User_Department(**user_department))
    db.flush()
    # Post links via the generic dictionary table.
    # ``user_post + []`` is currently a plain copy — presumably a placeholder
    # for merging further dictionary links later.
    user_post = [{"user_id":add_user.id,"dict_id":i} for i in postIds]
    user_dict = user_post + []
    db.bulk_insert_mappings(models.User_Dict,user_dict)
    db.flush()
    # Role links.
    user_roles = [{"user_id": add_user.id, "role_id": i} for i in roleIds]
    db.bulk_insert_mappings(models.User_Role,user_roles)
    db.flush()
    return {
        "code": 20000,
        "data": "",
        "message": "新增成功",
    }
@router.put("/reset-password", response_model=schemas.Response)
def reset_password(
    db: Session = Depends(deps.get_db), user_id: int = Body(...), password: str = Body(...), User = Depends(deps.get_current_active_user),
) -> Any:
    """Reset a user's password.

    Only superusers, or the user themselves, are allowed to reset it.
    """
    new_values = {"hashed_password": get_password_hash(password)}
    # Guard clause: reject callers that are neither superuser nor the owner.
    if not (User.is_superuser or User.id == user_id):
        return {"code": 40000, "data": "", "message": "无修改权限"}
    db.query(models.User).filter(models.User.id == user_id).update(new_values)
    return {"code": 20000, "data": "", "message": "修改成功"}
@router.delete("/{ids}", response_model=schemas.Response)
def delete_user(*, db: Session = Depends(deps.get_db), ids: str,
                current_user: models.User = Depends(deps.get_current_active_user)
                ) -> Any:
    """Delete one or more users given a comma-separated id list.

    Association rows (posts, departments, roles) are removed before the
    user rows themselves so no orphaned links remain.
    """
    id_list = [int(raw_id) for raw_id in ids.split(",")]  # removed leftover debug print
    db.query(models.User_Dict).filter(models.User_Dict.user_id.in_(id_list)).delete(synchronize_session=False)
    db.query(models.User_Department).filter(models.User_Department.user_id.in_(id_list)).delete(synchronize_session=False)
    db.query(models.User_Role).filter(models.User_Role.user_id.in_(id_list)).delete(synchronize_session=False)
    db.query(models.User).filter(models.User.id.in_(id_list)).delete(synchronize_session=False)
    return {
        "code": 20000,
        "data": "",
        # Fixed: previously said "新增成功" ("create succeeded") on a delete.
        "message": "删除成功",
    }
| 38.012766 | 161 | 0.643233 | 0 | 0 | 0 | 0 | 8,564 | 0.951661 | 0 | 0 | 1,058 | 0.117569 |
25c457b39bbaed7f480836fca9e32dd4932de388 | 2,676 | py | Python | janus/driver.py | zhenglz/janus | f3f1ed3f2b6e377c51e958cae2d919069d221eda | [
"BSD-3-Clause"
] | null | null | null | janus/driver.py | zhenglz/janus | f3f1ed3f2b6e377c51e958cae2d919069d221eda | [
"BSD-3-Clause"
] | null | null | null | janus/driver.py | zhenglz/janus | f3f1ed3f2b6e377c51e958cae2d919069d221eda | [
"BSD-3-Clause"
] | null | null | null | """
This is the qmmm driver module
"""
import pickle
from janus import Initializer
def run_janus(filename='input.json'):
    """
    Drive the janus program.

    Builds an :class:`Initializer` from the given input file, obtains the
    wrapper objects from it, and dispatches to either
    :func:`~janus.driver.run_simulation` (when MD is requested) or
    :func:`~janus.driver.run_single_point`.

    Parameters
    ----------
    filename : str
        Filename from which to read input parameters
    """
    config = Initializer(filename)
    print('Initializing')
    # build the low-level MM wrapper and the QM/MM wrapper
    ll_wrapper, qmmm_wrapper = config.initialize_wrappers()
    if config.run_md is True:
        run_simulation(ll_wrapper, qmmm_wrapper)
    else:
        run_single_point(ll_wrapper, qmmm_wrapper)
def run_simulation(md_sim_wrapper, qmmm_wrapper):
    """
    Drives QM/MM with MD time step integration

    Equilibrates for ``start_qmmm`` MM steps, then for each of
    ``qmmm_steps`` iterations runs a single-point QM/MM computation,
    optionally pickles the resulting forces, and feeds them back into the
    MD engine. Finishes with ``end_steps`` plain MM steps and writes a PDB.

    Parameters
    ----------
    md_sim_wrapper : :class:`~janus.mm_wrapper.MMWrapper`
        A child class of MMWrapper that drives MD simulation
    qmmm_wrapper: :class:`~janus.qmmm.QMMM`
        A QMMM or AQMMM wrapper that drives the QM/MM computations
    """
    print('Equilibrating with {} steps'.format(md_sim_wrapper.start_qmmm))
    md_sim_wrapper.take_step(md_sim_wrapper.start_qmmm)
    for step in range(md_sim_wrapper.qmmm_steps):
        print('Taking step {}'.format(step + 1))
        run_single_point(md_sim_wrapper, qmmm_wrapper)
        # get aqmmm forces
        forces = qmmm_wrapper.get_forces()
        # Periodically dump the forces for offline inspection; an interval of
        # 0 disables dumping entirely.
        if (md_sim_wrapper.return_forces_interval != 0 and (step + 1) % md_sim_wrapper.return_forces_interval == 0):
            with open(md_sim_wrapper.return_forces_filename, 'wb') as f:
                pickle.dump(forces, f)
        # feed forces into md simulation and take a step
        # make sure positions are updated so that when i get information on entire system
        # getting it on the correct one
        md_sim_wrapper.take_updated_step(force=forces)
    print('QMMM finished.')
    # Trailing plain-MM steps, then persist the final structure.
    md_sim_wrapper.take_step(md_sim_wrapper.end_steps)
    main_info = md_sim_wrapper.get_main_info()
    md_sim_wrapper.write_pdb(main_info)
def run_single_point(ll_wrapper, qmmm_wrapper):
    """
    Drive a single QM/MM computation.

    Parameters
    ----------
    ll_wrapper : :class:`~janus.mm_wrapper.MMWrapper`
        A child class of MMWrapper that contains MM information on the whole system
    qmmm_wrapper: :class:`~janus.qmmm.QMMM`
        A QMMM or AQMMM wrapper that drives the QM/MM computations
    """
    # Fetch MM information for the entire system, then hand it to the
    # QM/MM wrapper together with the MM wrapper's class tag.
    whole_system_info = ll_wrapper.get_main_info()
    qmmm_wrapper.run_qmmm(whole_system_info, ll_wrapper.class_type)
| 28.774194 | 116 | 0.688341 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,335 | 0.498879 |
25c4e0bb94bfd2ad59d043f0c31d6a9489178782 | 7,632 | py | Python | teether/slicing.py | ljhOfGithub/teether | 7c74dbe0853a527fcb8a458cbb6485d4dd07a375 | [
"Apache-2.0"
] | null | null | null | teether/slicing.py | ljhOfGithub/teether | 7c74dbe0853a527fcb8a458cbb6485d4dd07a375 | [
"Apache-2.0"
] | null | null | null | teether/slicing.py | ljhOfGithub/teether | 7c74dbe0853a527fcb8a458cbb6485d4dd07a375 | [
"Apache-2.0"
] | null | null | null | from teether.cfg.instruction import Instruction
from teether.cfg.opcodes import potentially_user_controlled
from teether.explorer.backward import traverse_back
from teether.util.intrange import Range
def slice_to_program(s):
    """Lay out the instructions of a slice consecutively.

    Returns a dict mapping a synthetic program counter to each instruction,
    where every instruction advances the counter by its encoded size
    (``next_addr - addr``).
    """
    program = {}
    offset = 0
    for instruction in s:
        program[offset] = instruction
        offset += instruction.next_addr - instruction.addr
    return program
def adjust_stack(backward_slice, stack_delta):
    """Append stack-balancing instructions to ``backward_slice``.

    A positive ``stack_delta`` pushes that many dummy values
    (PUSH4 0xdeadc0de); a negative one pops the excess (POP).
    """
    if stack_delta > 0:
        for _ in range(stack_delta):
            backward_slice.append(Instruction(0x0, 0x63, b'\xde\xad\xc0\xde'))
    elif stack_delta < 0:
        for _ in range(-stack_delta):
            backward_slice.append(Instruction(0x0, 0x50))
class SlicingState(object):
    """Immutable snapshot of one backward-slicing step.

    Hashable so the explorer can deduplicate equivalent states.
    NOTE(review): ``stack_underflow`` is excluded from both ``__hash__`` and
    ``__eq__`` — presumably intentional, confirm before relying on it.
    """

    def __init__(self, stacksize, stack_underflow, stack_delta, taintmap, memory_taint, backward_slice, instructions):
        self.stacksize = stacksize
        self.stack_underflow = stack_underflow
        self.stack_delta = stack_delta
        # frozenset: immutable, so the taint map can participate in hashing.
        self.taintmap = frozenset(taintmap)
        self.memory_taint = memory_taint
        # The actual slice doesn't matter that much. What matters more is the resulting EXPRESSION of the return-address
        self.backward_slice = tuple(backward_slice)
        self.instructions = tuple(instructions)

    def __hash__(self):
        # Custom hash combining the state fields with small prime weights;
        # invoked by hash() when states are stored in sets/dicts.
        return sum(
            a * b for a, b in zip((23, 29, 31, 37, 41), (
                self.stacksize, self.stack_delta, hash(self.taintmap), hash(self.instructions),
                hash(self.backward_slice))))

    def __eq__(self, other):
        return (
            self.stacksize == other.stacksize and
            self.stack_delta == other.stack_delta and
            self.taintmap == other.taintmap and
            self.memory_taint == other.memory_taint and
            self.backward_slice == other.backward_slice and
            self.instructions == other.instructions)

    def __str__(self):
        return 'Stacksize: %d, Underflow: %d, Delta: %d, Map: %s, Slice: %s, Instructions: %s' % (
            self.stacksize, self.stack_underflow, self.stack_delta, self.taintmap,
            ','.join('%x' % i.addr for i in self.backward_slice),
            ','.join('%x' % i.addr for i in self.instructions))
def advance_slice(slicing_state, memory_info):
    """Walk the current basic block's instructions backwards, tracking taint.

    An instruction joins the slice when it can produce a tainted stack slot
    or writes tainted memory.  Returns a new :class:`SlicingState`; its
    ``instructions`` list is emptied so the caller knows the block is done.
    """
    stacksize = slicing_state.stacksize
    stack_underflow = slicing_state.stack_underflow
    stack_delta = slicing_state.stack_delta
    taintmap = set(slicing_state.taintmap)
    memory_taint = slicing_state.memory_taint
    backward_slice = list(slicing_state.backward_slice)
    instructions = slicing_state.instructions
    # Iterate the block in reverse program order.
    for ins in instructions[::-1]:
        slice_candidate = False
        # Candidate if any of the slots this instruction pushes is tainted...
        if taintmap and stacksize - ins.outs <= max(taintmap):
            slice_candidate = True
        # ...or if it writes memory that is currently tainted.
        if memory_info and ins in memory_info and memory_info[ins].writes & memory_taint:
            slice_candidate = True
        if slice_candidate:
            add_to_slice = False
            if 0x80 <= ins.op <= 0x8f:  # Special handling for DUP
                if stacksize - 1 in taintmap:
                    add_to_slice = True
                    in_idx = ins.op - 0x7f
                    # Taint moves from the duplicated top to the source slot.
                    taintmap.remove(stacksize - 1)
                    taintmap.add((stacksize - 1) - in_idx)
            elif 0x90 <= ins.op <= 0x9f:  # Special handling for SWAP
                in_idx = ins.op - 0x8f
                if stacksize - 1 in taintmap or (stacksize - 1) - in_idx in taintmap:
                    add_to_slice = True
                    if stacksize - 1 in taintmap and (stacksize - 1) - in_idx in taintmap:
                        # both tainted => taint does not change
                        pass
                    elif stacksize - 1 in taintmap:
                        taintmap.remove(stacksize - 1)
                        taintmap.add((stacksize - 1) - in_idx)
                    elif (stacksize - 1) - in_idx in taintmap:
                        taintmap.remove((stacksize - 1) - in_idx)
                        taintmap.add(stacksize - 1)
            else:  # assume entire stack is affected otherwise
                add_to_slice = True
                # Outputs consume taint; the instruction's inputs become tainted.
                taintmap -= set(range(stacksize - ins.outs, stacksize))
                taintmap |= set(range(stacksize - ins.outs, stacksize - ins.delta))
            if add_to_slice:
                # Balance the stack before appending so the slice replays cleanly.
                adjust_stack(backward_slice, stack_delta)
                stack_delta = -ins.delta
                backward_slice.append(ins)
                stack_underflow = min(stack_underflow, stacksize - ins.outs)
                if memory_info and ins in memory_info:
                    ins_info = memory_info[ins]
                    # Writes clear memory taint; reads introduce new taint.
                    memory_taint = memory_taint - ins_info.writes + ins_info.reads
        stacksize -= ins.delta
        # no taint left? then our job here is done
        if not taintmap and not memory_taint:
            stack_adjust = stacksize - stack_underflow
            if stack_adjust > 0:
                adjust_stack(backward_slice, stack_adjust)
            return SlicingState(stacksize, stack_underflow, stack_delta, set(taintmap), memory_taint,
                                list(backward_slice),
                                [])
        stack_delta += ins.delta
    # still taint left? trace further if gas is still sufficient
    return SlicingState(stacksize, stack_underflow, stack_delta, set(taintmap), memory_taint, list(backward_slice),
                        [])
def backward_slice(ins, taint_args=None, memory_info=None, initial_gas=10, must_visits=[], reachable=False):
    """Compute backward slices ending at instruction ``ins``.

    ``taint_args`` selects which of the instruction's stack arguments are
    tainted (all by default); ``memory_info`` adds memory-taint tracking.
    Returns a list of instruction lists in forward order; with
    ``reachable=True`` only slices whose first block is reachable from
    address 0 are kept.

    NOTE(review): ``must_visits=[]`` is a mutable default argument — safe
    only if ``traverse_back`` never mutates it; confirm.
    """
    # logging.debug('backward_slice called')
    if ins.ins == 0:
        return []
    if taint_args:
        # Stack arguments are indexed from the top: arg i sits at slot
        # (ins.ins - 1) - i.
        taintmap = set((ins.ins - 1) - i for i in taint_args)
    else:
        taintmap = set(range(ins.ins))
    if memory_info and ins in memory_info:
        memory_taint = memory_info[ins].reads
    else:
        memory_taint = Range()

    def initial_data(ins):
        # Seed state: everything in the block BEFORE ins is still to process.
        stacksize = ins.ins
        slice = []
        stack_underflow = 0
        stack_delta = 0
        idx = ins.bb.ins.index(ins)
        return SlicingState(stacksize, stack_underflow, stack_delta, taintmap, memory_taint, slice,
                            ins.bb.ins[:idx])

    def advance_data(slicing_state):
        return advance_slice(slicing_state, memory_info)

    def update_data(slicing_state, new_bb):
        # Carry the taint state into a predecessor block's instructions.
        return SlicingState(slicing_state.stacksize, slicing_state.stack_underflow, slicing_state.stack_delta,
                            set(slicing_state.taintmap), slicing_state.memory_taint, list(slicing_state.backward_slice),
                            new_bb.ins)

    def finish_path(slicing_state):
        # A path is complete once no stack or memory taint remains.
        return not slicing_state.taintmap and not slicing_state.memory_taint

    # logging.debug('Before loop')
    # Slices were collected backwards; reverse into forward program order.
    slices = [r.backward_slice[::-1] for r in
              traverse_back([ins], initial_gas, initial_data, advance_data, update_data, finish_path, must_visits)]
    if not reachable:
        return slices
    else:
        filtered_slices = []
        for slice in slices:
            first_bb = next(i.bb for i in slice if i.bb)
            # Keep only slices starting in a block reachable from address 0.
            if 0 in first_bb.ancestors | {first_bb.start}:
                filtered_slices.append(slice)
        return filtered_slices
def interesting_slices(instruction, args=None, reachable=False):
    """Return only the backward slices that contain at least one
    potentially user-controlled instruction."""
    all_slices = backward_slice(instruction, args, reachable=reachable)
    return [candidate for candidate in all_slices
            if any(ins.name in potentially_user_controlled for ins in candidate)]
| 43.862069 | 120 | 0.624476 | 1,855 | 0.235048 | 0 | 0 | 0 | 0 | 0 | 0 | 997 | 0.12633 |
25c4fd8ace573f044a14fdb19367fd2f41c3ec1f | 8,229 | py | Python | install/app_store/tk-nuke-writenode/v1.2.0/app.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | [
"MIT"
] | null | null | null | install/app_store/tk-nuke-writenode/v1.2.0/app.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | [
"MIT"
] | null | null | null | install/app_store/tk-nuke-writenode/v1.2.0/app.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | [
"MIT"
] | 1 | 2020-02-15T10:42:56.000Z | 2020-02-15T10:42:56.000Z | # Copyright (c) 2013 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
"""
Tank Write Node for Nuke
"""
import os
import nuke
import tank
from tank import TankError
class NukeWriteNode(tank.platform.Application):
    """Toolkit app exposing Shotgun Write Nodes inside Nuke.

    All real work is delegated to a ``TankWriteNodeHandler`` instance
    created in :meth:`init_app`; the public methods below form a stable
    query/utility API for other apps.
    """

    def init_app(self):
        """
        Called as the application is being initialized
        """
        # import module and create handler
        tk_nuke_writenode = self.import_module("tk_nuke_writenode")
        self.__write_node_handler = tk_nuke_writenode.TankWriteNodeHandler(self)

        # patch handler onto nuke module for access in WriteNode knobs
        nuke._shotgun_write_node_handler = self.__write_node_handler
        # and for backwards compatibility!
        nuke._tank_write_node_handler = self.__write_node_handler

        # add WriteNodes to nuke menu
        self.__add_write_node_commands()

        # add callbacks:
        self.__write_node_handler.add_callbacks()

    @property
    def context_change_allowed(self):
        """
        Specifies that context changes are allowed.
        """
        return True

    def destroy_app(self):
        """
        Called when the app is unloaded/destroyed
        """
        self.log_debug("Destroying tk-nuke-writenode app")

        # remove any callbacks that were registered by the handler:
        self.__write_node_handler.remove_callbacks()

        # clean up the nuke module:
        if hasattr(nuke, "_shotgun_write_node_handler"):
            del nuke._shotgun_write_node_handler
        if hasattr(nuke, "_tank_write_node_handler"):
            del nuke._tank_write_node_handler

    def post_context_change(self, old_context, new_context):
        """
        Handles refreshing the render paths of all Shotgun write nodes
        after a context change has been completed.

        :param old_context: The sgtk.context.Context being switched from.
        :param new_context: The sgtk.context.Context being switched to.
        """
        for node in self.get_write_nodes():
            self.reset_node_render_path(node)
        self.__write_node_handler.populate_profiles_from_settings()
        self.__write_node_handler.populate_script_template()
        # rebuild the menu entries against the new context
        self.__add_write_node_commands(new_context)

    def process_placeholder_nodes(self):
        """
        Convert any placeholder nodes to TK Write Nodes
        """
        self.__write_node_handler.process_placeholder_nodes()

    # interface for other apps to query write node info:
    #

    # access general information:
    def get_write_nodes(self):
        """
        Return list of all write nodes
        """
        return self.__write_node_handler.get_nodes()

    def get_node_name(self, node):
        """
        Return the name for the specified node
        """
        return self.__write_node_handler.get_node_name(node)

    def get_node_profile_name(self, node):
        """
        Return the name of the profile the specified node
        is using
        """
        return self.__write_node_handler.get_node_profile_name(node)

    def get_node_tank_type(self, node):
        """
        Return the tank type for the specified node

        Note: Legacy version with old 'Tank Type' name - use
        get_node_published_file_type instead!
        """
        return self.__write_node_handler.get_node_tank_type(node)

    def get_node_published_file_type(self, node):
        """
        Return the published file type for the specified node
        """
        return self.__write_node_handler.get_node_tank_type(node)

    def is_node_render_path_locked(self, node):
        """
        Determine if the render path for the specified node
        is locked.  The path will become locked if the cached
        version of the path no longer matches the computed
        path (using the appropriate render template).  This
        can happen if the file is moved on disk or if the template
        is changed.
        """
        return self.__write_node_handler.render_path_is_locked(node)

    # access full-res render information:
    def get_node_render_path(self, node):
        """
        Return the render path for the specified node
        """
        return self.__write_node_handler.compute_render_path(node)

    def get_node_render_files(self, node):
        """
        Return the list of rendered files for the node
        """
        return self.__write_node_handler.get_files_on_disk(node)

    def get_node_render_template(self, node):
        """
        Return the render template for the specified node
        """
        return self.__write_node_handler.get_render_template(node)

    def get_node_publish_template(self, node):
        """
        Return the publish template for the specified node
        """
        return self.__write_node_handler.get_publish_template(node)

    # access proxy-res render information:
    def get_node_proxy_render_path(self, node):
        """
        Return the render path for the specified node
        """
        return self.__write_node_handler.compute_proxy_path(node)

    def get_node_proxy_render_files(self, node):
        """
        Return the list of rendered files for the node
        """
        return self.__write_node_handler.get_proxy_files_on_disk(node)

    def get_node_proxy_render_template(self, node):
        """
        Return the render template for the specified node
        """
        return self.__write_node_handler.get_proxy_render_template(node)

    def get_node_proxy_publish_template(self, node):
        """
        Return the publish template for the specified node
        """
        return self.__write_node_handler.get_proxy_publish_template(node)

    # useful utility functions:
    def generate_node_thumbnail(self, node):
        """
        Generate a thumnail for the specified node
        """
        return self.__write_node_handler.generate_thumbnail(node)

    def reset_node_render_path(self, node):
        """
        Reset the render path of the specified node.  This
        will force the render path to be updated based on
        the current script path and configuraton.

        Note, this should really never be needed now that the
        path is reset automatically when the user changes something.
        """
        self.__write_node_handler.reset_render_path(node)

    def convert_to_write_nodes(self):
        """
        Convert all Shotgun write nodes found in the current Script to regular
        Nuke Write nodes.  Additional toolkit information will be stored on
        additional user knobs named 'tk_*'
        """
        self.__write_node_handler.convert_sg_to_nuke_write_nodes()

    def convert_from_write_nodes(self):
        """
        Convert all regular Nuke Write nodes that have previously been converted
        from Shotgun Write nodes, back into Shotgun Write nodes.
        """
        self.__write_node_handler.convert_nuke_to_sg_write_nodes()

    # Private methods
    #
    def __add_write_node_commands(self, context=None):
        """
        Creates write node menu entries for all write node configurations
        """
        context = context or self.context
        write_node_icon = os.path.join(self.disk_location, "resources", "tk2_write.png")
        for profile_name in self.__write_node_handler.profile_names:
            # add to toolbar menu
            # default argument binds profile_name at definition time,
            # avoiding the classic late-binding closure bug in loops
            cb_fn = lambda pn=profile_name: self.__write_node_handler.create_new_node(pn)
            self.engine.register_command(
                "%s [Shotgun]" % profile_name,
                cb_fn,
                dict(
                    type="node",
                    icon=write_node_icon,
                    context=context,
                )
            )
| 34.004132 | 89 | 0.652813 | 7,652 | 0.929882 | 0 | 0 | 143 | 0.017378 | 0 | 0 | 3,903 | 0.474298 |
25c51b38c650548c45b9f618e5ea521be6de4a8b | 1,398 | py | Python | decorators/logging_decorator.py | devwarrior/python_coding | 4a0029efab2fa15cdd26575f87d7fb02f570ac73 | [
"MIT"
] | null | null | null | decorators/logging_decorator.py | devwarrior/python_coding | 4a0029efab2fa15cdd26575f87d7fb02f570ac73 | [
"MIT"
] | null | null | null | decorators/logging_decorator.py | devwarrior/python_coding | 4a0029efab2fa15cdd26575f87d7fb02f570ac73 | [
"MIT"
] | null | null | null | '''
decorator to log functions executions
'''
from functools import wraps
import logging
import time
def log(logger, level='info'):
    '''
    @brief decorator factory: log the decorated function's name on every call
    @param logger logger instance used to emit the message
    @param level name of the logger method to use ('debug', 'info', ...)
    '''
    def log_decorator(method):
        '''
        @brief wrap method so each invocation is logged before execution
        '''
        @wraps(method)
        def wrapper(*args, **kw):
            '''
            @brief emit the function name at the configured level, then delegate
            @param *args arguments for called method
            @param **kw arguments for called method
            '''
            emit = getattr(logger, level)
            emit(method.__name__)
            return method(*args, **kw)
        return wrapper
    return log_decorator
# Module-level logger writing DEBUG-and-above records to log_file.log.
LOGGER = logging.getLogger('__main__')
LOGGER.setLevel(logging.DEBUG)
F_HANDLER = logging.FileHandler('log_file.log')
F_HANDLER.setLevel(logging.DEBUG)
LOGGER.addHandler(F_HANDLER)
# timestamp - logger name - level - message
FORMATTER = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
F_HANDLER.setFormatter(FORMATTER)
@log(LOGGER, level='debug')
def do_things(tvalue):
    '''
    @brief simulate work by sleeping for tvalue seconds; each call is
           logged at DEBUG level by the decorator
    '''
    time.sleep(tvalue)
# Stacked identical decorators: each call is logged TWICE at WARNING level
# (once per wrapper) — presumably a demo of decorator stacking.
@log(LOGGER, level='warning')
@log(LOGGER, level='warning')
def do_things1(tvalue):
    '''
    @brief simulate work by sleeping for tvalue seconds
    '''
    time.sleep(tvalue)
# Demo entry point: exercises both decorated functions (sleeps ~1.8 s total).
if __name__ == '__main__':
    do_things1(1)
    do_things(0.8)
| 25.418182 | 85 | 0.637339 | 0 | 0 | 0 | 0 | 639 | 0.457082 | 0 | 0 | 614 | 0.439199 |
25c61bd34a9541bd7028c5974cd748844f8e74ae | 7,828 | py | Python | experiments/createReverseBatches.py | usc-sail/mica-violence-ratings-predictions-from-movie-scripts | be581fe8aa19296321bbc6b46cbbc495c9f23b3f | [
"MIT"
] | 3 | 2020-12-25T06:54:07.000Z | 2021-06-14T12:41:54.000Z | experiments/createReverseBatches.py | usc-sail/mica-violence-ratings | be581fe8aa19296321bbc6b46cbbc495c9f23b3f | [
"MIT"
] | 1 | 2019-02-05T19:36:01.000Z | 2019-02-05T19:36:01.000Z | experiments/createReverseBatches.py | usc-sail/mica-violence-ratings-predictions-from-movie-scripts | be581fe8aa19296321bbc6b46cbbc495c9f23b3f | [
"MIT"
] | 1 | 2020-12-25T12:54:50.000Z | 2020-12-25T12:54:50.000Z | from numpy.random import seed
seed(5393)
from tensorflow import set_random_seed
set_random_seed(12011)
import os
import numpy as np
import pandas as pd
from scipy import sparse
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
from sklearn.pipeline import FeatureUnion
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.model_selection import train_test_split, StratifiedKFold
from joblib import Parallel, delayed
from tqdm import tqdm
import logging
logging.basicConfig(level = logging.INFO)
EMBED_DIM = 300
VOCAB_SIZE = 5000
max_len = 1000
batch_size = 16
n_folds = 5
fold_dir = "/data/victor/violence-workshop/batches/reversefolds"
data_pkl = "../../data/dataframe_with_scores_withdoc2vec.pkl"
def pad_csr(a, newshape):
    """ Pads csr_matrix with zero rows in place.

    Cheap trick: growing a CSR matrix by empty rows only requires updating
    the shape and extending ``indptr`` with its last value.
    """
    rows, _cols = a.shape
    extra_rows = newshape[0] - rows
    a._shape = newshape
    a.indptr = np.pad(a.indptr, (0, extra_rows), 'edge')
def filter_nans(seq):
    """ Filters out floats (np.nan placeholders) from a list, returning
    the remaining items as a numpy array. """
    kept = [item for item in seq if not isinstance(item, float)]
    return np.array(kept)
def pad_or_trim(seq, max_len=1000):
    """ Pads (with zero rows) or trims ``seq`` to exactly max_len rows.

    Trimming keeps the LAST max_len rows; sparse inputs are padded in place
    via pad_csr, dense ones by stacking a zero block underneath.
    """
    rows, cols = seq.shape
    if rows > max_len:
        return seq[-max_len:, :]
    if rows < max_len:
        if sparse.issparse(seq):
            pad_csr(seq, (max_len, cols))
        else:
            seq = np.r_[seq, np.zeros((max_len - rows, cols))]
    return seq
def process_ngrams(batch_features, ngram_features):
    """ Transform batch_features into tensor of dims:
        (n, max_len, #features) where n is len(batch_features).

    Each document is vectorized with ``ngram_features``, padded/trimmed to
    max_len rows, then all documents are stacked and densified.
    """
    n_docs = batch_features.shape[0]
    vectorized = batch_features.apply(ngram_features.transform).apply(pad_or_trim)
    stacked = sparse.vstack(vectorized)
    return stacked.toarray().reshape(n_docs, max_len, -1)
def process_scores(X):
    """ Transforms X into tensor of dims:
        (n, max_len, 1) where n is len(X).
        This is a special case of process_ngrams for per-sentence score lists. """
    columns = (X.apply(np.array)
                .apply(lambda scores: scores.reshape(-1, 1))
                .apply(pad_or_trim))
    stacked = np.concatenate(columns.values, axis=0)
    return stacked.reshape(-1, max_len, 1)
############################################################
# Load Data
############################################################
# Full movie-script dataframe with precomputed lexicon scores and doc2vec
# vectors.
data = pd.read_pickle(data_pkl)
# Encode genre labels as integers in place.
lb_genre = LabelEncoder()
data['genre'] = lb_genre.fit_transform(data['genre'])
############################################################
# 3 to 5 chars w/ spaces
# unigrams + bigrams
############################################################
# This defines the analyzer to be used with Countvectorizer
def char_ngram_tokenizer(text, ngram_range):
    """Yield every character n-gram of ``text`` for n in range(*ngram_range).

    Used as a custom ``analyzer`` for CountVectorizer.
    """
    def aux(text, ngram_size):
        # BUG FIX: the original iterated range(len(text) - ngram_size),
        # which silently dropped the FINAL n-gram of every document.
        for i in range(len(text) - ngram_size + 1):
            yield text[i: i + ngram_size]
    for n in range(*ngram_range):
        for ngram in aux(text, n):
            yield ngram
# Count features: character 3/4/5-grams (range upper bound is exclusive)
# plus token uni/bigrams, each capped at VOCAB_SIZE.
ngram_features = FeatureUnion([
    ("char_ngrams", CountVectorizer(analyzer = lambda text: char_ngram_tokenizer(text, ngram_range=(3, 6)),
                                    max_features = VOCAB_SIZE)),
    ("token_ngrams", CountVectorizer(ngram_range=(1, 2),
                                     max_features=VOCAB_SIZE))
])
# Separate tf-idf variant of the token uni/bigram features.
tfidf_ = TfidfVectorizer(ngram_range=(1, 2), max_features=VOCAB_SIZE)
############################################################
# Batch generation
############################################################
def process(X, Y, i, ngram_features, batch_dir, tfidf_transformer = None):
    """Write feature .npz files for the batch starting at row ``i`` of X.

    Only the doc2vec stage is currently active; the other feature stages
    (ngrams, tfidf, word2vec, lexicons, labels, metadata) are toggled off
    but kept for re-enabling — presumably per-experiment switches.
    """
    # Features
    ## ngrams
    #logging.info("ngrams")
    #batch_ngrams = process_ngrams(X['sentences'].iloc[i : i + batch_size], ngram_features)
    #np.savez(os.path.join(batch_dir, "{}_ngrams".format(i)),
    #         features = batch_ngrams)
    #batch_ngrams = None
    ## tfidf
    #logging.info("tfidf")
    #batch_tfidf = process_ngrams(X['sentences'].iloc[i : i + batch_size], tfidf_transformer)
    #np.savez(os.path.join(batch_dir, "{}_tfidf".format(i)),
    #         features = batch_tfidf)
    #batch_tfidf = None
    # ## Word2vec
    #logging.info("word2vec")
    #batch_word2vec = X['word2vec_sent_mean_vec'].iloc[i : i + batch_size]\
    #                    .apply(filter_nans)\
    #                    .apply(pad_or_trim)
    #np.savez(os.path.join(batch_dir, "{}_word2vec".format(i)),
    #         features = batch_word2vec)
    #batch_word2vec = None
    # paragraph2vec: drop nan placeholders, pad/trim to max_len rows, save.
    logging.info("paragraph2vec")
    batch_paragraph2vec = X['doc2vec_vectors'].iloc[i : i + batch_size]\
        .apply(filter_nans)\
        .apply(pad_or_trim)
    np.savez(os.path.join(batch_dir, "{}_doc2vec".format(i)),
             features = batch_paragraph2vec)
    # Release the reference promptly to keep peak memory down.
    batch_paragraph2vec = None
    # ## Lexicons
    #logging.info("Empath")
    #batch_empath = X['empath_sentence'].iloc[i : i + batch_size]\
    #                .apply(np.array)\
    #                .apply(pad_or_trim)
    #np.savez(os.path.join(batch_dir, "{}_empath".format(i)),
    #         empath = batch_empath)
    #logging.info("Lexicons")
    #batch_lexicon = process_scores(X['abusive_scores'].iloc[i : i + batch_size])
    #batch_vader = process_scores(X['vader_scores'].iloc[i : i + batch_size])
    #batch_afinn = process_scores(X['afinn_scores'].iloc[i : i + batch_size])
    #batch_hatebase = X['hatebase_sentence'].iloc[i : i + batch_size].apply(pad_or_trim)
    #np.savez(os.path.join(batch_dir, "{}_lexicon".format(i)),
    #         abusive_scores = batch_lexicon,
    #         vader = batch_vader,
    #         afinn = batch_afinn,
    #         hatebase = batch_hatebase)
    # batch_lexicon = None
    #batch_vader = None
    #batch_afinn = None
    #batch_hatebase = None
    ## Save labels
    #logging.info("Labels")
    #batch_labels = Y[i : i + batch_size]
    #np.savez(os.path.join(batch_dir, "{}_labels".format(i)),
    #         labels = batch_labels)
    ## Save metadata
    #logging.info("Metadata")
    #batch_genre = X['genre'][i : i + batch_size]
    #np.savez(os.path.join(batch_dir, "{}_meta".format(i)),
    #         genre = batch_genre)
    logging.info("Done for {}".format(i))
# StratifiedKFold silently ignores random_state unless shuffle=True (and
# scikit-learn >= 0.24 raises a ValueError for the combination), so shuffle
# explicitly to make the seeded fold assignment actually reproducible.
skf = StratifiedKFold(n_splits = n_folds, shuffle = True, random_state = 42)
lb = LabelBinarizer()
Y = lb.fit_transform(data['violence_rating'])
for k, (train, test) in enumerate(skf.split(data.violence_rating, data.violence_rating)):
    # Per-fold output directories for the generated feature batches.
    train_dir = os.path.join(fold_dir, str(k), "train")
    test_dir = os.path.join(fold_dir, str(k), "test")
    eval_dir = os.path.join(fold_dir, str(k), "eval")
    for t in [train_dir, test_dir, eval_dir]:
        os.makedirs(t, exist_ok = True)
    X_train, X_test = data.iloc[train], data.iloc[test]
    Y_train, Y_test = Y[train], Y[test]
    # Carve a small fixed-size evaluation split out of the training data.
    X_train, X_eval, Y_train, Y_eval = train_test_split(X_train, Y_train, test_size = 64, random_state = 666)
    # Fit vocab on the full training partition of this fold.
    # NOTE(review): Y_train here has fewer rows than data.iloc[train]['text']
    # after the split above; harmless for vectorizers that ignore y — confirm.
    ngram_features.fit(data.iloc[train]['text'], Y_train)
    tfidf_.fit(data.iloc[train]['text'], Y_train)
    # Create batches
    for i in tqdm(range(0, X_train.shape[0], batch_size)):
        process(X_train, Y_train, i, ngram_features = ngram_features, batch_dir = train_dir, tfidf_transformer = tfidf_)
    for i in tqdm(range(0, X_eval.shape[0], batch_size)):
        process(X_eval, Y_eval, i, ngram_features = ngram_features, batch_dir = eval_dir, tfidf_transformer = tfidf_)
    for i in tqdm(range(0, X_test.shape[0], batch_size)):
        process(X_test, Y_test, i, ngram_features = ngram_features, batch_dir = test_dir, tfidf_transformer = tfidf_)
| 35.103139 | 120 | 0.61165 | 0 | 0 | 264 | 0.033725 | 0 | 0 | 0 | 0 | 3,228 | 0.412366 |
25c627add607a62bf9d3af224d727b8fbf9cabb3 | 337 | py | Python | mongosql/util/__init__.py | vdmit11/py-mongosql | 8b66a3386344cf5b38021dccf32c7790a07617e5 | [
"BSD-2-Clause"
] | null | null | null | mongosql/util/__init__.py | vdmit11/py-mongosql | 8b66a3386344cf5b38021dccf32c7790a07617e5 | [
"BSD-2-Clause"
] | null | null | null | mongosql/util/__init__.py | vdmit11/py-mongosql | 8b66a3386344cf5b38021dccf32c7790a07617e5 | [
"BSD-2-Clause"
] | null | null | null | from .raiseload_col import raiseload_col
from .selectinquery import selectinquery
from .counting_query_wrapper import CountingQuery
from .reusable import Reusable
from .mongoquery_settings_handler import MongoQuerySettingsHandler
from .marker import Marker
from .settings_dict import MongoQuerySettingsDict, StrictCrudHelperSettingsDict
| 42.125 | 79 | 0.890208 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
25c782a2e45234ae81e902ab1e6701f01f6cf923 | 7,120 | py | Python | files/gpu_stat.py | nauhpc/ansible-role-sacct_gpu | 2c54766412871598bfe5b5eb45ba1c3aa3b08384 | [
"Apache-2.0"
] | null | null | null | files/gpu_stat.py | nauhpc/ansible-role-sacct_gpu | 2c54766412871598bfe5b5eb45ba1c3aa3b08384 | [
"Apache-2.0"
] | 1 | 2020-09-17T18:32:20.000Z | 2020-09-17T18:32:20.000Z | files/gpu_stat.py | nauhpc/gpustats | 2c54766412871598bfe5b5eb45ba1c3aa3b08384 | [
"Apache-2.0"
] | 1 | 2020-06-19T22:10:06.000Z | 2020-06-19T22:10:06.000Z | #!/usr/bin/python3
# author mhakala
import json
import re
import subprocess
import tempfile
import os
import xml.etree.cElementTree as ET
import argparse
import os.path
import time
import random
from datetime import datetime
from datetime import timedelta
import traceback
import configparser
import glob
def jobs_running():
    """Return the SLURM job ids currently active on this node.

    Queries squeue for jobs on the local hostname (short form, without the
    domain suffix) and returns the job ids as a list of strings.
    """
    hostname = os.uname()[1].split('.')[0]
    squeue_out = subprocess.check_output(
        ['squeue', '-w', hostname, '-h', '-o', '%A']).decode()
    return squeue_out.split()
def pid2id(pid):
    """Map a process id to the SLURM job id that owns it.

    Scans the process's cgroup file for the slurm hierarchy and extracts
    the job number.  Returns None when no slurm cgroup entry is found.
    """
    cgroup_path = '/proc/%s/cgroup' % pid
    with open(cgroup_path) as cgroup_file:
        for entry in cgroup_file:
            match = re.search('.*slurm\/uid_.*\/job_(\d+)\/.*', entry)
            if match is None:
                continue
            return match.group(1)
    return None
# get needed slurm values for each running job on the node
def job_info(jobs,current):
    """Fill per-job CPU/GPU counts by querying `scontrol -o show job`.

    jobs: list of job-id strings running on this node.
    current: dict mapping job id -> stats dict; mutated in place and
        returned.  Single-node jobs get 'ngpu'/'ncpu' set; multi-node jobs
        are dropped from `current` entirely.
    """
    for job in jobs:
        # One-line (-o) scontrol output so the regexes below can scan it.
        output = subprocess.check_output(['scontrol', '-o', 'show', 'job', job]).decode()
        cpus = re.search('NumCPUs=(\d+)', output)
        # NOTE(review): .group(1) raises AttributeError if TRES= is missing
        # from the scontrol output — assumed to always be present here.
        tres = re.search('TRES=(\S+)', output).group(1)
        nodes = re.search('NumNodes=(\d+)', output)
        ngpu = 0
        # TRES is a comma-separated list like "cpu=4,mem=16G,gres/gpu:tesla=2";
        # pick out the tesla GPU count (other GPU models are not counted).
        for g in tres.split(','):
            gs = g.split('=')
            if gs[0] == 'gres/gpu:tesla':
                if len(gs) == 1:
                    # A bare "gres/gpu:tesla" entry with no "=count" means one GPU.
                    ngpu = 1
                else:
                    ngpu = int(gs[-1])
        # drop multi-node jobs (will be added later if needed)
        if int(nodes.group(1)) > 1:
            del current[job]
        else:
            current[job]['ngpu'] = ngpu
            current[job]['ncpu']=int(cpus.group(1))
    return current
def gpu_info(jobinfo):
    """Accumulate per-job GPU utilization/power/memory from `nvidia-smi -q -x`.

    For every GPU on the node, the XML report is parsed and the GPU's
    utilization, power draw and total used memory are added to the stats of
    the job that owns the processes on that GPU.  `jobinfo` is mutated in
    place and returned.
    """
    output = subprocess.check_output(['nvidia-smi', '-q', '-x']).decode()
    root = ET.fromstring(output)
    for gpu in root.findall('gpu'):
        procs = gpu.find('processes')
        mtot = 0.
        jobid = None
        # Here we assume that multiple job id's cannot access the same
        # GPU
        # NOTE(review): if that assumption is violated, `jobid` ends up as
        # the owner of the *last* process listed, and `mtot` (summed over all
        # processes) is attributed entirely to that job — confirm.
        for pi in procs.findall('process_info'):
            pid = pi.find('pid').text
            jobid = pid2id(pid)
            # Assume used_memory is of the form '1750 MiB'. Needs fixing
            # if the unit is anything but MiB.
            mtot += float(pi.find('used_memory').text.split()[0])
        util = gpu.find('utilization')
        # Here assume gpu utilization is of the form
        # '100 %'
        gutil = float(util.find('gpu_util').text.split()[0])
        # power_draw is of the form 35.25 W
        power = gpu.find('power_readings')
        gpwrdraw = float(power.find('power_draw').text.split()[0])
        # only update, if jobid not dropped (multinode jobs)
        # if a job is using multiple GPUs, code below should execute again
        if jobid in jobinfo.keys():
            if jobinfo[jobid]['ngpu'] != 0:
                # Average utilization across the job's GPUs; sum power draw;
                # track the largest per-GPU memory footprint seen so far.
                jobinfo[jobid]['gpu_util'] += gutil/jobinfo[jobid]['ngpu']
                jobinfo[jobid]['gpu_power'] += gpwrdraw
                jobinfo[jobid]['gpu_mem_max'] = max(mtot,
                                                    jobinfo[jobid]['gpu_mem_max'])
    return jobinfo
def read_shm(dir_name):
    """Load all per-job JSON stats files from ``dir_name``.

    ``dir_name`` is expected to end with a path separator.  Returns a dict
    mapping job id (derived from the file name) to the decoded stats dict.
    """
    stats = {}
    for json_path in glob.glob(dir_name + '*.json'):
        job_id = json_path.replace(dir_name, '').replace('.json', '')
        with open(json_path, 'r') as handle:
            stats[job_id] = json.load(handle)
    return stats
def write_shm(jobinfo, running_jobids, dir_path, max_age):
    """Persist per-job stats as JSON files and prune stale ones.

    Jobs that are still running and use at least one GPU get their stats
    (re)written to ``<dir_path><jobid>.json``.  Entries that are no longer
    eligible for writing and whose timestamp is older than ``max_age`` days
    have their file deleted.  Timestamps are compared as
    "%Y-%m-%d %H:%M:%S" strings, which sort chronologically.
    """
    cutoff = (datetime.now() - timedelta(days=max_age)).strftime("%Y-%m-%d %H:%M:%S")
    for jobid, stats in jobinfo.items():
        fpath = dir_path + str(jobid) + '.json'
        if jobid in running_jobids and stats['ngpu'] != 0:
            with open(fpath, 'w') as out:
                json.dump(stats, out)
        elif stats['timestamp'] < cutoff:
            os.remove(fpath)
def dir_path(path):
    """argparse type-checker: accept ``path`` only if it is an existing directory."""
    if not os.path.isdir(path):
        raise argparse.ArgumentTypeError("readable_dir:" +
                                         str(path) +
                                         " is not a valid path")
    return path
def main():
    """Collect one sample of per-job GPU stats and merge it into shared state.

    Intended to be run periodically (e.g. every minute from cron): after a
    random stagger it queries SLURM and nvidia-smi, folds the new sample
    into the running averages stored as JSON files under ``dir_path``, and
    prunes stale entries.  Exceptions are logged to the log file rather
    than raised.
    """
    start_time = time.time()
    parser = argparse.ArgumentParser()
    parser.add_argument('dir_path',
                        type=dir_path,
                        nargs='?',
                        default='/tmp/gpu_stats/',
                        help="The directory where a JSON for each job is stored")
    parser.add_argument('-n', '--nosleep',
                        help="Don't sleep at the beginning",
                        action="store_true")
    parser.add_argument('-l',
                        '--logfile',
                        help="Name of log file where any exceptions will be written to",
                        default='/tmp/gpustats.log')
    parser.add_argument('-m',
                        '--max-age',
                        type=int,
                        default=1,
                        help='The maximum time (in days) for which the gpu stats of a job will be stored')
    args = parser.parse_args()
    # Normalize dir_path to end with a separator; the helpers concatenate it.
    if args.dir_path[-1] != '/':
        args.dir_path += '/'
    logfile = open(args.logfile, 'a+')
    try:
        # Random stagger so concurrent nodes don't hammer slurmctld at once.
        if not args.nosleep:
            time.sleep(random.randint(0, 30))
        # initialize stats
        current = {}
        jobs = jobs_running()
        for job in jobs:
            current[job]={'gpu_util': 0, 'gpu_mem_max': 0, 'ngpu': 0,
                          'ncpu': 0, 'step': 1, 'gpu_power': 0,
                          'timestamp':
                          datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
        # get current job info
        current = job_info(jobs, current)
        current = gpu_info(current)
        # running_jobids contains jobids of jobs that are running
        # if a jobid is not in this set,
        # then we don't need to write to the corresponding file
        running_jobids = set(current.keys())
        # combine with previous steps, calculate avgs and max
        prev = read_shm(args.dir_path)
        for job in jobs:
            if job in prev.keys():
                # Incremental mean over `step` samples for util/power; running
                # max for memory.
                n = prev[job]['step']
                current[job]['gpu_util'] = ( float(prev[job]['gpu_util'])*n+float(current[job]['gpu_util']) )/(n+1)
                current[job]['gpu_power'] = ( float(prev[job]['gpu_power'])*n+float(current[job]['gpu_power']) )/(n+1)
                current[job]['gpu_mem_max'] = max(float(prev[job]['gpu_mem_max']), float(current[job]['gpu_mem_max']))
                current[job]['step'] = n+1
        for job in prev.keys():
            if job not in jobs:
                # it must be a job that is no longer running
                current[job] = prev[job]
        # write json
        write_shm(current, running_jobids, args.dir_path, args.max_age)
    except Exception as e:
        logfile.write(traceback.format_exc())
    end_time = time.time()
    # Warn when a run nearly exceeds a one-minute scheduling interval.
    if end_time - start_time > 55.0:
        logfile.write("WARNING: runtime was longer than expected at " +
                      str(end_time - start_time) +
                      " seconds\n")

if __name__ == '__main__':
    main()
| 34.563107 | 119 | 0.536096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,939 | 0.272331 |
25c78a8c35311e4bcbcce63afb4416e6dde577b9 | 10,567 | py | Python | CRF/CRF.py | zhen8838/Statistical-Learning-Method | c62139804619f84b38a66cc222d0c951a7f76775 | [
"MIT"
] | null | null | null | CRF/CRF.py | zhen8838/Statistical-Learning-Method | c62139804619f84b38a66cc222d0c951a7f76775 | [
"MIT"
] | 3 | 2020-11-13T17:48:41.000Z | 2022-02-09T23:45:10.000Z | CRF/CRF.py | zhen8838/Statistical-Learning-Method | c62139804619f84b38a66cc222d0c951a7f76775 | [
"MIT"
] | 1 | 2020-09-09T08:11:47.000Z | 2020-09-09T08:11:47.000Z | """ 参考自https://github.com/bojone/crf/ """
import tensorflow as tf
k = tf.keras
kl = tf.keras.layers
K = tf.keras.backend
from sklearn.model_selection import train_test_split
import numpy as np
import re
from tqdm import tqdm
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
class CRF(kl.Layer):
    """Linear-chain Conditional Random Field layer.

    The CRF layer is essentially a loss-computation layer with trainable
    parameters (the tag-transition matrix), so it is only used during
    training; decoding at prediction time is done separately (see viterbi).
    """

    def __init__(self, ignore_last_label=False, lr_mult=1., **kwargs):
        """ignore_last_label: whether to ignore the last label so that it
        acts as a padding mask.
        lr_mult: scale factor applied to the transition matrix, effectively
        multiplying its learning rate.
        """
        super().__init__(**kwargs)
        self.ignore_last_label = 1 if ignore_last_label else 0
        self.lr_mult = lr_mult

    def build(self, input_shape):
        self.num_labels = input_shape[-1] - self.ignore_last_label
        self._trans: tf.Variable = self.add_weight(name='crf_trans',
                                                   shape=(self.num_labels, self.num_labels),
                                                   initializer='glorot_uniform',
                                                   trainable=True)
        # Pre-divide the stored variable so that trans() starts at the
        # originally-initialized scale while its gradients are amplified
        # by lr_mult.
        self._trans.assign(self._trans / self.lr_mult)
        self.trans = lambda: self._trans * self.lr_mult

    def get_weights(self):
        weights = super().get_weights()
        # Report the effective (scaled) transition weights.
        return [w * self.lr_mult for w in weights]

    def log_norm_step(self, inputs, states):
        """Recursively compute the log of the normalization factor Z.

        Key points: 1) recursion over time steps; 2) logsumexp to avoid
        numerical overflow.  Trick: expand_dims is used to align tensors.
        """
        inputs, mask = inputs[:, :-1], inputs[:, -1:]
        states = K.expand_dims(states[0], 2)  # (batch_size, output_dim, 1)
        trans = K.expand_dims(self.trans(), 0)  # (1, output_dim, output_dim)
        outputs = tf.math.reduce_logsumexp(states + trans, 1)  # (batch_size, output_dim)
        outputs = outputs + inputs
        # Masked positions carry the previous state through unchanged.
        outputs = mask * outputs + (1 - mask) * states[:, :, 0]
        return outputs, [outputs]

    def path_score(self, inputs, labels):
        """Compute the (not yet normalized) log-score of the target path.

        Key point: per-tag emission score plus transition score.
        Trick: multiplying the predictions by the one-hot targets extracts
        the target path's scores.
        """
        point_score = K.sum(K.sum(inputs * labels, 2), 1, keepdims=True)  # per-tag emission score
        labels1 = K.expand_dims(labels[:, :-1], 3)
        labels2 = K.expand_dims(labels[:, 1:], 2)
        labels = labels1 * labels2  # two shifted label tensors select the target transitions
        trans = K.expand_dims(K.expand_dims(self.trans(), 0), 0)
        trans_score = K.sum(K.sum(trans * labels, [2, 3]), 1, keepdims=True)
        return point_score + trans_score  # sum of both score parts

    def call(self, inputs):  # the CRF does not transform its input; it only provides a loss
        return inputs

    def loss(self, y_true, y_pred):  # y_true must be one-hot encoded
        if self.ignore_last_label:
            mask = 1 - y_true[:, :, -1:]
        else:
            mask = K.ones_like(y_pred[:, :, :1])
        y_true, y_pred = y_true[:, :, :self.num_labels], y_pred[:, :, :self.num_labels]
        path_score = self.path_score(y_pred, y_true)  # numerator (in log space)
        init_states = [y_pred[:, 0]]  # initial state
        y_pred = K.concatenate([y_pred, mask])
        log_norm, _, _ = K.rnn(self.log_norm_step, y_pred[:, 1:], init_states)  # Z vector (in log space)
        log_norm = tf.math.reduce_logsumexp(log_norm, 1, keepdims=True)  # Z (in log space)
        return log_norm - path_score  # i.e. log(numerator / denominator)

    def accuracy(self, y_true, y_pred):  # per-frame accuracy shown during training, excluding masked frames
        mask = 1 - y_true[:, :, -1] if self.ignore_last_label else None
        y_true, y_pred = y_true[:, :, :self.num_labels], y_pred[:, :, :self.num_labels]
        isequal = K.equal(K.argmax(y_true, 2), K.argmax(y_pred, 2))
        isequal = K.cast(isequal, 'float32')
        # BUGFIX: use an identity check; "mask == None" invokes the tensor's
        # overloaded equality operator rather than testing for None.
        if mask is None:
            return K.mean(isequal)
        else:
            return K.sum(isequal * mask) / K.sum(mask)
def max_in_dict(d):  # return the (key, value) pair with the largest value
    """Return the ``(key, value)`` pair of ``d`` with the largest value.

    On ties the first-inserted key wins, matching the behaviour of the
    previous hand-rolled scan.  Raises ValueError on an empty dict
    (the old version raised IndexError).
    """
    return max(d.items(), key=lambda item: item[1])
def viterbi(nodes, trans):  # Viterbi decoding, same structure as the HMM version
    """Find the highest-scoring tag path through ``nodes``.

    nodes: list of {tag: score} dicts, one per time step.
    trans: dict mapping a two-char tag pair "ab" to its transition score.
    Returns (best_path_string, best_score) via max_in_dict.
    """
    paths = nodes[0]  # initialize the candidate paths with the first step
    for l in range(1, len(nodes)):  # walk through the remaining time steps
        paths_old, paths = paths, {}
        for n, ns in nodes[l].items():  # every candidate tag at the current step
            max_path, max_score = '', -1e10
            for p, ps in paths_old.items():  # best paths up to the previous step
                score = ns + ps + trans[p[-1] + n]  # new cumulative score
                if score > max_score:  # keep the best-scoring incoming path
                    max_path, max_score = p + n, score  # extend that path
            paths[max_path] = max_score  # store best path ending in tag n
    return max_in_dict(paths)
def cut(s, trans, char2id):  # segmentation function, essentially the same as for the HMM
    """Segment string ``s`` into words using the trained tagger + viterbi.

    Relies on the module-level ``model`` for per-character tag
    probabilities; ``trans`` is the decoded transition-score dict.
    """
    if not s:  # empty input: nothing to segment
        return []
    # Convert the character sequence into an id sequence.  Note: the corpus
    # preprocessing removed spaces from the character set, so a space is
    # simply mapped to the id of the full-width period.
    sent_ids = np.array([[char2id.get(c, 0) if c != ' ' else char2id[u'。']
                          for c in s]])
    probas = model.predict(sent_ids)[0]  # [n,5]
    nodes = [dict(zip('sbme', i)) for i in probas[:, :4]]  # keep only the first 4 tags; the 5th is the mask
    nodes[0] = {i: j for i, j in nodes[0].items() if i in 'bs'}  # the first char can only be tagged b or s
    nodes[-1] = {i: j for i, j in nodes[-1].items() if i in 'es'}  # the last char can only be tagged e or s
    tags = viterbi(nodes, trans)[0]
    result = [s[0]]
    for i, j in zip(s[1:], tags[1:]):
        if j in 'bs':  # start of a new word
            result.append(i)
        else:  # continuation of the current word
            result[-1] += i
    return result
class Evaluate(k.callbacks.Callback):
    """Keras callback that records the CRF transition matrix after every
    training batch so its evolution can be animated afterwards."""

    def __init__(self, tag2id, char2id):
        self.highest = 0.       # best validation accuracy seen (unused while on_epoch_end is disabled)
        self.tag2id = tag2id
        self.char2id = char2id
        self.history = []       # snapshots of the 4x4 transition matrix, one per batch

    def on_train_batch_end(self, batch, logs=None):
        A = self.model.get_layer('crf').get_weights()[0][:4, :4]  # grab the latest transition matrix from the model
        self.history.append(A)
    # def on_epoch_end(self, epoch, logs=None):
    #     A = self.model.get_weights()[-1][:4, :4]  # grab the latest transition matrix from the model
    #     trans = {}
    #     for i in 'sbme':
    #         for j in 'sbme':
    #             trans[i + j] = A[self.tag2id[i], self.tag2id[j]]
    #     right = 0.
    #     total = 0.
    #     for s in tqdm(iter(valid_sents), desc=u'验证模型中'):
    #         result = cut(''.join(s), trans, self.char2id)
    #         total += len(set(s))
    #         right += len(set(s) & set(result))  # use the word-set intersection as the correct count;
    #         # this simple metric may overestimate — consider a custom one
    #     acc = right / total
    #     if acc > self.highest:
    #         self.highest = acc
    #     print('val acc: %s, highest: %s' % (acc, self.highest))

    def show_anime(self, save_path='gif/crf.gif'):
        """Render the recorded transition matrices as an animated heatmap
        and save it as a GIF (requires imagemagick), then show it."""
        fig, ax = plt.subplots()
        fig.set_tight_layout(True)
        ax: plt.Axes
        # Initial frame: heatmap of the first recorded matrix with the
        # numeric value printed in every cell.
        A = self.history[0]
        c = ax.pcolor(A, cmap='RdBu_r', vmin=A.min(), vmax=A.max(),
                      edgecolors='w', linewidths=30)
        ax.set_xticks(np.arange(4) + 0.5)
        ax.set_yticks(np.arange(4) + 0.5)
        ax.set_xticklabels(list('sbme'))
        ax.set_yticklabels(list('sbme'))
        for i in range(4):
            for j in range(4):
                text = ax.text(j + 0.5, i + 0.5,
                               f'{A[i, j]:^4.2f}',
                               ha="center", va="center", color="w")

        def update(t):
            # Redraw the heatmap for snapshot t (one frame per batch).
            ax.cla()
            ax.set_title(f'iter {t}')
            ax.set_xticks(np.arange(4) + 0.5)
            ax.set_yticks(np.arange(4) + 0.5)
            ax.set_xticklabels(list('sbme'))
            ax.set_yticklabels(list('sbme'))
            A = self.history[t]
            c = ax.pcolor(A, cmap='RdBu_r', vmin=A.min(), vmax=A.max(),
                          edgecolors='w', linewidths=30)
            for i in range(4):
                for j in range(4):
                    text = ax.text(j + 0.5, i + 0.5,
                                   f'{A[i, j]:^4.2f}',
                                   ha="center", va="center", color="w")
        anim = FuncAnimation(fig, update, frames=len(self.history), interval=100)
        anim.save(save_path, writer='imagemagick', fps=5)
        plt.show()
if __name__ == "__main__":
    # Require a GPU and enable memory growth so TF doesn't grab all VRAM.
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
    # Load the MSR word-segmentation training corpus, one sentence per line.
    sents = []
    with open('CRF/msr_training.utf8', 'r') as f:
        for line in f.readlines():
            sents.append(line.strip())
    sents = [re.split(' +', s) for s in sents]  # words are separated by spaces
    sents = [[w for w in s if w] for s in sents]  # drop empty strings
    np.random.shuffle(sents)  # shuffle the corpus before splitting off the validation set
    chars = {}  # character frequency table
    for s in sents:
        for c in ''.join(s):
            if c in chars:
                chars[c] += 1
            else:
                chars[c] = 1
    # filter out low-frequency characters
    min_count = 2
    chars = {i: j for i, j in chars.items() if j >= min_count}
    id2char = {i + 1: j for i, j in enumerate(chars)}  # id -> char mapping (0 is reserved for unknown/padding)
    char2id = {j: i for i, j in id2char.items()}  # char -> id mapping
    id2tag = {0: 's', 1: 'b', 2: 'm', 3: 'e'}  # mapping between tags (sbme) and ids
    tag2id = {j: i for i, j in id2tag.items()}
    train_sents, valid_sents = train_test_split(sents, test_size=0.05)
    batch_size = 128
    def train_generator():
        # Infinite generator yielding zero-padded (chars, one-hot tags) batches.
        while True:
            X, Y = [], []
            for i, s in enumerate(train_sents):  # iterate over sentences
                sx, sy = [], []
                for w in s:  # iterate over the words in the sentence
                    sx.extend([char2id.get(c, 0) for c in w])  # ids of the word's characters
                    if len(w) == 1:
                        sy.append(0)  # single-character word tag (s)
                    elif len(w) == 2:
                        sy.extend([1, 3])  # two-character word tags (b, e)
                    else:
                        sy.extend([1] + [2] * (len(w) - 2) + [3])  # tags for words longer than two chars (b, m..., e)
                X.append(sx)
                Y.append(sy)
                if len(X) == batch_size or i == len(train_sents) - 1:  # batch full (or corpus exhausted)
                    maxlen = max([len(x) for x in X])  # longest sentence in the batch
                    X = [x + [0] * (maxlen - len(x)) for x in X]  # zero-pad the inputs
                    Y = [y + [4] * (maxlen - len(y)) for y in Y]  # pad labels with the fifth (mask) tag
                    yield np.array(X), tf.keras.utils.to_categorical(Y, 5)
                    X, Y = [], []
    embedding_size = 128
    sequence = kl.Input(shape=(None,), dtype='int32')  # input layer; sequence length left as None
    embedding = kl.Embedding(len(chars) + 1, embedding_size)(sequence)  # mask_zero=True was removed
    cnn = kl.Conv1D(128, 3, activation='relu', padding='same')(embedding)
    cnn = kl.Conv1D(128, 3, activation='relu', padding='same')(cnn)
    cnn = kl.Conv1D(128, 3, activation='relu', padding='same')(cnn)  # three stacked CNN layers
    crf = CRF(True, lr_mult=100.)  # CRF layer; True auto-masks the last tag, and the CRF learning rate is scaled 100x
    tag_score = kl.Dense(5)(cnn)  # 5-way classification; the fifth tag is used for masking
    tag_score = crf(tag_score)  # wrap the raw tag scores
    model = k.Model(inputs=sequence, outputs=tag_score)
    model.summary()
    model.compile(loss=crf.loss,  # use the CRF's own loss
                  optimizer=k.optimizers.Adam(0.001),
                  metrics=[crf.accuracy]  # use the CRF's own accuracy
                  )
    evaluator = Evaluate(tag2id, char2id)
    model.fit_generator(train_generator(),
                        steps_per_epoch=100,
                        epochs=1,
                        callbacks=[evaluator])  # train with the evaluator callback attached
    # Decode a few validation sentences with the learned transition matrix.
    A = model.get_layer('crf').get_weights()[0][:4, :4]  # :4 strips the mask transition probabilities
    trans = {}
    for i in 'sbme':
        for j in 'sbme':
            trans[i + j] = A[tag2id[i], tag2id[j]]
    right = 0.
    total = 0.
    for s in range(5):
        s = valid_sents[s]
        result = cut(''.join(s), trans, char2id)
        print(''.join(s), '\n', result)
    evaluator.show_anime()
| 35.459732 | 89 | 0.594208 | 6,153 | 0.506962 | 977 | 0.080498 | 0 | 0 | 0 | 0 | 3,825 | 0.315152 |
25c7c301c6e2d964c2b852ffb114911428fe8c06 | 4,284 | py | Python | comet/schedulers.py | chryssa-zrv/UA_COMET | 527e7c86bd0a0d8ff90efda58e820108a5666b92 | [
"Apache-2.0"
] | null | null | null | comet/schedulers.py | chryssa-zrv/UA_COMET | 527e7c86bd0a0d8ff90efda58e820108a5666b92 | [
"Apache-2.0"
] | null | null | null | comet/schedulers.py | chryssa-zrv/UA_COMET | 527e7c86bd0a0d8ff90efda58e820108a5666b92 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
r"""
Schedulers
==============
Leraning Rate schedulers used to train COMET models.
"""
from argparse import Namespace
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
class ConstantPolicy:
    """Callable returning a constant LR multiplier of 1 for ConstantLR.

    Implemented as a class (rather than a lambda) so that LambdaLR
    schedulers built with it remain picklable.
    """

    def __call__(self, *args, **kwargs):
        # The learning-rate multiplier never changes.
        return 1
class ConstantLR(LambdaLR):
    """Learning-rate schedule that keeps the LR constant.

    Thin wrapper around LambdaLR with a constant policy, mirroring the
    huggingface constant schedule:
    https://huggingface.co/transformers/v2.1.1/main_classes/optimizer_schedules.html

    :param optimizer: torch.optim.Optimizer
    :param last_epoch: index of the last epoch (-1 to start fresh).
    """

    def __init__(self, optimizer: Optimizer, last_epoch: int = -1) -> None:
        policy = ConstantPolicy()
        super().__init__(optimizer, policy, last_epoch)

    @classmethod
    def from_hparams(
        cls, optimizer: Optimizer, hparams: Namespace, **kwargs
    ) -> LambdaLR:
        """Build a ConstantLR scheduler; ``hparams`` and ``kwargs`` are ignored."""
        return ConstantLR(optimizer)
class WarmupPolicy:
    """LR multiplier that ramps linearly from 0 to 1 over ``warmup_steps``.

    Implemented as a class (rather than a closure) so that LambdaLR
    schedulers using it remain picklable.
    """

    def __init__(self, warmup_steps):
        self.warmup_steps = warmup_steps

    def __call__(self, current_step):
        if current_step >= self.warmup_steps:
            # Warmup finished: keep the full learning rate.
            return 1.0
        return float(current_step) / float(max(1.0, self.warmup_steps))
class WarmupConstant(LambdaLR):
    """Scheduler with a linear warmup followed by a constant learning rate.

    During the first ``warmup_steps`` steps the LR multiplier grows
    linearly from 0 to 1; from then on it stays at 1.

    :param optimizer: torch.optim.Optimizer
    :param warmup_steps: length of the linear warmup phase in steps.
    :param last_epoch: index of the last epoch (-1 to start fresh).
    """

    def __init__(
        self, optimizer: Optimizer, warmup_steps: int, last_epoch: int = -1
    ) -> None:
        policy = WarmupPolicy(warmup_steps)
        super().__init__(optimizer, policy, last_epoch)

    @classmethod
    def from_hparams(
        cls, optimizer: Optimizer, hparams: Namespace, **kwargs
    ) -> LambdaLR:
        """Build a WarmupConstant scheduler from ``hparams.warmup_steps``."""
        return WarmupConstant(optimizer, hparams.warmup_steps)
class LinearWarmupPolicy:
    """LR multiplier: linear warmup to 1, then linear decay back to 0.

    Implemented as a class (rather than a closure) so that LambdaLR
    schedulers using it remain picklable.
    """

    def __init__(self, warmup_steps, num_training_steps):
        self.num_training_steps = num_training_steps
        self.warmup_steps = warmup_steps

    def __call__(self, current_step):
        if current_step < self.warmup_steps:
            # Warmup phase: ramp from 0 towards 1.
            return float(current_step) / float(max(1, self.warmup_steps))
        # Decay phase: shrink linearly to 0, never going negative.
        remaining = float(self.num_training_steps - current_step)
        span = float(max(1, self.num_training_steps - self.warmup_steps))
        return max(0.0, remaining / span)
class LinearWarmup(LambdaLR):
    """Scheduler with a linear warmup followed by a linear decay to zero.

    The LR multiplier grows from 0 to 1 over ``warmup_steps`` steps, then
    decreases linearly back to 0 over the remaining
    ``num_training_steps - warmup_steps`` steps.

    :param optimizer: torch.optim.Optimizer
    :param warmup_steps: length of the linear warmup phase in steps.
    :param num_training_steps: total number of training steps.
    :param last_epoch: index of the last epoch (-1 to start fresh).
    """

    def __init__(
        self,
        optimizer: Optimizer,
        warmup_steps: int,
        num_training_steps: int,
        last_epoch: int = -1,
    ) -> None:
        policy = LinearWarmupPolicy(warmup_steps, num_training_steps)
        super().__init__(optimizer, policy, last_epoch)

    @classmethod
    def from_hparams(
        cls, optimizer: Optimizer, hparams: Namespace, num_training_steps: int
    ) -> LambdaLR:
        """Build a LinearWarmup scheduler from ``hparams.warmup_steps``."""
        return LinearWarmup(optimizer, hparams.warmup_steps, num_training_steps)
# Registry mapping scheduler names (as referenced from hparams/config) to
# the classes that implement them.
str2scheduler = {
    "linear_warmup": LinearWarmup,
    "constant": ConstantLR,
    "warmup_constant": WarmupConstant,
}
| 30.6 | 105 | 0.681839 | 3,914 | 0.913632 | 0 | 0 | 790 | 0.184407 | 0 | 0 | 1,791 | 0.418067 |
25c7cac235ae010638d92daa6f69cfcf3c1ad8dd | 144 | py | Python | pypesto/prediction/__init__.py | sleepy-owl/pyPESTO | a34608de9ad0a274afb6fb89ebc022aff5baf4c0 | [
"BSD-3-Clause"
] | null | null | null | pypesto/prediction/__init__.py | sleepy-owl/pyPESTO | a34608de9ad0a274afb6fb89ebc022aff5baf4c0 | [
"BSD-3-Clause"
] | null | null | null | pypesto/prediction/__init__.py | sleepy-owl/pyPESTO | a34608de9ad0a274afb6fb89ebc022aff5baf4c0 | [
"BSD-3-Clause"
] | null | null | null | """
Prediction
==========
"""
from .amici_predictor import AmiciPredictor
from .prediction import PredictionResult, PredictionConditionResult
| 16 | 67 | 0.763889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.201389 |
25c7dd06bfcbbf01961dfc14315b094c4b911686 | 1,378 | py | Python | tixcraft/utils.py | stavhaygn/crack-tixcraft | c235e0d76ac29b1dea53e93c5fddca5bc0d58b79 | [
"MIT"
] | 20 | 2019-05-30T05:17:49.000Z | 2022-03-27T06:57:26.000Z | tixcraft/utils.py | satan007417/tixcraft_request | c4383c7bcc05844f4c10cbfb2b36c7d4c7c55f79 | [
"MIT"
] | 14 | 2019-07-21T17:22:46.000Z | 2022-03-11T23:50:36.000Z | tixcraft/utils.py | stavhaygn/crack-tixcraft | c235e0d76ac29b1dea53e93c5fddca5bc0d58b79 | [
"MIT"
] | 11 | 2019-07-12T14:24:58.000Z | 2022-03-11T13:26:57.000Z | from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium import webdriver
import requests
def login():
    """Open a Chrome window on the tixcraft login page, wait for the user to
    finish logging in manually, then return the session cookies.

    Login completion is detected by the appearance of the user-name element
    (waited for up to 10 minutes).
    """
    driver = webdriver.Chrome()
    driver.implicitly_wait(20)
    driver.get("https://tixcraft.com/login")
    logged_in = EC.visibility_of_element_located((By.XPATH, "//*[@class='user-name']"))
    WebDriverWait(driver, 600).until(logged_in)
    cookies = driver.get_cookies()
    driver.quit()
    return cookies
def user_verify(driver, url):
    """Open ``url`` and keep auto-accepting verification alerts until the
    browser leaves the ticket/verify page; returns the final URL.
    """
    driver.get(url)
    url = driver.current_url
    while "ticket/verify" in url:
        try:
            url = driver.current_url
            WebDriverWait(driver, 2).until(EC.alert_is_present())
            # BUGFIX: switch_to_alert() was deprecated and removed in
            # Selenium 4; the supported form is the switch_to.alert property.
            alert = driver.switch_to.alert
            alert.accept()
        except Exception:
            # No alert appeared within the timeout (or it vanished);
            # poll the URL again.  (Was a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            pass
    return url
def session_to_driver(session):
    """Copy the cookies of a requests ``session`` into a fresh Chrome driver.

    The driver first visits tixcraft.com because Selenium only allows
    setting cookies for the domain that is currently loaded.
    """
    driver = webdriver.Chrome()
    driver.get("https://tixcraft.com")
    for name, value in session.cookies.get_dict().items():
        driver.add_cookie({"name": name, "value": value})
    return driver
def driver_to_session(driver):
    """Copy the cookies of a Selenium ``driver`` into a new requests session."""
    session = requests.Session()
    for cookie in driver.get_cookies():
        session.cookies.set(cookie["name"], cookie["value"])
    return session
| 27.56 | 79 | 0.666909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.08418 |