max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
utils/__init__.py | biubiubiiu/SpamClassification | 0 | 12771451 | from .logger import logger_setup
__all__ = [
'logger_setup'
]
| 1.09375 | 1 |
testing/test_table_api.py | dianfu/pyflink-faq | 14 | 12771452 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.java_gateway import get_gateway
from pyflink.table import DataTypes
from pyflink.table.udf import udf
from test_utils import PyFlinkStreamTableTestCase, TestAppendSink, results
class TableTests(PyFlinkStreamTableTestCase):
def get_results(self, table_name):
gateway = get_gateway()
TestValuesTableFactory = gateway.jvm.org.apache.flink.table.planner.factories.TestValuesTableFactory
return TestValuesTableFactory.getResults(table_name)
def test_scalar_function(self):
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT())
table_sink = TestAppendSink(
['a', 'b'],
[DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c'])
t.select(t.a, add_one(t.a)) \
.execute_insert("Results").wait()
actual = results()
self.assert_equals(actual, ["+I[1, 2]", "+I[2, 3]", "+I[3, 4]"])
def test_sink_ddl(self):
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT())
self.t_env.execute_sql("""
CREATE TABLE Results(
a BIGINT,
b BIGINT
) with (
'connector' = 'values'
)
""")
t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c'])
t.select(t.a, add_one(t.a)) \
.execute_insert("Results").wait()
actual = self.get_results("Results")
self.assert_equals(actual, ["+I[1, 2]", "+I[2, 3]", "+I[3, 4]"])
| 1.796875 | 2 |
scripts/gather/test_gather_browser.py | acutesoftware/rawdata | 10 | 12771453 | <filename>scripts/gather/test_gather_browser.py
#!/usr/bin/python3
# test_gather_browser.py
import unittest
import os
import sys
root_fldr = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'rawdata'))
imp_folder = root_fldr + os.sep + 'gather'
print(imp_folder)
sys.path.insert(1, imp_folder)
import browser_usage
class TestGatherBrowser(unittest.TestCase):
def test_01_browser(self):
browser = browser_usage.Browser(browser_usage.browser_data_path, browser_usage.op_folder, 'Chrome')
browser.get_passwords()
browser.get_browser_history_chrome()
browser.get_browser_bookmarks_chrome()
print(browser)
bookmarks_file = browser_usage.op_folder + os.sep + 'chrome_bookmarks.csv'
history_file = browser_usage.op_folder + os.sep + 'chrome_history.csv'
password_op = browser_usage.op_folder + os.sep + 'PASSWORDS.csv'
#self.assertEqual(os.path.exists(bookmarks_file), True)
#self.assertEqual(os.path.exists(history_file), True)
#self.assertEqual(os.path.exists(password_op), True)
self.assertEqual(str(browser)[0:36], 'browser_usage reading Chrome browser')
if __name__ == '__main__':
unittest.main()
| 2.5625 | 3 |
betterproto/tests/generate.py | boukeversteegh/python-betterproto | 4 | 12771454 | #!/usr/bin/env python
import os
# Force pure-python implementation instead of C++, otherwise imports
# break things because we can't properly reset the symbol database.
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
import importlib
import json
import subprocess
import sys
from typing import Generator, Tuple
from google.protobuf import symbol_database
from google.protobuf.descriptor_pool import DescriptorPool
from google.protobuf.json_format import MessageToJson, Parse
root = os.path.dirname(os.path.realpath(__file__))
def get_files(end: str) -> Generator[str, None, None]:
for r, dirs, files in os.walk(root):
for filename in [f for f in files if f.endswith(end)]:
yield os.path.join(r, filename)
def get_base(filename: str) -> str:
return os.path.splitext(os.path.basename(filename))[0]
def ensure_ext(filename: str, ext: str) -> str:
if not filename.endswith(ext):
return filename + ext
return filename
if __name__ == "__main__":
os.chdir(root)
if len(sys.argv) > 1:
proto_files = [ensure_ext(f, ".proto") for f in sys.argv[1:]]
bases = {get_base(f) for f in proto_files}
json_files = [
f for f in get_files(".json") if get_base(f).split("-")[0] in bases
]
else:
proto_files = get_files(".proto")
json_files = get_files(".json")
for filename in proto_files:
print(f"Generating code for {os.path.basename(filename)}")
subprocess.run(
f"protoc --python_out=. {os.path.basename(filename)}", shell=True
)
subprocess.run(
f"protoc --plugin=protoc-gen-custom=../plugin.py --custom_out=. {os.path.basename(filename)}",
shell=True,
)
for filename in json_files:
# Reset the internal symbol database so we can import the `Test` message
# multiple times. Ugh.
sym = symbol_database.Default()
sym.pool = DescriptorPool()
parts = get_base(filename).split("-")
out = filename.replace(".json", ".bin")
print(f"Using {parts[0]}_pb2 to generate {os.path.basename(out)}")
imported = importlib.import_module(f"{parts[0]}_pb2")
input_json = open(filename).read()
parsed = Parse(input_json, imported.Test())
serialized = parsed.SerializeToString()
preserve = "casing" not in filename
serialized_json = MessageToJson(parsed, preserving_proto_field_name=preserve)
s_loaded = json.loads(serialized_json)
in_loaded = json.loads(input_json)
if s_loaded != in_loaded:
raise AssertionError("Expected JSON to be equal:", s_loaded, in_loaded)
open(out, "wb").write(serialized)
| 2.0625 | 2 |
backend/api/view/RegistrationView.py | forgeno/CMPUT404-group-project | 0 | 12771455 | <filename>backend/api/view/RegistrationView.py<gh_stars>0
from rest_framework import generics, permissions, status
from rest_framework.response import Response
from django.contrib.auth.models import User
from ..models import AuthorProfile
from ..serializers import CreateUserSerializer
from django.db import transaction
from django.conf import settings
class RegistrationView(generics.GenericAPIView):
serializer_class = CreateUserSerializer
permission_classes = (permissions.AllowAny, )
def post(self, request, *args, **kwargs):
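# Create the auth User and its AuthorProfile inside one atomic transaction so a failure
# in either insert rolls back both; any exception surfaces as a generic 400 response.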
try:
with transaction.atomic():
user_obj = User.objects.create_user(username=request.data["username"],password=request.data["password"])
AuthorProfile.objects.create(
host=settings.BACKEND_URL,
displayName=request.data["displayName"],
github=request.data["github"],
bio=request.data["bio"],
user=user_obj,
firstName=request.data["firstName"],
lastName=request.data["lastName"],
email=request.data["email"], isValid=False
)
return Response("Register success", status.HTTP_200_OK)
except Exception as e:
return Response(str("Register failed"), status.HTTP_400_BAD_REQUEST)
| 2.15625 | 2 |
django/mysite/polls/migrations/0005_auto_20170815_0447.py | vithd/vithd.github.io | 0 | 12771456 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-15 04:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0004_auto_20170811_0444'),
]
operations = [
migrations.RemoveField(
model_name='question',
name='demo_1',
),
migrations.RemoveField(
model_name='question',
name='demo_2',
),
migrations.RemoveField(
model_name='question',
name='demo_3',
),
migrations.AddField(
model_name='question',
name='integer',
field=models.IntegerField(blank=True, default='321', null=True),
),
migrations.AddField(
model_name='question',
name='select',
field=models.CharField(choices=[(0, 'Option 1'), (1, 'Option two'), (2, 'Option Teemo')], max_length=1, null=True),
),
migrations.AddField(
model_name='question',
name='textarea',
field=models.TextField(default='Very long text, isnt it?', max_length=200),
),
]
| 1.75 | 2 |
ranked/datasets/replay.py | Delaunay/Ranked | 0 | 12771457 | import json
from ranked.datasets import Matchup
from ranked.models import Batch, Match
class ReplayMatchup(Matchup):
"""Returns a batch of matchups, each batch have each players once.
The matches are sorted by ascending timestamp.
This means that the first batch represent the first match for each player.
second batch second match, etc...
Parameters
----------
ranker:
Ranker object used to create teams
pool:
Pool of player
matchupfs:
Name of the file containing the replay data
"""
def __init__(self, ranker, pool, matchupfs: str) -> None:
self.ranker = ranker
self._matches = []  # flat list of Match objects; kept separate so it does not shadow the matches() generator below
self.batches = []
self.pool = pool
self.step = 0
with open(matchupfs, "r") as data:
for line in data:
# each line is a JSON document for one match: an optional "batch" index plus "teams" entries with "players" ids and a "score"
match = json.loads(line)
batch = match.get("batch")
teams = match.get("teams")
leaderboard = []
for team in teams:
players = team["players"]
score = team["score"]
t1 = self.ranker.new_team(
*[self.pool[player_id] for player_id in players]
)
leaderboard.append((t1, score))
m = Match(*leaderboard)
if batch is not None:
self.batches.append((batch, m))
self._matches.append(m)
self.batches.sort(key=lambda item: item[0])
def matches(self) -> Batch:
for b in self.batches:
yield b
| 3.3125 | 3 |
keras-multi-input/mixed_training.py | 26medias/GAN-toolkit | 0 | 12771458 | # USAGE
# python mixed_training.py --dataset Houses-dataset/Houses\ Dataset/
# import the necessary packages
from pyimagesearch import datasets
from pyimagesearch import models
from sklearn.model_selection import train_test_split
from keras.layers.core import Dense
from keras.models import Model
from keras.optimizers import Adam
from keras.layers import concatenate
import numpy as np
import argparse
import locale
import os
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", type=str, required=True,
help="path to input dataset of house images")
args = vars(ap.parse_args())
# construct the path to the input .txt file that contains information
# on each house in the dataset and then load the dataset
print("[INFO] loading house attributes...")
inputPath = os.path.sep.join([args["dataset"], "HousesInfo.txt"])
df = datasets.load_house_attributes(inputPath)
# load the house images and then scale the pixel intensities to the
# range [0, 1]
print("[INFO] loading house images...")
images = datasets.load_house_images(df, args["dataset"])
images = images / 255.0
# partition the data into training and testing splits using 75% of
# the data for training and the remaining 25% for testing
print("[INFO] processing data...")
split = train_test_split(df, images, test_size=0.25, random_state=42)
(trainAttrX, testAttrX, trainImagesX, testImagesX) = split
# find the largest house price in the training set and use it to
# scale our house prices to the range [0, 1] (will lead to better
# training and convergence)
maxPrice = trainAttrX["price"].max()
trainY = trainAttrX["price"] / maxPrice
testY = testAttrX["price"] / maxPrice
# process the house attributes data by performing min-max scaling
# on continuous features, one-hot encoding on categorical features,
# and then finally concatenating them together
(trainAttrX, testAttrX) = datasets.process_house_attributes(df,
trainAttrX, testAttrX)
# create the MLP and CNN models
mlp = models.create_mlp(trainAttrX.shape[1], regress=False)
cnn = models.create_cnn(64, 64, 3, regress=False)
# create the input to our final set of layers as the *output* of both
# the MLP and CNN
combinedInput = concatenate([mlp.output, cnn.output])
# our final FC layer head will have two dense layers, the final one
# being our regression head
x = Dense(4, activation="relu")(combinedInput)
x = Dense(1, activation="linear")(x)
# our final model will accept categorical/numerical data on the MLP
# input and images on the CNN input, outputting a single value (the
# predicted price of the house)
model = Model(inputs=[mlp.input, cnn.input], outputs=x)
# compile the model using mean absolute percentage error as our loss,
# implying that we seek to minimize the absolute percentage difference
# between our price *predictions* and the *actual prices*
opt = Adam(lr=1e-3, decay=1e-3 / 200)
model.compile(loss="mean_absolute_percentage_error", optimizer=opt)
# train the model
print("[INFO] training model...")
model.fit(
[trainAttrX, trainImagesX], trainY,
validation_data=([testAttrX, testImagesX], testY),
epochs=200, batch_size=8)
# make predictions on the testing data
print("[INFO] predicting house prices...")
preds = model.predict([testAttrX, testImagesX])
# compute the difference between the *predicted* house prices and the
# *actual* house prices, then compute the percentage difference and
# the absolute percentage difference
diff = preds.flatten() - testY
percentDiff = (diff / testY) * 100
absPercentDiff = np.abs(percentDiff)
# compute the mean and standard deviation of the absolute percentage
# difference
mean = np.mean(absPercentDiff)
std = np.std(absPercentDiff)
# finally, show some statistics on our model
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
print("[INFO] avg. house price: {}, std house price: {}".format(
locale.currency(df["price"].mean(), grouping=True),
locale.currency(df["price"].std(), grouping=True)))
print("[INFO] mean: {:.2f}%, std: {:.2f}%".format(mean, std)) | 3.28125 | 3 |
Preparation/largesmall.py | jaiswalIT02/pythonprograms | 0 | 12771459 | <filename>Preparation/largesmall.py
l = [4, 3, 5, 4, -3, 10, 2, 33, 98, 4]
print(l)
smallest = l[0]
largest = l[0]
n = len(l)
for i in range(1, n):
curr = l[i]
if curr < smallest:
smallest = curr
if curr > largest:
largest = curr
print("Min=", smallest, "Max=", largest)
| 3.4375 | 3 |
show_next_error.py | 77QingLiu/SAS-Syntax-and-Theme | 9 | 12771460 | <filename>show_next_error.py
import sublime, sublime_plugin, re
class ShowNextErrorCommand(sublime_plugin.TextCommand):
def run(self, edit):
s = sublime.load_settings('SAS_Package.sublime-settings')
err_regx = s.get('err-regx', "(^(error|warning:)|uninitialized|[^l]remerge|Invalid data for)(?! (the .{4,15} product with which|your system is scheduled|will be expiring soon, and|this upcoming expiration.|information on your warning period.))")
s.set('err-regx', err_regx)
sublime.save_settings('SAS_Package.sublime-settings')
# err_regx = re.compile(err_regx, re.MULTILINE)
# Get end of last current selection.
curr_pos = 0
for region in self.view.sel():
curr_pos = region.end()
# Find the next error
next_error = self.view.find(err_regx, curr_pos, sublime.IGNORECASE)
if next_error:
# Clear out any previous selections.
self.view.sel().clear()
self.view.sel().add(next_error)
self.view.show(next_error)
sublime.status_message("Found error at " + str(next_error))
else:
sublime.status_message("No more errors!")
| 2.3125 | 2 |
Transynther/x86/process.py | ljhsiun2/medusa | 9 | 12771461 | <gh_stars>1-10
#!/usr/bin/env python3
import sys, os, codecs
from fuzz import MemAddress
def mkdir_if(dir):
if not os.path.exists(dir):
os.mkdir(dir, 0o755)
def leakage_attr(faultyLoad, faultType, leakageHist, previousMem):
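# Builds a short flag string (ht/st/zr/un/xt/4k/sm) describing where the leaked byte values in
# leakageHist appear to come from; process_log() uses the string as part of the output directory name.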
ht = st = zero = expected = unknown = _4k = same = False
for k in leakageHist:
kd = ord(codecs.decode(k, 'hex'))
if kd in range(0x40, 0x50) or kd in range(0x61, 0x69):
ht = True
elif kd == 0:
zero = True
elif kd in range(0x30, 0x40):
if faultType == "NONE" and MemAddress.Types[faultyLoad["src"]["type"]]["byte"] == kd:
expected = True
else:
st = True
elif kd in range(0x51, 0x59):
for v in previousMem:
if v['OP'] == 'STOR':
if v['dst']['same'] and v['dst']["type"] == faultyLoad["src"]["type"]:
same = True
if faultType == "NONE":
expected = True
if faultType != "NONE" and v['dst']['congruent'] == 11 and v['dst']["type"] != faultyLoad["src"]["type"]:
_4k = True
if not expected:
st = True
else:
unknown = True
attr = "_"
attr += "ht_" if ht else ""
attr += "st_" if st else ""
attr += "zr_" if zero else ""
attr += "un_" if unknown else ""
attr += "xt_" if expected else ""
attr += "4k_" if _4k else ""
attr += "sm_" if same else ""
return attr
def fault_type(faultyLoad):
if faultyLoad["src"]["AVXalign"] or faultyLoad["src"]["NT"]:
return "AVXALIGN"
addrType = faultyLoad["src"]["type"]
if MemAddress.Types[addrType]["safe"] or addrType == "addresses_PSE" or addrType == "addresses_RW":
return "NONE"
else:
return addrType.split("_")[1]
def process_log(log, leakageHist):
path = '_processed'
mkdir_if(path)
lines = log.strip().split("\n")
iFaulty = lines.index("[Faulty Load]")
iPrep = lines.index("<gen_prepare_buffer>")
inThreadMem = map(eval, lines[2:iFaulty])
outThreadMem = map(eval, lines[iPrep+1:])
faultyLoad = eval(lines[iFaulty+1])
faultType = fault_type(faultyLoad)
path = path + "/%s"%(faultType)
mkdir_if(path)
leakageAttr = leakage_attr(faultyLoad, faultType, leakageHist, inThreadMem)
path = path + "/%s"%(leakageAttr)
mkdir_if(path)
return path
def main():
logFilePath = sys.argv[1]
fLog = open(logFilePath)
line = fLog.readline()
code = log = ""
fC = fL = False
c = 0
while line:
if line.startswith(".global s_prepare_buffers"):
fC = True
fL = False
if line.startswith("<gen_faulty_load>"):
fC = False
fL = True
if line.startswith("Leaked"):
byteLeaked = int(line.split()[1])
byteHist = eval(fLog.readline())
bytePattern = fLog.readline().strip()
root = process_log(log, byteHist)
asmPath = "%s/%s_%s_%s.asm"%(root, os.path.basename(logFilePath), byteLeaked, c)
print(asmPath)
with open(asmPath, 'w+') as outFile:
outFile.write(code)
outFile.write("\n/*\n")
outFile.write(log)
outFile.write("%s\n"%str(byteHist))
outFile.write("%s\n"%bytePattern)
outFile.write("*/\n")
code = log = ""
fC = fL = False
c += 1
if fC:
code += line
if fL:
log += line
line = fLog.readline()
if __name__== "__main__":
main()
| 2.203125 | 2 |
Algo-1/week3/2-Min-Max-Heap/min_max_heap.py | pepincho/Python101-and-Algo1-Courses | 2 | 12771462 | class MinMaxHeap:
# Checks if a binary tree is a min/max heap.
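# In a min-max heap the levels alternate: each node must lie strictly inside the (min_value, max_value)
# window inherited from its ancestors, and the window is tightened from below on odd levels and from
# above on even levels as the recursion descends.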
@staticmethod
def is_valid(values, index, level, min_value, max_value):
if index >= len(values):
return True
if not (min_value < values[index] < max_value):
return False
if level % 2 != 0: # odd
min_value = values[index]
else: # even
max_value = values[index]
return (MinMaxHeap.is_valid(values, index * 2 + 1, level + 1, min_value, max_value)
and MinMaxHeap.is_valid(values, index * 2 + 2, level + 1, min_value, max_value))
def main():
# "8 71 41 31 10 11 16 46 51 31 21 13" - YES
# "8 71 41 31 25 11 16 46 51 31 21 13" - NO
N = int(input("N: "))
line = input()
l = line.split()
values = []
for number in l:
values.append(int(number))
result = MinMaxHeap.is_valid(values, 0, 1, 0, 1e10)
if result:
print("YES")
else:
print("NO")
if __name__ == '__main__':
main()
| 3.984375 | 4 |
swagger_fuzzer/utils.py | cadesalaberry/swagger-fuzzer | 25 | 12771463 | <reponame>cadesalaberry/swagger-fuzzer<filename>swagger_fuzzer/utils.py
""" Various helpers
"""
import json
from datetime import datetime
class CustomJsonEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.isoformat()
return super().default(o)
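# Illustrative usage: json.dumps({"seen_at": datetime.now()}, cls=CustomJsonEncoder)
# serializes datetime values as ISO-8601 strings instead of raising a TypeError.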
| 1.859375 | 2 |
poodle/core/profile/__init__.py | danielkauffmann/poodle | 0 | 12771464 | from .get_profile import MoodleProfile
__all__ = ["MoodleProfile"]
| 1.046875 | 1 |
main.py | mcascallares/hand-luggage | 1 | 12771465 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import csv
from collections import defaultdict
import webapp2
from webapp2_extras import json
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
import config
from tripit_facade import TripItFacade
AIRPORTS_ID = 1
MATRIX_ID = 2
class BlobModel(ndb.Model):
payload = ndb.PickleProperty(compressed=True)
@classmethod
def by_name(cls, name_value):
return cls.query(name=name_value)
class HomeHandler(webapp2.RequestHandler):
def get(self):
template_values = {}
template = config.JINJA_ENVIRONMENT.get_template('views/home.html')
self.response.write(template.render(template_values))
class AirportListHandler(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'application/csv'
airports = BlobModel.get_by_id(AIRPORTS_ID)
colors = ['#AF81C9', '#F89A7E', '#F2CA85', '#54D1F1', '#7C71AD', '#445569']
writer = csv.writer(self.response.out)
writer.writerow(['name', 'color'])
for i, value in enumerate(airports.payload):
writer.writerow([value, colors[i % len(colors)]])
class AirportMatrixHandler(webapp2.RequestHandler):
def get(self):
self.response.content_type = 'application/json'
matrix = BlobModel.get_by_id(MATRIX_ID)
self.response.write(json.encode(matrix.payload))
class RawHandler(webapp2.RequestHandler):
def get(self):
tripit = TripItFacade(config.TRIPIT_USERNAME, config.TRIPIT_PASSWORD)
flight_segments = tripit.list_flight_segments()
if len(flight_segments) > 0:
self.response.content_type = 'application/json'
self.response.write(json.encode(flight_segments))
class TripItHandler(webapp2.RequestHandler):
def get(self):
logging.info('Scheduling tripit fetch')
taskqueue.add(url='/tripit/worker')
def post(self):
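# Fetch all flight segments from TripIt, count flights per (origin, destination) pair, and
# persist the airport list and weight matrix as pickled BlobModel entities served by the
# /airports/list.csv and /airports/matrix.json handlers.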
tripit = TripItFacade(config.TRIPIT_USERNAME, config.TRIPIT_PASSWORD)
flight_segments = tripit.list_flight_segments()
logging.info('Flight segments retrieved!')
airports = set()
matrix = defaultdict(int)
for s in flight_segments:
origin, destination = s['start_airport_code'], s['end_airport_code']
matrix[origin, destination] += 1
airports.add(origin)
airports.add(destination)
airports = list(airports) # to guarantee order
weights = []
for i in airports:
current_line = [0] * len(airports)
for j, value in enumerate(airports):
current_line[j] = matrix[i, value]
weights.append(current_line)
if len(weights) > 0:
tripit_airport = BlobModel(id=AIRPORTS_ID, payload=airports)
tripit_airport.put()
tripit_matrix = BlobModel(id=MATRIX_ID, payload=weights)
tripit_matrix.put()
logging.info('Updated datastore entries with matrix and airport information')
else:
logging.error('Ignoring datastore update due to missing information, check log for errors')
app = webapp2.WSGIApplication([
('/', HomeHandler),
('/airports/matrix.json', AirportMatrixHandler),
('/airports/list.csv', AirportListHandler),
('/tripit/schedule', TripItHandler),
('/tripit/worker', TripItHandler),
('/tripit/raw', RawHandler)
], debug=True)
| 2.421875 | 2 |
Python/devtests/openImage.py | TimelabTech/apollo15 | 0 | 12771466 | <filename>Python/devtests/openImage.py<gh_stars>0
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from astropy.io import fits
hdu_list = fits.open("../../HST/u2c70104t/u2c70104t_c0f.fits")
hdu_list.info()
image_data = hdu_list[0].data
print(type(image_data))
print(image_data.shape)
#header = hdu_list['PRIMARY'].header
#print(header)
print('Min:', np.min(image_data[0]))
print('Max:', np.max(image_data[0]))
print('Mean:', np.mean(image_data[0]))
print('Stdev:', np.std(image_data[0]))
#plt.imshow(image_data[1], cmap='gray')
#plt.colorbar()
#NBINS = 1000
#histogram = plt.hist(image_data[0].flat, NBINS)
#plt.show()
plt.imshow(image_data[0], cmap='gray', norm=LogNorm())
# I chose the tick marks based on the histogram above
cbar = plt.colorbar(ticks=[0,200,400])
cbar.ax.set_yticklabels(['0','200','400'])
plt.show()
| 2.5 | 2 |
antioch/plugins/ask/plugin.py | philchristensen/antioch | 15 | 12771467 | # antioch
# Copyright (c) 1999-2019 <NAME>
#
#
# See LICENSE for details
"""
Client-side prompt support.
"""
from zope.interface import provider
from antioch import IPlugin
def ask(p, question, callback, *args, **kwargs):
details = dict(
question = question,
)
p.exchange.send_message(p.caller.get_id(), dict(
command = 'ask',
details = details,
callback = dict(
origin_id = callback.get_origin().get_id(),
verb_name = callback.get_names()[0],
args = args,
kwargs = kwargs,
)
))
@provider(IPlugin)
class AskPlugin(object):
script_url = 'js/ask-plugin.js'
def get_environment(self):
return dict(
ask = ask,
)
| 2.25 | 2 |
python_programming/basics/file_reader.py | JoshuaTPritchett/30DaysCoding | 0 | 12771468 | <filename>python_programming/basics/file_reader.py
'''
Simple file reader object that will teach
me how to properly read files
'''
#hmmm
import json
from os.path import exists
class FileReader(object):
def __init__(self, strings, mfile):
self.strings = strings
self.mfile = mfile
self.cfile = None
def print_those_damn_strings(self):
for s in self.strings:
print s
# must specify the w/r of content
# w for only writing
# r for only reading
# b binary mode
# rb, wb, r+b
def open(self):
self.cfile = open(self.mfile, 'r+')
def truncate(self):
self.cfile.truncate()
def close(self):
self.cfile.close()
def read(self, read_num=0):
if read_num:
self.contents = self.cfile.read(read_num)
else:
self.contents = self.cfile.read()
def readline(self):
self.contents = self.cfile.readline()
def readlines(self):
self.contents = self.cfile.readlines()
def xreadlines(self):
self.contents = self.cfile.xreadlines()
def read_close(self):
with open(self.mfile, 'r') as f:
self.contents = f.read()
def write(self, output):
if output:
self.cfile.write(output)
self.cfile.write("\n")
#for s in self.strings:
# self.cfile.write(s)
# self.cfile.write("\n")
def seek_file(self, seek_num):
self.cfile.seek(seek_num)
def json_dump(self):
json.dump(self.strings, self.cfile)
def exists(self):
return exists(self.mfile)
| 3.9375 | 4 |
tesa/preprocess_annotations.py | clementjumel/master_thesis | 2 | 12771469 | """
Script to preprocess and save the annotated queries and the annotations.
Usages:
tests:
python preprocess_annotations.py --no_save
regular usage:
python preprocess_annotations.py
"""
from database_creation.annotation_task import AnnotationTask
from toolbox.parsers import standard_parser, add_annotations_arguments
from collections import defaultdict
from pickle import dump
from os import makedirs
from os.path import exists
def parse_arguments():
""" Use arparse to parse the input arguments and return it as a argparse.ArgumentParser. """
ap = standard_parser()
add_annotations_arguments(ap)
return ap.parse_args()
def filter_annotations(annotations, args):
"""
Remove the annotations that don't meet the two criteria (annotations with too few answers, and answers
from workers that completed too few assignments) and return the remaining annotations.
Args:
annotations: dict of list of Annotations, Annotations from the MT workers.
args: argparse.ArgumentParser, parser object that contains the options of a script.
"""
min_assignments = args.min_assignments
min_answers = args.min_answers
length1 = sum([len([annotation for annotation in annotation_list if annotation.preprocessed_answers])
for _, annotation_list in annotations.items()])
length2 = sum([len([annotation for annotation in annotation_list if not annotation.preprocessed_answers])
for _, annotation_list in annotations.items()])
if not args.silent:
print("Filtering the annotations; annotations answered: %i, n/a: %i..." % (length1, length2))
workers_count = defaultdict(list)
for annotation_id_, annotation_list in annotations.items():
for annotation in annotation_list:
workers_count[annotation.worker_id].append(annotation_id_)
worker_cmpt = 0
for worker_id, annotation_ids in workers_count.items():
if len(annotation_ids) < min_assignments:
worker_cmpt += 1
for annotation_id_ in annotation_ids:
annotations[annotation_id_] = [annotation for annotation in annotations[annotation_id_]
if annotation.worker_id != worker_id]
length1 = sum([len([annotation for annotation in annotation_list if annotation.preprocessed_answers])
for _, annotation_list in annotations.items()])
length2 = sum([len([annotation for annotation in annotation_list if not annotation.preprocessed_answers])
for _, annotation_list in annotations.items()])
if not args.silent:
print("Number of workers discarded: %i" % worker_cmpt)
print("First filter done (number of assignments); annotations answered: %i, n/a: %i..." % (length1, length2))
annotations = {id_: annotation_list for id_, annotation_list in annotations.items()
if len([annotation for annotation in annotation_list if not annotation.bug]) >= min_answers}
length1 = sum([len([annotation for annotation in annotation_list if annotation.preprocessed_answers])
for _, annotation_list in annotations.items()])
length2 = sum([len([annotation for annotation in annotation_list if not annotation.preprocessed_answers])
for _, annotation_list in annotations.items()])
if not args.silent:
print("Second filter done (number of answers); annotations answered: %i, n/a %i.\n" % (length1, length2))
return annotations
def save_pkl(annotations, queries, args):
"""
Saves the annotations and the queries using pickle.
Args:
annotations: dict of list of Annotations, Annotations from the MT workers.
queries: dict of Queries, Queries of the annotations.
args: argparse.ArgumentParser, parser object that contains the options of a script.
"""
path = args.annotations_path + "annotations/"
annotations_fname = path + "annotations.pkl"
queries_fname = path + "queries.pkl"
if not args.no_save:
if not exists(path):
makedirs(path)
if not args.silent:
print("Folder(s) created at %s." % path)
with open(annotations_fname, 'wb') as annotations_file, open(queries_fname, 'wb') as queries_file:
dump(obj=annotations, file=annotations_file, protocol=-1)
dump(obj=queries, file=queries_file, protocol=-1)
if not args.silent:
print("Files annotations.pkl & queries.pkl saved at %s." % path)
elif not args.silent:
print("Files annotations.pkl & queries.pkl not saved at %s (not in save mode)." % path)
def main():
""" Save in a .pkl the annotated queries and the annotations. """
args = parse_arguments()
annotation_task = AnnotationTask(silent=args.silent,
results_path=args.annotations_path,
years=None,
max_tuple_size=None,
short=None,
short_size=None,
random=None,
debug=None,
random_seed=None,
save=None,
corpus_path=None)
annotation_task.process_task(exclude_pilot=args.exclude_pilot)
queries = annotation_task.queries
annotations = annotation_task.annotations
annotations = filter_annotations(annotations, args=args)
save_pkl(queries=queries, annotations=annotations, args=args)
if __name__ == '__main__':
main()
| 2.96875 | 3 |
jaegerserver/apps.py | the-bombers/jaeger | 0 | 12771470 | from django.apps import AppConfig
class JaegerserverConfig(AppConfig):
name = 'jaegerserver'
| 1.125 | 1 |
src/app/main/form.py | YiNNx/OAuth2.0 | 0 | 12771471 | <reponame>YiNNx/OAuth2.0<filename>src/app/main/form.py
' WTForms form definitions '
__author__ = 'YiNN'
from wtforms.fields import simple,RadioField,IntegerField
from wtforms import Form,validators,widgets
class LoginForm(Form):
'''Form'''
email = simple.StringField(
widget=widgets.TextInput(),
validators=[
validators.DataRequired(message="不能为空(゚Д゚*)ノ"),
validators.Email(message="请输入正确的Email格式(゚Д゚*)ノ"),
],
render_kw={"class":"form-control"} #设置属性生成的html属性
)
pword = simple.PasswordField(
validators=[
validators.DataRequired(message="请输入密码(゚Д゚*)ノ"),
],
widget=widgets.PasswordInput(),
render_kw={"class":"form-control"}
)
class SignUpForm(Form):
'''Form'''
email = simple.StringField(
widget=widgets.TextInput(),
validators=[
validators.DataRequired(message="不能为空(゚Д゚*)ノ"),
validators.Email(message="请输入正确的Email格式(゚Д゚*)ノ"),
],
render_kw={"class":"form-control"} #设置属性生成的html属性
)
pword = simple.PasswordField(
validators=[
validators.DataRequired(message="请输入密码(゚Д゚*)ノ"),
validators.Length(max=20,min=6,message="密码长度须大于%(min)d字且小于%(max)d字(゚Д゚*)ノ"),
],
widget=widgets.PasswordInput(),
render_kw={"class":"form-control"}
)
pword_re = simple.PasswordField(
validators=[
validators.DataRequired(message="请输入密码(゚Д゚*)ノ"),
validators.EqualTo('pword',message="两次密码输入不同哦(゚Д゚*)ノ"),
],
widget=widgets.PasswordInput(),
render_kw={"class":"form-control"}
)
nickname = simple.StringField(
widget=widgets.TextInput(),
validators=[
validators.DataRequired(message="不能为空(゚Д゚*)ノ"),
validators.Length(max=8,min=3,message="昵称须大于%(min)d字且小于%(max)d字(゚Д゚*)ノ")
],
render_kw={"class":"form-control"} #设置属性生成的html属性
)
class InfoForm(Form):
'''Form'''
email = simple.StringField(
widget=widgets.TextInput(),
validators=[
validators.DataRequired(message="不能为空(゚Д゚*)ノ"),
validators.Email(message="请输入正确的Email格式(゚Д゚*)ノ"),
],
render_kw={"class":"form-control"} #设置属性生成的html属性
)
nickname = simple.StringField(
widget=widgets.TextInput(),
validators=[
validators.DataRequired(message="不能为空(゚Д゚*)ノ"),
validators.Length(max=8,min=3,message="昵称须大于%(min)d字且小于%(max)d字(゚Д゚*)ノ")
],
render_kw={"class":"form-control"} #设置属性生成的html属性
)
avator = simple.StringField(
widget=widgets.TextInput(),
validators=[
validators.DataRequired(message="不能为空(゚Д゚*)ノ")
],
render_kw={"class":"form-control"} #设置属性生成的html属性
)
intro = simple.StringField(
widget=widgets.TextInput(),
validators=[
validators.DataRequired(message="不能为空(゚Д゚*)ノ"),
],
render_kw={"class":"form-control"} #设置属性生成的html属性
)
class OAuthSignForm(Form):
'''Form'''
appName = simple.StringField(
widget=widgets.TextInput(),
validators=[
validators.DataRequired(message="不能为空(゚Д゚*)ノ"),
],
render_kw={"class":"form-control"} #设置属性生成的html属性
)
homeURL = simple.StringField(
widget=widgets.TextInput(),
validators=[
validators.DataRequired(message="不能为空(゚Д゚*)ノ"),
],
render_kw={"class":"form-control"} #设置属性生成的html属性
)
appDesc = simple.StringField(
widget=widgets.TextInput(),
validators=[
validators.DataRequired(message="不能为空(゚Д゚*)ノ"),
],
render_kw={"class":"form-control"} #设置属性生成的html属性
)
backURL = simple.StringField(
widget=widgets.TextInput(),
validators=[
validators.DataRequired(message="不能为空(゚Д゚*)ノ"),
],
render_kw={"class":"form-control"} #设置属性生成的html属性
)
secrets = simple.StringField(
widget=widgets.TextInput(),
validators=[
validators.DataRequired(message="不能为空(゚Д゚*)ノ"),
],
render_kw={"class":"form-control"} #设置属性生成的html属性
)
class CollectForm(Form):
statu = RadioField(
choices=[ ('想看', '想看'),('在看', '在看'), ('看过', '看过'), ('搁置', '搁置'), ('抛弃', '抛弃')],
validators=[validators.DataRequired(message="不能为空(゚Д゚*)ノ")]
)
score = IntegerField(
widget=widgets.TextInput(),
validators=[
validators.DataRequired(message="不能为空(゚Д゚*)ノ"),
validators.NumberRange(min=1, max=10,message="超出范围了(゚Д゚*)ノ")
],
render_kw={"class":"form-control"} #设置属性生成的html属性
)
comment = simple.StringField(
widget=widgets.TextInput(),
validators=[
validators.DataRequired(message="不能为空(゚Д゚*)ノ"),
validators.Length(max=200,message="最多%(max)d字(゚Д゚*)ノ")
],
render_kw={"class":"form-control"} #设置属性生成的html属性
) | 2.578125 | 3 |
topicmodel/dataprep/extractbiovectors.py | tedunderwood/biographies | 0 | 12771472 | <filename>topicmodel/dataprep/extractbiovectors.py
chars2get = set()
with open('biofic2take.tsv', encoding = 'utf-8') as f:
for line in f:
fields = line.strip().split('\t')
charid = fields[0]
chars2get.add(charid)
outlines = []
with open('../biofic50/biofic50_doctopics.txt', encoding = 'utf-8') as f:
for line in f:
fields = line.strip().split('\t')
charid = fields[1]
if charid in chars2get:
outlines.append(line)
with open('../biofic50/biofic50_viz.tsv', mode = 'w', encoding = 'utf-8') as f:
for line in outlines:
f.write(line)
| 2.65625 | 3 |
gifmaker.py | tumaatti/gifmaker | 0 | 12771473 | <reponame>tumaatti/gifmaker<filename>gifmaker.py<gh_stars>0
#!/usr/bin/env python3
import argparse
import os
parser = argparse.ArgumentParser(
description='Simple way to generate .gif-files from videofiles'
)
parser.add_argument('input', metavar='I', type=str, help='specify input file')
parser.add_argument('output', metavar='O', type=str, help='specify output file')
parser.add_argument(
'start_time',
metavar='S',
type=str,
help='specify start time of the gif in input file in format 12:33'
)
parser.add_argument(
'duration',
metavar='D',
type=str,
help='give the duration of the clip'
)
args = parser.parse_args()
palette = '/tmp/palette.png'
filters = 'fps=25,scale=1280:720:flags=lanczos'
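# Standard two-pass GIF encoding: the first ffmpeg call generates an optimized palette for the
# clip, the second applies it with paletteuse for better colour quality than a direct encode.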
os.system(
f"ffmpeg -v warning -ss {args.start_time} -t {args.duration} -i "
f"{args.input} -vf '{filters},palettegen' -y {palette}"
)
os.system(
f"ffmpeg -v warning -ss {args.start_time} -t {args.duration} -i "
f"{args.input} -i {palette} -lavfi '{filters} [x]; [x][1:v] paletteuse'"
f"-y {args.output}"
)
| 2.703125 | 3 |
tests/__init__.py | bisguzar/flask-mongoengine | 1 | 12771474 | import unittest
import flask
import mongoengine
class FlaskMongoEngineTestCase(unittest.TestCase):
"""Parent class of all test cases"""
def setUp(self):
self.app = flask.Flask(__name__)
self.app.config['MONGODB_DB'] = 'test_db'
self.app.config['TESTING'] = True
self.ctx = self.app.app_context()
self.ctx.push()
# Mongoengine keep a global state of the connections that must be
# reset before each test.
# Given it doesn't expose any method to get the list of registered
# connections, we have to do the cleaning by hand...
mongoengine.connection._connection_settings.clear()
mongoengine.connection._connections.clear()
mongoengine.connection._dbs.clear()
def tearDown(self):
self.ctx.pop()
| 2.5 | 2 |
test/test_drive.py | albertpatterson/google-api-helpers | 1 | 12771475 | from google_api_helpers import auth
from google_api_helpers import drive
from test.utils import drive as test_drive
from test.utils import auth as test_auth
import pytest
@pytest.fixture(scope="session", autouse=True)
def getTestCredentials():
return test_auth.getTestCredentials()
class TestDrive(test_drive.WithDriveCleaningFixture):
def test_list_empty(self):
contents = drive.list()
assert contents == []
def test_createBlank(self):
testName = "testing_created_blank"
createdSheetId = drive.createBlank(testName, [], drive.MimeTypes.sheet)
contents = drive.list()
createdSheet = {'id': createdSheetId, 'name': testName}
assert contents == [createdSheet]
def test_createBlankSheet(self):
testName = "testing_created_blank_sheet"
createdSheetId = drive.createBlankSheet(testName, [])
contents = drive.list()
createdSheet = {'id': createdSheetId, 'name': testName}
assert contents == [createdSheet]
def test_list_filtered(self):
testName1 = "testing_created_1"
createdSheetId1 = drive.createBlank(
testName1, [], drive.MimeTypes.sheet)
testName2 = "testing_created_2"
createdSheetId2 = drive.createBlank(
testName2, [], drive.MimeTypes.sheet)
testName3 = "testing_created_3"
createdSheetId3 = drive.createBlank(
testName3, [], drive.MimeTypes.sheet)
matchedContents = drive.list("name = 'testing_created_2'")
assert matchedContents == [
{'id': createdSheetId2, 'name': testName2}
]
| 2.359375 | 2 |
spider/html_downloader.py | pq27120/xgb_spider | 0 | 12771476 | #!/usr/bin/env python2
# -*- coding: UTF-8 -*-
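# Scrapes WeChat official-account article lists and articles through the Sogou Weixin search
# portal, rotating user agents, cookies and proxies (urllib2, PhantomJS and Chrome via
# Selenium), and falling back to captcha handling when Sogou asks for verification.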
import requests
import html_parser
import urllib2
# from ruokuaicode import RClient
import exceptions
from PIL import Image
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# import filecache
import time
import os
import random
import datetime
# import config
import socket
import sys
import logging
try:
import StringIO
def readimg(content):
return Image.open(StringIO.StringIO(content))
except ImportError:
import tempfile
def readimg(content):
f = tempfile.TemporaryFile()
f.write(content)
return Image.open(f)
UA = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"
PROXY = "172.16.58.3:8123"
PSIPHON = '127.0.0.1:54552'
def test():
profile_dir = r"D:\MyChrome\Default"
# 设置请求头
# "Referer": "http://weixin.sogou.com"
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--user-data-dir=" + os.path.abspath(profile_dir))
PROXY = "172.16.58.3:8123"
# j = random.randint(0, len(proxys)-1)
# proxy = proxys[j]
chrome_options.add_argument('--proxy-server=%s' % PROXY)
# chrome_options.add_extension('')添加crx扩展
# service_args = ['--proxy=localhost:9050', '--proxy-type=socks5', ]
driver = webdriver.Chrome(r'C:\Python27\chromedriver', chrome_options=chrome_options)
driver.get('http://icanhazip.com')
driver.refresh()
print(driver.page_source)
driver.quit()
class HtmlDownloader(object):
def __init__(self):
# self._ocr = RClient(config.dama_name, config.dama_pswd, config.dama_soft_id, config.dama_soft_key)
# self._cache = filecache.WechatCache(config.cache_dir, 60 * 60)
# self._session = self._cache.get(config.cache_session_name) if self._cache.get(
# config.cache_session_name) else requests.session()
self.cookie = self.maintain_cookies_ph()
self.agents = [
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
]
def ocr4wechat(self, url):
# logger.debug('vcode appear, using _ocr_for_get_gzh_article_by_url_text')
timestr = str(time.time()).replace('.', '')
timever = timestr[0:13] + '.' + timestr[13:17]
codeurl = 'http://mp.weixin.qq.com/mp/verifycode?cert=' + timever
coder = self._session.get(codeurl)
if hasattr(self, '_ocr'):
result = self._ocr.create(coder.content, 2040)
img_code = result['Result']
print(img_code)
else:
im = readimg(coder.content)
im.show()
img_code = raw_input("please input code: ")
post_url = 'http://mp.weixin.qq.com/mp/verifycode'
post_data = {
'cert': timever,
'input': img_code
}
headers = {
"User-Agent": random.choice(self.agents),
'Host': 'mp.weixin.qq.com',
'Referer': url
}
rr = self._session.post(post_url, post_data, headers=headers)
print(rr.text)
remsg = eval(rr.text)
if remsg['ret'] != 0:
# logger.error('cannot verify get_gzh_article because ' + remsg['errmsg'])
raise exceptions.WechatSogouVcodeException('cannot verify wechat_code because ' + remsg['errmsg'])
# self._cache.set(config.cache_session_name, self._session)
# logger.debug('ocr ', remsg['errmsg'])
def download_list(self, url, name):
'''
使用urllib2 获取微信公众号列表页的url
:param url:
:param name:
:return:
'''
headers = {
"User-Agent": random.choice(self.agents),
"Referer": 'http://weixin.sogou.com/',
'Host': 'weixin.sogou.com',
'Cookie': random.choice(self.cookie)
}
req = urllib2.Request(url, headers=headers)
req.set_proxy(PROXY, 'http')
try:
response = urllib2.urlopen(req)
time.sleep(1)
except urllib2.URLError as e:
if hasattr(e, 'reason'):
#HTTPError and URLError all have reason attribute.
print 'We failed to reach a server.'
print 'Reason: ', e.reason
elif hasattr(e, 'code'):
#Only HTTPError has code attribute.
print 'The server couldn\'t fulfill the request.'
print 'Error code: ', e.code
with open(r'list_error.txt', 'a') as f:
f.write(name.encode('utf-8'))
f.write('\n')
return
try:
# a = html_parser.HtmlParser.parse_list_url(response, name)
a = ''
except AttributeError:
with open(r'list_error.txt', 'a') as f:
f.write(name.encode('utf-8'))
f.write('\n')
return
if a is not None:
return self.download(a, name, url)
# headers_weixin = {
# "User-Agent": random.choice(self.agents),
# "Referer": 'http://weixin.sogou.com/',
# 'Host': 'mp.weixin.qq.com',
# }
# req1 = urllib2.Request(a, headers=headers_weixin)
# response1 = urllib2.urlopen(req1)
# with open('c:\\a.html', 'a') as f:
# f.write(response1.read())
def download(self, link, name, url):
"""
下载指定公众号的文章列表
:param link:
:param name:
:param url:
:return:
"""
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = (
random.choice(self.agents)
)
dcap["takesScreenshot"] = False
dcap["phantomjs.page.customHeaders.Cookie"] = random.choice(self.cookie)
# dcap["phantomjs.page.settings.resourceTimeout"] = ("1000")
try:
driver1 = webdriver.PhantomJS(desired_capabilities=dcap, service_args=['--load-images=no', ])
except Exception as e:
with open(r'list_error.txt', 'a') as f:
f.write(name.encode('utf-8'))
f.write('\n')
print(datetime.datetime.now())
print(url)
print(e)
else:
try:
driver1.set_page_load_timeout(20)
driver1.get(link)
b = True
try:
driver1.find_element_by_class_name('page_verify')
except:
b = False
if b is True:
print('page needs verify, stop the program')
print('the last weixinNUM is %s\n' % name)
self.ocr4wechat(link)
time.sleep(5)
with open(r'list_error.txt', 'a') as f:
f.write(name.encode('utf-8'))
f.write('\n')
else:
html = driver1.page_source
return link, html
except Exception as e:
with open(r'list_error.txt', 'a') as f:
f.write(name.encode('utf-8'))
f.write('\n')
print(url)
print(datetime.datetime.now())
print(e)
finally:
driver1.quit()
def download_list_ph(self, url, name):
'''
使用phantomjs下载微信公众号文章列表
:param url:
:param name:
:return:
'''
if url is None:
return None
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = (
random.choice(self.agents)
)
dcap["takesScreenshot"] = False
dcap["phantomjs.page.customHeaders.Cookie"] = random.choice(self.cookie)
# dcap["phantomjs.page.settings.resourceTimeout"] = ("1000")
a = True
try:
driver = webdriver.PhantomJS(desired_capabilities=dcap, service_args=['--load-images=no',
'--proxy=172.16.58.3:8123'])
except Exception as e:
with open(r'list_error.txt', 'a') as f:
f.write(name.encode('utf-8'))
f.write('\n')
print(datetime.datetime.now())
print(url)
print(e)
else:
driver.set_page_load_timeout(20)
try:
driver.get(url)
except:
time.sleep(2)
driver.refresh()
try:
driver.find_element_by_id("noresult_part1_container")
a = True
except:
a = False
if a is True:
with open(r'no_wechat.txt', 'a') as f:
f.write(name.encode('utf-8'))
f.write('\n')
# 公众号存在
elif a is False:
try:
# driver.get_screenshot_as_file(r'c:\pic.png')
driver.implicitly_wait(2)
# 代理连接过多导致失败
button = driver.find_element_by_css_selector('a[uigs =\'main_toweixin_account_image_0\']')
link = button.get_attribute('href')
# with open(r'c:\WechatList.txt', 'a') as f:
# f.write(name.encode('utf-8') + '\n')
except Exception as e:
link = None
with open(r'list_error.txt', 'a') as f:
f.write(name.encode('utf-8'))
f.write('\n')
print(datetime.datetime.now())
print(url)
print(e)
finally:
driver.quit()
# 获取公众号文章列表
if a is False and link is not None:
try:
driver1 = webdriver.PhantomJS(desired_capabilities=dcap, service_args=['--load-images=no'])
except Exception as e:
with open(r'list_error.txt', 'a') as f:
f.write(name.encode('utf-8'))
f.write('\n')
print(datetime.datetime.now())
print(url)
print(e)
else:
try:
driver1.set_page_load_timeout(20)
driver1.get(link)
b = True
try:
driver1.find_element_by_class_name('page_verify')
except:
b = False
if b is True:
print('page needs verify, stop the program')
print('the last weixinNUM is %s\n' % name)
self.ocr4wechat(link)
time.sleep(5)
with open(r'list_error.txt', 'a') as f:
f.write(name.encode('utf-8'))
f.write('\n')
# os.system('pause')
else:
html = driver1.page_source
return link, html
except Exception as e:
with open(r'list_error.txt', 'a') as f:
f.write(name.encode('utf-8'))
f.write('\n')
print(url)
print(datetime.datetime.now())
print(e)
finally:
driver1.quit()
def download_list_chrome(self, url, name):
if url is None:
return None
profile_dir = r"D:\MyChrome\Default"
# "Referer": "http://weixin.sogou.com"
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--user-data-dir=" + os.path.abspath(profile_dir))
chrome_options.add_argument('--proxy-server=%s' % PROXY)
# chrome_options.add_extension('')添加crx扩展
# service_args = ['--proxy=localhost:9050', '--proxy-type=socks5', ]
try:
driver = webdriver.Chrome(r'C:\Python27\chromedriver', chrome_options=chrome_options)
except Exception as e:
with open(r'list_error.txt', 'a') as f:
f.write(name.encode('utf-8'))
f.write('\n')
print(datetime.datetime.now())
print(url)
print(e)
else:
try:
driver.set_page_load_timeout(20)
try:
driver.get('http://weixin.sogou.com/')
except:
time.sleep(3)
driver.refresh()
# driver.implicitly_wait(5)
# 会产生too many requests
driver.delete_all_cookies()
i = random.randint(0, 4)
for cookie in self.cookie[i]:
driver.add_cookie(cookie)
time.sleep(1)
try:
driver.get(url)
except:
time.sleep(2)
driver.refresh()
time.sleep(2)
# 判断是否存在这个公众号
try:
driver.find_element_by_id("noresult_part1_container")
a = True
except:
a = False
if a is True:
with open(r'no_wechat.txt', 'a') as f:
f.write(name.encode('utf-8'))
f.write('\n')
elif a is False:
# 应对 too many connections
try:
WebDriverWait(driver, 5).until(
EC.presence_of_element_located((By.ID, "sogou_vr_11002301_box_0"))
)
except:
time.sleep(2)
driver.refresh()
now_handle = driver.current_window_handle
driver.find_element_by_id('sogou_vr_11002301_box_0').click()
# 会存在需要验证的情况
time.sleep(2)
all_handles = driver.window_handles
for handle in all_handles:
if handle != now_handle:
driver.switch_to.window(handle) # 跳转到新的窗口
# 判断页面是否是验证页面
# b = True
# while b is True:
# try:
# driver.find_element_by_class_name("page_verify")
# b = True
# driver.refresh()
# time.sleep(2)
# except:
# b = False
#
# # 等待列表的出现
# try:
# WebDriverWait(driver, 5).until(
# EC.presence_of_element_located((By.CLASS_NAME, "weui_msg_card_hd"))
# )
# except:
# driver.refresh()
# time.sleep(2)
# html = driver.page_source#网页动态加载后的代码
wechat_url = driver.current_url
i = random.randint(0, 4)
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = (
UA
)
dcap["takesScreenshot"] = (False)
dcap["phantomjs.page.customHeaders.Cookie"] = self.cookie[i]
try:
driver1 = webdriver.PhantomJS(desired_capabilities=dcap, service_args=['--load-images=no'])
except Exception as e:
with open(r'list_error.txt', 'a') as f:
f.write(name.encode('utf-8'))
f.write('\n')
print(datetime.datetime.now())
print(url)
print(e)
else:
try:
driver1.set_page_load_timeout(20)
driver1.get(wechat_url)
html = driver1.page_source
return wechat_url, html
# except Exception as e:
# with open(r'list_error.txt', 'a') as f:
# f.write(name.encode('utf-8'))
# f.write('\n')
# print(datetime.datetime.now())
# print(url)
# print(e)
finally:
driver1.quit()
# return wechat_url, html
except Exception as e:
with open(r'list_error.txt', 'a') as f:
f.write(name.encode('utf-8'))
f.write('\n')
print(url)
print(datetime.datetime.now())
print(e)
finally:
driver.quit()
# if a is False:
# i = random.randint(0, 4)
# dcap = dict(DesiredCapabilities.PHANTOMJS)
# dcap["phantomjs.page.settings.userAgent"] = (
# UA
# )
# dcap["takesScreenshot"] = (False)
# dcap["phantomjs.page.customHeaders.Cookie"] = self.cookie[i]
# try:
# driver1 = webdriver.PhantomJS(desired_capabilities=dcap, service_args=['--load-images=no'])
# except Exception as e:
# print(datetime.datetime.now())
# print(url)
# print(e)
# else:
# try:
# driver1.set_page_load_timeout(20)
# driver1.get(wechat_url)
# html = driver1.page_source
# return wechat_url, html
# except Exception as e:
# print(datetime.datetime.now())
# print(url)
# print(e)
# finally:
# driver1.quit()
# response = urllib2.urlopen(url)
# if response.getcode() != 200:
# return None
# return response.read()
def download_articles_ph(self, url):
'''
使用phantomjs下载文章
:param url: 文章链接
:return:
'''
if url is None:
return None
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = (
UA
)
dcap["takesScreenshot"] = (False)
try:
driver = webdriver.PhantomJS(executable_path=r"D:\soft\phantomjs-2.1.1-windows\bin\phantomjs.exe", desired_capabilities=dcap, service_args=['--load-images=no'])
except Exception as e:
print(datetime.datetime.now())
print(url)
print(e)
else:
try:
driver.set_page_load_timeout(30)
driver.get(url)
time.sleep(1)
# driver.implicitly_wait(2)
html = driver.page_source
return html
except:
print(datetime.datetime.now())
print(url)
finally:
driver.quit()
def download_articles_chrome(self, url):
# service_args = ['--load-images=no', ]
profile_dir = r"D:\MyChrome\Default"
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--user-data-dir=" + os.path.abspath(profile_dir))
# PROXY = "172.16.58.3:8123"
# # j = random.randint(0, len(proxys)-1)
# # proxy = proxys[j]
# chrome_options.add_argument('--proxy-server=%s' % PROXY)
# chrome_options.add_extension('')添加crx扩展
# service_args = ['--proxy=localhost:9050', '--proxy-type=socks5', '--load-images=no', ]
try:
driver = webdriver.Chrome(r'C:\Python27\chromedriver', chrome_options=chrome_options)
except Exception as e:
print(datetime.datetime.now())
print(url)
print(e)
else:
try:
driver.set_page_load_timeout(30)
driver.get(url)
driver.implicitly_wait(2)
html = driver.page_source
return html
except:
print(datetime.datetime.now())
print(url)
# selenium.common.exceptions.TimeoutException:
# return self.download_acticles(url)
return None
finally:
driver.quit()
def maintain_cookies(self):
cookie = []
# 获取5组cookies
for i in range(5):
driver = webdriver.Chrome(r'C:\Python27\chromedriver')
driver.get("http://weixin.sogou.com/")
# 获得cookie信息
cookie.append(driver.get_cookies())
print(driver.get_cookies())
driver.quit()
return cookie
def maintain_cookies_ph(self):
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = UA
cookie = []
# 获取5组cookies
for i in range(5):
driver = webdriver.PhantomJS(executable_path=r"D:\soft\phantomjs-2.1.1-windows\bin\phantomjs.exe", desired_capabilities=dcap, service_args=['--load-images=no', ])
driver.get("http://weixin.sogou.com/")
# 获得cookie信息
cookie.append(driver.get_cookies())
# print(driver.get_cookies())
driver.quit()
return cookie
if __name__ == "__main__":
a = HtmlDownloader()
# # a.ocr4wechat('http://mp.weixin.qq.com/s?timestamp=1478687270&src=3&ver=1&signature=5RtOXxZ16P0x8hvN7sARkESooWCRi1F-'
# 'AcdjyV1phiMF7EC8fCYB1STlGWMUeoUQtSoEFQC26jd-X-*3GiGa-ZwBJQBld54xrGpEc81g*kjGncNNXLgRkpw5WIoCO5T-KbO'
# 'xjsRjYFvrvDaynu1I7vvIE9itjIEzCa77YZuMMyM=')
# a.download_list_chrome("http://weixin.sogou.com/weixin?type=%d&query=%s" % (1, 'renmin'), u'renmin')
| 2.296875 | 2 |
sparselt/linear_transform.py | LiamBindle/splint | 0 | 12771477 | <gh_stars>0
import numpy as np
import scipy.sparse
class SparseLinearTransform:
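# Wraps a sparse matrix built from COO-style (weights, row_ind, col_ind) triplets as a callable
# that flattens the input core dimensions, applies the matrix, and reshapes to the output core
# shape; np.vectorize with a gufunc-style signature broadcasts it over any leading dimensions.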
_input_core_shape = None
_input_core_dims = None
_output_core_shape = None
_output_core_dims = None
_matrix = None
_order = None
_vfunc = None
def __init__(self, weights, row_ind, col_ind,
input_transform_dims, output_transform_dims,
one_based_indices=False, order="C"):
weights = np.asarray(weights)
row_ind = np.asarray(row_ind)
col_ind = np.asarray(col_ind)
if one_based_indices:
row_ind -= 1
col_ind -= 1
self._input_core_dims = tuple(input_transform_dims[0])
self._input_core_shape = tuple(input_transform_dims[1])
self._output_core_dims = tuple(output_transform_dims[0])
self._output_core_shape = tuple(output_transform_dims[1])
self._matrix = scipy.sparse.csr_matrix((weights, (row_ind, col_ind)))
self._order = order
self._vfunc = self._create_vfunc()
def _func(self, a: np.ndarray):
a = a.flatten(order=self._order)
return self._matrix.dot(a).reshape(self._output_core_shape, order=self._order)
def _create_vfunc(self) -> callable:
input_signature = ','.join(self.input_core_dims)
output_signature = ','.join(self.output_core_dims)
return np.vectorize(self._func, signature='({})->({})'.format(input_signature, output_signature))
@property
def vfunc(self) -> callable:
return self._vfunc
@property
def input_core_dims(self) -> tuple:
return self._input_core_dims
@property
def output_core_dims(self) -> tuple:
return self._output_core_dims
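# Minimal usage sketch (not part of the original module): made-up weights that
# average adjacent pairs of a length-4 input down to a length-2 output.
if __name__ == "__main__":
    transform = SparseLinearTransform(
        weights=[0.5, 0.5, 0.5, 0.5],
        row_ind=[0, 0, 1, 1],           # output element index
        col_ind=[0, 1, 2, 3],           # flattened input element index
        input_transform_dims=(("n_in",), (4,)),
        output_transform_dims=(("n_out",), (2,)),
    )
    data = np.arange(8, dtype=float).reshape(2, 4)  # leading axis is broadcast
    print(transform.vfunc(data))                    # expected shape: (2, 2)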
| 2.234375 | 2 |
desafio_primalidad.py | davagni/PythonPractice | 0 | 12771478 | <reponame>davagni/PythonPractice
import math
def es_primo(numero):
# <NAME> Wilson
num = math.factorial(numero - 1) + 1
if num % numero == 0:
return True
else:
return False
def run():
numero = int(input('Ingresa un numero: '))
if es_primo(numero) and numero != 1:
print(f'{numero} es primo')
else:
print(f'{numero} no es primo')
if __name__ == "__main__":
run()
| 3.921875 | 4 |
main.py | Mo-Shakib/Wordle-Solver | 1 | 12771479 | import words
words_database = words.words()
yellow_letters = []
my_letters = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
defaults = ["BRICK","JUMPY","VOZHD","GLENT","WAQFS"]
defaults_2 = [['b','r','i','c','k'],['j','u','m','p','y'],['v','o','z','h','d'],['g','l','e','n','t'],['w','a','q','f','s']]
fixed_letters = {}
valid_letters = ['x']
original_letters = []
expected_word = ['_','_','_','_','_']
print('---------- Welcome to wordle solver! ----------')
it = 0
for i in defaults_2:
print(f'---> Use {defaults[it]} as input {it+1}')
g = list(input('[ ] Enter green letters: ').split())
for j in g:
if j in i:
valid_letters.append(j)
original_letters.append(j)
for k in range(len(g)):
if g[k] in i:
fixed_letters[k] = g[k]
expected_word[k] = g[k]
it += 1
y = list(input('[ ] Enter yellow letters: ').split())
for j in y:
if j in i:
valid_letters.append(j)
yellow_letters.append(j)
original_letters.append(j)
final_word = ['_','_','_','_','_']
temp = []
positions = []
for keys in fixed_letters.keys():
positions.append(keys)
for wrd in words_database:
wrd = wrd.strip()
    # Keep only words that match every fixed (green) letter position
    if fixed_letters and all(wrd[pos] == fixed_letters[pos] for pos in positions):
        temp.append(wrd)
        for pos in positions:
            final_word[pos] = wrd[pos]
temp = sorted(temp)
last_filter = []
if len(temp) == 0:
for word in words_database:
count = 0
for i in word:
if i in valid_letters:
count += 1
if count == 5:
last_filter.append(word)
else:
for word in temp:
count = 0
for i in word:
if i in valid_letters:
count += 1
if count == 5:
last_filter.append(word)
if len(last_filter) == 0:
print('Sorry, no words found')
exit()
else:
result = {}
original_letters = sorted(original_letters)
last_filter = sorted(last_filter)
for word in last_filter:
w = word
word = list(word)
word = set(word)
score = len(word)
for i in word:
if i in yellow_letters:
score += 1
elif i not in original_letters:
score -= 1
result[w] = score
result = sorted(result.items(), key=lambda kv: kv[1], reverse=True)
output = result[0][0]
print('The word is:',output.upper()) | 3.484375 | 3 |
lib/emulator/nn/max_pool.py | amohant4/myFramework | 0 | 12771480 | <filename>lib/emulator/nn/max_pool.py
"""
Created on Tue May 08 17:18:00 2018
@Author: <NAME>, <NAME>
"""
import numpy as np
import emulator as em
from emulator.operation import Operation
class max_pool(Operation):
def __init__(self, i, ksize, strides, padding, name):
super(max_pool,self).__init__([i])
self.ksize = ksize
# print("i: ", i)
# print("ksize: ", ksize)
# print("strides: ", strides)
# print("padding: ", padding)
self.stride = strides
self.padding = padding
self._shape = [i.shape[0], None, None, i.shape[-1] ]
@property
def shape(self):
return self._shape
def pool_1ch(self, img_pad, ksize, stride):
# print("img_ch shape: ", img_pad.shape)
# print("img_ch: ", img_pad)
ri, ci = img_pad.shape
rk, ck = ksize[1], ksize[2]
ro = int(np.floor((ri-rk)/stride[1])) + 1
co = int(np.floor((ci-ck)/stride[2])) + 1
pool_out = np.zeros((ro, co))
pool_loc = np.zeros((ri, ci))
for r in range(ro):
for c in range(co):
sr = r*stride[1]
sc = c*stride[2]
region_tmp = img_pad[sr:sr+rk, sc:sc+ck]
pool_out[r, c] = np.max(region_tmp)
return pool_out
def compute(self, img):
# print("img_ch shape: ", img.shape)
# print("img_ch: ", img)
img = img.astype(float)
img_padded = em.utils.pad(img, self.ksize, self.stride, self.padding, padder = -999)
img_b, img_h, img_w, img_c = img_padded.shape
ro = int(np.floor((img_h-self.ksize[1])/self.stride[1]))+1
co = int(np.floor((img_w-self.ksize[2])/self.stride[2]))+1
pool_out = np.zeros((img_b, ro, co, img_c))
for b_tmp in range(img_b):
for i_tmp in range(img_c):
pool_out_tmp = self.pool_1ch(img_padded[b_tmp,:,:,i_tmp], self.ksize, self.stride)
pool_out[b_tmp,:,:,i_tmp] = pool_out_tmp
return pool_out
| 2.578125 | 3 |
dbutils/actions/query.py | Yash-Amin/DbUtils | 0 | 12771481 | """Query mode"""
import os
import re
import csv
import sys
import argparse
from bson import json_util
from dataclasses import dataclass
from pymongo.mongo_client import MongoClient
from typing import Dict, List, Pattern, TextIO
from dbutils import constants
from dbutils.utils import get_comma_separated_fields, str2bool
@dataclass
class QueryModeOptions:
# Database name
database: str
# Collection name
collection: str
    # Specify comma-separated column names; given fields will be projected
# If no value is specified, all columns will be returned in the output
columns: List[str]
# Limit number of records
limit: int
# provide batch size for file-chunks mode
    # script will also use batch_size to fetch records in batches
batch_size: int
# Specify output mode (stdout, file, fie-chunks)
output_mode: str
# Specify output file type (json, csv)
output_file_type: str
    # Specify bool to include header in csv file format
include_header: bool
# Output path. For file-chunks mode provide dir path, for file mode provide
# file path
output_path: str
# For file-chunks mode, provide file prefix
output_file_prefix: str
# For file-chunks mode, provide file extension
output_file_extension: str
# Provide queries
queries: Dict[str, Pattern]
# MongoDB client
mongodb_client: MongoClient
# MongoDB collection
mongodb_collection: any
# default mode
mode: str = constants.Modes.QUERY
def parse_arguments() -> argparse.Namespace:
"""Parse arguments for query mode."""
parser = argparse.ArgumentParser(description="DbUtils - Query mode")
parser.add_argument("mode", help="Operation mode", choices=[constants.Modes.QUERY])
parser.add_argument("-database", help="Mongodb Database Name", required=True)
parser.add_argument("-collection", help="Mongodb Collection Name", required=True)
parser.add_argument(
"-columns",
help="Given comma-separated values will be projected in the output (default=all columns)",
default="",
)
parser.add_argument("-batch-size", help="Batch size", default=500, type=int)
parser.add_argument("-limit", help="Limit number of records.", default=-1, type=int)
parser.add_argument(
"-output-mode",
help=(
"'stdout' output mode will print output in stdout. "
"'file' output mode will write output to a file. "
"'file-chunks' output mode will write output in smaller file chunks. "
"Use batch-mode argument to specify batch size."
),
required=True,
choices=[
constants.OutputMode.FILE,
constants.OutputMode.FILE_CHUNKS,
constants.OutputMode.STDOUT,
],
)
parser.add_argument(
"-output-file-type",
help="Output file type",
required=True,
choices=[
constants.FileTypes.CSV,
constants.FileTypes.JSON,
],
)
parser.add_argument(
"-include-header",
help="Include header for CSV file",
default=False,
type=str2bool,
)
parser.add_argument(
"-output-path",
help="If output-mode is file, provide file name. If output-mode is file-chunks, provide directory name",
default="",
)
parser.add_argument(
"-output-file-prefix",
help="Output file prefix for file-chunks mode",
default="",
)
parser.add_argument(
"-output-file-extension",
help="Output file extension for file-chunks mode",
default="txt",
)
parser.add_argument(
"-queries",
help="Provide regex queries in this format - '-queries KEY_NAME_1=REGEX_1 KEY_NAME_2=REGEX-2'",
nargs="*",
)
args = parser.parse_args()
# output-path is required when output-mode is file or file-chunks
if args.output_mode != constants.OutputMode.STDOUT and args.output_path == "":
parser.print_usage()
raise argparse.ArgumentTypeError(
"output-path is required when output-mode is file or file-chunks."
)
# output-file-prefix is required when output-mode is file-chunks
if (
args.output_mode == constants.OutputMode.FILE_CHUNKS
and args.output_file_prefix == ""
):
parser.print_usage()
raise argparse.ArgumentTypeError(
"output-file-prefix is required when output-mode is file-chunks."
)
args.queries = {
query[: query.index("=")]: re.compile(query[query.index("=") + 1 :])
for query in args.queries or []
}
args.columns = get_comma_separated_fields(args.columns)
return args
def create_options_from_args() -> QueryModeOptions:
"""Parses arguments and returns QueryModeOptions."""
    args = vars(parse_arguments())
# FIXME: get mongodb connection string from environment variable
mongo_client = MongoClient()
args["mongodb_client"] = mongo_client
args["mongodb_collection"] = mongo_client[args["database"]][args["collection"]]
return QueryModeOptions(**args)
def _write_json(records: List[Dict], output_file: TextIO):
"""Write records to json file."""
# Using json_util.dumps convert mongodb record with object_id, to string
# and write records to file
output_file.writelines([json_util.dumps(record) + "\n" for record in records])
# Close output stream
if output_file != sys.stdout:
# If output_file mode is stdout, closing it will cause error for print()
output_file.close()
def _write_csv(records: List[Dict], output_file: TextIO, write_header: bool = False):
"""Write output to csv file."""
# Find unique column names
columns = set()
for record in records:
columns.update(record.keys())
csv_writer = csv.DictWriter(output_file, fieldnames=sorted(columns))
# Write columns
write_header and csv_writer.writeheader()
for record in records:
csv_writer.writerow(record)
# Close output stream
if output_file != sys.stdout:
# If output_file mode is stdout, closing it will cause error for print()
output_file.close()
def output(options: QueryModeOptions, batch_id: int, records: List[Dict]):
"""Write output"""
output_mode = options.output_mode
if output_mode == constants.OutputMode.STDOUT:
output_path = ""
output_stream = sys.stdout
elif output_mode == constants.OutputMode.FILE:
output_path = options.output_path
output_stream = open(output_path, "a")
elif output_mode == constants.OutputMode.FILE_CHUNKS:
output_file_name = (
f"{options.output_file_prefix}-{batch_id}.{options.output_file_extension}"
)
output_path = os.path.join(options.output_path, output_file_name)
output_stream = open(output_path, "w")
# Write output to csv/json file
if options.output_file_type == constants.FileTypes.CSV:
_write_csv(records, output_stream, options.include_header)
elif options.output_file_type == constants.FileTypes.JSON:
_write_json(records, output_stream)
def run(options: QueryModeOptions) -> None:
"""Runs query mode."""
if options.output_mode == constants.OutputMode.FILE_CHUNKS:
# Creates directory for file-chunks mode
os.makedirs(options.output_path, exist_ok=True)
elif options.output_mode == constants.OutputMode.FILE:
# If output-mode is `file` and if output-path is '/some/path/file.csv'
# and if directory '/some/path/' does not exist, it will be created
dir_path = os.path.dirname(os.path.abspath(options.output_path))
os.makedirs(dir_path, exist_ok=True)
# TODO: raise error if output-path exists, add new argument to
# overwrite file if it exists.
# If output_mode is file and output-path exists, this will delete it
if os.path.exists(options.output_path) and os.path.isfile(options.output_path):
os.unlink(options.output_path)
elif options.output_mode == constants.OutputMode.FILE_CHUNKS:
# TODO: if files with file-name matching output-mode-prefix and output-mode-extension
# exists, raise error
pass
db = options.mongodb_collection
last_id = ""
current_batch = 0
# Fetch records in batches
records = db.find(options.queries).limit(options.batch_size)
while records:
# Records to output will be stored in this list
output_records = []
for record in records:
last_id = record["_id"]
# Create dict containing the fields specified using 'columns' argument
output_record = {
key: value
for key, value in record.items()
if len(options.columns) == 0 or key in options.columns
}
output_records.append(output_record)
# If no records are found, exit
if len(output_records) == 0:
return
output(options, current_batch, output_records)
current_batch += 1
# Fetch record for the next batch
next_query = {**options.queries, "_id": {"$gt": last_id}}
records = db.find(next_query).limit(options.batch_size)
# If number of fetched records is >= limit provided, exit
if options.limit > 0 and current_batch * options.batch_size >= options.limit:
return
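# Hypothetical entry point (the actual wrapper script is not part of this module):
# parse_arguments() reads the positional mode plus the flags defined above from
# the command line, e.g. -database/-collection/-output-mode/-output-file-type.
if __name__ == "__main__":
    run(create_options_from_args())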
| 2.9375 | 3 |
account_check/models/account_bank_statement_line.py | odoo-mastercore/odoo-argentina | 1 | 12771482 | <reponame>odoo-mastercore/odoo-argentina
##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import models, _
from odoo.exceptions import ValidationError
import logging
_logger = logging.getLogger(__name__)
class AccountBankStatementLine(models.Model):
_inherit = "account.bank.statement.line"
def button_cancel_reconciliation(self):
""" Delete operation of checks that are debited from statement
"""
for st_line in self.filtered('move_name'):
if st_line.journal_entry_ids.filtered(
lambda x:
x.payment_id.payment_reference == st_line.move_name):
check_operation = self.env['account.check.operation'].search(
[('origin', '=',
'account.bank.statement.line,%s' % st_line.id)])
check_operation.check_id._del_operation(st_line)
return super(
AccountBankStatementLine, self).button_cancel_reconciliation()
def process_reconciliation(
self, counterpart_aml_dicts=None, payment_aml_rec=None,
new_aml_dicts=None):
"""
        If the counterpart move line is a handed check, then we register the
        debit from the bank statement on the check.
        TODO: for now, when the statement line is cancelled we do not delete
        the debit; we should decide whether to do that by changing the
        function above directly.
"""
check = False
if counterpart_aml_dicts:
for line in counterpart_aml_dicts:
move_line = line.get('move_line')
check = move_line and move_line.payment_id.check_id or False
moves = super(AccountBankStatementLine, self).process_reconciliation(
counterpart_aml_dicts=counterpart_aml_dicts,
payment_aml_rec=payment_aml_rec, new_aml_dicts=new_aml_dicts)
if check and check.state == 'handed':
if check.journal_id != self.statement_id.journal_id:
raise ValidationError(_(
'Para registrar el debito de un cheque desde el extracto, '
'el diario del cheque y del extracto deben ser los mismos'
))
if len(moves) != 1:
raise ValidationError(_(
'Para registrar el debito de un cheque desde el extracto '
'solo debe haber una linea de contrapartida'))
check._add_operation('debited', self, date=self.date)
return moves
| 1.953125 | 2 |
Handlers/DataAPIHandler.py | Nuit-De-L-Info-2016-STRI-DL/Backend | 0 | 12771483 | # -*- coding: utf-8 -*-
import os
import tornado.web
from zipfile import ZipFile
from tools import ListingFiles
folder_path = 'data/'
zip_file = './export.zip'
def list_all_export_file():
"""
Make a list of important 'data' files (important files to export).
:return: important file list
"""
temp = list_high_level_files()
return temp
def list_high_level_files():
return [os.path.normpath(os.path.join(folder_path, file))
for file in ListingFiles.list_file_root_folder(folder_path)
if file.endswith('.json')]
def create_zip(file_list: list):
"""
Create a zip from list_export_file() returned file list.
"""
with ZipFile(zip_file, 'w') as myzip:
for file in file_list:
myzip.write(file, arcname=file[len(folder_path):])
class DataAPIHandler(tornado.web.RequestHandler):
"""
Class to handle '/data' endpoint.
"""
def get(self, path_request):
"""
Handle GET requests.
:param path_request: request path ( < URI)
"""
if path_request == 'all_export.zip':
create_zip(list_all_export_file())
with open(zip_file, mode='rb') as file:
c = file.read()
self.set_header('content-type', 'application/zip')
self.write(c)
else:
self.send_error(status_code=400, reason='bad request')
return
def post(self, path_request):
"""
Handle POST requests.
:param path_request: request path ( < URI)
"""
if path_request == 'import.zip':
try:
fileinfo = self.request.files['file'][0]
                # fname = fileinfo['filename']  # the name of the received file
with open(os.path.join(folder_path, 'imported.zip'), 'wb') as fh:
fh.write(fileinfo['body'])
zip_2_extract = ZipFile(os.path.join(folder_path, 'imported.zip'), 'r')
zip_2_extract.extractall(folder_path)
zip_2_extract.close()
os.remove(os.path.join(folder_path, 'imported.zip'))
            except KeyError:  # the uploaded file was not submitted under the 'file' form field name
self.send_error(status_code=400, reason='bad request')
else:
self.send_error(status_code=400, reason='bad request')
return
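# Hypothetical wiring sketch (not part of this module): the handler reads one
# path segment after /data/, so a route of roughly this shape is assumed:
#
#     app = tornado.web.Application([(r"/data/(.*)", DataAPIHandler)])
#     app.listen(8888)
#     tornado.ioloop.IOLoop.current().start()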
| 2.9375 | 3 |
gym_ds3/schedulers/deepsocs/parameter_server.py | EpiSci/SoCRATES | 6 | 12771484 |
import numpy as np
import tensorflow as tf
from gym_ds3.schedulers.deepsocs.average_reward import AveragePerStepReward
from gym_ds3.schedulers.deepsocs.compute_baselines import get_piecewise_linear_fit_baseline
from gym_ds3.schedulers.deepsocs.deepsocs_scheduler import Deepsocs
from gym_ds3.schedulers.models.deepsocs_model import create_deepsocs_model, create_deepsocs_graph
from gym_ds3.envs.utils.helper_deepsocs import suppress_tf_warning, discount
class ParameterServer(object):
def __init__(self, args):
self.args = args
self.seed = args.seed
suppress_tf_warning() # suppress TF warnings
# AAD model
self.model, self.sess = create_deepsocs_model(args)
self.graph = create_deepsocs_graph(args=args, model=self.model)
# Deepsocs Scheduler
self.deepsocs = Deepsocs(args, self.model, self.sess)
self.avg_reward_calculator = AveragePerStepReward(size=100000)
# Initialize model
tf.set_random_seed(self.seed)
np.random.seed(self.seed)
self.sess.run(tf.global_variables_initializer())
# Flag to initialize assign operations for 'set_weights()'
self.FIRST_SET_FLAG = True
def get_weights(self):
weight_vals = self.sess.run(self.model['all_vars'])
return weight_vals
def set_weights(self, weight_vals):
"""
Set weights without memory leakage
"""
if self.FIRST_SET_FLAG:
self.FIRST_SET_FLAG = False
self.assign_placeholders = []
self.assign_ops = []
for w_idx, weight_tf_var in enumerate(self.model['all_vars']):
a = weight_tf_var
assign_placeholder = tf.placeholder(a.dtype, shape=a.get_shape())
assign_op = a.assign(assign_placeholder)
self.assign_placeholders.append(assign_placeholder)
self.assign_ops.append(assign_op)
for w_idx, weight_tf_var in enumerate(self.model['all_vars']):
self.sess.run(self.assign_ops[w_idx],
{self.assign_placeholders[w_idx]: weight_vals[w_idx]})
def apply_gradients(self, gradients):
self.sess.run(self.graph['apply_grads'], feed_dict={
i: d for i, d in zip(self.graph['gradients'], gradients)
})
def compute_advantages(self, ops_vals):
# calculate advantages (input-dependent baselines)
all_times, all_diff_times, all_rewards, last_returns = [], [], [], []
results = {}
for ops_val in ops_vals:
rollout_val = ops_val[0]
stat = ops_val[1]
diff_time = np.array(rollout_val['wall_time'][1:]) - np.array(rollout_val['wall_time'][:-1])
self.avg_reward_calculator.add_list_filter_zero(rollout_val['reward'], diff_time)
all_diff_times.append(diff_time)
all_times.append(rollout_val['wall_time'][1:])
all_rewards.append(rollout_val['reward'])
for k, v in stat.items():
try:
results[k].append(v)
except:
results.update({k: []})
results[k].append(v)
adv, all_cum_reward = compute_advantage(
self.args, self.avg_reward_calculator, all_rewards, all_diff_times, all_times)
for cum_reward in all_cum_reward:
last_returns.append(cum_reward[-1])
return results, adv
def compute_advantage(args, reward_calculator, all_rewards, all_diff_times, all_times):
# compute differential reward
all_cum_reward = []
avg_per_step_reward = reward_calculator.get_avg_per_step_reward()
for i in range(args.num_agents):
# differential reward mode on
rewards = np.array([r - avg_per_step_reward * t for \
(r, t) in zip(all_rewards[i], all_diff_times[i])])
cum_reward = discount(rewards, args.gamma)
all_cum_reward.append(cum_reward)
baselines = get_piecewise_linear_fit_baseline(all_cum_reward, all_times)
# give worker back the advantage
advs = []
for i in range(args.num_agents):
batch_adv = all_cum_reward[i] - baselines[i]
batch_adv = np.reshape(batch_adv, [len(batch_adv), 1])
advs.append(batch_adv)
return advs, all_cum_reward
| 2.078125 | 2 |
deepcontact/layers.py | largelymfs/deepcontact | 27 | 12771485 | #! /usr/bin/env python
#################################################################################
# File Name : ./layer.py
# Created By : yang
# Creation Date : [2017-11-15 12:51]
# Last Modified : [2017-11-15 13:09]
# Description : some layers definition
#################################################################################
import lasagne, theano
import numpy as np
import theano.tensor as T
DTYPE = "float32"
class FeatureCombineLayer(lasagne.layers.MergeLayer):
def __init__(self, incomings, **kwargs):
super(FeatureCombineLayer, self).__init__(incomings, **kwargs)
max_size = self.output_shape[2]
self.one = T.ones((1, max_size), dtype=DTYPE)
def get_output_shape_for(self, input_shapes, **kwargs):
return (input_shapes[0][0], input_shapes[0][1] + input_shapes[1][1] * 2, input_shapes[0][2], input_shapes[0][3])
def get_output_for(self, input,**kwargs):
feature2d = input[0]
feature1d = input[1]
feature1d_h = feature1d.dimshuffle(0, 1, 2, 'x')
feature1d_h = T.tensordot(feature1d_h, self.one, [[3], [0]])
feature1d_v = feature1d_h.dimshuffle(0, 1, 3, 2)
return T.concatenate([feature2d, feature1d_h, feature1d_v], axis = 1)
class Feature2dBiasLayer(lasagne.layers.Layer):
def __init__(self, incoming = None, **kwargs):
super(Feature2dBiasLayer,self).__init__(incoming, **kwargs)
self.max_size = self.output_shape[2]
        # build one-hot bias planes, one per |i - j| sequence-separation bin
self.bias = np.zeros((7, self.max_size, self.max_size), dtype = DTYPE)
for i in xrange(self.max_size):
for j in xrange(self.max_size):
delta = abs(i - j)
if delta < 14:
t = 0
elif delta < 18:
t = 1
elif delta < 23:
t = 2
elif delta < 28:
t = 3
elif delta < 38:
t = 4
elif delta < 48:
t = 5
else:
t = 6
self.bias[t, i, j] = 1.0
self.bias = theano.shared(self.bias)
self.bias = self.bias.dimshuffle('x', 0, 1, 2)
def get_output_shape_for(self, input_shape, **kwargs):
return (input_shape[0], input_shape[1] + 7, input_shape[2], input_shape[3])
def get_output_for(self, input, **kwargs):
batch_size = input.shape[0]
one = T.ones((batch_size, 1), dtype=DTYPE)
tmp = T.tensordot(one, self.bias, [[1], [0]])
return T.concatenate([input, tmp], axis = 1)
class LinearLayer(lasagne.layers.Layer):
def __init__(self, incoming = None, max_size = 256, deepth = 25, W = lasagne.init.GlorotUniform(), b = lasagne.init.Constant(0.0),num_output = 1,**kwargs):
super(LinearLayer, self).__init__(incoming, **kwargs)
self.max_size = max_size
self.deepth = deepth
self.num_output = num_output
self.W = self.add_param(W,(self.deepth,num_output), name = "W")
self.b = self.add_param(b, (num_output,), name = 'b')
def get_output_shape_for(self, input_shape, **kwargs):
return (input_shape[0], self.num_output, input_shape[2], input_shape[3])
def get_output_for(self, input, **kwargs):
tmp = T.tensordot(input, self.W, [[1],[0]]).dimshuffle(0, 3, 1, 2)
return tmp + self.b[None,:,None,None]
| 2.75 | 3 |
composite_draineli_sed.py | bjweiner/sedfitting | 1 | 12771486 |
# Take two sets of Draine & Li SEDs and add them with varying
# mixes using gamma, according to
# j_nu = (1-gamma)*j_nu[umin,umin] + gamma*j_nu[umin,umax]
# so it's intended that
# sedset1 is the power law distrib of U
# sedset2 is for the diffuse medium and has a single U=Umin
# these are SEDs produced by convert_draineli_sed
# gamma is an array; for each pair, produce several SEDs for
# diff values of gamma
# indexes1 and indexes2 allow you to specify which of set2 matches
# which of set1, thus, if gamma has 4 values,
# output[0] = gamma[0] * sedset1[indexes1[0]] + (1-gamma[0])*sedset2[indexes2[0]]
# output[1] = gamma[1] * sedset1[indexes1[0]] + (1-gamma[1])*sedset2[indexes2[0]]
# and so on
# interpolate onto the wavelength array of sedset1
# return a similar sedset structure
import numpy as np
import matplotlib.pyplot as plt
def composite_draineli_sed(sedset1, sedset2, gamma, indexes1=0, indexes2=0, makeplot=0):
if indexes1==0:
indexes1 = range(len(sedset1))
if indexes2==0:
indexes2 = range(len(sedset2))
if len(indexes1) != len(indexes2):
print "Warning: sed set lengths should match in composite_draineli_sed. Unpredictable results."
nsed = min(len(indexes1),len(indexes2))
else:
nsed = len(indexes1)
ngamma = len(gamma)
sedstruct = []
for i in range(nsed):
for j in range(ngamma):
gam = gamma[j]
sed1 = sedset1[indexes1[i]]
sed2 = sedset2[indexes2[i]]
# Draine's wavelengths are decreasing so need to reverse arrays
# for interp, try using [::-1] for reversed view of array
sed2fluxinterp = np.interp(sed1['wave'][::-1], sed2['wave'][::-1],
sed2['flux'], left=0.0, right=0.0)
# plt.clf()
# plt.plot(np.log10(sed1['wave']),np.log10(sed1['flux']),'k-')
# plt.plot(np.log10(sed2['wave']),np.log10(sed2['flux']),'b-')
# plt.plot(np.log10(sed1['wave']),np.log10(sed2fluxinterp),'bx')
# plt.show()
sedfluxnew = gam * sed1['flux'] + (1.0-gam)*sed2fluxinterp
labelnew = gam*sed1['label'] + (1.0-gam)*sed2['label']
namenew = sed1['name'] + '_gamma' + str(gam) + '_' + sed2['name']
struct1 = {'label':labelnew, 'name':namenew, 'wave':sed1['wave'], 'flux':sedfluxnew}
sedstruct.append(struct1)
if makeplot != 0:
plt.clf()
for i in range(len(sedstruct)):
style = 'k-'
plt.plot(np.log10(sedstruct[i]['wave']), np.log10(sedstruct[i]['flux']), style)
plt.show()
return sedstruct
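# Minimal usage sketch with made-up inputs: each SED is a dict carrying the
# 'label', 'name', 'wave', 'flux' keys the function expects; the toy wavelength
# grid and fluxes below are illustrative, not real Draine & Li models.
if __name__ == "__main__":
    wave = np.array([100.0, 10.0, 1.0])   # decreasing, as in Draine's files
    pdr = {'label': 1.0, 'name': 'pdr', 'wave': wave, 'flux': np.array([1.0, 2.0, 3.0])}
    diffuse = {'label': 0.0, 'name': 'diffuse', 'wave': wave, 'flux': np.array([3.0, 2.0, 1.0])}
    mixed = composite_draineli_sed([pdr], [diffuse], [0.0, 0.5, 1.0])
    for sed in mixed:
        print sed['name'], sed['flux']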
| 2.203125 | 2 |
fastestimator/architecture/cyclegan.py | rajesh1226/fastestimator | 1 | 12771487 | # Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorflow.python.keras import Model, layers
from tensorflow.python.keras.initializers import RandomNormal
from fastestimator.layers import InstanceNormalization, ReflectionPadding2D
def _resblock(x0, num_filter=256, kernel_size=3):
x = ReflectionPadding2D()(x0)
x = layers.Conv2D(filters=num_filter, kernel_size=kernel_size, kernel_initializer=RandomNormal(mean=0,
stddev=0.02))(x)
x = InstanceNormalization()(x)
x = layers.ReLU()(x)
x = ReflectionPadding2D()(x)
x = layers.Conv2D(filters=num_filter, kernel_size=kernel_size, kernel_initializer=RandomNormal(mean=0,
stddev=0.02))(x)
x = InstanceNormalization()(x)
x = layers.Add()([x, x0])
return x
def build_discriminator(input_shape=(256, 256, 3)):
"""Returns the discriminator network of the GAN.
Args:
input_shape (tuple, optional): shape of the input image. Defaults to (256, 256, 3).
Returns:
'Model' object: GAN discriminator.
"""
x0 = layers.Input(input_shape)
x = layers.Conv2D(filters=64,
kernel_size=4,
strides=2,
padding='same',
kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x0)
x = layers.LeakyReLU(0.2)(x)
x = layers.Conv2D(filters=128,
kernel_size=4,
strides=2,
padding='same',
kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
x = InstanceNormalization()(x)
x = layers.LeakyReLU(0.2)(x)
x = layers.Conv2D(filters=256,
kernel_size=4,
strides=2,
padding='same',
kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
x = InstanceNormalization()(x)
x = layers.LeakyReLU(0.2)(x)
x = ReflectionPadding2D()(x)
x = layers.Conv2D(filters=512, kernel_size=4, strides=1, kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
x = InstanceNormalization()(x)
x = layers.LeakyReLU(0.2)(x)
x = ReflectionPadding2D()(x)
x = layers.Conv2D(filters=1, kernel_size=4, strides=1, kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
return Model(inputs=x0, outputs=x)
def build_generator(input_shape=(256, 256, 3), num_blocks=9):
"""Returns the generator of the GAN.
Args:
input_shape (tuple, optional): shape of the input image. Defaults to (256, 256, 3).
num_blocks (int, optional): number of resblocks for the generator. Defaults to 9.
Returns:
'Model' object: GAN generator.
"""
x0 = layers.Input(input_shape)
x = ReflectionPadding2D(padding=(3, 3))(x0)
x = layers.Conv2D(filters=64, kernel_size=7, strides=1, kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
x = InstanceNormalization()(x)
x = layers.ReLU()(x)
# downsample
x = layers.Conv2D(filters=128,
kernel_size=3,
strides=2,
padding='same',
kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
x = InstanceNormalization()(x)
x = layers.ReLU()(x)
x = layers.Conv2D(filters=256,
kernel_size=3,
strides=2,
padding='same',
kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
x = InstanceNormalization()(x)
x = layers.ReLU()(x)
# residual
for _ in range(num_blocks):
x = _resblock(x)
# upsample
x = layers.Conv2DTranspose(filters=128,
kernel_size=3,
strides=2,
padding='same',
kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
x = InstanceNormalization()(x)
x = layers.ReLU()(x)
x = layers.Conv2DTranspose(filters=64,
kernel_size=3,
strides=2,
padding='same',
kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
x = InstanceNormalization()(x)
x = layers.ReLU()(x)
# final
x = ReflectionPadding2D(padding=(3, 3))(x)
x = layers.Conv2D(filters=3, kernel_size=7, activation='tanh', kernel_initializer=RandomNormal(mean=0,
stddev=0.02))(x)
return Model(inputs=x0, outputs=x)
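# Minimal usage sketch (illustrative only): build both networks with the default
# 256x256x3 input shape and inspect their layer summaries.
if __name__ == "__main__":
    generator = build_generator()
    discriminator = build_discriminator()
    generator.summary()
    discriminator.summary()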
| 2.5625 | 3 |
data/text/tokenizer.py | anh/TransformerTTS | 894 | 12771488 | from typing import Union
import re
from phonemizer.phonemize import phonemize
from data.text.symbols import all_phonemes, _punctuations
class Tokenizer:
def __init__(self, start_token='>', end_token='<', pad_token='/', add_start_end=True, alphabet=None,
model_breathing=True):
if not alphabet:
self.alphabet = all_phonemes
else:
self.alphabet = sorted(list(set(alphabet))) # for testing
self.idx_to_token = {i: s for i, s in enumerate(self.alphabet, start=1)}
self.idx_to_token[0] = pad_token
self.token_to_idx = {s: [i] for i, s in self.idx_to_token.items()}
self.vocab_size = len(self.alphabet) + 1
self.add_start_end = add_start_end
if add_start_end:
self.start_token_index = len(self.alphabet) + 1
self.end_token_index = len(self.alphabet) + 2
self.vocab_size += 2
self.idx_to_token[self.start_token_index] = start_token
self.idx_to_token[self.end_token_index] = end_token
self.model_breathing = model_breathing
if model_breathing:
self.breathing_token_index = self.vocab_size
self.token_to_idx[' '] = self.token_to_idx[' '] + [self.breathing_token_index]
self.vocab_size += 1
self.breathing_token = '@'
self.idx_to_token[self.breathing_token_index] = self.breathing_token
self.token_to_idx[self.breathing_token] = [self.breathing_token_index]
def __call__(self, sentence: str) -> list:
sequence = [self.token_to_idx[c] for c in sentence] # No filtering: text should only contain known chars.
sequence = [item for items in sequence for item in items]
if self.model_breathing:
sequence = [self.breathing_token_index] + sequence
if self.add_start_end:
sequence = [self.start_token_index] + sequence + [self.end_token_index]
return sequence
def decode(self, sequence: list) -> str:
return ''.join([self.idx_to_token[int(t)] for t in sequence])
class Phonemizer:
def __init__(self, language: str, with_stress: bool, njobs=4):
self.language = language
self.njobs = njobs
self.with_stress = with_stress
self.special_hyphen = '—'
self.punctuation = ';:,.!?¡¿—…"«»“”'
self._whitespace_re = re.compile(r'\s+')
self._whitespace_punctuation_re = re.compile(f'\s*([{_punctuations}])\s*')
def __call__(self, text: Union[str, list], with_stress=None, njobs=None, language=None) -> Union[str, list]:
language = language or self.language
njobs = njobs or self.njobs
with_stress = with_stress or self.with_stress
# phonemizer does not like hyphens.
text = self._preprocess(text)
phonemes = phonemize(text,
language=language,
backend='espeak',
strip=True,
preserve_punctuation=True,
with_stress=with_stress,
punctuation_marks=self.punctuation,
njobs=njobs,
language_switch='remove-flags')
return self._postprocess(phonemes)
def _preprocess_string(self, text: str):
text = text.replace('-', self.special_hyphen)
return text
def _preprocess(self, text: Union[str, list]) -> Union[str, list]:
if isinstance(text, list):
return [self._preprocess_string(t) for t in text]
elif isinstance(text, str):
return self._preprocess_string(text)
else:
raise TypeError(f'{self} input must be list or str, not {type(text)}')
def _collapse_whitespace(self, text: str) -> str:
text = re.sub(self._whitespace_re, ' ', text)
return re.sub(self._whitespace_punctuation_re, r'\1', text)
def _postprocess_string(self, text: str) -> str:
text = text.replace(self.special_hyphen, '-')
text = ''.join([c for c in text if c in all_phonemes])
text = self._collapse_whitespace(text)
text = text.strip()
return text
def _postprocess(self, text: Union[str, list]) -> Union[str, list]:
if isinstance(text, list):
return [self._postprocess_string(t) for t in text]
elif isinstance(text, str):
return self._postprocess_string(text)
else:
raise TypeError(f'{self} input must be list or str, not {type(text)}')
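# Minimal usage sketch (illustrative only): a Tokenizer over a toy alphabet;
# in normal use the alphabet defaults to data.text.symbols.all_phonemes.
if __name__ == "__main__":
    tok = Tokenizer(alphabet=list('helo wrd'), add_start_end=True, model_breathing=False)
    ids = tok('hello world')
    print(ids)
    print(tok.decode(ids))  # '>hello world<' including the start/end markers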
| 3.046875 | 3 |
prez/renderers/spaceprez/spaceprez_feature_collection_renderer.py | surroundaustralia/Prez | 2 | 12771489 | from typing import Dict, Optional, Union
from fastapi.responses import Response, JSONResponse, PlainTextResponse
from rdflib import Graph
from rdflib.namespace import DCAT, DCTERMS, RDFS
from connegp import MEDIATYPE_NAMES
from config import *
from renderers import Renderer
from profiles.spaceprez_profiles import oai, geo
from models.spaceprez import SpacePrezFeatureCollection
from utils import templates
class SpacePrezFeatureCollectionRenderer(Renderer):
profiles = {"oai": oai, "geo": geo}
default_profile_token = "oai"
def __init__(self, request: object, instance_uri: str) -> None:
super().__init__(
request,
SpacePrezFeatureCollectionRenderer.profiles,
SpacePrezFeatureCollectionRenderer.default_profile_token,
instance_uri,
)
def set_collection(self, collection: SpacePrezFeatureCollection) -> None:
self.collection = collection
def _render_oai_html(
self, template_context: Union[Dict, None]
) -> templates.TemplateResponse:
"""Renders the HTML representation of the DCAT profile for a feature collection"""
_template_context = {
"request": self.request,
"collection": self.collection.to_dict(),
"uri": self.instance_uri,
"profiles": self.profiles,
"default_profile": self.default_profile_token,
"mediatype_names": dict(MEDIATYPE_NAMES, **{"application/geo+json": "GeoJSON"}),
}
if template_context is not None:
_template_context.update(template_context)
return templates.TemplateResponse(
"spaceprez/spaceprez_feature_collection.html",
context=_template_context,
headers=self.headers,
)
# def _render_oai_json(self) -> JSONResponse:
# """Renders the JSON representation of the OAI profile for a feature collection"""
# return JSONResponse(
# content={"test": "test"},
# media_type="application/json",
# headers=self.headers,
# )
def _render_oai_geojson(self) -> JSONResponse:
"""Renders the GeoJSON representation of the OAI profile for a feature collection"""
content = self.collection.to_geojson()
content["links"] = [
{
"href": str(self.request.url),
"rel": "self",
"type": self.mediatype,
"title": "this document",
},
{
"href": str(self.request.base_url)[:-1] + str(self.request.url.path),
"rel": "alternate",
"type": "text/html",
"title": "this document as HTML",
},
]
return JSONResponse(
content=content,
media_type="application/geo+json",
headers=self.headers,
)
def _render_oai(self, template_context: Union[Dict, None]):
"""Renders the OAI profile for a feature collection"""
if self.mediatype == "text/html":
return self._render_oai_html(template_context)
else: # else return GeoJSON
return self._render_oai_geojson()
def _generate_geo_rdf(self) -> Graph:
"""Generates a Graph of the GeoSPARQL representation"""
r = self.collection.graph.query(f"""
PREFIX dcat: <{DCAT}>
PREFIX dcterms: <{DCTERMS}>
PREFIX geo: <{GEO}>
PREFIX rdfs: <{RDFS}>
CONSTRUCT {{
?fc a geo:FeatureCollection ;
?fc_pred ?fc_o ;
geo:hasBoundingBox ?geom ;
rdfs:member ?mem .
?geom ?geom_p ?geom_o .
?d a dcat:Dataset ;
rdfs:member ?fc .
}}
WHERE {{
BIND (<{self.collection.uri}> AS ?fc)
?fc a geo:FeatureCollection ;
?fc_pred ?fc_o ;
rdfs:member ?mem .
FILTER (STRSTARTS(STR(?fc_pred), STR(geo:)))
OPTIONAL {{
?fc geo:hasBoundingBox ?geom .
?geom ?geom_p ?geom_o .
}}
?d a dcat:Dataset ;
rdfs:member ?fc .
}}
""")
g = r.graph
g.bind("dcat", DCAT)
g.bind("dcterms", DCTERMS)
g.bind("geo", GEO)
g.bind("rdfs", RDFS)
return g
def _render_geo_rdf(self) -> Response:
"""Renders the RDF representation of the GeoSPAQRL profile for a feature collection"""
g = self._generate_geo_rdf()
return self._make_rdf_response(g)
def _render_geo(self):
"""Renders the GeoSPARQL profile for a feature collection"""
return self._render_geo_rdf()
def render(
self, template_context: Optional[Dict] = None
) -> Union[
PlainTextResponse, templates.TemplateResponse, Response, JSONResponse, None
]:
if self.error is not None:
return PlainTextResponse(self.error, status_code=400)
elif self.profile == "alt":
return self._render_alt(template_context)
elif self.profile == "oai":
return self._render_oai(template_context)
elif self.profile == "geo":
return self._render_geo()
else:
return None
| 2.234375 | 2 |
api/models/unique_file.py | merwane/shield | 0 | 12771490 | from mongoengine import *
import datetime
class UniqueFile(Document):
filename = StringField()
file_size = FloatField(default=0) # megabytes by default
file_type = StringField()
labels = ListField(default=[])
checksum = StringField()
added_at = DateTimeField(default=datetime.datetime.utcnow) | 2.546875 | 3 |
services/dynamic-sidecar/tests/unit/test_docker_utils.py | mrnicegyu11/osparc-simcore | 0 | 12771491 | <reponame>mrnicegyu11/osparc-simcore
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
from typing import AsyncIterable
import aiodocker
import pytest
from simcore_service_dynamic_sidecar.core.docker_utils import get_volume_by_label
from simcore_service_dynamic_sidecar.core.errors import VolumeNotFoundError
pytestmark = pytest.mark.asyncio
@pytest.fixture(scope="session")
def volume_name() -> str:
return "test_source_name"
@pytest.fixture
async def volume_with_label(volume_name: str) -> AsyncIterable[None]:
async with aiodocker.Docker() as docker_client:
volume = await docker_client.volumes.create(
{"Name": "test_volume_name_1", "Labels": {"source": volume_name}}
)
yield
await volume.delete()
async def test_volume_with_label(volume_with_label: None, volume_name: str) -> None:
assert await get_volume_by_label(volume_name)
async def test_volume_label_missing() -> None:
with pytest.raises(VolumeNotFoundError) as info:
await get_volume_by_label("not_exist")
assert (
info.value.args[0]
== "Expected 1 volume with source_label='not_exist', query returned []"
)
| 1.867188 | 2 |
tournamentmasters/command_tournament_master.py | jorgeparavicini/FourWins | 1 | 12771492 | <reponame>jorgeparavicini/FourWins<filename>tournamentmasters/command_tournament_master.py<gh_stars>1-10
from bots import BaseBot
from tournamentmasters.tournament_master import TournamentMaster
class CommandTournamentMaster(TournamentMaster):
def __init__(self, bot_1: BaseBot, bot_2: BaseBot, grid_width: int, grid_height: int,
time_between_rounds: float = 0):
super(CommandTournamentMaster, self).__init__(bot_1, bot_2, grid_width, grid_height, time_between_rounds)
self.winner_id = -1
def on_turn_end(self, bot_played: BaseBot):
self.grid.print()
print("---------------------\n")
def on_winner_found(self, winner_bot: BaseBot):
print(f'{winner_bot.name} {winner_bot.id} WOOOOOOON')
self.winner_id = winner_bot.id
def play(self):
super().play()
return self.winner_id
| 2.78125 | 3 |
homework_01/game_of_life.py | zunigjor/BI-PYT | 1 | 12771493 | # Homework 01 - Game of life
#
# Your task is to implement part of the cell automata called
# Game of life. The automata is a 2D simulation where each cell
# on the grid is either dead or alive.
#
# State of each cell is updated in every iteration based state of neighbouring cells.
# Cell neighbours are cells that are horizontally, vertically, or diagonally adjacent.
#
# Rules for update are as follows:
#
# 1. Any live cell with fewer than two live neighbours dies, as if by underpopulation.
# 2. Any live cell with two or three live neighbours lives on to the next generation.
# 3. Any live cell with more than three live neighbours dies, as if by overpopulation.
# 4. Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction.
#
#
# Our implementation will use coordinate system will use grid coordinates starting from (0, 0) - upper left corner.
# The first coordinate is row and second is column.
#
# Do not use wrap around (toroid) when reaching edge of the board.
#
# For more details about Game of Life, see Wikipedia - https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life
def createBoard(rows, cols):
board = [[False] * cols for i in range(rows)]
return board
def fillBoard(board, alive):
for j in alive:
x, y = j
board[x][y] = True
return None
def isAlive(board, x, y, rows, cols):
if x < 0 or x >= rows:
return False
if y < 0 or y >= cols:
return False
return board[x][y]
def sumAliveNeighbors(board, r, c, rows, cols) -> int:
sumN = 0
sumN += isAlive(board, r - 1, c, rows, cols) # up
sumN += isAlive(board, r + 1, c, rows, cols) # down
sumN += isAlive(board, r, c - 1, rows, cols) # left
sumN += isAlive(board, r, c + 1, rows, cols) # right
sumN += isAlive(board, r - 1, c - 1, rows, cols) # up left
sumN += isAlive(board, r - 1, c + 1, rows, cols) # up right
sumN += isAlive(board, r + 1, c - 1, rows, cols) # down left
sumN += isAlive(board, r + 1, c + 1, rows, cols) # down right
return sumN
def makeGameStep(current_board, rows, cols):
next_board = createBoard(rows, cols)
for r in range(rows):
for c in range(cols):
sumN = sumAliveNeighbors(current_board, r, c, rows, cols)
if current_board[r][c]:
if sumN < 2 or sumN > 3:
next_board[r][c] = False
if sumN == 2 or sumN == 3:
next_board[r][c] = True
else:
if sumN == 3:
next_board[r][c] = True
current_board = next_board
return current_board
def getAliveSet(board, rows, cols):
result = set()
for row in range(rows):
for column in range(cols):
if board[row][column]:
t = (row, column)
result.add(t)
return result
def update(alive, size, iter_n):
rows, cols = size
current_board = createBoard(rows, cols)
fillBoard(current_board, alive)
i = 0
while i < iter_n:
current_board = makeGameStep(current_board, rows, cols)
i += 1
# Return the set of alive cells from the last current_board
return getAliveSet(current_board, rows, cols)
def draw(alive, size):
"""
alive - set of cell coordinates marked as alive, can be empty
    size - size of simulation grid as tuple - (rows, cols)
output - string showing the board state with alive cells marked with X
"""
# Don't call print in this method, just return board string as output.
# Example of 3x3 board with 1 alive cell at coordinates (0, 2):
# +---+
# | X|
# | |
# | |
# +---+
rows, cols = size
outputString = "+"
for i in range(cols):
outputString += "-"
outputString += "+\n"
for i in range(rows):
outputString += "|"
for j in range(cols):
if (i, j) in alive:
outputString += "X"
else:
outputString += " "
outputString += "|\n"
outputString += "+"
for i in range(cols):
outputString += "-"
outputString += "+"
return outputString
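if __name__ == "__main__":
    # Small self-check (illustrative, not part of the assignment interface):
    # a vertical "blinker" flips to a horizontal one after a single update.
    blinker = {(0, 1), (1, 1), (2, 1)}
    next_alive = update(blinker, (3, 3), 1)
    print(draw(next_alive, (3, 3)))  # only the middle row (1,0), (1,1), (1,2) stays alive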
| 4.25 | 4 |
loader/fis_load.py | gwu-libraries/vivo-load | 2 | 12771494 | from fis_entity import *
from utility import valid_college_name, valid_department_name, xml_result_generator, remove_extra_args
import os
GWU = "The George Washington University"
class Loader:
def __init__(self, filename, data_dir,
gwids=None, entity_class=None, field_to_entity=None, field_rename=None,
add_entities_from_fields=None, field_to_lookup=None, remove_fields=None,
limit=None):
self.filename = filename
self.data_dir = data_dir
self.limit = limit
# Map of result field names to (new field names, lookup map).
self.field_lookup = field_to_lookup or {}
# Map of result field names to entity classes. Classes must take a single positional argument.
self.field_to_entity = field_to_entity or {}
# Map of result field names to rename.
self.field_rename = field_rename or {}
# The entity class to create.
self.entity_class = entity_class
# List of fields that contain entities that should be added to graph.
self.add_entities_from_fields = add_entities_from_fields or []
# List of fields to remove
self.remove_fields = remove_fields or []
# Create an RDFLib Graph
self.g = Graph(namespace_manager=ns_manager)
# Gwids
self.gwids = gwids
def load(self):
addl_entities = self._addl_entities()
for entity in addl_entities:
self.g += entity.to_graph()
try:
row_count = 0
for row_count, result in enumerate(xml_result_generator(os.path.join(self.data_dir, self.filename)),
start=1):
# Check the _use_result function
if (self._use_result(result) and
# Optionally limit by faculty ids
(self.gwids is None or result["gw_id"] in self.gwids)):
# Optionally remove fields
for field in self.remove_fields:
if field in result:
del result[field]
# Optionally process the result to change values
self._process_result(result)
# Optionally lookup some result values
for key, (new_key, lookup_map) in self.field_lookup.items():
if key in result:
result[new_key] = lookup_map[result[key]]
if key != new_key:
del result[key]
# Optionally map some result values to entities (e.g., organization)
for key, clazz in self.field_to_entity.items():
if key in result:
result[key] = clazz(result[key])
# Optionally rename some fields
for src_key, dest_key in self.field_rename.items():
if src_key in result:
result[dest_key] = result[src_key]
del result[src_key]
# Generate the entities
entities = self._generate_entities(result)
for entity in entities:
self.g += entity.to_graph()
if self.limit and row_count > self.limit-1:
break
if not row_count:
warning_log.error("%s has no data.", self.filename)
return None
return self.g
# If there is an IOError, log it and return None
except IOError, e:
warning_log.error("%s: %s", e.strerror, e.filename)
return None
def _addl_entities(self):
return []
def _use_result(self, result):
return True
def _process_result(self, result):
pass
def _generate_entities(self, result):
# Instantiate an entity using the result as keyword args
entities = [self._create_entity(self.entity_class, result)]
for field in self.add_entities_from_fields:
if field in result and result[field] and hasattr(result[field], "to_graph"):
entities.append(result[field])
return entities
@staticmethod
def _create_entity(clazz, args):
remove_extra_args(args, clazz.__init__)
return clazz(**args)
class BasicLoader(Loader):
"""
A Loader that maps gw_id field to a Person entity
and organization field to an Organization entity.
The Organization entity is also added to the graph.
"""
def __init__(self, filename, data_dir, entity_class, gwids, netid_lookup,
limit=None):
Loader.__init__(self, filename, data_dir, gwids=gwids, entity_class=entity_class,
field_to_entity={"netid": Person, "organization": Organization},
field_rename={"netid": "person"}, add_entities_from_fields=["organization"],
field_to_lookup={"gw_id": ("netid", netid_lookup)},
limit=limit)
class DepartmentLoader(Loader):
# List of departments that should be modeled as colleges.
colleges = ("The Trachtenberg School of Public Policy and Public Administration",
"Graduate School of Political Management",
"School of Media and Public Affairs",
"Corcoran School of the Arts & Design")
def __init__(self, data_dir, limit=None):
Loader.__init__(self, "fis_department.xml", data_dir, limit=limit)
self.gwu = Organization(GWU, organization_type="University", is_gw=True)
def _addl_entities(self):
return [self.gwu]
def _use_result(self, result):
return valid_department_name(result["department"]) and valid_college_name(result["college"])
def _generate_entities(self, result):
# College
c = Organization(result["college"], organization_type="College", is_gw=True, part_of=self.gwu)
# Department
d = Organization(result["department"],
organization_type="College" if result["department"] in self.colleges else "AcademicDepartment",
is_gw=True, part_of=c)
return [c, d]
def load_departments(data_dir, limit=None):
print "Loading departments."
l = DepartmentLoader(data_dir, limit=limit)
return l.load()
class FacultyLoader(Loader):
def __init__(self, data_dir, gwids, netid_lookup, is_mediaexpert, limit=None):
Loader.__init__(self, "fis_faculty.xml", data_dir, gwids=gwids, entity_class=Person,
field_to_entity={"home_department": Organization},
field_to_lookup={"gw_id": ("netid", netid_lookup)},
remove_fields=["research_areas", "personal_statement"] if is_mediaexpert else None,
limit=limit)
def _process_result(self, result):
if not (valid_department_name(result["home_department"]) and valid_college_name(result["home_college"])):
# Remove home department
del result["home_department"]
def load_faculty(data_dir, faculty_gwids, netid_lookup, is_mediaexpert=False, limit=None):
print "Loading faculty."
l = FacultyLoader(data_dir, faculty_gwids, netid_lookup, is_mediaexpert=is_mediaexpert, limit=limit)
return l.load()
class AcademicAppointmentLoader(Loader):
def __init__(self, data_dir, gwids, netid_lookup, limit=None):
Loader.__init__(self, "fis_academic_appointment.xml", data_dir, gwids=gwids,
entity_class=AcademicAppointment,
field_to_entity={"organization": Organization, "netid": Person},
field_rename={"netid": "person"},
field_to_lookup={"gw_id": ("netid", netid_lookup)},
limit=limit)
def _use_result(self, result):
return valid_department_name(result["department"]) or valid_college_name(result["college"])
def _process_result(self, result):
if valid_department_name(result["department"]):
result["organization"] = result["department"]
# Else, if College name, then College
else:
result["organization"] = result["college"]
def load_academic_appointment(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading academic appointments."
l = AcademicAppointmentLoader(data_dir, faculty_gwids, netid_lookup, limit=limit)
return l.load()
class AdminAppointmentLoader(Loader):
def __init__(self, data_dir, gwids, netid_lookup, limit=None):
Loader.__init__(self, "fis_admin_appointment.xml", data_dir, gwids=gwids,
entity_class=AdminAppointment,
field_to_entity={"organization": Organization, "netid": Person},
field_rename={"netid": "person"},
field_to_lookup={"gw_id": ("netid", netid_lookup)},
limit=limit)
self.gwu = Organization(GWU, organization_type="University", is_gw=True)
def _addl_entities(self):
return [self.gwu]
def _process_result(self, result):
# If Department name, then Department
if valid_department_name(result["department"]):
result["organization"] = result["department"]
# Else, if College name, then College
elif valid_college_name(result["college"]):
result["organization"] = result["college"]
# Else GWU
else:
result["organization"] = GWU
def load_admin_appointment(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading admin appointments."
l = AdminAppointmentLoader(data_dir, faculty_gwids, netid_lookup, limit=limit)
return l.load()
def load_degree_education(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading degree education."
l = Loader("fis_degree_education.xml", data_dir, gwids=faculty_gwids, entity_class=DegreeEducation,
field_to_entity={"institution": Organization, "netid": Person},
field_rename={"institution": "organization", "netid": "person"},
add_entities_from_fields=["organization"],
field_to_lookup={"gw_id": ("netid", netid_lookup)},
limit=limit)
return l.load()
def load_non_degree_education(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading non-degree education."
l = Loader("fis_non_degree_education.xml", data_dir, gwids=faculty_gwids, entity_class=NonDegreeEducation,
field_to_entity={"institution": Organization, "netid": Person},
field_rename={"institution": "organization", "netid": "person"},
add_entities_from_fields=["organization"],
field_to_lookup={"gw_id": ("netid", netid_lookup)},
limit=limit)
return l.load()
def load_courses(data_dir, faculty_gwids, netid_lookup, limit=None,):
print "Loading courses taught."
l = BasicLoader("fis_courses.xml", data_dir, Course, faculty_gwids,
netid_lookup, limit=limit)
return l.load()
def load_awards(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading awards."
l = BasicLoader("fis_awards.xml", data_dir, Award, faculty_gwids,
netid_lookup, limit=limit)
return l.load()
def load_professional_memberships(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading professional memberships."
l = BasicLoader("fis_prof_memberships.xml", data_dir, ProfessionalMembership, faculty_gwids,
netid_lookup, limit=limit)
return l.load()
def load_reviewerships(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading reviewerships."
l = BasicLoader("fis_reviewer.xml", data_dir, Reviewership, faculty_gwids,
netid_lookup, limit=limit)
return l.load()
def load_presentations(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading presentations."
l = BasicLoader("fis_presentations.xml", data_dir, Presentation, faculty_gwids,
netid_lookup, limit=limit)
return l.load()
def load_books(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading books."
l = Loader("fis_books.xml", data_dir, gwids=faculty_gwids, entity_class=Book,
field_to_entity={"netid": Person, "publisher": Organization},
field_rename={"netid": "person"}, add_entities_from_fields=["publisher"],
field_to_lookup={"gw_id": ("netid", netid_lookup)},
limit=limit)
return l.load()
def load_reports(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading reports."
l = Loader("fis_reports.xml", data_dir, gwids=faculty_gwids, entity_class=Report,
field_to_entity={"netid": Person, "distributor": Organization},
field_rename={"netid": "person"}, add_entities_from_fields=["distributor"],
field_to_lookup={"gw_id": ("netid", netid_lookup)},
limit=limit)
return l.load()
def load_articles(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading articles"
l = BasicLoader("fis_articles.xml", data_dir, Article, faculty_gwids,
netid_lookup, limit=limit)
return l.load()
def load_academic_articles(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading academic articles"
l = BasicLoader("fis_acad_articles.xml", data_dir, AcademicArticle, faculty_gwids,
netid_lookup, limit=limit)
return l.load()
def load_article_abstracts(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading article abstracts"
l = BasicLoader("fis_article_abstracts.xml", data_dir, ArticleAbstract, faculty_gwids,
netid_lookup, limit=limit)
return l.load()
def load_reviews(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading reviews"
l = BasicLoader("fis_reviews.xml", data_dir, Review, faculty_gwids,
netid_lookup, limit=limit)
return l.load()
def load_reference_articles(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading reference articles"
l = BasicLoader("fis_ref_articles.xml", data_dir, ReferenceArticle, faculty_gwids,
netid_lookup, limit=limit)
return l.load()
def load_letters(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading letters"
l = BasicLoader("fis_letters.xml", data_dir, Letter, faculty_gwids,
netid_lookup, limit=limit)
return l.load()
def load_testimony(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading testimony"
l = BasicLoader("fis_testimony.xml", data_dir, Testimony, faculty_gwids,
netid_lookup, limit=limit)
return l.load()
def load_chapters(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading chapters"
l = BasicLoader("fis_chapters.xml", data_dir, Chapter, faculty_gwids,
netid_lookup, limit=limit)
return l.load()
def load_conference_abstracts(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading conference abstracts"
l = BasicLoader("fis_conf_abstracts.xml", data_dir, ConferenceAbstract, faculty_gwids,
netid_lookup, limit=limit)
return l.load()
def load_conference_papers(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading conference papers"
l = BasicLoader("fis_conf_papers.xml", data_dir, ConferencePaper, faculty_gwids,
netid_lookup, limit=limit)
return l.load()
def load_conference_posters(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading conference posters"
l = BasicLoader("fis_conf_posters.xml", data_dir, ConferencePoster, faculty_gwids,
netid_lookup, limit=limit)
return l.load()
def load_patents(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading patents"
l = BasicLoader("fis_patents.xml", data_dir, Patent, faculty_gwids,
netid_lookup, limit=limit)
return l.load()
class GrantLoader(Loader):
def __init__(self, data_dir, gwids, netid_lookup, limit=None):
Loader.__init__(self, "fis_grants.xml", data_dir, gwids=gwids,
entity_class=Grant,
field_to_entity={"awarded_by": Organization, "netid": Person},
field_rename={"netid": "person"},
field_to_lookup={"gw_id": ("netid", netid_lookup)},
limit=limit)
def _use_result(self, result):
return result["title"]
def load_grants(data_dir, faculty_gwids, netid_lookup, limit=None):
print "Loading grants."
l = GrantLoader(data_dir, faculty_gwids,
netid_lookup, limit=limit)
return l.load()
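# Hedged sketch (not part of the original module): every load_* helper above
# shares the (data_dir, faculty_gwids, netid_lookup, limit) signature, so a
# driver can iterate over them uniformly. `load_all` and the loader subset it
# uses are illustrative, not the project's actual entry point.
def load_all(data_dir, faculty_gwids, netid_lookup, limit=None):
    results = []
    for loader in (load_admin_appointment, load_degree_education, load_grants):
        results.append(loader(data_dir, faculty_gwids, netid_lookup, limit=limit))
    return results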
| 2.640625 | 3 |
get_snaps.py | mark-bell-tna/webarchive | 0 | 12771495 | #!/home/ec2-user/WEBARCH/env/bin/python3
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
from time import sleep
prefix = "https://webarchive.nationalarchives.gov.uk/"
# e.g. an archived snapshot timestamp segment: 20121204113457/
page = "https://www.gov.uk/government/how-government-works"
def crawl_versions(url, url_file, skip_list=None):
version_list = []
try:
html = urlopen(url)
except Exception as e:
print("Error with URL:",url)
print(e)
        return version_list
soup = BeautifulSoup(html, 'html.parser')
#print(soup)
if url[len(prefix)-1:len(prefix)+2] != "/*/":
print("Different format:",url,url[len(prefix)-1:len(prefix)+2])
        return version_list
domain = url[len(prefix)+2:]
#out_file = open(url_file,"a")
accordions = soup.findAll("div", {"class": "accordion"})
print("Dom:",domain)
print("Url:",url,"Accordions:",len(accordions))
for acc in accordions:
year = acc.find("span", {"class" : "year"})
#print("Acc:",acc)
print("\tYear", year, year.text,domain)
versions = acc.findAll("a", href=re.compile(".[1-2]*" + domain, re.IGNORECASE))
for v in versions:
print("\t\t",v['href'])
version_list.append(v['href'])
#out_file.write(domain + "|" + year.text + "|" + v['href'] + "\n")
#out_file.close()
return version_list
url = prefix + "*/" + page
crawl_versions(url,url.replace("/","_") + ".txt")
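# Hedged sketch (not part of the original script): `sleep` is imported above
# but never used; a polite multi-page crawl would likely pause between
# requests, roughly as below. `pages` is a placeholder argument.
def crawl_many(pages, delay=2):
    for p in pages:
        u = prefix + "*/" + p
        crawl_versions(u, u.replace("/", "_") + ".txt")
        sleep(delay)  # throttle requests to the web archive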
| 3.34375 | 3 |
travelist/lib/flickr.py | ibz/travelist | 0 | 12771496 | from datetime import datetime
import time
import traceback
import urllib
import urllib2
from xml.dom import minidom
from django.core import mail
from travelist import utils
import settings
def call(method, **params):
response = urllib2.urlopen("http://flickr.com/services/rest?api_key=%s&method=%s&%s"
% (settings.FLICKR_KEY, method, urllib.urlencode(params)))
try:
return minidom.parse(response)
finally:
response.close()
def flickr_machinetags_getRecentValues(namespace, predicate, added_since):
dom = call('flickr.machinetags.getRecentValues', namespace=namespace, predicate=predicate, added_since=added_since)
return [{'value': node.childNodes[0].nodeValue,
'last_added': int(node.getAttribute('last_added'))}
for node in dom.getElementsByTagName('value')]
def flickr_photos_search(user_id, tags):
dom = call('flickr.photos.search', user_id=user_id, tags=tags)
return [{'id': int(node.getAttribute('id')),
'owner': node.getAttribute('owner')}
for node in dom.getElementsByTagName('photo')]
def flickr_photos_getInfo(photo_id):
dom = call('flickr.photos.getInfo', photo_id=photo_id)
try:
return [{'title': node.getElementsByTagName('title')[0].childNodes[0].nodeValue,
'date': datetime.strptime(node.getElementsByTagName('dates')[0].getAttribute('taken'), "%Y-%m-%d %H:%M:%S"),
'url': utils.find(node.getElementsByTagName('url'), lambda n: n.getAttribute('type') == 'photopage').childNodes[0].nodeValue}
for node in dom.getElementsByTagName('photo')][0]
except IndexError:
return None
def flickr_photos_getSizes(photo_id):
dom = call('flickr.photos.getSizes', photo_id=photo_id)
return dict((node.getAttribute('label'), node.getAttribute('source'))
for node in dom.getElementsByTagName('size'))
def track(namespace, predicate, callback):
wait_time = 60
last_added_max = 0
while True:
try:
values = flickr_machinetags_getRecentValues(namespace, predicate, last_added_max + 1)
wait_time = 60
for value in values:
callback(value['value'])
if value['last_added'] > last_added_max:
last_added_max = value['last_added']
time.sleep(60)
except Exception:
traceback.print_exc()
time.sleep(wait_time)
wait_time *= 2
if wait_time > 10 * 60:
mail.mail_admins("Flickr tracking error", traceback.format_exc(), fail_silently=True)
wait_time = 10 * 60
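# Hedged usage sketch (not part of the original module): `track` polls the
# machinetags endpoint forever and hands each new value to a callback. The
# namespace/predicate pair and the callback below are placeholders.
def _print_value(value):
    print value  # Python 2 print, matching the module's era
# track('travelist', 'trip', _print_value)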
| 2.40625 | 2 |
openelex/tests/test_transform_registry.py | Mpopoma/oe-core | 156 | 12771497 | from unittest import TestCase
from mock import Mock
from openelex.base.transform import registry
class TestTransformRegistry(TestCase):
def test_register_with_validators(self):
mock_transform = Mock(return_value=None)
mock_transform.__name__ = 'mock_transform'
mock_validator1 = Mock(return_value=None)
mock_validator1.__name__ = 'mock_validator1'
mock_validator2 = Mock(return_value=None)
mock_validator2.__name__ = 'mock_validator2'
validators = [mock_validator1, mock_validator2]
registry.register("XX", mock_transform, validators)
transform = registry.get("XX", "mock_transform")
self.assertEqual(list(transform.validators.values()), validators)
transform()
mock_transform.assert_called_once_with()
def test_register_raw(self):
mock_transform = Mock(return_value=None)
mock_transform.__name__ = 'mock_transform'
registry.register("XX", mock_transform, raw=True)
transform = registry.get("XX", "mock_transform", raw=True)
transform()
mock_transform.assert_called_once_with()
| 2.59375 | 3 |
tests/params/test_param.py | TradDog/pyro | 0 | 12771498 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
from copy import copy
from unittest import TestCase
import numpy as np
import torch
import torch.optim
from torch import nn as nn
from torch.distributions import constraints
import pyro
from tests.common import assert_equal
class ParamStoreDictTests(TestCase):
def setUp(self):
pyro.clear_param_store()
self.linear_module = nn.Linear(3, 2)
self.linear_module2 = nn.Linear(3, 2)
self.linear_module3 = nn.Linear(3, 2)
def test_save_and_load(self):
lin = pyro.module("mymodule", self.linear_module)
pyro.module("mymodule2", self.linear_module2)
x = torch.randn(1, 3)
myparam = pyro.param("myparam", 1.234 * torch.ones(1))
cost = torch.sum(torch.pow(lin(x), 2.0)) * torch.pow(myparam, 4.0)
cost.backward()
params = list(self.linear_module.parameters()) + [myparam]
optim = torch.optim.Adam(params, lr=0.01)
myparam_copy_stale = copy(pyro.param("myparam").detach().cpu().numpy())
optim.step()
myparam_copy = copy(pyro.param("myparam").detach().cpu().numpy())
param_store_params = copy(pyro.get_param_store()._params)
param_store_param_to_name = copy(pyro.get_param_store()._param_to_name)
assert len(list(param_store_params.keys())) == 5
assert len(list(param_store_param_to_name.values())) == 5
pyro.get_param_store().save("paramstore.unittest.out")
pyro.clear_param_store()
assert len(list(pyro.get_param_store()._params)) == 0
assert len(list(pyro.get_param_store()._param_to_name)) == 0
pyro.get_param_store().load("paramstore.unittest.out")
def modules_are_equal():
weights_equal = (
np.sum(
np.fabs(
self.linear_module3.weight.detach().cpu().numpy()
- self.linear_module.weight.detach().cpu().numpy()
)
)
== 0.0
)
bias_equal = (
np.sum(
np.fabs(
self.linear_module3.bias.detach().cpu().numpy()
- self.linear_module.bias.detach().cpu().numpy()
)
)
== 0.0
)
return weights_equal and bias_equal
assert not modules_are_equal()
pyro.module("mymodule", self.linear_module3, update_module_params=False)
assert id(self.linear_module3.weight) != id(pyro.param("mymodule$$$weight"))
assert not modules_are_equal()
pyro.module("mymodule", self.linear_module3, update_module_params=True)
assert id(self.linear_module3.weight) == id(pyro.param("mymodule$$$weight"))
assert modules_are_equal()
myparam = pyro.param("myparam")
store = pyro.get_param_store()
assert myparam_copy_stale != myparam.detach().cpu().numpy()
assert myparam_copy == myparam.detach().cpu().numpy()
assert sorted(param_store_params.keys()) == sorted(store._params.keys())
assert sorted(param_store_param_to_name.values()) == sorted(
store._param_to_name.values()
)
assert sorted(store._params.keys()) == sorted(store._param_to_name.values())
def test_dict_interface():
param_store = pyro.get_param_store()
# start empty
param_store.clear()
assert not param_store
assert len(param_store) == 0
assert "x" not in param_store
assert "y" not in param_store
assert list(param_store.items()) == []
assert list(param_store.keys()) == []
assert list(param_store.values()) == []
# add x
param_store["x"] = torch.zeros(1, 2, 3)
assert param_store
assert len(param_store) == 1
assert "x" in param_store
assert "y" not in param_store
assert list(param_store.keys()) == ["x"]
assert [key for key, value in param_store.items()] == ["x"]
assert len(list(param_store.values())) == 1
assert param_store["x"].shape == (1, 2, 3)
assert_equal(param_store.setdefault("x", torch.ones(1, 2, 3)), torch.zeros(1, 2, 3))
assert param_store["x"].unconstrained() is param_store["x"]
# add y
param_store.setdefault("y", torch.ones(4, 5), constraint=constraints.positive)
assert param_store
assert len(param_store) == 2
assert "x" in param_store
assert "y" in param_store
assert sorted(param_store.keys()) == ["x", "y"]
assert sorted(key for key, value in param_store.items()) == ["x", "y"]
assert len(list(param_store.values())) == 2
assert param_store["x"].shape == (1, 2, 3)
assert param_store["y"].shape == (4, 5)
assert_equal(param_store.setdefault("y", torch.zeros(4, 5)), torch.ones(4, 5))
assert_equal(param_store["y"].unconstrained(), torch.zeros(4, 5))
# remove x
del param_store["x"]
assert param_store
assert len(param_store) == 1
assert "x" not in param_store
assert "y" in param_store
assert list(param_store.keys()) == ["y"]
assert list(key for key, value in param_store.items()) == ["y"]
assert len(list(param_store.values())) == 1
assert param_store["y"].shape == (4, 5)
assert_equal(param_store.setdefault("y", torch.zeros(4, 5)), torch.ones(4, 5))
assert_equal(param_store["y"].unconstrained(), torch.zeros(4, 5))
# remove y
del param_store["y"]
assert not param_store
assert len(param_store) == 0
assert "x" not in param_store
assert "y" not in param_store
assert list(param_store.keys()) == []
assert list(key for key, value in param_store.items()) == []
assert len(list(param_store.values())) == 0
| 2.28125 | 2 |
turnovertools/mediaobject.py | morganwl/turnovertools | 0 | 12771499 | #!/usr/bin/env python3
from abc import ABCMeta
import collections.abc
class MediaObject(object):
"""
Parent class for all media objects. Not meant to be instantiated directly.
"""
__wraps_type__ = type(None)
__default_data__ = []
__requires_properties__ = []
@classmethod
def wrap_list(cls, data_list, parent=None, **kwargs):
"""
Wraps a list of data objects using the given MediaObject child
class, returning them in a new list.
"""
mob_list = []
for d in data_list:
mob_list.append(cls(d, parent=parent, **kwargs))
return mob_list
def __init__(self, data=None, parent=None, **kwargs):
"""
Instantiate MediaObject with a new data object, or with
kwargs.
"""
self.parent = parent
if data is not None:
assert isinstance(data, self.__wraps_type__)
self.data = data
else:
            self.data = self.__wraps_type__(*self.__default_data__)
for key, val in kwargs.items():
if key in self.__requires_properties__:
setattr(self, key, val)
else:
raise AttributeError('Invalid keyword parameter ' + key)
def __setattr__(self, key, value):
"""
Optionally call a private _on_update method whenever
attributes are changed in this object.
"""
self._on_update(key, value)
super(MediaObject, self).__setattr__(key, value)
def _on_update(self, key, value):
pass
class Sequence(MediaObject, collections.abc.Sequence):
def __init__(self, data=None, **kwargs):
super(Sequence, self).__init__(data=data, **kwargs)
self.tracks = []
def __getitem__(self, i):
return self.tracks[i]
def __len__(self):
return len(self.tracks)
class SequenceTrack(MediaObject, collections.abc.Sequence):
def __init__(self, data=None, **kwargs):
super(SequenceTrack, self).__init__(data=data, **kwargs)
self.events = []
def __getitem__(self, i):
return self.events[i]
def __len__(self):
return len(self.events)
class Event(MediaObject):
__requires_properties__ = ['clip_name', 'source_file', 'tape_name']
def get_custom(self, name):
raise NotImplementedError()
@property
def posterframes(self):
"""Returns a list of posterframes (in record), or rec_start_frame in
list form."""
if getattr(self, '_posterframes', None):
            return self._posterframes
return [0]
@posterframes.setter
def posterframes(self, val):
        self._posterframes = val
@property
def reel(self):
if self.tape_name is not None:
return self.tape_name
return self.source_file
@reel.setter
def reel(self, val):
if self.source_file is not None:
self.source_file = val
self.tape_name = val
class SourceClip(MediaObject):
def get_custom(self, name):
raise NotImplementedError()
@property
def reel(self):
if self.tape_name is not None:
return self.tape_name
return self.source_file
@reel.setter
def reel(self, val):
if self.source_file is not None:
self.source_file = val
self.tape_name = val
class Bin(MediaObject):
pass
class DictWrapperMeta(ABCMeta):
def __new__(meta, name, bases, class_dict):
lookup = class_dict.get('__lookup__', {})
for prop, target in lookup.items():
if prop not in class_dict:
class_dict[prop] = property(meta.getmapper(target),
meta.setmapper(target))
cls = type.__new__(meta, name, bases, class_dict)
return cls
def getmapper(target):
def getter(self):
return self.data.get(target, None)
return getter
    def setmapper(target):
        def setter(self, val):
            self.data[target] = val
        return setter
class DictWrapper(object, metaclass=DictWrapperMeta):
__wraps_type__ = dict
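# Hedged sketch (not part of the original module): DictWrapperMeta turns each
# entry of __lookup__ into a property backed by the wrapped dict, so a
# subclass only has to declare the mapping. `Marker` and its keys are
# illustrative names, not part of the original API.
class Marker(DictWrapper):
    __lookup__ = {'name': 'marker_name', 'comment': 'marker_comment'}
    def __init__(self, data=None):
        self.data = data if data is not None else {}
# m = Marker({'marker_name': 'MOS fix'})
# m.name                  # -> 'MOS fix'
# m.comment = 'checked'   # writes m.data['marker_comment']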
| 3.09375 | 3 |
main.py | doravante/bot-carona-tg | 0 | 12771500 | import sys
import time
import bot
def main():
TOKEN = sys.argv[1]
b = bot.Bot(TOKEN)
print 'Listening ...'
b.notifyOnMessage(run_forever=True)
if __name__ == '__main__':
    main()
| 2.0625 | 2 |
pdkit/tremor_processor.py | gkroussos/pdkit | 21 | 12771501 | <reponame>gkroussos/pdkit
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 Birkbeck College. All rights reserved.
#
# Licensed under the MIT license. See file LICENSE for details.
#
# Author(s): <NAME>
import sys
import logging
import numpy as np
import pandas as pd
from scipy import interpolate, signal
from scipy.fftpack import fft  # scipy.fft became a module in SciPy >= 1.4; fftpack keeps the callable
from tsfresh.feature_extraction import feature_calculators
class TremorProcessor:
"""
This is the main Tremor Processor class. Once the data is loaded it will be
accessible at data_frame (pandas.DataFrame), where it looks like:
data_frame.x, data_frame.y, data_frame.z: x, y, z components of the acceleration
data_frame.index is the datetime-like index
These values are recommended by the author of the pilot study :cite:`Kassavetis2015`
:param sampling_frequency: (optional) the sampling frequency in Hz (100.0Hz)
:type sampling_frequency: float
:param cutoff_frequency: (optional) the cutoff frequency in Hz (2.0Hz)
:type cutoff_frequency: float
:param filter_order: (optional) filter order (2)
:type filter_order: int
:param window: (optional) window (256)
:type window: int
:param lower_frequency: (optional) lower frequency in Hz (2.0Hz)
:type lower_frequency: float
:param upper_frequency: (optional) upper frequency in Hz (10.0Hz)
:type upper_frequency: float
:Example:
>>> import pdkit
>>> tp = pdkit.TremorProcessor()
>>> ts = pdkit.TremorTimeSeries().load(path_to_data)
>>> amplitude, frequency = tp.amplitude(ts)
"""
def __init__(self, sampling_frequency=100.0, cutoff_frequency=2.0, filter_order=2,
window=256, lower_frequency=2.0, upper_frequency=10.0):
try:
self.ampl = 0
self.freq = 0
self.sampling_frequency = sampling_frequency
self.cutoff_frequency = cutoff_frequency
self.filter_order = filter_order
self.window = window
self.lower_frequency = lower_frequency
self.upper_frequency = upper_frequency
logging.debug("TremorProcessor init")
except IOError as e:
ierr = "({}): {}".format(e.errno, e.strerror)
logging.error("TremorProcessor I/O error %s", ierr)
except ValueError as verr:
logging.error("TremorProcessor ValueError ->%s", verr.message)
except:
logging.error("Unexpected error on TremorProcessor init: %s", sys.exc_info()[0])
def resample_signal(self, data_frame):
"""
Convenience method for frequency conversion and resampling of data frame.
Object must have a DatetimeIndex. After re-sampling, this methods interpolate the time magnitude sum
acceleration values and the x,y,z values of the data frame acceleration
:param data_frame: the data frame to resample
:type data_frame: pandas.DataFrame
:return: the resampled data frame
:rtype: pandas.DataFrame
"""
df_resampled = data_frame.resample(str(1 / self.sampling_frequency) + 'S').mean()
f = interpolate.interp1d(data_frame.td, data_frame.mag_sum_acc)
new_timestamp = np.arange(data_frame.td[0], data_frame.td[-1], 1.0 / self.sampling_frequency)
df_resampled.mag_sum_acc = f(new_timestamp)
logging.debug("resample signal")
return df_resampled.interpolate(method='linear')
def filter_signal(self, data_frame, ts='mag_sum_acc'):
"""
This method filters a data frame signal as suggested in :cite:`Kassavetis2015`. First step is to high \
pass filter the data frame using a \
`Butterworth <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.butter.html>`_ \
digital and analog filter. Then this method
filters the data frame along one-dimension using a \
`digital filter <https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lfilter.html>`_.
:param data_frame: the input data frame
:type data_frame: pandas.DataFrame
:param ts: time series name of data frame to filter
:type ts: str
:return data_frame: adds a column named 'filtered_signal' to the data frame
:rtype data_frame: pandas.DataFrame
"""
b, a = signal.butter(self.filter_order, 2*self.cutoff_frequency/self.sampling_frequency,'high', analog=False)
filtered_signal = signal.lfilter(b, a, data_frame[ts].values)
data_frame['filtered_signal'] = filtered_signal
logging.debug("filter signal")
return data_frame
def fft_signal(self, data_frame):
"""
This method perform Fast Fourier Transform on the data frame using a \
`hanning window <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.hann.html>`_
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return: data frame with a 'filtered_singal', 'transformed_signal' and 'dt' columns
:rtype: pandas.DataFrame
"""
signal_length = len(data_frame.filtered_signal.values)
ll = int(signal_length / 2 - self.window / 2)
rr = int(signal_length / 2 + self.window / 2)
msa = data_frame.filtered_signal[ll:rr].values
hann_window = signal.hann(self.window)
msa_window = (msa * hann_window)
transformed_signal = fft(msa_window)
data = {'filtered_signal': msa_window, 'transformed_signal': transformed_signal,
'dt': data_frame.td[ll:rr].values}
data_frame_fft = pd.DataFrame(data, index=data_frame.index[ll:rr],
columns=['filtered_signal', 'transformed_signal', 'dt'])
logging.debug("fft signal")
return data_frame_fft
def amplitude_by_fft(self, data_frame):
"""
This methods extract the fft components and sum the ones from lower to upper freq as per \
:cite:`Kassavetis2015`
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return ampl: the ampl
:rtype ampl: float
:return freq: the freq
:rtype freq: float
"""
signal_length = len(data_frame.filtered_signal)
normalised_transformed_signal = data_frame.transformed_signal.values / signal_length
k = np.arange(signal_length)
T = signal_length / self.sampling_frequency
f = k / T # two sides frequency range
f = f[range(int(signal_length / 2))] # one side frequency range
ts = normalised_transformed_signal[range(int(signal_length / 2))]
ampl = sum(abs(ts[(f > self.lower_frequency) & (f < self.upper_frequency)]))
freq = f[abs(ts).argmax(axis=0)]
logging.debug("tremor ampl calculated")
return ampl, freq
def amplitude_by_welch(self, data_frame):
"""
This methods uses the Welch method :cite:`Welch1967` to obtain the power spectral density, this is a robust
alternative to using fft_signal & amplitude
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return: the ampl
:rtype ampl: float
:return: the freq
:rtype freq: float
"""
frq, Pxx_den = signal.welch(data_frame.filtered_signal.values, self.sampling_frequency, nperseg=self.window)
freq = frq[Pxx_den.argmax(axis=0)]
ampl = sum(Pxx_den[(frq > self.lower_frequency) & (frq < self.upper_frequency)])
logging.debug("tremor amplitude by welch calculated")
return ampl, freq
def approximate_entropy(self, x, m=None, r=None):
"""
As in tsfresh \
`approximate_entropy <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\
feature_calculators.py#L1601>`_
Implements a `vectorized approximate entropy algorithm <https://en.wikipedia.org/wiki/Approximate_entropy>`_
For short time-series this method is highly dependent on the parameters,
but should be stable for N > 2000, see :cite:`Yentes2013`. Other shortcomings and alternatives discussed in \
:cite:`Richman2000`
:param x: the time series to calculate the feature of
:type x: pandas.Series
:param m: Length of compared run of data
:type m: int
:param r: Filtering level, must be positive
:type r: float
:return: Approximate entropy
:rtype: float
"""
if m is None or r is None:
m = 2
r = 0.3
entropy = feature_calculators.approximate_entropy(x, m, r)
logging.debug("approximate entropy by tsfresh calculated")
return entropy
def autocorrelation(self, x, lag):
"""
As in tsfresh `autocorrelation <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\
feature_calculators.py#L1457>`_
Calculates the autocorrelation of the specified lag, according to the `formula <https://en.wikipedia.org/wiki/\
Autocorrelation#Estimation>`_:
.. math::
\\frac{1}{(n-l)\sigma^{2}} \\sum_{t=1}^{n-l}(X_{t}-\\mu )(X_{t+l}-\\mu)
where :math:`n` is the length of the time series :math:`X_i`, :math:`\sigma^2` its variance and :math:`\mu` its
mean. `l` denotes the lag.
:param x: the time series to calculate the feature of
:type x: pandas.Series
:param lag: the lag
:type lag: int
:return: the value of this feature
:rtype: float
"""
# This is important: If a series is passed, the product below is calculated
# based on the index, which corresponds to squaring the series.
if lag is None:
lag = 0
_autoc = feature_calculators.autocorrelation(x, lag)
logging.debug("autocorrelation by tsfresh calculated")
return _autoc
def partial_autocorrelation(self, x, param=None):
"""
As in tsfresh `partial_autocorrelation <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/\
feature_extraction/feature_calculators.py#L308>`_
Calculates the value of the partial autocorrelation function at the given lag. The lag `k` partial \
autocorrelation of a time series :math:`\\lbrace x_t, t = 1 \\ldots T \\rbrace` equals the partial correlation \
of :math:`x_t` and \
:math:`x_{t-k}`, adjusted for the intermediate variables \
:math:`\\lbrace x_{t-1}, \\ldots, x_{t-k+1} \\rbrace` (:cite:`Wilson2015`). \
Following `this notes <https://onlinecourses.science.psu.edu/stat510/node/62>`_, it can be defined as
.. math::
\\alpha_k = \\frac{ Cov(x_t, x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1})}
{\\sqrt{ Var(x_t | x_{t-1}, \\ldots, x_{t-k+1}) Var(x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1} )}}
with (a) :math:`x_t = f(x_{t-1}, \\ldots, x_{t-k+1})` and (b) :math:`x_{t-k} = f(x_{t-1}, \\ldots, x_{t-k+1})` \
being AR(k-1) models that can be fitted by OLS. Be aware that in (a), the regression is done on past values to \
predict :math:`x_t` whereas in (b), future values are used to calculate the past value :math:`x_{t-k}`.\
It is said in :cite:`Wilson2015` that "for an AR(p), the partial autocorrelations [ :math:`\\alpha_k` ] \
will be nonzero for `k<=p` and zero for `k>p`."\
With this property, it is used to determine the lag of an AR-Process.
:param x: the time series to calculate the feature of
:type x: pandas.Series
:param param: contains dictionaries {"lag": val} with int val indicating the lag to be returned
:type param: list
:return: the value of this feature
:rtype: float
"""
if param is None:
param = [{'lag': 3}, {'lag': 5}, {'lag': 6}]
_partialc = feature_calculators.partial_autocorrelation(x, param)
logging.debug("partial autocorrelation by tsfresh calculated")
return _partialc
def minimum(self, x):
"""
Calculates the lowest value of the time series x.
:param x: the time series to calculate the feature of
:type x: pandas.Series
:return: the value of this feature
:rtype: float
"""
return np.min(x)
def mean(self, x):
"""
Returns the mean of x
:param x: the time series to calculate the feature of
:type x: pandas.Series
:return: the value of this feature
:rtype: float
"""
logging.debug("mean calculated")
return np.mean(x)
def ratio_value_number_to_time_series_length(self, x):
"""
As in tsfresh `ratio_value_number_to_time_series_length <https://github.com/blue-yonder/tsfresh/blob/master\
/tsfresh/feature_extraction/feature_calculators.py#L830>`_
Returns a factor which is 1 if all values in the time series occur only once,
and below one if this is not the case.
In principle, it just returns: # unique values / # values
:param x: the time series to calculate the feature of
:type x: pandas.Series
:return: the value of this feature
:rtype: float
"""
ratio = feature_calculators.ratio_value_number_to_time_series_length(x)
logging.debug("ratio value number to time series length by tsfresh calculated")
return ratio
def change_quantiles(self, x, ql=None, qh=None, isabs=None, f_agg=None):
"""
As in tsfresh `change_quantiles <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/\
feature_extraction/feature_calculators.py#L1248>`_
First fixes a corridor given by the quantiles ql and qh of the distribution of x. Then calculates the \
average, absolute value of consecutive changes of the series x inside this corridor. Think about selecting \
a corridor on the y-Axis and only calculating the mean of the absolute change of the time series inside \
this corridor.
:param x: the time series to calculate the feature of
:type x: pandas.Series
:param ql: the lower quantile of the corridor
:type ql: float
:param qh: the higher quantile of the corridor
:type qh: float
:param isabs: should the absolute differences be taken?
:type isabs: bool
:param f_agg: the aggregator function that is applied to the differences in the bin
:type f_agg: str, name of a numpy function (e.g. mean, var, std, median)
:return: the value of this feature
:rtype: float
"""
if ql is None or qh is None or isabs is None or f_agg is None:
f_agg = 'mean'
isabs = True
qh = 0.2
ql = 0.0
quantile = feature_calculators.change_quantiles(x, ql, qh, isabs, f_agg)
logging.debug("change_quantiles by tsfresh calculated")
return quantile
def number_peaks(self, x, n=None):
"""
As in tsfresh `number_peaks <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\
feature_calculators.py#L1003>`_
Calculates the number of peaks of at least support n in the time series x. A peak of support n is defined \
as a subsequence of x where a value occurs, which is bigger than its n neighbours to the left and to the right.
Hence in the sequence
>>> x = [3, 0, 0, 4, 0, 0, 13]
4 is a peak of support 1 and 2 because in the subsequences
>>> [0, 4, 0]
>>> [0, 0, 4, 0, 0]
4 is still the highest value. Here, 4 is not a peak of support 3 because 13 is the 3th neighbour to the \
right of 4 and its bigger than 4.
:param x: the time series to calculate the feature of
:type x: pandas.Series
:param n: the support of the peak
:type n: int
:return: the value of this feature
:rtype: float
"""
if n is None:
n = 5
peaks = feature_calculators.number_peaks(x, n)
logging.debug("agg linear trend by tsfresh calculated")
return peaks
def agg_linear_trend(self, x, param=None):
"""
As in tsfresh `agg_inear_trend <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/\
feature_extraction/feature_calculators.py#L1727>`_
Calculates a linear least-squares regression for values of the time series that were aggregated over chunks\
versus the sequence from 0 up to the number of chunks minus one.
This feature assumes the signal to be uniformly sampled. It will not use the time stamps to fit the model.
The parameters attr controls which of the characteristics are returned. Possible extracted attributes are\
"pvalue", "rvalue", "intercept", "slope", "stderr", see the documentation of linregress for more \
information.
The chunksize is regulated by "chunk_len". It specifies how many time series values are in each chunk.
Further, the aggregation function is controlled by "f_agg", which can use "max", "min" or , "mean", "median"
:param x: the time series to calculate the feature of
:type x: pandas.Series
:param param: contains dictionaries {"attr": x, "chunk_len": l, "f_agg": f} with x, f a str and l an int
:type param: list
:return: the different feature values
:rtype: pandas.Series
"""
if param is None:
param = [{'attr': 'intercept', 'chunk_len': 5, 'f_agg': 'min'},
{'attr': 'rvalue', 'chunk_len': 10, 'f_agg': 'var'},
{'attr': 'intercept', 'chunk_len': 10, 'f_agg': 'min'}]
agg = feature_calculators.agg_linear_trend(x, param)
logging.debug("agg linear trend by tsfresh calculated")
return list(agg)
def spkt_welch_density(self, x, param=None):
"""
As in tsfresh `spkt_welch_density <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/\
feature_extraction/feature_calculators.py#L1162>`_ . This feature calculator estimates the cross power \
spectral density of the time series x at different frequencies. To do so, the time series is first shifted \
from the time domain to the frequency domain. \
The feature calculators returns the power spectrum of the different frequencies.
:param x: the time series to calculate the feature of
:type x: pandas.Series
:param param: contains dictionaries {"coeff": x} with x int
:type param: list
:return: the different feature values
:rtype: pandas.Series
"""
if param is None:
param = [{'coeff': 2}, {'coeff': 5}, {'coeff': 8}]
welch = feature_calculators.spkt_welch_density(x, param)
logging.debug("spkt welch density by tsfresh calculated")
return list(welch)
def percentage_of_reoccurring_datapoints_to_all_datapoints(self, x):
"""
As in tsfresh `percentage_of_reoccurring_datapoints_to_all_datapoints <https://github.com/blue-yonder/tsfresh/\
blob/master/tsfresh/feature_extraction/feature_calculators.py#L739>`_ \
Returns the percentage of unique values, that are present in the time series more than once.\
len(different values occurring more than once) / len(different values)\
This means the percentage is normalized to the number of unique values, in contrast to the \
percentage_of_reoccurring_values_to_all_values.
:param x: the time series to calculate the feature of
:type x: pandas.Series
:return: the value of this feature
:rtype: float
"""
_perc = feature_calculators.percentage_of_reoccurring_datapoints_to_all_datapoints(x)
logging.debug("percentage of reoccurring datapoints to all datapoints by tsfresh calculated")
return _perc
def abs_energy(self, x):
"""
As in tsfresh `abs_energy <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\
feature_calculators.py#L390>`_ \
Returns the absolute energy of the time series which is the sum over the squared values\
.. math::
E=\\sum_{i=1,\ldots, n}x_i^2
:param x: the time series to calculate the feature of
:type x: pandas.Series
:return: the value of this feature
:rtype: float
"""
_energy = feature_calculators.abs_energy(x)
logging.debug("abs energy by tsfresh calculated")
return _energy
def fft_aggregated(self, x, param=None):
"""
As in tsfresh `fft_aggregated <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\
feature_calculators.py#L896>`_
Returns the spectral centroid (mean), variance, skew, and kurtosis of the absolute fourier transform spectrum.
:param x: the time series to calculate the feature of
:type x: pandas.Series
:param param: contains dictionaries {"aggtype": s} where s str and in ["centroid", "variance",
"skew", "kurtosis"]
:type param: list
:return: the different feature values
:rtype: pandas.Series
"""
if param is None:
param = [{'aggtype': 'centroid'}]
_fft_agg = feature_calculators.fft_aggregated(x, param)
logging.debug("fft aggregated by tsfresh calculated")
return list(_fft_agg)
def fft_coefficient(self, x, param=None):
"""
As in tsfresh `fft_coefficient <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\
feature_calculators.py#L852>`_ \
Calculates the fourier coefficients of the one-dimensional discrete Fourier Transform for real input by fast \
fourier transformation algorithm
.. math::
A_k = \\sum_{m=0}^{n-1} a_m \\exp \\left \\{ -2 \\pi i \\frac{m k}{n} \\right \\}, \\qquad k = 0, \\ldots , n-1.
The resulting coefficients will be complex, this feature calculator can return the real part (attr=="real"), \
the imaginary part (attr=="imag), the absolute value (attr=""abs) and the angle in degrees (attr=="angle).
:param x: the time series to calculate the feature of
:type x: pandas.Series
:param param: contains dictionaries {"coeff": x, "attr": s} with x int and x >= 0, s str and in ["real", "imag"\
, "abs", "angle"]
:type param: list
:return: the different feature values
:rtype: pandas.Series
"""
if param is None:
param = [{'attr': 'abs', 'coeff': 44}, {'attr': 'abs', 'coeff': 63}, {'attr': 'abs', 'coeff': 0},
{'attr': 'real', 'coeff': 0}, {'attr': 'real', 'coeff': 23}]
_fft_coef = feature_calculators.fft_coefficient(x, param)
logging.debug("fft coefficient by tsfresh calculated")
return list(_fft_coef)
def sum_values(self, x):
"""
Calculates the sum over the time series values
:param x: the time series to calculate the feature of
:type x: pandas.Series
:return: the value of this feature
:rtype: bool
"""
if len(x) == 0:
return 0
return np.sum(x)
def dc_remove_signal(self, data_frame):
"""
Removes the dc component of the signal as per :cite:`Kassavetis2015`
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return: the data frame with dc remove signal field
:rtype: pandas.DataFrame
"""
mean_signal = np.mean(data_frame.mag_sum_acc)
data_frame['dc_mag_sum_acc'] = data_frame.mag_sum_acc - mean_signal
logging.debug("dc remove signal")
return data_frame
def bradykinesia(self, data_frame, method='fft'):
"""
This method calculates the bradykinesia amplitude of the data frame. It accepts two different methods, \
'fft' and 'welch'. First the signal gets re-sampled, dc removed and then high pass filtered.
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:param method: fft or welch.
:type method: str
:return ampl: the amplitude of the Bradykinesia
:rtype ampl: float
:return freq: the frequency of the Bradykinesia
:rtype freq: float
"""
try:
data_frame_resampled = self.resample_signal(data_frame)
data_frame_dc = self.dc_remove_signal(data_frame_resampled)
data_frame_filtered = self.filter_signal(data_frame_dc, 'dc_mag_sum_acc')
if method == 'fft':
data_frame_fft = self.fft_signal(data_frame_filtered)
return self.amplitude_by_fft(data_frame_fft)
else:
return self.amplitude_by_welch(data_frame_filtered)
except ValueError as verr:
logging.error("TremorProcessor bradykinesia ValueError ->%s", verr.message)
except:
logging.error("Unexpected error on TemorProcessor bradykinesia: %s", sys.exc_info()[0])
def amplitude(self, data_frame, method='fft'):
"""
This method calculates the tremor amplitude of the data frame. It accepts two different methods, \
'fft' and 'welch'. First the signal gets re-sampled and then high pass filtered.
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:param method: fft or welch
:type method: str
:return ampl: the amplitude of the Tremor
:rtype ampl: float
:return freq: the frequency of the Tremor
:rtype freq: float
"""
try:
data_frame_resampled = self.resample_signal(data_frame)
data_frame_filtered = self.filter_signal(data_frame_resampled)
if method == 'fft':
data_frame_fft = self.fft_signal(data_frame_filtered)
return self.amplitude_by_fft(data_frame_fft)
else:
return self.amplitude_by_welch(data_frame_filtered)
except ValueError as verr:
logging.error("TremorProcessor ValueError ->%s", verr.message)
except:
logging.error("Unexpected error on TremorProcessor process: %s", sys.exc_info()[0])
def extract_features(self, data_frame, pre=''):
"""
This method extracts all the features available to the Tremor Processor class.
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return: amplitude_by_fft, frequency_by_fft, amplitude_by_welch, frequency_by_fft, bradykinesia_amplitude_by_fft, \
bradykinesia_frequency_by_fft, bradykinesia_amplitude_by_welch, bradykinesia_frequency_by_welch, \
magnitude_approximate_entropy, magnitude_autocorrelation_lag_8, magnitude_autocorrelation_lag_9, \
magnitude_partial_autocorrelation_lag_3, magnitude_partial_autocorrelation_lag_5, \
magnitude_partial_autocorrelation_lag_6, magnitude_minimum, magnitude_mean, \
magnitude_ratio_value_number_to_time_series_length, magnitude_change_quantiles, magnitude_number_peaks, \
magnitude_agg_linear_trend_min_chunk_len_5_attr_intercept, \
magnitude_agg_linear_trend_var_chunk_len_10_attr_rvalue, \
magnitude_agg_linear_trend_min_chunk_len_10_attr_intercept, \
magnitude_spkt_welch_density_coeff_2, magnitude_spkt_welch_density_coeff_5, \
magnitude_spkt_welch_density_coeff_8, magnitude_percentage_of_reoccurring_datapoints_to_all_datapoints, \
        magnitude_abs_energy, magnitude_fft_aggregated_centroid, \
magnitude_fft_coefficient_abs_coeff_44, magnitude_fft_coefficient_abs_coeff_63, \
magnitude_fft_coefficient_abs_coeff_0, magnitude_fft_coefficient_real_coeff_0, \
magnitude_fft_coefficient_real_coeff_23, magnitude_sum_values
:rtype: list
"""
try:
magnitude_partial_autocorrelation = self.partial_autocorrelation(data_frame.mag_sum_acc)
magnitude_agg_linear = self.agg_linear_trend(data_frame.mag_sum_acc)
magnitude_spkt_welch_density = self.spkt_welch_density(data_frame.mag_sum_acc)
magnitude_fft_coefficient = self.fft_coefficient(data_frame.mag_sum_acc)
            try:
                magnitude_approximate_entropy = self.approximate_entropy(data_frame.mag_sum_acc)
            except MemoryError:
                magnitude_approximate_entropy = 0
                logging.error("Failed to allocate memory, setting to zero and skipping approximate entropy calculation.")
return {pre+'amplitude_by_fft': self.amplitude(data_frame)[0],
pre+'frequency_by_fft': self.amplitude(data_frame)[1],
pre+'amplitude_by_welch': self.amplitude(data_frame, 'welch')[0],
pre+'frequency_by_welch': self.amplitude(data_frame, 'welch')[1],
pre+'bradykinesia_amplitude_by_fft': self.bradykinesia(data_frame)[0],
pre+'bradykinesia_frequency_by_fft': self.bradykinesia(data_frame)[1],
pre+'bradykinesia_amplitude_by_welch': self.bradykinesia(data_frame, 'welch')[0],
pre+'bradykinesia_frequency_by_welch': self.bradykinesia(data_frame, 'welch')[1],
                pre+'magnitude_approximate_entropy': magnitude_approximate_entropy,
pre+'magnitude_autocorrelation_lag_8': self.autocorrelation(data_frame.mag_sum_acc, 8),
pre+'magnitude_autocorrelation_lag_9': self.autocorrelation(data_frame.mag_sum_acc, 9),
pre+'magnitude_partial_autocorrelation_lag_3': magnitude_partial_autocorrelation[0][1],
pre+'magnitude_partial_autocorrelation_lag_5': magnitude_partial_autocorrelation[1][1],
pre+'magnitude_partial_autocorrelation_lag_6': magnitude_partial_autocorrelation[2][1],
pre+'magnitude_minimum': self.minimum(data_frame.mag_sum_acc),
pre+'magnitude_mean': self.mean(data_frame.mag_sum_acc),
pre+'magnitude_ratio_value_number_to_time_series_length':
self.ratio_value_number_to_time_series_length(data_frame.mag_sum_acc),
pre+'magnitude_change_quantiles': self.change_quantiles(data_frame.mag_sum_acc),
pre+'magnitude_number_peaks': self.number_peaks(data_frame.mag_sum_acc),
pre+'magnitude_agg_linear_trend_min_chunk_len_5_attr_intercept': magnitude_agg_linear[0][1],
pre+'magnitude_agg_linear_trend_var_chunk_len_10_attr_rvalue': magnitude_agg_linear[1][1],
pre+'magnitude_agg_linear_trend_min_chunk_len_10_attr_intercept': magnitude_agg_linear[2][1],
pre+'magnitude_spkt_welch_density_coeff_2': magnitude_spkt_welch_density[0][1],
pre+'magnitude_spkt_welch_density_coeff_5': magnitude_spkt_welch_density[1][1],
pre+'magnitude_spkt_welch_density_coeff_8': magnitude_spkt_welch_density[2][1],
pre+'magnitude_percentage_of_reoccurring_datapoints_to_all_datapoints':
self.percentage_of_reoccurring_datapoints_to_all_datapoints(data_frame.mag_sum_acc),
pre+'magnitude_abs_energy': self.abs_energy(data_frame.mag_sum_acc),
pre+'magnitude_fft_aggregated_centroid': self.fft_aggregated(data_frame.mag_sum_acc)[0][1],
pre+'magnitude_fft_coefficient_abs_coeff_44': magnitude_fft_coefficient[0][1],
pre+'magnitude_fft_coefficient_abs_coeff_63': magnitude_fft_coefficient[1][1],
pre+'magnitude_fft_coefficient_abs_coeff_0': magnitude_fft_coefficient[2][1],
pre+'magnitude_fft_coefficient_real_coeff_0': magnitude_fft_coefficient[3][1],
pre+'magnitude_fft_coefficient_real_coeff_23': magnitude_fft_coefficient[4][1],
pre+'magnitude_sum_values': self.sum_values(data_frame.mag_sum_acc)}
except:
logging.error("Error on TremorProcessor process, extract features: %s", sys.exc_info()[0])
| 2.8125 | 3 |
src/flickr/boundingbox.py | dballesteros7/master-thesis-2015 | 0 | 12771502 | class BoundingBox:
def __init__(self, top: float, right: float, bottom: float, left: float):
self.top = top
self.right = right
self.bottom = bottom
self.left = left
def to_flickr_bounding_box(self):
return '{self.left}, {self.bottom}, {self.right}, {self.top}'.format(self=self)
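# Hedged usage sketch (not part of the original module): the coordinates
# below (roughly central Zurich) are illustrative only.
# box = BoundingBox(top=47.38, right=8.55, bottom=47.36, left=8.53)
# box.to_flickr_bounding_box()  # -> '8.53, 47.36, 8.55, 47.38'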
| 3.015625 | 3 |
examples/chalicelib/blueprints/authed.py | cuenca-mx/agave | 3 | 12771503 | <reponame>cuenca-mx/agave
from functools import wraps
from typing import Callable
from chalice import Blueprint
class AuthedBlueprint(Blueprint):
"""
This dummy class is an example of Authentication/Authorization blueprint.
"""
def route(self, path: str, **kwargs):
"""
Builds route decorator with custom authentication.
It is only a function wrapper for `Blueprint._register_handler` methods
For this example we do not validate any credentials but
your authentication logic could be implemented here.
:param path:
:param kwargs:
:return:
"""
def decorator(user_handler: Callable):
@wraps(user_handler)
def authed_handler(*args, **kwargs):
# your authentication logic goes here
# before execute `user_handler` function.
self.current_request.user_id = 'US123456789'
return user_handler(*args, **kwargs)
self._register_handler( # type: ignore
'route',
user_handler.__name__,
authed_handler,
authed_handler,
dict(path=path, kwargs=kwargs),
            )
            return authed_handler
        return decorator
def user_id_filter_required(self):
"""
It overrides `RestApiBlueprint.user_id_filter_required()` method.
This method is required to be implemented with your own business logic.
You have to determine when `user_id` filter is required. For example:
- `Account`s created by one user should not be queryable/retrievable
by others users. In that case return `True`.
- "Admin" users are allowed to query/retrieve any `Account` from any
user. In that case return `False`.
For testing purpose we return `False` as default behavior.
But if we need to change it to `True` in tests we could monkey patch it
when needed.
:return:
"""
return False
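# Hedged usage sketch (not part of the original module): wiring the blueprint
# into a Chalice app; `app` and the '/ping' route are illustrative.
# from chalice import Chalice
# app = Chalice(app_name='example')
# authed = AuthedBlueprint(__name__)
# @authed.route('/ping')
# def ping():
#     # authed_handler populated current_request.user_id before we run
#     return {'user_id': authed.current_request.user_id}
# app.register_blueprint(authed)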
| 2.875 | 3 |
pyseq/objstage.py | chaichontat/PySeq2500 | 9 | 12771504 | <filename>pyseq/objstage.py
#!/usr/bin/python
"""Illumina HiSeq2500 :: Objective Stage
Uses commands found on `hackteria
<www.hackteria.org/wiki/HiSeq2000_-_Next_Level_Hacking>`_
The objective can move between steps 0 and 65535, where step 0 is
the closest to the stage. Each objective stage step is about 4 nm.
**Examples:**
.. code-block:: python
#Create an objective stage objective
import pyseq
fpga = pyseq.fpga.FPGA('COM12','COM15')
fpga.initialize()
obj = pyseq.objstage.OBJstage(fpga)
#Initialize the objective stage
obj.initialize()
# Change objective velocity to 1 mm/s and move to step 5000
obj.set_velocity(1)
obj.move(5000)
"""
import time
from math import ceil, floor
class OBJstage():
"""HiSeq 2500 System :: Objective Stage
**Attributes:**
- spum (int): The number of objective steps per micron.
- v (float): The velocity the objective will move at in mm/s.
- position (int): The absolute position of the objective in steps.
- min_z (int): Minimum obj stage step position.
- max_z (int): Maximum obj stage step position.
- min_v (int): Minimum velocity in mm/s.
- max_v (int): Maximum velocity in mm/s.
- focus_spacing: Distance in microns between frames in an objective stack
- focus_velocity (float): Velocity used for objective stack
- focus_frames (int): Number of camera frames used for objective stack
- focus_range (float): Percent of total objective range used for objective stack
- focus_start (int): Initial step for objective stack.
- focus_stop (int): Final step for objective stack.
- focus_rough (int): Position used for imaging when focus position is
not known.
- logger (logger): Logger used for messaging.
"""
def __init__(self, fpga, logger = None):
"""The constructor for the objective stage.
**Parameters:**
- fpga (fpga object): The Illumina HiSeq 2500 System :: FPGA.
- logger (log, optional): The log file to write communication with
the objective stage to.
**Returns:**
- objective stage object: A objective stage object to control the
position of the objective.
"""
self.fpga = fpga
self.min_z = 0
self.max_z = 65535
self.spum = 262 #steps per um
self.max_v = 5 #mm/s
self.min_v = 0.1 #mm/s
self.v = None #mm/s
self.suffix = '\n'
self.position = None
self.logger = logger
self.focus_spacing = 0.5 # distance in microns between frames in obj stack
self.focus_velocity = 0.1 #mm/s
self.focus_frames = 200 # number of total camera frames for obj stack
self.focus_range = 90 #%
self.focus_start = 2000 # focus start step
self.focus_stop = 62000 # focus stop step
self.focus_rough = int((self.max_z - self.min_z)/2 + self.min_z)
self.timeout = 100
def initialize(self):
"""Initialize the objective stage."""
# Update the position of the objective
self.position = self.check_position()
#Set velocity to 5 mm/s
self.set_velocity(5)
def command(self, text):
"""Send a command to the objective stage and return the response.
**Parameters:**
- text (str): A command to send to the objective stage.
**Returns:**
- str: The response from the objective stage.
"""
response = self.fpga.command(text,'OBJstage')
# text = text + self.suffix
# self.serial_port.write(text)
# self.serial_port.flush()
# response = self.serial_port.readline()
# if self.logger is not None:
# self.logger.info('OBJstage::txmt::'+text)
# self.logger.info('OBJstage::rcvd::'+response)
return response
def move(self, position):
"""Move the objective to an absolute step position.
The objective can move between steps 0 and 65535, where step 0 is
the closest to the stage. If the position is out of range, the
objective will not move and a warning message is printed.
**Parameters:**
- position (int): The step position to move the objective to.
"""
if self.min_z <= position <= self.max_z:
try:
position = int(position)
start = time.time()
while self.check_position() != position:
response = self.command('ZMV ' + str(position)) # Move Objective
if (time.time() - start) > self.timeout:
self.check_position()
break
except:
self.check_position()
self.write_log('ERROR::Could not move objective')
else:
self.write_log('ERROR::Objective position out of range')
def check_position(self):
"""Return the absolute step position of the objective.
The objective can move between steps 0 and 65535, where step 0 is
the closest to the stage. If the position of the objective can't be
read, None is returned.
**Returns:**
- int: The absolution position of the objective steps.
"""
        response = None
        try:
            response = self.command('ZDACR') # Read position
            position = response.split(' ')[1]
            position = int(position[0:-1])
            self.position = position
        except:
            self.write_log('WARNING:: Could not read objective position')
            position = None
        # drain any remaining lines from the serial buffer
        while response is not None and response.strip() != '':
            response = self.fpga.serial_port.readline()
        return position
def set_velocity(self, v):
"""Set the velocity of the objective.
The maximum objective velocity is 5 mm/s. If the objective velocity
is not in range, the velocity is not set and an error message is
printed.
**Parameters:**
- v (float): The velocity for the objective to move at in mm/s.
"""
if self.min_v <= v <= self.max_v:
self.v = v
# convert mm/s to steps/s
v = int(v * 1288471) #steps/mm
self.command('ZSTEP ' + str(v)) # Set velocity
else:
self.write_log('ERROR::Objective velocity out of range')
def set_focus_trigger(self, position):
"""Set trigger for an objective stack to determine focus position.
**Parameters:**
- position (int): Step position to start imaging.
**Returns:**
- int: Current step position of the objective.
"""
self.command('ZTRG ' + str(position))
self.command('ZYT 0 3')
return self.check_position()
def update_focus_limits(self, cam_interval=0.040202, range=90, spacing=4.1):
"""Update objective velocity and start/stop positions for focusing.
**Parameters:**
- cam_interval (float): Camera frame interval in seconds per frame
- range(float): Percent of total objective range to use for focusing
- spacing (float): Distance between objective stack frames in microns.
**Returns:**
- bool: True if all values are acceptable.
"""
# Calculate velocity needed to space out frames
velocity = spacing/cam_interval/1000 #mm/s
if self.min_v > velocity:
spacing = self.min_v*1000*cam_interval
velocity = self.min_v
print('Spacing too small, changing to ', spacing)
elif self.max_v < velocity:
spacing = self.max_v*1000*cam_interval
velocity = self.max_v
print('Spacing too large, changing to ', spacing)
self.focus_spacing = spacing
self.focus_velocity = velocity
spf = spacing*self.spum # steps per frame
# Update focus range, ie start and stop step positions
if 1 <= range <= 100:
self.focus_range = range
range_step = int(range/100*(self.max_z-self.min_z)/2)
self.focus_stop = self.focus_rough+range_step
self.focus_start = self.focus_rough-range_step
self.focus_frames = ceil((self.focus_stop-self.focus_start)/spf)
self.focus_frames += 100
acceptable = True
else:
acceptable = False
return acceptable
def write_log(self, text):
"""Write messages to the log."""
if self.logger is None:
print('OBJstage::'+text)
else:
self.logger.info('OBJstage::'+text)
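# Hedged sketch (not part of the original module): a typical focus-stack
# setup built from the methods above; `fpga` and the camera interval are
# placeholders taken from the class defaults, not a verified protocol.
# obj = OBJstage(fpga)
# obj.initialize()
# if obj.update_focus_limits(cam_interval=0.040202, range=90, spacing=4.1):
#     obj.set_velocity(obj.focus_velocity)
#     obj.move(obj.focus_start)
#     obj.set_focus_trigger(obj.focus_start)
#     obj.move(obj.focus_stop)  # sweep through the stack while the camera images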
| 2.796875 | 3 |
dl/_utils.py | jjjkkkjjj/pytorch.dl | 2 | 12771505 | import os, cv2
import torch
from torch import nn
import numpy as np
def weights_path(_file_, _root_num, dirname):
basepath = os.path.dirname(_file_)
backs = [".."]*_root_num
model_dir = os.path.abspath(os.path.join(basepath, *backs, dirname))
return model_dir
def _check_ins(name, val, cls, allow_none=False, default=None):
if allow_none and val is None:
return default
if not isinstance(val, cls):
err = 'Argument \'{}\' must be {}, but got {}'
if isinstance(cls, (tuple, list)):
types = [c.__name__ for c in cls]
err = err.format(name, types, type(val).__name__)
raise ValueError(err)
else:
err = err.format(name, cls.__name__, type(val).__name__)
raise ValueError(err)
return val
def _check_retval(funcname, val, cls):
if not isinstance(val, cls):
err = '\'{}\' must return {}, but got {}'
if isinstance(cls, (tuple, list)):
types = [c.__name__ for c in cls]
err = err.format(funcname, types, type(val).__name__)
raise ValueError(err)
else:
err = err.format(funcname, cls.__name__, type(val).__name__)
raise ValueError(err)
return val
def _check_norm(name, val):
if isinstance(val, (float, int)):
val = torch.tensor([float(val)], requires_grad=False)
elif isinstance(val, (list, tuple)):
val = torch.tensor(val, requires_grad=False).float()
elif not isinstance(val, torch.Tensor):
raise ValueError('{} must be int, float, list, tuple, Tensor, but got {}'.format(name, type(val).__name__))
return val
def _initialize_xavier_uniform(layers):
from .models.layers import ConvRelu
for module in layers.modules():
if isinstance(module, nn.Conv2d):
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
nn.init.constant_(module.bias, 0)
elif isinstance(module, ConvRelu):
nn.init.xavier_uniform_(module.conv.weight)
if module.conv.bias is not None:
nn.init.constant_(module.conv.bias, 0)
def _get_model_url(name):
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
return model_urls[name]
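# Hedged note (not part of the original module): the URL table above is
# typically consumed with torch.hub, which does exist with this signature:
#   from torch.hub import load_state_dict_from_url
#   state_dict = load_state_dict_from_url(_get_model_url('vgg16'), progress=True)
# How this project actually downloads the weights is not shown in this file.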
def _check_image(image, device, size=None):
"""
:param image: ndarray or Tensor of list or tuple, or ndarray, or Tensor. Note that each type will be handled as;
ndarray of list or tuple, ndarray: (?, h, w, c). channel order will be handled as RGB
Tensor of list or tuple, Tensor: (?, c, h, w). channel order will be handled as RGB
:param device: torch.device
:param size: None or tuple, if None is passed, check will not be done
Note that size = (w, h)
:return:
img: Tensor, shape = (b, c, h, w)
        orig_imgs: list of ndarray, shape = (h, w, c); these images may be used for visualization
"""
orig_imgs = []
def __check(_tim, _cim, cfirst):
"""
Note that 2d or 3d image is resizable
:param _tim: tensor, shape = (h, w, ?) or (?, h, w)
:param _cim: ndarray, shape = (h, w, ?) or (?, h, w)
:return:
tims: tensor, shape = (c, h, w)
cims: ndarray, shape = (h, w, c)
"""
#### check size of tensor ####
if size:
h, w = _tim.shape[-2:] if cfirst else _tim.shape[:2]
wcond = size[0] if size[0] is not None else w
hcond = size[1] if size[1] is not None else h
if not (h == hcond and w == wcond):
# do resize
if cfirst and _cim.ndim == 3:
# note that _cim's shape must be (c, h, w)
_cim = _cim.transpose((1, 2, 0))
# _cim's shape = (h, w, ?)
resized_cim = cv2.resize(_cim, (wcond, hcond))
return __check(torch.tensor(resized_cim, requires_grad=False), _cim, cfirst=False)
#### check tensor ####
assert isinstance(_tim, torch.Tensor)
if _tim.ndim == 2:
tim = _tim.unsqueeze(2)
elif _tim.ndim == 3:
tim = _tim
else:
raise ValueError('Invalid image found. image must be 2d or 3d, but got {}'.format(_tim.ndim))
if not cfirst:
# note that tim's shape must be (h, w, c)
tim = tim.permute((2, 0, 1))
#### check cvimg ####
assert isinstance(_cim, np.ndarray)
if _cim.ndim == 2:
cim = np.broadcast_to(np.expand_dims(_cim, 2), (_cim.shape[0], _cim.shape[1], 3)).copy()
elif _cim.ndim == 3:
cim = _cim
else:
raise ValueError('Invalid image found. image must be 2d or 3d, but got {}'.format(_cim.ndim))
if cfirst:
# note that cim's shape must be (c, h, w)
cim = cim.transpose((1, 2, 0))
return tim, cim
if isinstance(image, (list, tuple)):
img = []
for im in image:
if isinstance(im, np.ndarray):
tim = torch.tensor(im, requires_grad=False)
# im and tim's shape = (h, w, ?)
tim, cim = __check(tim, im, cfirst=False)
elif isinstance(im, torch.Tensor):
cim = im.cpu().numpy()
# im and tim's shape = (?, h, w)
tim, cim = __check(im, cim, cfirst=True)
else:
                raise ValueError('Invalid image type. list or tuple\'s element must be ndarray or Tensor, but got \'{}\''.format(type(im).__name__))
img += [tim]
orig_imgs += [cim]
# (b, c, h, w)
img = torch.stack(img)
elif isinstance(image, np.ndarray):
if image.ndim == 2:
tim, cim = __check(torch.tensor(image, requires_grad=False), image, cfirst=False)
img = tim.unsqueeze(0)
orig_imgs += [cim]
elif image.ndim == 3:
tim, cim = __check(torch.tensor(image, requires_grad=False), image, cfirst=False)
img = tim.unsqueeze(0)
orig_imgs += [cim]
elif image.ndim == 4:
img = []
for i in range(image.shape[0]):
tim, cim = __check(torch.tensor(image[i], requires_grad=False), image[i], cfirst=False)
img += [tim]
orig_imgs += [cim]
img = torch.stack(img)
else:
raise ValueError('Invalid image found. image must be from 2d to 4d, but got {}'.format(image.ndim))
elif isinstance(image, torch.Tensor):
if image.ndim == 2:
tim, cim = __check(image, image.cpu().numpy(), cfirst=True)
img = tim.unsqueeze(0)
orig_imgs += [cim]
elif image.ndim == 3:
tim, cim = __check(image, image.cpu().numpy(), cfirst=True)
img = tim.unsqueeze(0)
orig_imgs += [cim]
elif image.ndim == 4:
img = []
for i in range(image.shape[0]):
tim, cim = __check(image[i], image[i].cpu().numpy(), cfirst=True)
img += [tim]
orig_imgs += [cim]
img = torch.stack(img)
else:
raise ValueError('Invalid image found. image must be from 2d to 4d, but got {}'.format(image.ndim))
else:
        raise ValueError('Invalid image type. image must be '
                         '\'list\', \'tuple\', \'ndarray\' or \'Tensor\', but got \'{}\''.format(type(image).__name__))
assert img.ndim == 4, "may forget checking..."
return img.to(device), orig_imgs
def _check_shape(desired_shape, input_shape):
"""
Note that desired_shape is allowed to have None, which means whatever input size is ok
:param desired_shape: array-like
:param input_shape: array-like
:return:
"""
if len(desired_shape) != len(input_shape):
raise ValueError("shape dim was not same, got {} and {}".format(len(desired_shape), len(input_shape)))
for i, (des_d, inp_d) in enumerate(zip(desired_shape, input_shape)):
if des_d is None:
continue
if des_d != inp_d:
raise ValueError('dim:{} is invalid size, desired one: {}, but got {}'.format(i, des_d, inp_d))
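# A minimal illustration of the contract above (shapes are made-up examples):
#   _check_shape((None, 3, None, None), (8, 3, 300, 300))  # passes; None matches any size
#   _check_shape((None, 3, None, None), (8, 1, 300, 300))  # raises ValueError on dim 1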
def _get_normed_and_origin_img(img, orig_imgs, rgb_means, rgb_stds, toNorm, device):
"""
:param img: Tensor, shape = (b, c, h, w)
:param orig_imgs: list of ndarray, shape = (h, w, c)
:param rgb_means: tuple or float
:param rgb_stds: tuple or float
:param toNorm: Bool
:param device: torch.device
:return:
        normed_img: Tensor, shape = (b, c, h, w)
        orig_imgs: list of ndarray, shape = (h, w, c). Channel order is RGB
"""
rgb_means = _check_norm('rgb_means', rgb_means)
rgb_stds = _check_norm('rgb_stds', rgb_stds)
img = img.to(device)
if toNorm:
# shape = (1, 3, 1, 1)
rgb_means = rgb_means.unsqueeze(0).unsqueeze(-1).unsqueeze(-1).to(device)
rgb_stds = rgb_stds.unsqueeze(0).unsqueeze(-1).unsqueeze(-1).to(device)
normed_img = (img / 255. - rgb_means) / rgb_stds
        # original images are already in un-normalized form; keep them as-is
else:
normed_img = img
# shape = (1, 1, 3)
rgb_means = rgb_means.unsqueeze(0).unsqueeze(0).cpu().numpy()
rgb_stds = rgb_stds.unsqueeze(0).unsqueeze(0).cpu().numpy()
orig_imgs = [oim * rgb_stds + rgb_means for oim in orig_imgs]
return normed_img, orig_imgs | 2.328125 | 2 |
x7/view/tests/canvas.py | gribbg/x7-view | 0 | 12771506 | <gh_stars>0
import tkinter as tk
root = tk.Tk()
canvas = tk.Canvas(root, width=600, height=400)
canvas.pack()
canvas.create_line((0, 0, 600, 400), fill='blue')
def button():
print('Button pressed, calling "image %s"' % image)
# (self._w, 'scale') + args
canvas.tk.call(canvas._w, 'image', '-foo', '-bar', image)
image = tk.PhotoImage(master=root, name='canvas1', width=20, height=20)
b = tk.Button(root, image=image, command=button)
b.pack()
root.mainloop()
| 3.109375 | 3 |
app/helpers/markdown.py | mrakinola/simple-fastapi-blog | 0 | 12771507 | <filename>app/helpers/markdown.py
from fastapi import HTTPException, status
from markdown import markdown
from os.path import join
from typing import Text
def read_markdown(filename: str) -> dict[str, Text]:
path = join("app/page_content", filename)
try:
with open(path, "r", encoding="utf-8") as file_to_read:
simple_text = file_to_read.read()
    except OSError:
file_without_markup = filename[:-3]
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Markdown page for {file_without_markup} not found",
)
converted_html = markdown(simple_text)
info = {"simple_text": converted_html}
return info
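# Hedged usage sketch (the route and app object below are illustrative, not part of this module):
#
#   from fastapi import FastAPI
#   app = FastAPI()
#
#   @app.get("/pages/{page_name}")
#   def show_page(page_name: str):
#       return read_markdown(f"{page_name}.md")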
| 3.125 | 3 |
reaction.py | ricciolino/rs-framework | 0 | 12771508 | #!/usr/bin/python3.6
class Reaction:
# the sets of reactants, inhibitors and products of the reaction
name = None
reactants = set()
inhibitors = set()
products = set()
    # a reaction is created through the call of a function in which all the validity checks are performed, so we can
    # assume that reactants, inhibitors and products arrive at this initializer already as sets, and we can
    # assume moreover that the correctness checks on the reaction have already been done
def __init__(self,_name,_reactants,_inhibitors,_products):
self.name = _name
self.reactants = _reactants
self.inhibitors = _inhibitors
self.products = _products
    # special method to print a reaction easily
    def __str__(self):
        # put the reactants into the string
        representation = "({"
        for r in self.reactants:
            representation += r + ','
        representation = representation[:-1] # remove the trailing ,
        # put the inhibitors into the string
        representation += "},{"
        for i in self.inhibitors:
            representation += i + ','
        representation = representation[:-1] # remove the trailing ,
        # put the products into the string
        representation += "},{"
        for p in self.products:
            representation += p + ','
        representation = representation[:-1] # remove the trailing ,
        representation += "})"
        return 'reaction_' + self.name + ' = ' + representation
    # check if a reaction belongs to a given nonempty set
def BelongTo(self,s):
return self.reactants.issubset(s) and self.inhibitors.issubset(s) and self.products.issubset(s)
# check if a reaction is enabled by a given nonempty set
def EnabledBy(self,t):
return self.reactants.issubset(t) and self.inhibitors.isdisjoint(t)
    # special method so that reactions can be stored in sets and used as dictionary keys;
    # kept consistent with __eq__, which compares the three component sets
    def __hash__(self):
        return hash((frozenset(self.reactants), frozenset(self.inhibitors), frozenset(self.products)))
# special method to check if two reactions are equals
def __eq__(self,other):
if isinstance(other,Reaction):
return self.reactants == other.reactants and self.products == other.products and self.inhibitors == other.inhibitors
return NotImplemented
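# Illustrative usage sketch (the reaction below is made up; element order inside the printed
# braces may vary because Python sets are unordered):
if __name__ == "__main__":
    r = Reaction("r1", {"a", "b"}, {"c"}, {"d"})
    print(r)                               # e.g. reaction_r1 = ({a,b},{c},{d})
    print(r.EnabledBy({"a", "b"}))         # True: all reactants present, no inhibitor present
    print(r.EnabledBy({"a", "b", "c"}))    # False: the inhibitor c is present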
| 4.0625 | 4 |
usaspending_api/references/models/overall_totals.py | g4brielvs/usaspending-api | 217 | 12771509 | from django.db import models
class OverallTotals(models.Model):
id = models.AutoField(primary_key=True)
create_date = models.DateTimeField(auto_now_add=True, blank=True, null=True)
update_date = models.DateTimeField(auto_now=True, null=True)
fiscal_year = models.IntegerField(blank=True, null=True)
total_budget_authority = models.DecimalField(max_digits=23, decimal_places=2, blank=True, null=True)
class Meta:
managed = True
db_table = "overall_totals"
| 1.921875 | 2 |
tests/test_conv.py | DuinoDu/backbone-neck | 0 | 12771510 | <filename>tests/test_conv.py
# -*- coding: utf-8 -*-
from .context import backbone_neck
import unittest
import sys
from backbone_neck.gluon.nn import conv
from backbone_neck.gluon.nn import ConvModule
import numpy as np
import mxnet as mx
def _input(h=128, w=128):
try:
import mxnet.ndarray as nd
except ImportError as e:
print('mxnet not install, exit')
sys.exit()
inputs = nd.random.randn(1, 3, h, w)
return inputs
class BasicConvSuite(unittest.TestCase):
"""Basic test cases."""
def test_conv(self):
cfg = dict(
type='Conv',
channels=32,
kernel_size=3,
padding=1,
use_bias=False)
net = backbone_neck.gluon.nn.conv.build_conv_layer(cfg)
net.initialize(ctx=[mx.cpu(0)])
x = _input(128, 128)
y = net(x)
x, y = x.asnumpy(), y.asnumpy()
self.assertEqual(y.shape[0], x.shape[0])
self.assertEqual(y.shape[1], 32)
self.assertEqual(y.shape[2], x.shape[2])
self.assertEqual(y.shape[3], x.shape[3])
def test_conv_deform(self):
cfg = dict(
type='DCN',
channels=32,
kernel_size=3,
padding=1,
use_bias=False)
net = backbone_neck.gluon.nn.conv.build_conv_layer(cfg)
net.initialize(ctx=[mx.cpu(0)])
x = _input(128, 128)
y = net(x)
x, y = x.asnumpy(), y.asnumpy()
self.assertEqual(y.shape[0], x.shape[0])
self.assertEqual(y.shape[1], 32)
self.assertEqual(y.shape[2], x.shape[2])
self.assertEqual(y.shape[3], x.shape[3])
def test_conv_oct(self):
cfg = dict(
type='OctConv',
channels=32,
kernel_size=3,
padding=1,
use_bias=False)
with self.assertRaises(NotImplementedError):
net = backbone_neck.gluon.nn.conv.build_conv_layer(cfg)
class BasicModuleSuite(unittest.TestCase):
"""Basic test cases."""
def test_conv(self):
conv_cfg = dict(
type='Conv',
channels=32,
kernel_size=3,
padding=1,
use_bias=False)
norm_cfg = dict(
type='BN')
act_cfg = dict(
type='Activation',
activation='relu')
order = ('conv', 'norm', 'act')
net = ConvModule(
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
order=order)
net.initialize(ctx=[mx.cpu(0)])
x = _input(128, 128)
y = net(x)
x, y = x.asnumpy(), y.asnumpy()
self.assertEqual(y.shape[0], x.shape[0])
self.assertEqual(y.shape[1], 32)
self.assertEqual(y.shape[2], x.shape[2])
self.assertEqual(y.shape[3], x.shape[3])
if __name__ == '__main__':
unittest.main()
| 2.359375 | 2 |
Validation/RecoB/test/validation_customJet_cfg.py | ckamtsikis/cmssw | 852 | 12771511 | from __future__ import print_function
# The following comments couldn't be translated into the new config version:
#! /bin/env cmsRun
import FWCore.ParameterSet.Config as cms
process = cms.Process("validation")
import FWCore.ParameterSet.VarParsing as VarParsing
options = VarParsing.VarParsing ('analysis')
# load the full reconstraction configuration, to make sure we're getting all needed dependencies
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("Configuration.StandardSequences.Reconstruction_cff")
options.register ('jets',
"ak4PFJetsCHS", # default value, examples : "ak4PFJets", "ak4PFJetsCHS"
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"jet collection to use")
options.parseArguments()
whichJets = options.jets
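# Example invocation (illustrative; any supported jet collection can be passed on the command line):
#   cmsRun validation_customJet_cfg.py jets=ak4PFJets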
applyJEC = True
corrLabel = "ak4PFCHS"
from Configuration.AlCa.GlobalTag import GlobalTag
tag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '')
useTrigger = False
triggerPath = "HLT_PFJet80_v*"
runOnMC = True
#Flavour plots for MC: "all" = plots for all jets ; "dusg" = plots for d, u, s, dus, g independently ; not mandatory and any combinations are possible
#b, c, light (dusg), non-identified (NI), PU jets plots are always produced
flavPlots = "allbcldusg"
###prints###
print("jet collcetion asked : ", whichJets)
print("JEC applied?", applyJEC, ", correction:", corrLabel)
print("trigger will be used ? : ", useTrigger, ", Trigger paths:", triggerPath)
print("is it MC ? : ", runOnMC, ", Flavours:", flavPlots)
print("Global Tag : ", tag.globaltag)
############
process.load("DQMServices.Components.DQMEnvironment_cfi")
process.load("DQMServices.Core.DQM_cfg")
process.load("JetMETCorrections.Configuration.JetCorrectors_cff")
process.load("CommonTools.ParticleFlow.goodOfflinePrimaryVertices_cfi")
process.load("RecoJets.JetAssociationProducers.ak4JTA_cff")
process.load("RecoBTag.Configuration.RecoBTag_cff")
process.load("PhysicsTools.JetMCAlgos.HadronAndPartonSelector_cfi")
process.load("PhysicsTools.JetMCAlgos.AK4PFJetsMCFlavourInfos_cfi")
process.load("PhysicsTools.JetMCAlgos.CaloJetsMCFlavour_cfi")
process.load("PhysicsTools.PatAlgos.mcMatchLayer0.jetMatch_cfi")
process.JECseq = cms.Sequence(getattr(process,corrLabel+"L1FastL2L3CorrectorChain"))
newjetID=cms.InputTag(whichJets)
process.ak4JetFlavourInfos.jets = newjetID
process.ak4JetFlavourInfos.hadronFlavourHasPriority = cms.bool(True)
process.AK4byRef.jets = newjetID
if not "ak4PFJetsCHS" in whichJets:
process.ak4JetTracksAssociatorAtVertexPF.jets = newjetID
process.pfImpactParameterTagInfos.jets = newjetID
process.softPFMuonsTagInfos.jets = newjetID
process.softPFElectronsTagInfos.jets = newjetID
process.patJetGenJetMatch.src = newjetID
process.btagSequence = cms.Sequence(
process.ak4JetTracksAssociatorAtVertexPF *
process.btagging
)
process.jetSequences = cms.Sequence(process.goodOfflinePrimaryVertices * process.btagSequence)
###
print("inputTag : ", process.ak4JetTracksAssociatorAtVertexPF.jets)
###
if runOnMC:
process.flavourSeq = cms.Sequence(
process.selectedHadronsAndPartons *
process.ak4JetFlavourInfos
)
process.load("Validation.RecoB.bTagAnalysis_cfi")
process.bTagValidation.jetMCSrc = 'ak4JetFlavourInfos'
if "Calo" in whichJets:
process.bTagValidation.caloJetMCSrc = 'AK4byValAlgo'
process.bTagValidation.useOldFlavourTool = True
process.flavourSeq = cms.Sequence(
process.myPartons *
process.AK4Flavour
)
process.bTagValidation.applyPtHatWeight = False
process.bTagValidation.doJetID = True
process.bTagValidation.doJEC = applyJEC
process.bTagValidation.JECsourceMC = cms.InputTag(corrLabel+"L1FastL2L3Corrector")
process.bTagValidation.flavPlots = flavPlots
process.bTagHarvestMC.flavPlots = flavPlots
#process.bTagValidation.ptRecJetMin = cms.double(20.)
process.bTagValidation.genJetsMatched = cms.InputTag("patJetGenJetMatch")
process.bTagValidation.doPUid = cms.bool(True)
process.ak4GenJetsForPUid = cms.EDFilter("GenJetSelector",
src = cms.InputTag("ak4GenJets"),
cut = cms.string('pt > 8.'),
filter = cms.bool(False)
)
process.patJetGenJetMatch.matched = cms.InputTag("ak4GenJetsForPUid")
process.patJetGenJetMatch.maxDeltaR = cms.double(0.25)
process.patJetGenJetMatch.resolveAmbiguities = cms.bool(True)
else:
process.load("DQMOffline.RecoB.bTagAnalysisData_cfi")
process.bTagAnalysis.doJEC = applyJEC
process.bTagAnalysis.JECsourceData = cms.InputTag(corrLabel+"L1FastL2L3ResidualCorrector")
process.JECseq *= (getattr(process,corrLabel+"ResidualCorrector") * getattr(process,corrLabel+"L1FastL2L3ResidualCorrector"))
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring()
)
from HLTrigger.HLTfilters.hltHighLevel_cfi import *
if useTrigger:
process.bTagHLT = hltHighLevel.clone(TriggerResultsTag = "TriggerResults::HLT", HLTPaths = ["HLT_PFJet40_v*"])
process.bTagHLT.HLTPaths = [triggerPath]
if runOnMC:
process.dqmSeq = cms.Sequence(process.ak4GenJetsForPUid * process.patJetGenJetMatch * process.flavourSeq * process.bTagValidation * process.bTagHarvestMC * process.dqmSaver)
else:
process.dqmSeq = cms.Sequence(process.bTagAnalysis * process.bTagHarvest * process.dqmSaver)
if useTrigger:
process.plots = cms.Path(process.bTagHLT * process.JECseq * process.jetSequences * process.dqmSeq)
else:
process.plots = cms.Path(process.JECseq * process.jetSequences * process.dqmSeq)
process.dqmEnv.subSystemFolder = 'BTAG'
process.dqmSaver.producer = 'DQM'
process.dqmSaver.workflow = '/POG/BTAG/BJET'
process.dqmSaver.convention = 'Offline'
process.dqmSaver.saveByRun = cms.untracked.int32(-1)
process.dqmSaver.saveAtJobEnd =cms.untracked.bool(True)
process.dqmSaver.forceRunNumber = cms.untracked.int32(1)
process.PoolSource.fileNames = [
]
#keep the logging output to a nice level
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 100
process.GlobalTag = tag
| 1.6875 | 2 |
Tokenizer/tokenizer.py | lucapascarella/T5_Replication_Package | 3 | 12771512 | import sentencepiece as spm
import argparse
def main():
print("ciao")
parser = argparse.ArgumentParser()
parser.add_argument("-input", "--input", type=str, default="data/train.txt",
help="tokenizer input file")
parser.add_argument("-model_prefix", "--model_prefix", type=str, default="m",
help="prefix for the model")
parser.add_argument("-vocab_size", "--vocab_size", type=int, default=32000,
help="the size of the vocabulary")
parser.add_argument("-character_coverage", "--character_coverage", type=float, default=0.995,
help="amount of characters covered by the model, good defaults are: 0.9995 for languages with rich character set like Japanse or Chinese and 1.0 for other languages with small character set")
parser.add_argument("-bos_id", "--bos_id", type=int, default=-1,
help="begin of sentence id")
parser.add_argument("-eos_id", "--eos_id", type=int, default=1,
help="end of sentence id")
parser.add_argument("-unk_id", "--unk_id", type=int, default=2,
help="unknown id")
parser.add_argument("-pad_id", "--pad_id", type=int, default=0,
help="padding id")
args = parser.parse_args()
# spm.SentencePieceTrainer.train('--input=train_pretraining_clean.txt --model_prefix=dl4se --vocab_size=32000 --bos_id=-1 --eos_id=1 --unk_id=2 --pad_id=0')
spm.SentencePieceTrainer.train(input=args.input, model_prefix=args.model_prefix, vocab_size=args.vocab_size, character_coverage=args.character_coverage,
bos_id=args.bos_id, eos_id=args.eos_id, unk_id=args.unk_id, pad_id=args.pad_id)
if __name__=="__main__":
main()
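# Hedged follow-up sketch: once training finishes, the resulting "<model_prefix>.model" file can
# be loaded for encoding (the file name below assumes the default prefix "m"):
#   sp = spm.SentencePieceProcessor()
#   sp.load("m.model")
#   print(sp.encode_as_pieces("def main():"))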
| 3.03125 | 3 |
research/tests/models/test_subject.py | ZviBaratz/pylabber | 3 | 12771513 | import pandas as pd
from datetime import date, timedelta
from django.conf import settings
from django.core.exceptions import ValidationError
from django.test import TestCase
from research.models.choices import DominantHand, Sex, Gender
from ..factories import SubjectFactory
class SubjectModelTestCase(TestCase):
def setUp(self):
self.test_subject = SubjectFactory()
self.test_subject.save()
df = pd.read_excel(
settings.RAW_SUBJECT_TABLE_PATH,
sheet_name="Subjects",
header=[0, 1],
index_col=0,
)
subject_details = {
("Anonymized", "Patient ID"): "ABC123",
("Anonymized", "First Name"): "Noam",
("Anonymized", "Last Name"): "Aharony",
("Raw", "Patient ID"): "11111",
("Raw", "First Name"): "Name",
("Raw", "Last Name"): "Last",
}
for item in subject_details:
df[item].iloc[0] = subject_details[item]
def test_not_future_birthdate_validator(self):
self.test_subject.date_of_birth = date.today() + timedelta(days=1)
with self.assertRaises(ValidationError):
self.test_subject.full_clean()
def test_null_char_field(self):
subject_one = SubjectFactory(id_number=None)
subject_one.save()
subject_two = SubjectFactory(id_number=None)
subject_two.save()
self.assertIsNone(subject_one.id_number)
self.assertIsNone(subject_two.id_number)
def test_dominant_hand_choices(self):
for choice in DominantHand:
self.test_subject.dominant_hand = choice.name
try:
self.test_subject.full_clean()
except ValidationError:
self.fail(f"Failed to set dominant hand to {choice.value}")
def test_invalid_dominant_hand_choice(self):
self.test_subject.dominant_hand = "Right"
with self.assertRaises(ValidationError):
self.test_subject.full_clean()
def test_sex_choices(self):
for choice in Sex:
self.test_subject.sex = choice.name
try:
self.test_subject.full_clean()
except ValidationError:
self.fail(f"Failed to set sex to {choice.value}")
def test_invalid_sex_choice(self):
self.test_subject.sex = "Z"
with self.assertRaises(ValidationError):
self.test_subject.full_clean()
def test_gender_choices(self):
for choice in Gender:
self.test_subject.gender = choice.name
try:
self.test_subject.full_clean()
except ValidationError:
self.fail(f"Failed to set gender to {choice.value}")
def test_invalid_gender_choice(self):
self.test_subject.gender = "Z"
with self.assertRaises(ValidationError):
self.test_subject.full_clean()
def test_get_full_name(self):
s = self.test_subject
expected = f"{s.first_name} {s.last_name}"
self.assertEqual(self.test_subject.get_full_name(), expected)
def test_str(self):
subject_id = self.test_subject.id
expected = f"Subject #{subject_id}"
self.assertEqual(str(self.test_subject), expected)
def test_get_personal_information(self):
# @TODO: Finish the personal information test.
# result = self.test_subject.get_personal_information()
# result = result[[item for item in ]]
        # expected = {
# ("Anonymized", "Patient ID"): "ABC123",
# ("Anonymized", "First Name"): "Noam",
# ("Anonymized", "Last Name"): "Aharony",
# ("Raw", "Patient ID"): "11111",
# ("Raw", "First Name"): "Name",
# ("Raw", "Last Name"): "Last",
# }
pass
def test_get_raw_information(self):
pass
def test_get_questionnaire_data(self):
pass
| 2.4375 | 2 |
find_duplicateVocab.py | PhilippPede/StudyApp | 0 | 12771514 | import pandas as pd
filename = "dictionary.csv"
df_vocab = pd.read_csv(filename)
print("Duplicates:")
df_duplicated = df_vocab[df_vocab[["Chinese", "PinYin"]].duplicated(keep=False)][["Chinese", "PinYin"]]
if df_duplicated.shape[0] == 0:
print("===== [OK] No duplicates found =====")
else:
print(df_duplicated)
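# Possible follow-up (an assumption, not part of the original workflow): drop the duplicated
# rows and write a cleaned dictionary, e.g.
#   df_clean = df_vocab.drop_duplicates(subset=["Chinese", "PinYin"], keep="first")
#   df_clean.to_csv("dictionary_clean.csv", index=False)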
| 3.65625 | 4 |
WEB/login/urls.py | JoseCarlosPa/CECEQ-GO | 1 | 12771515 | <reponame>JoseCarlosPa/CECEQ-GO<filename>WEB/login/urls.py
# Import the library for URLs
from django.urls import path
# Use the "dot" (relative import) to reference this package and import views
from . import views
urlpatterns = [
    # First goes the URL pattern for the route to match
    # Then the view to call, referenced as views.<function name>
    # Finally the route name, in this case 'login'
path('', views.login, name='login'),
path('logout/', views.logout, name='logout'),
] | 2.15625 | 2 |
scrapi/harvesters/opensiuc.py | wearpants/scrapi | 34 | 12771516 | """
Harvester for the OpenSIUC API at Southern Illinois University for the SHARE project
More information available here:
https://github.com/CenterForOpenScience/SHARE/blob/master/providers/edu.siu.opensiuc.md
An example API call: http://opensiuc.lib.siu.edu/do/oai/?verb=ListRecords&metadataPrefix=oai_dc&from=2014-10-09T00:00:00Z
"""
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class OpenSIUCHarvester(OAIHarvester):
short_name = 'opensiuc'
long_name = 'OpenSIUC at the Southern Illinois University Carbondale'
url = 'http://opensiuc.lib.siu.edu/'
base_url = 'http://opensiuc.lib.siu.edu/do/oai/'
property_list = [
'type', 'source', 'format',
'identifier', 'date', 'setSpec'
]
approved_sets = [
'ad_pubs',
'agecon_articles',
'agecon_wp',
'anat_pubs',
'anthro_pubs',
'arch_videos',
'asfn_articles',
'auto_pres',
'ccj_articles',
'cee_pubs',
'chem_mdata',
'chem_pubs',
'cs_pubs',
'cs_sp',
'cwrl_fr',
'dh_articles',
'dh_pres',
'dh_works',
'dissertations',
'ebl',
'ece_articles',
'ece_books',
'ece_confs',
'ece_tr',
'econ_dp',
'econ_pres',
'epse_books',
'epse_confs',
'epse_pubs',
'esh_2014',
'fiaq_pubs',
'fiaq_reports',
'fin_pubs',
'fin_wp',
'for_articles',
'geol_comp',
'geol_pubs',
'gers_pubs',
'gmrc_gc',
'gmrc_nm',
'gs_rp',
'hist_pubs',
'histcw_pp',
'igert_cache',
'igert_reports',
'ijshs_2014',
'im_pubs',
'jcwre',
'kaleidoscope',
'math_aids',
'math_articles',
'math_books',
'math_diss',
'math_grp',
'math_misc',
'math_theses',
'meded_books',
'meded_confs',
'meded_pubs',
'meep_articles',
'micro_pres',
'micro_pubs',
'morris_articles',
'morris_confs',
'morris_surveys',
'music_gradworks',
'ojwed',
'pb_pubs',
'pb_reports',
'phe_pres',
'phe_pubs',
'phys_pubs',
'phys_vids',
'pn_wp',
'pnconfs_2010',
'pnconfs_2011',
'pnconfs_2012',
'ppi_papers',
'ppi_sipolls',
'ppi_statepolls',
'ps_confs',
'ps_dr',
'ps_pubs',
'ps_wp',
'psas_articles',
'psych_diss',
'psych_grp',
'psych_pubs',
'psych_theses',
'reach_posters',
'rehab_pubs',
'safmusiccharts_faculty',
'safmusiccharts_students',
'safmusicpapers_faculty',
'safmusicpapers_students',
'srs_2009',
'theses',
'ucowrconfs_2003',
'ucowrconfs_2004',
'ucowrconfs_2005',
'ucowrconfs_2006',
'ucowrconfs_2007',
'ucowrconfs_2008',
'ucowrconfs_2009',
'ugr_mcnair',
'wed_diss',
'wed_grp',
'wed_theses',
'wrd2011_keynote',
'wrd2011_pres',
'zool_data',
'zool_diss',
'zool_pubs'
]
| 1.890625 | 2 |
app/routes.py | AbdManian/WebDigiLabel | 0 | 12771517 | from flask import render_template, request
from app import app
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
def index():
info = dict(title='DigiLabel')
files = request.files.getlist("file")
for file in files:
print("Content: ", file.filename)
return render_template('index.html', **info)
| 2.671875 | 3 |
bin/find_genes.py | Mxrcon/Savio_alignments_nf | 0 | 12771518 | <reponame>Mxrcon/Savio_alignments_nf
#!/usr/bin/env python3
from Bio import SeqIO
import sys
import os
#basic_args
input_file = sys.argv[1]
gene_name = sys.argv[2]
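# Example invocation (file and gene names are illustrative):
#   python find_genes.py genome_annotation.gb rpoB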
#script
output_name = input_file.split(".")[0]
for seq_record in SeqIO.parse(input_file , 'genbank'):
for feature in seq_record.features:
if feature.type == "CDS" and "gene" in feature.qualifiers:
gene = feature.qualifiers['gene'][0]
if gene_name == gene:
with open(output_name+"_"+gene_name+".fasta", "w") as outfile:
outfile.write(">{0}|{1}\n{2}\n".format(output_name,gene_name,feature.location.extract(seq_record).seq))
| 2.71875 | 3 |
fasp/scripts/FASPScript14.py | STRIDES-Codes/Sample-search-based-on-clinical-phenotypic-and-sample-attributes | 4 | 12771519 | ''' Query Search SRA tables for 1K Genomes data, access files via SRA DRS ids'''
# IMPORTS
import sys
from fasp.search import DiscoverySearchClient
def main(argv):
searchClient = DiscoverySearchClient('https://ga4gh-search-adapter-presto-public.prod.dnastack.com', debug=False)
query = """SELECT s.su_submitter_id, drs_id
FROM thousand_genomes.onek_genomes.ssd_drs s
join thousand_genomes.onek_genomes.sra_drs_files f on f.sample_name = s.su_submitter_id
where filetype = 'bam' and mapped = 'mapped' and sequencing_type ='exome' and population = 'JPT' LIMIT 3"""
searchClient.runQuery(query)
if __name__ == "__main__":
main(sys.argv[1:])
| 2.484375 | 2 |
python_basics/src/2_data_type/test_list.py | YingVickyCao/testPython | 0 | 12771520 | def create_empty_list():
phone_device_types = []
print(phone_device_types)
return
def access_list_item():
phone_device_types = ["IOS", "Android"]
print(phone_device_types) # ['IOS', 'Android']
print(phone_device_types[0])
print(phone_device_types[0].title())
    # -1 = last item (the first one counting from the end)
print(phone_device_types[-1]) # Android
    # -2 = the second one counting from the end
print(phone_device_types[-2])
# print(phone_device_types[-3]) # ERROR: Traceback (most recent call last): IndexError: list index out of range
shopping_name = []
# print(shopping_name[0]) # ERROR:Traceback (most recent call last):IndexError: list index out of range
# print(shopping_name[-1]) # ERROR:Traceback (most recent call last):IndexError: list index out of range
return
def modify_item():
nums = [10, 20, 30]
print(nums) # [10, 20, 30]
# modify item
nums[0] = 1
print(nums) # [1, 20, 30]
return
def add_item():
nums = [10, 20, 30, 40]
# add item
print(nums) # [10, 20, 30, 40]
# insert item before index
nums.insert(1, 50)
print(nums) # [10, 50, 20, 30, 40]
nums.insert(10, 10)
print(nums) # [10, 50, 20, 30, 40, 10]
return
def remove_item():
nums = [1, 2, 3, 4, 5, 6]
print(nums) # [1, 2, 3, 4, 5, 6]
# remove item
# remove item - by index
del nums[0]
print(nums) # [2, 3, 4, 5, 6]
empty_list = []
# del empty_list[2] # ERROR: Traceback (most recent call last): IndexError: list assignment index out of range
# remove item - by pop
pop_num = nums.pop()
print(nums) # [2, 3, 4, 5]
print(pop_num) # 6
pop_num2 = nums.pop(1)
print(nums) # [2, 4, 5]
print(pop_num2) # 3
# empty_list.pop() # ERROR: Traceback (most recent call last):IndexError: pop from empty list
# empty_list.pop(2) # ERROR: Traceback (most recent call last):IndexError: pop from empty list
list2 = [1]
# list2.pop(2) # ERROR: Traceback (most recent call last): IndexError: pop index out of range
# remove item - by value
strings = ['A', 'B', "B", 'C', 'D']
strings.remove('C')
print(strings) # ['A', 'B', 'B', 'D']
# strings.remove('d') # ERROR: Traceback (most recent call last): ValueError: list.remove(x): x not in list
# print(strings)
strings.remove('B') # remove() Only delete the first pointed value. If want to delete all same values, remove them by looping.
print(strings) # ['A', 'B', 'D']
return
def sort_list():
sort_list_by_permanent_order()
sort_list_by_temporary_order()
return
def sort_list_by_permanent_order():
# Permanent order
cars = ["bw", "ya", "abc", "ca"]
    cars.sort() # ascending order, permanent (in-place) sort
print(cars) # ['abc', 'bw', 'ca', 'ya']
print("\n")
languages = ['Java', 'C', "Python"]
    languages.sort(reverse=True) # descending order, permanent (in-place) sort
print(languages) # ['Python', 'Java', 'C']
return
def sort_list_by_temporary_order():
nums = [1, 10, 5, 8]
    num_temp = sorted(nums) # ascending order, temporary sort (returns a new list)
print(num_temp) # [1, 5, 8, 10]
print(nums) # [1, 10, 5, 8]
print("\n")
prices = [100, 5, 10]
    prices_temp = sorted(prices, reverse=True) # descending order, temporary sort (returns a new list)
print(prices_temp) # [100, 10, 5]
print(prices) # [100, 5, 10]
return
def reverse_list():
reverse_list_by_permanent_order()
reverse_list_by_temporary_order()
def reverse_list_by_permanent_order():
stu = ["A", "C", "B"]
    stu.reverse() # reverses the list order (not a sort). Permanent change, but calling reverse() again restores the original order
print(stu) # ['B', 'C', 'A']
stu.reverse()
print(stu) # ['A', 'C', 'B']
return
def reverse_list_by_temporary_order():
money = [5, 10, 8]
    money_temp = list(reversed(money)) # reverses the list order (not a sort), temporarily (returns a new list)
# reversed(money) -> list_reverseiterator object at 0x1057fe450
print(money_temp) # [8, 10, 5]
print(money) # [5, 10, 8]
return
def length_of_list():
names = ["A", "C"]
print(len(names)) # 2
print(len([])) # 0
return
def traversing_list():
score = [90, 94, 98]
for item in score:
print(item)
return
def create_value_list():
create_empty_list()
create_value_list4_use_range()
return
def create_value_list4_use_range():
# range(): start from first param, stop until second param. [)
for value in range(1, 5):
print(value)
# list(range(1, 5)):convert result to a list
nums = list(range(1, 5)) # [1, 2, 3, 4]
print(nums)
# create list with step: [)
even_number = list(range(2, 11, 2))
print(even_number) # [2, 4, 6, 8, 10]
even_number2 = list(range(2, 12, 2))
print(even_number2) # [2, 4, 6, 8, 10]
return
# statistics on a list
def statistical_computing():
digits = [1, 10, 3]
min_digit = min(digits)
print(min_digit) # 1
max_digit = max(digits)
print(max_digit) # 10
sum_of_digits = sum(digits)
print(sum_of_digits) # 14
return
# list comprehension
def list_comprehension():
squares = []
for value in range(1, 5):
# square = value ** 2
# squares.append(square)
squares.append(value ** 2)
print(squares) # [1, 4, 9, 16]
    # A list comprehension merges the for loop and the element-creating code into a single line and appends each new element automatically.
    # First choose a descriptive list name, squares2; then open a square bracket and define the expression that produces the values to store in the list. In this example the expression is value ** 2, which computes the square. Next write a for loop that supplies values to the expression, and close the bracket.
    # In this example the for loop is "for value in range(1, 5)", which supplies the values 1~4 to the expression value ** 2.
    # Why use it? Generating a list with an explicit loop is verbose; a list comprehension builds the same list more concisely.
squares2 = [value ** 2 for value in range(1, 5)]
print(squares2) # [1, 4, 9, 16]
return
# slicing
def segment():
players = ["1", '2', "3", "4", "5", '6']
# [startIndex : endIndex]
print(players[0:2]) # ['1', '2']
print(players[1:4]) # ['2', '3', '4']
print(players[1:10]) # ['2', '3', '4', '5', '6']
# When no start index, [0:
print(players[:2]) # ['1', '2']
# When no end index, :lastIndex]
print(players[1:]) # ['2', '3', '4', '5', '6']
    # negative index: [n-th from the end : last index]
print(players[-3:]) # ['4', '5', '6']
print(players[-10:]) # ['1', '2', '3', '4', '5', '6']
# traversing segment
# 1
# 2
for item in players[:2]:
print(item)
# Copy list
# Copy whole list
    # =[:] : value (shallow) copy
    # work with an independent copy of the list
my_shopping_foods = ["pizza", 'cake', 'coffee']
friend_shopping_foods = my_shopping_foods[:]
print(my_shopping_foods) # ['pizza', 'cake', 'coffee']
print(friend_shopping_foods) # ['pizza', 'cake', 'coffee']
my_shopping_foods.append('water')
friend_shopping_foods.append('ice cream')
print(my_shopping_foods) # ['pizza', 'cake', 'coffee', 'water']
print(friend_shopping_foods) # ['pizza', 'cake', 'coffee', 'ice cream']
    # = : sets a reference
    # both names now point to the same underlying list
my_languages = ['C', "C++", 'Java', "JS"]
friend_languages = my_languages
print(my_languages)
print(friend_languages)
my_languages.append('Python')
friend_languages.append("Excel")
print(my_languages) # ['C', 'C++', 'Java', 'JS', 'Python', 'Excel']
print(friend_languages) # ['C', 'C++', 'Java', 'JS', 'Python', 'Excel']
# Copy segment
return
def test():
# access_list_item()
# modify_item()
# add_item()
# remove_item()
# sort_list()
# reverse_list()
# length_of_list()
# traversing_list()
# create_value_list()
# statistical_computing()
# list_comprehension()
segment()
return
test()
| 4.3125 | 4 |
__init__.py | FangmingXie/mctseq_over_under_splitting | 1 | 12771521 | <reponame>FangmingXie/mctseq_over_under_splitting
import time
import logging
import glob
import os
import numpy as np
import pandas as pd
import collections
# data structures
GC_matrix = collections.namedtuple('GC_matrix', ['gene', 'cell', 'data'])
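# Illustrative construction (toy values, not data from the package itself):
#   gc = GC_matrix(gene=np.array(['geneA', 'geneB']),
#                  cell=np.array(['cell1']),
#                  data=np.zeros((2, 1)))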
| 1.882813 | 2 |
urwidDisplay.py | abid-mujtaba/fetchheaders | 1 | 12771522 | <filename>urwidDisplay.py
#!/usr/bin/python
#
# Copyright 2012 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Author: <NAME>
# Email: <EMAIL>
#
# Start Date: Jan. 13, 2013
# Last Revised: Jan. 13, 2013
#
# This script implements a urwid based class that will be used to display the email header information. The display will allow for mutt-style navigation (using j/k keys) and enable the user to mark messages for deletion.
class urwidDisplay() :
'''
    This class acts as a wrapper around urwid objects and in fact will contain the urwid Main Loop as well. Most of the active parts will be carried out in the __init__ method so that the very act of creating an object of this class will cause the email header display using urwid to be executed. We will remain in the __init__ method until the loop ends, at which point the program will terminate as well. To that end the urwid event handler functions will be members of this class as well.
'''
def __init__( self, servers, settings ) :
'''
This is the actionable part of the class and is responsible for initializing the class objects, implementing the urwid display and executing the main loop. 'servers' is an object which contains all the necessary information for logging in to the email accounts and extracting headers.
settings: <DIC> containing the global settings associated with the program
'''
# Import the necessary modules and functions:
try:
import urwid
except ImportError:
print("urwid module missing. Try: pip install urwid")
import sys
sys.exit(1)
from miscClasses import threadedExec # This function implements email account access using threads
self.settings = settings # Store the settings (mostly global for the program) locally in a dictionary
        self.servers = servers # This wealth of information will come in handy when we delete emails
        # Define the palette that will be used by urwid. Note that it is defined as a member of the urwidDisplay class, as all the urwid objects will be
normal_bg_color = 'black' # Here we define the default normal and focus state bg colors for the header lines displayed
focus_bg_color = 'light blue'
self.palette = [
( 'title', 'yellow', 'dark blue' ),
( 'account', 'light red', normal_bg_color ),
( 'bw', 'white', normal_bg_color ),
( 'flag', 'dark green', normal_bg_color ), # We define the normal state color scheme for the various parts of the header
( 'date', 'brown', normal_bg_color ),
( 'from', 'dark cyan', normal_bg_color ),
( 'subject', 'dark green', normal_bg_color ),
( 'subjectSeen', 'brown', normal_bg_color ),
# We define the 'focus' state color scheme for various parts of the header. Note the 'F_' at the beginning of each name
( 'F_bw', 'white', focus_bg_color ) , # Black and White text when focussed
( 'F_flag', 'light green', focus_bg_color ),
( 'F_date', 'yellow', focus_bg_color ),
( 'F_from', 'light cyan', focus_bg_color ),
( 'F_subject', 'light green', focus_bg_color ),
( 'F_subjectSeen', 'yellow', focus_bg_color ),
# We define the normal state flagged for Deletion scheme for the header. Note the 'D_' at the beginning of each name
( 'D_bw', 'dark red', normal_bg_color ),
( 'D_flag', 'dark red', normal_bg_color ),
( 'D_date', 'dark red', normal_bg_color ),
( 'D_from', 'dark red', normal_bg_color ),
( 'D_subject', 'dark red', normal_bg_color ),
( 'D_subjectSeen', 'dark red', normal_bg_color ),
# We define the focus state flagged for Deletion scheme for the header. Note the 'DF_' at the beginning of each name.
( 'DF_bw', 'dark red', focus_bg_color ),
( 'DF_flag', 'dark red', focus_bg_color ),
( 'DF_date', 'dark red', focus_bg_color ),
( 'DF_from', 'dark red', focus_bg_color ),
( 'DF_subject', 'dark red', focus_bg_color ),
( 'DF_subjectSeen', 'dark red', focus_bg_color ) ]
self.title = urwid.AttrMap( urwid.Text( " FetchHeaders q: Quit a: Abort d: Delete u: UnDelete j: Down k: Up" ), 'title' )
self.div = urwid.Divider()
self.titlePile = urwid.Pile( [ self.title, self.div ] )
self.emails = [] # This is a list which will contain the emails whose headers have been displayed. We will use it when shifting focus and marking for deletion.
        self.focus = -1 # Initially no header has focus. This is denoted by the value -1. 0 will denote the first header corresponding to self.emails[0].
self.List = [] # This is the list of objects that will be used to construct the main listbox that displays all email headers and auxiliary information.
# We will now extract header information from each account and use it to construct various objects. While doing so we must keep in mind that when focus shifts the objects must be re-drawn explicitly. This can be handled by constructing the lines every time it is required, using separate functions to handle the construction by simply passing them the same information
for out in threadedExec( servers, self.settings[ 'maxThreads' ] ) : # This calls the threaded processed to extract information and return it in an iterable queue
if out.error: # out.error is True if an Exception is raised while it is being calculated. In such a case we display an error line
account = urwid.Text( ('account', ' ' + out.settings[ 'name' ] + ':' ) )
error = urwid.Text(('bw', 'Error!'))
accountLine = urwid.Columns( [('fixed', 13, account), error ])
self.List += [ accountLine ]
else:
# Construct account line widget
account = urwid.Text( ( 'account', ' ' + out.settings[ 'name' ] + ':' ) )
if out.settings[ 'showNums' ] : # Numbers are supposed to displayed after the account name
numbers = urwid.Text( ( 'bw', '( total: ' + str( out.numAll ) + ' | unseen: ' + str( out.numUnseen ) + ' )' ) )
accountLine = urwid.Columns( [ ( 'fixed', 13, account ), numbers ] )
else : # Don't display numbers
accountLine = urwid.Columns( [ ( 'fixed', 13, account ) ] )
self.List += [ accountLine, self.div ] # First line displays account name and number of messages
# We now construct and display the email headers
for ii in range( len( out.emails ) ) :
email = out.emails[ ii ]
email.account = out.settings[ 'name' ] # Store name of account associated with each email
email.Delete = False # Boolean Flag for tracking if email has to be deleted.
email.serial = ii + 1 # Serial Number associated with this email
email.numDigits = out.numDigits # No. of digits for displaying serial number, calculated by analyzing the number of emails for this particular account
email.listPos = len( self.List ) # Store the position of the email header urwid object (urwid.Columns) in self.List. Will need it for focusing or deletion.
self.emails.append( email ) # Add the displayed email to the self.emails list
line = self.constructLine( email, focus = False )
self.List.append( line ) # Call constructLine to create header line using data in 'email' object. ii + 1 is serial number
self.List += [ self.div, self.div ] # Add two empty lines after account ends
self.total = len( self.emails ) # Total no. of emails being displayed
# All account information has been input and the urwid display is almost ready:
self.listbox = urwid.ListBox( self.List )
self.frame = urwid.Frame( self.listbox, header = self.titlePile ) # By using a frame we ensure that the top title doesn't move when we scroll through the listbox
self.loop = urwid.MainLoop( self.frame, self.palette, unhandled_input = self.handler )
# Now we run the main loop:
self.loop.run()
def handler( self, key ) :
'''
This is the input handler. This takes unprocessed key presses from urwid and translates them in to the appropriate action.
'''
import urwid
if key in ( 'a', 'A' ) : # Exit loop without making any changes (NO Deletions) when the 'A' key is pressed (in any case)
raise urwid.ExitMainLoop()
if key in ( 'j', 'J' ) : # This pushes focus down
self.focus += 1
self.shiftFocus( self.focus - 1 ) # Call the shiftFocus() method to implement the change in focus. We need to pass it the last focus so that it can be unfocussed.
if key in ( 'k', 'K' ) : # This pushes the focus up
self.focus -= 1
self.shiftFocus( self.focus + 1 )
if key in ( 'd', 'D' ) : # Email in focus must be flagged for deletion
self.emails[ self.focus ].Delete = True # Set Delete flag on focused email
# Shift focus forward (down) by one to move to the next email
self.focus += 1
            self.shiftFocus( self.focus - 1 ) # Call shiftFocus() to implement the change in display status; the previous position is passed so the old line is redrawn with its deletion flag shown.
if key in ( 'u', 'U' ) :
self.emails[ self.focus ].Delete = False # UnSet Delete flag on focused email
# Shift focus forward (down) by one to move to the next email
self.focus += 1
self.shiftFocus( self.focus - 1 )
if key in ( 'q', 'Q' ) :
self.titlePile[0].set_text( " FetchHeaders - Deleting Emails and Exiting." ) # Change title to indicate process.
self.loop.draw_screen() # Force redraw so that the title changes
self.quit() # Call the quit() method/function to delete flagged emails and exit the program
def quit( self ) :
'''
This method/function deletes all emails that have been flagged for deletion and then exits the program.
'''
import urwid
# The first step is to scan self.emails and find the emails flagged for deletion. We collect them in a single data structure:
delete = {}
for email in self.emails :
if email.Delete : # Email is marked for deletion
if not email.account in delete.keys() : # Checking if the account exists in the 'delete' dictionary as a key. If not it must be created as an empty list
delete[ email.account ] = [] # Empty list created to hold uids of emails flagged for deletion
delete[ email.account ].append( email.uid )
# If any emails have been specified for deletion we continue:
        if delete : # Only True if the <DIC> is non-empty
# Now we delete the specified emails by logging in to the various accounts in a threaded fashion:
from Queue import Queue
inQueue = Queue()
# Populate Queue with task data
for name in delete.keys() :
inQueue.put( { 'account': self.servers[ name ], 'listUIDs': delete[ name ] } ) # Add <DIC> containing account settings and UIDs of emails to be deleted.
# Create a number of threads to parallelize the task:
from miscClasses import delWorker
workers = [ delWorker( inQueue ) for ii in range( self.settings[ 'maxThreads' ] ) ]
for worker in workers :
worker.start() # Begin execution of each thread
inQueue.join() # Pause program execution here until all tasks in inQueue are complete
raise urwid.ExitMainLoop()
def shiftFocus( self, oldFocus ) :
'''
This method/function is called whenever the focus shifts. It implements said change.
'''
# The first step is to check whether the focus is out of bounds or not.
if self.focus < 0 : self.focus = self.total - 1 # Treat emails in a circular data structure. We set focus to the last email.
if self.focus >= self.total : self.focus = 0 # Move around the circle and set focus to first email.
# First we unfocus the previously focussed email, if there is one.
if oldFocus != None : # If 'None' has been passed in the focus hasn't changed position, for instance if the email is to be marked for deletion.
email = self.emails[ oldFocus ]
self.List[ email.listPos ] = self.constructLine( email, focus = False )
# Now implement change in focus.
email = self.emails[ self.focus ] # We select the email object associated with the new focus
self.List[ email.listPos ] = self.constructLine( email, focus = True )
# Finally change listbox focus so that the listbox scrolls properly with our scrolling:
self.listbox.set_focus( email.listPos )
def constructLine( self, email, focus = False ) :
'''
This function takes the 'email' object and a single flag and uses them to construct a urwid.Column object representing the correctly formatted header line for the display. This is stored in the listbox for displaying.
serialNum: An integer specifying the serial number associated with the email in the list of emails when it is displayed.
numDigits: Number of digits for displaying the serial number. An account level value that has already been calculated.
self.settings: A Dictionary containing the following settings.
showUnseen: A boolean flag. Global setting. When true indicates that only unseen messages are to be displayed.
showFlags: A boolean flag. Gobal setting. When true indicates the flags are to displayed
focus: A boolean flag. When True indicates that the line is in focus and so the coloring scheme needs to be changed.
'''
import urwid
from miscClasses import strWidth as sW
if focus : pre = 'F_' # This string determines which color from the palette is used: normal of focus scheme, flagged for deletion or not.
else : pre = ''
if email.Delete :
if focus: pre = 'DF_' # Email is both flagged for deletion and in focus
else : pre = 'D_' # Email is flagged for deletion
date = urwid.Text( ( pre + 'date', sW( email.Date, 17 ) ) )
From = urwid.Text( ( pre + 'from', sW( email.From, 30 ) ) )
serial = urwid.Text( ( pre + 'bw', sW( str( email.serial ), email.numDigits, align = '>' ) ) )
if not email.Seen : # If email is unseen then:
subject = urwid.Text( ( pre + 'subject', sW( email.Subject, 120 ) ) )
else:
subject = urwid.Text( ( pre + 'subjectSeen', sW( email.Subject, 120 ) ) )
if self.settings[ 'showFlags' ] : # Flags are to be displayed
if email.Seen :
if email.Delete : ch = " D "
else : ch = " "
else :
if email.Delete : ch = " ND"
else : ch = " N "
sep = [ ('fixed', 2, urwid.Text(( pre + 'bw', " [" ))), ('fixed', 3, urwid.Text(( pre + 'flag', ch ))), ('fixed', 4, urwid.Text(( pre + 'bw', "] " ))) ]
else :
sep = [ ( 'fixed', 3, urwid.Text(( pre + 'bw', ". " )) ) ]
lineList = [ ('fixed', email.numDigits, serial) ] + sep + [ ('fixed', 21, date ), ('fixed', 34, From), subject ]
line = urwid.AttrMap( urwid.Columns( lineList ), pre + 'bw' ) # Applying the AttrMap here ensures the whole line gets the same background color
return line # Return the constructed line
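# Hedged usage sketch: constructing the object runs the urwid main loop until the user quits or
# aborts. 'servers' and 'settings' are assembled elsewhere in fetchheaders; from the code above,
# 'settings' is expected to carry at least 'maxThreads' and 'showFlags'.
#
#   urwidDisplay( servers, settings )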
| 2.828125 | 3 |
base/migrations/0002_auto_20210117_0922.py | shah-deep/Smart-Guidance | 2 | 12771523 | <gh_stars>1-10
# Generated by Django 3.1.2 on 2021-01-17 09:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='topic',
name='entry',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='topic',
name='key',
field=models.CharField(blank=True, max_length=20, null=True),
),
]
| 1.75 | 2 |
weather/views.py | RobMilinski/WeatherProj | 0 | 12771524 | from django.shortcuts import render
import urllib.request
import json
from datetime import datetime
from .city_weather import CityWeather
#locations given by Uni assignment
loc1_details = CityWeather("Lake District National Park", "54.4609", "-3.0886")
loc2_details = CityWeather("Corfe Castle", "50.6395", "-2.0566")
loc3_details = CityWeather("The Cotswolds", "51.8330", "-1.8433")
loc4_details = CityWeather("Cambridge", "52.2053", "0.1218")
loc5_details = CityWeather("Bristol", "51.4545", "-2.5879")
loc6_details = CityWeather("Oxford", "51.7520", "-1.2577")
loc7_details = CityWeather("Norwich", "52.6309", "1.2974")
loc8_details = CityWeather("Stonehenge", "51.1789", "-1.8262")
loc9_details = CityWeather("Watergate Bay", "50.4429", "-5.0553")
loc10_details = CityWeather("Birmingham", "52.4862", "-1.8904")
def get_displayed_cities_weather(cities_list):
#for each location in list, run method
for city in cities_list:
city.get_city_weather()
def weatherapp(request):
#list of assigned cities, user assigned to be inserted later in position 0
displayed_cities = [loc1_details, loc2_details, loc3_details, loc4_details, loc5_details, loc6_details, loc7_details, loc8_details, loc9_details, loc10_details]
if request.method == "POST":
#add user defined location to cities list
select_box_json = request.POST['cityselectbox']
select_city = None
if select_box_json != '':
select_city = json.loads(select_box_json)
if request.POST['latitude'] != '' and request.POST['longitude'] != '':
            #if user inputs custom lat/lon
input_city = CityWeather(request.POST['city'], request.POST['latitude'], request.POST['longitude'])
displayed_cities.insert(0, input_city)
elif select_city != None:
#if user selects city from list
input_city = CityWeather(select_city['city'], select_city['lat'], select_city['lon'])
displayed_cities.insert(0, input_city)
#pulls weather information for cities in list
get_displayed_cities_weather(displayed_cities)
#displays updated cities weather information on weatherapp template
return render(request, 'weather/weatherapp.html', {'displayed_cities': displayed_cities})
| 2.640625 | 3 |
answers/api/views/answers_viewsets.py | NicolasMuras/Lookdaluv | 1 | 12771525 | <filename>answers/api/views/answers_viewsets.py<gh_stars>1-10
from answers.api.serializers.answers_serializers import AnswerSerializer
from modules.api.views.general_views import GeneralViewSet
class AnswerViewSet(GeneralViewSet):
serializer_class = AnswerSerializer
model_to_format = AnswerSerializer.Meta.model
| 1.359375 | 1 |
case/api_test/conftest.py | lzpsgh/AscTrio | 5 | 12771526 | # import pytest
#
# from case.conftest import api_data
#
#
# @pytest.fixture(scope="function")
# def testcase_data(request):
# testcase_name = request.function.__name__
# return api_data.get(testcase_name)
| 1.984375 | 2 |
digsby/src/tests/testgui/uberdemos/CapabilitiesBarDemo.py | ifwe/digsby | 35 | 12771527 | from DemoApp import App
import wx
import gettext
gettext.install('Digsby', './locale', unicode=True)
from gui.capabilitiesbar import CapabilitiesBar
class Frame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self,None,title='Simple Menu Test')
self.panel=wx.Panel(self)
self.Bind(wx.EVT_CLOSE, lambda e: wx.GetApp().ExitMainLoop())
self.panel.Sizer=wx.BoxSizer(wx.VERTICAL)
self.capbar=CapabilitiesBar(self.panel)
self.panel.Sizer.Add(self.capbar,0,wx.EXPAND)
b1=wx.Button(self.panel,-1,'Hide Capabilities')
b2=wx.Button(self.panel,-1,'Hide To/From')
b3=wx.Button(self.panel,-1,'Hide Compose')
b1.Bind(wx.EVT_BUTTON,lambda e: self.capbar.ShowCapabilities(not self.capbar.cbar.IsShown()))
b2.Bind(wx.EVT_BUTTON,lambda e: self.capbar.ShowToFrom(not self.capbar.tfbar.IsShown()))
b3.Bind(wx.EVT_BUTTON, lambda e: self.capbar.ShowComposeButton(not self.capbar.bcompose.IsShown()))
self.panel.Sizer.Add(b1)
self.panel.Sizer.Add(b2)
self.panel.Sizer.Add(b3)
self.capbar.bsms.Bind(wx.EVT_BUTTON,self.OnButton)
self.capbar.binfo.Bind(wx.EVT_BUTTON,lambda e: self.capbar.bsms.SendButtonEvent())
def OnButton(self,event):
print "button clicked"
def Go():
f=Frame()
f.Show(True)
if __name__=='__main__':
a = App( Go )
from util import profile
profile(a.MainLoop)
| 2.375 | 2 |
dmt/cechmate_wrap.py | arksch/fuzzy-eureka | 0 | 12771528 | """
Parser for cechmate format of simplicial complex
"""
from itertools import chain, combinations
from cechmate import Cech, Rips, Alpha
import numpy as np
from scipy.sparse import coo_matrix
from dmt.morse_complex import MorseComplex
from dmt.perseus import save_points_perseus_brips, load_points_perseus_brips
def parse_cechmate(cechmate_complex):
""" Parses the Cechmate format for simplicial complexes
:param cechmate_complex: [(simplex_as_index_tuple, filtration)]
:return dict 'cell_dimensions': np.ndarray, 'filtration': np.ndarray,
'boundary_matrix': scipy.sparse.coo_matrix, 'cechmate_complex': cechmate complex for testing
:Example:
>>> cechmate_cplx = [([0], 0), ([1], 0), ([2], 0), ((0, 1, 2), 1.760962625882297), ((1, 2), 1.760962625882297), ((0, 2), 0.30122587679897417), ((0, 1), 0.2489387964292784)]
    >>> MorseComplex(**parse_cechmate(cechmate_cplx))
"""
simplices, filtration = zip(*cechmate_complex)
simplices = list(map(tuple, simplices)) # All should be tuples, so they can be in a dict
size = len(simplices)
index_map = {splx: ix for splx, ix in zip(simplices, range(size))}
columns_rows = chain.from_iterable([[(index_map[splx], index_map[bdry])
for bdry in combinations(splx, len(splx) - 1) if bdry]
for splx in simplices])
columns, rows = zip(*columns_rows)
columns, rows = list(columns), list(rows)
data = [True] * len(columns)
boundary = coo_matrix((data, (rows, columns)), shape=(size, size), dtype=bool)
filtration = list(filtration)
cell_dimensions = np.array(list(map(len, simplices))) - 1
return dict(boundary_matrix=boundary,
cell_dimensions=cell_dimensions,
filtration=filtration,
cechmate_complex=cechmate_complex)
class VietorisRips(MorseComplex):
default_max_dim = 3
def __init__(self, points, max_dimension=default_max_dim):
points = np.array(points)
self.max_dimension = max_dimension
super().__init__(points=points, **parse_cechmate(Rips(maxdim=self.max_dimension).build(points)))
def save_brips(self, filepath):
save_points_perseus_brips(filepath, self.points)
@classmethod
def load_brips(cls, filepath, max_dimension=default_max_dim):
return cls(load_points_perseus_brips(filepath), max_dimension)
class CechComplex(MorseComplex):
default_max_dim = 3
def __init__(self, points, max_dimension=default_max_dim):
points = np.array(points, dtype=float)
self.max_dimension = max_dimension
super().__init__(points=points, **parse_cechmate(Cech(maxdim=self.max_dimension).build(points)))
class AlphaComplex(MorseComplex):
def __init__(self, points):
points = np.array(points, dtype=float)
super().__init__(points=points, **parse_cechmate(Alpha().build(points)))
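# Illustrative usage sketch (the random 2-d point cloud is a made-up example):
#   pts = np.random.rand(20, 2)
#   alpha = AlphaComplex(pts)
#   rips = VietorisRips(pts, max_dimension=2)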
| 2.34375 | 2 |
venv/Lib/site-packages/flask_codemirror/fields.py | natemellendorf/configpy | 4 | 12771529 | <filename>venv/Lib/site-packages/flask_codemirror/fields.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Flask Codemirror Field
~~~~~~~~~~~~~~~~~~~~~~
Import it using
`from flask.ext.codemirror.fields import CodeMirrorField`
It works exactly like a `wtforms.fields.TextAreaField`
"""
from __future__ import print_function
from flask_codemirror.widgets import CodeMirrorWidget
try:
from wtforms.fields import TextAreaField
except ImportError as exc:
print('WTForms is required by Flask-Codemirror')
raise exc
__author__ = '<NAME>'
class CodeMirrorField(TextAreaField):
"""Code Mirror Field
A TextAreaField with a custom widget
:param language: CodeMirror mode
:param config: CodeMirror config
"""
def __init__(self, label='', validators=None, language=None,
config=None, **kwargs):
widget = CodeMirrorWidget(language, config)
super(CodeMirrorField, self).__init__(label=label,
validators=validators,
widget=widget,
**kwargs)
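# Hedged usage sketch (the form below is illustrative and assumes Flask-WTF is installed):
#
#   from flask_wtf import FlaskForm
#
#   class SnippetForm(FlaskForm):
#       source = CodeMirrorField(language='python',
#                                config={'lineNumbers': 'true'})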
| 2.6875 | 3 |
vendor/deadline/custom/plugins/MayaPype/DeadlineMayaBatchFunctions.py | kalisp/pype-setup | 5 | 12771530 | from __future__ import print_function
import json
import os
import re
import subprocess
import maya.cmds
import maya.mel
# The version that Redshift fixed the render layer render setup override locking issue
# Prior versions will need to use the workaround in the unlockRenderSetupOverrides function
REDSHIFT_RENDER_SETUP_FIX_VERSION = (2, 5, 64)
def getCurrentRenderLayer():
return maya.cmds.editRenderLayerGlobals( query=True, currentRenderLayer=True )
# A method mimicing the built-in mel function: 'renderLayerDisplayName', but first tries to see if it exists
def getRenderLayerDisplayName( layer_name ):
if maya.mel.eval( 'exists renderLayerDisplayName' ):
layer_name = maya.mel.eval( 'renderLayerDisplayName ' + layer_name )
else:
# renderLayerDisplayName doesn't exist, so we try to do it ourselves
if layer_name == 'masterLayer':
return layer_name
if maya.cmds.objExists(layer_name) and maya.cmds.nodeType( layer_name ) == 'renderLayer':
# Display name for default render layer
if maya.cmds.getAttr( layer_name + '.identification' ) == 0:
return 'masterLayer'
# If Render Setup is used the corresponding Render Setup layer name should be used instead of the legacy render layer name.
result = maya.cmds.listConnections( layer_name + '.msg', type='renderSetupLayer' )
if result:
return result[0]
return layer_name
# remove_overrides_json_string is a JSON string mapping a node name to the list of attributes we want to unlock
# e.g. remove_overrides_json_string = '{ "defaultRenderGlobals": [ "animation", "startFrame", "endFrame" ] }'
def unlockRenderSetupOverrides( remove_overrides_json_string ):
try:
# Ensure we're in a version that HAS render setups
import maya.app.renderSetup.model.renderSetup as renderSetup
except ImportError:
return
# Ensure that the scene is actively using render setups and not the legacy layers
if not maya.mel.eval( 'exists mayaHasRenderSetup' ) or not maya.mel.eval( 'mayaHasRenderSetup();' ):
return
# If the version of Redshift has the bug fix, bypass the overrides
if not redshiftRequiresWorkaround():
return
remove_overrides = json.loads( remove_overrides_json_string )
render_setup = renderSetup.instance()
layers = render_setup.getRenderLayers()
layers_to_unlock = [ layer for layer in layers if layer.name() != 'defaultRenderLayer' ]
for render_layer in layers_to_unlock:
print('Disabling Render Setup Overrides in "%s"' % render_layer.name())
for collection in render_layer.getCollections():
if type(collection) == maya.app.renderSetup.model.collection.RenderSettingsCollection:
for override in collection.getOverrides():
if override.targetNodeName() in remove_overrides and override.attributeName() in remove_overrides[ override.targetNodeName() ]:
print( ' Disabling Override: %s.%s' % ( override.targetNodeName(), override.attributeName() ) )
override.setSelfEnabled( False )
def redshiftRequiresWorkaround():
# Get the version of Redshift
redshiftVersion = maya.cmds.pluginInfo( 'redshift4maya', query=True, version=True )
redshiftVersion = tuple( int(version) for version in redshiftVersion.split('.') )
# Check if the Redshift version is prior to the bug fix
return redshiftVersion < REDSHIFT_RENDER_SETUP_FIX_VERSION
def performArnoldPathmapping( startFrame, endFrame, tempLocation=None ):
"""
Performs pathmapping on all arnold standin files that are need for the current task
:param startFrame: Start frame of the task
:param endFrame: End frame of the task
:param tempLocation: The temporary location where all pathmapped files will be copied to. Only needs to be provided the first time this function is called.
:return: Nothing
"""
if tempLocation:
performArnoldPathmapping.tempLocation = tempLocation
else:
if not performArnoldPathmapping.tempLocation:
raise ValueError( "The first call made to performArnoldPathmapping must provided a tempLocation" )
    # A simple regex for finding frame-number padding
frameRE = re.compile( r'#+' )
# Define a function that will be used when looping to replace padding with a 0 padded string.
def __replaceHashesWithZeroPaddedFrame( frameNum, origFileName ):
return frameRE.sub( lambda matchObj: str( frameNum ).zfill( len(matchObj.group(0)) ), origFileName )
standInObjects = maya.cmds.ls( type="aiStandIn" )
for standIn in standInObjects:
try:
# If we have already seen this node before then grab the settings that we need
origDir, origFileName = performArnoldPathmapping.originalProperties[ standIn ]
except KeyError:
            # If we have not seen this node before then store its original path and update the path in the node to where we will be pathmapping the file.
standinFile = maya.cmds.getAttr( standIn + ".dso" )
if not standinFile or os.path.splitext( standinFile )[ 1 ].lower() != ".ass":
# If the standinFile isn't set or isn't .ass file then we cannot pathmap it.
continue
origDir, origFileName = os.path.split( standinFile )
standinTempLocation = os.path.join( performArnoldPathmapping.tempLocation, standIn )
maya.cmds.setAttr( "%s.dso" % standIn, os.path.join( standinTempLocation, origFileName ), type="string" )
#Create the Temp directory the first time we see a new standin
if not os.path.isdir( standinTempLocation ):
os.makedirs( standinTempLocation )
performArnoldPathmapping.originalProperties[ standIn ] = (origDir, origFileName)
for frame in range( startFrame, endFrame + 1 ):
# evaluate the frame that the node is using (Normally it will be the same as the scene but it can be different)
evalFrame = maya.cmds.getAttr( "%s.frameNumber" % standIn, time=frame )
fileNameWithFrame = __replaceHashesWithZeroPaddedFrame( evalFrame, origFileName )
# If we have already mapped this file then continue.
if not ( standIn, fileNameWithFrame ) in performArnoldPathmapping.mappedFiles:
#Perform pathmapping
runPathmappingOnFile(
os.path.join( origDir, fileNameWithFrame ),
os.path.join( performArnoldPathmapping.tempLocation, standIn, fileNameWithFrame )
)
performArnoldPathmapping.mappedFiles.add( ( standIn, fileNameWithFrame ) )
performArnoldPathmapping.tempLocation = ""
#State property which contains mappings of standin objects to their original fileproperties
performArnoldPathmapping.originalProperties = {}
#State property which contains unique identifier for each file that we have already mapped in the form of ( standin, filename )
performArnoldPathmapping.mappedFiles=set()
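# Illustrative call sketch (paths and frame ranges are assumptions): the first call must supply
# tempLocation, later calls in the same session may omit it.
#   performArnoldPathmapping( 1001, 1010, tempLocation=r"C:/temp/pathmapped" )
#   performArnoldPathmapping( 1011, 1020 )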
def runPathmappingOnFile( originalLocation, pathmappedLocation ):
print( 'Running PathMapping on "%s" and copying to "%s"' % (originalLocation, pathmappedLocation) )
arguments = [ "-CheckPathMappingInFile", originalLocation, pathmappedLocation ]
print( CallDeadlineCommand( arguments ) )
def GetDeadlineCommand():
deadlineBin = ""
try:
deadlineBin = os.environ['DEADLINE_PATH']
except KeyError:
        # A KeyError means DEADLINE_PATH is not set; however, deadlinecommand may still be on the PATH, or on OSX its location may be stored in the file /Users/Shared/Thinkbox/DEADLINE_PATH
pass
# On OSX, we look for the DEADLINE_PATH file if the environment variable does not exist.
if deadlineBin == "" and os.path.exists( "/Users/Shared/Thinkbox/DEADLINE_PATH" ):
with open( "/Users/Shared/Thinkbox/DEADLINE_PATH" ) as f:
deadlineBin = f.read().strip()
deadlineCommand = os.path.join(deadlineBin, "deadlinecommand")
return deadlineCommand
def CallDeadlineCommand(arguments, hideWindow=True):
deadlineCommand = GetDeadlineCommand()
startupinfo = None
creationflags = 0
if os.name == 'nt':
if hideWindow:
# Python 2.6 has subprocess.STARTF_USESHOWWINDOW, and Python 2.7 has subprocess._subprocess.STARTF_USESHOWWINDOW, so check for both.
if hasattr( subprocess, '_subprocess' ) and hasattr( subprocess._subprocess, 'STARTF_USESHOWWINDOW' ):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW
elif hasattr( subprocess, 'STARTF_USESHOWWINDOW' ):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
# still show top-level windows, but don't show a console window
CREATE_NO_WINDOW = 0x08000000 #MSDN process creation flag
creationflags = CREATE_NO_WINDOW
arguments.insert( 0, deadlineCommand )
    # Specifying PIPE for all handles to work around a Python bug on Windows. The unused handles are then closed immediately afterwards.
proc = subprocess.Popen(arguments, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, startupinfo=startupinfo, creationflags=creationflags)
output, errors = proc.communicate()
return output
def OutputPluginVersions():
print("================== PLUGINS ===================\n")
plugins = sorted(maya.cmds.pluginInfo(query=True, listPlugins=True), key=lambda p: p.lower())
for plugin in plugins:
version = maya.cmds.pluginInfo(plugin, query=True, version=True)
print("%s (v%s)" % (plugin, version))
print("==============================================\n")
def ForceLoadPlugins():
"""
Force load an explicit set of plug-ins with known issues. There are bugs in Maya where these plug-ins are not
automatically loaded when required in a scene.
When a scene contains an Alembic reference node (backed by an external .abc file), Maya does not embed "requires"
statements into the scene to indicate that the "AbcImport" and "fbxmaya" plug-ins are dependencies of the scene.
This can be changed for the current Maya session with the following MEL commands:
pluginInfo -edit -writeRequires AbcImport
pluginInfo -edit -writeRequires fbxmaya
However, there is a secondary bug where the "requires" statements are inserted in the scene after already trying to
load the references.
Our work-around is to force loading of these plug-ins always before loading the job scene. Both plugins ship with
Maya and are fairly lightweight in size.
"""
PLUGINS_TO_LOAD = (
'AbcImport', # For Maya 2017 on Windows this is 5MB and takes 15 ms to load
'fbxmaya' # For Maya 2017 on Windows this is 12MB and takes 141ms to load
)
for plugin in PLUGINS_TO_LOAD:
plugin_loaded = maya.cmds.pluginInfo(plugin, query=True, loaded=True)
if not plugin_loaded:
try:
print( "Loading %s..." % plugin, end="" )
maya.cmds.loadPlugin( plugin )
except RuntimeError as e:
# Maya raises this exception when it cannot find the plugin. The message is formatted as:
#
# Plug-in, "pluginName", was not found on MAYA_PLUG_IN_PATH
#
# This seems reasonable enough to forward on to the user. The try-except only serves the purpose of
# continuing to attempt additional plug-ins. This is a best-effort work-around.
print( 'Error: %s' % e)
else:
print( "ok" ) | 2.0625 | 2 |
mmseg/distillation/distillers/segmentation_distiller.py | pppppM/mmsegmentation-distiller | 35 | 12771531 | <reponame>pppppM/mmsegmentation-distiller
import torch.nn as nn
import torch.nn.functional as F
import torch
from mmseg.core import add_prefix
from mmseg.ops import resize
from mmcv.runner import load_checkpoint
from ..builder import DISTILLER,build_distill_loss
from mmseg.models import build_segmentor
from mmseg.models.segmentors.base import BaseSegmentor
@DISTILLER.register_module()
class SegmentationDistiller(BaseSegmentor):
"""Base distiller for segmentors.
It typically consists of teacher_model and student_model.
"""
def __init__(self,
teacher_cfg,
student_cfg,
distill_cfg=None,
teacher_pretrained=None,):
super(SegmentationDistiller, self).__init__()
self.teacher = build_segmentor(teacher_cfg.model,
train_cfg=teacher_cfg.get('train_cfg'),
test_cfg=teacher_cfg.get('test_cfg'))
self.init_weights_teacher(teacher_pretrained)
self.teacher.eval()
self.student= build_segmentor(student_cfg.model,
train_cfg=student_cfg.get('train_cfg'),
test_cfg=student_cfg.get('test_cfg'))
self.distill_losses = nn.ModuleDict()
self.distill_cfg = distill_cfg
student_modules = dict(self.student.named_modules())
teacher_modules = dict(self.teacher.named_modules())
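        # Register forward hooks that capture the named intermediate feature maps of the student
        # and teacher into module buffers, so forward_train can read them for the distillation losses.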
        def register_hooks(student_module, teacher_module):
def hook_teacher_forward(module, input, output):
self.register_buffer(teacher_module,output)
def hook_student_forward(module, input, output):
self.register_buffer( student_module,output )
return hook_teacher_forward,hook_student_forward
for item_loc in distill_cfg:
student_module = 'student_' + item_loc.student_module.replace('.','_')
teacher_module = 'teacher_' + item_loc.teacher_module.replace('.','_')
self.register_buffer(student_module,None)
self.register_buffer(teacher_module,None)
            hook_teacher_forward, hook_student_forward = register_hooks(student_module, teacher_module)
teacher_modules[item_loc.teacher_module].register_forward_hook(hook_teacher_forward)
student_modules[item_loc.student_module].register_forward_hook(hook_student_forward)
for item_loss in item_loc.methods:
loss_name = item_loss.name
self.distill_losses[loss_name] = build_distill_loss(item_loss)
def base_parameters(self):
return nn.ModuleList([self.student,self.distill_losses])
def discriminator_parameters(self):
return self.discriminator
def init_weights_teacher(self, path=None):
"""Load the pretrained model in teacher detector.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
checkpoint = load_checkpoint(self.teacher, path, map_location='cpu')
def forward_train(self, img, img_metas, gt_semantic_seg):
"""Forward function for training.
Args:
img (Tensor): Input images.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
gt_semantic_seg (Tensor): Semantic segmentation masks
used if the architecture supports semantic segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
with torch.no_grad():
self.teacher.eval()
teacher_loss = self.teacher.forward_train(img, img_metas, gt_semantic_seg)
student_loss = self.student.forward_train(img, img_metas, gt_semantic_seg)
buffer_dict = dict(self.named_buffers())
for item_loc in self.distill_cfg:
student_module = 'student_' + item_loc.student_module.replace('.','_')
teacher_module = 'teacher_' + item_loc.teacher_module.replace('.','_')
student_feat = buffer_dict[student_module]
teacher_feat = buffer_dict[teacher_module]
for item_loss in item_loc.methods:
loss_name = item_loss.name
student_loss[ loss_name] = self.distill_losses[loss_name](student_feat,teacher_feat)
return student_loss
# TODO refactor
def slide_inference(self, img, img_meta, rescale):
"""Inference by sliding-window with overlap."""
h_stride, w_stride = self.test_cfg.stride
h_crop, w_crop = self.test_cfg.crop_size
batch_size, _, h_img, w_img = img.size()
num_classes = self.student.num_classes
h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1
w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1
preds = img.new_zeros((batch_size, num_classes, h_img, w_img))
count_mat = img.new_zeros((batch_size, 1, h_img, w_img))
for h_idx in range(h_grids):
for w_idx in range(w_grids):
y1 = h_idx * h_stride
x1 = w_idx * w_stride
y2 = min(y1 + h_crop, h_img)
x2 = min(x1 + w_crop, w_img)
y1 = max(y2 - h_crop, 0)
x1 = max(x2 - w_crop, 0)
crop_img = img[:, :, y1:y2, x1:x2]
pad_img = crop_img.new_zeros(
(crop_img.size(0), crop_img.size(1), h_crop, w_crop))
pad_img[:, :, :y2 - y1, :x2 - x1] = crop_img
pad_seg_logit = self.student.encode_decode(pad_img, img_meta)
preds[:, :, y1:y2,
x1:x2] += pad_seg_logit[:, :, :y2 - y1, :x2 - x1]
count_mat[:, :, y1:y2, x1:x2] += 1
assert (count_mat == 0).sum() == 0
preds = preds / count_mat
if rescale:
preds = resize(
preds,
size=img_meta[0]['ori_shape'][:2],
mode='bilinear',
align_corners=self.student.align_corners,
warning=False)
return preds
def whole_inference(self, img, img_meta, rescale):
"""Inference with full image."""
seg_logit = self.student.encode_decode(img, img_meta)
if rescale:
seg_logit = resize(
seg_logit,
size=img_meta[0]['ori_shape'][:2],
mode='bilinear',
align_corners=self.student.align_corners,
warning=False)
return seg_logit
def inference(self, img, img_meta, rescale):
"""Inference with slide/whole style.
Args:
img (Tensor): The input image of shape (N, 3, H, W).
img_meta (dict): Image info dict where each dict has: 'img_shape',
'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
rescale (bool): Whether rescale back to original shape.
Returns:
Tensor: The output segmentation map.
"""
assert self.student.test_cfg.mode in ['slide', 'whole']
ori_shape = img_meta[0]['ori_shape']
assert all(_['ori_shape'] == ori_shape for _ in img_meta)
if self.student.test_cfg.mode == 'slide':
seg_logit = self.slide_inference(img, img_meta, rescale)
else:
seg_logit = self.whole_inference(img, img_meta, rescale)
output = F.softmax(seg_logit, dim=1)
flip = img_meta[0]['flip']
flip_direction = img_meta[0]['flip_direction']
if flip:
assert flip_direction in ['horizontal', 'vertical']
if flip_direction == 'horizontal':
output = output.flip(dims=(3, ))
elif flip_direction == 'vertical':
output = output.flip(dims=(2, ))
return output
def simple_test(self, img, img_meta, rescale=True):
"""Simple test with single image."""
seg_logit = self.inference(img, img_meta, rescale)
seg_pred = seg_logit.argmax(dim=1)
seg_pred = seg_pred.cpu().numpy()
# unravel batch dim
seg_pred = list(seg_pred)
return seg_pred
def aug_test(self, imgs, img_metas, rescale=True):
"""Test with augmentations.
Only rescale=True is supported.
"""
# aug_test rescale all imgs back to ori_shape for now
assert rescale
# to save memory, we get augmented seg logit inplace
seg_logit = self.inference(imgs[0], img_metas[0], rescale)
for i in range(1, len(imgs)):
cur_seg_logit = self.inference(imgs[i], img_metas[i], rescale)
seg_logit += cur_seg_logit
seg_logit /= len(imgs)
seg_pred = seg_logit.argmax(dim=1)
seg_pred = seg_pred.cpu().numpy()
# unravel batch dim
seg_pred = list(seg_pred)
return seg_pred
| 1.96875 | 2 |
packages/reporting-server/rest_server/repositories/report/test_fleet_state.py | baviera08/romi-dashboard | 23 | 12771532 | # conflicts with isort because of local non-relative import
# pylint: disable=wrong-import-order
import unittest
from fastapi.testclient import TestClient
from models.tortoise_models.fleet import Fleet, Robot
from models.tortoise_models.fleet_state import FleetState, RobotStateEnum
from rest_server.app import get_app
from rest_server.repositories.report.fleet_state import get_fleet_state
from rest_server.test_utils import start_test_database
from tortoise import Tortoise
app = get_app()
class TestReportFleetState(unittest.IsolatedAsyncioTestCase):
async def asyncSetUp(self):
await start_test_database()
self.client = TestClient(app)
robot = await Robot.create(name="Robot 1")
fleet = await Fleet.create(name="Fleet 1")
await FleetState.create(
fleet=fleet,
robot=robot,
robot_battery_percent="100",
robot_location="1",
robot_mode=RobotStateEnum.MODE_WAITING,
robot_seq=1,
robot_task_id="test",
)
await FleetState.create(
fleet=fleet,
robot=robot,
robot_battery_percent="100",
robot_location="1",
robot_mode=RobotStateEnum.MODE_WAITING,
robot_seq=2,
robot_task_id="test",
)
async def asyncTearDown(self):
await Tortoise.close_connections()
async def test_get_fleet_states(self):
fleet_list = await get_fleet_state(0, 10)
self.assertEqual(len(fleet_list), 2)
| 2.25 | 2 |
train.py | ashok-arjun/simple-super-resolution | 0 | 12771533 | import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from utils import get_most_recent_checkpoint,get_test_set,get_training_set, set_seed
from math import log10
from model.srcnn_upconv7 import Upconv
from model.rdn import RDN
import argparse
import os
from os.path import exists, join, basename
from os import makedirs, remove
import urllib
import tarfile
def download_bsd300(dest):
output_image_dir = join(dest, "BSDS300/images")
if not exists(output_image_dir):
makedirs(dest)
url = "http://www2.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/BSDS300-images.tgz"
print("downloading url ", url)
data = urllib.request.urlopen(url)
file_path = join(dest, basename(url))
with open(file_path, 'wb') as f:
f.write(data.read())
print("Extracting data")
with tarfile.open(file_path) as tar:
for item in tar:
tar.extract(item, dest)
remove(file_path)
else:
print("BSDS300 dataset already exists")
return output_image_dir
'''
Training Settings
'''
def str2bool(v):
return str(v).lower() in ("y", "yes", "true", "t", "1")
parser = argparse.ArgumentParser(description='Pytorch Image/Video Super-Resolution')
parser.add_argument('--upscale_factor',type=int,default=2, help="Super-resolution upscale factor")
parser.add_argument('--datapath',type=str,default="data/", help="Path to Original data")
parser.add_argument('--model',type=str,default="RDN",help="Choose which SR model to use")
parser.add_argument('--threads',type=int,default=4,help='Number of thread for DataLoader')
parser.add_argument('--lr',type=float,default=0.001,help='Learning rate')
parser.add_argument('--nEpochs',type=int,default=1000,help='Number of epochs')
parser.add_argument('--batchSize',type=int,default=8,help='Training batch size')
parser.add_argument('--testBatchSize',type=int,default=4,help='Test batch size')
parser.add_argument('--isCuda',type=str2bool,default=True,help='Cuda Usage')
opt = parser.parse_args()
print(opt)
lr = opt.lr
nEpochs = opt.nEpochs
batchSize = opt.batchSize
testBatchSize = opt.testBatchSize
isCuda = opt.isCuda
set_seed(0)
if isCuda and not torch.cuda.is_available():
raise Exception("No GPU, please change isCuda False")
device = torch.device("cuda" if isCuda else "cpu")
print('===> Loading datasets')
dataset_path = download_bsd300(opt.datapath)
train_set = get_training_set(opt.upscale_factor,dataset_path)
test_set = get_test_set(opt.upscale_factor,dataset_path)
training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=batchSize, shuffle=True)
testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=testBatchSize, shuffle=False)
print('===> Datasets Loading Complete')
print('===> Model Initialize')
if opt.model == "Upconv":
model = Upconv(upscale_factor=opt.upscale_factor).to(device)
os.makedirs('ckpt/Upconv',exist_ok=True)
criterion = model.criterion
optimizer = model.optimizer
#scheduler = model.scheduler
if len(next(os.walk('ckpt/Upconv'))[2]) != 0:
min_iter = 1
last_ckpt, min_iter = get_most_recent_checkpoint('ckpt/Upconv')
model = torch.load(last_ckpt)
else :
min_iter = 1
elif opt.model == "RDN":
model = RDN(channel = 1,growth_rate = 64,rdb_number = 3,upscale_factor=opt.upscale_factor).to(device)
os.makedirs('ckpt/RDN',exist_ok=True)
criterion = model.criterion
optimizer = model.optimizer
scheduler = model.scheduler
min_iter = 1
print('===> Model Initialize Complete')
'''
Model Implementation
elif opt.model == "Model_name":
model = Model_name(upscale_factor=opt.upscale_factor).to(device)
os.makedirs('ckpt/Model_name',exist_ok=True)
criterion = model.criterion
optimizer = model.optimizer
scheduler = model.scheduler
if len(next(os.walk('ckpt/Model_name'))[2]) != 0:
min_iter = 1
last_ckpt, min_iter = get_most_recent_checkpoint('ckpt/Model_name')
model = torch.load(last_ckpt)
else :
min_iter = 1
'''
print('===> Training Initialize')
if torch.cuda.is_available():
cudnn.benchmark = True
criterion.cuda()
print('===> Training Initialize Complete')
def train(epoch):
print('===> Training # %d epoch'%(epoch))
epoch_loss = 0
for iteration, batch in enumerate(training_data_loader, 1):
input, target = batch[0].to(device), batch[1].to(device)
optimizer.zero_grad()
loss = criterion(model(input), target)
epoch_loss += loss.item()
loss.backward()
optimizer.step()
print("===> Epoch[{}]({}/{}): Loss: {:.6f}".format(epoch, iteration, len(training_data_loader), loss.item()))
print("===> Epoch {} Complete: Avg. Loss: {:.6f}".format(epoch, epoch_loss / len(training_data_loader)))
def test():
print('===> Testing # %d epoch'%(epoch))
avg_psnr = 0
with torch.no_grad():
for batch in testing_data_loader:
input, target = batch[0].to(device), batch[1].to(device)
prediction = model(input)
mse = criterion(prediction, target)
psnr = 10 * log10(1 / mse.item())
avg_psnr += psnr
print("===> Avg. PSNR: {:.6f} dB".format(avg_psnr / len(testing_data_loader)))
def checkpoint(epoch):
if opt.model == "Upconv":
model_out_path = "ckpt/" + "Upconv" + "/model_epoch_{}.pth".format(epoch)
elif opt.model == "RDN":
model_out_path = "ckpt/" + "RDN" + "/model_epoch_{}.pth".format(epoch)
'''
Model Implementation
elif opt.model == "Model_Name":
model_out_path = "ckpt/" + "Model_Name" + "/model_epoch_{}.pth".format(epoch)
'''
print(model_out_path)
torch.save(model, model_out_path)
print("Checkpoint saved to {}".format(model_out_path))
if __name__ == '__main__':
for epoch in range(min_iter, nEpochs + 1):
print("=====> Training %d epochs"%(epoch))
train(epoch)
print("=====> Training %d epochs completed"%(epoch))
print("=====> Testing %d epochs"%(epoch))
test()
print("=====> Testing %d epochs completed"%(epoch))
print("=====> lr scheduler activated in %d epochs"%(epoch))
scheduler.step(epoch)
print("=====> lr scheduler activated in %d epochs completed"%(epoch))
print("=====> Save checkpoint %d epochs"%(epoch))
checkpoint(epoch)
print("=====> Save checkpoint %d epochs completed"%(epoch))
| 2.4375 | 2 |
plenum/test/cli/mock_output.py | steptan/indy-plenum | 0 | 12771534 | <reponame>steptan/indy-plenum<gh_stars>0
from prompt_toolkit.output import Output
class MockOutput(Output):
def __init__(self, recorder=None):
self.writes = []
self.recorder = recorder
def fileno(self):
raise NotImplementedError
def cursor_up(self, amount):
raise NotImplementedError
def erase_screen(self):
raise NotImplementedError
def hide_cursor(self):
raise NotImplementedError
def set_attributes(self, attrs):
pass
def enable_mouse_support(self):
raise NotImplementedError
def clear_title(self):
raise NotImplementedError
def quit_alternate_screen(self):
raise NotImplementedError
def enable_autowrap(self):
pass
def erase_end_of_line(self):
raise NotImplementedError
def cursor_backward(self, amount):
raise NotImplementedError
def flush(self):
pass
def disable_autowrap(self):
raise NotImplementedError
def erase_down(self):
raise NotImplementedError
def cursor_forward(self, amount):
raise NotImplementedError
def cursor_goto(self, row=0, column=0):
raise NotImplementedError
def disable_mouse_support(self):
raise NotImplementedError
def show_cursor(self):
raise NotImplementedError
def cursor_down(self, amount):
raise NotImplementedError
def enter_alternate_screen(self):
raise NotImplementedError
def set_title(self, title):
raise NotImplementedError
def write_raw(self, data):
raise NotImplementedError
def write(self, data):
self.writes.append(data)
if self.recorder:
self.recorder.write(data)
def reset_attributes(self):
pass
def scroll_buffer_to_prompt(self):
pass
| 2.3125 | 2 |
users/signals.py | swasthikcnayak/sushiksha-website | 0 | 12771535 | <gh_stars>0
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from users.tasks import send_email
from .models import Profile, Pomodoro, Reward
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
Pomodoro.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_profile(sender, instance, **kwargs):
instance.profile.save()
@receiver(post_save, sender=Reward)
def send_mail(sender, instance, created, **kwargs):
if created:
email = instance.user.email
name = instance.user.profile.name
badge = instance.badges.title
description = instance.description
awarded_by = instance.awarded_by
timestamp = instance.timestamp
image = 'https://sushiksha.konkanischolarship.com' + str(instance.badges.logo.url)
array = [email, timestamp, awarded_by, description, badge, name, image]
send_email.delay(array)
        # Note: send_email.delay above requires a working Celery and RabbitMQ setup; only use it if that is configured
return True
| 2.078125 | 2 |
rate/urls.py | Kennedy-karuri/Awards | 0 | 12771536 | from django.urls import path,include
from . import views
from rate import views as user_views
from django.conf.urls.static import static
from django.conf import settings
from django.conf.urls import url
urlpatterns=[
path('',views.home,name = 'home'),
path('accounts/register/', views.register, name='register'),
path('profile/', views.profile,name = 'profile'),
path('update_profile/', user_views.update_profile,name = 'update_profile'),
path('new_project/', views.new_project,name ='new_project'),
path('search/', views.search_results, name = 'search_results'),
url(r'^singleproject/(\d+)',views.single_project,name='singleproject'),
path('rate/<int:id>/',views.rate,name='rates'),
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT) | 2.03125 | 2 |
2-resources/python-data-generation/generate-random-data-into-dynamodb.py | eengineergz/Lambda | 0 | 12771537 | import boto.dynamodb2
from boto.dynamodb2.table import Table
from boto.dynamodb2.fields import HashKey
from boto.regioninfo import RegionInfo
from boto.dynamodb2.layer1 import DynamoDBConnection
from faker import Factory
import uuid
import time
try:
sessions = Table(
table_name='usertable',
schema=[HashKey('id')],
connection=DynamoDBConnection(
region=RegionInfo(name='eu-west-1',
endpoint='dynamodb.eu-west-1.amazonaws.com')
))
except:
print("connection not successful")
def create_session():
id = str(uuid.uuid4())
timestamp = time.strftime("%Y%m%d%H%M%S")
ipv4 = Factory.create().ipv4()
users_id = Factory.create().slug()
users_name = Factory.create().first_name()
users_surname = Factory.create().last_name()
res = sessions.put_item(data={
'username': id,
'data': {
'user_id': users_id,
'name' : users_name,
'surname' : users_surname,
'ip': str(ipv4),
'datetime': timestamp
}
})
print('Created: ' + str(res))
if __name__ == '__main__':
for x in range(20):
create_session()
| 2.328125 | 2 |
dfpipeline/DateTransformer.py | IBM/dataframe-pipeline | 2 | 12771538 | ##############################################################################
# Copyright 2020 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import pandas as pd
from . import DFPBase
import numpy as np
import onnx
from onnx import helper
from onnx import AttributeProto, TensorProto, GraphProto
class DateTransformer(DFPBase):
"""
Create time features.
Parameters
----------
column : string
Column name holding the time data. Each element of the column must be a string representing a date such as '2018-02-02 18:31' or an int value representing the time in seconds. When the time is represented as seconds, the origin argument needs to be specified to calculate the date from the time data. From this column, the following six features (columns) are created. The names of the created columns have this column name as a prefix.
- MY (months in a year)
- WY (weeks in a year)
- DY (days in a year)
- DM (days in a month)
- DW (days in a week)
- HD (hours in a day)
origin: string (default is 1970-01-01)
        The origin of the time used to calculate dates. This is needed when a column has the time values in seconds; it is not needed when a column has string values representing dates.
Examples:
----------
>>> df = pd.DataFrame({'DT': ['2018-02-02 18:31', '2018-02-03 11:15', '2018-02-03 13:11']})
    >>> tf1 = DateTransformer(column='DT')
"""
def __init__(
self,
column=None,
origin=None
):
super().__init__()
self.column = column
self.origin = origin
self.date_fields = ['MY', 'WY', 'DY', 'DM', 'DW', 'HD']
def transform(self, df):
if self.origin is not None:
df[self.column] = pd.to_datetime(df[self.column], origin=self.origin, unit='s')
else:
df[self.column] = pd.to_datetime(df[self.column])
for f in self.date_fields:
output_column = self.column + '_' + f
if f == 'MY':
df[output_column] = df[self.column].dt.month
elif f == 'WY':
df[output_column] = df[self.column].dt.isocalendar().week.astype(np.int64)
elif f == 'DY':
df[output_column] = df[self.column].dt.dayofyear
elif f == 'DM':
df[output_column] = df[self.column].dt.day
elif f == 'DW':
df[output_column] = df[self.column].dt.dayofweek
elif f == 'HD':
df[output_column] = df[self.column].dt.hour
else:
assert False, 'Uknown date field ' + f
return df
def to_onnx_operator(self, graph):
input_tensor = graph.get_current_tensor(self.column)
output_tensors = []
output_tensor_names = []
for f in self.date_fields:
output_column = self.column + '_' + f
output_tensor = graph.get_next_tensor(output_column, TensorProto.INT32)
output_tensors.append(output_tensor)
output_tensor_names.append(output_tensor.name)
kwargs = {}
kwargs['format'] = '%Y-%m-%d'
op = helper.make_node('Date', [input_tensor.name], output_tensor_names, graph.get_node_name('Date'), domain='ai.onnx.ml', **kwargs)
graph.add([input_tensor], output_tensors, [op])
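# Illustrative usage sketch (the DataFrame below is an assumption): transform() appends the
# derived columns DT_MY, DT_WY, DT_DY, DT_DM, DT_DW and DT_HD.
#   df = pd.DataFrame({'DT': ['2018-02-02 18:31', '2018-02-03 11:15']})
#   df = DateTransformer(column='DT').transform(df)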
| 2.15625 | 2 |
flyingsim/data/airportrouteloader.py | mavrakis/flyingsim | 0 | 12771539 | <reponame>mavrakis/flyingsim
from google.appengine.ext import bulkload
from google.appengine.api import datastore_types
from google.appengine.ext import search
from google.appengine.ext import db
from google.appengine.api import datastore_entities
from google.appengine.api import datastore
from google.appengine.api.datastore_errors import NeedIndexError
import logging
from flyingsim import models
class AirportRouteLoader(bulkload.Loader):
def __init__(self):
        # The 'AirportRoute' entity maps an origin ICAO code (icao_from) to a destination airport reference
logging.error("init self")
self.sequence_nr=1
bTry=True
while bTry:
try:
last = models.AirportRoute.all().order('-sequence_nr').fetch(1)
bTry=False
if last:
self.sequence_nr=last[0].sequence_nr
except OSError:
logging.error("Got OS error")
except NeedIndexError:
bTry=False
bulkload.Loader.__init__(self, 'AirportRoute',
[('icao_from', str),
('to_icao_id',str),
])
def HandleEntity(self, entity):
logging.error ("HandleEntity %s " ,entity)
to_icao_id=entity['to_icao_id']
if to_icao_id:
bTry=True
while bTry:
try:
airportObj = models.Airport.gql("Where icao_id=:1",to_icao_id).get()
bTry=False
except OSError:
logging.error("Got OS error")
#Add the key
entity['airport_to']=airportObj.key()
del entity['to_icao_id']
entity['sequence_nr']=self.sequence_nr
self.sequence_nr= self.sequence_nr +1
logging.error ("entity right before return %s " ,entity)
return entity
if __name__ == '__main__':
bulkload.main(AirportRouteLoader()) | 2.140625 | 2 |
frets.py | mariuszlitwin/frets | 0 | 12771540 | <gh_stars>0
#!/usr/bin/python3
from colorama import Fore, Back
class frets:
tuning = list()
max_string_name_len = 0;
frets_count = 0;
strings = dict()
NOTES = ('E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B', 'C', 'C#', 'D', 'D#')
def __init__(self,
tuning=('E', 'A', 'D', 'G'),
frets_count=24):
self.tuning = tuning
self.frets_count = frets_count
for string in tuning:
if len(string) > self.max_string_name_len:
self.max_string_name_len = len(string)
padding_count = 0;
padding = ''
self.strings[string] = list()
starting_note = self.NOTES.index(string) + 1
for i in range(frets_count):
padding = '^' * int(((starting_note + i) / len(self.NOTES)))
self.strings[string].append(self.NOTES[(starting_note + i) % len(self.NOTES)] + padding)
#print('{}{} ({}) = {}'.format(string,
# i,
# int(((starting_note + i) / len(self.NOTES))),
# self.NOTES[(starting_note + i) % len(self.NOTES)] + padding))
def debug_strings(self):
print(self.strings)
def show_me_plz(self,
seek_note=None,
seek_string=None):
if (seek_string):
seek_note = self.strings[seek_string[0]][int(seek_string[1]) - 1]
upper_seek_note = None
lower_seek_note = None
if seek_note and seek_note.endswith('^'):
lower_seek_note = seek_note[0:-1]
if seek_note:
upper_seek_note = seek_note + '^'
upper_found_position = list()
found_position = list()
lower_found_position = list()
print(Fore.WHITE + \
' ' * (self.max_string_name_len + 2),
end='')
for fret_nr in range(1, self.frets_count + 1):
print(Fore.WHITE + \
(' ' * (4 - len(str(fret_nr)))) + str(fret_nr),
end='')
print(Fore.YELLOW + '|', end='')
print('')
for string in reversed(self.tuning):
color = Fore.WHITE + Back.BLACK
if string == seek_note:
color = Fore.WHITE + Back.RED
found_position.append(string + "0")
elif string == upper_seek_note:
color = Fore.WHITE + Back.CYAN
upper_found_position.append(string + "0")
elif string == lower_seek_note:
color = Fore.WHITE + Back.MAGENTA
lower_found_position.append(string + "0")
print(color + \
(' ' * (self.max_string_name_len - len(string))) + \
string, end='')
print(Fore.YELLOW + '||', end='')
fret_nr = 1
for note in self.strings[string]:
color = Fore.WHITE + Back.BLACK
if note == seek_note:
color = Fore.WHITE + Back.RED
found_position.append(string + str(fret_nr))
elif note == upper_seek_note:
color = Fore.WHITE + Back.CYAN
upper_found_position.append(string + str(fret_nr))
elif note == lower_seek_note:
color = Fore.WHITE + Back.MAGENTA
lower_found_position.append(string + str(fret_nr))
print(color + \
note[0:4] + \
'-' * (4 - len(note)), end='')
print(Fore.YELLOW + Back.BLACK + '|', end='')
fret_nr += 1
print(Fore.WHITE + Back.BLACK + '')
print(Fore.WHITE + '\n')
print(Back.CYAN + ' ' + Back.BLACK + \
' Found octave-higher note {} on: {}'.format(upper_seek_note,
upper_found_position))
print(Back.RED + ' ' + Back.BLACK + \
' Found note {} on: {}'.format(seek_note,
found_position))
print(Fore.WHITE + \
Back.MAGENTA + ' ' + Back.BLACK + \
' Found octave-lower note {} on: {}'.format(lower_seek_note,
lower_found_position))
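# Illustrative usage sketch (tuning and positions are assumptions):
#   board = frets()                          # default E-A-D-G tuning, 24 frets
#   board.show_me_plz(seek_note='G')         # highlight every G on the fretboard
#   board.show_me_plz(seek_string=('A', 3))  # highlight the note at the 3rd fret of the A string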
| 2.8125 | 3 |
DeepSentiment/GetSentiment.py | AbhinavBhatnagar/DeepSentiment | 1 | 12771541 | <gh_stars>1-10
import requests
import json
import subprocess
import os
class DeepSentiment:
def __init__(self):
self.parameter = []
self.text = ""
self.sentiment = ""
self.sentiment_score = 0
self.proc = None
def run_server(self):
call_ = ['nohup','java', '-jar', str(os.getcwd()) +"/DeepSentiment/resources/deepsentiment.jar"]
self.proc = subprocess.Popen(call_)
def get_text(self, text):
self.text = text
return self.text
def deepsentiment(self, text):
self.parameter = [('text',text)]
response = requests.get("http://127.0.0.1:9000/", params=self.parameter)
jresponse = json.loads(response.text)
return jresponse
def get_sentiment(self, jresponse):
self.sentiment = jresponse["sentiment"]
return self.sentiment
def get_sentiment_score(self, jresponse):
self.sentiment_score = jresponse["sentimentscore"]
return self.sentiment_score
def stop_server(self):
self.proc.terminate()
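# Illustrative usage sketch (assumes the bundled jar is reachable on 127.0.0.1:9000):
#   ds = DeepSentiment()
#   ds.run_server()
#   response = ds.deepsentiment(ds.get_text("The movie was wonderful"))
#   print(ds.get_sentiment(response), ds.get_sentiment_score(response))
#   ds.stop_server()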
| 2.546875 | 3 |
Python/kraken/core/objects/operators/kl_operator.py | goshow-jp/Kraken | 0 | 12771542 | """Kraken - objects.operators.kl_operator module.
Classes:
KLOperator - KL operator object.
"""
import pprint
import re
from kraken.core.maths import MathObject, Mat44, Xfo, Vec2, Vec3
from kraken.core.objects.object_3d import Object3D
from kraken.core.objects.operators.operator import Operator
from kraken.core.objects.attributes.attribute import Attribute
from kraken.core.kraken_system import ks
from kraken.log import getLogger
logger = getLogger('kraken')
class KLOperator(Operator):
"""KL Operator representation."""
def __init__(self, name, solverTypeName, extension):
super(KLOperator, self).__init__(name)
self.solverTypeName = solverTypeName
self.extension = extension
# Load the Fabric Engine client and construct the RTVal for the Solver
ks.loadCoreClient()
ks.loadExtension('Kraken')
if self.extension != 'Kraken':
ks.loadExtension(self.extension)
self.solverRTVal = ks.constructRTVal(self.solverTypeName)
# logger.debug("Creating kl operator object [%s] of type [%s] from extension [%s]:" % (self.getName(), self.solverTypeName, self.extension))
self.args = self.solverRTVal.getArguments('KrakenSolverArg[]')
# Initialize the inputs and outputs based on the given args.
for i in xrange(len(self.args)):
arg = self.args[i]
argName = arg.name.getSimpleType()
argDataType = arg.dataType.getSimpleType()
argConnectionType = arg.connectionType.getSimpleType()
# Note, do not create empty arrays here as we need to know later whether or not
# to create default values if input/output is None
if argConnectionType == 'In':
self.inputs[argName] = None
else:
self.outputs[argName] = None
def getSolverTypeName(self):
"""Returns the solver type name for this operator.
Returns:
str: Name of the solver type this operator uses.
"""
return self.solverTypeName
def getExtension(self):
"""Returns the extention this operator uses.
Returns:
str: Name of the extension this solver uses.
"""
return self.extension
def getSolverArgs(self):
"""Returns the args array defined by the KL Operator.
Returns:
RTValArray: Args array defined by the KL Operator.
"""
return self.args
def getInputType(self, name):
"""Returns the type of input with the specified name."""
for arg in self.args:
if arg.connectionType.getSimpleType() == "In" and arg.name.getSimpleType() == name:
return arg.dataType.getSimpleType()
raise Exception("Could not find input argument %s in kl operator %s" % (name, self.getName()))
def getOutputType(self, name):
"""Returns the type of output with the specified name."""
for arg in self.args:
if arg.connectionType.getSimpleType() == "Out" and arg.name.getSimpleType() == name:
return arg.dataType.getSimpleType()
raise Exception("Could not find output argument %s in kl operator %s" % (name, self.getName()))
def getDefaultValue(self, name, RTValDataType, mode="arg"):
"""Returns the default RTVal value for this argument
Only print debug if setting default inputs. Don't care about outputs, really
Args:
name (str): Name of the input to get.
mode (str): "inputs" or "outputs"
Returns:
RTVal
"""
def isFixedArrayType(string):
return bool(re.search(r'\[\d', string))
# If attribute has a default value
if self.solverRTVal.defaultValues.has("Boolean", name).getSimpleType():
RTVal = ks.convertFromRTVal(self.solverRTVal.defaultValues[name])
if RTVal.isArray():
# If RTValDataType is variable array, but default value is fixed array, convert it
if isFixedArrayType(RTVal.getTypeName().getSimpleType()) and not isFixedArrayType(RTValDataType):
RTValArray = ks.rtVal(RTValDataType)
if len(RTVal):
RTValArray.resize(len(RTVal))
for i in range(len(RTVal)):
RTValArray[i] = RTVal[i]
RTVal = RTValArray
else:
                # Not totally sure why we need to do this, but we get None from getSimpleType on the RTVal
                # when we run it on its own, so we pass the type that we query explicitly. Needs further investigation...
RTVal = ks.convertFromRTVal(self.solverRTVal.defaultValues[name], RTTypeName=RTValDataType)
logger.debug("Using default value for %s.%s.%s(%s) --> %s" % (self.solverTypeName, self.getName(), mode, name, RTVal))
return RTVal
else:
if True: #mode == "arg": #Only report a warning if default value is not provided for arg
logger.warn("No default value for %s.%s.%s[%s]." % (self.solverTypeName, self.getName(), mode, name))
defaultValue = ks.rtVal(RTValDataType)
if True: #mode == "arg":
logger.warn(" Creating default value by generating new RTVal object of type: %s. You should set default values for %s.%s(%s) in your KL Operator." %
(RTValDataType, self.solverTypeName, mode, name,))
return defaultValue
def getInput(self, name):
"""Returns the input with the specified name.
If there is no input value, it get the default RTVal and converts to
python data
Args:
name (str): Name of the input to get.
Returns:
object: Input object.
"""
if name in self.inputs and self.inputs[name] is not None:
return self.inputs[name]
def rt2Py(rtVal, rtType):
if "[" in rtType:
return []
if rtType == "Xfo":
return Xfo(rtVal)
if rtType == "Mat44":
return Mat44(rtVal)
if rtType == "Vec2":
return Vec2(rtVal)
if rtType == "Vec3":
return Vec3(rtVal)
else:
return rtVal.getSimpleType()
#raise ValueError("Cannot convert rtval %s from %s" (rtVal, rtType))
argDataType = None
for arg in self.args:
if arg.name.getSimpleType() == name:
argDataType = arg.dataType.getSimpleType()
break
if argDataType is None:
raise Exception("Cannot find arg %s for object %s" (arg, self.getName()))
defaultVal = self.getDefaultValue(name, argDataType, mode="arg")
pyVal = rt2Py(defaultVal, argDataType)
return pyVal
def generateSourceCode(self):
"""Returns the source code for a stub operator that will invoke the KL operator
Returns:
str: The source code for the stub operator.
"""
# Start constructing the source code.
opSourceCode = "dfgEntry {\n"
# In SpliceMaya, output arrays are not resized by the system prior to
# calling into Splice, so we explicily resize the arrays in the
# generated operator stub code.
for i in xrange(len(self.args)):
arg = self.args[i]
argName = arg.name.getSimpleType()
argDataType = arg.dataType.getSimpleType()
argConnectionType = arg.connectionType.getSimpleType()
if argDataType.endswith('[]') and argConnectionType == 'Out':
arraySize = len(self.getOutput(argName))
opSourceCode += " " + argName + ".resize(" + str(arraySize) + \
");\n"
# guard
if argDataType.endswith('[]') and argConnectionType == 'In':
arraySize = len(self.getInput(argName))
opSourceCode += " if({}.size() != {}){{\n".format(argName, str(arraySize))
opSourceCode += " return;\n"
opSourceCode += " }\n"
opSourceCode += " if(solver == null)\n"
opSourceCode += " solver = " + self.solverTypeName + "();\n"
opSourceCode += " solver.solve(\n"
for i in xrange(len(self.args)):
argName = self.args[i].name.getSimpleType()
if i == len(self.args) - 1:
opSourceCode += " " + argName + "\n"
else:
opSourceCode += " " + argName + ",\n"
opSourceCode += " );\n"
opSourceCode += "}\n"
return opSourceCode
def evaluate(self):
"""Invokes the KL operator causing the output values to be computed.
Returns:
bool: True if successful.
"""
# logger.debug("\nEvaluating kl operator [%s] of type [%s] from extension [%s]..." % (self.getName(), self.solverTypeName, self.extension))
super(KLOperator, self).evaluate()
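        # Convert Kraken scene objects, math types and Python builtins into Fabric RTVals;
        # inputs use the evaluated globalXfo, outputs (asInput=False) use the local xfo.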
def getRTVal(obj, asInput=True):
if isinstance(obj, Object3D):
if asInput:
return obj.globalXfo.getRTVal().toMat44('Mat44')
else:
return obj.xfo.getRTVal().toMat44('Mat44')
elif isinstance(obj, Xfo):
return obj.getRTVal().toMat44('Mat44')
elif isinstance(obj, MathObject):
return obj.getRTVal()
elif isinstance(obj, Attribute):
return obj.getRTVal()
elif type(obj) is bool:
return ks.rtVal('Boolean', obj)
elif type(obj) is int:
return ks.rtVal('Integer', obj)
elif type(obj) is float:
return ks.rtVal('Scalar', obj)
elif type(obj) is str:
return ks.rtVal('String', obj)
else:
                return obj
def validateArg(rtVal, argName, argDataType):
"""Validate argument types when passing built in Python types.
Args:
rtVal (RTVal): rtValue object.
argName (str): Name of the argument being validated.
argDataType (str): Type of the argument being validated.
"""
# Validate types when passing a built in Python type
if type(rtVal) in (bool, str, int, float):
if argDataType in ('Scalar', 'Float32', 'UInt32', 'Integer'):
if type(rtVal) not in (float, int):
raise TypeError(self.getName() + ".evaluate(): Invalid Argument Value: " + str(rtVal) + " (" + type(rtVal).__name__ + "), for Argument: " + argName + " (" + argDataType + ")")
elif argDataType == 'Boolean':
if type(rtVal) != bool:
raise TypeError(self.getName() + ".evaluate(): Invalid Argument Value: " + str(rtVal) + " (" + type(rtVal).__name__ + "), for Argument: " + argName + " (" + argDataType + ")")
elif argDataType == 'String':
if type(rtVal) != str:
raise TypeError(self.getName() + ".evaluate(): Invalid Argument Value: " + str(rtVal) + " (" + type(rtVal).__name__ + "), for Argument: " + argName + " (" + argDataType + ")")
argVals = []
debug = []
for i in xrange(len(self.args)):
arg = self.args[i]
argName = arg.name.getSimpleType()
argDataType = arg.dataType.getSimpleType()
argConnectionType = arg.connectionType.getSimpleType()
if argDataType == 'EvalContext':
argVals.append(ks.constructRTVal(argDataType))
continue
if argName == 'time':
argVals.append(ks.constructRTVal(argDataType))
continue
if argName == 'frame':
argVals.append(ks.constructRTVal(argDataType))
continue
if argConnectionType == 'In':
if str(argDataType).endswith('[]'):
if argName in self.inputs and self.inputs[argName] is not None:
rtValArray = ks.rtVal(argDataType)
rtValArray.resize(len(self.inputs[argName]))
for j in xrange(len(self.inputs[argName])):
if self.inputs[argName][j] is None:
continue
rtVal = getRTVal(self.inputs[argName][j])
validateArg(rtVal, argName, argDataType[:-2])
rtValArray[j] = rtVal
else:
rtValArray = self.getDefaultValue(argName, argDataType, mode="arg")
argVals.append(rtValArray)
else:
if argName in self.inputs and self.inputs[argName] is not None:
rtVal = getRTVal(self.inputs[argName])
else:
rtVal = self.getDefaultValue(argName, argDataType, mode="arg")
validateArg(rtVal, argName, argDataType)
argVals.append(rtVal)
elif argConnectionType in ('IO', 'Out'):
if str(argDataType).endswith('[]'):
if argName in self.outputs and self.outputs[argName] is not None:
rtValArray = ks.rtVal(argDataType)
rtValArray.resize(len(self.outputs[argName]))
for j in xrange(len(self.outputs[argName])):
if self.outputs[argName][j] is None:
continue
rtVal = getRTVal(self.outputs[argName][j], asInput=False)
validateArg(rtVal, argName, argDataType[:-2])
rtValArray[j] = rtVal
else:
rtValArray = self.getDefaultValue(argName, argDataType, mode="output")
argVals.append(rtValArray)
else:
if argName in self.outputs and self.outputs[argName] is not None:
rtVal = getRTVal(self.outputs[argName], asInput=False)
else:
rtVal = self.getDefaultValue(argName, argDataType, mode="output")
validateArg(rtVal, argName, argDataType)
argVals.append(rtVal)
else:
raise Exception("Operator:'" + self.getName() + " has an invalid 'argConnectionType': " + argConnectionType)
debug.append(
{
argName: [
{
"dataType": argDataType,
"connectionType": argConnectionType
},
argVals[-1]
]
})
try:
# argstr = [str(arg) for arg in argVals]
# logger.debug("%s.solve('', %s)" % (self.solverTypeName, ", ".join(argstr)))
self.solverRTVal.solve('', *argVals)
except Exception as e:
errorMsg = "\nPossible problem with KL operator [%s]. Arguments:\n" % self.getName()
errorMsg += pprint.pformat(debug, indent=4, width=800)
logger.error(errorMsg)
raise e
# Now put the computed values out to the connected output objects.
def setRTVal(obj, rtval):
if isinstance(obj, Object3D):
obj.xfo.setFromMat44(Mat44(rtval))
elif isinstance(obj, Xfo):
obj.setFromMat44(Mat44(rtval))
elif isinstance(obj, Mat44):
obj.setFromMat44(rtval)
elif isinstance(obj, Attribute):
if ks.isRTVal(rtval):
obj.setValue(rtval.getSimpleType())
else:
obj.setValue(rtval)
else:
if hasattr(obj, '__iter__'):
logger.warning("Warning: Trying to set a KL port with an array directly.")
logger.warning("Not setting rtval: %s\n\tfor output object: %s\n\tof KL object: %s\n." % \
(rtval, obj.getName(), self.getName()))
for i in xrange(len(argVals)):
arg = self.args[i]
argName = arg.name.getSimpleType()
argDataType = arg.dataType.getSimpleType()
argConnectionType = arg.connectionType.getSimpleType()
if argConnectionType != 'In':
if argName in self.outputs and self.outputs[argName] is not None:
if str(argDataType).endswith('[]'):
for j in xrange(len(argVals[i])):
if len(self.outputs[argName]) > j and self.outputs[argName][j] is not None:
setRTVal(self.outputs[argName][j], argVals[i][j])
else:
setRTVal(self.outputs[argName], argVals[i])
return True
| 2.234375 | 2 |
model/CNN_model/train_script.py | nicolepanek/Thermophile_classification | 0 | 12771543 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import argparse
import torch
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
import os
from torch.utils.data import TensorDataset, DataLoader
from sklearn.model_selection import train_test_split
import thermodrift_model
def load_data():
# Load data
X = torch.load('/gscratch/stf/jgershon/tensor_x.pt')
Y = torch.load('/gscratch/stf/jgershon/tensor_y.pt')
return X, Y
def split_data(X, Y):
if 'X_train.pt' not in os.listdir('/gscratch/stf/jgershon/'):
# Convert y back from one hot encoding
Y = torch.argmax(Y, dim=1)
print('new Y: ', Y[:10])
print('X load: ', X.size())
print('Y load: ', Y.size())
# Split data tensors into dev and test sets
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size=0.20, random_state=42)
print('X_train: ', X_train.size())
print('X_test: ', X_test.size())
print('y_train: ', y_train.size())
print('y_test: ', y_test.size())
torch.save(X_train, '/gscratch/stf/jgershon/X_train.pt')
torch.save(X_test, '/gscratch/stf/jgershon/X_test.pt')
torch.save(y_train, '/gscratch/stf/jgershon/y_train.pt')
torch.save(y_test, '/gscratch/stf/jgershon/y_test.pt')
else:
X_train = torch.load('/gscratch/stf/jgershon/X_train.pt')
X_test = torch.load('/gscratch/stf/jgershon/X_test.pt')
y_train = torch.load('/gscratch/stf/jgershon/y_train.pt')
y_test = torch.load('/gscratch/stf/jgershon/y_test.pt')
return X_train, X_test, y_train, y_test
def get_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-indir', type=str, required=False, default=None)
parser.add_argument('-outdir', type=str, required=True, default=None)
args = parser.parse_args()
return args
args = get_args()
indir = args.indir
outdir = args.outdir
# Loading and processing the data:
X, Y = load_data()
X_train, X_test, y_train, y_test = split_data(X, Y)
# Do we need to normalize the one hot encoded tensors? Prob not.
# Generate train and test datasets
trainset = TensorDataset(X_train, y_train)
testset = TensorDataset(X_test, y_test)
# Prepare train and test loaders
train_loader = torch.utils.data.DataLoader(trainset,
batch_size=100,
shuffle=True,
num_workers=2)
test_loader = torch.utils.data.DataLoader(testset,
batch_size=100,
shuffle=True,
num_workers=2)
# Instantiate the network
model = thermodrift_model.Net()
# Load model from previous state if indir arg is specified
if indir is not None:
if len(indir) > 0:
model.load_state_dict(torch.load(indir))
model.eval()
print('Model loaded from: ', indir)
# Instantiate the cross-entropy loss
criterion = nn.CrossEntropyLoss()
# Instantiate the Adam optimizer
optimizer = optim.Adam(model.parameters(),
lr=3e-4,
weight_decay=0.001)
# Moving tensors over to gpu if available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Device chosen: ', device)
X_train = X_train.to(device)
X_test = X_test.to(device)
y_train = y_train.to(device)
y_test = y_test.to(device)
model = model.to(device)
# batch_size, epoch and iteration
batch_size = 100
features_train = X.size()[0]
n_iters = 100000
num_epochs = int(n_iters/(features_train/batch_size))
num_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Number of parameters: ', num_parameters)
# CNN model training
count = 0
loss_list = []
iteration_list = []
accuracy_list = []
output_dict = {}
# Number of iterations between validation cycles
n_run_valid = 500
for epoch in range(num_epochs):
for i, data in enumerate(train_loader, 0):
train, labels = data
# Clear gradients
optimizer.zero_grad()
# Forward propagation
outputs = model(train.unsqueeze(1))
# Calculate relu and cross entropy loss
loss = criterion(outputs, labels)
# Calculating gradients
loss.backward()
# Update weights
optimizer.step()
count += 1
print('Train - example: '+str(i)+' loss: '+str(float(loss.data)))
if count % n_run_valid == 0:
# Calculate Accuracy
correct = 0
total = 0
valid_loss = 0
# Iterate through test dataset
for j, data in enumerate(test_loader, 0):
test, labels = data
# Forward propagation
outputs = model(test.unsqueeze(1))
loss_valid = criterion(outputs, labels)
# Get predictions from the maximum value
predicted = torch.max(outputs.data, 1)[1]
# Total number of labels
total += len(labels)
correct += (predicted == labels).sum()
valid_loss += float(loss_valid.data)
#print('valid_loss: ', valid_loss)
accuracy = 100 * correct / float(total)
print('Valid - iter: '+str(count/n_run_valid) +
' loss: '+str(float(valid_loss/(j+1))))
if count % 500 == 0:
# Print Loss
print('Iteration: {} Train Loss: {} Test Accuracy: {} %'.format(
count, loss.data, accuracy))
path = outdir+'save_model/model_'+str(count)+'.pt'
torch.save(model.state_dict(), path)
print('Model '+str(count)+' was saved.')
| 2.53125 | 3 |
cm_custom/api/item.py | libermatic/cm_custom | 1 | 12771544 | # -*- coding: utf-8 -*-
import frappe
from toolz.curried import (
compose,
merge,
unique,
concat,
valmap,
groupby,
first,
excepts,
keyfilter,
map,
filter,
)
import html
from erpnext.portal.product_configurator.utils import (
get_products_for_website,
get_product_settings,
get_item_codes_by_attributes,
get_conditions,
)
from erpnext.shopping_cart.product_info import get_product_info_for_website
from erpnext.accounts.doctype.sales_invoice.pos import get_child_nodes
from erpnext.utilities.product import get_price, get_qty_in_stock
from cm_custom.api.utils import handle_error, transform_route
@frappe.whitelist(allow_guest=True)
@handle_error
def get_list(page="1", field_filters=None, attribute_filters=None, search=None):
other_fieldnames = ["item_group", "thumbnail", "has_variants"]
price_list = frappe.db.get_single_value("Shopping Cart Settings", "price_list")
products_settings = get_product_settings()
products_per_page = products_settings.products_per_page
get_other_fields = compose(
valmap(excepts(StopIteration, first, lambda _: {})),
groupby("name"),
lambda item_codes: frappe.db.sql(
"""
SELECT name, {other_fieldnames}
FROM `tabItem`
WHERE name IN %(item_codes)s
""".format(
other_fieldnames=", ".join(other_fieldnames)
),
values={"item_codes": item_codes},
as_dict=1,
),
lambda items: [x.get("name") for x in items],
)
frappe.form_dict.start = (frappe.utils.cint(page) - 1) * products_per_page
kwargs = _get_args(field_filters, attribute_filters, search)
items = get_products_for_website(**kwargs)
other_fields = get_other_fields(items) if items else {}
item_prices = _get_item_prices(price_list, items) if items else {}
get_rates = _rate_getter(price_list, item_prices)
stock_qtys_by_item = _get_stock_by_item(items) if items else {}
return [
merge(
x,
get_rates(x.get("name")),
{k: other_fields.get(x.get("name"), {}).get(k) for k in other_fieldnames},
{
"route": transform_route(x),
"description": frappe.utils.strip_html_tags(x.get("description") or ""),
"stock_qty": stock_qtys_by_item.get(x.get("name"), 0),
},
)
for x in items
]
@frappe.whitelist(allow_guest=True)
@handle_error
def get_count(field_filters=None, attribute_filters=None, search=None):
products_settings = get_product_settings()
products_per_page = products_settings.products_per_page
def get_pages(count):
return frappe.utils.ceil(count / products_per_page)
kwargs = _get_args(field_filters, attribute_filters, search)
def get_field_filters():
if not field_filters:
return []
meta = frappe.get_meta("Item")
def get_filter(fieldname, values):
df = meta.get_field(fieldname)
if df.fieldtype == "Table MultiSelect":
child_meta = frappe.get_meta(df.options)
fields = child_meta.get(
"fields", {"fieldtype": "Link", "in_list_view": 1}
)
if fields:
return [df.options, fields[0].fieldname, "in", values]
return ["Item", fieldname, "in", values]
return [get_filter(k, v) for k, v in kwargs.get("field_filters").items() if v]
def get_attribute_conditions():
if not attribute_filters:
return None
return get_conditions(
[
[
"Item",
"name",
"in",
get_item_codes_by_attributes(kwargs.get("attribute_filters")),
]
]
)
def get_default_conditions():
return get_conditions([["Item", "disabled", "=", 0]])
def get_variant_conditions():
if products_settings.hide_variants:
return get_conditions([["Item", "show_in_website", "=", 1]])
return get_conditions(
[
["Item", "show_in_website", "=", 1],
["Item", "show_variant_in_website", "=", 1],
],
"or",
)
def get_search_conditions():
if not search:
return None
meta = frappe.get_meta("Item")
        search_fields = set(
            meta.get_search_fields()
            + ["name", "item_name", "description", "item_group"]
        )
return get_conditions(
[["Item", field, "like", "%(search)s"] for field in search_fields], "or"
)
_field_filters = get_field_filters()
conditions = " and ".join(
[
c
for c in [
get_attribute_conditions(),
get_conditions(_field_filters, "and"),
get_default_conditions(),
get_variant_conditions(),
get_search_conditions(),
]
if c
]
)
left_joins = " ".join(
        [
            "LEFT JOIN `tab{0}` ON `tab{0}`.parent = `tabItem`.name".format(f[0])
for f in _field_filters
if f[0] != "Item"
]
)
count = frappe.db.sql(
"""
SELECT COUNT(`tabItem`.name) FROM `tabItem` {left_joins}
WHERE {conditions}
""".format(
left_joins=left_joins, conditions=conditions
)
)[0][0]
return {"count": count, "pages": get_pages(count)}
@frappe.whitelist(allow_guest=True)
@handle_error
def get(name=None, route=None):
item_code = _get_name(name, route)
if not item_code:
frappe.throw(frappe._("Item does not exist at this route"))
doc = frappe.get_cached_value(
"Item",
item_code,
fieldname=[
"name",
"item_name",
"item_group",
"has_variants",
"description",
"web_long_description",
"image",
"website_image",
],
as_dict=1,
)
price_list = frappe.get_cached_value("Shopping Cart Settings", None, "price_list")
item_prices = _get_item_prices(price_list, [doc])
get_rate = _rate_getter(price_list, item_prices)
return merge({"route": route}, doc, get_rate(doc.get("name")))
@frappe.whitelist(allow_guest=True)
@handle_error
def get_product_info(name=None, item_code=None, route=None, token=None):
# todo: first set user from token
frappe.set_user(
frappe.get_cached_value("Ahong eCommerce Settings", None, "webapp_user")
)
item_code = item_code or _get_name(name, route)
if not item_code:
frappe.throw(frappe._("Item does not exist at this route"))
item_for_website = get_product_info_for_website(
item_code, skip_quotation_creation=True
)
stock_qtys_by_item = _get_stock_by_item([{"name": item_code}])
return {
"price": keyfilter(
lambda x: x in ["currency", "price_list_rate"],
item_for_website.get("product_info", {}).get("price", {}),
),
"stock_qty": stock_qtys_by_item.get(item_code, 0),
}
@frappe.whitelist(allow_guest=True)
@handle_error
def get_media(name=None, route=None):
item_code = _get_name(name, route)
def get_values(name):
return frappe.get_cached_value(
"Item",
name,
["thumbnail", "image", "website_image", "slideshow"],
as_dict=1,
)
def get_slideshows(slideshow):
if not slideshow:
return None
doc = frappe.get_cached_doc("Website Slideshow", slideshow)
if not doc:
return None
return [x.get("image") for x in doc.slideshow_items if x.get("image")]
variant_of = frappe.get_cached_value("Item", item_code, "variant_of")
images = get_values(item_code)
template_images = get_values(variant_of) if variant_of else {}
def get_image(field):
return images.get(field) or template_images.get(field)
return {
"thumbnail": get_image("thumbnail"),
"image": get_image("image"),
"website_image": get_image("website_image"),
"slideshow": get_slideshows(get_image("slideshow")),
}
@frappe.whitelist(allow_guest=True)
@handle_error
def get_related_items(name=None, route=None):
item_code = _get_name(name, route)
if not item_code:
frappe.throw(frappe._("Item does not exist at this route"))
item_group = frappe.get_cached_value("Item", item_code, "item_group")
result = get_list(field_filters={"item_group": [item_group]})
return [x for x in result if x.get("name") != item_code]
def _get_name(name=None, route=None):
if name:
return html.unescape(name)
if route:
return frappe.db.exists("Item", {"route": (route or "").replace("__", "/")})
return None
_get_item_prices = compose(
valmap(excepts(StopIteration, first, lambda _: {})),
groupby("item_code"),
lambda price_list, items: frappe.db.sql(
"""
SELECT item_code, price_list_rate
FROM `tabItem Price`
WHERE price_list = %(price_list)s AND item_code IN %(item_codes)s
""",
values={"price_list": price_list, "item_codes": [x.get("name") for x in items]},
as_dict=1,
)
if price_list
else {},
)
def _rate_getter(price_list, item_prices):
def fn(item_code):
price_obj = (
get_price(
item_code,
price_list,
customer_group=frappe.get_cached_value(
"Selling Settings", None, "customer_group"
),
company=frappe.defaults.get_global_default("company"),
)
or {}
)
price_list_rate = item_prices.get(item_code, {}).get("price_list_rate")
item_price = price_obj.get("price_list_rate") or price_list_rate
return {
"price_list_rate": item_price,
"slashed_rate": price_list_rate if price_list_rate != item_price else None,
}
return fn
def _get_args(field_filters=None, attribute_filters=None, search=None):
get_item_groups = compose(
list,
unique,
map(lambda x: x.get("name")),
concat,
map(lambda x: get_child_nodes("Item Group", x) if x else []),
)
field_dict = (
frappe.parse_json(field_filters)
if isinstance(field_filters, str)
else field_filters
) or {}
item_groups = (
get_item_groups(field_dict.get("item_group"))
if field_dict.get("item_group")
else None
)
return {
"field_filters": merge(
field_dict, {"item_group": item_groups} if item_groups else {}
),
"attribute_filters": frappe.parse_json(attribute_filters),
"search": search,
}
@frappe.whitelist(allow_guest=True)
@handle_error
def get_recent_items():
price_list = frappe.db.get_single_value("Shopping Cart Settings", "price_list")
products_per_page = frappe.db.get_single_value(
"Products Settings", "products_per_page"
)
items = frappe.db.sql(
"""
SELECT
name, item_name, item_group, route, has_variants,
thumbnail, image, website_image,
description, web_long_description
FROM `tabItem`
WHERE show_in_website = 1
ORDER BY modified DESC
LIMIT %(products_per_page)s
""",
values={"products_per_page": products_per_page},
as_dict=1,
)
item_prices = _get_item_prices(price_list, items) if items else {}
get_rates = _rate_getter(price_list, item_prices)
stock_qtys_by_item = _get_stock_by_item(items) if items else {}
return [
merge(
x,
get_rates(x.get("name")),
{
"route": transform_route(x),
"description": frappe.utils.strip_html_tags(x.get("description") or ""),
"stock_qty": stock_qtys_by_item.get(x.get("name"), 0),
},
)
for x in items
]
@frappe.whitelist(allow_guest=True)
@handle_error
def get_featured_items():
homepage = frappe.get_single("Homepage")
if not homepage.products:
return []
price_list = frappe.db.get_single_value("Shopping Cart Settings", "price_list")
items = frappe.db.sql(
"""
SELECT
name, item_name, item_group, route, has_variants,
thumbnail, image, website_image,
description, web_long_description
FROM `tabItem`
WHERE show_in_website = 1 AND name IN %(featured)s
ORDER BY modified DESC
""",
values={"featured": [x.item_code for x in homepage.products]},
as_dict=1,
)
item_prices = _get_item_prices(price_list, items) if items else {}
get_rates = _rate_getter(price_list, item_prices)
stock_qtys_by_item = _get_stock_by_item(items) if items else {}
return [
merge(
x,
get_rates(x.get("name")),
{
"route": transform_route(x),
"description": frappe.utils.strip_html_tags(x.get("description") or ""),
"stock_qty": stock_qtys_by_item.get(x.get("name"), 0),
},
)
for x in items
]
@frappe.whitelist(allow_guest=True)
def get_next_attribute_and_values(item_code, selected_attributes):
from erpnext.portal.product_configurator.utils import get_next_attribute_and_values
session_user = frappe.session.user
webapp_user = frappe.get_cached_value(
"Ahong eCommerce Settings", None, "webapp_user"
)
if not webapp_user:
frappe.throw(frappe._("Site setup not complete"))
frappe.set_user(webapp_user)
result = get_next_attribute_and_values(item_code, selected_attributes)
frappe.set_user(session_user)
return result
def _get_stock_by_item(items):
warehouses = [
x.get("name")
for x in get_child_nodes(
"Warehouse",
frappe.db.get_single_value("Ahong eCommerce Settings", "warehouse"),
)
]
if not warehouses:
return {}
return {
item_code: stock_qty
for item_code, stock_qty in frappe.db.sql(
"""
SELECT b.item_code,
GREATEST(
b.actual_qty - b.reserved_qty - b.reserved_qty_for_production - b.reserved_qty_for_sub_contract,
0
) / IFNULL(C.conversion_factor, 1)
FROM `tabBin` AS b
INNER JOIN `tabItem` AS i
ON b.item_code = i.item_code
LEFT JOIN `tabUOM Conversion Detail` C
ON i.sales_uom = C.uom AND C.parent = i.item_code
WHERE b.item_code IN %(item_codes)s AND b.warehouse in %(warehouses)s
""",
values={
"item_codes": [x.get("name") for x in items],
"warehouses": warehouses,
},
as_list=1,
)
}
| 1.757813 | 2 |
qwilt/qsig/__init__.py | Qwilt/Qsig-Token-Python | 1 | 12771545 | <reponame>Qwilt/Qsig-Token-Python<filename>qwilt/qsig/__init__.py
# -*- coding: utf-8 -*-
from .qsig import Qsig, QsigError
__all__ = ['Qsig', 'QsigError'] | 1.453125 | 1 |
misc/zip/Cura-master/plugins/PostProcessingPlugin/Script.py | criscola/G-Gen | 1 | 12771546 | <filename>misc/zip/Cura-master/plugins/PostProcessingPlugin/Script.py
# Copyright (c) 2015 <NAME>
# Copyright (c) 2017 Ultimaker B.V.
# The PostProcessingPlugin is released under the terms of the AGPLv3 or higher.
from UM.Logger import Logger
from UM.Signal import Signal, signalemitter
from UM.i18n import i18nCatalog
# Setting stuff import
from UM.Application import Application
from UM.Settings.ContainerStack import ContainerStack
from UM.Settings.InstanceContainer import InstanceContainer
from UM.Settings.DefinitionContainer import DefinitionContainer
from UM.Settings.ContainerRegistry import ContainerRegistry
import re
import json
import collections
i18n_catalog = i18nCatalog("cura")
## Base class for scripts. All scripts should inherit the script class.
@signalemitter
class Script:
def __init__(self):
super().__init__()
self._settings = None
self._stack = None
setting_data = self.getSettingData()
self._stack = ContainerStack(stack_id = str(id(self)))
self._stack.setDirty(False) # This stack does not need to be saved.
## Check if the definition of this script already exists. If not, add it to the registry.
if "key" in setting_data:
definitions = ContainerRegistry.getInstance().findDefinitionContainers(id = setting_data["key"])
if definitions:
# Definition was found
self._definition = definitions[0]
else:
self._definition = DefinitionContainer(setting_data["key"])
self._definition.deserialize(json.dumps(setting_data))
ContainerRegistry.getInstance().addContainer(self._definition)
self._stack.addContainer(self._definition)
self._instance = InstanceContainer(container_id="ScriptInstanceContainer")
self._instance.setDefinition(self._definition.getId())
self._instance.addMetaDataEntry("setting_version", self._definition.getMetaDataEntry("setting_version", default = 0))
self._stack.addContainer(self._instance)
self._stack.propertyChanged.connect(self._onPropertyChanged)
ContainerRegistry.getInstance().addContainer(self._stack)
settingsLoaded = Signal()
valueChanged = Signal() # Signal emitted whenever a value of a setting is changed
def _onPropertyChanged(self, key, property_name):
if property_name == "value":
self.valueChanged.emit()
# Property changed: trigger reslice
# To do this we use the global container stack propertyChanged.
# Reslicing is necessary for setting changes in this plugin, because the changes
# are applied only once per "fresh" gcode
global_container_stack = Application.getInstance().getGlobalContainerStack()
global_container_stack.propertyChanged.emit(key, property_name)
## Needs to return a dict that can be used to construct a settingcategory file.
# See the example script for an example.
# It follows the same style / guides as the Uranium settings.
# Scripts can either override getSettingData directly, or use getSettingDataString
# to return a string that will be parsed as json. The latter has the benefit over
# returning a dict in that the order of settings is maintained.
def getSettingData(self):
setting_data = self.getSettingDataString()
if type(setting_data) == str:
setting_data = json.loads(setting_data, object_pairs_hook = collections.OrderedDict)
return setting_data
def getSettingDataString(self):
raise NotImplementedError()
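    # Illustrative sketch of the JSON a concrete script typically returns from
    # getSettingDataString (names and values here are assumed, not taken from a
    # real script):
    #
    #   def getSettingDataString(self):
    #       return """{
    #           "name": "Example Script",
    #           "key": "ExampleScript",
    #           "metadata": {},
    #           "version": 2,
    #           "settings": {
    #               "speed": {
    #                   "label": "Speed",
    #                   "description": "Speed value to apply",
    #                   "type": "float",
    #                   "default_value": 60.0
    #               }
    #           }
    #       }"""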
def getDefinitionId(self):
if self._stack:
return self._stack.getBottom().getId()
def getStackId(self):
if self._stack:
return self._stack.getId()
## Convenience function that retrieves value of a setting from the stack.
def getSettingValueByKey(self, key):
return self._stack.getProperty(key, "value")
## Convenience function that finds the value in a line of g-code.
# When requesting key = x from line "G1 X100" the value 100 is returned.
def getValue(self, line, key, default = None):
        if key not in line or (';' in line and line.find(key) > line.find(';')):
return default
sub_part = line[line.find(key) + 1:]
m = re.search('^-?[0-9]+\.?[0-9]*', sub_part)
if m is None:
return default
try:
return float(m.group(0))
except:
return default
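    # Illustrative behaviour: for line = "G1 X100.5 Y20", self.getValue(line, "X")
    # returns 100.5, self.getValue(line, "Y") returns 20.0, and self.getValue(line, "Z")
    # falls back to the supplied default.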
## Convenience function to produce a line of g-code.
#
# You can put in an original g-code line and it'll re-use all the values
# in that line.
# All other keyword parameters are put in the result in g-code's format.
# For instance, if you put ``G=1`` in the parameters, it will output
# ``G1``. If you put ``G=1, X=100`` in the parameters, it will output
# ``G1 X100``. The parameters G and M will always be put first. The
# parameters T and S will be put second (or first if there is no G or M).
# The rest of the parameters will be put in arbitrary order.
# \param line The original g-code line that must be modified. If not
# provided, an entirely new g-code line will be produced.
# \return A line of g-code with the desired parameters filled in.
def putValue(self, line = "", **kwargs):
#Strip the comment.
comment = ""
if ";" in line:
comment = line[line.find(";"):]
line = line[:line.find(";")] #Strip the comment.
#Parse the original g-code line.
for part in line.split(" "):
if part == "":
continue
parameter = part[0]
if parameter in kwargs:
continue #Skip this one. The user-provided parameter overwrites the one in the line.
value = part[1:]
kwargs[parameter] = value
#Write the new g-code line.
result = ""
priority_parameters = ["G", "M", "T", "S", "F", "X", "Y", "Z", "E"] #First some parameters that get priority. In order of priority!
for priority_key in priority_parameters:
if priority_key in kwargs:
if result != "":
result += " "
result += priority_key + str(kwargs[priority_key])
del kwargs[priority_key]
for key, value in kwargs.items():
if result != "":
result += " "
result += key + str(value)
#Put the comment back in.
if comment != "":
if result != "":
result += " "
result += ";" + comment
return result
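    # Illustrative behaviour: self.putValue("G1 X100", F=600) keeps the existing
    # G and X words, inserts the F parameter in priority order and returns
    # "G1 F600 X100".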
## This is called when the script is executed.
# It gets a list of g-code strings and needs to return a (modified) list.
def execute(self, data):
raise NotImplementedError()
| 1.898438 | 2 |
src/app/entities/hashing_algorithms/sha1.py | dieisabel/cypherman | 0 | 12771547 | """Module for SHA1 hashing algorithm"""
__all__ = ['SHA1HashingAlgorithm']
import hashlib
from entities.hashing_algorithms import IHashingAlgorithm
class SHA1HashingAlgorithm(IHashingAlgorithm):
"""SHA1 hashing algorithm
Attributes:
name: Algorithm name
        bits: Number of bits in the digest
        is_secure: Whether the algorithm is safe for security-sensitive uses
"""
name: str = "sha1"
bits: int = 160
is_secure: bool = False
def hash(self, data: str) -> str:
"""Hash data with SHA1 hashing algorithm
Args:
data: Data to hash
Returns:
Checksum
"""
encoded_data: bytes = data.encode('utf-8')
return hashlib.sha1(encoded_data).hexdigest()
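# Illustrative usage: SHA1HashingAlgorithm().hash("abc") returns the standard
# SHA-1 test-vector digest "a9993e364706816aba3e25717850c26c9cd0d89d".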
| 3.25 | 3 |
hornet/backend/tasks/apis/api_schema.py | defnngj/test_dev06 | 0 | 12771548 | from projects.models import Project
from ninja import Schema
from typing import List, Any
class TaskIn(Schema):
    """Task input parameters."""
project: int
name: str
describe: str = None
cases: list
class ResultOut(Schema):
    """Test report response."""
name: str
passed: int
error: int
failure: int
skipped: int
tests: int
run_time: float
result: str
create_time: Any
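# Illustrative sketch of how these schemas are consumed by django-ninja
# endpoints (the router and paths below are assumed, not defined here):
#
#   @router.post("/tasks")
#   def create_task(request, payload: TaskIn):
#       ...
#
#   @router.get("/tasks/{task_id}/result", response=ResultOut)
#   def get_result(request, task_id: int):
#       ...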
| 2.234375 | 2 |
src/baldor/quaternion.py | fsuarez6/baldor | 9 | 12771549 | <filename>src/baldor/quaternion.py
#!/usr/bin/env python
"""
Functions to operate quaternions.
.. important:: Quaternions :math:`w + ix + jy + kz` are represented as :math:`[w, x, y, z]`.
"""
import math
import numpy as np
# Local modules
import baldor as br
def are_equal(q1, q2, rtol=1e-5, atol=1e-8):
"""
Returns True if two quaternions are equal within a tolerance.
Parameters
----------
q1: array_like
First input quaternion (4 element sequence)
q2: array_like
Second input quaternion (4 element sequence)
rtol: float
The relative tolerance parameter.
atol: float
The absolute tolerance parameter.
Returns
-------
equal : bool
True if `q1` and `q2` are `almost` equal, False otherwise
See Also
--------
numpy.allclose: Contains the details about the tolerance parameters
Notes
-----
Quaternions :math:`w + ix + jy + kz` are represented as :math:`[w, x, y, z]`.
Examples
--------
>>> import baldor as br
>>> q1 = [1, 0, 0, 0]
>>> br.quaternion.are_equal(q1, [0, 1, 0, 0])
False
>>> br.quaternion.are_equal(q1, [1, 0, 0, 0])
True
>>> br.quaternion.are_equal(q1, [-1, 0, 0, 0])
True
"""
if np.allclose(q1, q2, rtol, atol):
return True
return np.allclose(np.array(q1)*-1, q2, rtol, atol)
def conjugate(q):
"""
Compute the conjugate of a quaternion.
Parameters
----------
q: array_like
Input quaternion (4 element sequence)
Returns
-------
qconj: ndarray
The conjugate of the input quaternion.
Notes
-----
Quaternions :math:`w + ix + jy + kz` are represented as :math:`[w, x, y, z]`.
Examples
--------
>>> import baldor as br
>>> q0 = br.quaternion.random()
>>> q1 = br.quaternion.conjugate(q0)
>>> q1[0] == q0[0] and all(q1[1:] == -q0[1:])
True
"""
qconj = np.array(q, dtype=np.float64, copy=True)
np.negative(qconj[1:], qconj[1:])
return qconj
def dual_to_transform(qr, qt):
"""
Return a homogeneous transformation from the given dual quaternion.
Parameters
----------
qr: array_like
Input quaternion for the rotation component (4 element sequence)
qt: array_like
Input quaternion for the translation component (4 element sequence)
Returns
-------
T: array_like
Homogeneous transformation (4x4)
Notes
-----
Some literature prefers to use :math:`q` for the rotation component and
:math:`q'` for the translation component
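    Examples
    --------
    >>> import numpy as np
    >>> import baldor as br
    >>> # Identity rotation combined with a pure translation of (1, 0, 0)
    >>> T = br.quaternion.dual_to_transform([1, 0, 0, 0], [0, 0.5, 0, 0])
    >>> np.allclose(T[:3, 3], [1, 0, 0])
    True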
"""
T = np.eye(4)
R = br.quaternion.to_transform(qr)[:3, :3]
t = 2*br.quaternion.multiply(qt, br.quaternion.conjugate(qr))
T[:3, :3] = R
T[:3, 3] = t[1:]
return T
def inverse(q):
"""
Return multiplicative inverse of a quaternion
Parameters
----------
q: array_like
Input quaternion (4 element sequence)
Returns
-------
qinv : ndarray
The inverse of the input quaternion.
Notes
-----
Quaternions :math:`w + ix + jy + kz` are represented as :math:`[w, x, y, z]`.
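    Examples
    --------
    >>> import numpy as np
    >>> import baldor as br
    >>> q = [0, 1, 0, 0]
    >>> np.allclose(br.quaternion.multiply(q, br.quaternion.inverse(q)), [1, 0, 0, 0])
    True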
"""
return conjugate(q) / norm(q)
def multiply(q1, q2):
"""
Multiply two quaternions
Parameters
----------
q1: array_like
First input quaternion (4 element sequence)
q2: array_like
Second input quaternion (4 element sequence)
Returns
-------
result: ndarray
The resulting quaternion
Notes
-----
`Hamilton product of quaternions
<http://en.wikipedia.org/wiki/Quaternions#Hamilton_product>`_
Quaternions :math:`w + ix + jy + kz` are represented as :math:`[w, x, y, z]`.
Examples
--------
>>> import numpy as np
>>> import baldor as br
>>> q = br.quaternion.multiply([4, 1, -2, 3], [8, -5, 6, 7])
>>> np.allclose(q, [28, -44, -14, 48])
True
"""
w1, x1, y1, z1 = q1
w2, x2, y2, z2 = q2
return np.array([-x1*x2 - y1*y2 - z1*z2 + w1*w2,
x1*w2 + y1*z2 - z1*y2 + w1*x2,
-x1*z2 + y1*w2 + z1*x2 + w1*y2,
x1*y2 - y1*x2 + z1*w2 + w1*z2], dtype=np.float64)
def norm(q):
"""
Compute quaternion norm
Parameters
----------
q : array_like
Input quaternion (4 element sequence)
Returns
-------
n : float
        Squared quaternion norm, i.e. the dot product of `q` with itself
Notes
-----
Quaternions :math:`w + ix + jy + kz` are represented as :math:`[w, x, y, z]`.
"""
return np.dot(q, q)
def random(rand=None):
"""
Generate an uniform random unit quaternion.
Parameters
----------
rand: array_like or None
Three independent random variables that are uniformly distributed
between 0 and 1.
Returns
-------
qrand: array_like
The random quaternion
Notes
-----
Quaternions :math:`w + ix + jy + kz` are represented as :math:`[w, x, y, z]`.
Examples
--------
>>> import numpy as np
>>> import baldor as br
>>> q = br.quaternion.random()
>>> np.allclose(1, np.linalg.norm(q))
True
"""
if rand is None:
rand = np.random.rand(3)
else:
assert len(rand) == 3
r1 = np.sqrt(1.0 - rand[0])
r2 = np.sqrt(rand[0])
pi2 = math.pi * 2.0
t1 = pi2 * rand[1]
t2 = pi2 * rand[2]
return np.array([np.cos(t2)*r2, np.sin(t1)*r1, np.cos(t1)*r1, np.sin(t2)*r2])
def to_axis_angle(quaternion, identity_thresh=None):
"""
Return axis-angle rotation from a quaternion
Parameters
----------
quaternion: array_like
Input quaternion (4 element sequence)
identity_thresh : None or scalar, optional
Threshold below which the norm of the vector part of the quaternion (x,
y, z) is deemed to be 0, leading to the identity rotation. None (the
default) leads to a threshold estimated based on the precision of the
input.
Returns
----------
axis: array_like
axis around which rotation occurs
angle: float
angle of rotation
Notes
-----
Quaternions :math:`w + ix + jy + kz` are represented as :math:`[w, x, y, z]`.
    A quaternion for which x, y, z are all equal to 0 is an identity rotation.
    In this case we return `angle=0` and `axis=[1, 0, 0]`; the choice of axis is
    arbitrary.
Examples
--------
>>> import numpy as np
>>> import baldor as br
    >>> axis, angle = br.quaternion.to_axis_angle([0.99810947, 0.06146124, 0, 0])
    >>> np.allclose(axis, [1, 0, 0])
    True
    >>> np.isclose(angle, 0.123)
    True
"""
w, x, y, z = quaternion
Nq = norm(quaternion)
if not np.isfinite(Nq):
return np.array([1.0, 0, 0]), float('nan')
if identity_thresh is None:
try:
identity_thresh = np.finfo(Nq.type).eps * 3
except (AttributeError, ValueError): # Not a numpy type or not float
identity_thresh = br._FLOAT_EPS * 3
if Nq < br._FLOAT_EPS ** 2: # Results unreliable after normalization
return np.array([1.0, 0, 0]), 0.0
if not np.isclose(Nq, 1): # Normalize if not normalized
s = math.sqrt(Nq)
w, x, y, z = w / s, x / s, y / s, z / s
len2 = x*x + y*y + z*z
if len2 < identity_thresh**2:
# if vec is nearly 0,0,0, this is an identity rotation
return np.array([1.0, 0, 0]), 0.0
# Make sure w is not slightly above 1 or below -1
theta = 2 * math.acos(max(min(w, 1), -1))
return np.array([x, y, z]) / math.sqrt(len2), theta
def to_euler(quaternion, axes='sxyz'):
"""
Return Euler angles from a quaternion using the specified axis sequence.
Parameters
----------
    quaternion: array_like
Input quaternion (4 element sequence)
axes: str, optional
Axis specification; one of 24 axis sequences as string or encoded tuple
Returns
-------
ai: float
First rotation angle (according to axes).
aj: float
Second rotation angle (according to axes).
ak: float
Third rotation angle (according to axes).
Notes
-----
Many Euler angle triplets can describe the same rotation matrix
Quaternions :math:`w + ix + jy + kz` are represented as :math:`[w, x, y, z]`.
Examples
--------
>>> import numpy as np
>>> import baldor as br
>>> ai, aj, ak = br.quaternion.to_euler([0.99810947, 0.06146124, 0, 0])
>>> np.allclose([ai, aj, ak], [0.123, 0, 0])
True
"""
return br.transform.to_euler(to_transform(quaternion), axes)
def to_transform(quaternion):
"""
Return homogeneous transformation from a quaternion.
Parameters
----------
quaternion: array_like
Input quaternion (4 element sequence)
Returns
-------
T: array_like
Homogeneous transformation (4x4)
Notes
-----
Quaternions :math:`w + ix + jy + kz` are represented as :math:`[w, x, y, z]`.
Examples
--------
>>> import numpy as np
>>> import baldor as br
>>> T0 = br.quaternion.to_transform([1, 0, 0, 0]) # Identity quaternion
>>> np.allclose(T0, np.eye(4))
True
>>> T1 = br.quaternion.to_transform([0, 1, 0, 0]) # 180 degree rot around X
>>> np.allclose(T1, np.diag([1, -1, -1, 1]))
True
"""
q = np.array(quaternion, dtype=np.float64, copy=True)
n = np.dot(q, q)
if n < br._EPS:
return np.identity(4)
q *= math.sqrt(2.0 / n)
q = np.outer(q, q)
return np.array([
[1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0],
[q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0],
[q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],
[0.0, 0.0, 0.0, 1.0]])
| 4.21875 | 4 |
bridge.py | kharidiron/DSTcord | 0 | 12771550 | <reponame>kharidiron/DSTcord
#!/usr/bin/env python3
import asyncio
import re
import aiofiles
from discord.ext import commands
import watchgod
import yaml
description = '''
A DST to Discord Bridge.
Relies on some hackery using named pipes when starting the DST server.
I'll document that bit later.
'''
with open('vars.yml', 'r') as f:
vars = yaml.load(f, Loader=yaml.FullLoader)
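# vars.yml is expected to provide the keys used throughout this script
# (illustrative sketch; the values below are placeholders):
#
#   token: "discord-bot-token"
#   prefix: "!"
#   channel_id: "123456789012345678"
#   cluster_name: "My DST Server"
#   cluster_password: "hunter2"
#   dst_pipe: "/path/to/dst/command_pipe"
#   dst_chatlog: "/path/to/dst/chat_log.txt"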
bot = commands.Bot(command_prefix=vars['prefix'])
@bot.event
async def on_ready():
print('Logged in as')
print(bot.user.name)
print(bot.user.id)
print('------')
channel = bot.get_channel(int(vars['channel_id']))
await channel.send("DST Bridge is live!")
await channel.send(f"Join the server '{vars['cluster_name']}'. Password is '{vars['cluster_password']}'.")
await asyncio.gather(incoming_game_message())
@bot.command(description="Information on how to connect to the DST Server")
async def connect(ctx):
await ctx.send(f"Server name is '{vars['cluster_name']}'. The password is '{vars['cluster_password']}'.")
@bot.listen('on_message')
async def incoming_discord_message(message):
if message.author.id == bot.user.id:
return
    if message.content.startswith(bot.command_prefix):
        # Watch for commands
await bot.process_commands(message)
with open(vars['dst_pipe'], 'w') as pipe:
pipe.write(f'TheNet:SystemMessage("[DC] <{message.author.display_name}> {message.content}")\n')
preamble_strip = re.compile(r"\[\d\d:\d\d:\d\d\]: \[(.*)\] (\(.*\) )?")
spoken_parse = re.compile(r"^(.*?): (.*)")
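# Illustrative (assumed) chat-log line and how the two patterns split it:
#   raw = '[00:01:02]: [Say] (KU_abc123) Wilson: hello there'
#   preamble_strip.split(raw)  -> ['', 'Say', '(KU_abc123) ', 'Wilson: hello there']
#   spoken_parse.split('Wilson: hello there') -> ['', 'Wilson', 'hello there', '']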
async def incoming_game_message():
async with aiofiles.open(vars['dst_chatlog'], mode='r') as f:
        async for _ in watchgod.awatch(vars['dst_chatlog']):
            # On each change event, read the next line, then skip ahead so that
            # only the newest appended line is relayed to Discord.
            content = await f.readline()
            async for content in f:
                pass
raw = content.rstrip()
msg = preamble_strip.split(raw)
try:
if msg[1] =='Say':
spoken = spoken_parse.split(msg[3])
await(print_game_message(f"<{spoken[1]}> {spoken[2]}"))
elif msg[1] == 'Join Announcement':
await(print_game_message(f"**{msg[3]} has joined**"))
elif msg[1] == 'Leave Announcement':
await(print_game_message(f"**{msg[3]} has left**"))
elif msg[1] in ['Death Announcement', 'Resurrect Announcement']:
await(print_game_message(f"**{msg[3]}**"))
except Exception as e:
print(f"! Exception occurred. Type: {type(e).__name__}")
print(f"--- Bad line ---: {raw}")
async def print_game_message(message):
channel = bot.get_channel(int(vars['channel_id']))
await channel.send(message)
if __name__ == '__main__':
bot.run(vars['token'], bot=True)
| 2.453125 | 2 |