max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
Archive for old stuff/BluetoothServer/Old/gatt-python.py | Kandidatarbete-Chalmers-MCCX02-19-06/RaspberryPiRadarProgram | 7 | 12763051 | import gatt
class AnyDeviceManager(gatt.DeviceManager):
    """Device manager that reports every BLE device it discovers."""

    def device_discovered(self, device):
        # Report the device's MAC address and advertised alias on stdout.
        info = (device.mac_address, device.alias())
        print("Discovered [%s] %s" % info)
# Scan for BLE devices on the first Bluetooth adapter until interrupted.
manager = AnyDeviceManager(adapter_name='hci0')
manager.start_discovery()
manager.run()
specklepy/reduction/dark.py | felixbosco/specklepy | 0 | 12763052 | import numpy as np
import os
from astropy.io import fits
from astropy.stats import sigma_clip, sigma_clipped_stats
from specklepy.logging import logger
from specklepy.reduction.subwindow import SubWindow
from specklepy.utils.time import default_time_stamp
class MasterDark(object):
    """Combine a set of dark frames into a master dark with variance and mask maps.

    The master dark image, its variance map and a bad-pixel mask can be written
    to / restored from a FITS file and subtracted from science frames.
    """

    # Names of the FITS extensions holding the auxiliary data planes.
    extensions = {'variance': 'VAR', 'mask': 'MASK'}

    def __init__(self, file_list, file_name='MasterDark.fits', file_path=None, out_dir=None, setup=None,
                 sub_window=None, new=True):
        """Store file locations and the detector sub-window descriptor.

        Args:
            file_list: Input dark files to combine (None when restoring via `from_file`).
            file_name: Name of the output FITS file.
            file_path: Directory containing the input files ('' if None).
            out_dir: Output directory ('' if None).
            setup: Optional setup identifier inserted into the file name.
            sub_window: Sub-window string, or an iterable of (assumed identical) strings.
            new: Accepted but never read in this class.  # NOTE(review): unused parameter
        """
        self.files = file_list
        self.file_name = self.insert_setup_to_file_name(file_name=file_name, setup=setup)
        self.file_path = file_path if file_path is not None else ''
        self.out_dir = out_dir if out_dir is not None else ''
        # Store sub-window
        if isinstance(sub_window, str):
            self.sub_window = sub_window
        else:
            # Assumes all provided sub-window strings agree; keep the unique value.
            self.sub_window = np.unique(sub_window)[0]
        # Initialize maps
        self.image = None
        self.var = None
        self.mask = None

    @classmethod
    def from_file(cls, file_path):
        """Restore a MasterDark instance from a previously written FITS file.

        Missing VAR/MASK extensions are tolerated (raise KeyError in astropy)
        and leave the corresponding attribute as None.
        """
        # Create object from path information
        out_dir, file_name = os.path.split(file_path)
        obj = cls(file_list=None, file_name=file_name, out_dir=out_dir, setup=None)
        # Load data from file
        obj.image = fits.getdata(obj.path)
        try:
            obj.var = fits.getdata(obj.path, obj.extensions.get('variance'))
        except KeyError:
            logger.debug(f"Loading MasterDark from file {obj.path!r} without {obj.extensions.get('variance')!r} "
                         f"extension")
        try:
            obj.mask = fits.getdata(obj.path, obj.extensions.get('mask')).astype(bool)
        except KeyError:
            logger.debug(f"Loading MasterDark from file {obj.path!r} without {obj.extensions.get('mask')!r} "
                         f"extension")
        obj.sub_window = fits.getheader(obj.path)["HIERARCH SPECKLEPY REDUCTION SUBWIN"]
        return obj

    @property
    def path(self):
        # Full path of the master dark FITS file.
        return os.path.join(self.out_dir, self.file_name)

    @staticmethod
    def insert_setup_to_file_name(file_name, setup=None):
        """Insert a setup identifier before the file extension, if one is given."""
        if setup is None:
            return file_name
        else:
            base, ext = os.path.splitext(file_name)
            return f"{base}_{setup}{ext}"

    def combine(self, max_number_frames=None, rejection_threshold=10):
        """Combine the input dark files into `self.image`, `self.var` and `self.mask`.

        2-dim inputs contribute a single frame with zero variance; 3-dim cubes
        are reduced with sigma-clipped statistics along the time axis.
        `max_number_frames` is currently unused (see commented code below).
        """
        logger.info("Combining master dark frame...")
        # if max_number_frames is not None:
        #     logger.debug(f"Using only the first {max_number_frames} frames of each cube")
        means = []
        vars = []
        number_frames = []
        # Iterate through files
        for file in self.files:
            logger.info(f"Reading DARK frames from file {file!r}...")
            path = os.path.join(self.file_path, file)
            with fits.open(path) as hdu_list:
                data = hdu_list[0].data.squeeze()
                if data.ndim == 2:
                    # Single frame: no temporal statistics available.
                    means.append(data)
                    vars.append(np.zeros(data.shape))
                    # self.combine_mask(np.zeros(data.shape, dtype=bool))
                    number_frames.append(1)
                elif data.ndim == 3:
                    logger.info("Computing statistics of data cube...")
                    clipped_mean, _, clipped_std = sigma_clipped_stats(data=data, sigma=rejection_threshold, axis=0)
                    # mean = np.mean(data, axis=0)
                    # std = np.std(data, axis=0)
                    #
                    # # Identify outliers based on sigma-clipping
                    # mean_mask = sigma_clip(mean, sigma=rejection_threshold, masked=True).mask
                    # std_mask = sigma_clip(std, sigma=rejection_threshold, masked=True).mask
                    # mask = np.logical_or(mean_mask, std_mask)
                    # mask_indexes = np.array(np.where(mask)).transpose()
                    #
                    # # Re-compute the identified pixels
                    # logger.info(f"Re-measuring {len(mask_indexes)} outliers...")
                    # for mask_index in mask_indexes:
                    #     # Extract t-series for the masked pixel
                    #     arr = data[:, mask_index[0], mask_index[1]]
                    #
                    #     # Compute sigma-clipped statistics for this pixel
                    #     arr_mean, _, arr_std = sigma_clipped_stats(arr, sigma=rejection_threshold)
                    #     mean[mask_index[0], mask_index[1]] = arr_mean
                    #     std[mask_index[0], mask_index[1]] = arr_std
                    #
                    # mean = sigma_clip(mean, sigma=rejection_threshold, masked=True)
                    # std = sigma_clip(std, sigma=rejection_threshold, masked=True)
                    # Store results into lists
                    means.append(clipped_mean)
                    vars.append(np.square(clipped_std))
                    # self.combine_mask(np.logical_or(mean.mask, std.mask))
                    number_frames.append(data.shape[0])
                else:
                    raise ValueError(f"Shape of data {data.shape} is not understood. Data must be either 2 or "
                                     f"3-dimensional!")
        # Cast list of arrays into 3-dim arrays
        means = np.array(means)
        vars = np.array(vars)
        # Combine variances
        if (vars == 0).all():  # catch case, where all frames have no variance
            self.var = np.var(means, axis=0)
        else:
            self.var = np.average(vars, axis=0, weights=number_frames)
        # Build mask based on variances
        bpm = self.var == 0  # Bad pixel mask
        if bpm.all():  # Catch case, where all frames have no variance
            bpm = np.zeros(bpm.shape, dtype=bool)
        gpm = ~bpm  # Good pixel mask
        # Build weights based on variance, and combine images
        # NOTE(review): np.reciprocal with `where=gpm` leaves masked entries
        # uninitialized; they only matter where bpm is True — confirm intended.
        weights = np.multiply(np.reciprocal(self.var, where=gpm), np.expand_dims(number_frames, (1, 2)))
        self.image = np.average(means, axis=0, weights=weights)
        # Combine mask
        self.mask = bpm

    # def combine_var(self, new_var):
    #     if self.var is None:
    #         self.var = new_var
    #     else:
    #         self.var = np.add(self.var, new_var)
    #
    # def combine_mask(self, new_mask):
    #     if self.mask is None:
    #         self.mask = new_mask
    #     else:
    #         self.mask = np.logical_or(self.mask, new_mask)

    def write(self, overwrite=True):
        """Write image, variance and mask to `self.path` as a multi-extension FITS file."""
        # Build primary HDU
        header = fits.Header()
        for index, file in enumerate(self.files):
            header.set(f"HIERARCH SPECKLEPY SOURCE FILE{index:04} NAME", os.path.basename(file))
        header.set("HIERARCH SPECKLEPY REDUCTION SUBWIN", self.sub_window)
        primary = fits.PrimaryHDU(data=self.image, header=header)
        # Build HDU list
        hdu_list = fits.HDUList([primary])
        # Build variance HDU
        if self.var is not None:
            var_hdu = fits.ImageHDU(data=self.var, name=self.extensions.get('variance'))
            hdu_list.append(var_hdu)
        # Build mask HDU
        if self.mask is not None:
            # FITS does not store bool images; cast to int16.
            mask_hdu = fits.ImageHDU(data=self.mask.astype(np.int16), name=self.extensions.get('mask'))
            hdu_list.append(mask_hdu)
        # Write HDU list to file
        logger.info(f"Writing master dark frame to file {self.path!r}")
        hdu_list.writeto(self.path, overwrite=overwrite)

    def subtract(self, file_path, extension=None, sub_window=None, sub_window_order='xy'):
        """Subtract the master dark from a file containing image data.
        The master dark is subtracted from the image or each frame in a data cube. Then uncertainties are propagated.
        Arguments:
            file_path (str):
                Path to the file, containing image data.
            extension (str, optional):
                Classifier for the image data extension.
            sub_window (str, optional):
                Sub-window string to initialize sub-windows from.
            sub_window_order (str, optional):
                Order of axis in the sub-window strings.
        """
        logger.info(f"Subtracting master dark {self.file_name!r} from file at {file_path!r}")
        # Construct sub-window
        sub_window = SubWindow.from_str(sub_window, full=self.sub_window, order=sub_window_order)
        # Construct good pixel mask
        if self.mask is None:
            gpm = np.ones(sub_window(self.image).shape, dtype=bool)
        else:
            gpm = sub_window(~self.mask)
        # Load image data
        data = fits.getdata(file_path, extension)
        # Subtract
        # NOTE(review): np.subtract with `where=gpm` and no `out=` leaves the
        # masked (bad-pixel) entries of the result uninitialized — confirm.
        if data.ndim == 2:
            data = np.subtract(data, sub_window(self.image), where=gpm)
        elif data.ndim == 3:
            for f, frame in enumerate(data):
                data[f] = np.subtract(frame, sub_window(self.image), where=gpm)
        # Propagate variances
        try:
            var = fits.getdata(file_path, self.extensions.get('variance'))
            has_var_hdu = True
            var = np.add(var, sub_window(self.var), where=gpm)
        except KeyError:
            # Target file has no VAR extension yet; adopt the master dark's variance.
            has_var_hdu = False
            var = sub_window(self.var)
        # Propagate mask
        try:
            mask = fits.getdata(file_path, self.extensions.get('mask')).astype(bool)
            has_mask_hdu = True
            mask = np.logical_or(mask, sub_window(self.mask))
        except KeyError:
            # Target file has no MASK extension yet; adopt the master dark's mask.
            has_mask_hdu = False
            mask = sub_window(self.mask)
        # Store data to cube
        with fits.open(file_path, mode='update') as hdu_list:
            # Update header
            hdu_list[0].header.set('HIERARCH SPECKLEPY REDUCTION DARKCORR', default_time_stamp())
            # Image data
            hdu_list[0].data = data
            # Variance data
            if has_var_hdu:
                hdu_list[self.extensions.get('variance')].data = var
            else:
                var_hdu = fits.ImageHDU(data=var, name=self.extensions.get('variance'))
                hdu_list.append(var_hdu)
            # Mask data
            if has_mask_hdu:
                hdu_list[self.extensions.get('mask')].data = mask.astype(np.int16)
            else:
                mask_hdu = fits.ImageHDU(data=mask.astype(np.int16), name=self.extensions.get('mask'))
                hdu_list.append(mask_hdu)
            # Write HDU list to file
            logger.info(f"Updating dark subtraction in file {file_path!r}")
            hdu_list.flush()
| 2.09375 | 2 |
appion/bin/testStackFormat.py | leschzinerlab/myami-3.2-freeHand | 0 | 12763053 | #!/usr/bin/env python
#python
import os
import shutil
import random
#appion
from appionlib import appionScript
from appionlib import apStack
from appionlib import apDisplay
from appionlib import apStackFormat
class convertStackScript(appionScript.AppionScript):
    """Appion script that links/converts a particle stack to another package's format."""
    #=====================
    def setupParserOptions(self):
        """Define command-line options: source stack id and target format."""
        # Supported target stack formats.
        self.formatoptions = ("eman", "spider", "frealign", "xmipp")
        self.parser.set_usage("Usage: %prog --stackid=ID --format=PROGRAM_NAME [options]")
        self.parser.add_option("-s", "--stackid", dest="stackid", type="int",
            help="Stack database id", metavar="ID")
        self.parser.add_option("--format", dest="format",
            default="spider", type="choice", choices=self.formatoptions,
            help="Format to be converted to, options: "+str(self.formatoptions))

    #=====================
    def checkConflicts(self):
        """Validate that the required parameters were supplied."""
        if self.params['stackid'] is None:
            apDisplay.printError("stackid was not defined")
        if self.params['runname'] is None:
            apDisplay.printError("new runname was not defined")

    #=====================
    def setRunDir(self):
        """Place the run directory as a sibling of the source stack's directory."""
        stackdata = apStack.getOnlyStackData(self.params['stackid'], msg=False)
        path = stackdata['path']['path']
        uppath = os.path.dirname(os.path.abspath(path))
        self.params['rundir'] = os.path.join(uppath, self.params['runname'])

    #=====================
    def start(self):
        """Link the stack into the requested format."""
        #new stack path
        stackdata = apStack.getOnlyStackData(self.params['stackid'])
        apStackFormat.linkFormattedStack(stackdata, self.params['format'],'test')
        #apStackFormat.replaceFormattedStack(stackdata, self.params['format'], self.params['rundir'],'normlist.doc')
#=====================
if __name__ == "__main__":
    # Run the conversion script, then release resources/DB connections.
    subStack = convertStackScript()
    subStack.start()
    subStack.close()
| 2.359375 | 2 |
rise_of_machines/helpers.py | fpdevil/rise_of_machines | 0 | 12763054 | #!/usr/bin/env python3
# coding: utf-8
# Contains common methods frequently used across....
# The example reference at the below matplotlib is helpful in choosing an
# appropriate colormap for the output plot
# https://matplotlib.org/examples/color/colormaps_reference.html
# import the necessary packages
import numpy as np
import matplotlib.pyplot as plt
def create_meshgrid(x, y, margin=1, step=0.02):
    """Build a rectangular numpy meshgrid spanning the data plus a margin.

    @ref https://stackoverflow.com/questions/36013063
    /what-is-the-purpose-of-meshgrid-in-python-numpy

    :x: array-like x coordinates
    :y: array-like y coordinates
    :margin: (int) padding added around the data extent
    :step: (float) grid spacing, default = 0.02

    Examples
    --------
    x = np.array([0, 1, 2, 3, 4])
    y = np.array([0, 1, 2, 3, 4])
    xx,yy=np.meshgrid(x,y)
    plt.plot(xx,yy, marker='.', color='k',linestyle='none')
    """
    # Extend the data range by the margin on both sides.
    lo_x, hi_x = x.min() - margin, x.max() + margin
    lo_y, hi_y = y.min() - margin, y.max() + margin
    # Evaluate the grid axes, then cross them into 2-D coordinate arrays.
    xs = np.arange(lo_x, hi_x, step)
    ys = np.arange(lo_y, hi_y, step)
    xx, yy = np.meshgrid(xs, ys)
    return xx, yy
def draw_decision_boundary(x,
                           y,
                           classifier,
                           margin=1,
                           step=0.02,
                           alpha=0.8,
                           cmap=plt.cm.coolwarm):
    """Draw decision boundary separating the collections

    Parameters
    ----------
    x: {array-like}, shape = [n_samples, n_features]
    y: array-like, shape = [n_samples]
    classifier: fitted estimator exposing a ``predict`` method
    margin: margin for the min and max
    step: float
        This is spacing between values. For any output out, this is the distance
        between two adjacent values, out[i+1] - out[i]
    alpha: float
        color alpha value
    cmap: color map
    """
    # set-up the marker generator and color map for plotting
    markers = ('s', 'o', 'x', '^', 'v')
    # for data, first set-up a grid for plotting.
    X0, X1 = x[:, 0], x[:, 1]
    xx, yy = create_meshgrid(X0, X1, margin, step)
    mesh = np.array([xx.ravel(), yy.ravel()])
    # Fix: the original passed ``format(mesh)`` as a second positional argument
    # to print ('print("np.array: {}", format(mesh))') instead of calling
    # str.format on the template string.
    print("np.array: {}".format(mesh))
    # compute the classifiers output (transpose so rows are grid points)
    Z = classifier.predict(mesh.T)
    Z = Z.reshape(xx.shape)
    # now plot the contour
    plt.contourf(xx, yy, Z, alpha=alpha, cmap=cmap)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    # overlay the data points, one marker per class label
    for idx, cl in enumerate(np.unique(y)):
        print("cl: ", cl)
        plt.scatter(
            x=x[y == cl, 0],
            y=x[y == cl, 1],
            alpha=alpha,
            marker=markers[idx],
            label=cl,
            edgecolor='yellow')
def plot_classifier(X,
                    y,
                    classifier,
                    margin=1.0,
                    step_size=0.01,
                    alpha=0.8,
                    test_idx=None,
                    cmap=plt.cm.Paired):
    """Draw the datapoints and boundaries
    Parameters
    ----------
    x: {array-like}, shape = [n_samples, n_features]
    y: array-like, shape = [n_samples]
    margin: margin for the min and max
    step_size: float
        This is spacing between values. For any output out, this is the distance
        between two adjacent values, out[i+1] - out[i]
    alpha: float
        blending value to decide transparency - 0 (transparent) and 1 (opaque)
    test_idx: list
    cmap: object
        color map for the output colors of objects
    """
    # set-up the marker generator for plotting
    markers = ('s', 'o', 'x', '*', 'v')
    # setup and define a range for plotting the data
    X0, X1 = X[:, 0], X[:, 1]
    xx, yy = create_meshgrid(X0, X1, margin=margin, step=step_size)
    # compute the output of the classifier; np.c_ stacks grid points row-wise
    mesh = np.c_[xx.ravel(), yy.ravel()]
    mesh_output = classifier.predict(mesh)
    # reshape the array
    mesh_output = mesh_output.reshape(xx.shape)
    # draw and fill contour lines
    plt.contourf(xx, yy, mesh_output, alpha=0.4, cmap=cmap)
    # now overlay the training coordinates over the plot
    # set boundaries
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    # integer tick marks covering the data extent
    plt.xticks((np.arange(int(min(X[:, 0]) - 1), int(max(X[:, 0]) + 1), 1.0)))
    plt.yticks((np.arange(int(min(X[:, 1]) - 1), int(max(X[:, 1]) + 1), 1.0)))
    # use a separate marker for each training label
    for (i, cl) in enumerate(np.unique(y)):
        plt.scatter(
            x=X[y == cl, 0],
            y=X[y == cl, 1],
            alpha=alpha,
            marker=markers[i],
            label=cl,
            edgecolors='purple')
    # plotting and highlighting the test samples
    if test_idx:
        # x_test, y_test = X[test_idx, :], y[test_idx]
        x_test = X[test_idx, :]
        # NOTE(review): c='' is deprecated in newer matplotlib; 'none' is the
        # documented way to draw unfilled markers — confirm target version.
        plt.scatter(
            x_test[:, 0],
            x_test[:, 1],
            c='',
            edgecolors='purple',
            alpha=alpha,
            linewidths=1,
            marker='o',
            s=100,
            label='Test Data')
| 4.09375 | 4 |
8bit subleq CPU/assemble.py | MajFontana/computer-architecture | 0 | 12763055 | import ext
import re
# Input assembly source file and binary output file.
CODE = "assembly.txt"
OUTPUT = "assembled.bin"
def parse(value):
    """Parse a numeric literal: '0b...' binary, '0x...' hex, otherwise decimal.

    Raises ValueError for an unsupported '0<letter>' prefix (e.g. '0o') and
    for non-numeric input. The original left `parsed` unbound for unsupported
    prefixes, producing a confusing UnboundLocalError.
    """
    if re.match(r"^0[a-zA-Z]", value):
        if value[1] == "b":
            return int(value, 2)
        if value[1] == "x":
            return int(value, 16)
        raise ValueError("unsupported numeric prefix: %r" % value)
    return int(value)
# Read the assembly source; lowercase everything and normalize tabs to spaces.
with open(CODE, "r") as f:
    lines = f.read().lower().replace("\t", " ").split("\n")
lookup = {}     # symbol table: data constants and code labels -> values/addresses
counter = 0     # current instruction address
section = None  # active section ('data' or 'code')
sequence = []   # parsed instructions: (address, mnemonic, arguments)
for line in lines:
    # Strip comments (everything after ';').
    line = line.split(";")[0]
    if line:
        if line[0] == ".":
            # Section directive, e.g. ".data" / ".code".
            section = line[1:]
        elif section == "data":
            # Data definition: "name = value".
            line = line.replace(" ", "")
            token, value = line.split("=")
            lookup[token] = parse(value)
        elif section == "code":
            if line[0] == ":":
                # Label definition: record the current instruction address.
                line = line.replace(" ", "")
                lookup[line[1:]] = counter
            else:
                # Instruction: mnemonic followed by numeric or symbolic arguments.
                tokens = [token for token in line.split(" ") if token]
                arguments = [parse(token) if token[0].isdigit() else token for token in tokens[1:]]
                sequence.append((counter, tokens[0], arguments))
                counter += ext.inst_size(tokens[0])
raw = b""
for macro in sequence:
    # Expand each (possibly macro) instruction into primitive operations.
    num = ext.render_inst(macro[1], macro[2], macro[0], lookup)
    for operation in num:
        # Byte 0: opcode in high nibble, mode in low nibble; byte 1: operand.
        binary = b"".join([value.to_bytes(1, "little") for value in ((operation[0] << 4) | operation[1], operation[2])])
        raw += binary
with open(OUTPUT, "wb") as f:
    f.write(raw)
print("Program size: %i operations (%i bytes)" % (len(raw) // 2, len(raw)))
print("Lookup table:")
for key, value in lookup.items():
    print(" %s: %i" % (key, value))
| 2.8125 | 3 |
app/main/routes.py | GyftakisK/OpenDataGraph | 0 | 12763056 | <gh_stars>0
import os
from app import graph_manager
from app.main import bp
from flask import render_template, request, jsonify
from app.utilities import relationships_to_d3_data, normilize_mesh_term
@bp.route('/')
@bp.route('/index')
def index():
    """Render the landing page with dataset diseases and graph statistics."""
    literature_status = graph_manager.get_literature_status()
    diseases = [normilize_mesh_term(mesh_term) for mesh_term in literature_status["mesh_terms"]]
    node_counts, entity_rel_type_counts, article_rel_type_counts = graph_manager.get_graph_info()
    # Join all but the last disease with commas and append the last with "and".
    # Fix: the original used `diseases[:-1]` (a list slice) in the second slot,
    # which rendered the raw list repr instead of the final disease name.
    return render_template('main/index.html',
                           diseases=f'{", ".join(diseases[:-1])} and {diseases[-1]}' if len(diseases) > 1
                           else f'{diseases[0]}',
                           node_counts=node_counts,
                           ent_relationship_count=len(entity_rel_type_counts),
                           art_relationship_count=len(article_rel_type_counts))
@bp.route('/browse')
def browse():
    """Render the graph-browsing page."""
    return render_template('main/browse.html')
@bp.route('/autocomplete', methods=['GET'])
def autocomplete():
    """Suggest entity labels matching the 'q' query parameter."""
    search_term = request.args.get('q')
    matches = []
    # Ranked label matches first...
    matches.extend(graph_manager.get_neo4j_manager().get_entities_matching_label(search_term, 15, 'ranking'))
    # ...then prefix (lexicographical) matches not already included.
    lexicographical_matches = graph_manager.get_neo4j_manager().get_entities_matching_labels_beginning(search_term, 15, 'ranking')
    matches.extend([match for match in lexicographical_matches if match not in matches])
    return jsonify(matching_results=matches)
@bp.route('/graph', methods=['POST'])
def graph():
    """Return D3-ready graph data for a node and a page of its neighbors."""
    req_data = request.get_json()
    if req_data:
        node_label = req_data['label']
        # Pairs of CUIs whose relationships must stay visible ("frozen").
        frozen_sets = req_data['frozen']
        number_of_neighbours = req_data['number_of_neighbours']
        # Pagination offset into the neighbor list.
        skip_nodes = req_data['skip_nodes']
        excl_rel = req_data["excluded_relationships"]
        excl_sem = req_data["excluded_semantic_types"]
        db_manager = graph_manager.get_neo4j_manager()
        query_node, relationships = db_manager.get_node_and_neighbors(node_label=node_label,
                                                                      num_of_neighbors=number_of_neighbours,
                                                                      skip_nodes=skip_nodes,
                                                                      excl_rel=excl_rel,
                                                                      excl_sem=excl_sem)
        # Advance the pagination cursor past the relationships just returned.
        skip_nodes = skip_nodes + len(relationships)
        if frozen_sets:
            for cui_1, cui_2 in frozen_sets:
                relationships.extend(db_manager.get_all_relationships_between_nodes_by_cui(cui_1, cui_2))
        data = relationships_to_d3_data(query_node, relationships)
        data["query_node_id"] = query_node.identity
        data['skip_nodes'] = skip_nodes
        return jsonify(data)
    else:
        return "Invalid term"
@bp.route('/articles', methods=['POST'])
def articles():
    """Return articles for either a single entity or a specific relationship."""
    req_data = request.get_json()
    if not req_data:
        return "Invalid JSON"
    # setdefault on the parsed JSON doubles as a 'get with default' here.
    node_label = req_data.setdefault('node_label', None)
    start_cui = req_data.setdefault('start_cui', None)
    end_cui = req_data.setdefault('end_cui', None)
    rel_type = req_data.setdefault('rel_type', None)
    db_manager = graph_manager.get_neo4j_manager()
    if node_label:
        data = db_manager.get_articles_for_entity(node_label)
    elif start_cui and end_cui and rel_type:
        data = db_manager.get_articles_from_relationship(start_node_cui=start_cui,
                                                         end_node_cui=end_cui,
                                                         type=rel_type)
    else:
        return "Invalid input"
    return jsonify(data)
@bp.route('/node', methods=['POST'])
def node():
    """Return neighbor statistics (relationship / semantic-type counts) for a node."""
    req_data = request.get_json()
    if req_data:
        node_label = req_data['label']
        db_manager = graph_manager.get_neo4j_manager()
        relationship_counts, sem_types_counts, node_count = db_manager.get_neighbor_stats_for_node(node_label)
        return jsonify({"relationship_counts": relationship_counts, "sem_types_counts": sem_types_counts,
                        "node_count": node_count})
    else:
        return "Invalid term"
| 2.203125 | 2 |
examples/tenant_tutorial/customers/apps.py | buraketmen/django-tenants | 514 | 12763057 | from __future__ import unicode_literals
from django.apps import AppConfig
class CustomersConfig(AppConfig):
    """Django app configuration for the customers app."""
    name = 'customers'
    verbose_name = 'Customers'

    def ready(self):
        # Import signal handlers so they are registered at app start-up.
        import customers.handlers
| 1.5625 | 2 |
schwarz/log_utils/tests/testutils_test.py | FelixSchwarz/log_utils | 0 | 12763058 | # -*- coding: utf-8 -*-
# Copyright (c) 2021 <NAME>
# The source code contained in this file is licensed under the MIT license.
# SPDX-License-Identifier: MIT
import logging
from pythonic_testcase import assert_raises, PythonicTestCase
from ..testutils import (assert_did_log_message, assert_no_log_messages,
build_collecting_logger)
class TestutilsTest(PythonicTestCase):
    """Exercise the log-collecting test helpers."""

    def test_can_assert_logged_messages(self):
        # build_collecting_logger returns a logger plus a collector ("lc")
        # that records the emitted log records.
        log, lc = build_collecting_logger()
        log.info('foo')
        log.debug('bar')
        assert_did_log_message(lc, 'foo')
        assert_did_log_message(lc, 'foo', level=logging.INFO)
        # Asserting the wrong level must fail.
        with assert_raises(AssertionError):
            assert_did_log_message(lc, 'foo', level=logging.DEBUG)
        # Nothing at WARN or above was logged.
        assert_no_log_messages(lc, min_level=logging.WARN)
| 2.140625 | 2 |
tests/conftest.py | infobip-community/infobip-api-python-sdk | 0 | 12763059 | import json
import string
from io import IOBase
from random import choice
from string import ascii_letters
from typing import Optional
import pytest
import requests
from werkzeug import Response
from infobip_channels.core.models import CamelCaseModel, MultipartMixin
def get_random_string(length: int) -> str:
    """Return a string of `length` randomly chosen ASCII letters."""
    letters = [choice(ascii_letters) for _ in range(length)]
    return "".join(letters)
def get_random_numbers(length: int) -> str:
    """Return a string of `length` randomly chosen decimal digits."""
    digits = string.digits
    picked = (choice(digits) for _ in range(length))
    return "".join(picked)
class HttpTestClient:
    """Minimal HTTP client for tests: remembers a base URL and default headers."""

    def __init__(self, url, headers):
        self.url = url
        self.headers = headers

    def post(self, endpoint, body, headers=None):
        """POST `body` as JSON to base URL + endpoint; headers default to self.headers."""
        headers = headers or self.headers
        return requests.post(url=f"{self.url}" + endpoint, json=body, headers=headers)

    def get(self, endpoint, headers=None):
        """GET base URL + endpoint; headers default to self.headers."""
        headers = headers or self.headers
        return requests.get(url=f"{self.url}" + endpoint, headers=headers)
class Address(CamelCaseModel):
    """Postal address components used to build test payloads."""
    street: str
    city: str
    zip_code: int
class UserInfo(CamelCaseModel, MultipartMixin):
    """Test model combining scalar fields, a nested model and a file field."""
    name: Optional[str] = None
    last_name: str
    address: Address
    # File-like object carrying the uploaded image (multipart payload).
    profile_image: IOBase

    class Config(CamelCaseModel.Config):
        # IOBase has no standard validator, so allow arbitrary field types.
        arbitrary_types_allowed = True
@pytest.fixture
def http_test_client():
    # Factory fixture: lets a test build a client with its own URL/headers.
    def _get_http_test_client(url, headers):
        return HttpTestClient(url, headers)

    return _get_http_test_client
def get_response_object(status_code, content):
    # Build a werkzeug Response with a JSON-serialized body.
    return Response(json.dumps(content), status_code)
def get_response_error_invalid_content():
    """Error payload with two invalid fields, as the API would return it."""
    field_errors = {"field_one": "error_one", "field_two": "error_two"}
    return {"error": field_errors}
def get_expected_post_headers(content_type="application/json"):
    """Headers expected on outgoing POST requests."""
    headers = {"Authorization": "App secret"}
    headers["Content-Type"] = content_type
    headers["Accept"] = "application/json"
    return headers
def get_expected_put_headers(content_type="application/json"):
    """Headers expected on outgoing PUT requests."""
    headers = {"Authorization": "App secret"}
    headers["Content-Type"] = content_type
    headers["Accept"] = "application/json"
    return headers
def get_expected_get_headers():
    """Headers expected on outgoing GET requests (no body, so no Content-Type)."""
    headers = {"Authorization": "App secret"}
    headers["Accept"] = "application/json"
    return headers
def get_expected_delete_headers():
    """Headers expected on outgoing DELETE requests."""
    headers = {"Authorization": "App secret"}
    headers["Accept"] = "application/json"
    return headers
| 2.171875 | 2 |
examples/keras_parikh_entailment/spacy_hook.py | moyogo/spacy | 0 | 12763060 | <gh_stars>0
import json
import pickle

from keras.models import model_from_json
import numpy
import numpy.random
class KerasSimilarityShim(object):
    """Wraps a Keras model as a spaCy similarity hook for Doc/Span objects."""

    @classmethod
    def load(cls, path, nlp, get_features=None):
        """Load model config + weights from `path` and bind vocab embeddings.

        NOTE(review): uses `json` and `pickle`, which are not imported at the
        top of this module — confirm they are in scope.
        """
        if get_features is None:
            # NOTE(review): `doc2ids` is not defined in this module — verify.
            get_features = doc2ids
        with (path / 'config.json').open() as file_:
            config = json.load(file_)
            model = model_from_json(config['model'])
        with (path / 'model').open('rb') as file_:
            weights = pickle.load(file_)
        # First weight matrix is the (frozen) embedding table from the vocab.
        embeddings = get_embeddings(nlp.vocab)
        model.set_weights([embeddings] + weights)
        return cls(model, get_features=get_features)

    def __init__(self, model, get_features=None):
        self.model = model
        self.get_features = get_features

    def __call__(self, doc):
        # Install this model as the similarity hook for docs and spans.
        doc.user_hooks['similarity'] = self.predict
        doc.user_span_hooks['similarity'] = self.predict

    def predict(self, doc1, doc2):
        """Return the model's similarity score for a pair of documents."""
        x1 = self.get_features(doc1)
        x2 = self.get_features(doc2)
        scores = self.model.predict([x1, x2])
        return scores[0]
def get_embeddings(vocab, nr_unk=100):
    """Build an embedding matrix from the vocab's vectors.

    Row 0 is padding; lexeme `rank+1` indexes its (unit-normalized) vector.
    `nr_unk` extra rows are reserved for unknown-word buckets (left as zeros).
    """
    nr_vector = max(lex.rank for lex in vocab) + 1
    vectors = numpy.zeros((nr_vector+nr_unk+2, vocab.vectors_length), dtype='float32')
    for lex in vocab:
        if lex.has_vector:
            vectors[lex.rank+1] = lex.vector / lex.vector_norm
    return vectors
def get_word_ids(docs, rnn_encode=False, tree_truncate=False, max_length=100, nr_unk=100):
    """Encode docs as a zero-padded (n_docs, max_length) int32 matrix of word ids.

    Ids match `get_embeddings`: rank+1 for words with vectors, a hashed
    unknown-word bucket (shape % (nr_unk-1)) + 2 otherwise.
    """
    Xs = numpy.zeros((len(docs), max_length), dtype='int32')
    for i, doc in enumerate(docs):
        if tree_truncate:
            # Breadth-first from sentence roots, so truncation keeps heads.
            queue = [sent.root for sent in doc.sents]
        else:
            queue = list(doc)
        words = []
        while len(words) <= max_length and queue:
            word = queue.pop(0)
            # Unless RNN-encoding, drop punctuation and whitespace tokens.
            if rnn_encode or (not word.is_punct and not word.is_space):
                words.append(word)
            if tree_truncate:
                queue.extend(list(word.lefts))
                queue.extend(list(word.rights))
        # Restore document order (BFS collected tokens out of order).
        words.sort()
        for j, token in enumerate(words):
            if token.has_vector:
                Xs[i, j] = token.rank+1
            else:
                # Hash OOV tokens into one of nr_unk-1 unknown buckets.
                Xs[i, j] = (token.shape % (nr_unk-1))+2
            j += 1
            if j >= max_length:
                break
        else:
            # for-else: only runs if we did not break, i.e. the doc was short;
            # write a sentinel id (1) after the last real token.
            Xs[i, len(words)] = 1
    return Xs
def create_similarity_pipeline(nlp):
    """Build the similarity pipeline component for `nlp`.

    Fix: the original referenced `SimilarityModel` and a `feature_extracter`
    keyword, neither of which exists in this module; the loader is
    `KerasSimilarityShim.load` and its keyword is `get_features`.
    """
    return [KerasSimilarityShim.load(
        nlp.path / 'similarity',
        nlp,
        get_features=get_word_ids)]
| 2.390625 | 2 |
src/gibbs_sampling.py | alexandru-dinu/MCMC | 0 | 12763061 | import numpy as np
import seaborn as sns
def p_x_given_y(y, mus, sigmas):
    """Sample x from the conditional p(x | y) of a bivariate normal.

    For mean vector `mus` and covariance `sigmas`:
        x | y ~ N(mu_x + s_xy / s_yy * (y - mu_y),  s_xx - s_xy**2 / s_yy)
    """
    # Regression coefficient is s_xy / s_yy; the original divided by s_xx,
    # which only coincides when the two variances are equal.
    mu = mus[0] + sigmas[0, 1] / sigmas[1, 1] * (y - mus[1])
    # Conditional variance; np.random.normal expects a standard deviation,
    # so take the square root (the original passed the variance directly).
    var = sigmas[0, 0] - sigmas[0, 1] ** 2 / sigmas[1, 1]
    return np.random.normal(mu, np.sqrt(var))
def p_y_given_x(x, mus, sigmas):
    """Sample y from the conditional p(y | x) of a bivariate normal.

    For mean vector `mus` and covariance `sigmas`:
        y | x ~ N(mu_y + s_xy / s_xx * (x - mu_x),  s_yy - s_xy**2 / s_xx)
    """
    # Regression coefficient is s_xy / s_xx; the original divided by s_yy,
    # which only coincides when the two variances are equal.
    mu = mus[1] + sigmas[0, 1] / sigmas[0, 0] * (x - mus[0])
    # Conditional variance; np.random.normal expects a standard deviation,
    # so take the square root (the original passed the variance directly).
    var = sigmas[1, 1] - sigmas[0, 1] ** 2 / sigmas[0, 0]
    return np.random.normal(mu, np.sqrt(var))
def gibbs_sampling(mus, sigmas, iter=10000):
    """Draw `iter` (x, y) samples from a bivariate normal via Gibbs sampling."""
    chain = np.zeros((iter, 2))
    # Arbitrary starting point for the y coordinate.
    y = np.random.rand() * 10
    for step in range(iter):
        # Alternate the two conditional draws.
        x = p_x_given_y(y, mus, sigmas)
        y = p_y_given_x(x, mus, sigmas)
        chain[step] = (x, y)
    return chain
if __name__ == "__main__":
    # Strongly correlated bivariate normal target distribution.
    mus = np.array([5, 5])
    sigmas = np.array([[1, 0.9], [0.9, 1]])
    samples = gibbs_sampling(mus, sigmas)
    # Joint scatter plot with marginal histograms of the sampled chain.
    sns.jointplot(samples[:, 0], samples[:, 1])
| 2.6875 | 3 |
parkinson.py | Cassiel60/python | 0 | 12763062 | <reponame>Cassiel60/python<filename>parkinson.py<gh_stars>0
#coding=utf-8
import pandas as pd
import pdb
import re
# NOTE(review): Python 2 code (str.decode(...).encode(...) on byte strings).
# Input spreadsheet of variants and a list of Parkinson-related phenotypes.
varfil1=r'C:\Users\BAIOMED07\Desktop\parkinson_Database_20170619.xlsx'
varfil2=r'C:\Users\BAIOMED07\Desktop\parkinson.txt'
out = open('parkinson.xls','wb')
df1=pd.read_excel(varfil1)
df2=pd.read_csv(varfil2,header=0,sep='\t')['#Phynotype'].values.tolist()
#df2=open('parkinson.txt')
sum=0
for index1,row in df1.iterrows():
    value1 = row['CLNDBN']
    newrow = '' #add blank string
    need = False
    # Keep the row if its clinical annotation mentions any listed phenotype.
    for index2,value2 in enumerate(df2):
        if str(value1).upper().__contains__(str(value2).upper()):
            need = True
            break
#    sum+=1
#    print index1,value1
#    print sum
#    res.append(row)
#    #pdb.set_trace()
#    print res
    if need:
        # Write the row index followed by tab-separated, gb2312-encoded values.
        newrow = str(index1)
        for val in row.values:
            tmp = ' '
            try:
                tmp = str(val).decode('utf-8').encode('gb2312')
            except:
                pass
            newrow += '\t' +tmp
        newrow+='\n'
        out.write(newrow)
out.flush()
out.close()
| 2.84375 | 3 |
realtime-epi-figs/ridge.py | COVID-IWG/epimargin-studies | 0 | 12763063 | <reponame>COVID-IWG/epimargin-studies
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from epimargin.utils import cwd
from sklearn.linear_model import Ridge, Lasso, ElasticNet, ElasticNetCV, LogisticRegression
from sklearn.decomposition import SparsePCA
from sklearn.preprocessing import minmax_scale
from sklearn.svm import LinearSVC
import seaborn as sns
# Load the policy-evaluation dataset and binarize the reproduction number:
# Rt_binarized = 1 where the predicted Rt is >= 1 (epidemic growing).
data = cwd()/"example_data"
df = pd.read_csv(data/"metro_state_policy_evaluation.csv").dropna()
df["Rt_binarized"] = (df["RR_pred"] >= 1).astype(int)
# Feature matrix: drop target-derived, identifier and date columns.
X = pd.concat([
    df.drop(columns =
        [col for col in df.columns if col.startswith("RR_")] +
        [col for col in df.columns if col.startswith("metro_")] +
        ["metro-state", "date", "state", "state_name", "start_stay_at_home",
        "end_stay_at_home", "mask_mandate_all", "metro_outbreak_start", "threshold_ind", "cbsa_fips",
        "new_cases_ts", "daily_confirmed_cases"]),
    # pd.get_dummies(df.state_name, prefix = "state_name")
], axis = 1)
X_normed = minmax_scale(X.drop(columns = ["Rt_binarized"]))
# --- Ridge regression on the binarized target ---
ridge = Ridge(alpha = 0.1, random_state = 0)
ridge.fit(X = X_normed, y = X["Rt_binarized"])
for _ in sorted(zip(X.columns, ridge.coef_), key = lambda t: np.abs(t[1]), reverse = True):
    print(_)
plt.plot(ridge.coef_, ".")
plt.show()
# --- Lasso: sparse coefficients ---
lasso = Lasso(alpha = 0.001, random_state = 0)
lasso.fit(X = X_normed, y = X["Rt_binarized"])
for _ in sorted(zip(X.columns, lasso.coef_), key = lambda t: np.abs(t[1]), reverse = True):
    print(_)
plt.plot(np.abs(lasso.coef_), ".")
plt.show()
# --- Elastic net with fixed alpha ---
enet = ElasticNet(alpha = 0.001, random_state = 0)
enet.fit(X = X_normed, y = X["Rt_binarized"])
for _ in sorted(zip(X.columns, enet.coef_), key = lambda t: np.abs(t[1]), reverse = True):
    print(_)
plt.plot(np.abs(enet.coef_), ".")
plt.show()
# --- Cross-validated elastic net on the continuous Rt prediction ---
enetcv = ElasticNetCV(l1_ratio = [0.1, 0.2, 0.5, 0.6, 0.7, 0.9, 0.95, 0.99, 1], n_alphas = 10000, random_state = 0)
enetcv.fit(X = X_normed, y = df["RR_pred"])
print(enetcv.l1_ratio_, enetcv.alpha_)
for _ in sorted(zip(X.columns, enetcv.coef_), key = lambda t: np.abs(t[1]), reverse = True):
    print(_)
plt.plot(enetcv.coef_, ".")
plt.gca().set_xticklabels([""] + [_.split("_")[0] for _ in X.columns[:-1]])
plt.show()
# --- Cross-validated elastic net on the binarized target ---
enetcv_bin = ElasticNetCV(l1_ratio = [0.1, 0.2, 0.5, 0.6, 0.7, 0.9, 0.95, 0.99, 1], n_alphas = 10000, random_state = 0)
enetcv_bin.fit(X = X_normed, y = X["Rt_binarized"])
print(enetcv_bin.l1_ratio_, enetcv_bin.alpha_)
for _ in sorted(zip(X.columns, enetcv_bin.coef_), key = lambda t: np.abs(t[1]), reverse = True):
    print(_)
plt.plot(enetcv_bin.coef_, ".")
plt.gca().set_xticklabels([""] + [_.split("_")[0] for _ in X.columns[:-1]])
plt.show()
# Scatter of two mobility features colored by epidemic growth.
Xn_indexed = pd.concat([
    pd.DataFrame(X_normed, columns = X.columns[:-1]),
    X["Rt_binarized"]
], axis = 1)
sns.scatterplot(data=Xn_indexed, x = "retail_and_recreation_percent_change_from_baseline", y = "grocery_and_pharmacy_percent_change_from_baseline", hue = "Rt_binarized")
plt.show()
# 2D projection
sparse_pca = SparsePCA(n_components = 2, random_state = 0, alpha = 2)
X_scaled = minmax_scale(X)
sparse_pca.fit(X = X_scaled)
print(pd.DataFrame(sparse_pca.components_, columns = [_.replace("_percent_change_from_baseline", "") for _ in X.columns]))
X_tf = sparse_pca.transform(X_scaled)
X_tf_Rt = pd.DataFrame(X_tf, columns = ["X1", "X2"])
# NOTE(review): the column is named "Rt" here but the scatterplot below uses
# hue = "Rt_binarized" — one of the two names looks wrong.
X_tf_Rt["Rt"] = X["Rt_binarized"]
sns.scatterplot(data = X_tf_Rt, x = "X1", y = "X2", hue = "Rt_binarized")
plt.show()
# NOTE(review): `fig` is never defined — this line will raise NameError;
# looks like a leftover from a 3D-plotting experiment.
ax = fig.add_subplot(111, projection='3d')
# --- L1-penalized linear SVM classifier ---
svc = LinearSVC(random_state = 0, penalty = "l1", loss = "squared_hinge", dual = False, max_iter = 10000)
svc.fit(X = X_normed, y = X["Rt_binarized"])
| 2.46875 | 2 |
constants.py | erinxocon/pybuntu-docker | 6 | 12763064 | from pathlib import Path
# Repository layout anchors.
ROOT = Path(__file__).parent
TEMPLATES_DIR = ROOT / "templates"
OUT = ROOT / "out"
# Ubuntu release numbers mapped to their code names.
UBUNTU_VERSIONS = {"18.04": "bionic", "20.04": "focal"}
# Python versions the images are built for.
PYTHON_VERSIONS = {"3.6.15", "3.7.12", "3.8.12", "3.9.7", "3.10.0rc2"}
# Cartesian product of (ubuntu, python) versions.
# NOTE(review): generator expression — it can only be iterated once.
VERSIONS = ((u_ver, p_ver) for u_ver in UBUNTU_VERSIONS for p_ver in PYTHON_VERSIONS)
PIP_VERSION = "21.2.4"
| 1.9375 | 2 |
src/tweetengine/handlers/twitter.py | Arachnid/tweetengine | 2 | 12763065 | <filename>src/tweetengine/handlers/twitter.py
import environment
import datetime
import logging
import urlparse
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from tweetengine.handlers import base
from tweetengine import model
from tweetengine import oauth
class TweetHandler(base.UserHandler):
    """Handles posting a tweet: send now, schedule, or suggest for approval."""

    @base.requires_account
    def post(self, account_name):
        permission = self.current_permission
        in_reply_to = self.request.get("in_reply_to_status_id", None)
        tweet = model.OutgoingTweet(account=self.current_account,
                                    user=self.user_account,
                                    message=self.request.get("tweet"),
                                    in_reply_to=in_reply_to)
        if permission.can_send():
            # Users with send rights auto-approve their own tweets.
            tweet.approved_by=self.user_account
            tweet.approved = True
            if self.request.POST["when"] == "schedule":
                # Parse "dd/mm/yyyy HH:MM" into the scheduled send time.
                timestamp = "%s %s" % (self.request.POST['datestamp'],
                                       self.request.POST['timestamp'])
                tweet.timestamp = datetime.datetime.strptime(timestamp,"%d/%m/%Y %H:%M")
                tweet.schedule()
            else:
                response = tweet.send()
                if response.status_code != 200:
                    self.error(500)
                    logging.error(response.content)
        elif permission.can_suggest():
            # Suggest-only users store the tweet unapproved for later review.
            tweet.put()
        self.redirect("/%s/" % (account_name,))
class ScheduledTweetHandler(webapp.RequestHandler):
    """Task-queue endpoint that publishes approved tweets whose time is due."""
    URL_PATH = '/_ah/queue/scheduled_tweets'
    def post(self):
        publishApprovedTweets()
def publishApprovedTweets():
    """Send all approved, unsent tweets whose scheduled time has passed.
    Sends are issued asynchronously, then results are collected; tweets
    that were sent successfully are re-put to the datastore. At most 30
    tweets are processed per invocation.
    """
    q = model.OutgoingTweet.all()
    q.filter("approved =", True)
    q.filter("sent =", False)
    q.filter("timestamp <", datetime.datetime.now())
    rpcs = []
    for tweet in q.fetch(30):
        rpcs.append((tweet.send_async(), tweet))
        # Informational trace, not an error condition: log at debug level.
        logging.debug('sending tweet %s' % tweet.message)
    successful_tweets = []
    for rpc, tweet in rpcs:
        response = rpc.get_result()
        if response.status_code == 200:
            successful_tweets.append(tweet)
        else:
            logging.error(response.content)
    logging.info("Sent %d tweets", len(successful_tweets))
    db.put(successful_tweets)
| 2.6875 | 3 |
paper/figures/make_ha_cachito_Fe_figure.py | abostroem/asassn15oz | 0 | 12763066 |
# coding: utf-8
# Creates:
# * cachito_fe_vel_comp.pdf
# In[1]:
import os
import numpy as np
import yaml
from astropy.io import ascii as asc
from astropy.time import Time
import astropy.units as u
import astropy.constants as c
from astropy.modeling import models, fitting
from matplotlib import pyplot as plt
#get_ipython().run_line_magic('matplotlib', 'inline')
from utilities_az import supernova
# In[2]:
# Figure style for the paper (single-column layout).
plt.style.use(['seaborn-paper', 'az-paper-onecol'])
# In[3]:
# Input/output locations, relative to this script.
TEST_FILE_DIR = '../../data/line_info/testing/'
FIG_DIR = './'
DATA_DIR = '../../data/line_info'
# In[4]:
# Rest wavelengths (Angstroms) for H-alpha, Si II, and Fe II lines.
HA = 6563.0
SiII = 6355.0
FeII = 5169.0
# Dates of the infrared spectra (not used further below).
IR_dates = Time(['2015-09-05','2015-10-05', '2015-10-10'])
# In[5]:
# Light curve of ASASSN-15oz; texpl is the explosion epoch (JD).
sn15oz = supernova.LightCurve2('asassn-15oz')
texpl = Time(sn15oz.jdexpl, format='jd')
# In[6]:
# Fitted line positions for the "Cachito" feature.
new_fit_cachito = asc.read(os.path.join(TEST_FILE_DIR, 'cachito.tab'))
# In[7]:
def calc_velocity(obs_wl, rest_wl):
    """Return the Doppler velocity implied by observed wavelength *obs_wl*
    relative to rest wavelength *rest_wl* (classical v = c * (lambda/lambda0 - 1))."""
    return c.c * (obs_wl / rest_wl - 1)
# In[8]:
# Phase (days since explosion) and Cachito velocity if interpreted as H-alpha.
phase_cachito = (Time(new_fit_cachito['date'])-texpl).value
# NOTE(review): velocity_cachito is computed but not used below; the plot
# recomputes the Cachito velocity assuming Si II instead.
velocity_cachito = -1*calc_velocity(new_fit_cachito['vel0'], HA).to(u.km/u.s).value
# In[9]:
#tbdata_feII = asc.read(os.path.join(DATA_DIR, 'FeII_multi.tab'))
#tbdata_feII.remove_columns(['vel1', 'vel_err_left_1', 'vel_err_right_1', 'vel_pew_1', 'vel_pew_err1'])
tbdata_feII = asc.read(os.path.join(DATA_DIR, 'FeII_5169.tab'))
# Rename the per-component fit columns to generic names.
tbdata_feII.rename_column('vel0', 'velocity')
tbdata_feII.rename_column('vel_err_left_0', 'vel_err_left')
tbdata_feII.rename_column('vel_err_right_0', 'vel_err_right')
tbdata_feII.rename_column('vel_pew_0', 'pew')
tbdata_feII.rename_column('vel_pew_err0', 'pew_err')
# In[10]:
phase_feII = (Time(tbdata_feII['date'])-texpl).value
velocity_feII = -1*calc_velocity(tbdata_feII['velocity'], FeII).to(u.km/u.s)
# In[15]:
# Compare Cachito (interpreted as Si II 6355) with Fe II 5169 velocities.
fig = plt.figure()
fig.subplotpars.update(left=.17, bottom=0.23)
ax_Fe = fig.add_subplot(1,1,1)
ax_Fe.plot((Time(new_fit_cachito['date'])-texpl).value, -1*calc_velocity(new_fit_cachito['vel0'], SiII).to(u.km/u.s)/1000, '^', label='Cachito (as SiII 6533)')
ax_Fe.plot(phase_feII, velocity_feII/1000, 'o', label='FeII (5169)')
ax_Fe.set_xticks(np.arange(0, 90, 10))
ax_Fe.legend()
ax_Fe.set_ylim(5, 11)
ax_Fe.set_xlim(0, 40)
ax_Fe.set_xlabel('Phase (day)')
ax_Fe.set_ylabel('Velocity (1000 km/s)')
plt.savefig(os.path.join(FIG_DIR, 'cachito_fe_vel_comp.pdf'))
| 1.992188 | 2 |
module1-introduction-to-sql/buddymove_holidayiq.py | sberniz/DS-Unit-3-Sprint-2-SQL-and-Databases | 0 | 12763067 | <filename>module1-introduction-to-sql/buddymove_holidayiq.py
import pandas as pd
import sqlite3
#Part 2
# Load the CSV and mirror it into a local SQLite database.
db = pd.read_csv('buddymove_holidayiq.csv')
print(db.shape)
conn = sqlite3.connect('buddymove_holidayiq.sqlite3')
curs = conn.cursor()
db.to_sql('buddymove_holidayiq',conn, if_exists='replace')
# Total number of rows copied into the table.
QUERY = 'SELECT COUNT(*) FROM buddymove_holidayiq;'
curs.execute(QUERY)
db_sql = curs.fetchall()
print(f'Number of rows in database: {db_sql[0][0]}')
# Users with more than 100 reviews in both Nature and Shopping.
reviews_more_100 = 'SELECT COUNT(*) FROM buddymove_holidayiq WHERE Nature > 100 AND Shopping > 100;'
curs.execute(reviews_more_100)
reviews = curs.fetchall()
print(f'Number of users where reviews of Nature and Shopping more than 100: {reviews[0][0]}')
# Average review count per category; result columns are positional.
AVGS = 'SELECT AVG(Sports) , AVG(Religious), AVG(Nature), \
AVG(Theatre),AVG(Shopping), AVG(Picnic) FROM buddymove_holidayiq;'
curs.execute(AVGS)
averages = curs.fetchall()
print(f"average Sports: {averages[0][0]}")
print(f"Average Religious {averages[0][1]}")
print(f"average Nature: {averages[0][2]}")
print(f"Average Theatre: {averages[0][3]}")
# Fixed label: column 4 of the query is AVG(Shopping), not "Shipping".
print(f"Average Shopping {averages[0][4]}")
print(f"Average Picnic: {averages[0][5]}")
# Release the database handle now that all queries are done.
conn.close()
| 3.578125 | 4 |
tests/generate_search_info_test_data.py | garz75/osxphotos | 0 | 12763068 | <filename>tests/generate_search_info_test_data.py
""" Create the test data needed for test_search_info_10_15_7.py """
# reads data from the author's system photo library to build the test data
# used to test SearchInfo
# run as:
# python3 tests/generate_search_info_test_data.py >tests/search_info_test_data_10_15_7.json
import json
import osxphotos
# Photos of interest in the author's library.
UUID = [
    "C8EAF50A-D891-4E0C-8086-C417E1284153",
    "71DFB4C3-E868-4BE4-906E-D96BD8692D7E",
    "2C151013-5BBA-4D00-B70F-1C9420418B86",
]
# One sub-dict per fixture key, each mapping uuid -> expected value.
data = {
    key: {}
    for key in (
        "UUID_SEARCH_INFO",
        "UUID_SEARCH_INFO_NORMALIZED",
        "UUID_SEARCH_INFO_ALL",
        "UUID_SEARCH_INFO_ALL_NORMALIZED",
    )
}
photosdb = osxphotos.PhotosDB()
for uuid in UUID:
    photo = photosdb.get_photo(uuid)
    raw_info = photo.search_info
    normalized_info = photo.search_info_normalized
    data["UUID_SEARCH_INFO"][uuid] = raw_info.asdict()
    data["UUID_SEARCH_INFO_NORMALIZED"][uuid] = normalized_info.asdict()
    data["UUID_SEARCH_INFO_ALL"][uuid] = raw_info.all
    data["UUID_SEARCH_INFO_ALL_NORMALIZED"][uuid] = normalized_info.all
print(json.dumps(data))
| 2.546875 | 3 |
Curso_Em_Video_Python/ex088.py | ThallesTorres/Curso_Em_Video_Python | 0 | 12763069 | <reponame>ThallesTorres/Curso_Em_Video_Python
# Ex: 088 - Faça um programa que ajude um jogador da MEGA SENA a criar palpites.
# O programa vai perguntar quantos jogos serão gerados e vai sortear 6 números
# entre 1 e 60 para cada jogo, cadastrando tudo em uma lista composta.
from random import sample
from time import sleep
# Welcome banner (user-facing text kept verbatim).
print('''
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
--Seja bem-vindo!
--Exercício 088
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
''')
# Number of games to generate.
jogos = int(input("Total de Jogos a serem sorteados: "))
# Each game is 6 distinct numbers drawn from 1..60 (MEGA SENA rules).
sorteados = [sample(range(1, 61), 6) for _ in range(jogos)]
print("\n--Números sorteados:")
for cont, jogo in enumerate(sorteados):
    # Display each game sorted, one per second.
    print(f"Jogo {cont + 1}: {sorted(jogo)}")
    sleep(1)
print('''
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
--Obrigado pelo uso!
--Desenvolvido por <NAME>
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-''')
| 3.828125 | 4 |
tests/test_utils/test_share_storage.py | jvrana/pyro-graphnets | 0 | 12763070 | <filename>tests/test_utils/test_share_storage.py<gh_stars>0
import pytest
import torch
from caldera.utils import same_storage
# Operations expected to return a *view* (shared storage) of their input
# (.cpu() on a CPU tensor returns the tensor itself).
view_methods = {
    "slice": lambda x: x[:10],
    "cpu": lambda x: x.cpu(),
    "contiguous": lambda x: x.contiguous(),
}
# Operations expected to return a *copy* (new storage) of their input.
copy_methods = {
    "clone": lambda x: x.clone(),
    "torch.tensor": lambda x: torch.tensor(x),
    "to(torch.float64)": lambda x: x.to(torch.float64),
}
# A device transfer always copies, so add it when a GPU is available.
if torch.cuda.is_available():
    device = "cuda:" + str(torch.cuda.current_device())
    copy_methods["to(" + device + ")"] = lambda x: x.to(device)
def parameterize(n, d):
    """Build a ``pytest.mark.parametrize`` decorator for argument *n* from a
    mapping of test-id -> value (ids become the readable test names)."""
    ids = list(d)
    args = [d[key] for key in ids]
    return pytest.mark.parametrize(n, args, ids=ids)
@parameterize("f", view_methods)
@pytest.mark.parametrize(
    "a", [torch.randn(100), torch.randn((10, 9)), torch.randn((2, 3))]
)
def test_same_storage_view_methods(f, a):
    """View-producing ops must share storage with the source tensor (both directions)."""
    b = f(a)
    assert same_storage(a, b)
    assert same_storage(b, a)
@parameterize("f", copy_methods)
@pytest.mark.parametrize(
    "a", [torch.randn(100), torch.randn((10, 9)), torch.randn((2, 3))]
)
def test_same_storage_copy_methods(f, a):
    """Copy-producing ops must NOT share storage with the source tensor."""
    b = f(a)
    assert not same_storage(a, b)
    assert not same_storage(b, a)
| 2.140625 | 2 |
cs251tk/student/markdownify/process_file.py | renovate-tests/cs251-toolkit | 0 | 12763071 | <gh_stars>0
import os
from collections import Iterable
from cs251tk.common import run, flatten
from .truncate import truncate
from .cat import cat
from .pipe import pipe
def get_file(filename, results, options):
    """Read *filename* into *results*; return True iff the file exists.
    On success, records the last git modification date and the (possibly
    hidden or truncated) file contents. On failure, records the current
    directory listing so graders can spot misnamed submissions.
    """
    status, contents = cat(filename)
    if status != 'success':
        results['missing'] = True
        results['other files'] = os.listdir('.')
        results['optional'] = options['optional']
        return False
    _, last_edit, _ = run(['git', 'log', '-n', '1', '--pretty=format:%cd', '--', filename])
    results['last modified'] = last_edit
    if options['hide_contents']:
        contents = ''
    elif options['truncate_contents']:
        contents = truncate(contents, options['truncate_contents'])
    results['contents'] = contents
    return True
def compile_file(filename, steps, results, supporting_dir, basedir, web, student):
    """Run each compile step for *filename*, appending outcomes to
    results['compilation']. Returns False as soon as a step fails."""
    # Pre-built flags for the "$SERVER" placeholder: output path plus
    # supporting object files shipped with the assignment.
    server_path = ' '.join([
        '-o "{}/server/server_file"'.format(basedir),
        '"{}/data/supporting/sd_fun.h"'.format(basedir),
        '"{}/data/supporting/sd_fun.o"'.format(basedir),
        '"{}/data/supporting/str_util.o"'.format(basedir)
    ])
    for step in steps:
        # Substitute placeholders: $@ = target file, $SUPPORT = support dir,
        # $SERVER = the flags built above.
        command = step \
            .replace('$@', './' + filename) \
            .replace('$SUPPORT', supporting_dir) \
            .replace('$SERVER', server_path)
        cmd, input_for_cmd = pipe(command)
        status, compilation, _ = run(cmd, timeout=30, input_data=input_for_cmd)
        results['compilation'].append({
            'command': command,
            'output': compilation,
            'status': status,
        })
        if web:
            # Interactive mode: pause between files so the grader can look.
            if status == 'success':
                input("{} - {}".format(student, filename))
            else:
                print("{} - {} COMPILE ERROR".format(student, filename))
        if status != 'success':
            return False
    return True
def test_file(filename, *, spec, results, options, cwd, supporting_dir, interact):
    """Run every test command the spec defines for *filename*, appending each
    (possibly truncated) result to results['result']. Always returns True."""
    # Collect the commands of every test spec that targets this file.
    tests = flatten([test_spec['commands']
                     for test_spec in spec.get('tests', {})
                     if test_spec['filename'] == filename])
    for test_cmd in tests:
        if not test_cmd:
            continue
        # Same placeholder substitution as the compile phase.
        test_cmd = test_cmd \
            .replace('$@', './' + filename) \
            .replace('$SUPPORT', supporting_dir)
        test_cmd, input_for_test = pipe(test_cmd)
        if os.path.exists(os.path.join(cwd, filename)):
            # `run` may ask to be re-run (e.g. after an interactive session);
            # loop until it reports no repeat is needed.
            again = True
            while again:
                status, full_result, again = run(test_cmd,
                                                 input_data=input_for_test,
                                                 timeout=options['timeout'],
                                                 interact=interact)
                result = truncate(full_result, options['truncate_output'])
                was_truncated = (full_result != result)
                results['result'].append({
                    'command': test_cmd,
                    'status': status,
                    'output': result,
                    'truncated': was_truncated,
                    'truncated after': options['truncate_output'],
                })
        else:
            # File missing: record an error entry instead of running.
            results['result'].append({
                'command': test_cmd,
                'error': True,
                'output': '{} could not be found.'.format(filename),
            })
    return True
def process_file(filename, *, steps, options, spec, cwd, supporting_dir, interact, basedir, student):
    """Fetch, compile, and test one student file; return its results dict."""
    # Normalize `steps` to a list of command strings. A bare string must be
    # wrapped explicitly: strings are themselves Iterable, so without this
    # check a single command would be iterated character-by-character.
    if isinstance(steps, str) or not isinstance(steps, Iterable):
        steps = [steps]
    # Defaults for processing options; caller-provided options win.
    base_opts = {
        'timeout': 4,
        'truncate_output': 10000,  # 10K
        'truncate_contents': False,
        'optional': False,
        'hide_contents': False,
        'web': False
    }
    base_opts.update(options)
    options = base_opts
    results = {
        'filename': filename,
        'missing': False,
        'compilation': [],
        'result': [],
    }
    # Stop early if the file is missing entirely.
    if not get_file(filename, results, options):
        return results
    # Stop if compilation failed, there is nothing to run, or we are in
    # interactive "web" mode (which only compiles).
    compiled = compile_file(filename, steps, results, supporting_dir,
                            basedir, options['web'], student)
    if not compiled or not steps or options['web']:
        return results
    test_file(filename,
              spec=spec,
              results=results,
              options=options,
              cwd=cwd,
              supporting_dir=supporting_dir,
              interact=interact)
    return results
| 2.359375 | 2 |
moceansdk/modules/command/button/wa_call_button.py | d3no/mocean-sdk-python | 0 | 12763072 | <reponame>d3no/mocean-sdk-python
from moceansdk.modules.command.button.wa_button_basic import WaButtonBasic
class WaCallButton(WaButtonBasic):
    """WhatsApp "call" button: no extra required payload keys."""
    def type(self):
        # Button type identifier used by the WhatsApp message payload.
        return "call"
    def required_key(self):
        # Call buttons carry no mandatory extra fields.
        return []
| 2.34375 | 2 |
proxy/core/acceptor.py | opencollective/proxy.py | 0 | 12763073 | # -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Programmable Proxy Server in a single Python file.
:copyright: (c) 2013-present by <NAME> and contributors.
:license: BSD, see LICENSE for more details.
"""
import logging
import multiprocessing
import selectors
import socket
import threading
# import time
from multiprocessing import connection
from multiprocessing.reduction import send_handle, recv_handle
from typing import List, Optional, Type, Tuple
from .threadless import ThreadlessWork, Threadless
from .event import EventQueue, EventDispatcher, eventNames
from ..common.flags import Flags
logger = logging.getLogger(__name__)
class AcceptorPool:
    """AcceptorPool.
    Pre-spawns worker processes to utilize all cores available on the system. Server socket connection is
    dispatched over a pipe to workers. Each worker accepts incoming client request and spawns a
    separate thread to handle the client request.
    """
    def __init__(self, flags: Flags, work_klass: Type[ThreadlessWork]) -> None:
        self.flags = flags
        self.running: bool = False
        self.socket: Optional[socket.socket] = None
        self.acceptors: List[Acceptor] = []
        # One parent-side pipe end per acceptor, used to hand over the
        # listening socket's file descriptor.
        self.work_queues: List[connection.Connection] = []
        self.work_klass = work_klass
        # Global event plumbing, only created when events are enabled.
        self.event_queue: Optional[EventQueue] = None
        self.event_dispatcher: Optional[EventDispatcher] = None
        self.event_dispatcher_thread: Optional[threading.Thread] = None
        self.event_dispatcher_shutdown: Optional[threading.Event] = None
        if self.flags.enable_events:
            self.event_queue = EventQueue()
    def listen(self) -> None:
        """Bind and listen on the configured host/port (non-blocking)."""
        self.socket = socket.socket(self.flags.family, socket.SOCK_STREAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind((str(self.flags.hostname), self.flags.port))
        self.socket.listen(self.flags.backlog)
        # Non-blocking: acceptors select() before accepting.
        self.socket.setblocking(False)
        logger.info(
            'Listening on %s:%d' %
            (self.flags.hostname, self.flags.port))
    def start_workers(self) -> None:
        """Start worker processes."""
        for acceptor_id in range(self.flags.num_workers):
            work_queue = multiprocessing.Pipe()
            acceptor = Acceptor(
                idd=acceptor_id,
                work_queue=work_queue[1],
                flags=self.flags,
                work_klass=self.work_klass,
                event_queue=self.event_queue
            )
            acceptor.start()
            logger.debug('Started acceptor process %d', acceptor.pid)
            self.acceptors.append(acceptor)
            self.work_queues.append(work_queue[0])
        logger.info('Started %d workers' % self.flags.num_workers)
    def start_event_dispatcher(self) -> None:
        """Start the global event dispatcher thread fed by event_queue."""
        self.event_dispatcher_shutdown = threading.Event()
        assert self.event_dispatcher_shutdown
        assert self.event_queue
        self.event_dispatcher = EventDispatcher(
            shutdown=self.event_dispatcher_shutdown,
            event_queue=self.event_queue
        )
        self.event_dispatcher_thread = threading.Thread(
            target=self.event_dispatcher.run
        )
        self.event_dispatcher_thread.start()
        logger.debug('Thread ID: %d', self.event_dispatcher_thread.ident)
    def shutdown(self) -> None:
        """Signal the event dispatcher (if any) and join all acceptors."""
        logger.info('Shutting down %d workers' % self.flags.num_workers)
        if self.flags.enable_events:
            assert self.event_dispatcher_shutdown
            assert self.event_dispatcher_thread
            self.event_dispatcher_shutdown.set()
            self.event_dispatcher_thread.join()
            logger.debug(
                'Shutdown of global event dispatcher thread %d successful',
                self.event_dispatcher_thread.ident)
        for acceptor in self.acceptors:
            acceptor.join()
        logger.debug('Acceptors shutdown')
    def setup(self) -> None:
        """Listen on port, setup workers and pass server socket to workers."""
        self.running = True
        self.listen()
        if self.flags.enable_events:
            self.start_event_dispatcher()
        self.start_workers()
        # Send server socket to all acceptor processes.
        assert self.socket is not None
        for index in range(self.flags.num_workers):
            send_handle(
                self.work_queues[index],
                self.socket.fileno(),
                self.acceptors[index].pid
            )
            self.work_queues[index].close()
        # Parent's copy of the fd is no longer needed once all workers
        # hold their own duplicated handle.
        self.socket.close()
class Acceptor(multiprocessing.Process):
    """Socket client acceptor.
    Accepts client connection over received server socket handle and
    starts a new work thread.
    """
    # Class-level lock shared by all acceptor processes so only one of
    # them polls the listening socket at a time.
    lock = multiprocessing.Lock()
    def __init__(
            self,
            idd: int,
            work_queue: connection.Connection,
            flags: Flags,
            work_klass: Type[ThreadlessWork],
            event_queue: Optional[EventQueue] = None) -> None:
        super().__init__()
        self.idd = idd
        # Pipe end over which the parent sends the listening socket fd.
        self.work_queue: connection.Connection = work_queue
        self.flags = flags
        self.work_klass = work_klass
        self.event_queue = event_queue
        self.running = False
        self.selector: Optional[selectors.DefaultSelector] = None
        self.sock: Optional[socket.socket] = None
        # Optional single Threadless worker process and the pipe used to
        # hand accepted connections over to it.
        self.threadless_process: Optional[multiprocessing.Process] = None
        self.threadless_client_queue: Optional[connection.Connection] = None
    def start_threadless_process(self) -> None:
        """Spawn the Threadless worker that will service accepted clients."""
        pipe = multiprocessing.Pipe()
        self.threadless_client_queue = pipe[0]
        self.threadless_process = Threadless(
            client_queue=pipe[1],
            flags=self.flags,
            work_klass=self.work_klass,
            event_queue=self.event_queue
        )
        self.threadless_process.start()
        logger.debug('Started process %d', self.threadless_process.pid)
    def shutdown_threadless_process(self) -> None:
        """Join the Threadless worker and close its client pipe."""
        assert self.threadless_process and self.threadless_client_queue
        logger.debug('Stopped process %d', self.threadless_process.pid)
        self.threadless_process.join()
        self.threadless_client_queue.close()
    def start_work(self, conn: socket.socket, addr: Tuple[str, int]) -> None:
        """Dispatch an accepted connection: hand the fd to the Threadless
        process when enabled, otherwise run the work class in a new thread."""
        if self.flags.threadless and \
                self.threadless_client_queue and \
                self.threadless_process:
            self.threadless_client_queue.send(addr)
            send_handle(
                self.threadless_client_queue,
                conn.fileno(),
                self.threadless_process.pid
            )
            # Our copy of the fd is duplicated on send; close the local one.
            conn.close()
        else:
            work = self.work_klass(
                fileno=conn.fileno(),
                addr=addr,
                flags=self.flags,
                event_queue=self.event_queue
            )
            work_thread = threading.Thread(target=work.run)
            work.publish_event(
                event_name=eventNames.WORK_STARTED,
                event_payload={'fileno': conn.fileno(), 'addr': addr},
                publisher_id=self.__class__.__name__
            )
            work_thread.start()
    def run_once(self) -> None:
        """Wait (under the shared lock) for readability, then accept one client."""
        assert self.selector and self.sock
        with self.lock:
            events = self.selector.select(timeout=1)
            if len(events) == 0:
                return
        conn, addr = self.sock.accept()
        # now = time.time()
        # fileno: int = conn.fileno()
        self.start_work(conn, addr)
        # logger.info('Work started for fd %d in %f seconds', fileno, time.time() - now)
    def run(self) -> None:
        """Process entry point: rebuild the listening socket from the received
        fd and accept clients until interrupted."""
        self.running = True
        self.selector = selectors.DefaultSelector()
        # Receive the listening socket's fd from the parent process.
        fileno = recv_handle(self.work_queue)
        self.work_queue.close()
        self.sock = socket.fromfd(
            fileno,
            family=self.flags.family,
            type=socket.SOCK_STREAM
        )
        try:
            self.selector.register(self.sock, selectors.EVENT_READ)
            if self.flags.threadless:
                self.start_threadless_process()
            while self.running:
                self.run_once()
        except KeyboardInterrupt:
            pass
        finally:
            self.selector.unregister(self.sock)
            if self.flags.threadless:
                self.shutdown_threadless_process()
            self.sock.close()
            self.running = False
| 2.328125 | 2 |
Weather_Predictor/weather.py | LasalJayawardena/Neural-Network-Projects | 0 | 12763074 | <filename>Weather_Predictor/weather.py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.impute import SimpleImputer
from sklearn.model_selection import GridSearchCV
from numpy.random import seed
import datetime
import pdb
def print_time():
    """Print the current local date and time."""
    now = datetime.datetime.now()
    print(str(now))
# =========================================================
# Rain header
# Date,Location,MinTemp,MaxTemp,Rainfall,Evaporation,Sunshine,WindGustDir,WindGustSpeed,
# WindDir9am,WindDir3pm,WindSpeed9am,WindSpeed3pm,Humidity9am,Humidity3pm,Pressure9am,
# Pressure3pm,Cloud9am,Cloud3pm,Temp9am,Temp3pm,RainToday,RISK_MM,RainTomorrow
#exclude = ['Location', 'Date', 'RISK_MM']
exclude = ['Date', 'RISK_MM']
bools = ['RainToday', 'RainTomorrow']
categorical = ['Location', 'WindGustDir', 'WindDir9am', 'WindDir3pm']
df = pd.read_csv('weatherAUS.csv')
# Reduce size of dataset
df = df.loc[df['Location']=='Perth',:] # Pick just one location, otherwise wind direction will be meaningless
# Remove unwanted variables
for var in exclude:
    del df[var]
# Boolean variables to {0,1}
for var in bools:
    df[var] = df[var].astype('category')
    df[var] = df[var].cat.codes # convert to category codes
# Categorical variables to indicator variables, one new column per value
for var in categorical:
    df = pd.concat([df, pd.get_dummies(df[var], prefix=var)], axis=1)
    del df[var]
# Keep the target (RainTomorrow) as the last column.
cols = list(df.columns.values)
cols.remove('RainTomorrow')
cols.append('RainTomorrow') # Move to the end
df = df[ cols ]
# =========================================================
seed(0)
print_time()
# Features = all columns but the last; target = last column.
X = df.iloc[:,0:-1]
X = X.dropna(axis=1, how='all') # Drop totally empty columns here explicitly, to capture column names (impossible if Imputer drops cols)
y = df.iloc[:,-1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
# Dealing with missing data
# NOTE(review): col_names is captured here but not used afterwards.
col_names = list(X_train)
# Fit the imputer on the training set only, then apply to both splits.
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
imp.fit(X_train)
X_train = imp.transform(X_train)
X_test = imp.transform(X_test)
print("X_train.shape = ", X_train.shape)
print("X_test.shape = ", X_test.shape)
# =========================================================
# Ref: https://www.springboard.com/blog/beginners-guide-neural-network-in-python-scikit-learn-0-18/
#
# Rescale data for ANN
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test) # Use the same scaler as on training set
classifier = MLPClassifier(hidden_layer_sizes=(50,),max_iter=50000)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
print_time()
| 3.03125 | 3 |
WORKFLOW/code/python_code/tes.py | tabhitmy/MLTF | 0 | 12763075 | <filename>WORKFLOW/code/python_code/tes.py
import numpy
import numpy as np
import time
# NOTE(review): numpy is imported twice and unused in this script.
# Welcome banner.
print(' $$$ WELCOME to MLTF(v0.1) ! $$$')
print(' <Author: <NAME>> ')
print(' <Copyright Reserved.>')
# Current timestamp formatted as 'YYYY-MM-DD HH:MM:SS'.
timenow = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
print(' <' + timenow + '>')
# Demonstrate slicing pieces out of the formatted timestamp.
a = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
print(a)
print(a[:-5])  # everything except the trailing 'MM:SS'
print(a[-5:])  # just the trailing 'MM:SS'
# Compact 'HHMM' code built from the hour and minute fields.
b = (a[11:13] + a[14:16])
print(b)
| 3.125 | 3 |
icpp2018/experiments/experimentaux.py | eclufsc/packdrop-data-analysis | 1 | 12763076 | <reponame>eclufsc/packdrop-data-analysis<filename>icpp2018/experiments/experimentaux.py<gh_stars>1-10
from parseutils.file_outputter import *
from parseutils.experiment_analyzer import *
from parseutils.experiment_groups import *
from parseutils.directory_resolver import *
from enum import Enum
def list_append(lst, item):
    """Append *item* to *lst* in place and return that same list."""
    lst += [item]
    return lst
class ExperimentType(Enum):
    """Kinds of timing data extracted from a run, one output CSV each."""
    Schedtime = 0  # duration reported by the scheduler/LB strategy lines
    Apptime = 1    # total application time
    Steptime = 2   # per-step benchmark time
class ExperimentOrganizer:
    """A class to organize the header of the different files in an experiment"""
    # Columns shared by every output CSV.
    common_hdr = ['app', 'sched', 'plat_size', 'wildmetric']
    # Fixed CSV column index for each shared metric.
    metric_position = {
        'app': 0,
        'sched': 1,
        'plat_size': 2,
        'wildmetric': 3
    }
    # Full header per experiment type: the shared columns plus that type's
    # time column. list_append mutates its argument, hence the defensive
    # common_hdr[:] copies.
    headers = {
        ExperimentType.Schedtime: list_append(common_hdr[:], 'sched_time'),
        ExperimentType.Apptime: list_append(common_hdr[:], 'app_time'),
        ExperimentType.Steptime: list_append(common_hdr[:], 'step_time')
    }
    # These helpers take no instance state; @staticmethod makes the existing
    # class-level call sites explicit and also safe on instances.
    @staticmethod
    def outfiles(experiment_name):
        """Return the output CSV path for each experiment type."""
        return {
            ExperimentType.Schedtime: DirectoryResolver.output_abspath(experiment_name, experiment_name + '-schedtime.csv'),
            ExperimentType.Apptime: DirectoryResolver.output_abspath(experiment_name, experiment_name + '-apptime.csv'),
            ExperimentType.Steptime: DirectoryResolver.output_abspath(experiment_name, experiment_name + '-steptime.csv')
        }
    @staticmethod
    def metric_csv_position(metric):
        """Column index for *metric*; unknown metrics map to the trailing time column (4)."""
        if metric in ExperimentOrganizer.metric_position:
            return ExperimentOrganizer.metric_position[metric]
        return 4
class ExperimentWrapper:
    """SDumontWrapper keeps the analysis object and all the outputters linked to a single action_group for the sdumont data"""
    def open_all(self):
        """Open all the output files for the experiment"""
        for etype in ExperimentType:
            self.outputters[etype].append()
    def close_all(self):
        """Close all the output files opened for the experiment"""
        for etype in ExperimentType:
            self.outputters[etype].close()
    def write_header(self):
        """Writes the header of all the csv files"""
        for etype in ExperimentType:
            self.outputters[etype].write_header(ExperimentOrganizer.headers[etype])
    def set_sdumont_initial_files(self, application_name, sizes):
        """Sets the file group of the first round of experiments that will be analyzed"""
        file_group = InputFileGroup('-', 'out', ['lb-test-results'], [application_name], sizes)
        self.analysis.map_group(file_group, self.action_register)
    def set_sdumont_freq_files(self, frequencies, scheds):
        """Sets the file group of the first round of experiments that will be analyzed. This function should not be needed if the name patterns would be carefully looked at"""
        file_group = InputFileGroup('_', 'res', ['3_freq_leanmd'], frequencies, scheds)
        self.analysis.map_group(file_group, self.action_register)
    def set_g5k_files(self, app, wildvals, scheds):
        """Sets the file group of the first round of experiments that will be analyzed. This function should not be needed if the name patterns would be carefully looked at"""
        file_group = InputFileGroup('_', 'res', [app], wildvals, scheds)
        self.analysis.map_group(file_group, self.action_register)
    def print_line(self, etype):
        """Write the currently buffered attribute row to the CSV for *etype*."""
        self.outputters[etype].write()
    def set_metric(self, metric, val, etypes):
        """Store *val* in the CSV column for *metric* on every outputter in *etypes*."""
        for etype in etypes:
            self.outputters[etype].attributes[ExperimentOrganizer.metric_csv_position(metric)] = val
    def __init__(self, experiment_name):
        """Create one CsvOutputter per experiment type and hook file
        open/close to the parse lifecycle of the action group."""
        self.outputters = {}
        self.analysis = ExperimentAnalyzer(experiment_name)
        self.action_register = InputActionGroup()
        for etype in ExperimentType:
            self.outputters[etype] = CsvOutputter(ExperimentOrganizer.outfiles(experiment_name)[etype], ExperimentOrganizer.headers[etype])
        self.action_register.map_control_action(InputActionGroup.ControlAction.BEFORE_PARSE, self.open_all)
        self.action_register.map_control_action(InputActionGroup.ControlAction.AFTER_PARSE, self.close_all)
class ExperimentActions:
    """Class that executes all actions to notify the outputters to change their state and write lines into the files"""
    def found_sched(self, line, result):
        """Executed when the regex found the scheduler value in the log. Sets the sched data for all the experiments"""
        self.exp.set_metric('sched', result.groups(1)[0], [e for e in ExperimentType])
    def found_lb_test_app_and_platsize(self, line, result):
        """Executed when the regex found the app and platform size values in the lb_test log. Sets the sched data for all the experiments"""
        self.exp.set_metric('app', result.groups(1)[0], [e for e in ExperimentType])
        self.exp.set_metric('plat_size', result.groups(1)[1], [e for e in ExperimentType])
    def found_leanmd_app(self, line, result):
        """Executed when the regex found the app value in the leanmd log. Sets the app data for all the experiments"""
        self.exp.set_metric('app', 'leanmd', [e for e in ExperimentType])
    def found_leanmd_period(self, line, result):
        """Executed when the regex found the leanmd sched period value in the leanmd log. Sets the wildmetric data for all the experiments"""
        self.exp.set_metric('wildmetric', result.groups(1)[0], [e for e in ExperimentType])
    def found_topology(self, line, result):
        """Executed when the regex found the topology value in the lb_test log. Sets the wildmetric data for all the experiments"""
        self.exp.set_metric('wildmetric', result.groups(1)[0], [e for e in ExperimentType])
    def found_sched_time(self, line, result):
        """Executed when the regex found the scheduler time in the log. Sets the sched_time data for the relevant experiment"""
        self.exp.set_metric('sched_time', result.groups(1)[0], [ExperimentType.Schedtime])
        self.exp.print_line(ExperimentType.Schedtime)
    def found_leanmd_platsize(self, line, result):
        """Executed when the regex found the platform size value in the leanmd log. Sets the platform_size data for all the experiments"""
        self.exp.set_metric('plat_size', result.groups(1)[0], [e for e in ExperimentType])
    def found_apptime(self, line, result):
        """Record the total application time and emit the Apptime CSV row."""
        self.exp.set_metric('app_time', result.groups(1)[0], [ExperimentType.Apptime])
        self.exp.print_line(ExperimentType.Apptime)
    def found_leanmd_step_time(self, line, result):
        """Record a per-step benchmark time and emit the Steptime CSV row."""
        self.exp.set_metric('step_time', result.groups(1)[0], [ExperimentType.Steptime])
        self.exp.print_line(ExperimentType.Steptime)
    def __init__(self, ewrapper):
        """Bind this action set to the ExperimentWrapper whose outputters it drives."""
        self.exp = ewrapper
class CharmLogTriggers:
    """Maps Charm++ log-line regexes to the ExperimentActions handler run on a match."""
    def __init__(self, actor):
        # Each key is a regex whose capture group(s) feed the bound handler.
        self.mapping = {
            r'\[0\] (.+?)LB created': actor.found_sched,
            r'MOLECULAR DYNAMICS START UP': actor.found_leanmd_app,
            r'Running (.+?) on (\d+?) processors': actor.found_lb_test_app_and_platsize,
            r'Selecting Topology (.*)': actor.found_topology,
            r'strategy finished at .+? duration (.+?) s': actor.found_sched_time,
            r'step \d+? finished at .+? duration (.+?) ': actor.found_sched_time,
            r'LB Period:(\d+)': actor.found_leanmd_period,
            r' numPes (\d+)': actor.found_leanmd_platsize,
            r'TIME\sPER\sSTEP\s150\s+?(.+?)\s+?': actor.found_apptime,
            r'Total application time (.+?) s': actor.found_apptime,
            r'Step \d+? Benchmark Time (.+?) ': actor.found_leanmd_step_time
        }
Baselines/enjoy_custom.py | AhmetHamzaEmra/Understanding_RL | 2 | 12763077 | import gym
import itertools
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers
import baselines.common.tf_util as U
from baselines.common.tf_util import load_state, save_state
from baselines import logger
from baselines import deepq
from baselines.deepq.replay_buffer import ReplayBuffer
from baselines.deepq.utils import ObservationInput
from baselines.common.schedules import LinearSchedule
def model(inpt, num_actions, scope, reuse=False):
    """Q-network: two 128-unit tanh hidden layers followed by a linear
    output layer producing one value per action."""
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        out = layers.fully_connected(out, num_outputs=128, activation_fn=tf.nn.tanh)
        out = layers.fully_connected(out, num_outputs=128, activation_fn=tf.nn.tanh)
        out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
        return out
if __name__ == '__main__':
    with U.make_session(8) as sess:
        # Create the environment
        env = gym.make("LunarLander-v2")
        # Create all the functions necessary to run the model
        act, train, update_target, debug = deepq.build_train(
            make_obs_ph=lambda name: ObservationInput(env.observation_space, name=name),
            q_func=model,
            num_actions=env.action_space.n,
            optimizer=tf.train.AdamOptimizer(learning_rate=5e-4),
        )
        # Exploration is pinned at 0: at evaluation time every action is greedy.
        exploration = LinearSchedule(schedule_timesteps=10000, initial_p=0, final_p=0)
        # Initialize the parameters and copy them to the target network.
        U.initialize()
        update_target()
        # The Saver must be created after the graph variables exist;
        # constructing it at module top (before build_train) would fail
        # because there are no variables to save yet.
        saver = tf.train.Saver()
        saver.restore(sess, "./models/custom_model.ckpt")
        obs = env.reset()
        while True:
            env.render()
            # Greedy action from the restored Q-network (epsilon is 0, so the
            # schedule argument is a constant; the original referenced an
            # undefined timestep variable here).
            action = act(obs[None], update_eps=exploration.value(0))[0]
            # Advance the environment and carry the observation forward
            # (the original never updated obs, replaying the first state).
            obs, rew, done, _ = env.step(action)
            if done:
                break
| 2.15625 | 2 |
draugr/visualisation/matplotlib_utilities/annotate.py | cnHeider/draugr | 3 | 12763078 | <filename>draugr/visualisation/matplotlib_utilities/annotate.py<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__doc__ = r"""
Created on 04-03-2021
"""
from typing import Tuple
from draugr.scipy_utilities import mag_decimation_subsample
# Public API of this module.
__all__ = [
    "annotate_lines",
    "default_index_decimator",
]
from warg import GDKC, passes_kws_to
from matplotlib.pyplot import Axes
# Default index picker: magnitude-based decimation that returns the indices
# of retained samples (see draugr.scipy_utilities.mag_decimation_subsample).
default_index_decimator = GDKC(
    mag_decimation_subsample, decimation_factor=5, return_indices=True
)  # finds interesting features?
@passes_kws_to(Axes.annotate)
def annotate_lines(
    ax_: Axes,
    num_lines: int = 1,  # None for all
    index_decimator: callable = default_index_decimator,
    color: str = "k",  # None for auto color
    xycoords: Tuple[str, str] = (
        "data",
        # 'axes fraction',
        "data",
    ),  # TODO: NOT DONE! Where to place annotation, use 'axes fraction' for along axes'
    ha: str = "left",
    va: str = "center",
    **kwargs,
) -> None:
    """Annotate lines already plotted on *ax_* with their y-values.

    For each of the first *num_lines* lines (all lines when falsy), a subset
    of sample indices is chosen by *index_decimator* (all indices when it is
    falsy) and each selected point is labelled with its y-value to two
    decimals. Extra keyword arguments are forwarded to ``Axes.annotate``.
    NOTE(review): when color is falsy, it is taken from the first line and
    then reused for every subsequent line -- confirm that is intended.
    """
    lines = ax_.lines
    if not num_lines:
        num_lines = len(lines)
    # zip with a range caps the iteration at num_lines lines.
    for l, _ in zip(lines, range(num_lines)):
        y = l.get_ydata()
        x = l.get_xdata()
        if not color:
            color = l.get_color()
        if index_decimator:
            mag_y = index_decimator(y)
        else:
            mag_y = list(range(len(y)))
        # Fancy indexing with the selected index list; assumes get_xdata/
        # get_ydata return array-like data supporting it -- TODO confirm.
        for x_, y_ in zip(x[mag_y], y[mag_y]):
            ax_.annotate(
                f"{y_:.2f}",
                xy=(x_, y_),  # ( 1, y_) axes fraction'
                xycoords=xycoords,
                ha=ha,
                va=va,
                color=color,
                **kwargs,
            )
if __name__ == "__main__":

    def _demo() -> None:
        """Plot a triangle wave and annotate its decimated samples."""
        from matplotlib import pyplot

        data = [*range(0, 10), *range(10, -10, -1), *range(-10, 0)]
        pyplot.plot(data)
        annotate_lines(pyplot.gca(), index_decimator=default_index_decimator)
        pyplot.show()

    _demo()
| 2.640625 | 3 |
nnunet/self_supervision/train_pretext.py | julieeecious/nnUNet | 1 | 12763079 | ######################################################################################
# ----------Copyright 2021 Division of Medical and Environmental Computing,----------#
# ----------Technical University of Darmstadt, Darmstadt, Germany--------------------#
######################################################################################
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.utilities.task_name_id_conversion import convert_id_to_task_name
# Main driver for running self supervised learning pretext tasks
def main():
    """Parse CLI arguments and report the dataset paths for a self-supervision pretext task.

    Expects the nnUNet raw-data layout under ``$nnUNet_raw_data_base/nnUNet_raw_data``
    and a task folder matching the numeric id given with ``-t``.
    """
    import argparse
    # Typos fixed in the user-facing help strings below
    # ("self- supervisio" -> "self-supervision", "pretext asks" -> "pretext tasks").
    parser = argparse.ArgumentParser(description="We extend nnUNet to offer self-supervision tasks. This step is to"
                                                 " split the dataset into two - self-supervision input and "
                                                 "self-supervision output folder.")
    parser.add_argument("-t", type=int, help="Task id. The task name you wish to run self-supervision task for. "
                                             "It must have a matching folder 'TaskXXX_' in the raw "
                                             "data folder", required=True)
    parser.add_argument("-ss", help="Run self-supervision pretext tasks. Specify which self-supervision task you "
                                    "wish to train. Current supported tasks: context_restoration | jigsaw_puzzle | byol")
    args = parser.parse_args()

    # Resolve the task folder inside the nnUNet raw-data tree.
    base = join(os.environ['nnUNet_raw_data_base'], 'nnUNet_raw_data')
    task_name = convert_id_to_task_name(args.t)
    target_base = join(base, task_name)
    pretext = str(args.ss)
    print(f'Hey there: here\'s pretext task {pretext} for {task_name}. '
          f'Path to get ss datasets are {join(target_base, "ssInput" + "BYOL")} and {join(target_base, "ssOutput" + "BYOL")}')
if __name__ == "__main__":
main()
| 2.34375 | 2 |
PP4E-Examples-1.4/Examples/PP4E/System/Processes/pipefifo.py | AngelLiang/PP4E | 0 | 12763080 | <reponame>AngelLiang/PP4E<filename>PP4E-Examples-1.4/Examples/PP4E/System/Processes/pipefifo.py
"""
named pipes; os.mkfifo is not available on Windows (without Cygwin);
there is no reason to fork here, since fifo file pipes are external
to processes--shared fds in parent/child processes are irrelevent;
"""
import os, time, sys
fifoname = '/tmp/pipefifo' # must open same name
def child():
    """Writer side: open the fifo for writing (blocks until a reader opens it)
    and emit a 'Spam NNN' line forever, sleeping 0..4 seconds between lines."""
    fd = os.open(fifoname, os.O_WRONLY)  # raw fd; data is written as bytes
    delay = 0
    while True:
        time.sleep(delay)
        os.write(fd, ('Spam %03d\n' % delay).encode())
        delay = (delay + 1) % 5  # cycle the pause length 0,1,2,3,4,0,...
def parent():
    """Reader side: open the fifo as a text stream and echo every line
    (newline stripped) together with our pid and a timestamp, forever."""
    stream = open(fifoname, 'r')  # blocks until a writer opens the fifo
    while True:
        received = stream.readline()[:-1]  # drop the trailing newline
        print('Parent %d got "%s" at %s' % (os.getpid(), received, time.time()))
if __name__ == '__main__':
    # Ensure the named pipe exists before either side opens it.
    if not os.path.exists(fifoname):
        os.mkfifo(fifoname)  # create a named pipe file
    if len(sys.argv) == 1:
        parent()  # run as parent if no args
    else:  # else run as child process
        child()
| 2.609375 | 3 |
day16.py | t-ah/adventofcode-2019 | 0 | 12763081 | <gh_stars>0
def parse():
    """Read the puzzle input file 'input16' and return its digits as a list of ints."""
    with open("input16") as f:
        n = [int(x) for x in f.read()]
    return n
# Part 1: apply 100 phases of the FFT transform.  For output digit i (1-based)
# the repeating pattern 0,1,0,-1 stretched by i reduces to alternating +/- sums
# over windows of width i, starting at index i-1 and stepping 2*i.
n = parse()
for _ in range(100):
    result = []
    for i in range(1, len(n) + 1):
        digit = 0
        start = i - 1
        add = 1  # alternates +1 / -1 between consecutive windows
        while start < len(n):
            end = min(start + i, len(n))
            digit += add * sum(n[start:end])
            add *= -1
            start += 2*i
        result.append(abs(digit) % 10)  # keep only the ones digit
    n = result
print("".join([str(x) for x in n[:8]]))
# part 2
# The real signal is the input repeated 10000 times; the answer is the 8 digits
# at the offset given by the first 7 input digits.  Only the tail from that
# offset onward matters, so build just that suffix (`l`).
n = parse()
k = int("".join([str(x) for x in n[:7]]))  # message offset
total_len = 10000 * len(n)
relevant_len = total_len - k  # how many digits remain after the offset
offset = k % len(n)
l = n[offset:] + (relevant_len // len(n)) * n
for nr in range(100):
    print(nr)  # phase counter (progress indicator)
    result = []
    # cache maps (start, end) -> sum(l[start:end]) so a window shifted by one
    # can be derived from the previous window instead of re-summed.
    cache = {}
    for i in range(0, len(l)):
        digit = 0
        start = i
        add = 1  # alternating sign between windows, as in part 1
        while start < len(l):
            end = min(start + i + k, len(l))
            if (start - 1, end) in cache:
                q = (cache[(start - 1,end)] - l[start - 1])
            else:
                q = sum(l[start:end])
            digit += add * q
            cache[(start, end)] = q
            add *= -1
            start += 2 * (i + k)
        result.append(abs(digit) % 10)
    l = result
print("".join([str(x) for x in l[:8]])) | 2.453125 | 2 |
scripts/build_signals_page.py | malariagen/agam-selection-atlas | 3 | 12763082 | <filename>scripts/build_signals_page.py
# -*- coding: utf-8 -*-
from setup import *
if __name__ == '__main__':
    # Render docs/signals.rst from the Jinja2 template, feeding it the rows of
    # the signals CSV (via petl's fromcsv, imported through `from setup import *`).
    loader = jinja2.FileSystemLoader('templates')
    env = jinja2.Environment(loader=loader)
    template = env.get_template('signals.rst')

    data = dict()
    data['signals'] = list(etl.fromcsv('docs/_static/data/signals.csv').dicts())

    # render the report
    out_path = 'docs/signals.rst'
    print('rendering', out_path)
    with open(out_path, mode='w') as f:
        print(template.render(**data), file=f)
| 2.03125 | 2 |
src/pretix/plugins/paypal/signals.py | NicsTr/pretix | 0 | 12763083 | import json
from collections import OrderedDict
from django import forms
from django.dispatch import receiver
from django.template.loader import get_template
from django.utils.translation import gettext_lazy as _
from pretix.base.forms import SecretKeySettingsField
from pretix.base.signals import (
logentry_display, register_global_settings, register_payment_providers,
requiredaction_display,
)
@receiver(register_payment_providers, dispatch_uid="payment_paypal")
def register_payment_provider(sender, **kwargs):
    """Signal handler: expose the PayPal payment provider class to pretix.

    The import is local to avoid importing the payment module at app load time.
    """
    from .payment import Paypal
    return Paypal
@receiver(signal=logentry_display, dispatch_uid="paypal_logentry_display")
def pretixcontrol_logentry_display(sender, logentry, **kwargs):
    """Return a human-readable description for PayPal webhook log entries.

    Returns ``None`` (implicitly) for log entries of other plugins, or when the
    payload carries no event type.
    """
    if logentry.action_type != 'pretix.plugins.paypal.event':
        return

    payload = json.loads(logentry.data)
    event_type = payload.get('event_type')
    # Known webhook event types and their translated descriptions; anything
    # else is shown verbatim.
    known_events = {
        'PAYMENT.SALE.COMPLETED': _('Payment completed.'),
        'PAYMENT.SALE.DENIED': _('Payment denied.'),
        'PAYMENT.SALE.REFUNDED': _('Payment refunded.'),
        'PAYMENT.SALE.REVERSED': _('Payment reversed.'),
        'PAYMENT.SALE.PENDING': _('Payment pending.'),
    }
    text = known_events.get(event_type, event_type)
    if text:
        return _('PayPal reported an event: {}').format(text)
@receiver(signal=requiredaction_display, dispatch_uid="paypal_requiredaction_display")
def pretixcontrol_action_display(sender, action, request, **kwargs):
    """Render the control-panel widget for a pending PayPal required action.

    Returns ``None`` for actions belonging to other plugins, and — BUG FIX —
    also for unrecognised ``pretix.plugins.paypal.*`` action types, which
    previously left ``template`` unbound and raised ``NameError``.
    """
    if not action.action_type.startswith('pretix.plugins.paypal'):
        return
    data = json.loads(action.data)
    templates = {
        'pretix.plugins.paypal.refund': 'pretixplugins/paypal/action_refund.html',
        'pretix.plugins.paypal.overpaid': 'pretixplugins/paypal/action_overpaid.html',
        'pretix.plugins.paypal.double': 'pretixplugins/paypal/action_double.html',
    }
    template_name = templates.get(action.action_type)
    if template_name is None:
        return  # unknown paypal action type: nothing to display
    template = get_template(template_name)
    ctx = {'data': data, 'event': sender, 'action': action}
    return template.render(ctx, request)
@receiver(register_global_settings, dispatch_uid='paypal_global_settings')
def register_global_settings(sender, **kwargs):
    """Contribute the PayPal-Connect credentials and endpoint choice to the
    instance-wide settings form (ordered, hence the OrderedDict)."""
    return OrderedDict([
        ('payment_paypal_connect_client_id', forms.CharField(
            label=_('PayPal Connect: Client ID'),
            required=False,
        )),
        # SecretKeySettingsField keeps the secret masked in the form.
        ('payment_paypal_connect_secret_key', SecretKeySettingsField(
            label=_('PayPal Connect: Secret key'),
            required=False,
        )),
        ('payment_paypal_connect_endpoint', forms.ChoiceField(
            label=_('PayPal Connect Endpoint'),
            initial='live',
            choices=(
                ('live', 'Live'),
                ('sandbox', 'Sandbox'),
            ),
        )),
    ])
| 1.929688 | 2 |
ngrams.py | RichardLitt/language-niche-research | 2 | 12763084 | <gh_stars>1-10
import nltk
from nltk import bigrams
from nltk import trigrams
from nltk.util import ngrams
from sys import argv
# NOTE(review): Python 2 script (statement-form `print`).  Tokenises each line
# of the given file and prints trigram frequencies per line.
script, filename = argv
text = open(filename)
# for n in range (2,7):
for line in text:
    tokens = nltk.word_tokenize(line)
    tokens = [token.lower() for token in tokens if len(token) > 1] #same as unigrams
    # NOTE(review): bi_tokens is computed but never used.
    bi_tokens = bigrams(tokens)
    # NOTE(review): assumes trigrams() returns a list (true for old nltk);
    # recent nltk returns a generator, which set()/count() would exhaust — confirm version.
    tri_tokens = trigrams(tokens)
    print [(item, tri_tokens.count(item)) for item in sorted(set(tri_tokens))]

    # fdist = nltk.FreqDist(grams)
    # print fdist.most_common()
    # for k,v in fdist.items():
    #     print v, k

# ngrams
# for n in range(0,6):
#     for line in text:
#         for grams in ngrams(line.split(), n):
#             print grams
| 3.5 | 4 |
priority_queues/slider_puzzle_solver.py | reppertj/algorithms | 0 | 12763085 | from functools import total_ordering
from MinPQ import MinPQ
"""
Write a program to solve the 8-puzzle problem
(and its natural generalizations) using the A* search algorithm
"""
class Board():
    """An n x n sliding-puzzle board.

    The board is stored flattened in ``self.flat`` (row-major); ``0`` is the
    blank square.  The goal configuration is 1..n²-1 in order with the blank
    last.
    """

    def __init__(self, array):
        """constructor takes an
        n x n list of lists containing
        the n ** 2 integers between 0 and n ** 2 - 1,
        where 0 represents the blank square
        """
        self.flat = [tile for row in array for tile in row]
        self.dimension = len(array)

    def __repr__(self):
        # Dimension on the first line, then the board one row per line.
        output = "\n" + str(self.dimension)
        for n in range(len(self.flat)):
            if n % (self.dimension) == 0:
                output += "\n" + str(self.flat[n])
            else:
                output += " " + str(self.flat[n])
        return output

    def hamming_distance(self):
        """Number of tiles out of place
        (positions 1..n²-1 whose tile differs from the goal value).

        NOTE(review): a misplaced blank is counted too, unlike the usual
        Hamming convention that ignores the blank; harmless here because the
        solver uses manhattan_distance, but confirm before reusing.
        """
        distance = 0
        for pos in range(1, len(self.flat)):
            if pos != self.flat[pos - 1]:
                distance += 1
        return distance

    def manhattan_distance(self):
        """Sum of manhattan distances
        between self and goal
        (the blank tile is excluded, keeping the heuristic admissible)
        """
        distance = 0
        for pos in range(1, len(self.flat) + 1):
            tile = self.flat[pos - 1]
            # (row, col) of the tile's current and goal positions.
            actual = ((pos - 1) // self.dimension,
                      (pos - 1) % self.dimension)
            goal = ((tile - 1) // self.dimension,
                    (tile - 1) % self.dimension)
            if tile != 0:
                distance += abs(goal[0] - actual[0]) + abs(goal[1] - actual[1])
        return distance

    def neighbors(self):
        """Yield the boards reachable by sliding one tile into the blank."""
        for board in self._neighbor_boards():
            # Build each neighbor directly from its flat list, bypassing __init__.
            neighbor = Board([])
            neighbor.dimension = self.dimension
            neighbor.flat = board
            yield neighbor

    def _neighbor_boards(self):
        """Return the flat lists of all neighbor configurations."""
        # Locate the blank square.
        for k in range(0, len(self.flat)):
            if self.flat[k] == 0:
                pos_0 = k
                coords = ((k // self.dimension), (k % self.dimension))
                break
        neighbor_boards = []
        # horizontal neighbors
        if coords[1] == 0:
            neighbor_boards.append(self._exch(self.flat.copy(),
                                              pos_0, pos_0 + 1))
        elif coords[1] == self.dimension - 1:
            neighbor_boards.append(self._exch(self.flat.copy(),
                                              pos_0, pos_0 - 1))
        else:
            neighbor_boards.append(self._exch(self.flat.copy(),
                                              pos_0, pos_0 - 1))
            neighbor_boards.append(self._exch(self.flat.copy(),
                                              pos_0, pos_0 + 1))
        # vertical neighbors
        if coords[0] == 0:
            neighbor_boards.append(self._exch(self.flat.copy(),
                                              pos_0, pos_0 + self.dimension))
        elif coords[0] == self.dimension - 1:
            neighbor_boards.append(self._exch(self.flat.copy(),
                                              pos_0, pos_0 - self.dimension))
        else:
            neighbor_boards.append(self._exch(self.flat.copy(),
                                              pos_0, pos_0 + self.dimension))
            neighbor_boards.append(self._exch(self.flat.copy(),
                                              pos_0, pos_0 - self.dimension))
        return neighbor_boards

    @staticmethod
    def _exch(flat, a, b):
        # Swap positions a and b in-place and return the same list.
        swap = flat[a]
        flat[a] = flat[b]
        flat[b] = swap
        return flat

    def is_goal(self):
        """Is this the goal board?
        """
        for k in range(len(self.flat) - 1):
            if k + 1 != self.flat[k]:
                return False
        return True

    def __eq__(self, other):
        # Boards are equal iff their flat layouts match (dimension follows).
        return (self.flat == other.flat)

    def twin(self):
        """a board that is obtained
        by exchanging any pair of tiles
        (the first two non-blank tiles; exactly one of a board and its twin
        is solvable, which the Solver exploits)
        """
        for i in range(len(self.flat)):
            if self.flat[i] != 0:
                for j in range(i + 1, len(self.flat)):
                    if self.flat[j] != 0:
                        t = self._exch(self.flat.copy(), i, j)
                        break
                break
        tw = Board([])
        tw.flat = t
        tw.dimension = self.dimension
        return tw
@total_ordering
class SearchNode():
    """A node of the A* search: a board, the move count reaching it, and a
    link to the predecessor node.  Nodes order by priority f = g + h, where
    g is the move count and h the board's Manhattan distance."""

    def __init__(self, board: Board, moves: int, prev):
        self.board = board
        self.moves = moves
        self.prev = prev
        # f = h + g; total_ordering derives the remaining comparisons.
        self.priority = self.board.manhattan_distance() + moves

    def __lt__(self, other):
        return self.priority < other.priority

    def __eq__(self, other):
        return self.priority == other.priority
class Solver():
    """A* solver for the sliding puzzle.

    Runs the puzzle and its twin (one pair of tiles swapped) in lockstep:
    exactly one of the two is solvable, so whichever search reaches the goal
    first decides solvability.
    """

    def __init__(self, initial: Board):
        self.init_node = SearchNode(initial, 0, None)
        self.queue = MinPQ()
        self.queue.insert(self.init_node)
        self.twin_init = SearchNode(initial.twin(), 0, None)
        self.twin_queue = MinPQ()
        self.twin_queue.insert(self.twin_init)

    def is_solvable(self):
        """True if the initial board can reach the goal."""
        return self.solution()[0] != -1

    def n_moves(self):
        """Number of moves in an optimal solution, or -1 if unsolvable."""
        return self.solution()[0]

    @staticmethod
    def seek_soln(queue):
        """Perform one A* step on *queue*: pop the best node, expand it, and
        yield the node if it is the goal, otherwise yield False.

        The queue carries all state, so creating a fresh generator per step
        (as solution() does) is fine.
        """
        while True:
            min_node = queue.del_min()
            if min_node.board.is_goal():
                yield min_node
            for neighbor in min_node.board.neighbors():
                # Skip the move that would undo the previous one.
                if min_node.prev is None or neighbor != min_node.prev.board:
                    queue.insert(SearchNode(
                        neighbor, min_node.moves + 1, min_node))
            # BUG FIX: yield False after *each* non-goal step.  This was
            # previously outside the while loop and therefore unreachable, so
            # the main and twin searches never alternated and solution() hung
            # on unsolvable inputs.
            yield False

    def solution(self):
        """Return (moves, boards) for an optimal solution, or (-1, None)."""
        min_node = False
        impossible = False
        # Alternate one step of the main search and one of the twin search.
        while min_node is False and impossible is False:
            min_node = next(self.seek_soln(self.queue))
            impossible = next(self.seek_soln(self.twin_queue))
        if not impossible:
            moves = min_node.moves
            solns = []
            # Walk the prev-chain back to the initial node.
            while min_node.prev is not None:
                solns.append(min_node.board)
                min_node = min_node.prev
            # BUG FIX: append the initial *board*; previously the SearchNode
            # itself was appended, mixing types in the returned list.
            solns.append(min_node.board)
            solns.reverse()
            return (moves, solns)
        else:
            return (-1, None)
# Example boards: `sample` is solvable; `impossible_sample` is one
# transposition away from the goal and therefore unsolvable.
sample = Board([
    [5, 2, 6],
    [3, 7, 1],
    [8, 4, 0],
])

impossible_sample = Board([
    [1, 2, 3],
    [4, 5, 6],
    [8, 7, 0],
])

if __name__ == '__main__':
    # Guarded so that importing this module no longer runs two full searches.
    s = Solver(sample)
    i = Solver(impossible_sample)
    print(s.solution())
    print(i.solution())
| 4 | 4 |
examples/hydro_thermal/discrete.py | odow/msppy | 0 | 12763086 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: lingquan
"""
import pandas
import numpy
from msppy.msp import MSLP
from msppy.solver import SDDP
import gurobipy
import sys
# Multistage stochastic hydro-thermal scheduling model, solved with SDDP.
# Usage: python discrete.py <n_processes>
n_processes = int(sys.argv[1])

# Static data: 4 hydro systems, regional demand, deficit cost tiers,
# inter-region exchange capacities and per-region thermal plant tables.
hydro_ = pandas.read_csv("./data/hydro.csv", index_col=0)
demand = pandas.read_csv("./data/demand.csv", index_col=0)
deficit_ = pandas.read_csv("./data/deficit.csv", index_col=0)
exchange_ub = pandas.read_csv("./data/exchange.csv", index_col=0)
thermal_ = [
    pandas.read_csv("./data/thermal_{}.csv".format(i), index_col=0)
    for i in range(4)
]
# Historical monthly inflows per region -> 12 monthly scenario sets each.
hist = [
    pandas.read_csv("./data/hist_{}.csv".format(i), sep=";")
    for i in range(4)
]
hist = pandas.concat(hist, axis=1)
hist.dropna(inplace=True)
hist.drop(columns='YEAR', inplace=True)
scenarios = [
    hist.iloc[:,12*i:12*(i+1)].transpose().values for i in range(4)
]

T = 120  # planning horizon in months
HydroThermal = MSLP(T=T, bound=0)
for t in range(T):
    m = HydroThermal[t]
    # State: stored water per reservoir (links stage t-1 to stage t).
    stored_now,stored_past = m.addStateVars(4, ub=hydro_['UB'][:4], name="stored")
    spill = m.addVars(4, name="spill")
    hydro = m.addVars(4, ub=hydro_['UB'][-4:], name="hydro")
    # Deficit variables: 4 regions x 4 cost tiers (depth-dependent penalty).
    deficit = m.addVars(
        [(i,j) for i in range(4) for j in range(4)],
        ub = [
            demand.iloc[t%12][i] * deficit_['DEPTH'][j]
            for i in range(4) for j in range(4)
        ],
        obj = [
            deficit_['OBJ'][j]
            for i in range(4) for j in range(4)
        ],
        name = "deficit")
    thermal = [None] * 4
    for i in range(4):
        thermal[i] = m.addVars(
            len(thermal_[i]),
            ub=thermal_[i]['UB'],
            lb=thermal_[i]['LB'],
            obj=thermal_[i]['OBJ'],
            name="thermal_{}".format(i)
        )
    # 5x5 exchange grid: 4 regions plus a transshipment hub (index 4).
    exchange = m.addVars(5,5, ub=exchange_ub.values.flatten(), name="exchange")
    thermal_sum = m.addVars(4, name="thermal_sum")
    m.addConstrs(thermal_sum[i] == gurobipy.quicksum(thermal[i].values()) for i in range(4))
    # Demand balance per region: generation + deficit + net imports = demand.
    for i in range(4):
        m.addConstr(
            thermal_sum[i]
            + gurobipy.quicksum(deficit[(i,j)] for j in range(4))
            + hydro[i]
            - gurobipy.quicksum(exchange[(i,j)] for j in range(5))
            + gurobipy.quicksum(exchange[(j,i)] for j in range(5))
            == demand.iloc[t%12][i]
        )
    # The hub neither produces nor consumes: flow in equals flow out.
    m.addConstr(
        gurobipy.quicksum(exchange[(j,4)] for j in range(5))
        - gurobipy.quicksum(exchange[(4,j)] for j in range(5))
        == 0
    )
    # Water balance: storage + spill + generation - previous storage = inflow.
    # Stage 0 uses deterministic initial inflows; later stages draw inflows
    # from the historical scenarios of the corresponding calendar month.
    for i in range(4):
        if t == 0:
            m.addConstr(
                stored_now[i] + spill[i] + hydro[i] - stored_past[i]
                == hydro_['INITIAL'][4:8][i]
            )
        else:
            m.addConstr(
                stored_now[i] + spill[i] + hydro[i] - stored_past[i] == 0,
                uncertainty = {'rhs': scenarios[i][(t-1)%12]}
            )
    if t == 0:
        m.addConstrs(stored_past[i] == hydro_['INITIAL'][:4][i] for i in range(4))

HydroThermal_SDDP = SDDP(HydroThermal)
HydroThermal_SDDP.solve(
    max_time=200,n_processes=n_processes,n_steps=n_processes)
| 1.96875 | 2 |
GeologicProfile_Toolbox/StrDataToPlanProfile.py | DannyLeonD/GeoTools | 0 | 12763087 | <reponame>DannyLeonD/GeoTools
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# StrDataToPlanProfile.py
# Author: <NAME> (Servicio Geologico Colombiano), <NAME>(Servicio Geologico Colombiano), <NAME> (Servicio Geologico Colombiano), <NAME>(Servicio Geologico Colombiano)
# Created on: 2020-08-30 16:40:30.00000
# Description: This tool calculate the apparent dip in the selected structural data and project the data to the plan profile line.
# ---------------------------------------------------------------------------
# Import modules
import arcpy
# Script arguments
# Tool parameters (in toolbox order): structural points, profile line,
# azimuth field, dip field, optional fault/fold layers, output feature class.
strDataFC = arcpy.GetParameterAsText(0)
profileFC = arcpy.GetParameterAsText(1)
azRumField = arcpy.GetParameterAsText(2)
dipField = arcpy.GetParameterAsText(3)
faultFC = arcpy.GetParameterAsText(4)
foldFC = arcpy.GetParameterAsText(5)
outFC = arcpy.GetParameterAsText(6)
# Environment settings
mxd = arcpy.mapping.MapDocument("CURRENT")
df = arcpy.mapping.ListDataFrames(mxd, "*")[0]
arcpy.CheckOutExtension("Spatial")
sr = arcpy.Describe(strDataFC).spatialReference
# Overwrite pre-existing files
arcpy.env.overwriteOutput = True
# Add coordinates X and Y to Structural Data Feature Layer
arcpy.AddXY_management(strDataFC)
outlines = [] # Array to stock the temporal line projections of structural data
# Add bearing azimuth to plan profile line
arcpy.AddGeometryAttributes_management(profileFC, 'LINE_BEARING', 'METERS', '#', sr)
# Define cursor to loop the Structural Data Feature Layer
cursor1= arcpy.SearchCursor(strDataFC)
# Loop the cursor and append the temporal line projections in outlines array
# (a 5 km segment in both the azimuth and opposite direction per point).
for row1 in cursor1:
    Bz = (row1.getValue(dipField))
    Az = (row1.getValue(azRumField))
    Az2 = Az + 180
    xi = (row1.getValue("POINT_X"))
    yi = (row1.getValue("POINT_Y"))
    start = arcpy.PointGeometry(arcpy.Point(xi,yi), sr)
    end = start.pointFromAngleAndDistance(Az,5000,"PLANAR")
    end2 = start.pointFromAngleAndDistance(Az2,5000,"PLANAR")
    outlines.append(arcpy.Polyline(arcpy.Array([start.centroid,end.centroid]),sr))
    outlines.append(arcpy.Polyline(arcpy.Array([start.centroid,end2.centroid]),sr))
arcpy.CopyFeatures_management(outlines,'in_memory\Aux1')
del cursor1
# Define cursor to loop the Plan Profile Line Feature Layer
cursor2= arcpy.SearchCursor(profileFC)
# Adjust the value in case the azimuth is greater than 180
for row in cursor2:
    azVal = row.BEARING
    if azVal > 180:
        azVal = azVal - 180
        arcpy.FlipLine_edit(profileFC)
arcpy.AddMessage("Bearing azimuth profile line: " + str(azVal))
arcpy.CalculateField_management(profileFC,"BEARING",str(azVal), "PYTHON", "#")
arcpy.SpatialJoin_analysis('in_memory\Aux1',strDataFC,'in_memory\Aux2') # Join the Structural Data attributes to the temporal projection lines
arcpy.Intersect_analysis([profileFC,'in_memory\Aux2'],'in_memory\Aux5',"ALL","","POINT") # Capture the intersection points to project the structural data in the plan profile line
# Project the intersection points in case there are faults and/or folds
if foldFC != '' and faultFC != '':
    arcpy.Intersect_analysis([profileFC,foldFC],'in_memory\Aux3',"ALL","","POINT")
    arcpy.Intersect_analysis([profileFC,faultFC],'in_memory\Aux4',"ALL","","POINT")
    arcpy.Merge_management(['in_memory\Aux3','in_memory\Aux4','in_memory\Aux5'],'in_memory\Aux6')
    arcpy.DeleteFeatures_management('in_memory\Aux3')
    arcpy.DeleteFeatures_management('in_memory\Aux4')
elif foldFC == '' and faultFC != '':
    arcpy.Intersect_analysis([profileFC,faultFC],'in_memory\Aux4',"ALL","","POINT")
    arcpy.Merge_management(['in_memory\Aux4','in_memory\Aux5'],'in_memory\Aux6')
    arcpy.DeleteFeatures_management('in_memory\Aux4')
elif foldFC != '' and faultFC == '':
    arcpy.Intersect_analysis([profileFC,foldFC],'in_memory\Aux3',"ALL","","POINT")
    arcpy.Merge_management(['in_memory\Aux3','in_memory\Aux5'],'in_memory\Aux6')
    arcpy.DeleteFeatures_management('in_memory\Aux3')
else:
    arcpy.CopyFeatures_management('in_memory\Aux5','in_memory\Aux6')
# Identify the kind of angle to calculate the aparent Dip in each structural data (Except Faults and Folds)
# I_D records whether the dip direction falls left ("Izq") or right ("Der")
# of the profile bearing; Angle is the acute angle between strike and profile.
arcpy.AddField_management('in_memory\Aux6',"I_D","TEXT")
arcpy.MakeFeatureLayer_management('in_memory\Aux6', "DE_Corte")
arcpy.SelectLayerByAttribute_management("DE_Corte","NEW_SELECTION",'"'+str(azRumField)+'"'+'>= "BEARING" AND'+ '"'+str(azRumField)+'"'+'<( "BEARING" +180)')
arcpy.CalculateField_management("DE_Corte","I_D",'"Izq"',"PYTHON")
arcpy.SelectLayerByAttribute_management("DE_Corte","NEW_SELECTION",'"'+str(azRumField)+'"'+'< "BEARING" OR'+'"'+str(azRumField)+'"'+'>=( "BEARING" +180)')
arcpy.CalculateField_management("DE_Corte","I_D",'"Der"',"PYTHON")
arcpy.AddField_management("DE_Corte","Angle","FLOAT")
arcpy.SelectLayerByAttribute_management("DE_Corte","NEW_SELECTION",'"'+str(azRumField)+'"'+'< "BEARING"')
arcpy.CalculateField_management("DE_Corte","Angle",'!BEARING!- !'+str(azRumField)+'!',"PYTHON")
arcpy.SelectLayerByAttribute_management("DE_Corte","NEW_SELECTION",'"'+str(azRumField)+'"'+'> "BEARING" AND'+'"'+str(azRumField)+'"'+'<= 180')
arcpy.CalculateField_management("DE_Corte","Angle",'!'+str(azRumField)+'!- !BEARING!',"PYTHON")
arcpy.SelectLayerByAttribute_management("DE_Corte","NEW_SELECTION",'"'+str(azRumField)+'"'+'> "BEARING" AND 270 >='+ '"'+str(azRumField)+'"'+'AND'+'"'+str(azRumField)+'"'+' > 180')
arcpy.CalculateField_management("DE_Corte","Angle",'!BEARING!-( !'+str(azRumField)+'!-180)',"PYTHON")
arcpy.SelectLayerByAttribute_management("DE_Corte","NEW_SELECTION",'"'+str(azRumField)+'"'+'> "BEARING" AND 360 >='+'"'+str(azRumField)+'"'+'AND'+'"'+str(azRumField)+'"'+'> 270')
arcpy.CalculateField_management("DE_Corte","Angle",'(!'+str(azRumField)+'!-180)-!BEARING!',"PYTHON")
arcpy.SelectLayerByAttribute_management("DE_Corte","NEW_SELECTION",'"'+str(azRumField)+'"'+'= 0')
arcpy.CalculateField_management("DE_Corte","Angle",'(180-!BEARING!)',"PYTHON")
arcpy.SelectLayerByAttribute_management("DE_Corte","CLEAR_SELECTION")
# Calculate the aparent Dip
# apparent dip = atan(tan(true dip) * sin(angle between strike and section)).
arcpy.AddField_management("DE_Corte","AparentDip","FLOAT")
arcpy.CalculateField_management("DE_Corte","AparentDip",'math.degrees(math.atan(math.tan( (!'+str(dipField)+'!*math.pi)/180 )*math.sin(( !Angle! *math.pi)/180 ) ) )',"PYTHON")
strDataPlan = arcpy.MultipartToSinglepart_management("DE_Corte",outFC)
addLayer = arcpy.mapping.Layer(outFC)
arcpy.mapping.AddLayer(df, addLayer, "TOP") # Add layer to data frame
# Delete temporal data
arcpy.DeleteFeatures_management('in_memory\Aux1')
arcpy.DeleteFeatures_management('in_memory\Aux2')
arcpy.DeleteFeatures_management('in_memory\Aux5')
arcpy.DeleteFeatures_management('in_memory\Aux6')
arcpy.Delete_management("DE_Corte") | 1.898438 | 2 |
GRU.py | harrys17451/CryptocurrencyPrediction | 669 | 12763088 | <reponame>harrys17451/CryptocurrencyPrediction
import pandas as pd
import numpy as numpy
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten,Reshape
from keras.layers import Conv1D, MaxPooling1D, LeakyReLU
from keras.utils import np_utils
from keras.layers import GRU,CuDNNGRU
from keras.callbacks import CSVLogger, ModelCheckpoint
import h5py
import os
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
# GPU setup: pin to GPU 1, quiet TF logging, and allocate memory on demand.
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))

# Load pre-windowed price data: inputs (samples, steps, features) and targets.
# NOTE(review): Dataset.value is the old h5py API, removed in h5py >= 3.
with h5py.File(''.join(['bitcoin2015to2017_close.h5']), 'r') as hf:
    datas = hf['inputs'].value
    labels = hf['outputs'].value

output_file_name='bitcoin2015to2017_close_GRU_1_tanh_relu_'

# Hyper-parameters.
step_size = datas.shape[1]
units= 50
batch_size = 8
nb_features = datas.shape[2]
epochs = 100
output_size=16
#split training validation (80/20, chronological)
training_size = int(0.8* datas.shape[0])
training_datas = datas[:training_size,:]
training_labels = labels[:training_size,:,0]
validation_datas = datas[training_size:,:]
validation_labels = labels[training_size:,:,0]

#build model: single GRU layer -> tanh -> dropout -> dense forecast of 16 steps
model = Sequential()
model.add(GRU(units=units, input_shape=(step_size,nb_features),return_sequences=False))
model.add(Activation('tanh'))
model.add(Dropout(0.2))
model.add(Dense(output_size))
model.add(Activation('relu'))
model.compile(loss='mse', optimizer='adam')
# Train, logging to CSV and checkpointing weights on best validation loss.
model.fit(training_datas, training_labels, batch_size=batch_size,validation_data=(validation_datas,validation_labels), epochs = epochs, callbacks=[CSVLogger(output_file_name+'.csv', append=True),ModelCheckpoint('weights/'+output_file_name+'-{epoch:02d}-{val_loss:.5f}.hdf5', monitor='val_loss', verbose=1,mode='min')])
# model.fit(datas,labels)
#model.save(output_file_name+'.h5')
| 2.328125 | 2 |
make/photon/prepare/migrations/__init__.py | gerhardgossen/harbor | 1 | 12763089 | import os
# Directory containing the versioned migration packages (this package's dir).
MIGRATION_BASE_DIR = os.path.dirname(__file__)
# Harbor config versions this prepare tool knows how to migrate.
accept_versions = {'1.9.0', '1.10.0', '2.0.0'}
update_metrics.py | Shpinkso/Software_metrics | 0 | 12763090 | <reponame>Shpinkso/Software_metrics<filename>update_metrics.py
from gitlabMetrics import *
from jiraMetrics import *
import yaml
import argparse
prodmode = ''
def main():
    """Load credentials from login.yml, connect to GitLab and Jira, and sync
    new metrics into the database.

    Command line:
        --jdate  ISO8601 date, or 'latest' to resume from the last stored Jira issue.
        --gdate  ISO8601 date, or 'latest' to resume from the last stored commit.
    """
    # Close the config file instead of leaking the handle.
    with open('login.yml') as f:
        login_dict = yaml.safe_load(f)
    gl_project = login_dict['gitlab_project_id']
    jira_project_key = login_dict['jira_project_key']

    gl_metrics = GitlabMetrics(prodmode)
    gl_metrics.connect()
    gl_metrics.load_project(gl_project)

    jira_metrics = JiraMetrics(prodmode)
    jira_metrics.connect()
    try:
        jira_metrics.load_project(jira_project_key)
    except Exception as e:
        # BUG FIX: 'assert False' is stripped when run with python -O,
        # silently continuing after a failed load; abort explicitly instead.
        raise SystemExit("Failed to load Jira project {}: {}".format(jira_project_key, e))

    parser = argparse.ArgumentParser()
    parser.add_argument("--jdate", help="iso8601 date", type=str)
    parser.add_argument("--gdate", help="iso8601 date", type=str)
    args = parser.parse_args()

    if args.jdate == 'latest':
        print("Updating jira metrics from project {} since last entry..".format(jira_project_key))
        j_date = jira_metrics.get_date_of_last_done_issue_in_database()
    else:
        print("Updating jira metrics from project {} since {}".format(jira_project_key,args.jdate))
        j_date = args.jdate
    jira_metrics.load_done_project_issues_since(j_date)
    jira_metrics.add_issues_to_database()

    if args.gdate == 'latest':
        print("Updating gitlab metrics from project {} since last entry..".format(gl_project))
        gl_date = gl_metrics.get_date_of_last_commit_in_database()
    else:
        print("Updating gitlab metrics from project {} since {}".format(gl_project,args.gdate))
        gl_date = args.gdate
    gl_metrics.load_project_commits_since(gl_date)
    gl_metrics.add_commits_to_database()


if __name__ == "__main__":
    main()
| 2.5 | 2 |
utils.py | yuanc3/LSUnetMix | 0 | 12763091 | import numpy as np
from sklearn.metrics import roc_auc_score,jaccard_score
import cv2
from torch import nn
import torch.nn.functional as F
import math
from functools import wraps
import warnings
import weakref
from torch.optim.optimizer import Optimizer
class WeightedBCE(nn.Module):
    """Binary cross-entropy with separate class weights for positive and
    negative pixels, each class normalised by its own pixel count."""

    def __init__(self, weights=[0.4, 0.6]):
        super(WeightedBCE, self).__init__()
        self.weights = weights  # [positive weight, negative weight]

    def forward(self, logit_pixel, truth_pixel):
        prediction = logit_pixel.view(-1)
        target = truth_pixel.view(-1)
        assert prediction.shape == target.shape
        per_pixel = F.binary_cross_entropy(prediction, target, reduction='none')
        pos_mask = (target > 0.5).float()
        neg_mask = (target < 0.5).float()
        # Per-class normalisers; the epsilon guards against an absent class.
        pos_count = pos_mask.sum().item() + 1e-12
        neg_count = neg_mask.sum().item() + 1e-12
        weighted = (self.weights[0] * pos_mask * per_pixel / pos_count
                    + self.weights[1] * neg_mask * per_pixel / neg_count)
        return weighted.sum()
class WeightedDiceLoss(nn.Module):
    """Soft Dice loss where each pixel is weighted by interpolating between
    weights[0] (background, truth==0) and weights[1] (foreground, truth==1)."""

    def __init__(self, weights=[0.5, 0.5]):  # W_pos=0.8, W_neg=0.2
        super(WeightedDiceLoss, self).__init__()
        self.weights = weights

    def forward(self, logit, truth, smooth=1e-5):
        batch = len(logit)
        pred_flat = logit.view(batch, -1)
        true_flat = truth.view(batch, -1)
        assert pred_flat.shape == true_flat.shape
        # Per-pixel weight, detached so it does not receive gradients.
        pixel_w = true_flat.detach() * (self.weights[1] - self.weights[0]) + self.weights[0]
        wp = pixel_w * pred_flat
        wt = pixel_w * true_flat
        overlap = (wp * wt).sum(-1)
        denom = (wp * wp).sum(-1) + (wt * wt).sum(-1)
        # 1 - Dice per sample, averaged over the batch.
        per_sample = 1 - (2 * overlap + smooth) / (denom + smooth)
        return per_sample.mean()
class WeightedDiceBCE(nn.Module):
    """Weighted combination of Dice loss and BCE loss (both with equal class
    weights), mixed as dice_weight * dice + BCE_weight * bce."""

    def __init__(self, dice_weight=1, BCE_weight=1):
        super(WeightedDiceBCE, self).__init__()
        self.BCE_loss = WeightedBCE(weights=[0.5, 0.5])
        self.dice_loss = WeightedDiceLoss(weights=[0.5, 0.5])
        self.BCE_weight = BCE_weight
        self.dice_weight = dice_weight

    def _show_dice(self, inputs, targets):
        """Hard Dice coefficient for monitoring (predictions thresholded at
        0.5, targets binarised at 0).

        BUG FIX: previously thresholded `inputs`/`targets` *in place*, silently
        binarising the caller's tensors; operate on clones instead.
        """
        inputs = inputs.clone()
        targets = targets.clone()
        inputs[inputs >= 0.5] = 1
        inputs[inputs < 0.5] = 0
        targets[targets > 0] = 1
        targets[targets <= 0] = 0
        hard_dice_coeff = 1.0 - self.dice_loss(inputs, targets)
        return hard_dice_coeff

    def forward(self, inputs, targets):
        dice = self.dice_loss(inputs, targets)
        BCE = self.BCE_loss(inputs, targets)
        dice_BCE_loss = self.dice_weight * dice + self.BCE_weight * BCE
        return dice_BCE_loss
def auc_on_batch(masks, pred):
    '''Computes the mean area under the ROC curve over a batch.

    BUG FIX: the loop previously ran over pred.shape[1] (the channel axis)
    while indexing pred[i] / masks[i] on the batch axis; iterate over the
    batch dimension (shape[0]) like the other *_on_batch metrics.
    '''
    aucs = []
    for i in range(pred.shape[0]):
        prediction = pred[i][0].cpu().detach().numpy()
        mask = masks[i].cpu().detach().numpy()
        aucs.append(roc_auc_score(mask.reshape(-1), prediction.reshape(-1)))
    return np.mean(aucs)
def iou_on_batch(masks, pred):
    '''Computes the mean Jaccard (IoU) score over a batch, after thresholding
    predictions at 0.5 and binarising masks at 0.

    BUG FIX: Tensor.numpy() shares memory with the tensor, so the in-place
    thresholding below used to binarise the caller's tensors; copy first.
    '''
    ious = []
    for i in range(pred.shape[0]):
        pred_tmp = pred[i][0].cpu().detach().numpy().copy()
        mask_tmp = masks[i].cpu().detach().numpy().copy()
        pred_tmp[pred_tmp >= 0.5] = 1
        pred_tmp[pred_tmp < 0.5] = 0
        mask_tmp[mask_tmp > 0] = 1
        mask_tmp[mask_tmp <= 0] = 0
        ious.append(jaccard_score(mask_tmp.reshape(-1), pred_tmp.reshape(-1)))
    return np.mean(ious)
def dice_coef(y_true, y_pred):
    """Soft Dice coefficient between two arrays (flattened), with a 1e-5
    smoothing term so that two empty masks score 1.0."""
    eps = 1e-5
    truth = y_true.flatten()
    guess = y_pred.flatten()
    overlap = np.sum(truth * guess)
    total = np.sum(truth) + np.sum(guess)
    return (2. * overlap + eps) / (total + eps)
def dice_on_batch(masks, pred):
    '''Computes the mean hard Dice coefficient over a batch, after
    thresholding predictions at 0.5 and binarising masks at 0.

    BUG FIX: Tensor.numpy() shares memory with the tensor, so the in-place
    thresholding below used to binarise the caller's tensors; copy first.
    '''
    dices = []
    for i in range(pred.shape[0]):
        pred_tmp = pred[i][0].cpu().detach().numpy().copy()
        mask_tmp = masks[i].cpu().detach().numpy().copy()
        pred_tmp[pred_tmp >= 0.5] = 1
        pred_tmp[pred_tmp < 0.5] = 0
        mask_tmp[mask_tmp > 0] = 1
        mask_tmp[mask_tmp <= 0] = 0
        dices.append(dice_coef(mask_tmp, pred_tmp))
    return np.mean(dices)
def save_on_batch(images1, masks, pred, names, vis_path):
    '''Writes binarised prediction and ground-truth masks (0/255) for each
    batch item to vis_path as "<name>_pred.jpg" / "<name>_gt.jpg".

    Note: `images1` is accepted for interface compatibility but unused.
    BUG FIX: Tensor.numpy() shares memory with the tensor, so the in-place
    thresholding below used to overwrite the caller's tensors; copy first.
    '''
    for i in range(pred.shape[0]):
        pred_tmp = pred[i][0].cpu().detach().numpy().copy()
        mask_tmp = masks[i].cpu().detach().numpy().copy()
        pred_tmp[pred_tmp >= 0.5] = 255
        pred_tmp[pred_tmp < 0.5] = 0
        mask_tmp[mask_tmp > 0] = 255
        mask_tmp[mask_tmp <= 0] = 0
        cv2.imwrite(vis_path + names[i][:-4] + "_pred.jpg", pred_tmp)
        cv2.imwrite(vis_path + names[i][:-4] + "_gt.jpg", mask_tmp)
class _LRScheduler(object):
    """Base class for learning-rate schedulers (vendored copy of
    ``torch.optim.lr_scheduler._LRScheduler``).

    Subclasses implement :meth:`get_lr`; this base class handles attaching to
    the optimizer, tracking epochs, warning about wrong
    ``optimizer.step()``/``lr_scheduler.step()`` call order, and applying the
    computed learning rates to the optimizer's parameter groups.
    """
    def __init__(self, optimizer, last_epoch=-1):
        # Attach optimizer
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(
                type(optimizer).__name__))
        self.optimizer = optimizer
        # Initialize epoch and base learning rates
        if last_epoch == -1:
            for group in optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
        else:
            # Resuming: every param group must carry the lr it started from.
            for i, group in enumerate(optimizer.param_groups):
                if 'initial_lr' not in group:
                    raise KeyError("param 'initial_lr' is not specified "
                                   "in param_groups[{}] when resuming an optimizer".format(i))
        self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
        self.last_epoch = last_epoch
        # Following https://github.com/pytorch/pytorch/issues/20124
        # We would like to ensure that `lr_scheduler.step()` is called after
        # `optimizer.step()`
        def with_counter(method):
            # Wrap optimizer.step so each call bumps optimizer._step_count,
            # letting step() below detect a wrong call order.
            if getattr(method, '_with_counter', False):
                # `optimizer.step()` has already been replaced, return.
                return method
            # Keep a weak reference to the optimizer instance to prevent
            # cyclic references.
            instance_ref = weakref.ref(method.__self__)
            # Get the unbound method for the same purpose.
            func = method.__func__
            cls = instance_ref().__class__
            del method
            @wraps(func)
            def wrapper(*args, **kwargs):
                instance = instance_ref()
                instance._step_count += 1
                wrapped = func.__get__(instance, cls)
                return wrapped(*args, **kwargs)
            # Note that the returned function here is no longer a bound method,
            # so attributes like `__func__` and `__self__` no longer exist.
            wrapper._with_counter = True
            return wrapper
        self.optimizer.step = with_counter(self.optimizer.step)
        self.optimizer._step_count = 0
        self._step_count = 0
        # Initial step() applies the epoch-(last_epoch+1) learning rates.
        self.step()
    def state_dict(self):
        """Returns the state of the scheduler as a :class:`dict`.
        It contains an entry for every variable in self.__dict__ which
        is not the optimizer.
        """
        return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
    def load_state_dict(self, state_dict):
        """Loads the schedulers state.
        Arguments:
            state_dict (dict): scheduler state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        self.__dict__.update(state_dict)
    def get_last_lr(self):
        """ Return last computed learning rate by current scheduler.
        """
        return self._last_lr
    def get_lr(self):
        # Compute learning rate using chainable form of the scheduler
        raise NotImplementedError
    def step(self, epoch=None):
        """Advance the schedule by one epoch (or jump to ``epoch``) and write
        the new learning rates into ``optimizer.param_groups``."""
        # Raise a warning if old pattern is detected
        # https://github.com/pytorch/pytorch/issues/20124
        if self._step_count == 1:
            if not hasattr(self.optimizer.step, "_with_counter"):
                warnings.warn("Seems like `optimizer.step()` has been overridden after learning rate scheduler "
                              "initialization. Please, make sure to call `optimizer.step()` before "
                              "`lr_scheduler.step()`. See more details at "
                              "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
            # Just check if there were two first lr_scheduler.step() calls before optimizer.step()
            elif self.optimizer._step_count < 1:
                warnings.warn("Detected call of `lr_scheduler.step()` before `optimizer.step()`. "
                              "In PyTorch 1.1.0 and later, you should call them in the opposite order: "
                              "`optimizer.step()` before `lr_scheduler.step()`.  Failure to do this "
                              "will result in PyTorch skipping the first value of the learning rate schedule. "
                              "See more details at "
                              "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
        self._step_count += 1
        class _enable_get_lr_call:
            # Context manager marking that get_lr() is being invoked from
            # inside step() (suppresses the "use get_last_lr()" warning).
            def __init__(self, o):
                self.o = o
            def __enter__(self):
                self.o._get_lr_called_within_step = True
                return self
            def __exit__(self, type, value, traceback):
                self.o._get_lr_called_within_step = False
                return self
        with _enable_get_lr_call(self):
            if epoch is None:
                self.last_epoch += 1
                values = self.get_lr()
            else:
                self.last_epoch = epoch
                # Prefer the closed-form schedule when a subclass provides one.
                if hasattr(self, "_get_closed_form_lr"):
                    values = self._get_closed_form_lr()
                else:
                    values = self.get_lr()
        for param_group, lr in zip(self.optimizer.param_groups, values):
            param_group['lr'] = lr
        self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
class CosineAnnealingWarmRestarts(_LRScheduler):
    r"""Set the learning rate of each parameter group using a cosine annealing
    schedule, where :math:`\eta_{max}` is set to the initial lr, :math:`T_{cur}`
    is the number of epochs since the last restart and :math:`T_{i}` is the number
    of epochs between two warm restarts in SGDR:
    .. math::
        \eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 +
        \cos\left(\frac{T_{cur}}{T_{i}}\pi\right)\right)
    When :math:`T_{cur}=T_{i}`, set :math:`\eta_t = \eta_{min}`.
    When :math:`T_{cur}=0` after restart, set :math:`\eta_t=\eta_{max}`.
    It has been proposed in
    `SGDR: Stochastic Gradient Descent with Warm Restarts`_.
    Args:
        optimizer (Optimizer): Wrapped optimizer.
        T_0 (int): Number of iterations for the first restart.
        T_mult (int, optional): A factor increases :math:`T_{i}` after a restart. Default: 1.
        eta_min (float, optional): Minimum learning rate. Default: 0.
        last_epoch (int, optional): The index of last epoch. Default: -1.
    .. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
        https://arxiv.org/abs/1608.03983
    """
    def __init__(self, optimizer, T_0, T_mult=1, eta_min=0, last_epoch=-1):
        if T_0 <= 0 or not isinstance(T_0, int):
            raise ValueError("Expected positive integer T_0, but got {}".format(T_0))
        if T_mult < 1 or not isinstance(T_mult, int):
            raise ValueError("Expected integer T_mult >= 1, but got {}".format(T_mult))
        self.T_0 = T_0
        # T_i is the length of the current restart cycle; starts at T_0.
        self.T_i = T_0
        self.T_mult = T_mult
        self.eta_min = eta_min
        super(CosineAnnealingWarmRestarts, self).__init__(optimizer, last_epoch)
        # T_cur counts epochs since the last restart.
        self.T_cur = self.last_epoch
    def get_lr(self):
        if not self._get_lr_called_within_step:
            warnings.warn("To get the last learning rate computed by the scheduler, "
                          "please use `get_last_lr()`.", DeprecationWarning)
        # Cosine interpolation from base_lr (at T_cur=0) down to eta_min (at T_cur=T_i).
        return [self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * self.T_cur / self.T_i)) / 2
                for base_lr in self.base_lrs]
    def step(self, epoch=None):
        """Step could be called after every batch update
        Example:
            >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult)
            >>> iters = len(dataloader)
            >>> for epoch in range(20):
            >>>     for i, sample in enumerate(dataloader):
            >>>         inputs, labels = sample['inputs'], sample['labels']
            >>>         scheduler.step(epoch + i / iters)
            >>>         optimizer.zero_grad()
            >>>         outputs = net(inputs)
            >>>         loss = criterion(outputs, labels)
            >>>         loss.backward()
            >>>         optimizer.step()
        This function can be called in an interleaved way.
        Example:
            >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult)
            >>> for epoch in range(20):
            >>>     scheduler.step()
            >>> scheduler.step(26)
            >>> scheduler.step() # scheduler.step(27), instead of scheduler(20)
        """
        if epoch is None and self.last_epoch < 0:
            epoch = 0
        if epoch is None:
            # Implicit stepping: advance one epoch, restarting when a cycle ends.
            epoch = self.last_epoch + 1
            self.T_cur = self.T_cur + 1
            if self.T_cur >= self.T_i:
                self.T_cur = self.T_cur - self.T_i
                self.T_i = self.T_i * self.T_mult
        else:
            if epoch < 0:
                raise ValueError("Expected non-negative epoch, but got {}".format(epoch))
            if epoch >= self.T_0:
                if self.T_mult == 1:
                    # Equal-length cycles: position within cycle is a simple modulo.
                    self.T_cur = epoch % self.T_0
                else:
                    # Geometric cycles: n = index of the cycle containing `epoch`
                    # (from the geometric-series sum T_0*(T_mult^n - 1)/(T_mult - 1)).
                    n = int(math.log((epoch / self.T_0 * (self.T_mult - 1) + 1), self.T_mult))
                    self.T_cur = epoch - self.T_0 * (self.T_mult ** n - 1) / (self.T_mult - 1)
                    self.T_i = self.T_0 * self.T_mult ** (n)
            else:
                self.T_i = self.T_0
                self.T_cur = epoch
        self.last_epoch = math.floor(epoch)
        class _enable_get_lr_call:
            # Context manager marking that get_lr() is being invoked from
            # inside step() (suppresses the "use get_last_lr()" warning).
            def __init__(self, o):
                self.o = o
            def __enter__(self):
                self.o._get_lr_called_within_step = True
                return self
            def __exit__(self, type, value, traceback):
                self.o._get_lr_called_within_step = False
                return self
        with _enable_get_lr_call(self):
            for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
                param_group['lr'] = lr
        self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
| 2.21875 | 2 |
backend/ai/mean_actor_critic.py | MisterTea/LandsharkGame | 0 | 12763092 | <gh_stars>0
#!/usr/bin/env python3
import copy
import os
import random
from collections import Counter
from enum import IntEnum
from typing import Dict, List, Tuple
from uuid import UUID, uuid4
import numpy as np
import pytorch_lightning as pl
import torch
from engine.game_interface import GameInterface
from torch import multiprocessing
from utils.profiler import Profiler
from ai.types import GameRollout
def masked_softmax(x, mask, temperature):
    """Row-wise softmax of ``x / temperature`` restricted to entries where
    ``mask`` is 1.

    Masked-out entries receive probability 0. Rows whose mask is entirely
    zero yield an all-zero row instead of NaNs.
    """
    scaled = x / temperature
    # Push masked-out logits to a huge negative value so exp() sends them to 0.
    scaled = scaled.masked_fill(mask < 1, -1e20)
    # Subtract the row max for numerical stability before exponentiating.
    stable = scaled - scaled.max(dim=1, keepdim=True)[0]
    weights = torch.exp(stable) * mask
    norm = weights.sum(dim=1, keepdim=True)
    # Fully-masked rows would divide by zero; normalize them by 1 instead.
    norm = norm.masked_fill(norm == 0.0, 1.0)
    return weights / norm
class ActorCritic(torch.nn.Module):
    """Shared-trunk actor-critic network for the (Land-shark style) card game.

    The flat per-state feature vector is sliced at hard-coded offsets into
    auction cards, consumed cards, and per-player sub-vectors, each of which
    is embedded and pooled before a shared MLP feeds an actor head
    (action probabilities) and a critic head (per-action values).

    NOTE(review): the feature layout (offsets 1, 1+4, 1+4+30, ... below) is
    assumed from the slicing code — confirm against the game's
    populate_features implementation.
    """
    def __init__(self, feature_dim: int, action_dim: int):
        """Build embeddings and heads.

        Args:
            feature_dim: length of the flat input feature vector (stored but
                not otherwise used by this module).
            action_dim: number of discrete actions; output width of both heads.
        """
        super().__init__()
        self.feature_dim = feature_dim
        self.action_dim = action_dim
        # Card/dollar indices are embedded into 16-d vectors and mean-pooled.
        self.player_property_embedding = torch.nn.Embedding(31, 16)
        self.player_embedding = torch.nn.Sequential(
            torch.nn.Linear(18, 16), torch.nn.LeakyReLU()
        )
        self.bidding_property_embedding = torch.nn.Embedding(31, 16)
        self.property_consumed_embedding = torch.nn.Embedding(31, 16)
        self.bidding_dollar_embedding = torch.nn.Embedding(17, 16)
        self.dollar_consumed_embedding = torch.nn.Embedding(17, 16)
        # 98 = 1 + 16*5 + 1 + 16 (scalar flags + pooled embeddings + player vectors).
        self.shared = torch.nn.Sequential(
            (torch.nn.Linear(98, 64)),
            torch.nn.LeakyReLU(),
        )
        self.critic = torch.nn.Sequential(
            torch.nn.Linear(64, action_dim),
            torch.nn.Identity(),
        )
        self.actor = torch.nn.Sequential(
            torch.nn.Linear(64, action_dim),
            torch.nn.Identity(),
        )
        # Training-step counter (incremented in training_step).
        self.num_steps = 0
    def forward(self, inputs, possible_actions, do_epsilon_greedy: bool):
        """Compute action probabilities and per-action values.

        Args:
            inputs: (batch, feature_dim) flat state features; detached, so no
                gradient flows back into the feature source.
            possible_actions: (batch, action_dim) 0/1 legality mask; every row
                must contain at least one legal action.
            do_epsilon_greedy: if True, mix a ~10% uniform floor over legal
                actions into the actor distribution (for exploration).
        Returns:
            (actor_probs, critic_action_values) — both (batch, action_dim).
        """
        inputs = inputs.detach()
        # Embed player properties
        opponent_vector = None
        for player_index in range(0, 4):
            # Each of the 4 players occupies a 9-wide slice after the shared
            # auction/consumed sections: [money?, 7 property cards, 1 scalar]
            # — TODO confirm slice semantics.
            cursor = 1 + 4 + 30 + 30 + 1 + 4 + (9 * player_index)
            property_indices = inputs[:, cursor + 1 : cursor + 8].long()
            e = (self.player_property_embedding(property_indices)).mean(dim=1)
            player_embedding = self.player_embedding(
                torch.cat(
                    (
                        e,
                        inputs[
                            :,
                            (cursor, cursor + 8),
                        ],
                    ),
                    dim=1,
                )
            )
            # Player 0 is "self"; all other players are summed into one
            # opponent vector (permutation invariant over opponents).
            if player_index == 0:
                self_vector = player_embedding
            elif opponent_vector is None:
                opponent_vector = player_embedding
            else:
                opponent_vector = opponent_vector + player_embedding
        # Embed houses to buy
        cursor = 1
        property_indices = inputs[:, cursor : cursor + 4].long()
        houses_to_buy = (self.bidding_property_embedding(property_indices)).mean(dim=1)
        # Embed properties consumed
        cursor = 1 + 4
        property_indices = inputs[:, cursor : cursor + 30].long()
        property_consumed = (self.property_consumed_embedding(property_indices)).mean(
            dim=1
        )
        # Embed dollar cards to buy
        cursor = 1 + 4 + 30 + 30 + 1
        dollar_indices = inputs[:, cursor : cursor + 4].long()
        dollars_to_buy = (self.bidding_dollar_embedding(dollar_indices)).mean(dim=1)
        # Embed dollars consumed
        cursor = 1 + 4 + 30
        dollar_indices = inputs[:, cursor : cursor + 30].long()
        dollars_consumed = (self.dollar_consumed_embedding(dollar_indices)).mean(dim=1)
        # Concatenate the two raw scalars with all pooled embeddings.
        x = torch.cat(
            (
                inputs[:, 0:1],
                houses_to_buy,
                property_consumed,
                inputs[:, 1 + 4 + 30 + 30 : 1 + 4 + 30 + 30 + 1],
                dollars_to_buy,
                dollars_consumed,
                self_vector,
                opponent_vector,
            ),
            dim=1,
        )
        x = self.shared(x)
        shared_result = x
        # Critic
        critic_action_values = self.critic(x)
        # Actor
        x = self.actor(shared_result)
        # x_before_activation = x.cpu()
        # x = torch.clamp(torch.nn.Sigmoid()(x), min=1e-3, max=(1.0 - 1e-3))
        # assert torch.min(x) > 0.0
        x = masked_softmax(x, possible_actions, 1.0)
        # Replace softmax with linear scale
        # x = x * possible_actions.float()
        # original_x = x.cpu()
        # x = x / x.sum(dim=1, keepdim=True)
        # Sanity checks: every row must have a legal action, and the masked
        # softmax must produce a valid probability distribution.
        assert torch.allclose(
            torch.max(possible_actions, dim=1).values.cpu(), torch.IntTensor([1])
        ), f"{torch.max(possible_actions, dim=1).values.cpu()}"
        assert torch.allclose(x.sum(dim=1).cpu(), torch.Tensor([1.0]))
        assert torch.min(x) >= 0.0
        if do_epsilon_greedy:
            # Uniform 10% floor over legal actions; a few max/renormalize
            # rounds approximately enforce it while keeping rows normalized.
            epsilon_prob = possible_actions * (
                0.1 / possible_actions.sum(dim=1, keepdim=True).float()
            )
            assert epsilon_prob.size() == x.size()
            for _ in range(5):
                x = torch.max(x, epsilon_prob)
                x = x / x.sum(dim=1, keepdim=True)
        actor_probs = x
        return actor_probs, critic_action_values
    def training_step(self, batch_list: List[torch.Tensor], batch_idx):
        """One off-policy actor-critic update on a batch of game rollouts.

        The critic regresses per-action payoffs (L1, importance-weighted by
        current-policy/behavior-policy ratio); the actor maximizes
        importance-weighted advantage plus an entropy-regularization term.
        Returns a Lightning-style dict with 'loss' and 'progress_bar' scalars.
        """
        self.num_steps += 1
        # batch_list holds singleton-batched tensors; unwrap into a GameRollout.
        batch = GameRollout(*[x[0] for x in batch_list])
        actor_probs, critic_action_values = self(
            batch.states, batch.possible_actions, True
        )
        action_one_hot = torch.nn.functional.one_hot(
            batch.actions.squeeze(1), num_classes=self.action_dim
        ).type_as(batch.states)
        # Select each sample's payoff for the seat that acted.
        payoff_for_player_to_act = (
            (
                batch.payoffs
                * torch.nn.functional.one_hot(batch.player_to_act.squeeze(1))
            )
            .sum(dim=1, keepdim=True)
            .type_as(batch.states)
        )
        batch_size = batch.actions.size()[0]
        # labels = (0.99 ** batch.distance_to_payoff) * payoff_for_player_to_act
        labels = payoff_for_player_to_act
        assert labels.size() == (batch_size, 1)
        labels = (labels * action_one_hot).sum(dim=1, keepdim=True)
        assert action_one_hot.size() == batch.possible_actions.size()
        total_prob = actor_probs.sum(dim=1)
        # Critic value for the action actually taken.
        outputs = (critic_action_values * action_one_hot).sum(dim=1, keepdim=True)
        criterion = torch.nn.L1Loss(reduction="none")
        # Some actions are impossible, these will get an importance weight of 0
        importance_weight = (actor_probs / batch.policy.clamp(min=1e-6)).clamp(max=10.0)
        assert (
            torch.isnan(importance_weight).sum() == 0
        ), f"Invalid importance weight {actor_probs}"
        importance_weight_for_action_taken = (
            (importance_weight * action_one_hot).sum(dim=1, keepdim=True).detach()
        )
        loss_before_importance = criterion(outputs, labels)
        assert (
            importance_weight_for_action_taken.size() == loss_before_importance.size()
        )
        critic_loss = (
            loss_before_importance * importance_weight_for_action_taken
        ).mean()
        # print("Critic loss", critic_loss)
        if True or self.num_steps >= 100:
            # Advantage = per-action value minus the policy-weighted baseline.
            detached_critic = critic_action_values.detach()
            assert critic_action_values.size() == batch.possible_actions.size()
            baseline = (actor_probs * detached_critic).sum(dim=1, keepdim=True)
            advantage = detached_critic - baseline
            advantage_loss = -1 * (
                importance_weight.detach() * actor_probs * advantage.detach()
            ).sum(dim=1, keepdim=True)
            # advantage_loss = -1 * (
            # importance_weight.detach() * actor_probs * detached_critic
            # ).sum(dim=1, keepdim=True)
            # advantage_loss = torch.nn.LeakyReLU()(advantage).sum(dim=1, keepdim=True)
            advantage_loss = advantage_loss.mean()
            # Normalized entropy in [0, 1]: entropy divided by log(#legal actions).
            entropy = (
                torch.sum(
                    -actor_probs * torch.log(actor_probs.clamp(min=1e-6)),
                    dim=1,
                    keepdim=True,
                )
                / torch.log((actor_probs > 0).sum(dim=1, keepdim=True).float()).clamp(
                    min=1e-6
                )
            )
            assert (
                (entropy > 1.01).sum() + (entropy < -0.01).sum()
            ) == 0, f"Invalid entropy {torch.min(torch.sum(actor_probs, dim=1))}, {torch.max(torch.sum(actor_probs, dim=1))}, {entropy[entropy > 1.0]}, {entropy[entropy < 0.0]}"
            # Pull normalized entropy towards 1 (maximally uniform) with weight 0.1.
            entropy_loss = (
                torch.nn.L1Loss()(entropy, torch.ones_like(entropy).float()) * 0.1
            )
            actor_loss = advantage_loss + entropy_loss
            # print("Actor losses", loss, entropy_loss)
            advantage_loss = advantage_loss.detach()
            entropy_loss = entropy_loss.detach()
        else:
            # Don't bother training actor while critic is so wrong
            actor_loss = advantage_loss = entropy_loss = 0
        return {
            "progress_bar": {
                "advantage_loss": advantage_loss,
                "entropy_loss": entropy_loss,
                "critic_loss": critic_loss.detach(),
                "actor_loss": actor_loss.detach(),
            },
            "loss": actor_loss + critic_loss,
        }
@torch.no_grad()
def test_actor(game, actor, opponent_policy, current_epoch):
    """Play 1000 games with `actor` at seat 0 against `opponent_policy` (or a
    uniform-random policy when it is None), then print seat-0's decision
    histogram and the mean payoff per seat."""
    n_games = 1000
    totals: Counter = Counter()
    decision_counts = torch.zeros((game.action_dim(),), dtype=torch.float)
    num_decisions = 0
    for _ in range(n_games):
        state = game.clone()
        state.reset()
        features = torch.zeros((1, state.feature_dim()), dtype=torch.float)
        while not state.terminal():
            seat = state.get_player_to_act()
            action_mask = state.get_one_hot_actions(True)
            if seat == 0:
                state.populate_features(features[0])
                probs = actor(
                    features, action_mask.unsqueeze(0), False
                )[0]
            elif opponent_policy is not None:
                state.populate_features(features[0])
                probs = opponent_policy(
                    features, action_mask.unsqueeze(0), False
                )[0]
            else:
                # No learned opponent: sample uniformly over legal actions.
                probs = action_mask.float()
            dist = torch.distributions.Categorical(probs * action_mask)
            action = int(dist.sample().item())
            if seat == 0:
                decision_counts[action] += 1.0
                num_decisions += 1
            state.act(seat, action)
        for i, payoff in enumerate(state.payoffs()):
            totals[str(i)] += payoff
    print(f"TESTING EPOCH {current_epoch}")
    print("DECISION HISTOGRAM")
    print(decision_counts / num_decisions)
    print(f"SCORE AGAINST PLAYER")
    for x in range(state.num_players):
        print(x, totals[str(x)] / float(n_games))
def test_actors(game, epoch: int, actor_critics):
    """Evaluate every actor-critic against a random policy and a sample of
    previously saved checkpoints, fanning the evaluation games out over a
    worker pool.

    Fix: the worker pool is now closed deterministically via a context
    manager; the original created `multiprocessing.Pool(8)` and never
    close()/join()ed it, leaking worker processes on every call.
    """
    # `multiprocessing` here is torch.multiprocessing; the context manager
    # tears the workers down when the block exits.
    with multiprocessing.Pool(8) as pool:
        for i, actor_critic in enumerate(actor_critics):
            # Evaluate a detached CPU copy so the training model is untouched.
            actor = copy.deepcopy(actor_critic).cpu().eval()
            with torch.no_grad():
                test_actor_params = []
                # Sample at most ~6 past epochs; current_epoch == -1 means
                # "play against the uniform-random policy".
                for current_epoch in range(-1, epoch + 1, max(1, epoch // 5)):
                    # Check winrate against random or past player
                    opponent_policy = None
                    if current_epoch >= 0:
                        opponent_policy = (
                            torch.load(f"models/MAC_ActorCritic_{current_epoch}_{0}.torch")
                            .cpu()
                            .eval()
                        )
                    test_actor_params.append([game, actor, opponent_policy, current_epoch])
                # chunksize=1: each game batch is an independent, long task.
                pool.starmap(test_actor, test_actor_params, 1)
class TorchSaveCallback(pl.Callback):
    """Lightning callback: checkpoint every actor-critic at the end of each
    epoch, then run the evaluation tournament against past checkpoints."""

    def on_epoch_end(self, trainer, pl_module):
        current = trainer.current_epoch
        for idx, model in enumerate(pl_module.actor_critics):
            torch.save(model, f"models/MAC_ActorCritic_{current}_{idx}.torch")
        test_actors(pl_module.game, current, pl_module.actor_critics)
# Number of independent actor-critic models trained side by side
# (each gets its own optimizer in MeanActorCritic.configure_optimizers).
NUM_PARALLEL_MODELS = 1
class MeanActorCritic(pl.LightningModule):
    """LightningModule wrapper that trains NUM_PARALLEL_MODELS ActorCritic
    networks on rollouts of the given game, delegating the actual loss
    computation to each ActorCritic's training_step."""
    def __init__(self, game: GameInterface):
        super().__init__()
        self.game = game
        self.actor_critics = torch.nn.ModuleList(
            [
                ActorCritic(game.feature_dim(), game.action_dim())
                for _ in range(NUM_PARALLEL_MODELS)
            ]
        )
        self.learning_rate = 1e-3
        # test_actors(self.game, self.current_epoch, self.actor_critics)
    def forward(self, inputs):
        # NOTE(review): the result is not returned, and ActorCritic.forward
        # requires (inputs, possible_actions, do_epsilon_greedy) — this method
        # looks broken/unused; confirm before relying on it.
        self.actor_critics[0].forward(inputs)
    def train_model(self, train_dataset, output_file=None):
        """Run the Lightning training loop over `train_dataset`, optionally
        saving a final checkpoint to `output_file`."""
        self.train_dataset = train_dataset
        trainer = pl.Trainer(
            gpus=1,
            # show_progress_bar=False,
            max_epochs=1000,
            # default_save_path=os.path.join(os.getcwd(), "models", "MAC"),
            val_check_interval=train_dataset.max_games,
            callbacks=[TorchSaveCallback()],
            # auto_lr_find=True,
            num_sanity_val_steps=0,
        )
        with Profiler(True):
            trainer.fit(self)
        if output_file is not None:
            trainer.save_checkpoint(output_file)
        # Drop the dataset reference so the module can be pickled/saved cleanly.
        self.train_dataset = None
    def train_dataloader(self):
        return torch.utils.data.DataLoader(self.train_dataset, pin_memory=True)
    def configure_optimizers(self):
        # One Adam optimizer per parallel actor-critic.
        optimizers = [
            torch.optim.Adam(x.parameters(), lr=(self.learning_rate), weight_decay=1e-5)
            for x in self.actor_critics
        ]
        return optimizers
    def training_step(self, batch, batch_idx, optimizer_idx=0):
        """Delegate to the optimizer_idx-th ActorCritic and surface its
        losses on the progress bar."""
        retval = self.actor_critics[optimizer_idx].training_step(batch, batch_idx)
        if "progress_bar" in retval:
            self.log(
                "critic_loss",
                retval["progress_bar"]["critic_loss"],
                prog_bar=True,
                on_step=True,
            )
            self.log(
                "advantage_loss",
                retval["progress_bar"]["advantage_loss"],
                prog_bar=True,
                on_step=True,
            )
            self.log(
                "entropy_loss",
                retval["progress_bar"]["entropy_loss"],
                prog_bar=True,
                on_step=True,
            )
        return retval
| 1.921875 | 2 |
epipack/tests/numeric_tests.py | benmaier/epipack | 25 | 12763093 | import unittest
import numpy as np
from scipy.optimize import root
from scipy.interpolate import interp1d
from scipy.stats import entropy, poisson
import warnings
from epipack.numeric_epi_models import (
DynamicBirthRate,
ConstantBirthRate,
DynamicLinearRate,
ConstantLinearRate,
DynamicQuadraticRate,
ConstantQuadraticRate,
EpiModel,
SISModel,
SIModel,
SIRModel,
SEIRModel,
SIRSModel,
)
from epipack.integrators import time_leap_ivp, time_leap_newton
from epipack.stochastic_epi_models import StochasticEpiModel
class EpiTest(unittest.TestCase):
    """Unit tests for epipack's numeric EpiModel machinery: compartment
    bookkeeping, linear/quadratic rate construction, deterministic
    integration, stochastic (Gillespie) simulation, and the time-leap
    integral solvers."""
    def test_compartments(self):
        """Compartment names map to stable integer ids and back."""
        epi = EpiModel(list("SEIR"))
        assert(all([ i == epi.get_compartment_id(C) for i, C in enumerate("SEIR") ]))
        assert(epi.get_compartment_id("E") == 1)
        assert(epi.get_compartment(1) == "E")
    def test_linear_rates(self):
        """Constant and time-dependent transition processes yield the expected
        rate functions and event-update vectors."""
        epi = EpiModel(list("SEIR"))
        epi.add_transition_processes([
            ("E", 1.0, "I"),
            ("I", 1.0, "R"),
        ])
        linear_rates = [ ConstantLinearRate(1.0,1), ConstantLinearRate(1.0,2) ]
        linear_events = [ np.array([0,-1,+1,0]), np.array([0,0,-1,+1.]) ]
        for r0, r1 in zip(linear_rates, epi.linear_rate_functions):
            assert(r0(0,[0.1,0.2,0.3,0.4,0.5]) == r1(0, [0.1,0.2,0.3,0.4,0.5]))
        for e0, e1 in zip(linear_events, epi.linear_event_updates):
            assert(all([_e0==_e1 for _e0, _e1 in zip(e0, e1)]))
        epi = EpiModel(list("SEIR"))
        _r0 = lambda t, y: 2+np.cos(t)
        _r1 = lambda t, y: 2+np.sin(t)
        epi.add_transition_processes([
            ("E", _r0, "I"),
            ("I", _r1, "R"),
        ])
        linear_rates = [ DynamicLinearRate(_r0,1), DynamicLinearRate(_r1,2) ]
        linear_events = [ np.array([0,-1,+1,0]), np.array([0,0,-1,+1.]) ]
        for r0, r1 in zip(linear_rates, epi.linear_rate_functions):
            assert(r0(0,[0.1,0.2,0.3,0.4,0.5]) == r1(0, [0.1,0.2,0.3,0.4,0.5]))
        for e0, e1 in zip(linear_events, epi.linear_event_updates):
            assert(all([_e0==_e1 for _e0, _e1 in zip(e0, e1)]))
    def test_adding_linear_rates(self):
        """add_transition_processes appends to processes set earlier."""
        epi = EpiModel(list("SEIR"))
        epi.set_processes([
            ("E", 1.0, "I"),
        ])
        epi.add_transition_processes([
            ("I", 1.0, "R"),
        ])
        linear_rates = [ ConstantLinearRate(1.0,1), ConstantLinearRate(1.0,2) ]
        linear_events = [ np.array([0,-1,+1,0]), np.array([0,0,-1,+1.]) ]
        for r0, r1 in zip(linear_rates, epi.linear_rate_functions):
            assert(r0(0,[0.1,0.2,0.3,0.4,0.5]) == r1(0, [0.1,0.2,0.3,0.4,0.5]))
        for e0, e1 in zip(linear_events, epi.linear_event_updates):
            assert(all([_e0==_e1 for _e0, _e1 in zip(e0, e1)]))
    def test_quadratic_processes(self):
        """A transmission process becomes one quadratic rate/event pair."""
        epi = EpiModel(list("SEIAR"))
        quadratic_rates = [ ConstantQuadraticRate(1.0,2,0)]
        quadratic_events = [ np.array([-1,+1,0,0,0.])]
        epi.add_transmission_processes([
            ("S", "I", 1.0, "I", "E"),
        ])
        for r0, r1 in zip(quadratic_rates, epi.quadratic_rate_functions):
            assert(r0(0,[0.1,0.2,0.3,0.4,0.5]) == r1(0, [0.1,0.2,0.3,0.4,0.5]))
        for e0, e1 in zip(quadratic_events, epi.quadratic_event_updates):
            assert(all([_e0==_e1 for _e0, _e1 in zip(e0, e1)]))
    def test_adding_quadratic_processes(self):
        """add_transmission_processes appends to transmissions set earlier."""
        epi = EpiModel(list("SEIAR"))
        quadratic_rates = [ ConstantQuadraticRate(1.0,2,0), ConstantQuadraticRate(1.0,3,0) ]
        quadratic_events = [ np.array([-1,+1,0,0,0.]), np.array([-1,+1,0,0,0.]) ]
        epi.set_processes([
            ("S", "I", 1.0, "I", "E"),
        ])
        epi.add_transmission_processes([
            ("S", "A", 1.0, "A", "E"),
        ])
        for r0, r1 in zip(quadratic_rates, epi.quadratic_rate_functions):
            assert(r0(0,[0.1,0.2,0.3,0.4,0.5]) == r1(0, [0.1,0.2,0.3,0.4,0.5]))
        for e0, e1 in zip(quadratic_events, epi.quadratic_event_updates):
            assert(all([_e0==_e1 for _e0, _e1 in zip(e0, e1)]))
    def test_SIS_with_simulation_restart_and_euler(self):
        """SIS endemic state S* = N/2 for R0 = 2, with both the default and the
        Euler integrator."""
        N = 100
        epi = SISModel(infection_rate=2,recovery_rate=1,initial_population_size=N)
        epi.set_initial_conditions({'S':0.99*N,'I':0.01*N})
        tt = np.linspace(0,100,2)
        result = epi.integrate(tt,['S'])
        assert(np.isclose(result['S'][-1],N/2))
        tt = np.linspace(0,100,1000)
        result = epi.integrate_and_return_by_index(tt,['S'],integrator='euler')
        assert(np.isclose(result[0,-1],N/2))
    def test_repeated_simulation(self):
        """Chained short integrations (adopting the final state each time)
        reach the same endemic state as one long run."""
        N = 100
        epi = SISModel(infection_rate=2,recovery_rate=1,initial_population_size=N)
        epi.set_initial_conditions({'S':0.99*N,'I':0.01*N})
        tt = np.linspace(0,100,100)
        old_t = tt[0]
        for it, t in enumerate(tt[1:]):
            result = epi.integrate_and_return_by_index([old_t,t],integrator='euler',adopt_final_state=True)
            old_t = t
        assert(np.isclose(result[0,-1],N/2))
    def test_birth_death(self):
        """SIR with demography reaches the analytic endemic equilibrium."""
        epi = EpiModel(list("SIR"))
        R0 = 2
        rho = 1
        mu = 0.2
        eta = R0 * rho
        # Birth/death rates make column sums nonzero, which triggers a warning.
        with self.assertWarns(UserWarning):
            epi.set_processes([
                ("S", "I", eta, "I", "I"),
                ("I", rho, "R"),
                (None, mu, "S"),
                ("S", mu, None),
                ("R", mu, None),
                ("I", mu, None),
            ])
        epi.set_initial_conditions({'S': 0.8, 'I':0.2 })
        t = [0,1000]
        res = epi.integrate(t)
        assert(np.isclose(res['S'][-1],(mu+rho)/eta))
        assert(np.isclose(res['I'][-1],mu/eta*(eta-mu-rho)/(mu+rho)))
    def test_dynamic_birth(self):
        """A time-dependent birth rate dA/dt = 2t integrates to A(t) = t^2 + 1."""
        A = "A"
        epi = EpiModel([A])
        epi.set_initial_conditions({A:1})
        with self.assertWarns(UserWarning):
            epi.set_processes([
                (None, lambda t, y: 2*t, A),
            ])
        res = epi.integrate([0,5])
        assert(np.isclose(res[A][-1],5**2+1))
    def test_correcting_for_declining_pop_size(self):
        """Fusion dynamics run with and without the dynamical-population-size
        correction (smoke test; plotting code kept for manual inspection)."""
        A, B = list("AB")
        epi = EpiModel([A, B],10,correct_for_dynamical_population_size=True)
        epi.add_transition_processes([
            #(None, 0.1, A),
        ])
        epi.add_fusion_processes([
            (A, B, 1, B),
        ])
        epi.set_initial_conditions({B:4, A:6})
        tt = np.linspace(0,30)
        result = epi.integrate(tt)
        #from matplotlib import pyplot as pl
        #pl.plot(tt, result[A], label=A)
        #pl.plot(tt, result[B], label=B)
        epi.correct_for_dynamical_population_size = False
        result = epi.integrate(tt)
        #pl.plot(tt, result[A], label=A)
        #pl.plot(tt, result[B], label=B)
        #pl.legend()
        #pl.show()
    def test_fusion_and_adding_rates(self):
        """Only events whose updates do not conserve probability should warn."""
        A, B, C = list("ABC")
        epi = EpiModel(list("ABC"))
        # this should not raise a warning that rates do not sum to zero
        # as it will be actively suppressed
        epi.add_fusion_processes([
            (A, B, 1, C),
        ])
        with self.assertWarns(UserWarning):
            # this should raise a warning that rates do not sum to zero
            epi.add_quadratic_events([
                ((A, B), 1, [(C, -1),(A, +1)]),
            ])
        # now rates should sum to zero
        epi.add_quadratic_events([
            ((A, B), 1, [(B, +1)]),
        ])
        with self.assertWarns(UserWarning):
            # this should raise a warning that rates do not sum to zero
            epi.add_linear_events([
                ((A,), 1, [(B,-1)])
            ])
    def test_initial_condition_warnings(self):
        """Inconsistent or duplicated initial conditions warn."""
        A, B, C = list("ABC")
        epi = EpiModel(list("ABC"))
        with self.assertWarns(UserWarning):
            # this should raise a warning that rates do not sum to zero
            epi.set_initial_conditions({A:0.1,B:0.2})
        with self.assertWarns(UserWarning):
            # this should raise a warning that initial conditions were set twice
            epi.set_initial_conditions([(A,0.1),(A,0.2)])
    def test_custom_models(self):
        """SI, SIR, SEIR, SIS, and SIRS models hit their analytic endpoints
        (final-size relation for SIR/SEIR, endemic states for SIS/SIRS)."""
        S, I, R = list("SIR")
        eta = 1
        epi = SIModel(eta)
        epi.set_initial_conditions({"S":0.99, "I":0.01})
        epi.integrate([0,1000],adopt_final_state=True)
        assert(np.isclose(epi.y0[0],0))
        eta = 2
        rho = 1
        epi = SIRModel(eta,rho)
        S0 = 0.99
        epi.set_initial_conditions({S:S0, I:1-S0})
        R0 = eta/rho
        # Final-size relation: R_inf solves 1 - R - S0*exp(-R*R0) = 0.
        Rinf = lambda x: 1-x-S0*np.exp(-x*R0)
        res = epi.integrate([0,100])
        SIR_theory = root(Rinf,0.5).x[0]
        assert(np.isclose(res[R][-1],SIR_theory))
        omega = 1
        epi = SEIRModel(eta,rho,omega)
        epi.set_initial_conditions({S:S0, I:1-S0})
        res = epi.integrate([0,100])
        assert(np.isclose(res[R][-1],SIR_theory))
        #======================
        epi = SISModel(eta, rho, initial_population_size=100)
        epi.set_initial_conditions({S: 99, I:1 })
        tt = np.linspace(0,1000,2)
        result = epi.integrate(tt)
        assert(np.isclose(result[S][-1],50))
        epi = SIRSModel(eta, rho, omega)
        epi.set_initial_conditions({S: 0.99, I:0.01 })
        tt = np.linspace(0,1000,2)
        result = epi.integrate(tt)
        assert(np.isclose(result[R][-1],(1-rho/eta)/(1+omega/rho)))
    def test_inference_of_temporal_dependence(self,plot=False):
        """Rates given as opaque callables cannot be classified as
        time-dependent automatically; the model should warn and accept an
        explicit flag instead."""
        data = np.array([
            (1.0, 2.00),
            (10000.0, 2.00),
            (10001.0, -2.00),
        ])
        times, rates = data[:,0], data[:,1]
        f = interp1d(times, rates, kind='linear')
        def infection_rate(t,y):
            return f(t)
        S, I = list("SI")
        N = 100
        rec = 1
        model = EpiModel([S,I], N)
        # first, initialize the time to t0 = 1, so
        # column sum tests do not fail
        model.set_initial_conditions({S:99,I:1},initial_time=1)
        # Here, the function will fail to evaluate time dependence
        # but will warn the user that there were errors in time
        # evaluation.
        self.assertWarns(UserWarning,model.set_processes,
            [
                (S, I, infection_rate, I, I),
                (I, infection_rate, S),
            ],
        )
        assert(not model.rates_have_explicit_time_dependence)
        assert(model.rates_have_functional_dependence)
        # this should warn the user that rates are functionally dependent
        # but that no temporal dependence could be inferred, to in case
        # they know that there's a time dependence, they have to state
        # that explicitly
        self.assertWarns(UserWarning,model.simulate,tmax=2)
        model.set_initial_conditions({S:99,I:1},initial_time=1)
        # here, the time dependence is given explicitly and so
        # the warning will not be shown
        model.simulate(tmax=2,rates_have_explicit_time_dependence=True)
    def test_temporal_gillespie(self,plot=False):
        """Waiting times of the time-dependent Gillespie algorithm follow the
        analytic first-event-time distribution (checked via KL divergence)."""
        scl = 40
        def R0(t,y=None):
            return 4+np.cos(t*scl)
        S, I = list("SI")
        N = 100
        rec = 1
        model = EpiModel([S,I], N)
        model.set_processes([
            (S, I, R0, I, I),
            (I, rec, S),
        ])
        I0 = 1
        S0 = N - I0
        model.set_initial_conditions({
            S: S0,
            I: I0,
        })
        taus = []
        N_sample = 10000
        for sample in range(N_sample):
            tau, _ = model.get_time_leap_and_proposed_compartment_changes(0)
            taus.append(tau)
        # I(t) = integral of R0; I2(t) = cumulative total event rate.
        I = lambda t: (4*t + 1/scl*np.sin(t*scl))
        I2 = lambda t: I(t)*S0*I0/N+I0*rec*t
        pdf = lambda t: (R0(t)*S0*I0/N + I0*rec) * np.exp(-I2(t))
        measured, bins = np.histogram(taus,bins=100,density=True)
        theory = [ np.exp(-I2(bins[i-1]))-np.exp(-I2(bins[i])) for i in range(1,len(bins)) if measured[i-1] > 0]
        experi = [ measured[i-1] for i in range(1,len(bins)) if measured[i-1] > 0]
        # make sure the kullback-leibler divergence is below some threshold
        if plot: # pragma: no cover
            import matplotlib.pyplot as pl
            pl.figure()
            pl.hist(taus,bins=100,density=True)
            tt = np.linspace(0,1,100)
            pl.plot(tt, pdf(tt))
            pl.yscale('log')
            pl.figure()
            pl.hist(taus,bins=100,density=True)
            tt = np.linspace(0,1,100)
            pl.plot(tt, pdf(tt))
            pl.show()
        assert(entropy(theory, experi) < 0.01)
    def test_temporal_gillespie_repeated_simulation(self,plot=False):
        """Same distributional check as above, but obtaining the first event
        time through repeated short simulate() calls."""
        scl = 40
        def R0(t,y=None):
            return 4+np.cos(t*scl)
        S, I = list("SI")
        N = 100
        rec = 1
        model = EpiModel([S,I], N)
        model.set_processes([
            (S, I, R0, I, I),
            (I, rec, S),
        ])
        I0 = 1
        S0 = N - I0
        model.set_initial_conditions({
            S: S0,
            I: I0,
        })
        taus = []
        N_sample = 10000
        if plot:
            from tqdm import tqdm
        else:
            tqdm = lambda x: x
        tt = np.linspace(0,1,100)
        for sample in tqdm(range(N_sample)):
            tau = None
            model.set_initial_conditions({
                S: S0,
                I: I0,
            })
            for _t in tt[1:]:
                time, result = model.simulate(_t,adopt_final_state=True)
                #print(time, result['I'])
                if result['I'][-1] != I0:
                    # The first event has fired within this window.
                    tau = time[1]
                    break
            #print()
            if tau is not None:
                taus.append(tau)
        I = lambda t: (4*t + 1/scl*np.sin(t*scl))
        I2 = lambda t: I(t)*S0*I0/N+I0*rec*t
        pdf = lambda t: (R0(t)*S0*I0/N + I0*rec) * np.exp(-I2(t))
        measured, bins = np.histogram(taus,bins=100,density=True)
        theory = [ np.exp(-I2(bins[i-1]))-np.exp(-I2(bins[i])) for i in range(1,len(bins)) if measured[i-1] > 0]
        experi = [ measured[i-1] for i in range(1,len(bins)) if measured[i-1] > 0]
        # make sure the kullback-leibler divergence is below some threshold
        if plot:
            import matplotlib.pyplot as pl
            pl.figure()
            pl.hist(taus,bins=100,density=True)
            tt = np.linspace(0,1,100)
            pl.plot(tt, pdf(tt))
            pl.yscale('log')
            pl.figure()
            pl.hist(taus,bins=100,density=True)
            tt = np.linspace(0,1,100)
            pl.plot(tt, pdf(tt))
            pl.show()
        assert(entropy(theory, experi) < 0.01)
    def test_stochastic_well_mixed(self):
        """A well-mixed SEIR simulated with EpiModel agrees with both the ODE
        integration and StochasticEpiModel to within 5%."""
        S, E, I, R = list("SEIR")
        N = 75000
        tmax = 100
        model = EpiModel([S,E,I,R],N)
        model.set_processes([
            ( S, I, 2, E, I ),
            ( I, 1, R),
            ( E, 1, I),
        ])
        model.set_initial_conditions({S: N-100, I: 100})
        tt = np.linspace(0,tmax,10000)
        result_int = model.integrate(tt)
        t, result_sim = model.simulate(tmax,sampling_dt=1,return_compartments=[S, R])
        model = StochasticEpiModel([S,E,I,R],N)
        model.set_link_transmission_processes([
            ( I, S, 2, I, E ),
        ])
        model.set_node_transition_processes([
            ( I, 1, R),
            ( E, 1, I),
        ])
        model.set_random_initial_conditions({S: N-100, I: 100})
        t, result_sim2 = model.simulate(tmax,sampling_dt=1,return_compartments=[S, R])
        for c, res in result_sim2.items():
            #print(c, np.abs(1-res[-1]/result_int[c][-1]))
            #print(c, np.abs(1-res[-1]/result_sim[c][-1]))
            assert(np.abs(1-res[-1]/result_int[c][-1]) < 0.05)
            assert(np.abs(1-res[-1]/result_sim[c][-1]) < 0.05)
    def test_stochastic_fission(self):
        """A + B -> C fusion consumes all pairs: 5 of each give exactly 5 C."""
        A, B, C = list("ABC")
        N = 10
        epi = EpiModel([A,B,C],N,correct_for_dynamical_population_size=True)
        epi.add_fusion_processes([
            (A, B, 1.0, C),
        ])
        epi.set_initial_conditions({ A: 5, B: 5})
        t, res = epi.simulate(1e9)
        assert(res[C][-1] == 5)
    def test_birth_stochastics(self):
        """Birth-transition-death chain equilibrates to a Poisson(1)
        occupation (checked via KL divergence against the fitted Poisson)."""
        A, B, C = list("ABC")
        epi = EpiModel([A,B,C],10,correct_for_dynamical_population_size=True)
        epi.set_initial_conditions({A:5, B:5})
        epi.set_processes([
            (None, 1, A),
            (A, 1, B),
            (B, 1, None),
        ],allow_nonzero_column_sums=True)
        _, res = epi.simulate(200,sampling_dt=0.05)
        # Discard the transient (t <= 10) before comparing to the stationary law.
        vals = np.concatenate([res[A][_>10], res[B][_>10]])
        rv = poisson(vals.mean())
        measured, bins = np.histogram(vals,bins=np.arange(10)-0.5,density=True)
        theory = [ rv.pmf(i) for i in range(0,len(bins)-1) if measured[i] > 0]
        experi = [ measured[i] for i in range(0,len(bins)-1) if measured[i] > 0]
        # make sure the kullback-leibler divergence is below some threshold
        #for a, b in zip(theory, experi):
        #    print(a,b)
        assert(entropy(theory, experi) < 1e-2)
        assert(np.median(res[A]) == 1)
    def test_sampling_callback(self):
        """sampling_callback requires sampling_dt and fires once per sample."""
        epi = SIModel(infection_rate=5.0,initial_population_size=100)
        epi.set_initial_conditions({"S":90,"I":10})
        self.assertRaises(ValueError,epi.simulate,1,sampling_callback=lambda x: x)
        i = 0
        samples = []
        def sampled():
            samples.append(epi.y0[0])
        t, res = epi.simulate(10,sampling_dt=0.1,sampling_callback=sampled)
        assert(all([a==b for a, b in zip(res['S'], samples)]))
    def test_integral_solvers(self):
        """Newton- and IVP-based time-leap solvers agree with the reference
        value for a time-dependent total event rate."""
        def get_event_rates(t, y):
            return y * (0.05 + 0.03 * np.array([ np.cos(t), np.sin(t), np.cos(t)**2, np.sin(t)**2 ]))
        rand = 0.834053
        t0 = 1.0
        y0 = np.array([0.1,0.2,0.3,0.4])
        t_nwt = time_leap_newton(t0, y0, get_event_rates, rand)
        t_ivp = time_leap_ivp(t0, y0, get_event_rates, rand)
        expected = 30.76
        numeric = np.array([t_nwt, t_ivp])
        assert(np.all( np.abs(numeric-expected)/numeric < 1e-3) )
    def test_integrate_until(self):
        """integrate_until stops exactly when S crosses the threshold."""
        N = 100
        epi = SIModel(infection_rate=5.0,initial_population_size=N)
        epi.set_initial_conditions({"S":90,"I":10})
        thresh = 0.5
        iS = epi.get_compartment_id("S")
        # Root of the stop condition marks S dropping below thresh*N.
        stop_condition = lambda t, y: thresh*N - y[iS]
        t, res = epi.integrate_until(0,stop_condition,return_compartments=['S'])
        assert(np.isclose(thresh*N,res['S'][-1]))
if __name__ == "__main__":
    # Ad-hoc runner: instantiate the test case and invoke individual tests
    # directly instead of going through a unittest runner. Uncomment entries
    # below to run additional tests. (Removed an unused `import sys`.)
    T = EpiTest()
    T.test_fusion_and_adding_rates()
    T.test_inference_of_temporal_dependence()
    #T.test_integrate_until()
    #T.test_integral_solvers()
    #T.test_temporal_gillespie_repeated_simulation()
    #T.test_sampling_callback()
    #T.test_birth_stochastics()
    #T.test_stochastic_fission()
    #T.test_correcting_for_declining_pop_size()
    #T.test_dynamic_birth()
    #T.test_stochastic_well_mixed()
    #T.test_temporal_gillespie()
    #T.test_compartments()
    #T.test_linear_rates()
    #T.test_adding_linear_rates()
    #T.test_quadratic_processes()
    #T.test_adding_quadratic_processes()
    #T.test_SIS_with_simulation_restart_and_euler()
    #T.test_repeated_simulation()
    #T.test_custom_models()
    #T.test_birth_death()
    #T.test_initial_condition_warnings()
| 2.015625 | 2 |
integration_tests/txinfo_msgtransfer.py | SaveTheAles/cyber.py | 0 | 12763094 | from cyber_sdk.client.lcd import LCDClient
if __name__ == "__main__":
    # Query a single transaction by hash from the space-pussy-1 LCD endpoint.
    lcd = LCDClient(
        url="https://lcd.space-pussy-1.cybernode.ai/",
        chain_id="space-pussy-1",
    )
    tx_hash = "D22FC6EB287D9F099DD8EBADAAC5D9A0F6AA9D6B87F4A35A3FACEF4182706A16"
    lcd.tx.tx_info(tx_hash)
| 1.632813 | 2 |
pygama/dsp/_processors/gaussian_filter1d.py | iguinn/pygama | 13 | 12763095 | <filename>pygama/dsp/_processors/gaussian_filter1d.py
# Copyright (C) 2003-2005 <NAME>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# All this code belongs to the team that coded Scipy, found at this link:
# https://github.com/scipy/scipy/blob/v1.6.0/scipy/ndimage/filters.py#L210-L260
# The only thing changed was the calculation of the convulution, which
# originally called a function from a C library. In this code, the convolution is
# performed with NumPy's built in convolution function.
import numpy
from numba import guvectorize
def gaussian_filter1d(sigma, truncate):
    """Build a 1-D Gaussian smoothing processor.

    Factory returning a numba ``guvectorize``-compiled function that smooths a
    waveform with a Gaussian kernel, mirroring
    ``scipy.ndimage.gaussian_filter1d`` with ``mode='reflect'`` (the kernel is
    applied via a NumPy correlation instead of scipy's C routine).

    Parameters
    ----------
    sigma : scalar
        Standard deviation of the Gaussian kernel, in samples.
    truncate : float
        Truncate the kernel at this many standard deviations.

    Returns
    -------
    gaussian_filter1d_out : callable
        guvectorized processor with signature ``(wf_in, wf_out)`` that writes
        the filtered waveform into ``wf_out``.
    """
    def _gaussian_kernel1d(sigma, radius):
        """Compute a normalized 1-D Gaussian kernel of length 2*radius + 1."""
        sigma2 = sigma * sigma
        x = numpy.arange(-radius, radius + 1)
        phi_x = numpy.exp(-0.5 / sigma2 * x ** 2)
        phi_x = phi_x / phi_x.sum()
        return phi_x

    sd = float(sigma)
    # Make the radius of the filter equal to truncate standard deviations
    lw = int(truncate * sd + 0.5)
    # Since we are calling correlate, not convolve, revert the kernel
    weights = _gaussian_kernel1d(sigma, lw)[::-1]
    weights = numpy.asarray(weights, dtype=numpy.float64)
    # Number of samples to reflect on each side so the 'same'-mode correlation
    # sees properly padded edges
    extension_length = int(len(weights) / 2) + 1

    @guvectorize(["void(float32[:], float32[:])",
                  "void(float64[:], float64[:])",
                  "void(int32[:], int32[:])",
                  "void(int64[:], int64[:])"],
                 "(n),(m)", forceobj=True)
    def gaussian_filter1d_out(wf_in, wf_out):
        """Correlate ``wf_in`` with the precomputed Gaussian kernel using
        'reflect' boundary handling, writing the result to ``wf_out``."""
        wf_in = numpy.asarray(wf_in)
        # Kernel larger than signal: correlation is impossible; warn and leave
        # wf_out untouched. (guvectorize kernels return nothing meaningful --
        # the previous `return 0` value was silently discarded.)
        if len(wf_in) < extension_length:
            print('Kernel calculated was larger than signal, try again with smaller parameters')
            return
        # 'reflect' extension (d c b a | a b c d | d c b a): mirror the input
        # about the edge samples (half-sample symmetric).
        reflected_front = numpy.flip(wf_in[0:extension_length])
        reflected_end = numpy.flip(wf_in[-extension_length:])
        extended_signal = numpy.concatenate(
            (reflected_front, wf_in, reflected_end), axis=None)
        output = numpy.correlate(extended_signal, weights, mode='same')
        # Drop the padding to recover the original signal length
        wf_out[:] = output[extension_length:-extension_length]

    return gaussian_filter1d_out
| 1.679688 | 2 |
software_gui/VAEsemane_GUI.py | Roboy/robotic_musicianship | 8 | 12763096 | <reponame>Roboy/robotic_musicianship<filename>software_gui/VAEsemane_GUI.py
import sys
import os
import torch
import threading
import time
import mido
import numpy as np
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QApplication, QMainWindow, qApp, QAction, QFileDialog
from PyQt5.uic import loadUi
from VAE.VAE_Train import VAE
from utils.LiveInput_ClassCompliant import LiveParser
from gui_utils.vae_gui import vae_interact, vae_endless, vae_generate
from loadModel import loadModel, loadStateDict
class VAEsemane_GUI(QMainWindow):
    """Qt main window of the VAEsemane application.

    Loads the Qt Designer layout ``VAEsemane_v2.ui`` and wires menu actions,
    buttons, dials and sliders to a VAE model and a MIDI ``LiveParser``
    instrument. Long-running interaction modes (interact / endless / generate)
    run on daemon threads so the Qt event loop stays responsive.
    """
    def __init__(self):
        """Load the .ui layout and initialize runtime state and signal wiring."""
        super(VAEsemane_GUI, self).__init__()
        loadUi('VAEsemane_v2.ui', self)
        self.setWindowTitle('VAEsemane')
        self.live_instrument = None  # LiveParser, set once a MIDI port is chosen
        self.is_running = False  # True while an interaction thread is alive
        # slider delivers an integer percentage; keep temperature as a float
        self.temperature = self.slider_temperature.value()/100.
        self.initUIConnects()
    def initUIConnects(self):
        """Connect all menu actions, buttons, dials and sliders to their slots."""
        # menu bar
        # first tab "File"
        self.menu_bar.setNativeMenuBar(False)
        self.action_quit.triggered.connect(self.quit_clicked)
        # second tab "MIDI port", sets instrument
        available_port = mido.get_input_names()
        midi_ports = []  # NOTE(review): populated nowhere, appears unused
        midi_actions = []
        for port in available_port:
            # one menu entry per currently available MIDI input port
            midi_actions.append(QAction(port, self))
            self.menu_midi_port.addAction(midi_actions[-1])
        self.menu_midi_port.triggered.connect(self.set_instrument)
        # third tab set model path
        self.action_find.triggered.connect(self.set_model_path)
        self.action_pretrained.triggered.connect(self.set_pretrained_model)
        # buttons
        self.btn_run.clicked.connect(self.btn_run_clicked)
        self.btn_stop.clicked.connect(self.btn_stop_clicked)
        self.btn_randomize.clicked.connect(self.btn_randomize_clicked)
        self.btn_reset.clicked.connect(self.btn_reset_clicked)
        self.btn_run_endless.clicked.connect(self.btn_run_endless_clicked)
        self.btn_generate.clicked.connect(self.btn_generate_clicked)
        # dials --> get list of dials and sort them by object name
        self.dials = []
        for i in range(self.dial_grid.count()):
            item = self.dial_grid.itemAt(i)
            self.dials.append(item.widget())
        self.dials.sort(key=lambda x: x.objectName())
        # change range of the dials, default is [-100,100] (in %)
        for dial in self.dials:
            dial.setMinimum(-1000)
            dial.setMaximum(1000)
        # sliders
        self.lbl_bpm.setText("{} BPM".format(self.slider_bpm.value()))
        self.slider_bpm.valueChanged.connect(self.update_bpm)
        self.lbl_temperature.setText("Temperature {0:.2f}".format(self.temperature))
        self.slider_temperature.valueChanged.connect(self.update_temperature)
        self.lbl_bars.setText("{} bar".format(self.slider_bars.value()))
        self.slider_bars.valueChanged.connect(self.update_bars)
    def quit_clicked(self):
        """Slot for File > Quit: terminate the Qt application."""
        qApp.quit()
    def set_instrument(self, q):
        """Slot for the MIDI-port menu: open the chosen port as the live instrument.

        ``q`` is the triggered QAction; its text is the MIDI port name.
        Also starts the metronome display thread.
        """
        self.live_instrument = LiveParser(port=q.text(), ppq=24,
            bars=self.slider_bars.value(), bpm=self.slider_bpm.value())
        self.live_instrument.open_inport(self.live_instrument.parse_notes)
        self.live_instrument.open_outport()
        self.start_metronome()
        # self.start_progress_bar()
    def start_metronome(self):
        """Launch the metronome LCD updater on a background daemon thread."""
        metronome_thread = threading.Thread(target=self.update_metronome)
        metronome_thread.setDaemon(True)
        metronome_thread.start()
    def update_metronome(self):
        """Endless loop (daemon thread): mirror the instrument's metronome
        onto the human/computer LCD displays, polling every 10 ms."""
        while True:
            if self.live_instrument.human:
                self.lcd_comp_metronome.display("0")
                self.lcd_human_metronome.display("{}".format(self.live_instrument.metronome))
            else:
                self.lcd_human_metronome.display("0")
                self.lcd_comp_metronome.display("{}".format(self.live_instrument.metronome))
            time.sleep(0.01)
    def update_bpm(self):
        """Slot for the BPM slider: refresh the label and retune the instrument."""
        current_val = self.slider_bpm.value()
        self.lbl_bpm.setText("{} BPM".format(current_val))
        if self.live_instrument:
            self.live_instrument.update_bpm(current_val)
    def update_temperature(self):
        """Slot for the temperature slider: convert percent to float and show it."""
        self.temperature = self.slider_temperature.value()/100.
        self.lbl_temperature.setText("Temperature {0:.2f}".format(self.temperature))
    def update_bars(self):
        """Slot for the bars slider: refresh the label (singular/plural) and
        propagate the new bar count to the instrument."""
        current_val = self.slider_bars.value()
        if current_val > 1:
            self.lbl_bars.setText("{} bars".format(current_val))
        else:
            self.lbl_bars.setText("{} bar".format(current_val))
        if self.live_instrument:
            self.live_instrument.update_bars(current_val)
    def search_instruments(self):
        # this function could search for new midi instruments if they were
        # connected after gui was startet
        pass
    def set_model_path(self):
        """Slot for Model > Find: pick a checkpoint file and load it.

        Tries ``loadModel`` (DataParallel checkpoint) first and falls back to
        ``loadStateDict``.
        """
        # NOTE(review): os.getenv('..') looks like it was meant to be a start
        # directory ('..'); getenv of that name likely returns None -- confirm.
        file_path = QFileDialog.getOpenFileName(self, 'Open File', os.getenv('..'))[0]
        self.model = VAE()
        try:
            self.model = loadModel(self.model, file_path, dataParallelModel=True)
        except:  # NOTE(review): bare except also hides unrelated errors
            self.model = loadStateDict(self.model, file_path)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # NOTE(review): Module.train() returns the module (always truthy), so
        # eval() always runs; probably intended `if self.model.training:`.
        if self.model.train():
            self.model.eval()
    def set_pretrained_model(self):
        """Slot for Model > Pretrained: load the bundled pretrained VAE weights."""
        rel_path = "../utils/pretrained_models/vae_model.pth"
        self.model = VAE()
        self.model = loadStateDict(self.model, rel_path)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # NOTE(review): same always-true train() check as in set_model_path
        if self.model.train():
            self.model.eval()
    def btn_run_clicked(self):
        """Start the interactive VAE mode on a daemon thread (if not running)."""
        if not self.is_running:
            vae_thread = threading.Thread(target=vae_interact, args=(self,))
            vae_thread.setDaemon(True)
            vae_thread.start()
            self.is_running = vae_thread.is_alive()
    def btn_run_endless_clicked(self):
        """Start the endless VAE mode on a daemon thread (if not running)."""
        if not self.is_running:
            vae_thread = threading.Thread(target=vae_endless, args=(self,))
            vae_thread.setDaemon(True)
            vae_thread.start()
            self.is_running = vae_thread.is_alive()
    def btn_stop_clicked(self):
        """Request stop; presumably polled by the vae_* worker loops -- confirm
        against gui_utils.vae_gui."""
        self.is_running = False
    def btn_generate_clicked(self):
        """Start one-shot VAE generation on a daemon thread (if not running)."""
        if not self.is_running:
            vae_thread = threading.Thread(target=vae_generate, args=(self,))
            vae_thread.setDaemon(True)
            vae_thread.start()
            self.is_running = vae_thread.is_alive()
    def btn_randomize_clicked(self):
        """Set every latent dial to a uniformly random position in [-1000, 1000)."""
        for dial in self.dials:
            rand = np.random.randint(-1000,1000)
            dial.setSliderPosition(rand)
    def btn_reset_clicked(self):
        """Reset every latent dial back to its neutral (zero) position."""
        for dial in self.dials:
            dial.setSliderPosition(0)
    def test_dials(self):
        """Debug helper: print each dial's object name and current value."""
        for dial in self.dials:
            print(dial.objectName())
            print (dial.value())
if __name__ == '__main__':
    # Boot the Qt application, show the main window, and hand control to the
    # event loop; its exit status becomes the process exit code.
    qt_app = QApplication(sys.argv)
    window = VAEsemane_GUI()
    window.show()
    sys.exit(qt_app.exec_())
| 2.28125 | 2 |
sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_app.py | praveenkuttappan/azure-sdk-for-python | 2,728 | 12763097 | <reponame>praveenkuttappan/azure-sdk-for-python
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
from azure.core.exceptions import HttpResponseError, ClientAuthenticationError
from azure.core.credentials import AzureKeyCredential
from testcase import (
ConversationTest,
GlobalConversationAccountPreparer
)
from azure.ai.language.conversations import ConversationAnalysisClient
from azure.ai.language.conversations.models import (
ConversationAnalysisOptions,
AnalyzeConversationResult,
ConversationPrediction
)
class ConversationAppTests(ConversationTest):
    """Live tests for ``ConversationAnalysisClient.analyze_conversations``.

    Both tests send the same "Order" query to a conversation project — once as
    a ``ConversationAnalysisOptions`` model and once as a plain dict — and
    share the response assertions via ``_assert_order_prediction``.
    """

    @staticmethod
    def _assert_order_prediction(result, query):
        """Assert the service echoed *query* and recognized the 'Order' intent
        with a 'california maki' OrderItem entity."""
        assert isinstance(result, AnalyzeConversationResult)
        assert result.query == query
        assert isinstance(result.prediction, ConversationPrediction)
        assert result.prediction.project_kind == 'conversation'
        assert result.prediction.top_intent == 'Order'
        assert len(result.prediction.entities) > 0
        assert len(result.prediction.intents) > 0
        assert result.prediction.intents[0].category == 'Order'
        assert result.prediction.intents[0].confidence_score > 0
        assert result.prediction.entities[0].category == 'OrderItem'
        assert result.prediction.entities[0].text == 'california maki'
        assert result.prediction.entities[0].confidence_score > 0

    @GlobalConversationAccountPreparer()
    def test_conversation_app(self, conv_account, conv_key, conv_project):
        """Analyze a query passed as a ConversationAnalysisOptions model."""
        # prepare data
        query = "One california maki please."
        options = ConversationAnalysisOptions(  # renamed from `input` (shadowed builtin)
            query=query,
        )

        # analyze query
        client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key))
        with client:
            result = client.analyze_conversations(
                options,
                project_name=conv_project,
                deployment_name='production'
            )

        # assert
        self._assert_order_prediction(result, query)

    @GlobalConversationAccountPreparer()
    def test_conversation_app_with_dictparams(self, conv_account, conv_key, conv_project):
        """Analyze a query passed as a plain dict with an explicit api_version."""
        # prepare data
        query = "One california maki please."
        params = {
            "query": query,
            "api_version": "2021-11-01-preview"
        }

        # analyze query
        client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key))
        with client:
            result = client.analyze_conversations(
                params,
                project_name=conv_project,
                deployment_name='production'
            )

        # assert
        self._assert_order_prediction(result, query)
| 2.171875 | 2 |
metecho/api/migrations/0091_user_onboarded_at.py | VaccineCloud/Metecho-Vax | 21 | 12763098 | # Generated by Django 3.1.6 on 2021-02-08 21:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``User.onboarded_at``: when the user last completed onboarding."""

    dependencies = [("api", "0090_rename_repository_project")]

    operations = [
        migrations.AddField(
            model_name="user",
            name="onboarded_at",
            field=models.DateTimeField(
                blank=True,
                null=True,
                help_text=(
                    "Date of the last time the user completed the interactive "
                    "onboarding"
                ),
            ),
        ),
    ]
| 1.890625 | 2 |
python/problems/smallest-subarray.py | fossabot/a-grim-loth | 4 | 12763099 | <reponame>fossabot/a-grim-loth<filename>python/problems/smallest-subarray.py
from bisect import bisect_left, bisect_right
def Merge(left, right):
    """Merge two sorted lists into a single sorted list (stable: on ties the
    element from ``right`` is taken first, matching the original comparison)."""
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # one of the two lists is exhausted; copy the remaining tail(s)
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def BuildMergeSortTree(Tree, A, index, start, end):
    """Recursively fill node ``index`` with the sorted values of A[start..end].

    Uses the 1-based segment-tree layout: children of node i are 2*i and
    2*i + 1, so the initial call should use index 1 as the root.
    """
    if start == end:
        # leaf: a single element is trivially sorted
        Tree[index].append(A[start])
        return
    mid = (start + end) // 2
    left_child, right_child = 2 * index, 2 * index + 1
    BuildMergeSortTree(Tree, A, left_child, start, mid)
    BuildMergeSortTree(Tree, A, right_child, mid + 1, end)
    # internal node holds the merge of its children's sorted lists
    Tree[index] = Merge(Tree[left_child], Tree[right_child])
def query(Tree, index, start, end, left, right, k):
    """Count elements >= k in A[left..right] using a merge-sort tree.

    ``index`` is the current node (root = 1), covering A[start..end].
    Fixes the original implementation, which left a debug ``print`` in place
    and subtracted the segment's first *element value* (``start_ele``) from
    the count — corrupting the result whenever that element was nonzero.
    """
    if right < start or left > end:
        # no overlap with the query range
        return 0
    if start >= left and end <= right:
        # node fully inside the query range: elements >= k are those at or
        # after the insertion point of k in the node's sorted list
        return len(Tree[index]) - bisect_left(Tree[index], k)
    mid = (start + end) // 2
    return query(Tree, 2 * index, start, mid, left, right, k) + query(
        Tree, 2 * index + 1, mid + 1, end, left, right, k
    )
def solve(A, B, N):
    """For each index i, print the length of the smallest window A[i..j]
    containing at least B[i] elements >= A[i], or -1 if none exists.

    Builds a merge-sort tree once, then binary-searches the smallest right
    endpoint per i using range count queries. Fixes the original call that
    built the tree rooted at index 0 (whose "left child" 2*0 == 0 recurses
    onto itself) while ``query`` walks the tree rooted at index 1.
    """
    Tree = [[] for i in range(4 * N + 1)]
    # root must be index 1 so children 2*i / 2*i + 1 line up with query()
    BuildMergeSortTree(Tree, A, 1, 0, N - 1)
    for i in range(N):
        value = A[i]
        start = i
        end = N - 1
        ans = -1
        # binary search the smallest m with >= B[i] elements >= value in A[i..m]
        while start <= end:
            m = (start + end) >> 1
            cnt = query(Tree, 1, 0, N - 1, i, m, value)
            if cnt >= B[i]:
                ans = m
                end = m - 1
            else:
                start = m + 1
        if ans == -1:
            print(-1, end=" ")
        else:
            print(ans - i + 1, end=" ")
# Sample input: array size N, the values A, and the per-index required counts B.
N = 5
A = [int(tok) for tok in "8 2 4 1 9".split()]
B = [int(tok) for tok in "2 3 5 1 1".split()]
solve(A, B, N)
| 3.515625 | 4 |
porespy/filters/__init__.py | xu-kai-xu/porespy | 0 | 12763100 | r"""
Filters
#######
**Highlight Features of Interest**
This module contains a variety of functions for altering images based on
the structural characteristics, such as pore sizes. A definition of a
*filter* is a function that returns an image the shape as the original
image, but with altered values.
.. currentmodule:: porespy
.. autosummary::
:template: mybase.rst
:toctree: generated/
filters.apply_chords
filters.apply_chords_3D
filters.apply_padded
filters.chunked_func
filters.distance_transform_lin
filters.fftmorphology
filters.fill_blind_pores
filters.find_disconnected_voxels
filters.find_dt_artifacts
filters.find_peaks
filters.find_trapped_regions
filters.flood
filters.flood_func
filters.hold_peaks
filters.ibip
filters.ibip_gpu
filters.imagej
filters.local_thickness
filters.nl_means_layered
filters.nphase_border
filters.pc_to_satn
filters.porosimetry
filters.prune_branches
filters.reduce_peaks
filters.region_size
filters.satn_to_seq
filters.seq_to_satn
filters.size_to_satn
filters.size_to_seq
filters.snow_partitioning
filters.snow_partitioning_n
filters.snow_partitioning_parallel
filters.trim_disconnected_blobs
filters.trim_extrema
filters.trim_floating_solid
filters.trim_nearby_peaks
filters.trim_nonpercolating_paths
filters.trim_saddle_points
filters.trim_small_clusters
"""
from ._funcs import apply_chords
from ._funcs import apply_chords_3D
from ._funcs import apply_padded
from ._funcs import chunked_func
from ._funcs import distance_transform_lin
from ._funcs import fill_blind_pores
from ._funcs import find_disconnected_voxels
from ._funcs import find_dt_artifacts
from ._funcs import flood
from ._funcs import flood_func
from ._funcs import hold_peaks
from ._funcs import local_thickness
from ._funcs import nphase_border
from ._funcs import porosimetry
from ._funcs import prune_branches
from ._funcs import region_size
from ._funcs import trim_disconnected_blobs
from ._funcs import trim_extrema
from ._funcs import trim_floating_solid
from ._funcs import trim_nonpercolating_paths
from ._funcs import trim_small_clusters
from ._snows import snow_partitioning
from ._snows import snow_partitioning_n
from ._snows import snow_partitioning_parallel
from ._snows import find_peaks
from ._snows import reduce_peaks
from ._snows import trim_nearby_peaks
from ._snows import trim_saddle_points
from ._snows import trim_saddle_points_legacy
from ._size_seq_satn import *
from ._nlmeans import nl_means_layered
from ._fftmorphology import fftmorphology
from . import imagej
from ._ibip import ibip
from ._ibip import find_trapped_regions
from ._ibip_gpu import ibip_gpu
| 1.796875 | 2 |
Collections.py | Ichinga-Samuel/Python-Buffet | 0 | 12763101 | <reponame>Ichinga-Samuel/Python-Buffet
from collections import OrderedDict as od, Counter, namedtuple, ChainMap
# Tutorial script demonstrating collections (OrderedDict, Counter) and common
# dict operations; the `# -->` comments show the expected printed output.

# OrderedDict and dictionaries
a = [('a', 1), ('z', 34), ('b', 2), ('c', 3)]
d = od(a)  # create an ordered dictionary from a list of (key, value) tuples
print(d)  # --> OrderedDict([('a', 1), ('z', 34), ('b', 2),('c', 3)])
d['r'] = '67'  # add to the OrderedDict like an ordinary dict()
g = d.keys()  # the keys() method works on it
print(d['a'])  # --> 1 can be accessed with keywords
print(g)  # --> odict_keys(['a', 'z', 'b', 'c', 'r'])
print(d.values())  # --> odict_values([1, 34, 2, 3, '67']) values() method work also
e = dict(d)
print(e)  # --> {'a': 1, 'z': 34, 'b': 2, 'c': 3, 'r': '67'} can be converted to a dictionary

# alternative ways of creating dictionaries
w = dict(a=9,b=6,v=7)
r = dict([('a', 6), ('t', 9), ('y', 67)])

# merging two dicts
print(w)  # --> {'a': 9, 'b': 6, 'v': 7}
print(r)  # --> {'a': 6, 't': 9, 'y': 67}
c = {**w, **r}  # syntactic sugar for merging two dictionaries and creating new dictionary
# when a key is in both dicts, the value from the second (unpacked last) wins
r.update(w)  # when a key is in both dicts, the value from the dict passed to update() wins
print(c)  # --> {'a': 6, 'b': 6, 'v': 7, 't': 9, 'y': 67}
print(r)  # ---> {'a': 9, 't': 9, 'y': 67, 'b': 6, 'v': 7}
n = w.setdefault('yu',68)  # inserts key with default if absent; always returns the stored value
b = w.get('o', 9)  # returns the default without adding to the dict() if key is absent
w['yu'] = w.get('yu')-8

# Counter Dictionaries
# Counter maps each item of an iterable to its frequency; its repr lists
# entries in descending order of count
b='rfoireoosjjfototieldkfkorpwprkgptktprkptktjtootktptktpktktotktotkotktototptktootptoro'
bc = Counter(b)
print(bc)  # --> Counter({'t': 24, 'o': 17, 'k': 14, 'p': 9, 'r': 6, 'f': 3, 'j': 3, 'i': 2,
# 'e': 2, 's': 1, 'l': 1, 'd': 1, 'w': 1, 'g': 1})
# Counter objects support dictionary methods.
# Counters can be used to check if two words are Anagrams (note: this `c`
# reuses/overwrites the merged dict from above)
a = 'anagram'
c = 'gramana'
print(Counter(a)==Counter(c))
| 3.953125 | 4 |
curvetorch/curvesgd.py | yashpatel5400/CurveTorch | 0 | 12763102 | #!/usr/bin/env python3
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import torch
from torch.optim.optimizer import Optimizer
import numpy as np
from scipy.optimize import minimize
__all__ = ('CurveSGD',)
class CurveSGD(Optimizer):
    r"""Implements Self-Tuning Stochastic Optimization with
    Curvature-Aware Gradient Filtering algorithm (https://arxiv.org/pdf/2011.04803.pdf).

    The optimizer Kalman-filters both the loss value and the gradient using
    Hessian-vector products, then chooses the step size that maximizes an
    improvement-probability criterion via scipy's BFGS.

    Arguments:
        params: iterable of parameters to optimize or dicts defining
            parameter groups
        lr: learning rate / initial step-size guess (default: 1e-3)
        beta_r: EMA weight for the loss mean/variance (default: 0.999)
        beta_sigma: EMA weight for the gradient mean/variance (default: 0.999)
        beta_alpha: EMA weight reserved for the step size (default: 0.999;
            currently stored but not used in ``step``)
    Example:
        >>> import curvesgd as curve
        >>> optimizer = curve.CurveSGD(model.parameters(), lr=0.1)
        >>>
        >>> for _ in range(iterations):
        >>>     def closure():
        >>>         optimizer.zero_grad()
        >>>         f = func(x)
        >>>         f.backward(retain_graph=True, create_graph=True)
        >>>         return f
        >>>     optimizer.step(closure)
    """

    def __init__(
        self,
        params,
        lr: float = 1e-3,
        beta_r=0.999,
        beta_sigma=0.999,
        beta_alpha=0.999,
    ):
        if lr <= 0.0:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        defaults = dict(
            lr=lr,
            beta_r=beta_r,
            beta_sigma=beta_sigma,
            beta_alpha=beta_alpha,
        )
        super(CurveSGD, self).__init__(params, defaults)

    def get_hessian_prod(self, params, grads, delta):
        """Get an estimate of Hessian product.

        This is done by computing the Hessian vector product with the stored delta
        vector at the current gradient point, i.e. differentiating <grads, delta>
        with respect to ``params``.

        Args:
            params: parameter tensor(s) to differentiate with respect to
            grads: gradient of the loss w.r.t. ``params`` (must carry a graph,
                i.e. backward() was called with create_graph=True)
            delta: vector to be multiplied against the Hessian (right multiplied)

        Returns:
            hessian_prod: Product of the Hessian and the ``delta`` argument

        Raises:
            RuntimeError: if ``grads`` has no ``grad_fn`` (backward was called
                without ``create_graph=True``).
        """
        # Check backward was called with create_graph set to True
        if grads.grad_fn is None:
            msg = (
                'Gradient tensor {:} does not have grad_fn. When '
                'calling loss.backward(), make sure the option '
                'create_graph is set to True.'
            )
            # fixed: original raised msg.format(i) with `i` undefined here,
            # turning this diagnostic into a NameError
            raise RuntimeError(msg.format(grads))

        # this is for distributed setting with single node and multi-gpus,
        # for multi nodes setting, we have not support it yet.
        hvs = torch.autograd.grad(
            grads, params, grad_outputs=delta, only_inputs=True, retain_graph=True
        )
        return hvs[0]

    def _get_prob_improve_num_den(self, alpha, delta_t, m_t, B_delta, s_t, P_t, Q_t):
        """Helper function for probability improvement/gradient calculation. See
        prob_improve for full description of its use.

        Args:
            alpha: value of step size (length-1 array-like)
            delta_t: Gradient change
            m_t: Kalman filtered gradient mean
            B_delta: Hessian-vector product
            s_t: Kalman filtered function variance
            P_t: Kalman filtered gradient covariance
            Q_t: Covariance of Hessian-vector product

        Returns:
            (numerator, denominator) of the probability function evaluation
        """
        alpha = alpha[0]
        numerator = -alpha * delta_t.matmul(m_t) + alpha ** 2 / 2 * delta_t.t().matmul(B_delta)
        denominator = 2 * s_t + alpha ** 2 * delta_t.t().matmul(P_t).matmul(delta_t) \
            + alpha ** 4 / 4 * delta_t.t().matmul(Q_t).matmul(delta_t)
        numerator = numerator.detach().numpy()
        denominator = np.sqrt(denominator.detach().numpy())[0]
        return numerator, denominator

    def prob_improve(self, alpha, delta_t, m_t, B_delta, s_t, P_t, Q_t):
        """Get an estimate of improvement probability assuming alpha step size.

        This is done as a subroutine procedure to determine the optimal
        step size after running filtering on the function and gradient values.
        Intended to be used in conjunction with an optimization procedure
        (i.e. scipy.optimize) with all parameters fixed except alpha.

        Args:
            alpha: value of step size
            delta_t: Gradient change
            m_t: Kalman filtered gradient mean
            B_delta: Hessian-vector product
            s_t: Kalman filtered function variance
            P_t: Kalman filtered gradient covariance
            Q_t: Covariance of Hessian-vector product

        Returns:
            prob: Improvement probability function evaluation
        """
        numerator, denominator = self._get_prob_improve_num_den(alpha, delta_t, m_t, B_delta, s_t, P_t, Q_t)
        return numerator / denominator

    def prob_improve_num_grad(self, alpha, delta_t, m_t, B_delta, s_t, P_t, Q_t):
        """Get a numerical (central-difference) gradient of the improvement
        probability. See prob_improve for argument docs.

        Returns:
            prob_gradient: Numerical gradient of improvement probability function
        """
        eps = 1e-4
        f_plus = self.prob_improve(alpha + eps, delta_t, m_t, B_delta, s_t, P_t, Q_t)
        f_minus = self.prob_improve(alpha - eps, delta_t, m_t, B_delta, s_t, P_t, Q_t)
        return (f_plus - f_minus) / (2 * eps)

    def prob_improve_grad(self, alpha, delta_t, m_t, B_delta, s_t, P_t, Q_t):
        """Get the analytical gradient of the improvement probability with
        respect to alpha (quotient rule). See prob_improve for argument docs.

        Returns:
            prob_gradient: Gradient of improvement probability function
        """
        numerator, denominator = self._get_prob_improve_num_den(alpha, delta_t, m_t, B_delta, s_t, P_t, Q_t)
        alpha = alpha[0]
        numerator_grad = -delta_t.t().matmul(m_t) + alpha * delta_t.t().matmul(B_delta)
        denominator_grad = 1 / (2 * denominator) * (2 * alpha * delta_t.t().matmul(P_t).matmul(delta_t) \
            + alpha ** 3 * delta_t.t().matmul(Q_t).matmul(delta_t))
        numerator_grad = numerator_grad.detach().numpy()
        denominator_grad = denominator_grad.detach().numpy()
        return (denominator * numerator_grad - numerator * denominator_grad) / denominator ** 2

    def mean_var_ewa(self, ema, emvar, x, beta):
        r"""Computes exponential moving average/variance of tensor with update
        weight beta.

        Args:
            ema: Current exponential moving average.
            emvar: Current exponential moving variance.
            x: New datapoint (should have same units as ema).
            beta: Averaging weight for update step.

        Returns:
            (ema, emvar): Tuple of updated weighted average and variance
        """
        alpha = 1 - beta
        delta = x - ema
        ema_new = ema.add(delta.mul(alpha))
        emvar_new = emvar.add(delta.mul(delta).mul(alpha)).mul(1 - alpha)
        return ema_new, emvar_new

    def step(self, closure=None):
        r"""Performs a single optimization step.

        Args:
            closure: A closure that reevaluates the model and returns the loss.
                Must call ``backward(retain_graph=True, create_graph=True)`` so
                Hessian-vector products can be formed.

        Returns:
            loss: Loss (before taking optimizer step)
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            beta_r = group['beta_r']
            beta_sigma = group['beta_sigma']
            beta_alpha = group['beta_alpha']

            for p in group['params']:
                if p.grad is None:
                    continue

                d_p = p.grad.data.flatten()
                if d_p.is_sparse:
                    msg = (
                        'CurveSGD does not support sparse gradients, '
                        'please consider SparseAdam instead'
                    )
                    raise RuntimeError(msg)

                state = self.state[p]

                # State initialization (first step for this parameter)
                if len(state) == 0:
                    state['t'] = 0
                    state['delta_t'] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )
                    # Exponential moving average of function values
                    state['func_exp_avg'] = loss.clone()
                    state['func_exp_var'] = torch.zeros((1))
                    # Exponential moving average of gradient values
                    state['grad_exp_avg'] = d_p.clone()
                    state['grad_exp_var'] = torch.zeros_like(
                        p.flatten(), memory_format=torch.preserve_format
                    )
                    # Exponential moving average of Hessian(-vector) values
                    state['hess_exp_avg'] = self.get_hessian_prod(p, p.grad, state['delta_t']).flatten().clone()
                    state['hess_exp_var'] = torch.zeros_like(
                        p.flatten(), memory_format=torch.preserve_format
                    )
                    # Kalman Filter states (large prior covariances)
                    state['m_t'] = torch.zeros_like(
                        p.flatten(), memory_format=torch.preserve_format
                    )
                    state['P_t'] = torch.eye(d_p.size()[0]).mul(1e4)
                    state['u_t'] = 0
                    state['s_t'] = 1e4

                func_exp_avg = state['func_exp_avg']
                func_exp_var = state['func_exp_var']
                grad_exp_avg = state['grad_exp_avg']
                grad_exp_var = state['grad_exp_var']
                hess_exp_avg = state['hess_exp_avg']
                hess_exp_var = state['hess_exp_var']

                delta_t = state['delta_t']
                B_delta = self.get_hessian_prod(p, p.grad, delta_t).flatten()
                delta_t = delta_t.flatten()

                # Update running statistics (skipped on the very first step,
                # where the initialized values are used as-is)
                if state['t'] != 0:
                    beta_delta = 1 - 1 / state['t']  # non-smoothed running average/variance
                    func_exp_avg, func_exp_var = self.mean_var_ewa(func_exp_avg, func_exp_var, loss, beta_r)
                    grad_exp_avg, grad_exp_var = self.mean_var_ewa(grad_exp_avg, grad_exp_var, d_p, beta_sigma)
                    hess_exp_avg, hess_exp_var = self.mean_var_ewa(hess_exp_avg, hess_exp_var, B_delta, beta_delta)

                # Floor the noise estimates to keep the filter well-conditioned.
                # NOTE(review): eps = 10e-1 equals 1.0, an unusually large
                # floor — confirm against the paper's recommended values.
                eps = 10e-1
                sigma_t = max(eps, torch.mean(grad_exp_var))
                q_t = max(eps, torch.mean(hess_exp_var))

                # Match notation from paper for convenience
                y_t = func_exp_avg
                r_t = func_exp_var
                g_t = grad_exp_avg
                Sigma_t = torch.eye(d_p.size()[0]).mul(sigma_t)
                b_t = hess_exp_avg
                Q_t = torch.eye(d_p.size()[0]).mul(q_t)

                # Kalman Filter update for f
                u_t = state['u_t']
                s_t = state['s_t']
                m_t = state['m_t']
                P_t = state['P_t']

                # predict step: second-order extrapolation of the loss mean
                u_t_minus = u_t + m_t.t().matmul(delta_t) + 1 / 2 * delta_t.t().matmul(B_delta)
                c_t = s_t + delta_t.t().matmul(P_t).matmul(delta_t) + 1 / 4 * delta_t.t().matmul(Q_t).matmul(delta_t) + r_t
                # inflate variance if the observed loss is more surprising than predicted
                lambda_t = max((y_t - u_t_minus) ** 2 - c_t, 0)
                s_t_minus = lambda_t + c_t - r_t
                # correct step: blend prediction and observation by relative variance
                mix_t = s_t_minus / (s_t_minus + r_t)
                u_t = (1 - mix_t) * u_t_minus + mix_t * y_t
                s_t = (1 - mix_t) ** 2 * s_t_minus + mix_t ** 2 * r_t

                # Kalman Filter update for grad f
                m_t_minus = m_t + B_delta
                P_t_minus = P_t + Q_t
                K_t = P_t_minus.matmul((P_t_minus + Sigma_t).inverse())
                m_t = (torch.eye(d_p.size()[0]) - K_t).matmul(m_t_minus) + K_t.matmul(g_t)
                # Joseph-form covariance update
                P_t = (torch.eye(d_p.size()[0]) - K_t).matmul(P_t_minus).matmul((torch.eye(d_p.size()[0]) - K_t).t()) \
                    + K_t.matmul(Sigma_t).matmul(K_t.t())

                # Choose the step size maximizing the improvement probability
                prob_improve_closure = lambda alpha: self.prob_improve(alpha, delta_t, m_t, B_delta, s_t, P_t, Q_t)
                prob_improve_grad_closure = lambda alpha: self.prob_improve_num_grad(alpha, delta_t, m_t, B_delta, s_t, P_t, Q_t)
                if state['t'] == 0:
                    lr = group['lr']
                else:
                    # cap the BFGS-optimized step size at .0015 for stability
                    lr = min(.0015, minimize(prob_improve_closure, group['lr'], jac=prob_improve_grad_closure, method='BFGS').x[0])

                delta_t = m_t.mul(lr).reshape(p.data.shape)

                state['t'] += 1
                state['u_t'] = u_t
                state['s_t'] = s_t
                state['m_t'] = m_t
                state['P_t'] = P_t
                state['func_exp_avg'] = func_exp_avg
                state['func_exp_var'] = func_exp_var
                state['grad_exp_avg'] = grad_exp_avg
                state['grad_exp_var'] = grad_exp_var
                state['hess_exp_avg'] = hess_exp_avg
                state['hess_exp_var'] = hess_exp_var
                state['delta_t'] = delta_t

                # Use filtered gradient estimate for update step
                p.data.sub_(delta_t)
        return loss
client_app/python-pcl-0.3.0rc1/examples/3dharris.py | yaseenit/debs19_grand_challenge- | 46 | 12763103 | <filename>client_app/python-pcl-0.3.0rc1/examples/3dharris.py<gh_stars>10-100
# -*- coding: utf-8 -*-
# 3D Harris keypoint detection example, ported from:
# http://virtuemarket-lab.blogspot.jp/2015/03/harris.html
import pcl
import numpy as np
import pcl.pcl_visualization

# Load the input cloud.
cloud = pcl.load('./bunny.pcd')
print("cloud points : " + str(cloud.size))

# Detect Harris keypoints with non-maximum suppression and a 0.01 radius.
detector = cloud.make_HarrisKeypoint3D()
detector.set_NonMaxSupression(True)
detector.set_Radius(0.01)
keypoints = detector.compute()
print("keypoints detected: " + str(keypoints.size))

# Copy the XYZ coordinates of every keypoint into a plain PointCloud and
# track the minimum/maximum Harris response (stored in the 4th channel).
# Renamed from max/min to avoid shadowing the builtins; the original
# count/points.resize(count, 3) pair was a no-op (count always equalled
# keypoints.size) and has been removed.
keypoints3D = pcl.PointCloud()
resp_max = -999
resp_min = 999
points = np.zeros((keypoints.size, 3), dtype=np.float32)
for i in range(0, keypoints.size):
    points[i][0] = keypoints[i][0]
    points[i][1] = keypoints[i][1]
    points[i][2] = keypoints[i][2]
    intensity = keypoints[i][3]
    if intensity > resp_max:
        print("coords: " + str(keypoints[i][0]) + ";" + str(keypoints[i][1]) + ";" + str(keypoints[i][2]))
        resp_max = intensity
    if intensity < resp_min:
        resp_min = intensity
print(points)
keypoints3D.from_array(points)
print("maximal responce: " + str(resp_max) + " min responce: " + str(resp_min))

# Visualize: input cloud in white, keypoints in red.
viewer = pcl.pcl_visualization.PCLVisualizering('3D Viewer')
pccolor = pcl.pcl_visualization.PointCloudColorHandleringCustom(cloud, 255, 255, 255)
kpcolor = pcl.pcl_visualization.PointCloudColorHandleringCustom(keypoints3D, 255, 0, 0)
viewer.AddPointCloud_ColorHandler(cloud, pccolor)
viewer.AddPointCloud_ColorHandler(keypoints3D, kpcolor, b'keypoints')

# Spin until the user closes the window.
# BUG FIX: the original wrote "flag != viewer.WasStopped()" — a discarded
# comparison, so the loop never terminated — and ended with a stray "end"
# statement (a C++/MATLAB leftover) that would raise NameError.
while not viewer.WasStopped():
    viewer.SpinOnce()
| 2.546875 | 3 |
trove/tests/api/flavors.py | sapcc/trove | 1 | 12763104 | # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from proboscis.asserts import assert_raises
from proboscis import before_class
from proboscis.decorators import time_out
from proboscis import test
from trove.common.utils import poll_until
from trove import tests
from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE
from trove.tests.config import CONFIG
from trove.tests.util.check import AttrCheck
from trove.tests.util import create_dbaas_client
from trove.tests.util import create_nova_client
from trove.tests.util import test_config
from trove.tests.util.users import Requirements
from troveclient.compat import exceptions
from troveclient.v1.flavors import Flavor
# Proboscis group names under which the flavor / datastore API tests register.
GROUP = "dbaas.api.flavors"
GROUP_DS = "dbaas.api.datastores"
# True when the test deployment runs against the fake (in-process) backend.
FAKE_MODE = test_config.values['fake_mode']
# NOTE(review): these three placeholders appear unused in this module —
# candidates for removal; verify no external module imports them.
servers_flavors = None
dbaas_flavors = None
user = None
def assert_attributes_equal(name, os_flavor, dbaas_flavor):
    """Assert both flavor objects expose attribute *name* with equal values."""
    msg_os = "open stack flavor did not have attribute %s" % name
    assert_true(hasattr(os_flavor, name), msg_os)
    msg_dbaas = "dbaas flavor did not have attribute %s" % name
    assert_true(hasattr(dbaas_flavor, name), msg_dbaas)
    # Compare the two attribute values directly.
    assert_equal(getattr(os_flavor, name), getattr(dbaas_flavor, name),
                 'DBaas flavor differs from Open Stack on attribute ' + name)
def assert_flavors_roughly_equivalent(os_flavor, dbaas_flavor):
    """Two flavors are "roughly equivalent" when their name and RAM agree."""
    for attr in ('name', 'ram'):
        assert_attributes_equal(attr, os_flavor, dbaas_flavor)
def assert_link_list_is_equal(flavor):
    """Validate a flavor's ``links`` list.

    Each link must be either a ``self`` link (an HTTPS DBaaS URL ending in
    ``flavors/<id>``) or a ``bookmark`` link (the HTTPS version URL followed
    by ``flavors/<id>``); any other ``rel`` value fails the test.
    """
    assert_true(hasattr(flavor, 'links'))
    assert_true(flavor.links)
    # Flavors may expose a numeric id or only a string id.
    if flavor.id:
        flavor_id = str(flavor.id)
    else:
        flavor_id = flavor.str_id
    for link in flavor.links:
        href = link['href']
        if "self" in link['rel']:
            # CLEANUP: the original also built an ``expected_href`` here that
            # was never used (dead code); only the prefix/suffix checks below
            # are actually performed for the self link.
            url = test_config.dbaas_url.replace('http:', 'https:', 1)
            msg = ("REL HREF %s doesn't start with %s" %
                   (href, test_config.dbaas_url))
            assert_true(href.startswith(url), msg)
            url = os.path.join("flavors", flavor_id)
            msg = "REL HREF %s doesn't end in '%s'" % (href, url)
            assert_true(href.endswith(url), msg)
        elif "bookmark" in link['rel']:
            # Bookmark links must match the expected URL exactly.
            base_url = test_config.version_url.replace('http:', 'https:', 1)
            expected_href = os.path.join(base_url, "flavors", flavor_id)
            msg = 'bookmark "href" must be %s, not %s' % (expected_href, href)
            assert_equal(href, expected_href, msg)
        else:
            assert_false(True, "Unexpected rel - %s" % link['rel'])
@test(groups=[tests.DBAAS_API, GROUP, GROUP_DS, tests.PRE_INSTANCES],
      depends_on_groups=["services.initialize"])
class Flavors(object):
    """Verify the Trove flavors API: listing, attributes, and Nova parity."""
    @before_class
    def setUp(self):
        """Create a Trove client (and a Nova client when one is configured)."""
        rd_user = test_config.users.find_user(
            Requirements(is_admin=False, services=["trove"]))
        self.rd_client = create_dbaas_client(rd_user)
        if test_config.nova_client is not None:
            nova_user = test_config.users.find_user(
                Requirements(services=["nova"]))
            self.nova_client = create_nova_client(nova_user)
    def get_expected_flavors(self):
        """Return the flavors Trove is expected to report.

        Prefers a live Nova listing; otherwise falls back to the flavors
        spelled out in the test configuration.
        """
        # If we have access to the client, great! Let's use that as the flavors
        # returned by Trove should be identical.
        if test_config.nova_client is not None:
            return self.nova_client.flavors.list()
        # If we don't have access to the client the flavors need to be spelled
        # out in the config file.
        flavors = [Flavor(Flavors, flavor_dict, loaded=True)
                   for flavor_dict in test_config.flavors]
        return flavors
    @test
    def confirm_flavors_lists_nearly_identical(self):
        """Every expected flavor must appear exactly once in the Trove list."""
        os_flavors = self.get_expected_flavors()
        dbaas_flavors = self.rd_client.flavors.list()
        print("Open Stack Flavors:")
        print(os_flavors)
        print("DBaaS Flavors:")
        print(dbaas_flavors)
        # Length of both flavors list should be identical.
        assert_equal(len(os_flavors), len(dbaas_flavors))
        for os_flavor in os_flavors:
            found_index = None
            for index, dbaas_flavor in enumerate(dbaas_flavors):
                if os_flavor.name == dbaas_flavor.name:
                    msg = ("Flavor ID '%s' appears in elements #%s and #%d." %
                           (dbaas_flavor.id, str(found_index), index))
                    # A second name match means a duplicate flavor entry.
                    assert_true(found_index is None, msg)
                    assert_flavors_roughly_equivalent(os_flavor, dbaas_flavor)
                    found_index = index
            msg = "Some flavors from OS list were missing in DBAAS list."
            assert_false(found_index is None, msg)
        for flavor in dbaas_flavors:
            assert_link_list_is_equal(flavor)
    @test
    def test_flavor_list_attrs(self):
        """Each listed flavor carries only the allowed attributes."""
        allowed_attrs = ['id', 'name', 'ram', 'vcpus', 'disk', 'links',
                         'ephemeral', 'local_storage', 'str_id']
        flavors = self.rd_client.flavors.list()
        attrcheck = AttrCheck()
        for flavor in flavors:
            flavor_dict = flavor._info
            attrcheck.contains_allowed_attrs(
                flavor_dict, allowed_attrs,
                msg="Flavors list")
            attrcheck.links(flavor_dict['links'])
    @test
    def test_flavor_get_attrs(self):
        """A single fetched flavor carries only the allowed attributes."""
        allowed_attrs = ['id', 'name', 'ram', 'vcpus', 'disk', 'links',
                         'ephemeral', 'local_storage', 'str_id']
        flavor = self.rd_client.flavors.get(1)
        attrcheck = AttrCheck()
        flavor_dict = flavor._info
        attrcheck.contains_allowed_attrs(
            flavor_dict, allowed_attrs,
            msg="Flavor Get 1")
        attrcheck.links(flavor_dict['links'])
    @test
    def test_flavor_not_found(self):
        """Fetching an unknown flavor id raises NotFound."""
        assert_raises(exceptions.NotFound,
                      self.rd_client.flavors.get, "foo")
    @test
    def test_flavor_list_datastore_version_associated_flavors(self):
        """Flavors associated with the default datastore version match Nova."""
        datastore = self.rd_client.datastores.get(
            test_config.dbaas_datastore)
        dbaas_flavors = (self.rd_client.flavors.
                         list_datastore_version_associated_flavors(
                             datastore=test_config.dbaas_datastore,
                             version_id=datastore.default_version))
        os_flavors = self.get_expected_flavors()
        assert_equal(len(dbaas_flavors), len(os_flavors))
        # verify flavor lists are identical
        for os_flavor in os_flavors:
            found_index = None
            for index, dbaas_flavor in enumerate(dbaas_flavors):
                if os_flavor.name == dbaas_flavor.name:
                    msg = ("Flavor ID '%s' appears in elements #%s and #%d." %
                           (dbaas_flavor.id, str(found_index), index))
                    assert_true(found_index is None, msg)
                    assert_flavors_roughly_equivalent(os_flavor, dbaas_flavor)
                    found_index = index
            msg = "Some flavors from OS list were missing in DBAAS list."
            assert_false(found_index is None, msg)
        for flavor in dbaas_flavors:
            assert_link_list_is_equal(flavor)
@test(runs_after=[Flavors],
      groups=[tests.DBAAS_API, GROUP, GROUP_DS],
      depends_on_groups=["services.initialize"],
      enabled=FAKE_MODE)
class DatastoreFlavorAssociation(object):
    """Verify instance creation honours datastore/flavor associations."""
    @before_class
    def setUp(self):
        """Build a Trove client and cache the default datastore and NICs."""
        rd_user = test_config.users.find_user(
            Requirements(is_admin=False, services=["trove"]))
        self.rd_client = create_dbaas_client(rd_user)
        self.datastore = self.rd_client.datastores.get(
            test_config.dbaas_datastore)
        self.name1 = "test_instance1"
        self.name2 = "test_instance2"
        self.volume = {'size': 2}
        self.instance_id = None
        self.nics = None
        shared_network = CONFIG.get('shared_network', None)
        if shared_network:
            self.nics = [{'net-id': shared_network}]
    @test
    @time_out(TIMEOUT_INSTANCE_CREATE)
    def test_create_instance_with_valid_flavor_association(self):
        """An instance builds to ACTIVE with an associated flavor."""
        # all the nova flavors are associated with the default datastore
        result = self.rd_client.instances.create(
            name=self.name1, flavor_id='1', volume=self.volume,
            datastore=self.datastore.id,
            nics=self.nics)
        self.instance_id = result.id
        assert_equal(200, self.rd_client.last_http_code)
        def result_is_active():
            # Poll helper: True once ACTIVE; any state other than BUILD fails.
            instance = self.rd_client.instances.get(self.instance_id)
            if instance.status == "ACTIVE":
                return True
            else:
                # If its not ACTIVE, anything but BUILD must be
                # an error.
                assert_equal("BUILD", instance.status)
                return False
        poll_until(result_is_active)
        self.rd_client.instances.delete(self.instance_id)
    @test(runs_after=[test_create_instance_with_valid_flavor_association])
    def test_create_instance_with_invalid_flavor_association(self):
        """Creating an instance with a non-associated flavor is rejected."""
        dbaas_flavors = (self.rd_client.flavors.
                         list_datastore_version_associated_flavors(
                             datastore=test_config.dbaas_datastore,
                             version_id=self.datastore.default_version))
        self.flavor_not_associated = None
        os_flavors = Flavors().get_expected_flavors()
        for os_flavor in os_flavors:
            if os_flavor not in dbaas_flavors:
                self.flavor_not_associated = os_flavor.id
                break
        if self.flavor_not_associated is not None:
            # BUG FIX: the original passed the bare name
            # ``flavor_not_associated`` here, which raised NameError instead
            # of exercising the BadRequest path; it must be the attribute
            # assigned above.
            assert_raises(exceptions.BadRequest,
                          self.rd_client.instances.create, self.name2,
                          self.flavor_not_associated, self.volume,
                          datastore=self.datastore.id,
                          nics=self.nics)
| 1.648438 | 2 |
tests/test_rs_compute_tex.py | pygfx/wgpu-py | 74 | 12763105 | import random
import ctypes
import sys
import wgpu.backends.rs # noqa
import numpy as np
from pytest import skip
from testutils import run_tests, get_default_device
from testutils import can_use_wgpu_lib, is_ci
from renderutils import render_to_texture, render_to_screen # noqa
# Skip the whole module when the native wgpu library is unavailable, and on
# Windows CI, where these tests are known to fail on the dx12 backend.
if not can_use_wgpu_lib:
    skip("Skipping tests that need the wgpu lib", allow_module_level=True)
elif is_ci and sys.platform == "win32":
    skip("These tests fail on dx12 for some reason", allow_module_level=True)
# %% 1D
def test_compute_tex_1d_rgba8uint():
    """Round-trip a 64x1x1, 4-channel uint8 1D texture through the shader."""
    compute_shader = """
    [[group(0), binding(0)]]
    var r_tex1: texture_1d<u32>;
    [[group(0), binding(1)]]
    var r_tex2: texture_storage_1d<rgba8uint,write>;
    [[stage(compute), workgroup_size(1)]]
    fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
        let i: i32 = i32(index.x);
        let color1 = vec4<i32>(textureLoad(r_tex1, i, 0));
        let color2 = vec4<i32>(color1.x + i, color1.y + 1, color1.z * 2, color1.a);
        textureStore(r_tex2, i, vec4<u32>(color2));
    }
    """
    # Generate data
    # Innermost ctypes dimension is the channel (rgba), matching the layout
    # expected by _compute_texture.
    nx, ny, nz, nc = 64, 1, 1, 4
    data1 = (ctypes.c_uint8 * nc * nx)()
    for x in range(nx):
        for c in range(nc):
            data1[x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.rgba8uint,
        wgpu.TextureDimension.d1,
        (nx, ny, nz, nc),
        data1,
    )
def test_compute_tex_1d_rgba16sint():
    """Round-trip a 128x1x1, 4-channel int16 1D texture through the shader."""
    compute_shader = """
    [[group(0), binding(0)]]
    var r_tex1: texture_1d<i32>;
    [[group(0), binding(1)]]
    var r_tex2: texture_storage_1d<rgba16sint,write>;
    [[stage(compute), workgroup_size(1)]]
    fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
        let i: i32 = i32(index.x);
        let color1 : vec4<i32> = textureLoad(r_tex1, i, 0);
        let color2 = vec4<i32>(color1.x + i, color1.y + 1, color1.z * 2, color1.a);
        textureStore(r_tex2, i, color2);
    }
    """
    # Generate data
    nx, ny, nz, nc = 128, 1, 1, 4
    data1 = (ctypes.c_int16 * nc * nx)()
    for x in range(nx):
        for c in range(nc):
            data1[x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.rgba16sint,
        wgpu.TextureDimension.d1,
        (nx, ny, nz, nc),
        data1,
    )
def test_compute_tex_1d_r32sint():
    """Round-trip a 256x1x1, single-channel int32 1D texture through the shader."""
    compute_shader = """
    [[group(0), binding(0)]]
    var r_tex1: texture_1d<i32>;
    [[group(0), binding(1)]]
    var r_tex2: texture_storage_1d<r32sint, write>;
    [[stage(compute), workgroup_size(1)]]
    fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
        let i: i32 = i32(index.x);
        let color1 : vec4<i32> = textureLoad(r_tex1, i, 0);
        let color2 = vec4<i32>(color1.x + i, color1.y + 1, color1.z * 2, color1.a);
        textureStore(r_tex2, i, color2);
    }
    """
    # Generate data
    nx, ny, nz, nc = 256, 1, 1, 1
    data1 = (ctypes.c_int32 * nc * nx)()
    for x in range(nx):
        for c in range(nc):
            data1[x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.r32sint,
        wgpu.TextureDimension.d1,
        (nx, ny, nz, nc),
        data1,
    )
def test_compute_tex_1d_r32float():
    """Round-trip a 256x1x1, single-channel float32 1D texture through the shader."""
    compute_shader = """
    [[group(0), binding(0)]]
    var r_tex1: texture_1d<f32>;
    [[group(0), binding(1)]]
    var r_tex2: texture_storage_1d<r32float,write>;
    [[stage(compute), workgroup_size(1)]]
    fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
        let i: i32 = i32(index.x);
        let color1 : vec4<f32> = textureLoad(r_tex1, i, 0);
        let color2 = vec4<f32>(color1.x + f32(i), color1.y + 1.0, color1.z * 2.0, color1.a);
        textureStore(r_tex2, i, color2);
    }
    """
    # Generate data
    # Integer-valued floats keep the exact-equality validation well defined.
    nx, ny, nz, nc = 256, 1, 1, 1
    data1 = (ctypes.c_float * nc * nx)()
    for x in range(nx):
        for c in range(nc):
            data1[x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.r32float,
        wgpu.TextureDimension.d1,
        (nx, ny, nz, nc),
        data1,
    )
# %% 2D
def test_compute_tex_2d_rgba8uint():
    """Round-trip a 64x8, 4-channel uint8 2D texture through the shader."""
    compute_shader = """
    [[group(0), binding(0)]]
    var r_tex1: texture_2d<u32>;
    [[group(0), binding(1)]]
    var r_tex2: texture_storage_2d<rgba8uint,write>;
    [[stage(compute), workgroup_size(1)]]
    fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
        let i = vec2<i32>(index.xy);
        let color1 = vec4<i32>(textureLoad(r_tex1, i, 0));
        let color2 = vec4<i32>(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a);
        textureStore(r_tex2, i, vec4<u32>(color2));
    }
    """
    # Generate data
    # ctypes nesting is [row][column][channel], i.e. y-major.
    nx, ny, nz, nc = 64, 8, 1, 4
    data1 = (ctypes.c_uint8 * nc * nx * ny)()
    for y in range(ny):
        for x in range(nx):
            for c in range(nc):
                data1[y][x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.rgba8uint,
        wgpu.TextureDimension.d2,
        (nx, ny, nz, nc),
        data1,
    )
def test_compute_tex_2d_rgba16sint():
    """Round-trip a 128x8, 4-channel int16 2D texture through the shader."""
    compute_shader = """
    [[group(0), binding(0)]]
    var r_tex1: texture_2d<i32>;
    [[group(0), binding(1)]]
    var r_tex2: texture_storage_2d<rgba16sint, write>;
    [[stage(compute), workgroup_size(1)]]
    fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
        let i = vec2<i32>(index.xy);
        let color1: vec4<i32> = textureLoad(r_tex1, i, 0);
        let color2 = vec4<i32>(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a);
        textureStore(r_tex2, i, color2);
    }
    """
    # Generate data
    nx, ny, nz, nc = 128, 8, 1, 4
    data1 = (ctypes.c_int16 * nc * nx * ny)()
    for y in range(ny):
        for x in range(nx):
            for c in range(nc):
                data1[y][x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.rgba16sint,
        wgpu.TextureDimension.d2,
        (nx, ny, nz, nc),
        data1,
    )
def test_compute_tex_2d_r32sint():
    """Round-trip a 256x8, single-channel int32 2D texture through the shader."""
    compute_shader = """
    [[group(0), binding(0)]]
    var r_tex1: texture_2d<i32>;
    [[group(0), binding(1)]]
    var r_tex2: texture_storage_2d<r32sint, write>;
    [[stage(compute), workgroup_size(1)]]
    fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
        let i = vec2<i32>(index.xy);
        let color1: vec4<i32> = textureLoad(r_tex1, i, 0);
        let color2 = vec4<i32>(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a);
        textureStore(r_tex2, i, color2);
    }
    """
    # Generate data
    nx, ny, nz, nc = 256, 8, 1, 1
    data1 = (ctypes.c_int32 * nc * nx * ny)()
    for y in range(ny):
        for x in range(nx):
            for c in range(nc):
                data1[y][x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.r32sint,
        wgpu.TextureDimension.d2,
        (nx, ny, nz, nc),
        data1,
    )
def test_compute_tex_2d_r32float():
    """Round-trip a 256x8, single-channel float32 2D texture through the shader."""
    compute_shader = """
    [[group(0), binding(0)]]
    var r_tex1:texture_2d<f32>;
    [[group(0), binding(1)]]
    var r_tex2: texture_storage_2d<r32float, write>;
    [[stage(compute), workgroup_size(1)]]
    fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
        let i = vec2<i32>(index.xy);
        let color1: vec4<f32> = textureLoad(r_tex1, i, 0);
        let color2 = vec4<f32>(color1.x + f32(i.x), color1.y + 1.0, color1.z * 2.0, color1.a);
        textureStore(r_tex2, i, color2);
    }
    """
    # Generate data
    nx, ny, nz, nc = 256, 8, 1, 1
    data1 = (ctypes.c_float * nc * nx * ny)()
    for y in range(ny):
        for x in range(nx):
            for c in range(nc):
                data1[y][x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.r32float,
        wgpu.TextureDimension.d2,
        (nx, ny, nz, nc),
        data1,
    )
# %% 3D
def test_compute_tex_3d_rgba8uint():
    """Round-trip a 64x8x6, 4-channel uint8 3D texture through the shader."""
    compute_shader = """
    [[group(0), binding(0)]]
    var r_tex1: texture_3d<u32>;
    [[group(0), binding(1)]]
    var r_tex2: texture_storage_3d<rgba8uint,write>;
    [[stage(compute), workgroup_size(1)]]
    fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
        let i = vec3<i32>(index);
        let color1 = vec4<i32>(textureLoad(r_tex1, i, 0));
        let color2 = vec4<i32>(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a);
        textureStore(r_tex2, i, vec4<u32>(color2));
    }
    """
    # Generate data
    # ctypes nesting is [layer][row][column][channel], i.e. z-major.
    nx, ny, nz, nc = 64, 8, 6, 4
    data1 = (ctypes.c_uint8 * nc * nx * ny * nz)()
    for z in range(nz):
        for y in range(ny):
            for x in range(nx):
                for c in range(nc):
                    data1[z][y][x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.rgba8uint,
        wgpu.TextureDimension.d3,
        (nx, ny, nz, nc),
        data1,
    )
def test_compute_tex_3d_rgba16sint():
    """Round-trip a 128x8x6, 4-channel int16 3D texture through the shader."""
    compute_shader = """
    [[group(0), binding(0)]]
    var r_tex1: texture_3d<i32>;
    [[group(0), binding(1)]]
    var r_tex2: texture_storage_3d<rgba16sint,write>;
    [[stage(compute), workgroup_size(1)]]
    fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
        let i = vec3<i32>(index);
        let color1: vec4<i32> = textureLoad(r_tex1, i, 0);
        let color2 = vec4<i32>(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a);
        textureStore(r_tex2, i, color2);
    }
    """
    # Generate data
    nx, ny, nz, nc = 128, 8, 6, 4
    data1 = (ctypes.c_int16 * nc * nx * ny * nz)()
    for z in range(nz):
        for y in range(ny):
            for x in range(nx):
                for c in range(nc):
                    data1[z][y][x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.rgba16sint,
        wgpu.TextureDimension.d3,
        (nx, ny, nz, nc),
        data1,
    )
def test_compute_tex_3d_r32sint():
    """Round-trip a 256x8x6, single-channel int32 3D texture through the shader."""
    compute_shader = """
    [[group(0), binding(0)]]
    var r_tex1: texture_3d<i32>;
    [[group(0), binding(1)]]
    var r_tex2: texture_storage_3d<r32sint,write>;
    [[stage(compute), workgroup_size(1)]]
    fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
        let i = vec3<i32>(index);
        let color1: vec4<i32> = textureLoad(r_tex1, i, 0);
        let color2 = vec4<i32>(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a);
        textureStore(r_tex2, i, color2);
    }
    """
    # Generate data
    nx, ny, nz, nc = 256, 8, 6, 1
    data1 = (ctypes.c_int32 * nc * nx * ny * nz)()
    for z in range(nz):
        for y in range(ny):
            for x in range(nx):
                for c in range(nc):
                    data1[z][y][x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.r32sint,
        wgpu.TextureDimension.d3,
        (nx, ny, nz, nc),
        data1,
    )
def test_compute_tex_3d_r32float():
    """Round-trip a 64x8x6, single-channel float32 3D texture through the shader."""
    compute_shader = """
    [[group(0), binding(0)]]
    var r_tex1: texture_3d<f32>;
    [[group(0), binding(1)]]
    var r_tex2: texture_storage_3d<r32float,write>;
    [[stage(compute), workgroup_size(1)]]
    fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
        let i = vec3<i32>(index);
        let color1: vec4<f32> = textureLoad(r_tex1, i, 0);
        let color2 = vec4<f32>(color1.x + f32(i.x), color1.y + 1.0, color1.z * 2.0, color1.a);
        textureStore(r_tex2, i, color2);
    }
    """
    # Generate data
    # Integer-valued floats keep the exact-equality validation well defined.
    nx, ny, nz, nc = 64, 8, 6, 1
    data1 = (ctypes.c_float * nc * nx * ny * nz)()
    for z in range(nz):
        for y in range(ny):
            for x in range(nx):
                for c in range(nc):
                    data1[z][y][x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.r32float,
        wgpu.TextureDimension.d3,
        (nx, ny, nz, nc),
        data1,
    )
# %%
def _compute_texture(compute_shader, texture_format, texture_dim, texture_size, data1):
    """
    Apply a computation on a texture and validate the result. The shader should:
    * Add the x-coordinate to the red channel.
    * Add 1 to the green channel.
    * Multiply the blue channel by 2.
    * The alpha channel must remain equal.

    Arguments: the WGSL source (reads binding 0, writes binding 1), the
    wgpu texture format and dimension, the size tuple ``(nx, ny, nz, nc)``,
    and ``data1`` — a nested ctypes array holding the input texel data
    whose innermost dimension is the channel.
    """
    nx, ny, nz, nc = texture_size
    nbytes = ctypes.sizeof(data1)
    bpp = nbytes // (nx * ny * nz)  # bytes per pixel
    device = get_default_device()
    cshader = device.create_shader_module(code=compute_shader)
    # Create textures and views
    # texture1 is the sampled input; texture2 is the storage output.
    texture1 = device.create_texture(
        size=(nx, ny, nz),
        dimension=texture_dim,
        format=texture_format,
        usage=wgpu.TextureUsage.TEXTURE_BINDING | wgpu.TextureUsage.COPY_DST,
    )
    texture2 = device.create_texture(
        size=(nx, ny, nz),
        dimension=texture_dim,
        format=texture_format,
        usage=wgpu.TextureUsage.STORAGE_BINDING | wgpu.TextureUsage.COPY_SRC,
    )
    texture_view1 = texture1.create_view()
    texture_view2 = texture2.create_view()
    # Create buffer that we need to upload the data
    # (also reused below to read the result back).
    buffer_usage = wgpu.BufferUsage.COPY_SRC | wgpu.BufferUsage.COPY_DST
    buffer = device.create_buffer_with_data(data=data1, usage=buffer_usage)
    assert buffer.usage == buffer_usage
    # Derive the binding's sample type from the format name ("float" default).
    texture_sample_type = "float"
    if "uint" in texture_format:
        texture_sample_type = "uint"
    elif "sint" in texture_format:
        texture_sample_type = "sint"
    # Define bindings
    # One can see here why we need 2 textures: one is readonly, one writeonly
    bindings = [
        {"binding": 0, "resource": texture_view1},
        {"binding": 1, "resource": texture_view2},
    ]
    binding_layouts = [
        {
            "binding": 0,
            "visibility": wgpu.ShaderStage.COMPUTE,
            "texture": {
                "sample_type": texture_sample_type,
                "view_dimension": texture_dim,
            },
        },
        {
            "binding": 1,
            "visibility": wgpu.ShaderStage.COMPUTE,
            "storage_texture": {
                "access": wgpu.StorageTextureAccess.write_only,
                "format": texture_format,
                "view_dimension": texture_dim,
            },
        },
    ]
    bind_group_layout = device.create_bind_group_layout(entries=binding_layouts)
    pipeline_layout = device.create_pipeline_layout(
        bind_group_layouts=[bind_group_layout]
    )
    bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings)
    # Create a pipeline and run it
    compute_pipeline = device.create_compute_pipeline(
        layout=pipeline_layout,
        compute={"module": cshader, "entry_point": "main"},
    )
    assert compute_pipeline.get_bind_group_layout(0) is bind_group_layout
    command_encoder = device.create_command_encoder()
    # Upload the input via the staging buffer.
    # NOTE(review): bytes_per_row is assumed to satisfy wgpu's row-alignment
    # requirement for the sizes used by these tests — confirm if sizes change.
    command_encoder.copy_buffer_to_texture(
        {
            "buffer": buffer,
            "offset": 0,
            "bytes_per_row": bpp * nx,
            "rows_per_image": ny,
        },
        {"texture": texture1, "mip_level": 0, "origin": (0, 0, 0)},
        (nx, ny, nz),
    )
    compute_pass = command_encoder.begin_compute_pass()
    compute_pass.push_debug_group("foo")
    compute_pass.insert_debug_marker("setting pipeline")
    compute_pass.set_pipeline(compute_pipeline)
    compute_pass.insert_debug_marker("setting bind group")
    compute_pass.set_bind_group(
        0, bind_group, [], 0, 999999
    )  # last 2 elements not used
    compute_pass.insert_debug_marker("dispatch!")
    compute_pass.dispatch(nx, ny, nz)
    compute_pass.pop_debug_group()
    compute_pass.end_pass()
    # Copy the shader's output back into the same buffer.
    command_encoder.copy_texture_to_buffer(
        {"texture": texture2, "mip_level": 0, "origin": (0, 0, 0)},
        {
            "buffer": buffer,
            "offset": 0,
            "bytes_per_row": bpp * nx,
            "rows_per_image": ny,
        },
        (nx, ny, nz),
    )
    device.queue.submit([command_encoder.finish()])
    # Read the current data of the output buffer
    data2 = data1.__class__.from_buffer(device.queue.read_buffer(buffer))
    # Numpy arrays are easier to work with
    a1 = np.ctypeslib.as_array(data1).reshape(nz, ny, nx, nc)
    a2 = np.ctypeslib.as_array(data2).reshape(nz, ny, nx, nc)
    # Validate!
    # Channel checks mirror the contract in the docstring; channels beyond
    # nc are skipped for single-channel formats.
    for x in range(nx):
        assert np.all(a2[:, :, x, 0] == a1[:, :, x, 0] + x)
    if nc >= 2:
        assert np.all(a2[:, :, :, 1] == a1[:, :, :, 1] + 1)
    if nc >= 3:
        assert np.all(a2[:, :, :, 2] == a1[:, :, :, 2] * 2)
    if nc >= 4:
        assert np.all(a2[:, :, :, 3] == a1[:, :, :, 3])
if __name__ == "__main__":
    # Allow running this module directly, outside of pytest.
    run_tests(globals())
| 1.984375 | 2 |
spectral_cube/tests/test_regrid.py | sushobhana/spectral-cube | 0 | 12763106 | <gh_stars>0
import pytest
import numpy as np
from astropy import units as u
from astropy import convolution
from astropy.wcs import WCS
from astropy import wcs
from astropy.io import fits
from radio_beam import beam, Beam
from .. import SpectralCube
from ..utils import WCSCelestialError
from .test_spectral_cube import cube_and_raw
from .test_projection import load_projection
from . import path
def test_convolution():
    """Convolving a delta-function cube to a wider beam yields the kernel."""
    cube, _ = cube_and_raw('255_delta.fits')
    # sqrt(1^2 + 1.5^2) arcsec: the 1" native beam convolved with 1.5".
    fwhm = 1.802775637731995 * u.arcsec
    result = cube.convolve_to(Beam(fwhm, fwhm, 0 * u.deg))
    sigma_pix = (1.5 * u.arcsec / beam.SIGMA_TO_FWHM /
                 (5.555555555555e-4 * u.deg)).decompose().value
    kernel = convolution.Gaussian2DKernel(sigma_pix, x_size=5, y_size=5)
    kernel.normalize()
    np.testing.assert_almost_equal(kernel.array,
                                   result.filled_data[0, :, :].value)
    # The delta lives only in channel 0; channel 1 must stay empty.
    assert np.all(result.filled_data[1, :, :] == 0.0)
def test_beams_convolution():
    """Per-channel beams: each channel is convolved with its own kernel."""
    cube, data = cube_and_raw('455_delta_beams.fits')
    # 1" convolved with 1.5" -> 1.8027....
    target_beam = Beam(1.802775637731995*u.arcsec, 1.802775637731995*u.arcsec,
                       0*u.deg)
    conv_cube = cube.convolve_to(target_beam)
    # Pixel scale taken from the celestial WCS (sqrt of the pixel area).
    pixscale = wcs.utils.proj_plane_pixel_area(cube.wcs.celestial)**0.5*u.deg
    for ii, bm in enumerate(cube.beams):
        # The effective kernel for each channel is the target beam
        # deconvolved by that channel's own beam.
        expected = target_beam.deconvolve(bm).as_kernel(pixscale, x_size=5,
                                                        y_size=5)
        expected.normalize()
        np.testing.assert_almost_equal(expected.array,
                                       conv_cube.filled_data[ii,:,:].value)
def test_beams_convolution_equal():
    """A channel whose beam already equals the target passes through unchanged."""
    cube, data = cube_and_raw('522_delta_beams.fits')
    # Only checking that the equal beam case is handled correctly.
    # Fake the beam in the first channel. Then ensure that the first channel
    # has NOT been convolved.
    target_beam = Beam(1.0 * u.arcsec, 1.0 * u.arcsec, 0.0 * u.deg)
    cube.beams.major[0] = target_beam.major
    cube.beams.minor[0] = target_beam.minor
    cube.beams.pa[0] = target_beam.pa
    conv_cube = cube.convolve_to(target_beam)
    np.testing.assert_almost_equal(cube.filled_data[0].value,
                                   conv_cube.filled_data[0].value)
def test_reproject():
    """Reprojection onto a galactic SIN header yields the requested shape."""
    pytest.importorskip('reproject')
    cube, data = cube_and_raw('adv.fits')
    wcs_in = WCS(cube.header)
    # Target WCS: galactic coordinates with a SIN projection; the spectral
    # (third) axis is carried over from the input unchanged.
    wcs_out = wcs_in.deepcopy()
    wcs_out.wcs.ctype = ['GLON-SIN', 'GLAT-SIN', wcs_in.wcs.ctype[2]]
    wcs_out.wcs.crval = [134.37608, -31.939241, wcs_in.wcs.crval[2]]
    wcs_out.wcs.crpix = [2., 2., wcs_in.wcs.crpix[2]]
    header_out = cube.header
    header_out['NAXIS1'] = 4
    header_out['NAXIS2'] = 5
    header_out['NAXIS3'] = cube.shape[0]
    header_out.update(wcs_out.to_header())
    result = cube.reproject(header_out)
    # Output shape is (spectral, NAXIS2, NAXIS1).
    assert result.shape == (cube.shape[0], 5, 4)
def test_spectral_smooth():
    """Gaussian spectral smoothing of a delta matches the 1D kernel,
    with and without the memmap code path."""
    cube, _ = cube_and_raw('522_delta.fits')
    reference = convolution.Gaussian1DKernel(1.0, x_size=5).array
    for memmap in (False, True):
        smoothed = cube.spectral_smooth(kernel=convolution.Gaussian1DKernel(1.0),
                                        use_memmap=memmap)
        np.testing.assert_almost_equal(smoothed[:, 0, 0].value, reference, 4)
def test_spectral_smooth_4cores():
    """Parallel and serial smoothing variants all reproduce the kernel."""
    pytest.importorskip('joblib')
    cube, _ = cube_and_raw('522_delta.fits')
    reference = convolution.Gaussian1DKernel(1.0, x_size=5).array
    # Three configurations: parallel with memmap, non-parallel via
    # use_memmap=False, and num_cores=4 with parallel=False (contradictory,
    # but it must behave the same as serial).
    variants = ({'num_cores': 4, 'use_memmap': True},
                {'num_cores': 4, 'use_memmap': False},
                {'num_cores': 4, 'parallel': False})
    for extra_kwargs in variants:
        smoothed = cube.spectral_smooth(kernel=convolution.Gaussian1DKernel(1.0),
                                        **extra_kwargs)
        np.testing.assert_almost_equal(smoothed[:, 0, 0].value, reference, 4)
def test_spectral_smooth_fail():
    """Cubes with per-channel beams must refuse spectral smoothing."""
    cube, _ = cube_and_raw('522_delta_beams.fits')
    expected_msg = ("VaryingResolutionSpectralCubes can't be "
                    "spectrally smoothed. Convolve to a "
                    "common resolution with `convolve_to` before "
                    "attempting spectral smoothed.")
    with pytest.raises(AttributeError) as exc:
        cube.spectral_smooth(kernel=convolution.Gaussian1DKernel(1.0))
    assert exc.value.args[0] == expected_msg
def test_spectral_interpolate():
    """Interpolating a delta onto channel midpoints halves its amplitude."""
    cube, _ = cube_and_raw('522_delta.fits')
    wcs_before = cube.wcs.deepcopy()
    # Target grid: the midpoint between each pair of adjacent channels.
    midpoints = (cube.spectral_axis[1:] + cube.spectral_axis[:-1]) / 2.
    result = cube.spectral_interpolate(spectral_grid=midpoints)
    np.testing.assert_almost_equal(result[:, 0, 0].value,
                                   [0.0, 0.5, 0.5, 0.0])
    # The operation must not mutate the input cube's WCS.
    assert cube.wcs.wcs.compare(wcs_before.wcs)
def test_spectral_interpolate_with_fillvalue():
    """Target channels outside the spectral range are set to ``fill_value``."""
    cube, _ = cube_and_raw('522_delta.fits')
    # Four target channels stepping off the low end of the spectral axis.
    step = cube.spectral_axis[1] - cube.spectral_axis[0]
    off_grid = cube.spectral_axis[0] - step * np.linspace(1, 4, 4)
    result = cube.spectral_interpolate(spectral_grid=off_grid,
                                       fill_value=42)
    np.testing.assert_almost_equal(result[:, 0, 0].value,
                                   np.ones(4) * 42)
def test_spectral_interpolate_fail():
    """A cube with per-channel beams cannot be spectrally interpolated; verify
    the exact error raised."""
    cube, data = cube_and_raw('522_delta_beams.fits')
    with pytest.raises(AttributeError) as exc:
        cube.spectral_interpolate(5)
    assert exc.value.args[0] == ("VaryingResolutionSpectralCubes can't be "
                                 "spectrally interpolated.  Convolve to a "
                                 "common resolution with `convolve_to` before "
                                 "attempting spectral interpolation.")
def test_spectral_interpolate_with_mask():
    """Masked channels must come out NaN after interpolation, including when
    the spectral axis is reversed (CDELT3 < 0 on input)."""

    hdu = fits.open(path("522_delta.fits"))[0]

    # Swap the velocity axis so indiff < 0 in spectral_interpolate
    hdu.header["CDELT3"] = - hdu.header["CDELT3"]

    cube = SpectralCube.read(hdu)

    mask = np.ones(cube.shape, dtype=bool)
    mask[:2] = False  # mask out the first two input channels

    masked_cube = cube.with_mask(mask)

    orig_wcs = cube.wcs.deepcopy()

    # midpoint between each position
    sg = (cube.spectral_axis[1:] + cube.spectral_axis[:-1])/2.

    result = masked_cube.spectral_interpolate(spectral_grid=sg[::-1])

    # The output makes CDELT3 > 0 (reversed spectral axis) so the masked
    # portion are the final 2 channels.
    # np.nan instead of np.NaN: the NaN alias was removed in NumPy 2.0.
    np.testing.assert_almost_equal(result[:, 0, 0].value,
                                   [0.0, 0.5, np.nan, np.nan])

    # input cube's WCS must be left untouched by the operation
    assert cube.wcs.wcs.compare(orig_wcs.wcs)
def test_spectral_interpolate_reversed():
    """Interpolating onto the reversed spectral axis must flip the output's
    spectral axis accordingly."""
    cube, data = cube_and_raw('522_delta.fits')
    orig_wcs = cube.wcs.deepcopy()
    # Reverse spectral axis
    sg = cube.spectral_axis[::-1]
    result = cube.spectral_interpolate(spectral_grid=sg)
    np.testing.assert_almost_equal(sg.value, result.spectral_axis.value)
def test_convolution_2D():
    """Convolving a 1" delta-image projection to a 1.8027..." beam should
    produce the (normalised) 1.5" Gaussian kernel and tag the result with the
    target beam."""
    proj, hdu = load_projection("55_delta.fits")

    # 1" convolved with 1.5" -> 1.8027....
    target_beam = Beam(1.802775637731995*u.arcsec, 1.802775637731995*u.arcsec,
                       0*u.deg)

    conv_proj = proj.convolve_to(target_beam)

    # kernel width in pixels: FWHM -> sigma, divided by the pixel scale
    expected = convolution.Gaussian2DKernel((1.5*u.arcsec /
                                             beam.SIGMA_TO_FWHM /
                                             (5.555555555555e-4*u.deg)).decompose().value,
                                            x_size=5, y_size=5,
                                           )

    expected.normalize()

    np.testing.assert_almost_equal(expected.array,
                                   conv_proj.value)
    assert conv_proj.beam == target_beam
def test_nocelestial_convolution_2D_fail():
    """A moment taken along a spatial axis has no celestial WCS; convolve_to
    must refuse it with WCSCelestialError."""
    cube, data = cube_and_raw('255_delta.fits')

    # axis=1 collapses a *spatial* axis, leaving spectral x spatial only
    proj = cube.moment0(axis=1)

    test_beam = Beam(1.0 * u.arcsec)

    with pytest.raises(WCSCelestialError) as exc:
        proj.convolve_to(test_beam)

    assert exc.value.args[0] == ("WCS does not contain two spatial axes.")
def test_reproject_2D():
    """Reproject a projection onto a galactic SIN grid; output takes the
    target shape and keeps the source beam."""
    pytest.importorskip('reproject')

    proj, hdu = load_projection("55.fits")

    wcs_in = WCS(proj.header)

    wcs_out = wcs_in.deepcopy()
    wcs_out.wcs.ctype = ['GLON-SIN', 'GLAT-SIN']
    wcs_out.wcs.crval = [134.37608, -31.939241]
    wcs_out.wcs.crpix = [2., 2.]

    # NOTE(review): presumably proj.header returns a copy; if it returns the
    # live header these updates would mutate proj -- confirm.
    header_out = proj.header
    header_out['NAXIS1'] = 4
    header_out['NAXIS2'] = 5
    header_out.update(wcs_out.to_header())

    result = proj.reproject(header_out)

    assert result.shape == (5, 4)
    assert result.beam == proj.beam
def test_nocelestial_reproject_2D_fail():
    """Reprojection of a non-celestial (spectral x spatial) slice must raise
    WCSCelestialError."""
    pytest.importorskip('reproject')

    cube, data = cube_and_raw('255_delta.fits')

    # axis=1 collapses a spatial axis, so the result has no celestial WCS
    proj = cube.moment0(axis=1)

    with pytest.raises(WCSCelestialError) as exc:
        proj.reproject(cube.header)

    assert exc.value.args[0] == ("WCS does not contain two spatial axes.")
@pytest.mark.parametrize('use_memmap', (True,False))
def test_downsample(use_memmap):
    """downsample_axis must block-average along the chosen axis, padding the
    last partial block unless truncate=True drops it."""
    cube, data = cube_and_raw('255.fits')

    # factor 2 along a length-2 axis collapses it to a single plane
    dscube = cube.downsample_axis(factor=2, axis=0, use_memmap=use_memmap)

    expected = data.mean(axis=0)

    np.testing.assert_almost_equal(expected[None,:,:],
                                   dscube.filled_data[:].value)

    dscube = cube.downsample_axis(factor=2, axis=1, use_memmap=use_memmap)

    # axis 1 has length 5: two full pairs plus a trailing singleton block
    expected = np.array([data[:,:2,:].mean(axis=1),
                         data[:,2:4,:].mean(axis=1),
                         data[:,4:,:].mean(axis=1), # just data[:,4,:]
                        ]).swapaxes(0,1)
    assert expected.shape == (2,3,5)
    assert dscube.shape == (2,3,5)

    np.testing.assert_almost_equal(expected,
                                   dscube.filled_data[:].value)

    # truncate=True drops the incomplete trailing block instead of keeping it
    dscube = cube.downsample_axis(factor=2, axis=1, truncate=True,
                                  use_memmap=use_memmap)
    expected = np.array([data[:,:2,:].mean(axis=1),
                         data[:,2:4,:].mean(axis=1),
                        ]).swapaxes(0,1)
    np.testing.assert_almost_equal(expected,
                                   dscube.filled_data[:].value)
| 2.0625 | 2 |
disk1.py | ricaportela/disk-usage | 0 | 12763107 | <gh_stars>0
from matplotlib import pyplot as plt
def plot_data(labels=('Used', 'Available', 'Use%'),
              sizes=(215, 210, 134),
              colors=('gold', 'yellowgreen', 'lightcoral'),
              explode=(0.1, 0, 0)):
    """Draw a pie chart of disk usage.

    All parameters default to the original hard-coded demo values, so a
    bare ``plot_data()`` call behaves exactly as before.

    labels  -- one caption per slice
    sizes   -- slice magnitudes (matplotlib normalises them to fractions)
    colors  -- one colour name per slice
    explode -- per-slice radial offset; 0.1 pops the first slice out

    NOTE(review): the original docstring claimed the values came from a
    JSON file, but they were hard-coded; they are now parameters instead.
    """
    plt.title('Filesystem'+'\n'+'Size of '+'Mounted on ')
    # Plot
    plt.pie(sizes, explode=explode, labels=labels,
            colors=colors, autopct='%1.1f%%',
            shadow=True, startangle=140)
    plt.axis('equal')  # equal aspect ratio so the pie is a circle
    plt.show()


if __name__ == ("__main__"):
    plot_data()
| 3.28125 | 3 |
apps/make-zero-based.py | ShabnamSheikhha/ligra | 0 | 12763108 | <filename>apps/make-zero-based.py
"""Convert a 1-based edge list (MatrixMarket-like) into plain "src dst" lines.

Reads ``sys.argv[1]``, skips ``%`` comment lines and the first non-comment
line (the node/edge count header), and writes one ``src dst`` pair per line
to ``sys.argv[2]``.

NOTE(review): despite the script's name, the 1 -> 0 index shift is
commented out below, so indices are copied through unchanged -- confirm
whether the decrement should be re-enabled.
"""
import sys


def convert(in_name, out_name):
    """Copy the edge list from *in_name* to *out_name* (see module docstring)."""
    # `with` guarantees both files are closed even on error (the original
    # leaked them on any exception).
    with open(in_name, 'r') as in_file, open(out_name, 'w') as out_file:
        firstline = True
        for line in in_file:
            # robustness: tolerate blank lines (the original crashed on them)
            if not line.strip():
                continue
            # skip the comments
            if line[0] == "%":
                continue
            # ignore the first line (contains number of nodes and edges)
            if firstline:
                firstline = False
                continue
            src, dst = map(int, line.split())
            #src -= 1
            #dst -= 1
            out_file.write(str(src) + " " + str(dst) + "\n")


if __name__ == "__main__":
    convert(sys.argv[1], sys.argv[2])
| 2.96875 | 3 |
test/mm.py | justrypython/utils | 0 | 12763109 | <filename>test/mm.py
#encoding:UTF-8
import os
import random
import numpy as np
import matplotlib.pyplot as plt
def get_ratio(pos):
    """Build a vectorised per-trade multiplier function for stake sizes *pos*.

    pos -- iterable of stake fractions (e.g. 0.01 == 1% per trade); stored
           as an (n, 1) column so it broadcasts across trial columns.
    Returns a function that maps an array of signed draws to the per-stake
    equity multipliers ``1 + (+/-1) * pos``.
    """
    pos = np.array(pos).reshape((-1, 1))
    def _get_ratio(*args):
        pos_list = pos
        ratios = args[0]
        # NOTE(review): the input array is mutated in place (quantised to
        # +/-1, then sign-flipped) -- callers must not reuse it afterwards.
        ratios[ratios>=0] = 1
        ratios[ratios<0] = -1
        ratios *= -1
        # replicate the sign row once per stake size, then broadcast with pos
        ratios = np.tile(ratios, (len(pos_list), 1))
        return 1 + ratios * pos
    return _get_ratio
def mm(pos, ratio, init_m=10000, t=1000):
    """Vectorised Monte-Carlo money-management run.

    pos    -- stake fractions per trade
    ratio  -- win probabilities (one row of draws per entry)
    init_m -- starting capital
    t      -- number of trades
    Returns cumulative equity curves (init_m times the running product of
    per-trade multipliers).
    """
    n_rows = len(ratio)
    draws = np.random.random((n_rows, t))
    draws -= np.array(ratio).reshape((n_rows, 1))
    multipliers = np.apply_along_axis(get_ratio(pos), 0, draws)
    return init_m * np.cumprod(multipliers, axis=-1)
def mm1(pos=0.01, ratio=0.53, init_m=10000, t=1000):
    """Simulate one equity curve: each trade wins with probability *ratio*
    (gain factor 1+pos) or loses (factor 1-pos).

    Returns a list of t+1 balances, starting with init_m.
    """
    gain, loss = 1 + pos, 1 - pos
    balance = init_m
    curve = [balance]
    for _ in range(t):
        # draw < ratio is a win; draw >= ratio is a loss
        balance *= gain if np.random.random() < ratio else loss
        curve.append(balance)
    return curve
def mm2(pos=[0.01, 0.02, 0.04, 0.08], ratio=0.53, init_m=10000, t=1000):
    """Simulate equity curves for several stake fractions driven by one
    shared stream of win/loss draws.

    Returns a list of t+1 rows; row i holds the balances after trade i for
    each stake fraction in *pos*.
    """
    gain = 1 + np.array(pos)
    loss = 1 - np.array(pos)
    balances = np.array([init_m] * len(pos))
    curves = [[init_m] * len(pos)]
    for _ in range(t):
        # one draw decides win/loss for every stake size simultaneously
        balances = balances * (gain if np.random.random() < ratio else loss)
        curves.append(balances)
    return curves
def showmm():
    """Plot one vectorised run of mm() for four stake sizes (red..yellow)."""
    results1 = mm([0.01, 0.02, 0.04, 0.08], [0.52])
    # drop the singleton ratio axis so rows index the stake sizes
    results1 = np.squeeze(results1)
    plt.plot(results1[0], 'r')
    plt.plot(results1[1], 'b')
    plt.plot(results1[2], 'g')
    plt.plot(results1[3], 'y')
    plt.show()
def showmm1():
    """Endlessly plot fresh mm1() runs for four stake sizes.

    Loops forever; each iteration blocks in plt.show() until the window
    is closed.
    """
    while True:
        results1 = mm1(0.01)
        results2 = mm1(0.02)
        results3 = mm1(0.04)
        results4 = mm1(0.08)
        plt.plot(results1, 'r')
        plt.plot(results2, 'b')
        plt.plot(results3, 'g')
        plt.plot(results4, 'y')
        plt.show()
def showmm2():
    """Endlessly plot fresh mm2() runs (columns are the four stake sizes)."""
    while True:
        results = np.array(mm2())
        plt.plot(results[:, 0], 'r')
        plt.plot(results[:, 1], 'b')
        plt.plot(results[:, 2], 'g')
        plt.plot(results[:, 3], 'y')
        plt.show()
def showmm2_1(ratio, times):
    """Sweep stake fractions 0.001..0.299 over 1000 mm2() runs and report
    which fraction maximises the mean log-return and the mean-minus-std
    log-return (a rough risk-adjusted optimum)."""
    result = []
    print('for ratio %.2f %d times'%(ratio, times))
    for i in range(1000):
        results = np.array(mm2(np.arange(0.001, 0.3, 0.001), ratio=ratio, t=times))
        result.append(results[-1])  # keep only the final balances of each run
    # log-returns relative to the 10000 starting capital
    result = np.log(np.array(result)/10000)
    #plt.plot(result[:, 0], 'r')
    #plt.plot(result[:, 1], 'b')
    #plt.plot(result[:, 2], 'g')
    #plt.plot(result[:, 3], 'y')
    #plt.show()
    #plt.plot(result.mean(axis=0), 'r')
    #plt.plot(result.std(axis=0), 'g')
    #plt.show()
    a = np.argmax(result.mean(axis=0))
    b = np.argmax(result.mean(axis=0)-result.std(axis=0))
    # column index k corresponds to stake fraction 0.001*(k+1)
    print('the max mean is at %f, and max mean is %f'%(0.001*(a+1), np.max(result.mean(axis=0))))
    print('the max diff is at %f, and max diff is %f'%(0.001*(b+1), np.max(result.mean(axis=0)-result.std(axis=0))))
    print('end')
def showmm2_2():
    """Run the stake-fraction sweep for win ratios 0.50..0.59 at two trade
    counts (1000 and 2000)."""
    for ratio in np.arange(0.50, 0.6, 0.01):
        #showmm2_1(ratio, 100)
        #showmm2_1(ratio, 200)
        #showmm2_1(ratio, 400)
        showmm2_1(ratio, 1000)
        showmm2_1(ratio, 2000)
        print('#'*70)
def showmm2_3():
    """Endlessly plot mm2() runs for five fixed stake fractions and print the
    final log-returns; blocks in plt.show() each iteration."""
    while True:
        result = []
        results = np.array(mm2([0.01, 0.02, 0.04, 0.057, 0.08], ratio=0.53, t=2000))
        result.append(results[-1])  # final balances of this run
        result = np.log(np.array(result)/10000)
        plt.plot(results[:, 0], 'r')
        plt.plot(results[:, 1], 'b')
        plt.plot(results[:, 2], 'g')
        plt.plot(results[:, 3], 'y')
        plt.plot(results[:, 4], 'm')
        plt.show()
        print(result)
def main():
    """Entry point: run the interactive plotting loop."""
    #showmm2_2()
    showmm2_3()

if __name__ == '__main__':
    main()
string_op.py | princejvm/python | 0 | 12763110 | <reponame>princejvm/python
# Codecademy-style string exercises.
# Every `print` below takes a single expression, so the parenthesised form
# is valid under BOTH Python 2 (parens around one value are a no-op) and
# Python 3 (where the statement form was a SyntaxError).

# Set the variable brian on line 3!
brian = "Hello life!"

# Assign your variables below, each on its own line!
caesar = "Graham"
praline = "John"
viking = "Teresa"

# Put your variables above this line

print(caesar)
print(praline)
print(viking)

# The string below is broken. Fix it using the escape backslash!
str1 = "This isn't flying, this is falling with style!"
str2 = 'This isn\'t flying, this is falling with style!'

"""
The string "PYTHON" has six characters,
numbered 0 to 5, as shown below:

+---+---+---+---+---+---+
| P | Y | T | H | O | N |
+---+---+---+---+---+---+
0 1 2 3 4 5

So if you wanted "Y", you could just type
"PYTHON"[1] (always start counting from 0!)
"""
fifth_letter = "MONTY"[4]

print(fifth_letter)

# inbuilt functions on string
parrot = "Norwegian Blue"

print(len(parrot))
print(parrot.lower())
print(parrot.upper())

"""Declare and assign your variable on line 4,
then call your method on line 5!"""
pi = 3.14
print(str(pi))

"""Assign the string "Ping!" to
the variable the_machine_goes on
line 5, then print it out on line 6!"""
the_machine_goes = "Ping!"
print(the_machine_goes)

# Print the concatenation of "Spam and eggs" on line 3!
print("Spam "+"and "+"eggs")

# Turn 3.14 into a string on line 3!
print("The value of pi is around " + str(3.14))

string_1 = "Camelot"
string_2 = "place"

print("Let's not go to %s. 'Tis a silly %s." % (string_1, string_2))
| 4.09375 | 4 |
python-pj/scrapy_spider/hentai.py | RabitDash/practice | 0 | 12763111 | import requests
# import re
from bs4 import BeautifulSoup
import time
# Gallery index URL template; `{}` is filled with the 1-based page number.
courl = "https://e-hentai.org/?page={}"
def getContent(url):
    """Fetch *url* over HTTP and return its parsed BeautifulSoup document."""
    response = requests.get(url)
    return BeautifulSoup(response.text, 'lxml')
def getImage(soup):
    """Extract gallery name, thumbnail URL and gallery link from *soup* and
    append them to the module-level log file ``f``.

    NOTE(review): the `src` rows (div.it5) actually hold the anchor (name +
    link) and the `name` rows (div.it2) hold the thumbnail <img>; the
    variable names are swapped relative to their content -- confirm.
    """
    src = soup.find_all('div', {'class': 'it5'})
    name = soup.find_all('div', {'class': 'it2'})
    print(src, name)
    for (i, j) in zip(src, name):
        # `or` short-circuits; the original used bitwise `|`, which always
        # evaluated both operands.
        if (j.find('img') is None) or (i is None):
            continue
        f.write("name: "+i.find('a').get_text())  # get name
        f.write("title_pic link: "+j.find('img').get('src'))  # get title_pic link
        f.write("manga link: "+i.find('a').get('href'))  # get manga link
        f.write("\n")
    return None
# Number of index pages to scrape.
page_num = 14333
# NOTE(review): never closed if the loop raises; consider a `with` block.
f = open('hentai_log', 'a')
for i in range(1, page_num + 1):
    soup = getContent(courl.format(i))
    print(soup)
    getImage(soup)
    print("fetching content of page{}".format(i))
    time.sleep(1)  # throttle: one request per second
# print("total number:{}".format(page_num))
f.close()
| 3.078125 | 3 |
Curso_em_Video_py3/ex034.py | Rodrigo98Matos/Projetos_py | 1 | 12763112 | s = float(input('Salário: '))
if s <= 1250:
s = s * 1.15
if s > 1250:
s = s * 1.1
print('Novo salário: R${}'.format(s))
| 3.5625 | 4 |
sasmodels/models/lamellar_hg.py | pkienzle/sasmodels | 11 | 12763113 | # Note: model title and parameter table are inserted automatically
r"""
This model provides the scattering intensity, $I(q)$, for a lyotropic lamellar
phase where a random distribution in solution are assumed. The SLD of the head
region is taken to be different from the SLD of the tail region.
Definition
----------
The scattering intensity $I(q)$ is
.. math::
I(q) = 2\pi\frac{\text{scale}}{2(\delta_H + \delta_T)} P(q) \frac{1}{q^2}
The form factor $P(q)$ is
.. math::
P(q) = \frac{4}{q^2}
        \left\lbrace
            \Delta \rho_H
            \left[\sin(q(\delta_H + \delta_T)) - \sin(q\delta_T)\right]
            + \Delta\rho_T\sin(q\delta_T)
        \right\rbrace^2
where $\delta_T$ is *length_tail*, $\delta_H$ is *length_head*,
$\Delta\rho_H$ is the head contrast (*sld_head* $-$ *sld_solvent*),
and $\Delta\rho_T$ is tail contrast (*sld* $-$ *sld_solvent*).
The total thickness of the lamellar sheet is
$\delta_H + \delta_T + \delta_T + \delta_H = 2(\delta_H + \delta_T)$. Note that in a non aqueous solvent
the chemical "head" group may be the "Tail region" and vice-versa.
The 2D scattering intensity is calculated in the same way as 1D, where
the $q$ vector is defined as
.. math:: q = \sqrt{q_x^2 + q_y^2}
References
----------
#. <NAME>, <NAME>, and <NAME>, *J. Phys. II France*, 3, (1993) 487-502
#. <NAME>, <NAME>, <NAME>, <NAME>,
*J. Phys. Chem. B*, 105, (2001) 11081-11088
Authorship and Verification
----------------------------
* **Author:**
* **Last Modified by:**
* **Last Reviewed by:** <NAME> and <NAME> **Date** April 17, 2014
"""
import numpy as np
from numpy import inf
name = "lamellar_hg"
title = "Random lamellar phase with Head and Tail Groups"
description = """\
[Random lamellar phase with Head and Tail Groups]
I(q)= 2*pi*P(q)/(2(H+T)*q^(2)), where
P(q)= see manual
layer thickness =(H+T+T+H) = 2(Head+Tail)
sld = Tail scattering length density
sld_head = Head scattering length density
sld_solvent = solvent scattering length density
background = incoherent background
scale = scale factor
"""
category = "shape:lamellae"
# pylint: disable=bad-whitespace, line-too-long
# ["name", "units", default, [lower, upper], "type","description"],
parameters = [["length_tail", "Ang", 15, [0, inf], "volume", "Tail thickness ( total = H+T+T+H)"],
["length_head", "Ang", 10, [0, inf], "volume", "Head thickness"],
["sld", "1e-6/Ang^2", 0.4, [-inf,inf], "sld", "Tail scattering length density"],
["sld_head", "1e-6/Ang^2", 3.0, [-inf,inf], "sld", "Head scattering length density"],
["sld_solvent", "1e-6/Ang^2", 6, [-inf,inf], "sld", "Solvent scattering length density"]]
# pylint: enable=bad-whitespace, line-too-long
# No volume normalization despite having a volume parameter
# This should perhaps be volume normalized?
form_volume = """
return 1.0;
"""
Iq = """
const double qsq = q*q;
const double drh = sld_head - sld_solvent;
const double drt = sld - sld_solvent; //correction 13FEB06 by L.Porcar
const double qT = q*length_tail;
double Pq, inten;
Pq = drh*(sin(q*(length_head+length_tail))-sin(qT)) + drt*sin(qT);
Pq *= Pq;
Pq *= 4.0/(qsq);
inten = 2.0e-4*M_PI*Pq/qsq;
// normalize by the bilayer thickness
inten /= 2.0*(length_head+length_tail);
return inten;
"""
def random():
    """Return a random parameter set for the model."""
    # total bilayer half-thickness, log-uniform over 10..10^4 Angstrom
    total = 10**np.random.uniform(1, 4)
    # split the total between head and tail at a uniform random fraction
    head = total * np.random.uniform(0, 1)
    return dict(
        length_head=head,
        length_tail=total - head,
    )
# Regression tests: [{parameters}, [q values], [expected I(q)]]
tests = [
    [{'scale': 1.0, 'background': 0.0, 'length_tail': 15.0, 'length_head': 10.0,
      'sld': 0.4, 'sld_head': 3.0, 'sld_solvent': 6.0},
     [0.001], [653143.9209]],
]
# ADDED by: RKH  ON: 18Mar2016 converted from sasview previously, now renaming everything & sorting the docs
| 2.4375 | 2 |
bip_utils/ss58/ss58.py | MIPPLTeam/bip_utils | 149 | 12763114 | <reponame>MIPPLTeam/bip_utils
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Module for SS58 decoding/encoding.
Reference: https://github.com/paritytech/substrate/wiki/External-Address-Format-(SS58).
"""
# Imports
from typing import Tuple
from bip_utils.ss58.ss58_ex import SS58ChecksumError
from bip_utils.base58 import Base58Decoder, Base58Encoder
from bip_utils.utils.misc import ConvUtils, CryptoUtils
class SS58Const:
    """Class container for SS58 constants."""

    # Max format for simple account (fits in a single prefix byte)
    SIMPLE_ACCOUNT_FORMAT_MAX_VAL: int = 63
    # Format maximum value (14 bits, two-byte prefix encoding)
    FORMAT_MAX_VAL: int = 16383
    # Reserved formats (per the SS58 registry; must not be used)
    RESERVED_FORMATS: Tuple[int, int] = (46, 47)

    # Data length in bytes (a 32-byte public key / account id)
    DATA_BYTE_LEN: int = 32

    # Checksum length in bytes
    CHECKSUM_BYTE_LEN: int = 2
    # Checksum prefix mixed into the Blake2b hash
    CHECKSUM_PREFIX: bytes = b"SS58PRE"
class SS58Utils:
    """Class container for SS58 utility functions."""

    @staticmethod
    def ComputeChecksum(data_bytes: bytes) -> bytes:
        """
        Compute SS58 checksum.

        Args:
            data_bytes (bytes): Data bytes

        Returns:
            bytes: Computed checksum
        """
        # Blake2b over the fixed prefix plus payload, truncated to 2 bytes
        digest = CryptoUtils.Blake2b(SS58Const.CHECKSUM_PREFIX + data_bytes)
        return digest[:SS58Const.CHECKSUM_BYTE_LEN]
class SS58Encoder:
    """SS58 encoder class. It provides methods for encoding to SS58 format."""

    @staticmethod
    def Encode(data_bytes: bytes,
               ss58_format: int) -> str:
        """
        Encode bytes into a SS58 string.

        Args:
            data_bytes (bytes): Data bytes (32-byte length)
            ss58_format (int) : SS58 format

        Returns:
            str: SS58 encoded string

        Raises:
            ValueError: If parameters are not valid
        """

        # Check parameters
        if len(data_bytes) != SS58Const.DATA_BYTE_LEN:
            raise ValueError(f"Invalid data length ({len(data_bytes)})")
        if ss58_format < 0 or ss58_format > SS58Const.FORMAT_MAX_VAL:
            raise ValueError(f"Invalid SS58 format ({ss58_format})")
        if ss58_format in SS58Const.RESERVED_FORMATS:
            raise ValueError(f"Invalid SS58 format ({ss58_format})")

        # Simple account: formats 0..63 encode directly as one prefix byte
        if ss58_format <= SS58Const.SIMPLE_ACCOUNT_FORMAT_MAX_VAL:
            ss58_format_bytes = ConvUtils.IntegerToBytes(ss58_format)
        # Full address: 14-bit format packed into two bytes; the first byte
        # carries 0b01 in its top bits to mark the two-byte form
        else:
            # 0b00HHHHHH_MMLLLLLL -> (0b01LLLLLL, 0bHHHHHHMM)
            ss58_format_bytes = bytes([
                ((ss58_format & 0x00FC) >> 2) | 0x0040,
                (ss58_format >> 8) | ((ss58_format & 0x0003) << 6)
            ])

        # Get payload
        payload = ss58_format_bytes + data_bytes
        # Compute checksum
        checksum = SS58Utils.ComputeChecksum(payload)
        # Encode: Base58 of prefix + data + 2-byte checksum
        return Base58Encoder.Encode(payload + checksum)
class SS58Decoder:
    """SS58 decoder class. It provides methods for decoding SS58 format."""

    @staticmethod
    def Decode(data_str: str) -> Tuple[int, bytes]:
        """
        Decode bytes from a SS58 string.

        Args:
            data_str (string): Data string

        Returns:
            tuple: SS58 format and data bytes

        Raises:
            SS58ChecksumError: If checksum is not valid
            ValueError: If the string is not a valid SS58 format
        """

        # Decode string
        dec_bytes = Base58Decoder.Decode(data_str)

        # Full address: bit 6 of the first byte marks the two-byte prefix
        # form; undo the (0b01LLLLLL, 0bHHHHHHMM) packing used by Encode
        if dec_bytes[0] & 0x40:
            ss58_format_len = 2
            ss58_format = ((dec_bytes[0] & 0x3F) << 2) | (dec_bytes[1] >> 6) | \
                          ((dec_bytes[1] & 0x3F) << 8)
        # Simple account: single prefix byte is the format itself
        else:
            ss58_format_len = 1
            ss58_format = dec_bytes[0]

        # Check format
        if ss58_format in SS58Const.RESERVED_FORMATS:
            raise ValueError(f"Invalid SS58 format ({ss58_format})")

        # Get back data and checksum (checksum is the trailing 2 bytes)
        data_bytes = dec_bytes[ss58_format_len:-SS58Const.CHECKSUM_BYTE_LEN]
        checksum_bytes = dec_bytes[-SS58Const.CHECKSUM_BYTE_LEN:]

        # Check data length
        if len(data_bytes) != SS58Const.DATA_BYTE_LEN:
            raise ValueError(f"Invalid data length ({len(data_bytes)})")

        # Compute checksum over prefix + data (everything but the checksum)
        comp_checksum = SS58Utils.ComputeChecksum(dec_bytes[:-SS58Const.CHECKSUM_BYTE_LEN])

        # Verify checksum
        if checksum_bytes != comp_checksum:
            raise SS58ChecksumError(
                f"Invalid checksum (expected {ConvUtils.BytesToHexString(comp_checksum)}, "
                f"got {ConvUtils.BytesToHexString(checksum_bytes)})"
            )

        return ss58_format, data_bytes
| 1.4375 | 1 |
aldryn_events/cms_plugins.py | what-digital/aldryn-events | 25 | 12763115 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from distutils.version import LooseVersion
from django.contrib.sites.shortcuts import get_current_site
from django.utils import timezone
from django.utils.dates import MONTHS
from django.utils.translation import ugettext_lazy as _, get_language_from_request
from cms import __version__ as cms_version
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .utils import (
build_calendar, is_valid_namespace_for_language,
get_valid_languages,
)
from .models import (
UpcomingPluginItem, Event, EventListPlugin, EventCalendarPlugin
)
from .forms import (
UpcomingPluginForm, EventListPluginForm, EventCalendarPluginForm,
)
# True when running under django CMS 3.3.0 or newer (affects caching API).
CMS_GTE_330 = LooseVersion(cms_version) >= LooseVersion('3.3.0')

# Shown to admins when a plugin points at an Events config with no apphook.
NO_APPHOOK_ERROR_MESSAGE = _(
    'There is an error in plugin configuration: selected Events '
    'config is not available. Please switch to edit mode and '
    'change plugin app_config settings to use valid config. '
    'Also note that aldryn-events should be used at least once '
    'as an apphook for that config.')
class NameSpaceCheckMixin(object):
    """Plugin mixin that validates the configured Events namespace before
    rendering, flagging a configuration error for the template when the
    namespace cannot be reversed in any valid language."""

    def get_namespace(self, instance):
        """Return the namespace of the plugin's app config, or ''."""
        if instance.app_config_id and instance.app_config.namespace:
            return instance.app_config.namespace
        return ''

    def get_language(self, request):
        """Return the request language, preferring the URL path."""
        return get_language_from_request(request, check_path=True)

    def render(self, context, instance, placeholder):
        # translated filter the events, language set current language
        namespace = self.get_namespace(instance)
        language = self.get_language(context['request'])
        # NOTE(review): stored on self for subclasses' render() to reuse
        self.valid_languages = get_valid_languages(namespace, language)
        # check if we can reverse list view for configured namespace
        # if no prepare a message to admin users.
        valid = False
        for lang_code in self.valid_languages:
            if is_valid_namespace_for_language(namespace, lang_code):
                valid = True
                break
        if not valid:
            # add message, should be properly handled in template
            context['plugin_configuration_error'] = NO_APPHOOK_ERROR_MESSAGE
        return super(NameSpaceCheckMixin, self).render(
            context, instance, placeholder)
class AdjustableCacheMixin(object):
    """
    For django CMS < 3.3.0 installations, we have no choice but to disable the
    cache where there is time-sensitive information. However, in later CMS
    versions, we can configure it with `get_cache_expiration()`.
    """
    if not CMS_GTE_330:
        cache = False

    def get_cache_expiration(self, request, instance, placeholder):
        # Per-instance cache duration in seconds; 0 means no caching.
        return getattr(instance, 'cache_duration', 0)

    def get_fieldsets(self, request, obj=None):
        """
        Removes the cache_duration field from the displayed form if we're not
        using django CMS v3.3.0 or later.
        """
        # Bug fix: forward the actual `obj` (the original hard-coded
        # obj=None, bypassing any per-object fieldset logic in ancestors).
        fieldsets = super(AdjustableCacheMixin, self).get_fieldsets(
            request, obj=obj)
        if CMS_GTE_330:
            return fieldsets

        field = 'cache_duration'
        for fieldset in fieldsets:
            new_fieldset = [
                item for item in fieldset[1]['fields'] if item != field]
            fieldset[1]['fields'] = tuple(new_fieldset)
        return fieldsets
class UpcomingPlugin(NameSpaceCheckMixin, AdjustableCacheMixin,
                     CMSPluginBase):
    """Renders the next (or most recent past) events for the configured
    Events namespace; template is chosen by the instance's style."""
    render_template = False
    name = _('Upcoming or Past Events')
    module = _('Events')
    model = UpcomingPluginItem
    form = UpcomingPluginForm

    def render(self, context, instance, placeholder):
        context = super(UpcomingPlugin, self).render(context, instance,
                                                     placeholder)
        # Bail out early if the mixin flagged a broken namespace config.
        if context.get('plugin_configuration_error') is not None:
            return context
        context['instance'] = instance
        language = self.get_language(context['request'])
        namespace = self.get_namespace(instance)

        if instance.language not in self.valid_languages:
            events = Event.objects.none()
        else:
            events = Event.objects.namespace(namespace).language(language)
            events = events.translated(*self.valid_languages)
            if instance.past_events:
                events = events.past(count=instance.latest_entries)
            else:
                events = events.upcoming(count=instance.latest_entries)
        context['events'] = events
        return context

    def get_render_template(self, context, instance, placeholder):
        name = '%s/upcoming.html' % instance.style
        return 'aldryn_events/plugins/upcoming/%s' % name
class EventListCMSPlugin(NameSpaceCheckMixin, CMSPluginBase):
    """Renders the plugin instance's manually selected events, filtered to
    the configured namespace and valid languages."""
    render_template = False
    module = _('Events')
    name = _('List')
    model = EventListPlugin
    form = EventListPluginForm

    def render(self, context, instance, placeholder):
        context = super(EventListCMSPlugin, self).render(context, instance,
                                                         placeholder)
        # Bail out early if the mixin flagged a broken namespace config.
        if context.get('plugin_configuration_error') is not None:
            return context
        language = self.get_language(context['request'])
        namespace = self.get_namespace(instance)
        context['instance'] = instance

        if instance.language not in self.valid_languages:
            events = Event.objects.none()
        else:
            # restrict the hand-picked events to the plugin's namespace
            events = instance.events.namespace(namespace).language(language)
            events = events.translated(*self.valid_languages)
        context['events'] = events
        return context

    def get_render_template(self, context, instance, placeholder):
        return 'aldryn_events/plugins/list/%s/list.html' % instance.style
class CalendarPlugin(NameSpaceCheckMixin, AdjustableCacheMixin,
                     CMSPluginBase):
    """Renders a month calendar of events; the month/year come from the
    context (URL) or default to the current month."""
    render_template = 'aldryn_events/plugins/calendar.html'
    name = _('Calendar')
    module = _('Events')
    model = EventCalendarPlugin
    form = EventCalendarPluginForm

    def render(self, context, instance, placeholder):
        context = super(CalendarPlugin, self).render(context, instance,
                                                     placeholder)
        # Bail out early if the mixin flagged a broken namespace config.
        if context.get('plugin_configuration_error') is not None:
            return context
        namespace = self.get_namespace(instance)
        language = self.get_language(context['request'])
        site_id = getattr(get_current_site(context['request']), 'id', None)
        year = context.get('event_year')
        month = context.get('event_month')

        # fall back to "today" when the URL did not specify a month
        if not all([year, month]):
            year = str(timezone.now().date().year)
            month = str(timezone.now().date().month)

        current_date = datetime.date(int(year), int(month), 1)

        context['event_year'] = year
        context['event_month'] = month
        context['days'] = build_calendar(
            year, month, language, namespace, site_id)
        context['current_date'] = current_date
        # +35 days from the 1st always lands in the following month
        context['last_month'] = current_date + datetime.timedelta(days=-1)
        context['next_month'] = current_date + datetime.timedelta(days=35)
        context['calendar_label'] = '%s %s' % (MONTHS.get(int(month)), year)
        context['calendar_namespace'] = namespace
        return context
# Register the plugins with django CMS.
plugin_pool.register_plugin(CalendarPlugin)
plugin_pool.register_plugin(EventListCMSPlugin)
plugin_pool.register_plugin(UpcomingPlugin)
| 1.796875 | 2 |
exec_procedure.py | SigmaAdvancedAnalytics/hydra | 0 | 12763116 | <reponame>SigmaAdvancedAnalytics/hydra
def exec_procedure(session, proc_name, params):
    """Execute SQL Server stored procedure *proc_name* via SQLAlchemy *session*.

    params -- dict mapping parameter name -> value, rendered as "@name=value".
    Returns the rows of the final SELECT, which carries the procedure's
    integer return value.

    NOTE(review): both the procedure name and the parameter values are
    interpolated directly into the SQL text (no parameter binding), so this
    is only safe for trusted input -- confirm callers never pass
    user-controlled data.
    """
    sql_params = ",".join(["@{0}={1}".format(name, value) for name, value in params.items()])
    sql_string = """
        DECLARE @return_value int;
        EXEC @return_value = [dbo].[{proc_name}] {params};
        SELECT 'Return Value' = @return_value;
    """.format(proc_name=proc_name, params=sql_params)

    return session.execute(sql_string).fetchall()
| 2.4375 | 2 |
iot_detector.py | perygee/IoT-device-type-identification | 26 | 12763117 | import os
import numpy as np
import pandas as pd
import tensorflow as tf
def load_data(data_path, label_col, feature_cols=None):
    """Load a CSV and split it into features and labels.

    data_path    -- path to the CSV file
    label_col    -- name of the label column
    feature_cols -- optional list of feature column names; when omitted,
                    every column except *label_col* is used

    Returns (features, labels) as a (DataFrame, Series) pair.
    """
    # Load data csv with pandas and divide it to features and labels
    if feature_cols:
        data = pd.read_csv(os.path.abspath(data_path),
                           usecols=feature_cols + [label_col],
                           low_memory=False)
        features = data[feature_cols]
    else:
        data = pd.read_csv(os.path.abspath(data_path), usecols=None,
                           low_memory=False)
        # keyword axis=1: the positional form was removed in pandas 2.0
        features = data.drop(label_col, axis=1)
    labels = data[label_col]
    return features, labels
def create_dev_sess_regressor(sessions, is_dev_labels):
    """Build a regressor scoring the probability a session came from the
    device.

    Placeholder implementation (TODO in the original): the training data is
    ignored and the session value is simply scaled by 1/10.
    """
    return lambda sess: sess / 10
def classify_sess(dev_sess_regressor, threshold, sess):
    """Return 1 when the regressor's score for *sess* exceeds *threshold*,
    else 0 (strict inequality, matching the original)."""
    return int(dev_sess_regressor(sess) > threshold)
def create_dev_sess_classifier(dev_sess_regressor, threshold):
    """Bind *threshold* to the regressor, yielding a 0/1 session classifier."""
    def dev_sess_classifier(sess):
        # inlined classify_sess: 1 iff the score strictly exceeds threshold
        return 1 if dev_sess_regressor(sess) > threshold else 0
    return dev_sess_classifier
def find_opt_threshold(dev_sess_regressor, sessions, is_dev_labels):
    """Return the optimal classification threshold for the device.

    Placeholder: always returns 0.5 (see the TODO below).
    """
    # TODO: Implement this! returns optimal threshold for device classefication with given regressor
    return 0.5
def classify_seq(dev_sess_classifier, seq):
    """Majority vote: 1 when strictly more than half of the sessions in
    *seq* are classified as belonging to the device, else 0."""
    votes = sum(dev_sess_classifier(sess) for sess in seq)
    return 1 if votes > len(seq) / 2 else 0
def create_dev_seq_classifier(dev_sess_classifier):
    """Bind the per-session classifier into a majority-vote sequence
    classifier (1 iff most sessions in the sequence match the device)."""
    def dev_seq_classifier(seq):
        # inlined classify_seq: strict majority of per-session votes
        return 1 if sum(map(dev_sess_classifier, seq)) > len(seq) / 2 else 0
    return dev_seq_classifier
def classify_dev(dev_sess_classifier, opt_seq_len, sessions):
    """Return 1 if any window of *opt_seq_len* consecutive sessions is
    majority-classified as the device, else 0.

    Off-by-one fix: ``range(len - L + 1)`` so the final window is examined
    (the original skipped it, and checked *no* window at all when
    ``len(sessions) == opt_seq_len``). This matches the inclusive
    ``start + seq_len <= len`` bound used in find_opt_seq_len.
    """
    for start in range(len(sessions) - opt_seq_len + 1):
        if classify_seq(dev_sess_classifier, sessions[start:start + opt_seq_len]):
            return 1
    return 0
def create_dev_classifier(dev_sess_classifier, opt_seq_len):
    """Bind the session classifier and window length into a whole-device
    classifier over a list of sessions."""
    return lambda sessions: classify_dev(dev_sess_classifier, opt_seq_len,
                                         sessions)
def find_opt_seq_len(this_dev, dev_sess_classifier, dev_sess_dict):
    """Return the smallest sequence length with which *dev_sess_classifier*
    classifies every window of every device's sessions correctly.

    this_dev          -- name of the device this classifier was trained for
    dev_sess_classifier -- 0/1 per-session classifier for this_dev
    dev_sess_dict     -- mapping device name -> its session list
    """
    # Finds minimal seq length s.t accuracy=1 on all sessions
    opt_seq_len = 1
    # Find minimal sequence length s.t FPR=1 for all other devs
    for dev, dev_sess in dev_sess_dict.items():
        start = 1
        seq_len = 1
        while start + seq_len <= len(dev_sess):
            is_dev = dev == this_dev
            is_dev_pred = classify_seq(dev_sess_classifier, dev_sess[start:start + seq_len])
            if is_dev == is_dev_pred:
                start += 1
            else:
                # misclassified window: restart the scan with a longer window
                # (+= 2 presumably keeps the length odd so majority votes
                # cannot tie -- confirm)
                start = 1
                seq_len += 2
        opt_seq_len = max(seq_len, opt_seq_len)
    # Return minimal seq length s.t accuracy=1
    return opt_seq_len
# def find_opt_seq_len(dev_sess_classifier, dev_sessions, other_devs_sessions):
# # Finds minimal seq length s.t accuracy=1 on all sessions
# opt_seq_len = 1
# # Find minimal sequence length s.t TPR=1
# start = 0
# seq_len = 1
# while start + seq_len <= len(dev_sessions):
# if classify_seq(dev_sess_classifier, dev_sessions[start:start + seq_len]):
# start += 1
# else:
# start = 1
# seq_len += 2
# opt_seq_len = max(seq_len, opt_seq_len)
# # Find minimal sequence length s.t FPR=1 for all other devs
# for other_dev_sessions in other_devs_sessions:
# start = 1
# seq_len = 1
# while start+seq_len <= len(other_dev_sessions):
# if classify_seq(dev_sess_classifier, other_dev_sessions[start:start + seq_len]):
# start = 1
# seq_len += 2
# else:
# start += 1
# opt_seq_len = max(seq_len, opt_seq_len)
# # Return minimal seq length s.t accuracy=1
# return opt_seq_len
def classify_multi_dev(dev_cls_dict, dev_sessions):
    """Return the name of the first device whose classifier accepts
    *dev_sessions*, or None for an unknown device."""
    for name, accepts in dev_cls_dict.items():
        if accepts(dev_sessions):
            return name
    return None
def create_multi_dev_classifier(dev_cls_dict):
    """Bind the per-device classifier dict into one dispatch function that
    returns the matching device name or None."""
    def multi_dev_classifier(dev_sessions):
        # inlined classify_multi_dev: first accepting classifier wins
        for name, accepts in dev_cls_dict.items():
            if accepts(dev_sessions):
                return name
        return None
    return multi_dev_classifier
def is_eq(a):
    """Return an indicator function: 1 when its argument equals *a*, else 0."""
    def indicator(b):
        return 1 if a == b else 0
    return indicator
def create_iot_classifier(train, validation):
    """Train the full multi-device IoT classifier pipeline.

    train, validation -- DataFrames of session features plus a
    'device_category' label column.
    Returns a function mapping a list of sessions to a device name or None.
    """
    train_sessions = train.drop('device_category', axis=1)
    # Bug fix: the original omitted axis=1 here, which tries to drop a *row*
    # labelled 'device_category' and raises KeyError.
    validation_sessions = validation.drop('device_category', axis=1)
    devs = train['device_category'].unique()
    train_is_devt_dict = {dev: train['device_category'].apply(is_eq(dev)) for dev in devs}
    validation_is_devt_dict = {dev: validation['device_category'].apply(is_eq(dev)) for dev in devs}
    dev_sess_dict = {dev: sess for dev, sess in train.groupby('device_category')}
    dev_sess_reg_dict = {dev: create_dev_sess_regressor(train_sessions, train_is_devt_dict[dev]) for dev in devs}
    # Bug fix: the original iterated `for dev, is_dev in devs`, which tries
    # to unpack each device-name string and raises ValueError.
    opt_thr_dict = {dev: find_opt_threshold(dev_sess_reg_dict[dev], validation_sessions, validation_is_devt_dict[dev])
                    for dev in devs}
    dev_sess_cls_dict = {dev: create_dev_sess_classifier(dev_sess_reg_dict[dev], opt_thr_dict[dev]) for dev in devs}
    opt_seq_len_dict = {dev: find_opt_seq_len(dev, dev_sess_cls_dict[dev], dev_sess_dict) for dev in devs}
    dev_cls_dict = {dev: create_dev_classifier(dev_sess_cls_dict[dev], opt_seq_len_dict[dev]) for dev in devs}
    return create_multi_dev_classifier(dev_cls_dict)
# Load the pre-split datasets; only the 'ack' feature plus the label are read.
train = pd.read_csv(os.path.abspath('data/train.csv'), usecols=['ack', 'device_category'], low_memory=False)
validation = pd.read_csv(os.path.abspath('data/validation.csv'), usecols=['ack', 'device_category'], low_memory=False)
test = pd.read_csv(os.path.abspath('data/test.csv'), usecols=['ack', 'device_category'], low_memory=False)
classifier = create_iot_classifier(train, validation)
print('@@ DONE @@')
| 2.734375 | 3 |
contests_atcoder/abc172/abc172_e_after.py | takelifetime/competitive-programming | 0 | 12763118 | <gh_stars>0
# Standard competitive-programming prime modulus.
MOD = 10 ** 9 + 7
n, m = map(int, input().split())
def comb(n, r, p):
if r < 0 or n < r:
return 0
r = min(r, n - r)
return fact[n] * factinv[r] * factinv[n-r] % p
def perm(n, r, p):
if r < 0 or n < r:
return 0
return fact[n] * factinv[n-r] % p
p = MOD
N = m
fact = [1, 1] # fact[n] = (n! mod p)
factinv = [1, 1] # factinv[n] = ((n!)^(-1) mod p)
inv = [0, 1] # factinv 計算用
fact_append = fact.append
inv_append = inv.append
factinv_append = factinv.append
for i in range(2, N + 1):
fact_append(fact[-1] * i % p)
inv_append(-inv[p % i] * (p // i) % p)
factinv_append(factinv[-1] * inv[-1] % p)
ans = 0
for k in range(n + 1):
ans += (-1) ** k * comb(n, k, MOD) * perm(m, n, MOD) * perm(m - k, n - k, MOD) % MOD
print(ans % MOD) | 2.328125 | 2 |
CondTools/DT/test/popcon_keyconf_user.py | ckamtsikis/cmssw | 852 | 12763119 | import FWCore.ParameterSet.Config as cms
# CMSSW configuration: writes DT (Drift Tube) keyed configuration data via
# PopCon into local SQLite files.
process = cms.Process("TEST")
process.load("CondCore.DBCommon.CondDBCommon_cfi")
process.CondDBCommon.connect = 'sqlite_file:userconf.db'
process.CondDBCommon.DBParameters.authenticationPath = '.'
# Output service: the three records/tags written to the conditions DB.
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
    process.CondDBCommon,
    logconnect = cms.untracked.string('sqlite_file:log.db'),
    toPut = cms.VPSet(
        cms.PSet(
            record = cms.string('DTCCBConfigRcd'),
            tag = cms.string('conf_test'),
            timetype = cms.untracked.string('runnumber')
        ),
        cms.PSet(
            record = cms.string('keyedConfBricks'),
            tag = cms.string('DT_keyedConfBricks_V01'),
            timetype = cms.untracked.string('hash'),
            withWrapper = cms.untracked.bool(True),
            outOfOrder = cms.untracked.bool(True)
        ),
        cms.PSet(
            record = cms.string('keyedConfListIOV'),
            tag = cms.string('DT_keyedConfListIOV_V01'),
            timetype = cms.untracked.string('runnumber'),
            withWrapper = cms.untracked.bool(True),
            outOfOrder = cms.untracked.bool(False)
        )
    )
)
# Single dummy IOV so the analyzer runs exactly once.
process.source = cms.Source("EmptyIOVSource",
    timetype = cms.string('runnumber'),
    firstValue = cms.uint64(1),
    lastValue = cms.uint64(1),
    interval = cms.uint64(1)
)
# Event-setup source reading back the keyed configuration tags.
process.essource = cms.ESSource("PoolDBESSource",
    process.CondDBCommon,
    DumpStat=cms.untracked.bool(True),
    toGet = cms.VPSet(
        cms.PSet(
            record = cms.string('DTKeyedConfigListRcd'),
            tag = cms.string('DT_keyedConfListIOV_V01')
        ),
        cms.PSet(
            record = cms.string('DTKeyedConfigContainerRcd'),
            tag = cms.string('DT_keyedConfBricks_V01')
        )
    )
)
# The PopCon analyzer: uploads the user-selected DT configuration keys below.
process.conf_o2o = cms.EDAnalyzer("DTUserKeyedConfigPopConAnalyzer",
    name = cms.untracked.string('DTCCBConfig'),
    Source = cms.PSet(
        DBParameters = cms.PSet(
            messageLevel = cms.untracked.int32(0),
            authenticationPath = cms.untracked.string('.')
        ),
        onlineDB = cms.string('sqlite_file:dummy_online.db'),
        tag = cms.string('conf_test'),
        run = cms.int32(1),
        writeKeys = cms.bool(True),
        writeData = cms.bool(True),
        container = cms.string('keyedConfBricks'),
        # One (configType, configKey) pair per DT configuration type.
        DTConfigKeys = cms.VPSet(
            cms.PSet(
                configType = cms.untracked.int32(1),
                configKey = cms.untracked.int32(542)
            ),
            cms.PSet(
                configType = cms.untracked.int32(2),
                configKey = cms.untracked.int32(926)
            ),
            cms.PSet(
                configType = cms.untracked.int32(3),
                configKey = cms.untracked.int32(542)
            ),
            cms.PSet(
                configType = cms.untracked.int32(4),
                configKey = cms.untracked.int32(542)
            ),
            cms.PSet(
                configType = cms.untracked.int32(5),
                configKey = cms.untracked.int32(542)
            ),
            cms.PSet(
                configType = cms.untracked.int32(6),
                configKey = cms.untracked.int32(1226)
            )
        ),
        onlineAuthentication = cms.string('.')
    ),
    SinceAppendMode = cms.bool(True),
    record = cms.string('DTCCBConfigRcd'),
    loggingOn = cms.untracked.bool(True),
    debug = cms.bool(False)
)
process.p = cms.Path(process.conf_o2o)
| 1.414063 | 1 |
tests/test_host_config.py | mrzor/gcloud_sync_ssh | 7 | 12763120 | from gcloud_sync_ssh.host_config import HostConfig
def test_empty():
    """A HostConfig with no options serialises to an empty minidict."""
    config = HostConfig()
    assert not config.minidict()
def test_default_lines():
    """Options are emitted as indented, alphabetically sorted lines."""
    # BUG FIX: the config was built with one HostName but the assertion below
    # expects '4.4.4.4'; construct with the value actually asserted.
    hc = HostConfig(HostName="4.4.4.4", CertificateFile="exists-or-not-we-dont-check")
    ls = hc.lines()
    assert ls == ['    CertificateFile exists-or-not-we-dont-check\n', '    HostName 4.4.4.4\n']
def test_ordering_default():
    """Without an explicit ordering, options come out alphabetically."""
    config = HostConfig(HostName="192.168.0.20", User="mrzor", ForwardX11=False)
    expected = ['    ForwardX11 no\n', '    HostName 192.168.0.20\n', '    User mrzor\n']
    assert config.lines() == expected
def test_ordering_fully_specified():
    """A complete ordering list dictates the exact emission order."""
    config = HostConfig(HostName="192.168.0.20", User="mrzor", ForwardX11=False)
    expected = ['    User mrzor\n', '    HostName 192.168.0.20\n', '    ForwardX11 no\n']
    assert config.lines(ordering=['User', 'HostName', 'ForwardX11']) == expected
def test_ordering_partially_specified():
    """Keys named in the ordering come first; the rest follow alphabetically."""
    config = HostConfig(HostName="192.168.0.20", User="mrzor", ForwardX11=False)
    expected = ['    User mrzor\n', '    ForwardX11 no\n', '    HostName 192.168.0.20\n']
    assert config.lines(ordering=['User']) == expected
def test_ordering_extra_keys():
    """Ordering entries for absent options are silently ignored."""
    config = HostConfig(HostName="192.168.0.20", User="mrzor", ForwardX11=False)
    expected = ['    User mrzor\n', '    ForwardX11 no\n', '    HostName 192.168.0.20\n']
    assert config.lines(ordering=['User', 'BindAddress']) == expected
def test_custom_casing():
    """Custom casings replace the canonical option names on output."""
    config = HostConfig(HostName="192.168.0.20", User="mrzor")
    expected = ['    HoStNaMe 192.168.0.20\n', '    USER mrzor\n']
    assert config.lines(casings=["HoStNaMe", "USER"]) == expected
def test_custom_separator():
    """The key/value separator string is configurable."""
    config = HostConfig(User="narcissus")
    assert config.lines(separator="=") == ['    User=narcissus\n']
    assert config.lines(separator=" = ") == ['    User = narcissus\n']
def test_custom_indent():
    """The per-line indent is configurable, including no indent at all."""
    config = HostConfig(User="narcissus")
    assert config.lines(indent="") == ['User narcissus\n']
    assert config.lines(indent="\t") == ['\tUser narcissus\n']
def test_quoting():
    """Values are quoted on demand, and automatically when they need it."""
    hc = HostConfig(User="narcissus")
    assert hc.lines(force_quotes=True) == ['    User "narcissus"\n']
    # BUG FIX: the second value here was anonymisation residue ('n<NAME>');
    # the test's intent is a value containing whitespace, which must be quoted
    # even without force_quotes.
    hc = HostConfig(User="nar cissus")
    assert hc.lines() == ['    User "nar cissus"\n']
    assert hc.lines(force_quotes=True) == ['    User "nar cissus"\n']
def test_multiple_values():
    """An option given a list of values yields one output line per value."""
    config = HostConfig(LocalForward=['lf1', 'lf2', 'lf3'], User="narcissus")
    # Three LocalForward lines plus one User line.
    assert len(config.lines()) == 4
| 2.40625 | 2 |
Source/Componentes/menu_principal.py | NicolasTangredi/grupo4 | 0 | 12763121 | import json
import PySimpleGUI as sg
from ..Ventanas import principal as ventana, ayuda
from ..Componentes import menu_estadisticas, menu_tablero
from ..Componentes import menu_puntajes
from ..Componentes import menu_configuracion
from ..Handlers import usuario
def iniciar():
    """Start the game menu: load the connected user, apply their colour theme
    and open the main window."""
    with open('data/usuarios.json',"r", encoding="utf8") as file:
        users = json.load(file)
    # Exactly one user is expected to be flagged connected ("conectado" == 1).
    # NOTE(review): raises IndexError if none is — confirm callers guarantee one.
    user = [user for user in users if user["conectado"] == 1][0]
    sg.theme(user['configuracion']['paleta_de_colores'])
    window = ventana.crear()
    loop(window)
def loop(window):
    """Keep the menu window open and dispatch the user's button events.

    Each sub-menu hides the main window while it runs and restores it after.
    """
    while True:
        event, _value = window.read()
        if(event == '-JUGAR-'):
            # Start a game on the board.
            window.hide()
            menu_tablero.start()
            window.un_hide()
        elif(event == '-CONFIG-'):
            # Open the configuration menu.
            window.hide()
            menu_configuracion.start()
            window.un_hide()
        elif(event == '-PUNTOS-'):
            # Show the high-score table.
            window.hide()
            menu_puntajes.start()
            window.un_hide()
        elif(event == '-ESTAD-'):
            # Show player statistics.
            window.hide()
            menu_estadisticas.start()
            window.un_hide()
        elif(event == '-AYUDA-'):
            # Modal help window: read one event, then close it.
            win = ayuda.crear()
            win.read()
            win.close()
        elif(event == '-SALIR-' or event == sg.WIN_CLOSED):
            # Mark the current user as disconnected before exiting.
            user = usuario.usuario_conectado()
            usuario.user_disconnected(user)
            window.close()
            break
| 2.78125 | 3 |
AISnake/Algorithm_2/modules/agent.py | EdgarLi/AIGames | 543 | 12763122 | <reponame>EdgarLi/AIGames
'''
Function:
define the ai agent
Author:
Charles
微信公众号:
Charles的皮卡丘
'''
from modules.food import *
from operator import itemgetter
from collections import OrderedDict
'''AI agent: steers the snake along a precomputed Hamiltonian cycle over the
grid and takes safe shortcuts toward the food when one exists.'''
class Agent():
    def __init__(self, cfg, snake, **kwargs):
        self.cfg = cfg
        # GAME_MATRIX_SIZE is (cols, rows).
        self.num_rows = cfg.GAME_MATRIX_SIZE[1]
        self.num_cols = cfg.GAME_MATRIX_SIZE[0]
        # Candidate moves as (dx, dy): up, down, left, right.
        self.directions = [(0, -1), (0, 1), (-1, 0), (1, 0)]
        # Hamiltonian cycle: maps every cell to the next cell on the cycle.
        self.path = self.buildcircle(snake)
        # Temporary cell -> next-cell map while a shortcut is being followed.
        self.shortcut_path = {}
    '''make decision'''
    def act(self, snake, food):
        # make decision: follow the active shortcut if any, else the cycle.
        if self.shortcut_path:
            head_next = self.shortcut_path.pop(snake.coords[0])
        else:
            head_next = self.path[snake.coords[0]]
        query = (head_next[0]-snake.coords[0][0], head_next[1]-snake.coords[0][1])
        direction = {(-1, 0): 'left', (1, 0): 'right', (0, -1): 'up', (0, 1): 'down'}[query]
        snake.setDirection(direction)
        if snake.update(food):
            food = Apple(self.cfg, snake.coords)
            infos = {'eaten': True, 'food': food}
        else:
            infos = {'eaten': False, 'food': None}
        # if snake has eaten the food, rebuild the cycle for the longer body
        if head_next == food.coord:
            path = self.buildcircle(snake)
            if path:
                self.path = path
        # take shortcut
        # NOTE(review): this early return yields None instead of infos while a
        # shortcut is active — confirm callers tolerate a None return here.
        if self.shortcut_path:
            return
        shortcut_path = self.shortcut(snake, food)
        if shortcut_path:
            self.shortcut_path = shortcut_path
        # return the necessary infos
        return infos
    '''calculate shortcut path'''
    def shortcut(self, snake, food):
        # empty screen, with the ordered hamitonian cycle precomputed and order numbered
        world = [[0 for i in range(self.num_cols)] for j in range(self.num_rows)]
        num = 1
        node = snake.coords[-1]
        world[node[1]][node[0]] = num
        node = self.path[node]
        # Number every cell by its position along the cycle, starting at the tail.
        while node != snake.coords[-1]:
            num += 1
            world[node[1]][node[0]] = num
            node = self.path[node]
        # obtain shortcut_path: BFS-like search that only moves forward along
        # the cycle numbering, never past the food's number.
        wall = snake.coords
        food = food.coord
        food_number = world[food[1]][food[0]]
        node, pre = wall[0], (-1, -1)
        wait = OrderedDict()
        wait[node] = pre
        path = {}
        while wait:
            node, pre = wait.popitem(last=False)
            path[node] = pre
            if node == food:
                break
            node_number = world[node[1]][node[0]]
            neigh = {}
            for direction in self.directions:
                to = (node[0]+direction[0], node[1]+direction[1])
                if not self.checkboundary(to):
                    continue
                if to in wait or to in wall or to in path:
                    continue
                to_number = world[to[1]][to[0]]
                if to_number > node_number and to_number <= food_number:
                    # NOTE(review): keyed by node_number, which is identical
                    # for every neighbour of this node, so only the last
                    # candidate survives; keying on to_number looks intended —
                    # confirm before changing.
                    neigh[node_number] = to
            neigh = sorted(neigh.items(), key=itemgetter(0), reverse=True)
            for item in neigh:
                wait[item[1]] = node
        if node != food:
            return {}
        return self.reverse(path, snake.coords[0], food)
    '''check boundary'''
    def checkboundary(self, pos):
        # True when pos lies inside the game grid.
        if pos[0] < 0 or pos[1] < 0 or pos[0] >= self.num_cols or pos[1] >= self.num_rows:
            return False
        return True
    '''the shortest'''
    def shortest(self, wall, head, food):
        # BFS from head to food avoiding the wall cells; prefers continuing in
        # the previous direction to keep paths straight.
        wait = OrderedDict()
        node, pre = head, (-1, -1)
        wait[node] = pre
        path = {}
        while wait:
            node, pre = wait.popitem(last=False)
            path[node] = pre
            if node == food:
                break
            if pre in path:
                prepre = path[pre]
                direction = (pre[0]-prepre[0], pre[1]-prepre[1])
                if (direction in self.directions) and (direction != self.directions[0]):
                    self.directions.remove(direction)
                    self.directions.insert(0, direction)
            for direction in self.directions:
                to = (node[0] + direction[0], node[1] + direction[1])
                if not self.checkboundary(to):
                    continue
                if to in path or to in wait or to in wall:
                    continue
                wait[to] = node
        if node != food:
            return None
        return self.reverse(path, head, food)
    '''reverse path'''
    def reverse(self, path, head, food):
        # Turn the BFS parent map (child -> parent) into a forward map
        # (cell -> next cell) from head to food.
        if not path: return path
        path_new = {}
        node = food
        while node != head:
            path_new[path[node]] = node
            node = path[node]
        return path_new
    '''the longest'''
    def longest(self, wall, head, food):
        # Start from the shortest path and keep stretching it with 2-cell
        # detours until no further extension is possible.
        path = self.shortest(wall, head, food)
        if path is None:
            return None
        node = head
        while node != food:
            if self.extendpath(path, node, wall+[food]):
                node = head
                continue
            node = path[node]
        return path
    '''extend path'''
    def extendpath(self, path, node, wall):
        # Try to replace the edge node->next with a 3-edge detour through two
        # free neighbouring cells; returns True when the path was extended.
        next_ = path[node]
        direction_1 = (next_[0]-node[0], next_[1]-node[1])
        if direction_1 in [(0, -1), (0, 1)]:
            directions = [(-1, 0), (1, 0)]
        else:
            directions = [(0, -1), (0, 1)]
        for d in directions:
            src = (node[0]+d[0], node[1]+d[1])
            to = (next_[0]+d[0], next_[1]+d[1])
            if (src == to) or not (self.checkboundary(src) and self.checkboundary(to)):
                continue
            if src in path or src in wall or to in path or to in wall:
                continue
            direction_2 = (to[0]-src[0], to[1]-src[1])
            if direction_1 == direction_2:
                path[node] = src
                path[src] = to
                path[to] = next_
                return True
        return False
    '''build a Hamiltonian cycle'''
    def buildcircle(self, snake):
        # A valid cycle must cover every free cell exactly once; then close it
        # through the snake's own body.
        path = self.longest(snake.coords[1: -1], snake.coords[0], snake.coords[-1])
        if (not path) or (len(path) - 1 != self.num_rows * self.num_cols - len(snake.coords)):
            return None
        for i in range(1, len(snake.coords)):
            path[snake.coords[i]] = snake.coords[i-1]
        return path
etl.py | Lamasheg/DENDDataLakeProject | 0 | 12763123 | import configparser
from datetime import datetime
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format, dayofweek
# Read AWS credentials from dl.cfg and export them for the s3a filesystem.
config = configparser.ConfigParser()
config.read('dl.cfg')

# BUG FIX: configparser is section-based — config['AWS_ACCESS_KEY_ID'] returns
# a SectionProxy (or raises), and os.environ only accepts strings. Assumes the
# credentials live in an [AWS] section of dl.cfg — confirm against the file.
os.environ['AWS_ACCESS_KEY_ID'] = config['AWS']['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY'] = config['AWS']['AWS_SECRET_ACCESS_KEY']
def create_spark_session():
    """Return a SparkSession configured with the hadoop-aws package for S3 access."""
    builder = SparkSession.builder
    builder = builder.config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0")
    return builder.getOrCreate()
def process_song_data(spark, input_data, output_data):
    """Extract song metadata from the raw JSON song files and write the
    ``songs`` and ``artists`` dimension tables as parquet.

    spark: active SparkSession
    input_data: root path of the raw dataset
    output_data: root path under which the parquet tables are written
    """
    # Only the 'A' partition of the dataset is read (original scope).
    song_files = input_data + 'song_data/A/*/*/*.json'
    songs_df = spark.read.json(song_files)

    # songs dimension: one row per song, partitioned on disk by year/artist.
    songs_table = songs_df.select('song_id', 'title', 'artist_id', 'year', 'duration').dropDuplicates()
    songs_table.write.partitionBy('year', 'artist_id').parquet(output_data + 'songs/songs.parquet', 'overwrite')

    # artists dimension: one row per artist.
    artists_table = songs_df.select('artist_id', 'artist_name', 'artist_location',
                                    'artist_latitude', 'artist_longitude').dropDuplicates()
    artists_table.write.parquet(output_data + 'artists/artists.parquet', 'overwrite')
def process_log_data(spark, input_data, output_data):
    """Extract user activity from the raw JSON log files and write the
    ``users``, ``time`` and ``songplays`` tables as parquet.

    spark: active SparkSession
    input_data: root path of the raw dataset
    output_data: root path under which the parquet tables are written
    """
    # get filepath to log data file
    log_data = input_data + 'log_data/*/*/*.json'

    # read log data file
    df = spark.read.json(log_data)

    # keep only song-play events
    actions_df = df.filter(df.page == 'NextSong').select('ts', 'userId', 'level', 'song', 'artist',
                                                         'sessionId', 'location', 'userAgent')

    # users dimension table
    users_table = df.select('userId', 'firstName', 'lastName', 'gender', 'level').dropDuplicates()
    # BUG FIX: was `users.write...`, which raised NameError — `users` never existed.
    users_table.write.parquet(output_data + 'users/users.parquet', 'overwrite')

    # epoch milliseconds -> epoch seconds (as string)
    get_timestamp = udf(lambda x: str(int(int(x) / 1000)))
    df = actions_df.withColumn('timestamp', get_timestamp(actions_df.ts))

    # epoch milliseconds -> datetime string
    get_datetime = udf(lambda x: str(datetime.fromtimestamp(int(x) / 1000)))
    df = df.withColumn('start_time', get_datetime(df.ts))

    # derive the calendar fields used by the time table and for partitioning
    df = df.withColumn('hour', hour('start_time'))
    df = df.withColumn('day', dayofmonth('start_time'))
    df = df.withColumn('month', month('start_time'))
    df = df.withColumn('year', year('start_time'))
    df = df.withColumn('week', weekofyear('start_time'))
    df = df.withColumn('weekday', dayofweek('start_time'))
    time_table = df.select('start_time', 'hour', 'day', 'week', 'month', 'year', 'weekday').dropDuplicates()

    # write time table to parquet files partitioned by year and month
    time_table.write.partitionBy('year', 'month').parquet(output_data + 'time/time.parquet', 'overwrite')

    # join logs with song metadata to resolve song_id / artist_id
    song_df = spark.read.json(input_data + 'song_data/A/*/*/*.json')
    df = df.join(song_df, song_df.title == df.song)

    # songplays fact table.
    # BUG FIXES: 'ssessionId' typo corrected to 'sessionId', and 'year'/'month'
    # are selected so the partitionBy below can actually find them.
    songplays_table = df.select('start_time', 'userId', 'level', 'song_id', 'artist_id', 'sessionId',
                                'location', 'userAgent', 'year', 'month') \
                        .withColumn('songplay_id', monotonically_increasing_id())

    # write songplays table to parquet files partitioned by year and month
    songplays_table.write.partitionBy('year', 'month').parquet(output_data + 'songplays/songplays.parquet',
                                                               'overwrite')
def main():
    """Run the full ETL: create a Spark session, process songs, then logs."""
    spark = create_spark_session()
    input_data = "s3a://udacity-dend/"
    # NOTE(review): empty output path writes parquet relative to the driver's
    # working directory — likely a placeholder for an S3 bucket; confirm.
    output_data = ""
    process_song_data(spark, input_data, output_data)
    process_log_data(spark, input_data, output_data)
if __name__ == "__main__":
    main()
| 2.96875 | 3 |
SmartStats/roster/tests.py | waztaz/Basketball-Stats | 1 | 12763124 | <filename>SmartStats/roster/tests.py
from django.test import TestCase
from myapp.models import Coach, Team, Player, Scout, Game, CumulativeStats, BasketballStats
# Create your tests here.
class CoachTestCase(TestCase):
    """Fixture checks for the Coach model."""
    def setUp(self):
        Coach.objects.create(first_name="Test" , last_name="Coach")
    def test_coach_exists(self):
        "Testing for properly generated coach"
        #coach = Coach.objects.get()
class TeamTestCase(TestCase):
    """Fixture checks for the Team model."""
    def setUp(self):
        Team.objects.create(name="Cavaliers")
    #def test_coach_exists(self):
class PlayerTestCase(TestCase):
    """Fixture checks for the Player model."""
    def setUp(self):
        # BUG FIX: `position=SG` referenced an undefined name (NameError at
        # runtime); the position is stored as a string.
        Player.objects.create(team="Cavaliers", first_name="Test", last_name="Player",
                              height=68, weight=190, position="SG")
class ScoutTestCase(TestCase):
    """Fixture checks for the Scout model."""
    def setUp(self):
        Scout.objects.create(first_name="Test" , last_name="Scout")
#class GameTestCase(TestCase):
# def setUp(self):
# Game.objects.create(first_name="Test" , last_name="Coach")
#class CumulativeStatsTestCase(TestCase):
# def setUp(self):
#class BasketballStatTestCase(TestCase):
# def setUp(self):
| 2.359375 | 2 |
FluentPy/basic_data_sturcture/NumpyAndScipy.py | QAlexBall/Learning_Py | 2 | 12763125 | <filename>FluentPy/basic_data_sturcture/NumpyAndScipy.py
import numpy
# 1-D array of 0..11.
a = numpy.arange(12)
print(a)
print(type(a))
print(a.shape)
# Reshape in place to 3 rows x 4 columns.
a.shape = 3, 4
print(a)
# Third row, single element, second column, and the transpose.
print(a[2])
print(a[2, 1])
print(a[:, 1])
print(a.transpose())
# NumPy can also read, store and otherwise manipulate ndarray elements abstractly.
iot_search.py | BoneLee/IOT-search-engine | 1 | 12763126 | <reponame>BoneLee/IOT-search-engine<filename>iot_search.py
import nltk
from influxdb import InfluxDBClient
from baeza_yates_intersect import BaezaYates_intersect
class IotSearcher(object):
    """Tiny inverted-index search engine backed by InfluxDB.

    NOTE: this file is Python 2 (`iteritems`, `long`, `print` statement).
    """
    def __init__(self, database='searcher', host="localhost", port=8086, user='root', password='<PASSWORD>'):
        self._client = InfluxDBClient(host, port, user, password, database)
        # Start from a clean index database on every instantiation.
        self._client.drop_database(database)
        self._client.create_database(database)
        self._english_punctuations = set([',', '.', ':', ';', '?', '(', ')', '[', ']', '!', '@', '#', '%', '$', '*'])
        self._stemmer = nltk.stem.lancaster.LancasterStemmer()
        self._stopwords = set(nltk.corpus.stopwords.words('english'))
        self._index_table = "iot_index"
    # def remove_index(self):
    #     self._client.drop_database(self._index_table)
    def _get_token(self, sentence):
        # Lower-case, tokenise, drop punctuation/stopwords, then stem.
        tokens = []
        for word in nltk.word_tokenize(sentence.lower()):
            if word not in self._english_punctuations and word not in self._stopwords:
                tokens.append(self._stemmer.stem(word))
        return tokens
    def index(self, doc_id, doc):
        # Numeric attributes become Influx fields stored with every token row.
        values = {"doc_id": doc_id}
        for field, value in doc.iteritems():
            if type(value) in (int, long, float):
                values[field] = value
        # One point per (token, source-field) pair; tags make tokens queryable.
        rows_data = []
        for field, value in doc.iteritems():
            if type(value) is str:
                tokens = self._get_token(value)
                for token in tokens:
                    rows_data.append({
                        "measurement": self._index_table,
                        "tags": {"token": token, "field2": field},
                        "fields": values
                    })
        self._client.write_points(rows_data)
    def search(self, query, page=100):
        # "field='value'" restricts matching to one source field.
        # WARNING(review): the InfluxQL below is built by string interpolation
        # from user input — injection risk; should use bound parameters.
        result = []
        field = None
        if "=" in query:
            field, query = query.split("=")
            query = query.replace("'", "")
            query = query.replace('"', "")
        words = self._get_token(query)
        for word in words:
            if field:
                response = self._client.query("select doc_id from %s WHERE token='%s' and field2='%s' ORDER BY time DESC LIMIT %d;" % (self._index_table, word, field, page))
            else:
                response = self._client.query("select doc_id from %s WHERE token='%s' ORDER BY time DESC LIMIT %d;" % (self._index_table, word, page))
            docs = [item["doc_id"] for r in response for item in r]
            # AND-semantics across query words via sorted-list intersection.
            if not result:
                result = docs
            else:
                result = BaezaYates_intersect(result, docs)
        print result
        return result
return result
def main():
    """Smoke test: index three toy documents and run a few queries."""
    searcher = IotSearcher()
    # searcher.remove_index()
    searcher.index(1, {"name": "<NAME>", "age": 23, "cats": "ao da miao and ao er miao", "title": "this is a good title"})
    searcher.index(2, {"name": "<NAME>", "age": 33, "cats": "unknown", "title": "hello world, this is from jack"})
    searcher.index(3, {"kaka": "how are you?"})
    searcher.search("lee")
    searcher.search("hello lee")
    searcher.search("this is a good miao")
    searcher.search("cats='da miao'")
    # TODO
    # searcher.search("cats='da miao' and name='bone'")
if __name__ == '__main__':
    main()
| 2.8125 | 3 |
pychk/fetch_resources.py | ayushpriya10/pychk | 1 | 12763127 | import json
import requests
import sys
def fetch_jsons(json_output=False):
    """Download the safety-db JSON resources from GitHub.

    json_output: unused here; kept for interface compatibility with callers.
    Returns (insecure_deps, insecure_deps_full) parsed from the upstream repo,
    or exits the process with status 1 when fetching or parsing fails.
    """
    base = 'https://raw.githubusercontent.com/pyupio/safety-db/master/data/'
    print('[INFO] Fetching latest resource files.')
    try:
        # timeout so a dead network fails fast instead of hanging forever
        insecure_deps = json.loads(requests.get(base + 'insecure.json', timeout=30).content)
        print('[INFO] Fetched list of Insecure Dependencies.')
        insecure_deps_full = json.loads(requests.get(base + 'insecure_full.json', timeout=30).content)
        print('[INFO] Fetched list of Security Advisories')
        print('[INFO] Fetch complete.')
        return insecure_deps, insecure_deps_full
    except (requests.RequestException, ValueError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. ValueError covers malformed JSON.
        print('[ERR] An error occurred while fetching resouce files. Maybe you\'re not connected to the internet?')
        sys.exit(1)
if __name__ == "__main__":
    insecure_deps, insecure_deps_full = fetch_jsons()
    # Sanity scan: report version specifiers containing more than two
    # comma-separated clauses.
    for deps in insecure_deps:
        ver_list = insecure_deps[deps]
        for ver in ver_list:
            if len(ver.split(',')) > 2:
                print(ver_list, ver)
| 3.59375 | 4 |
itertools_module/itertools_module.py | pyjads/Python_module_helper | 0 | 12763128 | # %%
# line printer
def printer(info):
    """Print a banner separator line containing *info*."""
    banner = '\n\n================================= {} =================================================\n\n'
    print(banner.format(info))
# %%
import itertools
# to get a counter starting from 0 to infinte
counter = itertools.count() # return type is the iterator and count will start from 0 and you can use it with for loop
# or next() command
# if you want to get a counter starting from certain number
start = 10
counter = itertools.count(start=start, step=-5) # step function is optional and can be negative
# itertools.count() can be used with zip function like
counter = itertools.count(start=start, step=-5)
l = [100, 200, 300, 400]
zipped = zip(counter, l)
print(list(zipped))
# %%
from itertools import zip_longest
# zip is used to map two list and shortest list length will be considered, Iteration continues until the longest
# iterable is exhausted
l1 = ['sanket', 'sanchita', 'rohan', 'devi', 'adarsh', 'vishnu', 'prashant', 'chirag']
l2 = [1, 2, 3, 4, 5, 6]
print(list(zip(l1, l2)))
print(list(zip_longest(l1, l2)))
# %%
from itertools import cycle
# cycle is used as circular linked list where we can iterate through certain value over and over again
# it can be use with list, tuple, string
counter = cycle([1, 2, 3, 4])
counter = cycle(('On', 'Off'))
counter = cycle('san') # it will repeat 's' 'a' and 'n'
print(next(counter))
print(next(counter))
print(next(counter))
print(next(counter))
print(next(counter))
print(next(counter))
# %%
from itertools import repeat
# repeat can be used to repeat single value multiple time
counter = repeat(2, times=3) # repeat 2, 3 times-- times is optional element and if not provided repeat many times
print(next(counter))
print(next(counter))
print(next(counter))
# example
squares = map(pow, range(1, 10), repeat(2, times=3))
print(list(squares))
# %%
from itertools import starmap
'''
def starmap(function, iterable):
# starmap(pow, [(2,5), (3,2), (10,3)]) --> 32 9 1000
for args in iterable:
yield function(*args)
'''
def power(x, y):
    """Return x raised to the y-th power."""
    result = x ** y
    return result
# the above example in repeat can be used with starmap as
squares = starmap(power, [(0, 2), (1, 3), (2, 2), (3, 3), (4, 2), (9, 3)])
print(list(squares))
# how *args works
# for k in [(0, 2), (1, 3), (2, 2), (3, 3), (4, 2), (9, 3)]:
#     print(*k)
# %%
from itertools import combinations, permutations
# used for getting all the possible combinations
letters = ['a', 'b', 'c', 'd', 'e']
number = [0, 1, 2, 3]
names = ['sanket', 'raj']
result = combinations(letters, 3) # produces combinations where (a,b) and (b,a) are the same, so only one is listed
print(list(result))
# if order matters then use permutations
result = permutations(letters, 3)
print(list(result))
# for example if we want to create a 4 digit code using number where a combination can also include the same number multiple
# times
from itertools import product
# computes the cartesian product
# product(iterable, repeat=n) this is permutation with replacement
# for solution see https://www.hackerrank.com/challenges/itertools-product/problem
result = product(number, repeat=3) # arrange numbers in groups of 3
print(list(result))
# in the product function we have to provide the repeat argument; a similar function that can be used is
# combinations_with_replacement
from itertools import combinations_with_replacement
result = combinations_with_replacement(number, 4) # arrange numbers in groups of 4 where each number can be used
# multiple times
print(list(result))
# for permutations with replacement use the link below for a solution
# https://stackoverflow.com/questions/46255220/how-to-produce-permutations-with-replacement-in-python
'''
import itertools
choices = [-1, 1]
n = 3
l = [choices] * n
list(itertools.product(*l))
'''
# %%
letters = ['a', 'b', 'c', 'd', 'e']
number = [0, 1, 2, 3]
names = ['sanket', 'raj']
# if you want to iterate through all letters, number, names you would first have to create a single combined list
# and then iterate through the new list; this can be solved using chain
from itertools import chain
iterator = chain(letters, number, names)
print(iterator) # object of iterator
# print(list(iterator)) # print the complete list
# %%
# islice is used to slice an iterable object, which is memory efficient
letters = ['a', 'b', 'c', 'd', 'e']
number = [0, 1, 2, 3]
names = ['sanket', 'raj']
import itertools
from itertools import islice
result = islice(letters, 1, 3)
print(list(result))
result = itertools.islice(range(10), 1, 5) # islice takes (iterable, start, stop) or (iterable, stop)
# islice is used when, suppose, you have a very long iterator which is hard to load into memory; slicing it
# can be costly. Let's suppose we have a file
import os
with open('collections_module/islice_text', 'r') as file:
    # file object is an iterator
    header = islice(file, 3)
    for line in header:
        print(line, end='')
#
# %%
import numpy as np
np.random.seed(42) # seeding is done for the pseudo random number generator
selectors = np.random.randint(1, 10000, 50)
'''
letters = np.array(['a', 'b', 'c', 'd', 'e'])
print(selectors)
print(letters[selectors])
'''
# filterfalse works the same as filter but instead of returning truthy values it returns falsy ones
from itertools import filterfalse
result = filterfalse(lambda x: x > 5000, selectors) # returns an iterator
print(list(result))
# %%
# dropwhile can be used when you want to filter until the condition fails for the first time
# similar to dropwhile is takewhile, the opposite of the former
import numpy as np
np.random.seed(42)
selectors = np.random.randint(1, 10000, 50)
print(selectors)
from itertools import dropwhile
result = dropwhile(lambda x: x > 500, selectors)
print(len(list(result)))
# %%
# accumulate -- works over an iterable, performing cumulative operations
import numpy as np
np.random.seed(42)
selectors = np.random.randint(1, 10, 10)
print(selectors)
from itertools import accumulate
import operator
result = accumulate(selectors, operator.mul)
print(list(result))
printer('string work')
# working with strings: cumulative concatenation
selectors = np.random.choice(['a', 'b', 'c', 'f', 'g'], size=10)
print(selectors)
result = accumulate(selectors, lambda x, y: x + y)
print(list(result))
#%%
# groupby works the same as pandas.groupby
people = [
    {
        'name': '<NAME>',
        'city': 'Gotham',
        'state': 'NY'
    },
    {
        'name': '<NAME>',
        'city': 'Kings Landing',
        'state': 'NY'
    },
    {
        'name': '<NAME>',
        'city': 'Boulder',
        'state': 'CO'
    },
    {
        'name': '<NAME>',
        'city': 'Denver',
        'state': 'CO'
    },
    {
        'name': '<NAME>',
        'city': 'Hinton',
        'state': 'WV'
    },
    {
        'name': '<NAME>',
        'city': 'Rand',
        'state': 'WV'
    },
    {
        'name': '<NAME>',
        'city': 'Asheville',
        'state': 'NC'
    },
    {
        'name': '<NAME>',
        'city': 'Charlotte',
        'state': 'NC'
    },
    {
        'name': '<NAME>',
        'city': 'Faketown',
        'state': 'NC'
    }
]
def get_state(person):
    """Grouping key function: the person's state abbreviation."""
    return person["state"]
# for groupby to work as expected, the input must be sorted by the same key
from itertools import groupby
result = groupby(people, get_state)
for key, group in result:
    # here group is an iterator
    #
    for g in group:
        print(key,' ', g)
#%%
# in order to create multiple copies of an iterator you can use tee
import numpy as np
np.random.seed(42) # seeding is done for the pseudo random number generator
selectors = np.random.randint(1, 10000, 50)
from itertools import filterfalse
from itertools import tee
result = filterfalse(lambda x: x < 500, selectors)
copy1, copy2 = tee(result) # don't use the original result iterator after this
print(list(copy1))
print(list(copy2))
| 4.0625 | 4 |
empower_core/accountsmanager/accountshandler.py | 5g-empower/empower-core | 3 | 12763129 | <reponame>5g-empower/empower-core
#!/usr/bin/env python3
#
# Copyright (c) 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Exposes a RESTful interface ."""
import empower_core.apimanager.apimanager as apimanager
# pylint: disable=W0223
class AccountsHandler(apimanager.APIHandler):
"""All the accounts defined in the controller."""
URLS = [r"/api/v1/accounts/?",
r"/api/v1/accounts/([a-zA-Z0-9:.]*)/?"]
@apimanager.validate(max_args=1)
def get(self, username=None):
"""List the accounts.
Args:
[0]: the username
Example URLs:
GET /api/v1/accounts
[
{
"email": "<EMAIL>",
"name": "admin",
"username": "root"
},
{
"email": "<EMAIL>",
"name": "Foo",
"username": "foo"
},
{
"email": "<EMAIL>",
"name": "Bar",
"username": "bar"
}
]
GET /api/v1/accounts/root
{
"email": "<EMAIL>",
"name": "admin",
"username": "root"
}
"""
return self.service.accounts[username] \
if username else self.service.accounts
@apimanager.validate(returncode=201)
def post(self, **kwargs):
"""Create a new account.
Request:
version: protocol version (1.0)
username: username (mandatory)
password: password (mandatory)
name: name (mandatory)
email: email (mandatory)
Example URLs:
POST /api/v1/accounts
{
"version" : 1.0,
"username" : "foo",
"password" : "<PASSWORD>",
"name" : "foo",
"email" : "<EMAIL>"
}
"""
self.service.create(username=kwargs['username'],
password=kwargs['password'],
name=kwargs['name'],
email=kwargs['email'])
@apimanager.validate(returncode=204, min_args=1, max_args=1)
def put(self, *args, **kwargs):
"""Update an account.
Args:
[0]: the username
Request:
version: protocol version (1.0)
name: name (mandatory)
email: email (mandatory)
password: password (optional)
new_password: <PASSWORD> (optional)
new_password_confirm: <PASSWORD>_password_confirm (optional)
Example URLs:
PUT /api/v1/accounts/test
{
"version" : 1.0,
"name" : "foo",
"email" : "<EMAIL>",
"new_password" : "<PASSWORD>",
"new_password_confirm" : "<PASSWORD>",
}
PUT /api/v1/accounts/test
{
"version" : 1.0,
"name" : "foo",
"email" : "<EMAIL>",
}
"""
username = args[0]
password = <PASSWORD>
name = kwargs['name'] if 'name' in kwargs else None
email = kwargs['email'] if 'email' in kwargs else None
if 'new_password' in kwargs and 'new_password_confirm' in kwargs:
if kwargs['new_password'] != kwargs['new_password_confirm']:
raise ValueError("Passwords do not match")
password = <PASSWORD>['<PASSWORD>']
self.service.update(username=username, password=password, name=name,
email=email)
    @apimanager.validate(returncode=204, min_args=1, max_args=1)
    def delete(self, username):
        """Delete an account.

        Args:
            [0]: the username

        Example URLs:
            DELETE /api/v1/accounts/foo
        """
        # Delegate removal to the account service; argument-count
        # validation is handled by the decorator.
        self.service.remove(username)
| 2.078125 | 2 |
examples/get_started.py | paulgoetze/whattime | 0 | 12763130 | <reponame>paulgoetze/whattime
from datetime import datetime

from whattime import whattime, Hemisphere

# Query every time-type descriptor for the current moment on the
# southern hemisphere and print a selection of the boolean properties.
current_moment = datetime.now()
time_info = whattime(current_moment, Hemisphere.SOUTHERN)

print(time_info.types)
# e.g. {<TimeType.WEEKDAY: 'weekday'>, <TimeType.MONDAY: 'monday'>, ...}

print(time_info.is_weekday)
print(time_info.is_weekend)
print(time_info.is_monday)
print(time_info.is_tuesday)
print(time_info.is_afternoon)
print(time_info.is_morning)
print(time_info.is_summer)
print(time_info.is_winter)
| 3.71875 | 4 |
scripts/generate-sprites.py | alexfmpe/beeraffe | 41 | 12763131 | #!/usr/bin/python
import subprocess
import sys
import os
# This is in the `unicode-character-database` package on Arch Linux.
UNICODE_DATABASE = '/usr/share/unicode-character-database/UnicodeData.txt'
WORDS = 'data/words.txt'
NOTO_EMOJI_DIR = '/home/jasper/Stash/noto-emoji'
def load_unicode_database():
    """Parse UnicodeData.txt into a mapping from character name to code point.

    Names are lower-cased with spaces replaced by dashes (e.g.
    'latin-small-letter-a'); code points are lower-case hex strings.
    """
    data = {}
    # Open in text mode with an explicit encoding instead of opening in
    # binary mode and decoding every line by hand.
    with open(UNICODE_DATABASE, encoding='utf-8') as db:
        for line in db:
            code, name, _ = line.split(';', 2)
            data[name.lower().replace(' ', '-')] = code.lower()
    return data
def load_words():
    """Read the word list, mapping each word to its canonical emoji name."""
    words = {}
    with open(WORDS, 'r') as handle:
        for line in handle:
            parts = line.split(':')
            word = parts[0].strip()
            # 'word: canonical' lines map to the canonical name;
            # bare lines map the word to itself.
            words[word] = parts[1].strip() if len(parts) > 1 else word
    return words
if __name__ == "__main__":
    # Build a sprite sheet: look up the code point for each word's canonical
    # emoji name, collect the matching 128px Noto PNGs, tile them into one
    # image with ImageMagick, then harden the alpha channel.
    unicode_database = load_unicode_database()
    words = load_words()

    montage_files = []
    for word, canonical in words.items():
        code = unicode_database[canonical]
        print('{} -> {} -> {}'.format(word, canonical, code), file=sys.stderr)
        png_128 = "{}/png/128/emoji_u{}.png".format(NOTO_EMOJI_DIR, code)
        if not os.path.isfile(png_128):
            raise Exception("{} does not exist".format(png_128))
        else:
            montage_files += [png_128]

    print('Invoking montage...', file=sys.stderr)
    tmp_png = 'build/sprites-tmp-01.png'
    # Tile all emoji at 32x32 on a transparent background into one sheet.
    montage_command = \
        ['montage', '-geometry', '32x32+0+0', '-background', 'none'] + \
        montage_files + \
        ['png32:{}'.format(tmp_png)]
    subprocess.run(montage_command, check=True)

    print('Fixing alpha...', file=sys.stderr)
    # Threshold the alpha channel to fully opaque / fully transparent so
    # the sprites have no semi-transparent fringe.
    subprocess.run([
        'convert', '-channel', 'A', '-threshold', '50%', tmp_png,
        'png32:build/sprites.png'
    ], check=True)
| 2.65625 | 3 |
src/vision/utils.py | ncl-ROVers/surface-2019-20 | 3 | 12763132 | """
Computer vision Utils
=====================
Standard utils module storing common to the package classes, functions, constants, and other objects.
"""
| 1.054688 | 1 |
app/main.py | scraiber/kubectl-api | 0 | 12763133 | from fastapi import FastAPI
from pydantic import BaseModel
from typing import List
import subprocess
import random
import string
import os
app = FastAPI()
class Item(BaseModel):
    # Request payload for /general_json: the kubectl command string plus
    # the YAML manifest to apply, given as a list of lines.
    kube_str: str
    yaml_file: List[str]
class Response(BaseModel):
    # stdout / stderr captured from the kubectl subprocess.
    Output: str
    Error: str
@app.get("/general/{item}", response_model=Response)
async def general_kubectl(item: str):
    """Run an arbitrary kubectl command given as a path segment.

    The 'kubectl' prefix is prepended unless the caller already included
    it. Returns the subprocess stdout/stderr.
    """
    # Build the argv list explicitly; avoid shadowing the builtin `input`.
    cmd = [] if item.startswith("kubectl") else ["kubectl"]
    cmd.extend(item.split(" "))
    result = subprocess.run(cmd, capture_output=True, text=True)
    return {"Output": result.stdout, "Error": result.stderr}
@app.get("/general_json", response_model=Response)
async def general_kubectl_json(request: Item):
    """Run a kubectl command that references a YAML manifest.

    The manifest lines from the request are written to a temporary file
    under /cache, the single YAML placeholder token in `kube_str` is
    rewritten to point at it, and the command is executed. The temporary
    file is always removed afterwards.
    """
    if request.kube_str.count(".yaml") + request.kube_str.count(".yml") >= 2:
        return {"Output": "", "Error": "There should be only one YAML in kube_str"}

    # Write the manifest to a randomly named file under /cache.
    file_name = ''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=30)) + ".yaml"
    file_path_name = "/cache/" + file_name
    with open(file_path_name, 'w') as f:
        for line in request.yaml_file:
            f.write("%s\n" % line)

    # Build the argv list, substituting any yaml/yml token with the temp
    # file path; avoid shadowing the builtin `input`.
    cmd = [] if request.kube_str.startswith("kubectl") else ["kubectl"]
    cmd.extend(part if ("yaml" not in part and "yml" not in part) else file_path_name
               for part in request.kube_str.split(" "))

    # Ensure the temp file is removed even if the subprocess call raises.
    try:
        result = subprocess.run(cmd, capture_output=True, text=True)
    finally:
        if os.path.exists(file_path_name):
            os.remove(file_path_name)
    return {"Output": result.stdout, "Error": result.stderr}
diction.py | beatles213/py_emulate | 2 | 12763134 | <reponame>beatles213/py_emulate
'''Dictionaries: key-value pairs (tutorial walkthrough).'''
alien_0 = {'color': 'green', 'points': 5}
print(alien_0['color'])
print(alien_0['points'])

# Adding new key-value pairs
alien_0['x_position'] = 0
alien_0['y_position'] = 25
print(alien_0)

# Modifying a value
alien_0['color'] = 'yellow'
print(alien_0)

# Deleting a pair: the key-value pair is gone for good
del alien_0['points']
print(alien_0)

# A dictionary describing a single object
student = {
    'name': 'Michael',
    'goods': 'backetball',
    'age': 30
}
print(student['name'] + ' age is ' + str(student['age']))

# Looping through all key-value pairs: for k, v in d.items()
for key, value in student.items():
    print("\nKey:" + key)
    if str(value).isdigit():
        print("Value:" + str(value))
    else:
        print("Value:" + str(value.title()))

for k, v in student.items():
    print(k)
    if str(v).isdigit():
        print(str(v))
    else:
        print(v)

# Looping through all keys
for k in student.keys():
    print(k)

# Nesting: a list of dictionaries
alien_0 = {'color': 'green', 'points': 5}
alien_1 = {'color': 'yellow', 'points': 10}
alien_2 = {'color': 'red', 'points': 15}
aliens = [alien_0, alien_1, alien_2]
for alien in aliens:
    print(alien)

aliens = []
for alien_number in range(30):
    new_alien = {'color': 'green', 'points': 5, 'speed': 'slow'}
    aliens.append(new_alien)
for alien in aliens[:5]:
    print(alien)
    print(alien['color'])
print("...")
# BUG FIX: the original wrapped the message in len(), printing the length
# of the string instead of the message itself.
print("Total number of alien:" + str(len(aliens)))

# Storing a list inside a dictionary
pizza = {
    'crust': 'thick',
    'toppings': ['mushrooms', 'extra cheese']
}
print(pizza['crust'])
print(pizza['toppings'])
for topping in pizza['toppings']:
    print(topping)

favorite_languages = {
    'jen': ['python', 'ruby'],
    'edward': ['ruby', 'go'],
    'phil': ['python', 'haskell'],
    'serah': ['C']
}
for key, value in favorite_languages.items():
    print("\n" + key.title() + "'s favorite lanugage are:")
    for language in value:
        print("\t" + language.title())

# Storing a dictionary inside a dictionary
users = {
    'aeinstein': {
        'first': 'albert',
        'last': 'einstein',
        'location': 'princeton',
    },
    'mcurie': {
        'first': 'marie',
        'last': 'curie',
        'location': 'paris',
    },
}
for key, value in users.items():
    print("\nUsername:" + key)
    full_name = value['first'] + " " + value['last']
    location = value['location']
    print("\tFull_name:" + full_name.title())
    print("\tLocation:" + location.title())
| 3.6875 | 4 |
powersimdata/data_access/tests/sql/test_execute_table.py | c-voegele/PowerSimData | 27 | 12763135 | from collections import OrderedDict
import pytest
from powersimdata.data_access.execute_table import ExecuteTable
from powersimdata.data_access.sql_store import SqlError
row_id = 9000
def _get_test_row():
    """Return a fresh row dict with a unique, monotonically increasing id."""
    global row_id
    row_id = row_id + 1
    return OrderedDict(id=row_id)
class NoEffectSqlStore(ExecuteTable):
    # Test double that rolls back the transaction on exit so rows written
    # during a test never persist in the database.
    def __exit__(self, exc_type, exc_value, traceback):
        self.conn.rollback()
        super().__exit__(exc_type, exc_value, traceback)
class RaiseErrorSqlStore(ExecuteTable):
    # Test double whose add_entry always fails, used to exercise the
    # error-wrapping behavior of the store context manager.
    def add_entry(self, scenario_info):
        raise Exception("Error while executing sql")
@pytest.fixture
def store():
    # Provide a store whose writes are rolled back when the test ends.
    with NoEffectSqlStore() as store:
        yield store
@pytest.mark.integration
@pytest.mark.db
def test_err_handle():
    """Errors raised inside the store context are wrapped in SqlError."""
    with pytest.raises(SqlError):
        with RaiseErrorSqlStore() as failing_store:
            failing_store.add_entry(None)
@pytest.mark.integration
@pytest.mark.db
def test_select_no_limit(store):
    """Without a limit, every inserted row is returned."""
    for _ in range(2):
        store.add_entry(_get_test_row())
    assert store.get_execute_table().shape[0] == 2
@pytest.mark.integration
@pytest.mark.db
def test_select_with_limit(store):
    """A limit caps the number of returned rows."""
    total_rows, limit = 6, 3
    for _ in range(total_rows):
        store.add_entry(_get_test_row())
    assert store.get_execute_table(limit).shape[0] == limit
@pytest.mark.integration
@pytest.mark.db
def test_add_entry(store):
    """Newly added entries start out in the 'created' state."""
    row = _get_test_row()
    store.add_entry(row)
    assert store.get_status(row["id"]).loc[0, "status"] == "created"
@pytest.mark.integration
@pytest.mark.db
def test_update_entry(store):
    """set_status overwrites the stored status for an entry."""
    row = _get_test_row()
    store.add_entry(row)
    scenario_id = row["id"]
    store.set_status(scenario_id, "testing")
    assert store.get_status(scenario_id).loc[0, "status"] == "testing"
@pytest.mark.integration
@pytest.mark.db
def test_delete_entry(store):
    """Deleting an entry leaves no status rows behind."""
    row = _get_test_row()
    scenario_id = row["id"]
    store.add_entry(row)
    store.delete_entry(scenario_id)
    assert store.get_status(scenario_id).shape == (0, 0)
| 2.203125 | 2 |
tests/utils.py | kbakba/github3.py | 0 | 12763136 | import requests
import github3
import expecter
import json
import sys
from mock import patch
from io import BytesIO
from unittest import TestCase
from requests.structures import CaseInsensitiveDict
is_py3 = sys.version_info > (3, 0)
def load(name):
    """Return the parsed JSON fixture stored under tests/json."""
    with path(name) as fixture:
        return json.load(fixture)
def path(name, mode='r'):
    """Open the fixture file *name* from the tests/json directory."""
    return open('tests/json/' + name, mode)
class CustomExpecter(expecter.expect):
    """expecter.expect extended with identity and membership helpers."""

    def is_not_None(self):
        assert self._actual is not None, (
            'Expected anything but None but got it.'
        )

    def is_None(self):
        assert self._actual is None, (
            'Expected None but got {0}'.format(repr(self._actual))
        )

    def is_True(self):
        assert self._actual is True, (
            'Expected True but got {0}'.format(repr(self._actual))
        )

    def is_False(self):
        assert self._actual is False, (
            'Expected False but got {0}'.format(repr(self._actual))
        )

    def is_in(self, iterable):
        assert self._actual in iterable, (
            "Expected {0} in {1} but it wasn't".format(
                repr(self._actual), repr(iterable)
            )
        )

    @classmethod
    def githuberror(cls):
        # Shorthand expectation for the library's own exception type.
        return cls.raises(github3.GitHubError)


expect = CustomExpecter
class BaseCase(TestCase):
    """Shared harness: patches requests.Session.request and provides
    helpers to stage canned responses and assert on the calls made."""

    github_url = 'https://api.github.com/'

    def setUp(self):
        # Fresh anonymous client plus a patched Session.request so no real
        # HTTP traffic ever leaves the test process.
        self.g = github3.GitHub()
        self.args = ()
        self.conf = {'allow_redirects': True}
        self.mock = patch.object(requests.sessions.Session, 'request')
        self.request = self.mock.start()

    def tearDown(self):
        self.mock.stop()

    def login(self):
        # Authenticate the client with dummy basic-auth credentials.
        self.g.login('user', 'password')

    def mock_assertions(self):
        # Verify the patched request was called with the expected method and
        # URL (self.args) and keyword configuration (self.conf).
        assert self.request.called is True
        conf = self.conf.copy()
        args, kwargs = self.request.call_args

        expect(self.args) == args

        if 'data' in self.conf:
            if isinstance(self.conf['data'], dict):
                # Compare dict payloads key by key because JSON key order
                # in the serialized body is not deterministic.
                for k, v in list(self.conf['data'].items()):
                    s = json.dumps({k: v})[1:-1]
                    expect(s).is_in(kwargs['data'])
            else:
                expect(self.conf['data']) == kwargs['data']
            del self.conf['data']

        for k in self.conf:
            expect(k).is_in(kwargs)
            expect(self.conf[k]) == kwargs[k]

        # Reset the mock for the next call and restore conf (the 'data'
        # key was popped above) from the copy taken at the start.
        self.request.reset_mock()
        self.conf = conf

    def response(self, path_name, status_code=200, enc='utf-8',
                 _iter=False, **headers):
        # Stage a canned requests.Response as the next mock return value,
        # loading the body from a JSON fixture when path_name is given.
        r = requests.Response()
        r.status_code = status_code
        r.encoding = enc

        if path_name:
            with path(path_name) as f:
                content = f.read().strip()

            if _iter:
                # Wrap the fixture in a JSON list for paginated endpoints.
                content = '[{0}]'.format(content)
                r.raw = RequestsBytesIO(content.encode())
            elif is_py3:
                r.raw = RequestsBytesIO(content.encode())
            else:
                r.raw = RequestsBytesIO(content)
        else:
            r.raw = RequestsBytesIO()

        if headers:
            r.headers = CaseInsensitiveDict(headers)

        self.request.return_value = r

    # The helpers below record the method/URL pair that mock_assertions()
    # should later check against the actual call.
    def delete(self, url):
        self.args = ('DELETE', url)
        self.conf = {}

    def get(self, url):
        self.args = ('GET', url)

    def patch(self, url):
        self.args = ('PATCH', url)

    def post(self, url):
        self.args = ('POST', url)

    def put(self, url):
        self.args = ('PUT', url)

    def not_called(self):
        expect(self.request.called).is_False()
class RequestsBytesIO(BytesIO):
    # BytesIO variant whose read() tolerates the extra arguments that
    # urllib3/requests pass when draining a raw response body.
    def read(self, chunk_size, *args, **kwargs):
        return super(RequestsBytesIO, self).read(chunk_size)
| 2.40625 | 2 |
ephemere/archive.py | vandalt/ephemere | 0 | 12763137 | from pathlib import Path
from typing import List, Optional, Union
from exofile.archive import ExoFile
from pandas.core.frame import DataFrame
from ephemere import constants as const
def load_archive(
        query: bool = True,
        exofile_param_file: Optional[Union[str, Path]] = None,
        use_alt_exofile: bool = False,
        keep_controv: bool = False,
        warn_units: bool = False,
        warn_local_file: bool = False,
        convert_omega: bool = True,
        return_pandas: bool = True,
        **kwargs
) -> DataFrame:
    """Load the exoplanet archive table through exofile.

    Controversial entries are dropped unless *keep_controv* is set.
    Unless *convert_omega* is disabled, the argument-of-periastron column
    and its error columns are converted to radians, which the RV
    calculations expect.
    """
    # TODO: Either merge the warn_units in exofile or use warning filters
    # here instead. Masterfile PR:
    # https://github.com/AntoineDarveau/exofile/pull/26
    tbl = ExoFile.load(
        query=query,
        param=exofile_param_file,
        use_alt_file=use_alt_exofile,
        warn_units=warn_units,
        warn_local_file=warn_local_file,
        **kwargs,
    )

    if not keep_controv:
        tbl = tbl[tbl[const.CONTROV_FLAG] == 0]

    # Convert omega and both error columns to radians in one pass.
    if convert_omega and tbl[const.OMEGA_KEY].unit != "rad":
        for suffix in ("", "err1", "err2"):
            column = const.OMEGA_KEY + suffix
            tbl[column] = tbl[column].to("rad")

    if return_pandas:
        return tbl.to_pandas()
    return tbl
def get_archive_names(names: List[str]) -> List[str]:
    """Normalize object names to the NASA Exoplanet Archive conventions.

    Two normalizations are applied:
      * 'GL'/'Gl'/'GJ' prefixes (with or without a trailing space) become
        'GJ ' -- Gliese stars are all listed as 'GJ <num>' in the archive.
      * Binary-component suffixes 'A'/'B' get a separating space.
    """

    def _replace_gj(name: str) -> str:
        # Spaced variants first so 'GJ ' matches before the bare 'GJ'.
        prefixes = ('GJ ', 'GL ', 'Gl ', 'GJ', 'GL', 'Gl')
        for prefix in prefixes:
            if name.startswith(prefix):
                # BUG FIX: replace the PREFIX only; the original used
                # str.replace, which also rewrote any later occurrence of
                # the prefix inside the name.
                return 'GJ ' + name[len(prefix):]
        return name

    def _format_binary(name: str) -> str:
        if name.endswith((' A', ' B')):
            return name
        if name.endswith(('A', 'B')):
            return name[:-1] + ' ' + name[-1]
        return name

    return [_format_binary(_replace_gj(name)) for name in names]
| 2.515625 | 3 |
tests/gsl.pkg/vector_set.py | PyreFramework/pyre | 0 | 12763138 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# <NAME>. aïvázis
# orthologue
# (c) 1998-2022 all rights reserved
#
"""
Exercise setting and getting individual vector elements
"""
def test():
    """Exercise setting and getting individual vector elements."""
    # package access
    import gsl
    # make a vector
    v = gsl.vector(shape=100)
    # fill with a test pattern
    for i in range(len(v)):
        v[i] = i
    # verify it happened
    assert v[50] == 50
    # BUG FIX: the original comparison result was discarded; assert that
    # negative indices reflect from the end of the vector.
    assert v[-99] == v[1]
    # all done
    return v
# main
if __name__ == "__main__":
    # run the exercise when invoked as a script
    test()

# end of file
| 3.046875 | 3 |
mani_skill_learn/utils/torch/misc.py | art-e-fact/ManiSkill-Learn | 39 | 12763139 | from functools import wraps
import numpy as np
import torch
from mani_skill_learn.utils.data import split_in_dict_array, concat_list_of_array
def disable_gradients(network):
    """Freeze all parameters of *network* so autograd skips them."""
    for parameter in network.parameters():
        parameter.requires_grad_(False)
def worker_init_fn(worker_id):
    """Seed numpy's RNG for a pytorch dataloader worker process.

    A base seed is drawn from the pytorch random generator so per-worker
    numpy seeding stays consistent with torch seeding. Please try to be
    consistent.

    References:
        https://pytorch.org/docs/stable/notes/faq.html#dataloader-workers-random-seed
    """
    base_seed = int(torch.IntTensor(1).random_().item())
    np.random.seed(base_seed + worker_id)
def no_grad(f):
    """Decorator that runs *f* with torch autograd disabled.

    BUG FIX: the original called ``wraps(f)`` without applying it to the
    wrapper, so the decorated function lost its name/docstring metadata.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        with torch.no_grad():
            return f(*args, **kwargs)

    return wrapper
def run_with_mini_batch(function, data, batch_size):
    """
    Run a pytorch function in mini-batches when the batch size of data is
    very large.
    :param function: the function
    :param data: the input data which should be in dict array structure
    :param batch_size: the batch_size of the mini-batch
    :return: all the outputs.
    """
    chunks = split_in_dict_array(data, batch_size, axis=0)
    outputs = [function(chunk) for chunk in chunks]
    return concat_list_of_array(outputs, axis=0)
| 2.671875 | 3 |
Deep_Crossing/modules.py | jingxiufenghua/rec-model | 1,323 | 12763140 | <filename>Deep_Crossing/modules.py
"""
Created on May 18, 2021
modules of Deep&Crossing: Residual units
@author: <NAME>(<EMAIL>)
"""
import tensorflow as tf
from tensorflow.keras.layers import Dense, ReLU, Layer
class Residual_Units(Layer):
    """Residual unit used by the Deep & Crossing network.

    Two dense layers project the input up to ``hidden_unit`` and back to
    ``dim_stack``; the input is added back before the final ReLU.
    """
    def __init__(self, hidden_unit, dim_stack):
        """
        :param hidden_unit: A list. Neural network hidden units.
        :param dim_stack: A scalar. The dimension of inputs unit.
        """
        super(Residual_Units, self).__init__()
        self.layer1 = Dense(units=hidden_unit, activation='relu')
        self.layer2 = Dense(units=dim_stack, activation=None)
        self.relu = ReLU()

    def call(self, inputs, **kwargs):
        residual = inputs
        projected = self.layer2(self.layer1(residual))
        return self.relu(projected + residual)
source/cloudsatlhr/acquisition_stack.py | JakeHendy/cloudsatlhr | 0 | 12763141 | from aws_cdk import (core )
from .ingestion_stack import IngestionStack
from .data_store_stack import DataStoreStack
class AcquisitionStack(core.Stage):
    """CDK stage bundling the data store and the ingestion pipeline."""

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # Create the data store first: the ingestion stack writes into
        # its table.
        self.datastore_stack = DataStoreStack(self, "DataStoreStack")
        self.ingestion_stack = IngestionStack(self, "IngestionStack", target_table=self.datastore_stack.table)
| 2.15625 | 2 |
melange/tests/test_propagators.py | dominicrufa/melange | 1 | 12763142 | """
test melange.propagators
"""
from jax import random
from jax import vmap
import jax.numpy as jnp
from melange.propagators import *
from melange.tests.utils import checker_function, get_nondefault_potential_initializer
import tqdm
import numpy as np
from jax.config import config; config.update("jax_enable_x64", True)
def test_1D_ULA_propagator(key = random.PRNGKey(0), num_runs=1000):
    """
    take a batch of particles distributed according to N(0,2), run dynamics with ULA for 100 steps with dt=0.01 on a potential whose invariant is N(0,2)
    and assert that the mean and variance are unchanged within a tolerance
    """
    key, genkey = random.split(key)
    # sample the starting batch directly from the invariant distribution
    potential, (mu, cov), dG = get_nondefault_potential_initializer(1)
    x_ula_starter = random.multivariate_normal(key = genkey, mean = mu, cov = cov, shape=[num_runs])
    dt=1e-2
    # vectorize the single-particle ULA update over the batch axis
    batch_ula_move = vmap(ULA_move, in_axes=(0, None, None, 0, None))
    potential_parameter = jnp.array([0.])
    for i in tqdm.trange(100):
        # one fresh PRNG key per particle per step
        key, ula_keygen = random.split(key, 2)
        ula_keys = random.split(ula_keygen, num_runs)
        x_ULA = batch_ula_move(x_ula_starter, potential, dt, ula_keys, potential_parameter)
        x_ula_starter = x_ULA

    ula_mean, ula_std = x_ula_starter.mean(), x_ula_starter.std()
    # the invariant is N(0, 2): mean ~ 0, std ~ sqrt(2), loose tolerance
    assert checker_function(ula_mean,0.2)
    assert checker_function(ula_std - jnp.sqrt(2), 0.2)
def test_1D_driven_propagator(key = random.PRNGKey(0), num_runs=1000):
    """
    take a batch of particles distributed according to N(0,2), run dynamics with the driven langevin algorithm for 100 steps with dt=0.01 on a potential whose invariant is N(0,2)
    and assert that the mean and variance are unchanged within a tolerance.
    """
    key, genkey = random.split(key)
    potential, (mu, cov), dG = get_nondefault_potential_initializer(1)
    x_driven_starter = random.multivariate_normal(key = genkey, mean = mu, cov = cov, shape=[num_runs])
    dt=1e-2

    # make dummy A and b functions: with zero drive terms the driven move
    # should reproduce plain Langevin dynamics
    def A(x, a_param): return jnp.zeros((x.shape[0], x.shape[0]))
    def b(x, b_param): return jnp.zeros(x.shape[0])

    batch_driver_move = vmap(driven_Langevin_move, in_axes=(0,None,None,None,None,None,None,None,0))
    potential_parameter = jnp.array([0.])
    for i in tqdm.trange(100):
        # one fresh PRNG key per particle per step
        key, drive_keygen = random.split(key, 2)
        drive_keys = random.split(drive_keygen, num_runs)
        x_drive = batch_driver_move(x_driven_starter,
                                    potential,
                                    dt,
                                    A,
                                    b,
                                    potential_parameter,
                                    jnp.array([0.]),
                                    jnp.array([0.]),
                                    drive_keys)
        x_driven_starter = x_drive

    driven_mean, driven_std = x_driven_starter.mean(), x_driven_starter.std()
    # the invariant is N(0, 2): mean ~ 0, std ~ sqrt(2), loose tolerance
    assert checker_function(driven_mean,0.2)
    assert checker_function(driven_std - jnp.sqrt(2), 0.2)
def test_1d_kernel_consistency(key = random.PRNGKey(0)):
    """
    with a 'dummy' driven forward kernel, assert that the log forward probability
    is equal to that of the ULA propagator in one dimension
    """
    from melange.propagators import generate_Euler_Maruyama_propagators, generate_driven_Langevin_propagators, Euler_Maruyama_log_proposal_ratio, driven_Langevin_log_proposal_ratio
    dt=0.1
    forward_potential_parameters= jnp.array([0.])
    backward_potential_parameters = jnp.array([0.])

    # make dummy A and b functions: with zero drive terms the driven
    # kernel should reduce to the Euler-Maruyama kernel
    def A(x, a_param): return jnp.zeros((x.shape[0], x.shape[0]))
    def b(x, b_param): return jnp.zeros(x.shape[0])

    potential, (mu, cov), dG = get_nondefault_potential_initializer(1)
    # two sample points: one start and one end position for the ratio
    xs = random.multivariate_normal(key = key, mean = jnp.array([1.]), cov = jnp.array([[1.]]), shape=[2])

    EM_propagator, EM_kernel = generate_Euler_Maruyama_propagators()
    driven_propagator, driven_kernel = generate_driven_Langevin_propagators()

    EM_logp_ratio = Euler_Maruyama_log_proposal_ratio(xs[0], xs[1], potential, forward_potential_parameters, dt, potential, backward_potential_parameters, dt)
    driven_logp_ratio = driven_Langevin_log_proposal_ratio(xs[0],
                                                           xs[1],
                                                           potential,
                                                           potential,
                                                           dt,
                                                           dt,
                                                           A,
                                                           b,
                                                           forward_potential_parameters,
                                                           backward_potential_parameters,
                                                           A_parameter = forward_potential_parameters,
                                                           b_parameter = forward_potential_parameters)
    # both kernels must agree on the log proposal ratio
    assert np.isclose(EM_logp_ratio, driven_logp_ratio)
def test_forward_ULA_driven_samplers(key = random.PRNGKey(0)):
    """
    given a randomization key, execute `forward_ULA_sampler` and `forward_driven_diffusion_sampler`
    with a time-independent potential that has the same mean and variance as the distribution of (5000) initial
    samples. We only assert that the statistics of the post-propagated samples obey the same statistics (within a tolerance).
    """
    from melange.propagators import forward_ULA_sampler, forward_driven_diffusion_sampler
    dt=0.1
    # 100 propagation steps, all with the same (zero) potential parameter
    potential_parameters= jnp.zeros((100,1))
    A_parameters = potential_parameters
    b_parameters = potential_parameters

    # make dummy A and b functions: zero drive terms
    def A(x, a_param): return jnp.zeros((x.shape[0], x.shape[0]))
    def b(x, b_param): return jnp.zeros(x.shape[0])

    potential, (mu, cov), dG = get_nondefault_potential_initializer(1)
    xs = random.multivariate_normal(key = key, mean = mu, cov = cov, shape=[5000])
    og_mean, og_variance = xs.mean(), xs.var()
    #print(og_mean, og_variance)

    ULA_trajs = forward_ULA_sampler(xs, potential, jnp.array([dt]*len(potential_parameters)), key, potential_parameters)
    #print(ULA_trajs[-1].mean(), ULA_trajs[-1].var())

    driven_trajs = forward_driven_diffusion_sampler(xs, potential, dt, key, A, b, potential_parameters, A_parameters, b_parameters)
    #print(driven_trajs[-1].mean(), driven_trajs[-1].var())

    # final samples should still have mean ~ 0 and variance ~ 2
    mean_tolerance = 0.2
    assert checker_function(ULA_trajs[-1].mean(), mean_tolerance)
    assert checker_function(driven_trajs[-1].mean(), mean_tolerance)

    variance_tolerance = 0.2
    assert checker_function(ULA_trajs[-1].var() - 2., variance_tolerance)
    assert checker_function(driven_trajs[-1].var()-2., variance_tolerance)
| 2.21875 | 2 |
lib/TmpGnmAnnApi/TmpGnmAnnApiClient.py | rsutormin/TmpGnmAnnApi | 0 | 12763143 | # -*- coding: utf-8 -*-
############################################################
#
# Autogenerated by the KBase type compiler -
# any changes made here will be overwritten
#
############################################################
from __future__ import print_function
# the following is a hack to get the baseclient to import whether we're in a
# package or not. This makes pep8 unhappy hence the annotations.
try:
# baseclient and this client are in a package
from .baseclient import BaseClient as _BaseClient # @UnusedImport
except:
# no they aren't
from baseclient import BaseClient as _BaseClient # @Reimport
class TmpGnmAnnApi(object):
def __init__(
self, url=None, timeout=30 * 60, user_id=None,
password=<PASSWORD>, token=<PASSWORD>, ignore_authrc=False,
trust_all_ssl_certificates=False,
auth_svc='https://kbase.us/services/authorization/Sessions/Login'):
if url is None:
raise ValueError('A url is required')
self._service_ver = None
self._client = _BaseClient(
url, timeout=timeout, user_id=user_id, password=password,
token=<PASSWORD>, ignore_authrc=ignore_authrc,
trust_all_ssl_certificates=trust_all_ssl_certificates,
auth_svc=auth_svc)
def get_combined_data(self, params, context=None):
"""
Retrieve any part of GenomeAnnotation. Please don't use this method in full mode (with all parts included) in cases
of large eukaryotic datasets. It may lead to out-of-memory errors.
:param params: instance of type "GetCombinedDataParams" (* Retrieve
any part of GenomeAnnotation. * Any of exclude_genes,
include_mrnas and exclude_cdss flags override values listed in
include_features_by_type.) -> structure: parameter "ref" of type
"ObjectReference", parameter "exclude_genes" of type "boolean" (A
boolean - 0 for false, 1 for true. @range (0, 1)), parameter
"include_mrnas" of type "boolean" (A boolean - 0 for false, 1 for
true. @range (0, 1)), parameter "exclude_cdss" of type "boolean"
(A boolean - 0 for false, 1 for true. @range (0, 1)), parameter
"include_features_by_type" of list of String, parameter
"exclude_protein_by_cds_id" of type "boolean" (A boolean - 0 for
false, 1 for true. @range (0, 1)), parameter
"include_mrna_ids_by_gene_id" of type "boolean" (A boolean - 0 for
false, 1 for true. @range (0, 1)), parameter
"exclude_cds_ids_by_gene_id" of type "boolean" (A boolean - 0 for
false, 1 for true. @range (0, 1)), parameter
"include_cds_id_by_mrna_id" of type "boolean" (A boolean - 0 for
false, 1 for true. @range (0, 1)), parameter
"include_exons_by_mrna_id" of type "boolean" (A boolean - 0 for
false, 1 for true. @range (0, 1)), parameter
"include_utr_by_utr_type_by_mrna_id" of type "boolean" (A boolean
- 0 for false, 1 for true. @range (0, 1)), parameter
"exclude_summary" of type "boolean" (A boolean - 0 for false, 1
for true. @range (0, 1))
:returns: instance of type "GenomeAnnotation_data" (gene_id is a
feature id of a gene feature. mrna_id is a feature id of a mrna
feature. cds_id is a feature id of a cds feature.) -> structure:
parameter "gene_type" of String, parameter "mrna_type" of String,
parameter "cds_type" of String, parameter "feature_types" of list
of String, parameter "feature_by_id_by_type" of mapping from
String to mapping from String to type "Feature_data" -> structure:
parameter "feature_id" of String, parameter "feature_type" of
String, parameter "feature_function" of String, parameter
"feature_aliases" of mapping from String to list of String,
parameter "feature_dna_sequence_length" of Long, parameter
"feature_dna_sequence" of String, parameter "feature_md5" of
String, parameter "feature_locations" of list of type "Region" ->
structure: parameter "contig_id" of String, parameter "strand" of
String, parameter "start" of Long, parameter "length" of Long,
parameter "feature_publications" of list of String, parameter
"feature_quality_warnings" of list of String, parameter
"feature_quality_score" of list of String, parameter
"feature_notes" of String, parameter "feature_inference" of
String, parameter "protein_by_cds_id" of mapping from String to
type "Protein_data" -> structure: parameter "protein_id" of
String, parameter "protein_amino_acid_sequence" of String,
parameter "protein_function" of String, parameter
"protein_aliases" of list of String, parameter "protein_md5" of
String, parameter "protein_domain_locations" of list of String,
parameter "mrna_ids_by_gene_id" of mapping from String to list of
String, parameter "cds_ids_by_gene_id" of mapping from String to
list of String, parameter "cds_id_by_mrna_id" of mapping from
String to String, parameter "exons_by_mrna_id" of mapping from
String to list of type "Exon_data" -> structure: parameter
"exon_location" of type "Region" -> structure: parameter
"contig_id" of String, parameter "strand" of String, parameter
"start" of Long, parameter "length" of Long, parameter
"exon_dna_sequence" of String, parameter "exon_ordinal" of Long,
parameter "utr_by_utr_type_by_mrna_id" of mapping from String to
mapping from String to type "UTR_data" -> structure: parameter
"utr_locations" of list of type "Region" -> structure: parameter
"contig_id" of String, parameter "strand" of String, parameter
"start" of Long, parameter "length" of Long, parameter
"utr_dna_sequence" of String, parameter "summary" of type
"Summary_data" -> structure: parameter "scientific_name" of
String, parameter "taxonomy_id" of Long, parameter "kingdom" of
String, parameter "scientific_lineage" of list of String,
parameter "genetic_code" of Long, parameter "organism_aliases" of
list of String, parameter "assembly_source" of String, parameter
"assembly_source_id" of String, parameter "assembly_source_date"
of String, parameter "gc_content" of Double, parameter "dna_size"
of Long, parameter "num_contigs" of Long, parameter "contig_ids"
of list of String, parameter "external_source" of String,
parameter "external_source_date" of String, parameter "release" of
String, parameter "original_source_filename" of String, parameter
"feature_type_counts" of mapping from String to Long
"""
return self._client.call_method(
'TmpGnmAnnApi.get_combined_data',
[params], self._service_ver, context)
def status(self, context=None):
return self._client.call_method('TmpGnmAnnApi.status',
[], self._service_ver, context)
| 2.125 | 2 |
distributed/tests/test_utils_comm.py | brinkar/distributed | 0 | 12763144 | <reponame>brinkar/distributed
from __future__ import print_function, division, absolute_import
import pytest
from distributed.utils_test import gen_cluster
from distributed.utils_comm import (scatter_to_workers, pack_data,
gather_from_workers)
def test_pack_data():
data = {'x': 1}
assert pack_data(('x', 'y'), data) == (1, 'y')
assert pack_data({'a': 'x', 'b': 'y'}, data) == {'a': 1, 'b': 'y'}
assert pack_data({'a': ['x'], 'b': 'y'}, data) == {'a': [1], 'b': 'y'}
@gen_cluster()
def test_gather_from_workers_permissive(s, a, b):
while not a.batched_stream:
yield gen.sleep(0.01)
a.update_data(data={'x': 1})
with pytest.raises(KeyError):
yield gather_from_workers({'x': [a.address], 'y': [b.address]})
data, missing, bad_workers = yield gather_from_workers(
{'x': [a.address], 'y': [b.address]}, permissive=True)
assert data == {'x': 1}
assert list(missing) == ['y']
| 2.265625 | 2 |
attack.py | carlobar/resilient-forecast | 0 | 12763145 | <reponame>carlobar/resilient-forecast
import numpy as np
import os
import time
import random
import forecast_lib as fl

# Models trained with dropout live under a separate directory suffix.
dropout = False
if dropout:
    type_exp = '_dropout'
else:
    type_exp = ''

# experiment parameters
directory = './experiments/models_diff_size' + type_exp + '/'
m = fl.num_meters
max_num_models = 20
m_d_frac = np.linspace(0.5, 1, 5)    # fractions of meters used by the defender's models
m_a_frac = np.linspace(0.1, 0.5, 5)  # fractions of meters the attacker compromises
reps = 20
unique_bias = True
strategic_attack = False
if strategic_attack:
    type_exp = 'strategic_' + type_exp
else:
    type_exp = '' + type_exp

impact = np.zeros((reps, len(m_d_frac), len(m_a_frac)))
pred_error = np.zeros((reps, len(m_d_frac), len(m_a_frac)))

for i in range(len(m_d_frac)):
    m_d = int(m * m_d_frac[i])
    print('m_d: ' + str(m_d))
    dir_models = directory + 'm_d_' + str(m_d) + '/'
    # BUG FIX: the original called os.makedirs(dir_rep) -- an undefined
    # name -- inside a bare `except: pass`, so the NameError was silently
    # swallowed and no directory was ever created. Presumably dir_models
    # was intended -- verify against upstream history.
    os.makedirs(dir_models, exist_ok=True)

    for j in range(len(m_a_frac)):
        m_a = int(m_a_frac[j] * m)
        print('\tm_a=' + str(m_a))
        t0 = time.perf_counter()
        for k in range(max_num_models):
            print('\t\tk=' + str(k))
            if strategic_attack:
                # attack only meters that the k-th model actually uses
                meters_model = np.load(dir_models + 'meters_' + str(k) + '.npy', allow_pickle=True)
                meters_a = random.sample(set(meters_model[0]), m_a)
            else:
                meters_a = random.sample(set(range(m)), m_a)

            y_test, hat_y, hat_y_a, bias_opt = fl.find_attack(dir_models, max_num_models, 1, meters_a, unique_bias)

            impact[k, i, j] = fl.MAE(hat_y, hat_y_a)       # attack impact on predictions
            pred_error[k, i, j] = fl.MAE(hat_y, y_test)    # baseline prediction error
        t_f = time.perf_counter()
        print('\t***Train time: ' + str((t_f - t0) / 60.0))

dir_results = './'
np.save(dir_results + 'impact' + type_exp + '.npy', impact)
np.save(dir_results + 'pred_error' + type_exp + '.npy', pred_error)
| 1.976563 | 2 |
examples/generative_models/dgmg/utils.py | jinyu-hou/dgl-lifesci | 1 | 12763146 | <reponame>jinyu-hou/dgl-lifesci<gh_stars>1-10
# -*- coding: utf-8 -*-
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import datetime
import math
import numpy as np
import os
import pickle
import random
import torch
import torch.distributed as dist
import torch.nn as nn
from collections import defaultdict
from datetime import timedelta
from dgl.data.utils import download, _get_dgl_url
from dgllife.model.model_zoo.dgmg import MoleculeEnv
from multiprocessing import Pool
from pprint import pprint
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.Crippen import MolLogP
from rdkit.Chem.QED import qed
from torch.utils.data import Dataset
from sascorer import calculateScore
########################################################################################################################
# configuration #
########################################################################################################################
def mkdir_p(path, log=True):
    """Create a directory (and missing parents) for the specified path.

    Parameters
    ----------
    path : str
        Path name
    log : bool
        Whether to print result for directory creation

    Raises
    ------
    OSError
        If creation fails for any reason other than the directory
        already existing.
    """
    import errno
    try:
        os.makedirs(path)
    except OSError as exc:
        # BUG FIX: the original only swallowed EEXIST when log was True, so
        # mkdir_p(path, log=False) raised OSError on an existing directory.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
        if log:
            print('Directory {} already exists.'.format(path))
    else:
        if log:
            print('Created directory {}'.format(path))
def get_date_postfix():
    """Get a date based postfix for directory name.

    Returns
    -------
    post_fix : str
        Formatted as "YYYY-MM-DD_hh-mm-ss".
    """
    now = datetime.datetime.now()
    return '{}_{:02d}-{:02d}-{:02d}'.format(now.date(), now.hour, now.minute, now.second)
def setup_log_dir(args):
    """Name and create directory for logging.

    Parameters
    ----------
    args : dict
        Configuration; must provide 'log_dir', 'dataset' and 'order'.

    Returns
    -------
    log_dir : str
        Path for logging directory
    """
    dir_name = '{}_{}_{}'.format(args['dataset'], args['order'], get_date_postfix())
    log_dir = os.path.join(args['log_dir'], dir_name)
    mkdir_p(log_dir)
    return log_dir
def save_arg_dict(args, filename='settings.txt'):
    """Save all experiment settings in a file.

    Parameters
    ----------
    args : dict
        Configuration; 'log_dir' determines where the file is written.
    filename : str
        Name for the file to save settings
    """
    def _format_value(v):
        # Floats get 4 decimals, ints plain formatting, everything else str().
        if isinstance(v, float):
            return '{:.4f}'.format(v)
        if isinstance(v, int):
            return '{:d}'.format(v)
        return '{}'.format(v)

    save_path = os.path.join(args['log_dir'], filename)
    lines = ['{}\t{}\n'.format(key, _format_value(value)) for key, value in args.items()]
    with open(save_path, 'w') as f:
        f.writelines(lines)
    print('Saved settings to {}'.format(save_path))
def configure(args):
    """Fill in the default hyperparameters.

    Existing keys with the same names are overwritten by the defaults.

    Parameters
    ----------
    args : dict
        Old configuration

    Returns
    -------
    args : dict
        The same dict, updated in place
    """
    defaults = {
        'node_hidden_size': 128,
        'num_propagation_rounds': 2,
        'lr': 1e-4,
        'dropout': 0.2,
        'nepochs': 400,
        'batch_size': 1,
    }
    args.update(defaults)
    return args
def set_random_seed(seed):
    """Fix the random, numpy and torch seeds for reproducible results.

    Parameters
    ----------
    seed : int
        Random seed to use.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
def setup_dataset(args):
    """Dataset setup.

    Datasets without built-in support are preprocessed via
    ``configure_new_dataset``.

    Parameters
    ----------
    args : dict
        Configuration
    """
    dataset = args['dataset']
    if dataset in ('ChEMBL', 'ZINC'):
        print('Built-in support for dataset {} exists.'.format(dataset))
    else:
        print('Configure for new dataset {}...'.format(dataset))
        configure_new_dataset(dataset, args['train_file'], args['val_file'])
def setup(args, train=True):
    """Prepare the configuration, logging directory and (for training) dataset.

    Parameters
    ----------
    args : argparse.Namespace
        Configuration
    train : bool
        Whether the setup is for training or evaluation

    Returns
    -------
    dict
        The fully-populated configuration
    """
    # Work on a plain dict copy of the namespace.
    args = args.__dict__.copy()
    args = configure(args)
    print('Prepare logging directory...')
    log_dir = setup_log_dir(args)
    args['log_dir'] = log_dir
    save_arg_dict(args)
    if train:
        setup_dataset(args)
    args['checkpoint_dir'] = os.path.join(log_dir, 'checkpoint.pth')
    pprint(args)
    return args
########################################################################################################################
# multi-process #
########################################################################################################################
def synchronize(num_processes):
    """Synchronize all processes.

    A no-op when running with a single process.

    Parameters
    ----------
    num_processes : int
        Number of subprocesses used
    """
    if num_processes <= 1:
        return
    dist.barrier()
def launch_a_process(rank, args, target, minutes=720):
    """Launch a subprocess for training.

    Joins the gloo process group, checks the assigned rank, and then runs
    ``target(rank, args)``.

    Parameters
    ----------
    rank : int
        Subprocess id
    args : dict
        Configuration; must provide 'master_ip', 'master_port' and
        'num_processes'
    target : callable
        Target function for the subprocess, invoked as target(rank, args)
    minutes : int
        Timeout minutes for operations executed against the process group
    """
    # Rendezvous over TCP at the master address.
    dist_init_method = 'tcp://{master_ip}:{master_port}'.format(
        master_ip=args['master_ip'], master_port=args['master_port'])
    dist.init_process_group(backend='gloo',
                            init_method=dist_init_method,
                            # If you have a larger dataset, you will need to increase it.
                            timeout=timedelta(minutes=minutes),
                            world_size=args['num_processes'],
                            rank=rank)
    assert torch.distributed.get_rank() == rank
    target(rank, args)
########################################################################################################################
# optimization #
########################################################################################################################
class Optimizer(nn.Module):
    """Wrapper around a torch optimizer with one-call updates and LR decay.

    Parameters
    ----------
    lr : float
        Initial learning rate
    optimizer
        model optimizer
    """
    def __init__(self, lr, optimizer):
        super(Optimizer, self).__init__()
        self.lr = lr
        self.optimizer = optimizer
        self._reset()

    def _reset(self):
        """Clear any accumulated gradients."""
        self.optimizer.zero_grad()

    def backward_and_step(self, loss):
        """Backpropagate, update the model, and clear gradients.

        Parameters
        ----------
        loss : torch.tensor consisting of a float only
        """
        loss.backward()
        self.optimizer.step()
        self._reset()

    def decay_lr(self, decay_rate=0.99):
        """Decay learning rate.

        Parameters
        ----------
        decay_rate : float
            Multiply the current learning rate by the decay_rate
        """
        self.lr = self.lr * decay_rate
        for group in self.optimizer.param_groups:
            group['lr'] = self.lr
class MultiProcessOptimizer(Optimizer):
    """Wrapper for optimization with multiprocess

    Gradients are averaged across all processes before each update, so
    every process applies the same step.

    Parameters
    ----------
    n_processes : int
        Number of processes used
    lr : float
        Initial learning rate
    optimizer
        model optimizer
    """
    def __init__(self, n_processes, lr, optimizer):
        super(MultiProcessOptimizer, self).__init__(lr=lr, optimizer=optimizer)
        self.n_processes = n_processes
    def _sync_gradient(self):
        """Average gradients across all subprocesses."""
        for param_group in self.optimizer.param_groups:
            for p in param_group['params']:
                if p.requires_grad and p.grad is not None:
                    # Sum each gradient across processes, then divide to average.
                    dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
                    p.grad.data /= self.n_processes
    def backward_and_step(self, loss):
        """Backward, synchronize gradients across processes and update model.

        Parameters
        ----------
        loss : torch.tensor consisting of a float only
        """
        loss.backward()
        self._sync_gradient()
        self.optimizer.step()
        self._reset()
########################################################################################################################
# data #
########################################################################################################################
def initialize_neuralization_reactions():
    """Reference neuralization reactions

    Returns a list of (SMARTS pattern, replacement fragment) pairs used by
    ``neutralize_charges`` to deprotonate charged substructures.

    Code adapted from RDKit Cookbook, by <NAME>.
    """
    # (charged substructure SMARTS, neutral replacement SMILES)
    patts = (
        # Imidazoles
        ('[n+;H]', 'n'),
        # Amines
        ('[N+;!H0]', 'N'),
        # Carboxylic acids and alcohols
        ('[$([O-]);!$([O-][#7])]', 'O'),
        # Thiols
        ('[S-;X1]', 'S'),
        # Sulfonamides
        ('[$([N-;X2]S(=O)=O)]', 'N'),
        # Enamines
        ('[$([N-;X2][C,N]=C)]', 'N'),
        # Tetrazoles
        ('[n-]', '[n]'),
        # Sulfoxides
        ('[$([S-]=O)]', 'S'),
        # Amides
        ('[$([N-]C=O)]', 'N'),
    )
    # Pre-compile: SMARTS for matching, un-sanitized SMILES for the replacement.
    return [(Chem.MolFromSmarts(x), Chem.MolFromSmiles(y, False)) for x, y in patts]
def neutralize_charges(mol, reactions=None):
    """Deprotonate charged substructures in a molecule.

    Code adapted from RDKit Cookbook, by <NAME>.

    DGMG currently cannot generate protonated molecules. For example, it can
    only generate CC(C)(C)CC1CCC[NH+]1Cc1nnc(-c2ccccc2F)o1 from
    CC(C)(C)CC1CCCN1Cc1nnc(-c2ccccc2F)o1 even with correct decisions.
    Deprotonation is therefore an important step to avoid false novel
    molecules.

    Parameters
    ----------
    mol : Chem.rdchem.Mol
    reactions : list of 2-tuples or None
        (pattern, replacement) rules for deprotonation; defaults to
        ``initialize_neuralization_reactions()``.

    Returns
    -------
    mol : Chem.rdchem.Mol
        Deprotonated molecule
    """
    if reactions is None:
        reactions = initialize_neuralization_reactions()
    for reactant, product in reactions:
        # Apply each rule repeatedly until the pattern no longer matches.
        while mol.HasSubstructMatch(reactant):
            mol = AllChem.ReplaceSubstructs(mol, reactant, product)[0]
    return mol
def standardize_mol(mol):
    """Kekulize and deprotonate a molecule to avoid false novel molecules.

    In addition to deprotonation, molecules are kekulized to avoid explicit
    Hs in the SMILES; otherwise we would also get false novel molecules.
    For example, DGMG can only generate
    O=S(=O)(NC1=CC=CC(C(F)(F)F)=C1)C1=CNC=N1 from
    O=S(=O)(Nc1cccc(C(F)(F)F)c1)c1c[nH]cn1.
    One downside is that all explicit aromatic rings are removed, and
    predicting aromatic bonds explicitly might make learning easier for
    the model.
    """
    Chem.Kekulize(mol, clearAromaticFlags=True)
    return neutralize_charges(mol, initialize_neuralization_reactions())
def smiles_to_standard_mol(s):
    """Parse a SMILES string and return the standardized molecule.

    Parameters
    ----------
    s : str
        SMILES

    Returns
    -------
    Chem.rdchem.Mol
        Standardized molecule
    """
    return standardize_mol(Chem.MolFromSmiles(s))
def mol_to_standard_smile(mol):
    """Standardize a molecule and convert it to a SMILES.

    Parameters
    ----------
    mol : Chem.rdchem.Mol

    Returns
    -------
    str
        SMILES
    """
    standardized = standardize_mol(mol)
    return Chem.MolToSmiles(standardized)
def get_atom_and_bond_types(smiles, log=True):
    """Identify the atom types and bond types appearing in this dataset.

    SMILES that fail to parse are skipped.

    Parameters
    ----------
    smiles : list
        List of smiles
    log : bool
        Whether to print the process of pre-processing.

    Returns
    -------
    atom_types : list
        E.g. ['C', 'N']
    bond_types : list
        E.g. [Chem.rdchem.BondType.SINGLE, Chem.rdchem.BondType.DOUBLE,
        Chem.rdchem.BondType.TRIPLE, Chem.rdchem.BondType.AROMATIC]
    """
    atom_types = set()
    bond_types = set()
    total = len(smiles)
    for idx, s in enumerate(smiles):
        if log:
            print('Processing smiles {:d}/{:d}'.format(idx + 1, total))
        mol = smiles_to_standard_mol(s)
        if mol is None:
            continue
        atom_types.update(atom.GetSymbol() for atom in mol.GetAtoms())
        bond_types.update(bond.GetBondType() for bond in mol.GetBonds())
    return list(atom_types), list(bond_types)
def eval_decisions(env, decisions):
    """This function mimics the way DGMG generates a molecule and is
    helpful for debugging and verification in data preprocessing.

    Each decision is a 2-tuple (decision_type, value) with decision_type
    0 (whether/which atom to add), 1 (whether/which bond to add) or
    2 (bond destination atom). The value len(env.atom_types) is the stop
    sentinel for atom additions and len(env.bond_types) for bond additions.

    Parameters
    ----------
    env : MoleculeEnv
        MDP environment for generating molecules
    decisions : list of 2-tuples of int
        A decision sequence for generating a molecule

    Returns
    -------
    str
        SMILES for the molecule generated with decisions
    """
    env.reset(rdkit_mol=True)
    # t is the cursor into `decisions`; each helper consumes one decision.
    t = 0
    def whether_to_add_atom(t):
        assert decisions[t][0] == 0
        atom_type = decisions[t][1]
        t += 1
        return t, atom_type
    def whether_to_add_bond(t):
        assert decisions[t][0] == 1
        bond_type = decisions[t][1]
        t += 1
        return t, bond_type
    def decide_atom2(t):
        assert decisions[t][0] == 2
        dst = decisions[t][1]
        t += 1
        return t, dst
    t, atom_type = whether_to_add_atom(t)
    while atom_type != len(env.atom_types):
        env.add_atom(atom_type)
        t, bond_type = whether_to_add_bond(t)
        while bond_type != len(env.bond_types):
            t, dst = decide_atom2(t)
            # Bonds are always added from the most recently added atom.
            env.add_bond((env.num_atoms() - 1), dst, bond_type)
            t, bond_type = whether_to_add_bond(t)
        t, atom_type = whether_to_add_atom(t)
    # The whole decision sequence must have been consumed.
    assert t == len(decisions)
    return env.get_current_smiles()
def get_DGMG_smile(env, mol):
    """Reproduce a molecule's SMILES the way DGMG would generate it.

    Given a molecule, we are interested in what SMILES we will get if we
    generate it with DGMG -- an important check for false novel molecules.

    Parameters
    ----------
    env : MoleculeEnv
        MDP environment for generating molecules
    mol : Chem.rdchem.Mol
        A molecule

    Returns
    -------
    canonical_smile : str
        SMILES of the generated molecule with a canonical decision sequence
    random_smile : str
        SMILES of the generated molecule with a random decision sequence
    """
    n_atoms = mol.GetNumAtoms()
    canonical_order = list(range(n_atoms))
    canonical_smile = eval_decisions(env, env.get_decision_sequence(mol, canonical_order))
    shuffled_order = list(range(n_atoms))
    random.shuffle(shuffled_order)
    random_smile = eval_decisions(env, env.get_decision_sequence(mol, shuffled_order))
    return canonical_smile, random_smile
def preprocess_dataset(atom_types, bond_types, smiles, max_num_atoms=200):
    """Preprocess the dataset

    1. Standardize the SMILES of the dataset
    2. Only keep the SMILES that DGMG can reproduce
    3. Drop repeated SMILES

    Parameters
    ----------
    atom_types : list
        The types of atoms appearing in a dataset. E.g. ['C', 'N']
    bond_types : list
        The types of bonds appearing in a dataset.
        E.g. [Chem.rdchem.BondType.SINGLE, Chem.rdchem.BondType.DOUBLE,
        Chem.rdchem.BondType.TRIPLE, Chem.rdchem.BondType.AROMATIC]
    smiles : list of str
        Raw SMILES to preprocess
    max_num_atoms : int or None
        Molecules with more atoms are dropped; None disables the limit

    Returns
    -------
    valid_smiles : list of str
        SMILES left after preprocessing
    """
    valid_smiles = []
    env = MoleculeEnv(atom_types, bond_types)
    for id, s in enumerate(smiles):
        print('Processing {:d}/{:d}'.format(id + 1, len(smiles)))
        raw_s = s.strip()
        mol = smiles_to_standard_mol(raw_s)
        if mol is None:
            # Unparsable SMILES are dropped.
            continue
        standard_s = Chem.MolToSmiles(mol)
        if (max_num_atoms is not None) and (mol.GetNumAtoms() > max_num_atoms):
            continue
        canonical_s, random_s = get_DGMG_smile(env, mol)
        canonical_mol = Chem.MolFromSmiles(canonical_s)
        random_mol = Chem.MolFromSmiles(random_s)
        # NOTE(review): the reproducibility filter below (docstring step 2)
        # is disabled, so SMILES that DGMG cannot reproduce are currently
        # kept, even though canonical_s/random_s are still computed above.
        # if (standard_s != canonical_s) or (canonical_s != random_s) or (canonical_mol is None) or (random_mol is None):
        #     continue
        valid_smiles.append(standard_s)
    # Step 3: drop duplicates (order is not preserved).
    valid_smiles = list(set(valid_smiles))
    print(f"valid smiles: {len(valid_smiles)}\n")
    return valid_smiles
def download_data(dataset, fname):
    """Download dataset if built-in support exists

    NOTE(review): the actual download call below is commented out, so this
    function currently assumes `fname` already exists locally.

    Parameters
    ----------
    dataset : str
        Dataset name
    fname : str
        Name of dataset file
    """
    if dataset not in ['ChEMBL', 'ZINC']:
        # For dataset without built-in support, they should be locally processed.
        return
    data_path = fname
    # download(_get_dgl_url(os.path.join('dataset', fname)), path=data_path)
def load_smiles_from_file(f_name):
    """Load dataset into a list of SMILES

    Parameters
    ----------
    f_name : str
        Path to a file of molecules, where each line of the file
        is a molecule in SMILES format.

    Returns
    -------
    list of str
        List of molecules as SMILES
    """
    with open(f_name, 'r') as handle:
        return handle.read().splitlines()
def write_smiles_to_file(f_name, smiles):
    """Write dataset to a file.

    Parameters
    ----------
    f_name : str
        Path to create a file of molecules, where each line of the file
        is a molecule in SMILES format.
    smiles : list of str
        List of SMILES
    """
    with open(f_name, 'w') as handle:
        handle.writelines(s + '\n' for s in smiles)
def configure_new_dataset(dataset, train_file, val_file):
    """Configure for a new dataset.

    Identifies atom/bond types and standardizes the SMILES for the training
    and validation splits. Results are cached on disk (a pickle of the types
    and one text file per split, named after `dataset`), so repeated calls
    reuse the cached files instead of recomputing.

    Parameters
    ----------
    dataset : str
        Dataset name
    train_file : str
        Path to a file with one SMILES a line for training data
    val_file : str
        Path to a file with one SMILES a line for validation data
    """
    assert train_file is not None, 'Expect a file of SMILES for training, got None.'
    assert val_file is not None, 'Expect a file of SMILES for validation, got None.'
    train_smiles = load_smiles_from_file(train_file)
    print(f"loaded train smiles: {len(train_smiles)}\n")
    val_smiles = load_smiles_from_file(val_file)
    print(f"loaded val smiles: {len(val_smiles)}\n")
    all_smiles = train_smiles + val_smiles
    # Get all atom and bond types in the dataset
    path_to_atom_and_bond_types = '_'.join([dataset, 'atom_and_bond_types.pkl'])
    if not os.path.exists(path_to_atom_and_bond_types):
        atom_types, bond_types = get_atom_and_bond_types(all_smiles)
        with open(path_to_atom_and_bond_types, 'wb') as f:
            pickle.dump({'atom_types': atom_types, 'bond_types': bond_types}, f)
    else:
        # Reuse the cached type information.
        with open(path_to_atom_and_bond_types, 'rb') as f:
            type_info = pickle.load(f)
        atom_types = type_info['atom_types']
        bond_types = type_info['bond_types']
    # Standardize training data
    path_to_processed_train_data = '_'.join([dataset, 'DGMG', 'train.txt'])
    if not os.path.exists(path_to_processed_train_data):
        # max_num_atoms=None: no molecule-size filtering for user datasets.
        processed_train_smiles = preprocess_dataset(atom_types, bond_types, train_smiles, None)
        write_smiles_to_file(path_to_processed_train_data, processed_train_smiles)
    path_to_processed_val_data = '_'.join([dataset, 'DGMG', 'val.txt'])
    if not os.path.exists(path_to_processed_val_data):
        processed_val_smiles = preprocess_dataset(atom_types, bond_types, val_smiles, None)
        write_smiles_to_file(path_to_processed_val_data, processed_val_smiles)
class MoleculeDataset(object):
    """Initialize and split the dataset.

    Parameters
    ----------
    dataset : str
        Dataset name
    order : None or str
        Order to extract a decision sequence for generating a molecule. Default to be None.
    modes : None or list
        List of subsets to use, which can contain 'train', 'val', corresponding to
        training and validation. Default to be None.
    subset_id : int
        With multiprocess training, we partition the training set into multiple subsets and
        each process will use one subset only. This subset_id corresponds to subprocess id.
    n_subsets : int
        With multiprocess training, this corresponds to the number of total subprocesses.
    """
    def __init__(self, dataset, order=None, modes=None, subset_id=0, n_subsets=1):
        super(MoleculeDataset, self).__init__()
        if modes is None:
            modes = []
        else:
            assert order is not None, 'An order should be specified for extracting ' \
                                      'decision sequences.'
        assert order in ['random', 'canonical', None], \
            "Unexpected order option to get sequences of graph generation decisions"
        assert len(set(modes) - {'train', 'val'}) == 0, \
            "modes should be a list, representing a subset of ['train', 'val']"
        self.dataset = dataset
        self.order = order
        self.modes = modes
        self.subset_id = subset_id
        self.n_subsets = n_subsets
        self._setup()
    def collate(self, samples):
        """PyTorch's approach to batch multiple samples.

        For auto-regressive generative models, we process one sample at a time.

        Parameters
        ----------
        samples : list
            A list of length 1 that consists of decision sequence to generate a molecule.

        Returns
        -------
        list
            List of 2-tuples, a decision sequence to generate a molecule
        """
        assert len(samples) == 1
        return samples[0]
    def _create_a_subset(self, smiles):
        """Create a dataset from a subset of smiles.

        Parameters
        ----------
        smiles : list of str
            List of molecules in SMILES format
        """
        # We evenly divide the smiles into multiple subsets with multiprocess;
        # this process gets the slice corresponding to its subset_id.
        subset_size = len(smiles) // self.n_subsets
        return Subset(smiles[self.subset_id * subset_size: (self.subset_id + 1) * subset_size],
                      self.order, self.env)
    def _setup(self):
        """
        1. Instantiate an MDP environment for molecule generation
        2. Download the dataset, which is a file of SMILES
        3. Create subsets for training and validation
        """
        if self.dataset == 'ChEMBL':
            # For new datasets, get_atom_and_bond_types can be used to
            # identify the atom and bond types in them.
            self.atom_types = ['O', 'Cl', 'C', 'S', 'F', 'Br', 'N']
            self.bond_types = [Chem.rdchem.BondType.SINGLE,
                               Chem.rdchem.BondType.DOUBLE,
                               Chem.rdchem.BondType.TRIPLE]
        elif self.dataset == 'ZINC':
            self.atom_types = ['Br', 'S', 'C', 'P', 'N', 'O', 'F', 'Cl', 'I']
            self.bond_types = [Chem.rdchem.BondType.SINGLE,
                               Chem.rdchem.BondType.DOUBLE,
                               Chem.rdchem.BondType.TRIPLE]
        else:
            # Custom dataset: type info was cached by configure_new_dataset.
            path_to_atom_and_bond_types = '_'.join([self.dataset, 'atom_and_bond_types.pkl'])
            with open(path_to_atom_and_bond_types, 'rb') as f:
                type_info = pickle.load(f)
            self.atom_types = type_info['atom_types']
            self.bond_types = type_info['bond_types']
        self.env = MoleculeEnv(self.atom_types, self.bond_types)
        dataset_prefix = self._dataset_prefix()
        if 'train' in self.modes:
            fname = '_'.join([dataset_prefix, 'train.txt'])
            download_data(self.dataset, fname)
            smiles = load_smiles_from_file(fname)
            print(f"unprocessed smiles train: {len(smiles)}\n")
            self.train_set = self._create_a_subset(smiles)
        if 'val' in self.modes:
            fname = '_'.join([dataset_prefix, 'val.txt'])
            download_data(self.dataset, fname)
            smiles = load_smiles_from_file(fname)
            print(f"unprocessed smiles val: {len(smiles)}\n")
            # We evenly divide the smiles into multiple subsets with multiprocess
            self.val_set = self._create_a_subset(smiles)
    def _dataset_prefix(self):
        """Get the prefix for the data files of supported datasets.

        Returns
        -------
        str
            Prefix for dataset file name
        """
        return '_'.join([self.dataset, 'DGMG'])
class Subset(Dataset):
    """A set of molecules which can be used for training, validation, test.

    Parameters
    ----------
    smiles : list
        List of SMILES for the dataset
    order : str
        Specifies how decision sequences for molecule generation
        are obtained, can be either "random" or "canonical"
    env : MoleculeEnv object
        MDP environment for generating molecules
    """
    def __init__(self, smiles, order, env):
        super(Subset, self).__init__()
        self.smiles = smiles
        self.order = order
        self.env = env
        self._setup()
    def _setup(self):
        """Convert SMILES into rdkit molecule objects.

        Unparsable SMILES are dropped. Decision sequences are pre-computed
        only for the fixed "canonical" order; "random" order sequences are
        sampled lazily in ``__getitem__``.
        """
        valid_smiles = []
        mols = []
        for s in self.smiles:
            m = smiles_to_standard_mol(s)
            if m is None:
                continue
            valid_smiles.append(s)
            mols.append(m)
        print(f"processed smiles: {len(valid_smiles)}\n")
        self.smiles = valid_smiles
        self.mols = mols
        # BUG FIX: the original used `self.order is 'random'`, an identity
        # comparison against a string literal, which is fragile and a
        # SyntaxWarning on Python >= 3.8. Use equality instead.
        if self.order == 'random':
            return
        self.decisions = []
        for m in self.mols:
            self.decisions.append(
                self.env.get_decision_sequence(m, list(range(m.GetNumAtoms())))
            )
    def __len__(self):
        """Get number of molecules in the dataset."""
        return len(self.mols)
    def __getitem__(self, item):
        """Get the decision sequence for generating the molecule indexed by item."""
        if self.order == 'canonical':
            return self.decisions[item]
        else:
            m = self.mols[item]
            nodes = list(range(m.GetNumAtoms()))
            random.shuffle(nodes)
            return self.env.get_decision_sequence(m, nodes)
########################################################################################################################
# progress tracking #
########################################################################################################################
class Printer(object):
    def __init__(self, num_epochs, dataset_size, batch_size, writer=None):
        """Wrapper to track the learning progress.

        Parameters
        ----------
        num_epochs : int
            Number of epochs for training
        dataset_size : int
        batch_size : int
        writer : None or SummaryWriter
            If not None, tensorboard will be used to visualize learning curves.
        """
        super(Printer, self).__init__()
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.num_batches = math.ceil(dataset_size / batch_size)
        self.count = 0
        self.batch_count = 0
        self.writer = writer
        self._reset()

    def _reset(self):
        """Clear the per-batch accumulators."""
        self.batch_loss = 0
        self.batch_prob = 0

    def _get_current_batch(self):
        """Return the 1-based index of the current batch within the epoch."""
        remainder = self.batch_count % self.num_batches
        return self.num_batches if remainder == 0 else remainder

    def update(self, epoch, loss, prob):
        """Accumulate one sample and report when a full batch is complete.

        Parameters
        ----------
        epoch : int
        loss : float
        prob : float
        """
        self.count += 1
        self.batch_loss += loss
        self.batch_prob += prob
        if self.count % self.batch_size != 0:
            return
        self.batch_count += 1
        if self.writer is not None:
            self.writer.add_scalar('train_log_prob', self.batch_loss, self.batch_count)
            self.writer.add_scalar('train_prob', self.batch_prob, self.batch_count)
        print('epoch {:d}/{:d}, batch {:d}/{:d}, loss {:.4f}, prob {:.4f}'.format(
            epoch, self.num_epochs, self._get_current_batch(),
            self.num_batches, self.batch_loss, self.batch_prob))
        self._reset()
########################################################################################################################
# eval #
########################################################################################################################
def summarize_a_molecule(smile, checklist=None):
    """Get information about a molecule.

    Parameters
    ----------
    smile : str
        Molecule in SMILES format
    checklist : dict or None
        Maps an item name to a callable computing it from a molecule;
        defaults to common descriptors (HBA, HBD, logP, SA, TPSA, QED,
        atom and bond counts).

    Returns
    -------
    dict
        Contains 'smile', a 'valid' flag, and one entry per checklist
        item (None for every item when the SMILES cannot be parsed).
    """
    if checklist is None:
        checklist = {
            'HBA': Chem.rdMolDescriptors.CalcNumHBA,
            'HBD': Chem.rdMolDescriptors.CalcNumHBD,
            'logP': MolLogP,
            'SA': calculateScore,
            'TPSA': Chem.rdMolDescriptors.CalcTPSA,
            'QED': qed,
            'NumAtoms': lambda mol: mol.GetNumAtoms(),
            'NumBonds': lambda mol: mol.GetNumBonds()
        }
    summary = dict()
    mol = Chem.MolFromSmiles(smile)
    if mol is None:
        # Unparsable SMILES: mark invalid, fill every item with None.
        summary.update({
            'smile': smile,
            'valid': False
        })
        for k in checklist.keys():
            summary[k] = None
    else:
        mol = standardize_mol(mol)
        summary.update({
            'smile': Chem.MolToSmiles(mol),
            'valid': True
        })
        Chem.SanitizeMol(mol)
        for k, f in checklist.items():
            summary[k] = f(mol)
    return summary
def summarize_molecules(smiles, num_processes):
    """Summarize molecules with multiprocess.

    Parameters
    ----------
    smiles : list of str
        List of molecules in SMILES for summarization
    num_processes : int
        Number of processes to use for summarization

    Returns
    -------
    summary_for_valid : dict
        Summary of all valid molecules, where summary_for_valid[k] gives
        the values of all valid molecules on item k.
    """
    with Pool(processes=num_processes) as pool:
        summaries = pool.map(summarize_a_molecule, smiles)
    # Collect every summarized item except the 'valid' flag itself.
    keys = [k for k in summaries[0] if k != 'valid']
    summary_for_valid = defaultdict(list)
    for summary in summaries:
        if not summary['valid']:
            continue
        for k in keys:
            summary_for_valid[k].append(summary[k])
    return summary_for_valid
def get_unique_smiles(smiles):
    """Given a list of smiles, return a list consisting of unique elements in it.

    Parameters
    ----------
    smiles : list of str
        Molecules in SMILES

    Returns
    -------
    list of str
        Sublist where each SMILES occurs exactly once (order unspecified,
        matching the original set-based behavior)
    """
    # The original hand-rolled set construction with a loop and membership
    # tests; set() is the idiomatic, equivalent form.
    return list(set(smiles))
def get_novel_smiles(new_unique_smiles, reference_unique_smiles):
    """Get novel smiles which do not appear in the reference set.

    Parameters
    ----------
    new_unique_smiles : list of str
        List of SMILES from which we want to identify novel ones
    reference_unique_smiles : list of str
        List of reference SMILES that we already have

    Returns
    -------
    set of str
        SMILES present in the new list but not in the reference list
    """
    return set(new_unique_smiles) - set(reference_unique_smiles)
# if __name__ == "__main__":
# test_smiles = ["FC(=[Si](OOOOOOOOOI)OOOOO[Se]OOOOOOOOOC1(F)OOOSSSS[Se][Si](F)(Cl)S1)[Si](Br)(Br)I"]
# test_smiles = ["C#CC1C2C(C(C#CC3NC4CC5NC(C#CC6C7C(CC8C6C(=O)[SI](C)(C)C8=O)C(=O)N(C)C7=O)N(C)C5CC4N3C)C3C1C(=O)[SI](C)(C)C3=O)C(=O)N(C)C2=O"]
# test_smiles = ["Cc1c2cc(-c3c4c(cn3C)C(=O)C(C)(C)C4=O)sc2c(C)c2cc(-c3c4c(cc5c3C(=O)C(C)(C)C5=O)C(=O)N(C)C4=O)sc12"]
# atom_types, bond_types = get_atom_and_bond_types(test_smiles)
# print(atom_types, bond_types) | 1.84375 | 2 |
generators.py | xianqiu/math-homework | 1 | 12763147 | <filename>generators.py
# Public API: problem generators exported via `from generators import *`.
# MathL19 through MathL28 are defined further down in this module.
__all__ = ['MathL1', 'MathL2', 'MathL3', 'MathL4',
           'MathL5', 'MathL6', 'MathL7', 'MathL8',
           'MathL9', 'MathL10', 'MathL11', 'MathL12',
           'MathL13', 'MathL14', 'MathL15', 'MathL16',
           'MathL17', 'MathL18', 'MathL19', 'MathL20',
           'MathL21', 'MathL22', 'MathL23', 'MathL24',
           'MathL25', 'MathL26', 'MathL27', 'MathL28'
           ]
import numpy as np
def _to_result(arr, ops, wrap=True, skip=None):
"""
把公式格式化成字符串
:param arr: 二维数组,每一行代表公式的数字,例如 [a, b]
:param ops: 二维数组,每一行代表公式的操作,例如 [+, =]
:param wrap: 自动加括号,例如 a + (-b)
:return: str list,例如 ['a1 + b1 = ', 'a2 + b2 = ']
"""
if skip is None:
skip = {}
res = []
for row, op in zip(arr, ops):
comb = []
for i in range(len(row)):
if wrap and row[i] < 0 and i not in skip:
comb.append('(' + str(int(row[i])) + ')')
else:
comb.append(str(int(row[i])))
if i < len(op):
comb.append(op[i])
res.append(' '.join(comb))
return res
def _gen_add_arr(lb, ub, k, num):
""" 生成二维数组。
1、每行 k 个数,用来相加,绝对值不大于 ub。
2、数组一共 num 行。
3、每个数不超过 ub,不低于lb。
"""
arr = np.random.randint(lb, ub, (num, k))
for i in range(len(arr)):
if abs(sum(arr[i])) > ub:
arr[i] = np.floor(arr[i] / k)
return arr
class MathL1(object):
    """Addition problems: a + b."""

    def __init__(self, ub=20):
        self._ub = ub

    def generate(self, num):
        """Return `num` problems of the form "a + b =" as strings."""
        operands = _gen_add_arr(0, self._ub, 2, num)
        return _to_result(operands, [['+', '=']] * num)
class MathL2(object):
    """Subtraction problems with a non-negative result: a - b."""

    def __init__(self, ub=20):
        self._ub = ub

    def generate(self, num):
        """Return `num` problems of the form "a - b =" with a >= b."""
        operands = np.random.randint(0, self._ub, (num, 2))
        for row in operands:
            if row[0] < row[1]:
                # Swap so the minuend is the larger operand.
                row[0], row[1] = row[1], row[0]
        return _to_result(operands, [['-', '=']] * num)
class MathL3(object):
    """Mixed problems: a + b or a - b (non-negative result)."""

    def __init__(self, ub=20):
        self._m1 = MathL1(ub)
        self._m2 = MathL2(ub)

    def generate(self, num):
        """Sample `num` problems from a pool of additions and subtractions."""
        pool = self._m1.generate(num) + self._m2.generate(num)
        picks = np.random.randint(0, len(pool), num)
        return [pool[p] for p in picks]
class MathL4(object):
    """Chained addition: a + b + c."""

    def __init__(self, ub=30):
        self._ub = ub

    def generate(self, num):
        """Return `num` problems of the form "a + b + c =" as strings."""
        operands = _gen_add_arr(0, self._ub, 3, num)
        return _to_result(operands, [['+', '+', '=']] * num)
class MathL5(object):
    """Chained subtraction with a non-negative result: a - b - c."""

    def __init__(self, ub=30):
        # FIX: a default was added for consistency with every sibling class
        # (MathL4(ub=30), MathL6(ub=30), ...); positional callers are unaffected.
        self._ub = ub

    def generate(self, num):
        """Return `num` problems of the form "a - b - c =" with a >= b + c."""
        arr = np.random.randint(0, self._ub, (num, 2))
        third = [0] * num
        for i in range(len(arr)):
            if arr[i][0] < arr[i][1]:
                # Swap so the minuend is the larger operand.
                arr[i] = [arr[i][1], arr[i][0]]
            diff = arr[i][0] - arr[i][1]
            # BUG FIX: the original drew the third operand only inside the
            # swap branch, so every already-ordered row produced the trivial
            # problem "a - b - 0". Draw it for all rows with a valid range.
            third[i] = np.random.randint(0, diff) if diff > 0 else 0
        arr = np.insert(arr, 2, values=third, axis=1)
        return _to_result(arr, [['-', '-', '=']] * num)
class MathL6(object):
    """Chained addition a + b + c or chained subtraction a - b - c (non-negative)."""

    def __init__(self, ub=30):
        self._m4 = MathL4(ub)
        self._m5 = MathL5(ub)

    def generate(self, num):
        """Sample `num` problems from a pool of chained additions/subtractions."""
        pool = self._m4.generate(num) + self._m5.generate(num)
        picks = np.random.randint(0, len(pool), num)
        return [pool[p] for p in picks]
class MathL7(object):
    """Mixed chains a - b + c, a + b - c, or a + b + c (non-negative result)."""

    def __init__(self, ub=40):
        self._ub = ub

    def generate(self, num):
        """Return `num` problems, choosing operators so the result stays >= 0."""
        operands = _gen_add_arr(0, self._ub, 3, num)
        ops = []
        for a, b, c in operands:
            if a >= b:
                ops.append(['-', '+', '='])  # a - b + c >= 0
            elif b >= c:
                ops.append(['+', '-', '='])  # a + b - c >= a >= 0
            else:
                ops.append(['+', '+', '='])
        return _to_result(operands, ops)
class MathL8(object):
    """Negative number minus a positive: -a - b."""

    def __init__(self, ub=30):
        self._ub = ub

    def generate(self, num):
        """Return `num` problems of the form "-a - b =" as strings."""
        negatives = _gen_add_arr(-self._ub // 2, 0, 1, num)
        positives = _gen_add_arr(0, self._ub // 2, 1, num)
        rows = [[neg[0], pos[0]] for neg, pos in zip(negatives, positives)]
        # skip={0}: the leading negative is printed without parentheses.
        return _to_result(rows, [['-', '=']] * num, skip={0})
class MathL9(object):
    """Subtraction problems of the forms a - b and -a + b."""

    def __init__(self, ub=30):
        self._ub = ub

    def _sub(self, num):
        # a - b
        rows = _gen_add_arr(0, self._ub, 2, num)
        return _to_result(rows, [['-', '=']] * num)

    def _neg_add(self, num):
        # -a + b
        negatives = _gen_add_arr(-self._ub, 0, 1, num)
        positives = _gen_add_arr(0, self._ub, 1, num)
        rows = [[neg[0], pos[0]] for neg, pos in zip(negatives, positives)]
        return _to_result(rows, [['+', '=']] * num, skip={0})

    def generate(self, num):
        """Sample `num` problems from both forms."""
        pool = self._sub(num) + self._neg_add(num)
        picks = np.random.randint(0, len(pool), num)
        return [pool[p] for p in picks]
class MathL10(object):
    """Addition/subtraction problems: -a - b, -a + b, a - b."""

    def __init__(self, ub=30):
        self._ub = ub

    def _neg_sub(self, num):
        # -a - b
        negatives = _gen_add_arr(-self._ub // 2, 0, 1, num)
        positives = _gen_add_arr(0, self._ub // 2, 1, num)
        rows = [[neg[0], pos[0]] for neg, pos in zip(negatives, positives)]
        return _to_result(rows, [['-', '=']] * num, skip={0})

    def _neg_add(self, num):
        # -a + b
        negatives = _gen_add_arr(-self._ub, 0, 1, num)
        positives = _gen_add_arr(0, self._ub, 1, num)
        rows = [[neg[0], pos[0]] for neg, pos in zip(negatives, positives)]
        return _to_result(rows, [['+', '=']] * num, skip={0})

    def _sub(self, num):
        # a - b
        rows = _gen_add_arr(0, self._ub, 2, num)
        return _to_result(rows, [['-', '=']] * num)

    def generate(self, num):
        """Sample `num` problems across the three forms."""
        pool = self._neg_sub(num) + self._neg_add(num) + self._sub(num)
        picks = np.random.randint(0, len(pool), num)
        return [pool[p] for p in picks]
class MathL11(object):
    """Chained subtraction: a - b - c and -a - b - c."""

    def __init__(self, ub=30):
        self._ub = ub

    def _pos_head(self, num):
        # a - b - c
        rows = _gen_add_arr(0, self._ub, 3, num)
        return _to_result(rows, [['-', '-', '=']] * num)

    def _neg_head(self, num):
        # -a - b - c
        rows = _gen_add_arr(0, self._ub, 3, num)
        rows[:, 0] *= -1
        return _to_result(rows, [['-', '-', '=']] * num, skip={0})

    def generate(self, num):
        """Sample `num` problems from both forms."""
        pool = self._pos_head(num) + self._neg_head(num)
        picks = np.random.randint(0, len(pool), num)
        return [pool[p] for p in picks]
class MathL12(object):
    """Mixed chains: a + b - c or a - b + c."""

    def __init__(self, ub=30):
        self._ub = ub

    def generate(self, num):
        """Return `num` problems; the operator pattern is drawn per problem."""
        operands = _gen_add_arr(0, self._ub, 3, num)
        # '+ -' with probability 0.3, '- +' otherwise.
        ops = [['+', '-', '='] if np.random.rand() < 0.3 else ['-', '+', '=']
               for _ in range(num)]
        return _to_result(operands, ops)
class MathL13(object):
    """Mixed chains with a negative lead: -a+b-c or -a-b+c."""

    def __init__(self, ub=30):
        self._ub = ub

    def generate(self, num):
        """Return ``num`` problems whose first operand is negative."""
        lead = _gen_add_arr(-self._ub//2, 0, 1, num)   # negative first operand
        rest = _gen_add_arr(0, self._ub, 2, num)
        operands = np.c_[lead, rest]
        ops = []
        for _ in range(len(operands)):
            ops.append(['+', '-', '='] if np.random.rand() < 0.5 else ['-', '+', '='])
        return _to_result(operands, ops, skip={0})
class MathL14(object):
    """Negative second operand ("two negatives"): a+(-b) or a-(-b)."""

    def __init__(self, ub=40):
        self._ub = ub

    def generate(self, num):
        """Return ``num`` problems pairing a non-negative a with a negative b."""
        first = _gen_add_arr(0, self._ub//2, 1, num)
        second = _gen_add_arr(-self._ub//2, -1, 1, num)   # strictly negative
        operands = np.c_[first, second]
        ops = []
        for _ in range(num):
            ops.append(['+', '='] if np.random.rand() > 0.5 else ['-', '='])
        return _to_result(operands, ops)
class MathL15(object):
    """Two non-positive operands ("two negatives"): -a+(-b) or -a-(-b)."""

    def __init__(self, ub=40):
        self._ub = ub

    def generate(self, num):
        """Return ``num`` two-operand problems drawn from [-ub, 0]."""
        operands = _gen_add_arr(-self._ub, 0, 2, num)
        ops = []
        for _ in range(num):
            ops.append(['+', '='] if np.random.rand() > 0.5 else ['-', '='])
        return _to_result(operands, ops, skip={0})
class MathL16(object):
    """a+b+c where every operand may carry a sign."""

    def __init__(self, ub=40):
        self._ub = ub

    def generate(self, num):
        """Return ``num`` three-operand addition problems over [-ub, ub]."""
        operands = _gen_add_arr(-self._ub, self._ub, 3, num)
        return _to_result(operands, [['+', '+', '=']] * num, skip={0})
class MathL17(object):
    """Fill-in-the-blank addition: a + ? = b."""

    def __init__(self, ub=40):
        self._ub = ub

    def generate(self, num):
        """Return ``num`` blank-filling addition problems."""
        operands = _gen_add_arr(0, self._ub, 2, num)
        return _to_result(operands, [['+ __ =']] * num)
class MathL18(object):
    """Fill-in-the-blank subtraction: a - ? = b."""

    def __init__(self, ub=40):
        self._ub = ub

    def generate(self, num):
        """Return ``num`` blank-filling subtraction problems."""
        operands = _gen_add_arr(0, self._ub, 2, num)
        return _to_result(operands, [['- __ =']] * num)
class MathL19(object):
    """Fill-in-the-blank, mixed: a + ? = b or a - ? = b."""

    def __init__(self, ub=40):
        self._ub = ub

    def generate(self, num):
        """Return ``num`` blank problems, choosing + or - at random."""
        operands = _gen_add_arr(0, self._ub, 2, num)
        ops = []
        for _ in range(len(operands)):
            ops.append(['- __ ='] if np.random.rand() > 0.5 else ['+ __ ='])
        return _to_result(operands, ops)
class MathL20(object):
    """Trailing blank after addition: a+b+?=c or a+b-?=c."""

    def __init__(self, ub=40):
        self._ub = ub

    def generate(self, num):
        """Return ``num`` trailing-blank problems (~40% use the '+' blank)."""
        operands = _gen_add_arr(0, self._ub, 3, num)
        ops = []
        for _ in range(num):
            ops.append(['+', '+ __ ='] if np.random.rand() < 0.4 else ['+', '- __ ='])
        return _to_result(operands, ops)
class MathL21(object):
    """Fill-in-the-blank with a negative lead: -a + ? = b or -a - ? = b."""

    def __init__(self, ub=40):
        self._ub = ub

    def generate(self, num):
        """Return ``num`` blank problems whose first operand is negative."""
        lead = _gen_add_arr(-self._ub, 0, 1, num)
        target = _gen_add_arr(0, self._ub, 1, num)
        operands = np.c_[lead, target]
        ops = []
        for _ in range(num):
            ops.append(['+ __ ='] if np.random.rand() < 0.4 else ['- __ ='])
        return _to_result(operands, ops, skip={0})
class MathL22(object):
    """Trailing blank after subtraction: a-b+?=c or a-b-?=c."""

    def __init__(self, ub=40):
        self._ub = ub

    def generate(self, num):
        """Return ``num`` trailing-blank problems starting with a - b."""
        operands = _gen_add_arr(0, self._ub, 3, num)
        ops = []
        for _ in range(num):
            ops.append(['-', '+ __ ='] if np.random.rand() < 0.5 else ['-', '- __ ='])
        return _to_result(operands, ops)
class MathL23(object):
    """Trailing blank with negative lead: -a-b+?=c or -a-b-?=c."""

    def __init__(self, ub=40):
        self._ub = ub

    def generate(self, num):
        """Return ``num`` trailing-blank problems with the first operand negated."""
        operands = _gen_add_arr(0, self._ub, 3, num)
        operands[:, 0] *= -1
        ops = []
        for _ in range(num):
            ops.append(['-', '+ __ ='] if np.random.rand() < 0.5 else ['-', '- __ ='])
        return _to_result(operands, ops, skip={0})
class MathL24(object):
    """Trailing blank with negative lead: -a+b+?=c or -a+b-?=c."""

    def __init__(self, ub=40):
        self._ub = ub

    def generate(self, num):
        """Return ``num`` trailing-blank problems with the first operand negated."""
        operands = _gen_add_arr(0, self._ub, 3, num)
        operands[:, 0] *= -1
        ops = []
        for _ in range(num):
            ops.append(['+', '+ __ ='] if np.random.rand() < 0.5 else ['+', '- __ ='])
        return _to_result(operands, ops, skip={0})
class MathL25(object):
    """Trailing blank, negated lead AND result: -a+b+?=-c or -a-b+?=-c."""

    def __init__(self, ub=40):
        self._ub = ub

    def generate(self, num):
        """Return ``num`` problems with negated first operand and result."""
        operands = _gen_add_arr(0, self._ub, 3, num)
        operands[:, 0] *= -1
        operands[:, 2] *= -1
        ops = []
        for _ in range(num):
            ops.append(['+', '+ __ ='] if np.random.rand() < 0.5 else ['-', '+ __ ='])
        return _to_result(operands, ops, skip={0, 2})
class MathL26(object):
    """Middle blank: a + ? + b = c or a - ? - b = c."""

    def __init__(self, ub=40):
        self._ub = ub

    def generate(self, num):
        """Return ``num`` middle-blank problems, choosing + or - at random."""
        operands = _gen_add_arr(0, self._ub, 3, num)
        ops = []
        for _ in range(num):
            ops.append(['+ __ +', '='] if np.random.rand() < 0.5 else ['- __ -', '='])
        return _to_result(operands, ops)
class MathL27(object):
    """Middle blank with negative lead: -a + ? + b = ±c or -a - ? - b = ±c.

    The sign of the result operand is flipped with 50% probability,
    independently of the operator choice.
    """

    def __init__(self, ub=40):
        self._ub = ub

    def generate(self, num):
        """Return ``num`` middle-blank problems with a negated first operand."""
        operands = _gen_add_arr(0, self._ub, 3, num)
        operands[:, 0] *= -1
        ops = []
        for i in range(num):
            # First draw picks the operator layout, second draw the result sign.
            ops.append(['+ __ +', '='] if np.random.rand() < 0.5 else ['- __ -', '='])
            if np.random.rand() < 0.5:
                operands[i][2] *= -1
        return _to_result(operands, ops, skip={0, 2})
class MathL28(object):
    """Blank in any position: a+b+?=c, ?+a+b=c or a+?+b=c (signed operands)."""

    def __init__(self, ub=40):
        self._ub = ub

    def _generate1(self, num):
        # a+b+?=c
        operands = _gen_add_arr(-self._ub, self._ub, 3, num)
        return _to_result(operands, [['+', '+ __ =']] * num, skip={0, 2})

    def _generate2(self, num):
        # ?+a+b=c : render "a + b = c" then prefix the blank.
        operands = _gen_add_arr(-self._ub, self._ub, 3, num)
        rendered = _to_result(operands, [['+', '=']] * num, skip={2})
        return ['__ + ' + rendered[i] for i in range(num)]

    def _generate3(self, num):
        # a+?+b=c
        operands = _gen_add_arr(-self._ub, self._ub, 3, num)
        return _to_result(operands, [['+ __ +', '=']] * num, skip={0, 2})

    def generate(self, num):
        """Return ``num`` problems drawn randomly from the three layouts."""
        pool = self._generate1(num) + self._generate2(num) + self._generate3(num)
        picks = np.random.randint(0, len(pool), num)
        return [pool[i] for i in picks]
| 2.75 | 3 |
tests/plugins/tools/test_node_npmaudit.py | RiverSafeUK/eze-cli | 4 | 12763148 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring,line-too-long
from unittest import mock
import pytest
from eze.plugins.tools.node_npmaudit import NpmAuditTool
from eze.utils.io import create_tempfile_path
from tests.plugins.tools.tool_helper import ToolMetaTestBase
class TestNpmAuditTool(ToolMetaTestBase):
    """Unit tests for the NpmAuditTool plugin: default/explicit configuration,
    npm version detection, v6/v7 report-parsing snapshots, the npm-v7
    recommendation/path helper methods, and the constructed CLI commands."""
    # Hooks consumed by the shared ToolMetaTestBase snapshot helpers.
    ToolMetaClass = NpmAuditTool
    SNAPSHOT_PREFIX = "node-npmaudit"
    def test_creation__no_config(self):
        # Given
        input_config = {}
        expected_config = {
            "REPORT_FILE": create_tempfile_path("tmp-npmaudit-report.json"),
            "SOURCE": None,
            "ONLY_PROD": True,
            #
            "ADDITIONAL_ARGUMENTS": "",
            "IGNORED_FILES": None,
            "EXCLUDE": [],
            "IGNORED_VULNERABILITIES": None,
            "IGNORE_BELOW_SEVERITY": None,
            "DEFAULT_SEVERITY": None,
        }
        # When
        testee = NpmAuditTool(input_config)
        # Then
        assert testee.config == expected_config
    def test_creation__with_config(self):
        # Given: only SOURCE overridden, everything else defaulted
        input_config = {
            "SOURCE": "src",
        }
        expected_config = {
            "REPORT_FILE": create_tempfile_path("tmp-npmaudit-report.json"),
            "SOURCE": "src",
            "ONLY_PROD": True,
            #
            "ADDITIONAL_ARGUMENTS": "",
            "IGNORED_FILES": None,
            "EXCLUDE": [],
            "IGNORED_VULNERABILITIES": None,
            "IGNORE_BELOW_SEVERITY": None,
            "DEFAULT_SEVERITY": None,
        }
        # When
        testee = NpmAuditTool(input_config)
        # Then
        assert testee.config == expected_config
    @mock.patch("eze.plugins.tools.node_npmaudit.extract_cmd_version", mock.MagicMock(return_value="6.14.11"))
    def test_check_installed__success(self):
        # npm >= 6 is accepted: the detected version string is passed through.
        # When
        expected_output = "6.14.11"
        output = NpmAuditTool.check_installed()
        # Then
        assert output == expected_output
    @mock.patch("eze.plugins.tools.node_npmaudit.extract_cmd_version", mock.MagicMock(return_value="5.12.11"))
    def test_check_installed__failure_version_low(self):
        # npm 5.x is too old: an empty string signals "unsupported version".
        # When
        expected_output = ""
        output = NpmAuditTool.check_installed()
        # Then
        assert output == expected_output
    @mock.patch("eze.plugins.tools.node_npmaudit.extract_cmd_version", mock.MagicMock(return_value=False))
    def test_check_installed__failure_unavailable(self):
        # npm not found at all: False is passed through unchanged.
        # When
        expected_output = False
        output = NpmAuditTool.check_installed()
        # Then
        assert output == expected_output
    def test_parse_report__npm6_snapshot(self, snapshot):
        # Test container fixture and snapshot
        self.assert_parse_report_snapshot_test(
            snapshot,
            {},
            "__fixtures__/plugins_tools/raw-node-npmaudit-v6-report.json",
            "plugins_tools/node-npmaudit-result-v6-output.json",
        )
    def test_parse_report__npm7_snapshot(self, snapshot):
        # Test container fixture and snapshot
        self.assert_parse_report_snapshot_test(
            snapshot,
            {},
            "__fixtures__/plugins_tools/raw-node-npmaudit-v7-report.json",
            "plugins_tools/node-npmaudit-result-v7-output.json",
        )
    # new v7 tests
    def test_create_recommendation_v7__major_fix(self):
        # Given: a semver-major fix appends the "breaking change" warning
        expected_output = """fix available via `npm audit fix --force`
Will install mocha@8.4.0, which is a breaking change"""
        input_vulnerability = {"fixAvailable": {"name": "mocha", "version": "8.4.0", "isSemVerMajor": True}}
        testee = NpmAuditTool()
        # When
        output = testee.create_recommendation_v7(input_vulnerability)
        # Then
        assert output == expected_output
    def test_create_recommendation_v7__minor_fix(self):
        # Given: non-major fix names the install target without the warning
        expected_output = """fix available via `npm audit fix --force`
Will install mocha@8.4.0"""
        input_vulnerability = {"fixAvailable": {"name": "mocha", "version": "8.4.0", "isSemVerMajor": False}}
        testee = NpmAuditTool()
        # When
        output = testee.create_recommendation_v7(input_vulnerability)
        # Then
        assert output == expected_output
    def test_create_recommendation_v7__no_details(self):
        # Given: fixAvailable=True (bare bool) yields only the generic advice
        expected_output = """fix available via `npm audit fix --force`"""
        input_vulnerability = {"fixAvailable": True}
        testee = NpmAuditTool()
        # When
        output = testee.create_recommendation_v7(input_vulnerability)
        # Then
        assert output == expected_output
    def test_create_recommendation_v7__no_fix_available(self):
        # Given
        expected_output = "no fix available"
        input_vulnerability = {"fixAvailable": False}
        testee = NpmAuditTool()
        # When
        output = testee.create_recommendation_v7(input_vulnerability)
        # Then
        assert output == expected_output
    def test_create_path_v7__nested_vul(self):
        # Given: vulnerability reached through dependencies ("via" is strings)
        expected_output = """helmet>connect(2.11.1 - 3.6.4): has insecure dependency finalhandler>debug"""
        input_vulnerability = {
            "name": "connect",
            "severity": "low",
            "via": ["debug", "finalhandler"],
            "effects": ["helmet"],
            "range": "2.11.1 - 3.6.4",
            "nodes": ["node_modules/connect"],
            "fixAvailable": True,
        }
        testee = NpmAuditTool()
        # When
        output = testee.create_path_v7(input_vulnerability)
        # Then
        assert output == expected_output
    def test_create_path_v7__edge_vul(self):
        # Given: direct advisory ("via" holds a full advisory dict)
        expected_output = (
            """connect>finalhandler>mocha>debug(<= 2.6.8 || >= 3.0.0 <= 3.0.1): Regular Expression Denial of Service"""
        )
        input_vulnerability = {
            "name": "debug",
            "severity": "low",
            "via": [
                {
                    "source": 534,
                    "name": "debug",
                    "dependency": "debug",
                    "title": "Regular Expression Denial of Service",
                    "url": "https://npmjs.com/advisories/534",
                    "severity": "low",
                    "range": "<= 2.6.8 || >= 3.0.0 <= 3.0.1",
                }
            ],
            "effects": ["connect", "finalhandler", "mocha"],
            "range": "<=2.6.8 || 3.0.0 - 3.0.1",
            "nodes": ["node_modules/debug"],
            "fixAvailable": {"name": "mocha", "version": "8.4.0", "isSemVerMajor": True},
        }
        testee = NpmAuditTool()
        # When
        output = testee.create_path_v7(input_vulnerability)
        # Then
        assert output == expected_output
    @mock.patch("eze.utils.cli.async_subprocess_run")
    @mock.patch("eze.utils.cli.is_windows_os", mock.MagicMock(return_value=True))
    @mock.patch("eze.utils.language.node.install_node_dependencies", mock.MagicMock(return_value=True))
    @pytest.mark.asyncio
    async def test_run_scan__cli_command__std(self, mock_async_subprocess_run):
        # Given: default config keeps ONLY_PROD=True, adding --only=prod
        input_config = {"REPORT_FILE": "foo_report.json"}
        expected_cmd = "npm audit --json --only=prod"
        # Test run calls correct program
        await self.assert_run_scan_command(input_config, expected_cmd, mock_async_subprocess_run)
    @mock.patch("eze.utils.cli.async_subprocess_run")
    @mock.patch("eze.utils.cli.is_windows_os", mock.MagicMock(return_value=True))
    @mock.patch("eze.utils.language.node.install_node_dependencies", mock.MagicMock(return_value=True))
    @pytest.mark.asyncio
    async def test_run_scan__cli_command__non_prod(self, mock_async_subprocess_run):
        # Given: ONLY_PROD=False drops the --only=prod flag
        input_config = {"REPORT_FILE": "foo_report.json", "ONLY_PROD": False}
        expected_cmd = "npm audit --json"
        # Test run calls correct program
        await self.assert_run_scan_command(input_config, expected_cmd, mock_async_subprocess_run)
| 2.140625 | 2 |
starter/dog_shelters/migrations/0012_auto_20220319_1725.py | hadi-f90/mslearn-django-models-data | 0 | 12763149 | # Generated by Django 3.2.12 on 2022-03-19 17:25
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.fields
class Migration(migrations.Migration):
    """Auto-generated migration: drop Dog's surrogate ``id`` column and make
    the ``shelter`` foreign key the primary key instead."""
    dependencies = [
        ('dog_shelters', '0011_auto_20220319_1724'),
    ]
    operations = [
        # Remove the implicit auto-increment primary key from Dog.
        migrations.RemoveField(
            model_name='dog',
            name='id',
        ),
        # Re-declare ``shelter`` as the primary key; PROTECT blocks deleting a
        # Shelter that still has dogs referencing it.
        # NOTE(review): ``primary_key`` is normally a bool — passing the
        # BigAutoField class (truthy) works but looks unintended; confirm.
        migrations.AlterField(
            model_name='dog',
            name='shelter',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, primary_key=django.db.models.fields.BigAutoField, serialize=False, to='dog_shelters.shelter'),
        ),
    ]
| 1.46875 | 1 |
scripts/release/rethreshold_family.py | Rfam/rfam-production | 7 | 12763150 | """
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# ----------------------------------------------------------------------------------
import argparse
import os
import subprocess
import sys
from subprocess import Popen, PIPE
from utils import db_utils as db
# ------------------------------------- GLOBALS ------------------------------------
# this group only allows 10 rfsearch jobs to run concurrently
# this means 10*100 = 1000 jobs running concurrently which is the lsf limit
LSF_GROUP = "/family_srch"
# Per-job resources requested from bsub (MEMORY presumably in MB — confirm
# against the cluster's LSF unit configuration).
MEMORY = 2000
CPU = 8
MAX_JOB_COUNT = 1000
# Accessions to special-case (values are unused placeholders, membership test
# only) — presumably families excluded from automatic re-thresholding; not
# referenced anywhere in this visible chunk.
family_exceptions = {'RF02924': '', 'RF03064': '', 'RF02913': '',
                     'RF02543': '', 'RF00017': '', 'RF02540': ''}
# ----------------------------------------------------------------------------------
def checkout_family(rfam_acc):
    """
    Checks out a family from Rfam based on a valid Rfam accession.

    rfam_acc: A valid Rfam accession

    return: None
    """
    # Pass the accession as an argument list (shell=False) so an unexpected
    # string can never be interpreted by the shell; rfco.pl is still resolved
    # from PATH exactly as before.
    subprocess.call(["rfco.pl", rfam_acc])

    # add some checks here
# ----------------------------------------------------------------------------------
def submit_new_rfsearch_job(family_dir, rfmake=False):
    """
    Submit an LSF job running rfsearch.pl to update SCORES for a new release.
    When no threshold is supplied, rfsearch.pl re-uses the existing one.

    family_dir: The physical location of the family directory
    rfmake: If True, also run rfmake.pl once rfsearch.pl completes. Default False

    return: None
    """
    # The LSF job name is the family accession (the directory's basename).
    rfam_acc = os.path.basename(family_dir)

    lsf_err_file = os.path.join(family_dir, "auto_rfsearch.err")
    lsf_out_file = os.path.join(family_dir, "auto_rfsearch.out")

    # Command executed on the LSF node; optionally chain rfmake.pl after it.
    remote_cmd = "cd %s && rfsearch.pl -cnompi -q production-rh7 -relax"
    if rfmake is True:
        remote_cmd += " && rfmake.pl"

    cmd = ("bsub -M %s -R \"rusage[mem=%s]\" -o %s -e %s -n %s -g %s -q production-rh7 "
           "-J %s \"" + remote_cmd + "\"")

    subprocess.call(cmd % (MEMORY, MEMORY, lsf_out_file, lsf_err_file,
                           CPU, LSF_GROUP, rfam_acc, family_dir), shell=True)
# ----------------------------------------------------------------------------------
def submit_new_rfmake_job(family_dir):
    """
    Submit an LSF job that runs rfmake.pl for an already checked-out family.

    family_dir: The physical location of the family directory

    return: None
    """
    # The LSF job name is the family accession (the directory's basename).
    rfam_acc = os.path.basename(family_dir)

    lsf_err_file = os.path.join(family_dir, "auto_rfmake.err")
    lsf_out_file = os.path.join(family_dir, "auto_rfmake.out")

    job_cmd = ("bsub -M %s -R \"rusage[mem=%s]\" -o %s -e %s -n %s -g %s -q production-rh7 "
               "-J %s \"cd %s && rfmake.pl\"")

    subprocess.call(job_cmd % (MEMORY, MEMORY, lsf_out_file, lsf_err_file,
                               CPU, LSF_GROUP, rfam_acc, family_dir), shell=True)
# ----------------------------------------------------------------------------------
def load_rfam_accessions_from_file(accession_list):
    """
    Parse a .txt file containing Rfam accessions (one per line) and return
    them as a list.

    accession_list: Path to a .txt file containing a list of Rfam accessions

    return: list of Rfam family accessions
    """
    # Context manager guarantees the handle is closed even if reading fails.
    with open(accession_list, 'r') as fp:
        return [x.strip() for x in fp]
# ----------------------------------------------------------------------------------
def checkout_and_search_family(rfam_acc, dest_dir, rfmake=False):
    """
    Combine family checkout (rfco.pl) and re-scoring of hits via rfsearch.pl.
    The checkout step is skipped when the family directory already exists.

    rfam_acc: A valid Rfam family accession (RFXXXXX)
    dest_dir: A valid destination directory, where to checkout the family
    rfmake: If True, run rfmake after rfsearch completes. Default False

    return: void
    """
    family_dir = os.path.join(dest_dir, rfam_acc)

    # Only check the family out if it has not been checked out before.
    if not os.path.exists(family_dir):
        os.chdir(dest_dir)  # rfco.pl checks out into the current directory
        checkout_family(rfam_acc)

    submit_new_rfsearch_job(family_dir, rfmake)
# ----------------------------------------------------------------------------------
def parse_arguments():
    """
    Build the command line argument parser using python's argparse.

    return: Argparse parser object
    """
    parser = argparse.ArgumentParser(description='Update scores for new release')

    # Group the required options together for clearer --help output.
    required = parser.add_argument_group("required arguments")
    required.add_argument('--dest-dir', help='destination directory where to checkout families',
                          type=str, required=True)

    # Exactly one family-selection mechanism may be used per invocation.
    selection = parser.add_mutually_exclusive_group()
    selection.add_argument('-f', help='a file containing a list of Rfam family accessions', type=str)
    selection.add_argument('--all', help='runs rfsearch on all families', action="store_true")
    selection.add_argument('--acc', help="a valid rfam family accession RFXXXXX",
                           type=str, default=None)

    parser.add_argument('--rfmake', help='run rfmake after rfsearch completion', action="store_true")
    parser.add_argument('-v', help='runs validation checks', action="store_true")
    parser.add_argument('--report', help='generates search reports', action="store_true")
    # this is mutually exclusive with --acc option
    parser.add_argument('--exclude-type', help='type(s) of ncRNAs to exclude', type=str, default=None)

    return parser
# ----------------------------------------------------------------------------------
def is_valid_family(dest_dir, rfam_acc):
    """
    Checks if the job ran successfully by checking if .err file is empty and
    that Success keyword exists in .out file. As an additional sanity check, we
    look for the rfsearch.log file as an indication that rfsearch actually ran.

    dest_dir: Directory the families were checked out into
    rfam_acc: A valid Rfam family accession (RFXXXXX)

    return: True if the family is valid, False otherwise
    """
    family_dir = os.path.join(dest_dir, rfam_acc)

    # If log file does not exist rfsearch did not run for some reason
    if not os.path.exists(os.path.join(family_dir, "rfsearch.log")):
        return False

    # check if lsf .err file is empty; if not, fall back to the rfsearch log
    if not os.path.getsize(os.path.join(family_dir, "auto_rfsearch.err")) == 0:
        return check_rfsearch_log_success(family_dir)

    # check if success in .out file
    lsf_out_file = os.path.join(family_dir, "auto_rfsearch.out")

    process = Popen(['grep', 'Success', lsf_out_file], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    output, err = process.communicate()

    # communicate() yields bytes on Python 3, so search for a bytes literal;
    # on Python 2 both sides are str, making this compatible with either.
    if output.find(b"Successfully completed.") == -1:
        return False

    return True
# ----------------------------------------------------------------------------------
def get_missing_seeds_seedoutlist(num_seed_db, seedoutlist):
    """
    Parses the seedoutlist file and compares the number of seed sequences
    obtained from the database with the number of unique seed hits in the
    seedoutlist file.

    num_seed_db: The number of seed sequences found in the database
    seedoutlist: The SEED specific outlist file

    return (int): num_seed_db minus the number of unique seed hits; 0 means
    both sources agree, a positive value is the number of seeds missing from
    the seedoutlist file. (The original docstring claimed a boolean return.)
    """
    unique_seeds = extract_unique_seeds_from_seedoutlist(seedoutlist)
    # len() of the dict counts the unique rfamseq_acc/start-end keys directly.
    return num_seed_db - len(unique_seeds)
# ----------------------------------------------------------------------------------
def check_rfsearch_log_success(family_dir):
    """
    Checks if the last line of rfsearch.log contains the success string
    "# [ok]" in order to mark the family as successfully completed.

    family_dir: The physical location of the family directory

    return: True if the log ends with the success marker, False otherwise
    """
    rfsearch_log_file = os.path.join(family_dir, "rfsearch.log")

    process = Popen(['tail', '-1', rfsearch_log_file], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    output, err = process.communicate()

    # communicate() yields bytes on Python 3, so search for a bytes literal;
    # on Python 2 both sides are str, making this compatible with either.
    if output.find(b"# [ok]") == -1:
        return False

    return True
# ----------------------------------------------------------------------------------
def count_hits(scores_file):
    """
    Count SEED and FULL hits in an outlist/species file at three different
    thresholds (above GA, below GA, below the best reversed hit).

    scores_file: This is either the species or the outlist file from the
    family directory

    return: A dictionary with SEED and FULL counts at different thresholds
    """
    counts = {"seed_above_ga": 0,
              "full_above_ga": 0,
              "full_below_ga": 0,
              "seed_below_ga": 0,
              "seed_below_rev": 0,
              "full_below_rev": 0}

    # Track which threshold section of the file we are currently in.
    passed_ga = False
    passed_rev = False

    fp = open(scores_file, 'r')

    for line in fp:
        # Section markers: everything after them belongs to the next bucket.
        if line.find("CURRENT THRESHOLD") != -1:
            passed_ga = True
            continue
        if line.find("BEST REVERSED") != -1:
            passed_rev = True
            continue

        # Choose the bucket suffix from the markers seen so far.
        if not passed_ga and not passed_rev:
            suffix = "above_ga"
        elif passed_ga and not passed_rev:
            suffix = "below_ga"
        elif passed_ga and passed_rev:
            suffix = "below_rev"
        else:
            # REV seen before GA: the original logic counts nothing here.
            continue

        # NOTE(review): substring tests mean a "FULL-SEED" line is counted as
        # SEED (the SEED check matches first) — confirm this is intended.
        if line.find("SEED") != -1:
            counts["seed_" + suffix] += 1
        elif line.find("FULL") != -1:
            counts["full_" + suffix] += 1

    fp.close()

    return counts
# ----------------------------------------------------------------------------------
def extract_unique_seeds_from_seedoutlist(seedoutlist):
    """
    Extract all unique SEED accessions (rfamseq_acc/start-end) from a
    seedoutlist file, keeping the first bit score seen for each accession
    and ignoring duplicated hits.

    seedoutlist: The SEED specific outlist file

    return: dict mapping accession/start-end (column 4) to bit score (float)
    """
    seeds_found = {}

    with open(seedoutlist, 'r') as fp:
        for raw_line in fp:
            # Skip comment/metadata lines.
            if raw_line[0] == '#':
                continue
            fields = [field for field in raw_line.strip().split(' ') if field != '']
            # Record only the first occurrence of each sequence accession.
            if fields[3] not in seeds_found:
                seeds_found[fields[3]] = float(fields[0])

    return seeds_found
# ----------------------------------------------------------------------------------
def generate_search_stats(family_dir, scores_file='species', tag_miRNA=True):
    """
    Function to generate useful search stats per family and print them as a
    single tab-separated report line.

    family_dir: A valid Rfam family checkout directory where pre-computed searches
    were ran
    scores_file: A string specifying the scores file to parse (outlist, species)
    tag_miRNA: If True, query the DB for miRNA accessions and append an
    is_miRNA flag column to the printed report

    return: report string
    """
    # Family accession is the basename of the checkout directory.
    rfam_acc = os.path.basename(family_dir)
    # check point flags
    flag_curr = 0
    flag_rev = 0
    elements = None
    prev_line = None
    seen_ga = False
    seen_rev_before_ga = False
    ga_bit_score = 0.0
    rev_bit_score = 0.0
    ga_rev_seq_gap = 0 # gap in sequences between GA/REV thresholds
    is_miRNA = 0
    seed_above_ga = None
    last_seed_seen = None
    seq_position = 0
    last_seed_pos = 0
    seed_above_ga_pos = 0
    ga_position = 0
    rev_position = 0
    position = 1 # keeps hold of the index position with respect to the entire outlist
    review_family = False
    full_check = False
    unique_seeds = {}
    # initialization of counts
    # ("other_below_ga" is initialised but never incremented below.)
    counts = {"seed_above_ga": 0,
              "full_above_ga": 0,
              "full_below_ga": 0,
              "seed_below_ga": 0,
              "seed_below_rev": 0,
              "full_below_rev": 0,
              "other_below_ga": 0}
    # fetch miRNA accessions from the database
    miRNAs = {}
    if tag_miRNA is True:
        miRNAs.update(db.fetch_type_specific_rfam_accessions("miRNA", return_type="dict"))
        if rfam_acc in miRNAs:
            is_miRNA = 1
    # get some useful numbers from the database
    num_seed_seqs_db = db.get_number_of_seed_sequences(rfam_acc)
    num_full_hits_db = db.get_number_of_full_hits(rfam_acc)
    unique_ncbi_ids_db = db.get_family_unique_ncbi_ids(rfam_acc)
    seedoutlist = os.path.join(family_dir, "seedoutlist")
    missing_seed_seqs_so = get_missing_seeds_seedoutlist(num_seed_seqs_db, seedoutlist)
    scores_fp = open(os.path.join(family_dir, scores_file), 'r')
    # this will basically read the first line which is a header so no harm
    line = scores_fp.readline()
    prev_line = line
    ncbi_ids_from_hits = set()
    # generate stats
    for line in scores_fp:
        position += 1 # starts from 1 because we read the 1st line out of the loop
        # make flag_curr = 1 when we reach that line
        if line.find("CURRENT GA THRESHOLD") != -1:
            flag_curr = 1
            # if we reached this point, it means we saw GA
            seen_ga = True
            ga_position = position
            # get all the elements of the last score line above the GA threshold
            seed_above_ga = last_seed_seen
            seed_above_ga_pos = last_seed_pos
            # get GA threshold
            elements = line.split(' ')
            ga_bit_score = float(elements[-3])
            continue
        # when we reach the reversed sequence line set the flag to 1
        if line.find("BEST REVERSED") != -1:
            flag_rev = 1
            rev_position = position
            # check if GA is false at this point. If yes, this means we saw REV first.
            # setting flag to True
            if seen_ga is False:
                seen_rev_before_ga = True
            continue
        if line[0] != '#':
            # increase sequence position
            # seq_position += 1
            elements = [x for x in line.strip().split(' ') if x != '']
            # first line after hitting REV line
            if flag_rev == 1 and rev_bit_score == 0.0:
                rev_bit_score = float(elements[0])
            # add id to ncbi_ids
            # (assumes column 6 holds the NCBI taxonomy id — TODO confirm
            # against the outlist/species column layout)
            ncbi_ids_from_hits.add(elements[5])
            # we are above the GA
            if flag_curr == 0 and flag_rev == 0:
                if elements[2] == "SEED":
                    # make sure the sequences is not in the dictionary and that it starts from 1
                    if elements[3] not in unique_seeds:
                        counts["seed_above_ga"] += 1
                        unique_seeds[elements[3]] = (elements[8], elements[9])
                elif elements[2] == "FULL" or elements[2] == "FULL-SEED":
                    counts["full_above_ga"] += 1
            # we are somewhere in between current threshold and reversed cutoff
            elif flag_curr == 1 and flag_rev == 0:
                if elements[2] == "SEED":
                    if elements[3] not in unique_seeds:
                        counts["seed_below_ga"] += 1
                        unique_seeds[elements[3]] = (elements[8], elements[9])
            elif flag_curr == 1 and flag_rev == 1:
                if elements[2] == "SEED":
                    if elements[3] not in unique_seeds:
                        counts["seed_below_rev"] += 1
                        unique_seeds[elements[3]] = (elements[8], elements[9])
            # if between GA and REV count sequences
            if ((flag_curr == 1 and flag_rev == 0) or (flag_curr == 0 and flag_rev == 1)):
                ga_rev_seq_gap += 1
            # always stores the last seed up to the current iteration point
            # at the end of the file, last_seed_seen will be holding the last SEED sequence
            # seen in the outlist file
            if elements[2] == "SEED":
                last_seed_seen = elements
                # sets position
                last_seed_pos = position
        # current line becomes previous at the end of each iteration
        prev_line = line
    scores_fp.close()
    # computes the number of any missing SEED sequences. That is SEEDs that do not appear in the outlist
    missing_seed_seqs_o = abs(
        num_seed_seqs_db - (counts["seed_above_ga"] + counts["seed_below_ga"] + counts["seed_below_rev"]))
    # compute the total number of ncbi_ids including
    # total_ncbi_ids_found = len(list(set(unique_ncbi_ids_db).union(ncbi_ids_from_hits)))
    # calulates the number of new ncbi ids added to the full region after a new search
    # new_ncbi_ids_found = abs(total_ncbi_ids_found - len(unique_ncbi_ids_db))
    # ABS(NFULL_OLD-NFULL_NEW) > 0.1 * NFULL_OLD
    # full_diff = abs(num_full_hits_db - (counts["full_above_ga"] + counts["full_below_ga"]))
    # compute GA/REV bit score difference
    ga_rev_bitscore_diff = abs(ga_bit_score - rev_bit_score)
    # if full_diff > (0.1 * num_full_hits_db):
    # full_check = True
    # constraints to be met for reviewing families
    if (seen_rev_before_ga or (counts["seed_below_ga"] > 0) or (counts["seed_below_rev"] > 0)):
        review_family = True
    # NOTE(review): seed_above_ga / last_seed_seen remain None when the file
    # contains no SEED hit above GA (or no SEED line at all); the [0] indexing
    # below would then raise — confirm outlists always contain a SEED line.
    fields = [rfam_acc, str(num_seed_seqs_db), str(counts["seed_above_ga"]), str(counts["seed_below_ga"]),
              str(counts["seed_below_rev"]), str(missing_seed_seqs_o), str(missing_seed_seqs_so),
              str(ga_bit_score), str(rev_bit_score), str(ga_rev_bitscore_diff), str(ga_rev_seq_gap),
              str(int(seen_rev_before_ga)), seed_above_ga[0], str(seed_above_ga_pos),
              str(ga_position), str(rev_position), last_seed_seen[0], str(last_seed_pos),
              str(int(review_family))]
    """
    fields = [rfam_acc, str(num_seed_seqs_db), str(counts["seed_above_ga"]), str(counts["seed_below_ga"]),
              str(counts["seed_below_rev"]), str(missing_seed_seqs_o), str(missing_seed_seqs_so),
              str(num_full_hits_db), str(counts["full_above_ga"]), str(len(unique_ncbi_ids_db)),
              str(new_ncbi_ids_found), str(ga_bit_score), str(rev_bit_score), str(ga_rev_bitscore_diff),
              str(ga_rev_seq_gap), str(int(seen_rev_before_ga)), seed_above_ga[0], str(seed_above_ga_pos),
              str(ga_position), str(rev_position), last_seed_seen[0], str(last_seed_pos), str(int(review_family))]
    """
    if tag_miRNA is True:
        fields.append(str(is_miRNA))
    print ('\t'.join(fields))
# ----------------------------------------------------------------------------------
def write_family_report_file(family_dir, scores_file="species"):
    """
    Generate a report (search_report.txt) about the outcome of a new search.

    family_dir: A valid location of an Rfam family checkout
    scores_file: This is a string which specifies the file to parse (outlist | species)
    It parses species file by default.

    return (int): A number specifying the curation priority for a specific family, where
    3: critical, 2: critical but not erroneous, 1: check seed, 0: no attention needed
    """
    priority = 0

    # fetch number of seed sequences from the database
    rfam_acc = os.path.basename(family_dir)
    no_seed_seqs = db.get_number_of_seed_sequences(rfam_acc)

    scores_file_loc = os.path.join(family_dir, scores_file)
    counts = count_hits(scores_file_loc)

    # sum all seed counts to get total number of seed sequences
    counted_seed_seqs = counts["seed_above_ga"] + counts["seed_below_ga"] + counts["seed_below_rev"]

    # Context manager guarantees the report file is closed even if a write
    # (or the percentage computation) raises part-way through.
    with open(os.path.join(family_dir, "search_report.txt"), 'w') as report_fp:
        # Critical SEED issues
        if counts["seed_below_rev"] != 0:
            report_fp.write("CRITICAL: %s SEED sequences below reversed cutoff\n" % str(counts["seed_below_rev"]))
            priority = 3

        if counts["seed_below_ga"] > counts["seed_above_ga"]:
            # NOTE(review): this divides by the DB seed count; a family with
            # zero seeds in the DB would raise ZeroDivisionError — confirm
            # that cannot occur upstream.
            percentage = float(counts["seed_below_ga"] * 100) / float(no_seed_seqs)
            report_fp.write("CRITICAL: More SEED sequences below GA than above. %s\n" % percentage)
            if priority < 2:
                priority = 2

        if counted_seed_seqs != no_seed_seqs:
            report_fp.write(
                "WARNING: The number of SEED sequences in the database does not match the number in the alignment\n\n")
            priority = 3

        # TODO - Develop code to check taxonomic distribution
        # TODO - Use information from FULL hits too

        # some useful information
        report_fp.write("Total number of SEED sequences in DB: %s\n" % no_seed_seqs)
        report_fp.write("Total number of SEED sequences counted: %s\n" % counted_seed_seqs)
        report_fp.write("%s SEED sequences are above GA\n" % counts["seed_above_ga"])
        report_fp.write("%s SEED sequences are below GA\n" % counts["seed_below_ga"])
        report_fp.write("%s SEED sequences are below the reversed cutoff\n" % counts["seed_below_rev"])

    return priority
# ----------------------------------------------------------------------------------
def extract_scores_from_outlist_file(outlist):
    """
    Parses an rfsearch outlist file and collects the bit scores of all hits,
    grouped by sequence label. Parsing stops at the "BEST REVERSED" marker
    line, because every hit listed below it is treated as a true negative.

    :param outlist: path to a family's outlist file
    :return: dict {'SEED': [...], 'FULL': [...], 'OTHER': [...]} of float
    bit scores, in file order (outlist files are sorted by descending score)
    """
    scores = {'SEED': [], 'FULL': [], 'OTHER': []}
    # use a context manager so the file handle is closed even if parsing fails
    with open(outlist, 'r') as outlist_fp:
        for line in outlist_fp:
            if line.startswith('#'):
                # if we reached the REVERSED line, everything below is a TN;
                # stop parsing and return what we have collected so far
                if "BEST REVERSED" in line:
                    break
            else:
                # split() with no argument collapses any run of whitespace,
                # equivalent to the old split(' ') + filter-empty idiom
                fields = line.split()
                # guard against blank/short lines, which previously raised
                # an IndexError on the column lookup below
                if len(fields) > 2:
                    scores[fields[2]].append(float(fields[0]))
    return scores
# --------------------------------------------------------------
def print_report_header(extended=True):
    """
    Prints the tab-delimited header row of the curation report to stdout.

    extended (boolean): If True, prints all the columns (including the
    DB/FULL count and NCBI id columns), otherwise prints just the
    short version of the header
    returns: void
    """
    # NOTE(review): these look like Python 2 print statements — the trailing
    # commas suppress the newline so the header fragments join on one line;
    # confirm the interpreter version, since under Python 3 each fragment
    # would print on its own line followed by a stray tuple comma.
    if extended is True:
        print (
            "RFAM_ACC\tnum_seed_seqs\tseed_above_GA\tseed_below_ga\tseed_below_rev\tmissing_seeds_outlist\t".upper()),
        print ("missing_seeds_seedoutlist\tnum_full_DB\tfull_above_ga\tUNIQUE_NCBI_ID_DB\tNOVEL_NCBI_IDs\t".upper()),
        print (
            "ga_bit_SCORE\trev_bit_score\tGA_REV_SCORE_diff\tga_rev_seq_gap\tREV_before_GA\tseed_above_ga_score\t".upper()),
        print ("seed_above_ga_pos\tga_pos\trev_pos\tlast_seed_score\tlast_seed_pos\treview_family\tis_miRNA\n".upper()),
    else:
        print (
            "RFAM_ACC\tnum_seed_seqs\tseed_above_GA\tseed_below_ga\tseed_below_rev\tmissing_seeds_outlist\t".upper()),
        print ("missing_seeds_seedoutlist\tga_bit_SCORE\trev_bit_score\tGA_REV_SCORE_diff\tga_rev_seq_gap\t".upper()),
        print ("REV_before_GA\tseed_above_ga_score\tseed_above_ga_pos\tga_pos\trev_pos\tlast_seed_score\t".upper()),
        print ("last_seed_pos\treview_family\tis_miRNA\n".upper()),
# ----------------------------------------------------------------------------------
# Debugging entry point: runs the outlist score extraction on a hard-coded
# relative path and prints the result. The real command-line driver below is
# currently disabled (wrapped in a module-level string literal).
if __name__ == '__main__':
    outlist = "../data/../outlist"
    print (extract_scores_from_outlist_file(outlist))
# NOTE(review): the block below is the full CLI main, commented out as a
# triple-quoted string (evaluated and discarded at import time, never run).
# If it is ever re-enabled, note that near the end `open(args.f, r)` is
# missing quotes around the mode ('r') and would raise a NameError, and the
# batching loop's `i+1` never updates `i` — TODO fix before reactivating.
"""
# create a new argument parser object
parser = parse_arguments()
args = parser.parse_args()
if args.acc and not args.v and not args.report:
    # check accession provided is valid
    if args.acc[0:2] == 'RF' and len(args.acc) == 7:
        os.chdir(args.dest_dir)
        checkout_and_search_family(args.acc, args.dest_dir, rfmake=args.rfmake)
elif args.f and not args.v:
    if not os.path.isfile(args.f):
        print ("The file location you provided does not exist!\n")
        sys.exit()
    # move to destination directory
    os.chdir(args.dest_dir)
    accessions = load_rfam_accessions_from_file(args.f)
    \"""
    # get number of job batches we need to submit
    # casting to int chops off decimals and ceil rounds up to nearest int
    if len(accessions) > MAX_JOB_COUNT:
        no_batches = int(math.ceil(len(accessions)/MAX_JOB_COUNT))
        i = 0
        while i < no_batches:
            lidx = i * MAX_JOB_COUNT # left index
            ridx = (i+1) * MAX_JOB_COUNT # right index
            # get exactly MAX_JOB_COUNT items
            if i < no_batches - 1:
                new_batch = accessions[lidx:ridx]
            # get remaining accessions for last batch
            else:
                new_batch = accessions[lidx:]
            # call function to submit batch
            # while monitoring is True:
            # cluster monitoring function to be called here
            i+1 # this is done when the monitoring loop becomes false which is a signal to submit another batch
    \"""
    for rfam_acc in accessions:
        checkout_and_search_family(rfam_acc, args.dest_dir, rfmake=args.rfmake)
# run rfsearch on all families in the database
elif args.all and not args.v and not args.report and not args.rfmake:
    # fetch Rfam family accessions from the database
    # call checkout_and_search_family for every family in the list
    # fetches all rfam accessions from the database in DESC order based on the number of sequences in SEEDs
    rfam_acc_list = db.fetch_rfam_accs_sorted(order='DESC')
    for rfam_acc in rfam_acc_list:
        checkout_and_search_family(rfam_acc, args.dest_dir, rfmake=args.rfmake)
# validate rfsearch runs
elif args.v:
    # validate a single family
    if args.acc:
        if not is_valid_family(args.dest_dir, args.acc):
            print ("The family %s does not validate!" % args.acc)
    # validate runs for all accessions in the input file
    elif args.f:
        validation_file = os.path.join(args.dest_dir, "validation.log")
        fp = open(validation_file, 'w')
        accessions = load_rfam_accessions_from_file(args.f)
        for rfam_acc in accessions:
            if not is_valid_family(args.dest_dir, rfam_acc):
                fp.write(rfam_acc + '\n')
        fp.close()
        if os.path.getsize(validation_file) == 0:
            print ("Validation process completed! All searches completed successfully!")
        else:
            print ("Validation process completed! Check validation.log for erroneous searches!")
    # validate all families in the directory, but don't generate any reports
    elif args.all and not args.report:
        validation_file = os.path.join(args.dest_dir, "validation.log")
        fp = open(validation_file, 'w')
        accessions = [x for x in os.listdir(args.dest_dir) if os.path.isdir(os.path.join(args.dest_dir, x))]
        for rfam_acc in accessions:
            if not is_valid_family(args.dest_dir, rfam_acc):
                fp.write(rfam_acc + '\n')
        fp.close()
        if os.path.getsize(validation_file) == 0:
            print ("Validation process completed! All searches completed successfully!")
        else:
            print ("Validation process completed! Check validation.log for erroneous searches!")
# generate reports
elif args.report:
    # print report header
    print_report_header(extended=False)
    # generate report for a specific family
    if args.acc:
        # check if searches where validated
        if not os.path.exists(os.path.join(args.dest_dir, "validation.log")):
            sys.exit("WARNING: This search may be invalid. Run validation and try again!")
        family_dir = os.path.join(args.dest_dir, args.acc)
        generate_search_stats(family_dir, scores_file='species', tag_miRNA=True)
    # generate reports for all families in the destination directory
    elif args.all:
        families = [x for x in os.listdir(args.dest_dir) if os.path.isdir(os.path.join(args.dest_dir, x))]
        # fetch Rfam family accessions to exclude if defined
        exclude_accs = {}
        if args.exclude_type:
            exclude_accs = db.fetch_type_specific_rfam_accessions(args.exclude_type, return_type="dict")
        for family in families:
            # families of which searches did not complete
            # remove the database on
            if family not in exclude_accs and family not in family_exceptions:
                family_dir = os.path.join(args.dest_dir, family)
                generate_search_stats(family_dir, scores_file='species', tag_miRNA=True)
# run rfmake
elif args.rfmake:
    # run rfmake on all families
    if args.all:
        families = [x for x in os.listdir(args.dest_dir) if os.path.isdir(os.path.join(args.dest_dir, x))]
        for family in families:
            family_dir = os.path.join(args.dest_dir, family)
            submit_new_rfmake_job(family_dir)
    # run rfmake for a specific family
    elif args.acc:
        family_dir = os.path.join(args.dest_dir, args.acc)
        submit_new_rfmake_job(family_dir)
    # run rfmake for all accessions in the file
    elif args.f:
        fp = open(args.f, r)
        families = [x.strip() for x in fp]
        fp.close()
        for family in families:
            family_dir = os.path.join(args.dest_dir, family)
            submit_new_rfmake_job(family_dir)
"""