# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.datastore_admin_v1.types import datastore_admin
from google.cloud.datastore_admin_v1.types import index
from google.longrunning import operations_pb2 as operations # type: ignore
from .base import DatastoreAdminTransport, DEFAULT_CLIENT_INFO
from .grpc import DatastoreAdminGrpcTransport
class DatastoreAdminGrpcAsyncIOTransport(DatastoreAdminTransport):
"""gRPC AsyncIO backend transport for DatastoreAdmin.
Google Cloud Datastore Admin API
The Datastore Admin API provides several admin services for
Cloud Datastore.
-----------------------------------------------------------------------------
## Concepts
Project, namespace, kind, and entity as defined in the Google
Cloud Datastore API.
Operation: An Operation represents work being performed in the
background.
EntityFilter: Allows specifying a subset of entities in a
project. This is specified as a combination of kinds and
namespaces (either or both of which may be all).
-----------------------------------------------------------------------------
## Services
# Export/Import
The Export/Import service provides the ability to copy all or a
subset of entities to/from Google Cloud Storage.
Exported data may be imported into Cloud Datastore for any
Google Cloud Platform project. It is not restricted to the
export source project. It is possible to export from one project
and then import into another.
Exported data can also be loaded into Google BigQuery for
analysis.
Exports and imports are performed asynchronously. An Operation
resource is created for each export/import. The state (including
any errors encountered) of the export/import may be queried via
the Operation resource.
# Index
The index service manages Cloud Datastore composite indexes.
Index creation and deletion are performed asynchronously. An
Operation resource is created for each such asynchronous
operation. The state of the operation (including any errors
encountered) may be queried via the Operation resource.
# Operation
The Operations collection provides a record of actions performed
for the specified project (including any operations in
progress). Operations are not created directly but through calls
on other collections or resources.
An operation that is not yet done may be cancelled. The request
to cancel is asynchronous and the operation may continue to run
for some time after the request to cancel is made.
An operation that is done may be deleted so that it is no longer
listed as part of the Operation collection.
ListOperations returns all pending operations, but not completed
operations.
Operations are created by service DatastoreAdmin,
but are accessed via service google.longrunning.Operations.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "datastore.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
scopes = scopes or cls.AUTH_SCOPES
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
**kwargs,
)
def __init__(
self,
*,
host: str = "datastore.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_channel_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
)
# Run the base constructor.
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
client_info=client_info,
)
self._stubs = {}
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if "operations_client" not in self.__dict__:
self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self.__dict__["operations_client"]
@property
def export_entities(
self,
) -> Callable[
[datastore_admin.ExportEntitiesRequest], Awaitable[operations.Operation]
]:
r"""Return a callable for the export entities method over gRPC.
Exports a copy of all or a subset of entities from
Google Cloud Datastore to another storage system, such
as Google Cloud Storage. Recent updates to entities may
not be reflected in the export. The export occurs in the
background and its progress can be monitored and managed
via the Operation resource that is created. The output
of an export may only be used once the associated
operation is done. If an export operation is cancelled
before completion it may leave partial data behind in
Google Cloud Storage.
Returns:
Callable[[~.ExportEntitiesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "export_entities" not in self._stubs:
self._stubs["export_entities"] = self.grpc_channel.unary_unary(
"/google.datastore.admin.v1.DatastoreAdmin/ExportEntities",
request_serializer=datastore_admin.ExportEntitiesRequest.serialize,
response_deserializer=operations.Operation.FromString,
)
return self._stubs["export_entities"]
@property
def import_entities(
self,
) -> Callable[
[datastore_admin.ImportEntitiesRequest], Awaitable[operations.Operation]
]:
r"""Return a callable for the import entities method over gRPC.
Imports entities into Google Cloud Datastore.
Existing entities with the same key are overwritten. The
import occurs in the background and its progress can be
monitored and managed via the Operation resource that is
created. If an ImportEntities operation is cancelled, it
is possible that a subset of the data has already been
imported to Cloud Datastore.
Returns:
Callable[[~.ImportEntitiesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "import_entities" not in self._stubs:
self._stubs["import_entities"] = self.grpc_channel.unary_unary(
"/google.datastore.admin.v1.DatastoreAdmin/ImportEntities",
request_serializer=datastore_admin.ImportEntitiesRequest.serialize,
response_deserializer=operations.Operation.FromString,
)
return self._stubs["import_entities"]
@property
def get_index(
self,
) -> Callable[[datastore_admin.GetIndexRequest], Awaitable[index.Index]]:
r"""Return a callable for the get index method over gRPC.
Gets an index.
Returns:
Callable[[~.GetIndexRequest],
Awaitable[~.Index]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_index" not in self._stubs:
self._stubs["get_index"] = self.grpc_channel.unary_unary(
"/google.datastore.admin.v1.DatastoreAdmin/GetIndex",
request_serializer=datastore_admin.GetIndexRequest.serialize,
response_deserializer=index.Index.deserialize,
)
return self._stubs["get_index"]
@property
def list_indexes(
self,
) -> Callable[
[datastore_admin.ListIndexesRequest],
Awaitable[datastore_admin.ListIndexesResponse],
]:
r"""Return a callable for the list indexes method over gRPC.
Lists the indexes that match the specified filters.
Datastore uses an eventually consistent query to fetch
the list of indexes and may occasionally return stale
results.
Returns:
Callable[[~.ListIndexesRequest],
Awaitable[~.ListIndexesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_indexes" not in self._stubs:
self._stubs["list_indexes"] = self.grpc_channel.unary_unary(
"/google.datastore.admin.v1.DatastoreAdmin/ListIndexes",
request_serializer=datastore_admin.ListIndexesRequest.serialize,
response_deserializer=datastore_admin.ListIndexesResponse.deserialize,
)
return self._stubs["list_indexes"]
__all__ = ("DatastoreAdminGrpcAsyncIOTransport",)
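# --- Illustrative usage sketch (not part of the generated transport module) ---
# A hedged example of driving this transport directly; in practice it is wrapped by
# the higher-level DatastoreAdminAsyncClient. The request fields below come from
# datastore_admin.ExportEntitiesRequest, and the default constructor falls back to
# application default credentials, so running this requires a configured environment.
async def _example_export_entities(project_id: str, output_url_prefix: str):
    transport = DatastoreAdminGrpcAsyncIOTransport()
    request = datastore_admin.ExportEntitiesRequest(
        project_id=project_id,
        output_url_prefix=output_url_prefix,
    )
    # export_entities exposes the raw unary-unary gRPC callable; awaiting the call
    # returns a google.longrunning Operation describing the background export.
    operation = await transport.export_entities(request)
    return operation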
|
import csv
from gurd import similar
name=''
date=''
left=[]
height=[]
with open('report/report_duration_tour.csv',encoding="utf_8") as f:
csv_reader = csv.reader(f, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count > 0 and line_count%2 == 0:
name=row[0]
print(name)
name=similar(name)
print(name)
name=name[::-1]
data=float(row[1])
left.append(name)
height.append(data)
line_count+=1
print(left)
print(height)
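# Hedged follow-on sketch: the names "left" and "height" suggest these lists are
# meant to feed a bar chart; matplotlib below is an assumption, not part of the
# original script.
# import matplotlib.pyplot as plt
# plt.bar(left, height)
# plt.show()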
|
# Programmer: Chris Tralie
# Purpose: To extract similarity alignments for use in the GUI
import numpy as np
import os
import scipy.misc
import scipy.interpolate  # needed for interp2d used in imresize below
import matplotlib.pyplot as plt
import json
import base64
from taiko_pytorch.graphditty.SimilarityFusion import getS
from taiko_pytorch.graphditty.DiffusionMaps import getDiffusionMap
from taiko_pytorch.graphditty.Laplacian import getRandomWalkLaplacianEigsDense
import time
def imresize(D, dims, kind='cubic'):
"""
Resize a floating point image
Parameters
----------
D : ndarray(M1, N1)
Original image
dims : tuple(M2, N2)
The dimensions to which to resize
kind : string
The kind of interpolation to use
Returns
-------
D2 : ndarray(M2, N2)
A resized array
"""
M, N = dims
x1 = np.array(0.5 + np.arange(D.shape[1]), dtype=np.float32)/D.shape[1]
y1 = np.array(0.5 + np.arange(D.shape[0]), dtype=np.float32)/D.shape[0]
x2 = np.array(0.5 + np.arange(N), dtype=np.float32)/N
y2 = np.array(0.5 + np.arange(M), dtype=np.float32)/M
f = scipy.interpolate.interp2d(x1, y1, D, kind=kind)
return f(x2, y2)
def getBase64File(filename):
fin = open(filename, "rb")
b = fin.read()
b = base64.b64encode(b)
fin.close()
return b.decode("ASCII")
def getBase64PNGImage(pD, cmapstr, logfloor_quantile=0):
"""
Get an image as a base64 string
"""
D = np.array(pD)
if logfloor_quantile > 0:
floor = np.quantile(pD.flatten(), logfloor_quantile)
D = np.log(D + floor)
c = plt.get_cmap(cmapstr)
D = D-np.min(D)
D = np.round(255.0*D/np.max(D))
C = c(np.array(D, dtype=np.int32))
scipy.misc.imsave("temp.png", C)
b = getBase64File("temp.png")
os.remove("temp.png")
return "data:image/png;base64, " + b
# http://stackoverflow.com/questions/1447287/
# format-floats-with-standard-json-module
class PrettyFloat(float):
def __repr__(self):
return '%.4g' % self
def pretty_floats(obj):
if isinstance(obj, float):
return PrettyFloat(obj)
elif isinstance(obj, dict):
return dict((k, pretty_floats(v)) for k, v in obj.items())
elif isinstance(obj, (list, tuple)):
return list(map(pretty_floats, obj))  # list() so the result stays JSON-serializable under Python 3
return obj
def get_graph_obj(W, K=10, res=400):
"""
Return an object corresponding to a nearest neighbor graph
Parameters
----------
W: ndarray(N, N)
The N x N time-ordered similarity matrix
K: int
Number of nearest neighbors to use in the graph representation
res: int
Target resolution of resized image
"""
fac = 1
if res > -1:
print(W.shape)
fac = int(np.round(W.shape[0]/float(res)))
res = int(W.shape[0]/fac)
WRes = imresize(W, (res, res))
else:
res = W.shape[0]
WRes = np.array(W)
np.fill_diagonal(WRes, 0)
pix = np.arange(res)
I, J = np.meshgrid(pix, pix)
WRes[np.abs(I - J) == 1] = np.max(WRes)
c = plt.get_cmap('Spectral')
C = c(np.array(np.round(np.linspace(0, 255, res)), dtype=np.int32))
C = np.array(np.round(C[:, 0:3]*255), dtype=int)
colors = C.tolist()
K = min(int(np.round(K*2.0/fac)), res) # Use slightly more edges
print("res = %i, K = %i" % (res, K))
S = getS(WRes, K).tocoo()
I, J, V = S.row, S.col, S.data
V *= 10
ret = {}
ret["nodes"] = [{"id": "%i" % i,
"color": colors[i]} for i in range(res)]
ret["links"] = [{"source": "%i" % I[i],
"target": "%i" % J[i],
"value": "%.3g" % V[i]} for i in range(I.shape[0])]
ret["fac"] = fac
return ret
def saveResultsJSON(filename,
times,
Ws,
neigs,
jsonfilename,
diffusion_znormalize):
"""
Save a JSON file holding the audio and structure information, which can
be parsed by SongStructureGUI.html. Audio and images are stored as
base64 for simplicity
Parameters
----------
filename: string
Path to audio
times: ndarray(N)
A list of times corresponding to each row in Ws
Ws: Dictionary of (str, ndarray(N, N))
A dictionary of N x N similarity matrices for different feature types
neigs: int
Number of eigenvectors to compute in graph Laplacian
jsonfilename: string
File to which to save the .json file
diffusion_znormalize: boolean
Whether to Z-normalize diffusion maps to spread things out more evenly
"""
Results = {'songname': filename, 'times': times.tolist()}
print("Saving results...")
# Add music as base64 files
_, ext = os.path.splitext(filename)
Results['audio'] = "data:audio/%s;base64, " % \
ext[1::] + getBase64File(filename)
print(Ws.keys())
W = Ws['Fused']
WOut = np.array(W)
np.fill_diagonal(WOut, 0)
Results['W'] = getBase64PNGImage(WOut, 'magma_r', logfloor_quantile=0.01)
Results['dim'] = W.shape[0]
# Compute Laplacian eigenvectors
tic = time.time()
v = getRandomWalkLaplacianEigsDense(W)
v = v[:, 1:neigs+1]
print("Elapsed Time Laplacian: %.3g" % (time.time()-tic))
# Resize the eigenvectors so they're easier to see
fac = 10
vout = np.zeros((v.shape[1]*fac, v.shape[0]))
for i in range(fac):
vout[i::fac, :] = v.T
Results['v'] = getBase64PNGImage(vout, 'magma_r')
Results['v_height'] = vout.shape[0]
# Setup the graph
Results['graph'] = json.dumps(get_graph_obj(WOut))
# Setup diffusion maps
c = plt.get_cmap('Spectral')
C = c(np.array(np.round(np.linspace(0, 255, W.shape[0])), dtype=np.int32))
C = C.flatten()
WDiff = np.array(W)
floor = np.quantile(WDiff, 0.01)
WDiff = np.log(WDiff+floor)
WDiff -= np.min(WDiff)
np.fill_diagonal(WDiff, 0)
X = getDiffusionMap(WDiff, neigs=4, thresh=0)
X = X[:, 0:-1]
if diffusion_znormalize:
X = X - np.mean(X, 0)[None, :]
X = X/np.sqrt(np.sum(X**2, 1))[:, None]
X = X.flatten()
Results['colors'] = C.tolist()
Results['X'] = X.tolist()
fout = open(jsonfilename, "w")
fout.write(json.dumps(Results))
fout.close()
if __name__ == '__main__':
filename = "/Users/chou/Documents/heroz/new/test/5kaija/wav/5kaija.wav"
path, ext = os.path.splitext(filename)
res = "data:audio/%s;base64, " % ext[1::] + getBase64File(filename)
# print(res)
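# Hedged usage sketch for saveResultsJSON (paths, the duration, and the similarity
# matrix W are placeholders; the function itself requires a 'Fused' entry in Ws,
# as read above):
# W = np.load("fused_similarity.npy")
# times = np.linspace(0, 180.0, W.shape[0])
# saveResultsJSON("song.wav", times, {'Fused': W}, neigs=10,
#                 jsonfilename="song.json", diffusion_znormalize=True)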
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
import sys
class ModelLoader(object):
def __init__(self, model_dir, use_cuda=False):
sys.path.append(model_dir)
mymodel = __import__("mymodel")
self.model = mymodel.Model()
self.model.build()
self.inputs = self.model.inputs
self.outputs = self.model.outputs
if use_cuda:
self.exe = fluid.Executor(fluid.CUDAPlace(0))
else:
self.exe = fluid.Executor(fluid.CPUPlace())
self.exe.run(fluid.default_startup_program())
var_list = list()
global_block = fluid.default_main_program().global_block()
with open(model_dir + "/save_var.list") as f:
for line in f:
try:
var = global_block.var(line.strip())
var_list.append(var)
except Exception:  # names in save_var.list that are not in the program are skipped
pass
fluid.io.load_vars(self.exe, model_dir, vars=var_list)
self.program = fluid.default_main_program()
def save_inference_model(self, save_dir):
fluid.io.save_inference_model(save_dir, self.model.inputs,
self.model.outputs, self.exe)
def inference(self, feed_dict):
result = self.exe.run(
self.program, feed=feed_dict, fetch_list=self.model.outputs)
return result
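# --- Hedged usage sketch (not part of the original module) ---
# The directory layout and the "image" feed name below are assumptions drawn from
# the constructor above: model_dir must contain mymodel.py, save_var.list and the
# saved variables themselves, and the feed keys must match the model's input names.
def _example_model_loader(model_dir="/path/to/model_dir"):
    import numpy as np
    loader = ModelLoader(model_dir, use_cuda=False)
    feed = {"image": np.zeros([1, 3, 224, 224], dtype="float32")}  # illustrative shape
    result = loader.inference(feed_dict=feed)
    loader.save_inference_model(model_dir + "/inference_model")
    return result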
|
"""
As described in
http://celery.readthedocs.org/en/latest/django/first-steps-with-django.html
"""
import os
from celery import Celery
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "main.settings")
app = Celery("{{ cookiecutter.project_name }}")
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks()
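# Hedged usage note: assuming this file lives as celery.py inside the "main" package
# next to settings.py, a worker is typically started with
# "celery -A main worker -l info"; the exact -A target depends on the cookiecutter layout.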
|
from muridesu.parse_stmts import parse_stmts
from importlib.util import source_hash, MAGIC_NUMBER
import marshal
def _w_long(x):
return (int(x) & 0xFFFFFFFF).to_bytes(4, 'little')
def _code_to_hash_pyc(code, source_hash, checked=True):
"Produce the data for a hash-based pyc."
data = bytearray(MAGIC_NUMBER)
flags = 0b1 | checked << 1
data.extend(_w_long(flags))
assert len(source_hash) == 8
data.extend(source_hash)
data.extend(marshal.dumps(code))
return data
def comp(path: str, out: str, raw_bytecode: bool = False):
with open(path, 'r') as f:
source = f.read()
mod = parse_stmts(source, path)
code = compile(mod, path, mode='exec')
with open(out, 'wb') as f:
if raw_bytecode:
f.write(marshal.dumps(code))
return
data = _code_to_hash_pyc(code, source_hash(source.encode('utf8')))
f.write(data)
def main():
from argser import call
call(comp)
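# Hedged notes (not part of the original module): _code_to_hash_pyc mirrors the
# PEP 552 hash-based .pyc layout -- a 4-byte magic number, a 4-byte flags word
# (bit 0 marks the pyc as hash-based, bit 1 asks the importer to re-check the
# source hash), the 8-byte source hash, then the marshalled code object.
# Illustrative invocation (file names are placeholders):
# comp("example.mur", "__pycache__/example.pyc")
# comp("example.mur", "example.marshal", raw_bytecode=True)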
|
#parses response from gateway
class UserParse(object):
@staticmethod
def sessions_replace(response, session_id):
importantdata = {}
activeCounter = {} #priority = 0
allCounter = {} #priority = 1
sessionidCounter = {} #priority = 2
#sessions_replace is one of those undocumented events that have weird formatting. :(
for session in response['d']:
if session.get("active") == True:
activeCounter = session
break  # no need to check anything else
elif session.get("session_id") == "all":
allCounter = session
elif session.get("session_id") == session_id:
sessionidCounter = session
#now start the processing
if len(activeCounter) > 0:
importantdata["status"] = activeCounter["status"]
importantdata["activities"] = {i["type"]:i for i in activeCounter["activities"]}
return importantdata
elif len(allCounter) > 0:
importantdata["status"] = allCounter["status"]
importantdata["activities"] = {j["type"]:j for j in allCounter["activities"]}
return importantdata
elif len(sessionidCounter) > 0:
importantdata["status"] = sessionidCounter["status"]
importantdata["activities"] = {k["type"]:k for k in sessionidCounter["activities"]}
return importantdata
else:
return {}
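# Hedged illustration of the gateway payload shape this parser expects (keys mirror
# those read above; the concrete values are made up):
# response = {"d": [
#     {"session_id": "all", "active": False, "status": "online",
#      "activities": [{"type": 0, "name": "some game"}]},
# ]}
# UserParse.sessions_replace(response, session_id="abc123")
# # -> {"status": "online", "activities": {0: {"type": 0, "name": "some game"}}}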
|
#!/usr/bin/env python3
import benchwork
import protos
from os import urandom
SAMPLE_RATE = 48000
SAMPLES_PER_MS = int(SAMPLE_RATE / 1000)
client = "org.mycompany.myclient.134567"
def make_sound(ms: int = 0) -> bytes:
return bytearray(urandom(ms * SAMPLES_PER_MS))
@benchwork.benchmark(with_classes=protos.all_protos)
def voice_send(proto: protos.VaProto):
proto.send_message("/asis_api/voice/", client, make_sound(ms=200))
@benchwork.benchmark(with_classes=protos.all_protos)
def sound_send(proto: protos.VaProto):
proto.send_message("/asis_api/sound/", client, make_sound(ms=2_000))
@benchwork.benchmark(with_classes=protos.all_protos)
def large_sound(proto: protos.VaProto):
proto.send_message("/asis_api/sound/", client, make_sound(ms=50_000))
@benchwork.benchmark(with_classes=protos.all_protos)
def skill_answer(proto: protos.VaProto):
proto.send_message("/asis_api/intent/request_weather", client, {
"input": "what is the weather like in london, england tomorrow?",
"intent": {
"intentName": "request_weather",
"confidenceScore": 0.8564,
},
"slots": [
{
"entity": "location",
"slot": "place",
"confidence": 0.687,
"rawValue": "london, england",
"value": {
"latitude": 51.5073,
"longitude": -0.1277,
},
"range": {
"start": 28,
"end": 43
}
}
],
"id": "13546789",
"siteId": client,
"sessionId": "1234a56d3c679e",
"asrConfidence": 0.678,
})
benchwork.run()
|
from collections import namedtuple
stgram_tuple = namedtuple('stgram_tuple', ['date', 'text_lines', 'in_text_votes',
'reg_by_name_dict',
# {rep_tuple: registered_bool}
'reg_by_party_dict',
# {name_string: reg_stats_per_party_tuple}
'sessions',
# [sesion_tuple]
])
rep_tuple = namedtuple('rep_tuple', ['name', 'party'])
session_tuple = namedtuple('session_tuple', ['description',
'time',
'votes_by_name_dict',
# {rep_tuple: vote_code_string}
'votes_by_party_dict',
# {name_string: vote_stats_per_party_tuple}
])
reg_stats_per_party_tuple = namedtuple('reg_stats_per_party_tuple', ['present', 'expected'])
vote_stats_per_party_tuple = namedtuple('vote_stats_per_party_tuple', ['yes', 'no', 'abstained', 'total'])
|
# %%
import os
import pandas as pd
import numpy as np
import datetime
from scripts import versionfinal,versionurgencia,versionespecifico,identificacionmotor,motorseguncilindrada,corregirmarca, progreso, motor, quitardecimal, valores, modelogeneral, especifico, origensegunvin, version, modelogenerico, especifico2, corregirmodelo, segmentacion, cilindrada, traccion, marca
# %% DATA LOADING
path = r'D:\Basededatos\Origen\Honduras'
os.chdir(path)
files = os.listdir(path)
files
files_xls = [f for f in files if f[-4:] == 'xlsx']
files_xls
# %%
honduras = pd.DataFrame()
for f in files_xls:
data = pd.read_excel(f, engine='openpyxl')
honduras = pd.concat([honduras , data], ignore_index=True, join='outer')
data = None
# %% IDENTIFY SOURCE DATA WITHIN THE STANDARD FORMAT
honduras.rename(columns={
'AÑO DEL VEHICULO': 'AÑO',
'TIPO': 'SEGMENTO.1',
"TIPO DE\nCOMBUSTIBLE": "COMBUSTIBLE",
'MODELO': 'MODELO/VERSION',
'CILINDRAJE': 'CILINDRADA',
"CANTIDAD DE\nPLACAS": "CANTIDAD"},
inplace=True)
# %% COLUMNS TO ADD AND MARKET IDENTIFICATION
honduras["MERCADO"] = "HONDURAS"
honduras["MOTOR"] = None
honduras["MODELO GENERICO"] = None
honduras["MODELO"] = None
honduras["VERSION"] = None
honduras["TIPO_VEHICULO"] = None
# %% ROWS WITHOUT A REFERENCE ARE UNUSABLE
condicion = honduras["MODELO/VERSION"].notna()
honduras = honduras[condicion]
# %%
columnasutiles = [
"MERCADO",
"TIPO_VEHICULO",
"SEGMENTO.1",
"MARCA",
"MODELO GENERICO",
"MODELO",
"MODELO/VERSION",
"VERSION",
"AÑO",
"MOTOR",
"CILINDRADA",
"COMBUSTIBLE",
"CANTIDAD"
]
honduras = honduras[columnasutiles]
# %% FIX THE AÑO (YEAR) COLUMN
condicion = honduras["AÑO"] == 0
honduras.loc[condicion, "AÑO"] = None
condicion = None
condicion = honduras["AÑO"] > 2021
honduras.loc[condicion, "AÑO"] = None
condicion = None
condicion = honduras["AÑO"].notna()
honduras = honduras[condicion]
# %%
honduras = corregirmarca(honduras, columnasutiles)
# %%
honduras["MODELO/VERSION"] = honduras["MODELO/VERSION"].astype(str).str.strip()
honduras = especifico2(honduras, columnasutiles)
honduras = versionfinal(honduras)
honduras = corregirmodelo(honduras, columnasutiles)
honduras = segmentacion(honduras,columnasutiles)
# %%
condicion = honduras["COMBUSTIBLE"] == "G"
honduras.loc[condicion, "COMBUSTIBLE"] = "GASOLINA"
condicion = honduras["COMBUSTIBLE"] == "D"
honduras.loc[condicion, "COMBUSTIBLE"] = "DIESEL"
condicion = honduras["COMBUSTIBLE"].isin(["GASOLINA", "DIESEL"])
honduras.loc[~condicion, "COMBUSTIBLE"] = None
# %%
honduras = modelogenerico(honduras)
# %% MOTOR
honduras = motor(honduras)
# %%
honduras = quitardecimal(honduras, "AÑO")
# %%
honduras.to_csv(r'D:\Basededatos\Limpioparaunir\honduras.csv', index=False)
# %%
honduras.info(null_counts=True)
# %%
condicion = honduras["MODELO"].isna()
valores(honduras[condicion], "MODELO/VERSION")
# %%
|
import os
def install_dependencies():
stream = os.popen('git clone https://github.com/chrsmrrs/tudataset.git && \
pip --no-cache-dir install torch-scatter==latest+cu101 -f https://pytorch-geometric.com/whl/torch-1.7.0.html && \
pip --no-cache-dir install torch-sparse==latest+cu101 -f https://pytorch-geometric.com/whl/torch-1.7.0.html && \
pip --no-cache-dir install torch-cluster==latest+cu101 -f https://pytorch-geometric.com/whl/torch-1.7.0.html && \
pip --no-cache-dir install torch-spline-conv==latest+cu101 -f https://pytorch-geometric.com/whl/torch-1.7.0.html && \
pip --no-cache-dir install torch-geometric && \
pip --no-cache-dir install pybind11 && \
sudo apt-get install libeigen3-dev && \
cd .. && \
cd /content/tudataset/tud_benchmark/kernel_baselines/ && \
g++ -I /usr/include/eigen3 -O3 -shared -std=c++11 -fPIC `python3 -m pybind11 --includes` kernel_baselines.cpp src/*cpp -o ../kernel_baselines`python3-config --extension-suffix`')
output = stream.read()
print(output)
|
import logging
import os
import os.path
import urllib.request, urllib.parse, urllib.error
from edge.writer.solrtemplateresponsewriter import SolrTemplateResponseWriter
from edge.response.solrjsontemplateresponse import SolrJsonTemplateResponse
class Writer(SolrTemplateResponseWriter):
def __init__(self, configFilePath):
super(Writer, self).__init__(configFilePath)
self.contentType = 'application/json'
templatePath = os.path.dirname(configFilePath) + os.sep
templatePath += self._configuration.get('service', 'template')
self.template = self._readTemplate(templatePath)
def _generateOpenSearchResponse(self, solrResponse, searchText, searchUrl, searchParams, pretty):
response = SolrJsonTemplateResponse()
response.setTemplate(self.template)
return response.generate(solrResponse, pretty=pretty)
def _constructSolrQuery(self, startIndex, entriesPerPage, parameters, facets):
queries = []
filterQueries = []
filterQueries.append('status:1')
sort = None
for key, value in parameters.items():
if value != "":
if key == 'keyword':
# Special case: keyword search on glossary_items only matches the title
if 'table' in parameters and parameters['table'] == 'glossary_items':
queries.append('title_t:('+urllib.parse.quote(value) + ')')
else:
queries.append(urllib.parse.quote(value))
elif key == 'year':
start = value + "-01-01T00:00:00.000Z"
end = value + "-12-31T23:59:59.999Z"
filterQueries.append('created_at:['+start+'%20TO%20'+end+']')
elif key == 'table':
filterQueries.append('type:' + value)
elif key == 'glossary_title':
range = value.lower().split('-')
filterQueries.append('{!frange%20l=' + range[0] + '%20u=' + range[1] + 'z}' + 'title_lc')
elif key == 'sort':
sort = urllib.parse.quote(value)
elif key == 'topic_id':
filterQueries.append('categories_id:' + value)
elif key == 'mission_id':
filterQueries.append('mission_ids_array:' + value)
else:
if type(value) is list:
if 'table' in parameters and parameters['table'] == 'news_items':
filterQueries.append(key + ':(' + '+OR+'.join([self._urlEncodeSolrQueryValue(v) for v in value]) + ')')
else:
for v in value:
filterQueries.append(key + ':' + self._urlEncodeSolrQueryValue(v))
else:
filterQueries.append(key + ':' + self._urlEncodeSolrQueryValue(value))
if len(queries) == 0:
queries.append('*:*')
query = 'q='+'+AND+'.join(queries)+'&version=2.2&indent=on&wt=json&start='+str(startIndex)+'&rows='+str(entriesPerPage)
if len(filterQueries) > 0:
query += '&fq='+'+AND+'.join(filterQueries)
if sort is not None:
query += '&sort=' + sort
logging.debug('solr query: '+query)
return query
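# Hedged illustration of the query string _constructSolrQuery builds (the parameter
# values are made up): with parameters {'keyword': 'ocean', 'table': 'news_items'},
# startIndex=0 and entriesPerPage=10 the result is
# q=ocean&version=2.2&indent=on&wt=json&start=0&rows=10&fq=status:1+AND+type:news_items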
|
from .iimpute import IImpute
from .version import __version__
name = "i-impute"
|
import glob
import numpy as np
import scipy.misc
import os
import time
import constants
import threading
from utils import bb_util
from utils import drawing
from utils import py_util
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
WEIGHT_PATH = os.path.join(DIR_PATH, 'yolo_weights/')
class ObjectDetector(object):
def __init__(self, detector_num=0):
import darknet as dn
dn.set_gpu(int(constants.DARKNET_GPU))
self.detector_num = detector_num
self.net = dn.load_net(py_util.encode(WEIGHT_PATH + 'yolov3-thor.cfg'),
py_util.encode(WEIGHT_PATH + 'yolov3-thor_final.weights'), 0)
self.meta = dn.load_meta(py_util.encode(WEIGHT_PATH + 'thor.data'))
self.count = 0
def detect(self, image, confidence_threshold=constants.DETECTION_THRESHOLD):
import darknet as dn
self.count += 1
start = time.time()
#results = dn.detect_numpy(self.net, self.meta, image, thresh=confidence_threshold)
results = dn.detect(self.net, self.meta, image, thresh=confidence_threshold)
if len(results) > 0:
classes, scores, boxes = zip(*results)
else:
classes = []
scores = []
boxes = np.zeros((0, 4))
boxes = np.array(boxes)
scores = np.array(scores)
classes = np.array([py_util.decode(cls) for cls in classes])
inds = np.where(np.logical_and(scores > confidence_threshold,
np.min(boxes[:, [2, 3]], axis=1) > .01 * image.shape[0]))[0]
used_inds = []
for ind in inds:
if classes[ind] in constants.OBJECTS_SET:
used_inds.append(ind)
inds = np.array(used_inds)
if len(inds) > 0:
classes = np.array(classes[inds])
boxes = boxes[inds]
if len(boxes) > 0:
boxes = bb_util.xywh_to_xyxy(boxes.T).T
boxes *= np.array([constants.SCREEN_HEIGHT * 1.0 / image.shape[1],
constants.SCREEN_WIDTH * 1.0 / image.shape[0]])[[0, 1, 0, 1]]
boxes = np.clip(np.round(boxes), 0, np.array([constants.SCREEN_WIDTH,
constants.SCREEN_HEIGHT])[[0, 1, 0, 1]]).astype(np.int32)
scores = scores[inds]
else:
boxes = np.zeros((0, 4))
classes = np.zeros(0)
scores = np.zeros(0)
return boxes, scores, classes
def visualize_detections(image, boxes, classes, scores):
out_image = image.copy()
if len(boxes) > 0:
boxes = (boxes / np.array([constants.SCREEN_HEIGHT * 1.0 / image.shape[1],
constants.SCREEN_WIDTH * 1.0 / image.shape[0]])[[0, 1, 0, 1]]).astype(np.int32)
for ii,box in enumerate(boxes):
drawing.draw_detection_box(out_image, box, classes[ii], confidence=scores[ii], width=2)
return out_image
singleton_detector = None
detectors = []
def setup_detectors(num_detectors=1):
global detectors
for dd in range(num_detectors):
detectors.append(ObjectDetector(dd))
detector_ind = 0
detector_lock = threading.Lock()
def get_detector():
global detectors, detector_ind
detector_lock.acquire()
detector = detectors[detector_ind % len(detectors)]
detector_ind += 1
detector_lock.release()
return detector
if __name__ == '__main__':
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = DIR_PATH + '/test_images'
TEST_IMAGE_PATHS = sorted(glob.glob(os.path.join(PATH_TO_TEST_IMAGES_DIR, '*.jpg')))
if not os.path.exists(DIR_PATH + '/test_images/output'):
os.mkdir(DIR_PATH + '/test_images/output')
setup_detectors()
detector = get_detector()
t_start = time.time()
import cv2
for image_path in TEST_IMAGE_PATHS:
print('image', image_path)
image = scipy.misc.imread(image_path)
(boxes, scores, classes) = detector.detect(image)
# Visualization of the results of a detection.
image = visualize_detections(image, boxes, classes, scores)
scipy.misc.imsave(DIR_PATH + '/test_images/output/' + os.path.basename(image_path), image)
total_time = time.time() - t_start
print('total time %.3f' % total_time)
print('per image time %.3f' % (total_time / len(TEST_IMAGE_PATHS)))
|
from .args import ConsoleArgumentParser
from .exception import ConsoleQuit, ConsoleExit
from .server import ConsoleHandler, ConsoleServer
from . import defaults
from . import commands
__all__ = ['ConsoleArgumentParser', 'ConsoleQuit', 'ConsoleExit', 'ConsoleHandler', 'ConsoleServer', 'defaults', 'commands']
__version__ = '0.1.0a1'
|
# Developer: Emre Cimen
# Date: 06-17-2019
# Include CF.py file for Random Subspace Ensemble Classifier based on Conic Functions
# Datasets' last column should be class information.
import pandas as pd
from sklearn.model_selection import StratifiedKFold
import time
import numpy as np
from CF import EnsambleCF
from sklearn import preprocessing
np.random.seed(0)
# Set to 1 when a separate test file is available, or 0 to run 5-fold cross-validation
if 1:
dfTrain = pd.read_csv("/users/path/train.csv", header=None)
dfTest = pd.read_csv("/users/path/test.csv", header=None)
start_time = time.time()
X = dfTrain[dfTrain.columns[0:-1]]
YD = dfTrain[dfTrain.columns[-1]]
XTest = dfTest[dfTest.columns[0:-1]]
YDTest = dfTest[dfTest.columns[-1]]
YdfFac = pd.factorize(pd.concat([YD, YDTest]))[0]
Y=YdfFac[:YD.shape[0]]
YTest = YdfFac[YD.shape[0]:]
#Grid search for parameters
MEM = [5, 10, 20]
RAT = [0.1, 0.2, 0.3]
ALF = [0.01, 0.05, 0.1, 0.2]
paracount = 0
for mem in MEM:
for rat in RAT:
for alf in ALF:
start_time = time.time()
clf = EnsambleCF(maxRatio=rat, alfa=alf, member=mem)
mystr = ""
fld = 0
paracount = paracount + 1
print(paracount)
clf.fit(X, Y)
test_scores = clf.score(XTest, YTest)
train_score = clf.score(X, Y)
mystr = mystr + "*Fold" + str(fld) + "*Member*" + str(mem) + "*Ratio*" + str(rat) + "*Alfa*" + str(
alf) + "*Train *" + str(train_score) + "*Test*" + str(test_scores) + "*Time *" + str(
time.time() - start_time)
with open("Ensemble-Predictions.txt", "a") as myfile:
mystr = mystr + "\n"
myfile.write(mystr)
else:
df=pd.read_csv("/users/path/dataset.csv",header=None)
X = df[df.columns[0:-1]]
YD = df[df.columns[-1]]
Y=pd.factorize(YD)[0]
#5 fold cross validation
kf = StratifiedKFold(n_splits=5)
#Grid search for parameters
MEM=[5,10,20]
RAT=[0.1,0.2,0.3]
ALF=[0.01, 0.05, 0.1, 0.2]
paracount=0
for mem in MEM:
for rat in RAT:
for alf in ALF:
start_time = time.time()
clf = EnsambleCF(maxRatio=rat, alfa=alf,member=mem)
mystr = ""
fld=0
paracount=paracount+1
print(paracount)
for train_index, test_index in kf.split(X,Y):
fld=fld+1
clf.fit(X.iloc[train_index], Y[train_index])
test_scores=clf.score(X.iloc[test_index], Y[test_index])
train_score=clf.score(X.iloc[train_index], Y[train_index])
mystr = mystr+"*Fold"+str(fld) +"*Member*"+str(mem) +"*Ratio*"+str(rat) +"*Alfa*"+str(alf) +"*Train *"+str(train_score) + "*Test*" + str(test_scores) + "*Time *" + str(time.time() - start_time)
with open("Ensemble-Predictions.txt", "a") as myfile:
mystr = mystr + "\n"
myfile.write(mystr)
|
import segmentation_models_pytorch as smp
import torch
import torch.nn.functional as F
import torch.nn as nn
class BCEDiceLoss(smp.utils.losses.DiceLoss):
def __init__(self, eps=1e-7, activation="sigmoid"):
super().__init__(eps, activation)
if activation is None or activation == "none":
# activation was applied beforehand by the NN
self.bce = nn.BCELoss(reduction="mean")
else:
self.bce = nn.BCEWithLogitsLoss(reduction="mean")
def forward(self, y_pr, y_gt):
dice = super().forward(y_pr, y_gt)
bce = self.bce(y_pr, y_gt)
return dice + bce
class FocalLoss(nn.Module):
def __init__(self, alpha=1, gamma=2, logits=False, reduce=True):
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.logits = logits
self.reduce = reduce
def forward(self, inputs, targets):
if self.logits:
BCE_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduce=False)
else:
BCE_loss = F.binary_cross_entropy(inputs, targets, reduce=False)
pt = torch.exp(-BCE_loss)
F_loss = self.alpha * (1-pt)**self.gamma * BCE_loss
if self.reduce:
return torch.mean(F_loss)
else:
return F_loss
class HengFocalLoss(nn.Module):
"""
From Heng's starter kit:
https://www.kaggle.com/c/understanding_cloud_organization/discussion/115787#latest-674710
Assumes that the model returns probabilities not logits! Also, not tested
for segmentation.
"""
def __init__(self):
super(HengFocalLoss, self).__init__()
print("Assumes the model returns probabilities, not logits!")
def forward(self, inputs, targets):
# clipping probabilities
p = torch.clamp(inputs, 1e-9, 1-1e-9)
loss_label = -targets*torch.log(p) - 2*(1-targets)*torch.log(1-p)
loss_label = loss_label.mean()
return loss_label
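# --- Hedged usage sketch (not part of the original module) ---
# Shapes below are illustrative; logits=True matches a model head that has not yet
# applied a sigmoid.
def _example_focal_loss():
    criterion = FocalLoss(alpha=1, gamma=2, logits=True)
    preds = torch.randn(4, 1, 64, 64)                      # raw logits
    targets = torch.randint(0, 2, (4, 1, 64, 64)).float()  # binary masks
    return criterion(preds, targets)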
|
#!/usr/bin/env python
import pynuodb
import unittest
from nuodb_base import NuoBase
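# The assertions below pin down a big-endian two's-complement byte encoding:
# positive values whose top bit would be set gain a leading 0x00 pad
# (254 -> 00 FE, 255 -> 00 FF) so they do not read back as negative, while
# -1 round-trips as a single 0xFF byte and -256 as FF 00.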
class NuoDBBasicTest(unittest.TestCase):
def test_toByteString(self):
self.assertEqual(pynuodb.crypt.toSignedByteString(1), '01'.decode('hex'))
self.assertEqual(pynuodb.crypt.toSignedByteString(127), '7F'.decode('hex'))
self.assertEqual(pynuodb.crypt.toSignedByteString(254), '00FE'.decode('hex'))
self.assertEqual(pynuodb.crypt.toSignedByteString(255), '00FF'.decode('hex'))
self.assertEqual(pynuodb.crypt.toSignedByteString(-1), 'FF'.decode('hex'))
self.assertEqual(pynuodb.crypt.toSignedByteString(-2), 'FE'.decode('hex'))
self.assertEqual(pynuodb.crypt.toSignedByteString(-256), 'FF00'.decode('hex'))
self.assertEqual(pynuodb.crypt.toSignedByteString(-258), 'FEFE'.decode('hex'))
def test_fromByteString(self):
self.assertEqual(pynuodb.crypt.fromSignedByteString('01'.decode('hex')), 1)
self.assertEqual(pynuodb.crypt.fromSignedByteString('00FF'.decode('hex')), 255)
self.assertEqual(pynuodb.crypt.fromSignedByteString('FF'.decode('hex')), -1)
self.assertEqual(pynuodb.crypt.fromSignedByteString('FF01'.decode('hex')), -255)
self.assertEqual(pynuodb.crypt.fromSignedByteString('FF00'.decode('hex')), -256)
self.assertEqual(pynuodb.crypt.fromSignedByteString('FEFE'.decode('hex')), -258)
def test_bothByteString(self):
self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(1)), 1)
self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(0)), 0)
self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(-1)), -1)
self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(256)), 256)
self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(-256)), -256)
if __name__ == '__main__':
unittest.main()
|
from vizdoomaze.envs.vizdoomenv import VizdoomEnv
class vizdoomazeTwo3(VizdoomEnv):
def __init__(self):
super(vizdoomazeTwo3, self).__init__(36)
|
# -*- coding: utf-8 -*-
"""
Abstract base class implementation
"""
__author__ = 'Samir Adrik'
__email__ = 'samir.adrik@gmail.com'
from uuid import uuid4
from abc import ABC, abstractmethod
class Entity(ABC):
"""
abstract base entity class
"""
@abstractmethod
def __init__(self):
"""
Constructor / instantiate the class. Only one attribute, the id, generated by uuid4
"""
self._id = str(uuid4())
@property
def id_(self):
"""
Id getter
"""
return self._id
|
from RachelCore.RachelCore import *
|
import os  # needed for os.path.join in find_search_paths below
import sqlite3
import traceback
import pandas as pd
from django.db import IntegrityError
from functions import pandda_functions
from xchem_db.models import *
def find_pandda_logs(search_path):
print('RUNNING')
# If statement checking if log files have been found
log_files = pandda_functions.find_log_files(search_path)
return log_files
def add_pandda_sites(log_file, sites_file):
run = PanddaRun.objects.get(pandda_log=str(log_file).rstrip())
sites_frame = pd.read_csv(sites_file)
for i in range(0, len(sites_frame['site_idx'])):
site = sites_frame['site_idx'][i]
aligned_centroid = eval(sites_frame['centroid'][i])
native_centroid = eval(sites_frame['native_centroid'][i])
print(f'Adding pandda site: {site}')
try:
pandda_site = PanddaSite.objects.get_or_create(pandda_run=run, site=site,
site_aligned_centroid_x=aligned_centroid[0],
site_aligned_centroid_y=aligned_centroid[1],
site_aligned_centroid_z=aligned_centroid[2],
site_native_centroid_x=native_centroid[0],
site_native_centroid_y=native_centroid[1],
site_native_centroid_z=native_centroid[2]
)[0]
except IntegrityError:
pandda_site = PanddaSite.objects.get(pandda_run=run, site=site)
pandda_site.site_aligned_centroid_x = aligned_centroid[0]
pandda_site.site_aligned_centroid_y = aligned_centroid[1]
pandda_site.site_aligned_centroid_z = aligned_centroid[2]
pandda_site.site_native_centroid_x = native_centroid[0]
pandda_site.site_native_centroid_y = native_centroid[1]
pandda_site.site_native_centroid_z = native_centroid[2]
pandda_site.save()
return ''
def add_pandda_events(events_file, log_file, sdbfile):
events_frame = pd.read_csv(events_file)
error_file = f'{log_file}.transfer.err'
for i in range(0, len(events_frame['dtag'])):
event_site = (events_frame['site_idx'][i])
run = PanddaRun.objects.get(pandda_log=log_file)
site = PanddaSite.objects.get_or_create(site=int(event_site), pandda_run=run)[0]
input_directory = run.input_dir
output_directory = run.pandda_analysis.pandda_dir
map_file_path, input_pdb_path, input_mtz_path, aligned_pdb_path, pandda_model_path, \
exists_array = pandda_functions.get_file_names(
bdc=events_frame['1-BDC'][i],
crystal=events_frame['dtag'][i],
input_dir=input_directory,
output_dir=output_directory,
event=events_frame['event_idx'][i]
)
if False not in exists_array:
lig_strings = pandda_functions.find_ligands(pandda_model_path)
try:
event_ligand, event_ligand_centroid, event_lig_dist, site_event_dist = \
pandda_functions.find_ligand_site_event(
ex=events_frame['x'][i],
ey=events_frame['y'][i],
ez=events_frame['z'][i],
nx=site.site_native_centroid_x,
ny=site.site_native_centroid_y,
nz=site.site_native_centroid_z,
lig_strings=lig_strings,
pandda_model_path=pandda_model_path
)
crystal = Crystal.objects.get_or_create(crystal_name=events_frame['dtag'][i],
visit=SoakdbFiles.objects.get_or_create(
filename=sdbfile)[0]
)[0]
pandda_event = PanddaEvent.objects.get_or_create(
crystal=crystal,
site=site,
refinement=Refinement.objects.get_or_create(crystal_name=crystal)[0],
data_proc=DataProcessing.objects.get_or_create(crystal_name=crystal)[0],
pandda_run=run,
event=events_frame['event_idx'][i],
event_centroid_x=events_frame['x'][i],
event_centroid_y=events_frame['y'][i],
event_centroid_z=events_frame['z'][i],
event_dist_from_site_centroid=site_event_dist,
lig_centroid_x=event_ligand_centroid[0],
lig_centroid_y=event_ligand_centroid[1],
lig_centroid_z=event_ligand_centroid[2],
lig_dist_event=event_lig_dist,
lig_id=event_ligand,
pandda_event_map_native=map_file_path,
pandda_model_pdb=pandda_model_path,
pandda_input_pdb=input_pdb_path,
pandda_input_mtz=input_mtz_path)[0]
pandda_event.save()
event_stats_dict = pandda_functions.translate_event_stats(events_file, i)
event_stats_dict['event'] = pandda_event
pandda_event_stats = PanddaEventStats.objects.get_or_create(**event_stats_dict)[0]
pandda_event_stats.save()
crystal.status = Crystal.PANDDA
crystal.save()
except Exception as exc:
print(traceback.format_exc())
print(exc)
else:
with open(error_file, 'a') as f:
f.write('CRYSTAL: ' + str(events_frame['dtag'][i]) + ' SITE: ' + str(event_site) +
' EVENT: ' + str(events_frame['event_idx'][i]) + '\n')
print('FILES NOT FOUND FOR EVENT: ' + str(events_frame['event_idx'][i]))
f.write('FILES NOT FOUND FOR EVENT: ' + str(events_frame['event_idx'][i]) + '\n')
print('EXPECTED: ')
f.write('EXPECTED: ' + '\n')
print(str([map_file_path, input_pdb_path, input_mtz_path, aligned_pdb_path, pandda_model_path]))
f.write(str([map_file_path, input_pdb_path, input_mtz_path, aligned_pdb_path, pandda_model_path])
+ '\n')
print(exists_array)
f.write(str(exists_array) + '\n')
f.write('\n\n')
return ''
def add_pandda_run(log_file, input_dir, output_dir, pver, sites_file, events_file):
print('ADDING PANDDA RUN...')
pandda_run = \
PanddaRun.objects.get_or_create(pandda_log=log_file, input_dir=input_dir,
pandda_analysis=PanddaAnalysis.objects.get_or_create(
pandda_dir=output_dir)[0],
pandda_version=pver, sites_file=sites_file,
events_file=events_file)[0]
pandda_run.save()
return ''
def find_pandda_info(inputs, output, sdbfile):
# inputs should be self.input()?
# output should be what is returned from self.output()?
# sdbfile : self.sbdfile
# read the list of log files
with inputs.open('r') as f:
log_files = [logfile.rstrip() for logfile in f.readlines()]
out_dict = {
'log_file': [],
'pver': [],
'input_dir': [],
'output_dir': [],
'sites_file': [],
'events_file': [],
'sdbfile': []
}
for log_file in log_files:
# read information from the log file
pver, input_dir, output_dir, sites_file, events_file, err = pandda_functions.get_files_from_log(log_file)
if not err and sites_file and events_file and '0.1.' not in pver:
# if no error, and sites and events present, add events from events file
# yield AddPanddaEvents(
out_dict['log_file'].append(log_file)
out_dict['pver'].append(pver)
out_dict['input_dir'].append(input_dir)
out_dict['output_dir'].append(output_dir)
out_dict['sites_file'].append(sites_file)
out_dict['events_file'].append(events_file)
out_dict['sdbfile'].append(sdbfile)
else:
print(pver)
print(input_dir)
print(output_dir)
print(sites_file)
print(events_file)
print(err)
frame = pd.DataFrame.from_dict(out_dict)
frame.to_csv(output.path)
return ''
def add_pandda_data():
# Do nothing?
return ''
def find_search_paths(inputs, output, soak_db_filepath):
# inputs: self.input()
# output: self.output()
# soak_db_filepath : self.soak_db_filepath
with inputs.open('r') as f:
paths = [datafile.rstrip() for datafile in f.readlines()]
search_paths = []
soak_db_files = []
for path in paths:
if 'database' not in path:
continue
else:
search_path = path.split('database')
if len(search_path) > 1:
search_paths.append(search_path[0])
soak_db_files.append(str('database' + search_path[1]))
zipped = list(zip(search_paths, soak_db_files))
to_exclude = []
for path in list(set(search_paths)):
count = search_paths.count(path)
if count > 1:
while path in search_paths:
search_paths.remove(path)
to_exclude.append(path)
out_dict = {'search_path': [], 'soak_db_filepath': [], 'sdbfile': []}
for path, sdbfile in zipped:
if path in to_exclude:
continue
else:
print(path)
print(sdbfile)
print(os.path.join(path, sdbfile))
out_dict['search_path'].append(path)
out_dict['soak_db_filepath'].append(soak_db_filepath)
out_dict['sdbfile'].append(os.path.join(path, sdbfile))
frame = pd.DataFrame.from_dict(out_dict)
print(output.path)
frame.to_csv(output.path)
return ''
def transfer_pandda():
return ''
def annotate_events(soakdb_filename):
events = PanddaEvent.objects.filter(crystal__visit__filename=soakdb_filename)
conn = sqlite3.connect(soakdb_filename)
conn.row_factory = sqlite3.Row
c = conn.cursor()
for e in events:
c.execute(
"select PANDDA_site_confidence, PANDDA_site_InspectConfidence from panddaTable where "
"CrystalName = ? and PANDDA_site_index = ? and PANDDA_site_event_index = ?",
(e.crystal.crystal_name, e.site.site, e.event)
)
results = c.fetchall()
if len(results) == 1:
e.ligand_confidence_inspect = results[0]['PANDDA_site_InspectConfidence']
e.ligand_confidence = results[0]['PANDDA_site_confidence']
e.ligand_confidence_source = 'SD'
e.save()
elif len(results) > 1:
raise Exception('too many events found in soakdb!')
return ''
def annotate_all_events():
return ''
|
def correct_sentence(text: str) -> str:
"""
returns a corrected sentence which starts with a capital letter
and ends with a dot.
"""
# your code here
if text.endswith('.'):
return text.capitalize()
else:
return text.capitalize() + "."
if __name__ == '__main__':
print("Example:")
print(correct_sentence("greetings, friends"))
# These "asserts" are used for self-checking and not for an auto-testing
assert correct_sentence("greetings, friends") == "Greetings, friends."
assert correct_sentence("Greetings, friends") == "Greetings, friends."
assert correct_sentence("Greetings, friends.") == "Greetings, friends."
assert correct_sentence("hi") == "Hi."
print("Coding complete? Click 'Check' to earn cool rewards!")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-04-26 20:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ArtDerVeranstaltung',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bezeichnung', models.CharField(max_length=30)),
('slug', models.SlugField(blank=True, max_length=30)),
('zeit_erstellt', models.DateTimeField(auto_now_add=True)),
('beschreibung', models.TextField(blank=True, max_length=1200, null=True)),
('preis_praesenz', models.SmallIntegerField()),
('preis_livestream', models.SmallIntegerField(blank=True, null=True)),
('preis_aufzeichnung', models.SmallIntegerField(blank=True, null=True)),
('max_teilnehmer', models.SmallIntegerField(blank=True, null=True)),
('zeit_beginn', models.TimeField()),
('zeit_ende', models.TimeField()),
],
options={
'verbose_name_plural': 'Arten der Veranstaltungen',
},
),
migrations.CreateModel(
name='Medium',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bezeichnung', models.CharField(max_length=30)),
('slug', models.SlugField(blank=True, max_length=30)),
('zeit_erstellt', models.DateTimeField(auto_now_add=True)),
('datei', models.FileField(upload_to='')),
('typ', models.CharField(blank=True, max_length=30, null=True)),
('beschreibung', models.TextField(blank=True, max_length=2000, null=True)),
('datum', models.DateField(blank=True, null=True)),
],
options={
'verbose_name_plural': 'Medien',
},
),
migrations.CreateModel(
name='Studiumdings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bezeichnung', models.CharField(max_length=30)),
('slug', models.SlugField(blank=True, max_length=30)),
('zeit_erstellt', models.DateTimeField(auto_now_add=True)),
('beschreibung1', models.TextField()),
('beschreibung2', models.TextField()),
],
options={
'verbose_name_plural': 'Studiendinger',
},
),
migrations.CreateModel(
name='Veranstaltung',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bezeichnung', models.CharField(max_length=30)),
('slug', models.SlugField(blank=True, max_length=30)),
('zeit_erstellt', models.DateTimeField(auto_now_add=True)),
('beschreibung', models.TextField()),
('datum', models.DateField()),
('art_veranstaltung', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Veranstaltungen.ArtDerVeranstaltung')),
],
options={
'verbose_name_plural': 'Veranstaltungen',
},
),
migrations.AddField(
model_name='medium',
name='gehoert_zu',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='Veranstaltungen.Veranstaltung'),
),
]
|
import unittest
from decimal import Decimal
from test.conftest import mocked_order_result
from unittest.mock import patch
import binance.client
import pytest
from bot.commands import BuyCommand
from bot.data_types import (
ExchangeOrder,
MarketBuyStrategy,
MarketIndexStrategy,
OrderTimeInForce,
OrderType,
SupportedExchanges,
)
from bot.user import user_from_env
@pytest.mark.vcr
class TestBuyCommand(unittest.TestCase):
# min & max are set to the same value to isolate testing of various details
PURCHASE_MIN = 25
# initial buys should prioritize coins that take up a large amount of the index first
@patch.object(binance.client.Client, "order_market_buy", side_effect=mocked_order_result)
@patch.object(binance.client.Client, "get_open_orders", return_value=[])
@patch("bot.exchanges.binance_portfolio", return_value=[])
def test_initial_buy(self, _binance_portfolio_mock, _open_order_mock, order_market_buy_mock):
user = user_from_env()
user.external_portfolio = []
user.purchase_min = self.PURCHASE_MIN
user.purchase_max = self.PURCHASE_MIN
user.buy_strategy = MarketBuyStrategy.MARKET
assert user.external_portfolio == []
assert set(user.deprioritized_coins) == set(["DOGE", "XRP", "BNB", "STORJ"])
assert True == user.livemode
assert self.PURCHASE_MIN == user.purchase_min
assert self.PURCHASE_MIN == user.purchase_max
assert MarketBuyStrategy.MARKET == user.buy_strategy
assert MarketIndexStrategy.MARKET_CAP == user.index_strategy
BuyCommand.execute(user=user, purchase_balance=Decimal(self.PURCHASE_MIN * 3))
# make sure the user minimum is respected
for mock_call in order_market_buy_mock.mock_calls:
assert float(mock_call.kwargs["quoteOrderQty"]) == self.PURCHASE_MIN
all_order_tokens = [mock_call.kwargs["symbol"] for mock_call in order_market_buy_mock.mock_calls]
# top market tokens should be prioritized
assert set(all_order_tokens) == set(["BTCUSD", "ETHUSD", "ADAUSD"])
def test_maximum_purchase_limit(self):
pass
# ensure that the target amount is adjusted by the currently owned amount of a token
def test_does_not_exceed_target(self):
pass
@patch.object(binance.client.Client, "order_market_buy", side_effect=mocked_order_result)
@patch.object(binance.client.Client, "get_open_orders", return_value=[])
def test_percentage_allocation_limit(self, _open_order_mock, order_market_buy_mock):
number_of_purchases = 10
# customized purchase min since we are testing with some live user data where the minimum was lower
purchase_min = 10
user = user_from_env()
user.allocation_drift_percentage_limit = 5
user.external_portfolio = []
user.purchase_min = purchase_min
user.purchase_max = purchase_min
assert user.external_portfolio == []
assert user.allocation_drift_multiple_limit == 5
assert user.allocation_drift_percentage_limit == 5
assert purchase_min == user.purchase_min
assert purchase_min == user.purchase_max
assert MarketIndexStrategy.MARKET_CAP == user.index_strategy
assert user.exchanges == [SupportedExchanges.BINANCE]
assert True == user.livemode
BuyCommand.execute(user=user, purchase_balance=Decimal(purchase_min * number_of_purchases))
# TODO this should be extracted out into some helper
for mock_call in order_market_buy_mock.mock_calls:
assert float(mock_call.kwargs["quoteOrderQty"]) == purchase_min
# in this example portfolio:
# - BTC & ETH are held, but are > 5% off the target allocation
# - AVAX is unowned
# - HNT is unowned
        # - AXS, GRT, UNI are way off the target allocation
# - FIL, ATOM, AAVE, ALGO have all dropped within the last month
all_order_tokens = [mock_call.kwargs["symbol"] for mock_call in order_market_buy_mock.mock_calls]
assert len(all_order_tokens) == number_of_purchases
assert ["BTCUSD", "ETHUSD", "AVAXUSD", "HNTUSD", "AXSUSD", "UNIUSD", "FILUSD", "ATOMUSD", "AAVEUSD", "ALGOUSD"] == all_order_tokens
# does a portfolio overallocated on a specific token still purchase tokens that capture much of the market cap?
@patch.object(binance.client.Client, "order_market_buy", side_effect=mocked_order_result)
@patch.object(binance.client.Client, "get_open_orders", return_value=[])
@patch("bot.exchanges.binance_portfolio", return_value=[])
def test_off_allocation_portfolio(self, _binance_portfolio_mock, _open_order_mock, order_market_buy_mock):
number_of_orders = 4
user = user_from_env()
user.purchase_min = self.PURCHASE_MIN
user.purchase_max = self.PURCHASE_MIN
user.buy_strategy = MarketBuyStrategy.MARKET
user.allocation_drift_multiple_limit = 5
user.external_portfolio = [ # type: ignore
{"symbol": "DOGE", "amount": Decimal("1000000")},
{"symbol": "ETH", "amount": Decimal("0.01")},
{"symbol": "BTC", "amount": Decimal("0.01")},
]
assert set(user.deprioritized_coins) == set(["DOGE", "XRP", "BNB", "STORJ"])
assert True == user.livemode
assert self.PURCHASE_MIN == user.purchase_min
assert self.PURCHASE_MIN == user.purchase_max
assert MarketBuyStrategy.MARKET == user.buy_strategy
assert MarketIndexStrategy.MARKET_CAP == user.index_strategy
assert None == user.allocation_drift_percentage_limit
BuyCommand.execute(user=user, purchase_balance=Decimal(self.PURCHASE_MIN * number_of_orders))
all_order_tokens = [mock_call.kwargs["symbol"] for mock_call in order_market_buy_mock.mock_calls]
# top market tokens should be prioritized
assert len(all_order_tokens) == number_of_orders
assert set(all_order_tokens) == set(["BTCUSD", "ETHUSD", "ADAUSD", "SOLUSD"])
@patch(
"bot.exchanges.open_orders",
return_value=[
ExchangeOrder(
symbol="ADA",
trading_pair="ADAUSD",
quantity=Decimal("5.10000000"),
price=Decimal("2.0000"),
created_at=1631457393,
time_in_force=OrderTimeInForce("GTC"),
type=OrderType("BUY"),
id="259074455",
exchange=SupportedExchanges.BINANCE,
)
],
)
@patch("bot.exchanges.portfolio", return_value=[])
def test_cancelling_stale_orders(self, _mock_portfolio, _mock_open_orders):
user = user_from_env()
user.livemode = False
user.cancel_stale_orders = True
user.buy_strategy = MarketBuyStrategy.LIMIT
assert user.livemode == False
assert user.cancel_stale_orders == True
assert user.buy_strategy == MarketBuyStrategy.LIMIT
BuyCommand.execute(user=user)
def test_not_buying_open_orders(self):
pass
|
# -*- coding: utf-8 -*-
"""
# @file name : bn_in_123_dim.py
# @author : Jianhua Ma
# @date : 20210403
# @brief : three kinds of BatchNorm layers (1d/2d/3d) for data of different dimensionality.
"""
import torch
import numpy as np
import torch.nn as nn
import sys
import os
# make the project root importable before importing project-local tools
hello_pytorch_DIR = os.path.abspath(os.path.dirname(__file__)+os.path.sep+"..")
sys.path.append(hello_pytorch_DIR)
from tools.common_tools import set_seed
set_seed(1)
# example 1: nn.BatchNorm1d
# flag = True
flag = False
if flag:
batch_size = 3
num_features = 5
momentum = 0.3
features_shape = 1
# 1d
feature_map = torch.ones(features_shape)
# 1d --> 2d
# [1], [2], [3], [4], [5]
feature_maps = torch.stack([feature_map * (i + 1) for i in range(num_features)], dim=0)
# 2d --> 3d
feature_maps_bs = torch.stack([feature_maps for i in range(batch_size)], dim=0)
print(f"input data:\n {feature_maps_bs} shape is {feature_maps_bs.shape}")
bn = nn.BatchNorm1d(num_features=num_features, momentum=momentum)
running_mean, running_var = 0, 1
for i in range(2):
outputs = bn(feature_maps_bs)
print(f"\niterations: {i}, running mean: {bn.running_mean}")
        # check the second feature channel: every value is 2, so the batch mean is 2 and the batch variance is 0
mean_t, var_t = 2, 0
running_mean = (1 - momentum) * running_mean + momentum * mean_t
running_var = (1 - momentum) * running_var + momentum * var_t
print(f"iteration:{i}, running mean of the second feature: {running_mean} ")
print(f"iteration:{i}, running var of the second feature:{running_var}")
# example 2: nn.BatchNorm2d
# flag = True
flag = False
if flag:
batch_size = 3
num_features = 6
momentum = 0.3
features_shape = (2, 2)
feature_map = torch.ones(features_shape) # 2D
feature_maps = torch.stack([feature_map * (i + 1) for i in range(num_features)], dim=0) # 3D
feature_maps_bs = torch.stack([feature_maps for i in range(batch_size)], dim=0) # 4D
print("input data:\n{} shape is {}".format(feature_maps_bs, feature_maps_bs.shape))
bn = nn.BatchNorm2d(num_features=num_features, momentum=momentum)
running_mean, running_var = 0, 1
for i in range(2):
outputs = bn(feature_maps_bs)
print("\niter:{}, running_mean.shape: {}".format(i, bn.running_mean.shape))
print("iter:{}, running_var.shape: {}".format(i, bn.running_var.shape))
print("iter:{}, weight.shape: {}".format(i, bn.weight.shape))
print("iter:{}, bias.shape: {}".format(i, bn.bias.shape))
# example 3: nn.BatchNorm3d
flag = True
# flag = False
if flag:
batch_size = 3
num_features = 4
momentum = 0.3
features_shape = (2, 2, 3)
feature = torch.ones(features_shape) # 3D
feature_map = torch.stack([feature * (i + 1) for i in range(num_features)], dim=0) # 4D
feature_maps = torch.stack([feature_map for i in range(batch_size)], dim=0) # 5D
print("input data:\n{} shape is {}".format(feature_maps, feature_maps.shape))
bn = nn.BatchNorm3d(num_features=num_features, momentum=momentum)
running_mean, running_var = 0, 1
for i in range(2):
outputs = bn(feature_maps)
print("\niter:{}, running_mean.shape: {}".format(i, bn.running_mean.shape))
print("iter:{}, running_var.shape: {}".format(i, bn.running_var.shape))
print("iter:{}, weight.shape: {}".format(i, bn.weight.shape))
print("iter:{}, bias.shape: {}".format(i, bn.bias.shape))
|
#!/usr/bin/env python3
## Package stats.py
##
## Copyright (c) 2011 Steven D'Aprano.
##
## Permission is hereby granted, free of charge, to any person obtaining
## a copy of this software and associated documentation files (the
## "Software"), to deal in the Software without restriction, including
## without limitation the rights to use, copy, modify, merge, publish,
## distribute, sublicense, and/or sell copies of the Software, and to
## permit persons to whom the Software is furnished to do so, subject to
## the following conditions:
##
## The above copyright notice and this permission notice shall be
## included in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
## IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
## CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
## TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
## SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Statistics package for Python 3.
The statistics functions are divided up into separate modules:
Module Description
================== =============================================
stats Basic calculator statistics.
stats.co Coroutine versions of selected functions.
stats.multivar Multivariate (multiple variable) statistics.
stats.order Order statistics.
stats.univar Univariate (single variable) statistics.
stats.vectorize Utilities for vectorizing functions.
For further details, see the individual modules.
The ``stats`` module provides ten statistics functions:
Function Description
============== =============================================
mean* Arithmetic mean (average) of data.
minmax Minimum and maximum of the arguments.
product* Product of data.
pstdev* Population standard deviation of data.
pvariance* Population variance of data.
running_product Running product coroutine.
running_sum High-precision running sum coroutine.
stdev* Sample standard deviation of data.
sum* High-precision sum of data.
variance* Sample variance of data (bias-corrected).
Functions marked with * can operate on columnar data (see below).
The module also includes two public utility functions plus an exception
class used for some statistical errors:
Name Description
============== =============================================
add_partial Utility for performing high-precision sums.
coroutine Utility for initialising coroutines.
StatsError Subclass of ValueError.
Examples
--------
>>> import stats
>>> stats.mean([-1.0, 2.5, 3.25, 5.75])
2.625
>>> stats.stdev([2.5, 3.25, 5.5, 11.25, 11.75]) #doctest: +ELLIPSIS
4.38961843444...
>>> stats.minmax(iter([19, 23, 15, 42, 31]))
(15, 42)
Columnar data
-------------
As well as operating on a single row, as in the examples above, the functions
marked with * can operate on data in columns:
>>> data = [[0, 1, 1, 2], # row 1, 4 columns
... [1, 1, 2, 4], # row 2
... [2, 1, 3, 8]] # row 3
...
>>> stats.sum(data) # Sum each column.
[3, 3, 6, 14]
>>> stats.variance(data) #doctest: +ELLIPSIS
[1.0, 0.0, 1.0, 9.333333333333...]
For further details, see the individual functions.
"""
# Package metadata.
__version__ = "0.2.0a"
__date__ = "2011-03-?????????????????????????????????????????????????"
__author__ = "Steven D'Aprano"
__author_email__ = "steve+python@pearwood.info"
__all__ = [ 'add_partial', 'coroutine', 'mean', 'minmax', 'product',
            'pstdev', 'pvariance', 'running_product', 'running_sum',
            'StatsError', 'stdev', 'sum', 'variance',
          ]
import collections
import functools
import itertools
import math
import operator
from builtins import sum as _sum
import stats.vectorize as v
# === Exceptions ===
class StatsError(ValueError):
pass
# === Public utilities ===
def coroutine(func):
"""Decorator to prime coroutines when they are initialised."""
@functools.wraps(func)
def started(*args, **kwargs):
cr = func(*args,**kwargs)
cr.send(None)
return cr
return started
# Modified from http://code.activestate.com/recipes/393090/
# Thanks to Raymond Hettinger.
def add_partial(x, partials):
"""Helper function for full-precision summation of binary floats.
Adds finite (not NAN or INF) x in place to the list partials.
Example usage:
>>> partials = []
>>> add_partial(1e100, partials)
>>> add_partial(1e-100, partials)
>>> add_partial(-1e100, partials)
>>> partials
[1e-100, 0.0]
Initialise partials to be a list containing at most one finite float
(i.e. no INFs or NANs). Then for each float you wish to add, call
``add_partial(x, partials)``.
When you are done, call sum(partials) to round the summation to the
precision supported by float.
If you initialise partials with more than one value, or with special
values (NANs or INFs), results are undefined.
"""
# Rounded x+y stored in hi with the round-off stored in lo. Together
# hi+lo are exactly equal to x+y. The inner loop applies hi/lo summation
# to each partial so that the list of partial sums remains exact.
# Depends on IEEE-754 arithmetic guarantees. See proof of correctness at:
# www-2.cs.cmu.edu/afs/cs/project/quake/public/papers/robust-arithmetic.ps
i = 0
for y in partials:
if abs(x) < abs(y):
x, y = y, x
hi = x + y
lo = y - (hi - x)
if lo:
partials[i] = lo
i += 1
x = hi
partials[i:] = [x]
# === Private utilities ===
class _countiter:
"""Iterator that counts how many elements it has seen.
>>> c = _countiter(['a', 1, None, 'c'])
>>> _ = list(c)
>>> c.count
4
"""
def __init__(self, iterable):
self.it = iter(iterable)
self.count = 0
def __next__(self):
x = next(self.it)
self.count += 1
return x
def __iter__(self):
return self
def _is_numeric(obj):
"""Return True if obj is a number, otherwise False.
>>> _is_numeric(2.5)
True
>>> _is_numeric('spam')
False
"""
try:
obj + 0
except TypeError:
return False
else:
return True
class _Adder:
"""High precision addition."""
def __init__(self, partials=None):
if partials is None:
partials = []
self.partials = partials
def add(self, x):
"""Add numeric value x to self.partial."""
# Handle special values:
#
# | x | y | y+x | where y = partials
# +-------+-------+-------| * = any finite value
# | ? | [] | x | ? = any value, finite or not
# | NAN | ? | NAN | [] = empty partials list
# | ? | NAN | NAN |
# | INF | INF | INF | <= same sign
# | +INF | -INF | NAN | <= opposite signs
# | -INF | +INF | NAN |
# | * | INF | INF |
# | INF | * | INF |
#
partials = self.partials[:] # Make a copy.
if not partials:
# nothing + anything
partials = [x]
elif math.isnan(x):
# anything + NAN = NAN
partials = [x] # Latest NAN beats previous NAN (if any).
else:
y = partials[0]
if math.isnan(y):
# NAN + anything = NAN.
pass
elif math.isinf(y):
if math.isinf(x):
if float(x) == float(y):
# INFs have the same sign.
assert (x > 0) == (y > 0)
partials = [x] # Latest INF wins.
else:
# INFs have opposite sign.
assert (x > 0) != (y > 0)
partials = [type(x)('nan')]
else:
# INF + finite = INF
assert not math.isnan(x) # Handled earlier.
elif math.isinf(x):
# finite + INF = INF
assert not math.isnan(y) # Handled earlier.
assert not math.isinf(y)
partials = [x]
else:
# finite + finite
try:
add_partial(x, partials)
except TypeError:
# This probably means we're trying to add Decimal to
# float. Coerce to floats and try again.
partials[:] = map(float, partials)
x = float(x)
add_partial(x, partials)
return type(self)(partials)
def value(self):
return _sum(self.partials)
_add = functools.partial(v.apply_op, _Adder.add)
def _fsum(x, start=0):
return functools.reduce(_add, x, _Adder([start]))
def _len_sum(iterable, func=None):
"""\
_len_sum(iterable) -> len(iterable), sum(iterable)
_len_sum(iterable, func) -> len(iterable), sum(func(items of data))
Return a two-tuple of the length of data and the sum of func() of the
    items of data. If func is None (the default), use just the sum of the items
of data.
"""
# Special case for speed.
if isinstance(iterable, list):
n = len(iterable)
else:
n = None
iterable = _countiter(iterable)
if func is None:
total = _fsum(iterable)
else:
total = _fsum(func(x) for x in iterable)
if n is None:
n = iterable.count
if isinstance(total, list):
total = [t.value() for t in total]
else:
total = total.value()
return (n, total)
def _std_moment(data, m, s, r):
"""Return the length and standardised moment of order r = 1...4."""
assert r in (1, 2, 3, 4), "private function not intended for r != 1...4"
if m is None or s is None:
# We need multiple passes over the data, so make sure we can.
if not isinstance(data, list):
data = list(data)
if m is None: m = mean(data)
if s is None: s = pstdev(data, m)
# Minimize the number of arithmetic operations needed for some
# common functions.
if False and s == 1: # FIXME this optimization is currently disabled.
if r == 1:
args = (m,)
f = lambda x, m: (x-m)
elif r == 2:
args = (m,)
f = lambda x, m: (x-m)**2
else:
args = (m, r)
f = lambda x, m, r: (x-m)**r
else:
args = (m, s, r)
f = lambda x, m, s, r: ((x-m)/s)**r
n, total = _len_sum(v.apply(f, x, *args) for x in data)
return (n, total)
# FIXME the above may not be accurate enough for 2nd moments (x-m)**2
# A more accurate algorithm is the compensated version:
# sum2 = sum((x-m)**2) as above
# sumc = sum(x-m) # Should be zero, but may not be.
# total = sum2 - sumc**2/n
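# A hedged sketch of that compensated two-pass sum of squared deviations, for
# illustration only; ``_compensated_ss`` is not part of this package's API and
# is not called by the functions below. It uses math.fsum rather than the
# package's own high-precision machinery to keep the example short.
def _compensated_ss(data, m):
    """Return sum((x-m)**2) with a correction for rounding error in m.

    >>> _compensated_ss([1.0, 2.0, 3.0, 4.0], 2.5)
    5.0
    """
    data = list(data)
    n = len(data)
    sum2 = math.fsum((x - m)**2 for x in data)
    sumc = math.fsum(x - m for x in data)  # exactly zero if m is the exact mean
    return sum2 - sumc**2/n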
# === Sums and products ===
def sum(data, start=0):
"""sum(iterable_of_numbers [, start]) -> sum of numbers
sum(iterable_of_rows [, start]) -> sums of columns
Return a high-precision sum of the given numbers or columns.
When passed a single sequence or iterator of numbers, ``sum`` adds the
numbers and returns the total:
>>> sum([2.25, 4.5, -0.5, 1.0])
7.25
If optional argument ``start`` is given, it is added to the total. If
the iterable is empty, ``start`` (defaulting to 0) is returned.
When passed an iterable of sequences, each sub-sequence represents a
row of data, and ``sum`` adds the columns. Each row must have the same
number of columns, or ValueError is raised. If ``start`` is given, it
must be either a single number, or a sequence with the same number of
columns as the data.
>>> data = [[0, 1, 2, 3],
... [1, 2, 4, 5],
... [2, 3, 6, 7]]
...
>>> sum(data)
[3, 6, 12, 15]
>>> sum(data, 1)
[4, 7, 13, 16]
>>> sum(data, [1, 0, 1.5, -1.5])
[4, 6, 13.5, 13.5]
The numbers are added using high-precision arithmetic that can avoid
some sources of round-off error:
>>> sum([1, 1e100, 1, -1e100] * 10000) # The built-in sum returns zero.
20000.0
"""
if isinstance(data, str):
raise TypeError('data argument cannot be a string')
# Calculate the length and sum of data.
count, total = _len_sum(data)
if not count:
return start
# Add start as needed.
return v.add(total, start)
@coroutine
def running_sum(start=None):
"""Running sum co-routine.
With no arguments, ``running_sum`` consumes values and returns the
running sum of arguments sent to it:
>>> rsum = running_sum()
>>> rsum.send(1)
1
>>> [rsum.send(n) for n in (2, 3, 4)]
[3, 6, 10]
If optional argument ``start`` is given and is not None, it is used as
the initial value for the running sum:
>>> rsum = running_sum(9)
>>> [rsum.send(n) for n in (1, 2, 3)]
[10, 12, 15]
"""
if start is None: start = []
else: start = [start]
total = _Adder(start)
x = (yield None)
while True:
total = total.add(x)
x = (yield total.value())
@coroutine
def running_product(start=None):
"""Running product co-routine.
With no arguments, ``running_product`` consumes values and returns the
running product of arguments sent to it:
>>> rp = running_product()
>>> rp.send(1)
1
>>> [rp.send(n) for n in (2, 3, 4)]
[2, 6, 24]
If optional argument ``start`` is given and is not None, it is used as
the initial value for the running product:
>>> rp = running_product(9)
>>> [rp.send(n) for n in (1, 2, 3)]
[9, 18, 54]
"""
if start is not None:
total = start
else:
total = 1
x = (yield None)
while True:
try:
total *= x
except TypeError:
if not _is_numeric(x):
raise
# Downgrade to floats and try again.
x = float(x)
total = float(total)
continue
x = (yield total)
def product(data, start=1):
"""product(iterable_of_numbers [, start]) -> product of numbers
product(iterable_of_rows [, start]) -> product of columns
Return the product of the given numbers or columns.
When passed a single sequence or iterator of numbers, ``product``
multiplies the numbers and returns the total:
>>> product([2.25, 4.5, -0.5, 10])
-50.625
>>> product([1, 2, -3, 2, -1])
12
If optional argument ``start`` is given, it is multiplied to the total.
If the iterable is empty, ``start`` (defaulting to 1) is returned.
When passed an iterable of sequences, each sub-sequence represents a
row of data, and product() multiplies each column. Each row must have
the same number of columns, or ValueError is raised. If ``start`` is
given, it must be either a single number, or a sequence with the same
number of columns as the data.
>>> data = [[0, 1, 2, 3],
... [1, 2, 4, 6],
... [2, 3, 6, 0.5]]
...
>>> product(data)
[0, 6, 48, 9.0]
>>> product(data, 2)
[0, 12, 96, 18.0]
>>> product(data, [2, 1, 0.25, -1.5])
[0, 6, 12.0, -13.5]
"""
if isinstance(data, str):
raise TypeError('data argument cannot be a string')
return v.prod(data, start)
# Note: do *not* be tempted to do something clever with logarithms:
# math.exp(sum([math.log(x) for x in data], start))
# is FAR less accurate than naive multiplication.
# === Basic univariate statistics ===
def mean(data):
"""mean(iterable_of_numbers) -> arithmetic mean of numbers
mean(iterable_of_rows) -> arithmetic means of columns
Return the arithmetic mean of the given numbers or columns.
The arithmetic mean is the sum of the data divided by the number of data
points. It is commonly called "the average", although it is actually only
one of many different mathematical averages. It is a measure of the
central location of the data.
When passed a single sequence or iterator of numbers, ``mean`` adds the
data points and returns the total divided by the number of data points:
>>> mean([1.0, 2.0, 3.0, 4.0])
2.5
When passed an iterable of sequences, each inner sequence represents a
row of data, and ``mean`` returns the mean of each column. The rows must
have the same number of columns, or ValueError is raised.
>>> data = [[0, 1, 2, 3],
... [1, 2, 4, 5],
... [2, 3, 6, 7]]
...
>>> mean(data)
[1.0, 2.0, 4.0, 5.0]
The sample mean is an unbiased estimator of the true population mean.
    However, the mean is strongly affected by outliers and is not a robust
estimator for central location: the mean is not necessarily a typical
example of the data points.
"""
count, total = _len_sum(data)
if not count:
raise StatsError('mean of empty sequence is not defined')
return v.div(total, count)
def variance(data, m=None):
"""variance(iterable_of_numbers [, m]) -> sample variance of numbers
variance(iterable_of_rows [, m]) -> sample variance of columns
Return the unbiased sample variance of the given numbers or columns.
The variance is a measure of the variability (spread or dispersion) of
data. A large variance indicates that the data is spread out; a small
variance indicates it is clustered closely around the central location.
WARNING: The mathematical terminology related to variance is
often inconsistent and confusing. This is the variance with
Bessel's correction for bias, also known as variance with N-1
degrees of freedom. See Wolfram Mathworld for further details:
http://mathworld.wolfram.com/Variance.html
http://mathworld.wolfram.com/SampleVariance.html
When given a single iterable of data, ``variance`` returns the sample
variance of that data:
>>> variance([3.5, 2.75, 1.75, 1.25, 1.25,
... 0.5, 0.25]) #doctest: +ELLIPSIS
1.37202380952...
If you already know the mean of your data, you can supply it as the
optional second argument ``m``:
>>> data = [0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25]
>>> m = mean(data) # Save the mean for later use.
>>> variance(data, m) #doctest: +ELLIPSIS
1.42857142857...
CAUTION: "Garbage in, garbage out" applies. If the value you
supply as ``m`` is not the mean for your data, the result
returned may not be statistically valid.
If the argument to ``variance`` is an iterable of sequences, each inner
sequence represents a row of data, and ``variance`` returns the variance
of each column. Each row must have exactly the same number of columns, or
ValueError will be raised.
>>> data = [[0, 1, 2],
... [1, 1, 3],
... [1, 2, 5],
... [2, 4, 6]]
...
>>> variance(data) #doctest: +ELLIPSIS
[0.6666666666..., 2.0, 3.3333333333...]
If ``m`` is given for such columnar data, it must be either a single
number, or a sequence with the same number of columns as the data.
See also ``pvariance``.
"""
return _variance(data, m, 1)
def stdev(data, m=None):
"""stdev(iterable_of_numbers [, m]) -> standard deviation of numbers
stdev(iterable_of_rows [, m]) -> standard deviation of columns
Returns the sample standard deviation (with N-1 degrees of freedom)
of the given numbers or columns. The standard deviation is the square
root of the variance.
Optional argument ``m`` has the same meaning as for ``variance``.
>>> stdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75]) #doctest: +ELLIPSIS
1.08108741552...
>>> data = [[0, 1, 2],
... [1, 1, 3],
... [1, 2, 5],
... [2, 4, 6]]
...
>>> stdev(data) #doctest: +ELLIPSIS
[0.816496580927..., 1.41421356237..., 1.82574185835...]
Note that although ``variance`` is an unbiased estimate for the
population variance, ``stdev`` itself is *not* unbiased.
"""
svar = variance(data, m)
return v.sqrt(svar)
def pvariance(data, m=None):
"""pvariance(iterable_of_numbers [, m]) -> population variance of numbers
pvariance(iterable_of_rows [, m]) -> population variance of columns
Return the population variance of the given numbers or columns. The
variance is a measure of the variability (spread or dispersion) of
data. A large variance indicates that the data is spread out; a small
variance indicates it is clustered closely around the central location.
See ``variance`` for further information.
If your data represents the entire population, you should use this
function. If your data is a sample of the population, this function
returns a biased estimate of the variance. For an unbiased estimate,
use ``variance`` instead.
Calculate the variance of populations:
>>> pvariance([0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25])
1.25
If the argument to ``pvariance`` is an iterable of sequences, each inner
sequence represents a row of data, and the variance of each column is
returned:
>>> data = [[0, 1, 2, 3, 4],
... [1, 1, 3, 5, 4],
... [1, 2, 5, 5, 7],
... [2, 4, 6, 7, 9]]
...
>>> pvariance(data)
[0.5, 1.5, 2.5, 2.0, 4.5]
Each row must have exactly the same number of columns, or ValueError
will be raised.
If you already know the mean, you can pass it to ``pvariance`` as the
optional second argument ``m``. For columnar data, ``m`` must be either
a single number, or it must contain the same number of columns as the
data.
"""
return _variance(data, m, 0)
def pstdev(data, m=None):
"""pstdev(iterable_of_numbers [, m]) -> population std dev of numbers
pstdev(iterable_of_rows [, m]) -> population std dev of columns
Returns the population standard deviation (with N degrees of freedom)
of the given numbers or columns. The standard deviation is the square
root of the variance.
Optional argument ``m`` has the same meaning as for ``pvariance``.
>>> pstdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75]) #doctest: +ELLIPSIS
0.986893273527...
>>> data = [[0, 1, 2],
... [1, 1, 3],
... [1, 2, 5],
... [2, 4, 6]]
...
>>> pstdev(data) #doctest: +ELLIPSIS
[0.707106781186..., 1.22474487139..., 1.58113883008...]
"""
pvar = pvariance(data, m)
return v.sqrt(pvar)
def _variance(data, m, p):
"""Return an estimate of variance with N-p degrees of freedom."""
n, ss = _std_moment(data, m, 1, 2)
assert n >= 0
if n <= p:
raise StatsError(
'at least %d items are required but only got %d' % (p+1, n))
den = n - p
v.assert_(lambda x: x >= 0.0, ss)
return v.div(ss, den)
def minmax(*values, **kw):
"""minmax(iterable [, key=func]) -> (minimum, maximum)
minmax(a, b, c, ... [, key=func]) -> (minimum, maximum)
With a single iterable argument, return a two-tuple of its smallest and
largest items. With two or more arguments, return the smallest and
largest arguments. ``minmax`` is similar to the built-ins ``min`` and
``max``, but can return the two items with a single pass over the data,
allowing it to work with iterators.
>>> minmax([3, 2, 1, 6, 5, 4])
(1, 6)
>>> minmax(4, 5, 6, 1, 2, 3)
(1, 6)
The optional keyword-only argument ``key`` specifies a key function:
>>> minmax('aaa', 'bbbb', 'c', 'dd', key=len)
('c', 'bbbb')
"""
if len(values) == 0:
raise TypeError('minmax expected at least one argument, but got none')
elif len(values) == 1:
values = values[0]
if list(kw.keys()) not in ([], ['key']):
raise TypeError('minmax received an unexpected keyword argument')
if isinstance(values, collections.Sequence):
# For speed, fall back on built-in min and max functions when
# data is a sequence and can be safely iterated over twice.
# TODO this could be unnecessary if this were re-written in C.
minimum = min(values, **kw)
maximum = max(values, **kw)
# The number of comparisons is N-1 for both min() and max(), so the
# total used here is 2N-2, but performed in fast C.
else:
# Iterator argument, so fall back on a slow pure-Python solution
# that calculates the min and max lazily. Even if values is huge,
# this should work.
# Note that the number of comparisons is 3*ceil(N/2), which is
# approximately 50% fewer than used by separate calls to min & max.
key = kw.get('key')
if key is not None:
it = ((key(value), value) for value in values)
else:
it = ((value, value) for value in values)
try:
keyed_min, minimum = next(it)
except StopIteration:
# Don't directly raise an exception inside the except block,
# as that exposes the StopIteration to the caller. That's an
# implementation detail that should be avoided. See PEP 3134
# http://www.python.org/dev/peps/pep-3134/
# and specifically the open issue "Suppressing context".
empty = True
else:
empty = False
if empty:
raise ValueError('minmax argument is empty')
keyed_max, maximum = keyed_min, minimum
try:
while True:
a = next(it)
try:
b = next(it)
except StopIteration:
b = a
if a[0] > b[0]:
a, b = b, a
if a[0] < keyed_min:
keyed_min, minimum = a
if b[0] > keyed_max:
keyed_max, maximum = b
except StopIteration:
pass
return (minimum, maximum)
def average_deviation(data, m=None):
"""average_deviation(data [, m]) -> average absolute deviation of data.
Return the average deviation of the sample data from the population
centre ``m`` (usually the mean, or the median).
Arguments
---------
data
Non-empty iterable of non-complex numeric data.
m
None, or the central location of the population, usually the
population mean or median (optional, defaults to None).
Examples
--------
If you know the mean or median of the population which the sample data
has been taken from, pass it as the second element:
>>> data = [2.0, 2.25, 2.5, 2.5, 3.25] # A sample from a population
>>> mu = 2.75 # with a known mean.
>>> average_deviation(data, mu)
0.45
If you don't know the centre location, you can estimate it by passing
    the sample mean or median instead. If ``m`` is None, or not given,
the sample mean is calculated from the data and used as an estimate of
the population mean:
>>> average_deviation(data)
0.3
Additional Information
----------------------
    The average deviation is a more robust (less affected by outliers) measure
of spread than standard deviation.
"""
if m is None:
# FIXME in principle there should be a one-pass method for
# calculating this, but for now just convert to a list and
# do two passes.
if iter(data) is data:
data = list(data)
m = mean(data)
n = len(data)
if not n:
        raise StatsError('average deviation of empty sequence is not defined')
deviation = sum(abs(x-m) for x in data)
return deviation/n
|
# For each pair of equal-length strings, count the number of contiguous runs of
# positions where the two strings differ. A pair whose first string starts with
# "*" terminates the input.
while True:
    a, b = input().strip().split()
    if a[0] == "*":
        break
    cnt = ans = 0
    for c in range(len(a)):
        if a[c] == b[c]:
            cnt = 0          # a matching character ends the current differing run
        else:
            if cnt == 0:     # first mismatch of a new run
                ans += 1
            cnt += 1
    print(ans)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright © 2014-2016 NetApp, Inc. All Rights Reserved.
#
# CONFIDENTIALITY NOTICE: THIS SOFTWARE CONTAINS CONFIDENTIAL INFORMATION OF
# NETAPP, INC. USE, DISCLOSURE OR REPRODUCTION IS PROHIBITED WITHOUT THE PRIOR
# EXPRESS WRITTEN PERMISSION OF NETAPP, INC.
from __future__ import unicode_literals
import json
from uuid import UUID
from future.utils import with_metaclass
VER3 = False
try:
unicode
except NameError:
VER3 = True
KNOWN_CONVERSIONS = {
    set: list,
UUID: str
}
def _as_ascii(val):
"""
Helper method for enforcing ascii encoding.
:param val: any string, basestring, or unicode string
:type val: basestring
:return: a string
"""
return str(val.encode('ascii', 'ignore'))
def serialize(val):
"""
DataObject serializer value based on MetaData attributes.
:param val: any value
:return: the serialized value
"""
if hasattr(val, 'custom_to_json'):
return_value = val.custom_to_json()
elif hasattr(val, 'to_json'):
return_value = val.to_json()
elif type(val) in KNOWN_CONVERSIONS:
return_value = KNOWN_CONVERSIONS[type(val)](val)
elif isinstance(val, dict):
return_value = dict((k, serialize(v)) for k, v in val.items())
elif isinstance(val, list):
return_value = list(serialize(v) for v in val)
elif hasattr(val, '_optional') and val.optional():
return_value = None
else:
return_value = val
return return_value
def extract(typ, src):
"""
DataObject value type converter.
:param typ: the type to extract.
:param src: the source to extract as type typ.
    :return: the converted value if the type has the ability to extract
        (convert); otherwise the original value is returned.
"""
if hasattr(typ, 'extract'):
return_value = typ.extract(src, False)
elif type(src) == typ:
return_value = src
elif typ == UUID:
return_value = UUID(src)
    else:
        # Fall back to constructing the target type from the source value;
        # if that fails, keep the original value unchanged.
        try:
            return_value = typ(src)
        except (TypeError, ValueError):
            return_value = src
    return return_value
class ModelProperty(object):
"""
ModelProperty metadata container for API data type information.
"""
def __init__(self, member_name, member_type, array=False, optional=False,
documentation=None, dictionaryType = None):
"""
ModelProperty constructor.
:param member_name: the name of the property.
:type member_name: str
:param member_type: the type of the property.
:type member_type: str
:param array: is the property an array.
:type array: bool
:param optional: is the property optional.
:type optional: bool
        :param documentation: documentation for the property.
        :type documentation: str
        :param dictionaryType: the value type used when the property holds a dictionary.
        :type dictionaryType: type
        """
self._member_name = member_name
self._member_type = member_type
self._array = array
self._optional = optional
self._documentation = documentation
self._dictionaryType = dictionaryType
def __repr__(self):
if self._array:
arr = '[]'
else:
arr = ''
full_type = '{}'.format(self._member_type).replace('\'>', arr + '\'>')
return full_type
def extend_json(self, out, data):
"""
Serialize the property as json-like structure.
:param out: the resulting output.
:param data: the data to be converted.
"""
if data is None or hasattr(data, '_member_type'): # HACK ALERT
if not self._optional:
# We want to catch this error.
#raise ValueError(self._member_name+" is a required parameter.")
# THE OLD WAY!
out[self._member_name] = None
elif self._array:
out[self._member_name] = [serialize(x) for x in data]
elif self._optional:
optional_data = serialize(data)
if optional_data is not None:
out[self._member_name] = optional_data
else:
out[self._member_name] = serialize(data)
def extract_from(self, data):
"""
Deserialize the property from json.
:param data: the data to be converted.
:return: the extracted data.
"""
if self._array:
return [] if data is None else [extract(self._member_type, x) for x
in data]
if self._dictionaryType:
newdict = dict()
for key in data:
newdict[key] = extract(self._dictionaryType, data[key])
return newdict
else:
return None if data is None else extract(self._member_type, data)
def member_name(self):
"""
:return: the member name.
"""
return self._member_name
def member_type(self):
"""
:return: the member type.
"""
return self._member_type
def array(self):
"""
:return: is the property an array
"""
return self._array
def optional(self):
"""
:return: is the property optional
"""
return self._optional
def documentation(self):
"""
:return: the property documentation
"""
return self._documentation
def known_default(self):
"""
Helps convert a property to a default value.
:return: a known default for a type.
"""
if self._member_type is int:
return 0
elif self._member_type is float:
return 0.0
elif self._member_type is str:
return ''
elif self._member_type is bool:
return False
else:
pass
class MetaDataObject(type):
"""
MetaDataObject defines a method for attributing ModelProperties to a type.
"""
def __init__(cls, name, bases, classdict):
super(MetaDataObject, cls).__init__(name, bases, classdict)
cls._create_properties()
def _create_properties(self):
pass
class DataObject(with_metaclass(MetaDataObject, ModelProperty)):
"""
DataObject is the base type for all generated types, including the MetaData
properties, as described from the api descriptors.
"""
_properties = None
@classmethod
def _create_properties(cls):
"""
Maps api descriptor attributes to the MetaData properties for this
object.
"""
cls._properties = {}
for name in dir(cls):
prop = getattr(cls, name, None)
if isinstance(prop, ModelProperty):
cls._properties[name] = prop
def __init__(self, **kwargs):
# Iterate through available properties and start removing them
# from kwargs
for name, property in type(self)._properties.items():
if not property.optional() and name not in kwargs.keys() and name != "secret":
raise ValueError(name+" is a required parameter.")
if name in kwargs:
setattr(self, name, kwargs[name])
del kwargs[name]
if(len(kwargs.keys()) != 0):
raise ValueError("The following params are invalid: "+str(kwargs.keys()))
def get_properties(self):
"""
Exposes the type properties for a Data Object.
        :return: the dictionary of property names and their type information.
:rtype: dict
"""
return self._properties
def __repr__(self):
"""
Base repr() for all generated objects.
"""
props = []
member_items = self._properties
for name, prop in sorted(member_items.items()):
if prop.array() and hasattr(self, name):
try:
iter(getattr(self, name))
except TypeError:
attrs = []
else:
attrs = (repr(x) for x in getattr(self, name))
msg_fmt = '[{arr}]'
attr_repr = msg_fmt.format(
arr=str.join(str(', '), attrs))
else:
if hasattr(self, name):
attr_repr = getattr(self, name)
else:
attr_repr = None
msg_fmt = '{name}={repr!r}'
msg = msg_fmt.format(name=name, repr=attr_repr)
props.append(msg)
return str.format(str('{cls}({props})'),
cls=type(self).__name__,
props=str.join(str(', '), props))
def to_json(self):
"""
Converts DataObject to json.
:return: the DataObject as a json structure.
"""
out = {}
for name, prop in type(self)._properties.items():
prop.extend_json(out, getattr(self, name, None))
return out
@classmethod
def extract(cls, data, strict=False):
"""
Converts json to a DataObject.
:param data: json data to be deserialized back to a DataObject
:type data: str
        :param strict: If True, missing values will raise an error; otherwise,
            missing values will be None or empty.
:type strict: bool
:return: a class deserialized from the data provided.
"""
ctor_dict = {}
if not cls._properties:
if VER3:
if type(data) == str:
ctor_dict['value'] = data
else:
if type(data) in [str, unicode]:
ctor_dict['value'] = data
for name, prop in cls._properties.items():
# If it has data
if data is None and strict:
pass
if data is None and not strict:
ctor_dict[name] = None
# If it is a chap secret
elif hasattr(prop.member_type(), 'custom_extract'):
ctor_dict[name] = prop.member_type().custom_extract(
data[prop.member_name()])
# If it has the right data which matches the data we need, set it
elif prop.member_name() in data and type(data) == dict:
data_val = data[prop.member_name()]
ctor_dict[name] = prop.extract_from(data_val)
# If we're dealing with an optional property which hasn't been provided
elif prop.optional():
ctor_dict[name] = None
# If we're dealing with an empty array
elif prop.array():
ctor_dict[name] = []
elif not strict:
ctor_dict[name] = None
else:
msg_fmt = 'Can not create {typ}: ' \
'missing required property "{name}" in {data}'
msg = msg_fmt.format(typ=cls.__name__,
name=prop.member_name(),
data=json.dumps(data)
)
raise TypeError(msg)
return cls(**ctor_dict)
def property(member_name, member_type,
array = False, optional = False,
documentation = None, dictionaryType = None):
"""
Constructs the type for a DataObject property.
:param member_name: the name of the property.
:type member_name: str
:param member_type: the type of the property.
:type member_type: type
:param array: is the property an array.
:type array: bool
:param optional: is the property optional.
:type optional: bool
:param documentation: documentation for the property.
:type documentation: str or NoneType
:return: the constructed type of a property
"""
    msg_fmt = 'Property of type {typ}{arr}'
    msg = msg_fmt.format(
        typ=member_type,
        arr='[]' if array else ''
    )
documentation = documentation or msg
typ = type(_as_ascii(member_name),
(ModelProperty,),
{
'__doc__': documentation,
})
return typ(member_name=member_name,
member_type=member_type,
array=array,
optional=optional,
documentation=documentation,
dictionaryType=dictionaryType)
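# --- Hedged usage sketch (illustrative, not part of the SDK): a minimal
# DataObject subclass built with the ``property`` factory above and
# round-tripped through to_json()/extract(). Every name here (_ExampleVolume,
# volumeID, tags, ...) is invented for demonstration.
class _ExampleVolume(DataObject):
    volume_id = property("volumeID", int)
    name = property("name", str)
    tags = property("tags", str, array=True, optional=True)

if __name__ == "__main__":
    vol = _ExampleVolume(volume_id=7, name="scratch", tags=["dev"])
    as_json = vol.to_json()   # {'volumeID': 7, 'name': 'scratch', 'tags': ['dev']}
    rebuilt = _ExampleVolume.extract(as_json)
    assert rebuilt.to_json() == as_json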
|
class HmNotifyType(basestring):
"""
Notify type
"""
@staticmethod
def get_api_name():
return "hm-notify-type"
|
from django.contrib.auth import get_user_model
from django.http import Http404, HttpResponseRedirect, HttpResponseForbidden
from django.shortcuts import render
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import login as django_login
from accounts.forms import MyUserCreationForm
from accounts.decorators import must_be_staff
from todolist_web_api import utilities
def new_account( request ):
"""
Create a new user account.
"""
if request.method == 'POST':
form = MyUserCreationForm( request.POST )
if form.is_valid():
form.save()
utilities.set_message( request, "User '{}' created!".format( form.cleaned_data[ 'username' ] ) )
return HttpResponseRedirect( reverse( 'accounts:login' ) )
else:
form = MyUserCreationForm()
context = {
'form': form
}
return render( request, 'accounts/new_account.html', context )
def login( request ):
"""
    Log in to an account.
"""
context = {}
utilities.get_message( request, context )
return django_login( request, 'accounts/login.html', extra_context= context )
@login_required
def user_page( request, username ):
"""
    The user page has information about a user account.
    It is also where you can change some settings (like the password).
"""
if request.user.username != username and not request.user.is_staff:
return HttpResponseForbidden( "Not allowed." )
userModel = get_user_model()
try:
user = userModel.objects.get( username= username )
except userModel.DoesNotExist:
raise Http404( "User doesn't exist." )
context = {
'pageUser': user,
}
utilities.get_message( request, context )
return render( request, 'accounts/user_page.html', context )
def password_changed( request ):
"""
Inform that the password has been changed, and redirect to home.
"""
utilities.set_message( request, 'Password changed!' )
return HttpResponseRedirect( reverse( 'home' ) )
@must_be_staff
def remove_user_confirm( request, username ):
"""
    Confirm a user removal.
"""
userModel = get_user_model()
try:
user = userModel.objects.get( username= username )
except userModel.DoesNotExist:
raise Http404( "User doesn't exist." )
context = {
'user_to_remove': user
}
return render( request, 'accounts/remove_user.html', context )
@must_be_staff
def remove_user( request, username ):
"""
    Remove a user account (this also removes everything associated with it).
"""
userModel = get_user_model()
try:
user = userModel.objects.get( username= username )
except userModel.DoesNotExist:
raise Http404( "User doesn't exist." )
else:
utilities.set_message( request, "'{}' user removed!".format( user ) )
user.delete()
return HttpResponseRedirect( reverse( 'home' ) )
@must_be_staff
def disable_user_confirm( request, username ):
"""
    Confirm the enabling/disabling of a user account.
"""
userModel = get_user_model()
try:
user = userModel.objects.get( username= username )
except userModel.DoesNotExist:
raise Http404( "User doesn't exist." )
else:
context = {
'user_to_disable': user
}
return render( request, 'accounts/disable_user.html', context )
@must_be_staff
def disable_user( request, username ):
"""
    Enable/disable a user account.
    If the account is disabled, the user won't be able to log in.
"""
userModel = get_user_model()
try:
user = userModel.objects.get( username= username )
except userModel.DoesNotExist:
raise Http404( "User doesn't exist." )
else:
value = not user.is_active
# only other staff users can enable/disable staff users
if user.is_staff:
if request.user.is_staff:
user.is_active = value
user.save()
else:
return HttpResponseForbidden( "Can't disable a staff member." )
else:
user.is_active = value
user.save()
if value:
message = "'{}' account is now active.".format( user )
else:
message = "'{}' account is now disabled.".format( user )
utilities.set_message( request, message )
return HttpResponseRedirect( user.get_url() )
@login_required
def new_api_key( request ):
"""
Get a new API key.
"""
request.user.new_api_key()
utilities.set_message( request, 'New API key set!' )
return HttpResponseRedirect( reverse( 'accounts:user_page', args= [ request.user.username ] ) )
|
# Queries to create the database tables
database_tables = [
################################################################
# Versioning
"""
CREATE TABLE IF NOT EXISTS versions (
name TEXT PRIMARY KEY,
version TEXT
);
""",
################################################################
# D2 Steam Players
"""
CREATE TABLE IF NOT EXISTS d2SteamPlayers (
dateObj TIMESTAMP WITHOUT TIME ZONE PRIMARY KEY,
numberOfPlayers INT
);
""",
################################################################
# Persistent Messages
"""
CREATE TABLE IF NOT EXISTS persistentMessages (
messageName TEXT,
guildId BIGINT,
channelID BIGINT,
messageId BIGINT,
reactionsIdList BIGINT [],
PRIMARY KEY(messageName, guildId)
);
""",
################################################################
# Activities
"""
CREATE TABLE IF NOT EXISTS PgcrActivities (
instanceId BIGINT PRIMARY KEY,
referenceId BIGINT,
directorActivityHash BIGINT,
period TIMESTAMP WITHOUT TIME ZONE,
startingPhaseIndex SMALLINT,
mode SMALLINT,
modes SMALLINT [],
isPrivate BOOLEAN,
membershipType SMALLINT
);
""",
"""
CREATE TABLE IF NOT EXISTS PgcrActivitiesUsersStats (
instanceId BIGINT,
membershipId BIGINT,
characterId BIGINT,
characterClass TEXT,
characterLevel SMALLINT,
membershipType SMALLINT,
lightLevel INTEGER,
emblemHash BIGINT,
standing SMALLINT,
assists INTEGER,
completed SMALLINT,
deaths INTEGER,
kills INTEGER,
opponentsDefeated INTEGER,
efficiency NUMERIC,
killsDeathsRatio NUMERIC,
killsDeathsAssists NUMERIC,
score INTEGER,
activityDurationSeconds INTEGER,
completionReason SMALLINT,
startSeconds INTEGER,
timePlayedSeconds INTEGER,
playerCount SMALLINT,
teamScore INTEGER,
precisionKills INTEGER,
weaponKillsGrenade INTEGER,
weaponKillsMelee INTEGER,
weaponKillsSuper INTEGER,
weaponKillsAbility INTEGER,
PRIMARY KEY (instanceId, membershipId, characterId)
);
""",
"""
CREATE TABLE IF NOT EXISTS PgcrActivitiesUsersStatsWeapons (
instanceId BIGINT,
characterId BIGINT,
membershipId BIGINT,
weaponId BIGINT,
uniqueWeaponKills INTEGER,
uniqueWeaponPrecisionKills INTEGER,
PRIMARY KEY (instanceId, membershipId, characterId, weaponId)
);
""",
"""
CREATE TABLE IF NOT EXISTS PgcrActivitiesFailToGet(
instanceId BIGINT PRIMARY KEY,
period TIMESTAMP WITHOUT TIME ZONE
);
""",
################################################################
# Userdata
"""
CREATE TABLE IF NOT EXISTS characters(
destinyID BIGINT,
characterID BIGINT UNIQUE,
systemID INTEGER DEFAULT 3,
UNIQUE(destinyID, characterID)
);
""",
"""
CREATE TABLE IF NOT EXISTS discordGuardiansToken(
discordSnowflake BIGINT PRIMARY KEY,
destinyID BIGINT,
signupDate DATE,
serverID BIGINT,
token TEXT,
__refresh_token TEXT,
systemid INTEGER,
token_expiry TIMESTAMP,
refresh_token_expiry TIMESTAMP,
steamjoinid BIGINT,
activitiesLastUpdated TIMESTAMP
);
""",
"""
CREATE TABLE IF NOT EXISTS owned_emblems (
destiny_id BIGINT,
emblem_hash BIGINT
);
""",
################################################################
# Destiny Manifest
"""
CREATE TABLE IF NOT EXISTS DestinyActivityDefinition(
referenceId BIGINT PRIMARY KEY,
description TEXT,
name TEXT,
activityLevel SMALLINT,
activityLightLevel INTEGER,
destinationHash BIGINT,
placeHash BIGINT,
activityTypeHash BIGINT,
isPvP BOOLEAN,
directActivityModeHash BIGINT,
directActivityModeType SMALLINT,
activityModeHashes BIGINT [],
activityModeTypes SMALLINT []
);
""",
"""
CREATE TABLE IF NOT EXISTS DestinyActivityTypeDefinition(
referenceId BIGINT PRIMARY KEY,
description TEXT,
name TEXT
);
""",
"""
CREATE TABLE IF NOT EXISTS DestinyActivityModeDefinition(
referenceId SMALLINT PRIMARY KEY,
description TEXT,
name TEXT,
hash BIGINT,
activityModeCategory SMALLINT,
isTeamBased BOOLEAN,
friendlyName TEXT
);
""",
"""
CREATE TABLE IF NOT EXISTS DestinyCollectibleDefinition(
referenceId BIGINT PRIMARY KEY,
description TEXT,
name TEXT,
sourceHash BIGINT,
itemHash BIGINT,
parentNodeHashes BIGINT []
);
""",
"""
CREATE TABLE IF NOT EXISTS DestinyInventoryItemDefinition(
referenceId BIGINT PRIMARY KEY,
description TEXT,
name TEXT,
classType SMALLINT, -- 0 = titan, 1 = hunter, 2 = warlock
bucketTypeHash BIGINT,
tierTypeHash BIGINT,
tierTypeName TEXT,
equippable BOOLEAN
);
""",
"""
CREATE TABLE IF NOT EXISTS DestinyRecordDefinition(
referenceId BIGINT PRIMARY KEY,
description TEXT,
name TEXT,
hasTitle BOOLEAN, -- if it is a seal
titleName TEXT, -- this is None for non-seals
objectiveHashes BIGINT [],
ScoreValue INTEGER,
parentNodeHashes BIGINT []
);
""",
"""
CREATE TABLE IF NOT EXISTS DestinyInventoryBucketDefinition(
referenceId BIGINT PRIMARY KEY,
description TEXT,
name TEXT,
category SMALLINT,
itemCount SMALLINT,
location SMALLINT
);
""",
"""
CREATE TABLE IF NOT EXISTS DestinyPresentationNodeDefinition(
referenceId BIGINT PRIMARY KEY,
description TEXT,
name TEXT,
objectiveHash BIGINT,
presentationNodeType SMALLINT,
childrenPresentationNodeHash BIGINT [],
childrenCollectibleHash BIGINT [],
childrenRecordHash BIGINT [],
childrenMetricHash BIGINT [],
parentNodeHashes BIGINT [],
index SMALLINT,
redacted BOOLEAN
);
""",
################################################################
# LFG System
"""
CREATE TABLE IF NOT EXISTS LfgUsers(
user_id BIGINT PRIMARY KEY,
blacklisted_members BIGINT []
);
""",
"""
CREATE TABLE IF NOT EXISTS LfgMessages(
id INT PRIMARY KEY,
guild_id BIGINT,
channel_id BIGINT,
message_id BIGINT,
author_id BIGINT,
voice_channel_id BIGINT,
activity TEXT,
description TEXT,
start_time TIMESTAMP WITH TIME ZONE,
creation_time TIMESTAMP WITH TIME ZONE,
max_joined_members INT,
joined_members BIGINT [],
alternate_members BIGINT []
);
""",
################################################################
# RSS Feed Reader
"""
CREATE TABLE IF NOT EXISTS RssFeedItems(
id TEXT PRIMARY KEY
);
""",
################################################################
# Polls
"""
CREATE TABLE IF NOT EXISTS polls(
id int PRIMARY KEY,
name TEXT,
description TEXT,
data JSON,
author_id BIGINT,
guild_id BIGINT,
channel_id BIGINT,
message_id BIGINT
);
""",
]
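# Hedged sketch (an assumption, not taken from the surrounding project): one way
# to apply the statements above using psycopg2. The DSN placeholder and the
# helper name _create_tables are illustrative only; the bot itself may use a
# different driver (e.g. an async one).
def _create_tables(dsn="postgresql://user:password@localhost/dbname"):
    import psycopg2  # local import so this module stays importable without the driver

    conn = psycopg2.connect(dsn)
    try:
        with conn, conn.cursor() as cursor:
            for statement in database_tables:
                cursor.execute(statement)  # CREATE TABLE IF NOT EXISTS is idempotent
    finally:
        conn.close()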
|
#!/usr/bin/env python
"""
Script for copying NIAK fMRI preprocessing report output into new folder structure for 2018 Simexp fMRI QC dashboard
"""
import time
import json
import shutil
import inspect
import argparse
import pathlib as pal
from distutils import dir_util
copy_debug = False
def populate_report(report_p):
    if not isinstance(report_p, pal.Path):
report_p = pal.Path(report_p)
# Copy the template into the report folder
repo_p = pal.Path(inspect.getfile(make_report)).parents[0].absolute()
dir_util.copy_tree(str(repo_p / 'data/report'), str(report_p), verbose=0)
# Create the directory tree for the files that are yet to be created
tree_structure = [
'assets/group/images',
'assets/group/js',
'assets/motion/images',
'assets/motion/js',
'assets/registration/images',
'assets/registration/js',
'assets/summary/js',
]
for branch in tree_structure:
branch_p = report_p / branch
branch_p.mkdir(parents=True, exist_ok=True)
return
def copy_all_files(p_src_folder_wildcard, p_dest_folder):
print("...{0}".format(p_dest_folder))
for file in p_src_folder_wildcard.parent.glob(p_src_folder_wildcard.name):
if copy_debug:
print("Copying {0} to {1}".format(file, p_dest_folder))
shutil.copy(str(file), str(p_dest_folder))
def create_dataset_ids(p_dataset_id_folder):
# Date and/or timestamp will be used by dashQC as unique identifiers for a data set
# The assumption here is that it is highly unlikely that two data sets will conflict
# based on this distinction criteria
with (p_dataset_id_folder / "datasetID.js").open("w") as data_id_file:
data_id_json = { "date": time.strftime("%Y-%m-%d-%H:%M:%S"),
"timestamp": int(time.time()) }
data_id_file.write("var datasetID = " + json.dumps(data_id_json) + ";")
def make_report(preproc_dir, report_dir):
if not issubclass(type(preproc_dir), pal.Path):
preproc_dir = pal.Path(preproc_dir)
if not issubclass(type(report_dir), pal.Path):
report_dir = pal.Path(report_dir)
print("Conversion of old NIAK QC dashboard folder structure to new one commencing...")
# (1) In output folder create the following folders:
print("Creating new folder structure in {0}...".format(report_dir))
populate_report(report_dir)
# (2) Copy files from old folder structure into new one
print("Copying files...")
# group/*.png -> assets/group/images
# group/*.js -> assets/group/js
copy_all_files(preproc_dir / "group/*.png",
report_dir / "assets/group/images")
copy_all_files(preproc_dir / "group/*.js",
report_dir / "assets/group/js")
# motion/*.html -> assets/motion/html
# motion/*.png -> assets/motion/images
# motion/*.js -> assets/motion/js
#copy_all_files(preproc_dir + "motion{0}*.html".format(os.sep),
# report_dir + "assets{0}motion{0}html".format(os.sep))
copy_all_files(preproc_dir / "motion/*.png",
report_dir / "assets/motion/images")
copy_all_files(preproc_dir / "motion/*.js",
report_dir / "assets/motion/js")
# qc_registration.csv -> assets/registration/csv
# registration/*.png -> assets/registration/images
copy_all_files(preproc_dir / "qc_registration.csv",
report_dir / "assets/registration/csv")
copy_all_files(preproc_dir / "registration/*.png",
report_dir / "assets/registration/images")
# summary/*.js -> assets/summary/js
copy_all_files(preproc_dir / "summary/*.js",
report_dir / "assets/summary/js")
# (3) Create a JSON file for this conversion session that registers this as a unique data set for the dashQC
print("Creating unique IDs for this data set...")
create_dataset_ids(report_dir / "assets/registration/js")
print("Conversion complete.")
if "__main__" == __name__:
parser = argparse.ArgumentParser()
parser.add_argument("preproc_dir", type=str,
help="path to the dire/mnt/data_sq/cisl/surchs/ABIDE/ABIDE_1/PREPROCESS_NIAK/NYUctory with the niak preprocessed data")
parser.add_argument("report_dir", type=str,
help="desired path for the report output")
args = parser.parse_args()
# We want to point at the report folder in the niak preprocessing root
preproc_p = pal.Path(args.preproc_dir)
if str(preproc_p).endswith('report'):
make_report(args.preproc_dir, args.report_dir)
elif (preproc_p / 'report').exists():
make_report(str(preproc_p / 'report'), args.report_dir)
else:
# It's probably an invalid path but we'll let it error out later down the line
make_report(args.preproc_dir, args.report_dir)
|
#!/usr/bin/env python3
# Test topic subscription. All SUBSCRIBE requests are denied. Check this
# produces the correct response, and check the client isn't disconnected (ref:
# issue #1016).
from mosq_test_helper import *
def write_config(filename, port):
with open(filename, 'w') as f:
f.write("port %d\n" % (port))
f.write("auth_plugin c/auth_plugin_acl_sub_denied.so\n")
f.write("allow_anonymous false\n")
port = mosq_test.get_port()
conf_file = os.path.basename(__file__).replace('.py', '.conf')
write_config(conf_file, port)
rc = 1
keepalive = 10
connect_packet = mosq_test.gen_connect("sub-denied-test", keepalive=keepalive, username="denied")
connack_packet = mosq_test.gen_connack(rc=0)
mid = 53
subscribe_packet = mosq_test.gen_subscribe(mid, "qos0/test", 0)
suback_packet = mosq_test.gen_suback(mid, 128)
mid_pub = 54
publish_packet = mosq_test.gen_publish("topic", qos=1, payload="test", mid=mid_pub)
puback_packet = mosq_test.gen_puback(mid_pub)
broker = mosq_test.start_broker(filename=os.path.basename(__file__), use_conf=True, port=port)
try:
sock = mosq_test.do_client_connect(connect_packet, connack_packet, timeout=20, port=port)
mosq_test.do_send_receive(sock, subscribe_packet, suback_packet, "suback")
mosq_test.do_send_receive(sock, publish_packet, puback_packet, "puback")
rc = 0
sock.close()
finally:
os.remove(conf_file)
broker.terminate()
broker.wait()
(stdo, stde) = broker.communicate()
if rc:
print(stde.decode('utf-8'))
exit(rc)
|
file_name = "ALIAS_TABLE_rate{}.txt"
def gen_alias(weights):
'''
@brief:
    generate an alias table from a list of weights (every weight must be non-negative)
'''
n_weights = len(weights)
avg = sum(weights)/(n_weights+1e-6)
aliases = [(1, 0)]*n_weights
smalls = ((i, w/(avg+1e-6)) for i, w in enumerate(weights) if w < avg)
bigs = ((i, w/(avg+1e-6)) for i, w in enumerate(weights) if w >= avg)
small, big = next(smalls, None), next(bigs, None)
while big and small:
aliases[small[0]] = (float(small[1]), int(big[0]))
big = (big[0], big[1] - (1-small[1]))
if big[1] < 1:
small = big
big = next(bigs, None)
else:
small = next(smalls, None)
for i in range(len(aliases)):
tmp = list(aliases[i])
aliases[i] = (int(tmp[0] * (n_weights-1)),tmp[1])
return aliases
def write_table(table,file):
for i in table:
tmp = list(i)
file.write(str(tmp[0]) + " " + str(tmp[1]) + "\n")
file.close()
def read_table(file):
table = []
data = file.read().split("\n")
file.close()
for i in data:
if i == "":
continue
tmp = i.split()
table.append((int(tmp[0]),int(tmp[1])))
return table
def init_alias(weights):
file = open(file_name.format(0),"w")
table = gen_alias(weights)
write_table(table,file)
def update_alias(weights):
file = open(file_name.format(0),"w")
table = gen_alias(weights)
write_table(table,file)
def get_index_alias_method(indexes, rand_num):
file = open(file_name.format(0),"r")
table = read_table(file)
out = []
for i in range(2):
ind = indexes[i]
rand = rand_num[i]
if(table[ind][0]<rand):
out.append(table[ind][1])
else:
out.append(ind)
return out
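# A minimal usage sketch (assumption: weights are arbitrary non-negative floats
# and the table file is written to the current working directory, as the
# functions above already do).
if __name__ == "__main__":
    import random
    weights = [0.5, 2.0, 1.0, 0.25]
    init_alias(weights)  # writes ALIAS_TABLE_rate0.txt
    n = len(weights)
    # Two candidate indexes and two integer thresholds in [0, n-2],
    # matching the integer scaling applied at the end of gen_alias.
    indexes = [random.randrange(n) for _ in range(2)]
    rand_num = [random.randrange(n - 1) for _ in range(2)]
    print(get_index_alias_method(indexes, rand_num))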
|
import torch
from torch import nn
class BaselineFaceExpression(torch.nn.Module):
def __init__(self, in_features, out_features, hid_features, n_layers=2):
super().__init__()
assert hid_features > 1
backbone_layers = []
for i in range(n_layers - 1):
current_in_features = in_features if i == 0 else hid_features
backbone_layers.extend([
nn.Linear(current_in_features, hid_features),
nn.ReLU(inplace=True)
])
self.backbone = nn.Sequential(*backbone_layers)
# final
self.final = nn.Linear(hid_features, out_features)
def forward(self, keypoints_2d, beta):
bs = keypoints_2d.shape[0]
x = torch.cat([keypoints_2d.view(bs, -1), beta], dim=1)
x = self.backbone(x)
x = self.final(x)
expression = x[:, :10]
jaw_pose = x[:, 10:14]
return expression, jaw_pose
class SiameseModel(torch.nn.Module):
def __init__(self,
*,
n_keypoints=468, beta_size=10,
emb_size=32, hid_size=32,
expression_size=10, jaw_pose_size=3,
use_beta=True,
use_keypoints_3d=False
):
super().__init__()
self.use_beta = use_beta
self.use_keypoints_3d = use_keypoints_3d
keypoint_input_size = 3 * n_keypoints if use_keypoints_3d else 2 * n_keypoints
self.keypoint_backbone = nn.Sequential(
nn.Linear(keypoint_input_size, 512),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Linear(512, 256),
nn.BatchNorm1d(256),
nn.ReLU(inplace=True),
nn.Linear(256, 128),
nn.BatchNorm1d(128),
nn.ReLU(inplace=True),
nn.Linear(128, emb_size),
)
if self.use_beta:
self.beta_backbone = nn.Sequential(
nn.Linear(beta_size, 32),
nn.BatchNorm1d(32),
nn.ReLU(inplace=True),
nn.Linear(32, 64),
nn.BatchNorm1d(64),
nn.ReLU(inplace=True),
nn.Linear(64, emb_size),
)
self.mix_backbone = nn.Sequential(
nn.Linear(2 * emb_size if use_beta else emb_size, 128),
nn.BatchNorm1d(128),
nn.ReLU(inplace=True),
nn.Linear(128, 64),
nn.BatchNorm1d(64),
nn.ReLU(inplace=True),
nn.Linear(64, hid_size),
nn.BatchNorm1d(hid_size),
nn.ReLU(inplace=True)
)
self.expression_head = nn.Linear(hid_size, expression_size)
self.jaw_pose_head = nn.Linear(hid_size, jaw_pose_size)
def forward(self, keypoints, beta):
bs = keypoints.shape[0]
keypoints_emb = self.keypoint_backbone(keypoints.view(bs, -1))
        if self.use_beta:
            beta_emb = self.beta_backbone(beta)
            emb = torch.cat([keypoints_emb, beta_emb], dim=1)
        else:
            emb = keypoints_emb
feature = self.mix_backbone(emb)
expression = self.expression_head(feature)
jaw_pose = self.jaw_pose_head(feature)
return expression, jaw_pose
class SiameseModelSmall(torch.nn.Module):
def __init__(self,
*,
n_keypoints=468, beta_size=10,
emb_size=32, hid_size=32,
expression_size=10, jaw_pose_size=3
):
super().__init__()
self.keypoint_backbone = nn.Sequential(
nn.Linear(2 * n_keypoints, 32),
nn.BatchNorm1d(32),
nn.ReLU(inplace=True),
nn.Linear(32, emb_size),
)
self.beta_backbone = nn.Sequential(
nn.Linear(beta_size, 32),
nn.BatchNorm1d(32),
nn.ReLU(inplace=True),
nn.Linear(32, emb_size),
)
        self.mix_backbone = nn.Sequential(
            # output width must be hid_size, which the heads below consume
            nn.Linear(2 * emb_size, hid_size),
            nn.BatchNorm1d(hid_size),
            nn.ReLU(inplace=True),
        )
self.expression_head = nn.Linear(hid_size, expression_size)
self.jaw_pose_head = nn.Linear(hid_size, jaw_pose_size)
def forward(self, keypoints_2d, beta):
bs = keypoints_2d.shape[0]
keypoints_2d_emb = self.keypoint_backbone(keypoints_2d.view(bs, -1))
beta_emb = self.beta_backbone(beta)
emb = torch.cat([keypoints_2d_emb, beta_emb], dim=1)
feature = self.mix_backbone(emb)
expression = self.expression_head(feature)
jaw_pose = self.jaw_pose_head(feature)
return expression, jaw_pose
class SiameseModelDropout(torch.nn.Module):
def __init__(self,
*,
n_keypoints=468, beta_size=10,
emb_size=32, hid_size=32,
expression_size=10, jaw_pose_size=3,
dropout_p=0.5
):
super().__init__()
self.keypoint_backbone = nn.Sequential(
nn.Linear(2 * n_keypoints, 512),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Dropout(dropout_p),
nn.Linear(512, 256),
nn.BatchNorm1d(256),
nn.ReLU(inplace=True),
nn.Dropout(dropout_p),
nn.Linear(256, 128),
nn.BatchNorm1d(128),
nn.ReLU(inplace=True),
nn.Dropout(dropout_p),
nn.Linear(128, emb_size),
)
self.beta_backbone = nn.Sequential(
nn.Linear(beta_size, 32),
nn.BatchNorm1d(32),
nn.ReLU(inplace=True),
nn.Linear(32, 64),
nn.BatchNorm1d(64),
nn.ReLU(inplace=True),
nn.Linear(64, emb_size),
)
self.mix_backbone = nn.Sequential(
nn.Linear(2 * emb_size, 128),
nn.BatchNorm1d(128),
nn.ReLU(inplace=True),
nn.Dropout(dropout_p),
nn.Linear(128, 64),
nn.BatchNorm1d(64),
nn.ReLU(inplace=True),
nn.Dropout(dropout_p),
nn.Linear(64, hid_size),
nn.BatchNorm1d(hid_size),
nn.ReLU(inplace=True)
)
self.expression_head = nn.Linear(hid_size, expression_size)
self.jaw_pose_head = nn.Linear(hid_size, jaw_pose_size)
def forward(self, keypoints_2d, beta):
bs = keypoints_2d.shape[0]
keypoints_2d_emb = self.keypoint_backbone(keypoints_2d.view(bs, -1))
beta_emb = self.beta_backbone(beta)
emb = torch.cat([keypoints_2d_emb, beta_emb], dim=1)
feature = self.mix_backbone(emb)
expression = self.expression_head(feature)
jaw_pose = self.jaw_pose_head(feature)
return expression, jaw_pose
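# A minimal smoke test (assumption: a batch of 4 samples with the default
# 468 two-dimensional keypoints and a 10-dimensional beta vector).
if __name__ == "__main__":
    model = SiameseModel()
    model.eval()  # eval mode: BatchNorm uses running stats (also allows batch size 1)
    keypoints = torch.randn(4, 468, 2)
    beta = torch.randn(4, 10)
    expression, jaw_pose = model(keypoints, beta)
    print(expression.shape, jaw_pose.shape)  # -> [4, 10] and [4, 3]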
|
import os
from typing import Any, ClassVar, Dict, Optional
import mlflow
from mlflow.entities import Experiment
from pydantic import root_validator, validator
from coalescenceml.directory import Directory
from coalescenceml.environment import Environment
from coalescenceml.experiment_tracker import (
BaseExperimentTracker,
)
from coalescenceml.integrations.constants import MLFLOW
from coalescenceml.logger import get_logger
from coalescenceml.stack.stack_component_class_registry import (
register_stack_component_class,
)
from coalescenceml.stack.stack_validator import StackValidator
logger = get_logger(__name__)
MLFLOW_TRACKING_TOKEN = "MLFLOW_TRACKING_TOKEN"
MLFLOW_TRACKING_USERNAME = "MLFLOW_TRACKING_USERNAME"
MLFLOW_TRACKING_PASSWORD = "MLFLOW_TRACKING_PASSWORD"
# Database-backed tracking URI schemes accepted by the validator below
# (assumed list; the original import/definition of this constant is missing).
DATABASE_ENGINES = ["sqlite", "mysql", "mssql", "postgresql"]
# TODO: Determine whether to use insecure TLS
# TODO: How do we use a cert bundle if desired?
# https://www.mlflow.org/docs/latest/tracking.html#logging-to-a-tracking-server
@register_stack_component_class
class MLFlowExperimentTracker(BaseExperimentTracker):
"""Stores MLFlow Configuration and interaction functions.
CoalescenceML configures MLFlow for you....
"""
# TODO: Ask Iris about local mlflow runs
tracking_uri: Optional[str] = None # Can this
use_local_backend: bool = False # base it on tracking_uri
tracking_token: Optional[str] = None # The way I prefer using MLFlow
tracking_username: Optional[str] = None
tracking_password: Optional[str] = None
FLAVOR: ClassVar[str] = MLFLOW
@validator("tracking_uri")
def ensure_valid_tracking_uri(
cls, tracking_uri: Optional[str] = None,
) -> Optional[str]:
"""Ensure the tracking uri is a valid mlflow tracking uri.
Args:
tracking_uri: The value to verify
Returns:
Valid tracking uri
Raises:
ValueError: if mlflow tracking uri is invalid.
"""
if tracking_uri:
valid_protocols = DATABASE_ENGINES + ["http", "https", "file"]
if not any(
tracking_uri.startswith(protocol)
for protocol in valid_protocols
):
raise ValueError(
f"MLFlow tracking uri does not use a valid protocol "
f" which inclue: {valid_protocols}. Please see "
f"https://www.mlflow.org/docs/latest/tracking.html"
f"#where-runs-are-recorded for more information."
)
return tracking_uri
@staticmethod
def is_remote_tracking_uri(tracking_uri: str) -> bool:
"""Check whether URI is using remote protocol.
Args:
tracking_uri: MLFlow tracking server location
Returns:
True if server is remote else False
"""
return any(
tracking_uri.startswith(prefix)
for prefix in ["http", "https"]
            # Only these prefixes are needed: in testing, AWS MySQL generated a
            # hostname served over HTTP, so cloud servers use http(s) while
            # local ones do not. This heuristic might be wrong and need to change.
)
@root_validator
def ensure_tracking_uri_or_local(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Ensure that the tracking uri exists or use local backend is true.
Args:
values: Pydantic stored values of class
Returns:
Pydantic class values
"""
tracking_uri = values.get("tracking_uri")
use_local_backend = values.get("use_local_backend")
if not (tracking_uri or use_local_backend):
raise ValueError(
f"You must specify either a tracking uri or the use "
f"your local artifact store for the MLFlow "
f"experiment tracker."
# TODO: Message on how to fix.
)
return values
@root_validator
def ensure_authentication(
cls, values: Dict[str, Any],
) -> Dict[str, Any]:
"""Ensure that credentials exists for a remote tracking instance.
Args:
values: Pydantic stored values of class
Returns:
class pydantic values
"""
tracking_uri = values.get("tracking_uri")
if tracking_uri: # Check if exists
if cls.is_remote_tracking_uri(tracking_uri): # Check if remote
token_auth = values.get("tracking_token")
basic_http_auth = (
values.get("tracking_username")
and values.get("tracking_password")
)
if not (token_auth or basic_http_auth):
raise ValueError(
f"MLFlow experiment tracking with a remote tracking "
f"uri '{tracking_uri}' is only allowed when either a "
f"username and password or auth token is used."
                        # TODO: Add update commands to the stack to allow users to update the component.
)
return values
@staticmethod
def local_mlflow_tracker() -> str:
"""Return local mlflow folder inside artifact store.
Returns:
MLFlow tracking uri for local backend.
"""
dir_ = Directory(skip_directory_check=True)
artifact_store = dir_.active_stack.artifact_store
# TODO: MLFlow can connect to non-local stores however
# I am unsure what this entails and how to test this.
# AWS would be an interesting testbed for a future iteration.
# Ideally this would work for an arbitrary artifact store.
local_mlflow_uri = os.path.join(artifact_store.path, "mlruns")
if not os.path.exists(local_mlflow_uri):
os.makedirs(local_mlflow_uri)
return "file:" + local_mlflow_uri
def get_tracking_uri(self) -> str:
"""Return configured MLFlow tracking uri.
Returns:
MLFlow tracking uri
Raises:
ValueError: if tracking uri is empty and user didn't specify to use the local backend.
"""
if self.tracking_uri:
return self.tracking_uri
else:
if self.use_local_backend:
return self.local_mlflow_tracker()
else:
raise ValueError(
f"You must specify either a tracking uri or the use "
f"of your local artifact store for the MLFlow "
f"experiment tracker."
)
def prepare_step_run(self) -> None:
"""Configure MLFlow tracking URI and credentials."""
if self.tracking_token:
os.environ[MLFLOW_TRACKING_TOKEN] = self.tracking_token
if self.tracking_username:
os.environ[MLFLOW_TRACKING_USERNAME] = self.tracking_username
if self.tracking_password:
os.environ[MLFLOW_TRACKING_PASSWORD] = self.tracking_password
mlflow.set_tracking_uri(self.get_tracking_uri())
return
def cleanup_step_run(self) -> None:
mlflow.set_tracking_uri("")
@property
def validator(self) -> Optional[StackValidator]:
"""Validate that MLFlow config with the rest of stack is valid.
..note: We just need to check (for now) that if the use local flag
is used that there is a local artifact store
"""
if self.tracking_uri:
# Tracking URI exists so do nothing
return None
else:
from coalescenceml.artifact_store import LocalArtifactStore
# Presumably they've set the use_local_backend to true
# So check for local artifact store b/c thats all that
# works for now. This will be edited later (probably...)
return StackValidator(
custom_validation_function=lambda stack: (
isinstance(stack.artifact_store, LocalArtifactStore),
"MLFlow experiment tracker with local backend only "
"works with a local artifact store at this time."
)
)
def active_experiment(self, experiment_name=None) -> Optional[Experiment]:
"""Return currently active MLFlow experiment.
Args:
experiment_name: experiment name to set in MLFlow.
Returns:
None if not in step else will return an MLFlow Experiment
"""
step_env = Environment().step_env
if not step_env:
# I.e. we are not in a step running
return None
experiment_name = experiment_name or step_env.pipeline_name
mlflow.set_experiment(experiment_name=experiment_name)
return mlflow.get_experiment_by_name(experiment_name)
def active_run(self, experiment_name=None) -> Optional[mlflow.ActiveRun]:
""""""
step_env = Environment().step_env
active_experiment = self.active_experiment(experiment_name)
if not active_experiment:
            # This checks for a missing experiment and for not being in a step
# Should we make this explicit or keep it as implicit?
return None
experiment_id = active_experiment.experiment_id
# TODO: There may be race conditions in the below code for parallel
# steps. For example for HP tuning if two train steps are running
# and they both create a run then we send it onwards to a testing step
# which will now not know which run to use. We don't want a new run
# But rather to search for the most recent run. But even that might
# Have conflicts when there are multiple running steps that do this...
# How can we handle this?
# Naive Idea: How about each step has an identifier with it?
runs = mlflow.search_runs(
experiment_ids=[experiment_id],
filter_string=f'tags.mlflow.runName = "{step_env.pipeline_run_id}"',
output_format="list",
)
run_id = runs[0].info.run_id if runs else None
current_active_run = mlflow.active_run()
if current_active_run and current_active_run.info.run_id == run_id:
# Is not None AND run_id matches
return current_active_run
return mlflow.start_run(
run_id = run_id,
run_name=step_env.pipeline_run_id,
experiment_id=experiment_id,
)
def log_params(self, params: Dict[str, Any]) -> None:
raise NotImplementedError()
def log_metrics(self, metrics: Dict[str, Any]) -> None:
raise NotImplementedError()
def log_artifacts(self, artifacts: Dict[str, Any]) -> None:
raise NotImplementedError()
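# A minimal configuration sketch (hypothetical values; the exact constructor
# arguments inherited from BaseExperimentTracker are not shown in this file):
#
#   tracker = MLFlowExperimentTracker(
#       tracking_uri="https://mlflow.example.com",
#       tracking_username="mlflow-user",
#       tracking_password="not-a-real-password",
#   )
#   tracker.prepare_step_run()        # exports MLFLOW_* env vars, sets tracking URI
#   with tracker.active_run():        # only valid while a pipeline step is running
#       mlflow.log_metric("accuracy", 0.9)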
|
from .build import *
from .parse.ast import *
from .parse.matcher import *
|
from pytocl.main import main
from driver import Driver
from rnn_model import RNNModelSteering
if __name__ == '__main__':
main(Driver(
RNNModelSteering("TrainedNNs/steer_norm_aalborg_provided_batch-50.pt"),
logdata=False))
|
######################################################################
# Author: Dr. Scott Heggen TODO: Change this to your name
# Username: heggens TODO: Change this to your username
#
# Assignment: A01
#
# Purpose: A program that returns your Chinese Zodiac animal given a
# birth year between 2000 and 2011. Also prints your friend's animal,
# and your compatibility with that friend's animal.
######################################################################
# Acknowledgements:
# Original Author: Dr. Scott Heggen
######################################################################
# Remember to read the detailed notes about each task in the A01 document.
######################################################################
# (Required) Task 1
# TODO Ask user for their birth year
# TODO Check the year using if conditionals, and print the correct animal for that year.
# See the a01_pets.py for examples
######################################################################
# (Required) Task 2
# TODO Ask the user for their friend's birth year
# TODO Similar to above, check your friend's year using if conditionals, and print the correct animal for that year
######################################################################
# (Optional) Task 3
# TODO Check for compatibility between your birth year and your friend's birth year
# NOTE: You can always assume the first input is your birth year (i.e., 1982 for me).
# This way, you are not writing a ton of code to consider every possibility.
# In other words, only do one row of the sample compatibility table.
# TODO print if you are a strong match, no match, or in between
my_input = input('What year were you born in?')
if my_input == '2000':
print("You're a hot breath dragon. Eww ")
elif my_input == '2001':
print("You're a Slithering Snake")
elif my_input == '2002':
print("You're a slow horse")
elif my_input == '2003':
print("You're the Greatest Of All Time (Goat)")
elif my_input == '2004':
print("You're a crazy monkey")
elif my_input == '2005':
print("You're a loud rooster")
elif my_input == '2006':
print("You're a dawg woof!")
elif my_input == '2007':
print("You're a stinky pig. Eww")
elif my_input =='2008':
print("You're a roaring dragon")
elif my_input == '2009':
print("You're a sneaky snake")
elif my_input == '2010':
print("You're an ugly horse")
elif my_input =='2011':
print("You're a goat with a goatee")
friend_input = input('What year was your friend born?')
if friend_input == '2001':
print("You're a snake")
elif friend_input == '2002':
print("You're a horse")
elif friend_input == '2003':
print("You're a goat")
elif friend_input == '2004':
print("You're a monkey")
elif friend_input == '2005':
print("You're a rooster")
elif friend_input == '2006':
print("You're a dog")
elif friend_input == '2007':
print("You're a pig")
elif friend_input == '2008':
print("You're a dragon")
elif friend_input == '2009':
print("You're a snake")
elif friend_input == '2010':
print("You're a horse")
elif friend_input == '2011':
print("You're a goat")
elif friend_input == '2012':
print("You're a dragon")
|
#!/usr/bin/env python
import PIL
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
# Set a font; optional, can be skipped if none is available
#font = ImageFont.truetype("/usr/share/fonts/truetype/ttf-dejavu/DejaVuSans.ttf",13)
# Open the base image
imageFile = "./HTML/example1.jpg"
im1=Image.open(imageFile)
# Add text to the image (1)
draw = ImageDraw.Draw(im1)
draw.text((256,160),"3",(255,255,0))
draw = ImageDraw.Draw(im1)
# Save the result
im1.save("target.png")
im1.show()
print("这是一条测试语句"," 432", " 256", " 128"," 145")
print("今天天气很不错"," 234", " 112", " 276"," 286")
|
from .models import UserData, Sounds
from .src.DataManager import DataManager
from .BasicManager import BasicManager, STORE_PATH
class SoundManager(BasicManager):
manager = DataManager(1, STORE_PATH)
def get(self, user_id):
result = None
user = UserData.user_object(user_id)
if user is not None:
self.manager.set_user_id(user_id)
result = Sounds.user_sounds_data(user_id)
return result
def create(self, user_id, sound_name, file):
result = None
user = UserData.user_object(user_id)
if user is not None:
self.manager.set_user_id(user_id)
is_existing_user_folder = self.manager.file_manager.is_valid_existing_folder_path(self.manager.user_folder)
if is_existing_user_folder is False:
# create user folder if user register in first time
self.manager.create_user_folder()
self.manager.save_audio_file(sound_name, file)
full_sound_path = self.manager.get_full_file_path(sound_name)
sound = Sounds(path=full_sound_path, name=sound_name, user=user)
sound.save()
result = Sounds.sound_data_by_id(sound.pk)
return result
def delete(self, user_id, sound_name):
result = None
user = UserData.user_object(user_id)
if user is not None:
result = Sounds.sound_data_by_name(user_id, sound_name)
            if result:
sound = Sounds.sound_object(result['id'])
if sound is not None:
sound.delete()
self.manager.set_user_id(user_id)
self.manager.delete_user_file(sound_name)
return result
def update(self, *args):
pass
def load(self, user_id, sound_name):
result = None
user = UserData.user_object(user_id)
if user is not None:
self.manager.set_user_id(user_id)
data = Sounds.sound_data_by_name(user_id, sound_name)
            if data:
file_path = data['path']
if file_path:
file_object = open(file_path, 'rb')
result = file_object
return result
|
n1 = float(input('Enter the 1st grade: '))
n2 = float(input('Enter the 2nd grade: '))
media = (n1+n2) / 2
print('The average of the grades = {}'.format(media))
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/MarketingStatus
Release: R5
Version: 4.5.0
Build ID: 0d95498
Last updated: 2021-04-03T00:34:11.075+00:00
"""
from pydantic import Field
from . import fhirtypes
from . import backbonetype
class MarketingStatus(backbonetype.BackboneType):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
The marketing status describes the date when a medicinal product is
actually put on the market or the date as of which it is no longer
available.
"""
resource_type = Field("MarketingStatus", const=True)
country: fhirtypes.CodeableConceptType = Field(
None,
alias="country",
title=(
"The country in which the marketing authorisation has been granted "
"shall be specified It should be specified using the ISO 3166 \u2011 1 "
"alpha-2 code elements"
),
description=None,
# if property is element of this resource.
element_property=True,
)
dateRange: fhirtypes.PeriodType = Field(
None,
alias="dateRange",
title=(
"The date when the Medicinal Product is placed on the market by the "
"Marketing Authorisation Holder (or where applicable, the "
"manufacturer/distributor) in a country and/or jurisdiction shall be "
"provided A complete date consisting of day, month and year shall be "
"specified using the ISO 8601 date format NOTE \u201cPlaced on the market\u201d "
"refers to the release of the Medicinal Product into the distribution "
"chain"
),
description=None,
# if property is element of this resource.
element_property=True,
)
jurisdiction: fhirtypes.CodeableConceptType = Field(
None,
alias="jurisdiction",
title=(
"Where a Medicines Regulatory Agency has granted a marketing "
"authorisation for which specific provisions within a jurisdiction "
"apply, the jurisdiction can be specified using an appropriate "
"controlled terminology The controlled term and the controlled term "
"identifier shall be specified"
),
description=None,
# if property is element of this resource.
element_property=True,
)
restoreDate: fhirtypes.DateTime = Field(
None,
alias="restoreDate",
title=(
"The date when the Medicinal Product is placed on the market by the "
"Marketing Authorisation Holder (or where applicable, the "
"manufacturer/distributor) in a country and/or jurisdiction shall be "
"provided A complete date consisting of day, month and year shall be "
"specified using the ISO 8601 date format NOTE \u201cPlaced on the market\u201d "
"refers to the release of the Medicinal Product into the distribution "
"chain"
),
description=None,
# if property is element of this resource.
element_property=True,
)
restoreDate__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_restoreDate", title="Extension field for ``restoreDate``."
)
status: fhirtypes.CodeableConceptType = Field(
...,
alias="status",
title=(
"This attribute provides information on the status of the marketing of "
"the medicinal product See ISO/TS 20443 for more information and "
"examples"
),
description=None,
# if property is element of this resource.
element_property=True,
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from ``MarketingStatus`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"country",
"jurisdiction",
"status",
"dateRange",
"restoreDate",
]
|
# Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Services for create and manipulate objects via admin UI."""
from lib import url
from lib.page import dashboard
from lib.page.modal.create_new_person import CreateNewPersonModal
from lib.utils import selenium_utils
class AdminWebUiService(object):
"""Base class for business layer's services objects for Admin."""
# pylint: disable=too-few-public-methods
def __init__(self, driver):
self._driver = driver
class PeopleAdminWebUiService(AdminWebUiService):
"""Class for admin people business layer's services objects"""
def __init__(self, driver):
super(PeopleAdminWebUiService, self).__init__(driver)
self.people_tab = self._open_admin_people_tab()
def create_new_person(self, person):
"""Create new person on Admin People widget
- Return: lib.entities.entity.PersonEntity"""
self.people_tab.click_create_button()
self._create_new_person_on_modal(person)
# refresh page to make newly created person appear on Admin People Widget
self._driver.refresh()
return self.find_filtered_person(person)
def _open_admin_people_tab(self):
"""Open People widget on Admin dashboard.
- Return: lib.page.widget.admin_widget.People"""
selenium_utils.open_url(url.Urls().admin_people_tab)
return dashboard.AdminDashboard(self._driver).select_people()
def _create_new_person_on_modal(self, person):
"""Fill required fields and click on save and close button
on New Person modal"""
create_person_modal = CreateNewPersonModal(self._driver)
create_person_modal.enter_name(person.name)
create_person_modal.enter_email(person.email)
create_person_modal.name_tf.click()
create_person_modal.save_and_close()
def find_filtered_person(self, person):
"""Find person by email in the list on Admin People widget
    - Return: lib.entities.entity.PersonEntity (the first match)"""
self.people_tab.filter_by_name_email_company(person.email)
return self.people_tab.get_people()[0]
|
# Python program for Dijkstra's single
# source shortest path algorithm. The program is
# for adjacency matrix representation of the graph
from pwn import *
# Library for INT_MAX
import sys
class Graph():
def __init__(self, vertices):
self.V = vertices
self.graph = [[0 for column in range(vertices)]
for row in range(vertices)]
# A utility function to find the vertex with
# minimum distance value, from the set of vertices
# not yet included in shortest path tree
def minDistance(self, dist, sptSet):
        # Initialize minimum distance for next node
min = sys.maxint
        # Search for the nearest vertex not yet in the
# shortest path tree
for v in range(self.V):
if dist[v] < min and sptSet[v] == False:
min = dist[v]
min_index = v
return min_index
    # Function that implements Dijkstra's single source
# shortest path algorithm for a graph represented
# using adjacency matrix representation
def dtra(self, src):
dist = [sys.maxint] * self.V
dist[src] = 0
sptSet = [False] * self.V
for cout in range(self.V):
# Pick the minimum distance vertex from
# the set of vertices not yet processed.
# u is always equal to src in first iteration
u = self.minDistance(dist, sptSet)
# Put the minimum distance vertex in the
            # shortest path tree
sptSet[u] = True
# Update dist value of the adjacent vertices
# of the picked vertex only if the current
# distance is greater than new distance and
            # the vertex is not in the shortest path tree
for v in range(self.V):
if self.graph[u][v] > 0 and sptSet[v] == False and dist[v] > dist[u] + self.graph[u][v]:
dist[v] = dist[u] + self.graph[u][v]
sol = []
for i in xrange(1, 8, 1):
sol.append(dist[i*7-1])
return sol
def xy2num(x, y):
if x < 0 or x >= 7:
return -1
    if y < 0 or y >= 7:
return -1
return y*7+x
r = remote("110.10.147.104", 15712)
r.sendlineafter(">> ", "G")
r.recvline()
ans = []
for i in xrange(100):
r.recvline()
smth = []
for i in xrange(7):
a = []
b = r.recvline().rstrip().replace(" ", " ").split(' ')
for i in b:
if i != "":
a.append(int(i))
smth.append(a)
    # convert the 7x7 grid into an adjacency matrix
# smth = [[99, 99, 99, 99, 99, 99, 99],
# [99, 99, 99, 99, 99, 99, 99],
# [99, 99, 99, 99, 99, 99, 99],
# [99, 99, 99, 99, 99, 99, 99],
# [99, 1, 1, 1, 99, 1, 1],
# [1, 1, 99, 1, 99, 1, 99],
# [99, 99, 99, 1, 1, 1, 99]]
adj = []
for y in xrange(7):
for x in xrange(7):
a = [0]*(7*7)
if xy2num(x-1, y) >= 0:
a[xy2num(x-1, y)] = smth[y][x-1]
if xy2num(x+1, y) >= 0:
a[xy2num(x+1, y)] = smth[y][x+1]
if xy2num(x, y-1) >= 0:
a[xy2num(x, y-1)] = smth[y-1][x]
if xy2num(x, y+1) >= 0:
a[xy2num(x, y+1)] = smth[y+1][x]
adj.append(a)
bestest = 1203981293812038102093890213890122839012839012830912803912809312809380912389012839018209380
g = Graph(7*7)
g.graph = adj
for i in xrange(7):
best = min(g.dtra(i*7))
best += smth[i][0]
bestest = min(bestest, best)
r.sendlineafter(">>> ", str(bestest))
ans.append(bestest)
r.close()
print "".join([chr(i) for i in ans]).decode("base64")
|
import logging
import os
from aiogram import Bot, Dispatcher, types
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from data import config
# these paths will be used in the handlers files
cwd = os.getcwd()
input_path = os.path.join(cwd, "user_files", "input")
output_path = os.path.join(cwd, "user_files", "output")
bot = Bot(token=config.BOT_TOKEN, parse_mode=types.ParseMode.HTML)
storage = MemoryStorage()
dp = Dispatcher(bot, storage=storage)
logging.basicConfig(
level=logging.INFO,
format=u'%(filename)s [LINE:%(lineno)d] #%(levelname)-8s [%(asctime)s] %(message)s',
datefmt='%d-%b-%y %H:%M:%S',
)
|
# -*- coding: utf-8 -*-
"""
src
~~~~~~~~~~~~~~~~~~~
This module contains the application source code.
The application is based upon Model-View-Controller architectural pattern.
"""
|
import numpy as np
import pandas as pd
import time
from Bio import Entrez
def addBibs(df):
"""Takes output from mergeWrite and adds cols for corresponding pubmed features.
Parses Entrez esummary pubmed results for desired bibliographic features.
Iterates for each pmid in input's generifs col.
Casts results to new df and merges with input df.
Returns df with bib features for each pmid."""
# This should be made variable and entered by user
Entrez.email = 'jimmyjamesarnold@gmail.com'
bib_feats = ['Id', 'PubDate', 'Source', 'Title', 'LastAuthor',
'DOI', 'PmcRefCount'] # should be made arg later
print('')
print('Extracting PubMed data for...')
ls = [] # constructs list of biblio data for each generif
for pb in [i for i in df.pmid]:
# should be made arg later
print('Fetching %d' % pb)
record = Entrez.read(Entrez.esummary(db="pubmed", id=pb))
# use dict compr to extract bib_feats per record, convert to series and append to ls
ls.append(pd.Series({i: record[0][i]
for i in bib_feats if i in record[0]}))
        time.sleep(0.5)  # pause 0.5 s to avoid spamming NCBI.
# merge with df, cast dtypes for merging.
# idea for future, add 'PubDate': 'datetime64' to astype for time series. Some pubs have weird timestr values, need to work on solution.
print('Done.')
return pd.merge(df, pd.DataFrame(ls).astype({'Id': 'int64'}),
left_on='pmid', right_on='Id').drop('Id', axis=1)
|
import numpy as np
import random
from q1_softmax import softmax
from q2_sigmoid import sigmoid, sigmoid_grad
from q2_gradcheck import gradcheck_naive
def affine_forward(x, w, b):
"""
Computes the forward pass for an affine (fully-connected) layer.
The input x has shape (N, d_1, ..., d_k) and contains a minibatch of N
examples, where each example x[i] has shape (d_1, ..., d_k). We will
reshape each input into a vector of dimension D = d_1 * ... * d_k, and
then transform it to an output vector of dimension M.
Inputs:
- x: A numpy array containing input data, of shape (N, d_1, ..., d_k)
- w: A numpy array of weights, of shape (D, M)
- b: A numpy array of biases, of shape (M,)
Returns a tuple of:
- out: output, of shape (N, M)
- cache: (x, w, b)
"""
out = None
N = x.shape[0]
D = np.prod(x.shape[1:])
M = b.shape[1]
out = np.dot(x.reshape(N, D), w.reshape(D, M)) + b.reshape(1, M)
return out, (x,w,b)
def affine_backward(dout, cache):
"""
Computes the backward pass for an affine layer.
Inputs:
- dout: Upstream derivative, of shape (N, M)
- cache: Tuple of:
- x: Input data, of shape (N, d_1, ... d_k)
- w: Weights, of shape (D, M)
Returns a tuple of:
- dx: Gradient with respect to x, of shape (N, d1, ..., d_k)
- dw: Gradient with respect to w, of shape (D, M)
- db: Gradient with respect to b, of shape (M,)
"""
x, w, b = cache
dx, dw, db = None, None, None
N = x.shape[0]
D = np.prod(x.shape[1:])
M = b.shape[1]
dx = np.dot(dout, w.reshape(D, M).T).reshape(x.shape)
dw = np.dot(x.reshape(N, D).T, dout).reshape(w.shape)
db = np.sum(dout, axis=0)
return dx, dw, db
def sigmoid_forward(x):
"""
Computes the forward pass for a sigmoid activation.
Inputs:
    - x: Input data, numpy array of arbitrary shape;
Returns a tuple (out, cache)
- out: output of the same shape as x
- cache: identical to out; required for backpropagation
"""
    out = sigmoid(x)
    return out, out
def sigmoid_backward(dout, cache):
"""
Computes the backward pass for an sigmoid layer.
Inputs:
- dout: Upstream derivative, same shape as the input
to the sigmoid layer (x)
- cache: sigmoid(x)
Returns a tuple of:
- dx: back propagated gradient with respect to x
"""
x = cache
return sigmoid_grad(x) * dout
def forward_backward_prop(data, labels, params, dimensions):
"""
Forward and backward propagation for a two-layer sigmoidal network
Compute the forward propagation and for the cross entropy cost,
and backward propagation for the gradients for all parameters.
"""
### Unpack network parameters (do not modify)
ofs = 0
Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])
N = data.shape[0]
W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))
ofs += Dx * H
b1 = np.reshape(params[ofs:ofs + H], (1, H))
ofs += H
W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))
ofs += H * Dy
b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))
### YOUR CODE HERE: forward propagation
hidden = np.dot(data,W1) + b1
layer1_a = sigmoid(hidden)
layer2 = np.dot(layer1_a, W2) + b2
# need to calculate the softmax loss
probs = softmax(layer2)
cost = - np.sum(np.log(probs[np.arange(N), np.argmax(labels, axis=1)]))
### END YOUR CODE
### YOUR CODE HERE: backward propagation
#There is no regularization :/
# dx -> sigmoid -> W2 * layer1_a + b -> sigmoid -> W1 * data + b1 -> ..
dx = probs.copy()
dx -= labels
dlayer2 = np.zeros_like(dx)
gradW2 = np.zeros_like(W2)
gradW1 = np.zeros_like(W1)
gradb2 = np.zeros_like(b2)
gradb1 = np.zeros_like(b1)
gradW2 = np.dot(layer1_a.T, dx)
gradb2 = np.sum(dx, axis=0)
dlayer2 = np.dot(dx, W2.T)
dlayer1 = sigmoid_grad(layer1_a) * dlayer2
gradW1 = np.dot(data.T, dlayer1)
gradb1 = np.sum(dlayer1, axis=0)
# Decided to implement affine (forward and backward function)
# sigmoid (forward and backward function)
# These should work properly;
# scores, cache_1 = affine_forward(data, W1, b1)
# scores, cache_s1 = sigmoid_forward(scores)
# scores, cache_2 = affine_forward(scores, W2, b2)
# # need to calculate the softmax loss
# probs = softmax(scores)
# cost = -np.sum(np.log(probs[np.arange(N), np.argmax(labels)] + 1e-12)) / N
# softmax_dx = probs.copy()
# softmax_dx[np.arange(N), np.argmax(labels,axis=1)] -= 1
# softmax_dx /= N
# grads = {}
# dlayer2, grads['W2'], grads['b2'] = affine_backward(softmax_dx, cache_2)
# dlayer1s = sigmoid_backward(dlayer2, cache_s1)
# dlayer1, grads['W1'], grads['b1'] = affine_backward(dlayer1s, cache_1)
#softmax_dx is the gradient of the loss w.r.t. y_{est}
### END YOUR CODE
### Stack gradients (do not modify)
grad = np.concatenate((gradW1.flatten(), gradb1.flatten(),
gradW2.flatten(), gradb2.flatten()))
return cost, grad
def sanity_check():
"""
Set up fake data and parameters for the neural network, and test using
gradcheck.
"""
print("Running sanity check...")
N = 300
dimensions = [10, 5, 10]
data = np.random.randn(N, dimensions[0]) # each row will be a datum
labels = np.zeros((N, dimensions[2]))
for i in range(N):
labels[i,random.randint(0,dimensions[2]-1)] = 1
params = np.random.randn((dimensions[0] + 1) * dimensions[1] + (
dimensions[1] + 1) * dimensions[2], )
#cost, _ = forward_backward_prop(data, labels, params, dimensions)
# # expect to get 1 in 10 correct
#print(np.exp(-cost))
# #cost is roughly correct
gradcheck_naive(lambda params: forward_backward_prop(data, labels, params,
dimensions), params)
def your_sanity_checks():
"""
    Use this space to add any additional sanity checks by running:
python q2_neural.py
This function will not be called by the autograder, nor will
your additional tests be graded.
"""
print("Running your sanity checks...")
### YOUR CODE HERE
#raise NotImplementedError
### END YOUR CODE
if __name__ == "__main__":
sanity_check()
your_sanity_checks()
|
print("sema")
|
from output.models.nist_data.list_pkg.negative_integer.schema_instance.nistschema_sv_iv_list_negative_integer_enumeration_2_xsd.nistschema_sv_iv_list_negative_integer_enumeration_2 import (
NistschemaSvIvListNegativeIntegerEnumeration2,
NistschemaSvIvListNegativeIntegerEnumeration2Type,
)
__all__ = [
"NistschemaSvIvListNegativeIntegerEnumeration2",
"NistschemaSvIvListNegativeIntegerEnumeration2Type",
]
|
import requests
from lxml import html  # import the lxml.html module
def crawl_second(url):
#print(url)
r = requests.get(url).content
r_tree = html.fromstring(r)
for i in r_tree.xpath('//a'):
link = i.xpath('@href')[0]
        name = i.text
if(name is None):
continue
        if('视频教程' not in name):  # skip "video tutorial" entries
print(name,link)
if __name__ == '__main__':
url = 'http://zy.libraries.top/t/173.html?ma=666'
r = requests.get(url).content
r_tree = html.fromstring(r)
    for i in r_tree.xpath('//div/h3/a'):  # select nodes with XPath
link = i.xpath('@href')[0]
name = i.xpath('span')[0].text
#print(name, link)
crawl_second(link)
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import traceback
import uuid
import eventlet.debug
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_messaging.rpc import dispatcher
from oslo_messaging import target
from oslo_serialization import jsonutils
from oslo_service import service
from murano.common import auth_utils
from murano.common.helpers import token_sanitizer
from murano.common.plugins import extensions_loader
from murano.common import rpc
from murano.dsl import context_manager
from murano.dsl import dsl_exception
from murano.dsl import executor as dsl_executor
from murano.dsl import helpers
from murano.dsl import schema_generator
from murano.dsl import serializer
from murano.engine import execution_session
from murano.engine import package_loader
from murano.engine.system import status_reporter
from murano.engine.system import yaql_functions
from murano.policy import model_policy_enforcer as enforcer
CONF = cfg.CONF
PLUGIN_LOADER = None
LOG = logging.getLogger(__name__)
eventlet.debug.hub_exceptions(False)
# noinspection PyAbstractClass
class EngineService(service.Service):
def __init__(self):
super(EngineService, self).__init__()
self.server = None
def start(self):
endpoints = [
TaskProcessingEndpoint(),
StaticActionEndpoint(),
SchemaEndpoint()
]
transport = messaging.get_notification_transport(CONF)
s_target = target.Target('murano', 'tasks', server=str(uuid.uuid4()))
access_policy = dispatcher.DefaultRPCAccessPolicy
self.server = messaging.get_rpc_server(
transport, s_target, endpoints, 'eventlet',
access_policy=access_policy)
self.server.start()
super(EngineService, self).start()
def stop(self, graceful=False):
if self.server:
self.server.stop()
if graceful:
self.server.wait()
super(EngineService, self).stop()
def reset(self):
if self.server:
self.server.reset()
super(EngineService, self).reset()
def get_plugin_loader():
global PLUGIN_LOADER
if PLUGIN_LOADER is None:
PLUGIN_LOADER = extensions_loader.PluginLoader()
return PLUGIN_LOADER
class ContextManager(context_manager.ContextManager):
def create_root_context(self, runtime_version):
root_context = super(ContextManager, self).create_root_context(
runtime_version)
return helpers.link_contexts(
root_context, yaql_functions.get_context(runtime_version))
def create_package_context(self, package):
context = super(ContextManager, self).create_package_context(
package)
if package.name == 'io.murano':
context = helpers.link_contexts(
context, yaql_functions.get_restricted_context())
return context
class SchemaEndpoint(object):
@classmethod
def generate_schema(cls, context, *args, **kwargs):
session = execution_session.ExecutionSession()
session.token = context['token']
session.project_id = context['project_id']
with package_loader.CombinedPackageLoader(session) as pkg_loader:
return schema_generator.generate_schema(
pkg_loader, ContextManager(), *args, **kwargs)
class TaskProcessingEndpoint(object):
@classmethod
def handle_task(cls, context, task):
result = cls.execute(task)
rpc.api().process_result(result, task['id'])
@staticmethod
def execute(task):
s_task = token_sanitizer.TokenSanitizer().sanitize(task)
LOG.info('Starting processing task: {task_desc}'.format(
task_desc=jsonutils.dumps(s_task)))
result = None
reporter = status_reporter.StatusReporter(task['id'])
try:
task_executor = TaskExecutor(task, reporter)
result = task_executor.execute()
return result
finally:
LOG.info('Finished processing task: {task_desc}'.format(
task_desc=jsonutils.dumps(result)))
class StaticActionEndpoint(object):
@classmethod
def call_static_action(cls, context, task):
s_task = token_sanitizer.TokenSanitizer().sanitize(task)
LOG.info('Starting execution of static action: '
'{task_desc}'.format(task_desc=jsonutils.dumps(s_task)))
result = None
reporter = status_reporter.StatusReporter(task['id'])
try:
task_executor = StaticActionExecutor(task, reporter)
result = task_executor.execute()
return result
finally:
LOG.info('Finished execution of static action: '
'{task_desc}'.format(task_desc=jsonutils.dumps(result)))
class TaskExecutor(object):
@property
def action(self):
return self._action
@property
def session(self):
return self._session
@property
def model(self):
return self._model
def __init__(self, task, reporter=None):
if reporter is None:
reporter = status_reporter.StatusReporter(task['id'])
self._action = task.get('action')
self._model = task['model']
self._session = execution_session.ExecutionSession()
self._session.token = task['token']
self._session.project_id = task['project_id']
self._session.user_id = task['user_id']
self._session.environment_owner_project_id = self._model['project_id']
self._session.environment_owner_user_id = self._model['user_id']
self._session.system_attributes = self._model.get('SystemData', {})
self._reporter = reporter
self._model_policy_enforcer = enforcer.ModelPolicyEnforcer(
self._session)
def execute(self):
try:
self._create_trust()
except Exception as e:
return self.exception_result(e, None, '<system>')
with package_loader.CombinedPackageLoader(self._session) as pkg_loader:
pkg_loader.import_fixation_table(
self._session.system_attributes.get('Packages', {}))
result = self._execute(pkg_loader)
self._session.system_attributes[
'Packages'] = pkg_loader.export_fixation_table()
self._model['SystemData'] = self._session.system_attributes
self._model['project_id'] = self._session.environment_owner_project_id
self._model['user_id'] = self._session.environment_owner_user_id
result['model'] = self._model
if (not self._model.get('Objects') and
not self._model.get('ObjectsCopy')):
try:
self._delete_trust()
except Exception:
LOG.warning('Cannot delete trust', exc_info=True)
return result
def _execute(self, pkg_loader):
get_plugin_loader().register_in_loader(pkg_loader)
with dsl_executor.MuranoDslExecutor(
pkg_loader, ContextManager(), self.session) as executor:
try:
obj = executor.load(self.model)
except Exception as e:
return self.exception_result(e, None, '<load>')
if obj is not None:
try:
self._validate_model(obj.object, pkg_loader, executor)
except Exception as e:
return self.exception_result(e, obj, '<validate>')
try:
LOG.debug('Invoking pre-cleanup hooks')
self.session.start()
executor.object_store.cleanup()
except Exception as e:
return self.exception_result(e, obj, '<GC>')
finally:
LOG.debug('Invoking post-cleanup hooks')
self.session.finish()
self._model['ObjectsCopy'] = \
copy.deepcopy(self._model.get('Objects'))
action_result = None
if self.action:
try:
LOG.debug('Invoking pre-execution hooks')
self.session.start()
action_result = self._invoke(executor)
except Exception as e:
return self.exception_result(e, obj, self.action['method'])
finally:
LOG.debug('Invoking post-execution hooks')
self.session.finish()
self._model = executor.finalize(obj)
try:
action_result = serializer.serialize(action_result, executor)
except Exception as e:
return self.exception_result(e, None, '<result>')
pkg_loader.compact_fixation_table()
return {
'action': {
'result': action_result,
'isException': False
}
}
def exception_result(self, exception, root, method_name):
if isinstance(exception, dsl_exception.MuranoPlException):
LOG.error('\n' + exception.format(prefix=' '))
exception_traceback = exception.format()
else:
exception_traceback = traceback.format_exc()
LOG.exception(
("Exception %(exc)s occurred"
" during invocation of %(method)s"),
{'exc': exception, 'method': method_name})
self._reporter.report_error(root, str(exception))
return {
'action': {
'isException': True,
'result': {
'message': str(exception),
'details': exception_traceback
}
}
}
def _validate_model(self, obj, pkg_loader, executor):
if CONF.engine.enable_model_policy_enforcer:
if obj is not None:
with helpers.with_object_store(executor.object_store):
self._model_policy_enforcer.modify(obj, pkg_loader)
self._model_policy_enforcer.validate(obj.to_dictionary(),
pkg_loader)
def _invoke(self, mpl_executor):
obj = mpl_executor.object_store.get(self.action['object_id'])
method_name, kwargs = self.action['method'], self.action['args']
if obj is not None:
return mpl_executor.run(obj.type, method_name, obj, (), kwargs)
def _create_trust(self):
if not CONF.engine.use_trusts:
return
trust_id = self._session.system_attributes.get('TrustId')
if not trust_id:
trust_id = auth_utils.create_trust(
self._session.token, self._session.project_id)
self._session.system_attributes['TrustId'] = trust_id
self._session.trust_id = trust_id
def _delete_trust(self):
trust_id = self._session.trust_id
if trust_id:
auth_utils.delete_trust(self._session.trust_id)
self._session.system_attributes['TrustId'] = None
self._session.trust_id = None
class StaticActionExecutor(object):
@property
def action(self):
return self._action
@property
def session(self):
return self._session
def __init__(self, task, reporter=None):
if reporter is None:
reporter = status_reporter.StatusReporter(task['id'])
self._action = task['action']
self._session = execution_session.ExecutionSession()
self._session.token = task['token']
self._session.project_id = task['project_id']
self._session.user_id = task['user_id']
self._reporter = reporter
self._model_policy_enforcer = enforcer.ModelPolicyEnforcer(
self._session)
def execute(self):
with package_loader.CombinedPackageLoader(self._session) as pkg_loader:
get_plugin_loader().register_in_loader(pkg_loader)
executor = dsl_executor.MuranoDslExecutor(pkg_loader,
ContextManager())
action_result = self._invoke(executor)
action_result = serializer.serialize(action_result, executor)
return action_result
def _invoke(self, mpl_executor):
class_name = self.action['class_name']
pkg_name = self.action['pkg_name']
class_version = self.action['class_version']
version_spec = helpers.parse_version_spec(class_version)
if pkg_name:
package = mpl_executor.package_loader.load_package(
pkg_name, version_spec)
else:
package = mpl_executor.package_loader.load_class_package(
class_name, version_spec)
cls = package.find_class(class_name, search_requirements=False)
method_name, kwargs = self.action['method'], self.action['args']
return mpl_executor.run(cls, method_name, None, (), kwargs)
|
from typing import Sequence
from datetime import datetime
class Message(object):
def __init__(self, text: str, html: str, replies: Sequence[str], date: datetime, is_from_bot: bool):
self._text = text
self._html = html
self._replies = replies
self._date = date
self._is_from_bot = is_from_bot
@property
def text(self) -> str:
return self._text
@property
def html(self) -> str:
return self._html
@property
def date(self) -> datetime:
return self._date
@property
def replies(self) -> Sequence[str]:
return self._replies
@property
def is_from_bot(self) -> bool:
return self._is_from_bot
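# A minimal construction example (illustrative values only):
if __name__ == "__main__":
    msg = Message(
        text="hello",
        html="<p>hello</p>",
        replies=["hi", "hey"],
        date=datetime.now(),
        is_from_bot=False,
    )
    print(msg.text, msg.replies, msg.is_from_bot)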
|
# Copyright (C) 2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
from collections import OrderedDict
import json
import os.path as osp
from datumaro.components.annotation import AnnotationType
from datumaro.util import find
DATASET_META_FILE = 'dataset_meta.json'
def is_meta_file(path):
return osp.splitext(osp.basename(path))[1] == '.json'
def has_meta_file(path):
return osp.isfile(get_meta_file(path))
def get_meta_file(path):
return osp.join(path, DATASET_META_FILE)
def parse_meta_file(path):
meta_file = path
if osp.isdir(path):
meta_file = get_meta_file(path)
with open(meta_file) as f:
dataset_meta = json.load(f)
label_map = OrderedDict()
for label in dataset_meta.get('labels', []):
label_map[label] = None
colors = dataset_meta.get('segmentation_colors', [])
for i, label in dataset_meta.get('label_map', {}).items():
label_map[label] = None
if any(colors) and colors[int(i)] is not None:
label_map[label] = tuple(colors[int(i)])
return label_map
def save_meta_file(path, categories):
dataset_meta = {}
labels = [label.name for label in categories[AnnotationType.label]]
dataset_meta['labels'] = labels
if categories.get(AnnotationType.mask):
label_map = {}
segmentation_colors = []
for i, color in categories[AnnotationType.mask].colormap.items():
if color:
segmentation_colors.append([int(color[0]), int(color[1]), int(color[2])])
label_map[str(i)] = labels[i]
dataset_meta['label_map'] = label_map
dataset_meta['segmentation_colors'] = segmentation_colors
bg_label = find(categories[AnnotationType.mask].colormap.items(),
lambda x: x[1] == (0, 0, 0))
if bg_label is not None:
dataset_meta['background_label'] = str(bg_label[0])
meta_file = path
if osp.isdir(path):
meta_file = get_meta_file(path)
with open(meta_file, 'w') as f:
json.dump(dataset_meta, f)
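# For reference, save_meta_file above produces (and parse_meta_file consumes)
# a dataset_meta.json shaped roughly like this (illustrative labels/colors):
#
#   {
#       "labels": ["background", "person"],
#       "label_map": {"0": "background", "1": "person"},
#       "segmentation_colors": [[0, 0, 0], [255, 0, 0]],
#       "background_label": "0"
#   }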
|
#!/usr/bin/env python3.6
# Work with Python 3.6
import json
import time
from requests import get, post
with open("pool.json") as data_file:
pools = json.load(data_file)
with open("check_time.json") as data_file:
last_check = json.load(data_file)
with open("links.json") as data_file:
data = json.load(data_file)
for a in range(len(pools)):
if pools[a]["link"] == "https://comining.io/":
with post(pools[a]["api"], json={"method": "coins_list"}) as api:
if api.status_code == 200:
pool_api = api.json()
pools[a]["hash"] = int(pool_api["data"][0]["workersHashrate"])
else:
print(f"{pools[a]['api']} is down")
elif pools[a]["link"] == "https://aka.fairpool.xyz/":
with get(pools[a]["api"]) as api:
if api.status_code == 200:
pool_api = api.json()
pools[a]["hash"] = int(pool_api["pool"]["hashrate"])
else:
print(f"{pools[a]['api']} is down")
elif pools[a]["link"] == "https://aikapool.com/aka/":
with get(pools[a]["api"]) as api:
if api.status_code == 200:
pool_api = api.json()
pools[a]["hash"] = int(pool_api["pool_hashrate"])
else:
print(f"{pools[a]['api']} is down")
else:
with get(pools[a]["api"]) as api:
if api.status_code == 200:
pool_api = api.json()
pools[a]["hash"] = int(pool_api["hashrate"])
else:
print(f"{pools[a]['api']} is down")
pools.sort(key=lambda x: x["hash"])
with open("pool.json", "w") as file:
json.dump(pools, file, indent=2)
last_check["last_check"] = time.ctime() + " EET"
with open("check_time.json", "w") as file:
json.dump(last_check, file, indent=2)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.layers import ShapeSpec
from detectron2.utils.registry import Registry
from .backbone import Backbone
BACKBONE_REGISTRY = Registry("BACKBONE")
"""
Registry for backbones, which extract feature maps from images.
"""
def build_backbone(cfg, input_shape=None):
"""
Build a backbone from `cfg.MODEL.BACKBONE.NAME`.
Returns:
an instance of :class:`Backbone`
"""
if input_shape is None:
input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))
backbone_name = cfg.MODEL.BACKBONE.NAME
backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape)
assert isinstance(backbone, Backbone)
return backbone
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the model definition for the OverFeat network.
The definition for the network was obtained from:
OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks
Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
Yann LeCun, 2014
http://arxiv.org/abs/1312.6229
Usage:
with slim.arg_scope(overfeat.overfeat_arg_scope()):
outputs, end_points = overfeat.overfeat(inputs)
@@overfeat
"""
import tensorflow as tf
slim = tf.contrib.slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
def overfeat_arg_scope(weight_decay=0.0005):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_initializer=tf.zeros_initializer()):
with slim.arg_scope([slim.conv2d], padding='SAME'):
with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
return arg_sc
def overfeat(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='overfeat',
global_pool=False):
"""Contains the model definition for the OverFeat network.
The definition for the network was obtained from:
OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks
Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
Yann LeCun, 2014
http://arxiv.org/abs/1312.6229
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 231x231. To use in fully
convolutional mode, set spatial_squeeze to false.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer is
omitted and the input features to the logits layer are returned instead.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
spatial_squeeze: whether or not should squeeze the spatial dimensions of the
outputs. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
global_pool: Optional boolean flag. If True, the input to the classification
layer is avgpooled to size 1x1, for any input size. (This is not part
of the original OverFeat.)
Returns:
net: the output of the logits layer (if num_classes is a non-zero integer),
or the non-dropped-out input to the logits layer (if num_classes is 0 or
None).
end_points: a dict of tensors with intermediate activations.
"""
with tf.variable_scope(scope, 'overfeat', [inputs]) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
outputs_collections=end_points_collection):
net = slim.conv2d(inputs, 64, [11, 11], 4, padding='VALID',
scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net = slim.conv2d(net, 256, [5, 5], padding='VALID', scope='conv2')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = slim.conv2d(net, 512, [3, 3], scope='conv3')
net = slim.conv2d(net, 1024, [3, 3], scope='conv4')
net = slim.conv2d(net, 1024, [3, 3], scope='conv5')
net = slim.max_pool2d(net, [2, 2], scope='pool5')
# Use conv2d instead of fully_connected layers.
with slim.arg_scope([slim.conv2d],
weights_initializer=trunc_normal(0.005),
biases_initializer=tf.constant_initializer(0.1)):
net = slim.conv2d(net, 3072, [6, 6], padding='VALID', scope='fc6')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout6')
net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
      # Convert end_points_collection into an end_points dict.
end_points = slim.utils.convert_collection_to_dict(
end_points_collection)
if global_pool:
net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
end_points['global_pool'] = net
if num_classes:
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout7')
net = slim.conv2d(net, num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=tf.zeros_initializer(),
scope='fc8')
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
overfeat.default_image_size = 231
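# A minimal usage sketch (assumes a TF 1.x environment where tf.contrib.slim is available;
# inputs must be 231x231 for classification mode, as noted in the docstring above):
#
#   images = tf.placeholder(tf.float32, [None, 231, 231, 3])
#   with slim.arg_scope(overfeat_arg_scope()):
#       logits, end_points = overfeat(images, num_classes=1000, is_training=False)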
|
from distutils.core import setup
setup(
name = 'pyrelations',
packages = ['pyrelations'],
version = '0.1.1',
description = 'Small Python library for turning lists into relational record entries',
author = 'Shawn Niederriter',
author_email = 'shawnhitsback@gmail.com',
url = 'https://github.com/Sniedes722/pyRelations',
keywords = ['records','requests','sql','database','objects'],
classifiers = [
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
],
)
|
import os
import graphviz
from exploration import modelling
from exploration.data_exploration import X_train, y_train, X_test, y_test
from exploration.modelling import cv
from exploration.utils import BASE_DIR
from sklearn import tree
def format_result(result):
res = ('\n{result[name]}'
'\nbest_params_: {result[rs].best_params_}'
'\nscore: {result[score]}').format(result=result)
return res
model = {
'name': 'tree',
'model': tree.DecisionTreeClassifier,
'params': {
'criterion': ['gini', 'entropy'],
'splitter': ['best', 'random'],
'max_depth': range(1, 200),
        'min_samples_split': range(2, 200),  # scikit-learn requires min_samples_split >= 2
# 'min_samples_leaf': range(1, 200),
# 'min_weight_fraction_leaf': 0.0,
# 'max_features': None,
# 'random_state': None,
# 'max_leaf_nodes': None,
# 'min_impurity_decrease': 0.0,
# 'min_impurity_split': None,
# 'class_weight': None,
# 'presort': False,
},
}
result = modelling.run_hyper(model, X_train, y_train, X_test, y_test, cv)
print(format_result(result))
dot_data = tree.export_graphviz(
result['estimator'],
feature_names=X_test.columns,
# class_names=['target'],
out_file=None,
filled=True,
rounded=True,
# special_characters=True,
)
graph = graphviz.Source(
dot_data,
directory=os.path.join(BASE_DIR, 'doc/report/img/'),
)
graph.render()
|
## Convert a String to a Number!
## 8 kyu
## https://www.codewars.com/kata/544675c6f971f7399a000e79
def string_to_number(s):
return int(s)
|
import scrapy
from bs4 import BeautifulSoup
from cultureBigdata.items import CultureNewsItem, CultureBasicItem, CultureEventItem
import re
import json
import time
from datetime import datetime
def timestamp_to_str(timestamp):
time_local = time.localtime(timestamp)
dt = time.strftime("%Y-%m-%d %H:%M:%S", time_local)
return dt[:-3]
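# timestamp_to_str converts a Unix timestamp (seconds) to a 'YYYY-MM-DD HH:MM' string in the
# machine's local timezone, e.g. timestamp_to_str(1589025600) -> '2020-05-09 20:00' on a UTC+8
# host (the exact output depends on the local timezone).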
class Xinjiangwhyspider(scrapy.Spider):
name='xinjiangwhy'
start_urls = ['http://www.btggwh.com/xjwly/']
event_count=1
event_page_end=2
def start_requests(self):
for i in range(self.event_count,self.event_page_end+1):
url='http://www.btggwh.com/service/action/web/actionAndProjectListAll'
myFormData = {'pageSize': '8',
'pageNum': str(i),
'type': '',
'libcode':'xjwly',
'timeFormat': 'YY-MM-dd' }
yield scrapy.FormRequest(url, method='POST',formdata = myFormData, callback=self.event_parse,dont_filter = True)
def event_parse(self,response):
data=json.loads(response.body)
#print(data)
record_list=data['data']['list']
for record in record_list:
print(record)
item=CultureEventItem()
item['pav_name']='新疆生产建设兵团文化云'
item['url']='http://www.btggwh.com/xjwly/view/whactivity/activity-info1.html?id='+str(record['id'])
item['place']=record['addrDetail']
item['activity_name']=record['name']
item['activity_time']=datetime.fromtimestamp(record['startTime'] / 1000.0).strftime('%Y-%m-%d')+' '+datetime.fromtimestamp(record['endTime'] / 1000.0).strftime('%Y-%m-%d')
#record['startTimeStr']+"到"+record['endTimeStr']
url='http://www.btggwh.com/service/action/web/detailsActivity'
myFormData={
'id':str(record['id'])
}
yield scrapy.FormRequest(url,method='POST',formdata = myFormData, meta={'item':item},callback=self.event_text_parse,dont_filter = True)
def event_text_parse(self,response):
data=json.loads(response.body)
#print(data['data']['actionSpecial'])
item=response.meta['item']
soup=BeautifulSoup(data['data']['actionSpecial']['specialDesc'],'html.parser')
item['remark']=soup.text.replace('\n','').replace('\xa0','').replace('\u3000','')
try:
item['place']=data['data']['commonAddress']['addrName']+data['data']['commonAddress']['addrDetail']
except:
pass
return item
|
from random import randrange
pole = '--------------------'
import util
import ai
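# 1-D tic-tac-toe ("piskvorky") on a 20-character board. The Czech identifiers translate
# roughly as: vyhodnot = evaluate the board, tah_hrace = player's move, tah_pocitace =
# computer's move, pole = board, remiza = draw. The player plays 'x', the computer 'o',
# and three in a row wins.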
def vyhodnot(pole):
if ("xxx" in pole):
return "x"
elif ("ooo" in pole):
return "o"
elif ("-" not in pole):
return "!"
else:
return "-"
def tah_hrace(pole):
symbol = 'x'
while True:
cislo_policka = int(input('Na kterou pozici chces hrat? '))
if cislo_policka < 0 or cislo_policka > 19:
print('zadal jsi spatnou pozici')
elif pole[cislo_policka] != '-':
print('Policko je obsazene')
else:
return util.tah(pole, cislo_policka, symbol)
def piskvorky1d():
pole = "--------------------"
while True:
pole = tah_hrace (pole)
aktualni_stav = vyhodnot(pole)
if aktualni_stav == '-':
pole = ai.tah_pocitace (pole)
print(pole)
aktualni_stav = vyhodnot(pole)
if aktualni_stav == '!':
print ('Remiza')
break
elif aktualni_stav == 'x':
print ('Vyhral clovek')
break
elif aktualni_stav == 'o':
print ('Vyhral pocitac')
break
print(pole)
|
from setuptools import setup
setup(
name='pyCoinMiner',
packages=['test'],
test_suite='test',
)
|
from django.contrib import admin
from .models import User, Location, LocationUser
# Register your models here.
class UserAdmin(admin.ModelAdmin):
fields = ('resources', )
class LocationAdmin(admin.ModelAdmin):
fields = ('left_top', 'resources')
class LocationUserAdmin(admin.ModelAdmin):
fields = ('user', 'location')
admin.site.register(User, UserAdmin)
admin.site.register(Location, LocationAdmin)
admin.site.register(LocationUser, LocationUserAdmin)
|
from SimPEG import Survey, Problem, Utils, np, sp, Solver as SimpegSolver
from scipy.constants import mu_0
from SurveyFDEM import SurveyFDEM
from FieldsFDEM import FieldsFDEM, FieldsFDEM_e, FieldsFDEM_b, FieldsFDEM_h, FieldsFDEM_j
from simpegEM.Base import BaseEMProblem
from simpegEM.Utils.EMUtils import omega
class BaseFDEMProblem(BaseEMProblem):
"""
We start by looking at Maxwell's equations in the electric
field \\\(\\\mathbf{e}\\\) and the magnetic flux
density \\\(\\\mathbf{b}\\\)
.. math ::
\mathbf{C} \mathbf{e} + i \omega \mathbf{b} = \mathbf{s_m} \\\\
{\mathbf{C}^T \mathbf{M_{\mu^{-1}}^f} \mathbf{b} - \mathbf{M_{\sigma}^e} \mathbf{e} = \mathbf{M^e} \mathbf{s_e}}
if using the E-B formulation (:code:`ProblemFDEM_e`
or :code:`ProblemFDEM_b`) or the magnetic field
\\\(\\\mathbf{h}\\\) and current density \\\(\\\mathbf{j}\\\)
.. math ::
\mathbf{C}^T \mathbf{M_{\\rho}^f} \mathbf{j} + i \omega \mathbf{M_{\mu}^e} \mathbf{h} = \mathbf{M^e} \mathbf{s_m} \\\\
\mathbf{C} \mathbf{h} - \mathbf{j} = \mathbf{s_e}
if using the H-J formulation (:code:`ProblemFDEM_j` or :code:`ProblemFDEM_h`).
The problem performs the elimination so that we are solving the system for \\\(\\\mathbf{e},\\\mathbf{b},\\\mathbf{j} \\\) or \\\(\\\mathbf{h}\\\)
"""
surveyPair = SurveyFDEM
fieldsPair = FieldsFDEM
def fields(self, m=None):
"""
Solve the forward problem for the fields.
"""
self.curModel = m
F = self.fieldsPair(self.mesh, self.survey)
for freq in self.survey.freqs:
A = self.getA(freq)
rhs = self.getRHS(freq)
Ainv = self.Solver(A, **self.solverOpts)
sol = Ainv * rhs
Srcs = self.survey.getSrcByFreq(freq)
ftype = self._fieldType + 'Solution'
F[Srcs, ftype] = sol
return F
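    # Note: for each frequency the system matrix is factored once (Ainv) and that
    # factorization is reused for all sources at that frequency; Jvec/Jtvec below follow
    # the same per-frequency pattern for the sensitivity products.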
def Jvec(self, m, v, f=None):
"""
Sensitivity times a vector
"""
if f is None:
f = self.fields(m)
self.curModel = m
Jv = self.dataPair(self.survey)
for freq in self.survey.freqs:
dA_du = self.getA(freq) #
dA_duI = self.Solver(dA_du, **self.solverOpts)
for src in self.survey.getSrcByFreq(freq):
ftype = self._fieldType + 'Solution'
u_src = f[src, ftype]
dA_dm = self.getADeriv_m(freq, u_src, v)
dRHS_dm = self.getRHSDeriv_m(src, v)
if dRHS_dm is None:
du_dm = dA_duI * ( - dA_dm )
else:
du_dm = dA_duI * ( - dA_dm + dRHS_dm )
for rx in src.rxList:
# df_duFun = u.deriv_u(rx.fieldsUsed, m)
df_duFun = getattr(f, '_%sDeriv_u'%rx.projField, None)
df_du = df_duFun(src, du_dm, adjoint=False)
if df_du is not None:
du_dm = df_du
df_dmFun = getattr(f, '_%sDeriv_m'%rx.projField, None)
df_dm = df_dmFun(src, v, adjoint=False)
if df_dm is not None:
du_dm += df_dm
P = lambda v: rx.projectFieldsDeriv(src, self.mesh, f, v) # wrt u, also have wrt m
Jv[src, rx] = P(du_dm)
return Utils.mkvc(Jv)
def Jtvec(self, m, v, f=None):
"""
Sensitivity transpose times a vector
"""
if f is None:
f = self.fields(m)
self.curModel = m
# Ensure v is a data object.
if not isinstance(v, self.dataPair):
v = self.dataPair(self.survey, v)
Jtv = np.zeros(m.size)
for freq in self.survey.freqs:
AT = self.getA(freq).T
ATinv = self.Solver(AT, **self.solverOpts)
for src in self.survey.getSrcByFreq(freq):
ftype = self._fieldType + 'Solution'
u_src = f[src, ftype]
for rx in src.rxList:
PTv = rx.projectFieldsDeriv(src, self.mesh, f, v[src, rx], adjoint=True) # wrt u, need possibility wrt m
df_duTFun = getattr(f, '_%sDeriv_u'%rx.projField, None)
df_duT = df_duTFun(src, PTv, adjoint=True)
if df_duT is not None:
dA_duIT = ATinv * df_duT
else:
dA_duIT = ATinv * PTv
dA_dmT = self.getADeriv_m(freq, u_src, dA_duIT, adjoint=True)
dRHS_dmT = self.getRHSDeriv_m(src, dA_duIT, adjoint=True)
if dRHS_dmT is None:
du_dmT = - dA_dmT
else:
du_dmT = -dA_dmT + dRHS_dmT
df_dmFun = getattr(f, '_%sDeriv_m'%rx.projField, None)
dfT_dm = df_dmFun(src, PTv, adjoint=True)
if dfT_dm is not None:
du_dmT += dfT_dm
real_or_imag = rx.projComp
if real_or_imag == 'real':
Jtv += du_dmT.real
elif real_or_imag == 'imag':
Jtv += - du_dmT.real
else:
raise Exception('Must be real or imag')
return Jtv
def getSourceTerm(self, freq):
"""
Evaluates the sources for a given frequency and puts them in matrix form
:param float freq: Frequency
:rtype: numpy.ndarray (nE or nF, nSrc)
:return: S_m, S_e
"""
Srcs = self.survey.getSrcByFreq(freq)
        if self._eqLocs == 'FE':
S_m = np.zeros((self.mesh.nF,len(Srcs)), dtype=complex)
S_e = np.zeros((self.mesh.nE,len(Srcs)), dtype=complex)
        elif self._eqLocs == 'EF':
S_m = np.zeros((self.mesh.nE,len(Srcs)), dtype=complex)
S_e = np.zeros((self.mesh.nF,len(Srcs)), dtype=complex)
for i, src in enumerate(Srcs):
smi, sei = src.eval(self)
if smi is not None:
S_m[:,i] = Utils.mkvc(smi)
if sei is not None:
S_e[:,i] = Utils.mkvc(sei)
return S_m, S_e
##########################################################################################
################################ E-B Formulation #########################################
##########################################################################################
class ProblemFDEM_e(BaseFDEMProblem):
"""
By eliminating the magnetic flux density using
.. math ::
\mathbf{b} = \\frac{1}{i \omega}\\left(-\mathbf{C} \mathbf{e} + \mathbf{s_m}\\right)
we can write Maxwell's equations as a second order system in \\\(\\\mathbf{e}\\\) only:
.. math ::
\\left(\mathbf{C}^T \mathbf{M_{\mu^{-1}}^f} \mathbf{C}+ i \omega \mathbf{M^e_{\sigma}} \\right)\mathbf{e} = \mathbf{C}^T \mathbf{M_{\mu^{-1}}^f}\mathbf{s_m} -i\omega\mathbf{M^e}\mathbf{s_e}
which we solve for \\\(\\\mathbf{e}\\\).
"""
_fieldType = 'e'
_eqLocs = 'FE'
fieldsPair = FieldsFDEM_e
def __init__(self, mesh, **kwargs):
BaseFDEMProblem.__init__(self, mesh, **kwargs)
def getA(self, freq):
"""
.. math ::
\mathbf{A} = \mathbf{C}^T \mathbf{M_{\mu^{-1}}^f} \mathbf{C} + i \omega \mathbf{M^e_{\sigma}}
:param float freq: Frequency
:rtype: scipy.sparse.csr_matrix
:return: A
"""
MfMui = self.MfMui
MeSigma = self.MeSigma
C = self.mesh.edgeCurl
return C.T*MfMui*C + 1j*omega(freq)*MeSigma
def getADeriv_m(self, freq, u, v, adjoint=False):
dsig_dm = self.curModel.sigmaDeriv
dMe_dsig = self.MeSigmaDeriv(u)
if adjoint:
return 1j * omega(freq) * ( dMe_dsig.T * v )
return 1j * omega(freq) * ( dMe_dsig * v )
def getRHS(self, freq):
"""
.. math ::
\mathbf{RHS} = \mathbf{C}^T \mathbf{M_{\mu^{-1}}^f}\mathbf{s_m} -i\omega\mathbf{M_e}\mathbf{s_e}
:param float freq: Frequency
:rtype: numpy.ndarray (nE, nSrc)
:return: RHS
"""
S_m, S_e = self.getSourceTerm(freq)
C = self.mesh.edgeCurl
MfMui = self.MfMui
# RHS = C.T * (MfMui * S_m) -1j * omega(freq) * Me * S_e
RHS = C.T * (MfMui * S_m) -1j * omega(freq) * S_e
return RHS
    def getRHSDeriv_m(self, src, v, adjoint=False):
        freq = src.freq  # the source carries its frequency; 'freq' was previously undefined in this scope
        C = self.mesh.edgeCurl
        MfMui = self.MfMui
        S_mDeriv, S_eDeriv = src.evalDeriv(self, adjoint)
if adjoint:
dRHS = MfMui * (C * v)
S_mDerivv = S_mDeriv(dRHS)
S_eDerivv = S_eDeriv(v)
if S_mDerivv is not None and S_eDerivv is not None:
return S_mDerivv - 1j * omega(freq) * S_eDerivv
elif S_mDerivv is not None:
return S_mDerivv
elif S_eDerivv is not None:
return - 1j * omega(freq) * S_eDerivv
else:
return None
else:
S_mDerivv, S_eDerivv = S_mDeriv(v), S_eDeriv(v)
if S_mDerivv is not None and S_eDerivv is not None:
return C.T * (MfMui * S_mDerivv) -1j * omega(freq) * S_eDerivv
elif S_mDerivv is not None:
return C.T * (MfMui * S_mDerivv)
elif S_eDerivv is not None:
return -1j * omega(freq) * S_eDerivv
else:
return None
class ProblemFDEM_b(BaseFDEMProblem):
"""
We eliminate \\\(\\\mathbf{e}\\\) using
.. math ::
\mathbf{e} = \mathbf{M^e_{\sigma}}^{-1} \\left(\mathbf{C}^T \mathbf{M_{\mu^{-1}}^f} \mathbf{b} - \mathbf{s_e}\\right)
and solve for \\\(\\\mathbf{b}\\\) using:
.. math ::
\\left(\mathbf{C} \mathbf{M^e_{\sigma}}^{-1} \mathbf{C}^T \mathbf{M_{\mu^{-1}}^f} + i \omega \\right)\mathbf{b} = \mathbf{s_m} + \mathbf{M^e_{\sigma}}^{-1}\mathbf{M^e}\mathbf{s_e}
.. note ::
The inverse problem will not work with full anisotropy
"""
_fieldType = 'b'
_eqLocs = 'FE'
fieldsPair = FieldsFDEM_b
def __init__(self, mesh, **kwargs):
BaseFDEMProblem.__init__(self, mesh, **kwargs)
def getA(self, freq):
"""
.. math ::
\mathbf{A} = \mathbf{C} \mathbf{M^e_{\sigma}}^{-1} \mathbf{C}^T \mathbf{M_{\mu^{-1}}^f} + i \omega
:param float freq: Frequency
:rtype: scipy.sparse.csr_matrix
:return: A
"""
MfMui = self.MfMui
MeSigmaI = self.MeSigmaI
C = self.mesh.edgeCurl
iomega = 1j * omega(freq) * sp.eye(self.mesh.nF)
A = C * (MeSigmaI * (C.T * MfMui)) + iomega
if self._makeASymmetric is True:
return MfMui.T*A
return A
def getADeriv_m(self, freq, u, v, adjoint=False):
MfMui = self.MfMui
C = self.mesh.edgeCurl
MeSigmaIDeriv = self.MeSigmaIDeriv
vec = C.T * (MfMui * u)
MeSigmaIDeriv = MeSigmaIDeriv(vec)
if adjoint:
if self._makeASymmetric is True:
v = MfMui * v
return MeSigmaIDeriv.T * (C.T * v)
if self._makeASymmetric is True:
return MfMui.T * ( C * ( MeSigmaIDeriv * v ) )
return C * ( MeSigmaIDeriv * v )
def getRHS(self, freq):
"""
.. math ::
\mathbf{RHS} = \mathbf{s_m} + \mathbf{M^e_{\sigma}}^{-1}\mathbf{s_e}
:param float freq: Frequency
:rtype: numpy.ndarray (nE, nSrc)
:return: RHS
"""
S_m, S_e = self.getSourceTerm(freq)
C = self.mesh.edgeCurl
MeSigmaI = self.MeSigmaI
# Me = self.Me
RHS = S_m + C * ( MeSigmaI * S_e )
if self._makeASymmetric is True:
MfMui = self.MfMui
return MfMui.T * RHS
return RHS
def getRHSDeriv_m(self, src, v, adjoint=False):
C = self.mesh.edgeCurl
S_m, S_e = src.eval(self)
MfMui = self.MfMui
# Me = self.Me
if self._makeASymmetric and adjoint:
v = self.MfMui * v
if S_e is not None:
MeSigmaIDeriv = self.MeSigmaIDeriv(S_e)
if not adjoint:
RHSderiv = C * (MeSigmaIDeriv * v)
elif adjoint:
RHSderiv = MeSigmaIDeriv.T * (C.T * v)
else:
RHSderiv = None
S_mDeriv, S_eDeriv = src.evalDeriv(self, adjoint)
S_mDeriv, S_eDeriv = S_mDeriv(v), S_eDeriv(v)
if S_mDeriv is not None and S_eDeriv is not None:
if not adjoint:
SrcDeriv = S_mDeriv + C * (self.MeSigmaI * S_eDeriv)
elif adjoint:
                SrcDeriv = S_mDeriv + self.MeSigmaI.T * ( C.T * S_eDeriv)
elif S_mDeriv is not None:
SrcDeriv = S_mDeriv
elif S_eDeriv is not None:
if not adjoint:
SrcDeriv = C * (self.MeSigmaI * S_eDeriv)
elif adjoint:
SrcDeriv = self.MeSigmaI.T * ( C.T * S_eDeriv)
else:
SrcDeriv = None
if RHSderiv is not None and SrcDeriv is not None:
RHSderiv += SrcDeriv
elif SrcDeriv is not None:
RHSderiv = SrcDeriv
if RHSderiv is not None:
if self._makeASymmetric is True and not adjoint:
return MfMui.T * RHSderiv
return RHSderiv
##########################################################################################
################################ H-J Formulation #########################################
##########################################################################################
class ProblemFDEM_j(BaseFDEMProblem):
"""
We eliminate \\\(\\\mathbf{h}\\\) using
.. math ::
\mathbf{h} = \\frac{1}{i \omega} \mathbf{M_{\mu}^e}^{-1} \\left(-\mathbf{C}^T \mathbf{M_{\\rho}^f} \mathbf{j} + \mathbf{M^e} \mathbf{s_m} \\right)
and solve for \\\(\\\mathbf{j}\\\) using
.. math ::
\\left(\mathbf{C} \mathbf{M_{\mu}^e}^{-1} \mathbf{C}^T \mathbf{M_{\\rho}^f} + i \omega\\right)\mathbf{j} = \mathbf{C} \mathbf{M_{\mu}^e}^{-1} \mathbf{M^e} \mathbf{s_m} -i\omega\mathbf{s_e}
.. note::
This implementation does not yet work with full anisotropy!!
"""
_fieldType = 'j'
_eqLocs = 'EF'
fieldsPair = FieldsFDEM_j
def __init__(self, mesh, **kwargs):
BaseFDEMProblem.__init__(self, mesh, **kwargs)
def getA(self, freq):
"""
.. math ::
\\mathbf{A} = \\mathbf{C} \\mathbf{M^e_{mu^{-1}}} \\mathbf{C}^T \\mathbf{M^f_{\\sigma^{-1}}} + i\\omega
:param float freq: Frequency
:rtype: scipy.sparse.csr_matrix
:return: A
"""
MeMuI = self.MeMuI
MfRho = self.MfRho
C = self.mesh.edgeCurl
iomega = 1j * omega(freq) * sp.eye(self.mesh.nF)
A = C * MeMuI * C.T * MfRho + iomega
if self._makeASymmetric is True:
return MfRho.T*A
return A
def getADeriv_m(self, freq, u, v, adjoint=False):
"""
In this case, we assume that electrical conductivity, \\\(\\\sigma\\\) is the physical property of interest (i.e. \\\(\\\sigma\\\) = model.transform). Then we want
.. math ::
\\frac{\mathbf{A(\sigma)} \mathbf{v}}{d \\mathbf{m}} &= \\mathbf{C} \\mathbf{M^e_{mu^{-1}}} \\mathbf{C^T} \\frac{d \\mathbf{M^f_{\\sigma^{-1}}}}{d \\mathbf{m}}
&= \\mathbf{C} \\mathbf{M^e_{mu}^{-1}} \\mathbf{C^T} \\frac{d \\mathbf{M^f_{\\sigma^{-1}}}}{d \\mathbf{\\sigma^{-1}}} \\frac{d \\mathbf{\\sigma^{-1}}}{d \\mathbf{\\sigma}} \\frac{d \\mathbf{\\sigma}}{d \\mathbf{m}}
"""
MeMuI = self.MeMuI
MfRho = self.MfRho
C = self.mesh.edgeCurl
MfRhoDeriv_m = self.MfRhoDeriv(u)
if adjoint:
if self._makeASymmetric is True:
v = MfRho * v
return MfRhoDeriv_m.T * (C * (MeMuI.T * (C.T * v)))
if self._makeASymmetric is True:
return MfRho.T * (C * ( MeMuI * (C.T * (MfRhoDeriv_m * v) )))
return C * (MeMuI * (C.T * (MfRhoDeriv_m * v)))
def getRHS(self, freq):
"""
.. math ::
\mathbf{RHS} = \mathbf{C} \mathbf{M_{\mu}^e}^{-1}\mathbf{s_m} -i\omega \mathbf{s_e}
:param float freq: Frequency
:rtype: numpy.ndarray (nE, nSrc)
:return: RHS
"""
S_m, S_e = self.getSourceTerm(freq)
C = self.mesh.edgeCurl
MeMuI = self.MeMuI
RHS = C * (MeMuI * S_m) - 1j * omega(freq) * S_e
if self._makeASymmetric is True:
MfRho = self.MfRho
return MfRho.T*RHS
return RHS
    def getRHSDeriv_m(self, src, v, adjoint=False):
        freq = src.freq  # the source carries its frequency; 'freq' was previously undefined in this scope
        C = self.mesh.edgeCurl
        MeMuI = self.MeMuI
        S_mDeriv, S_eDeriv = src.evalDeriv(self, adjoint)
if adjoint:
if self._makeASymmetric:
MfRho = self.MfRho
v = MfRho*v
S_mDerivv = S_mDeriv(MeMuI.T * (C.T * v))
S_eDerivv = S_eDeriv(v)
if S_mDerivv is not None and S_eDerivv is not None:
return S_mDerivv - 1j * omega(freq) * S_eDerivv
elif S_mDerivv is not None:
return S_mDerivv
elif S_eDerivv is not None:
return - 1j * omega(freq) * S_eDerivv
else:
return None
else:
S_mDerivv, S_eDerivv = S_mDeriv(v), S_eDeriv(v)
if S_mDerivv is not None and S_eDerivv is not None:
RHSDeriv = C * (MeMuI * S_mDerivv) - 1j * omega(freq) * S_eDerivv
elif S_mDerivv is not None:
RHSDeriv = C * (MeMuI * S_mDerivv)
elif S_eDerivv is not None:
RHSDeriv = - 1j * omega(freq) * S_eDerivv
else:
return None
if self._makeASymmetric:
MfRho = self.MfRho
return MfRho.T * RHSDeriv
return RHSDeriv
class ProblemFDEM_h(BaseFDEMProblem):
"""
We eliminate \\\(\\\mathbf{j}\\\) using
.. math ::
\mathbf{j} = \mathbf{C} \mathbf{h} - \mathbf{s_e}
and solve for \\\(\\\mathbf{h}\\\) using
.. math ::
\\left(\mathbf{C}^T \mathbf{M_{\\rho}^f} \mathbf{C} + i \omega \mathbf{M_{\mu}^e}\\right) \mathbf{h} = \mathbf{M^e} \mathbf{s_m} + \mathbf{C}^T \mathbf{M_{\\rho}^f} \mathbf{s_e}
"""
_fieldType = 'h'
_eqLocs = 'EF'
fieldsPair = FieldsFDEM_h
def __init__(self, mesh, **kwargs):
BaseFDEMProblem.__init__(self, mesh, **kwargs)
def getA(self, freq):
"""
.. math ::
\mathbf{A} = \mathbf{C}^T \mathbf{M_{\\rho}^f} \mathbf{C} + i \omega \mathbf{M_{\mu}^e}
:param float freq: Frequency
:rtype: scipy.sparse.csr_matrix
:return: A
"""
MeMu = self.MeMu
MfRho = self.MfRho
C = self.mesh.edgeCurl
return C.T * (MfRho * C) + 1j*omega(freq)*MeMu
def getADeriv_m(self, freq, u, v, adjoint=False):
MeMu = self.MeMu
C = self.mesh.edgeCurl
MfRhoDeriv_m = self.MfRhoDeriv(C*u)
if adjoint:
return MfRhoDeriv_m.T * (C * v)
return C.T * (MfRhoDeriv_m * v)
def getRHS(self, freq):
"""
.. math ::
\mathbf{RHS} = \mathbf{M^e} \mathbf{s_m} + \mathbf{C}^T \mathbf{M_{\\rho}^f} \mathbf{s_e}
:param float freq: Frequency
:rtype: numpy.ndarray (nE, nSrc)
:return: RHS
"""
S_m, S_e = self.getSourceTerm(freq)
C = self.mesh.edgeCurl
MfRho = self.MfRho
RHS = S_m + C.T * ( MfRho * S_e )
return RHS
def getRHSDeriv_m(self, src, v, adjoint=False):
_, S_e = src.eval(self)
C = self.mesh.edgeCurl
MfRho = self.MfRho
RHSDeriv = None
if S_e is not None:
MfRhoDeriv = self.MfRhoDeriv(S_e)
if not adjoint:
RHSDeriv = C.T * (MfRhoDeriv * v)
elif adjoint:
RHSDeriv = MfRhoDeriv.T * (C * v)
S_mDeriv, S_eDeriv = src.evalDeriv(self, adjoint)
S_mDeriv = S_mDeriv(v)
S_eDeriv = S_eDeriv(v)
        if S_mDeriv is not None:
            if RHSDeriv is not None:
                RHSDeriv += S_mDeriv  # already evaluated above; do not call it again
            else:
                RHSDeriv = S_mDeriv
        if S_eDeriv is not None:
            if RHSDeriv is not None:
                RHSDeriv += C.T * (MfRho * S_eDeriv)  # use the source derivative, not S_e itself
            else:
                RHSDeriv = C.T * (MfRho * S_eDeriv)
return RHSDeriv
|
# Generated by Django 3.0.6 on 2020-05-21 10:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('created_date', models.DateTimeField(auto_now_add=True)),
('slug', models.SlugField(max_length=100, unique=True)),
('content', models.TextField(blank=True)),
('user', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Entry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('slug', models.SlugField(max_length=100)),
('published_date', models.DateTimeField(auto_now_add=True)),
('publish', models.BooleanField(default=True)),
('is_page', models.BooleanField(default=False)),
('content', models.TextField()),
('blog', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blogs.Blog')),
],
),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 10 12:56:56 2019
@author: kylasemmendinger
"""
# import python libraries
import pandas as pd
import os
# back out a directory to load python functions from "Scripts" folder
org_dir_name = os.path.dirname(os.path.realpath('SensIndices_RCPlots.py'))
parent_dir_name = os.path.dirname(os.path.dirname(os.path.realpath('SensIndices_RCPlots.py')))
os.chdir(parent_dir_name + "/Scripts")
# load python functions from ‘Scripts’ folder
import delta
import sobol
import ols
import radial_conv_plots
import magnitude_percentile_plots
# move back into case study 0 folder
os.chdir(org_dir_name)
# Define the model inputs
problem = {
'num_vars': 11,
'names': ['w', 'n_imperv', 'n_perv', 's_imperv', 's_perv', 'k_sat', 'per_routed', 'cmelt', 'Tb', 'A1', 'B1'],
'bounds': [[500, 1500], # meters
[0.01, 0.2],
[0.01, 0.2],
[0, 10],
[0, 10],
[0.01, 10],
[0, 100],
[0, 4],
[-3, 3],
[0.0001, 0.01],
[1, 3]]
}
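# The 'problem' dict above follows the SALib convention (num_vars / names / bounds). A rough
# sketch of how such a definition is typically paired with Saltelli sampling and Sobol
# analysis using SALib directly (the local delta/sobol/ols modules presumably wrap similar
# calls; the sample size 1024 is illustrative only):
#
#   from SALib.sample import saltelli
#   from SALib.analyze import sobol as sobol_analyze
#   param_values = saltelli.sample(problem, 1024)   # rows feed the model runs
#   # Si = sobol_analyze.analyze(problem, Y)        # Y = objective values per run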
# load in model parameter sets (Saltelli sampled) and objective function values
pars = pd.read_csv("input/params.csv", header = 0)
OF = pd.read_csv("input/OF_values.csv")
# save the parameter names
param_names = problem['names']
# calculate Sobol first-, second-, and total order indices --> MUST BE BASED ON SALTELLI SAMPLING SCHEME
results_SI = []
results_SI = sobol.objective_function_sobol(problem, OF)
# create radial convergence plots based on results_SI
radial_conv_plots.radial_conv_plots(problem, results_SI, OF)
# calculate delta indices and sobol first-order indices
results_delta = []
results_delta = delta.objective_function_delta(problem, pars, OF)
# calculate R^2 from OLS regression
results_R2 = []
results_R2 = ols.objective_function_OLS(OF, pars, param_names)
|
net_device = {
'ip_addr':'10.5.6.6',
'vendor':'cisco',
'platform':'ios',
'username':'malford',
'password':'chicken',
}
bgp_fields = {
'bgp_as':'65002',
'peer_as':'20551',
'peer_ip':'10.232.232.2'
}
net_device.update(bgp_fields)
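# After update(), net_device holds both the device connection details and the BGP fields;
# dict.update merges bgp_fields into net_device in place.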
print("These are the dictionary keys: ")
for x in net_device:
print(x)
print('\n')
print('\n')
print("These are the keys and values: ")
for x in net_device.items():
print(x)
|
import numpy as np
from copy import deepcopy
from sklearn.linear_model import LinearRegression
class Transform:
def __init__(self, parent=None):
self.__parent = parent
def __repr__(self):
return f'Transform object of {self.__parent}'
def level(self):
"""수평맞추기
"""
avatar = deepcopy(self.__parent)
data = avatar.get_node_data(['lfoot', 'rfoot'])
# Horizontal Regression on lfoot and rfoot node data
model = LinearRegression()
model.fit(data[['x','y']], data[['z']])
a, b = model.coef_[0]
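        # The fitted plane is z = a*x + b*y + c, so an (unnormalised) normal vector is
        # (-a, -b, 1); rotating that normal onto (0, 0, 1) levels the foot plane.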
vector1 = np.stack([np.array([-a, -b, 1])]*len(avatar.data))
vector2 = np.stack([np.array([ 0, 0, 1])]*len(avatar.data))
R = avatar.get_rotation_matrix(vector1, vector2)
avatar = avatar.transform.rotate(R)
return avatar
def add(self, vector):
avatar = deepcopy(self.__parent)
for k, v in avatar.nodes.items():
cols = avatar._nodes[k]
avatar.data[cols] = v+vector
avatar.set_nodes()
avatar.set_vectors()
return avatar
def sub(self, vector):
avatar = deepcopy(self.__parent)
for k, v in avatar.nodes.items():
cols = avatar._nodes[k]
avatar.data[cols] = v-vector
avatar.set_nodes()
avatar.set_vectors()
return avatar
def fix(self, node):
avatar = deepcopy(self.__parent)
for k, v in avatar.nodes.items():
cols = avatar._nodes[k]
avatar.data[cols] = v-avatar[node]
avatar.set_nodes()
avatar.set_vectors()
return avatar
def rotate(self, rotation_matrix):
avatar = deepcopy(self.__parent)
for k, v in avatar.nodes.items():
cols = avatar._nodes[k]
avatar.data[cols] = np.einsum('nij,nj->ni', rotation_matrix, v)
avatar.set_nodes()
avatar.set_vectors()
return avatar
def align_on_axis(self, offset_node='anus', direction_node='chest', axis='y'):
avatar = deepcopy(self.__parent)
avatar = avatar.transform.fix(node=offset_node)
R = avatar.get_rotation_matrix(avatar[direction_node], avatar.get_unit_vector(axis=axis))
avatar = avatar.transform.rotate(R)
return avatar
def align_on_plane(self, offset_node='anus', direction_node='chest', plane='xz'):
axis = next(iter(set('xyz')-set(plane)))
avatar = deepcopy(self.__parent)
avatar = avatar.transform.fix(node=offset_node)
R = avatar.get_rotation_matrix(avatar.xy_projection(direction_node), avatar.get_unit_vector(axis=axis))
avatar = avatar.transform.rotate(R)
return avatar
|
import os
from py.path import local
import pypy
from pypy.tool.udir import udir
from pypy.translator.c.test.test_genc import compile
from pypy.rpython import extregistry
import errno
import sys
import py
def getllimpl(fn):
return extregistry.lookup(fn).lltypeimpl
def test_access():
filename = str(udir.join('test_access.txt'))
fd = file(filename, 'w')
fd.close()
for mode in os.R_OK, os.W_OK, os.X_OK, os.R_OK | os.W_OK | os.X_OK:
result = getllimpl(os.access)(filename, mode)
assert result == os.access(filename, mode)
def test_times():
"""
posix.times should compile as an RPython function and should return a
five-tuple giving float-representations (seconds, effectively) of the four
fields from the underlying struct tms and the return value.
"""
times = compile(lambda: os.times(), ())()
assert isinstance(times, tuple)
assert len(times) == 5
for value in times:
assert isinstance(value, float)
def test__getfullpathname():
if os.name != 'nt':
py.test.skip('nt specific function')
posix = __import__(os.name)
sysdrv = os.getenv('SystemDrive', 'C:')
stuff = sysdrv + 'stuff'
data = getllimpl(posix._getfullpathname)(stuff)
assert data == posix._getfullpathname(stuff)
# the most intriguing failure of ntpath.py should not repeat, here:
assert not data.endswith(stuff)
def test_getcwd():
data = getllimpl(os.getcwd)()
assert data == os.getcwd()
def test_strerror():
data = getllimpl(os.strerror)(2)
assert data == os.strerror(2)
def test_system():
filename = str(udir.join('test_system.txt'))
arg = 'python -c "print 1+1" > %s' % filename
data = getllimpl(os.system)(arg)
assert data == 0
assert file(filename).read().strip() == '2'
os.unlink(filename)
EXECVE_ENV = {"foo": "bar", "baz": "quux"}
execve_tests = str(local(__file__).dirpath().join('execve_tests.py'))
def test_execve():
if os.name != 'posix':
py.test.skip('posix specific function')
base = " ".join([
sys.executable,
execve_tests,
str(local(pypy.__file__).join('..', '..')),
''])
# Test exit status and code
result = os.system(base + "execve_true")
assert os.WIFEXITED(result)
assert os.WEXITSTATUS(result) == 0
result = os.system(base + "execve_false")
assert os.WIFEXITED(result)
assert os.WEXITSTATUS(result) == 1
# Test environment
result = os.popen(base + "execve_env").read()
assert dict([line.split('=') for line in result.splitlines()]) == EXECVE_ENV
# These won't actually execute anything, so they don't need a child process
# helper.
execve = getllimpl(os.execve)
# If the target does not exist, an OSError should result
info = py.test.raises(
OSError, execve, execve_tests + "-non-existent", [], {})
assert info.value.errno == errno.ENOENT
# If the target is not executable, an OSError should result
info = py.test.raises(
OSError, execve, execve_tests, [], {})
assert info.value.errno == errno.EACCES
class ExpectTestOs:
def setup_class(cls):
if not hasattr(os, 'ttyname'):
py.test.skip("no ttyname")
def test_ttyname(self):
import os
import py
from pypy.rpython.test.test_llinterp import interpret
def ll_to_string(s):
return ''.join(s.chars)
def f(num):
try:
return os.ttyname(num)
except OSError:
return ''
assert ll_to_string(interpret(f, [0])) == f(0)
assert ll_to_string(interpret(f, [338])) == ''
|
#!/usr/bin/env python
'''
Copyright (c) 2020 Modul 9/HiFiBerry
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
# !/usr/bin/env python
import sys
import logging
from math import sqrt, log
from struct import unpack_from
import os
import alsaaudio
output_stopped = True
# Which audio device to use
DEVICE_NAME = 'default'
# The maximum value which can be read from the input device (in other words, the value for maximum volume)
SAMPLE_MAXVAL = 32768
CHANNELS = 2
# Sample rate in samples per second
SAMPLE_RATE = 48000
PERIOD_SIZE = 1024
# The duration of a measurement interval (after which the thresholds will be checked) in seconds.
SAMPLE_SECONDS_BEFORE_CHECK = 0.5
# The number of samples before each check
SAMPLE_COUNT_BEFORE_CHECK = int((SAMPLE_RATE / CHANNELS) * SAMPLE_SECONDS_BEFORE_CHECK)
# The time during which the input threshold hasn't been reached, before output is stopped.
# This is useful for preventing the output device from turning off and on when there is a short silence in the input.
SAMPLE_SECONDS_BEFORE_TURN_OFF = 15
# The number of checks which have to fail before audio is turned off.
CHECK_NUMBER_BEFORE_TURN_OFF = int(SAMPLE_SECONDS_BEFORE_TURN_OFF / SAMPLE_SECONDS_BEFORE_CHECK)
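# With the defaults above: SAMPLE_COUNT_BEFORE_CHECK = int((48000 / 2) * 0.5) = 12000 samples
# per check, and CHECK_NUMBER_BEFORE_TURN_OFF = int(15 / 0.5) = 30 failed checks (about 15 s
# of silence) before playback is stopped.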
def open_sound(output=False):
input_device = alsaaudio.PCM(
alsaaudio.PCM_CAPTURE,
alsaaudio.PCM_NONBLOCK,
device=DEVICE_NAME,
channels=CHANNELS,
rate=SAMPLE_RATE,
format=alsaaudio.PCM_FORMAT_S16_LE,
periodsize=PERIOD_SIZE )
if output:
output_device = alsaaudio.PCM(
alsaaudio.PCM_PLAYBACK,
alsaaudio.PCM_NONBLOCK,
device=DEVICE_NAME,
channels=CHANNELS,
rate=SAMPLE_RATE,
format=alsaaudio.PCM_FORMAT_S16_LE,
periodsize=PERIOD_SIZE )
return input_device, output_device
else:
return input_device
def decibel(value):
    # Guard against log(0) on completely silent intervals
    if value <= 0:
        return -float("inf")
    return 20 * log(value / SAMPLE_MAXVAL, 10)
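# For reference: decibel(SAMPLE_MAXVAL) == 0.0 dBFS and decibel(SAMPLE_MAXVAL / 2) is roughly
# -6.0 dB, so the command-line thresholds below are negative dBFS values relative to full scale.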
def stop_playback(_signalNumber, _frame):
global output_stopped
logging.info("received USR1, stopping music playback")
output_stopped = True
if __name__ == '__main__':
start_db_threshold = 0
stop_db_threshold = 0
try:
start_db_threshold = float(sys.argv[1])
if start_db_threshold > 0:
start_db_threshold = -start_db_threshold
# Define the stop threshold. This prevents hysteresis when the volume fluctuates just around the threshold.
stop_db_threshold = start_db_threshold - 3
print("using alsaloop with input level detection {:.1f} to start, {:.1f} to stop"
.format(start_db_threshold, stop_db_threshold))
except:
print("using alsaloop without input level detection")
input_device = open_sound(output=False)
output_device = None
finished = False
samples = 0
sample_sum = 0
max_sample = 0
status = "-"
rms_volume = 0
input_detected = False
# Counter for subsequent intervals in which the threshold has not been met while playback is active
count_playback_threshold_not_met = 0
while not finished:
# Read data from device
data_length, data = input_device.read()
if data_length < 0:
# Something's wrong when this happens. Just try to read again.
logging.error("?")
continue
if (len(data) % 4) != 0:
# Additional sanity test: If the length isn't a multiple of 4, something's wrong
print("oops %s".format(len(data)))
continue
offset = 0
# Read through the currently captured audio data
while offset < data_length:
try:
# Read the left and right channel from the data packet
(sample_l, sample_r) = unpack_from('<hh', data, offset=offset)
except:
# logging.error("%s %s %s",l,len(data), offset)
# Set a default value of zero so the program can keep running
(sample_l, sample_r) = (0, 0)
offset += 4
samples += 2
# Calculate the sum of all samples squared, used to determine rms later.
sample_sum += sample_l * sample_l + sample_r * sample_r
# Determine the max value of all samples
max_sample = max(max_sample, abs(sample_l), abs(sample_r))
if samples >= SAMPLE_COUNT_BEFORE_CHECK:
# Calculate RMS
rms_volume = sqrt(sample_sum / samples)
# Determine which threshold value to use
if output_stopped:
threshold = start_db_threshold
else:
threshold = stop_db_threshold
# Check if the threshold has been exceeded
if start_db_threshold == 0 or decibel(max_sample) > threshold:
input_detected = True
status = "P"
else:
input_detected = False
status = "-"
print("{} {:.1f} {:.1f}".format(status, decibel(rms_volume), decibel(max_sample)), flush=True)
sample_sum = 0
samples = 0
max_sample = 0
if output_stopped and input_detected:
del input_device
logging.info("Input signal detected, pausing other players")
os.system("/opt/hifiberry/bin/pause-all alsaloop")
(input_device, output_device) = open_sound(output=True)
output_stopped = False
continue
elif not output_stopped and not input_detected:
count_playback_threshold_not_met += 1
logging.info(f"No input signal for {count_playback_threshold_not_met} intervals")
if count_playback_threshold_not_met > CHECK_NUMBER_BEFORE_TURN_OFF:
del input_device
output_device = None
logging.info("Input signal lost, stopping playback")
input_device = open_sound(output=False)
output_stopped = True
continue
if input_detected:
# Reset counter when input detected
count_playback_threshold_not_met = 0
if not output_stopped:
output_device.write(data)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import sys
from rrsg_cgreco._helper_fun import nlinvns
from rrsg_cgreco import linop
import skimage.filters
from scipy.ndimage.morphology import binary_dilation as dilate
# Estimates sensitivities and complex image.
# (see Martin Uecker: Image reconstruction by regularized nonlinear
# inversion joint estimation of coil sensitivities and image content)
def estimate_coil_sensitivities(data, trajectory, par,
coils=None, NLINV=False):
"""
Estimate complex coil sensitivities.
Estimate complex coil sensitivities using either a sum-of-squares based
approach (NLINV=False) or NLINV from Martin Uecker et al. (NLINV=True)
Args
----
data (numpy.array):
complex k-space data
coils (numpy.array):
Complex coil sensitivites, possibly read from File
trajectory (numpy.array):
trajectory information
par (dict):
A python dict containing the necessary information to
setup the object. Needs to contain the number of slices
(num_slc), number of scans (num_scans),
image dimensions (dimX, dimY), number of coils (num_coils),
sampling pos (num_reads) and read outs (num_proj).
NLINV (bool):
Switch between NLINV or sum-of-squares based coil estimation.
Defaults to sum-of-squares (NLINV=False).
"""
if coils is not None:
print("Using supplied coil sensitivity profiles...")
par["Data"]["coils"] = coils
par["Data"]["phase_map"] = np.zeros(
(par["Data"]["image_dimension"],
par["Data"]["image_dimension"]),
dtype=par["Data"]["DTYPE"])
cropfov = slice(
int(
np.ceil(par["Data"]["coils"].shape[-2]/2
- par["Data"]["image_dimension"]/2
)
),
int(
np.ceil(par["Data"]["coils"].shape[-1]/2
+ par["Data"]["image_dimension"]/2))
)
par["Data"]["coils"] = np.squeeze(
par["Data"]["coils"][..., cropfov, cropfov])
if type(par["Data"]["mask"]) is np.ndarray:
par["Data"]["mask"] = np.squeeze(
par["Data"]["mask"][cropfov, cropfov])
_norm_coils(par)
else:
if NLINV:
print("Estimating coil sensitivity profiles using NLINV...")
estimate_coil_sensitivities_NLINV(data, trajectory, par)
else:
print("Estimating coil sensitivity profiles using SoS...")
estimate_coil_sensitivities_SOS(data, trajectory, par)
def estimate_coil_sensitivities_SOS(data, trajectory, par):
"""
Estimate complex coil sensitivities using a sum-of-squares approach.
Estimate complex coil sensitivities by dividing each coil channel
with the SoS reconstruciton. A Hann window is used to filter out high
k-space frequencies.
Args
----
data (numpy.array):
complex k-space data
trajectory (numpy.array):
trajectory information
par (dict):
A python dict containing the necessary information to
setup the object. Needs to contain the number of slices
(num_slc), number of scans (num_scans),
image dimensions (dimX, dimY), number of coils (num_coils),
sampling pos (num_reads) and read outs (num_proj).
"""
par["Data"]["phase_map"] = np.zeros(
(par["Data"]["image_dimension"],
par["Data"]["image_dimension"]),
dtype=par["Data"]["DTYPE"])
FFT = linop.NUFFT(par=par, trajectory=trajectory)
windowsize = par["Data"]["num_reads"]/10
window = np.hanning(windowsize)
window = np.pad(window, int((par["Data"]["num_reads"]-windowsize)/2))
lowres_data = data*window.T
coil_images = FFT.adjoint(lowres_data * par["FFT"]["dens_cor"])
combined_image = np.sqrt(
1/coil_images.shape[0]
* np.sum(np.abs(coil_images)**2, 0)
)
coils = coil_images/combined_image
thresh = skimage.filters.threshold_otsu(combined_image)
mask = combined_image > thresh*0.3
mask = dilate(mask, iterations=10)
par["Data"]["coils"] = coils
par["Data"]["mask"] = mask
_norm_coils(par)
def estimate_coil_sensitivities_NLINV(data, trajectory, par):
"""
Estimate complex coil sensitivities using NLINV from Martin Uecker et al.
Estimate complex coil sensitivities using NLINV from Martin Uecker et al.
Non-uniform data is first regridded and subsequently transformed back to
k-space using a standard fft.
The result ist stored in the parameter (par) dict. Internally the nlinvns
function is called.
This is just a workaround for now to allow
for fast coil estimation. The final script will use precomputed
profiles most likely from an Espirit reconstruction.
Args
----
data (numpy.array):
complex k-space data
trajectory (numpy.array):
trajectory information
par (dict):
A python dict containing the necessary information to
setup the object. Needs to contain the number of slices
(num_slc), number of scans (num_scans),
image dimensions (dimX, dimY), number of coils (num_coils),
sampling pos (num_reads) and read outs (num_proj).
"""
nlinv_newton_steps = 6
nlinv_real_constr = False
par["Data"]["coils"] = np.zeros(
(par["Data"]["num_coils"],
par["Data"]["image_dimension"],
par["Data"]["image_dimension"]),
dtype=par["Data"]["DTYPE"])
par["Data"]["phase_map"] = np.zeros(
(par["Data"]["image_dimension"],
par["Data"]["image_dimension"]),
dtype=par["Data"]["DTYPE"])
FFT = linop.NUFFT(par=par, trajectory=trajectory)
combined_data = FFT.adjoint(data * par["FFT"]["dens_cor"])
combined_data = np.fft.fft2(
combined_data,
norm='ortho')
sys.stdout.write(
"Computing coil sensitivity map\n")
sys.stdout.flush()
result = nlinvns.nlinvns(
np.squeeze(combined_data),
nlinv_newton_steps,
True,
nlinv_real_constr
)
par["Data"]["coils"] = result[2:, -1]
if not nlinv_real_constr:
par["Data"]["phase_map"] = np.exp(
1j
*
np.angle(
result[0, -1]
)
)
# normalize coil sensitivity profiles
_norm_coils(par)
def _norm_coils(par):
# normalize coil sensitivity profiles
sumSqrC = np.sqrt(
np.sum(
(par["Data"]["coils"] * np.conj(par["Data"]["coils"])),
0
)
)
sumSqrC[sumSqrC == 0] = 1
par["Data"]["in_scale"] = sumSqrC
if par["Data"]["num_coils"] == 1:
par["Data"]["coils"] = sumSqrC
else:
par["Data"]["coils"] = (
par["Data"]["coils"] / sumSqrC)
|
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import json
from time import sleep
from . import AICSLiveScenarioTest
from azext_iot.product.shared import (
TaskType,
BadgeType,
DeviceTestTaskStatus,
AttestationType,
)
class TestProductDeviceTestTasks(AICSLiveScenarioTest):
def __init__(self, test_case):
super(TestProductDeviceTestTasks, self).__init__(test_case)
self.kwargs.update(
{
"generate_task": TaskType.GenerateTestCases.value,
"queue_task": TaskType.QueueTestRun.value,
}
)
def test_e2e(self):
# Test requirement list
self.cmd("iot product requirement list --base-url {BASE_URL}")
self.kwargs.update({"badge_type": BadgeType.IotDevice.value})
requirements_output = self.cmd(
"iot product requirement list --bt {badge_type} --base-url {BASE_URL}"
).get_output_in_json()
expected = [
{
"badgeType": "IotDevice",
"provisioningRequirement": {
"provisioningTypes": ["SymmetricKey", "TPM", "X509"]
},
}
]
assert requirements_output == expected
# Device test operations
test = self.cmd(
"iot product test create --at SymmetricKey --dt DevKit --base-url {BASE_URL}"
).get_output_in_json()
assert test["deviceType"].lower() == "devkit"
assert test["provisioningConfiguration"]["type"].lower() == "symmetrickey"
assert test["provisioningConfiguration"]["symmetricKeyEnrollmentInformation"][
"primaryKey"
]
self.kwargs.update({"device_test_id": test["id"]})
test = self.cmd(
"iot product test show -t {device_test_id} --base-url {BASE_URL}"
).get_output_in_json()
assert test["id"] == self.kwargs["device_test_id"]
updated = self.cmd(
"iot product test update -t {device_test_id} --at symmetricKey --base-url {BASE_URL}"
).get_output_in_json()
assert updated["id"] == self.kwargs["device_test_id"]
assert (
updated["provisioningConfiguration"]["type"]
== AttestationType.symmetricKey.value
)
assert updated["provisioningConfiguration"]["symmetricKeyEnrollmentInformation"]
# Generate test cases
generate_task = self.cmd(
"iot product test task create -t {device_test_id} --type {generate_task} --base-url {BASE_URL}"
).get_output_in_json()
assert generate_task["status"] == DeviceTestTaskStatus.queued.value
test_task = self.cmd(
"iot product test task show --running -t {device_test_id} --base-url {BASE_URL}"
).get_output_in_json()[0]
assert json.dumps(test_task)
assert test_task.get("status") == DeviceTestTaskStatus.queued.value
assert test_task.get("error") is None
assert test_task.get("type") == TaskType.GenerateTestCases.value
# wait for generate task to complete
sleep(5)
self.kwargs.update({"generate_task_id": test_task["id"]})
test_task = self.cmd(
"iot product test task show -t {device_test_id} --task-id {generate_task_id} --base-url {BASE_URL}"
).get_output_in_json()
assert test_task.get("error") is None
assert test_task.get("type") == TaskType.GenerateTestCases.value
# Test case operations
case_list = self.cmd(
"iot product test case list -t {device_test_id} --base-url {BASE_URL}"
).get_output_in_json()
assert json.dumps(case_list)
assert json.dumps(case_list["certificationBadgeTestCases"])
# TODO: Test case update
# Queue a test run, await the run results
run = self.cmd(
"iot product test task create -t {device_test_id} --type {queue_task} --wait --base-url {BASE_URL}"
).get_output_in_json()
# test run currently fails without simulator
assert run["status"] == DeviceTestTaskStatus.failed.value
assert json.dumps(run["certificationBadgeResults"])
self.kwargs.update({"run_id": run["id"]})
# show run
run_get = self.cmd(
"iot product test run show -t {device_test_id} -r {run_id} --base-url {BASE_URL}"
).get_output_in_json()
# show latest run
run_latest = self.cmd(
"iot product test run show -t {device_test_id} --base-url {BASE_URL}"
).get_output_in_json()
assert run_get == run_latest
assert run_get["id"] == run_latest["id"] == self.kwargs["run_id"]
assert (
run_get["status"]
== run_latest["status"]
== DeviceTestTaskStatus.failed.value
)
# Queue a test run without wait, get run_id
queue_task = self.cmd(
"iot product test task create -t {device_test_id} --type {queue_task} --base-url {BASE_URL}"
).get_output_in_json()
assert queue_task["type"] == TaskType.QueueTestRun.value
assert queue_task["status"] == DeviceTestTaskStatus.queued.value
self.kwargs.update({"queue_task_id": queue_task["id"]})
# allow test to start running
sleep(5)
queue_task = self.cmd(
"iot product test task show -t {device_test_id} --task-id {queue_task_id} --base-url {BASE_URL}"
).get_output_in_json()
assert queue_task["type"] == TaskType.QueueTestRun.value
assert queue_task["status"] == DeviceTestTaskStatus.running.value
# Cancel running test task
self.cmd(
"iot product test task delete -t {device_test_id} --task-id {queue_task_id} --base-url {BASE_URL}"
)
# allow test to be cancelled
sleep(5)
# get cancelled test task
show = self.cmd(
"iot product test task show -t {device_test_id} --task-id {queue_task_id} --base-url {BASE_URL}"
).get_output_in_json()
assert show["status"] == DeviceTestTaskStatus.cancelled.value
# # Submit run
# self.cmd(
# "iot product test run submit -t {device_test_id} -r {run_id} --base-url {BASE_URL}",
# expect_failure=True,
# )
|
import csv
from datetime import datetime
import numpy as np
import ray
from ray.tune.logger import pretty_print
from ray.rllib.agents.dqn.apex import ApexTrainer
from ray.rllib.agents.dqn.apex import APEX_DEFAULT_CONFIG
from ray.rllib.models import ModelCatalog
from custom_mcar import MountainCar
from masking_model import ParametricActionsModel
from mcar_demo import DEMO_DATA_DIR
ALL_STRATEGIES = [
"default",
"with_dueling",
"custom_reward",
"custom_reward_n_dueling",
"demonstration",
"curriculum",
"curriculum_n_dueling",
"action_masking",
]
STRATEGY = "demonstration"
CURRICULUM_MAX_LESSON = 4
CURRICULUM_TRANS = 150
MAX_STEPS = 2e6
MAX_STEPS_OFFLINE = 4e5
NUM_TRIALS = 5
NUM_FINAL_EVAL_EPS = 20
def get_apex_trainer(strategy):
config = APEX_DEFAULT_CONFIG.copy()
config["env"] = MountainCar
config["buffer_size"] = 1000000
config["learning_starts"] = 10000
config["target_network_update_freq"] = 50000
config["rollout_fragment_length"] = 200
config["timesteps_per_iteration"] = 10000
config["num_gpus"] = 1
config["num_workers"] = 20
config["evaluation_num_workers"] = 10
config["evaluation_interval"] = 1
if strategy not in [
"with_dueling",
"custom_reward_n_dueling",
"curriculum_n_dueling",
]:
config["hiddens"] = []
config["dueling"] = False
if strategy == "action_masking":
ModelCatalog.register_custom_model("pa_model", ParametricActionsModel)
config["env_config"] = {"use_action_masking": True}
config["model"] = {
"custom_model": "pa_model",
}
elif strategy == "custom_reward" or strategy == "custom_reward_n_dueling":
config["env_config"] = {"reward_fun": "custom_reward"}
elif strategy in ["curriculum", "curriculum_n_dueling"]:
config["env_config"] = {"lesson": 0}
elif strategy == "demonstration":
config["input"] = DEMO_DATA_DIR
#config["input"] = {"sampler": 0.7, DEMO_DATA_DIR: 0.3}
config["explore"] = False
config["input_evaluation"] = []
config["n_step"] = 1
trainer = ApexTrainer(config=config)
return trainer, config["env_config"]
def set_trainer_lesson(trainer, lesson):
trainer.evaluation_workers.foreach_worker(
lambda ev: ev.foreach_env(lambda env: env.set_lesson(lesson))
)
trainer.workers.foreach_worker(
lambda ev: ev.foreach_env(lambda env: env.set_lesson(lesson))
)
def increase_lesson(lesson):
if lesson < CURRICULUM_MAX_LESSON:
lesson += 1
return lesson
def final_evaluation(trainer, n_final_eval, env_config={}):
env = MountainCar(env_config)
eps_lengths = []
for i_episode in range(n_final_eval):
observation = env.reset()
done = False
t = 0
while not done:
t += 1
action = trainer.compute_action(observation)
observation, reward, done, info = env.step(action)
if done:
eps_lengths.append(t)
print(f"Episode finished after {t} time steps")
print(
f"Avg. episode length {np.mean(eps_lengths)} out of {len(eps_lengths)} episodes."
)
return np.mean(eps_lengths)
### START TRAINING ###
ray.init()
avg_eps_lens = []
for i in range(NUM_TRIALS):
trainer, env_config = get_apex_trainer(STRATEGY)
if STRATEGY in ["curriculum", "curriculum_n_dueling"]:
lesson = 0
set_trainer_lesson(trainer, lesson)
# Training
while True:
results = trainer.train()
print(pretty_print(results))
if STRATEGY == "demonstration":
demo_training_steps = results["timesteps_total"]
if results["timesteps_total"] >= MAX_STEPS_OFFLINE:
trainer, _ = get_apex_trainer("with_dueling")
if results["timesteps_total"] >= MAX_STEPS:
if STRATEGY == "demonstration":
if results["timesteps_total"] >= MAX_STEPS + demo_training_steps:
break
else:
break
if "evaluation" in results and STRATEGY in ["curriculum", "curriculum_n_dueling"]:
if results["evaluation"]["episode_len_mean"] < CURRICULUM_TRANS:
lesson = increase_lesson(lesson)
set_trainer_lesson(trainer, lesson)
print(f"Lesson: {lesson}")
# Final evaluation
checkpoint = trainer.save()
if STRATEGY in ["curriculum", "curriculum_n_dueling"]:
env_config["lesson"] = CURRICULUM_MAX_LESSON
if STRATEGY == "action_masking":
# Action masking is running into errors in Ray 1.0.1 during compute action
# So, we use evaluation episode lengths.
avg_eps_len = results["evaluation"]["episode_len_mean"]
else:
avg_eps_len = final_evaluation(trainer, NUM_FINAL_EVAL_EPS, env_config)
date_time = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
result = [date_time, STRATEGY, str(i), avg_eps_len, checkpoint]
avg_eps_lens.append(avg_eps_len)
with open(r"results.csv", "a") as f:
writer = csv.writer(f)
writer.writerow(result)
print(f"Average episode length: {np.mean(avg_eps_lens)}")
|
import sys
sys.path.append("../../")
from appJar import gui
def launch(win):
app.showSubWindow(win)
def stopper(btn=None):
return app.yesNoBox("Stop", "Stop?")
app=gui()
app.startSubWindow("Modal", modal=True, blocking=False, transient=False, grouped=True)
app.setStopFunction(stopper)
app.addLabel("l1", "SubWindow One")
app.addEntry("e1")
app.addButtons(["HIDE", "SHOW"], [app.hide, app.show])
app.stopSubWindow()
app.startSubWindow("unModal", grouped=True, transient=True)
app.addLabel("l2", "SubWindow Two")
app.addEntry("e2")
app.addButtons(["HIDE2", "SHOW2"], [app.hide, app.show])
app.stopSubWindow()
app.addLabel("mt", "Modal Testing")
app.addButtons(["Modal", "unModal"], launch)
#launch("Modal")
app.go(startWindow="Modal")
|
"""
实现 strStr() 函数。
给定一个 haystack 字符串和一个 needle 字符串,在 haystack 字符串中找出 needle 字符串出现的第一个位置 (从0开始)。如果不存在,则返回 -1。
示例 1:
输入: haystack = "heallo", needle = "ll"
输出: 2
示例 2:
输入: haystack = "aaaaa", needle = "bba"
输出: -1
说明:
当 needle 是空字符串时,我们应当返回什么值呢?这是一个在面试中很好的问题。
对于本题而言,当 needle 是空字符串时我们应当返回 0 。这与C语言的 strstr() 以及 Java的 indexOf() 定义相符。
"""
class Solution:
def strStr(self, haystack: str, needle: str) -> int:
i = 0
index = 0
if not needle:
return 0
if len(needle) == 1 and haystack == needle:
return 0
length = len(haystack)
while i < length:
if index == len(needle):
break
if haystack[i] == needle[index]:
index += 1
else:
i -= index
index = 0
i += 1
if index == len(needle):
return i-index
return -1
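# The scan above backtracks i on a mismatch (i -= index), so the worst case is
# O(len(haystack) * len(needle)) time with O(1) extra space.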
class Solution:  # not recommended: simply delegates to the built-in str.find
def strStr(self, haystack: str, needle: str) -> int:
return haystack.find(needle)
def main():
s = Solution()
res = s.strStr("heallo", "ll")
# res = s.strStr("aaaaa", "baa")
# res = s.strStr("s", "s")
# res = s.strStr("abc", "c")
# res = s.strStr("mississippi", "issip")
res = s.strStr("a", "abc")
print(res)
if __name__ == "__main__":
main()
|
from django.db import models
from rest_framework import serializers
from rest_framework.reverse import reverse
class InterfaceFile(models.Model):
# Type choices
TYPE_INPUT = 'input'
TYPE_OUTPUT = 'output'
TYPE_CHOICES = [
(TYPE_INPUT, 'Input'),
(TYPE_OUTPUT, 'Output')
]
name = models.CharField(max_length=100)
description = models.TextField(default='No description', blank=True)
filename = models.CharField(default='', blank=True, max_length=100)
type = models.CharField(choices=TYPE_CHOICES, max_length=100)
extension = models.CharField(default='', blank=True, max_length=100)
data = models.FileField(blank=True, null=True)
data_size = models.IntegerField(default=0)
model = models.ForeignKey('Model',
related_name='interface_files',
on_delete=models.CASCADE)
owner = models.ForeignKey('auth.User',
related_name='interface_files',
on_delete=models.CASCADE)
class InterfaceFileSerializer(serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='interface-file-detail')
data = serializers.SerializerMethodField()
model = serializers.HyperlinkedRelatedField(view_name='model-detail',
read_only=True,
many=False)
def get_data(self, obj):
if obj.data.name:
return '{}data'.format(reverse('interface-file-detail',
args=[obj.id],
request=self.context['request']))
else:
return None
class Meta:
model = InterfaceFile
fields = ('url',
'id',
'name',
'description',
'filename',
'type',
'extension',
'data',
'data_size',
'model',)
|
from .pygraphs_base import Graph, Vertex, Edge
from .tools import load_db, save_db
__version__ = '0.0.2'
def start():
print('''
    pygraphs imported successfully,
version: {version}
Author: Guo Fei,
Email: guofei9987@foxmail.com
repo: https://github.com/guofei9987/pygraphs,
documents: https://github.com/guofei9987/pygraphs
'''.format(version=__version__))
|
__all__ = ["near_miss_v1", "near_miss_v2", "near_miss_v3", "condensed_knn", "edited_knn", "knn_und", "tomek_link"]
|
from flask import jsonify
from limbook_api import AuthError, ImageUploadError
from limbook_api.errors.validation_error import ValidationError
def register_error_handlers(app):
"""
--------------------------------------
Error handlers for all expected errors
--------------------------------------
"""
@app.errorhandler(AuthError)
def auth_error(error):
return jsonify({
"success": False,
"error": error.status_code,
"error_code": error.error.get('code'),
"message": error.error.get('description')
}), error.status_code
@app.errorhandler(ImageUploadError)
def image_upload_error(error):
return jsonify({
"success": False,
"error": error.status_code,
"error_code": error.error.get('code'),
"message": error.error.get('description')
}), error.status_code
@app.errorhandler(ValidationError)
def validation_error(error):
return jsonify({
"success": False,
"error": error.status_code,
"error_code": error.error.get('code'),
"message": error.error.get('description'),
"errors": error.error.get('errors')
}), error.status_code
@app.errorhandler(401)
def unauthorized(error):
return jsonify({
"success": False,
"error": 401,
"error_code": "unauthorized",
"message": "Unauthorized"
}), 401
@app.errorhandler(403)
def forbidden(error):
return jsonify({
"success": False,
"error": 403,
"error_code": "forbidden",
"message": "Forbidden"
}), 403
@app.errorhandler(404)
def not_found(error):
return jsonify({
"success": False,
"error": 404,
"error_code": "not_found",
"message": "Resource not found"
}), 404
@app.errorhandler(422)
def unprocessable(error):
return jsonify({
"success": False,
"error": 422,
"error_code": "unprocessable",
"message": "Unprocessable"
}), 422
@app.errorhandler(400)
def bad_request(error):
return jsonify({
"success": False,
"error": 400,
"error_code": "bad_request",
"message": "Bad Request"
}), 400
@app.errorhandler(405)
def method_not_allowed(error):
return jsonify({
"success": False,
"error": 405,
"error_code": "method_not_allowed",
"message": "Method not allowed"
}), 405
@app.errorhandler(413)
    def entity_too_large(error):
return jsonify({
"success": False,
"error": 413,
"error_code": "entity_too_large",
"message": "Entity too large"
}), 413
@app.errorhandler(500)
def unknown(error):
return jsonify({
"success": False,
"error": 500,
"error_code": "server_error",
"message": "Unknown server error"
}), 500
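# A minimal usage sketch (illustrative only, assuming the limbook_api package
# and Flask are importable): create a bare app, register the handlers defined
# above, and hit an unknown URL to get the JSON 404 payload instead of Flask's
# default HTML error page.
if __name__ == "__main__":
    from flask import Flask

    demo_app = Flask(__name__)
    register_error_handlers(demo_app)
    with demo_app.test_client() as client:
        print(client.get("/does-not-exist").get_json())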
|
"""Entry point for the 'python -m electionsbot' command."""
import electionsbot
if __name__ == "__main__":
electionsbot.main()
|
import sys
from contextlib import contextmanager
from traceback import print_tb
import yaml
@contextmanager
def capture_all_exception(_run):
"""Capture all Errors and Exceptions, print traceback and flush stdout stderr."""
try:
yield None
except Exception:
exc_type, exc_value, trace = sys.exc_info()
print(exc_type, exc_value, trace)
print_tb(trace)
# _run._stop_heartbeat()
# _run._emit_failed(exc_type, exc_value, trace.tb_next)
# raise
finally:
sys.stdout.flush()
sys.stderr.flush()
def yaml_load(data_path):
with open(data_path) as f:
        # yaml.load without an explicit Loader is deprecated and unsafe
        return yaml.load(f, Loader=yaml.SafeLoader)
def yaml_dump(data, data_path):
with open(data_path, 'w') as f:
yaml.dump(data, f)
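# Tiny usage sketch (illustrative; "example.yml" is a made-up path): round-trip
# a dict through the two helpers defined above.
if __name__ == "__main__":
    yaml_dump({"answer": 42}, "example.yml")
    print(yaml_load("example.yml"))  # -> {'answer': 42}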
|
#!/usr/bin/env python3
# dcfac0e3-1ade-11e8-9de3-00505601122b
# 7d179d73-3e93-11e9-b0fd-00505601122b
import argparse
import sys
import matplotlib.pyplot as plt
import numpy as np
import sklearn.datasets
import sklearn.metrics
import sklearn.model_selection
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--C", default=1, type=float, help="Inverse regularization strenth"
)
parser.add_argument("--examples", default=200, type=int, help="Number of examples")
parser.add_argument(
"--kernel", default="rbf", type=str, help="Kernel type [poly|rbf]"
)
parser.add_argument(
"--kernel_degree", default=5, type=int, help="Degree for poly kernel"
)
parser.add_argument(
"--kernel_gamma", default=1.0, type=float, help="Gamma for poly and rbf kernel"
)
parser.add_argument(
"--num_passes",
default=10,
type=int,
help="Number of passes without changes to stop after",
)
parser.add_argument(
"--plot", default=False, action="store_true", help="Plot progress"
)
parser.add_argument("--seed", default=42, type=int, help="Random seed")
parser.add_argument(
"--test_ratio", default=0.5, type=float, help="Test set size ratio"
)
parser.add_argument(
"--tolerance",
default=1e-4,
type=float,
help="Default tolerance for KKT conditions",
)
args = parser.parse_args()
# Set random seed
np.random.seed(args.seed)
    # Generate an artificial binary classification dataset, with +-1 as targets
data, target = sklearn.datasets.make_classification(
n_samples=args.examples,
n_features=2,
n_informative=2,
n_redundant=0,
random_state=args.seed,
)
target = 2 * target - 1
# Split the data randomly to train and test using `sklearn.model_selection.train_test_split`,
# with `test_size=args.test_ratio` and `random_state=args.seed`.
train_data, test_data, train_target, test_target = sklearn.model_selection.train_test_split(
data, target, stratify=target, test_size=args.test_ratio, random_state=args.seed
)
# We consider the following kernels:
# - linear: K(x, y) = x^T y
# - poly: K(x, y; degree, gamma) = (gamma * x^T y + 1) ^ degree
# - rbf: K(x, y; gamma) = exp^{- gamma * ||x - y||^2}
def kernel(x, y):
if args.kernel == "linear":
return x @ y
if args.kernel == "poly":
return (args.kernel_gamma * x @ y + 1) ** args.kernel_degree
if args.kernel == "rbf":
return np.exp(-args.kernel_gamma * ((x - y) @ (x - y)))
def calc_b(X, y, w):
b_tmp = y - np.dot(w.T, X.T)
return np.mean(b_tmp)
def calc_w(alpha, y, X):
return np.dot(X.T, np.multiply(alpha, y))
def clip(a, H, L):
if a > H:
return H
if L > a:
return L
return a
def predict(a, b, train_data, train_target, x):
return (
sum(
a[i] * train_target[i] * kernel(train_data[i], x) for i in range(len(a))
)
+ b
)
# Create initial weights
a, b = np.zeros(len(train_data)), 0
j_generator = np.random.RandomState(args.seed)
passes = 0
while passes < args.num_passes:
a_changed = 0
for i in range(len(a)):
pred_i = predict(a, b, train_data, train_target, train_data[i, :])
Ei = pred_i - train_target[i]
cond_1 = (a[i] < args.C) and (train_target[i] * Ei < -args.tolerance)
cond_2 = (a[i] > 0) and (train_target[i] * Ei > args.tolerance)
if cond_1 or cond_2:
j = j_generator.randint(len(a) - 1)
j = j + (j >= i)
pred_j = predict(a, b, train_data, train_target, train_data[j, :])
Ej = pred_j - train_target[j]
second_derivative_j = (
2 * kernel(train_data[i,], train_data[j,])
- kernel(train_data[i,], train_data[i,])
- kernel(train_data[j,], train_data[j,])
)
                # Skip when the second derivative is not sufficiently negative
                # (this also avoids a division by zero just below).
                if second_derivative_j >= -args.tolerance:
                    continue
                a_j_new = a[j] - train_target[j] * ((Ei - Ej) / (second_derivative_j))
if train_target[i] == train_target[j]:
L = np.maximum(0, a[i] + a[j] - args.C)
H = np.minimum(args.C, a[i] + a[j])
else:
L = np.maximum(0, a[j] - a[i])
H = np.minimum(args.C, args.C + a[j] - a[i])
                if (H - L) < args.tolerance:
                    continue
                a_j_new = clip(a_j_new, H, L)
if np.abs(a_j_new - a[j]) < args.tolerance:
continue
a_i_new = a[i] - train_target[i] * train_target[j] * (a_j_new - a[j])
b_j = (
b
- Ej
- train_target[i]
* (a_i_new - a[i])
* kernel(train_data[i,], train_data[j,])
- train_target[j]
* (a_j_new - a[j])
* kernel(train_data[j,], train_data[j,])
)
b_i = (
b
- Ei
- train_target[i]
* (a_i_new - a[i])
* kernel(train_data[i,], train_data[i,])
- train_target[j]
* (a_j_new - a[j])
* kernel(train_data[j,], train_data[i,])
)
a[j] = a_j_new
a[i] = a_i_new
# - increase a_changed
if 0 < a[i] < args.C:
b = b_i
elif 0 < a[j] < args.C:
b = b_j
else:
b = (b_i + b_j) / 2
a_changed = a_changed + 1
passes = 0 if a_changed else passes + 1
pred_train = np.sign(
[
predict(a, b, train_data, train_target, train_data[o, :])
for o in range(train_data.shape[0])
]
)
pred_test = np.sign(
[
predict(a, b, train_data, train_target, test_data[o, :])
for o in range(test_data.shape[0])
]
)
train_accuracy = np.mean(pred_train == train_target)
test_accuracy = np.mean(pred_test == test_target)
        # Measure the accuracy on both the training set and the test set and
        # print it in percentages.
print(
"Train acc {:.1f}%, test acc {:.1f}%".format(
100 * train_accuracy, 100 * test_accuracy
)
)
if args.plot:
def predict_simple(x):
return (
sum(
a[i] * train_target[i] * kernel(train_data[i], x)
for i in range(len(a))
)
+ b
)
xs = np.linspace(np.min(data[:, 0]), np.max(data[:, 0]), 50)
ys = np.linspace(np.min(data[:, 1]), np.max(data[:, 1]), 50)
predictions = [[predict_simple(np.array([x, y])) for x in xs] for y in ys]
plt.contourf(xs, ys, predictions, levels=0, cmap=plt.cm.RdBu)
plt.contour(xs, ys, predictions, levels=[-1, 0, 1], colors="k", zorder=1)
plt.scatter(
train_data[:, 0],
train_data[:, 1],
c=train_target,
marker="o",
label="Train",
cmap=plt.cm.RdBu,
zorder=2,
)
plt.scatter(
train_data[a > args.tolerance, 0],
train_data[a > args.tolerance, 1],
marker="o",
s=90,
label="Support Vectors",
c="#00dd00",
)
plt.scatter(
test_data[:, 0],
test_data[:, 1],
c=test_target,
marker="*",
label="Test",
cmap=plt.cm.RdBu,
zorder=2,
)
plt.legend(loc="upper center", ncol=3)
plt.show()
|
import json
from .Handler import Handler
class PutPokemonHandler(Handler):
def __init__(self):
pass
def put_handler(self, pokemon_info):
poke_number, poke_name, poke_types = pokemon_info["number"], pokemon_info["name"], pokemon_info["types"]
        # look up the mapping id for this pokemon number
        mapping_id = self.get_mapping_info(poke_number)
        # check whether the pokemon exists in the database
        if mapping_id == 'None':
            return 'This pokemon is not in the database.'
        else:
            self.put_match_pokemon(poke_number, poke_name, poke_types)
            return 'Put pokemon info success.'
def get_mapping_info(self, poke_number):
# get mapping id
with open('./Database/mapping_list.json') as mapping:
mapping_dict = json.load(mapping)
mapping_id = mapping_dict.get(poke_number, 'None')
return mapping_id
def put_match_pokemon(self, poke_number, poke_name, poke_types):
# update category_db
category_db = json.load(open('./Database/category.json'))
category_db[poke_number] = {
"name":poke_name,
"types":poke_types
}
with open('./Database/category.json', 'w') as db:
json.dump(category_db, db)
|
b='Yun Wen Xiong Gai Gai Bao Cong Yi Xiong Peng Ju Tao Ge Pu E Pao Fu Gong Da Jiu Gong Bi Hua Bei Nao Shi Fang Jiu Yi Za Jiang Kang Jiang Kuang Hu Xia Qu Fan Gui Qie Zang Kuang Fei Hu Yu Gui Kui Hui Dan Gui Lian Lian Suan Du Jiu Jue Xi Pi Qu Yi Ke Yan Bian Ni Qu Shi Xun Qian Nian Sa Zu Sheng Wu Hui Ban Shi Xi Wan Hua Xie Wan Bei Zu Zhuo Xie Dan Mai Nan Dan Ji Bo Shuai Bo Kuang Bian Bu Zhan Ka Lu You Lu Xi Gua Wo Xie Jie Jie Wei Ang Qiong Zhi Mao Yin Wei Shao Ji Que Luan Chi Juan Xie Xu Jin Que Wu Ji E Qing Xi San Chang Wei E Ting Li Zhe Han Li Ya Ya Yan She Di Zha Pang Ya Qie Ya Zhi Ce Pang Ti Li She Hou Ting Zui Cuo Fei Yuan Ce Yuan Xiang Yan Li Jue Sha Dian Chu Jiu Jin Ao Gui Yan Si Li Chang Lan Li Yan Yan Yuan Si Gong Lin Rou Qu Qu Er Lei Du Xian Zhuan San Can Can Can Can Ai Dai You Cha Ji You Shuang Fan Shou Guai Ba Fa Ruo Shi Shu Zhuo Qu Shou Bian Xu Jia Pan Sou Ji Wei Sou Die Rui Cong Kou Gu Ju Ling Gua Dao Kou Zhi Jiao Zhao Ba Ding Ke Tai Chi Shi You Qiu Po Ye Hao Si Tan Chi Le Diao Ji Liao Hong'
|
#!/usr/bin/env python
from __future__ import print_function
import glob, os, re, stat, sys, time
import boto3
try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib import urlopen  # Python 2
import argparse
parameters = argparse.ArgumentParser(description="Create a new EBS Volume and attach it to the current instance")
parameters.add_argument("-s","--size", type=int, required=True)
parameters.add_argument("-t","--type", type=str, default="gp2")
parameters.add_argument("-e","--encrypted", type=bool, default=True)
parameters.add_argument("-i", "--instance-id", type=str)
parameters.add_argument("-z", "--availability-zone", type=str)
def device_exists(path):
    try:
        # True if the path exists and is a block device
        return stat.S_ISBLK(os.stat(path).st_mode)
    except OSError:
        return False
def detect_num_devices():
devices = 0
rgx = re.compile("sd.+|xvd.+")
for device in glob.glob('/dev/[sx]*'):
if rgx.match(os.path.basename(device)):
devices += 1
return devices
def get_next_logical_device():
    # ASCII code for 'a' is 97; pick the next device letter after the existing ones
device_name = "/dev/sd{0}".format( chr(97 + detect_num_devices()) )
return device_name
def get_metadata(key):
    return urlopen("/".join(['http://169.254.169.254/latest/meta-data', key])).read().decode()
# create a EBS volume
def create_and_attach_volume(size=20,
vol_type="gp2",
encrypted=True,
instance_id=None,
availability_zone=None):
region = availability_zone[0:-1]
ec2 = boto3.resource("ec2", region_name=region)
instance = ec2.Instance(instance_id)
volume = ec2.create_volume(
AvailabilityZone=availability_zone,
Encrypted=encrypted,
VolumeType=vol_type,
Size=size
)
while True:
volume.reload()
if volume.state == "available":
break
else:
time.sleep(1)
device = get_next_logical_device()
instance.attach_volume(
VolumeId=volume.volume_id,
Device=device
)
# wait until device exists
while True:
if device_exists(device):
break
else:
time.sleep(1)
instance.modify_attribute(
Attribute="blockDeviceMapping",
BlockDeviceMappings=[{"DeviceName": device,
"Ebs": {"DeleteOnTermination":True,"VolumeId":volume.volume_id}
}]
)
return device
if __name__ == '__main__':
args = parameters.parse_args()
if not args.instance_id:
args.instance_id = get_metadata("instance-id")
if not args.availability_zone:
args.availability_zone = get_metadata("placement/availability-zone")
print(create_and_attach_volume(size=args.size,
instance_id=args.instance_id,
availability_zone=args.availability_zone),
end='')
sys.stdout.flush()
|
# -*- coding: utf-8 -*-
from matplotlib.patches import Circle, Patch
from matplotlib.pyplot import axis, legend, subplots
from numpy import exp, pi, sqrt
from ....definitions import config_dict
COND_COLOR = config_dict["PLOT"]["COLOR_DICT"]["PHASE_COLORS"][0].copy()
INS_COLOR = config_dict["PLOT"]["COLOR_DICT"]["PHASE_COLORS"][1].copy()
COND_INS_COLOR = config_dict["PLOT"]["COLOR_DICT"]["PHASE_COLORS"][2].copy()
# Remove alpha from phases
COND_COLOR[3] = 1
INS_COLOR[3] = 1
COND_INS_COLOR[3] = 1
def plot(self, is_show_fig=True):
"""Plot a Conductor in a matplotlib fig
Parameters
----------
self : CondType12
A CondType12 object
is_show_fig : bool
To call show at the end of the method
Returns
-------
None
Raises
    ------
NotPlotableError
You can't plot a coil with Nwppc>4
"""
patches_list = []
    # Conductor insulation
patches_list.append(Circle((0, 0), self.Wins_cond / 2, color=COND_INS_COLOR))
# Computation of the center of the wire
center_list = []
if self.Nwppc == 1:
center_list.append((0, 0))
elif self.Nwppc == 2:
center_list.append((0, self.Wwire / 2 + self.Wins_wire))
center_list.append((0, -self.Wwire / 2 - self.Wins_wire))
elif self.Nwppc == 3:
        # The 3 centers are at the vertices of an equilateral triangle
        # (side length: a = Wwire + 2*Wins_wire)
        # The radius of the circumscribed circle is a * sqrt(3) / 3
R = (self.Wwire + 2 * self.Wins_wire) * sqrt(3) / 3.0
center_list.append((0, R))
        # The coordinates of the other two centers are found by complex rotation
Z2 = R * 1j * exp(1j * 2 * pi / 3)
Z3 = R * 1j * exp(-1j * 2 * pi / 3)
center_list.append((Z2.real, Z2.imag))
center_list.append((Z3.real, Z3.imag))
elif self.Nwppc == 4:
        # The 4 centers are at the corners of a square
        # (side length: Wwire + 2*Wins_wire)
a = self.Wwire / 2 + self.Wins_wire
center_list.append((a, a))
center_list.append((a, -a))
center_list.append((-a, a))
center_list.append((-a, -a))
else:
raise NotPlotableError("You can't plot a coil with Nwppc>4")
# Creation of the wires
for center in center_list:
# Wire insulation
patches_list.append(
Circle(center, self.Wwire / 2 + self.Wins_wire, color=INS_COLOR)
)
# Wire conductor
patches_list.append(Circle(center, self.Wwire / 2, color=COND_COLOR))
# Display
fig, ax = subplots()
for patch in patches_list:
ax.add_patch(patch)
# Axis Setup
axis("equal")
# The conductor is centered
ax_lim = self.Wins_cond / 2 + self.Wins_cond / 10
ax.set_xlim(-ax_lim, ax_lim)
ax.set_ylim(-ax_lim, ax_lim)
# Legend
patch_leg = list() # Symbol
label_leg = list() # Text
patch_leg.append(Patch(color=COND_INS_COLOR))
label_leg.append("Coil insulation")
patch_leg.append(Patch(color=INS_COLOR))
label_leg.append("Wire insulation")
patch_leg.append(Patch(color=COND_COLOR))
label_leg.append("Active wire section")
legend(patch_leg, label_leg)
if is_show_fig:
fig.show()
class NotPlotableError(Exception):
""" """
pass
|
from albumcollections import db
MAX_SPOTIFY_PLAYLIST_ID_LENGTH = 50
MAX_SPOTIFY_SNAPSHOT_ID_LENGTH = 100
MAX_SPOTIFY_USER_ID_LENGTH = 50
class AcUser(db.Model):
id = db.Column(db.Integer, primary_key=True)
spotify_user_id = db.Column(db.String(MAX_SPOTIFY_USER_ID_LENGTH), unique=True, nullable=False)
playback_playlist_id = db.Column(db.String(MAX_SPOTIFY_PLAYLIST_ID_LENGTH))
collections = db.relationship('Collection', backref=db.backref('ac_user', lazy=True))
def __repr__(self):
return '<AcUser %r>' % self.id
class Collection(db.Model):
id = db.Column(db.Integer, primary_key=True)
playlist_id = db.Column(db.String(MAX_SPOTIFY_PLAYLIST_ID_LENGTH), nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('ac_user.id'), nullable=False)
def __repr__(self):
return '<Collection %r>' % self.id
|
from tensorflow import keras
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat','Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
train_images.shape
train_images = train_images / 255.0
test_images = test_images / 255.0
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10)
])
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=12)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
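# A brief follow-up sketch: the model above outputs raw logits, so wrap it with
# a Softmax layer to get class probabilities and predict one test image.
probability_model = keras.Sequential([model, keras.layers.Softmax()])
predictions = probability_model.predict(test_images[:1])
print('Predicted:', class_names[np.argmax(predictions[0])])
print('Actual:   ', class_names[test_labels[0]])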
|
import re
def filter_language(el, res: dict = None):
if not res:
res = {}
if isinstance(el, dict):
for k, v in el.items():
match = re.fullmatch(r'\w\w_\w\w', k)
if match:
res[k] = filter_language(v, res=res)
else:
return filter_language(v, res=res)
if isinstance(el, (list, tuple)):
list_ = []
l = len(el)
for item in el:
result = filter_language(item, res=res)
if not result:
continue
if isinstance(result, (str, int, float)) and l > 1:
list_.append(result)
else:
return result
if list_:
return list_
if isinstance(el, (str, int, float)):
return el
if el is None:
return el
return res
def remove_country_from_lang_codes(word_dict):
return {k[:2]: v for k, v in word_dict.items()}
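# A small usage sketch (with made-up data): keep only the values stored under
# language-code keys such as "en_us", then strip the country part of each code.
if __name__ == "__main__":
    payload = {"title": {"en_us": "Hello", "de_de": "Hallo"}}
    by_lang = filter_language(payload)              # {'en_us': 'Hello', 'de_de': 'Hallo'}
    print(remove_country_from_lang_codes(by_lang))  # {'en': 'Hello', 'de': 'Hallo'}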
|
# coding: utf-8
"""
Uptrends API v4
This document describes Uptrends API version 4. This Swagger environment also lets you execute API methods directly. Please note that this is not a sandbox environment: these API methods operate directly on your actual Uptrends account. For more information, please visit https://www.uptrends.com/api. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class VaultItem(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'vault_item_guid': 'str',
'hash': 'str',
'name': 'str',
'value': 'str',
'vault_section_guid': 'str',
'vault_item_type': 'object',
'is_sensitive': 'bool',
'notes': 'str',
'user_name': 'str',
'password': 'str',
'certificate_archive': 'object',
'file_info': 'object'
}
attribute_map = {
'vault_item_guid': 'VaultItemGuid',
'hash': 'Hash',
'name': 'Name',
'value': 'Value',
'vault_section_guid': 'VaultSectionGuid',
'vault_item_type': 'VaultItemType',
'is_sensitive': 'IsSensitive',
'notes': 'Notes',
'user_name': 'UserName',
'password': 'Password',
'certificate_archive': 'CertificateArchive',
'file_info': 'FileInfo'
}
def __init__(self, vault_item_guid=None, hash=None, name=None, value=None, vault_section_guid=None, vault_item_type=None, is_sensitive=None, notes=None, user_name=None, password=None, certificate_archive=None, file_info=None): # noqa: E501
"""VaultItem - a model defined in Swagger""" # noqa: E501
self._vault_item_guid = None
self._hash = None
self._name = None
self._value = None
self._vault_section_guid = None
self._vault_item_type = None
self._is_sensitive = None
self._notes = None
self._user_name = None
self._password = None
self._certificate_archive = None
self._file_info = None
self.discriminator = None
self.vault_item_guid = vault_item_guid
if hash is not None:
self.hash = hash
if name is not None:
self.name = name
if value is not None:
self.value = value
self.vault_section_guid = vault_section_guid
self.vault_item_type = vault_item_type
self.is_sensitive = is_sensitive
if notes is not None:
self.notes = notes
if user_name is not None:
self.user_name = user_name
if password is not None:
self.password = password
if certificate_archive is not None:
self.certificate_archive = certificate_archive
if file_info is not None:
self.file_info = file_info
@property
def vault_item_guid(self):
"""Gets the vault_item_guid of this VaultItem. # noqa: E501
The unique key of this vault item # noqa: E501
:return: The vault_item_guid of this VaultItem. # noqa: E501
:rtype: str
"""
return self._vault_item_guid
@vault_item_guid.setter
def vault_item_guid(self, vault_item_guid):
"""Sets the vault_item_guid of this VaultItem.
The unique key of this vault item # noqa: E501
:param vault_item_guid: The vault_item_guid of this VaultItem. # noqa: E501
:type: str
"""
if vault_item_guid is None:
raise ValueError("Invalid value for `vault_item_guid`, must not be `None`") # noqa: E501
self._vault_item_guid = vault_item_guid
@property
def hash(self):
"""Gets the hash of this VaultItem. # noqa: E501
The hash of this vault item # noqa: E501
:return: The hash of this VaultItem. # noqa: E501
:rtype: str
"""
return self._hash
@hash.setter
def hash(self, hash):
"""Sets the hash of this VaultItem.
The hash of this vault item # noqa: E501
:param hash: The hash of this VaultItem. # noqa: E501
:type: str
"""
self._hash = hash
@property
def name(self):
"""Gets the name of this VaultItem. # noqa: E501
The name of this vault item # noqa: E501
:return: The name of this VaultItem. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this VaultItem.
The name of this vault item # noqa: E501
:param name: The name of this VaultItem. # noqa: E501
:type: str
"""
self._name = name
@property
def value(self):
"""Gets the value of this VaultItem. # noqa: E501
The value that is stored in this vault item. Not used for Certificate Archives # noqa: E501
:return: The value of this VaultItem. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this VaultItem.
The value that is stored in this vault item. Not used for Certificate Archives # noqa: E501
:param value: The value of this VaultItem. # noqa: E501
:type: str
"""
self._value = value
@property
def vault_section_guid(self):
"""Gets the vault_section_guid of this VaultItem. # noqa: E501
The unique identifier of the vault section that this vault item belongs to # noqa: E501
:return: The vault_section_guid of this VaultItem. # noqa: E501
:rtype: str
"""
return self._vault_section_guid
@vault_section_guid.setter
def vault_section_guid(self, vault_section_guid):
"""Sets the vault_section_guid of this VaultItem.
The unique identifier of the vault section that this vault item belongs to # noqa: E501
:param vault_section_guid: The vault_section_guid of this VaultItem. # noqa: E501
:type: str
"""
if vault_section_guid is None:
raise ValueError("Invalid value for `vault_section_guid`, must not be `None`") # noqa: E501
self._vault_section_guid = vault_section_guid
@property
def vault_item_type(self):
"""Gets the vault_item_type of this VaultItem. # noqa: E501
The vault item type # noqa: E501
:return: The vault_item_type of this VaultItem. # noqa: E501
:rtype: object
"""
return self._vault_item_type
@vault_item_type.setter
def vault_item_type(self, vault_item_type):
"""Sets the vault_item_type of this VaultItem.
The vault item type # noqa: E501
:param vault_item_type: The vault_item_type of this VaultItem. # noqa: E501
:type: object
"""
if vault_item_type is None:
raise ValueError("Invalid value for `vault_item_type`, must not be `None`") # noqa: E501
self._vault_item_type = vault_item_type
@property
def is_sensitive(self):
"""Gets the is_sensitive of this VaultItem. # noqa: E501
Whether or not the vault item is considered sensitive. # noqa: E501
:return: The is_sensitive of this VaultItem. # noqa: E501
:rtype: bool
"""
return self._is_sensitive
@is_sensitive.setter
def is_sensitive(self, is_sensitive):
"""Sets the is_sensitive of this VaultItem.
Whether or not the vault item is considered sensitive. # noqa: E501
:param is_sensitive: The is_sensitive of this VaultItem. # noqa: E501
:type: bool
"""
if is_sensitive is None:
raise ValueError("Invalid value for `is_sensitive`, must not be `None`") # noqa: E501
self._is_sensitive = is_sensitive
@property
def notes(self):
"""Gets the notes of this VaultItem. # noqa: E501
Notes about this vault item # noqa: E501
:return: The notes of this VaultItem. # noqa: E501
:rtype: str
"""
return self._notes
@notes.setter
def notes(self, notes):
"""Sets the notes of this VaultItem.
Notes about this vault item # noqa: E501
:param notes: The notes of this VaultItem. # noqa: E501
:type: str
"""
self._notes = notes
@property
def user_name(self):
"""Gets the user_name of this VaultItem. # noqa: E501
The UserName of a credentialset # noqa: E501
:return: The user_name of this VaultItem. # noqa: E501
:rtype: str
"""
return self._user_name
@user_name.setter
def user_name(self, user_name):
"""Sets the user_name of this VaultItem.
The UserName of a credentialset # noqa: E501
:param user_name: The user_name of this VaultItem. # noqa: E501
:type: str
"""
self._user_name = user_name
@property
def password(self):
"""Gets the password of this VaultItem. # noqa: E501
The password associated with a credentialset # noqa: E501
:return: The password of this VaultItem. # noqa: E501
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""Sets the password of this VaultItem.
The password associated with a credentialset # noqa: E501
:param password: The password of this VaultItem. # noqa: E501
:type: str
"""
self._password = password
@property
def certificate_archive(self):
"""Gets the certificate_archive of this VaultItem. # noqa: E501
The certificate archive that is stored in this vault item, if applicable # noqa: E501
:return: The certificate_archive of this VaultItem. # noqa: E501
:rtype: object
"""
return self._certificate_archive
@certificate_archive.setter
def certificate_archive(self, certificate_archive):
"""Sets the certificate_archive of this VaultItem.
The certificate archive that is stored in this vault item, if applicable # noqa: E501
:param certificate_archive: The certificate_archive of this VaultItem. # noqa: E501
:type: object
"""
self._certificate_archive = certificate_archive
@property
def file_info(self):
"""Gets the file_info of this VaultItem. # noqa: E501
The file info that is stored in this vault item, if applicable # noqa: E501
:return: The file_info of this VaultItem. # noqa: E501
:rtype: object
"""
return self._file_info
@file_info.setter
def file_info(self, file_info):
"""Sets the file_info of this VaultItem.
The file info that is stored in this vault item, if applicable # noqa: E501
:param file_info: The file_info of this VaultItem. # noqa: E501
:type: object
"""
self._file_info = file_info
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(VaultItem, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VaultItem):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
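# Minimal usage sketch (illustrative only; the GUIDs and the vault_item_type
# value below are made up): the setters above require vault_item_guid,
# vault_section_guid, vault_item_type and is_sensitive to be non-None.
if __name__ == "__main__":
    item = VaultItem(
        vault_item_guid="00000000-0000-0000-0000-000000000000",
        vault_section_guid="11111111-1111-1111-1111-111111111111",
        vault_item_type="CredentialSet",  # assumed value; the field is typed as object
        is_sensitive=True,
        user_name="monitoring-user",
    )
    print(item.to_str())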
|
from django import forms
from .models import Competition, Training, Competitor, Trainingpresence, Driver, Event, Result, Location
from datetimewidget.widgets import DateTimeWidget
class CompetitionForm(forms.ModelForm):
class Meta:
model = Competition
fields = ['name', 'starttime', 'endtime', 'description', 'link', 'sign_in_date', 'subscription', 'selfsubscription', 'meeting_time', 'file', 'car_seats_required', 'car_seats_available', 'location', 'meetingpoint']
widgets = {
# Use localization and bootstrap 3
'starttime': DateTimeWidget(attrs={'id': "starttime"}, options={'format':'yyyy-mm-dd hh:ii'}),
'endtime': DateTimeWidget(attrs={'id': "endtime"}, options={'format':'yyyy-mm-dd hh:ii'}),
'sign_in_date': DateTimeWidget(attrs={'id': "starttime"}, options={'format': 'yyyy-mm-dd hh:ii'}),
'meeting_time': DateTimeWidget(attrs={'id': "endtime"}, options={'format': 'yyyy-mm-dd hh:ii'}),
}
class TrainingForm(forms.ModelForm):
class Meta:
model = Training
fields = ['name', 'starttime', 'endtime', 'intensity', 'short_description', 'detailed_description', 'file', 'location']
widgets = {
# Use localization and bootstrap 3
'starttime': DateTimeWidget(attrs={'id': "starttime"}, options={'format':'yyyy-mm-dd hh:ii'}),
'endtime': DateTimeWidget(attrs={'id': "endtime"}, options={'format':'yyyy-mm-dd hh:ii'}),
}
class CompetitorForm(forms.ModelForm):
class Meta:
model = Competitor
fields = ['yesorno', 'note', 'user', 'competition']
class TrainingpresenceForm(forms.ModelForm):
class Meta:
model = Trainingpresence
fields = ['yesorno', 'excused', 'feedback_to_coach', 'feedback_to_athlete', 'user', 'training']
class DriverForm(forms.ModelForm):
class Meta:
model = Driver
fields = ['number_seats', 'note', 'user', 'competition']
class EventForm(forms.ModelForm):
class Meta:
model = Event
fields = ['name', 'short_description', 'file', 'is_duration', 'is_distance', 'is_repetitions', 'detailed_description']
class ResultForm(forms.ModelForm):
class Meta:
model = Result
fields = ['duration', 'distance', 'repetitions', 'note', 'user', 'event', 'location']
class LocationForm(forms.ModelForm):
class Meta:
model = Location
fields = ['name', 'address', 'googlemapurl', 'description']
|
'''OpenGL extension NV.viewport_array2
This module customises the behaviour of the
OpenGL.raw.GLES2.NV.viewport_array2 to provide a more
Python-friendly API
Overview (from the spec)
This extension provides new support allowing a single primitive to be
broadcast to multiple viewports and/or multiple layers. A shader output
gl_ViewportMask[] is provided, allowing a single primitive to be output to
multiple viewports simultaneously. Also, a new shader option is provided
to control whether the effective viewport index is added into gl_Layer.
These capabilities allow a single primitive to be output to multiple
layers simultaneously.
The gl_ViewportMask[] output is available in vertex, tessellation
control, tessellation evaluation, and geometry shaders. gl_ViewportIndex
and gl_Layer are also made available in all these shader stages. The
actual viewport index or mask and render target layer values are taken
from the last active shader stage from this set of stages.
This extension is a superset of the GL_AMD_vertex_shader_layer and
GL_AMD_vertex_shader_viewport_index extensions, and thus those extension
strings are expected to be exported if GL_NV_viewport_array2 is
supported. This extension includes the edits for those extensions, recast
against the reorganized OpenGL 4.3 specification.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/viewport_array2.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.NV.viewport_array2 import *
from OpenGL.raw.GLES2.NV.viewport_array2 import _EXTENSION_NAME
def glInitViewportArray2NV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
|
#!/usr/bin/env python
#
# Copyright (c) 2018 TrueChain Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import os
# import sys
import ecdsa
# from collections import defaultdict
from fastchain.ecdsa_sig import generate_keys
from fastchain.node import Node
# C = ecdsa.NIST256p
# SIG_SIZE = 64
# HASH_SIZE = 32
class Mempools(object):
def __init__(self):
self.txpool = []
# def __repr__(self):
# return "Fork(mempool) --> snailpool"
class SubProtoDailyBFT(object):
'''
Subprotocol Daily BFT[R]
'''
def __init__(self, current_day=None):
self.R = current_day
self.l = 0
self.log = []
self.mykeys = []
self.comm = []
self.nodes = []
self.TXs = []
def keygen(self):
"""
@pk public key
@sk secret key
"""
pk, sk = generate_keys()
self.mykeys.append(pk)
return pk
def stop(self):
# TODO: add signal / queue
# for BFTpk in self.nodes:
pass
def start(self, comm):
pass
def forkVirtualNode(self):
BFTpk = Node()
def trigger(self, isMember=False):
if isMember:
for pk in set(self.mykeys) & set(self.comm):
self.forkVirtualNode()
|
## this is where i test all of my shit code
## before I put it in my other shit code
# from selenium import webdriver
#
# PROXY = '142.54.163.90:19006' # IP:PORT
#
# chrome_options = webdriver.Options()
# chrome_options.add_argument('--proxy-server=http://%s' % PROXY)
#
# chrome = webdriver.Chrome(chrome_options=chrome_options, executable_path='/users/drew/Projects/drivers/chromedriver73/chromedriver' )
# chrome.get("http://whatismyipaddress.com")
#################
# I am an idiot hahahahahahaha
# import json
#
# with open('drew.drew') as data:
# data = json.load(data)
#
# print(data['productType'])
#################
# import checkStock
#
# destinations = checkStock.main()
#################
import csv
import threading
proxies = []
with open('/users/drew/Projects/proxies/proxies.csv') as f:
f = csv.reader(f)
for elem in f:
for proxy in elem:
proxies.append(proxy)
# give us our proxies
list = [proxy for proxy in proxies]
def getThread(list):
    pass  # body missing in the original source; placeholder so the file parses
|
"""Intranet de la Rez - Main Pages Routes"""
import datetime
import json
import flask
from flask_babel import _
from discord_webhook import DiscordWebhook
from app import context
from app.main import bp, forms
from app.models import Ban
from app.tools import captcha, utils, typing
@bp.route("/")
@bp.route("/index")
@context.all_good_only
def index() -> typing.RouteReturn:
"""IntraRez home page for the internal network."""
return flask.render_template("main/index.html", title=_("Accueil"))
@bp.route("/external_home")
def external_home() -> typing.RouteReturn:
"""IntraRez homepage for the Internet."""
if flask.g.internal:
return utils.ensure_safe_redirect("auth.auth_needed")
return flask.render_template("main/external_home.html",
title=_("Bienvenue sur l'IntraRez !"))
@bp.route("/contact", methods=["GET", "POST"])
def contact() -> typing.RouteReturn:
"""IntraRez contact page."""
with open("app/static/gris.json") as fp:
gris = json.load(fp)
form = forms.ContactForm()
if form.validate_on_submit():
if (not flask.g.internal) and (not captcha.verify_captcha()):
flask.flash(_("Le captcha n'a pas pu être vérifié. "
"Veuillez réessayer."), "danger")
else:
role_id = flask.current_app.config["GRI_ROLE_ID"]
webhook = DiscordWebhook(
url=flask.current_app.config["MESSAGE_WEBHOOK"],
content=f"<@&{role_id}> Nouveau message !",
)
webhook.add_embed(form.create_embed())
rep = webhook.execute()
if rep:
flask.flash(_("Message transmis !"), "success")
return utils.ensure_safe_redirect("main.index")
flask.flash(flask.Markup(_(
"Oh non ! Le message n'a pas pu être transmis. N'hésitez pas "
"à contacter un GRI aux coordonnées en bas de page.<br/>"
"(Erreur : ") + f"<code>{rep.code} / {rep.text}</code>)"),
"danger"
)
return flask.render_template("main/contact.html", title=_("Contact"),
form=form, gris=gris)
@bp.route("/legal")
def legal() -> typing.RouteReturn:
"""IntraRez legal page."""
return flask.render_template("main/legal.html",
title=_("Mentions légales"))
@bp.route("/changelog")
def changelog() -> typing.RouteReturn:
"""IntraRez changelog page."""
return flask.render_template("main/changelog.html",
title=_("Notes de mise à jour"),
datetime=datetime)
@bp.route("/connect_check")
@context.internal_only
def connect_check() -> typing.RouteReturn:
"""Connect check page."""
return flask.render_template("main/connect_check.html",
title=_("Accès à Internet"))
@bp.route("/banned")
def banned() -> typing.RouteReturn:
"""Page shown when the Rezident is banned."""
flask.g._ban = 1
try:
ban = Ban.query.get(flask.g._ban)
except AttributeError:
return utils.redirect_to_next()
return flask.render_template("main/banned.html", ban=ban,
title=_("Accès à Internet restreint"))
@bp.route("/home")
def rickroll() -> typing.RouteReturn:
"""The old good days..."""
if flask.g.logged_in:
with open("logs/rickrolled.log", "a") as fh:
fh.write(f"{datetime.datetime.now()}: rickrolled "
f"{flask.g.rezident.full_name}\n")
return flask.redirect("https://www.youtube.com/watch?v=dQw4w9WgXcQ")
@bp.route("/test")
@context.gris_only
def test() -> typing.RouteReturn:
"""Test page."""
# return flask.render_template("errors/other.html")
# flask.abort(403)
# raise RuntimeError("obanon")
utils.log_action("Bonjour ceci est un test")
flask.flash("Succès", "success")
flask.flash("Info", "info")
flask.flash("Warning", "warning")
flask.flash("Danger", "danger")
pt = {}
pt["BRF"] = flask.current_app.before_request_funcs
pt["ARF"] = flask.current_app.after_request_funcs
for name in dir(flask.request):
if name.startswith("_"):
continue
obj = getattr(flask.request, name)
if not callable(obj):
pt[name] = obj
return flask.render_template("main/test.html", title=_("Test"), pt=pt)
@bp.route("/test_mail/<blueprint>/<template>")
@context.gris_only
def test_mail(blueprint: str, template: str) -> typing.RouteReturn:
"""Mails test route"""
from app.email import process_html, html_to_plaintext
body = flask.render_template(f"{blueprint}/mails/{template}.html",
rezident=flask.g.rezident,
token="sample_t0ken",
sub=flask.g.rezident.current_subscription)
body = process_html(body)
if flask.request.args.get("txt"):
return f"<pre>{flask.escape(html_to_plaintext(body))}</pre>"
else:
return body
|
from lxml import html
from pytracking.tracking import get_configuration, get_open_tracking_url, get_click_tracking_url
DEFAULT_ATTRIBUTES = {"border": "0", "width": "0", "height": "0", "alt": ""}
def adapt_html(html_text, extra_metadata, click_tracking=True, open_tracking=True, configuration=None, **kwargs):
"""Changes an HTML string by replacing links (<a href...>) with tracking
links and by adding a 1x1 transparent pixel just before the closing body
tag.
:param html_text: The HTML to change (unicode or bytestring).
:param extra_metadata: A dict that can be json-encoded and that will
be encoded in the tracking link.
:param click_tracking: If links (<a href...>) must be changed.
:param open_tracking: If a transparent pixel must be added before the
closing body tag.
:param configuration: An optional Configuration instance.
:param kwargs: Optional configuration parameters. If provided with a
Configuration instance, the kwargs parameters will override the
Configuration parameters.
"""
configuration = get_configuration(configuration, kwargs)
tree = html.fromstring(html_text)
if click_tracking:
_replace_links(tree, extra_metadata, configuration)
if open_tracking:
_add_tracking_pixel(tree, extra_metadata, configuration)
new_html_text = html.tostring(tree)
return new_html_text.decode("utf-8")
def _replace_links(tree, extra_metadata, configuration):
for (element, attribute, link, pos) in tree.iterlinks():
if element.tag == "a" and attribute == "href" and _valid_link(link):
new_link = get_click_tracking_url(link, extra_metadata, configuration)
element.attrib["href"] = new_link
def _add_tracking_pixel(tree, extra_metadata, configuration):
url = get_open_tracking_url(extra_metadata, configuration)
pixel = html.Element("img", {"src": url})
tree.body.append(pixel)
def _valid_link(link):
return link.startswith("http://") or link.startswith("https://") or link.startswith("//")
|
from __future__ import division, unicode_literals, print_function
"""
Utility classes for retrieving elemental properties. Provides
a uniform interface to several different elemental property resources
including ``pymatgen`` and ``Magpie``.
"""
import os
import json
import six
import abc
import numpy as np
import pandas as pd
from glob import glob
from pymatgen import Element
from pymatgen.core.periodic_table import _pt_data
__author__ = 'Kiran Mathew, Jiming Chen, Logan Ward, Anubhav Jain'
module_dir = os.path.dirname(os.path.abspath(__file__))
class AbstractData(six.with_metaclass(abc.ABCMeta)):
"""Abstract class for retrieving elemental properties
All classes must implement the `get_elemental_property` operation. These operations
should return scalar values (ideally floats) and `nan` if a property does not exist"""
@abc.abstractmethod
def get_elemental_property(self, elem, property_name):
"""Get a certain elemental property for a certain element.
Args:
elem - (Element) element to be assessed
            property_name - (str) property to be retrieved
Returns:
float, property of that element
"""
pass
def get_elemental_properties(self, elems, property_name):
"""Get elemental properties for a list of elements
Args:
elems - ([Element]) list of elements
property_name - (str) property to be retrieved
Returns:
[float], properties of elements
"""
return [self.get_elemental_property(e, property_name) for e in elems]
class OxidationStatesMixin(six.with_metaclass(abc.ABCMeta)):
"""Abstract class interface for retrieving the oxidation states
of each element"""
@abc.abstractmethod
def get_oxidation_states(self, elem):
"""Retrieve the possible oxidation states of an element
Args:
elem - (Element), Target element
Returns:
[int] - oxidation states
"""
pass
class OxidationStateDependentData(AbstractData):
"""Abstract class that also includes oxidation-state-dependent properties"""
@abc.abstractmethod
def get_charge_dependent_property(self, element, charge, property_name):
"""Retrieve a oxidation-state dependent elemental property
Args:
element - (Element), Target element
charge - (int), Oxidation state
property_name - (string), name of property
Return:
(float) - Value of property
"""
pass
def get_charge_dependent_property_from_specie(self, specie, property_name):
"""Retrieve a oxidation-state dependent elemental property
Args:
specie - (Specie), Specie of interest
property_name - (string), name of property
Return:
(float) - Value of property
"""
return self.get_charge_dependent_property(specie.element, specie.oxi_state, property_name)
class CohesiveEnergyData(AbstractData):
"""Get the cohesive energy of an element.
Data is extracted from KnowledgeDoor Cohesive Energy Handbook online
(http://www.knowledgedoor.com/2/elements_handbook/cohesive_energy.html),
which in turn got the data from Introduction to Solid State Physics,
8th Edition, by Charles Kittel (ISBN 978-0-471-41526-8), 2005.
"""
def __init__(self):
# Load elemental cohesive energy data from json file
with open(os.path.join(module_dir, 'data_files',
'cohesive_energies.json'), 'r') as f:
self.cohesive_energy_data = json.load(f)
def get_elemental_property(self, elem, property_name='cohesive energy'):
"""
Args:
elem: (Element) Element of interest
property_name (str): unused, always returns cohesive energy
Returns:
(float): cohesive energy of the element
"""
return self.cohesive_energy_data[elem]
class DemlData(OxidationStateDependentData, OxidationStatesMixin):
"""
Class to get data from Deml data file. See also: A.M. Deml,
R. O'Hayre, C. Wolverton, V. Stevanovic, Predicting density functional
theory total energies and enthalpies of formation of metal-nonmetal
compounds by linear regression, Phys. Rev. B - Condens. Matter Mater. Phys.
93 (2016).
"""
def __init__(self):
from matminer.utils.data_files.deml_elementdata import properties
self.all_props = properties
self.available_props = list(self.all_props.keys()) + \
["formal_charge", "valence_s", "valence_p",
"valence_d", "first_ioniz", "total_ioniz"]
# Compute the FERE correction energy
fere_corr = {}
for k, v in self.all_props["GGAU_Etot"].items():
fere_corr[k] = self.all_props["mus_fere"][k] - v
self.all_props["FERE correction"] = fere_corr
# List out the available charge-dependent properties
self.charge_dependent_properties = ["xtal_field_split", "magn_moment", "so_coupling", "sat_magn"]
def get_elemental_property(self, elem, property_name):
if "valence" in property_name:
valence_dict = self.all_props["valence_e"][
self.all_props["col_num"][elem.symbol]]
if property_name[-1] in ["s", "p", "d"]:
# Return one of the shells
return valence_dict[property_name[-1]]
else:
return sum(valence_dict.values())
elif property_name == "first_ioniz":
return self.all_props["ionization_en"][elem.symbol][0]
else:
return self.all_props[property_name].get(elem.symbol, float("NaN"))
def get_oxidation_states(self, elem):
return self.all_props["charge_states"][elem.symbol]
def get_charge_dependent_property(self, element, charge, property_name):
if property_name == "total_ioniz":
if charge < 0:
raise ValueError("total ionization energy only defined for charge > 0")
return sum(self.all_props["ionization_en"][element.symbol][:charge])
else:
return self.all_props[property_name].get(element.symbol, {}).get(charge, np.nan)
class MagpieData(AbstractData, OxidationStatesMixin):
"""
Class to get data from Magpie files. See also:
L. Ward, A. Agrawal, A. Choudhary, C. Wolverton, A general-purpose machine
learning framework for predicting properties of inorganic materials,
Npj Comput. Mater. 2 (2016) 16028.
"""
def __init__(self):
self.all_elemental_props = dict()
available_props = []
self.data_dir = os.path.join(module_dir, "data_files",
'magpie_elementdata')
# Make a list of available properties
for datafile in glob(os.path.join(self.data_dir, "*.table")):
available_props.append(
os.path.basename(datafile).replace('.table', ''))
# parse and store elemental properties
for descriptor_name in available_props:
with open(os.path.join(self.data_dir,
'{}.table'.format(descriptor_name)),
'r') as f:
self.all_elemental_props[descriptor_name] = dict()
lines = f.readlines()
for atomic_no in range(1, len(_pt_data) + 1): # max Z=103
try:
if descriptor_name in ["OxidationStates"]:
prop_value = [float(i) for i in
lines[atomic_no - 1].split()]
else:
prop_value = float(lines[atomic_no - 1])
except ValueError:
prop_value = float("NaN")
self.all_elemental_props[descriptor_name][
Element.from_Z(atomic_no).symbol] = prop_value
def get_elemental_property(self, elem, property_name):
return self.all_elemental_props[property_name][elem.symbol]
def get_oxidation_states(self, elem):
return self.all_elemental_props["OxidationStates"][elem.symbol]
class PymatgenData(OxidationStateDependentData, OxidationStatesMixin):
"""
Class to get data from pymatgen. See also:
S.P. Ong, W.D. Richards, A. Jain, G. Hautier, M. Kocher, S. Cholia, et al.,
Python Materials Genomics (pymatgen): A robust, open-source python library
for materials analysis, Comput. Mater. Sci. 68 (2013) 314-319.
"""
def __init__(self, use_common_oxi_states=True):
self.use_common_oxi_states = use_common_oxi_states
def get_elemental_property(self, elem, property_name):
if property_name == "block":
block_key = {"s": 1.0, "p": 2.0, "d": 3.0, "f": 3.0}
return block_key[getattr(elem, property_name)]
else:
value = getattr(elem, property_name)
return np.nan if value is None else value
def get_oxidation_states(self, elem):
"""Get the oxidation states of an element
Args:
elem - (Element) target element
common - (boolean), whether to return only the common oxidation states,
or all known oxidation states
Returns:
[int] list of oxidation states
"""
return elem.common_oxidation_states if self.use_common_oxi_states \
else elem.oxidation_states
def get_charge_dependent_property(self, element, charge, property_name):
return getattr(element, property_name)[charge]
class MixingEnthalpy:
"""
Values of :math:`\Delta H^{max}_{AB}` for different pairs of elements.
Based on the Miedema model. Tabulated by:
A. Takeuchi, A. Inoue, Classification of Bulk Metallic Glasses by Atomic
Size Difference, Heat of Mixing and Period of Constituent Elements and
Its Application to Characterization of the Main Alloying Element.
Mater. Trans. 46, 2817–2829 (2005).
"""
def __init__(self):
mixing_dataset = pd.read_csv(os.path.join(module_dir, 'data_files',
'MiedemaLiquidDeltaHf.tsv'),
delim_whitespace=True)
self.mixing_data = {}
for a, b, dHf in mixing_dataset.itertuples(index=False):
key = tuple(sorted((a, b)))
self.mixing_data[key] = dHf
def get_mixing_enthalpy(self, elemA, elemB):
"""
Get the mixing enthalpy between different elements
Args:
elemA (Element): An element
elemB (Element): Second element
Returns:
(float) mixing enthalpy, nan if pair is not in a table
"""
key = tuple(sorted((elemA.symbol, elemB.symbol)))
return self.mixing_data.get(key, np.nan)
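# A brief usage sketch (illustrative): query elemental data through two of the
# sources defined above.  "X" is pymatgen's attribute for Pauling
# electronegativity; "Electronegativity" is assumed to be one of the Magpie
# table names shipped with matminer.
if __name__ == "__main__":
    fe = Element("Fe")
    pmg_data = PymatgenData()
    print(pmg_data.get_elemental_property(fe, "X"))   # Pauling electronegativity
    print(pmg_data.get_oxidation_states(fe))          # common oxidation states
    magpie_data = MagpieData()
    print(magpie_data.get_elemental_property(fe, "Electronegativity"))  # assumed table name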
|