| column | type | length / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2-616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-69 |
| license_type | string | 2 classes |
| repo_name | string | length 5-118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4-63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k-686M, may be null (⌀) |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2-10.3M |
| extension | string | 246 classes |
| content | string | length 2-10.3M |
| authors | list | length 1-1 |
| author_id | string | length 0-212 |
cca87d464d7c97efeb9fb43c0d6328b3a9531f32
|
bfdb3daf5fb202087c8e578a33787d61afa9b43b
|
/approximate_matcher/__init__.py
|
ce4c677f6c4a1b258d188785c289068d850b4ae6
|
[] |
no_license
|
ptefu/SNP_Mutation_Finding
|
09cc7a24f19300f6efa49a76eab54170e09ed0e1
|
1b7b7aa54755ca4242cd84d1f17f78a002acec81
|
refs/heads/master
| 2021-01-18T13:26:50.223990
| 2016-09-21T12:50:26
| 2016-09-21T12:50:26
| 68,816,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,272
|
py
|
from bwt import BWT
from seed_and_check import SeedChecker
# class encapsulating approximate matching with
# seed and check strategy, using BWT for exact matching
# of seeds
#
# Fields:
# _text: target string, with '$' character appended
# _bwt: object of class BWT, used for exact matching of seeds
class ApproximateMatcher:
def __init__(self, target):
self._text = target + '$'
self._bwt = BWT(self._text)
# return indices in target that contain
# matches of string pattern with up to d
# mismatches
def get_matches(self, pattern, d):
# initialize seed and check object
seed_checker = SeedChecker(pattern, d)
# for each seed k-mer in pattern
for seed, seed_index in seed_checker.enumerate():
# find exact matches of seed using BWT
indices = self._bwt.get_matches(seed)
# add candidate approximate matches based on
# seed exact matches
seed_checker.add_candidates(indices, seed_index)
# verify that candidate approximate matches are within
# minimum edit distance, and return final matches
matches = seed_checker.filter_candidates(self._text)
return matches
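# Usage sketch (illustrative, not part of the original module), assuming that
# BWT.get_matches returns the start indices of exact seed occurrences in the text
# and that SeedChecker behaves as the comments above describe:
#
#     matcher = ApproximateMatcher('ACGTACGTTACG')
#     hits = matcher.get_matches('ACGA', 1)  # indices matching with at most 1 mismatch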
|
[
"noreply@github.com"
] |
ptefu.noreply@github.com
|
980437e82e17e3bf36c9971ae0f863c06893be96
|
7e0907bfe3fc66b2717ff43bbfc496d36e6ca733
|
/objectfactory/factory.py
|
04140eff2743bb8ec8da0b9656c6e00bbe61c1e4
|
[
"MIT"
] |
permissive
|
devinaconley/py-object-factory
|
3941fff594d183cbef940288e02c97a0246ba62f
|
6c97821feea8c47f7ad909cedbe57938c92761aa
|
refs/heads/develop
| 2023-02-05T19:02:37.930532
| 2021-09-28T21:40:14
| 2021-09-28T21:40:14
| 144,293,421
| 12
| 0
|
MIT
| 2023-08-17T22:15:42
| 2018-08-10T14:03:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,695
|
py
|
"""
factory module
implements serializable object factory
"""
# lib
from typing import Type, TypeVar
# src
from .serializable import Serializable
# type var for hinting from generic function
T = TypeVar( 'T', bound=Serializable )
class Factory( object ):
"""
factory class for registering and creating serializable objects
"""
def __init__( self, name ):
self.name = name
self.registry = {}
def register( self, serializable: Serializable ):
"""
decorator to register class with factory
:param serializable: serializable object class
:return: registered class
"""
self.registry[serializable.__module__ + '.' + serializable.__name__] = serializable
self.registry[serializable.__name__] = serializable
return serializable
def create( self, body: dict, object_type: Type[T] = Serializable ) -> T:
"""
create object from dictionary
:param body: serialized object data
:param object_type: (optional) specified object type
:raises ValueError: if the object type is not found in the factory registry
:raises TypeError: if the object is not an instance of the specified type
:return: deserialized object of specified type
"""
obj = None
try:
obj = self.registry[body['_type']]()
except KeyError:
pass
if obj is None:
try:
obj = self.registry[body['_type'].split( '.' )[-1]]()
except KeyError:
pass
if obj is None:
raise ValueError(
'Object type {} not found in factory registry'.format( body['_type'] )
)
if not isinstance( obj, object_type ):
raise TypeError(
'Object type {} is not a {}'.format(
type( obj ).__name__,
object_type.__name__
)
)
obj.deserialize( body )
return obj
# global registry
_global_factory = Factory( 'global' )
def create( body: dict, object_type: Type[T] = Serializable ) -> T:
"""
create object from dictionary with the global factory
:param body: serialized object data
:param object_type: (optional) specified object type
:raises ValueError: if the object type is not found in the factory registry
:raises TypeError: if the object is not an instance of the specified type
:return: deserialized object of specified type
"""
return _global_factory.create( body, object_type=object_type )
def register( serializable: Serializable ):
"""
decorator to register class with the global factory
:param serializable: serializable object class
:return: registered class
"""
return _global_factory.register( serializable )
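# Usage sketch (illustrative, not part of the original module): with a hypothetical
# Serializable subclass, the global factory defined above could be exercised roughly
# like this, since create() resolves the registered class from the '_type' field and
# then calls deserialize() on it:
#
#     @register
#     class MyModel(Serializable):
#         ...
#
#     obj = create({'_type': 'MyModel'}, object_type=MyModel)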
|
[
"devinaconley@gmail.com"
] |
devinaconley@gmail.com
|
261edbb03d21edcfbbddac4cbc666aaf4627ef69
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_199/1122.py
|
85e9b1f3a0e158754afb90be8fab4af477aa1b48
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 619
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 8 18:03:36 2017
@author: marshi
"""
def flipper(s,k):
'''
s:str(ex '-++')
k:str(ex '3')
'''
k = int(k)
s = [1 if c=='+' else 0 for c in s]
flip_range = len(s)-k+1
ret = 0
for i in range(flip_range):
if s[i] == 0:
s[i:i+k] = [(s[j]+1)%2 for j in range(i,i+k)]
ret += 1
if sum(s)==len(s):
return str(ret)
else:
return 'IMPOSSIBLE'
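# Worked example (added for illustration): flipper('---+-++-', '3') flips the windows
# starting at indices 0, 4 and 5 and returns '3', while flipper('-+-+-', '4') cannot
# turn every pancake to '+' and returns 'IMPOSSIBLE'.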
n = int(input())
for i in range(n):
s,k = input().split(' ')
ret = flipper(s,k)
print('Case #%d: %s'%(i+1,ret))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
dc24d3f5bbfdcd6719b2dce791a8e9b807c6f9df
|
040f749318ab420de29e1a8d4814f48279aa903c
|
/Radiosondenaufsteig/docs/source/tephigram.py
|
28792da77eb3af771f09d2aff01c56a1ed059260
|
[] |
no_license
|
meindlm97/Radiosondenaufstieg
|
14df1df2eed066e4e644aca9dabd884d1f68b0ce
|
396affe151aa3bd773b8255fde07b2a9c22d5a6a
|
refs/heads/master
| 2020-12-03T16:58:05.186176
| 2020-01-06T22:33:33
| 2020-01-06T22:33:33
| 231,399,091
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,132
|
py
|
#!/usr/bin/env python
"""
**Title: Radiosondenaufstieg**
*Author: Maximilian Meindl*
Description: Short program to plot a tephigram which includes a temperature and a humidity line. In the second part a geographical map is plotted which shows the ascent point of the radiosonde.
"""
# Import the required packages
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from tephigram_python import Tephigram
from mpl_toolkits.basemap import Basemap
# Initialization of tephigram
tephigram = Tephigram(x_range=(-40,60))
def read_in():
"""
**Title: Function for reading in the data**
*Description: The file called 'Wien.dat' includes all measurement data which is needed. The following attributes are defined as global and represent the columns of the file.*
:variable P: pressure
:variable T: temperature
:variable T_dp: dew point temperature
:variable RH: relative humidity
:method np.loadtxt: loads the text from the file into the read_in function of the python-program.
:variable sounding: contains the columns of the file loaded before
"""
sounding = np.loadtxt('Wien.dat', unpack=True)
global P
global T
global T_dp
global RH
P = sounding[0]
T = sounding[2]
T_dp = sounding[3]
RH = sounding[4]/100
def plot_temp():
"""
**Title: Function for plotting the temperature line**
*Description: Plotting line of temperature*
:method plot_temp: plots line of temperature depending on pressure and temperature
"""
tephigram.plot_temp(P=P, T=T)
def plot_dewpoint_temp():
"""
**Title: Function for plotting the humidity line**
*Description: Plotting line of humidity*
:method plot_sounding: plots line of humidity depending on pressure, temperature and dewpoint temperature
"""
tephigram.plot_sounding(P=P, T=T, T_dp=T_dp)
def plot_legend():
"""
**Title: Function for plotting the legend**
*Description: Plotting legend*
:method plot_legend: plots legend which contains meteorological parameters
:method savefig: saves the plot as png-File
"""
tephigram.plot_legend()
# Saving the Plot as png-File
tephigram.savefig('tephigram.png')
def plot_card():
"""
**Title: Function for plotting a geographical map**
*Description: Plotting a geographical map which contains a marker and text*
:variable fig: plots figure with figuresize 12,8
:variable m: creates a Basemap instance
:variable parallels: includes coordinates range and interval
:variable meridians: includes coordinates range and interval
:variable Vienna: includes coordinates of the city of Vienna
:method m.drawparallels: draws parallels
:method m.drawmeridians: draws meridians
:method m.drawcoastlines: draws coastlines
:method m.drawstates: draws states
:method m.drawcountries: draws countries
:method m.drawlsmask: coloring the ocean and the land
:method m.shadedrelief: add relief
:method m.plot: adds a marker to the map at the position of Vienna
:method plt.text: adds the name of the city to the marker
:method plt.title: adds a title above the map plotted before
:method plt.savefig: saves the map as a png file
:method plt.show: shows plot after setting the properties
"""
fig = plt.figure(figsize=(12,8))
# create a Basemap instance
m = Basemap(projection='cyl', # try different projection, e.g. 'merc' for mercator
llcrnrlon = 5.0, llcrnrlat = 45,
urcrnrlon = 30, urcrnrlat = 62.5,
resolution='h', area_thresh=10.)
# set properties...
parallels = np.arange(-90.,90.,10.0)
m.drawparallels(parallels,labels=[True,False,False,True],fontsize=10)
# draw meridians
meridians = np.arange(-180.0,180.0,10.0)
m.drawmeridians(meridians,labels=[True,False,False,True],fontsize=10)
# draw coastlines, states and countries
m.drawcoastlines()
m.drawstates()
m.drawcountries()
# coloring the oceans and the land
m.drawlsmask(land_color='lightgreen', ocean_color='aqua', lakes=True)
# add relief
m.shadedrelief()
# draw some data to the map (coordinates)
Vienna = (16.3720800, 48.2084900)
# transform coordinates for the projection
Vienna=m(Vienna[0], Vienna[1])
# add marker to the map at the position of Vienna
m.plot(Vienna[0], Vienna[1], 'c*', markersize=12, color='black') # the markers are the same as in the case of normal plot..
# add name of city to the marker
plt.text(Vienna[0]+0.25, Vienna[1]+0.25, "Vienna",size=18)
# add title above the map plotted before
plt.title("09.Juli 2018, 12:00 UTC, Hohe Warte 38 (ZAMG), Vienna")
# save the map as a png file and show the plot
plt.savefig('basemap.png')
plt.show()
# Execution of the previously defined functions
if __name__ == "__main__":
read_in()
plot_temp()
plot_dewpoint_temp()
plot_legend()
plot_card()
|
[
"noreply@github.com"
] |
meindlm97.noreply@github.com
|
b8e37fd7750fd45aa1e19ae924b11d15d1cdaeec
|
9e25a087c55cdf8acbd80d41c3a1fff702ca8593
|
/python/custom/models.py
|
267a0d7bbbff53bcf78c057ce51196d89d5a61e5
|
[] |
no_license
|
hav4ik/unzip-nets
|
7dd04072fe434cb45bca844866dc272357664f32
|
f10926ca9131e3c5ab77a979f30a1dcd332a9b8f
|
refs/heads/master
| 2020-03-30T21:43:32.372606
| 2019-01-04T08:36:01
| 2019-01-04T08:36:01
| 151,639,696
| 2
| 0
| null | 2018-10-31T23:40:39
| 2018-10-04T21:44:34
|
Python
|
UTF-8
|
Python
| false
| false
| 5,976
|
py
|
import tensorflow as tf
layers = tf.keras.layers
def _get_keras_update_ops(model):
"""Gets a list of update_ops of keras model
"""
update_ops = []
for op in model.updates:
if isinstance(op, tuple):
update_ops.append(tf.assign(op[0], op[1]))
else:
update_ops.append(op)
return update_ops
def _get_keras_regularizers(model):
"""Gets a list of update_ops of keras model
"""
regularizers = None
if len(model.losses) > 0:
regularizers = tf.add_n(model.losses)
return regularizers
def lenet(outputs):
inputs = layers.Input(shape=(32, 32, 3))
x = layers.Conv2D(32, (3, 3), kernel_initializer='glorot_uniform')(inputs)
x = layers.Activation('relu')(x)
x = layers.Conv2D(64, (3, 3), kernel_initializer='glorot_uniform')(x)
x = layers.Activation('relu')(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)
x = layers.Flatten()(x)
x = layers.Dense(128, activation='relu', kernel_initializer='glorot_uniform')(x)
x = layers.Dropout(0.5)(x)
ys = []
for output in outputs:
ys.append(layers.Dense(output['num'], activation='softmax',
kernel_initializer='glorot_uniform')(x))
model = tf.keras.models.Model(inputs=[inputs], outputs=ys)
return (model.inputs, model.outputs,
_get_keras_update_ops(model), _get_keras_regularizers(model))
def alexnet(outputs):
img_in = layers.Input(shape=(32, 32, 3))
x = layers.Convolution2D(
32, (3, 3), kernel_regularizer=None, padding='same')(img_in)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
x = layers.Convolution2D(
32, (3, 3), kernel_regularizer=None, padding='same')(x)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)
x = layers.Convolution2D(
64, (3, 3), kernel_regularizer=None, padding='same')(x)
x = layers.Activation('relu')(x)
x = layers.BatchNormalization()(x)
x = layers.Convolution2D(
64, (3, 3), kernel_regularizer=None, padding='same')(x)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)
x = layers.Convolution2D(
128, (3, 3), kernel_regularizer=None, padding='same')(x)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
x = layers.Convolution2D(
128, (3, 3), kernel_regularizer=None, padding='same')(x)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)
x = layers.Flatten()(x)
ys = []
for output in outputs:
ys.append(layers.Dense(
output['num'], activation='softmax')(x))
model = tf.keras.models.Model(inputs=[img_in], outputs=ys)
return (model.inputs, model.outputs,
_get_keras_update_ops(model), _get_keras_regularizers(model))
def resnet_20(outputs):
def residual_block(x, o_filters, block_count, increase=False):
stride = (1, 1) if not increase else (2, 2)
with tf.variable_scope('block_{}'.format(block_count)):
o1 = layers.Activation('relu')(layers.BatchNormalization()(x))
conv_1 = layers.Conv2D(
o_filters, kernel_size=(3, 3), strides=stride, padding='same',
kernel_initializer="he_normal")(o1)
o2 = layers.Activation('relu')(layers.BatchNormalization()(conv_1))
conv_2 = layers.Conv2D(
o_filters, kernel_size=(3, 3), strides=(1, 1), padding='same',
kernel_initializer="he_normal")(o2)
if increase:
projection = layers.Conv2D(
o_filters, kernel_size=(1, 1), strides=(2, 2), padding='same',
kernel_initializer="he_normal")(o1)
block = layers.add([conv_2, projection])
else:
block = layers.add([conv_2, x])
return block
img_input = layers.Input(shape=(32, 32, 3))
stack_n = 2
x = layers.Conv2D(
filters=16, kernel_size=(3, 3), strides=(1, 1), padding='same',
kernel_initializer="he_normal")(img_input)
block_count = 1
for _ in range(stack_n):
block_count += 1
x = residual_block(x, 16, block_count, False)
block_count += 1
x = residual_block(x, 32, block_count, True)
for _ in range(1, stack_n):
block_count += 1
x = residual_block(x, 32, block_count, False)
block_count += 1
x = residual_block(x, 64, block_count, True)
for _ in range(1, stack_n):
block_count += 1
x = residual_block(x, 64, block_count, False)
ys = []
for output in outputs:
y = layers.BatchNormalization()(x)
y = layers.Activation('relu')(y)
y = layers.GlobalAveragePooling2D()(y)
ys.append(layers.Dense(
output['num'], activation='softmax',
kernel_initializer='he_normal')(y))
model = tf.keras.models.Model(inputs=[img_input], outputs=ys)
return (model.inputs, model.outputs,
_get_keras_update_ops(model), _get_keras_regularizers(model))
def mobilenetv2_mtc(num_outputs=[1000],
freeze_till='out_relu',
alpha=1.0,
size=224):
"""Loads a MobileNetV2 network for multiple classification tasks (MTC)
"""
bottom = tf.keras.applications.mobilenet_v2.MobileNetV2(
input_shape=(size, size, 3), alpha=alpha, include_top=False)
x = bottom.outputs[0]
x = layers.GlobalAveragePooling2D()(x)
outputs = []
for i in range(len(num_outputs)):
y = layers.Dense(num_outputs[i], activation='softmax')(x)
outputs.append(y)
model = tf.keras.models.Model(inputs=bottom.inputs, outputs=outputs)
return (bottom.inputs, outputs,
_get_keras_update_ops(model), _get_keras_regularizers(model))
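# Usage sketch (illustrative, not part of the original module): each builder above
# expects `outputs` as a list of dicts with a 'num' entry giving the number of classes
# per task, e.g.
#
#     inputs, outputs, update_ops, regularizers = lenet([{'num': 10}, {'num': 100}])
#
# builds a two-headed LeNet with a 10-way and a 100-way softmax output.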
|
[
"tran.thecoder@gmail.com"
] |
tran.thecoder@gmail.com
|
1ca9e7a15a4e3592e15187e133c4201e94259a15
|
45196ae7cd92bed9889c8396476b4d55e3a2a435
|
/Working/ClassExamples/1_2.py
|
052800e7fb7ecc37811f24a5552ec2bf17125415
|
[] |
no_license
|
StRobertCHSCS/fabroa-PHRZORO
|
3b67dbabd747cc8025c0fb0cf9d51bcc167ee617
|
2b6cb2b900029ea6997d417dd2abd8f0d4364535
|
refs/heads/master
| 2020-07-24T21:28:46.922129
| 2019-12-09T14:46:24
| 2019-12-09T14:46:24
| 208,054,361
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
print(12345//10)
print(12345//100)
print(12345//1000)
print(12345 % 10)
print(12345 % 100)
print(12345 % 1000)
print(5 + 30 * 20)
print((5 + 30) * 20)
print(((5 + 30) * 20) // 10)
print("y"+"e"*2+"t")
print("a"+str(3))
|
[
"charlie.ma22@ycdsbk12.ca"
] |
charlie.ma22@ycdsbk12.ca
|
dd8d212b9e0b58bef8a3d146df4ed91f431c6704
|
c1bc72c54c607d7431dff293824659d640c41f2c
|
/djapps/auth/local/gaemodels.py
|
ebc1778d185d09fc62f15c49ec25329625ca4946
|
[] |
no_license
|
panyam/djapps
|
ded6ac4777048750986d9b554434d3983c297c3a
|
e54c5b04b50c2d074567dd652755e76bfe1b6c42
|
refs/heads/master
| 2021-01-01T19:25:11.029214
| 2014-01-22T06:13:15
| 2014-01-22T06:13:15
| 32,132,077
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,518
|
py
|
from google.appengine.ext import db
import utils
UNUSABLE_PASSWORD = '!' # This will never be a valid hash
def check_password(raw_password, enc_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
encryption formats behind the scenes.
"""
algo, salt, hsh = enc_password.split('$')
return hsh == utils.get_hexdigest(algo, salt, raw_password)
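# Illustrative example (not part of the original module): a stored value such as
# 'sha1$a1b2$9c5e...' splits into algo='sha1', salt='a1b2' and hsh='9c5e...', and the
# raw password is accepted when utils.get_hexdigest('sha1', 'a1b2', raw_password)
# reproduces hsh.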
#
# A user in the model - not like AppEngine's User object.
# This is a copy of DJANGO's User model
#
class LocalUser(db.Model):
"""Users within the Django authentication system are represented by this model.
Username and password are required. Other fields are optional.
"""
username = db.StringProperty()
first_name = db.StringProperty(default = "")
last_name = db.StringProperty(default = "")
email = db.EmailProperty(required = False)
password = db.StringProperty(default = "")
is_staff = db.BooleanProperty(default=False)
is_active = db.BooleanProperty(default=True)
is_superuser = db.BooleanProperty(default=False)
last_login = db.DateTimeProperty(auto_now_add = True)
date_joined = db.DateTimeProperty(auto_now_add = True)
algo = db.StringProperty(default = "")
salt = db.StringProperty(default = "")
hash = db.StringProperty(default = "")
@classmethod
def create(cls, **kwds):
uname = ""
if "username" in kwds:
uname = kwds["username"]
Unique.check("username", uname)
user = LocalUser(username = uname)
user.put()
return user
def __unicode__(self):
return self.username
def is_anonymous(self):
"Always returns False. This is a way of comparing User objects to anonymous users."
return False
def is_authenticated(self):
"""Always return True. This is a way to tell if the user has been authenticated in templates.
"""
return True
def get_full_name(self):
"Returns the first_name plus the last_name, with a space in between."
full_name = u'%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def set_password(self, raw_password):
self.password = utils.salt_password(raw_password)
def check_password(self, raw_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
encryption formats behind the scenes.
"""
# Backwards-compatibility check. Older passwords won't include the
# algorithm or salt.
if '$' not in self.password:
is_correct = (self.password == utils.get_hexdigest('md5', '', raw_password))
if is_correct:
# Convert the password to the new, more secure format.
self.set_password(raw_password)
self.put()
return is_correct
return check_password(raw_password, self.password)
def set_unusable_password(self):
# Sets a value that will never be a valid hash
self.password = UNUSABLE_PASSWORD
def has_usable_password(self):
return self.password != UNUSABLE_PASSWORD
def email_user(self, subject, message, from_email=None):
"Sends an e-mail to this User."
from django.core.mail import send_mail
send_mail(subject, message, from_email, [self.email])
def get_profile(self):
"""
Returns site-specific profile for this user. Raises
SiteProfileNotAvailable if this site does not allow profiles.
"""
if not hasattr(self, '_profile_cache'):
from django.conf import settings
if not getattr(settings, 'AUTH_PROFILE_MODULE', False):
raise SiteProfileNotAvailable
try:
app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
model = db.get_model(app_label, model_name)
self._profile_cache = model._default_manager.get(user__id__exact=self.id)
except (ImportError, ImproperlyConfigured):
raise SiteProfileNotAvailable
return self._profile_cache
#
# A table for holding basic registration info about a user,
# the actual user profile will be in a different class
#
class LocalUserRegistration(db.Model):
user = db.ReferenceProperty(LocalUser)
activation_key = db.StringProperty()
key_expires = db.DateTimeProperty()
|
[
"sri.panyam@gmail.com"
] |
sri.panyam@gmail.com
|
a886c659da594993e85f09eb146cb505efe5fa7a
|
558165d47cb4b545d7e81cdb0108db8f436a5ea6
|
/habra_proxy/app/views.py
|
26123907d886f1b378a0c893dd3d0bde7a438de5
|
[] |
no_license
|
Nick1994209/interview_tasks
|
08365f152f2cfec559fbeceaeb117110ad02d9bc
|
1f8c5279f2f0d52c3d147cac8b79918d780ec524
|
refs/heads/master
| 2020-04-24T10:58:15.569067
| 2019-02-25T17:48:10
| 2019-03-10T20:05:22
| 171,911,396
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,078
|
py
|
from aiohttp import web
from app import settings
from app.helpers import add_symbols
async def habra_proxy_handler(request):
path = request.match_info['path']
habra_status_code, habra_headers, habra_body = await request.app['habra_fetcher'].fetch(path)
response_headers = {header: habra_headers[header] for header in settings.BASE_PROXY_HEADERS
if header in habra_headers}
if not is_html_response(habra_headers):
return web.Response(body=habra_body, status=habra_status_code, headers=response_headers)
habra_text = habra_body.decode('utf8')
habra_text = habra_text.replace(
'https://habr.com/',
f'http://{settings.APP_HOST}:{settings.APP_PORT}/',
)
habra_text = add_symbols(habra_text,
symbol='™',
searcher=request.app['habra_word_change_finder'])
return web.Response(text=habra_text, status=habra_status_code, headers=response_headers)
def is_html_response(headers):
return 'text/html' in headers.get('Content-Type', '')
|
[
"NVKorolkov@domclick.ru"
] |
NVKorolkov@domclick.ru
|
3bec2c9d3fb8aa08d9400de024315c8c6a5b51a6
|
abd9db1e08b8cd0dc7e988a04d0403037d035887
|
/canvas_mouse.py
|
2b27304d69da42a3f25d8905473ede7b3dd571dd
|
[] |
no_license
|
atm1504/tkinter-learn
|
4fd5bb368d75e38696af367612e384866b9a331c
|
1e41cda5ba8c3aaeb64fa0621b58d83f0e53f242
|
refs/heads/main
| 2023-03-02T22:38:28.456185
| 2021-02-12T16:14:15
| 2021-02-12T16:14:15
| 323,787,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,453
|
py
|
from tkinter import *
from PIL import ImageTk, Image
root = Tk()
root.title("Learning Tkinter")
root.iconbitmap("./images/quality.ico")
root.geometry("800x600")
w = 600
h = 400
x = w // 2
y = h // 2
myCanvas = Canvas(root, width=w, height=h, bg="white")
myCanvas.pack(pady=20)
# myCircle = myCanvas.create_oval(x, y, x + 10, y + 10)
img = PhotoImage(file="images/me.png")
myImage = myCanvas.create_image(260, 125, anchor=NW, image=img)
# def left(event):
# x = -10
# y = 0
# myCanvas.move(myImage, x, y)
# def right(event):
# x = 10
# y = 0
# myCanvas.move(myImage, x, y)
# def up(event):
# x = 0
# y = -10
# myCanvas.move(myImage, x, y)
# def down(event):
# x = 0
# y = 10
# myCanvas.move(myImage, x, y)
# def pressing(event):
# x = 0
# y = 0
# if event.char == "a": x = -10
# if event.char == "d": x = 10
# if event.char == "r": y = -10
# if event.char == "x": y = 10
# myCanvas.move(myImage, x, y)
def move(e):
global img
myLabel.config(text="Coordinates: x: " + str(e.x) + " y: " + str(e.y))
img = PhotoImage(file="images/me.png")
myImage = myCanvas.create_image(e.x, e.y, anchor=NW, image=img)
# root.bind("<Key>",pressing)
# root.bind("<Left>", left)
# root.bind("<Right>", right)
# root.bind("<Up>", up)
# root.bind("<Down>", down)
myLabel = Label(root, text="")
myLabel.pack(pady=20)
myCanvas.bind('<B1-Motion>',move)
root.mainloop()
|
[
"atm1504.in@gmail.com"
] |
atm1504.in@gmail.com
|
8ee11b9aa31d99bf49a6ec750dce32bd8bb88126
|
c27dd3290d07c210f4b9b2d28b9d2b5730a2a936
|
/TensorStudy/Word2Vec.py
|
5a161b96b063fae29e8dc0001a9e593f99ea6a47
|
[] |
no_license
|
1493115830/test
|
a8985b1780c7376371b7c9f18573eedb8cb4e6e8
|
2b134aa197a23d835385ca35dc74ed893213772e
|
refs/heads/master
| 2020-06-03T09:51:31.873104
| 2019-07-09T14:40:29
| 2019-07-09T14:40:29
| 191,527,281
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,554
|
py
|
# encoding=utf8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import pandas as pd
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# # Step 1: Download the data.
# url = 'http://mattmahoney.net/dc/'
#
# # download the dataset
# def maybe_download(filename, expected_bytes):
# """Download a file if not present, and make sure it's the right size."""
# if not os.path.exists(filename):
# filename, _ = urllib.request.urlretrieve(url + filename, filename)
# # get the file's attributes
# statinfo = os.stat(filename)
# # check whether the file size is correct
# if statinfo.st_size == expected_bytes:
# print('Found and verified', filename)
# else:
# print(statinfo.st_size)
# raise Exception(
# 'Failed to verify ' + filename + '. Can you get to it with a browser?')
# return filename
#
# filename = maybe_download('text8.zip', 31344016)
filename = 'DMDMT.zip'
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words"""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split('\r\n')
return data
# list of words
words = np.array(read_data(filename))
print(words.shape)
# Data size
print('Data size', len(words))
# Step 2: Build the dictionary and replace rare words with UNK token.
# keep only the most frequent vocabulary_size words; every other word is mapped to UNK
vocabulary_size = 1200
def build_dataset(words, vocabulary_size):
count = [['UNK', -1]]
# extend appends the elements of a list
# Counter counts how many times each word occurs
# most_common returns a top-N list; only the vocabulary_size most frequent words (including UNK) are kept
# c = Counter('abracadabra')
# c.most_common()
# [('a', 5), ('r', 2), ('b', 2), ('c', 1), ('d', 1)]
# c.most_common(3)
# [('a', 5), ('r', 2), ('b', 2)]
# the vocabulary_size - 1 most frequent words
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
# build dictionary mapping each word to an id (ids 0..vocabulary_size-1)
# the more frequent a word, the smaller its id
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
# data: the dataset with every word replaced by its id
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
# record how many words were mapped to UNK
count[0][1] = unk_count
# dictionary mapping id -> word
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
# data: the dataset in id form
# count: the most frequent words and their counts
# dictionary: word -> id
# reverse_dictionary: id -> word
data, count, dictionary, reverse_dictionary = build_dataset(words, vocabulary_size)
del words # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
# double-ended queue
buffer = collections.deque(maxlen=span)
# [ skip_window target skip_window ]
# [ skip_window target skip_window ]
# [ skip_window target skip_window ]
# [0 1 2 3 4 5 6 7 8 9 ...]
# t i
# loop 3 times (span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
# build batch and labels
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
# loop 2 times (num_skips): one target word corresponds to two context words
for j in range(num_skips):
while target in targets_to_avoid:
# may pick the word before or the word after the target first
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
# Backtrack a little bit to avoid skipping words in the end of a batch
# backtrack span (3) words, because after processing one batch data_index has moved span positions further to the right
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
# print sample data
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
# dimensionality of the word vectors
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 2 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
# draw 16 integers from 0-99, sampling without replacement
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
# number of negative samples
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
# with tf.device('/cpu:0'):
# word embeddings
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
# embedding_lookup(params, ids) simply returns the rows of params selected by ids, in order
# e.g. ids=[1,7,4] returns a tensor built from rows 1, 7 and 4 of params
# look up the embeddings of the words to be trained
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the noise-contrastive estimation(NCE) loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
# pick some frequent words to test cosine similarity
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
# valid_size == 16
# [valid_size, embedding_size] x [embedding_size, vocabulary_size] -> [16, vocabulary_size]
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
# Step 5: Begin training.
num_steps = 1000001
final_embeddings = []
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print("Initialized")
average_loss = 0
for step in xrange(num_steps):
# fetch one batch of targets and the corresponding labels, both in id form
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
# compute the average loss over every 2000 training steps
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print("Average loss at step ", step, ": ", average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 20000 == 0:
sim = similarity.eval()
# find the words with the highest cosine similarity to each validation word
for i in xrange(valid_size):
# look up the word for this id
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
# sort in descending order, skip the word itself, keep the top_k values
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = "Nearest to %s:" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
# the word embeddings obtained after training
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne30.png'):
assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
# set the figure size
plt.figure(figsize=(30, 30)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')# mac:method='exact'
# plot 500 points
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print("Please install sklearn, matplotlib, and scipy to visualize embeddings.")
|
[
"1493115830@qq.com"
] |
1493115830@qq.com
|
25ab6daf0b1b321bc621a135b76275efad1bb1a6
|
6f5451b3d46aaf94caea94580cf836124f05048d
|
/WebTextbook_scanner.py
|
7614d0ec960c80cd8f70836bbaf32376635ac3ad
|
[] |
no_license
|
are-38-a/WebTextbook_Scanner
|
90b9cadf55338423d42359e783d13e3302f00211
|
7c455ea3f834c9733a633d1f4bec708310a5d0e9
|
refs/heads/main
| 2023-08-25T21:15:42.515283
| 2021-10-07T09:30:09
| 2021-10-07T09:30:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,676
|
py
|
import pyautogui
import sys
import time
import os
import img2pdf
from PIL import Image
from PIL import ImageEnhance
def enhance_image(filename):
image1 =Image.open(filename)
con1 = ImageEnhance.Contrast(image1)
image2 = con1.enhance(1.5)
con2 = ImageEnhance.Sharpness(image2)
image3 = con2.enhance(1.5)
image3.save(filename)
def main():
print("左ボタンの位置")
os.system('PAUSE')
hidari_x,hidari_y = pyautogui.position()
print("右ボタンの位置")
os.system('PAUSE')
migi_x,migi_y = pyautogui.position()
print("左ページ左上")
os.system('PAUSE')
hidari_page_1,hidari_page_2 = pyautogui.position()
print("左ページ右下")
os.system('PAUSE')
hidari_page_3,hidari_page_4 = pyautogui.position()
print("右ページ左上")
os.system('PAUSE')
migi_page_1,migi_page_2 = pyautogui.position()
print("右ページ右下")
os.system('PAUSE')
migi_page_3,migi_page_4 = pyautogui.position()
print("フォルダ名を入力")
dirname = input()
print("総ページ数を入力")
max_number = int(input())
print("10秒後に開始します")
time.sleep(10)
dirpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), dirname)
os.makedirs(dirpath, exist_ok=True)
# take screenshots until the total page count is reached
i = 0
while i < max_number:
time.sleep(2)
# left-hand page
filename = dirpath + "/" + "{:03}".format(i) + ".png"
sc = pyautogui.screenshot(region=(hidari_page_1,hidari_page_2, hidari_page_3-hidari_page_1, hidari_page_4-hidari_page_2))
sc.save(filename)
enhance_image(filename) # sharpen the image
i += 1
# right-hand page
filename = dirpath + "/" + "{:03}".format(i) + ".png"
sc = pyautogui.screenshot(region=(migi_page_1,migi_page_2, migi_page_3-migi_page_1, migi_page_4-migi_page_2))
sc.save(filename)
enhance_image(filename) # sharpen the image
i += 1
pyautogui.click(migi_x,migi_y)
# collect the PNG files in the image folder and add them to a list
extension = ".png" # 拡張子がPNGのものを対象
list_image = []
for j in os.listdir(dirpath+"/"):
if j.endswith(extension):
list_image.append(Image.open(dirpath+"/"+j).filename)
# write out as a PDF
pdf_filename = dirpath + "/" + dirname + ".pdf" # name of the output PDF
with open(pdf_filename,"wb") as f:
f.write(img2pdf.convert(list_image))
print("完了")
os.system('PAUSE')
if __name__ == "__main__":
main()
|
[
"85498974+miya-38-a@users.noreply.github.com"
] |
85498974+miya-38-a@users.noreply.github.com
|
2a12e578f31192c0001b9b89e9a719f50b5da2da
|
c01a58ecd6614128e3c29a70e3e768b220a2a4a2
|
/common/xrd-ui-tests-qautomate/variables/strings.py
|
a233680dec41ae8660d3bc1c1cb81e372fbfefeb
|
[
"MIT"
] |
permissive
|
nordic-institute/X-Road-tests
|
772a6d7485606c1f10b61a1260b8fb66111bf0be
|
e030661a0ad8ceab74dd8122b751e88025a3474a
|
refs/heads/develop
| 2021-06-03T01:38:20.542859
| 2019-03-18T12:16:18
| 2019-03-18T12:16:18
| 125,643,677
| 2
| 3
|
MIT
| 2018-06-14T15:09:21
| 2018-03-17T15:36:32
|
Python
|
UTF-8
|
Python
| false
| false
| 7,221
|
py
|
import os
from QAutoLibrary.extension.parsers.parameter_parser import get_parameter, get_all_parameters
# Jetty log events
failed_to_generate_global_config = u'Processing internal configuration failed:'
# audit log events
login_user = u'Log in user'
logout_user = u'Log out user'
login_user_failed = u'Log in user failed'
login_token = u'Log in to token'
logout_token = u'Log out from token'
login_token_failed = u'Log in to token failed'
add_timestamping_services = u'Add timestamping service'
add_timestamping_services_failed = u'Add timestamping service failed'
delete_timestamping_services = u'Delete timestamping service'
set_ui_language = u'Set UI language'
edit_cs_address = u'Edit central server address'
edit_cs_address_failed = u'Edit central server address failed'
recreate_configuration_anchor = u'Re-create {} configuration anchor'
generate_config_signing_key = u'Generate {} configuration signing key'
activate_config_signing_key = u'Activate {} configuration signing key'
delete_config_signing_key = u'Delete {} configuration signing key'
restore_backup_failed_audit_log = u'Restore configuration failed'
restore_configuration_audit_log = u'Restore configuration'
delete_backup_audit_log = u'Delete backup file'
upload_backup_audit_log = u'Upload backup file'
upload_backup_failed_audit_log = u'Upload backup file failed'
generate_backup_audit_log = u'Back up configuration'
failed_generate_backup_audit_log = u'Back up configuration failed'
configuration_part_upload_audit_log = u'Upload configuration part'
failed_configuration_part_upload_audit_log = u'Upload configuration part failed'
# Ui strings
authentication_failed = u'Authentication failed'
login_restore_in_progress = u'Restore in progress, try again later'
message_failed_to_add_timestamping = u'Failed to add timestamping service: timestamping service already exists'
request_cert_deletion = u'Certificate deletion'
request_client_deletion = u'Client deletion'
security_server_version = u'Security Server version 6'
reg_auth_cert_deletion = u'Authentication certificate deletion'
# Messages
key_success_deleted_from_cs = u'Key successfully deleted from central server configuration'
internal_conf_anchor_generated_success = u'Internal configuration anchor generated successfully'
token_key_removed = u'Key successfully deleted from token'
token_key_removed_fail = u"Failed to delete key from token '{}': Key '{}' not found"
change_address_error = u'Central server address must be DNS name or IP address'
external_conf_anchor_generated_success = u'External configuration anchor generated successfully'
restore_failed = u"Failed to restore configuration: Restoring configuration from file '{}' failed."
backup_restored = u"Configuration restored successfully from file '{}'."
backup_deleted = u'Selected backup deleted successfully'
backup_created = u'Configuration backup created'
backup_created_error = u"Failed to back up configuration: Error making configuration backup, script exited with status code '{}'"
backup_file_uploaded = u'New backup file uploaded successfully'
backup_file_upload_invalid_char = u"Failed to upload new backup file: Filename '{}' contains invalid characters. Valid characters include: (A-Z), (a-z), (0-9), (_), (.), (-)."
backup_file_uploaded_invalid_extension = u"Failed to upload new backup file: Uploaded file name '{}' has an invalid extension, the only valid one is 'tar'"
backup_file_uploaded_invalid_format = u"Failed to upload new backup file: Content of uploaded file must be in tar format"
configuration_file_upload = u"Configuration file for content identifier '{}' uploaded successfully."
configuration_file_upload_validation_fail = u"Failed to upload configuration part: Validation of configuration file with content identifier '{}' failed."
configuration_file_upload_missing_validation_fail = u"Failed to upload configuration part: Validation program '{}' does not exist in the file system."
configuration_generation_fail = u"Global configuration generation failing since"
login_software_token_missing_pin = u"Missing parameter: pin"
login_software_token_invalid_pin = u'PIN incorrect'
parameter_missing= u"Missing parameter: {}"
parameter_exceed_255 = u"Parameter '{}' input exceeds 255 characters"
failed_to_activae_signing_key = u'Failed to activate signing key: token or key not available'
lanquage_eng = u'ENGLISH (EN)'
# environment information
ssh_type_environment = "ssh"
lxd_type_environment = "lxd"
# Key types
sign_key_usage = "Sign"
auth_key_usage = "Auth"
# Backup paths
# Log file names and paths
backup_directory = u"/var/lib/xroad/backup"
generated_confs_directory = u'/var/lib/xroad/public'
invalid_backup_file_name = "invalid.tar"
invalid_backup_file = os.path.join(backup_directory, invalid_backup_file_name)
configuration_parts_directory = "/etc/xroad/configuration-parts"
devices_file = "/etc/xroad/devices.ini"
audit_log = "/var/log/xroad/audit.log"
jetty_log = "/var/log/xroad/jetty/jetty.log"
signer_log = "/var/log/xroad/signer.log"
signer_console_log = "/var/log/xroad/signer-console.log"
configuration_client_log = "/var/log/xroad/configuration_client.log"
monitor_log = "/var/log/xroad/monitor.log"
proxy_log = "/var/log/xroad/proxy.log"
ss_all_logs = [jetty_log, audit_log, signer_log, signer_console_log, configuration_client_log, monitor_log, proxy_log]
# key names
sign_key_label = "ta_generated_key_sign"
auth_key_label = "ta_generated_key_auth"
sign_key_label_2 = "ta_generated_key_sign_b"
auth_key_label_2 = "ta_generated_key_auth_b"
def generate_subject_name(section=u'member1_configuration'):
parameters = get_all_parameters()
member_name = parameters[section][u'member_name']
member_code = parameters[section][u'member_code']
instance_identifier = parameters[section][u'instance_identifier']
subject_name_string = u'C=FI, O={}, CN={}, serialNumber={}/'.format(member_name, member_code, instance_identifier)
print(subject_name_string)
return subject_name_string
def generate_member_id_short(parameters=None):
instance_identifier = parameters[u'instance_identifier']
member_class = parameters[u'member_class']
member_code = parameters[u'member_code']
member_id_short = u'{}:{}:{}:*'.format(instance_identifier, member_class, member_code)
print(member_id_short)
return member_id_short
def server_environment_type():
return get_parameter(section=u'server_environment', name=u'type')
def server_environment_csr_format():
return get_parameter(section=u'server_environment', name=u'csr_format')
def server_environment_approved_ca():
return get_parameter(section=u'server_environment', name=u'approved_ca')
def server_request_comment(section=u'member1_configuration'):
parameters = get_all_parameters()
instance_identifier = parameters[section][u'instance_identifier']
member_class = parameters[section][u'member_class']
member_code = parameters[section][u'member_code']
member_server = parameters[section][u'security_server_code']
request_comment = u'\'SERVER:{}/{}/{}/{}\' deletion'.format(instance_identifier, member_class,
member_code, member_server)
return request_comment
|
[
"lasse.matikainen@qautomate.fi"
] |
lasse.matikainen@qautomate.fi
|
b53760c5590f9a61ad98412ea6665d450fccb0e3
|
a08d0201faf9f96c1785307009eb951640d6db37
|
/protocol/impl/radio_pb2.py
|
6386b17e9085be46ff5a6e13e06f5917953cf95f
|
[
"Apache-2.0"
] |
permissive
|
ykelle/spotipy-control
|
4729ee71e23ca403db3951127388684c8270a158
|
f2f25b86b4e40f5769fb7b74f6dabcfa19cbc2ff
|
refs/heads/master
| 2020-08-19T02:34:03.709703
| 2020-03-10T20:12:49
| 2020-03-10T20:12:49
| 215,865,822
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 18,004
|
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: radio.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='radio.proto',
package='',
syntax='proto2',
serialized_pb=_b('\n\x0bradio.proto\"a\n\x0cRadioRequest\x12\x0c\n\x04uris\x18\x01 \x03(\t\x12\x0c\n\x04salt\x18\x02 \x01(\x05\x12\x0e\n\x06length\x18\x04 \x01(\x05\x12\x11\n\tstationId\x18\x05 \x01(\t\x12\x12\n\nlastTracks\x18\x06 \x03(\t\" \n\x10MultiSeedRequest\x12\x0c\n\x04uris\x18\x01 \x03(\t\"8\n\x08\x46\x65\x65\x64\x62\x61\x63k\x12\x0b\n\x03uri\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x11\n\ttimestamp\x18\x03 \x01(\x01\"e\n\x06Tracks\x12\x0c\n\x04gids\x18\x01 \x03(\t\x12\x0e\n\x06source\x18\x02 \x01(\t\x12\x10\n\x08identity\x18\x03 \x01(\t\x12\x0e\n\x06tokens\x18\x04 \x03(\t\x12\x1b\n\x08\x66\x65\x65\x64\x62\x61\x63k\x18\x05 \x03(\x0b\x32\t.Feedback\"\xb8\x01\n\x07Station\x12\n\n\x02id\x18\x01 \x01(\t\x12\r\n\x05title\x18\x02 \x01(\t\x12\x10\n\x08titleUri\x18\x03 \x01(\t\x12\x10\n\x08subtitle\x18\x04 \x01(\t\x12\x13\n\x0bsubtitleUri\x18\x05 \x01(\t\x12\x10\n\x08imageUri\x18\x06 \x01(\t\x12\x12\n\nlastListen\x18\x07 \x01(\x01\x12\r\n\x05seeds\x18\x08 \x03(\t\x12\x10\n\x08thumbsUp\x18\t \x01(\x05\x12\x12\n\nthumbsDown\x18\n \x01(\x05\"\x13\n\x05Rules\x12\n\n\x02js\x18\x01 \x01(\t\"I\n\x0fStationResponse\x12\x19\n\x07station\x18\x01 \x01(\x0b\x32\x08.Station\x12\x1b\n\x08\x66\x65\x65\x64\x62\x61\x63k\x18\x02 \x03(\x0b\x32\t.Feedback\")\n\x0bStationList\x12\x1a\n\x08stations\x18\x01 \x03(\x0b\x32\x08.Station\"\x1c\n\rLikedPlaylist\x12\x0b\n\x03uri\x18\x01 \x01(\t')
)
_RADIOREQUEST = _descriptor.Descriptor(
name='RadioRequest',
full_name='RadioRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uris', full_name='RadioRequest.uris', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='salt', full_name='RadioRequest.salt', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='length', full_name='RadioRequest.length', index=2,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stationId', full_name='RadioRequest.stationId', index=3,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lastTracks', full_name='RadioRequest.lastTracks', index=4,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=15,
serialized_end=112,
)
_MULTISEEDREQUEST = _descriptor.Descriptor(
name='MultiSeedRequest',
full_name='MultiSeedRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uris', full_name='MultiSeedRequest.uris', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=114,
serialized_end=146,
)
_FEEDBACK = _descriptor.Descriptor(
name='Feedback',
full_name='Feedback',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uri', full_name='Feedback.uri', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='Feedback.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='timestamp', full_name='Feedback.timestamp', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=148,
serialized_end=204,
)
_TRACKS = _descriptor.Descriptor(
name='Tracks',
full_name='Tracks',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='gids', full_name='Tracks.gids', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='source', full_name='Tracks.source', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='identity', full_name='Tracks.identity', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tokens', full_name='Tracks.tokens', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='feedback', full_name='Tracks.feedback', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=206,
serialized_end=307,
)
_STATION = _descriptor.Descriptor(
name='Station',
full_name='Station',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='Station.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='title', full_name='Station.title', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='titleUri', full_name='Station.titleUri', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='subtitle', full_name='Station.subtitle', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='subtitleUri', full_name='Station.subtitleUri', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='imageUri', full_name='Station.imageUri', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lastListen', full_name='Station.lastListen', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='seeds', full_name='Station.seeds', index=7,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='thumbsUp', full_name='Station.thumbsUp', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='thumbsDown', full_name='Station.thumbsDown', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=310,
serialized_end=494,
)
_RULES = _descriptor.Descriptor(
name='Rules',
full_name='Rules',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='js', full_name='Rules.js', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=496,
serialized_end=515,
)
_STATIONRESPONSE = _descriptor.Descriptor(
name='StationResponse',
full_name='StationResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='station', full_name='StationResponse.station', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='feedback', full_name='StationResponse.feedback', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=517,
serialized_end=590,
)
_STATIONLIST = _descriptor.Descriptor(
name='StationList',
full_name='StationList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='stations', full_name='StationList.stations', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=592,
serialized_end=633,
)
_LIKEDPLAYLIST = _descriptor.Descriptor(
name='LikedPlaylist',
full_name='LikedPlaylist',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uri', full_name='LikedPlaylist.uri', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=635,
serialized_end=663,
)
_TRACKS.fields_by_name['feedback'].message_type = _FEEDBACK
_STATIONRESPONSE.fields_by_name['station'].message_type = _STATION
_STATIONRESPONSE.fields_by_name['feedback'].message_type = _FEEDBACK
_STATIONLIST.fields_by_name['stations'].message_type = _STATION
DESCRIPTOR.message_types_by_name['RadioRequest'] = _RADIOREQUEST
DESCRIPTOR.message_types_by_name['MultiSeedRequest'] = _MULTISEEDREQUEST
DESCRIPTOR.message_types_by_name['Feedback'] = _FEEDBACK
DESCRIPTOR.message_types_by_name['Tracks'] = _TRACKS
DESCRIPTOR.message_types_by_name['Station'] = _STATION
DESCRIPTOR.message_types_by_name['Rules'] = _RULES
DESCRIPTOR.message_types_by_name['StationResponse'] = _STATIONRESPONSE
DESCRIPTOR.message_types_by_name['StationList'] = _STATIONLIST
DESCRIPTOR.message_types_by_name['LikedPlaylist'] = _LIKEDPLAYLIST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RadioRequest = _reflection.GeneratedProtocolMessageType('RadioRequest', (_message.Message,), dict(
DESCRIPTOR = _RADIOREQUEST,
__module__ = 'radio_pb2'
# @@protoc_insertion_point(class_scope:RadioRequest)
))
_sym_db.RegisterMessage(RadioRequest)
MultiSeedRequest = _reflection.GeneratedProtocolMessageType('MultiSeedRequest', (_message.Message,), dict(
DESCRIPTOR = _MULTISEEDREQUEST,
__module__ = 'radio_pb2'
# @@protoc_insertion_point(class_scope:MultiSeedRequest)
))
_sym_db.RegisterMessage(MultiSeedRequest)
Feedback = _reflection.GeneratedProtocolMessageType('Feedback', (_message.Message,), dict(
DESCRIPTOR = _FEEDBACK,
__module__ = 'radio_pb2'
# @@protoc_insertion_point(class_scope:Feedback)
))
_sym_db.RegisterMessage(Feedback)
Tracks = _reflection.GeneratedProtocolMessageType('Tracks', (_message.Message,), dict(
DESCRIPTOR = _TRACKS,
__module__ = 'radio_pb2'
# @@protoc_insertion_point(class_scope:Tracks)
))
_sym_db.RegisterMessage(Tracks)
Station = _reflection.GeneratedProtocolMessageType('Station', (_message.Message,), dict(
DESCRIPTOR = _STATION,
__module__ = 'radio_pb2'
# @@protoc_insertion_point(class_scope:Station)
))
_sym_db.RegisterMessage(Station)
Rules = _reflection.GeneratedProtocolMessageType('Rules', (_message.Message,), dict(
DESCRIPTOR = _RULES,
__module__ = 'radio_pb2'
# @@protoc_insertion_point(class_scope:Rules)
))
_sym_db.RegisterMessage(Rules)
StationResponse = _reflection.GeneratedProtocolMessageType('StationResponse', (_message.Message,), dict(
DESCRIPTOR = _STATIONRESPONSE,
__module__ = 'radio_pb2'
# @@protoc_insertion_point(class_scope:StationResponse)
))
_sym_db.RegisterMessage(StationResponse)
StationList = _reflection.GeneratedProtocolMessageType('StationList', (_message.Message,), dict(
DESCRIPTOR = _STATIONLIST,
__module__ = 'radio_pb2'
# @@protoc_insertion_point(class_scope:StationList)
))
_sym_db.RegisterMessage(StationList)
LikedPlaylist = _reflection.GeneratedProtocolMessageType('LikedPlaylist', (_message.Message,), dict(
DESCRIPTOR = _LIKEDPLAYLIST,
__module__ = 'radio_pb2'
# @@protoc_insertion_point(class_scope:LikedPlaylist)
))
_sym_db.RegisterMessage(LikedPlaylist)
# @@protoc_insertion_point(module_scope)
|
[
"dborisov@pymedia.org"
] |
dborisov@pymedia.org
|
405a91a398a5c0fb9ef017dddb47ea0b984c35ca
|
78c08cd3ef66836b44373280a333c040ccb99605
|
/ostap/tools/splot.py
|
6058ac4b1fcc07a0d338268419fd8dfb0b5d9790
|
[
"BSD-3-Clause"
] |
permissive
|
Pro100Tema/ostap
|
11ccbc546068e65aacac5ddd646c7550086140a7
|
1765304fce43714e1f51dfe03be0daa5aa5d490f
|
refs/heads/master
| 2023-02-24T08:46:07.532663
| 2020-01-27T13:46:30
| 2020-01-27T13:46:30
| 200,378,716
| 0
| 0
|
BSD-3-Clause
| 2019-08-03T13:28:08
| 2019-08-03T13:28:07
| null |
UTF-8
|
Python
| false
| false
| 7,485
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==========================================================================================
## @file ostap/tools/splot.py
# Helper utilities to get sWeights in a form of function/histogram
# (often needed in practice, e.g to add these values to TTree,
# avoiding the direct usage of ROOT.RooStat.SPlot)
#
# @see RooStat::SPlot
# @see M.Pivk, F.R. Le Deberder,
# "SPlot: A Statistical tool to unfold data distributions"
# Published in Nucl.Instrum.Meth. A555 (2005) 356
# @see http://arxiv.org/abs/physics/0402083
# @see https://doi.org/10.1016/j.nima.2005.08.106
# @date 2019-05-14
# @author Vanya BELYAEV Ivan.Belyaev@itep.ru
# =============================================================================
""" Helper utilities to get sWeights in a form of function/histogram
(often needed in practice, e.g to add these values to TTree,
avoiding the direct usage of ROOT.RooStat.SPlot)
- see RooStat::SPlot
- see M.Pivk, F.R. Le Deberder,
... ``SPlot: A Statistical tool to unfold data distributions''
... Published in Nucl.Instrum.Meth. A555 (2005) 356
- see http://arxiv.org/abs/physics/0402083
- see https://doi.org/10.1016/j.nima.2005.08.106
"""
# =============================================================================
__author__ = 'Vanya BELYAEV Ivan.Belyaev@itep.ru'
__date__ = "2019-05-14"
__version__ = '$Revision$'
__all__ = (
'sPlot1D' , ## 1D-splot
)
# =============================================================================
import ROOT
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
# =============================================================================
if '__main__' == __name__ : logger = getLogger ( 'ostap.tools.splot' )
else : logger = getLogger ( __name__ )
# =============================================================================
import ostap.fitting.roofitresult
from ostap.fitting.basic import PDF, Generic1D_pdf
from ostap.fitting.variables import FIXVAR
from ostap.histos.histos import Histo1DFun
# =============================================================================
# @class sPlot1D
# Helper class to get <code>sWeights</code> in a form of histograms/function objects.
# It is often useful to avoid the direct usage of ROOT.RooStat.SPlot
# @see RooStat::SPlot
# @see M.Pivk, F.R. Le Deberder,
# "SPlot: A Statistical tool to unfold data distributions"
# Published in Nucl.Instrum.Meth. A555 (2005) 356
# @see http://arxiv.org/abs/physics/0402083
# @see https://doi.org/10.1016/j.nima.2005.08.106
# @date 2019-05-14
# @author Vanya BELYAEV Ivan.Belyaev@itep.ru
class sPlot1D(object) :
""" Helper class to get sWeights in the form of histograms/function objects.
It is often useful to avoid the direct usage of ROOT.RooStat.SPlot
- see ROOT.RooStat.SPlot
- see M.Pivk, F.R. Le Deberder,
...``SPlot: A Statistical tool to unfold data distributions''
... Published in Nucl.Instrum.Meth. A555 (2005) 356
- see http://arxiv.org/abs/physics/0402083
- see https://doi.org/10.1016/j.nima.2005.08.106
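A minimal usage sketch (illustrative only: `model`, `data` and the component
name 'S' are hypothetical, assuming `model` is an ostap PDF wrapping a
RooAddPdf defined over the dataset `data`):
>>> splot = sPlot1D ( model , data )    ## (re)fit with only the yields floating
>>> w_S = splot.weights [ 'S' ]         ## sWeight for component 'S' as a function
>>> h_S = splot.hweights [ 'S' ]        ## the same sWeight as a histogram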
"""
def __init__ ( self ,
pdf ,
dataset = None ,
fitresult = None ,
fast = True , ## fast histogram filling ? (bin centers)
nbins = 100 , ## histogram bining
access = {} , ## histogram access options
fitopts = {} ) : ## PDF.fitTo options
assert dataset or fitresult, 'Either dataset or fitresult must be specified!'
assert isinstance ( pdf , PDF ) and \
isinstance ( pdf.pdf , ROOT.RooAddPdf ) and \
len ( pdf.alist1 ) == len ( pdf.alist2 ) , 'Invalid type of PDF!'
cmps = pdf.alist2
names = [ c.name for c in cmps ]
## if dataset is specified - perform the fit
if dataset :
vars = pdf.pdf.getParameters ( dataset )
## make a proper (re)fit fixing everything but yields
with FIXVAR ( [ v for v in vars if not v in cmps ] ) :
fitresult , f = pdf.fitTo ( dataset , silent = True , draw = False , **fitopts )
elif fitresult :
pars = fitresult.floatParsFinal()
pnames = set( [ p.name for p in pars ] )
if set ( names ) != pnames :
raise RuntimeError("Rerun fit with only %s floating " % names )
## template histogram
template = pdf.make_histo ( nbins )
## dictionary of components
hcomponents = {}
## the list of PDFs
cpdfs = [ Generic1D_pdf ( p , xvar = pdf.xvar ) for p in pdf.alist1 ]
for p , n in zip ( cpdfs , names ) :
if fast : hc = p.roo_histo ( histo = template , events = False )
else : hc = p. histo ( histo = template , errors = False )
## ## convert to density histogram ?
## hc = hc.density()
hcomponents [ n ] = hc
## sum of all histograms
hsum = template.clone()
hsum.Reset() ; hsum . Sumw2()
for k in hcomponents : hsum += hcomponents[k] * fitresult( k )[0].value()
hweights = {}
l = len ( names )
for i in range ( l ) :
cmp = template.clone() ;
cmp.Reset() ; cmp.Sumw2()
for j in range ( l ) :
cmp += fitresult.cov ( names[i] , names[j] ) * hcomponents [ names[j] ]
cmp /= hsum
hweights [ names [ i ] ] = cmp
del hsum
del template
components = {}
for k in hcomponents : components [k] = Histo1DFun ( hcomponents [k] , **access )
weights = {}
for k in hweights : weights [k] = Histo1DFun ( hweights [k] , **access )
self.__hcomponents = hcomponents
self.__components = components
self.__hweights = hweights
self.__weights = weights
@property
def components ( self ) :
"""``components'' : get fit components (as functions)"""
return self.__components
@property
def hcomponents ( self ) :
"""``hcomponents'' : get fit components (as histograms)"""
return self.__hcomponents
@property
def weights ( self ) :
"""``weights'' : get sWeights (as functions)"""
return self.__weights
@property
def hweights ( self ) :
"""``hweights'' : get sWeights (as histograms)"""
return self.__hweights
## =============================================================================
if '__main__' == __name__ :
from ostap.utils.docme import docme
docme ( __name__ , logger = logger )
# =============================================================================
# The END
# =============================================================================
|
[
"Ivan.Belyaev@cern.ch"
] |
Ivan.Belyaev@cern.ch
|
488869c32fe0e14bcb21f369089469ef0f757606
|
403417ce4d126d7054942b0b4cb09aafec3daa17
|
/genconfig.py
|
5fe41167ecd4f6fffe549647c925f6a917635b25
|
[] |
no_license
|
gagnonlg/pixel-NN-training
|
74f05802796df0e2d73cddf11114767f6985167b
|
a2a8758a7c79625d4926faa2557c8fdafe434c84
|
refs/heads/master
| 2019-07-13T21:15:00.026854
| 2017-08-07T13:57:40
| 2017-08-07T13:57:40
| 110,745,262
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,003
|
py
|
import argparse
import re
import sys
def parse_args(argv):
p = argparse.ArgumentParser()
p.add_argument("--sizeX", type=int, default=7)
p.add_argument("--sizeY", type=int, default=7)
p.add_argument("--type", choices=['number','pos1','pos2','pos3', 'error1x', 'error1y', 'error2x', 'error2y', 'error3x', 'error3y', ], required=True)
p.add_argument("--old", action='store_true', default=False)
return p.parse_args(argv)
def gen_inputs(sizeX, sizeY, type=None):
fields = []
for i in range(sizeX*sizeY):
fields.append("NN_matrix%d" % i)
for i in range(sizeY):
fields.append("NN_pitches%d" % i)
fields += [
'NN_layer',
'NN_barrelEC',
'NN_phi',
'NN_theta'
]
if type.startswith('error'):
for i in range(int(type[-2])):
fields.append('NN_position_id_X_%d_pred' % i)
fields.append('NN_position_id_Y_%d_pred' % i)
return fields
def gen_targets(type):
fields = []
if type.startswith('pos'):
m = re.match('pos([123])', type)
for i in range(int(m.group(1))):
fields.append('NN_position_id_X_%d' % i)
fields.append('NN_position_id_Y_%d' % i)
elif type == 'number':
fields.append('NN_nparticles1')
fields.append('NN_nparticles2')
fields.append('NN_nparticles3')
elif type.startswith('error'):
d = 'X' if type[-1] == 'x' else 'Y'
if type[-2] == '1':
n = 30
elif type[-2] == '2':
n = 25
elif type[-2] == '3':
n = 20
for i in range(int(type[-2])):
for j in range(n):
fields.append('NN_error_{}_{}_{}'.format(d,i,j))
return fields
def gen_metadata(sizeY, type=None):
fields = [
'RunNumber',
'EventNumber',
'ClusterNumber',
'NN_sizeX',
'NN_sizeY',
'NN_localEtaPixelIndexWeightedPosition',
'NN_localPhiPixelIndexWeightedPosition',
'NN_layer',
'NN_barrelEC',
'NN_etaModule',
'NN_phi',
'NN_theta',
'globalX',
'globalY',
'globalZ',
'globalEta'
]
for i in range(sizeY):
fields.append('NN_pitches%d' % i)
if type.startswith('error'):
for i in range(int(type[-2])):
fields.append('NN_position_id_X_%d' % i)
fields.append('NN_position_id_Y_%d' % i)
fields.append('NN_position_id_X_%d_pred' % i)
fields.append('NN_position_id_Y_%d_pred' % i)
return fields
def main(argv):
args = parse_args(argv)
print "inputs:"
for field in gen_inputs(args.sizeX,args.sizeY, args.type):
print " - %s" % field
print "targets:"
for field in gen_targets(args.type):
print " - %s" % field
print "metadata:"
for field in gen_metadata(args.sizeY, args.type):
print " - %s" % field
return 0
if __name__ == '__main__':
exit(main(sys.argv[1:]))
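# Example invocation (illustrative only; the output is a YAML-like listing of the
# "inputs:", "targets:" and "metadata:" branch names for the chosen network type):
#   python genconfig.py --sizeX 7 --sizeY 7 --type pos2 > pos2.yaml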
|
[
"louis.guillaume.gagnon@gmail.com"
] |
louis.guillaume.gagnon@gmail.com
|
ad473d28906000616ee429c441ad8565e19a76db
|
fa43c7ce1afa489996dac762e88431e8e8f58ff5
|
/old/scripts/script3.py
|
dc92c029bf073c530a003c3ee76dc9996e07a7b8
|
[] |
no_license
|
JasThiara/Asset-Bubbles
|
2ed588c0ded5ed8bef2097f76d143d584063f8f3
|
2188700ed9e7cdc5b96d52fa7df9b60eda9df1e2
|
refs/heads/master
| 2021-01-16T21:23:34.423436
| 2014-06-07T22:31:22
| 2014-06-07T22:31:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,400
|
py
|
import ystockquote # going to ystockquote
import sched, time, datetime # scheduler, time and datetime modules
from sys import argv # read the command-line arguments from sys
script, symbol, start_specific_time, end_specific_time, filename, datarate = argv
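# Example invocation (values are illustrative; the two timestamps are
# YYYYMMDDhhmmss strings and datarate is the sampling interval in seconds):
#   python script3.py GOOG 20140607143000 20140607150000 prices.csv 60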
def get_price_time(symbol):
price = ystockquote.get_price(symbol)
current_time = datetime.datetime.now()
return [symbol, price, str(current_time)]
def create_file(filename):
return open(filename,'w')
def close_file(target):
target.close()
def write_data_to_file(target, data):
target.write(data[0])# first element of list which is symbol
target.write(",")
target.write(data[1])# 2nd element of list which is price
target.write(",")
target.write(str(data[2])) # 3rd element date which is string
target.write ("\n") # create new line
def input_datetime_convertor(time_string): # creating function which will convert start_specific_time to datetime
year=int(time_string[0:4])
month = int(time_string[4:6])
day= int(time_string[6:8])
hour = int(time_string[8:10])
minute = int(time_string[10:12])
seconds = int(time_string[12:14])
input_datetime = datetime.datetime(year,month,day,hour,minute,seconds)
return input_datetime
def delta_time(later_datetime):
current_datetime = datetime.datetime.now()
delta =input_datetime_convertor(later_datetime)-current_datetime
return delta.seconds
def sleep(later_datetime):
time.sleep(delta_time(later_datetime))
def get_additional_data(symbol,datalist):
new_data = get_price_time(symbol)
datalist.append(new_data)
def build_scheduler(datarate, start_time, end_time,symbol,datalist):
s = sched.scheduler(time.time, time.sleep)
print time.time()
seconds = datetime.timedelta(0,int(datarate))
k = input_datetime_convertor(start_time)
i =input_datetime_convertor(start_time)# we need to pass time through convertor so it will be in string
while i<=input_datetime_convertor(end_time):
s.enter((i-k).seconds, 1,get_additional_data, (symbol,datalist))
print (i-k).seconds
i = i+seconds
s.run()
def csv_file_generator(filename,Data):
target = open(filename,'w')
for item in Data:
for element in item:
target.write(element)
target.write(",")
target.write("\n")
target.close()
sleep(end_specific_time)
datalist = list()
build_scheduler(datarate, start_specific_time, end_specific_time,symbol,datalist)
csv_file_generator(filename,datalist)
|
[
"Jas@127.0.0.1"
] |
Jas@127.0.0.1
|
d199bf88678cf0570adfc068e858f6ac73ff02e1
|
5c831405523d54b662de714cfc1ea1759d36d55a
|
/set4/NATOalphabet.py
|
9edb4a339efed010f215ce55d83f8d40e32dd8e8
|
[] |
no_license
|
dandumitriu33/codewars-Python
|
99feeb979723d556a4eeb0b2130ba48c2fd394c6
|
e54ca88a3ec026e5fb770cfa44c1e8b669711a5e
|
refs/heads/master
| 2020-08-29T13:08:26.325554
| 2019-11-24T18:19:36
| 2019-11-24T18:19:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 911
|
py
|
def nato(string):
nato_dictionary = {
'a': 'Alfa',
'b': 'Bravo',
'c': 'Charlie',
'd': 'Delta',
'e': 'Echo',
'f': 'Foxtrot',
'g': 'Golf',
'h': 'Hotel',
'i': 'India',
'j': 'Juliett',
'k': 'Kilo',
'l': 'Lima',
'm': 'Mike',
'n': 'November',
'o': 'Oscar',
'p': 'Papa',
'q': 'Quebec',
'r': 'Romeo',
's': 'Sierra',
't': 'Tango',
'u': 'Uniform',
'v': 'Victor',
'w': 'Whiskey',
'x': 'Xray',
'y': 'Yankee',
'z': 'Zulu'
}
out = []
for i in string:
if i.lower().isalpha():
out.append(nato_dictionary[i.lower()] + ' ')
elif i == ' ':
out.append('')
else:
out.append(i + ' ')
return ''.join(out).strip()
print(nato('If you can read'))
|
[
"dandumitriu33@gmail.com"
] |
dandumitriu33@gmail.com
|
6ea1c1cd65f230e1a97f8f67ddff9de781985aec
|
b0e9fcea70640eb73c62febc99ebe6ffe85ce10d
|
/research/improve_nas/trainer/optimizer.py
|
03e74fb7f8a176ae3b07165d2e2b04c88674a8ab
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
mns2013/taek
|
3a550eee9604b5cdfe95ac2c691a20e3126ccae1
|
385cb5117159a481998d729dda88c1fbd12df77c
|
refs/heads/master
| 2023-09-02T01:58:04.015297
| 2021-10-22T20:40:45
| 2021-10-22T20:40:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,015
|
py
|
# Lint as: python3
"""Definition of optimizers and learning rate schedules.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import functools
import tensorflow.compat.v1 as tf
class LearningRateSchedule(object):
"""A learning rate decay schedule interface."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def apply(self, learning_rate):
"""Applies the learning rate decay schedule to the given learning rate.
Args:
learning_rate: Float `Tensor` learning rate.
Returns:
Float `Tensor` learning rate with applied decay schedule.
"""
class Constant(LearningRateSchedule):
"""A constant schedule."""
def apply(self, learning_rate):
"""See `LearningRateSchedule`."""
return learning_rate
class Cosine(LearningRateSchedule):
"""Cosine."""
def __init__(self, decay_steps, alpha):
"""Returns a `Cosine` instance.
Args:
decay_steps: Number of steps to decay over.
alpha: Minimum learning rate value as a fraction of learning_rate.
Returns:
A `Cosine` instance.
"""
self._decay_fn = functools.partial(
tf.train.cosine_decay, decay_steps=decay_steps, alpha=alpha)
def apply(self, learning_rate):
"""See `LearningRateSchedule`."""
# Start at -1 since we increment before reading.
global_step = tf.get_variable("decay_step", initializer=-1, trainable=False)
increment_op = tf.assign_add(global_step, 1)
with tf.control_dependencies([increment_op]):
learning_rate = self._decay_fn(
learning_rate=learning_rate, global_step=global_step.read_value())
return learning_rate
def fn_with_name(optimizer_name,
learning_rate_schedule="constant",
cosine_decay_steps=None):
"""Returns an optimizer_fn with the given name.
Args:
optimizer_name: Optimizer name string for identifying the optimizer. Either
'adagrad', 'adam', 'momentum', or 'sgd'.
learning_rate_schedule: Type of learning rate schedule to use. Opened for
future extensions.
cosine_decay_steps: See `Cosine`.
Returns:
An optimizer_fn which takes a `learning_rate` scalar `Tensor` argument and
returns an `Optimizer` instance.
Raises:
ValueError: If `optimizer_name` is invalid.
"""
optimizers = {
"adagrad": tf.train.AdagradOptimizer,
"adam": tf.train.AdamOptimizer,
"lazy_adam": tf.contrib.opt.LazyAdamOptimizer,
"momentum": functools.partial(tf.train.MomentumOptimizer, momentum=.9),
"rmsprop": tf.train.RMSPropOptimizer,
"sgd": tf.train.GradientDescentOptimizer,
}
optimizer_name = optimizer_name.lower()
if optimizer_name not in optimizers:
raise ValueError("Invalid optimizer '{}'".format(optimizer_name))
optimizer_fn = optimizers[optimizer_name]
schedules = {
"constant":
Constant(),
"cosine":
Cosine(decay_steps=cosine_decay_steps, alpha=0.0),
}
schedule_name = learning_rate_schedule.lower()
if schedule_name not in schedules:
raise ValueError(
"Invalid learning_rate_schedule '{}'".format(schedule_name))
schedule = schedules[schedule_name]
def _optimizer_with_schedule(learning_rate):
learning_rate = schedule.apply(learning_rate)
optimizer = optimizer_fn(learning_rate)
return optimizer, learning_rate
return _optimizer_with_schedule
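# A minimal usage sketch (TF1 graph mode assumed; the values below are
# illustrative, not taken from any particular experiment):
#
#   optimizer_fn = fn_with_name("momentum",
#                               learning_rate_schedule="cosine",
#                               cosine_decay_steps=10000)
#   optimizer, learning_rate = optimizer_fn(0.1)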
|
[
"weill@google.com"
] |
weill@google.com
|
7a4afe01efdff40a87a5862423142c8f4ca45da6
|
67358d856f1ec155a15b0de72a7e5c112fed7766
|
/foreman/architect.py
|
6fe0cdc4220cfe83cc883e62fd5a961603bd4c9b
|
[] |
no_license
|
rsumner33/pyforeman
|
03c528bcd79c16c7a2062300209416ca35a99f70
|
b7a5e32423b62acbc5d0f94d01a471cac84282f7
|
refs/heads/master
| 2022-07-18T02:52:19.038226
| 2018-05-07T01:25:30
| 2018-05-07T01:25:30
| 132,389,810
| 0
| 0
| null | 2020-02-22T08:57:13
| 2018-05-07T01:12:36
|
HTML
|
UTF-8
|
Python
| false
| false
| 338
|
py
|
# Container class
class Blueprint:
def __init__(self, name, builder, materials, foundation):
self.name = name # String: uid of target material
self.builder = builder # Builder lambda
self.materials = materials # Frozenset of Strings (material uids)
self.foundation = foundation # Tuple of Blueprints
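# A minimal usage sketch (names and builder lambdas are purely illustrative):
#   plank = Blueprint('plank', lambda mats: 'plank', frozenset(['log']), ())
#   table = Blueprint('table', lambda mats: 'table', frozenset(['plank']), (plank,))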
|
[
"rsumner868@icloud.com"
] |
rsumner868@icloud.com
|
85686c4d05e6b6d3672cc35bd6d1be81f39dc1a3
|
569d0015efa37a91730de1f2fb2a3187cb334046
|
/advent_of_code_2017/03.py
|
8b5a0c3f422a581900845b23b375b171a6bce197
|
[] |
no_license
|
jcbbeiter/misc-projects
|
a14bad6b0a879c44e19c09dfc8566841bece9a94
|
f0f1f0abf5f45e8e2c4198ae52a03cc6b647e198
|
refs/heads/master
| 2020-09-09T06:11:33.000834
| 2018-07-08T00:52:29
| 2018-07-08T00:52:29
| 94,440,361
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
#!/usr/bin/env python2.7
import math
# First part
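# Note on the approach below: each odd square (2k+1)^2 closes ring k of the
# spiral, so the loop finds the smallest ring containing num; the Manhattan
# distance is then the ring index plus the offset of num from the midpoint of
# its side, which is what ordinal/diff compute.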
nums = [12, 23, 1024, 368078]
for num in nums:
factor = 1
while num > factor**2:
factor = factor + 2
largest = factor**2
smallest = (factor-2)**2
ring = (factor-1)/2
ordinal = (num-smallest)%(2*ring)
diff = ring-ordinal if ordinal < ring else ordinal-ring
distance = ring+diff
print "Data from square " + str(num) + " is carried " + str(distance) + " squares"
# For the second part -- this sequence is documented on OEIS!
# https://oeis.org/A141481/b141481.txt
# Lazy lookup
|
[
"jbeiter@nd.edu"
] |
jbeiter@nd.edu
|
add2d5dbd5073d4c3256e137cc374c427b69771a
|
11e4bd1b29a66b97df9b3b32b2827eac88a24fd8
|
/pysrc/829.py
|
0cfb02a821fd116e08ad27b2f72a325433c7760d
|
[] |
no_license
|
linkinpark213/leetcode-practice
|
4db17462b67e7a1a34184aada041cb3854f78385
|
13379e6fdd9299c606889fefa0a38426ef4fa5e7
|
refs/heads/master
| 2021-07-08T16:16:28.428003
| 2020-09-16T14:30:51
| 2020-09-16T14:30:51
| 185,179,184
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 748
|
py
|
class Solution:
def consecutiveNumbersSum(self, N: int) -> int:
i = 1
count = 1
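# Note: a run of i consecutive positive integers starting at a sums to
#   a + (a+1) + ... + (a+i-1) = i*a + i*(i-1)/2 = N,
# which rearranges to N - i*(i+1)/2 = i*(a-1); such a run exists exactly when
# that quantity is non-negative and divisible by i, which is what temp checks.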
while i ** 2 <= 2 * N:
i = i + 1
temp = N - i * (i + 1) // 2
if temp >= 0 and temp % i == 0:
count += 1
print(i, end=', ')
print(count)
return count
if __name__ == '__main__':
solution = Solution()
print(solution.consecutiveNumbersSum(1) == 1)
print(solution.consecutiveNumbersSum(3) == 2)
print(solution.consecutiveNumbersSum(4) == 1)
print(solution.consecutiveNumbersSum(5) == 2)
print(solution.consecutiveNumbersSum(9) == 3)
print(solution.consecutiveNumbersSum(15) == 4)
print(solution.consecutiveNumbersSum(85) == 4)
|
[
"linkinpark213@outlook.com"
] |
linkinpark213@outlook.com
|
69ea2fccd1bf4675dcc82489a7e1e6a2774b3b49
|
c564aeb1370ed6dc9eb319f2cd3c6df6f0f73c8c
|
/schemas/person.py
|
85df48bff97ee98c4e1b37fd54c4822fe2387560
|
[] |
no_license
|
dexx1220/flask-rest-api
|
206603f17fb9057dfa53cd2c6031a3bd5bc8e508
|
99aac87f069f9b2025089e54b6c752b1af0b297c
|
refs/heads/master
| 2022-12-10T00:06:31.336994
| 2020-01-07T17:44:00
| 2020-01-07T17:44:00
| 232,381,623
| 0
| 0
| null | 2022-12-08T03:24:17
| 2020-01-07T17:44:25
|
Python
|
UTF-8
|
Python
| false
| false
| 296
|
py
|
from models.person import Person
from config import db, ma
from marshmallow import fields
from .person_note import PersonNoteSchema
class PersonSchema(ma.ModelSchema):
class Meta:
model = Person
sqla_session = db.session
notes = fields.Nested(PersonNoteSchema, default=[], many=True)
|
[
"dexter.heng@S05242-MBPR.local"
] |
dexter.heng@S05242-MBPR.local
|
1383e6c1c998e8920abcf806361d456eb0406ada
|
beed259c9aaf824c5307d93ffa736255f2d98831
|
/leetcode/huawei/鸡精.py
|
c46d16b5e3409e2a911d05f02e8ee154f8c8440d
|
[
"Apache-2.0"
] |
permissive
|
chaofan-zheng/python_learning_code
|
21345f97ebf74c3cad0ef488a93ec8a7fd771a63
|
5d05848911d55aa49eaee4afd7ffd80536fad7aa
|
refs/heads/main
| 2023-05-27T16:17:18.130492
| 2021-06-06T14:23:31
| 2021-06-06T14:23:31
| 338,234,801
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,915
|
py
|
"""
1. Number coloring: every number painted a given color must be divisible by the smallest number of that color. What is the minimum number of colors needed?
2. Given an input K, people numbered 1 to 100 count off; whoever counts K is removed, and the next person restarts the count from 1, until fewer people remain than K. What are the original numbers of the remaining people?
Two-star problem:
3. Server broadcast problem: given a 2D array of 1s and 0s, array[i][j]==1 means servers i and j are directly connected, otherwise they are at most indirectly connected; both directly and indirectly connected servers can share the broadcast
(e.g. if A and B are directly connected and B and C are directly connected, then A and C are indirectly connected). How many servers must initially be given the broadcast so that every server receives it?
"""
# T1
# nums = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] # also consider the empty case and 1
# bases = [2]
# for num in nums:
# con_sign = 0
# for base in bases:
# if num % base == 0:
# con_sign = 1
# if con_sign == 1:
# continue
# else:
# bases.append(num)
# print(len(bases))
# T2
# k = int(input())
# list01 = list(range(1, 101))
# res_list = list01
# i = 0
# while len(res_list) >= k:
# i = (i + k-1) % len(res_list)
# if i != len(res_list) - 1:
# res_list = res_list[:i] + res_list[i + 1:]
# else: # guard against going out of bounds
# res_list = res_list[:i]
# i = 0
# print(res_list)
# T3
map = [
[1, 1, 0, 0, 0],
[0, 1, 1, 0, 0],
[1, 1, 1, 0, 0],
[0, 0, 0, 1, 1],
[0, 0, 0, 1, 1],
]
res_map = {}
count = len(map)
for i in range(len(map)):
res_map[i] = set()
for j in range(len(map)):
if map[i][j] == 1:
res_map[i].add(j)
print(res_map)
count = 0
pool = set()
for key1 in res_map:
if pool.intersection(res_map[key1]) == set():
pool = pool.union(res_map[key1])
count += 1
else:
pool = pool.union(res_map[key1]) # union() returns a new set, so the result must be kept
print(count)
# image sorting
while True:
try:
print("".join(sorted(input())))
except:break
|
[
"417355570@qq.com"
] |
417355570@qq.com
|
38cc87aaa7de26eae7ccf2dc51247306d80ed3bb
|
9df85a773a056e50c7bbc9d1c821c5a33c1577fe
|
/handlers/tools.py
|
c7907e1518afebc1bcc81c4429effd0983c904f8
|
[] |
no_license
|
patrickt/mltshp
|
207aa5cc2871400426daaa6e45293d420ca54cd7
|
7cc8a4f5af10f69c81048e6f82fc8f62fcf35ffb
|
refs/heads/master
| 2021-01-18T20:53:18.755287
| 2017-04-02T05:46:06
| 2017-04-02T05:46:06
| 86,997,957
| 0
| 0
| null | 2017-04-02T16:06:41
| 2017-04-02T16:06:41
| null |
UTF-8
|
Python
| false
| false
| 23,428
|
py
|
from urlparse import urlparse, parse_qs
import os
import re
import random
import json
from tornado.httpclient import HTTPRequest
import tornado.auth
import tornado.web
from tornado.escape import url_escape, json_decode
from tornado.options import define, options
from BeautifulSoup import BeautifulSoup
from models import Externalservice, User, Sourcefile, Sharedfile, Shake, ExternalRelationship, ShakeCategory
from base import BaseHandler
from lib.utilities import base36encode
import lib.feathers
class PickerPopupHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
url = self.get_argument('url', None)
source_url = self.get_argument('source_url', '')
file_name = self.get_argument('title', '')
current_user = self.get_current_user_object()
if not url:
raise tornado.web.HTTPError(404)
#Hide filepile URLs
if source_url and (source_url.find('filepile.org') > -1):
source_url = ''
#If this is a Google Image URL, extract the referer
if source_url.startswith("http://www.google.com/imgres?imgurl="):
parsed_google_url = urlparse(source_url)
if parsed_google_url.query:
parsed_google_query = parse_qs(parsed_google_url.query)
if parsed_google_query['imgrefurl']:
source_url = parsed_google_query['imgrefurl'][0]
else:
source_url = ""
else:
source_url = ""
#Clear out nasty reader cruft
if source_url.find('utm_source=') > -1:
source_url = source_url[:source_url.find('utm_source=')]
if source_url[-1] == '?':
source_url = source_url[:-1]
parsed_url = urlparse(url)
if not os.path.basename(parsed_url.path):
raise tornado.web.HTTPError(404)
if parsed_url.scheme.lower() not in ['http', 'https']:
raise tornado.web.HTTPError(404)
#need to determine if we can save it here. ERROR if you can't get a file name
parsed_url_query = ''
if parsed_url.query:
parsed_url_query = "?" + parsed_url.query
#determine if this is a vimeo or youtube URL
is_video = False
shakes = current_user.shakes(include_managed=True)
#replace plus signs with %20's
return self.render("tools/picker.html", file_name=file_name, width="", height="", \
url=parsed_url.scheme + "://" + parsed_url.netloc + parsed_url.path + parsed_url_query, \
source_url=source_url, description='', is_video=is_video, shakes=shakes)
@tornado.web.authenticated
@tornado.web.asynchronous
def post(self):
"""
TODO: better determination of correct file name, if it is indeed a file, plus type.
"""
self.url = self.get_argument('url', None)
self.content_type = None
if not self.url:
raise tornado.web.HTTPError(404)
#TODO : check if it is a valid URL
# copy from above
http = tornado.httpclient.AsyncHTTPClient()
#this sends a header value for cookie to d/l protected FP files
fp_cookie = None
b = re.compile(r"^http(s?)://(.*?)(.?)filepile\.org")
m = b.match(self.url)
if m:
for char in [' ', '[', ']']:
self.url = self.url.replace(char, url_escape(char))
fp_cookie = {'Cookie':'_filepile_session=4c2eff30dd27e679d38fbc030b204488'}
request = HTTPRequest(self.url, headers=fp_cookie, header_callback=self.on_header)
http.fetch(request, self.on_response)
def on_response(self, response):
url_parts = urlparse(response.request.url)
file_name = os.path.basename(url_parts.path)
title = self.get_argument("title", None)
source_url = self.get_argument('source_url', None)
description = self.get_argument('description', None)
shake_id = self.get_argument('shake_id', None)
if title == file_name:
title = None
if self.content_type not in self.approved_content_types:
if response.body[0:50].find('JFIF') > -1:
self.content_type = 'image/jpeg'
else:
return self.render("tools/picker-error.html")
if len(file_name) == 0:
return self.render("tools/picker-error.html")
sha1_file_key = Sourcefile.get_sha1_file_key(file_data=response.body)
user = self.get_current_user()
try:
fh = open("%s/%s" % (options.uploaded_files, sha1_file_key), 'wb')
fh.write(response.body)
fh.close()
except Exception as e:
raise tornado.web.HTTPError(500)
sf = Sharedfile.create_from_file(
file_path = "%s/%s" % (options.uploaded_files, sha1_file_key),
file_name = file_name,
sha1_value = sha1_file_key,
content_type = self.content_type,
user_id = user['id'],
title = title,
shake_id = shake_id)
sf.source_url = source_url
sf.description = description
sf.save()
if not options.debug:
# file cleanup
try:
os.remove("%s/%s" % (options.uploaded_files, sha1_file_key))
except:
pass
self.render("tools/picker-success.html", sf=sf)
def on_header(self, header):
if header.startswith("Content-Length:"):
content_length = re.search("Content-Length: (.*)", header)
if int(content_length.group(1).rstrip()) > 10000000: #this is not the correct size to error on
raise tornado.web.HTTPError(413)
elif header.startswith("Content-Type:"):
ct = re.search("Content-Type: (.*)", header)
self.content_type = ct.group(1).rstrip()
class PluginsHandler(BaseHandler):
def get(self):
return self.render("tools/plugins.html")
class ToolsTwitterHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
return self.render("tools/twitter.html")
class ToolsTwitterHowToHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
return self.render("tools/twitter-how-to.html")
class ToolsTwitterConnectHandler(BaseHandler, tornado.auth.TwitterMixin):
@tornado.web.asynchronous
@tornado.web.authenticated
def get(self):
if self.get_argument("oauth_token", None):
self.get_authenticated_user(self._on_auth)
return
self.authorize_redirect(callback=self._on_redirect)
def _on_redirect(self):
pass
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Twitter auth failed")
#is there an existing external account?
current_user = self.get_current_user()
authenticated_user = User.get("id=%s", current_user['id'])
existing = Externalservice.by_user(authenticated_user, Externalservice.TWITTER)
if existing:
existing.service_id = user['access_token']['user_id']
existing.service_secret = user['access_token']['secret']
existing.service_key = user['access_token']['key']
existing.screen_name = user['access_token']['screen_name']
existing.save()
else:
external_service = Externalservice(
user_id=authenticated_user.id,
service_id=user['access_token']['user_id'],
screen_name=user['access_token']['screen_name'],
type=Externalservice.TWITTER,
service_key=user['access_token']['key'],
service_secret=user['access_token']['secret'])
external_service.save()
# if not, insert credentials for this user
# if there is, update that account
return self.render("tools/twitter-connected.html")
class BookmarkletPageHandler(BaseHandler):
"""Displays a page for a user to save the bookmarklet."""
@tornado.web.authenticated
def get(self):
return self.render("tools/bookmarklet.html")
class NewPostHandler(BaseHandler):
"""
Renders a panel to kick off the new post process.
"""
@tornado.web.authenticated
def get(self):
user = self.get_current_user_object();
shakes = user.shakes(include_managed=True)
can_upload_this_month = user.can_upload_this_month()
return self.render("tools/new-post.html", shakes=shakes, \
can_upload_this_month=can_upload_this_month)
class SaveVideoHandler(BaseHandler):
@tornado.web.asynchronous
@tornado.web.authenticated
def get(self):
url = self.get_argument('url', None)
shake_id = self.get_argument('shake_id', "")
if not url:
self.render("tools/save-video.html", url= url, shake_id=shake_id)
return
url = Sourcefile.make_oembed_url(url.strip())
if url:
self.handle_oembed_url(url)
else:
self.render("tools/save-video-error.html", message="Invalid URL. We didn't recognize that URL")
def on_oembed_response(self, response):
if response.code == 401:
self.render("tools/save-video-error.html", message="Embedding disabled by request. The user who uploaded this file has requested it not be embedded on other web sites.")
return
self.handle_oembed_data(response.body)
def handle_oembed_url(self, url):
"""Takes a sanitized URL (as created by models.sourcefile.make_oembed_url) and
issues a request for it. If the URL is actually a data URI, strip off the well-known
header, and handle the oembed JSON encoded into it instead.
"""
if url.startswith('data:text/json;charset=utf-8,'):
j_oembed = url.replace('data:text/json;charset=utf-8,', '', 1)
self.handle_oembed_data(j_oembed)
else:
request = HTTPRequest(url, 'GET')
http = tornado.httpclient.AsyncHTTPClient()
http.fetch(request,self.on_oembed_response)
def handle_oembed_data(self, oembed):
try:
j_oembed = json_decode(oembed)
except Exception as e:
self.render("tools/save-video-error.html", message="We could not load the embed code for this file. Please contact support.")
return
if 'provider_name' not in j_oembed:
self.render("tools/save-video-error.html", message="We could not load the embed code for this file. Please contact support.")
return
if j_oembed.has_key('type') and j_oembed['provider_name'] == 'Flickr' and j_oembed['type'] != 'video':
self.render("tools/save-video-error.html", message="We could not load the embed code for this file. Please contact support.")
return
shake_id = self.get_argument('shake_id', "")
url = self.get_argument('url', None)
if j_oembed['provider_name'] == 'YouTube':
m = re.search(r"src=\"(.*)v\/([A-Za-z0-9\-\_]+)", j_oembed['html']) or \
re.search(r"src=\"(.*)embed\/([A-Za-z0-9\-\_]+)", j_oembed['html'])
if m:
url = "%swatch?v=%s" % (m.group(1), m.group(2))
j_oembed['html'] = """<iframe class="youtube-player"
type="text/html" width="%s" height="%s"
src="http://www.youtube.com/embed/%s?fs=1&feature=oembed&rnd=%s" frameborder="0" id="ytframe"></iframe>""" % (550, 339, m.group(2), str(random.random()))
else:
self.render("tools/save-video-error.html", message="We could not load the embed code for this file. Please contact support.")
return
elif j_oembed['provider_name'] == "Flickr":
j_oembed['thumbnail_url'] = url
elif j_oembed['provider_name'] == "Vine":
clean_path = re.search('^(https?://vine.co/v/[a-zA-Z0-9]+)', url)
if clean_path and clean_path.group(1):
url = clean_path.group(1)
j_oembed['thumbnail_url'] = url
if self.request.method == "POST":
self.oembed_doc = j_oembed
request = HTTPRequest(self.oembed_doc['thumbnail_url'], 'GET')
http = tornado.httpclient.AsyncHTTPClient()
http.fetch(request,self.on_thumbnail_response)
else:
self.render("tools/save-video.html", url=url, html=j_oembed['html'], shake_id=shake_id)
def on_thumbnail_response(self, response):
if response.code != 200:
self.render("tools/save-video-error.html", message="We could not load the thumbnail for this file and therefore could not save this video. Please contact support.")
return
#if the thumbnail url needs to be extracted (Flickr) let's see if
# we got back HTML that points to the thumbnail
if self.oembed_doc['provider_name'] == "Flickr" and response.headers['Content-Type']=='text/html; charset=utf-8':
#if we're here, that means we need to extract the thumbnail and make a call to the actual jpg
s = re.search('<link rel="image_src" href="http://farm(\d).static.flickr.com/(\d+)/(\d+)_([a-zA-Z0-9]+)_m.jpg">', response.body)
try:
if s and s.group(0) and s.group(1) and s.group(2) and s.group(3) and s.group(4):
self.oembed_doc['thumbnail_url'] = "http://farm%s.static.flickr.com/%s/%s_%s_b.jpg" % (s.group(1), s.group(2), s.group(3), s.group(4))
request = HTTPRequest(self.oembed_doc['thumbnail_url'], 'GET')
http = tornado.httpclient.AsyncHTTPClient()
http.fetch(request,self.on_thumbnail_response)
except:
self.render("tools/save-video-error.html", message="We could not load the thumbnail for this file and therefore could not save this video. Please contact support.")
return
elif self.oembed_doc['provider_name'] == "Vine" and response.headers['Content-Type']=='text/html; charset=utf-8':
# if we're here, that means we need to extract the thumbnail and make a call to the actual jpg
# use BeautifulSoup to parse for the title and meta tag. We'll do this bit of danger in a
# try block and shrug if something bad happens
try:
soup = BeautifulSoup(response.body, convertEntities=BeautifulSoup.HTML_ENTITIES)
self.oembed_doc['title'] = soup.title.text
thumbnail = soup.find('meta', {"property": "og:image"})
if thumbnail:
self.oembed_doc['thumbnail_url'] = thumbnail.attrMap['content']
request = HTTPRequest(self.oembed_doc['thumbnail_url'], 'GET')
http = tornado.httpclient.AsyncHTTPClient()
http.fetch(request,self.on_thumbnail_response)
return
except:
pass
# either we failed to find a thumbnail url, or an exception was raised
# while attempting to fetch.
self.render("tools/save-video-error.html", message="We could not load the thumbnail for this file and therefore could not save this video. Please contact support.")
else:
# save the response
url = self.get_argument('url')
current_user = self.get_current_user_object()
sha1_key = Sourcefile.get_sha1_file_key(file_path=None, file_data=url)
thumbnail_path = "%s/%s" % (options.uploaded_files, sha1_key)
fh = open(thumbnail_path, 'wb')
fh.write(response.body)
fh.close()
source_file = Sourcefile.create_from_json_oembed(link=url, oembed_doc=self.oembed_doc, thumbnail_file_path=thumbnail_path)
#cleanup
if not options.debug:
try:
os.remove(thumbnail_path)
except:
pass
title = ''
if self.oembed_doc.has_key('title'):
title = self.oembed_doc['title']
shared_file = Sharedfile(user_id=current_user.id, name=url, content_type='text/html', source_id=source_file.id, title=title, source_url=url)
shared_file.save()
share_key = base36encode(shared_file.id)
shared_file.share_key = share_key
shared_file.save()
user_shake = Shake.get('user_id = %s and type=%s', current_user.id, 'user')
shared_file.add_to_shake(self.destination_shake)
if self.oembed_doc.has_key('description'):
shared_file.description = self.oembed_doc['description']
self.write({'path' : "/p/%s" % (share_key)})
self.finish()
@tornado.web.asynchronous
@tornado.web.authenticated
def post(self):
url = self.get_argument('url', None)
if not url:
self.render("tools/save-video.html", url = url, title = None, description=None)
url = Sourcefile.make_oembed_url(url.strip())
if url:
current_user = self.get_current_user_object();
shake_id = self.get_argument('shake_id', None)
if not shake_id:
self.destination_shake = Shake.get('user_id=%s and type=%s', current_user.id, 'user')
else:
self.destination_shake = Shake.get('id=%s', shake_id)
if not self.destination_shake:
return self.render("tools/save-video-error.html", message="We couldn't save the video to specified shake. Please contact support.")
if not self.destination_shake.can_update(current_user.id):
return self.render("tools/save-video-error.html", message="We couldn't save the video to specified shake. Please contact support.")
if current_user.email_confirmed != 1:
return self.render("tools/save-video-error.html", message="You must confirm your email address before you can post.")
self.handle_oembed_url(url)
else:
self.render("tools/save-video-error.html", message="We could not load the embed code. The video server may be down. Please contact support.")
class FindShakesGroups(BaseHandler):
"""
path: /tools/find-shakes
Returns a list of recommended group shakes.
"""
@tornado.web.authenticated
def get(self):
user = self.get_current_user_object()
categories = ShakeCategory.all('ORDER BY name')
users_sidebar = User.recommended_for_user(user)
featured_shakes = Shake.featured_shakes(3)
return self.render('tools/find-shakes.html', current_user_obj=user,
users_sidebar=users_sidebar, categories=categories,
featured_shakes=featured_shakes)
class FindShakesPeople(BaseHandler):
"""
path: /tools/find-shakes/people
Returns a list of recommended users.
"""
@tornado.web.authenticated
def get(self):
user = self.get_current_user_object()
users = User.random_recommended(limit=30)
users_sidebar = User.recommended_for_user(user)
return self.render('tools/find-shakes-people.html', current_user_obj=user, users=users, users_sidebar=users_sidebar)
class FindShakesTwitter(BaseHandler):
"""
path: /tools/find-shakes/twitter
A shell of a page that sets up the asynchronous request to fetch
twitter friends.
"""
@tornado.web.authenticated
def get(self):
user = self.get_current_user_object()
users_sidebar = User.recommended_for_user(user)
return self.render('tools/find-shakes-twitter.html', current_user_obj=user, users_sidebar=users_sidebar)
class FindShakesQuickFetchCategory(BaseHandler):
@tornado.web.authenticated
def get(self, name):
category = ShakeCategory.get("short_name = %s", name)
if not category:
raise tornado.web.HTTPError(404)
user = self.get_current_user_object()
shakes = Shake.for_category(category)
return self.render("tools/find-shakes-quick-fetch-category.html",
shakes=shakes, current_user_obj=user)
class FindShakesQuickFetchTwitter(BaseHandler):
"""
path: /tools/find-shakes/quick-fetch-twitter
This method gets called as an AJAX call from the /tools/find-shakes/twitter
page. If the user has no twitter account associated, will render
page with link to connect twitter account.
If user has twitter account connected and his friend graph
populated (ExternalRelationship), it will list friends.
If twitter account connected, but no friend graph, will call twitter
asynchronously, populate friend graph and show friends. Will also
call Twitter asynchronously if "refresh" arg is passed in.
"""
@tornado.web.asynchronous
@tornado.web.authenticated
def get(self):
refresh = self.get_argument('refresh', None)
self.user = self.get_current_user_object()
feather_client = lib.feathers.Feathers(key=options.twitter_consumer_key, secret=options.twitter_consumer_secret)
self.external_service = Externalservice.by_user(self.user, Externalservice.TWITTER)
if not self.external_service:
self.add_error('no_service', 'No Service.')
return self.render('tools/find-shakes-quick-fetch-twitter.html')
friends = self.external_service.find_mltshp_users()
if friends and not refresh:
return self.render('tools/find-shakes-quick-fetch-twitter.html', current_user_obj=self.user,\
friends=friends, externalservice=self.external_service)
params = {
'user_id' : self.external_service.service_id,
'cursor' : -1
}
feather_client.friends.ids.get(params=params,callback=self._add_friends, \
token_key=self.external_service.service_key, token_secret=self.external_service.service_secret)
def _add_friends(self, response):
# 503 - overloaded, 502 - down, 500 - broken.
if response.code == 503 or response.code == 502 or response.code == 500:
self.add_error('twitter_down', "Twitter down.")
return self.render('tools/find-shakes-quick-fetch-twitter.html')
# 400 - rate limit
if response.code == 400:
self.add_error('twitter_rate_limit', "You are over the Twitter rate limit.")
return self.render('tools/find-shakes-quick-fetch-twitter.html')
json_response = json.loads(response.body)
for service_id in json_response['ids']:
ExternalRelationship.add_relationship(self.user, service_id, ExternalRelationship.TWITTER)
friends = self.external_service.find_mltshp_users()
if not friends:
self.add_error('no_friends', "No friends.")
return self.render('tools/find-shakes-quick-fetch-twitter.html')
return self.render('tools/find-shakes-quick-fetch-twitter.html', current_user_obj=self.user, \
friends=friends, externalservice=self.external_service)
|
[
"brad@bradchoate.com"
] |
brad@bradchoate.com
|
85f4fa6516e46e1b52ec1a2be786f56b046039ea
|
52d8e56620497a8780de1888afc45cfbe5af3548
|
/secuenciales/ejercicio6.py
|
7fb04f40290c33255b96bf00dc329a26c87126b5
|
[] |
no_license
|
Dwensdc/Upeu_practice1_FDP
|
649c8c9a06146003fe2febc8ced1533f7724a00e
|
d9fc58eb80b643af08f74ef74be9383ca4bd1799
|
refs/heads/main
| 2023-04-16T19:03:07.919262
| 2021-05-09T23:16:31
| 2021-05-09T23:16:31
| 357,931,101
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 179
|
py
|
print("======================================================================")
print("CALCULAR ")
print("======================================================================")
|
[
""
] | |
3c02048a3bf779942c02996c3dea76cb18814ae5
|
78edd838985658cc1e1abb9dd482e937652526fe
|
/reverse.py
|
7522f642a26e99c3b01847a60e1f4518600517f1
|
[] |
no_license
|
Tommyhu28/Epi-school2
|
a576f00bb21708ea4a48215d600c1a9b38789f4a
|
21a0d3b9dbc01a59b1993875524ca9401b03e2c6
|
refs/heads/master
| 2023-06-01T09:32:04.984032
| 2021-06-21T21:15:21
| 2021-06-21T21:15:21
| 379,064,337
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
def my_reverse(items):
size = len(items)
if (size < 1): return None
my_list = []
count = 1
while (count <= size):
my_list.append(items[-count])
count += 1
print(my_list)
return my_list
# s_list = [12,11,10,9,8]
# my_reverse(s_list)
|
[
"zonghonghu@gmail.com"
] |
zonghonghu@gmail.com
|
4df9e1ecab90826d4911cafc525a27c48d0ed37d
|
e8ab0029ebb965fc0479134b6faa5e6ebc790083
|
/6_correlation.py
|
fe4bb91bdc53a133c80b2a001a085e5b760a62b6
|
[] |
no_license
|
shenglih/fluency
|
0182411a0b867dd01c8e6f481dddad8975d0d3b7
|
2d38ed05703ee1237cd23fa1ab53fa5aa91557ac
|
refs/heads/master
| 2021-09-16T12:45:20.579437
| 2018-06-20T19:18:03
| 2018-06-20T19:18:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,739
|
py
|
#!/usr/bin/env python
print "system argument 1: which feature file to use, no need to include .npy"
print "system argument 2: take log of memscore or not: 0, don't; 1, do."
print "system argument 3: take log of merged['mean'] or not: 0, don't; 1, do"
print "system argument 4: take log of popscore or not: 0, don't; 1, do."
print "system argument 5: take log of entropy/kl or not: 0, don't; 1, do."
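# Example invocation (the feature-file name is hypothetical; the flags follow
# the argument descriptions printed above):
#   python 6_correlation.py fc7_features 1 0 1 1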
import os
from PIL import Image
import numpy as np
import sys
import pandas as pd
import codecs
import statsmodels.api as sm
import scipy.stats
from scipy.stats import entropy
from statsmodels.iolib.summary2 import summary_col
from itertools import izip
import pickle
#folder = sys.argv[1]
#img_color = Image.open(image_file)
#img_grey = img_color.convert('L')
#img_color = np.array(img_color)
#img_grey = np.array(img_grey)
input_dir = "/mnt/saswork/sh2264/vision/data/"
#input_dir = "/Users/sheng/image"
directories = os.listdir(input_dir)
index = 0
#index2 = 0
for folder in directories:
if folder == '.DS_Store':
pass
else:
print "folder "+folder
#images = os.listdir(input_dir + '/' + folder)
os.chdir(input_dir + '/' + folder)
index += 1
try:
entropies_name = [x[:-5] for x in np.load("entropy_name.npy")] # remove .jpeg
except IOError:
continue
entropies = np.load("entropy.npy")
entropy_mean = [x.mean() for x in entropies]
entropy_std = [x.std() for x in entropies]
imagenet = np.load("imagenet_"+folder+".npy")
imagenet_name = np.load("imagenet_index_"+folder+".npy")
merged_imagenet = pd.DataFrame({'imagenet':[scipy.stats.entropy(x) for x in imagenet],'name':[x[0][:-5] for x in imagenet_name]})
memscore = []
memname = []
ms = codecs.open(input_dir+folder+"/memscore.txt",encoding = "utf-8")
for line in ms:
x = line.split("\t")
try:
memscore.append(float(x[1].split("\n")[0]))
memname.append(x[0])
except ValueError:
pass
mem = pd.DataFrame({'name':memname, 'mscore':memscore})
popscore = []
popname = []
pop = codecs.open(input_dir+folder+"/popscore.txt",encoding = "utf-8")
for line in pop:
x = line.split("\t")
try:
popscore.append(float(x[1].split("\n")[0]))
popname.append(x[0])
except ValueError:
pass
adjusted_popscore = [x if x>0 else 0.016587944219523178 for x in popscore]
pop = pd.DataFrame({'name':popname, 'pscore':adjusted_popscore})
entropy = pd.DataFrame({'name': entropies_name, 'mean':entropy_mean, 'std':entropy_std})
if index == 1:
merged = pd.merge(pd.merge(pd.merge(pop, entropy, how="inner", on="name"),mem, how="inner",on="name"), merged_imagenet, how = "inner", on = "name")
else:
merged = merged.append(pd.merge(pd.merge(pd.merge(pop, entropy, how="inner", on="name"),mem, how="inner",on="name"), merged_imagenet, how = "inner", on = "name"))
# merged.keys(): name, mscore, pscore, mean, std
os.chdir("/mnt/saswork/sh2264/vision/code/")
ca = np.load("category_crosswalk.npy")
co = np.load("country_crosswalk.npy")
category = pd.DataFrame({'name':[x[1][:-5] for x in ca],'category':[x[0] for x in ca]})
country = pd.DataFrame({'name':[x[1][:-5] for x in co],'country':[x[0] for x in co]})
co_ca = pd.merge(category,country,how="inner", on="name")
merged = pd.merge(merged, co_ca, how="inner", on="name")
merged0 = merged
features = np.load(sys.argv[1]+".npy")
print features.shape
y_train = np.load("y_train_processed.npy")
print y_train.shape
features_name = np.load("X_train_name_processed.npy")
print features_name.shape
print "the above three should have the same first dimension..."
## temp when results for only 118576 instances are available:
#features0 = features
#features = features[:118576]
#y_train0 = y_train
#y_train = y_train[:118576]
#features_name0=features_name
#features_name = features_name[:118576]
## end temp
features_entropy = [scipy.stats.entropy(x) for x in features]
# kullback leibler divergence of x,y --- scipy.stats.entropy(x,y): x is truth, y is candidate
features_kl = [scipy.stats.entropy(x,y) for x,y in izip(y_train,features)]
# take the log:
if sys.argv[5] == "1":
features_entropy = [np.log(x) for x in features_entropy]
features_kl = [np.log(x) for x in features_kl]
merged_features = pd.DataFrame({'entropy':features_entropy,'kl':features_kl,'name':[x[:-5] for x in features_name]})
merged = pd.merge(merged0, merged_features, how="inner", on="name")
memoscore = merged['mscore']
memoscore_ln = np.log(memoscore)
memoname = merged['name']
popscore = merged['pscore']
# normalize popscore onto [0,1]
popscore_max = popscore.max()
popscore_min = popscore.min()
popscore = [float(x - popscore_min)/(popscore_max-popscore_min) for x in popscore]
popscore_ln = np.log(popscore)
popname = merged['name']
del merged['pscore']
del merged['name']
del merged['mscore']
if sys.argv[3] == "1":
logmean = np.log(merged['mean'])
del merged['mean']
merged['mean'] = logmean
elif sys.argv[3] == "0":
pass
# dummy variables for category and country
dummies = pd.concat([pd.get_dummies(merged['category']).reset_index(drop=True), pd.get_dummies(merged['country'])], axis=1)
category_index = merged['category']
del merged['category']
country_index = merged['country']
del merged['country']
merged = pd.concat([merged.reset_index(drop=True), dummies], axis = 1)
print "checkpoint 1"
# squared terms
### model0
print "model0: no squared, no interaction"
if sys.argv[2] == "1":
#modelm = sm.OLS(np.array(memoscore_ln), np.array(sm.add_constant(merged)))
modelm0 = sm.OLS(np.array(memoscore_ln), np.array(sm.add_constant(merged)), missing = "drop")
elif sys.argv[2] == "0":
#modelm = sm.OLS(np.array(memoscore), np.array(sm.add_constant(merged)))
modelm0 = sm.OLS(np.array(memoscore), np.array(sm.add_constant(merged)), missing = "drop")
print "checkpoint 3"
if sys.argv[4] == "1":
modelp0 = sm.OLS(np.array(popscore_ln), np.array(sm.add_constant(merged)), missing = "drop")
#modelp = sm.OLS(np.array(memoscore_ln), np.array(sm.add_constant(merged)))
elif sys.argv[4] == "0":
modelp0 = sm.OLS(np.array(popscore), np.array(sm.add_constant(merged)), missing = "drop")
#modelp = sm.OLS(np.array(memoscore), np.array(sm.add_constant(merged)))
# model1: squared
merged['meansq'] = pd.DataFrame({'meansq':[x**2 for x in merged['mean']]})
merged['stdsq'] = pd.DataFrame({'stdsq':[x**2 for x in merged['std']]})
merged['entropysq'] = pd.DataFrame({'entropysq':[x**2 for x in merged['entropy']]})
merged['imagenetsq'] = pd.DataFrame({'imagenetsq':[x**2 for x in merged['imagenet']]})
merged['klsq'] = pd.DataFrame({'klsq':[x**2 for x in merged['kl']]})
print "model1: squared"
if sys.argv[2] == "1":
#modelm = sm.OLS(np.array(memoscore_ln), np.array(sm.add_constant(merged)))
modelm1 = sm.OLS(np.array(memoscore_ln), np.array(sm.add_constant(merged)), missing = "drop")
elif sys.argv[2] == "0":
#modelm = sm.OLS(np.array(memoscore), np.array(sm.add_constant(merged)))
modelm1 = sm.OLS(np.array(memoscore), np.array(sm.add_constant(merged)), missing = "drop")
print "checkpoint 3"
if sys.argv[4] == "1":
modelp1 = sm.OLS(np.array(popscore_ln), np.array(sm.add_constant(merged)), missing = "drop")
#modelp = sm.OLS(np.array(memoscore_ln), np.array(sm.add_constant(merged)))
elif sys.argv[4] == "0":
modelp1 = sm.OLS(np.array(popscore), np.array(sm.add_constant(merged)), missing = "drop")
#modelp = sm.OLS(np.array(memoscore), np.array(sm.add_constant(merged)))
# interaction terms!
# complexity * content ambiguity
merged['mean-entropy'] = pd.DataFrame({'mean-entropy':[x*y for x,y in izip(merged['mean'],merged['entropy'])]})
# complexity * content ambiguity (relative)
merged['mean-kl'] = pd.DataFrame({'mean-kl':[x*y for x,y in izip(merged['mean'],merged['kl'])]})
# imagenet entropy * content ambiguity
merged['imagenet-entropy'] = pd.DataFrame({'imagenet-entropy':[x*y for x,y in izip(merged['imagenet'],merged['entropy'])]})
# imagenet entropy * content ambiguity (relative)
merged['imagenet-kl'] = pd.DataFrame({'imagenet-kl':[x*y for x,y in izip(merged['imagenet'],merged['kl'])]})
# imagenet entropy * complexity * content ambiguity
merged['imagenet-mean-entropy'] = pd.DataFrame({'imagenet-mean-entropy':[x*y*z for x,y,z in izip(merged['imagenet'], merged['mean'], merged['entropy'])]})
# imagenet entropy * complexity * content ambiguity (relative)
merged['imagenet-mean-kl'] = pd.DataFrame({'imagenet-mean-kl':[x*y*z for x,y,z in izip(merged['imagenet'], merged['mean'], merged['kl'])]})
merged['std-entropy'] = pd.DataFrame({'std-entropy':[x*y for x,y in izip(merged['std'],merged['entropy'])]})
merged['std-kl'] = pd.DataFrame({'std-kl':[x*y for x,y in izip(merged['std'],merged['kl'])]})
print "model2: squared, interactions"
if sys.argv[2] == "1":
#modelm = sm.OLS(np.array(memoscore_ln), np.array(sm.add_constant(merged)))
modelm2 = sm.OLS(np.array(memoscore_ln), np.array(sm.add_constant(merged)), missing = "drop")
elif sys.argv[2] == "0":
#modelm = sm.OLS(np.array(memoscore), np.array(sm.add_constant(merged)))
modelm2 = sm.OLS(np.array(memoscore), np.array(sm.add_constant(merged)), missing = "drop")
print "checkpoint 3"
if sys.argv[4] == "1":
modelp2 = sm.OLS(np.array(popscore_ln), np.array(sm.add_constant(merged)), missing = "drop")
#modelp = sm.OLS(np.array(memoscore_ln), np.array(sm.add_constant(merged)))
elif sys.argv[4] == "0":
modelp2 = sm.OLS(np.array(popscore), np.array(sm.add_constant(merged)), missing = "drop")
#modelp = sm.OLS(np.array(memoscore), np.array(sm.add_constant(merged)))
print "checkpoint 3"
resultsm0 = modelm0.fit()
resultsp0 = modelp0.fit()
resultsm1 = modelm1.fit()
resultsp1 = modelp1.fit()
resultsm2 = modelm2.fit()
resultsp2 = modelp2.fit()
print "memorability quadratic model results"
print(resultsm2.summary())
print "popularity quadratic model results"
print(resultsp2.summary())
os.chdir("/mnt/saswork/sh2264/vision/results/")
pickle.dump(resultsm2, open('results_imagenet_'+sys.argv[1]+'_'+sys.argv[2]+'_'+sys.argv[3]+'_'+sys.argv[4]+'_'+ sys.argv[5] +'.pkl','wb'))
pickle.dump(modelm2, open('model_imagenet_'+sys.argv[1]+'_'+sys.argv[2]+'_'+sys.argv[3]+'_'+sys.argv[4]+'_'+ sys.argv[5] +'.pkl','wb'))
pickle.dump(modelp2, open('modelsq_imagenet_'+sys.argv[1]+'_'+sys.argv[2]+'_'+sys.argv[3]+'_'+sys.argv[4]+'_'+ sys.argv[5] +'.pkl','wb'))
pickle.dump(resultsp2, open('resultssq_imagenet_'+sys.argv[1]+'_'+sys.argv[2]+'_'+sys.argv[3]+'_'+sys.argv[4]+'_'+ sys.argv[5] +'.pkl','wb'))
# write to latex file
print "checkpoint 4: latex file!"
text_file = open('texm_imagenet_'+sys.argv[1]+'_'+sys.argv[2]+'_'+sys.argv[3]+'_'+sys.argv[4]+'_'+ sys.argv[5] +'.txt', 'w')
text_file.write(summary_col([resultsm0,resultsm1,resultsm2], stars=True, float_format='%0.4f').as_latex())
text_file.close()
text_file = open('texp_imagenet_'+sys.argv[1]+'_'+sys.argv[2]+'_'+sys.argv[3]+'_'+sys.argv[4]+'_'+ sys.argv[5] +'.txt', 'w')
text_file.write(summary_col([resultsp0,resultsp1,resultsp2], stars=True, float_format='%0.4f').as_latex())
text_file.close()
text_file = open('tex_imagenet_'+sys.argv[1]+'_'+sys.argv[2]+'_'+sys.argv[3]+'_'+sys.argv[4]+'_'+ sys.argv[5] +'.txt', 'w')
text_file.write(summary_col([resultsm2,resultsp2], stars=True, float_format='%0.4f').as_latex())
text_file.close()
# as a memo: merged.keys()
# variable names: intercept, mean, std, imagenet, category, country, entropy,
# kl, meansq, stdsq, entropysq, imagenetsq, klsq, mean-entropy, mean-kl,
# imagenet-entropy, imagenet-kl, imagenet-mean-entropy, imagenet-mean-kl
# std-entropy, std-kl
|
[
"noreply@github.com"
] |
shenglih.noreply@github.com
|
ead2d26459b946ec911a3765ca81dc830819ffb2
|
72805a17297659bf290083e01ac1579846f264f1
|
/Virtual Dice roller/Dice_roller.py
|
9b7fba5d883a38532152bdee73bee0bcb7000851
|
[] |
no_license
|
Viresh-nalwa/hacktoberfest_2021
|
b502016b8a32a4b66de27da119547424a5f86f20
|
7d9eb5f7276dfd6e0e80a1732c315b10a02a4c2b
|
refs/heads/main
| 2023-08-30T02:20:07.512512
| 2021-10-02T12:41:11
| 2021-10-02T12:41:11
| 412,615,808
| 0
| 0
| null | 2021-10-01T21:01:10
| 2021-10-01T21:01:09
| null |
UTF-8
|
Python
| false
| false
| 1,672
|
py
|
import tkinter
from PIL import Image, ImageTk
import random
# toplevel widget of Tk which represents mostly the main window of an application
root = tkinter.Tk()
root.geometry('700x500')
root.title('Virtual Dice Roller')
root["bg"] = "#296c92"
dice = ['dice1.png', 'dice2.png', 'dice3.png', 'dice4.png', 'dice5.png', 'dice6.png']
image1 = ImageTk.PhotoImage(Image.open(random.choice(dice)),master=root)
image2 = ImageTk.PhotoImage(Image.open(random.choice(dice)),master=root)
# construct a label widget for each image
label1 = tkinter.Label(root, image=image1)
label2 = tkinter.Label(root, image=image2)
# keep a reference, if needed
label1.image = image1
label2.image = image2
# pack a widget in the parent widget with placement
label1.place(x=50, y=100)
label2.place(x=400, y=100)
# function activated by button
def roll_dice():
image1 = ImageTk.PhotoImage(Image.open(random.choice(dice)),master=root)
# update image
label1.configure(image=image1)
# keep a reference
label1.image = image1
image2 = ImageTk.PhotoImage(Image.open(random.choice(dice)),master=root)
# update image
label2.configure(image=image2)
# keep a reference
label2.image = image2
# button
# command will use roll_dice function
w = tkinter.Label(root, text="This is Virtual Dice Roller",bg="#296c92",fg="#3eb489",font=('Helvetica', 25))
w.pack()
button = tkinter.Button(root, text='Roll the dice',width=10,height=2,bg="#3eb489", foreground='#296c92', command=roll_dice,font=('Helvetica', 15))
button.place(x=300,y=420)
# pack a widget in the parent widget
#button.pack(side=tkinter.BOTTOM)
# call the mainloop of Tk
# keeps window open
root.mainloop()
|
[
"varuneshnalwa2002@gmail.com"
] |
varuneshnalwa2002@gmail.com
|
6bc6a2fd982a58318b5ceeb28018bff8b8151a24
|
e69bda77652222a71dd0f9a7cf73fedf8c6b0d1d
|
/python/args.py
|
7e02f3f4b3024d24a3068f32077901f09701920e
|
[] |
no_license
|
wajustinzhang/projects
|
acbfc8a65f4c4b1e77818d1e5987841c158fd34c
|
19e11d3a1440105bb9a0cb3ebcd28ba0a06a0a9d
|
refs/heads/master
| 2020-04-05T13:37:10.248424
| 2017-07-20T01:42:00
| 2017-07-20T01:42:00
| 94,920,320
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 529
|
py
|
import sys
# read args
print(sys.argv)
# string format
print('test {}'.format('python'))
# string join
print('test'.join('-----'))
# reverse a string
'hello world'[::-1]
oldList = [1,3,4]
# shallow copy
newList = oldList[:]
def foo(x='thisis'):
pass
def foo(x, *y, **z):
pass
def foo(x, y='test'):
pass
def foo(x, l=[]):
pass
def concat(*args, sep="/"):
return sep.join(args)
def lbd(n):
return lambda x:x**n
# List, tuple, set, dict, sequence
squares = list(map(lambda x: x**2, range(10)))
|
[
"justinwazhang@gmail.com"
] |
justinwazhang@gmail.com
|
5b08782e2dbfa386aa516497f8b080c229e7762a
|
492b1144c72a393f944e341176f9e42796c7598b
|
/ceilometer/objectstore/swift_middleware.py
|
6bc77e95c8a2bf53213b3bd00ba5cabf1e165abc
|
[
"Apache-2.0"
] |
permissive
|
lexxito/bachelors_thesis
|
09913eb8357e2d8439f718da63bea0ebb358b86b
|
bec8dfb8d3610331c7ae5ec543e0b8da0948c164
|
refs/heads/master
| 2022-01-22T23:52:27.870965
| 2019-05-05T11:04:48
| 2019-05-05T11:04:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,611
|
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 eNovance <licensing@enovance.com>
#
# Author: Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Ceilometer Middleware for Swift Proxy
Configuration:
In /etc/swift/proxy-server.conf on the main pipeline add "ceilometer" just
before "proxy-server" and add the following filter in the file:
[filter:ceilometer]
use = egg:ceilometer#swift
# Some optional configuration
# this allow to publish additional metadata
metadata_headers = X-TEST
# Set reseller prefix (defaults to "AUTH_" if not set)
reseller_prefix = AUTH_
"""
from __future__ import absolute_import
from swift.common import utils
import webob
REQUEST = webob
try:
# Swift >= 1.7.5
import swift.common.swob
REQUEST = swift.common.swob
except ImportError:
pass
try:
# Swift > 1.7.5 ... module exists but doesn't contain class.
from swift.common.utils import InputProxy
except ImportError:
# Swift <= 1.7.5 ... module exists and has class.
from swift.common.middleware.proxy_logging import InputProxy
from ceilometer.openstack.common import context
from ceilometer.openstack.common import timeutils
from ceilometer import pipeline
from ceilometer import sample
from ceilometer import service
from ceilometer import transformer
class CeilometerMiddleware(object):
"""Ceilometer middleware used for counting requests."""
def __init__(self, app, conf):
self.app = app
self.logger = utils.get_logger(conf, log_route='ceilometer')
self.metadata_headers = [h.strip().replace('-', '_').lower()
for h in conf.get(
"metadata_headers",
"").split(",") if h.strip()]
service.prepare_service([])
self.pipeline_manager = pipeline.setup_pipeline(
transformer.TransformerExtensionManager(
'ceilometer.transformer',
),
)
self.reseller_prefix = conf.get('reseller_prefix', 'AUTH_')
if self.reseller_prefix and self.reseller_prefix[-1] != '_':
self.reseller_prefix += '_'
def __call__(self, env, start_response):
start_response_args = [None]
input_proxy = InputProxy(env['wsgi.input'])
env['wsgi.input'] = input_proxy
def my_start_response(status, headers, exc_info=None):
start_response_args[0] = (status, list(headers), exc_info)
def iter_response(iterable):
if start_response_args[0]:
start_response(*start_response_args[0])
bytes_sent = 0
try:
for chunk in iterable:
if chunk:
bytes_sent += len(chunk)
yield chunk
finally:
try:
self.publish_sample(env,
input_proxy.bytes_received,
bytes_sent)
except Exception:
self.logger.exception('Failed to publish samples')
try:
iterable = self.app(env, my_start_response)
except Exception:
self.publish_sample(env, input_proxy.bytes_received, 0)
raise
else:
return iter_response(iterable)
def publish_sample(self, env, bytes_received, bytes_sent):
req = REQUEST.Request(env)
try:
version, account, container, obj = utils.split_path(req.path, 2,
4, True)
except ValueError:
return
now = timeutils.utcnow().isoformat()
resource_metadata = {
"path": req.path,
"version": version,
"container": container,
"object": obj,
}
for header in self.metadata_headers:
if header.upper() in req.headers:
resource_metadata['http_header_%s' % header] = req.headers.get(
header.upper())
with self.pipeline_manager.publisher(
context.get_admin_context()) as publisher:
if bytes_received:
publisher([sample.Sample(
name='storage.objects.incoming.bytes',
type=sample.TYPE_DELTA,
unit='B',
volume=bytes_received,
user_id=env.get('HTTP_X_USER_ID'),
project_id=env.get('HTTP_X_TENANT_ID'),
resource_id=account.partition(self.reseller_prefix)[2],
timestamp=now,
resource_metadata=resource_metadata)])
if bytes_sent:
publisher([sample.Sample(
name='storage.objects.outgoing.bytes',
type=sample.TYPE_DELTA,
unit='B',
volume=bytes_sent,
user_id=env.get('HTTP_X_USER_ID'),
project_id=env.get('HTTP_X_TENANT_ID'),
resource_id=account.partition(self.reseller_prefix)[2],
timestamp=now,
resource_metadata=resource_metadata)])
# publish the event for each request
# request method will be recorded in the metadata
resource_metadata['method'] = req.method.lower()
publisher([sample.Sample(
name='storage.api.request',
type=sample.TYPE_DELTA,
unit='request',
volume=1,
user_id=env.get('HTTP_X_USER_ID'),
project_id=env.get('HTTP_X_TENANT_ID'),
resource_id=account.partition(self.reseller_prefix)[2],
timestamp=now,
resource_metadata=resource_metadata)])
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def ceilometer_filter(app):
return CeilometerMiddleware(app, conf)
return ceilometer_filter
|
[
"you@example.com"
] |
you@example.com
|
66c68a488457c61a4901c1167a03a57947674085
|
84e7c65cf71e52ee8bda64ea59026961f1d2bf12
|
/examples/demo_dag.py
|
e9fd77714f041959e8668b34794f62e54eaaace0
|
[
"Apache-2.0"
] |
permissive
|
whugoh/marquez-airflow
|
a2d981562d14beecd417f036e21d7112a81a67d1
|
003a46507c2e1d97072d4a4c9e733374e04116a1
|
refs/heads/master
| 2020-04-27T09:46:54.560307
| 2019-02-21T18:07:57
| 2019-02-21T18:07:57
| 174,228,584
| 0
| 0
| null | 2019-03-06T22:07:22
| 2019-03-06T22:07:21
| null |
UTF-8
|
Python
| false
| false
| 761
|
py
|
from marquez.airflow import MarquezDag as DAG
from airflow.operators.dummy_operator import DummyOperator
from datetime import datetime
DAG_NAME = 'test_dag_v2'
default_args = {
'mqz_namespace': 'demo',
'mqz_location': 'github://my_dag_location',
'mqz_input_datasets': ["s3://great_data", "s3://not_so_good_data"],
'mqz_output_datasets': ["s3://amazing_data"],
'owner': 'some dag developer',
'depends_on_past': False,
'start_date': datetime(2019, 1, 31),
}
dag = DAG(DAG_NAME, schedule_interval='*/10 * * * *',
default_args=default_args, description="My awesome DAG")
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag)
run_this_2.set_upstream(run_this_1)
|
[
"rodrigo.araya@gmail.com"
] |
rodrigo.araya@gmail.com
|
8c98d8ffb72a318856ececa30e636d03426c350b
|
2342616ec6a52911116e75253e119c6e72d6cb7f
|
/WholeBrain/Utils/permutation_htest2_np.py
|
ebecacc87bf64b8192720d6ba016a0f29b282ec1
|
[] |
no_license
|
dagush/WholeBrain
|
40e435c874571429c3353de95b2c27e2ae6b5fd5
|
747ee61a5278fba39888edb31ae6ce69c41bf64d
|
refs/heads/master
| 2023-08-31T04:06:19.608429
| 2023-08-28T20:40:49
| 2023-08-28T20:40:49
| 188,561,840
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,899
|
py
|
# --------------------------------------------------------------------------------------
# PERMUTATION_HTEST2_NP - A "non-parametric" two-sample hypothesis test that, instead of
# relying on the test-type standard distribution, uses permutations of group labels to
# estimate the null distribution. The null distribution is computed
# independently for each data point (= row), i.e. we do not assume the same
# distribution for each datapoint. However, we do assume that the data
# points are comparable (e.g. they correspond to the same location
# collected across all subjects)
#
# Henrique Fernandes 2014
# Adapted from: Enrico Glerean 2013
# Translated to Python by Gustavo Patow 2023
#
# --------------------------------------------------------------------------------------
import numpy as np
from scipy import stats
def permutation_htest2_np(data1, data2, niter, htest='ttest2'):
# USAGE:
    # result = permutation_htest2_np(data1, data2, niter, htest='ttest2')
# INPUT:
# data1,2 - a matrix where each column is a subject and each row is a
# data-point for example a voxel intensity in fMRI, a node level
# value in a network, etc. NaN values will be ignored.
# niter - number of permutations (recommended 5000)
# htest - hypothesis test used to compare populations. The script is
# prepared to run the ttest2, kstest2, and ranksum tests.
#
# OUTPUT:
# result is a dict with the following subfields:
# pvals - p-values for each datapoint; it returns in order the p-values
# for the right tail and for the left tail
# tvals - test statistic values for datapoint, positive tvals mean
# group 1 > group 2
#
    # Notes: the null distribution is estimated with kernel density estimation
    # (scipy.stats.gaussian_kde, the Python equivalent of MATLAB's ksdensity)
    # over the permuted statistics. The distribution is estimated over 200 points
    # if niter<=5000, otherwise over round(200*niter/5000) points, for greater precision.
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# INPUT VALIDATION
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Nsubj = size(data, 2) # number of subjects
# if (size(design, 2) != Nsubj):
# raise Exception('Mismatched number of subjects: the number of columns of data variable should match the number of columns of the design variable.')
# if (size(design, 1) != 1):
# raise Exception('The design variable should only contain 1 row')
#
# g1 = find(design == 1)
# g2 = find(design == 2)
# if ((length(g1) + length(g2)) != Nsubj):
# raise Exception('The design variable should only contain numbers 1 and 2.')
if niter <= 0:
print('The variable niter should be a positive integer, function will continue assuming niter=5000.')
niter = 5000
# % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % %
# HYPOTHESIS TESTING(for each row / area)
# % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % %
# stats.tvals = tt_np(data, g1, g2); % similar to ttest2
result = {}
    NC = 1 if data1.ndim == 1 else data1.shape[0]  # number of comparisons (= number of rows / data points)
tvals = np.zeros(NC)
diffs = np.zeros(NC)
# means = np.zeros(NC)
# if htest == 'ttest':
# # - the population means are not equal. (alternative hypothesis)
# # - the two groups are derived from normal distributions with unknown and unequal variances.
# for t = 1:NC:
# [H, P, CI, STATS] = ttest(data(t, g1)',data(t,g2)', pthr, 'both')
# tvals(t,:) = STATS.tstat
# diffs(t,:) = mean(data(t, g1)) - mean(data(t, g2))
if htest == 'ttest2':
# - the population means are not equal. (alternative hypothesis)
# - the two groups are derived from normal distributions with unknown and unequal variances.
if NC == 1:
statstt = stats.ttest_ind(data1, data2, equal_var=False, alternative='two-sided')
tvals[0] = statstt.statistic
diffs[0] = np.mean(data1) - np.mean(data2)
else:
for t in range(NC):
statstt = stats.ttest_ind(data1[t,:],data2[t,:], equal_var=False, alternative='two-sided')
tvals[t] = statstt.statistic
                diffs[t] = np.mean(data1[t, :]) - np.mean(data2[t, :])
# case 'kstest'
# for t=1:NC
# [H,P,STATS]=kstest2(data(t,g1)',data(t,g2)',pthr);
# tvals(t,:)=STATS;
# diffs(t,:) = mean(data(t,g1))-mean(data(t,g2));
# end
# case 'ranksum'
# for t=1:NC
# [P,H,STATS]=ranksum(data(t,g1)',data(t,g2)','alpha',pthr);
# tvals(t,:)=STATS.zval;
# diffs(t,:) = mean(data(t,g1))-mean(data(t,g2));
# end
else:
        raise Exception('\n-------------------------------\n\nHypothesis test %s not recognized. \n\n-------------------------------\n' % htest)
result['tvals'] = tvals
# % tvals(isnan(tvals)) = 0; % or tvals(tvals ~ = tvals) = 0
result['diffs'] = diffs
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# % PERMUTATION TESTING (for each row/area)
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    # % outputs the pval (from the computed null distribution using permutation
# % testing) given the tstat previously calculated.
# % each comparison is treated independently
    pvals = np.zeros((NC, 2))
    for n in range(NC):
        d1 = data1 if NC == 1 else data1[n, :]
        d2 = data2 if NC == 1 else data2[n, :]
        # Exclude tests where all (tstat=NaN) or most of the population (median=0) have a null value.
        if np.median(d1) != 0 or np.median(d2) != 0:
            pvals[n] = test_np_pval(d1, d2, niter, tvals[n])
        else:
            pvals[n] = [np.nan, np.nan]
result['pvals'] = pvals
return result
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# NESTED FUNCTIONS
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def test_np_pval(data1,data2,niter,tval):
alldata = np.concatenate([data1, data2])
    outiter = np.zeros(niter)
    a_n = np.size(data1)  # number of samples in the first group
for iter in range(niter):
np.random.shuffle(alldata)
# one could add a test to see that they are indeed permuted
temp1 = alldata[:a_n]
temp2 = alldata[a_n:]
statsRes = stats.ttest_ind(temp1, temp2, equal_var=False, alternative='two-sided')
outiter[iter] = statsRes.statistic
NCDF = 200
if niter > 5000:
        NCDF = int(round(200. * niter / 5000))
    # estimated cumulative distribution function
    # [fi xi]=ksdensity(outiter,'function','cdf','npoints',NCDF)
    kde = stats.gaussian_kde(outiter)
    xi = np.linspace(outiter.min(), outiter.max(), NCDF)
    # evaluate the CDF of the kernel density estimate (not the density itself), so that
    # fi rises monotonically from ~0 to ~1 as required by the interpolation below
    fi = np.array([kde.integrate_box_1d(-np.inf, x) for x in xi])
# trick to avoid NaNs, we approximate the domain of the CDF between
# -Inf and Inf using the atanh function and the eps matlab precision variable
eps = np.spacing(1)
pval_left = np.interp(tval,
np.concatenate([[np.arctanh(-1+eps)], xi, [np.arctanh(1-eps)]]),
np.concatenate([[0], fi, [1]])) # G1 > G2
pval_right = 1 - pval_left # G1 < G2
pval = [pval_right, pval_left]
return pval
# ================================================================================================================
# ================================================================================================================
# ================================================================================================================EOF
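# ----------------------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original module).
# The synthetic data below is an arbitrary assumption: 10 data points (rows) per group,
# 20 subjects (columns) per group, with a 0.5 mean shift between the groups.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    group1 = rng.normal(0.5, 1.0, size=(10, 20))
    group2 = rng.normal(0.0, 1.0, size=(10, 20))
    res = permutation_htest2_np(group1, group2, niter=1000, htest='ttest2')
    print(res['tvals'])   # per-row t statistics (positive means group 1 > group 2)
    print(res['pvals'])   # per-row [right-tail, left-tail] permutation p-values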
|
[
"31112588+dagush@users.noreply.github.com"
] |
31112588+dagush@users.noreply.github.com
|
c14d44c7e4d61d0ee286a68e6aa4a3540fb69fdc
|
48871bea340c97091e75ab041101cebb860c603c
|
/freddie/db/fields.py
|
3cbdb237f76f24c78cc389f04579076874aac200
|
[
"MIT"
] |
permissive
|
Keegan-y/freddie
|
9826f3706315a8c7124fd7ae750804627481497b
|
7fb812a66650d07612b4055509fac25d009acc68
|
refs/heads/master
| 2023-01-31T06:51:40.055938
| 2020-12-16T08:29:00
| 2020-12-16T08:29:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,971
|
py
|
from typing import Any, Iterable, Type, Union
from peewee import (
Expression,
Field as DBField,
FieldAccessor,
ForeignKeyField,
MetaField,
Model,
Query,
)
class ManyToManyAccessor(FieldAccessor):
field: 'ManyToManyField'
def __get__(
self, instance: Model, instance_type: Type[Model] = None
) -> Union[list, 'ManyToManyField']:
if instance is not None:
return instance.__data__.get(self.name, []) # type: ignore
return self.field
class ManyToManyField(MetaField):
accessor_class = ManyToManyAccessor
model: 'ModelType'
rel_model: 'ModelType'
through_model_name: str
through_model: 'ModelType'
def __init__(self, rel_model: 'ModelType', through_model_name: str, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
self.rel_model = rel_model
self.through_model_name = through_model_name
def __call__(self, pk: Any) -> 'QueryBuilder':
return QueryBuilder(pk, self)
@property
def model_name(self) -> str:
return self.model.__name__.lower()
@property
def rel_model_name(self) -> str:
return self.rel_model.__name__.lower()
@property
def rel_model_keys(self) -> Iterable[str]:
return tuple(self.rel_model._meta.fields.keys())
@property
def rel_model_pk(self) -> DBField:
return self.rel_model._meta.primary_key
@property
def model_fk(self) -> ForeignKeyField:
return getattr(self.through_model, self.model_name)
@property
def rel_model_fk(self) -> ForeignKeyField:
return getattr(self.through_model, self.rel_model_name)
class QueryBuilder:
pk: Any
field: ManyToManyField
name: str
__slots__ = ('pk', 'field', 'name')
def __init__(self, pk: Any, field: ManyToManyField):
super().__init__()
self.pk = pk
self.field = field
def get(
self, fields: Iterable[DBField] = None, conditions: Iterable[Expression] = None,
) -> Query:
related_objects_pks = self.field.through_model.select(self.field.rel_model_fk).where(
self.field.model_fk == self.pk
)
rel_model_fields = fields if fields else (self.field.rel_model,)
query = self.field.rel_model.select(*rel_model_fields).where(
self.field.rel_model_pk << related_objects_pks, *(conditions or ())
)
return query
def add(self, *related_model_ids: Any) -> Query:
if not related_model_ids:
raise ValueError('No objects IDs passed for many-to-many relation')
data = [
{self.field.rel_model_name: related_id, self.field.model_name: self.pk}
for related_id in related_model_ids
]
return self.field.through_model.insert_many(data)
def clear(self) -> Query:
return self.field.through_model.delete().where(self.field.model_fk == self.pk)
ModelType = Type[Model]
|
[
"a.gabdullin@tinkoff.ru"
] |
a.gabdullin@tinkoff.ru
|
0871ad25a47fbfb9af1d7c9305e12f557acba868
|
45e37cfce061f5c023af2d95611246991af79445
|
/main.py
|
3f51fec79de41270bb8ba8ffe22c805642319014
|
[] |
no_license
|
SYFT/5184Spider
|
ee075f1243e56935b529177f1350a1e335a73fba
|
d943bdceaed83f0fcc04206425094a7a123075dc
|
refs/heads/master
| 2021-01-20T18:47:54.538110
| 2018-08-11T13:04:41
| 2018-08-11T13:04:41
| 62,187,793
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,739
|
py
|
# -*- coding: utf-8 -*-
# package import
import urllib, urllib2, cookielib
import pytesseract
from PIL import Image
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
# my pyfiles
from config import *
import denoise
# functions
def getCheckcode() :
"""
Use 'checkcode.jpg'
return the string it contains
"""
img = Image.open(checkcode_file)
img.load()
img = denoise.denoise(img)
tmp = pytesseract.image_to_string(img, config = "-psm 7 digits")
ret = tmp
number = "0123456789"
for c in tmp :
if c not in number :
ret = ret.replace(c, '')
return ret
def addBirthday(bir) :
"""
Add birthday and return next birthday
"""
bir += 1
if bir % 100 == 13 :
bir = ((bir / 100) + 1) * 100 + 1
return bir
def getSubjectDetails(x) :
p = x.find("cj")
k = x.find('"', p)
s = x.find('"', k + 1)
e = x.find('"', s + 1)
ret = x[s + 1 : e]
x = x[e + 1 : ]
return (ret, x)
def getDetail(student, birth, result) :
"""
Change the result into standard format
studentnumber 0000 Fail None 0 0 0 0 0
or
studentnumber birth Category name YuWen ShuXue ZongHE YingYu ZongFen
"""
ret = unicode(student) + u" " + unicode(birth)
if "Fail" in result :
ret = ret + " " + unicode("Fail None 0 0 0 0 0")
else :
# get wenke or like
cat = ""
if "\u7406\u79d1" in result :
cat = "\u7406\u79d1"
else :
cat = "\u6587\u79d1"
ret = ret + u" " + unicode(cat)
# get name
k = result.find("name")
k = result.find('"', k)
s = result.find('"', k + 1)
e = result.find('"', s + 1)
# print "Debug:", result, s, e
ret = ret + u" " + unicode(result[s + 1 : e])
# print unicode(result[s + 1 : e])
result = result[e:]
for times in range(0, 5) :
# get each subject's score
tmp = ""
tmp, result = getSubjectDetails(result)
ret = ret + u" " + unicode(tmp)
ret = ret.decode("unicode_escape")
return ret
def getScore(student, birS, birE) :
"""
student - student number
birS, birE - birthday start day and end day
try each birthday and try to fetch score from 5184
"""
count_try_bir = 0
succeed = False
ret = "Fail"
rightBir = "0000"
while count_try_bir < TIMES_TRY_BIRTHDAY :
count_try_bir += 1
bir = birS
while bir != birE and (not succeed) :
print bir
result = ""
# get checkcode.jpg
# urllib.urlretrieve(checkcode_url, checkcode_file)
checkcode_succeed = False
count_try_checkcode = 0
while (not checkcode_succeed) and (count_try_checkcode < TIMES_TRY_CHECKCODE) :
count_try_checkcode += 1
try :
post_data = None
request = urllib2.Request(checkcode_url, post_data, getCheckcode_headers)
response = opener.open(request, timeout=300)
checkcode = open(checkcode_file, "wb")
checkcode.write(response.read())
checkcode.close()
user_checkcode = getCheckcode()
data = {
"csny":str(bir),
"zkzh":str(student),
"yzm":user_checkcode
}
post_data = urllib.urlencode(data)
request = urllib2.Request(query_url, post_data, query_headers)
response = opener.open(request, timeout=300)
result = response.read()
print student, bir, user_checkcode, result
except Exception as e :
print e, 'fail try'
if wrong_checkcode not in result :
checkcode_succeed = True
break
if (busy_server not in result) and (success_feedback in result) :
succeed = True
ret = result
rightBir = bir
break
bir = addBirthday(bir)
if succeed == True :
break
ret = getDetail(student, rightBir, ret)
return ret
# build opener
cookie = cookielib.MozillaCookieJar(cookie_file)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
print "Spider begin:"
# main procedure
post_data = None
try :
request = urllib2.Request(query_web_url, post_data, default_headers)
response = opener.open(request, timeout=100)
print "Successfully open query web."
except Exception as e :
print e
exit(0)
student_range = raw_input("Please tell me the range of student_number(begin_number end_number): ")
student_range = student_range.split(' ')
begin_number = int(student_range[0])
end_number = int(student_range[1])
end_number += 1
birthday_range = raw_input("Please tell me the range of students' birthday (like:9701 9806): ")
birthday_range = birthday_range.split(' ')
birthday_begin = int(birthday_range[0])
birthday_end = int(birthday_range[1])
birthday_end = addBirthday(birthday_end)
# begin_number = 2000102442
# end_number= 2000102443
# birthday_begin = 9801
# birthday_end = 9802
for student_number in range(begin_number, end_number) :
result = getScore(student_number, birthday_begin, birthday_end)
file_handler = open(result_file, "a")
print result
file_handler.write(result + "\n")
file_handler.close()
# request = urllib2.Request(checkcode_url, post_data, getCheckcode_headers)
# response = opener.open(request, timeout=1000)
# checkcode = open(checkcode_file, "wb")
# checkcode.write(response.read())
# checkcode.close()
# # print "Successfully download checkcode."
#
# user_checkcode = getCheckcode()
# print user_checkcode
# first_data = {
# "csny":"9801",
# "zkzh":"2000102442",
# "yzm":user_checkcode
# }
# post_data = urllib.urlencode(first_data)
# request = urllib2.Request(query_url, post_data, query_headers)
# response = opener.open(request, timeout=1000)
# print response.read()
# result = unicode(response.read())
# print result.decode("unicode_escape")
cookie.save(ignore_discard=True, ignore_expires=True)
# main procedure
|
[
"smallrqnoj@163.com"
] |
smallrqnoj@163.com
|
28409a0ac09ff5b5b2961428042e5d16a38e6e92
|
cf6af4c65a8f0b02a41951388123b3dde233399d
|
/backend/venv/bin/flake8
|
ab687fc978e1a73da18cd0a05a20d0637d713b19
|
[
"MIT"
] |
permissive
|
byamba3/mobilemetrics
|
58e967e41c9da237866ba147b9bd160a04bcb371
|
2e6c53325ecff842bde8c8fe19de220e8f90cb1d
|
refs/heads/master
| 2020-03-16T20:56:07.292243
| 2018-09-11T02:40:20
| 2018-09-11T02:40:20
| 132,978,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
#!/Users/cogitau/Github/Mobile-Metrics/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from flake8.main.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"cogitau@Byambas-MacBook-Pro.local"
] |
cogitau@Byambas-MacBook-Pro.local
|
|
ca7734726038079648104456812ee5d54bd3a63a
|
55025c7978dd54461cbc433a2f6e973b473bb81b
|
/athena/models/kws/conformer_wakeup.py
|
076a50f99c6a1734699329bb4f0f32ea2861944e
|
[
"Apache-2.0"
] |
permissive
|
ishine/athena-1
|
32dabd36ef90e684dfddcf2402feaa2a6bde41b1
|
0be22c155c782207349ad6b2135f974d8673611f
|
refs/heads/master
| 2023-07-27T17:03:40.353380
| 2022-12-02T02:43:19
| 2022-12-02T02:43:19
| 239,663,565
| 0
| 0
|
Apache-2.0
| 2020-06-23T02:12:49
| 2020-02-11T03:00:59
|
Python
|
UTF-8
|
Python
| false
| false
| 5,754
|
py
|
# coding=utf-8
# Copyright (C) 2022 ATHENA AUTHORS; Yanguang Xu; Yang Han; Jianwei Sun
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Only support eager mode
# pylint: disable=no-member, invalid-name, relative-beyond-top-level
# pylint: disable=too-many-locals, too-many-statements, too-many-arguments, too-many-instance-attributes
""" speech conformer implementation"""
from absl import logging
import tensorflow as tf
from .base import BaseModel
from ...utils.hparam import register_and_parse_hparams
from ...layers.commons import PositionalEncoding
from ...layers.conformer import ConformerEncoderLayer, ConformerEncoder
class KWSConformer(BaseModel):
""" Standard implementation of a KWSConformer. Model mainly consists of three parts:
the x_net for input preparation, the conformer itself
"""
default_config = {
"num_classes":1,
"num_filters": 512,
"d_model": 512,
"num_heads": 8,
"cnn_module_kernel": 15,
"num_encoder_layers": 12,
"rate": 0.1
}
def __init__(self, data_descriptions, config=None):
super().__init__()
self.hparams = register_and_parse_hparams(self.default_config, config, cls=self.__class__)
layers = tf.keras.layers
self.loss_function = tf.keras.losses.BinaryCrossentropy(from_logits=True, label_smoothing=0.1)
self.metric = tf.keras.metrics.BinaryAccuracy()
# learnable class embedding to learn a global feature that represents the whole spectrogram
self.class_emb = self.add_weight("class_emb", shape=(1, 1, self.hparams.d_model), initializer=tf.keras.initializers.TruncatedNormal(mean=0., stddev=0.02))
input_features = layers.Input(
shape=data_descriptions.sample_shape["input"],
dtype=tf.float32)
# tf.ensure_shape(input_features, [None, None, 63, 1])
inner = layers.Conv2D(
filters=self.hparams.num_filters,
kernel_size=(3, 3),
strides=(2, 2),
padding="same",
use_bias=False,
data_format="channels_last",
)(input_features)
# tf.ensure_shape(inner, [None, None, 32, 512])
inner = layers.BatchNormalization()(inner)
inner = tf.nn.relu6(inner)
inner = layers.Conv2D(
filters=self.hparams.num_filters,
kernel_size=(3, 3),
strides=(2, 2),
padding="same",
use_bias=False,
data_format="channels_last",
)(inner)
inner = layers.BatchNormalization()(inner)
# tf.ensure_shape(inner, [None, None, 16, 512])
inner = tf.nn.relu6(inner)
_, _, dim, channels = inner.get_shape().as_list()
output_dim = dim * channels
inner = layers.Reshape((-1, output_dim))(inner)
inner = layers.Dense(self.hparams.d_model, activation=tf.nn.relu6)(inner)
self.x_net = tf.keras.Model(inputs=input_features, outputs=inner, name="x_net")
logging.info(self.x_net.summary())
self.concat_layer = layers.Concatenate(axis=1)
self.dropout_layer = layers.Dropout(self.hparams.rate)
# conformer encoder
encoder_layers = [
ConformerEncoderLayer(self.hparams.d_model, self.hparams.num_heads, self.hparams.cnn_module_kernel) for _ in range(self.hparams.num_encoder_layers)
]
self.encoder = ConformerEncoder(encoder_layers)
# last layer for output
self.final_layer = layers.Dense(self.hparams.num_classes, input_shape=(self.hparams.d_model,))
self.tflite_model = self.build_model(data_descriptions)
def call(self, samples, training=None):
x0 = samples["input"]
x = self.x_net(x0, training=training)
batch_size = tf.shape(x)[0]
class_emb = tf.broadcast_to(self.class_emb, [batch_size, 1, self.hparams.d_model])
x = self.concat_layer([class_emb, x])
x = PositionalEncoding(self.hparams.d_model, scale=False)(x)
x = self.dropout_layer(x,training=training) # self.hparams.rate
x = self.encoder(x, training=training)
class_output = x[:, 0]
y = self.final_layer(class_output)
return y
def build_model(self, data_descriptions):
input_features = tf.keras.layers.Input(
shape= data_descriptions.sample_shape["input"],
dtype=tf.float32,
name="input_features")
x = self.x_net(input_features, training=False)
batch_size = tf.shape(x)[0]
class_emb = tf.broadcast_to(self.class_emb, [batch_size, 1, self.hparams.d_model])
x = self.concat_layer([class_emb, x])
x = PositionalEncoding(self.hparams.d_model, scale=False)(x)
x = self.dropout_layer(x,training=False) # self.hparams.rate
x = self.encoder(x, training=False)
class_output = x[:, 0]
logits = self.final_layer(class_output)
prob = tf.math.sigmoid(logits, name="sigmoid")
tflite_model = tf.keras.Model(inputs=input_features, outputs=prob, name="kws")
return tflite_model
|
[
"nene.luo@gmail.com"
] |
nene.luo@gmail.com
|
6f5d642f6f4e18d927d2edafc5f17adb36ed550e
|
a2931ad429ba6848261c4c18abcb74be2c05a30b
|
/return.py
|
bce8695072d4d74d268d8bf90c37295bdfd01b49
|
[] |
no_license
|
amansarosh/python-functionality
|
9c19d94c73ea3906f00e5efa8c2289386d05933d
|
09fba1ffd43345e46240ca88b23d846c65ffc6d9
|
refs/heads/master
| 2020-11-29T09:06:53.281052
| 2019-12-27T06:25:20
| 2019-12-27T06:25:20
| 230,076,605
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
def cube (num):
    return num*num*num # return exits the function, so any code after it inside the function would never run
result = cube(4)
print(result)
|
[
"amansarosh13@gmail.com"
] |
amansarosh13@gmail.com
|
04fd223d42564ea7b883a0d71c3e3e63662024b4
|
33a865b20240007d8ff63bba5d8ddd20eaf37ccf
|
/5-A.py
|
3acd184846e03437d602719085a950b4a6279e4d
|
[] |
no_license
|
Guan-Ling/20210125
|
b39673c4e4a1d3a47b6e5b7b7b3b9f7bb18609ab
|
970eeb4e30ba22142eaae6f2ba4713515ac12f6c
|
refs/heads/master
| 2023-02-22T10:11:25.715025
| 2021-01-29T09:01:28
| 2021-01-29T09:01:28
| 332,667,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
# Given a string, delete all the characters @ from this string.
# Bilbo.Baggins@bagend.hobbiton.shire.me
# Bilbo.Bagginsbagend.hobbiton.shire.me
s=input()
k=s.replace("@","")
print(k)
|
[
"x5j035j03x@gmail.com"
] |
x5j035j03x@gmail.com
|
e5f051658711903eb3072efecc81121e0d84abf4
|
96d31f7125e4f790bb648127a0843a5e6eff7925
|
/3rdparty/blender/io_export_glhck/export_glhckm.py
|
c5730f6ef5b38719a8e9cf06e76770cf54b20878
|
[] |
no_license
|
Cloudef/glhck
|
6797703960d7987736126b9dfb28fcb3d894d965
|
3592bd436c3571165a04881319a834cf49dbf9d0
|
refs/heads/master
| 2021-01-22T03:04:58.731501
| 2014-04-27T15:31:32
| 2014-04-27T15:31:32
| 4,010,391
| 8
| 0
| null | 2014-04-27T15:29:51
| 2012-04-12T23:17:22
|
C
|
UTF-8
|
Python
| false
| false
| 51,326
|
py
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# All rights reserved.
#
# ##### END GPL LICENSE BLOCK #####
# pylint: disable=C1001, C0111, F0401, R0903, R0912, R0913, R0914, R0915, R0902
# pylint: disable=W0142
# <pep8-80 compliant>
import bpy, os, struct
from bpy_extras.io_utils import path_reference, path_reference_copy
from itertools import zip_longest
from mathutils import Matrix
# File headers
GLHCKM_VERSION = [0, 1]
GLHCKM_HEADER = "glhckm"
GLHCKA_HEADER = "glhcka"
# Map geometryType enum
ENUM_GEOMETRYTYPE = {
'TRIANGLES':0,
}
# Map materialFlags bitflags
ENUM_MATERIALFLAGS = {
'LIGHTING': 1 << 0
}
# Map vertexDataFlags bitflags
ENUM_VERTEXDATAFLAGS = {
'HAS_NORMALS': 1 << 0,
'HAS_UVS': 1 << 1,
'HAS_VERTEX_COLORS': 1 << 2,
}
# Binary sizes for datatypes
SZ_INT8 = 1
SZ_INT16 = 2
SZ_INT32 = 4
SZ_COLOR3 = SZ_INT8 * 3
SZ_COLOR4 = SZ_INT8 * 4
# Return string from float without trailing zeroes
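# e.g. strflt(2.0) -> "2", strflt(1.25) -> "1.25" (illustrative)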
def strflt(flt):
return ("{:.6f}".format(flt)).rstrip("0").rstrip(".")
# Binary size of string
def sz_string(string):
return SZ_INT8 + len(string.encode('UTF-8'))
# Binary size of long string
def sz_longstring(string):
return SZ_INT16 + len(string.encode('UTF-8'))
# Binary size of string float
def sz_float(flt):
return sz_string(strflt(flt))
# Binary size of string vector2d
def sz_vector2(vector):
return sz_string("{},{}".format(*[strflt(x) for x in vector]))
# Binary size of string vector3d
def sz_vector3(vector):
return sz_string("{},{},{}".format(*[strflt(x) for x in vector]))
# Binary size of string quaternion
def sz_quaternion(quaternion):
return sz_string("{},{},{},{}".format(*[strflt(x) for x in quaternion]))
# Binary size of string matrix4x4
def sz_matrix4x4(matrix):
return sz_longstring("{},{},{},{} " \
"{},{},{},{} " \
"{},{},{},{} " \
"{},{},{},{} ".format(
*[strflt(x) for y in matrix for x in y]))
# Binary size of material
def sz_material(mtl):
return sz_string(mtl.name) + \
SZ_COLOR3 + SZ_COLOR4 + SZ_COLOR4 + SZ_INT16 + \
SZ_INT8 + sz_longstring(mtl.textures['diffuse'])
# Binary size of skinbone
def sz_skinbone(bone):
size = sz_string(bone.aobj.name + "_" + bone.name) + \
sz_matrix4x4(bone.offset_matrix) + SZ_INT32
for weight in bone.weights:
size += SZ_INT32 + sz_float(weight)
return size
# Binary size of bone
def sz_bone(eobj, bone, matrix):
return sz_string(eobj.name + "_" + bone.name) + \
sz_matrix4x4(matrix) + SZ_INT16
# Binary size of node
def sz_node(node):
size = sz_string(node.name) + SZ_INT32 + SZ_INT32 + SZ_INT32
for itr in node.rotation_keys:
size += SZ_INT32 + sz_quaternion(itr[1])
for itr in node.scaling_keys:
size += SZ_INT32 + sz_vector3(itr[1])
for itr in node.translation_keys:
size += SZ_INT32 + sz_vector3(itr[1])
return size
# Write block header
def write_block(file, block_name, block_size):
file.write(bytes(block_name, 'ascii')) # header
file.write(struct.pack("<I", block_size)) # block size
# Write color3 as binary
def write_color3(file, color):
file.write(struct.pack("<BBB", *color)) # rgb
# Write color4 as binary
def write_color4(file, color):
file.write(struct.pack("<BBBB", *color)) # rgba
# Write string with binary length
def write_string(file, string):
file.write(struct.pack("<B", len(string.encode('UTF-8')))) # length
file.write(bytes(string, 'UTF-8')) # char array
# Write long string with binary length
def write_longstring(file, string):
file.write(struct.pack("<H", len(string.encode('UTF-8')))) # length
file.write(bytes(string, 'UTF-8')) # char array
# Write float to file as string
def write_float(file, flt):
write_string(file, strflt(flt))
# Write vector2d to file as string
def write_vector2(file, vector):
write_string(file, "{},{}".format(*[strflt(x) for x in vector]))
# Write vector3d to file as string
def write_vector3(file, vector):
write_string(file, "{},{},{}".format(*[strflt(x) for x in vector]))
# Write quaternion to file as string
def write_quaternion(file, quaternion):
write_string(file, "{},{},{},{}".format(strflt(quaternion[1]),
strflt(quaternion[2]), strflt(quaternion[3]), strflt(quaternion[0])))
# Write matrix4x4 to file as string
def write_matrix4x4(file, matrix):
write_longstring(file, "{},{},{},{} " \
"{},{},{},{} " \
"{},{},{},{} " \
"{},{},{},{} ".format(
*[strflt(x) for y in matrix for x in y]))
# Write material to file as binary
def write_material(file, mtl):
write_string(file, mtl.name) # name
write_color3(file, mtl.ambient) # ambient
write_color4(file, mtl.diffuse) # diffuse
write_color4(file, mtl.specular) # specular
file.write(struct.pack("<H", mtl.shininess)) # shininess
file.write(struct.pack("<B", mtl.flags)) # materialFlags
write_longstring(file, mtl.textures['diffuse']) # diffuse
# Write skinbone to file as binary
def write_skinbone(file, bone):
write_string(file, bone.aobj.name + "_" + bone.name) # name
write_matrix4x4(file, bone.offset_matrix) # offsetMatrix
file.write(struct.pack("<I", len(bone.weights))) # weightCount
for index, weight in zip_longest(bone.indices, bone.weights): # weights
file.write(struct.pack("<I", index)) # vertexIndex
write_float(file, weight) # weight
# Write node to file as binary
def write_node(file, node):
write_string(file, node.name) # name
file.write(struct.pack("<I", len(node.rotation_keys))) # rotationCount
file.write(struct.pack("<I", len(node.scaling_keys))) # scalingCount
file.write(struct.pack("<I", len(node.translation_keys))) # translationCount
# quaternionKeys
for frame, key in node.rotation_keys:
file.write(struct.pack("<I", frame)) # frame
write_quaternion(file, key) # quaternion
# scalingKeys
for frame, key in node.scaling_keys:
file.write(struct.pack("<I", frame)) # frame
write_vector3(file, key) # vector
# translationKeys
for frame, key in node.translation_keys:
file.write(struct.pack("<I", frame)) # frame
write_vector3(file, key) # vector
# Almost equality check of floating point
def almost_equal(aflt, bflt, error=0.0001):
return aflt + error > bflt and aflt - error < bflt
# Almost equality check of 3d vector
def ae3d(vec1, vec2, error=0.0001):
return (almost_equal(vec1[0], vec2[0], error) and
almost_equal(vec1[1], vec2[1], error) and
almost_equal(vec1[2], vec2[2], error))
# Almost equality check of quaternion
def aeqt(qat1, qat2, error=0.0001):
return ae3d(qat1, qat2, error) and almost_equal(qat1[3], qat2[3], error)
# Round 2d vector
def r2d(vec, num=6):
return round(vec[0], num), round(vec[1], num)
# Round 3d vector
def r3d(vec, num=6):
    return r2d(vec, num) + (round(vec[2], num),)
# Round quaternion
def rqt(qat, num=6):
    return r3d(qat, num) + (round(qat[3], num),)
# Clamp to range
def clamp(val, minimum, maximum):
return max(minimum, min(val, maximum))
# Blender color to RGB unsigned byte color
def bc3b(bcol):
return clamp(round(bcol[0] * 255), 0, 255), \
clamp(round(bcol[1] * 255), 0, 255), \
clamp(round(bcol[2] * 255), 0, 255)
# Blender color to RGBA unsigned byte color
def bc4b(bcol):
return bc3b(bcol) + (clamp(round(bcol[3] * 255), 0, 255),)
# Sort list with each element name field
def sort_by_name_field(lst):
def sort_key(obj):
return obj.name
return sorted(lst, key=sort_key)
class Material:
def __init__(self, bmtl, mesh, options):
self.bmtl = bmtl
self.name = bmtl.name
images = Material._get_texture_images(bmtl)
if images['diffuse'] is None and mesh.uv_textures:
images['diffuse'] = mesh.uv_textures.active.data[:][0].image
self.textures = {}
src_dir = os.path.dirname(bpy.data.filepath)
dst_dir = os.path.dirname(options['filepath'])
for key in images.keys():
if images[key] is not None:
self.textures[key] = path_reference(images[key].filepath,
src_dir, dst_dir, options['path_mode'], "",
options['copy_set'], images[key].library)
if self.textures[key] == '.':
self.textures[key] = ''
else:
self.textures[key] = ''
self.ambient = bc3b(bmtl.ambient * bmtl.diffuse_color)
if self.textures['diffuse'] != '':
self.diffuse = (255, 255, 255, 255)
else:
self.diffuse = list(bmtl.diffuse_intensity * bmtl.diffuse_color)
self.diffuse.append(bmtl.alpha)
self.diffuse = bc4b(self.diffuse)
self.specular = list(bmtl.specular_intensity * bmtl.specular_color)
self.specular.append(bmtl.specular_alpha)
self.specular = bc4b(self.specular)
self.shininess = bmtl.specular_hardness
self.flags = 0
if bmtl.use_shadeless:
self.flags |= ENUM_MATERIALFLAGS['LIGHTING']
@staticmethod
def _get_texture_images(mtl):
"""Get relevant images from material"""
images = {'diffuse':None,
'normal':None,
'displacement':None,
'reflection':None,
'ambient':None,
'alpha':None,
'translucency':None,
'emit':None,
'specular_intensity':None,
'specular_color':None,
'specular_hardness':None}
# Create a list of textures that have type 'IMAGE'
textures = [mtl.texture_slots[tslot]
for tslot in mtl.texture_slots.keys()
if mtl.texture_slots[tslot].texture is not None
and mtl.texture_slots[tslot].texture.type == 'IMAGE']
textures = [tex for tex in textures if tex.texture.image is not None]
for tex in textures:
image = tex.texture.image
if tex.use_map_color_diffuse and not tex.use_map_warp and \
tex.texture_coords != 'REFLECTION':
images['diffuse'] = image
if tex.use_map_normal:
images['normal'] = image
if tex.use_map_displacement:
images['displacement'] = image
if tex.use_map_color_diffuse and tex.texture_coords == 'REFLECTION':
images['reflection'] = image
if tex.use_map_ambient:
images['ambient'] = image
if tex.use_map_alpha:
images['alpha'] = image
if tex.use_map_translucency:
images['translucency'] = image
if tex.use_map_emit:
images['emit'] = image
if tex.use_map_specular:
images['specular'] = image
if tex.use_map_color_spec:
images['specular_color'] = image
if tex.use_map_hardness:
images['specular_hardness'] = image
return images
def materials_from_blender_mesh(mesh, options):
"""Get materials from blender object"""
materials = []
for mtl in mesh.materials:
if mtl is not None:
materials.append(Material(mtl, mesh, options))
return materials
class SkinBone:
def __init__(self, bobj, aobj, name, options):
self.aobj = aobj
self.name = name
self.bone = aobj.data.bones[name]
self.indices = []
self.weights = []
# BoneMatrix transforms mesh vertices into the space of the bone.
# Here are the final transformations in order:
# - Object Space to World Space
# - World Space to Armature Space
# - Armature Space to Bone Space
# This way, when matrix is transformed by the bone's Frame matrix,
# the vertices will be in their final world position.
amat = options['global_matrix'] * aobj.matrix_world
self.offset_matrix = self.bone.matrix_local.inverted()
self.offset_matrix *= (bobj.matrix_local * amat).inverted()
self.offset_matrix *= bobj.matrix_local
def add_vertex(self, index, weight):
self.indices.append(index)
self.weights.append(weight)
def skin_bones_from_blender_object(bobj, options):
"""Get bone vertex groups from blender object"""
armature_modifier_list = [modifier for modifier in bobj.modifiers
if modifier.type == 'ARMATURE' and modifier.show_viewport]
armature_objects = [modifier.object for modifier in armature_modifier_list
if modifier.object is not None]
skin_bones = []
for aobj in armature_objects:
# Determine the names of the bone vertex groups
pose_bone_names = [bone.name for bone in aobj.pose.bones]
vertex_group_names = [group.name for group in bobj.vertex_groups]
used_bone_names = set(pose_bone_names).intersection(vertex_group_names)
# Create a SkinBone for each group name
skin_bones.extend([SkinBone(bobj, aobj, bone_name, options)
for bone_name in used_bone_names])
return skin_bones
def blender_object_to_mesh(context, bobj, options, has_bones=False):
"""Convert blender object to mesh for export"""
if options['use_mesh_modifiers']:
# Certain modifiers shouldn't be applied in some cases
# Deactivate them until after mesh generation is complete
deactivated_modifier_list = []
# If we're exporting armature data, we shouldn't apply
# armature modifiers to the mesh
if has_bones:
deactivated_modifier_list = [modifier for modifier in bobj.modifiers
if modifier.type == 'ARMATURE' and modifier.show_viewport]
for modifier in deactivated_modifier_list:
modifier.show_viewport = False
mesh = bobj.to_mesh(context.scene, True, 'PREVIEW')
# Restore the deactivated modifiers
for modifier in deactivated_modifier_list:
modifier.show_viewport = True
else:
mesh = bobj.to_mesh(context.scene, False, 'PREVIEW')
# finally transform
mesh.transform(options['global_matrix'] * bobj.matrix_world)
return mesh
def blender_object_to_data(context, bobj, options):
"""Turn blender object to bunch of data usable for our export"""
vertices = []
normals = []
uvs = []
vertex_colors = []
indices = []
materials = []
skin_bones = []
used_bones = []
if bobj.type == 'EMPTY' or bobj.data is None:
return {'vertices':vertices,
'normals':normals,
'uvs':uvs,
'vertex_colors':vertex_colors,
'indices':indices,
'materials':materials,
'skin_bones':skin_bones}
if options['use_bones']:
skin_bones = skin_bones_from_blender_object(bobj, options)
vertex_group_map = {group.index : group for group in bobj.vertex_groups}
# helper function for getting all the skin bones for vertex group
def get_skin_bones_for_vgroup(vgroup):
bones = []
vertex_group = vertex_group_map[vgroup.group]
for bone in skin_bones:
if vertex_group.name == bone.name:
bones.append(bone)
return bones
mesh = blender_object_to_mesh(context, bobj, options, len(skin_bones))
if options['use_materials']:
materials = materials_from_blender_mesh(mesh, options)
active_uvs = None
if options['use_uvs'] and mesh.tessface_uv_textures:
active_uvs = mesh.tessface_uv_textures.active.data
active_vertex_colors = None
if options['use_vertex_colors'] and mesh.vertex_colors:
active_vertex_colors = mesh.vertex_colors.active.data
vertex_count = 0
stored_data = {}
for fidx, face in enumerate(mesh.tessfaces):
tmp_faces = []
for fvidx, vidx in enumerate(face.vertices):
vertex = r3d(mesh.vertices[vidx].co)
if options['use_normals']:
if face.use_smooth:
normal = mesh.vertices[vidx].normal
else:
normal = face.normal
normal = r3d(normal)
else:
normal = (0.0, 0.0, 0.0)
if active_uvs is not None:
uvc = r2d(active_uvs[fidx].uv[fvidx])
else:
uvc = (0.0, 0.0)
if active_vertex_colors is not None:
color = bc4b(active_vertex_colors[vidx].color + (1.0,))
else:
color = (0, 0, 0, 0)
# Get total weight for vertex and number of influences
influences = 0
weight_total = 0.0
if skin_bones:
for vgroup in mesh.vertices[vidx].groups:
bones = get_skin_bones_for_vgroup(vgroup)
for bone in bones:
weight_total += vgroup.weight
influences += 1
# Check for duplicate vertex
key = vertex, normal, uvc, color, round(weight_total, 6), influences
duplicate_index = stored_data.get(key)
if duplicate_index is None:
# Store new vertex
stored_data[key] = vertex_count
vertices.append(vertex)
if options['use_normals']:
normals.append(normal)
if active_uvs is not None:
uvs.append(uvc)
if active_vertex_colors is not None:
vertex_colors.append(color)
# Add vertex to BoneGroup if it's in any
if influences:
for vgroup in mesh.vertices[vidx].groups:
bones = get_skin_bones_for_vgroup(vgroup)
for bone in bones:
if bone not in used_bones:
used_bones.append(bone)
weight = vgroup.weight / weight_total
bone.add_vertex(vertex_count, weight)
tmp_faces.append(vertex_count)
vertex_count += 1
else:
# Reuse the vertex
tmp_faces.append(duplicate_index)
# Is the format already triangles?
if len(tmp_faces) == 3:
indices.append(tmp_faces)
else:
indices.append([tmp_faces[0], tmp_faces[1], tmp_faces[2]])
indices.append([tmp_faces[0], tmp_faces[2], tmp_faces[3]])
bpy.data.meshes.remove(mesh)
return {'vertices':vertices,
'normals':normals,
'uvs':uvs,
'vertex_colors':vertex_colors,
'indices':indices,
'materials':materials,
'skin_bones':used_bones}
class Node:
def __init__(self, name):
self.name = name
self.rotation_keys = []
self.scaling_keys = []
self.translation_keys = []
class ExportAnimation:
def __init__(self, action):
self.name = action.name
self.action = action
self.first_frame = round(action.frame_range[0])
self.last_frame = round(action.frame_range[1])
self.objects = []
def __repr__(self):
return "[ExportAnimation: {}]".format(self.name)
def add_object(self, eobj):
"""Add object relevant to this animation, this also
means child objects! ExportAnimation should never
enumerate each object's children"""
if eobj not in self.objects:
self.objects.append(eobj)
def _generate_nodes(self, context, options):
"""Generate nodes for animation"""
# We bake only the selected objects!
# Thus select the objects relevant to this animation
if options['bake_animations']:
old_layers = context.scene.layers[:]
old_active = bpy.context.scene.objects.active
old_selection = context.selected_objects
# Select all relevant objects
context.scene.layers = [True for l in old_layers]
for bobj in old_selection:
bobj.select = False
for eobj in self.objects:
if eobj.bobj.type != 'ARMATURE':
# Only armatures need baking(?)
eobj.baked_action = eobj.action
continue
eobj.bobj.select = True
bpy.context.scene.objects.active = eobj.bobj
eobj.bobj.animation_data.action = self.action
from bpy_extras.anim_utils import bake_action as bake
eobj.baked_action = bake(self.first_frame, self.last_frame, 1,
only_selected=False,
do_pose=True,
do_object=False,
do_visual_keying=True,
do_constraint_clear=False,
do_parents_clear=False,
do_clean=True,
action=None)
eobj.bobj.select = False
# Restore selection and layers
for bobj in old_selection:
bobj.select = True
bpy.context.scene.objects.active = old_active
context.scene.layers = old_layers
# No baking
if not options['bake_animations']:
for eobj in self.objects:
eobj.baked_action = self.action
# Something failed, fallback
for eobj in self.objects:
if eobj.baked_action is None:
for eobj2 in self.objects:
eobj2.bobj.animation_data.action = eobj2.action
return []
def generate_nodes_from_armature(eobj):
"""Generate nodes for animation from Armature ExportObject"""
nodes = [Node(eobj.name + "_" + bone.name)
for bone in eobj.bobj.pose.bones]
old_rotation = {}
old_scaling = {}
old_translation = {}
old_rotation_mode = {}
for bone in eobj.bobj.pose.bones:
old_rotation[bone.name] = [1, 0, 0, 0]
old_scaling[bone.name] = [1, 1, 1]
old_translation[bone.name] = [0, 0, 0]
old_rotation_mode[bone.name] = bone.rotation_mode
bone.rotation_mode = 'QUATERNION'
for frame in range(self.first_frame, self.last_frame + 1):
context.scene.frame_set(frame)
for bone, node in zip_longest(eobj.bobj.pose.bones, nodes):
matrix = bone.matrix
if bone.parent:
matrix = bone.parent.matrix.inverted() * matrix
rotation = rqt(bone.bone.matrix.to_quaternion() *
bone.rotation_quaternion)
scaling = r3d(matrix.to_scale())
translation = r3d(matrix.to_translation())
if not aeqt(old_rotation[bone.name], rotation):
node.rotation_keys.append([frame, rotation])
old_rotation[bone.name] = rotation
if not ae3d(old_scaling[bone.name], scaling):
node.scaling_keys.append([frame, scaling])
old_scaling[bone.name] = scaling
if not ae3d(old_translation[bone.name], translation):
node.translation_keys.append([frame, translation])
old_translation[bone.name] = translation
for bone in eobj.bobj.pose.bones:
bone.rotation_mode = old_rotation_mode[bone.name]
return nodes
def generate_nodes_from_object(eobj):
"""Generate nodes for animation from Object ExportObject"""
nodes = []
old_rotation = [1, 0, 0, 0]
old_scaling = [1, 1, 1]
old_translation = [0, 0, 0]
node = Node(eobj.name)
nodes.append(node)
for frame in range(self.first_frame, self.last_frame + 1):
context.scene.frame_set(frame)
rotation = rqt(eobj.bobj.rotation_quaternion)
scaling = r3d(eobj.bobj.matrix_local.to_scale())
translation = r3d(eobj.bobj.matrix_local.to_translation())
if not aeqt(old_rotation, rotation):
node.rotation_keys.append([frame, rotation])
old_rotation = rotation
if not ae3d(old_scaling, scaling):
node.scaling_keys.append([frame, scaling])
old_scaling = scaling
if not ae3d(old_translation, translation):
node.translation_keys.append([frame, translation])
old_translation = translation
return nodes
nodes = []
for eobj in self.objects:
old_rotation_mode = eobj.bobj.rotation_mode
eobj.bobj.rotation_mode = 'QUATERNION'
eobj.bobj.animation_data.action = eobj.baked_action
if eobj.bobj.type == 'ARMATURE':
nodes.extend(generate_nodes_from_armature(eobj))
else:
nodes.extend(generate_nodes_from_object(eobj))
eobj.bobj.animation_data.action = eobj.action
eobj.bobj.rotation_mode = old_rotation_mode
if eobj.baked_action is not self.action:
bpy.data.actions.remove(eobj.baked_action)
eobj.baked_action = None
return nodes
def write(self, context, file, options):
"""Write animation data to file"""
old_frame = context.scene.frame_current
nodes = self._generate_nodes(context, options)
context.scene.frame_set(old_frame)
node_data_size = 0
for node in nodes:
node_data_size += sz_node(node)
block_size = sz_string(self.name) # name
block_size += SZ_INT32 # nodeCount
block_size += node_data_size # nodes
print("struct AND (size: {}) {{".format(block_size))
print(" name = {} (size: {})".format(self.name, sz_string(self.name)))
print(" nodeCount = {} (size: {})".format(len(nodes), SZ_INT32))
print(" nodes = stripped (size: {})".format(node_data_size))
print("};")
write_block(file, "AND", block_size) # header
write_string(file, self.name) # name
file.write(struct.pack("<I", len(nodes))) # nodeCount
for node in nodes:
write_node(file, node) # nodes
class ExportObject:
def __init__(self, bobj):
self.bobj = bobj
self.name = bobj.name
self.parent = None
self.children = []
# workaround to remember action after baking one for export
# when you run bake_action it seems to set the baked action active
if bobj.animation_data is not None:
self.action = bobj.animation_data.action
self.baked_action = None
def __repr__(self):
return "[ExportObject: {} '{}']".format(self.name, self.bobj.type)
def _write_object(self, context, file, options):
"""Write object to file"""
data = blender_object_to_data(context, self.bobj, options)
vertices = data['vertices']
normals = data['normals']
uvs = data['uvs']
vertex_colors = data['vertex_colors']
indices = data['indices']
materials = data['materials']
skin_bones = data['skin_bones']
geometry_type = ENUM_GEOMETRYTYPE['TRIANGLES']
vertex_data_flags = 0
if normals:
vertex_data_flags |= ENUM_VERTEXDATAFLAGS['HAS_NORMALS']
if uvs:
vertex_data_flags |= ENUM_VERTEXDATAFLAGS['HAS_UVS']
if vertex_colors:
vertex_data_flags |= ENUM_VERTEXDATAFLAGS['HAS_VERTEX_COLORS']
vertex_data_size = 0
for vertex in vertices:
vertex_data_size += sz_vector3(vertex)
for normal in normals:
vertex_data_size += sz_vector3(normal)
for uvc in uvs:
vertex_data_size += sz_vector2(uvc)
vertex_data_size += len(vertex_colors) * SZ_COLOR3
index_data_size = len(indices) * 3 * SZ_INT32
material_data_size = 0
for mtl in materials:
material_data_size += sz_material(mtl)
bone_data_size = 0
for bone in skin_bones:
bone_data_size += sz_skinbone(bone)
block_size = sz_string(self.name) # name
block_size += SZ_INT8 # geometryType
block_size += SZ_INT8 # vertexDataFlags
block_size += SZ_INT32 # indexCount
block_size += SZ_INT32 # vertexCount
block_size += SZ_INT16 # materialCount
block_size += SZ_INT16 # skinBoneCount
block_size += SZ_INT16 # childCount
block_size += index_data_size # indices
block_size += vertex_data_size # vertices
block_size += material_data_size # materials
block_size += bone_data_size # skinBones
print("struct OBD (size: {}) {{".format(block_size))
print(" name = {} (size: {})".format(self.name, sz_string(self.name)))
print(" geometryType = {} (size: {})".format(geometry_type, SZ_INT8))
print(" vrtxDataFlags = {} (size: {})".format(vertex_data_flags, SZ_INT8))
print(" indexCount = {} (size: {})".format(len(indices)*3, SZ_INT32))
print(" vertexCount = {} (size: {})".format(len(vertices), SZ_INT32))
print(" materialCount = {} (size: {})".format(len(materials), SZ_INT16))
print(" skinBoneCount = {} (size: {})".format(len(skin_bones), SZ_INT16))
print(" childCount = {} (size: {})".format(len(self.children), SZ_INT16))
print(" indices = stripped (size: {})".format(index_data_size))
print(" vertices = stripped (size: {})".format(vertex_data_size))
print(" materials = stripped (size: {})".format(material_data_size))
print(" skinBones = stripped (size: {})".format(bone_data_size))
print("};")
write_block(file, "OBD", block_size) # header
write_string(file, self.name) # name
file.write(struct.pack("<B", geometry_type)) # geometryType
file.write(struct.pack("<B", vertex_data_flags)) # vertexDataFlags
file.write(struct.pack("<i", len(indices)*3)) # indexCount
file.write(struct.pack("<i", len(vertices))) # vertexCount
file.write(struct.pack("<H", len(materials))) # materialCount
file.write(struct.pack("<H", len(skin_bones))) # skinBoneCount
file.write(struct.pack("<H", len(self.children))) # childCount
# indices
for index in indices:
file.write(struct.pack("<III", *index)) # index
# vertices
for idx in range(len(vertices)):
write_vector3(file, vertices[idx])
if normals:
write_vector3(file, normals[idx])
if uvs:
write_vector2(file, uvs[idx])
if vertex_colors:
write_color4(file, vertex_colors[idx])
for mtl in materials:
write_material(file, mtl) # materials
for bone in skin_bones:
write_skinbone(file, bone) # skinBones
def _write_armature(self, file, options):
"""Write armature to file"""
bone_matrices = []
bone_data_size = 0
for bone in self.bobj.data.bones:
if options['use_rest_pose']:
matrix = bone.matrix_local
if bone.parent:
matrix = bone.parent.matrix_local.inverted() * matrix
else:
pose_bone = self.bobj.pose.bones[bone.name]
matrix = pose_bone.matrix
if pose_bone.parent:
matrix = pose_bone.parent.matrix.inverted() * matrix
bone_data_size += sz_bone(self, bone, matrix)
bone_matrices.append(matrix)
# root bone matrix
matrix = options['global_matrix'] * self.bobj.matrix_local
block_size = SZ_INT16 # boneCount
block_size += sz_string(self.name) # root bone name
block_size += sz_matrix4x4(matrix) # root bone matrix
block_size += SZ_INT16 # root bone parent
block_size += bone_data_size # bones
print("struct BND (size: {}) {{".format(block_size))
print(" boneCount = {} (size: {})".format(len(self.bobj.data.bones)+1, SZ_INT16))
print(" bones = stripped (size: {})".format(bone_data_size))
print("};")
write_block(file, "BND", block_size) # header
file.write(struct.pack("<H", len(self.bobj.data.bones)+1)) # boneCount
# root bone (actually part of bones)
write_string(file, self.name) # name
write_matrix4x4(file, matrix) # transformationMatrix
file.write(struct.pack("<H", 0)) # parent
# bones
for bone, matrix in zip_longest(self.bobj.data.bones, bone_matrices):
def get_parent_idx():
if bone.parent:
idx = 1
for ibn in self.bobj.data.bones:
if ibn == bone.parent:
return idx
idx += 1
return 0
parent = get_parent_idx()
write_string(file, self.name + "_" + bone.name) # name
write_matrix4x4(file, matrix) # transformationMatrix
file.write(struct.pack("<H", parent)) # parent
def write(self, context, file, options):
"""Write object/armature data to file"""
if self.bobj.type == 'ARMATURE':
self._write_armature(file, options)
else:
self._write_object(context, file, options)
for eobj in self.children:
eobj.write(context, file, options)
class GlhckExporter:
def __init__(self):
self.lists = {}
def list_count(self, key):
"""Get count of every object and their children in list"""
def animation_count(lst):
return len(lst)
def object_count(lst):
count = 0
for eobj in lst:
count += object_count(eobj.children) + 1
return count
countmap = {'OBJECT' :object_count,
'ARMATURE' :object_count,
'ANIMATION':animation_count}
return countmap[key](self.lists[key])
def _gather_data(self, context, options):
""" Collect list of exportable objects, bones and animations """
self.lists = {'OBJECT':[], 'ARMATURE':[], 'ANIMATION':[]}
listmap = {'EMPTY':'OBJECT', 'MESH':'OBJECT', 'ARMATURE':'ARMATURE'}
actionmap = {}
if options['use_bones']:
whitelist = ['EMPTY', 'MESH', 'ARMATURE']
else:
whitelist = ['EMPTY', 'MESH']
if options['use_selection']:
# Selected objects only
export_list = list(context.selected_objects)
else:
            # What you see is what you get (check your layers)
export_list = list(context.selectable_objects)
# Add children and missing armatures to export_list
def add_children(bobj):
armatures = [modifier.object for modifier in bobj.modifiers
if modifier.type == 'ARMATURE' and modifier.show_viewport
and modifier.object is not None]
for arm in armatures:
if arm not in export_list:
export_list.append(arm)
for bchd in bobj.children:
if bchd not in export_list:
export_list.append(bchd)
add_children(bchd)
# We mutate this list
for bobj in export_list[:]:
add_children(bobj)
# Should now contain filtered list of all objects or selected objects
# and their children
export_list = [ob for ob in export_list if ob.type in whitelist]
# 1. Check if object or its children/bones contain animation
# 2. Use actionmap to figure out already created animations
# 3. Add object to animation
def animations_from_object(eobj):
def action_contains_object(action, eobj):
for group in action.groups:
if group.name == eobj.name:
return True
if eobj.bobj.type == 'ARMATURE':
for bone in eobj.bobj.data.bones:
if group.name == bone.name:
return True
return False
if not options['use_animations']:
return
if eobj.bobj.animation_data is None:
return
for action in bpy.data.actions:
if action_contains_object(action, eobj):
if action.name in actionmap:
eanm = actionmap[action.name]
else:
eanm = ExportAnimation(action)
actionmap[action.name] = eanm
eanm.add_object(eobj)
        # 1. Build reparented children (can't be parented under an armature, nor vice versa)
# 2. Check if child has animation what we haven't yet added,
# and reference that possible animation
def reparented_children(parent, src_list):
dst_list = []
for bobj in src_list:
if bobj.type not in whitelist:
continue
eobj = ExportObject(bobj)
if parent.bobj.type == 'ARMATURE' and bobj.type != 'ARMATURE':
self.lists['OBJECT'].append(eobj)
elif bobj.type == 'ARMATURE':
self.lists['ARMATURE'].append(eobj)
else:
eobj.parent = parent
dst_list.append(eobj)
eobj.children = reparented_children(eobj, bobj.children)
animations_from_object(eobj)
return dst_list
# 1. Filter export_list to root ExportObjects
# 2. Add them to correct self.lists[] using listmap
# 3. Build reparented children for ExportObjects
# 4. Build list of animations for our selection,
# and reference each object to their own animation
for bobj in export_list:
if bobj.parent not in export_list:
eobj = ExportObject(bobj)
self.lists[listmap[bobj.type]].append(eobj)
eobj.children = reparented_children(eobj, bobj.children)
animations_from_object(eobj)
        # The list of animations can now be taken from actionmap
self.lists['ANIMATION'] = actionmap.values()
# Finally sort our gathered lists
for key in self.lists:
self.lists[key] = sort_by_name_field(self.lists[key])
def write(self, context, filepath, options):
"""Gather and write data from blender scene to file"""
# Store filepath to options
options['filepath'] = filepath
options['copy_set'] = set()
# Exit edit mode before exporting, so current object states are
# exported properly.
if bpy.ops.object.mode_set.poll():
bpy.ops.object.mode_set(mode='OBJECT')
self._gather_data(context, options)
bnh = self.list_count('ARMATURE')
obh = self.list_count('OBJECT')
anh = self.list_count('ANIMATION')
if bnh == 0 and obh == 0 and anh == 0:
print("Nothing to export")
return
print("---- Armature List ----")
print(self.lists['ARMATURE'])
print("")
print("---- Animation List ----")
print(self.lists['ANIMATION'])
print("")
print("---- Object List ----")
print(self.lists['OBJECT'])
print("")
print("---- Readable Header ----")
print("BNH {}".format(bnh))
print("OBH {}".format(obh))
print("ANH {}".format(anh))
print("")
file = None
if bnh > 0 or obh > 0 or (anh > 0 and not options['split_animations']):
file = open(filepath, 'wb')
file.write(bytes(GLHCKM_HEADER, 'ascii'))
file.write(struct.pack("<BB", *GLHCKM_VERSION))
if bnh > 0:
write_block(file, "BNH", bnh) # header block
if obh > 0:
write_block(file, "OBH", obh) # header block
if not options['split_animations'] and anh > 0:
write_block(file, "ANH", anh) # header block
# Export bones
for eobj in self.lists['ARMATURE']:
eobj.write(context, file, options)
# Export objects
for eobj in self.lists['OBJECT']:
eobj.write(context, file, options)
# Export animations
for eanm in self.lists['ANIMATION']:
if options['split_animations']:
if file is not None:
file.close()
path = os.path.dirname(filepath) + "/" + eanm.name + ".glhcka"
print(path)
file = open(path, 'wb')
file.write(bytes(GLHCKA_HEADER, 'ascii'))
file.write(struct.pack("<BB", *GLHCKM_VERSION))
write_block(file, "ANH", 1) # header block
eanm.write(context, file, options)
file.close()
# Copy all collected files from export
path_reference_copy(options['copy_set'])
##
## GLhck Model Export v0.1
##
## NOTE: This format is not yet considered "final"
## Drastic changes can happen, so maintaining easy backwards
## compatibility is not yet a goal.
##
## Planned changes:
## - MAD block for materials
## - Reference to MAD blocks from OBD blocks
## - DBD blocks for duplicate objects
## - EBD blocks for empty objects
## - Optional zlib (.gz) compression
##
## Version history:
## -- 0.1 (Thu Nov 14 00:42:23 UTC 2013)
## First release
##
## The glhck model format is a combination of binary and text.
## Text is used to serialize floats and block headers.
##
## Some major points of the format:
## - Everything is little-endian
## - Matrices are in column-major
## - Quaternions are in order x,y,z,w
## - Floats, Vectors, Quaternions, Matrices are packed as strings
## - Strings for names and textures filepaths are UTF-8
##
## Glhck files start with either glhckm or glhcka header text followed by
## version (major, minor) serialized as two uint8_t.
##
## glhckm header is used for the master format and can contain any data
## (BND, OBD, AND blocks).
##
## glhcka header is used for the animation format and should only contain
## animation data (AND blocks).
##
## Version can be used to provide backwards compatibility.
##
## Blocks should be ordered in following order:
## 1. BND blocks
## 2. OBD blocks
## 3. AND blocks
##
## This makes it easier for importers to reference skeleton bones from
## objects through skinBones
##
## The format contains following data blocks (BND, OBD, AND) with structs:
##
## // Arrays whose size can't be known without reading the data block of each
## // element are marked with the <no-known-size> comment.
##
## struct STRING {
## uint8_t len;
## char str[len];
## }
##
## struct LONGSTRING {
## uint16_t len;
## char str[len];
## }
##
## struct MATRIX4x4 {
## LONGSTRING m4x4;
## // Matrix is serialized as (column-major):
## // "%f,%f,%f,%f %f,%f,%f,%f %f,%f,%f,%f %f,%f,%f,%f"
## };
##
## // Actual bone information
## struct BONE {
## STRING name; // never empty, and must be unique!
## MATRIX4x4 transformationMatrix;
## uint16_t parent; // 0 == root bone
## };
##
## // Always starts with the root bone (armature)
## struct BND {
## uint16_t boneCount;
## BONE bones[boneCount]; // <no-known-size>
## };
##
## struct COLOR4 {
## uint8_t r, g, b, a;
## };
##
## struct COLOR3 {
## uint8_t r, g, b;
## };
##
## struct FLOAT {
## STRING flt;
## // Float is serialised as:
## // "%f"
## };
##
## // Bitflags for materialFlags member of MATERIAL struct
## enum {
## LIGHTING = 1<<0,
## };
##
## struct MATERIAL {
## STRING name;
## COLOR3 ambient;
## COLOR4 diffuse;
## COLOR4 specularity;
## uint16_t shininess; // range: 1 - 511
## uint8_t materialFlags;
## LONGSTRING diffuseTexture;
## };
##
## struct WEIGHT {
## uint32_t vertexIndex;
## FLOAT weight;
## };
##
## struct SKINBONE {
## STRING name; // must reference to BONE
## MATRIX4x4 offsetMatrix;
## uint32_t weightCount;
## WEIGHT weights[weightCount]; // <no-known-size>
## };
##
## struct VECTOR3 {
## STRING vec3;
## // Vector is serialized as (x,y,z):
## // "%f,%f,%f"
## };
##
## struct VECTOR2 {
## STRING vec2;
## // Vector is serialized as (x,y):
## // "%f,%f"
## };
##
## struct VERTEXDATA {
## VECTOR3 vertex; // always exists
## VECTOR3 normal; // only if (vertexDataFlags & HAS_NORMALS)
## VECTOR2 uv; // only if (vertexDataFlags & HAS_UV)
## COLOR4 color; // only if (vertexDataFlags & HAS_VERTEX_COLORS)
## };
##
## enum geometryTypeEnum {
## TRIANGLES,
## };
##
## // Bitflags for vertexDataFlags member of OB struct
## enum {
## HAS_NORMALS = 1<<0,
## HAS_UV = 1<<1,
## HAS_VERTEX_COLORS = 1<<2,
## };
##
## // Repeated until there is no children and their children
## struct OBD {
## STRING name; // should not be empty, if used for animation (unique)
## geometryTypeEnum geometryType; // uint8_t
## uint8_t vertexDataFlags;
## int32_t indexCount;
## int32_t vertexCount;
## uint16_t materialCount;
## uint16_t skinBoneCount;
## uint16_t childCount;
## uint32_t indices[indexCount];
## VERTEXDATA vertices[vertexCount]; // <no-known-size>
## MATERIAL materials[materialCount]; // <no-known-size>
## SKINBONE skinBones[skinBoneCount]; // <no-known-size>
## OB children[childCount]; // <no-known-size>
## };
##
## struct QUATERNION {
## STRING quat;
## // Quaternion is serialized as (x,y,z,w):
## // "%f,%f,%f,%f"
## };
##
## struct QUATERNIONKEY {
## uint32_t frame;
## QUATERNION quaternion;
## };
##
## struct VECTORKEY {
## uint32_t frame;
## VECTOR3 vector;
## };
##
## struct NODE {
## STRING name; // must reference to a OBD/BONE
## uint32_t rotationCount;
## uint32_t scalingCount;
## uint32_t translationCount;
## QUATERNIONKEY quaternionKeys[rotationCount]; // <no-known-size>
## VECTORKEY scalingKeys[scalingCount]; // <no-known-size>
## VECTORKEY translationKeys[translationCount]; // <no-known-size>
## };
##
## struct AND {
## STRING name;
## uint32_t nodeCount;
## NODE nodes[nodeCount]; // <no-known-size>
## };
##
## Every data block has header block at top of file:
## BNH<uint32_t> (BND blocks count in file)
## OBH<uint32_t> (OBD blocks count in file)
## ANH<uint32_t> (AND blocks count in file)
##
## A header block doesn't need to be written if its count of data blocks is 0
##
## Example of glhckm file:
## glhckm<uint8_t:0><uint8_t:1>
## BNH<uint32_t:1>
## OBH<uint32_t:4>
## ANH<uint32_t:0>
## BND<uint32_t:size><...data...>
## OBD<uint32_t:size><...data...>
## OBD<uint32_t:size><...data...>
## OBD<uint32_t:size><...data...>
## OBD<uint32_t:size><...data...>
##
## When importing data, the block sizes make it easier to skip data you don't
## care about.
##
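##
## Illustrative reader sketch (not part of the exporter and never called by it):
## one possible way to walk the top-level blocks laid out above. The helper
## name is hypothetical and exists only to demonstrate the block layout.
##
def _example_read_blocks(path):
    """Yield (block name, value) for each top-level block of a glhckm/glhcka file."""
    import struct
    with open(path, 'rb') as f:
        magic = f.read(6)                            # b'glhckm' or b'glhcka'
        assert magic in (b'glhckm', b'glhcka'), magic
        f.read(2)                                    # version: (major, minor) as two uint8
        while True:
            name = f.read(3)                         # e.g. b'BNH', b'OBD', b'AND'
            if len(name) < 3:
                break                                # end of file
            value, = struct.unpack("<I", f.read(4))
            if name in (b'BNH', b'OBH', b'ANH'):
                yield name, value                    # header block: announced block count
            else:
                yield name, f.read(value)            # data block: payload of 'value' bytes
##
## For example, sum(1 for n, _ in _example_read_blocks("model.glhckm") if n == b'OBD')
## would count the OBD blocks in a file (one per exported object, children included);
## "model.glhckm" is a made-up name.
##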
def save(context, filepath,
use_selection=False,
use_animations=True,
bake_animations=False,
split_animations=False,
use_bones=True,
use_rest_pose=True,
use_mesh_modifiers=True,
use_normals=True,
use_uvs=True,
use_vertex_colors=False,
use_materials=True,
global_matrix=None,
path_mode='AUTO'):
print("")
print(":::::::::::::::::::::::::::::")
print(":: GLhck Model Export v0.1 ::")
print(":::::::::::::::::::::::::::::")
print(":: filepath = {}".format(filepath))
print(":: use_selection = {}".format(use_selection))
print(":: use_animations = {}".format(use_animations))
print(":: bake_animations = {}".format(bake_animations))
print(":: split_animations = {}".format(split_animations))
print(":: use_bones = {}".format(use_bones))
print(":: use_rest_pose = {}".format(use_rest_pose))
print(":: use_mesh_modifiers = {}".format(use_mesh_modifiers))
print(":: use_uvs = {}".format(use_uvs))
print(":: use_normals = {}".format(use_normals))
print(":: use_vertex_colors = {}".format(use_vertex_colors))
print(":: use_materials = {}".format(use_materials))
print(":: path_mode = {}".format(path_mode))
print("")
print("{}".format(global_matrix))
print("")
import time
time_start = time.time()
if not global_matrix:
global_matrix = Matrix()
options = {}
for i in ['use_selection', 'use_animations', 'bake_animations',
'split_animations', 'use_bones', 'use_rest_pose',
'use_mesh_modifiers', 'use_normals', 'use_uvs',
'use_vertex_colors', 'use_materials',
'path_mode', 'global_matrix']:
options[i] = locals()[i]
exporter = GlhckExporter()
exporter.write(context, filepath, options)
print("")
print("::::::::::::::::::::::::::::")
print(":: {}".format(filepath))
print(":: Finished in {:.4f}".format(time.time() - time_start))
print("::::::::::::::::::::::::::::")
print("")
return {'FINISHED'}
# vim: set ts=8 sw=4 tw=0 :
|
[
"mailroxas@gmail.com"
] |
mailroxas@gmail.com
|
a6cb063cbc50ca0b9dcd3bb40bca8952037c08a0
|
11b420a9e6dbe371167227f41ef8e344e3382612
|
/ConvNets/active_learning/Acquisition_Functions/BCNN_Maximal_Uncertainty/Variation_Ratio/trial_variation_ratio.py
|
efcf695711ce8e03da6928171e4e1a4aaa424c3f
|
[
"MIT"
] |
permissive
|
tarek-ullah/Active-Learning-Bayesian-Convolutional-Neural-Networks
|
7092386758b68dc922efaa2c2eba055930bf2896
|
f8b68038bd3b97c473e9c1de6b6cdee4538021f4
|
refs/heads/master
| 2021-01-13T06:57:19.343775
| 2016-11-02T12:22:16
| 2016-11-02T12:22:16
| 81,338,773
| 1
| 0
| null | 2017-02-08T14:34:15
| 2017-02-08T14:34:15
| null |
UTF-8
|
Python
| false
| false
| 8,234
|
py
|
from __future__ import print_function
from keras.datasets import mnist
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD, Adadelta, Adagrad, Adam
from keras.utils import np_utils, generic_utils
from six.moves import range
import numpy as np
import scipy as sp
from keras import backend as K
import random
import scipy.io
import matplotlib.pyplot as plt
from keras.regularizers import l2, activity_l2
from scipy.stats import mode
batch_size = 128
nb_classes = 10
nb_epoch = 1
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3
# the data, shuffled and split between train and test sets
(X_train_All, y_train_All), (X_test, y_test) = mnist.load_data()
X_train_All = X_train_All.reshape(X_train_All.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
#after 50 iterations with 10 pools - we have 500 pooled points - use validation set outside of this
X_valid = X_train_All[2000:2150, :, :, :]
y_valid = y_train_All[2000:2150]
X_train = X_train_All[0:200, :, :, :]
y_train = y_train_All[0:200]
X_Pool = X_train_All[5000:15000, :, :, :]
y_Pool = y_train_All[5000:15000]
# X_test = X_test[0:2000, :, :, :]
# y_test = y_test[0:2000]
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_valid = X_valid.astype('float32')
X_Pool = X_Pool.astype('float32')
X_train /= 255
X_valid /= 255
X_Pool /= 255
X_test /= 255
Y_test = np_utils.to_categorical(y_test, nb_classes)
Y_valid = np_utils.to_categorical(y_valid, nb_classes)
Y_Pool = np_utils.to_categorical(y_Pool, nb_classes)
score=0
all_accuracy = 0
acquisition_iterations = 1
dropout_iterations = 3
Queries = 10
Pool_Valid_Loss = np.zeros(shape=(nb_epoch, 1)) #row - no.of epochs, col (gets appended) - no of pooling
Pool_Train_Loss = np.zeros(shape=(nb_epoch, 1))
x_pool_All = np.zeros(shape=(1))
Y_train = np_utils.to_categorical(y_train, nb_classes)
print('Training Model Without Acquisitions')
model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='valid', input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Convolution2D(nb_filters*2, nb_conv, nb_conv, border_mode='valid', input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters*2, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
# hist = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_valid, Y_valid))
# Train_Result_Optimizer = hist.history
# Train_Loss = np.asarray(Train_Result_Optimizer.get('loss'))
# Train_Loss = np.array([Train_Loss]).T
# Valid_Loss = np.asarray(Train_Result_Optimizer.get('val_loss'))
# Valid_Loss = np.asarray([Valid_Loss]).T
# Pool_Train_Loss = Train_Loss
# Pool_Valid_Loss = Valid_Loss
# print('Evaluating Test Accuracy Without Acquisition')
# score, acc = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
# all_accuracy = acc
print('Starting Active Learning')
for i in range(acquisition_iterations):
print('POOLING ITERATION', i)
All_Dropout_Classes = np.zeros(shape=(X_Pool.shape[0],1))
print('Use trained model for test time dropout')
for d in range(dropout_iterations):
print ('Dropout Iteration', d)
dropout_classes = model.predict_classes(X_Pool,batch_size=batch_size, verbose=1)
dropout_classes = np.array([dropout_classes]).T
np.save('/Users/Riashat/Documents/Cambridge_THESIS/Code/Experiments/keras/active_learning/Acquisition_Functions/BCNN_Maximal_Uncertainty/Variation_Ratio/Dropout_Scores/'+'Dropout_Score_'+str(d)+'.npy',dropout_classes)
All_Dropout_Classes = np.append(All_Dropout_Classes, dropout_classes, axis=1)
Variation = np.zeros(shape=(X_Pool.shape[0]))
for t in range(X_Pool.shape[0]):
L = np.array([0])
for d_iter in range(dropout_iterations):
print('Fill Up')
L = np.append(L, All_Dropout_Classes[t, d_iter+1])
print('Computing Variation Ratio')
Predicted_Class, Mode = mode(L[1:])
v = np.array( [1 - Mode/float(dropout_iterations)])
Variation[t] = v
a_1d = Variation.flatten()
x_pool_index = a_1d.argsort()[-Queries:][::-1]
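    # Illustrative note (not from the original script): scipy.stats.mode returns
    # (modal value, count), so 'Mode' above holds the count of the modal class.
    # E.g. with dropout_iterations = 3 and sampled classes [7, 7, 2] for one pool
    # point, the count is 2 and the variation ratio is 1 - 2/3 ~ 0.33; the
    # 'Queries' points with the highest ratio are picked by the argsort above.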
'''
#store all the pooled images indexes
x_pool_All = np.append(x_pool_All, x_pool_index)
#saving pooled images
for im in range(2):
Image = X_Pool[x_pool_index[im], :, :, :]
img = Image.reshape((28,28))
sp.misc.imsave('/Users/Riashat/Documents/Cambridge_THESIS/Code/Experiments/keras/active_learning/Acquisition_Functions/BCNN_Maximal_Uncertainty/GPU/Bayes_Segnet/Pooled_Images/'+'Pool_Iter'+str(i)+'_Image_'+str(im)+'.jpg', img)
Pooled_X = X_Pool[x_pool_index, 0:1, 0:28, 0:28]
Pooled_Y = y_Pool[x_pool_index]
delete_std = np.delete(BayesSegnet_Sigma, (x_pool_index), axis=0)
delete_Pool_X = np.delete(X_Pool, (x_pool_index), axis=0)
delete_Pool_Y = np.delete(y_Pool, (x_pool_index), axis=0)
    print('Acquired Points added to training set')
X_train = np.concatenate((X_train, Pooled_X), axis=0)
y_train = np.concatenate((y_train, Pooled_Y), axis=0)
print('Train Model with pooled points')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='valid', input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Convolution2D(nb_filters*2, nb_conv, nb_conv, border_mode='valid', input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters*2, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
hist = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_valid, Y_valid))
Train_Result_Optimizer = hist.history
Train_Loss = np.asarray(Train_Result_Optimizer.get('loss'))
Train_Loss = np.array([Train_Loss]).T
Valid_Loss = np.asarray(Train_Result_Optimizer.get('val_loss'))
Valid_Loss = np.asarray([Valid_Loss]).T
#Accumulate the training and validation/test loss after every pooling iteration - for plotting
Pool_Valid_Loss = np.append(Pool_Valid_Loss, Valid_Loss, axis=1)
Pool_Train_Loss = np.append(Pool_Train_Loss, Train_Loss, axis=1)
print('Evaluate Model Test Accuracy with pooled points')
score, acc = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
print('Test score:', score)
print('Test accuracy:', acc)
all_accuracy = np.append(all_accuracy, acc)
print('Use this trained model with pooled points for Dropout again')
print('Done with Pooling ----- Saving Results')
np.savetxt("Variation Ratio Accuracy Values.csv", all_accuracy, delimiter=",")
np.save(''+'All_Train_Loss'+'.npy', Pool_Train_Loss)
np.save(''+ 'All_Valid_Loss'+'.npy', Pool_Valid_Loss)
np.save(''+'All_Pooled_Image_Index'+'.npy', x_pool_All)
np.save(''+ 'All_Accuracy_Results'+'.npy', all_accuracy)
'''
|
[
"riashat.islam.93@gmail.com"
] |
riashat.islam.93@gmail.com
|
5dcc3330197b0a722daae4604cb2a2df775f3a8a
|
1526f01b31f6970025b30c6f07fb7de5bb45162c
|
/objects.py
|
d30ab972cebcb2934ea6c1b4dcea497742bc77e5
|
[] |
no_license
|
andremene182/ProgrammingLab
|
b768268f06b9cdf03b289e47ba5709202eabe611
|
2daab45f174e9cc21cde0719061a999755002f6b
|
refs/heads/main
| 2023-02-18T09:31:18.972594
| 2021-01-17T19:43:01
| 2021-01-17T19:43:01
| 310,743,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 669
|
py
|
class Pagina:
def __init__(self, numero, capitolo, testo):
self.numero = numero
self.capitolo = capitolo
self.testo = testo
pagina1 = Pagina(numero=1, capitolo="Ciccio", testo="Ciccio Bello")
print(pagina1.capitolo)
libro = []
class PaginaVuota(Pagina):
    # pass = "empty" code
pass
class PaginaDestra(Pagina):
def posizione_numero(self):
return 'destra'
class PaginaSinistra(Pagina):
def posizione_numero(self):
return 'sinistra'
paginaDestra = PaginaDestra(numero=1, capitolo="Ciccio", testo="Ciccio Bello")
print(paginaDestra.posizione_numero())
libro.append(paginaDestra)
print(isinstance(paginaDestra,Pagina))
print(libro)
|
[
"replituser@example.com"
] |
replituser@example.com
|
72bc738c1242776943eb2708e5355cd14ad5642c
|
54383b87d0b37da153cf8e03b33bd29a21fd41a9
|
/stockalyzer/twitter/viewsets/ticker.py
|
59f69f251b50399834578eae739371af803ba0ad
|
[] |
no_license
|
webclinic017/stockalyzer
|
ea626b03501ccd44cd7f930c55ea4fb71f370ede
|
cc8952585590965fac3ab3032e9abf052de96d29
|
refs/heads/master
| 2023-07-30T22:50:05.826530
| 2020-06-02T03:33:52
| 2020-06-02T03:33:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
from rest_framework import viewsets
from ..models import Ticker
from ..serializers import TickerSerializer
class TickerViewSet(viewsets.ModelViewSet):
queryset = Ticker.objects.all()
serializer_class = TickerSerializer
|
[
"52392083+UnrealConclusion@users.noreply.github.com"
] |
52392083+UnrealConclusion@users.noreply.github.com
|
617b7fcfbb3ffd362a97f9fcad74bb32df8204c0
|
2fc14d2a8940c3b242f62cecbfe44cd4912e1f60
|
/travelloapp/migrations/0001_initial.py
|
a842549ffe2e55683016cb88a7731f840f5a98d3
|
[] |
no_license
|
yashkuma/mydjangosite
|
d16a2b94791acca165224f259b5ae78390d2dac9
|
9f7712821523259ab641c51f236f4c2d869986c6
|
refs/heads/master
| 2020-12-15T08:28:52.291026
| 2020-01-25T11:21:46
| 2020-01-25T11:21:46
| 235,040,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 712
|
py
|
# Generated by Django 3.0.2 on 2020-01-19 09:46
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='destination',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('img', models.ImageField(upload_to='pics')),
('desc', models.TextField()),
('price', models.IntegerField()),
('offer', models.BooleanField(default=False)),
],
),
]
|
[
"yashsriavstava05@gmail.com"
] |
yashsriavstava05@gmail.com
|
8fff146cf36fb4c9489dfd115b45aa95b4f9a17e
|
872f24199d847f05ddb4d8f7ac69eaed9336a0d5
|
/gcwrap/python/scripts/tests/test_fluxscale.py
|
1cee186afb0855c93ed9c1f512518faf237ac9e3
|
[] |
no_license
|
schiebel/casa
|
8004f7d63ca037b4579af8a8bbfb4fa08e87ced4
|
e2ced7349036d8fc13d0a65aad9a77b76bfe55d1
|
refs/heads/master
| 2016-09-05T16:20:59.022063
| 2015-08-26T18:46:26
| 2015-08-26T18:46:26
| 41,441,084
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,132
|
py
|
import os
import shutil
import testhelper as th
import numpy as np
from __main__ import default
from tasks import fluxscale
from taskinit import *
import unittest
import exceptions
''' Python unit tests for the fluxscale task
These tests will only verify if the fluxscale
tables created for an MS and an MMS agree. These are
not full unit tests for the fluxscale task.
'''
datapath = os.environ.get('CASAPATH').split()[0] +\
'/data/regression/unittest/fluxscale/'
# Pick up alternative data directory to run tests on MMSs
testmms = False
if os.environ.has_key('TEST_DATADIR'):
DATADIR = str(os.environ.get('TEST_DATADIR'))+'/fluxscale/'
if os.path.isdir(DATADIR):
testmms = True
datapath = DATADIR
else:
print 'WARN: directory '+DATADIR+' does not exist'
print 'fluxscale tests will use data from '+datapath
class fluxscale1_test(unittest.TestCase):
def setUp(self):
# Input names
self.prefix = 'ngc5921'
self.msfile = self.prefix + '.ms'
if testmms:
self.msfile = self.prefix + '.mms'
self.gtable = self.prefix + '.ref1a.gcal'
self.reffile = self.prefix + '.def.fcal'
self.reffile2 = self.prefix + '.def.inc.fcal'
self.tearDown()
fpath = os.path.join(datapath,self.msfile)
if os.path.lexists(fpath):
shutil.copytree(fpath, self.msfile, symlinks=True)
fpath = os.path.join(datapath,self.gtable)
shutil.copytree(fpath, self.gtable, symlinks=True)
fpath = os.path.join(datapath,self.reffile)
shutil.copytree(fpath, self.reffile, symlinks=True)
fpath = os.path.join(datapath,self.reffile2)
shutil.copytree(fpath, self.reffile2, symlinks=True)
else:
self.fail('Data does not exist -> '+fpath)
default('fluxscale')
def tearDown(self):
#pass
shutil.rmtree(self.msfile, ignore_errors=True)
os.system('rm -rf ngc5921*.fcal')
os.system('rm -rf ngc5921*.gcal')
def test_default(self):
'''Fluxscale test 1.1: Create a flux table using field=0 as reference'''
# Input
gtable = self.gtable
# Output
outtable = self.msfile + '.fcal'
thisdict = fluxscale(vis=self.msfile, caltable=gtable, fluxtable=outtable, reference='1331*',
transfer='1445*')
self.assertTrue(os.path.exists(outtable))
# File to compare with
reference = self.reffile
# Compare the calibration table with a reference
self.assertTrue(th.compTables(outtable, reference, ['WEIGHT']))
# compare some determined values returned in the dict
#refdict={'1': {'spidxerr': np.array([ 0., 0., 0.]), 'spidx': np.array([ 0., 0., 0.]), \
# 'fluxdErr': np.array([0.00055571]), \
# 'fieldName': '1445+09900002_0', 'numSol': np.array([54]), \
#'fluxd': np.array([0.16825763])}, \
# flux density seems changed a bit. Updated - 2013.01.29 TT
# for linux on current trunk 22670
# for OSX 10.6 got the previous value
# 'fluxd': np.array([0.16825765])}, \
# 'freq': np.array([1.41266507e+09]), \
# 'spwName': np.array(['none'], dtype='|S5'), \
# 'spwID': np.array([0])}
# new returned dictionary (2013.09.12 TT)
refdict={'1': {'fitRefFreq': 0.0,
'spidxerr': np.array([ 0., 0., 0.]),
'spidx': np.array([ 0., 0., 0.]),
'0': {'fluxdErr': np.array([ 0.00055574, 0. , 0. , 0. ]),
'numSol': np.array([ 54., 0., 0., 0.]),
'fluxd': np.array([ 0.16825768, 0. , 0. , 0. ])},
'fitFluxd': 0.0,
'fieldName': '1445+09900002_0',
'fitFluxdErr': 0.0},
'freq': np.array([ 1.41266507e+09]),
'spwName': np.array(['none'],dtype='|S5'),
'spwID': np.array([0], dtype=np.int32)}
diff_fluxd=abs(refdict['1']['0']['fluxd'][0]-thisdict['1']['0']['fluxd'][0])/refdict['1']['0']['fluxd'][0]
#self.assertTrue(diff_fluxd<1.5e-8)
# increase the tolerance level
self.assertTrue(diff_fluxd<1e-5)
def test_incremental(self):
'''Fluxscale test 1.2: Create an incremental flux table using field=0 as reference'''
# Input
gtable = self.gtable
# Output
outtable = self.msfile + '.inc.fcal'
thisdict = fluxscale(vis=self.msfile, caltable=gtable, fluxtable=outtable, reference='1331*',
transfer='1445*', incremental=True)
self.assertTrue(os.path.exists(outtable))
# File to compare with
reference = self.reffile2
# Compare the calibration table with a reference
self.assertTrue(th.compTables(outtable, reference, ['WEIGHT']))
def test_gainthreshold(self):
'''Fluxscale test 1.3: gainthreshold parameter test'''
# Input
gtable = self.gtable
# Output
outtable = self.msfile + '.thres.fcal'
thisdict = fluxscale(vis=self.msfile, caltable=gtable, fluxtable=outtable, reference='1331*',
transfer='1445*', gainthreshold=0.05,incremental=True)
self.assertTrue(os.path.exists(outtable))
# File to compare with
#reference = self.reffile2
# Compare the calibration table with a reference
#self.assertTrue(th.compTables(outtable, reference, ['WEIGHT']))
def test_antennasel(self):
'''Fluxscale test 1.4: antenna de-selection test'''
# Input
gtable = self.gtable
# Output
outtable = self.msfile + '.antsel.fcal'
thisdict = fluxscale(vis=self.msfile, caltable=gtable, fluxtable=outtable, reference='1331*',
transfer='1445*', antenna='!24',incremental=True)
self.assertTrue(os.path.exists(outtable))
# File to compare with
#reference = self.reffile2
# Compare the calibration table with a reference
#self.assertTrue(th.compTables(outtable, reference, ['WEIGHT']))
def test_antennaselwithtime(self):
        '''Fluxscale test 1.5: empty selection case: antenna with time selection test'''
# Input
gtable = self.gtable
# Output
outtable = self.msfile + '.antsel.fcal'
# This time selection deselect all the data for the reference source and would raise an exception.
try:
thisdict = fluxscale(vis=self.msfile, caltable=gtable, fluxtable=outtable, reference='1331*',
transfer='1445*', antenna='!24', timerange='>1995/04/13/09:38:00', incremental=True)
except exceptions.RuntimeError, instance:
print "Expected exception raised:",instance
def test_antennaselwithscan(self):
'''Fluxscale test 1.6: antenna selection with scan selection test'''
# Input
gtable = self.gtable
# Output
outtable = self.msfile + '.antsel.fcal'
thisdict = fluxscale(vis=self.msfile, caltable=gtable, fluxtable=outtable, reference='1331*',
transfer='1445*', antenna='!24', scan='1~5', incremental=True)
self.assertTrue(os.path.exists(outtable))
class fluxscale2_test(unittest.TestCase):
def setUp(self):
# Input names
prefix = 'ngc4826'
self.msfile = prefix + '.ms'
if testmms:
self.msfile = prefix + '.mms'
self.gtable = prefix + '.spw.gcal'
self.reffile = prefix + '.spw.fcal'
fpath = os.path.join(datapath,self.msfile)
if os.path.lexists(fpath):
shutil.copytree(fpath, self.msfile, symlinks=True)
fpath = os.path.join(datapath,self.gtable)
shutil.copytree(fpath, self.gtable, symlinks=True)
fpath = os.path.join(datapath,self.reffile)
shutil.copytree(fpath, self.reffile, symlinks=True)
else:
self.fail('Data does not exist -> '+fpath)
default('fluxscale')
def tearDown(self):
shutil.rmtree(self.msfile, ignore_errors=True)
os.system('rm -rf ngc4826*.gcal')
os.system('rm -rf ngc4826*.fcal')
def test_spws(self):
'''Fluxscale 2: Create a fluxscale table for an MS with many spws'''
# Input
gtable = self.gtable
# Output
outtable = self.msfile + '.fcal'
        # tolerance for value tests
tol = 1.e-5
thisdict = fluxscale(vis=self.msfile, caltable=gtable, fluxtable=outtable, reference='3C273-F0',
transfer=['1310+323-F0'],refspwmap=[0,0])
self.assertTrue(os.path.exists(outtable))
# File to compare with
reference = self.reffile
# Compare the calibration table with a reference
self.assertTrue(th.compTables(outtable, reference, ['WEIGHT']))
# compare some determined values returned in the dict
#refdict={'1': {'spidxerr': np.array([ 0., 0., 0.]), 'spidx': np.array([ 0., 0., 0.]), \
# 'fluxdErr': np.array([-1. , 0.04080052, -1. , -1. , -1. , -1. ]), \
# 'fieldName': '1310+323-F0', 'numSol': np.array([-1, 8, -1, -1, -1, -1], dtype=np.int32), \
# 'fluxd': np.array([-1. , 1.44578847, -1. , -1. , -1. , -1. ])}, \
# 'freq': np.array([ 1.15138579e+11, 1.15217017e+11, -1.00000000e+00, -1.00000000e+00, -1.00000000e+00, \
# -1.00000000e+00]), 'spwName': np.array(['', '', '', '', '', ''], dtype='|S1'), \
# 'spwID': np.array([0, 1, 2, 3, 4, 5], dtype=np.int32)}
# updated for new returned dictionary format (2013.09.12 TT)
refdict= {'1': {'fitRefFreq': 0.0,
'spidxerr': np.array([ 0., 0., 0.]),
'fitFluxd': 0.0,
'spidx': np.array([ 0., 0., 0.]),
'1': {'fluxdErr': np.array([ 0.04080052, 0. , 0. , 0. ]),
'numSol': np.array([ 8., 0., 0., 0.]),
'fluxd': np.array([ 1.44578847, 0. , 0. , 0. ])},
'0': {'fluxdErr': np.array([-1., -1., -1., -1.]),
'numSol': np.array([-1., -1., -1., -1.]),
'fluxd': np.array([-1., -1., -1., -1.])},
'3': {'fluxdErr': np.array([-1., -1., -1., -1.]),
'numSol': np.array([-1., -1., -1., -1.]),
'fluxd': np.array([-1., -1., -1., -1.])},
'2': {'fluxdErr': np.array([-1., -1., -1., -1.]),
'numSol': np.array([-1., -1., -1., -1.]),
'fluxd': np.array([-1., -1., -1., -1.])},
'5': {'fluxdErr': np.array([-1., -1., -1., -1.]),
'numSol': np.array([-1., -1., -1., -1.]),
'fluxd': np.array([-1., -1., -1., -1.])},
'4': {'fluxdErr': np.array([-1., -1., -1., -1.]),
'numSol': np.array([-1., -1., -1., -1.]),
'fluxd': np.array([-1., -1., -1., -1.])},
'fieldName': '1310+323-F0',
'fitFluxdErr': 0.0},
'freq': np.array([ 1.15138579e+11, 1.15217017e+11, -1.00000000e+00, -1.00000000e+00,
-1.00000000e+00, -1.00000000e+00]),
'spwName': np.array(['', '', '', '', '', ''], dtype='|S1'),
'spwID': np.array([0, 1, 2, 3, 4, 5], dtype=np.int32)}
diff_fluxd=abs(refdict['1']['1']['fluxd'][0]-thisdict['1']['1']['fluxd'][0])/refdict['1']['1']['fluxd'][0]
self.assertTrue(diff_fluxd<tol)
class fluxscale3_test(unittest.TestCase):
def setUp(self):
# Input names
# ngc2403.tutorial.ms reduced in size
prefix = 'n2403.short'
self.prefix = prefix
self.msfile = prefix + '.ms'
if testmms:
self.msfile = prefix + '.mms'
self.gtable = prefix + '.flagFld1.gcal'
self.reffile = prefix + '.flagFld1.ref.fcal'
self.gtable2 = prefix + '.flagPartFld3.gcal'
self.reffile2 = prefix + '.flagPartFld3.ref.fcal'
fpath = os.path.join(datapath,self.msfile)
if os.path.lexists(fpath):
shutil.copytree(fpath, self.msfile, symlinks=True)
fpath = os.path.join(datapath,self.gtable)
shutil.copytree(fpath, self.gtable, symlinks=True)
fpath = os.path.join(datapath,self.reffile)
shutil.copytree(fpath, self.reffile, symlinks=True)
fpath = os.path.join(datapath,self.gtable2)
shutil.copytree(fpath, self.gtable2, symlinks=True)
fpath = os.path.join(datapath,self.reffile2)
shutil.copytree(fpath, self.reffile2, symlinks=True)
else:
self.fail('Data does not exist -> '+fpath)
default('fluxscale')
def tearDown(self):
shutil.rmtree(self.msfile, ignore_errors=True)
os.system('rm -rf n2403*.gcal')
os.system('rm -rf n2403*.fcal')
def test_flaggedref1(self):
'''Fluxscale test3: Ref field 1 in caltable is all flagged'''
# Input
gtable = self.gtable
# Output
outtable = self.prefix + '.flagFld1.fcal'
        # tolerance for value test
tol = 1.e-5
thisdict = fluxscale(vis=self.msfile, caltable=gtable, fluxtable=outtable, reference='1,3,4',
transfer='2')
self.assertTrue(os.path.exists(outtable))
# File to compare with
reference = self.reffile
# Compare the calibration table with a reference
self.assertTrue(th.compTables(outtable, reference, ['WEIGHT']))
# compare some determined values returned in the dict (tested on RHEL6)
refdict={'freq': np.array([ 1.41825202e+09]),
'2': {'fitRefFreq': 0.0,
'spidxerr': np.array([ 0., 0., 0.]),
'spidx': np.array([ 0., 0., 0.]),
'0': {'fluxdErr': np.array([ 0.00189683, 0., 0., 0.]),
'numSol': np.array([ 54., 0., 0., 0.]),
'fluxd': np.array([ 3.20832802, 0., 0., 0.])},
'fitFluxd': 0.0, 'fieldName': '0841+708', 'fitFluxdErr': 0.0},
'spwName': np.array(['127*24.4 kHz channels @ 1.42 GHz (BARY)'],dtype='|S40'),
'spwID': np.array([0], dtype=np.int32)}
diff_fluxd=abs(refdict['2']['0']['fluxd'][0]-thisdict['2']['0']['fluxd'][0])/refdict['2']['0']['fluxd'][0]
self.assertTrue(diff_fluxd<tol)
def test_flaggedref2(self):
'''Fluxscale test3: Ref field 3 in caltable is partially flagged'''
# Input
gtable = self.gtable2
# Output
outtable = self.prefix + '.flagPartFld3.fcal'
        # tolerance for value test
tol = 1.e-5
thisdict = fluxscale(vis=self.msfile, caltable=gtable, fluxtable=outtable, reference='1,3,4',
transfer='2')
self.assertTrue(os.path.exists(outtable))
# File to compare with
reference = self.reffile2
# Compare the calibration table with a reference
self.assertTrue(th.compTables(outtable, reference, ['WEIGHT']))
# compare some determined values returned in the dict (tested on RHEL6)
refdict={'freq': np.array([ 1.41825202e+09]),
'2': {'fitRefFreq': 0.0, 'spidxerr': np.array([ 0., 0., 0.]),
'spidx': np.array([ 0., 0., 0.]),
'0': {'fluxdErr': np.array([ 0.0022236, 0., 0., 0.]),
'numSol': np.array([ 54., 0., 0., 0.]),
'fluxd': np.array([ 3.19455455, 0., 0., 0.])},
'fitFluxd': 0.0, 'fieldName': '0841+708', 'fitFluxdErr': 0.0},
'spwName': np.array(['127*24.4 kHz channels @ 1.42 GHz (BARY)'], dtype='|S40'),
'spwID': np.array([0], dtype=np.int32)}
diff_fluxd=abs(refdict['2']['0']['fluxd'][0]-thisdict['2']['0']['fluxd'][0])/refdict['2']['0']['fluxd'][0]
self.assertTrue(diff_fluxd<tol)
def suite():
return [fluxscale1_test, fluxscale2_test, fluxscale3_test]
|
[
"darrell@schiebel.us"
] |
darrell@schiebel.us
|
891926bf78ef016c3894b110c6f83115b85d83e0
|
3cc04ab9312defd4ac6446083a8242dd02bfa29f
|
/auctions/admin.py
|
f2b3410565efd9b537e8ec6412c5cb90382570ef
|
[] |
no_license
|
Aairah-iiitd/commerce
|
ccc883281cbfd66e0c757c38cab4427a36d6305d
|
f56e9a4a3dd678d76baa610b0fb72d88e18dd62f
|
refs/heads/master
| 2022-12-10T06:58:28.229339
| 2020-09-09T14:37:34
| 2020-09-09T14:37:34
| 294,139,356
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(User)
admin.site.register(Listing)
admin.site.register(Bid)
admin.site.register(Comment)
admin.site.register(Category)
admin.site.register(Watchlist)
|
[
"aairah19003@iiitd.ac.in"
] |
aairah19003@iiitd.ac.in
|
fd260fb87845a2aba848eb47e9c48d96a23f5186
|
3eeceb468cecedf598e758139a79a08caa1dac86
|
/menufuncts.py
|
3b28260b96051cb7fa21f023b82b8738203d081c
|
[] |
no_license
|
emragins/aui-phone-directory
|
6d12f3d3c455d6a1fb4c448dd4038907035dd728
|
05bfb3fb5f6c0eeeeea1b2b130bd5eed4ae01e62
|
refs/heads/master
| 2020-05-17T12:32:08.535836
| 2014-08-16T01:42:51
| 2014-08-16T01:42:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
'''
THESE WILL NOT WORK RIGHT
EXIT, however, works.
'''
import wx
import manager
def OnOpen(e):
parent = e.GetEventObject()
value = None
dlg = wx.FileDialog(parent, "Choose a file", parent.dirname, "", "*.*", wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
filename=dlg.GetFilename()
dirname=dlg.GetDirectory()
value = parent.shelf.read(filename)
parent.filename = filename
parent.dirname = dirname
dlg.Destroy()
return value
def OnSave(e):
parent = e.GetEventObject()
parent.Save()
def OnSaveAs(e):
parent = e.GetEventObject()
dlg = wx.FileDialog(parent, "Save as...", parent.dirname, "", "*.*", wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
parent.filename=dlg.GetFilename()
parent.dirname=dlg.GetDirectory()
parent.shelf.ChangeFiles(parent.filename)
parent.shelf.write(parent.info)
dlg.Destroy()
def OnExit(e):
OnSave(e)
parent = e.GetEventObject()
parent.Close(True) # Close the frame.
#------------
def OnSort(e):
parent = e.GetEventObject()
print 'lo'
info = manager.GetInfo()
info.Sort()
manager.SetInfo(info)
parent.UpdateAll()
|
[
"emragins@gmail.com"
] |
emragins@gmail.com
|
1fb69af258c774d30aa4281624d9eb7c87a2454f
|
89a90707983bdd1ae253f7c59cd4b7543c9eda7e
|
/programming_python/Dstruct/OldIntro/graph.py
|
664d9a73795568c14155b1f41928a37388d75057
|
[] |
no_license
|
timothyshull/python_reference_code
|
692a7c29608cadfd46a6cc409a000023e95b9458
|
f3e2205dd070fd3210316f5f470d371950945028
|
refs/heads/master
| 2021-01-22T20:44:07.018811
| 2017-03-17T19:17:22
| 2017-03-17T19:17:22
| 85,346,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 868
|
py
|
class Graph:
def __init__(self, label):
self.name = label
self.arcs = []
def __repr__(self):
return self.name
def search(self, goal):
Graph.solns = []
self.generate([self], goal)
Graph.solns.sort(lambda x, y: cmp(len(x), len(y)))
return Graph.solns
def generate(self, path, goal):
if self == goal:
self.solns.append(path)
else:
for arc in self.arcs:
if arc not in path:
arc.generate(path + [arc], goal)
if __name__ == '__main__':
S = Graph('s')
P = Graph('p')
A = Graph('a') # make nodes
M = Graph('m')
S.arcs = [P, M] # S leads to P and M
P.arcs = [S, M, A] # arcs: embedded objects
A.arcs = [M]
    print S.search(M)                      # find paths from S to M
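    # Illustrative note: with the arcs above, this should print every cycle-free
    # path from S to M, shortest first: [[s, m], [s, p, m], [s, p, a, m]]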
|
[
"timothyshull@gmail.com"
] |
timothyshull@gmail.com
|
fea6958d1b0ade9e32fc89a6eeb59c733ab95515
|
af84e2773f7e3d2bcf3cad922680e90e9335a9f3
|
/DocTrace_v3/DocTrace_v3/compare1.py
|
e732223b8f630c004f5f7051cf23971fc32ae4fb
|
[] |
no_license
|
Durant21/docTrace_v3_test
|
2e1f52e2e855a4a0ece577ed3bfa36b9b9e2f854
|
2ab789530d901452dcd85810c82dc56e3bd29f11
|
refs/heads/master
| 2020-08-05T06:14:08.730319
| 2019-10-02T19:38:33
| 2019-10-02T19:38:33
| 212,425,807
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,502
|
py
|
#!/usr/bin/env python3
""" Command line interface to difflib.py providing diffs in four formats:
* ndiff: lists every line and highlights interline changes.
* context: highlights clusters of changes in a before/after format.
* unified: highlights clusters of changes in an inline format.
* html: generates side by side comparison with change highlights.
"""
import sys, os, difflib, argparse
from datetime import datetime, timezone, date
def file_mtime(path):
t = datetime.fromtimestamp(os.stat(path).st_mtime,
timezone.utc)
return t.astimezone().isoformat()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-c', action='store_true', default=False,
help='Produce a context format diff (default)')
parser.add_argument('-u', action='store_true', default=False,
help='Produce a unified format diff')
parser.add_argument('-m', action='store_true', default=True,
help='Produce HTML side by side diff '
'(can use -c and -l in conjunction)')
parser.add_argument('-n', action='store_true', default=False,
help='Produce a ndiff format diff')
parser.add_argument('-l', '--lines', type=int, default=3,
help='Set number of context lines (default 3)')
# parser.add_argument('fromfile')
# parser.add_argument('tofile')
options = parser.parse_args()
n = options.lines
fromfile = "fromfile" # options.fromfile
tofile = "tofile" # options.tofile
fromdate = str(date(year = 2018, month = 7, day = 12)) # file_mtime(fromfile)
todate = str(date(year = 2018, month = 7, day = 12)) # file_mtime(tofile)
with open(fromfile) as ff:
fromlines = ff.readlines()
with open(tofile) as tf:
tolines = tf.readlines()
if options.u:
diff = difflib.unified_diff(fromlines, tolines, fromfile, tofile, fromdate, todate, n=n)
elif options.n:
diff = difflib.ndiff(fromlines, tolines)
elif options.m:
diff = difflib.HtmlDiff().make_file(fromlines,tolines,fromfile,tofile,context=options.c,numlines=n)
outfile = open( 'difftest.html', 'w' )
outfile.write(diff)
outfile.close()
else:
diff = difflib.context_diff(fromlines, tolines, fromfile, tofile, fromdate, todate, n=n)
sys.stdout.writelines(diff)
if __name__ == '__main__':
main()
|
[
"Dante.Fernandez@RESPEC.com"
] |
Dante.Fernandez@RESPEC.com
|
c467d043f39c9bba2a797c175826b9828869eee5
|
726edece32d8c4b71d7f3fb41ab3671d0b5c1268
|
/simplesql/bin/pyflakes
|
f3466cef39b36cb79e87a066be43e1d8e2b82bd7
|
[
"MIT"
] |
permissive
|
Adebayo2016/DynamicResourceGCP
|
b6fe8e32861e6bfb7d934321c207843ae921d1d3
|
42e38615ca4bfd2dae17781f533663c40b250572
|
refs/heads/main
| 2023-07-18T11:15:10.050297
| 2021-08-25T22:22:18
| 2021-08-25T22:22:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
#!/Users/user/Desktop/sqlproject/simplesql/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pyflakes.api import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"adewaleadebayo630@gmail.com"
] |
adewaleadebayo630@gmail.com
|
|
10920a1bf7052f37820165e8fbb2f2bc7b6b4a0e
|
6acfa9e789d932b9162b3e173e24e1ac8b3c707b
|
/timetable.py
|
10051d817a2786bf578158a652a24ba350ba4f20
|
[] |
no_license
|
Hariomagr/VIT-DATA-Scraper
|
b5ebbd7ce8846a4b0246329267c51f2b0901971e
|
e475e4d6370834988c09206b6590a659b316e261
|
refs/heads/master
| 2020-03-22T23:48:33.027877
| 2018-10-10T07:01:36
| 2018-10-10T07:01:36
| 140,829,772
| 2
| 1
| null | 2018-10-10T07:01:37
| 2018-07-13T09:52:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,275
|
py
|
import index
import re
import json
from bs4 import BeautifulSoup
import warnings
warnings.filterwarnings('ignore', 'Unverified HTTPS request')
if(index.error==""):
url="https://vtopbeta.vit.ac.in/vtop/processViewTimeTable"
    data={'semesterSubId':index.semester}
url=index.s.post(url,data=data,headers=index.headers,verify=False)
soup=BeautifulSoup(url.content,'html.parser')
table=soup.find_all('table')[0]
length=len(table.find_all('tr'))
timetable1={}
timetable=[]
for i in range(2,length-2):
course={}
tr=table.find_all('tr')[i]
td=tr.find_all('td')
course['code']=td[2].find_all('p')[0].text
course['title']=td[3].find_all('p')[0].text
course['type']=td[4].find_all('p')[0].text
course['credit']=td[5].find_all('p')[0].text
course['class_no']=td[7].find_all('p')[0].text
course['slot']=td[8].find_all('p')[0].text
course['venue']=td[9].find_all('p')[0].text
course['faculty']=td[10].find_all('p')[0].text
timetable.append(course)
timetable1['Timetable']=timetable
timetable1=json.dumps(timetable1)
timetable1=json.loads(timetable1)
with open('timetable.json','w') as outfile:
json.dump(timetable1,outfile)
|
[
"33291061+Hariomagr@users.noreply.github.com"
] |
33291061+Hariomagr@users.noreply.github.com
|
651a56387f752839ab1652841288259a7d5ef429
|
0562eb8b24c3b3dabdb87818cb3fe8b06d3f9502
|
/pornhub/items.py
|
94a5ced776c6571821e9ff6244a70a7e96dcbed6
|
[] |
no_license
|
danmeiyihen/pornhub
|
24a4cd87eeaddc92791e8a7f3d0cf70dc26b995d
|
b875a7b87e49659ada07ed18d2796d7189b4cf52
|
refs/heads/master
| 2022-12-29T09:43:46.754001
| 2020-10-11T10:39:52
| 2020-10-11T10:39:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class WebmItem(scrapy.Item):
url = scrapy.Field()
filename = scrapy.Field()
key = scrapy.Field()
title = scrapy.Field()
class Mp4Item(scrapy.Item):
url = scrapy.Field()
filename = scrapy.Field()
key = scrapy.Field()
title = scrapy.Field()
categories = scrapy.Field()
uploader = scrapy.Field()
pornstars = scrapy.Field()
productions = scrapy.Field()
tags = scrapy.Field()
|
[
"adultfree@qq.com"
] |
adultfree@qq.com
|
fc72eb8d3d2bd8bd7531d260fa73e1825386e41a
|
f2debc551cad02c8cd4d475fd61ddd5e87b94628
|
/React_python/section5/01.py
|
57acd1e0bfc5c92af96c071f26cd31ad8377beee
|
[] |
no_license
|
JHadley1406/udemy_coursework
|
36827d1a2f34d1b1538b7136d442a7ce15e18364
|
4da641311e1dad6e0e99a1576270f831f157cecd
|
refs/heads/master
| 2020-07-05T11:46:22.327126
| 2019-08-16T02:02:29
| 2019-08-16T02:02:29
| 202,640,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,562
|
py
|
import unittest
from rx.testing import TestScheduler, ReactiveTest
from rx import Observable
from rx.subjects import Subject
class TestRx(unittest.TestCase):
def test_interval(self):
scheduler = TestScheduler()
interval_time = 300
def create():
return Observable.interval(interval_time, scheduler)
subscribed = 300
disposed = 1400
results = scheduler.start(create, created=1, subscribed=subscribed, disposed=disposed)
print(results.messages)
assert results.messages == [
ReactiveTest.on_next(600, 0),
ReactiveTest.on_next(900, 1),
ReactiveTest.on_next(1200, 2),
]
def test_custom_subject(self):
scheduler = TestScheduler()
self.mouse_click_stream = None
self.click_count = 0
def create(scheduler, state):
self.mouse_click_stream = Subject()
def click(scheduler, state):
self.mouse_click_stream.on_next('clicked')
def subscribe(scheduler, state):
def update(i):
self.click_count += 1
self.mouse_click_stream.subscribe(update)
scheduler.schedule_absolute(1, create)
scheduler.schedule_absolute(2, click)
scheduler.schedule_absolute(3, subscribe)
scheduler.schedule_absolute(4, click)
scheduler.schedule_absolute(5, click)
results = scheduler.start()
print(results.messages)
assert self.click_count == 2
if __name__ == '__main__':
unittest.main()
|
[
"josiah.hadley@gmail.com"
] |
josiah.hadley@gmail.com
|
d1b9737e56a9e1f1030c73d7a5a44687fd579c5d
|
e1e59984f535a324aa5a0d416efa7c2e6a63d68c
|
/bastion/compute/vm/base.py
|
ad56a6f757a584c89683b6c0b76a75d2a247d34f
|
[
"Apache-2.0"
] |
permissive
|
laureanok/bas7ion
|
8cc85120537929f2b4fec7daaabb2bb72402a05a
|
4cab1f5830e88beb208c4dfd564bf03eab1c2e8f
|
refs/heads/master
| 2020-03-14T08:03:34.320308
| 2018-04-20T20:45:40
| 2018-04-20T20:45:40
| 131,516,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,325
|
py
|
# Clases base
class VirtualMachine:
def __init__(self, id, name, size, image, compute, primary_network_interface=None, subnet=None):
self.id = id
self.name = name
self.size = size
self.image = image
self.compute = compute
self.public_ips = []
self.private_ips = []
self.primary_network_interface = primary_network_interface
self.subnet = subnet
def start(self):
raise NotImplementedError(
'start not implemented for this driver')
def stop(self):
raise NotImplementedError(
'stop not implemented for this driver')
def restart(self):
raise NotImplementedError(
'restart not implemented for this driver')
def attach_nic(self, nic):
raise NotImplementedError (
'attach_nic not implemented for this driver')
def provision(self, playbook_path, additional_options=[], parameters=None, user=None):
raise NotImplementedError(
'provision not implemented for this driver')
def delete(self):
raise NotImplementedError(
'delete not implemented for this driver')
def add_hostname(self, hostname, domain=None):
raise NotImplementedError(
'add_hostname not implemented for this driver')
|
[
"locokluver@gmail.com"
] |
locokluver@gmail.com
|
b91ffe4a68c3964c38ba62c0c390f8833054cd1b
|
2b682a01d19960e2039e2e064a742289b30da62c
|
/test/SConsArguments/UserManual/sconstest-arguments-usermanual-example_8.py
|
1cce4717527a0d9fa8ff92be0cc930a6ea64ca86
|
[
"MIT"
] |
permissive
|
mcqueen256/scons-arguments
|
952a427977c42161802225464e99bfeb4e5e9fd5
|
f4b783fc79fe3fc16e8d0f58308099a67752d299
|
refs/heads/master
| 2021-01-01T16:11:53.403454
| 2017-02-15T19:46:28
| 2017-02-15T19:46:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,170
|
py
|
#
# Copyright (c) 2012-2017 by Pawel Tomulik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
__docformat__ = "restructuredText"
"""
User manual for scons-arguments: Example 8
This is copy-pasted Example 8 from user manual (HTML version)
"""
import TestSCons
##############################################################################
#
##############################################################################
test = TestSCons.TestSCons()
test.dir_fixture('../../../SConsArguments', 'site_scons/SConsArguments')
test.write('SConstruct',
"""
from SConsArguments import DeclareArguments
decls = DeclareArguments(
foo = { 'env_key': 'ENV_FOO', 'var_key' : 'var_foo', 'opt_key' : 'opt_foo',
'option' : '--foo', 'default' : 'Default FOO',
'help' : 'foo help' },
bar = { 'env_key': 'ENV_BAR', 'var_key' : 'var_bar', 'opt_key' : 'opt_bar',
'option' : '--bar', 'default' : 'Default VAR', 'type' : 'string',
'help' : 'bar help' }
)
""")
test.run(arguments = ['-Q'])
test.pass_test()
# Local Variables:
# # tab-width:4
# # indent-tabs-mode:nil
# # End:
# vim: set syntax=python expandtab tabstop=4 shiftwidth=4:
|
[
"ptomulik@meil.pw.edu.pl"
] |
ptomulik@meil.pw.edu.pl
|
01269b55904d518e01037bdb411e333e58eb02c2
|
de2d70dfc457554cde2e8fde46f6717a40be3f68
|
/movieratings/lensview/migrations/0002_auto_20160505_0130.py
|
412deb03ddf2eb930cbb699c040e54896577e6ca
|
[] |
no_license
|
vamsden/django-movies-2
|
bc2982f4bc8441a71d81271ddcbe53bb931b2e09
|
ff05fe54c076a2fd33ad68d06f37e04a81da016b
|
refs/heads/master
| 2021-06-07T13:24:28.089015
| 2016-05-16T01:09:19
| 2016-05-16T01:09:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,371
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-05 01:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lensview', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='rater',
name='gender',
field=models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1),
),
migrations.AlterField(
model_name='rater',
name='occupation',
field=models.CharField(choices=[(0, 'Other'), (1, 'Academic/Educator'), (2, 'Artist'), (3, 'Clerical/Admin'), (4, 'College/Grad Student'), (5, 'Customer Service'), (6, 'Doctor/Health Care'), (7, 'Executive/Managerial'), (8, 'Farmer'), (9, 'Homemaker'), (10, 'K-12 Student'), (11, 'Lawyer'), (12, 'Programmer'), (13, 'Retired'), (14, 'Sales/Marketing'), (15, 'Scientist'), (16, 'Self-employed'), (17, 'Technician/Engineer'), (18, 'Tradesman/Craftsman'), (19, 'Unemployed'), (20, 'Writer')], max_length=50),
),
migrations.AlterField(
model_name='rater',
name='zipcode',
field=models.CharField(choices=[(1, 'Under 18'), (18, '18-24'), (25, '25-34'), (35, '35-44'), (45, '45-49'), (50, '50-55'), (56, '56+')], max_length=10),
),
]
|
[
"kross.jonathan+github@gmail.com"
] |
kross.jonathan+github@gmail.com
|
8f2d9fd868fec94161a6f765e8c7b5ea7f7220ab
|
f159aeec3408fe36a9376c50ebb42a9174d89959
|
/26.Remove-Duplicates-from-Sorted-Array.py
|
559bb0b49b3d162fc130d94c13f8768157617751
|
[
"MIT"
] |
permissive
|
mickey0524/leetcode
|
83b2d11ab226fad5da7198bb37eeedcd8d17635a
|
fc5b1744af7be93f4dd01d6ad58d2bd12f7ed33f
|
refs/heads/master
| 2023-09-04T00:01:13.138858
| 2023-08-27T07:43:53
| 2023-08-27T07:43:53
| 140,945,128
| 27
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
# https://leetcode.com/problems/remove-duplicates-from-sorted-array/description/
#
# algorithms
# Easy (38.18%)
# Total Accepted: 458.2K
# Total Submissions: 1.2M
class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
hash_map = {}
idx = 0
for n in nums:
if n not in hash_map:
hash_map[n] = True
nums[idx] = n
idx += 1
return idx
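# Illustrative check (not part of the original submission): removeDuplicates
# compacts the unique values to the front of the list and returns their count.
#   nums = [1, 1, 2, 2, 3]
#   Solution().removeDuplicates(nums)  # -> 3, and nums[:3] == [1, 2, 3]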
|
[
"buptbh@163.com"
] |
buptbh@163.com
|
23c6187e887e014e44b469f54bcc6ee61a7c1b59
|
2f444fd5cbe959d5c2f54db62fe036b8632d63e1
|
/features/steps/edit_name_steps.py
|
a832c1b47faa7dc10a7f3c7d754de5df76f59372
|
[] |
no_license
|
2678201791/WEB_01
|
d50c915674ef21e496ad6b6fd92ef517f233a47f
|
c02218350e9b08283c299a7acd7edfeabef6c002
|
refs/heads/master
| 2023-04-27T06:37:35.575280
| 2021-05-20T12:56:56
| 2021-05-20T12:56:56
| 369,201,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,863
|
py
|
from behave import given, when, then
from selenium import webdriver
from time import sleep
@given('(修改用户名)登录账号 {user}、密码 {password} 和新名字 {new_name}') # Given step; behave fills the {user}/{password}/{new_name} parameters
def step_impl(context, user, password, new_name): # context is the shared context object; step parameters arrive as extra arguments
context.user = user # bind the parameters to the context so later steps can use them
context.password = password
context.new_name = new_name
@when('(修改用户名)进入我的用户信息界面')
def step_impl(context):
context.driver = webdriver.Firefox() # the driver is likewise bound to the context object
context.driver.get('http://localhost:8080/SpringMVCMybatis/loginJsp')
sleep(0.5)
context.driver.find_element_by_xpath('/html/body/form/input[1]').send_keys(context.user)
context.driver.find_element_by_xpath('/html/body/form/input[2]').send_keys(context.password)
context.driver.find_element_by_xpath('/html/body/form/input[3]').click()
sleep(0.5)
context.driver.find_element_by_xpath('//*[@id="indeColuDiv"]/a[2]').click()
sleep(0.5)
@then('(修改用户名)修改用户名')
def step_impl(context):
context.driver.find_element_by_xpath('//*[@id="modiButt"]/input[3]').click()
sleep(0.5)
context.driver.find_element_by_xpath('//*[@id="userForm"]/input[1]').clear()
context.driver.find_element_by_xpath('//*[@id="userForm"]/input[1]').send_keys(context.new_name)
context.driver.find_element_by_xpath('//*[@id="userForm"]/input[2]').click()
sleep(1)
@then('(修改用户名)检查用户名是否真的修改')
def step_impl(context):
name = '名称 '+context.new_name
assert name in context.driver.page_source
@then('(修改用户名)关闭网页')
def step_impl(context):
context.driver.quit()
|
[
"1592256093@qq.com"
] |
1592256093@qq.com
|
2d50b76a94b81eb63a3e1bb14c08395998f192ab
|
c9ffbf4d2f639b18f89c287a79879d77a1592ccd
|
/labs60.py
|
5b813715d6e9cbb39691f49491d9959403697a33
|
[] |
no_license
|
mummyy/14royalmechd12018
|
34aedb5f05d03d5f0133c01597b652d51e2a76db
|
eb56ac34e299876d7f84acda3bcc55017a70976a
|
refs/heads/master
| 2020-03-27T21:54:58.062568
| 2018-12-07T07:56:33
| 2018-12-07T07:56:33
| 147,186,445
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 787
|
py
|
a=['1','2','3','4','5','6','7','8','9']
def print_Board():
print(a[0],'|',a[1],'|',a[2])
print('----------')
print(a[3],'|',a[4],'|',a[5])
print('----------')
print(a[6],'|',a[7],'|',a[8])
playerOneTurn = True
while True:
print_Board()
p=input("choose an available place:")
if(p in a):
if(a[int(p)-1]=='X' or a[int(p)-1]=='O'):
print("Place taken, choose another place...")
continue
else:
if playerOneTurn:
a[int(p)-1] ='X'
playerOneTurn = not playerOneTurn
else:
a[int(p)-1] = 'O'
playerOneTurn = not playerOneTurn
for i in(0,3,6):
if(a[i]==a[i+1] and a[i]==a[i+2]):
print("Game Over");
exit()
for i in range(3):
if(a[i]==a[i+3] and a[i]==a[i+6]):
print("Game Over...")
exit()
else:
continue
|
[
"noreply@github.com"
] |
mummyy.noreply@github.com
|
fcc7e7268b04893fa2336b169bbf1b0d9b9bcf63
|
e18a22cda2b1f0e659c620cb87f8ad25fad52c41
|
/tests/websocket_test.py
|
ea0bc189ef5bbe67a0ed2a4bd2ca6b86a6bd24d9
|
[
"MIT"
] |
permissive
|
robin-io/robin.io-py
|
b584c523e24c280a263a7c44e08c10798c62628b
|
5c09523f0968714b8f0d0074126b21355a5cba67
|
refs/heads/main
| 2023-08-27T07:15:14.241639
| 2021-11-06T20:41:40
| 2021-11-06T20:41:40
| 389,207,073
| 1
| 0
| null | 2021-11-06T20:41:41
| 2021-07-24T21:40:31
|
Python
|
UTF-8
|
Python
| false
| false
| 612
|
py
|
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import unittest
from robin import Robin
robin_test = Robin("NT-QuNtKolpzoWLahimkIjGAllEcJwGrymaVxQX", True)
class BaseCase(unittest.TestCase):
def test_connect_success(self):
robin_test.connect(user_token="IZiawwHPpHeE")
chann = robin_test.create_channel("test")
subscription = robin_test.subscribe(chann)
print(subscription)
message = robin_test.send_message({"msg": "hello"}, chann)
print(message)
if __name__ == '__main__':
unittest.main()
|
[
"olufekosamuel@gmail.com"
] |
olufekosamuel@gmail.com
|
8a2551f9303c7bf7ee35bf83b055d6346882ac83
|
894e31222c54f21d8ae08b560f56a36e9a925e6e
|
/perm_comb.py
|
aa8dcc051d2fd7520cd12984eab99cb943d17a45
|
[] |
no_license
|
alx1056/permuation-combinatoric
|
d836bb0c2866ecaa2902a906251b038f5b0fe9a8
|
8feb3594ccfe9623b211767f65766fdf9af8708e
|
refs/heads/master
| 2020-03-27T04:06:20.903553
| 2018-08-23T22:08:19
| 2018-08-23T22:08:19
| 145,911,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 622
|
py
|
import math
def permutation(n,r):
perm = (math.factorial(n)/(math.factorial(n-r)))
return perm
def combinatoric(n,r):
comb = math.factorial(n) / (math.factorial(n-r) * math.factorial(r))
return comb
ans = input("Would you like to try a combinatoric (c) or permutation (p) ?")
if ans == "P" or "p":
n = input("what is your n? ")
r = input("What is your r? ")
print("Your permutation is: " + str(permutation(int(n), int(r))))
else:
n = input("what is your n? ")
r = input("What is your r? ")
print("Your combinatoric is: " + str(combinatoric(int(n), int(r))))
|
[
"noreply@github.com"
] |
alx1056.noreply@github.com
|
2701ac289f95407695e869f6884bbbb02718b657
|
064e8f310d55b29e75a870a3c196bd6e3e06edbd
|
/ReadWrite.py
|
ffa9ea7e408e0cf752acb8ac88e142ebe8b9874f
|
[] |
no_license
|
jsdosanj/Learning-Python-1.0
|
800a4dbd258bdca01a01bdd1ec715b76d18193b5
|
99d94e185c9ad11f588adb437b07a9066dbe7a0c
|
refs/heads/master
| 2021-02-03T21:18:48.851845
| 2020-02-27T14:35:09
| 2020-02-27T14:35:09
| 243,541,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,532
|
py
|
FileName = "./Files/File1.txt"
Contents_str = ""
# FileWrite = open(FileName, "a") # a -> append
FileWrite = open(FileName, "w") # w -> write
FileWrite.write("Line1\n")
FileWrite.write("\n")
FileWrite.write("\n")
FileWrite.write("Line2\n")
FileWrite.write("\n")
FileWrite.write("\n")
FileWrite.close()
FileRead = open(FileName, "r") # r -> read
Contents_str = FileRead.read()
FileRead.close()
print(Contents_str)
print("There are " + str(len(Contents_str)) + " characters in " + FileName + ".")
Lines = Contents_str.split("\n")
for Line in Lines:
if len(Line) > 0:
print(Line)
print("\n###############\n")
FileRead = open("./Files/File1.txt", "r")
Line = FileRead.readline()
Line = Line[0:len(Line) - 1] # Get rid of "\n"
print(Line)
Line = FileRead.readline()
Line = Line[0:len(Line) - 1] # Get rid of "\n"
print(Line)
FileRead.close()
print("\n###############\n")
FileRead = open(FileName, "r")
Line = FileRead.readline()
while(Line):
if len(Line) > 1: # If the file is not empty ("\n")
Line = Line[0:len(Line) - 1] # Get rid of the trailing "\n"
print(Line)
Line = FileRead.readline()
FileRead.close()
print("\n###############\n")
FileRead = open(FileName, "r")
Lines = FileRead.readlines()
for Line in Lines:
if len(Line) > 1:
Line = Line[0:len(Line) - 1]
print(Line)
FileRead.close()
print("Therer are " + str(len(Lines)) + " lines in " + FileName + ".")
|
[
"noreply@github.com"
] |
jsdosanj.noreply@github.com
|
7cb9678900eef37341455b55ff59bd660712939e
|
2eb1a7f2cd9e3bb15be7aea454ef3d4f0cde905e
|
/calc.py
|
f96fed201ba098f2b95edaaf9a7fb6c3387b02e8
|
[] |
no_license
|
ndmchv/first-project
|
ba1575d8e82fe87f8876743ea025d5c01d3977f4
|
8a69da23e727f3a656fa3b70587024052bc05282
|
refs/heads/master
| 2020-09-05T08:56:16.157141
| 2019-11-06T18:56:31
| 2019-11-06T18:56:31
| 220,049,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 753
|
py
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
driver = webdriver.Chrome()
driver.get("https://ndmchv.github.io/first-project/")
driver.set_window_size(1050, 840)
driver.find_element(By.NAME, "a").click()
driver.find_element(By.NAME, "a").send_keys("3")
driver.find_element(By.NAME, "b").click()
driver.find_element(By.NAME, "b").send_keys("44")
driver.find_element(By.XPATH, "/html/body/input[3]").click()
assert driver.find_element(By.XPATH, "/html/body/h1").text == "blablablablab blabla"
|
[
"noemi.csato@pontsystems.eu"
] |
noemi.csato@pontsystems.eu
|
4baa96326472ed548d38a8212fecc830b6eca38c
|
6c0339c10e241fed0b409bb01c60eec2e726b5f1
|
/exr03.py
|
2168ef95617974641e8576f1f41cb5b7e0cc511c
|
[] |
no_license
|
itongit/PythonSelenium
|
3e0e2397cade6ead9ad9c2f8ff180a9f49fc1a41
|
ee8b7b3166349922d9bdef4ea2cd224238c70fce
|
refs/heads/master
| 2016-09-06T14:04:44.901709
| 2014-03-03T15:22:09
| 2014-03-03T15:22:09
| 16,912,231
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
print "I will now count my chickens:"
print "Hens", 25 + 30 /6
print "Rooters ", 100 -25 *3 % 4
print "Now I will count the eggs:"
print 3 + 2 + 1 -5 +4 % 2 -1 / 4 +6
print "Is it true that 3 +2 < 5 - 7?"
print 3 + 2 < 5 - 7
print "What is 3 + 2?", 3 +2
print "What is 5 - 7 ?", 5 - 7
print "Oh ,that's why it's False"
print "How about some more."
print "Is it greater?", 5 > -2
print "Is it greater or equal", 5 >= -2
print "Is it less or equal?", 5 <= -2
|
[
"hnuiton@gmail.com"
] |
hnuiton@gmail.com
|
37af1632d459c7092a3cd92f798ab33ae9196f2f
|
4520f56d4952c788e198ee7eee39911c9a76c60f
|
/03_Bigdata/01_Collection/03_Web_crawling/02_Reguar_Expression/99_example/3_exam.py
|
b5823c6c37267bb153e001691f64f6f0b5a58c37
|
[] |
no_license
|
SuHyeonJung/iot_python2019
|
bef8877a1cd41981ad2125291f5af44f4fd1701c
|
7860630ae28c53677a3c2761c9e997b28ea55f26
|
refs/heads/master
| 2020-06-14T22:18:27.503781
| 2019-11-08T05:50:41
| 2019-11-08T05:50:41
| 195,142,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
import re
def text_match(text):
pattern = 'ab+'
if re.search(pattern, text):
return 'Found a match'
else:
return 'Not match'
print(text_match('ac'))
print(text_match('a'))
print(text_match('ab'))
|
[
"galma94815@naver.com"
] |
galma94815@naver.com
|
f49f1cb53592cf5f6046837ea0eb160f4e614079
|
fbf3b618e2f48e98d93b7839668f6206876029dd
|
/SpiderForPics/SpiderForPicsByMultiKey.py
|
8cd3d6eb1beaa09d1120e3f07b160d783cacbb01
|
[] |
no_license
|
ProZoom/ProSpider
|
928690665cef1d7e785779ac7506c3142488b8b3
|
13d29de42708f0c7d852d2f8317daf8e82fe56e1
|
refs/heads/master
| 2021-05-05T12:37:21.018564
| 2018-01-21T05:37:45
| 2018-01-21T05:37:45
| 118,307,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,016
|
py
|
# coding:utf-8
import requests
import re
import itertools
import urllib
from Utils.baseUtils import *
from SpiderForPics.SpiderForPicsConfig import *
# decode an obfuscated image URL
def decode(url):
for key, value in str_table.items():
url = url.replace(key, value)
return url.translate(char_table)
def buildUrls(word):
word = urllib.parse.quote(word)
url = URL
urls = (url.format(word=word, pn=x) for x in itertools.count(start=0, step=60))
return urls
re_url = re.compile(r'"objURL":"(.*?)"')
# extract image URLs from the result page HTML
def resolveImgUrl(html):
imgUrls = [decode(x) for x in re_url.findall(html)]
return imgUrls
if __name__ == '__main__':
print("欢迎使用百度图片下载爬虫<多个关键字搜索>")
# choosePath = input('请输入你想保存的路径方式\n 1. 默认路径 path = imgs/ \n 2. 相对路径 path_input/path_input/ \n 3. 绝对路径,比如 D:/img/\n')
# if int(choosePath) == 3:
# dirpath = input('请输入您要保存图片的路径\n')
# elif int(choosePath) == 2:
# path = input('请输入您要保存图片的路径\n')
# dirpath = mkDir(path)
# else:
# path = 'imgs'
# dirpath = mkDir(path)
print(" ➤ 抓包的默认路径为相对目录imgs!")
path = 'imgs'
dirpath = mkDir(path)
print("➸ " + "♔" * 50 + " ☚")
word = input("请输入你要下载的图片关键词<多个关键字请用空格进行分割>:\n")
print("➸ " + "♔" * 50 + " ☚")
chooseImgType = input('请选择你要保存的图片格式\n 0. default: jpg \n 1. jpg\n 2. png\n 3. gif\n 4. 自定义\n')
chooseImgType = int(chooseImgType)
if chooseImgType == 4:
imgType = input('请输入自定义图片类型\n')
elif chooseImgType == 1:
imgType = 'jpg'
elif chooseImgType == 2:
imgType = 'png'
elif chooseImgType == 3:
imgType = 'gif'
else:
imgType = 'jpg'
print("➸ " + "♔" * 50 + " ☚")
strtag = input("请输入您要下载图片名字,最后格式为 number+' '+name.%s\n" % imgType)
print("➸ " + "♔" * 50 + " ☚")
numIMGS = input('请输入您要下载图片的数量\n')
numIMGS = int(numIMGS)
urls = buildUrls(word)
index = 0
print("➸ " + "♔" * 50 + " ☚")
for url in urls:
print("正在请求:", url)
html = requests.get(url, timeout=10).content.decode('utf-8')
imgUrls = resolveImgUrl(html)
# print(imgUrls)
if len(imgUrls) == 0: # stop when the page returns no more images
break
for url in imgUrls:
if downImgWithFormat(url, dirpath, str(index + 1) + ' ' + strtag, imgType):
index += 1
print("已下载 %s 张" % index)
# double break to exit both download loops
if index == numIMGS:
break
if index == numIMGS:
print('您一共下载了 %s 张图片' % index)
print('程序正在终止')
break
|
[
"liyang.ok@outlook.com"
] |
liyang.ok@outlook.com
|
c53cf62606d1efe10e20058d3c82affdd1926dba
|
4ed8c60a92fe7796a96ebc479ff2f76263ab17db
|
/pstp/models.py
|
2534be14910f4b3f89927da0e85fcfac096f7a19
|
[] |
no_license
|
sethwoodworth/dashboard
|
53771e7e629903185c0782a0bc0675428b47e231
|
1acd9421d02bae6d7ac04304bf956b4772b41719
|
refs/heads/master
| 2020-04-06T04:52:12.974756
| 2011-06-26T16:47:54
| 2011-06-26T16:47:54
| 936,479
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,116
|
py
|
from django.db import models
class Subject(models.Model):
#TODO
# - make last_update inform the last time this was saved
name = models.CharField(max_length = 20)
# last_update = models.DateTimeField('last updated')
def __unicode__(self):
return self.name
class Subheader(models.Model):
name = models.CharField(max_length= 20)
subject = models.ForeignKey(Subject)
def __unicode__(self):
return self.name
class Grade(models.Model):
grade = models.CharField(max_length = 30)
def __unicode__(self):
return self.grade
class GradedAttr(models.Model):
subject = models.ForeignKey(Subject)
subheader = models.ForeignKey(Subheader)
rubric_row = models.CharField(max_length = 150)
rubric_desc = models.TextField()
grade = models.ForeignKey(Grade)
def __unicode__(self):
return self.rubric_row
class CommentField(models.Model):
username = models.CharField(max_length = 20)
subject = models.ForeignKey(Subject)
input = models.TextField()
def __unicode__(self):
return self.username
|
[
"alxjrvs@gmail.com"
] |
alxjrvs@gmail.com
|
2e544843338ba65b96975e4df4a525aa8bce87dc
|
06d1b75d3651aa49c9b03dd43087bcf69ce30bd1
|
/Wavelet_Handle_33/py/WaveletFitingAllModel_F1.py
|
63484f72b4bcde8d363f7a61d09bbccaeb90c049
|
[] |
no_license
|
haven009/Tianchi_power
|
ee4c76418ec4c9b38170b943c933e7bfd4bde050
|
a5bdcef8d82a357c4c288f2dc80df290d4891ca3
|
refs/heads/master
| 2021-01-23T02:04:57.418835
| 2017-06-13T03:22:41
| 2017-06-13T03:22:41
| 92,903,852
| 10
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,127
|
py
|
import pandas as pd
import numpy as np
import pywt
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.externals import joblib
from sklearn.model_selection import KFold, train_test_split, GridSearchCV
from sklearn import metrics
import xgboost as xgb
from sklearn.feature_selection import SelectFromModel
import AllModelWaveletTransformByDOW
print "loading dataset .............."
handledataset = pd.read_csv(u'/home/haven/Tianchi_power/Tianchi_power_Hanle_addDayOfWeek.csv')
print "transform date to datetime .............."
handledataset.record_date = pd.to_datetime(handledataset.record_date)
print "select train1 dataset ............."
train1 = handledataset[(handledataset.record_date>=pd.to_datetime('2015-01-01')+pd.to_timedelta(7*59, unit='D')) & (handledataset.record_date<(pd.to_datetime('2015-01-01')+pd.to_timedelta(7*78, unit='D')))]
train1_MeanStdSum = train1.groupby(['user_id'])['power_consumption'].agg({'power_mean':np.mean, 'power_std':np.std, 'power_sum':np.sum}).reset_index()
train1_MeanStdSum['power_rate'] = train1_MeanStdSum.power_sum / train1_MeanStdSum.power_sum.sum()
train1_DOW_MeanStdSum = train1.groupby(['user_id','day_of_week'])['power_consumption'].agg({'DOW_power_mean':np.mean, 'DOW_power_std':np.std, 'DOW_power_sum':np.sum}).reset_index()
train1_DOW_MeanStdSum.groupby('day_of_week')['DOW_power_sum'].agg({'DOW_allsum':sum}).reset_index()
train1_DOW_MeanStdSumAllsum = train1_DOW_MeanStdSum.merge(train1_DOW_MeanStdSum.groupby('day_of_week')['DOW_power_sum'].agg({'DOW_allsum':sum}).reset_index(), on='day_of_week', how='left', copy=True)
train1_DOW_MeanStdSumAllsum['DOW_power_rate'] = train1_DOW_MeanStdSumAllsum.DOW_power_sum / train1_DOW_MeanStdSumAllsum.DOW_allsum
train1_mergeDataset = pd.merge(train1_DOW_MeanStdSumAllsum, train1_MeanStdSum, on='user_id', how='left')
train1_dayofweek_dataset = AllModelWaveletTransformByDOW.waveletTransform(train1)
train1_dayofweek_dataset.drop('level_2', axis=1,inplace=True)
train1_mergeDataset_add = pd.merge(train1_mergeDataset,train1_dayofweek_dataset,on=['user_id', 'day_of_week'], how='left')
#train1_mergeDataset_add = train1_mergeDataset
#train1_mergeDataset_add.drop(['DOW_power_sum', 'DOW_allsum', 'power_sum', 'DOW_power_mean', 'DOW_power_std', 'DOW_powaer_rate', 'power_mean', 'power_std', 'power_rate'], axis=1,inplace=True)
#train1_mergeDataset_add.drop(['DOW_power_sum', 'DOW_allsum', 'power_sum', 'DOW_power_mean', 'DOW_powaer_rate', 'power_mean', 'power_rate'], axis=1,inplace=True)
#train1_mergeDataset_add.drop(['DOW_power_sum', 'DOW_allsum', 'power_sum'], axis=1,inplace=True)
train1_mergeDataset_add.drop(['DOW_power_sum', 'DOW_allsum', 'power_sum', 'DOW_power_rate', 'power_rate'], axis=1,inplace=True)
train1_Y = handledataset[(handledataset.record_date>=(pd.to_datetime('2015-01-01') + pd.to_timedelta(7*78, unit='D'))) & (handledataset.record_date<(pd.to_datetime('2015-01-01') + pd.to_timedelta(7*79, unit='D')))]
final_train1 = pd.merge(train1_mergeDataset_add,train1_Y,on=['user_id', 'day_of_week'], how='left')
print final_train1.columns
print "select train2 dataset ............."
train2 = handledataset[(handledataset.record_date>=pd.to_datetime('2015-01-01')+pd.to_timedelta(7*60, unit='D')) & (handledataset.record_date<(pd.to_datetime('2015-01-01')+pd.to_timedelta(7*79, unit='D')))]
train2_MeanStdSum = train2.groupby(['user_id'])['power_consumption'].agg({'power_mean':np.mean, 'power_std':np.std, 'power_sum':np.sum}).reset_index()
train2_MeanStdSum['power_rate'] = train2_MeanStdSum.power_sum / train2_MeanStdSum.power_sum.sum()
train2_DOW_MeanStdSum = train2.groupby(['user_id','day_of_week'])['power_consumption'].agg({'DOW_power_mean':np.mean, 'DOW_power_std':np.std, 'DOW_power_sum':np.sum}).reset_index()
train2_DOW_MeanStdSum.groupby('day_of_week')['DOW_power_sum'].agg({'DOW_allsum':sum}).reset_index()
train2_DOW_MeanStdSumAllsum = train2_DOW_MeanStdSum.merge(train2_DOW_MeanStdSum.groupby('day_of_week')['DOW_power_sum'].agg({'DOW_allsum':sum}).reset_index(), on='day_of_week', how='left', copy=True)
train2_DOW_MeanStdSumAllsum['DOW_power_rate'] = train2_DOW_MeanStdSumAllsum.DOW_power_sum / train2_DOW_MeanStdSumAllsum.DOW_allsum
train2_mergeDataset = pd.merge(train2_DOW_MeanStdSumAllsum, train2_MeanStdSum, on='user_id', how='left')
train2_dayofweek_dataset = AllModelWaveletTransformByDOW.waveletTransform(train2)
train2_dayofweek_dataset.drop('level_2', axis=1,inplace=True)
train2_mergeDataset_add = pd.merge(train2_mergeDataset, train2_dayofweek_dataset, on=['user_id', 'day_of_week'], how='left')
#train2_mergeDataset_add = train2_mergeDataset
#train2_mergeDataset_add.drop(['DOW_power_sum', 'DOW_allsum', 'power_sum', 'DOW_power_mean', 'DOW_power_std', 'DOW_powaer_rate', 'power_mean', 'power_std', 'power_rate'], axis=1,inplace=True)
#train2_mergeDataset_add.drop(['DOW_power_sum', 'DOW_allsum', 'power_sum', 'DOW_power_mean', 'DOW_powaer_rate', 'power_mean', 'power_rate'], axis=1,inplace=True)
#train2_mergeDataset_add.drop(['DOW_power_sum', 'DOW_allsum', 'power_sum'], axis=1,inplace=True)
train2_mergeDataset_add.drop(['DOW_power_sum', 'DOW_allsum', 'power_sum', 'DOW_power_rate', 'power_rate'], axis=1,inplace=True)
train2_Y = handledataset[(handledataset.record_date>=(pd.to_datetime('2015-01-01') + pd.to_timedelta(7*79, unit='D'))) & (handledataset.record_date<(pd.to_datetime('2015-01-01') + pd.to_timedelta(7*80, unit='D')))]
final_train2 = pd.merge(train2_mergeDataset_add,train2_Y,on=['user_id', 'day_of_week'], how='left')
print "select train3 dataset ............."
train3 = handledataset[(handledataset.record_date>=pd.to_datetime('2015-01-01')+pd.to_timedelta(7*61, unit='D')) & (handledataset.record_date<(pd.to_datetime('2015-01-01')+pd.to_timedelta(7*80, unit='D')))]
train3_MeanStdSum = train3.groupby(['user_id'])['power_consumption'].agg({'power_mean':np.mean, 'power_std':np.std, 'power_sum':np.sum}).reset_index()
train3_MeanStdSum['power_rate'] = train3_MeanStdSum.power_sum / train3_MeanStdSum.power_sum.sum()
train3_DOW_MeanStdSum = train3.groupby(['user_id','day_of_week'])['power_consumption'].agg({'DOW_power_mean':np.mean, 'DOW_power_std':np.std, 'DOW_power_sum':np.sum}).reset_index()
train3_DOW_MeanStdSum.groupby('day_of_week')['DOW_power_sum'].agg({'DOW_allsum':sum}).reset_index()
train3_DOW_MeanStdSumAllsum = train3_DOW_MeanStdSum.merge(train3_DOW_MeanStdSum.groupby('day_of_week')['DOW_power_sum'].agg({'DOW_allsum':sum}).reset_index(), on='day_of_week', how='left', copy=True)
train3_DOW_MeanStdSumAllsum['DOW_power_rate'] = train3_DOW_MeanStdSumAllsum.DOW_power_sum / train3_DOW_MeanStdSumAllsum.DOW_allsum
train3_mergeDataset = pd.merge(train3_DOW_MeanStdSumAllsum, train3_MeanStdSum, on='user_id', how='left')
train3_dayofweek_dataset = AllModelWaveletTransformByDOW.waveletTransform(train3)
train3_dayofweek_dataset.drop('level_2', axis=1,inplace=True)
train3_mergeDataset_add = pd.merge(train3_mergeDataset,train3_dayofweek_dataset,on=['user_id', 'day_of_week'], how='left')
#train3_mergeDataset_add = train3_mergeDataset
#train3_mergeDataset_add.drop(['DOW_power_sum', 'DOW_allsum', 'power_sum', 'DOW_power_mean', 'DOW_power_std', 'DOW_powaer_rate', 'power_mean', 'power_std', 'power_rate'], axis=1,inplace=True)
#train3_mergeDataset_add.drop(['DOW_power_sum', 'DOW_allsum', 'power_sum', 'DOW_power_mean', 'DOW_powaer_rate', 'power_mean', 'power_rate'], axis=1,inplace=True)
#train3_mergeDataset_add.drop(['DOW_power_sum', 'DOW_allsum', 'power_sum'], axis=1,inplace=True)
train3_mergeDataset_add.drop(['DOW_power_sum', 'DOW_allsum', 'power_sum', 'DOW_power_rate', 'power_rate'], axis=1,inplace=True)
train3_Y = handledataset[(handledataset.record_date>=(pd.to_datetime('2015-01-01') + pd.to_timedelta(7*80, unit='D'))) & (handledataset.record_date<(pd.to_datetime('2015-01-01') + pd.to_timedelta(7*81, unit='D')))]
final_train3 = pd.merge(train3_mergeDataset_add,train3_Y,on=['user_id', 'day_of_week'], how='left')
print "select train4 dataset ............."
train4 = handledataset[(handledataset.record_date>=pd.to_datetime('2015-01-01')+pd.to_timedelta(7*62, unit='D')) & (handledataset.record_date<(pd.to_datetime('2015-01-01')+pd.to_timedelta(7*81, unit='D')))]
train4_MeanStdSum = train4.groupby(['user_id'])['power_consumption'].agg({'power_mean':np.mean, 'power_std':np.std, 'power_sum':np.sum}).reset_index()
train4_MeanStdSum['power_rate'] = train4_MeanStdSum.power_sum / train4_MeanStdSum.power_sum.sum()
train4_DOW_MeanStdSum = train4.groupby(['user_id','day_of_week'])['power_consumption'].agg({'DOW_power_mean':np.mean, 'DOW_power_std':np.std, 'DOW_power_sum':np.sum}).reset_index()
train4_DOW_MeanStdSum.groupby('day_of_week')['DOW_power_sum'].agg({'DOW_allsum':sum}).reset_index()
train4_DOW_MeanStdSumAllsum = train4_DOW_MeanStdSum.merge(train4_DOW_MeanStdSum.groupby('day_of_week')['DOW_power_sum'].agg({'DOW_allsum':sum}).reset_index(), on='day_of_week', how='left', copy=True)
train4_DOW_MeanStdSumAllsum['DOW_power_rate'] = train4_DOW_MeanStdSumAllsum.DOW_power_sum / train4_DOW_MeanStdSumAllsum.DOW_allsum
train4_mergeDataset = pd.merge(train4_DOW_MeanStdSumAllsum, train4_MeanStdSum, on='user_id', how='left')
train4_dayofweek_dataset = AllModelWaveletTransformByDOW.waveletTransform(train4)
train4_dayofweek_dataset.drop('level_2', axis=1,inplace=True)
train4_mergeDataset_add = pd.merge(train4_mergeDataset,train4_dayofweek_dataset,on=['user_id', 'day_of_week'], how='left')
#train4_mergeDataset_add = train4_mergeDataset
#train4_mergeDataset_add.drop(['DOW_power_sum', 'DOW_allsum', 'power_sum', 'DOW_power_mean', 'DOW_power_std', 'DOW_powaer_rate', 'power_mean', 'power_std', 'power_rate'], axis=1,inplace=True)
#train4_mergeDataset_add.drop(['DOW_power_sum', 'DOW_allsum', 'power_sum', 'DOW_power_mean', 'DOW_powaer_rate', 'power_mean', 'power_rate'], axis=1,inplace=True)
#train4_mergeDataset_add.drop(['DOW_power_sum', 'DOW_allsum', 'power_sum'], axis=1,inplace=True)
train4_mergeDataset_add.drop(['DOW_power_sum', 'DOW_allsum', 'power_sum', 'DOW_power_rate', 'power_rate'], axis=1,inplace=True)
train4_Y = handledataset[(handledataset.record_date>=(pd.to_datetime('2015-01-01') + pd.to_timedelta(7*81, unit='D'))) & (handledataset.record_date<(pd.to_datetime('2015-01-01') + pd.to_timedelta(7*82, unit='D')))]
final_train4 = pd.merge(train4_mergeDataset_add,train4_Y,on=['user_id', 'day_of_week'], how='left')
print "select test dataset ............."
test = handledataset[(handledataset.record_date>=pd.to_datetime('2015-01-01')+pd.to_timedelta(7*63, unit='D')) & (handledataset.record_date<(pd.to_datetime('2015-01-01')+pd.to_timedelta(7*82, unit='D')))]
test_MeanStdSum = test.groupby(['user_id'])['power_consumption'].agg({'power_mean':np.mean, 'power_std':np.std, 'power_sum':np.sum}).reset_index()
test_MeanStdSum['power_rate'] = test_MeanStdSum.power_sum / test_MeanStdSum.power_sum.sum()
test_DOW_MeanStdSum = test.groupby(['user_id','day_of_week'])['power_consumption'].agg({'DOW_power_mean':np.mean, 'DOW_power_std':np.std, 'DOW_power_sum':np.sum}).reset_index()
test_DOW_MeanStdSum.groupby('day_of_week')['DOW_power_sum'].agg({'DOW_allsum':sum}).reset_index()
test_DOW_MeanStdSumAllsum = test_DOW_MeanStdSum.merge(test_DOW_MeanStdSum.groupby('day_of_week')['DOW_power_sum'].agg({'DOW_allsum':sum}).reset_index(), on='day_of_week', how='left', copy=True)
test_DOW_MeanStdSumAllsum['DOW_power_rate'] = test_DOW_MeanStdSumAllsum.DOW_power_sum / test_DOW_MeanStdSumAllsum.DOW_allsum
test_mergeDataset = pd.merge(test_DOW_MeanStdSumAllsum, test_MeanStdSum, on='user_id', how='left')
test_dayofweek_dataset = AllModelWaveletTransformByDOW.waveletTransform(test)
test_dayofweek_dataset.drop('level_2', axis=1,inplace=True)
test_mergeDataset_add = pd.merge(test_mergeDataset,test_dayofweek_dataset,on=['user_id', 'day_of_week'], how='left')
#test_mergeDataset_add = test_mergeDataset
#test_mergeDataset_add.drop(['DOW_power_sum', 'DOW_allsum', 'power_sum', 'DOW_power_mean', 'DOW_power_std', 'DOW_powaer_rate', 'power_mean', 'power_std', 'power_rate'], axis=1,inplace=True)
#test_mergeDataset_add.drop(['DOW_power_sum', 'DOW_allsum', 'power_sum', 'DOW_power_mean', 'DOW_powaer_rate', 'power_mean', 'power_rate'], axis=1,inplace=True)
#test_mergeDataset_add.drop(['DOW_power_sum', 'DOW_allsum', 'power_sum'], axis=1,inplace=True)
test_mergeDataset_add.drop(['DOW_power_sum', 'DOW_allsum', 'power_sum', 'DOW_power_rate', 'power_rate'], axis=1,inplace=True)
test_Y = handledataset[(handledataset.record_date>=(pd.to_datetime('2015-01-01') + pd.to_timedelta(7*82, unit='D'))) & (handledataset.record_date<(pd.to_datetime('2015-01-01') + pd.to_timedelta(7*83, unit='D')))]
final_test = pd.merge(test_mergeDataset_add,test_Y,on=['user_id', 'day_of_week'], how='left')
print "make all train dataset ........."
#train = pd.concat([final_train1, final_train2, final_train3, final_train4], axis=0, ignore_index=True)
train1_matrix = final_train1.drop(['user_id', 'day_of_week', 'record_date'], axis=1).as_matrix()
train2_matrix = final_train2.drop(['user_id', 'day_of_week', 'record_date'], axis=1).as_matrix()
train3_matrix = final_train3.drop(['user_id', 'day_of_week', 'record_date'], axis=1).as_matrix()
train4_matrix = final_train4.drop(['user_id', 'day_of_week', 'record_date'], axis=1).as_matrix()
#final_train_matrix = train.drop(['user_id', 'day_of_week', 'record_date'], axis=1).as_matrix()
final_train_matrix = np.row_stack((train1_matrix, train2_matrix, train3_matrix, train4_matrix))
train_X = final_train_matrix[:,:-1]
train_Y = final_train_matrix[:,-1]
print "make test datset"
final_test_matrix = final_test.drop(['user_id', 'day_of_week', 'record_date'], axis=1).as_matrix()
test_matrix_X = final_test_matrix[:,:-1]
test_matrix_Y = final_test_matrix[:,-1]
print("hyper-parameter optimization...................")
xgb_model = xgb.XGBRegressor()
params = {'max_depth':[2,3,4,5,6], 'learning_rate':[0.05,0.1,0.15], 'n_estimators':[50,100,150,200], 'max_delta_step':[1] ,
'objective':['reg:linear', 'reg:gamma','reg:tweedie',]}
# , 'colsample_bytree':[1], 'colsample_bylevel':[1], 'reg_alpha':[0], 'reg_lambda':[1], 'scale_pos_weight':[1], 'base_score':[0.5], 'seed':[0], 'missing':[None],'nthread':[-1], 'gamma':[0], 'min_child_weight':[1], , 'subsample':[0.5,0.8,1]
gridsearchcvRegression = GridSearchCV(xgb_model, params, iid=True,scoring=None, n_jobs=1, refit=True, verbose=2, return_train_score=True)
print "optimization fitting ..............."
gridsearchcvRegression.fit(train_X,train_Y)
print "/n"
print "Best Score : ",gridsearchcvRegression.best_score_
print "Best Params : ",gridsearchcvRegression.best_params_
print "predict fiting............."
xgb_model = xgb.XGBRegressor(n_estimators=gridsearchcvRegression.best_params_['n_estimators'],max_depth=gridsearchcvRegression.best_params_['max_depth'],objective=gridsearchcvRegression.best_params_['objective'], max_delta_step=1, learning_rate=gridsearchcvRegression.best_params_['learning_rate'],silent=False)
xgb_model.fit(train_X, train_Y)
input_flag = raw_input("Do you want to save the model by train? Input[y/n]:")
if input_flag == 'y':
joblib.dump(xgb_model, "../model/F1_xgb_model.m")
print "predict ............"
predict_Y = xgb_model.predict(test_matrix_X)
predict_Y = np.round(predict_Y).astype(int)
print "MSE = ",metrics.mean_squared_error(test_matrix_Y, predict_Y)
summ =0
for i in range(len(test_matrix_Y)):
if (predict_Y[i]+test_matrix_Y[i]) == 0:
continue
summ+=abs((predict_Y[i]-test_matrix_Y[i])/(predict_Y[i]+test_matrix_Y[i]))
meansum = summ/len(test_matrix_Y)
print "MeanSum = ",meansum
print "corrcoef = ",np.corrcoef(predict_Y, test_matrix_Y)
print "create next datasets ..........."
train1_matrix_X = train1_matrix[:,:-1]
train1_predict_Y = xgb_model.predict(train1_matrix_X)
train1_Y['power_consumption'] = train1_predict_Y
train1AndPredictY = pd.concat([train1, train1_Y], axis=0, ignore_index=True)
train1AndPredictY.to_csv(u'../F1_Result/train1AndPredictY.csv', header=True, index=False)
train2_matrix_X = train2_matrix[:,:-1]
train2_predict_Y = xgb_model.predict(train2_matrix_X)
train2_Y['power_consumption'] = train2_predict_Y
train2AndPredictY = pd.concat([train2, train2_Y], axis=0, ignore_index=True)
train2AndPredictY.to_csv(u'../F1_Result/train2AndPredictY.csv', header=True, index=False)
train3_matrix_X = train3_matrix[:,:-1]
train3_predict_Y = xgb_model.predict(train3_matrix_X)
train3_Y['power_consumption'] = train3_predict_Y
train3AndPredictY = pd.concat([train3, train3_Y], axis=0, ignore_index=True)
train3AndPredictY.to_csv(u'../F1_Result/train3AndPredictY.csv', header=True, index=False)
train4_matrix_X = train4_matrix[:,:-1]
train4_predict_Y = xgb_model.predict(train4_matrix_X)
train4_Y['power_consumption'] = train4_predict_Y
train4AndPredictY = pd.concat([train4, train4_Y], axis=0, ignore_index=True)
train4AndPredictY.to_csv(u'../F1_Result/train4AndPredictY.csv', header=True, index=False)
test_Y['power_consumption'] = predict_Y
testAndPredictY = pd.concat([test, test_Y], axis=0, ignore_index=True)
testAndPredictY.to_csv(u'../F1_Result/testAndPredictY.csv', header=True, index=False)
|
[
"13021093200@163.com"
] |
13021093200@163.com
|
9abd5c922f0ed5f3fd7b3019a1b07f912904db92
|
bcf2ac8e565ceb2ddae87b8194afe21fdee6a9b0
|
/network/transpose_conv_bn_relu.py
|
ddc8703b4642ba68362bef74179507caea4186b1
|
[] |
no_license
|
xfcy6/ReversibleImage
|
d1644a426aacb4b20d1e2a09160cf1b59a07e13a
|
ff3f4823b06256a6a8a206050a670556f28f792c
|
refs/heads/master
| 2022-12-22T04:15:38.356514
| 2020-09-17T13:10:14
| 2020-09-17T13:10:14
| 295,636,625
| 0
| 0
| null | 2020-09-15T06:34:22
| 2020-09-15T06:34:21
| null |
UTF-8
|
Python
| false
| false
| 647
|
py
|
import torch.nn as nn
class TransposeConvBNRelu(nn.Module):
"""
A sequence of Transposed Convolution, Batch Normalization, and ReLU activation layers
"""
def __init__(self, channels_in, channels_out, stride=1):
super(TransposeConvBNRelu, self).__init__()
self.layers = nn.Sequential(
nn.ConvTranspose2d(channels_in, channels_out, kernel_size=3, stride=stride, padding=1),
nn.BatchNorm2d(channels_out),
nn.ReLU(inplace=True)
)
# nn.ConvTranspose2d(channels_in, channels_out, kernel_size=3, stride=stride, padding=1)
def forward(self, x):
return self.layers(x)
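# Illustrative usage (a sketch only; assumes torch is installed, shapes shown for stride=1):
#   import torch
#   block = TransposeConvBNRelu(channels_in=64, channels_out=32)
#   y = block(torch.randn(1, 64, 16, 16))  # y.shape == (1, 32, 16, 16)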
|
[
"shinydotcom@163.com"
] |
shinydotcom@163.com
|
a2253899a346aaccd40f4bf9fdc2b3cba65b90bd
|
cabfcc9d0bddfa610afa701c43f197dbe5b542e0
|
/scorecard.py
|
fe4acd6043bc10d6d314418c096254f00491be1b
|
[] |
no_license
|
KRiteshchowdary/myfiles
|
2d120a46b8b786a526e566155b77b7c7fddb1aa5
|
6a1a823e288fd8669d46cf1d1644dc7e27f8ac71
|
refs/heads/master
| 2020-04-05T15:02:40.140641
| 2018-11-10T05:16:47
| 2018-11-10T05:16:47
| 156,949,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 299
|
py
|
up = {'tim':'cook','steve':'jobs','elon':'musk','satyam':'nadela'}
user_id = raw_input("user_id please " )
if user_id not in up:
print("invalid user_id")
else:
password = raw_input("Enter password")
if(password == up[user_id]):
print("Access Granted")
else:
print("Access Denied")
|
[
"noreply@github.com"
] |
KRiteshchowdary.noreply@github.com
|
67cb3304011ad3481c46db49247be7cbf1fe3fed
|
80b6ad7f54edfca7210efe5b86339c109d961f95
|
/module-1-create-blockchain/blockchain.py
|
79db7dc785115ed45a57c60ee5aef6f36b61fbaf
|
[] |
no_license
|
jamesevag/blockchain-mining-app
|
054603a87025ebcddca0529948a5e5722456ce5e
|
d9ac3eff7ac0c1a60ef48fc07139aa0a8993e023
|
refs/heads/master
| 2020-04-09T12:45:55.880125
| 2018-12-04T13:43:33
| 2018-12-04T13:43:33
| 160,363,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,429
|
py
|
# Module 1 - Create a Blockchain
# To be installed:
# Flask==0.12.2: pip install Flask==0.12.2
# Postman HTTP Client: https://www.getpostman.com/
# Importing the libraries
import datetime
import hashlib
import json
from flask import Flask, jsonify
# Part 1 - Building a Blockchain
"""
Created on Tue Dec 4 09:50:02 2018
@author: evangelopoulos
"""
class Blockchain:
def __init__(self):
self.chain = []
self.create_block(proof = 1, previous_hash = '0')
def create_block(self, proof, previous_hash):
block = {'index': len(self.chain) + 1,
'timestamp': str(datetime.datetime.now()),
'proof': proof,
'previous_hash': previous_hash}
self.chain.append(block)
return block
def get_previous_block(self):
return self.chain[-1]
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = hashlib.sha256(str(new_proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
return new_proof
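# Illustrative: proof_of_work tries new_proof = 1, 2, 3, ... until
# sha256(str(new_proof**2 - previous_proof**2)) has a hex digest starting with
# '0000'; on average roughly 1 in 16**4 = 65536 candidates satisfies this.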
def hash(self, block):
encoded_block = json.dumps(block, sort_keys = True).encode()
return hashlib.sha256(encoded_block).hexdigest()
def is_chain_valid(self, chain):
previous_block = chain[0]
block_index = 1
while block_index < len(chain):
block = chain[block_index]
if block['previous_hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
hash_operation = hashlib.sha256(str(proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] != '0000':
return False
previous_block = block
block_index += 1
return True
# Part 2 - Mining our Blockchain
# Creating a Web App
app = Flask(__name__)
# Creating a Blockchain
blockchain = Blockchain()
# Mining a new block
@app.route('/mine_block', methods = ['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
block = blockchain.create_block(proof, previous_hash)
response = {'message': 'Congratulations, you just mined a block!',
'index': block['index'],
'timestamp': block['timestamp'],
'proof': block['proof'],
'previous_hash': block['previous_hash']}
return jsonify(response), 200
# Getting the full Blockchain
@app.route('/get_chain', methods = ['GET'])
def get_chain():
response = {'chain': blockchain.chain,
'length': len(blockchain.chain)}
return jsonify(response), 200
# Checking if the Blockchain is valid
@app.route('/is_valid', methods = ['GET'])
def is_valid():
is_valid = blockchain.is_chain_valid(blockchain.chain)
if is_valid:
response = {'message': 'All good. The Blockchain is valid.'}
else:
response = {'message': 'Houston, we have a problem. The Blockchain is not valid.'}
return jsonify(response), 200
# Running the app
app.run(host = '0.0.0.0', port = 5000)
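# Illustrative interaction once the server is running (a sketch only; assumes the
# `requests` package and the default host/port passed to app.run above):
#   import requests
#   requests.get('http://localhost:5000/mine_block').json()
#   requests.get('http://localhost:5000/get_chain').json()
#   requests.get('http://localhost:5000/is_valid').json()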
|
[
"evangelopoulos@ckc.de"
] |
evangelopoulos@ckc.de
|
1dfbcde19d32b44caa566c604a5d48f049345777
|
d49d46f7c4b97098893ff38c17044b789b66c66e
|
/change23.py
|
90063020707d52c034b61f32cb15bb720fd62522
|
[] |
no_license
|
luolingyu/shiyanlou
|
5120372cc0649a5678b2d67bd3368ce7a5a42e8d
|
5a1848236b10debd90c4cfe8199d2294b7f316de
|
refs/heads/master
| 2020-03-13T13:48:08.509265
| 2018-07-16T03:46:09
| 2018-07-16T03:46:09
| 131,145,973
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
def data_plot():
df = pd.read_json('./Code/user_study.json')
data = df.groupby('user_id').sum().head(20000)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.set_title('StudyData')
ax.set_xlabel('User ID')
ax.set_ylabel('Study Time')
ax.plot(data.index,data.minutes)
fig.show()
return ax
if __name__=='__main__':
data_plot()
|
[
"1324013683@qq.com"
] |
1324013683@qq.com
|
d794340af0cf032252f4714625641533e2874df9
|
5967300de22f73c0d26377cf698afa7da1c161ae
|
/{{cookiecutter.project_slug}}/test/test_unit.py
|
b8040f0b3fa5b3b2ac171fc89358125405d02d1c
|
[
"MIT"
] |
permissive
|
blueskyideas/cookiecutter-python
|
2b966d9a24236cc8445474fdffc6d8111b242411
|
754c1c7f457e3cd13918308a4b946eaeac5b2529
|
refs/heads/master
| 2021-09-15T00:38:13.373862
| 2018-05-23T01:12:50
| 2018-05-23T01:12:50
| 104,700,380
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 99
|
py
|
import pytest
from .context import {{ cookiecutter.project_slug }}
def test_a():
assert 1 == 2
|
[
"jeremyarr@gmail.com"
] |
jeremyarr@gmail.com
|
25b8e987ada61731b8253208098a1fe699cec88d
|
7b2ee8dc4dea3f96bdf5b8c9a7fdf7f3a7428da4
|
/plot_confusion_matrix.py
|
6e13d57b7c1b089e408025e9ab788751305a0869
|
[] |
no_license
|
NathanVenos/Classifying_LEGO_Value_Retention
|
a33de67a5dae130151cfe5eb000286896af30277
|
31c5e89be4455da5e0c1becd063b67237586d899
|
refs/heads/master
| 2020-09-11T06:15:22.995835
| 2020-01-16T18:34:33
| 2020-01-16T18:34:33
| 221,967,566
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,169
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
def plot_confusion_matrix(y_true, y_pred, classes,
normalize=False,
title=None,
cmap=plt.cm.Blues,
figure=None,
axis=None):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
# Only use the labels that appear in the data
classes = classes[unique_labels(y_true, y_pred)]
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# Create fig and ax if neither was passed
if figure is None and axis is None:
fig, ax = plt.subplots()
elif figure is None or axis is None:
raise ValueError('Pass both figure and axis, or neither')
else:
fig, ax = figure, axis
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
return ax
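# Illustrative usage (hypothetical labels, added for clarity; `classes` must be
# indexable by the integer label values):
#   y_true = np.array([0, 1, 1, 0, 1])
#   y_pred = np.array([0, 1, 0, 0, 1])
#   class_names = np.array(['negative', 'positive'])
#   plot_confusion_matrix(y_true, y_pred, class_names, normalize=True)
#   plt.show()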
|
[
"nathanvenos@gmail.com"
] |
nathanvenos@gmail.com
|
30df9c3e6107c42a45cb51396d0d85bb418a2609
|
219194b550158689727452fc63cb9ab9ab2719d1
|
/task_20/urls.py
|
8b04b3420fe765b9bec6d259b8e19f5504d98138
|
[] |
no_license
|
RashaAlorabi/task_20
|
a0d50622273f7831acf5486f38780631fd394014
|
01277ade8902ec4ca9eb381e26c9e0672bd8e6dc
|
refs/heads/master
| 2020-04-24T03:24:01.847485
| 2019-02-20T13:28:02
| 2019-02-20T13:28:02
| 171,668,804
| 0
| 1
| null | 2019-02-20T12:26:00
| 2019-02-20T12:26:00
| null |
UTF-8
|
Python
| false
| false
| 2,597
|
py
|
"""task_20 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from restaurants import views
from api.views import (
RestaurantListView,
RestaurantDetailView,
RestaurantUpdateView,
RestaurantDeleteView,
RestaurantCreateView,
)
urlpatterns = [
path('admin/', admin.site.urls),
path('restaurants/list/',views.restaurant_list ,name='restaurant-list'),
path('restaurants/favorite/',views.favorite_restaurants ,name='favorite-restaurant'),
path('restaurants/<int:restaurant_id>/detail/',views.restaurant_detail ,name='restaurant-detail'),
path('restaurants/create/',views.restaurant_create ,name='restaurant-create'),
path('restaurants/<int:restaurant_id>/update/',views.restaurant_update ,name='restaurant-update'),
path('restaurants/<int:restaurant_id>/delete/',views.restaurant_delete ,name='restaurant-delete'),
path('restaurants/<int:restaurant_id>/favorite/',views.restaurant_favorite ,name='restaurant-favorite'),
path('restaurants/<int:restaurant_id>/item/add/',views.item_create ,name='item-create'),
path('signup/',views.signup ,name='signup'),
path('signin/',views.signin ,name='signin'),
path('signout/',views.signout ,name='signout'),
path('no-access/',views.no_access ,name='no-access'),
path('api/list/', RestaurantListView.as_view(), name='api-list'),
path('api/create/', RestaurantCreateView.as_view(), name='api-create'),
path('api/<int:restaurant_id>/detail/', RestaurantDetailView.as_view(), name='api-detail'),
path('api/<int:restaurant_id>/update/', RestaurantUpdateView.as_view(), name='api-update'),
path('api/<int:restaurant_id>/delete/', RestaurantDeleteView.as_view(), name='api-delete'),
]
if settings.DEBUG:
urlpatterns+=static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns+=static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"hamzamakia@gmail.com"
] |
hamzamakia@gmail.com
|
a3d4a897e93e9d0b048b5fe950873ab873cc4250
|
3d5e2e39a6b164fdbb27d232af08f0abae15f931
|
/scripts/vis_inputs.py
|
3a5988e1c242aacf7bb2ae220005aec29a696042
|
[] |
no_license
|
siyeopyoon/template_ffd
|
431c46c907fa57d4c6de658be691d3d9efef739e
|
0f9111ffb340449ad87fbf52220273a15819ec4f
|
refs/heads/master
| 2021-09-20T08:48:44.426004
| 2018-08-07T10:58:18
| 2018-08-07T10:58:18
| 266,313,119
| 1
| 0
| null | 2020-05-23T10:27:49
| 2020-05-23T10:27:48
| null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
#!/usr/bin/python
def main(model_id, mode):
from template_ffd.model import get_builder
builder = get_builder(model_id)
builder.vis_inputs()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('model_id', help='id of model defined in params')
parser.add_argument(
'-m', '--mode', default='train', choices=['train', 'eval', 'infer'])
args = parser.parse_args()
main(args.model_id, args.mode)
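# Illustrative command-line invocation (the model id is a placeholder; use an id
# actually defined in this project's params):
#   python vis_inputs.py some_model_id -m train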
|
[
"thedomjack@gmail.com"
] |
thedomjack@gmail.com
|
f54b39d5c8881c8cec7fac0f99b7b4bc9c51323d
|
9c68dc3cfae4d264e667ea7d8be9e1e9e9e74348
|
/djasana/migrations/0009_auto_20180907_0850.py
|
df5fb8cc741e35e54f231adcc32ae6029ef85dc6
|
[
"MIT"
] |
permissive
|
zaptim/django-asana
|
9b53584e68b88ce0f5c1b1ae0a2cff595452e431
|
ab6d7166f28945292d5632ef766fc13cc2ea4cf3
|
refs/heads/master
| 2020-03-21T19:35:29.486609
| 2018-09-19T12:31:14
| 2018-09-19T12:31:14
| 138,958,510
| 0
| 0
|
MIT
| 2018-06-28T02:54:06
| 2018-06-28T02:54:05
| null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
# Generated by Django 2.1 on 2018-09-07 08:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djasana', '0008_auto_20180906_1407'),
]
operations = [
migrations.AlterField(
model_name='story',
name='target',
field=models.BigIntegerField(db_index=True, null=True),
),
]
|
[
"tim@zapatacomputing.com"
] |
tim@zapatacomputing.com
|
7b7fd3da8f6a6ee41cc78c075dff5098dfcfa7ce
|
fb3d54b24081ed814ce8e25b5dc404d4b0ea839e
|
/Python2-Example100/Exercise094.py
|
2535992f32d6de630fff356cb7a9227a5267460d
|
[] |
no_license
|
zuolinye/Python-Learing
|
7d55f062a03aec6270b74f63ce2da84b32d2f2df
|
8eaac436a5c2647a06635cac1064824712af52cb
|
refs/heads/master
| 2022-01-14T05:00:07.752781
| 2019-07-02T13:33:42
| 2019-07-02T13:33:42
| 112,070,315
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,298
|
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Topic: time-function example 4: a guessing game that measures how quickly a person reacts.
if __name__ == '__main__':
import time
import random
play_it = raw_input('do you want to play it.(\'y\' or \'n\')')
while play_it == 'y':
c = raw_input('input a character:\n')
i = random.randint(0,2**32) % 100
print 'please input number you guess:\n'
start = time.clock()
a = time.time()
guess = int(raw_input('input your guess:\n'))
while guess != i:
if guess > i:
print 'please input a little smaller'
guess = int(raw_input('input your guess:\n'))
else:
print 'please input a little bigger'
guess = int(raw_input('input your guess:\n'))
end = time.clock()
b = time.time()
var = (end - start) / 18.2
print var
# print 'It took you %6.3f seconds' % (b - a)
if var < 15:
print 'you are very clever!'
elif var < 25:
print 'you are normal!'
else:
print 'you are stupid!'
print 'Congradulations'
print 'The number you guess is %d' % i
play_it = raw_input('do you want to play it.')
|
[
"33795120+zuolinye@users.noreply.github.com"
] |
33795120+zuolinye@users.noreply.github.com
|
fb5a845898f4defbdf1187d17eadfd18ebab8342
|
6ca787fc18673d032a324a10a429d48bf257e11e
|
/examples/multidomain3d/SConscript
|
dacf16754a63b80ac93240770757d2f65d2fc6e6
|
[] |
no_license
|
maierbn/opendihu-snapshot
|
714375c85264e2b20ec808daadd79eae540ece75
|
e726802b3efed87a2d0ac5deb5389fbe070e73a2
|
refs/heads/master
| 2020-04-07T09:41:15.815678
| 2018-11-19T18:30:18
| 2018-11-19T18:30:18
| 158,261,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 715
|
# This script declares to SCons how to compile the example.
# It has to be called from a SConstruct file.
# The 'env' object is passed from there and contains further specification like directory and debug/release flags.
#
# Note: If you're creating a new example and copied this file, adjust the desired name of the executable in the 'target' parameter of env.Program.
Import('env') # import Environment object from calling SConstruct
# if the option no_examples was given, quit the script
if not env['no_examples']:
# create the main executable
#env.Program(target = 'multidomain', source = "src/multidomain.cpp")
env.Program(target = 'multidomain_strang', source = "src/multidomain_strang.cpp")
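# A minimal sketch of the calling side is given here for orientation; it is not a
# file from this repository. Only the path and the 'no_examples' option are taken
# from the code above; everything else (flags, build settings) is an assumption.
# The top-level SConstruct would look roughly like:
#
#     env = Environment()                              # compiler flags, debug/release settings, etc.
#     env['no_examples'] = False                       # option checked by this SConscript
#     Export('env')                                    # makes 'env' available to Import('env') above
#     SConscript('examples/multidomain3d/SConscript')  # hands control to this script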
|
[
"maier.bn@gmail.com"
] |
maier.bn@gmail.com
|
|
674d0eb07f6adf1242d6178b627ef73d1d8ddf0e
|
e6428366f4392cb5be9fb16e98f04fa73ecb819a
|
/lambda_handler.py
|
bc53b76284df0c9c5153584ad1844df2046c0cb0
|
[] |
no_license
|
skambuilds/ColorPicker
|
d30c5d29c83c237b64fe8d9486168443a7c47f20
|
d0a98f97ce5340cbe46c0761cef9f6ebd24ad01e
|
refs/heads/master
| 2020-07-20T13:28:36.666772
| 2020-04-28T15:37:10
| 2020-04-28T15:37:10
| 206,650,323
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,530
|
py
|
import json
import cv2
import numpy as np
import paho.mqtt.client as mqtt
import boto3
from botocore.exceptions import ClientError
# jpeg layers color
BLUE, GREEN, RED = 0, 1, 2
mqtt_broker_ip = "insert public ip of the broker"  # must be set to a reachable MQTT broker address; client.connect() below requires it
def lambda_handler(event, context):
s3 = boto3.client('s3')
# read image from event object
file_meta = event['Records'][0]['s3']
event_image = file_meta['object']['key']
bucket_name = file_meta['bucket']['name']
image_local_path = "/tmp/"+event_image
s3.download_file(bucket_name, event_image, image_local_path)
img = cv2.imread(image_local_path)
if img is None:
return 1
# for each color compute the sum
red = np.sum(img[:, :, RED])
green = np.sum(img[:, :, GREEN])
blue = np.sum(img[:, :, BLUE])
del img # save some space
# total amount of color
base = (blue+green+red)
# percentage of each color in the image
red = int(red / base * 100)
green = int(green / base * 100)
blue = int(blue / base * 100)
# todo if the file doesn't exist then create it
# the json file contains the index of the images for each user
json_name="images.json"
json_path = "/tmp/"+json_name
json_images = {} # read the json file
# if the file exists, it is downloaded and loaded, otherwise it throws ClientError
# before it tries to open the file
try:
s3.download_file(bucket_name, "images.json", json_path)
with open(json_path, 'r') as fp:
json_images = json.load(fp)
except ClientError:
pass
user_code = event_image.split("_")[0]
img_data = {'name': event_image,
'red': red,
'green': green,
'blue': blue
}
stop = False
for users, macnames in json_images.items():
for macname in macnames.keys():
if user_code == macname: # append new data
json_images[users][user_code].append(img_data)
stop = True
break
# insert user for the first time if not already present
if not stop:
json_images['users'] = {user_code: [img_data]}
with open(json_path, 'w') as fp:
json.dump(json_images, fp)
# upload json file in the bucket
response = s3.upload_file(json_path, bucket_name, json_name)
client = mqtt.Client()
client.connect(mqtt_broker_ip)
client.publish(user_code, payload="{:02d}/{:02d}/{:02d}".format(red,green,blue))
return 0
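# Local smoke-test sketch (an addition for illustration, not part of the deployed
# function): it builds the minimal S3 event shape that lambda_handler actually reads
# above (Records[0].s3.object.key and Records[0].s3.bucket.name). The bucket and key
# names are made up, and a real run still needs AWS credentials, the bucket contents,
# and a reachable MQTT broker.
if __name__ == '__main__':
    fake_event = {
        'Records': [{
            's3': {
                'object': {'key': 'user42_photo.jpg'},    # "<user_code>_<image>" naming assumed from the split('_') above
                'bucket': {'name': 'colorpicker-images'}  # hypothetical bucket name
            }
        }]
    }
    print(lambda_handler(fake_event, context=None))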
|
[
"noreply@github.com"
] |
skambuilds.noreply@github.com
|
b90b91e0b4e9270c4064c52c5a88b4c433c7b1ec
|
a0cecab2c63c89b13d68105ba55a429c3d2d86a2
|
/prepostpy/file.py
|
83f6ff45160e677e1a1a1c635f316c949d3270f9
|
[
"MIT"
] |
permissive
|
adtzlr/prepostpy
|
d7f69603f7d32ee6839f452dab2b2a693604ee7e
|
c4ce1a06536d6b1a88be20a0c9ec7361a07ef708
|
refs/heads/master
| 2020-07-12T02:27:44.792039
| 2019-08-29T12:15:05
| 2019-08-29T12:15:05
| 204,693,591
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,695
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 13 15:05:55 2019
@author: dutzi
"""
import subprocess
import time
import os
import numpy as np
from types import SimpleNamespace
import py_mentat as pm
from .core import Table
from .core import Point
from .core import Curve
from .core import Node
from .core import Material
from .core import Element
from .core import Boundary
from .core import Loadcase
from .core import Job
from .core import Automesh
#from .expand import Expand
#from .sweep import Sweep
from .core import Select
from .core import Operations
from .core import Links
from .tools import generate_proc
#from .post_file import PostFile
class File:
def __init__(self,filename):
self.filename = filename
self.filepath = '\\'.join(self.filename.split('\\')[:-1])
self.tables = []
self.points = []
self.curves = []
self.surfaces = []
self.nodes = []
self.elements = []
self.boundaries = []
self.materials = []
self.loadcases = []
self.jobs = []
self.Automesh = Automesh()
#self.Expand = Expand()
#self.Sweep = Sweep()
self.Operations = Operations()
self.Select = Select()
self.Links = Links()
self.exported = False
self.item = SimpleNamespace()
generate_proc()
#self.PostFile = PostFile()
def reset(self):
self.tables = []
self.points = []
self.curves = []
self.surfaces = []
self.nodes = []
self.elements = []
self.boundaries = []
self.materials = []
self.loadcases = []
self.jobs = []
def build(self):
self.reset()
for k,o in sorted(vars(self.item).items()):
if isinstance(o,Point): self.points.append(o)
if isinstance(o,Curve): self.curves.append(o)
if isinstance(o,Node): self.nodes.append(o)
if isinstance(o,Element): self.elements.append(o)
if isinstance(o,Table): self.tables.append(o)
if isinstance(o,Material): self.materials.append(o)
if isinstance(o,Boundary): self.boundaries.append(o)
if isinstance(o,Loadcase): self.loadcases.append(o)
if isinstance(o,Job): self.jobs.append(o)
def __add(self,ob):
try :
len(ob)
obj = ob
except:
obj = [ob]
for o in obj:
if isinstance(o,Point): self.points.append(o)
if isinstance(o,Curve): self.curves.append(o)
if isinstance(o,Node): self.nodes.append(o)
if isinstance(o,Element): self.elements.append(o)
if isinstance(o,Table): self.tables.append(o)
if isinstance(o,Material): self.materials.append(o)
if isinstance(o,Boundary): self.boundaries.append(o)
if isinstance(o,Loadcase): self.loadcases.append(o)
if isinstance(o,Job): self.jobs.append(o)
def savefile(self,write_input=True):
pm.py_send(r'*save_as_model '+self.filename+' yes')
if write_input:
for job in self.jobs:
job.write_input()
def tomentat(self):
self.build()
#reset model
pm.py_send("*new_model yes")
#pm.py_send("yes")
self.objects = [*self.points,
*self.curves,
*self.nodes,
*self.elements,
*self.tables,
*self.materials,
*self.boundaries,
*self.loadcases,
*self.jobs]
for o in self.objects:
o.tomentat()
for j in self.jobs:
j.filename = self.filename
self.exported = True
def _get_installed_version(self,msc_path):
return next(os.walk(msc_path+r'\Marc'))[1][-1]
def submit(self,job,verbose=1,
msc_path=r'C:\MSC.Software',version='latest',threads=1,
scratch_dir=None):
workdir = '\\'.join(self.filename.split('\\')[:-1])
stsfile = '.'.join(self.filename.split('.')[:-1])+'_'+job.label+'.sts'
datfile = '.'.join(self.filename.split('.')[:-1])+'_'+job.label+'.dat'
# select latest installed version of mentat
if version == 'latest':
marc_version = self._get_installed_version(msc_path)
else:
marc_version = version
marc_year = marc_version.split('.')[0]
marc_path = msc_path+r'\Marc'+'\\'+ \
marc_version+r'\marc'+marc_year+ \
r'\tools\run_marc.bat'
runjob = [marc_path, '-jid', datfile, '-dir', workdir]
if threads > 1:
runjob += ['-nts', str(threads), '-nte', str(threads)]
if scratch_dir is not None:
runjob += ['-sdir', scratch_dir]
subprocess.run(r' '.join(runjob),
stdout=subprocess.PIPE,
stderr = subprocess.PIPE)
if verbose > 0:
with open(stsfile,'r') as f1:
lines = f1.readlines()
exit_message = lines[-3]
#check if job has finished
if '3004' in exit_message:
                print('Job completed successfully (Exit 3004).')
else:
raise ValueError('Job error.', exit_message)
# 'cmd.exe /C',
# # pause script while job is running
# while True:
# # check every 5 seconds if job has finished by evaluating
# # model_job##.sts file
# time.sleep(5)
# with open(stsfile,'r') as f1:
# lines = f1.readlines()
# try: # catch error if less than 3 lines are present
# exit_message = lines[-3]
# except:
# exit_message = ''
#
# # check if job has finished
# if '3004' in exit_message:
# if verbose > 0: print(exit_message)
# break
# else: # print current loadcase and increment
# try: # catch error if the last line does not contain the progress
# lc = int(lines[-1][:7])
# inc = int(lines[-1][7:15])
# progress = 'current loadcase %d / inc. %d' % (lc, inc)
# if verbose > 0: print(progress)
# except:
# pass
|
[
"dutzi@tugraz.at"
] |
dutzi@tugraz.at
|
ea5a7386ec4bcbae5f86d8531379450dcb7233bd
|
d46d182a5ebf505883c079d696b7ddceeea80560
|
/migrations/versions/3b23e222e51e_.py
|
84f3e321b96e252b5e1f5b9d511bcf65ecd59ff9
|
[] |
no_license
|
bunchito/fyyur
|
6851907b2cf5b95f6867c4cc7b2dcf6e2a91b8e4
|
6ba66913688c13ab3fe28f972852882f0f78a091
|
refs/heads/main
| 2023-04-03T02:18:30.125088
| 2021-04-07T00:48:40
| 2021-04-07T00:48:40
| 354,876,132
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,616
|
py
|
"""empty message
Revision ID: 3b23e222e51e
Revises:
Create Date: 2021-04-05 10:08:01.943767
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3b23e222e51e'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('artists',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.Column('city', sa.String(length=120), nullable=True),
sa.Column('state', sa.String(length=120), nullable=True),
sa.Column('phone', sa.String(length=120), nullable=True),
sa.Column('facebook_link', sa.String(length=120), nullable=True),
sa.Column('genres', sa.ARRAY(sa.String(length=120)), nullable=True),
sa.Column('website', sa.String(length=120), nullable=True),
sa.Column('seeking_venue', sa.Boolean(), nullable=True),
sa.Column('seeking_description', sa.String(length=480), nullable=True),
sa.Column('image_link', sa.String(length=480), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('venues',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.Column('city', sa.String(length=120), nullable=True),
sa.Column('state', sa.String(length=120), nullable=True),
sa.Column('address', sa.String(length=120), nullable=True),
sa.Column('phone', sa.String(length=120), nullable=True),
sa.Column('image_link', sa.String(length=500), nullable=True),
sa.Column('facebook_link', sa.String(length=120), nullable=True),
sa.Column('genres', sa.ARRAY(sa.String(length=120)), nullable=True),
sa.Column('website', sa.String(length=120), nullable=True),
sa.Column('seeking_talent', sa.Boolean(), nullable=True),
sa.Column('seeking_description', sa.String(length=480), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('shows',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('venue_id', sa.Integer(), nullable=False),
sa.Column('artist_id', sa.Integer(), nullable=False),
sa.Column('start_time', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['artist_id'], ['artists.id'], ),
sa.ForeignKeyConstraint(['venue_id'], ['venues.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('shows')
op.drop_table('venues')
op.drop_table('artists')
# ### end Alembic commands ###
|
[
"enotragalaxiahay@outlook.com"
] |
enotragalaxiahay@outlook.com
|
01de628894d27c40f94c31eef7b2c069af409929
|
3428650cbf4dff148dfcdea98f9071fa0529a205
|
/app.py
|
e8ae58f23bc316c3e5380de803b75e02bfba7765
|
[] |
no_license
|
Rfluegel1/timely-warning
|
262398cf36d7ab5fc2e7375e84f0cae58867aab2
|
513a34ba6f83ab1b203cbd41bb51df87f98c7536
|
refs/heads/master
| 2022-12-12T05:26:50.651634
| 2020-01-19T20:22:36
| 2020-01-19T20:22:36
| 232,352,025
| 0
| 0
| null | 2022-12-08T07:02:19
| 2020-01-07T15:17:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,624
|
py
|
from flask import Flask, render_template, request
from flask_wtf import FlaskForm
from wtforms import SubmitField, SelectField, Form, FloatField, BooleanField, StringField, PasswordField, validators
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_bootstrap import Bootstrap
from flask_wtf.csrf import CSRFProtect
from config import Config
csrf = CSRFProtect()
app = Flask(__name__)
Bootstrap(app)
app.config.from_object(Config)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///timely_warning.db'
db = SQLAlchemy(app)
migrate = Migrate(app, db)
csrf.init_app(app)
from models import Reports
import os
SECRET_KEY = os.urandom(32)
app.config['SECRET_KEY'] = SECRET_KEY
class WordForm(FlaskForm):
    name = StringField("Name", [validators.DataRequired()])
lat = FloatField("Latitude", [validators.NumberRange(min=43, max=45, message="Latitude value not in range 43 to 45"), validators.DataRequired()])
lon = FloatField("Longitude", [validators.NumberRange(min=-94, max=-92, message="Longitude value not in range -92 to -94"), validators.DataRequired()])
description = StringField("Description")
submit = SubmitField("Submit")
@app.route('/')
def home():
form = WordForm()
return render_template('home.html', form=form)
#return'Choose: submit report or view reports'
@app.route('/submit')
def submit():
form = WordForm()
return render_template('submit.html', form=form)
#return 'Submit your report here'
@app.route('/view')
def view():
form = WordForm()
reports = Reports.query.all()
print(reports)
return render_template('view.html', form=form, list=reports)
#return 'View all reports here'
@app.route('/insert', methods=['POST','GET'])
def insert():
form = WordForm()
if form.validate_on_submit():
name = form.name.data
lat = form.lat.data
lon = form.lon.data
desc = form.description.data
else:
return render_template('submit.html', form=form)
print(name)
print(lat)
print(lon)
print(desc)
# Nuke database
# db.session.execute('DELETE FROM reports WHERE true')
rows = db.session.execute('SELECT * FROM reports')
id = len(rows.fetchall()) + 1
print(id)
#insert = 'INSERT INTO reports (id, name, lon, lat, description) VALUE ({}, \'{}\', \'{}\', \'{}\', \'{}\')'.format(id, name, lat, lon, desc)
#print(insert)
r = Reports(id, name, lat, lon, desc)
db.session.add(r)
db.session.commit()
return render_template('successful.html')
if __name__ == '__main__':
app.run(debug=True, port=8003, host="0.0.0.0")
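# The Reports model is imported from models.py, which is not shown here. The sketch
# below is a plausible reconstruction based only on how it is used above: the
# Reports(id, name, lat, lon, desc) constructor, the 'reports' table in the raw SQL,
# and the column list in the commented-out INSERT. Column types and lengths are
# assumptions; the real models.py may differ.
#
#     from app import db
#
#     class Reports(db.Model):
#         __tablename__ = 'reports'
#         id = db.Column(db.Integer, primary_key=True)
#         name = db.Column(db.String(120))
#         lat = db.Column(db.Float)
#         lon = db.Column(db.Float)
#         description = db.Column(db.String(480))
#
#         def __init__(self, id, name, lat, lon, description):
#             self.id = id
#             self.name = name
#             self.lat = lat
#             self.lon = lon
#             self.description = description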
|
[
"noreply@github.com"
] |
Rfluegel1.noreply@github.com
|
c3575f7e25c72d4769383952cf02767e33e8b400
|
a9ef505f36d62ae02b8af5f20ff225489486a5fc
|
/setup.py
|
196b8dc3ee559b1674e106967ad84560e464445d
|
[] |
no_license
|
deaconblues86/starwars
|
aea24bceb6c44e35f23d8a50de681c65b7db9693
|
1d824357ac7d09d9e01091aa875d6f6c1060c4d9
|
refs/heads/master
| 2020-04-20T22:59:12.566940
| 2019-02-05T18:42:10
| 2019-02-05T18:42:10
| 169,156,742
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
from setuptools import setup  # setuptools is required for install_requires; distutils.core.setup would ignore it
setup(name='StarWars',
version='1.0',
description='A few demo scripts run against the Star Wars API (https://swapi.co/)',
author='Brian Hammill',
author_email='hammillbc@gmail.com',
url='https://github.com/deaconblues86/starwars',
install_requires=[
'protobuf>=3.0.0',
'mysqlclient==1.4.1',
'sqlalchemy==1.2.17',
'requests==2.21.0',
]
)
|
[
"hammillbc@gmail.com"
] |
hammillbc@gmail.com
|
1e3f6cee13dfa20b5d5e267097948edeb4ec3c7c
|
8e56e2f9877774d1e21814fd42bc55f1fcfd4259
|
/tests/test_membername_json.py
|
ed0c368eaa767db1bf3a2d811a32ad8d8cf89059
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
reedlaw/www.gittip.com
|
538246f96ab2282c5f7fc2062171cda6b3d5e558
|
a2d9b2680b283eaf72b6185419b7e5d14d9dfe2a
|
refs/heads/master
| 2020-12-29T03:18:23.972632
| 2013-10-05T17:08:52
| 2013-10-05T17:08:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,763
|
py
|
from __future__ import unicode_literals
from nose.tools import assert_equal
import json
from aspen.utils import utcnow
from gittip.testing import Harness
from gittip.testing.client import TestClient
class TestMembernameJson(Harness):
def make_client_and_csrf(self):
client = TestClient()
csrf_token = client.get('/').request.context['csrf_token']
return client, csrf_token
def make_team_and_participant(self):
self.make_participant("team", claimed_time=utcnow(), number='plural')
self.make_participant("alice", claimed_time=utcnow())
def test_post_team_is_not_team_returns_404(self):
client, csrf_token = self.make_client_and_csrf()
self.make_team_and_participant()
response = client.post('/alice/members/team.json'
, { 'csrf_token': csrf_token }
, user='alice'
)
actual = response.code
assert actual == 404, actual
def test_post_participant_doesnt_exist_returns_404(self):
client, csrf_token = self.make_client_and_csrf()
self.make_team_and_participant()
response = client.post('/team/members/bob.json'
, { 'csrf_token': csrf_token }
, user='team'
)
actual = response.code
assert actual == 404, actual
def test_post_user_is_not_member_or_team_returns_403(self):
client, csrf_token = self.make_client_and_csrf()
self.make_team_and_participant()
self.make_participant("bob", claimed_time=utcnow(), number='plural')
response = client.post('/team/members/alice.json'
, {
'take': '0.01'
, 'csrf_token': csrf_token
}
, user='team'
)
actual = response.code
assert actual == 200, actual
response = client.post('/team/members/bob.json'
, {
'take': '0.01'
, 'csrf_token': csrf_token
}
, user='team'
)
actual = response.code
assert actual == 200, actual
response = client.post('/team/members/alice.json'
, { 'csrf_token': csrf_token }
, user='bob'
)
actual = response.code
assert actual == 403, actual
def test_post_take_is_not_decimal_returns_400(self):
client, csrf_token = self.make_client_and_csrf()
self.make_team_and_participant()
response = client.post('/team/members/alice.json'
, {
'take': 'bad'
, 'csrf_token': csrf_token
}
, user='team'
)
actual = response.code
assert actual == 400, actual
def test_post_member_equals_team_returns_400(self):
client, csrf_token = self.make_client_and_csrf()
self.make_team_and_participant()
response = client.post('/team/members/team.json'
, {
'take': '0.01'
, 'csrf_token': csrf_token
}
, user='team'
)
actual = response.code
assert actual == 400, actual
def test_post_take_is_not_zero_or_penny_returns_400(self):
client, csrf_token = self.make_client_and_csrf()
self.make_team_and_participant()
response = client.post('/team/members/alice.json'
, {
'take': '0.02'
, 'csrf_token': csrf_token
}
, user='team'
)
actual = response.code
assert actual == 400, actual
def test_post_zero_take_on_non_member_returns_500(self):
client, csrf_token = self.make_client_and_csrf()
self.make_team_and_participant()
response = client.post('/team/members/alice.json'
, {
'take': '0.00'
, 'csrf_token': csrf_token
}
, user='team'
)
actual = response.code
assert actual == 500, actual
def test_post_can_add_member_to_team(self):
client, csrf_token = self.make_client_and_csrf()
self.make_team_and_participant()
response = client.post('/team/members/alice.json'
, {
'take': '0.01'
, 'csrf_token': csrf_token
}
, user='team'
)
data = json.loads(response.body)
actual = len(data)
assert actual == 2, actual
for rec in data:
assert rec['username'] in ('team', 'alice'), rec['username']
def test_post_can_remove_member_from_team(self):
client, csrf_token = self.make_client_and_csrf()
self.make_team_and_participant()
response = client.post('/team/members/alice.json'
, {
'take': '0.01'
, 'csrf_token': csrf_token
}
, user='team'
)
data = json.loads(response.body)
actual = len(data)
assert actual == 2, actual
for rec in data:
assert rec['username'] in ('team', 'alice'), rec['username']
response = client.post('/team/members/alice.json'
, {
'take': '0.00'
, 'csrf_token': csrf_token
}
, user='team'
)
data = json.loads(response.body)
actual = len(data)
assert actual == 1, actual
actual = data[0]['username']
assert actual == 'team', actual
def test_post_non_team_member_adds_member_returns_403(self):
client, csrf_token = self.make_client_and_csrf()
self.make_team_and_participant()
self.make_participant("bob", claimed_time=utcnow())
response = client.post('/team/members/alice.json'
, {
'take': '0.01'
, 'csrf_token': csrf_token
}
, user='team'
)
actual = response.code
assert actual == 200, actual
response = client.post('/team/members/bob.json'
, {
'take': '0.01'
, 'csrf_token': csrf_token
}
, user='alice'
)
actual = response.code
assert actual == 403, actual
def test_get_team_when_team_equals_member(self):
client, csrf_token = self.make_client_and_csrf()
self.make_team_and_participant()
response = client.get('/team/members/team.json', 'team')
data = json.loads(response.body)
actual = response.code
assert actual == 200, actual
actual = data['username']
assert actual == 'team', actual
def test_get_team_member_returns_null_when_non_member(self):
client, csrf_token = self.make_client_and_csrf()
self.make_team_and_participant()
response = client.get('/team/members/alice.json', 'team')
actual = response.code
assert actual == 200, actual
actual = response.body
assert actual == 'null', actual
def test_get_team_members_returns_take_when_member(self):
client, csrf_token = self.make_client_and_csrf()
self.make_team_and_participant()
response = client.post('/team/members/alice.json'
, {
'take': '0.01'
, 'csrf_token': csrf_token
}
, user='team'
)
actual = response.code
assert actual == 200, actual
response = client.get('/team/members/alice.json', 'team')
data = json.loads(response.body)
actual = response.code
assert actual == 200, actual
actual = data['username']
assert actual == 'alice', actual
actual = data['take']
assert actual == '0.01', actual
|
[
"neil@neilkistner.com"
] |
neil@neilkistner.com
|
4e535af2087d9d1e0c238e0b4d90748c74249280
|
32179992ead39aefb0503ec1ca9c929c063f256c
|
/geppytto/browser_agent/__init__.py
|
9571bd8f25c74f62aa17fcac2a1b0c4fa8228fa4
|
[] |
no_license
|
myrfy001/geppytto
|
60e63ad7351a6c82ad7d9bc2bd1692618b4b5d93
|
fb13c46a2e0664fe872179490215208194bce8a8
|
refs/heads/master
| 2022-03-22T01:27:50.036626
| 2019-05-08T01:48:46
| 2019-05-08T01:48:46
| 160,212,159
| 2
| 0
| null | 2021-06-11T17:47:56
| 2018-12-03T15:25:58
|
Python
|
UTF-8
|
Python
| false
| false
| 996
|
py
|
# coding:utf-8
import pyppeteer
import asyncio
import sys
from os.path import abspath, dirname, join
import atexit
class AgentSharedVars:
host = None
port = None
agent_id = None
agent_name = None
advertise_address = None
user_id = None
api_client = None
node_name = None
is_steady = None
last_ack_time = None
running = True
soft_exit = False
bgt_manager = None
browser_pool = None
sanic_app = None
chrome_executable_path = None
user_data_dir = None
server_task = None
access_token = None
is_cluster_mode = False
@classmethod
def set_soft_exit(cls):
cls.soft_exit = True
cls.bgt_manager.soft_exit()
started_agents = {}
started_named_browsers = {}
geppytto_is_exiting = False
@atexit.register
def close_all_agents():
global geppytto_is_exiting
geppytto_is_exiting = True
for pid, proc_info in started_agents.items():
proc_info['process_handle'].terminate()
|
[
"myrfy001@users.noreply.github.com"
] |
myrfy001@users.noreply.github.com
|
862c4485a1d357053b435263145fa8aa8e94f5fd
|
4b9b505c716fe9461c46699985167d00f2b1b0b7
|
/karlooper/utils/security.py
|
a6b49ad9bf9fe9cf468ed1cf0067c6d57f8743f8
|
[] |
no_license
|
smallearth/karlooper
|
90544265f14467c2d3b2f294d581b07d1f81f039
|
ce9175cd373836fd33c203144785b66ab4586c16
|
refs/heads/master
| 2021-01-16T19:35:59.225192
| 2016-06-24T06:50:58
| 2016-06-24T06:50:58
| 62,021,191
| 1
| 0
| null | 2016-06-27T02:56:54
| 2016-06-27T02:56:54
| null |
UTF-8
|
Python
| false
| false
| 7,815
|
py
|
# -*-coding:utf-8-*-
"""
security
~~~~~~~~
Use this module to encrypt strings.
Usage
=====
>>> d = DES()
>>> d.input_key("123456789")
>>> s = "/static/hello.js"
>>> a = d.encode(s)
>>> print a
b14f1453ceddc91e492fbe883d552a2e
>>> b = d.decode(a)
>>> print b
/static/hello.js
"""
from functools import partial
__author__ = 'karlvorndoenitz@gmail.com'
class DES(object):
"""
DES encrypt method
    interface: input_key(key, base=10), encode(s), decode(s)
"""
__ip = [
58, 50, 42, 34, 26, 18, 10, 2, 60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6, 64, 56, 48, 40, 32, 24, 16, 8,
57, 49, 41, 33, 25, 17, 9, 1, 59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5, 63, 55, 47, 39, 31, 23, 15, 7,
]
__ip1 = [
40, 8, 48, 16, 56, 24, 64, 32, 39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30, 37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28, 35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26, 33, 1, 41, 9, 49, 17, 57, 25,
]
__e = [
32, 1, 2, 3, 4, 5,
4, 5, 6, 7, 8, 9,
8, 9, 10, 11, 12, 13,
12, 13, 14, 15, 16, 17,
16, 17, 18, 19, 20, 21,
20, 21, 22, 23, 24, 25,
24, 25, 26, 27, 28, 29,
28, 29, 30, 31, 32, 1,
]
__p = [
16, 7, 20, 21, 29, 12, 28, 17,
1, 15, 23, 26, 5, 18, 31, 10,
2, 8, 24, 14, 32, 27, 3, 9,
19, 13, 30, 6, 22, 11, 4, 25,
]
__s = [
[
0xe, 0x4, 0xd, 0x1, 0x2, 0xf, 0xb, 0x8, 0x3, 0xa, 0x6, 0xc, 0x5, 0x9, 0x0, 0x7,
0x0, 0xf, 0x7, 0x4, 0xe, 0x2, 0xd, 0x1, 0xa, 0x6, 0xc, 0xb, 0x9, 0x5, 0x3, 0x8,
0x4, 0x1, 0xe, 0x8, 0xd, 0x6, 0x2, 0xb, 0xf, 0xc, 0x9, 0x7, 0x3, 0xa, 0x5, 0x0,
0xf, 0xc, 0x8, 0x2, 0x4, 0x9, 0x1, 0x7, 0x5, 0xb, 0x3, 0xe, 0xa, 0x0, 0x6, 0xd,
],
[
0xf, 0x1, 0x8, 0xe, 0x6, 0xb, 0x3, 0x4, 0x9, 0x7, 0x2, 0xd, 0xc, 0x0, 0x5, 0xa,
0x3, 0xd, 0x4, 0x7, 0xf, 0x2, 0x8, 0xe, 0xc, 0x0, 0x1, 0xa, 0x6, 0x9, 0xb, 0x5,
0x0, 0xe, 0x7, 0xb, 0xa, 0x4, 0xd, 0x1, 0x5, 0x8, 0xc, 0x6, 0x9, 0x3, 0x2, 0xf,
0xd, 0x8, 0xa, 0x1, 0x3, 0xf, 0x4, 0x2, 0xb, 0x6, 0x7, 0xc, 0x0, 0x5, 0xe, 0x9,
],
[
0xa, 0x0, 0x9, 0xe, 0x6, 0x3, 0xf, 0x5, 0x1, 0xd, 0xc, 0x7, 0xb, 0x4, 0x2, 0x8,
0xd, 0x7, 0x0, 0x9, 0x3, 0x4, 0x6, 0xa, 0x2, 0x8, 0x5, 0xe, 0xc, 0xb, 0xf, 0x1,
0xd, 0x6, 0x4, 0x9, 0x8, 0xf, 0x3, 0x0, 0xb, 0x1, 0x2, 0xc, 0x5, 0xa, 0xe, 0x7,
0x1, 0xa, 0xd, 0x0, 0x6, 0x9, 0x8, 0x7, 0x4, 0xf, 0xe, 0x3, 0xb, 0x5, 0x2, 0xc,
],
[
0x7, 0xd, 0xe, 0x3, 0x0, 0x6, 0x9, 0xa, 0x1, 0x2, 0x8, 0x5, 0xb, 0xc, 0x4, 0xf,
0xd, 0x8, 0xb, 0x5, 0x6, 0xf, 0x0, 0x3, 0x4, 0x7, 0x2, 0xc, 0x1, 0xa, 0xe, 0x9,
0xa, 0x6, 0x9, 0x0, 0xc, 0xb, 0x7, 0xd, 0xf, 0x1, 0x3, 0xe, 0x5, 0x2, 0x8, 0x4,
0x3, 0xf, 0x0, 0x6, 0xa, 0x1, 0xd, 0x8, 0x9, 0x4, 0x5, 0xb, 0xc, 0x7, 0x2, 0xe,
],
[
0x2, 0xc, 0x4, 0x1, 0x7, 0xa, 0xb, 0x6, 0x8, 0x5, 0x3, 0xf, 0xd, 0x0, 0xe, 0x9,
0xe, 0xb, 0x2, 0xc, 0x4, 0x7, 0xd, 0x1, 0x5, 0x0, 0xf, 0xa, 0x3, 0x9, 0x8, 0x6,
0x4, 0x2, 0x1, 0xb, 0xa, 0xd, 0x7, 0x8, 0xf, 0x9, 0xc, 0x5, 0x6, 0x3, 0x0, 0xe,
0xb, 0x8, 0xc, 0x7, 0x1, 0xe, 0x2, 0xd, 0x6, 0xf, 0x0, 0x9, 0xa, 0x4, 0x5, 0x3,
],
[
0xc, 0x1, 0xa, 0xf, 0x9, 0x2, 0x6, 0x8, 0x0, 0xd, 0x3, 0x4, 0xe, 0x7, 0x5, 0xb,
0xa, 0xf, 0x4, 0x2, 0x7, 0xc, 0x9, 0x5, 0x6, 0x1, 0xd, 0xe, 0x0, 0xb, 0x3, 0x8,
0x9, 0xe, 0xf, 0x5, 0x2, 0x8, 0xc, 0x3, 0x7, 0x0, 0x4, 0xa, 0x1, 0xd, 0xb, 0x6,
0x4, 0x3, 0x2, 0xc, 0x9, 0x5, 0xf, 0xa, 0xb, 0xe, 0x1, 0x7, 0x6, 0x0, 0x8, 0xd,
],
[
0x4, 0xb, 0x2, 0xe, 0xf, 0x0, 0x8, 0xd, 0x3, 0xc, 0x9, 0x7, 0x5, 0xa, 0x6, 0x1,
0xd, 0x0, 0xb, 0x7, 0x4, 0x9, 0x1, 0xa, 0xe, 0x3, 0x5, 0xc, 0x2, 0xf, 0x8, 0x6,
0x1, 0x4, 0xb, 0xd, 0xc, 0x3, 0x7, 0xe, 0xa, 0xf, 0x6, 0x8, 0x0, 0x5, 0x9, 0x2,
0x6, 0xb, 0xd, 0x8, 0x1, 0x4, 0xa, 0x7, 0x9, 0x5, 0x0, 0xf, 0xe, 0x2, 0x3, 0xc,
],
[
0xd, 0x2, 0x8, 0x4, 0x6, 0xf, 0xb, 0x1, 0xa, 0x9, 0x3, 0xe, 0x5, 0x0, 0xc, 0x7,
0x1, 0xf, 0xd, 0x8, 0xa, 0x3, 0x7, 0x4, 0xc, 0x5, 0x6, 0xb, 0x0, 0xe, 0x9, 0x2,
0x7, 0xb, 0x4, 0x1, 0x9, 0xc, 0xe, 0x2, 0x0, 0x6, 0xa, 0xd, 0xf, 0x3, 0x5, 0x8,
0x2, 0x1, 0xe, 0x7, 0x4, 0xa, 0x8, 0xd, 0xf, 0xc, 0x9, 0x0, 0x3, 0x5, 0x6, 0xb,
],
]
__k1 = [
57, 49, 41, 33, 25, 17, 9,
1, 58, 50, 42, 34, 26, 18,
10, 2, 59, 51, 43, 35, 27,
19, 11, 3, 60, 52, 44, 36,
63, 55, 47, 39, 31, 23, 15,
7, 62, 54, 46, 38, 30, 22,
14, 6, 61, 53, 45, 37, 29,
21, 13, 5, 28, 20, 12, 4,
]
__k2 = [
14, 17, 11, 24, 1, 5, 3, 28,
15, 6, 21, 10, 23, 19, 12, 4,
26, 8, 16, 7, 27, 20, 13, 2,
41, 52, 31, 37, 47, 55, 30, 40,
51, 45, 33, 48, 44, 49, 39, 56,
34, 53, 46, 42, 50, 36, 29, 32,
]
__k0 = [
1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1,
]
__hex_bin = {
'0': '0000', '1': '0001', '2': '0010', '3': '0011',
'4': '0100', '5': '0101', '6': '0110', '7': '0111',
'8': '1000', '9': '1001', 'a': '1010', 'b': '1011',
'c': '1100', 'd': '1101', 'e': '1110', 'f': '1111',
' ': '0000'
}
__re = lambda t, s: ''.join(s[i - 1] for i in t)
__IP = partial(__re, __ip)
__IP1 = partial(__re, __ip1)
__E = partial(__re, __e)
__P = partial(__re, __p)
__K1 = partial(__re, __k1)
__K2 = partial(__re, __k2)
__B = partial(lambda hex_bin, s: ''.join(hex_bin[w] for w in ''.join('%2x' % ord(w) for w in s)), __hex_bin)
__DB = partial(lambda s: ''.join(chr(int(s[i:i + 8], 2)) for i in range(0, len(s), 8)))
__S = partial(lambda hex_bin, __s, s: ''.join(
hex_bin['%x' % __s[i][int(s[i * 6] + s[i * 6 + 5], 2) * 16 + int(s[i * 6 + 1:i * 6 + 5], 2)]] for i in
range(8)), __hex_bin, __s)
__F = partial(lambda s, k: ''.join('0' if s[i] == k[i] else '1' for i in range(len(s))))
__K0 = partial(
lambda k0, K2, k: map(K2, (k[k0[i]:28] + k[0:k0[i]] + k[k0[i] + 28:56] + k[28:k0[i] + 28] for i in range(16))),
__k0, __K2)
__K = partial(lambda K1, K0, k: K0(K1(k)), __K1, __K0)
def __init__(self):
pass
def input_key(self, key, base=10):
if base == 2:
pass
elif base == 16:
key = ''.join(self.__class__.__hex_bin[w] for w in key)
else:
key = self.__class__.__B(key)
self.__k = self.__class__.__K(key)
def __code(self, s, k):
s = self.__IP(s)
l, r = s[0:32], s[32:64]
for i in range(16):
r_t = r
r = self.__E(r)
r = self.__F(r, k[i])
r = self.__S(r)
r = self.__P(r)
r = self.__F(r, l)
l = r_t
return self.__class__.__IP1(r + l)
def encode(self, s):
s = str(s)
a = ''
s += ' ' * ((8 - len(s) % 8) % 8)
for i in range(0, len(s), 8):
before = self.__class__.__B(s[i:i + 8])
after = self.__code(before, self.__k)
a += '%16x' % int(after, 2)
return ''.join(w if w != ' ' else '0' for w in a)
def decode(self, s):
a = ""
        s = s.lower()
for i in range(0, len(s), 16):
before = ''.join(self.__class__.__hex_bin[s[j]] for j in range(i, i + 16))
after = self.__code(before, self.__k[::-1])
a += self.__class__.__DB(after)
return a.rstrip().decode('utf-8')
|
[
"karlvorndoenitz@gmail.com"
] |
karlvorndoenitz@gmail.com
|
00f837391df758c0483bf3e8b30ade2b348a0302
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/android_ip_webcam/test_config_flow.py
|
881585ed5dc41540ac67e466440910a2322a55b4
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 3,556
|
py
|
"""Test the Android IP Webcam config flow."""
from unittest.mock import Mock, patch
import aiohttp
from homeassistant import config_entries
from homeassistant.components.android_ip_webcam.const import DOMAIN
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import FlowResultType
from .test_init import MOCK_CONFIG_DATA
from tests.common import MockConfigEntry
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_form(hass: HomeAssistant, aioclient_mock_fixture) -> None:
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == FlowResultType.FORM
assert result["errors"] is None
with patch(
"homeassistant.components.android_ip_webcam.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.1.1.1",
"port": 8080,
},
)
await hass.async_block_till_done()
assert result2["type"] == FlowResultType.CREATE_ENTRY
assert result2["title"] == "1.1.1.1"
assert result2["data"] == {
"host": "1.1.1.1",
"port": 8080,
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_device_already_configured(
hass: HomeAssistant, aioclient_mock_fixture
) -> None:
"""Test aborting if the device is already configured."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG_DATA)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == FlowResultType.FORM
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.1.1.1",
"port": 8080,
},
)
await hass.async_block_till_done()
assert result2["type"] == FlowResultType.ABORT
assert result2["reason"] == "already_configured"
async def test_form_invalid_auth(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we handle invalid auth error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
aioclient_mock.get(
"http://1.1.1.1:8080/status.json?show_avail=1",
exc=aiohttp.ClientResponseError(Mock(), (), status=401),
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "1.1.1.1", "port": 8080, "username": "user", "password": "wrong-pass"},
)
assert result2["type"] == FlowResultType.FORM
assert result2["errors"] == {"username": "invalid_auth", "password": "invalid_auth"}
async def test_form_cannot_connect(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
aioclient_mock.get(
"http://1.1.1.1:8080/status.json?show_avail=1",
exc=aiohttp.ClientError,
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.1.1.1",
},
)
assert result2["type"] == FlowResultType.FORM
assert result2["errors"] == {"base": "cannot_connect"}
|
[
"noreply@github.com"
] |
home-assistant.noreply@github.com
|
df733e92f407fdfe059b200e39d3b933c86f04cb
|
80b7f2a10506f70477d8720e229d7530da2eff5d
|
/uhd_restpy/testplatform/sessions/ixnetwork/quicktest/customstep_98f1ecce7c2d88439541c6c12fb66c1d.py
|
34a6a6c4ca7a2e371547464582d6029264da156a
|
[
"MIT"
] |
permissive
|
OpenIxia/ixnetwork_restpy
|
00fdc305901aa7e4b26e4000b133655e2d0e346a
|
c8ecc779421bffbc27c906c1ea51af3756d83398
|
refs/heads/master
| 2023-08-10T02:21:38.207252
| 2023-07-19T14:14:57
| 2023-07-19T14:14:57
| 174,170,555
| 26
| 16
|
MIT
| 2023-02-02T07:02:43
| 2019-03-06T15:27:20
|
Python
|
UTF-8
|
Python
| false
| false
| 20,533
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from uhd_restpy.base import Base
from uhd_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class CustomStep(Base):
"""Allows to configure the parameters for the Custom TestCustom Step test.
The CustomStep class encapsulates a list of customStep resources that are managed by the user.
A list of resources can be retrieved from the server using the CustomStep.find() method.
The list can be managed by using the CustomStep.add() and CustomStep.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'customStep'
_SDM_ATT_MAP = {
'ForceApplyQTConfig': 'forceApplyQTConfig',
'InputParameters': 'inputParameters',
'Mode': 'mode',
'Name': 'name',
}
_SDM_ENUM_MAP = {
'mode': ['existingMode', 'newMode'],
}
def __init__(self, parent, list_op=False):
super(CustomStep, self).__init__(parent, list_op)
@property
def LearnFrames(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.quicktest.learnframes_979aa024a8c6ecab4da280f24c4bd0e2.LearnFrames): An instance of the LearnFrames class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.quicktest.learnframes_979aa024a8c6ecab4da280f24c4bd0e2 import LearnFrames
if len(self._object_properties) > 0:
if self._properties.get('LearnFrames', None) is not None:
return self._properties.get('LearnFrames')
return LearnFrames(self)._select()
@property
def PassCriteria(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.quicktest.passcriteria_187edfaffc98596c486494bddee6f495.PassCriteria): An instance of the PassCriteria class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.quicktest.passcriteria_187edfaffc98596c486494bddee6f495 import PassCriteria
if len(self._object_properties) > 0:
if self._properties.get('PassCriteria', None) is not None:
return self._properties.get('PassCriteria')
return PassCriteria(self)._select()
@property
def Results(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.quicktest.results_1c1d3ae4bbabf5d1fa604da0fbec8169.Results): An instance of the Results class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.quicktest.results_1c1d3ae4bbabf5d1fa604da0fbec8169 import Results
if len(self._object_properties) > 0:
if self._properties.get('Results', None) is not None:
return self._properties.get('Results')
return Results(self)._select()
@property
def TestConfig(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.quicktest.testconfig_e75d4c7d47ecac53df9ceb22747a20d4.TestConfig): An instance of the TestConfig class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.quicktest.testconfig_e75d4c7d47ecac53df9ceb22747a20d4 import TestConfig
if len(self._object_properties) > 0:
if self._properties.get('TestConfig', None) is not None:
return self._properties.get('TestConfig')
return TestConfig(self)._select()
@property
def TrafficSelection(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.quicktest.trafficselection_6f2d1ffa44a1ec0777728af1fe5dc2f8.TrafficSelection): An instance of the TrafficSelection class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.quicktest.trafficselection_6f2d1ffa44a1ec0777728af1fe5dc2f8 import TrafficSelection
if len(self._object_properties) > 0:
if self._properties.get('TrafficSelection', None) is not None:
return self._properties.get('TrafficSelection')
return TrafficSelection(self)
@property
def ForceApplyQTConfig(self):
# type: () -> bool
"""
Returns
-------
- bool: Apply QT config
"""
return self._get_attribute(self._SDM_ATT_MAP['ForceApplyQTConfig'])
@ForceApplyQTConfig.setter
def ForceApplyQTConfig(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['ForceApplyQTConfig'], value)
@property
def InputParameters(self):
# type: () -> str
"""
Returns
-------
- str: Input Parameters
"""
return self._get_attribute(self._SDM_ATT_MAP['InputParameters'])
@InputParameters.setter
def InputParameters(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['InputParameters'], value)
@property
def Mode(self):
# type: () -> str
"""
Returns
-------
- str(existingMode | newMode): Test mode
"""
return self._get_attribute(self._SDM_ATT_MAP['Mode'])
@Mode.setter
def Mode(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Mode'], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Test name
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
def update(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
# type: (bool, str, str, str) -> CustomStep
"""Updates customStep resource on the server.
Args
----
- ForceApplyQTConfig (bool): Apply QT config
- InputParameters (str): Input Parameters
- Mode (str(existingMode | newMode)): Test mode
- Name (str): Test name
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
# type: (bool, str, str, str) -> CustomStep
"""Adds a new customStep resource on the server and adds it to the container.
Args
----
- ForceApplyQTConfig (bool): Apply QT config
- InputParameters (str): Input Parameters
- Mode (str(existingMode | newMode)): Test mode
- Name (str): Test name
Returns
-------
- self: This instance with all currently retrieved customStep resources using find and the newly added customStep resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained customStep resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
# type: (bool, str, str, str) -> CustomStep
"""Finds and retrieves customStep resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve customStep resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all customStep resources from the server.
Args
----
- ForceApplyQTConfig (bool): Apply QT config
- InputParameters (str): Input Parameters
- Mode (str(existingMode | newMode)): Test mode
- Name (str): Test name
Returns
-------
- self: This instance with matching customStep resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of customStep data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the customStep resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def Apply(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the apply operation on the server.
Applies the specified Quick Test.
apply(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('apply', payload=payload, response_object=None)
def ApplyAsync(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the applyAsync operation on the server.
applyAsync(async_operation=bool)
--------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyAsync', payload=payload, response_object=None)
def ApplyAsyncResult(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[bool, None]
"""Executes the applyAsyncResult operation on the server.
applyAsyncResult(async_operation=bool)bool
------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns bool:
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyAsyncResult', payload=payload, response_object=None)
def ApplyITWizardConfiguration(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the applyITWizardConfiguration operation on the server.
Applies the specified Quick Test.
applyITWizardConfiguration(async_operation=bool)
------------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyITWizardConfiguration', payload=payload, response_object=None)
def GenerateReport(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the generateReport operation on the server.
        Generate a PDF report for the last successful test run.
generateReport(async_operation=bool)string
------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: This method is asynchronous and has no return value.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('generateReport', payload=payload, response_object=None)
def Run(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the run operation on the server.
Starts the specified Quick Test and waits for its execution to finish.
        The IxNetwork model allows for multiple method signatures with the same name while Python does not.
run(async_operation=bool)list
-----------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
run(InputParameters=string, async_operation=bool)list
-----------------------------------------------------
- InputParameters (str): The input arguments of the test.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('run', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the start operation on the server.
Starts the specified Quick Test.
        The IxNetwork model allows for multiple method signatures with the same name while Python does not.
start(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(InputParameters=string, async_operation=bool)
---------------------------------------------------
- InputParameters (str): The input arguments of the test.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the stop operation on the server.
Stops the currently running Quick Test.
stop(async_operation=bool)
--------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
def WaitForTest(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the waitForTest operation on the server.
Waits for the execution of the specified Quick Test to be completed.
waitForTest(async_operation=bool)list
-------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('waitForTest', payload=payload, response_object=None)
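def _example_usage(quick_test):
    # Illustrative helper added for documentation purposes; it is not part of the
    # generated API. `quick_test` is assumed to be the parent quicktest node of a
    # connected uhd_restpy session -- that accessor is defined outside this file.
    # Only methods defined above (add, find, Run) are used.
    step = quick_test.CustomStep.add(Name='custom-step-1', Mode='newMode', ForceApplyQTConfig=True)
    found = quick_test.CustomStep.find(Name='^custom-step-1$')  # regex match; anchored for an exact hit per find()
    results = step.Run()  # runs the quick test and waits for it to finish
    return found, results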
|
[
"andy.balogh@keysight.com"
] |
andy.balogh@keysight.com
|
36b07b88f0974c367727cfea1f47bf2556260b8e
|
7c7f9c64ddeb0f8f169ecc52bc2ce8e60a350472
|
/Airbnb_pricer/model/models/__init__.py
|
aa26de62c176d3bccfe8e6b680d4e1713127d31d
|
[] |
no_license
|
moe18/full-stack-ml-projects
|
0fecd34ee93c24871e0b7221131807088c899756
|
a1db2c6769c2e3ba078866c87fe4f6b6cbad34d4
|
refs/heads/master
| 2022-09-15T02:12:27.594149
| 2020-06-04T13:31:57
| 2020-06-04T13:31:57
| 269,367,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21
|
py
|
"""Model modules."""
|
[
"mordechaichabot@Mordechais-MacBook-Air.local"
] |
mordechaichabot@Mordechais-MacBook-Air.local
|
26ca64a684cdd38a638de21d89583c4080b77706
|
91bb3c4e91b90af8340ef9120132e6e7dd3b947f
|
/ticket/test_view.py
|
51289d49a98d1b38500e044d70be958c210f3039
|
[] |
no_license
|
Code-Institute-Submissions/Support-Software-Inc
|
74a6eb5d343769e52e3e1c0dced7831dc62b8f30
|
b854f2293593ff492e511d4b504eded5bab2add0
|
refs/heads/master
| 2021-05-18T04:48:41.653527
| 2020-03-29T19:22:02
| 2020-03-29T19:22:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,326
|
py
|
'''
Test file for Ticket views
'''
from django.contrib.auth.models import User
from django.test import Client, TestCase
from authentication.models import MyUser
class TestTicketViews(TestCase):
'''
Suite of tests for the Ticket model
'''
def setUp(self):
'''
Setup MyUser instance
'''
validuser = User.objects.create_user(
username='jacob',
email='jacob@…',
password='top_secret'
)
validuser.save()
self.validuser = MyUser.objects.create(
user=User.objects.get(pk=validuser.id),
role='USR'
)
self.client = Client()
#####################
# DASHBOARD
#####################
def test_get_dashboard_not_logged_in(self):
'''
Ensure that the return code for the dashboard
is 302 if the user is not signed in
'''
page = self.client.get("/dashboard")
self.assertEqual(page.status_code, 302)
def test_get_dashboard_logged_in(self):
'''
Ensure that the return code for the dashboard
is 200 if the user is signed in
'''
self.client.login(username='jacob', password='top_secret')
page = self.client.get("/dashboard")
self.assertEqual(page.status_code, 200)
|
[
"jameslowe241@gmail.com"
] |
jameslowe241@gmail.com
|
5f86eda4ff367ce24dccdd3d9112e5d7c56a9821
|
f571f604bf6f467d0325a9d2ee0f3032f045b260
|
/adjusted_goals_analysis.py
|
614baee864b7b04c8ccfd03b35c18a63d76c89bd
|
[
"MIT"
] |
permissive
|
leaffan/pynhldb
|
307fab2cda3a99a6273bb0cbb81bcec5efb2966c
|
73586d924060ccf92b0384346a5fc7013dde0b0d
|
refs/heads/master
| 2022-11-27T06:07:48.170879
| 2022-11-14T11:43:10
| 2022-11-14T11:43:10
| 78,532,400
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,955
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
import argparse
from analysis._goals_per_game import retrieve_goals_per_season
from analysis._goals_per_game import calculate_adjustment_factors
from analysis._goal_leaders import retrieve_career_leaders
from analysis._goal_leaders import retrieve_yearly_top
from analysis._adjust_goals import retrieve_and_adjust_goal_totals
from utils import prepare_logging
prepare_logging(log_types=['screen'])
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Adjusting individual goal scoring totals in dependance" +
"of league-wide scoring rate.")
parser.add_argument(
'steps',
metavar='processing_steps',
help='Processing step(s) to conduct.',
choices=['1', '2', '3', 'all'])
# TODO: add arguments for goal scoring leader retrieval, i.e. maximum top
# position threshold or minimum career season total
args = parser.parse_args()
setup_steps = args.steps
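    # Example invocations (assumed from the choices above):
    #   python adjusted_goals_analysis.py all   -> runs steps 1-3 in sequence
    #   python adjusted_goals_analysis.py 2     -> only collects the goal-scoring leaders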
goals_per_season_path = os.path.join(
"analysis", "goals_per_season.json")
goal_leaders_path = os.path.join(
"analysis", "career_goal_leaders.json")
adjusted_goal_data_path = os.path.join(
"analysis", "adjusted_goal_data.json")
# retrieving goals per season and season adjustment factors
if setup_steps in ['1', 'all']:
season_data = retrieve_goals_per_season(1917, 2017)
calculate_adjustment_factors(season_data)
open(goals_per_season_path, 'w').write(
json.dumps(season_data, sort_keys=True, indent=2))
# retrieving goal scoring leaders
if setup_steps in ['2', 'all']:
career_goal_leaders = list()
yearly_top = list()
# retrieving all players with at least 300 career goals
career_goal_leaders = retrieve_career_leaders(300)
# retrieving top five goalscorers per season
yearly_top = retrieve_yearly_top(5, 1917, 2017)
# retrieving urls to player pages for goal-scoring career leaders
career_leaders_urls = [d['url'] for d in career_goal_leaders]
# creating new list of all goal-scoring leaders
goal_leaders = career_goal_leaders
# adding goal-scoring per season leaders to list of all goal-scoring
# leaders if they are not yet a part of it
for yearly_leader in yearly_top:
if yearly_leader['url'] not in career_leaders_urls:
goal_leaders.append(yearly_leader)
open(goal_leaders_path, 'w').write(
json.dumps(goal_leaders, indent=2))
# adjusting goal scoring totals according to goals scored per season
if setup_steps in ['3', 'all']:
adjusted_goal_data = retrieve_and_adjust_goal_totals(
goal_leaders_path, goals_per_season_path)
open(adjusted_goal_data_path, 'w').write(
json.dumps(adjusted_goal_data, sort_keys=True, indent=2))
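# Example invocation (illustrative): run every processing step in order with
#     python adjusted_goals_analysis.py all
# or a single step with, e.g., `python adjusted_goals_analysis.py 2`.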
|
[
"leaffan@gmx.net"
] |
leaffan@gmx.net
|
fecfa9414c8e6724a1669ff02fbb1293b2e03231
|
8fcae139173f216eba1eaa01fd055e647d13fd4e
|
/.history/scraper_20191220162402.py
|
a911c5737fce871af11f9e6df21d169327a70b5e
|
[] |
no_license
|
EnriqueGalindo/backend-web-scraper
|
68fdea5430a0ffb69cc7fb0e0d9bcce525147e53
|
895d032f4528d88d68719838a45dae4078ebcc82
|
refs/heads/master
| 2020-11-27T14:02:59.989697
| 2019-12-21T19:47:34
| 2019-12-21T19:47:34
| 229,475,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,367
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module docstring: One line description of what your program does.
There should be a blank line in between description above, and this
more detailed description. In this section you should put any caveats,
environment variable expectations, gotchas, and other notes about running
the program. Author tag (below) helps instructors keep track of who
wrote what, when grading.
"""
__author__ = "Enrique Galindo"
# Imports go at the top of your file, after the module docstring.
# One module per import line. These are for example only.
import sys
import requests
import re
import pprint
from html.parser import HTMLParser
regex_email = r'''(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])'''
regex_phone = r'''(1?\W*([2-9][0-8][0-9])\W*([2-9][0-9]{2})\W*([0-9]{4})(\se?x?t?(\d*))?)'''
class MyHTMLParser(HTMLParser):
a_list = []
def handle_starttag(self, tag, attrs):
link_list = []
if tag == 'a' and "http://":
for attr, value in attrs:
if attr == 'href' and value.startswith("http"):
self.a_list.append(value)
                if attr == 'src' and value.startswith("http"):
                    # this branch was left unfinished in the source; assumed intent
                    # is to collect 'src' URLs the same way as 'href' links
                    self.a_list.append(value)
def main(args):
"""Main function is declared as standalone, for testability"""
good_phone_list = []
url = args[0]
response = requests.get(url)
response.raise_for_status()
url_list = re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', response.text)
email_list = set(re.findall(regex_email, response.text))
bad_phone_list = set(re.findall(regex_phone, response.text))
for number in bad_phone_list:
good_phone_list.append(number[1] + number[2] + number[3])
print(email_list)
pprint.pprint(good_phone_list)
parser = MyHTMLParser()
parser.feed(response.text)
pprint.pprint(parser.a_list)
if __name__ == '__main__':
"""Docstring goes here"""
main(sys.argv[1:])
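# Example invocation (illustrative; the script name is whatever this file is
# saved as, and any reachable URL works as the single positional argument):
#     python scraper.py https://example.com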
|
[
"egalindo@protonmail.com"
] |
egalindo@protonmail.com
|
a7d0334fa16307ad59aca5e1fc4c491510487be5
|
1be7f4cef4693d115be8ec53ee3ad303579a0f37
|
/SQLite/sqlite_demo.py
|
dd6ed1537b7fd8a050ecae485ad06af557e7337c
|
[] |
no_license
|
schehat/python_snippets
|
fab511d415bbf1dc6ec06c2c0cbbde1b3411c1f0
|
93b74b8be446fc951e60945c73b29ce44c6b0c16
|
refs/heads/main
| 2023-08-04T22:21:23.303000
| 2021-09-19T08:50:23
| 2021-09-19T08:50:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,130
|
py
|
import os
import sqlite3
from employee import Employee
os.chdir(os.path.dirname(__file__))
# :memory: possible which runs on RAM, good for testing
conn = sqlite3.connect("employees.db")
# with the cursor sql commands can be executed
c = conn.cursor()
# c.execute(
# """Create Table employees(
# first text,
# last text,
# pay integer
# )"""
# )
# c.execute("INSERT INTO employees VALUES('Schehat', 'Abdel Kader', 0)")
emp_1 = Employee("John", "Doe", 60000)
emp_2 = Employee("Arthur", "Doe", 70000)
# if only 1 argument tuple still required and a comma like: (<arg>,)
# c.execute("INSERT INTO employees VALUES(?, ?, ?)", (emp_1.first, emp_1.last, emp_1.pay))
# # another option
# c.execute(
# "INSERT INTO employees VALUES(:first, :last, :pay)",
# {"first": emp_2.first, "last": emp_2.last, "pay": emp_2.pay},
# )
# conn.commit()
c.execute("SELECT * FROM employees")
# c.fetchone(), c.fetchmany(<insert how many>) returns list, c.fetchall()
print(c.fetchall())
# commits current transaction
conn.commit()
def insert_emp(emp):
with conn:
c.execute(
"INSERT INTO employees VALUES(:first, :last, :pay)",
{"first": emp.first, "last": emp.last, "pay": emp.pay},
)
def get_emps_by_lastname(last):
c.execute("SELECT * FROM employees WHERE last = :last", {"last": last})
return c.fetchall()
def update_pay(emp, pay):
with conn:
c.execute(
"""
UPDATE employees SET pay = :pay WHERE first = :first AND last = :last""",
{"first": emp.first, "last": emp.last, "pay": pay},
)
def remove_emp(emp):
with conn:
c.execute(
"""
DELETE FROM employees WHERE first = :first AND last = :last""",
{"first": emp.first, "last": emp.last},
)
emp_3 = Employee("Maria", "Doe", 40000)
# insert_emp(emp_3)
# print(get_emps_by_lastname(emp_3.last))
update_pay(emp_1, 90000)
remove_emp(emp_2)
c.execute("SELECT * FROM employees")
print(c.fetchall())
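# Illustrative follow-up (not part of the original snippet): the named-placeholder
# helper defined above can be reused here to inspect the surviving 'Doe' rows
# after the update and delete calls.
print(get_emps_by_lastname(emp_1.last))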
conn.close()
|
[
"“schehat2000@live.de”"
] |
“schehat2000@live.de”
|
623adc6589da313688c6c4a0cbb0df1e70d47d73
|
a47c11905907cb76d5c32382383d9e2b00f24599
|
/exercises/guided_tutorials/mazesforprogrammers/common/mask.py
|
b83ff0d6d1696ef3981dcb4cc4cafd6f711d54e9
|
[] |
no_license
|
tetrismegistus/minutia
|
9ea7db3c7e9f164c83a8cc3f082000fd894fb55b
|
51d0d41740701ef117598ef3e00c99e208ee5ca8
|
refs/heads/master
| 2022-12-06T15:16:59.968911
| 2020-07-22T21:42:47
| 2020-07-22T21:42:47
| 160,570,462
| 12
| 1
| null | 2022-11-22T05:16:44
| 2018-12-05T19:51:17
|
Python
|
UTF-8
|
Python
| false
| false
| 2,781
|
py
|
from random import randint
from PIL import Image
from common.grids.grid import Rectangle
class Mask:
def __init__(self, grid_size: Rectangle):
self.rows = grid_size.w
self.cols = grid_size.h
self.bits = [[None for c in range(self.cols)] for r in range(self.rows)]
self.states = []
def __getitem__(self, tup):
row, col = tup
vrow = row in range(0, self.rows)
vcol = col in range(0, len(self.bits[row - 1]))
return self.bits[row][col] if vrow and vcol else False
def __setitem__(self, tup, state):
row, col = tup
self.bits[row][col] = state
def __iter__(self):
for r in self.bits:
yield r
def count_enabled_bits(self):
return sum([r.count(True) for r in self.bits])
def get_random_enabled_bit(self, value=None):
# will only ever return an enabled bit
while True:
row = randint(0, self.rows - 1)
col = randint(0, self.cols - 1)
if value:
if self[row, col] and self[row, col] == value:
return row, col
else:
if self[row, col]:
return row, col
def each_row(self):
for row in range(self.rows):
yield self.bits[row]
def each_bit(self):
for row in self.each_row():
for bit in row:
if bit:
yield bit
@staticmethod
def from_text_file(file):
with open(file, 'r') as f:
lines = f.read().splitlines()
while len(lines[-1]) < 1:
lines.pop()
rows = len(lines)
cols = len(lines[0])
mask = Mask(Rectangle(rows, cols))
for row in range(mask.rows):
for col in range(mask.cols):
if lines[row][col] == "X":
mask[row, col] = False
else:
mask[row, col] = True
return mask
@staticmethod
def from_png(file):
image = Image.open(file)
w, h = image.size
mask = Mask(Rectangle(w, h))
states = []
for row in range(mask.rows):
for col in range(mask.cols):
if image.getpixel((row, col)) == 0:
mask[row, col] = False
else:
pixel = image.getpixel((row, col))
hexval = Mask.rgb2hex(pixel[0], pixel[1], pixel[2])
states.append(hexval)
mask[row, col] = hexval
states = list(set(states))
mask.states = states
return mask
@staticmethod
def rgb2hex(r, g, b):
return '#{:02x}{:02x}{:02x}'.format(r, g, b)
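# Usage sketch (illustrative, not part of the original module; 'mask.txt' is a
# hypothetical file in which 'X' marks disabled cells and anything else is enabled):
if __name__ == '__main__':
    example = Mask.from_text_file('mask.txt')
    print(example.count_enabled_bits())
    print(example.get_random_enabled_bit())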
|
[
"madducks@gmail.com"
] |
madducks@gmail.com
|
23e7485c634591ed2bb85ccf70c390ca88d47ecf
|
762e45c322763f11b20fb5c33aae9365142566a4
|
/exchanges/bxinth.py
|
1c6e8624cc8a40e89255d2d802c9f47da6467598
|
[] |
no_license
|
BlackSuns/MTScript
|
6a5a898ba626bada57f754df4eec881a6f9a838b
|
7d0b4524e4b9ead8f4ecb391c96acc878a9a41d9
|
refs/heads/master
| 2021-09-17T17:55:21.229159
| 2018-01-10T02:00:31
| 2018-01-10T02:00:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,289
|
py
|
import os
from .base import BaseExchange
class BxinthExchange(BaseExchange):
def __init__(self):
super().__init__()
self.exchange = 'bxinth'
self.exchange_id = 51
self.base_url = 'https://bx.in.th/api'
self.ticker_url = '/'
self.alias = ''
self.with_name = False
self.exchange_conf = os.path.abspath(os.path.dirname(__file__)) +\
'/exchange_conf/{}.json'.format(self.exchange)
def get_remote_data(self):
url = '{}{}'.format(
self.base_url, self.ticker_url)
return self.ticker_callback(self.get_json_request(url))
def ticker_callback(self, result):
return_data = []
for k in result.keys():
symbol = result[k]['secondary_currency']
anchor = result[k]['primary_currency']
if anchor and symbol:
pair = '{}/{}'.format(symbol.upper(), anchor.upper())
return_data.append({
'pair': pair,
'price': result[k]['last_price'],
'volume_anchor': result[k]['last_price'] * result[k]['volume_24hours'],
'volume': result[k]['volume_24hours'],
})
# print(return_data)
return return_data
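# Usage sketch (illustrative, not part of the original module): fetch the BX.in.th
# tickers and print the normalised pairs; needs network access and whatever setup
# BaseExchange expects (e.g. the exchange_conf JSON referenced above).
if __name__ == '__main__':
    exchange = BxinthExchange()
    for ticker in exchange.get_remote_data():
        print(ticker['pair'], ticker['price'], ticker['volume'])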
|
[
"larryrun80@gmail.com"
] |
larryrun80@gmail.com
|
8f341b240ac473bc82679b6ce68af47a730fe897
|
2c036ee6317aa5e1db0c6978c89b1ec6a08757b5
|
/desktop_noti_for_stats_of_corona/main.py
|
8bde75a67ef5e831bb0085e6e3d3d6369d60ebab
|
[] |
no_license
|
Chayan199916/play-with-python
|
3d648f39424418b6f6369887c8f5cd4e0d178684
|
36087556fc3e2e6b081e87c238b1575465989436
|
refs/heads/master
| 2023-05-09T08:25:24.665913
| 2020-11-19T14:17:53
| 2020-11-19T14:17:53
| 260,530,968
| 3
| 0
| null | 2021-06-02T01:39:47
| 2020-05-01T18:30:51
|
Python
|
UTF-8
|
Python
| false
| false
| 244
|
py
|
import win10toast
def notify_me(title, message):
toaster = win10toast.ToastNotifier()
toaster.show_toast(title, message, duration = 10)
if __name__ == "__main__":
notify_me("Chayan", "Let's stop the spread of this virus together")
|
[
"swapnomoy199916@gmail.com"
] |
swapnomoy199916@gmail.com
|
310cc9dbe57bb11fcf6618986a485917ab67391a
|
e39245418c39eb083634f1ec22daf9458ac938f3
|
/class_func_inst.py
|
bab75f7a554b36899496173b6b0e6840438fc862
|
[] |
no_license
|
lbs1991/py
|
c5b2426978a622a1d82dd3751e9c5914052ead1e
|
ac5ca5b5df418c1a364238422c6d82f9ea69ecb6
|
refs/heads/master
| 2021-01-01T18:12:10.416736
| 2014-10-20T07:15:12
| 2014-10-20T07:15:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
#!/usr/bin/python27
class MyClass():
def __init__(self,x):
self.v = x
def func1(self):
print("haha",self.v)
inst1 = MyClass(3)
inst1.func1()
|
[
"root@centos65.localdomain"
] |
root@centos65.localdomain
|
15f727855f6ed7f316a260a6a348715848ad38a2
|
18b14893e5305c3f2d03b4a1cb5707ff3415f709
|
/api/views/fileupload.py
|
227cb4d15de58e0af8797f8c9768c1dfcfbeb136
|
[] |
no_license
|
cqnu/ImageAI_RESTful_example
|
04dbad5ab44cb85a7363f71ab162d924c42235dc
|
c9cc1c8561c34bee3cc802b3311f761e8e029e64
|
refs/heads/master
| 2023-08-30T23:24:26.020475
| 2020-01-29T06:27:33
| 2020-01-29T06:27:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,914
|
py
|
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse, HttpResponseNotFound
from rest_framework.views import APIView
from rest_framework.decorators import api_view
from rest_framework.response import Response
import json
from django.core.files.storage import FileSystemStorage
from django.conf import settings as djangoSettings
import os
import random
import string
from api.apps import *
import base64
from api.saveBase64Image import *
from PIL import Image
import datetime
import time
from imageai.Detection import ObjectDetection
import tensorflow as tf
####################################################################################################
execution_path = os.getcwd()
@api_view(["POST"])
def UploadFile(request):
try:
if request.method != 'POST' or request.FILES['uploadfile'] == None:
return Response(
                {'Error': "Missing parameter"},
status=ERROR_CODE, content_type="application/json")
#start count time
start_time = time.time()
#get image data
uploadfile = request.FILES['uploadfile']
#set path
upload_folder_abs = os.path.join(djangoSettings.MEDIA_ROOT)
random_name = GenerateRandomName(uploadfile.name)
saved_file_abs = os.path.join(upload_folder_abs, random_name)
#save file
fs = FileSystemStorage(upload_folder_abs)
filename = fs.save(random_name , uploadfile)
#output path
tempFilePath = upload_folder_abs + "\\temp.jpg"
#load image to detect object
strResult = ""
#objects = detector.detectObjectsFromImage(input_image='D:\\img.jpg', output_image_path='D:\\output.jpg', minimum_percentage_probability=30)
g_detector = ObjectDetection()
g_detector.setModelTypeAsRetinaNet()
g_detector.setModelPath( "resnet50_coco_best_v2.0.1.h5")
g_detector.loadModel()
objects = g_detector.detectObjectsFromImage(input_image=saved_file_abs, output_image_path=tempFilePath)
for eachObject in objects:
strResult += eachObject["name"] + " : " + str(eachObject["percentage_probability"]) + "\n"
print(eachObject["name"] + " : " + str(eachObject["percentage_probability"]) )
#convert output image to base64
base64Str =""
with open(tempFilePath, "rb") as image_file:
base64Str = base64.b64encode(image_file.read())
#remove tempfile
os.remove(saved_file_abs)
os.remove(tempFilePath)
elapsed_time = time.time() - start_time
#result is json object
result = {
"num" : len(objects),
"text" : strResult,
"img": base64Str,
"elapsed" : elapsed_time}
return Response(
result,
status=SUCCESS_CODE, content_type="application/json")
except Exception as e:
return Response(
{'Error': str(e)},
status=ERROR_CODE, content_type="application/json")
####################################################################################################
@api_view(["POST"])
def uploadbase64(request):
try:
folder_name = request.POST.get("folder_name")
file_name = request.POST.get("file_name")
images = request.POST.get("imgs")
uploaded_file_urls = SaveBase64ToImg(folder_name, file_name, images)
respond = {"url" : uploaded_file_urls}
return Response(
{respond},
content_type="application/json",
status=SUCCESS_CODE)
except Exception as e:
print(str(e))
return Response(
{'Error': str(e)},
status=ERROR_CODE,
content_type="application/json"
)
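####################################################################################################
# Client-side sketch (illustrative; the host, route, and 'sample.jpg' are assumptions,
# not taken from the project's urls.py): the UploadFile view reads the image from
# request.FILES['uploadfile'] and returns the detections plus a base64 output image.
if __name__ == '__main__':
    import requests
    with open('sample.jpg', 'rb') as f:
        reply = requests.post('http://localhost:8000/api/upload/',
                              files={'uploadfile': f})
    print(reply.json())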
|
[
"vohungvi27@gmail.com"
] |
vohungvi27@gmail.com
|
e885b7756e8bc74d885b5349d2691775dc8e7f1a
|
15c50cd776872bde5cd13ad644039ba279c51387
|
/lambda_reboot.py
|
4f691f3ae5390136feee3369f46c3382f7d5a31e
|
[] |
no_license
|
hoopajube/workspaces
|
fa65bd68d19d175ca24f869117ecd1b3f0e583f8
|
dc0671b011ce2540e6e2ec03cebd966c24f2f693
|
refs/heads/master
| 2020-05-21T14:37:49.662249
| 2019-05-02T04:34:36
| 2019-05-02T04:34:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,197
|
py
|
import boto3
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
client = boto3.client('workspaces')
# --- Main handler ---
def lambda_handler(event, context):
logger.info(event)
username = event['currentIntent']['slots']['username']
directoryid = event['currentIntent']['slots']['directory']
response = client.describe_workspaces(
DirectoryId= directoryid,
UserName= username
)
if not response['Workspaces']:
msg = 'A WorkSpace does not exist for the specified user.'
else:
workspaceid = response['Workspaces'][0]['WorkspaceId']
response = client.reboot_workspaces(
RebootWorkspaceRequests=[
{
'WorkspaceId': workspaceid
},
]
)
msg = 'Your Amazon WorkSpace ID {} is being rebooted!'.format(workspaceid)
answer = {}
answer["dialogAction"] = {}
answer["dialogAction"]["type"] = "ElicitIntent"
answer["dialogAction"]["message"] = {}
answer["dialogAction"]["message"]["contentType"] = "PlainText"
answer["dialogAction"]["message"]["content"] = msg
return answer
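# Illustrative local invocation (not part of the original handler): the function
# expects an Amazon Lex event carrying 'username' and 'directory' slots; the slot
# values below are placeholders, and a real call needs AWS credentials plus an
# existing WorkSpaces directory.
if __name__ == '__main__':
    sample_event = {
        'currentIntent': {
            'slots': {'username': 'jdoe', 'directory': 'd-1234567890'}
        }
    }
    print(lambda_handler(sample_event, None))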
|
[
"alexanderpereyra@Alexanders-MacBook-Pro.local"
] |
alexanderpereyra@Alexanders-MacBook-Pro.local
|
a19c0f6fae3e6e7291510b544a360b1760e0497a
|
8ba9e6ba4d48dbee1c55d3f2493626a5e0211ed2
|
/reservation/migrations/0011_auto_20190510_1649.py
|
8df7b85129c2c4abd32453e0e07d87a204729515
|
[] |
no_license
|
sarajoha/reservation-project
|
a23fe370e922f723fc7dbbff03eb1b9dc7c814f1
|
27ebd3d5e92213d1fde0b230186c27a449e869d7
|
refs/heads/master
| 2020-05-20T15:17:53.169840
| 2019-05-15T16:36:45
| 2019-05-15T16:36:45
| 185,641,570
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
# Generated by Django 2.2 on 2019-05-10 21:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reservation', '0010_auto_20190510_1612'),
]
operations = [
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(blank=True, max_length=255, null=True, unique=True),
),
]
|
[
"sjcc333@gmail.com"
] |
sjcc333@gmail.com
|