blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
01a4792e02f8e7a6911af7ab76554e70e0471d8b | 694d57c3e512ce916269411b51adef23532420cd | /leetcode/215kth_largest_element.py | 58f31f67f601ead30791f3de73429cab51b47b5f | [] | no_license | clovery410/mycode | 5541c3a99962d7949832a0859f18819f118edfba | e12025e754547d18d5bb50a9dbe5e725fd03fd9c | refs/heads/master | 2021-05-16T02:46:47.996748 | 2017-05-10T23:43:50 | 2017-05-10T23:43:50 | 39,235,141 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,139 | py | from random import randint
class Solution(object):
    """Quickselect solution for LeetCode 215 (kth largest element)."""

    def partition(self, start, end, p, nums):
        """Three-way (Dutch national flag) partition of nums[start..end]
        around the pivot value nums[p], in place.

        After the call the slice is arranged as
        [< pivot][== pivot][> pivot]; returns (lo, hi), the inclusive
        index range occupied by the pivot-equal elements.
        """
        pivot = nums[p]
        # Move the pivot to the front so the scan below starts uniformly.
        nums[p], nums[start] = nums[start], nums[p]
        # Invariant: nums[start..k-1] < pivot, nums[k..i-1] == pivot,
        # nums[i..j-1] > pivot, nums[j..end] not yet examined.
        i = j = k = start
        while j <= end:
            if nums[j] == pivot:
                nums[i], nums[j] = nums[j], nums[i]
                i += 1
            elif nums[j] < pivot:
                nums[i], nums[j] = nums[j], nums[i]
                nums[k], nums[i] = nums[i], nums[k]
                i += 1
                k += 1
            j += 1
        return k, i - 1

    def findKthLargest(self, nums, k):
        """Return the kth largest element of nums (1-based k) using
        randomized quickselect, expected O(n) time.

        Fix vs. the original: the search bounds s/e are now narrowed after
        every partition, so each round partitions only the candidate
        region instead of re-partitioning from the array edges.
        """
        n = len(nums)
        target = n - k  # index the answer occupies once nums is sorted
        s, e = 0, n - 1
        while True:
            eq_lo, eq_hi = self.partition(s, e, randint(s, e), nums)
            if eq_lo <= target <= eq_hi:
                return nums[eq_lo]
            if target > eq_hi:
                s = eq_hi + 1  # answer lies in the "> pivot" region
            else:
                e = eq_lo - 1  # answer lies in the "< pivot" region
| [
"seasoul410@gmail.com"
] | seasoul410@gmail.com |
f6e2e1219bb19b1ec4f59e29fe4b3593536497ff | 804fcf4a3cfba431be701b214cc383a930e08ff3 | /pracCsv/solution.py | fd3a6ce93cc43238d9e366f48713febe8cd45a95 | [] | no_license | showkat2203/APIwithPython | ea7f70a885480ddae1880af5520425682fe283f1 | c2b6f6cf4a2e4d61a94bd11132daeafa7c86d1f7 | refs/heads/master | 2023-06-14T11:45:40.625423 | 2021-07-11T15:46:18 | 2021-07-11T15:46:18 | 106,168,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | import csv
import json
# Convert 'test.csv' into a JSON document of the form
# {"number_of_data": N, "file_name": ..., "data": [ {row}, ... ]}
# with per-field type coercion: 'id' -> int, 'isStudent' -> bool.
# (Removed the unused row_count / result / fields placeholders and the
# redundant str()/format() round-trips of the original.)
fileName = 'test.csv'
with open(fileName, 'r') as inputFile:
    csvReader = csv.reader(inputFile)
    fields = next(csvReader)  # header row gives the JSON keys
    resList = []
    for row in csvReader:
        resDict = {}
        # zip pairs each cell with its header name, replacing the manual
        # cnt index (and avoiding an IndexError on over-long rows).
        for field, raw in zip(fields, row):
            if field == 'id':
                value = int(raw)
            elif field == 'isStudent':
                value = bool(int(raw))
            else:
                value = raw
            resDict[field] = value
        resList.append(resDict)
    # line_num counts the header too, hence the -1 below.
    numberOfData = csvReader.line_num
ansDic = {"number_of_data": numberOfData - 1, "file_name": fileName}
ansDic.update({"data": resList})
ansDic = json.dumps(ansDic)
print(ansDic)
"showkat@alemcloud.com"
] | showkat@alemcloud.com |
733b3c00a643028efa8182beee51f32d547342d9 | 4363b7428a09543a9e6c2a3e1009e98be8a2ab43 | /models/__init__.py | 013c5a42bc95a280f8f21f6eafe38928848e8752 | [] | no_license | DelfimRocha/E-learning | 2284cf75da979af5c8d260599255870ff7fc021d | c05c48725850ecfa80bac6a5a2a2e93a8699629f | refs/heads/main | 2023-07-15T13:28:57.877641 | 2021-09-01T08:44:12 | 2021-09-01T08:44:12 | 401,990,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | from . import courses
from . import program
from . import instructor
from . import student
from . import enrolment | [
"dalfimrocha@yahoo.com"
] | dalfimrocha@yahoo.com |
f83ded7bf3caf726e79346e44ecd420f736fa43e | 10465a8e4771b221fb1f4a92b804f8f2cf031edf | /server/app/service/job_runner.py | 82938c4f7cceab7a81d085557060f64e12a16aef | [
"MIT"
] | permissive | raspihats/EvePnP | fd51ba35ebef2838e87108b617b873254863adc3 | 2961d759554c38609f0f431c012ea8d38016f347 | refs/heads/master | 2023-01-24T21:17:42.789363 | 2019-06-18T11:46:57 | 2019-06-18T11:46:57 | 164,684,352 | 0 | 0 | MIT | 2023-01-14T00:16:23 | 2019-01-08T15:59:56 | Python | UTF-8 | Python | false | false | 9,741 | py | import gevent
from ..dao import axis_dao, head_dao, feeders_dao, packages_dao
from .controllers import controllers_service
from .actuators import actuators_service
class DictKeysToObjectProps(object):
    """Read-only adapter exposing a dict's keys as object attributes,
    e.g. DictKeysToObjectProps({'id': 'x'}).id == 'x'."""

    def __init__(self, config):
        # Defensive copy: later mutation of the caller's dict cannot
        # change what this adapter reports.
        self._config = dict(config)

    def __getattr__(self, attribute):
        # Fix: __getattr__ must raise AttributeError (not KeyError) for
        # unknown names so hasattr()/getattr(obj, name, default) work.
        try:
            return self._config[attribute]
        except KeyError:
            raise AttributeError(attribute)
def add_points(p1, p2):
    """Return a new point dict: a copy of p1 with p2's coordinates added
    key-wise; keys present only in p2 are taken as-is."""
    merged = dict(p1)
    for axis, value in p2.items():
        merged[axis] = merged[axis] + value if axis in merged else value
    return merged
def copy_keys(source, dest, omit):
    """Strip the keys listed in omit from source (in place), then copy
    every remaining source entry into dest that dest does not already
    define. Both source and dest are mutated."""
    for key in omit:
        source.pop(key, None)
    missing = [key for key in source if key not in dest]
    for key in missing:
        dest[key] = source[key]
def run_func(target, name, *args, **kwargs):
    """Compile and run a user-supplied code snippet stored in ``target``.

    ``target`` is a dict-like record whose ``code`` entry defines a
    function called ``name``; the snippet is exec'd with the machine
    services (controllers, actuators, axis configs) injected as globals,
    then the function is called with *args/**kwargs and its result
    returned.
    """
    # Expose every configured axis as an attribute-style object, keyed by id.
    axis = {}
    for axis_config in axis_dao.get_list():
        axis[axis_config['id']] = DictKeysToObjectProps(axis_config)
    # The snippet sees its own record's keys plus the service singletons.
    _globals = dict(target)
    _globals['controllers'] = controllers_service.controllers
    _globals['actuators'] = actuators_service.actuators
    _globals['axis'] = axis
    _locals = {}
    # NOTE(review): exec of stored code — assumes 'code' is authored by a
    # trusted operator, not untrusted input; confirm.
    exec(target['code'], _globals, _locals)
    func = _locals[name]
    return func(*args, **kwargs)
class Package(object):
    """Read-only attribute view over a package configuration dict
    (e.g. exposes ``height`` for place-point adjustment)."""

    def __init__(self, config):
        self._config = dict(config)

    def __getattr__(self, attribute):
        # Fix: translate KeyError into AttributeError so attribute
        # protocol helpers (hasattr, getattr-with-default) behave.
        try:
            return self._config[attribute]
        except KeyError:
            raise AttributeError(attribute)
class Feeder(object):
    """Wraps a feeder DB record; computes pick points via the feeder's
    stored ``code`` snippet and tracks the remaining component count."""

    def __init__(self, config):
        self._config = config

    def get_point(self):
        """Return the next pick point and decrement the feeder count.

        The record's ``code`` must define ``get_point(point, count, size)``.
        The decremented count is persisted back through feeders_dao so it
        survives restarts.
        """
        _globals = {}
        _locals = {}
        exec(self._config['code'], _globals, _locals)
        get_point = _locals['get_point']
        # Pass a copy of the base point so the snippet cannot mutate the record.
        point = get_point(
            dict(self._config['point']), self._config['count'], self._config['size'])
        self._config['count'] -= 1
        feeders_dao.update(self._config['id'], self._config)
        return point
class Head(object):
    """Pick-and-place head composed of one or more placement heads (nozzles).

    pick()/place() delegate to the first free/loaded nozzle; FullException
    and EmptyException signal that no nozzle was available.
    """

    class FullException(Exception):
        # Raised by pick() when every placement head already holds a component.
        def __init__(self, *args, **kwargs):
            Exception.__init__(self, *args, **kwargs)

    class EmptyException(Exception):
        # Raised by place() when no placement head holds a component.
        def __init__(self, *args, **kwargs):
            Exception.__init__(self, *args, **kwargs)

    class PlacementHead(object):
        """A single nozzle: runs the configured park/pick/place snippets."""

        def __init__(self, config):
            self._config = config
            self._component = None  # component currently held, or None

        def isEmpty(self):
            return self._component == None

        def park(self):
            print('Parking placement head: {}'.format(self._config['id']))
            run_func(self._config, 'park', ['x', 'y', 'z', 'r'])

        def pick(self, component):
            # Shift the component's pick point by this nozzle's offset.
            point = add_points(
                component['pick_point'], self._config['offset'])
            print('Pick point: {}'.format(point))
            run_func(self._config, 'pick', point)
            self._component = component

        def place(self):
            point = add_points(
                self._component['place_point'], self._config['offset'])
            rotation = self._component['rotation']
            package = Package(packages_dao.get(self._component['package']))
            # Raise the drop height by the package thickness.
            point['z'] += package.height
            print('Place point: {} {}'.format(point, rotation))
            run_func(self._config, 'place', point, rotation)
            placed_component = self._component
            self._component = None
            return placed_component

    def __init__(self):
        self._config = head_dao.get_first()
        self.placement_heads = []
        for ph_config in self._config['placement_heads']:
            # migrate some config keys from head to placement head
            copy_keys(self._config, ph_config, omit=[
                      'placement_heads', 'cameras'])
            self.placement_heads.append(Head.PlacementHead(ph_config))

    def park(self):
        # Park every nozzle.
        for ph in self.placement_heads:
            ph.park()

    def pick(self, component):
        # Pick with the first free nozzle; FullException if none is free.
        for ph in self.placement_heads:
            if ph.isEmpty():
                ph.pick(component)
                return
        raise self.FullException()

    def place(self):
        # Place from the first loaded nozzle; EmptyException if all are empty.
        for ph in self.placement_heads:
            if not ph.isEmpty():
                return ph.place()
        raise self.EmptyException()

    def isLoaded(self):
        # True when every nozzle holds a component.
        for ph in self.placement_heads:
            if ph.isEmpty():
                return False
        return True

    def isEmpty(self):
        # True when no nozzle holds a component.
        for ph in self.placement_heads:
            if not ph.isEmpty():
                return False
        return True
class State(object):
    # Lifecycle states reported in JobRunnerService.status['state'].
    IDLE = 'idle'
    RUN = 'run'
    PAUSE = 'pause'
class JobRunnerService(object):
    """Runs pick-and-place jobs on a gevent greenlet.

    start() spawns _run(); pause()/stop() set flags that the _run loop
    polls between pick/place cycles. ``status`` is the externally
    observable progress record.
    """

    class FeederNotFoundError(Exception):
        # Raised when no loaded feeder matches a component's package/value.
        def __init__(self, *args, **kwargs):
            Exception.__init__(self, *args, **kwargs)

    class PickError(Exception):
        # NOTE(review): declared but not raised in this file.
        def __init__(self, *args, **kwargs):
            Exception.__init__(self, *args, **kwargs)

    class PlaceError(Exception):
        # NOTE(review): declared but not raised in this file.
        def __init__(self, *args, **kwargs):
            Exception.__init__(self, *args, **kwargs)

    def __init__(self):
        self._head = Head()
        self._pause = False  # request flag polled by _run
        self._stop = False   # request flag polled by _run
        self.status = {
            'state': State.IDLE,
            'job_id': "",
            'boards_ids': [],
            'components_ids': []
        }

    def start(self, job):
        """Start ``job`` on a fresh greenlet, or resume it if paused."""
        if self.status['state'] == State.IDLE:
            gevent.spawn(lambda: self._run(job))
        elif self.status['state'] == State.PAUSE:
            self.status['state'] = State.RUN
        else:
            raise Exception("Job '{}' is already running".format(
                self.status['job_id']))

    def pause(self, id):
        """Request a pause of the running job ``id`` (no-op otherwise)."""
        if self.status['state'] == State.RUN:
            if self.status['job_id'] == id:
                self._pause = True

    def stop(self, id):
        """Request a stop of the running or paused job ``id``."""
        if self.status['state'] == State.RUN or self.status['state'] == State.PAUSE:
            if self.status['job_id'] == id:
                self._stop = True

    def _select_feeder(self, component):
        """Pick a non-empty feeder for ``component``: exact value match
        first, then partial matches; FeederNotFoundError if none fit."""
        from tinydb import Query
        # search for matching packages
        q = Query()['component']['package'] == component['package']
        feeders = feeders_dao.search(q)
        # discard empty feeders
        feeders = [x for x in feeders if x['count'] > 0]
        # search for matching values
        # search for perfect value match
        feeders_pvm = [x for x in feeders if x['component']
                       ['value'] == component['value']]
        if len(feeders_pvm) > 0:
            return Feeder(feeders_pvm[0])
        # search for value match
        feeders_vm = [x for x in feeders if component['value']
                      in x['component']['value']]
        for feeder in feeders_vm:
            # Prefer a feeder whose first value token matches exactly.
            if component['value'] == feeder['component']['value'].split(' ')[0]:
                return Feeder(feeder)
        if len(feeders_vm) > 0:
            return Feeder(feeders_vm[0])
        raise self.FeederNotFoundError("Can't find feeder for: {} {} {}".format(
            component['id'], component['value'], component['package']))

    def _run(self, job):
        """Greenlet body: iterate boards/components, picking and placing
        in batches of up to one full head load, honoring pause/stop."""
        self._stop = False
        self._pause = False
        print("Job '{}' started".format(job['id']))
        self.status = {
            'state': State.RUN,
            'job_id': job['id'],
            'boards_ids': [],
            'components_ids': []
        }
        self._head.park()
        actuators_service.actuators['VacuumPump'].set(True)
        # build new boards list including only the ones that should be placed
        boards = [x for x in job['boards'] if x['operation'] == 'place']
        for board in boards:
            self.status['boards_ids'].append(board['id'])
            self.status['components_ids'] = []
            # build new components list including only the ones that should be placed
            components = [x for x in job['components']
                          if x['operation'] == 'place']
            # sort components, group them using packages and values
            components.sort(key=lambda x: '{} {}'.format(
                x['package'], x['value']))
            while len(components):
                if self._stop:
                    break
                if self._pause:
                    self._pause = False
                    self.status['state'] = State.PAUSE
                if self.status['state'] == State.RUN:
                    # pick multiple components
                    while not self._head.isLoaded() and len(components):
                        component = components[0]
                        try:
                            feeder = self._select_feeder(component)
                            component['pick_point'] = feeder.get_point()
                            component['place_point'] = add_points(
                                board['origin'], component['offset'])
                            self._head.pick(component)
                        except self.FeederNotFoundError as e:
                            # Best-effort: skip unfeedable components, keep going.
                            print(e)
                        finally:
                            components.remove(component)
                    # place multiple components
                    while not self._head.isEmpty():
                        placed_component = self._head.place()
                        self.status['components_ids'].append(
                            placed_component['id'])
                # Yield to the gevent loop so pause/stop requests get through.
                gevent.sleep(0.1)
        actuators_service.actuators['VacuumPump'].set(False)
        self._head.park()
        if self._stop:
            self._stop = False
            print("Job '{}' stopped".format(job['id']))
        else:
            print("Job '{}' finished".format(job['id']))
        self.status['state'] = State.IDLE
job_runner_service = JobRunnerService()
| [
"florin.costa83@gmail.com"
] | florin.costa83@gmail.com |
e31378c2345f786daf6d340cfcfcacf070cdc24a | 5656b4c420b284d2c0443138e02246ecc0484c9c | /api/socialmedia/Posts/models/post_model.py | f7606be465f83ad2dcc837e5b62a1af63a722854 | [] | no_license | Pbasnal/flask-twitterclone | 94e3b52f74b9bf957cd0797034372687918d0d38 | 1d7d2da28448ab710848aa760e2899c3a65ee150 | refs/heads/master | 2023-04-30T15:35:42.768869 | 2021-05-25T14:33:01 | 2021-05-25T14:56:43 | 370,718,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | from flask_mongoengine.wtf import model_form
from api.models.MongoEngine import dbcontext
from logging_setup.logger import ApiLogger
class Post(dbcontext.Document):
    """MongoEngine document representing a social-media post."""
    content = dbcontext.StringField(required=True)  # post body text
    # NOTE(review): presumably sentiment scores keyed by label — confirm with callers.
    sentiments = dbcontext.DictField()
    created_by = dbcontext.StringField(max_length=50)  # author identifier
    created_at = dbcontext.DateTimeField()  # creation timestamp (set by the caller)
"pankajbasnal17@gmail.com"
] | pankajbasnal17@gmail.com |
29acead529edc72fe4dc197e2a785872c12c51e0 | 3cedc2e0867a53ed2f36e01624f369693d1a050d | /rnn/rnn88_females_original.py | f4f2cc56c161654c5daaec73a43be8329ccc3589 | [] | no_license | lkpiel/mastersthesis | a471d8c6a5881e13599b22965dd3f437c83fc967 | 71c723b435b347d2805e159b6e10828f89541e98 | refs/heads/master | 2023-02-20T11:57:45.266361 | 2018-05-06T11:17:43 | 2018-05-06T11:17:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,742 | py | #! /usr/bin/python3
import sys
print(sys.version)
import sys
import pandas
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential, load_model
from keras.layers import Dense, Input, Dropout, Average, Merge, Layer, Conv2D, MaxPooling2D, GlobalAveragePooling2D, GlobalAveragePooling2D, AveragePooling2D, Reshape, BatchNormalization
from keras.optimizers import SGD, Adam
from keras import initializers
from keras import regularizers
from keras import constraints
from keras import backend as K
from IPython.core.debugger import Tracer
from keras.layers import Masking, LSTM, TimeDistributed, Bidirectional, Flatten
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras import callbacks
from keras.constraints import maxnorm, unitnorm
from sklearn.preprocessing import OneHotEncoder
from keras.models import Model
import pdb
import keras
#FORMAT DATA
#ONE HOT ENCODES A GIVEN COLUMN
def onehot(x): return np.array(OneHotEncoder().fit_transform(x.values.reshape(-1,1)).todense())
def format(data):
    """Normalize a raw export DataFrame in place: drop the trailing
    'Unnamed: 605' column and map the categorical labels to integer
    codes (AgeGroup ag1/ag2/ag3 -> 0/1/2, Gender m/f -> 0/1)."""
    del data['Unnamed: 605']
    encodings = (
        ('AgeGroup', (('ag1', 0), ('ag2', 1), ('ag3', 2))),
        ('Gender', (('m', 0), ('f', 1))),
    )
    for column_name, mapping in encodings:
        for label, code in mapping:
            mask = data[column_name] == label
            data.loc[mask, column_name] = code
    return data
def smooth_labels(y, smooth_factor):
    '''Apply label smoothing in place to a matrix of one-hot row vectors.

    Each row is scaled by (1 - smooth_factor) and then has
    smooth_factor / num_classes added to every entry, so rows still sum
    to 1. Returns the (mutated) input matrix.

    # label smoothing ref: https://www.robots.ox.ac.uk/~vgg/rg/papers/reinception.pdf
    '''
    assert len(y.shape) == 2
    if not (0 <= smooth_factor <= 1):
        raise Exception('Invalid label smoothing factor: ' + str(smooth_factor))
    y *= 1 - smooth_factor
    y += smooth_factor / y.shape[1]
    return y
def dot_product(x, kernel):
    """
    Backend-agnostic dot product, compatible with both Theano and
    TensorFlow Keras backends.
    Args:
        x (): input
        kernel (): weights
    Returns: the dot product of x with kernel.
    """
    if K.backend() != 'tensorflow':
        return K.dot(x, kernel)
    return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)
class AttentionWithContext(Layer):
    """
    Attention operation, with a context/query vector, for temporal data.
    Supports Masking.
    Follows the work of Yang et al. [https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf]
    "Hierarchical Attention Networks for Document Classification"
    by using a context vector to assist the attention
    # Input shape
        3D tensor with shape: `(samples, steps, features)`.
    # Output shape
        2D tensor with shape: `(samples, features)`.
    How to use:
    Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
    The dimensions are inferred based on the output shape of the RNN.
    Note: The layer has been tested with Keras 2.0.6
    Example:
        model.add(LSTM(64, return_sequences=True))
        model.add(AttentionWithContext())
        # next add a Dense layer (for classification/regression) or whatever...
    """
    def __init__(self,
                 W_regularizer=None, u_regularizer=None, b_regularizer=None,
                 W_constraint=None, u_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')
        self.W_regularizer = regularizers.get(W_regularizer)
        self.u_regularizer = regularizers.get(u_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.u_constraint = constraints.get(u_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        super(AttentionWithContext, self).__init__(**kwargs)
    def build(self, input_shape):
        # Debug leftover: prints the inferred input shape at build time.
        print(input_shape)
        assert len(input_shape) == 3
        # W: (features, features) projection; b: bias; u: context vector.
        self.W = self.add_weight((input_shape[-1], input_shape[-1],),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        if self.bias:
            self.b = self.add_weight((input_shape[-1],),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        self.u = self.add_weight((input_shape[-1],),
                                 initializer=self.init,
                                 name='{}_u'.format(self.name),
                                 regularizer=self.u_regularizer,
                                 constraint=self.u_constraint)
        super(AttentionWithContext, self).build(input_shape)
    def compute_mask(self, input, input_mask=None):
        # do not pass the mask to the next layers
        return None
    def call(self, x, mask=None):
        # uit = tanh(W·x + b); ait = uit·u; a = softmax over time steps.
        uit = dot_product(x, self.W)
        if self.bias:
            uit += self.b
        uit = K.tanh(uit)
        ait = dot_product(uit, self.u)
        a = K.exp(ait)
        # apply mask after the exp. will be re-normalized next
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            a *= K.cast(mask, K.floatx())
        # in some cases especially in the early stages of training the sum may be almost zero
        # and this results in NaN's. A workaround is to add a very small positive number epsilon to the sum.
        # a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        a = K.expand_dims(a)
        # Attention-weighted sum over the time dimension.
        weighted_input = x * a
        return K.sum(weighted_input, axis=1)
    def compute_output_shape(self, input_shape):
        return input_shape[0], input_shape[-1]
import tensorflow as tf
import keras
from keras import backend as K
def age_group_accuracy(y_true, y_pred):
    """Keras metric: fraction of predictions whose rounded age falls in
    the same bucket as the true age (0-12 -> 0, 13-14 -> 1, 15+ -> 2)."""
    # Integer-age -> group lookup table; padded far beyond any real age.
    array = np.array([0]*13 + [1]*2 + [2]*10000000)
    age_to_group = K.variable(value=array, dtype='int32', name='age_to_group')
    # Round the float ages and map each through the lookup table.
    ages_true = tf.gather(age_to_group, tf.cast(tf.rint(y_true), tf.int32))
    ages_pred = tf.gather(age_to_group, tf.cast(tf.rint(y_pred), tf.int32))
    return K.mean(K.equal(ages_true, ages_pred), axis=-1)
train_data = pandas.read_csv("/storage/tanel/child_age_gender/exp/ivectors_2048/train/export.csv", sep=" ")
val_data = pandas.read_csv("/storage/tanel/child_age_gender/exp/ivectors_2048/dev/export.csv", sep=" ")
test_data = pandas.read_csv("/storage/tanel/child_age_gender/exp/ivectors_2048/test/export.csv", sep=" ")
train_data = format(train_data)
val_data = format(val_data)
test_data = format(test_data)
trainFemaleIndexes = train_data.index[train_data['Gender'] == 1].tolist()
valFemaleIndexes = val_data.index[val_data['Gender'] == 1].tolist()
testFemaleIndexes = test_data.index[test_data['Gender'] == 1].tolist()
train_data_females = train_data[train_data['Gender'] == 1]
val_data_females = val_data[val_data['Gender'] == 1]
test_data_females = test_data[test_data['Gender'] == 1]
test_data_males = test_data[test_data['Gender'] == 0]
train_labels_females = onehot(train_data_females['AgeGroup'])
val_labels_females = onehot(val_data_females['AgeGroup'])
test_labels_females = onehot(test_data_females['AgeGroup'])
test_labels_males = onehot(test_data_males['AgeGroup'])
train_i_vectors_females = train_data_females.iloc[:, 5:].as_matrix()
val_i_vectors_females = val_data_females.iloc[:, 5:].as_matrix()
test_i_vectors_females = test_data_females.iloc[:, 5:].as_matrix()
test_i_vectors_males = test_data_males.iloc[:, 5:].as_matrix()
#testMaleIndexes = test_data_i_vectors.index[test_data_i_vectors['Gender'] == 1].tolist()
print ("LABELS LOADED")
train_data_padded = np.load("/storage/hpc_lkpiel/data/fbank_train_data_padded.npy", encoding="bytes")[..., np.newaxis]
val_data_padded = np.load("/storage/hpc_lkpiel/data/fbank_val_data_padded.npy", encoding="bytes")[..., np.newaxis]
test_data_padded = np.load("/storage/hpc_lkpiel/data/fbank_test_data_padded.npy", encoding="bytes")[..., np.newaxis]
train_data_padded = train_data_padded[np.array(trainFemaleIndexes)]
val_data_padded = val_data_padded[np.array(valFemaleIndexes)]
test_data_padded = test_data_padded[np.array(testFemaleIndexes)]
print ("DATA LOADED")
################################################################################################
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.7,
patience=2, min_lr=0.0001, verbose=1)
kernel_regularizer = regularizers.l2(0.0001)
input_layer = Input(shape=(1107, 20, 1), name="lstm_input")
x = Conv2D(128, (3, 20), activation='relu', border_mode='valid')(input_layer)
x = Conv2D(128, (5, 1), strides=(3,1), activation='relu', border_mode='valid')(x)
x = Conv2D(128, (5, 1), strides=(3,1), activation='relu', border_mode='valid')(x)
x = Reshape((-1, 128))(x)
x = Bidirectional(LSTM(128, return_sequences=True))(x)
x = AttentionWithContext()(x)
output_layer_1 = Dense(3, activation='softmax', name='group_output')(x)
output_layer_2 = Dense(2, name='gender')(x)
print ("model_88 BUILT")
model_88 = Model(inputs=[input_layer], outputs=[output_layer_1, output_layer_2])
model_88.compile(loss={'group_output':'categorical_crossentropy', 'gender':'categorical_crossentropy'},
optimizer=SGD(0.01),
metrics={'group_output':'accuracy','gender':'accuracy'})
print ("model_88 COMPILED")
checkpoint = ModelCheckpoint(filepath='/models/model_88.hdf5', monitor='val_group_output_acc', save_best_only=True)
'''
history = model_88.fit([train_data_padded],
[train_labels_age_group, train_labels_gender],
validation_data=([val_data_padded], [val_labels_age_group, val_labels_gender]),
epochs=30,
verbose=1,
batch_size=64,
callbacks=[checkpoint]
)
np.save('../history/history_model_88.npy', history.history)
modelHistory = np.load('../history/history_model_88.npy').item()
print ("HISTORY: ")
print (modelHistory)
'''
model_88.load_weights('/models/model_88.hdf5')
val_predictions = model_88.predict(val_data_padded)
print ("VAL PREDICTED")
test_predictions = model_88.predict(test_data_padded)
print ("TEST PREDICTED")
np.save('/home/hpc_lkpiel/predictions/val/model_88_females_original_age_group.npy', val_predictions[0])
print ("VAL SAVED")
np.save('/home/hpc_lkpiel/predictions/test/model_88_females_original_age_group.npy', test_predictions[0])
print ("TEST SAVED")
'''
np.save('/home/hpc_lkpiel/predictions/val/model_88_gender.npy', val_predictions[1])
print ("VAL SAVED")
np.save('/home/hpc_lkpiel/predictions/test/model_88_gender.npy', test_predictions[1])
print ("TEST SAVED")
'''
'''
valResult = model_88.evaluate([val_data_padded, val_i_vectors], [val_labels_age_group, val_labels_gender])
testResult = model_88.evaluate([test_data_padded, test_i_vectors], [test_labels_age_group, test_labels_gender] )
print (str(valResult))
print (str(testResult))
print ("WROTE TO FILE model_88")
'''
######################################## | [
"leo.piel@pipedrive.com"
] | leo.piel@pipedrive.com |
ad992d9f127c3b1b9a1220f553dd8f520df52a66 | 4dad1ae37b41ae6c96f3345dee1d9d1cbdf42b7b | /searchElementRotatedSorted/main.py | e99e851deefba668b9178d0eb38496366e6b3341 | [] | no_license | RoaRobinson97/python | 011f9dcdd4414d312780e9f11e1143404161d9e7 | 449ecccecb445a3d49bd3820a855dade1d759ddb | refs/heads/master | 2023-01-29T22:53:45.122752 | 2020-12-07T16:10:41 | 2020-12-07T16:10:41 | 295,473,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | # Python Program to search an element
# in a sorted and pivoted array
# Searches an element key in a pivoted
# sorted array arrp[] of size n
def pivotedBinarySearch(arr, n, key):
    """Search ``key`` in arr[0:n], a sorted array rotated about an
    unknown pivot; return its index, or -1 when absent.

    Assumes all elements are distinct.
    """
    pivot = findPivot(arr, 0, n - 1)
    # No pivot found: the array was never rotated, search it whole.
    if pivot == -1:
        return binarySearch(arr, 0, n - 1, key)
    if arr[pivot] == key:
        return pivot
    # arr[0..pivot] is the left sorted run; keys >= arr[0] live there.
    if key >= arr[0]:
        return binarySearch(arr, 0, pivot - 1, key)
    return binarySearch(arr, pivot + 1, n - 1, key)
def findPivot(arr, low, high):
    """Return the index of the pivot (the largest element, where the
    rotation wraps) in arr[low:high+1]; -1 for an empty range.

    Iterative rewrite of the original tail recursion; same search path.
    """
    while True:
        if high < low:
            return -1
        if high == low:
            return low
        mid = (low + high) // 2
        # Pivot sits where the order breaks between neighbors.
        if mid < high and arr[mid] > arr[mid + 1]:
            return mid
        if mid > low and arr[mid] < arr[mid - 1]:
            return mid - 1
        # Otherwise recurse into whichever half still contains the break.
        if arr[low] >= arr[mid]:
            high = mid - 1
        else:
            low = mid + 1
#Standard Binary Search
def binarySearch(arr, low, high, key):
if high < low:
return -1
# low + (high - low)/2;
mid = int((low + high) / 2)
if key == arr[mid]:
return mid
if key > arr[mid]:
return binarySearch(arr, (mid + 1), high,
key);
return binarySearch(arr, low, (mid - 1), key);
# Demo: locate key 3 in a sorted-then-rotated array (expected index: 8).
arr1 = [5, 6, 7, 8, 9, 10, 1, 2, 3]
n = len(arr1)
key = 3
print("Index of the element is : ",
      pivotedBinarySearch(arr1, n, key))
"roa.robinson97@gmail.com"
] | roa.robinson97@gmail.com |
2651fa548b6cced6e67fddac0af3a0c00d304481 | e3eb265f16c0cb41e2d7cbe17ea2523803812247 | /py-scoring/src/ai/h2o/sparkling/ml/__init__.py | 2b8785f3ffa0d4d0b155c8cc6bf2f109f14bc6cf | [
"Apache-2.0"
] | permissive | joscani/sparkling-water | 973cea0c96c8cab1505ed452c78ef2615d4efe2a | 71a0cdb763c3b2f8f4cd8cb9a4c3d1874df05089 | refs/heads/master | 2023-07-11T03:35:28.186554 | 2021-08-18T16:23:21 | 2021-08-18T16:23:21 | 257,917,903 | 0 | 0 | Apache-2.0 | 2020-04-22T13:59:54 | 2020-04-22T13:59:53 | null | UTF-8 | Python | false | false | 1,349 | py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ai.h2o.sparkling.ml.models import H2OSupervisedMOJOModel, H2OTreeBasedSupervisedMOJOModel, H2OUnsupervisedMOJOModel, H2OTreeBasedUnsupervisedMOJOModel, H2OBinaryModel
from ai.h2o.sparkling.ml.models import H2OKMeansMOJOModel, H2OGLMMOJOModel, H2OGAMMOJOModel, H2OGBMMOJOModel, H2OXGBoostMOJOModel
from ai.h2o.sparkling.ml.models import H2ODeepLearningMOJOModel, H2ODRFMOJOModel, H2OIsolationForestMOJOModel, H2OPCAMOJOModel
from ai.h2o.sparkling.ml.models import H2OMOJOModel, H2OAlgorithmMOJOModel, H2OFeatureMOJOModel, H2OMOJOPipelineModel, H2OMOJOSettings
| [
"noreply@github.com"
] | noreply@github.com |
bdd26c536928ecc4169204488d28c7ea79fac6d1 | 6e0d8d91dd22e2275cd713822679d5cabbc9331a | /thespian/system/__init__.py | 2c69b0574347c752e0c9daedb76632e6daa45a22 | [
"MIT"
] | permissive | kquick/Thespian | 711712eb0a9ad3370f1013c8393cc461b9541dfe | dfc6d3e865c05f929328b85e98671a5c8fc3a54a | refs/heads/master | 2023-05-26T15:51:57.959690 | 2023-05-22T15:08:00 | 2023-05-22T15:08:00 | 78,292,621 | 203 | 32 | MIT | 2021-06-22T14:42:09 | 2017-01-07T17:18:27 | Python | UTF-8 | Python | false | false | 636 | py | # This module contains the various ActorSystemBase implementations
# upon which the ActorSystem operates.
from thespian.actors import ActorAddress, ActorSystemMessage, PoisonMessage
from thespian.system.addressManager import *
from thespian.system.messages.status import *
from thespian.system.messages.convention import *
def isInternalActorSystemMessage(msg):
    """True when ``msg`` (unwrapping a PoisonMessage first) is an
    ActorSystemMessage other than the status/poison types."""
    actual = msg.poisonMessage if isinstance(msg, PoisonMessage) else msg
    if not isinstance(actual, ActorSystemMessage):
        return False
    return not isinstance(actual, (Thespian_SystemStatus,
                                   Thespian_ActorStatus,
                                   PoisonMessage))
| [
"kquick@godaddy.com"
] | kquick@godaddy.com |
531ffd783fb26e6eb32629fb492934c669fbac42 | 46e01962d3eef258562b4b5e0204368a80eb2101 | /resources/site-packages/dns/rdtypes/ANY/EUI48.py | aa260e205d08298646a600c4789522285c6fd6d8 | [
"WTFPL"
] | permissive | elgatito/script.elementum.burst | fb4fe3f10f722822373e2739a9308130482d1734 | e37adca9634f644890673dee236a9c215c6744c1 | refs/heads/master | 2023-08-26T20:30:59.300868 | 2023-07-27T11:23:44 | 2023-07-27T11:23:44 | 111,373,790 | 108 | 181 | WTFPL | 2023-08-28T11:57:32 | 2017-11-20T06:59:52 | Python | UTF-8 | Python | false | false | 1,124 | py | # Copyright (C) 2015 Red Hat, Inc.
# Author: Petr Spacek <pspacek@redhat.com>
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED 'AS IS' AND RED HAT DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.euibase
class EUI48(dns.rdtypes.euibase.EUIBase):
    """EUI48 record
    @ivar fingerprint: 48-bit Extended Unique Identifier (EUI-48)
    @type fingerprint: string
    @see: rfc7043.txt"""
    # Binary length in octets, e.g. 0123456789ab (in hex).
    byte_len = 6  # 0123456789ab (in hex)
    # Text form is dash-separated hex pairs: 2 chars + 1 dash per byte,
    # minus the trailing dash, e.g. 01-23-45-67-89-ab.
    text_len = byte_len * 3 - 1  # 01-23-45-67-89-ab
"denis.kuzmenok@gmail.com"
] | denis.kuzmenok@gmail.com |
411a798533ad2659caf537d88c31d8b5872d0645 | 8b61c079a068f7f888e9a2683b95ca81b403a681 | /program/pizaid/ioserver.py | 0069f77acbdfa9c4bc81e583104e06a8b4117a93 | [
"BSD-2-Clause"
] | permissive | Pizaid/Pizaid-LCDPanel | 69faef0fe401a3fd943301ca7202853152152179 | e66b6d33afb9da273a823bfeb2f3bbb9d3d3faab | refs/heads/master | 2016-09-05T11:20:22.764960 | 2014-08-09T12:30:37 | 2014-08-09T12:30:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,261 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8 :
#
# Author: Makoto Shimazu <makoto.shimaz@gmail.com>
# URL: https://github.com/Pizaid
# License: 2-Clause BSD License
# Created: 2014-07-30
#
import threading
from utils import getch, tprint
from lcdcontroller import get_lcdcontroller
from threading import Timer
import RPi.GPIO as GPIO
def setupGPIO(pin, handler):
    """Configure a BCM pin as a pulled-up input and call ``handler`` on press.

    Buttons pull the line low, so the watched edge is FALLING; the 200 ms
    bouncetime debounces mechanical switch chatter.
    """
    GPIO.setup(pin, GPIO.IN, pull_up_down = GPIO.PUD_UP)
    GPIO.add_event_detect(pin, GPIO.FALLING, callback = handler, bouncetime=200)
class IOServer(threading.Thread):
    """Input thread: forwards GPIO button presses and stdin keys as
    navigation events to the LCD controller, and refreshes the display
    every 0.5 s via a self-rearming Timer."""
    # 6 7 8 9
    # l d u r
    # BCM pin numbers for the four direction buttons.
    uPin = 15 # u <-> d
    lPin = 27 # l <-r
    rPin = 14
    dPin = 18
    def __init__(self):
        threading.Thread.__init__(self)
        # tprint(self.currentState)
        GPIO.setmode(GPIO.BCM)
        setupGPIO(self.uPin, self.upButtonHandler)
        setupGPIO(self.lPin, self.leftButtonHandler)
        setupGPIO(self.rPin, self.rightButtonHandler)
        setupGPIO(self.dPin, self.downButtonHandler)
        self.quit = False  # set by run() on 'q'; stops the refresh timer
        self.lcd = get_lcdcontroller()
        self.timer = Timer(0.5, self.timeoutHandler)
    def run(self):
        # Blocking stdin loop: w/a/s/d navigate, space selects,
        # 'u' forces a redraw, 'q' quits.
        tprint("start ioserver")
        self.timer.start()
        lcd = self.lcd
        while True:
            c = getch()
            if (c == 'w'):
                lcd.up()
            elif (c == 'a'):
                lcd.left()
            elif (c == 's'):
                lcd.down()
            elif (c == 'd'):
                lcd.right()
            elif (c == ' '):
                lcd.center()
            elif (c == 'u'):
                lcd.updateDisplay()
            elif (c == 'q'):
                self.quit = True
                break
            else:
                tprint("Unknown Command")
    def upButtonHandler(self, pin = None):
        self.lcd.up()
    def downButtonHandler(self, pin = None):
        self.lcd.down()
    def leftButtonHandler(self, pin = None):
        self.lcd.left()
    def rightButtonHandler(self, pin = None):
        self.lcd.right()
    def timeoutHandler(self):
        # Refresh the display, then re-arm the timer until quit is set.
        self.lcd.updateDisplay()
        if not self.quit:
            self.timer = Timer(0.5, self.timeoutHandler)
            self.timer.start()
| [
"makoto.shimaz@gmail.com"
] | makoto.shimaz@gmail.com |
9c1f3e860dc2261ed09a22aa123382a828211c1a | 490186e345bdc0e2db1a3cb9a53f5e3e784a0d35 | /tutorials/blitz/neural_networks.py | c5a889d3d723f19f486722be161ebe60260365cf | [] | no_license | zephan-spencer/learning-nas | 8bf2b6b50df14eb838fd2589fbef2ff7228febb2 | 1ca5fa6ee05542142b13599c4e9541c9aa0ede4e | refs/heads/main | 2023-04-19T04:44:19.261960 | 2021-05-01T22:52:06 | 2021-05-01T22:52:06 | 363,273,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,430 | py | # Taken from a pytorch.org tutorial
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """LeNet-style CNN for single-channel 32x32 inputs, producing 10 logits."""

    def __init__(self):
        super(Net, self).__init__()
        # Two convolutional layers, 1 -> 6 -> 16 channels, 5x5 kernels each.
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Classifier head: after the conv/pool stack each image is a
        # 16 x 5 x 5 feature block, flattened into a 400-vector.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Run a (N, 1, 32, 32) batch through the network; returns (N, 10)."""
        # Conv -> ReLU -> 2x2 max-pool, twice.  A bare int `2` is shorthand
        # for a square (2, 2) pooling window.
        out = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        out = F.max_pool2d(F.relu(self.conv2(out)), 2)
        # Flatten all non-batch dimensions before the dense layers.
        out = out.view(-1, self.num_flat_features(out))
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)

    def num_flat_features(self, x):
        """Number of elements per sample (all dimensions except batch)."""
        per_sample_dims = x.size()[1:]
        count = 1
        for d in per_sample_dims:
            count *= d
        return count
# Demo script: inspect parameters, run a forward/backward pass, and take one
# optimization step -- first by hand, then with torch.optim.
net = Net()
print(net)
# Model Parameters
params = list(net.parameters())
print(len(params))
print(params[0].size()) # conv1's .weight
# Feed some data through the network
input = torch.randn(1, 1, 32, 32)
out = net(input)
print(out)
net.zero_grad()
out.backward(torch.randn(1, 10))
output = net(input)
target = torch.randn(10) # a dummy target, for example
target = target.view(1, -1) # make it the same shape as output
criterion = nn.MSELoss()
loss = criterion(output, target)
print(loss)
# Walk the autograd graph backwards from the loss.
print(loss.grad_fn) # MSELoss
print(loss.grad_fn.next_functions[0][0]) # Linear
print(loss.grad_fn.next_functions[0][0].next_functions[0][0]) # ReLU
net.zero_grad() # zeroes the gradient buffers of all parameters
print('conv1.bias.grad before backward')
print(net.conv1.bias.grad)
loss.backward()
print('conv1.bias.grad after backward')
print(net.conv1.bias.grad)
# Manual SGD update: w <- w - lr * grad
learning_rate = 0.01
for f in net.parameters():
    f.data.sub_(f.grad.data * learning_rate)
import torch.optim as optim
# create your optimizer
optimizer = optim.SGD(net.parameters(), lr=0.01)
# in your training loop:
optimizer.zero_grad() # zero the gradient buffers
output = net(input)
loss = criterion(output, target)
loss.backward()
optimizer.step() # Does the update
"number45zephan@gmail.com"
] | number45zephan@gmail.com |
b1bb1dfa462be18d9be81098971bfbdf1023cb30 | 7ce761781e7f5b57b2469adce459a71b4758694d | /env/lib/python2.7/site-packages/graphlab/toolkits/_internal/search/_search.py | 0870866d1721ce31a94ab4442179cea4425399d7 | [] | no_license | hophamtenquang/RecSys | c4fa18d1ba262670a284b2fba2ca97b882ef0f4c | 535472844a046cadd9230302da647a54afff95e8 | refs/heads/master | 2021-01-19T17:00:32.924064 | 2017-08-30T10:31:32 | 2017-08-30T10:31:32 | 101,031,687 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,660 | py | import sys as _sys
import graphlab as _gl
import graphlab.connect.main as glconnect
from graphlab.toolkits._internal_utils import _raise_error_if_not_sframe
from graphlab.toolkits._model import SDKModel as _SDKModel
from graphlab.toolkits._main import ToolkitError as _ToolkitError
from graphlab.toolkits._internal_utils import _toolkit_repr_print
from graphlab.util import _make_internal_url
from graphlab.util import _raise_error_if_not_of_type
from graphlab.util import _raise_error_if_not_of_type
def create(data, features=None,
           bm25_k1=1.5,
           bm25_b=0.75,
           tfidf_threshold=0.01,
           verbose=True):
    """
    Create a searchable index of text columns in an SFrame.

    Parameters
    ----------
    data : SFrame
        An SFrame containing at least one str column containing text that should
        be indexed.

    features : list of str
        A list of column names that contain text that should be indexed.
        Default: all str columns in the provided dataset.

    bm25_k1 : float
        Tuning parameter for the relative importance of term frequencies when
        computing the BM25 score between a query token and a document.

    bm25_b : float
        Tuning parameter to downweight scores of long documents when
        computing the BM25 score between a query token and a document.

    tfidf_threshold : float
        Tuning parameter to skip indexing words that have a TF-IDF score below
        this value.

    verbose : bool
        Controls whether or not to print progress during model creation.

    Returns
    -------
    out
        SearchModel

    See Also
    --------
    SearchModel.query

    References
    ----------
    Christopher D. Manning, Hinrich Schutze, and Prabhakar Raghavan.
    Introduction to information retrieval.
    http://nlp.stanford.edu/IR-book/pdf/irbookonlinereading.pdf

    Examples
    --------
    >>> import graphlab as gl
    >>> sf = gl.SFrame({'text': ['Hello my friend', 'I love this burrito']})
    >>> m = gl.toolkits._internal.search.create(sf)
    >>> print m.query('burrito')
    """
    # Input validation on data and features
    if features is None:
        features = _get_str_columns(data)
    _raise_error_if_not_of_type(data, [_gl.SFrame])
    _raise_error_if_not_of_type(features, [list])
    for f in features:
        if data[f].dtype() != str:
            raise _ToolkitError("Feature `%s` must be of type str" % f)
    # Store options that the native index is initialized with.
    options = {}
    options['bm25_b'] = bm25_b
    options['bm25_k1'] = bm25_k1
    options['tfidf_threshold'] = tfidf_threshold
    options['verbose'] = verbose
    options['features'] = features
    # Construct the native index proxy, build the index, and wrap it.
    proxy = _gl.extensions._SearchIndex()
    proxy.init_options(options)
    proxy.index(data)
    return SearchModel(proxy)
class SearchModel(_SDKModel):
    """
    SearchModel objects can be used to search text data for a given query.

    This model should not be constructed directly. Instead, use
    :func:`graphlab.toolkits._internal.search.create` to create an
    instance of this model.
    """
    def __init__(self, model_proxy=None):
        super(SearchModel, self).__init__(model_proxy)
        self.__name__ = 'search'
    def _get_wrapper(self):
        # Build a callable that re-wraps a unity proxy in a SearchModel;
        # used by the save/load machinery.
        _class = self.__proxy__.__class__
        proxy_wrapper = self.__proxy__._get_wrapper()
        def model_wrapper(unity_proxy):
            model_proxy = proxy_wrapper(unity_proxy)
            return SearchModel(model_proxy)
        return model_wrapper
    @classmethod
    def _get_queryable_methods(cls):
        '''Returns a list of method names that are queryable through Predictive
        Service'''
        return {'query': {}}
    def get_current_options(self):
        # Options the underlying index was created with (bm25_k1, bm25_b, ...).
        return self.__proxy__.get_current_options()
    def __str__(self):
        return self.__repr__()
    def _get_summary_struct(self):
        """
        Returns a structured description of the model, including (where relevant)
        the schema of the training data, description of the training data,
        training statistics, etc.

        Returns
        -------
        sections : list (of list of tuples)
            A list of summary sections.
            Each section is a list.
            Each item in a section list is a tuple of the form:
            ('<label>','<field>')
        section_titles: list
            A list of section titles.
            The order matches that of the 'sections' object.
        """
        data_fields = [
            ('Number of documents', 'num_documents'),
            ('Average tokens/document', 'average_document_length')]
        param_ranking_fields = [
            ('BM25 k1', 'bm25_k1'),
            ('BM25 b', 'bm25_b'),
            ('TF-IDF threshold', 'tfidf_threshold')]
        index_fields = [
            ('Number of unique tokens indexed', 'num_tokens'),
            ('Preprocessing time (s)', 'elapsed_processing'),
            ('Indexing time (s)', 'elapsed_indexing')]
        section_titles = ['Corpus',
                          'Indexing settings',
                          'Index']
        return ([data_fields,
                 param_ranking_fields,
                 index_fields],
                section_titles)
    def __repr__(self):
        (sections, section_titles) = self._get_summary_struct()
        return _toolkit_repr_print(self, sections,
                                   section_titles, width=32)
    def query(self, query, num_results=10,
              expansion_k=5,
              expansion_epsilon=0.1,
              expansion_near_match_weight=.5):
        """
        Search for text.

        Parameters
        ----------
        query: str
            A string of text.

        num_results : int
            The number of results to return.

        expansion_k : int
            Maximum number of nearest words to include from query token.

        expansion_epsilon : float
            Maximum distance to allow between query token and nearby word when
            doing query expansion. Must be between 0 and 1.

        expansion_near_match_weight : float
            Multiplier to use on BM25 scores for documents indexed via an
            approximate match with a given token. This will be used for each of
            the `expansion_k` words that are considered an approximate match.
            Must be between 0 and 1.

        Returns
        -------
        out: SFrame
            The rows of the original SFrame along with a `score` column
            which contains the BM25 score between this query and the row.

        Examples
        --------
        >>> import graphlab as gl
        >>> sf = gl.SFrame({'text': ['Hello my friend', 'I love this burrito']})
        >>> s = gl.search.create(sf, features=['text'])
        >>> s.query('burrito')
        """
        # `unicode` only exists on Python 2, hence the version check.
        if _sys.version_info.major == 2:
            _raise_error_if_not_of_type(query, [str, unicode])
        else:
            _raise_error_if_not_of_type(query, [str])
        # Whitespace tokenization of the query string.
        q = query.split(' ')
        results = self.__proxy__.query_index(q,
                                             expansion_k=expansion_k,
                                             expansion_epsilon=expansion_epsilon,
                                             expansion_near_match_weight=expansion_near_match_weight)
        results = self.__proxy__.join_query_result(results, method='default',
                                                   num_results=num_results)
        return results
def _get_str_columns(sf):
    """Return the names of all columns in *sf* whose element type is str."""
    str_columns = []
    for column in sf.column_names():
        if sf[column].dtype() == str:
            str_columns.append(column)
    return str_columns
| [
"hophamtenquang@gmail.com"
] | hophamtenquang@gmail.com |
98e494f9969bfbe0e38927f5f5a9e9da3f675862 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /tjMNAEgkNvM5eyEqJ_9.py | 009b0a6620c65bbe7a418ed56973cde1d60c4685 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | """
You are given two inputs:
1. An array of abbreviations.
2. An array of words.
Write a function that returns `True` if each abbreviation **uniquely
identifies** a word, and `False` otherwise.
### Examples
unique_abbrev(["ho", "h", "ha"], ["house", "hope", "happy"]) ➞ False
// "ho" and "h" are ambiguous and can identify either "house" or "hope"
unique_abbrev(["s", "t", "v"], ["stamina", "television", "vindaloo"]) ➞ True
unique_abbrev(["bi", "ba", "bat"], ["big", "bard", "battery"]) ➞ False
unique_abbrev(["mo", "ma", "me"], ["moment", "many", "mean"]) ➞ True
### Notes
Abbreviations will be a substring from `[0, n]` from the original string.
"""
def unique_abbrev(abbs, words):
    """Return True when every abbreviation uniquely identifies a word.

    An abbreviation identifies a word when the word starts with it; it is
    unique when it is a prefix of exactly one word.

    Bug fixed: the original hard-coded exactly three abbreviations
    (``al[0]+al[1]+al[2]``) and only compared the *total* number of
    prefix matches to 3 -- so e.g. one abbreviation matching two words
    plus one matching none was wrongly accepted.  This version works for
    any number of abbreviations/words.

    Args:
        abbs: list of candidate abbreviation strings.
        words: list of words the abbreviations should identify.

    Returns:
        bool: True iff each abbreviation is a prefix of exactly one word.
    """
    return all(
        sum(word.startswith(abb) for word in words) == 1
        for abb in abbs
    )
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
78bb3e33b198664933fcf0bc38618fc403aed04b | 2393a8fabee3d39bf1623e26c0313e3351caf814 | /python/study/IO/示例代码/nonblocking/nonblocking-client.py | bb4e00884ca6c2f8d4878a852bc366caf746f668 | [] | no_license | shidg/note | 5d3aaff9d1c6cf87b89513b7712638c9b808653c | d46aceaed64e3e2f854149f71f18fa92d650dc37 | refs/heads/master | 2023-05-26T16:14:51.715966 | 2023-05-19T02:08:36 | 2023-05-19T02:08:36 | 27,533,612 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | #!/usr/local/bin/python3
# -*- coding: utf-8 -*-#
'''
author: -- shidegang --
Created Time: 2019-08-28 10:43:17
'''
import socket
sk = socket.socket()
server_addr = ('127.0.0.1',9000)
sk.connect(server_addr)
while True:
sk.sendall('hello'.encode(encoding='utf8'))
data = sk.recv(1024)
print(data.decode(encoding='utf8')) | [
"shidg@feezu.cn"
] | shidg@feezu.cn |
17712dbfabbed2c84ca874770c52e2749ece99e5 | 0b8503f8fe659284edaf58ed88e0d80b08995206 | /security/apache/TestPenetration_Home.py | d8ecf592421287a49865bc4e667808785d945ca2 | [] | no_license | ahopgood/Python | bfd2f12efadfde94f8f569ce0bbf9018c39bbc5d | 7cbfe64aa39b1cb969a09cac0040bd77b6980309 | refs/heads/master | 2021-01-18T21:24:48.831454 | 2016-05-23T20:28:04 | 2016-05-23T20:28:04 | 47,199,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,241 | py | __author__ = 'Alexander Hopgood'
from apache import checkStaticPage
class test_Penetration_Home():
    """Smoke-tests liveness of a domain across a set of its subdomains."""

    def domain(self, domain, subdomains):
        """Probe ``http://<prefix><domain>`` for every entry in *subdomains*.

        *subdomains* maps a human-readable label to a host prefix (e.g.
        ``"www."`` or ``""``).  Returns a dict mapping each probed URL to
        the result of checkStaticPage().
        """
        results = dict()
        summary = "Checks for domain [" + domain + "]:"
        for label, prefix in subdomains.items():
            url = "http://" + prefix + domain
            status = checkStaticPage(url)
            summary = summary + " " + label + " " + str(status)
            results.setdefault(url, status)
        return results

    def combineDictionaries(self, dict1, dict2):
        """Return a new dict containing dict1's entries overlaid by dict2's."""
        merged = {}
        merged.update(dict1)
        merged.update(dict2)
        return merged
if __name__ == '__main__':
    # Python 2 script: probes each domain at a set of subdomains and prints
    # the per-URL liveness results.
    test = test_Penetration_Home()
    # Base checks applied to every domain: the bare root and the 'www.' host.
    baseSubDomains = {"root":"", "www":"www."}
    subdomains = test.combineDictionaries(baseSubDomains, {"cv":"cv.", "blog":"blog.", "kanboard":"kanboard." })
    print test.domain("alexanderhopgood.com", subdomains)
    print test.domain("alexhopgood.co.uk", subdomains)
    print test.domain("alexanderhopgood.net", subdomains)
    print test.domain("altairbob.com", test.combineDictionaries(baseSubDomains, {}))
    print test.domain("altairbob.co.uk", test.combineDictionaries(baseSubDomains, {}))
    print test.domain("altairbob.net", test.combineDictionaries(baseSubDomains, {}))
    print test.domain("katherinemorley.co.uk", test.combineDictionaries(baseSubDomains,{"blog":"blog."}) )
    #@,www,blog
    print test.domain("katherinemorley.net", test.combineDictionaries(baseSubDomains,{"blog":"blog."}) )
    #blog,www, @
    # NOTE(review): the line below repeats katherinemorley.net verbatim;
    # presumably one of the two was meant to be a different domain -- confirm.
    print test.domain("katherinemorley.net", test.combineDictionaries(baseSubDomains,{"blog":"blog."}) )
    # Static resources on the Apache pages are directly accessible; verify
    # whether exposing the directory listing is intended.
    print "cv.alexanderhopgood.com/CvResources/"+" "+str(checkStaticPage("http://cv.alexanderhopgood.com/CvResources/"))
    print "cv.alexanderhopgood.com/CvResources/isca-logo.png"+" "+str(checkStaticPage("http://cv.alexanderhopgood.com/CvResources/isca-logo.png"))
"alex.hopgood@gmail.com"
] | alex.hopgood@gmail.com |
7ff3f503b6109c982a1c88f57ebeb4903c7ad1a8 | 47c0c375f85d13802c52fac358a6434a9ec24387 | /pencil sketch/cv2.py | d3dbc3620edc17c2074460b870a8e0e29f144e62 | [] | no_license | hemant5668/computer-vision | d4f8211b35897fee8a7e8b41a7ae7176581e458e | 0afd4127e064fa955edee08bff8f73642baa3a00 | refs/heads/master | 2020-12-17T22:14:44.618814 | 2020-08-09T09:35:13 | 2020-08-09T09:35:13 | 235,297,212 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py |
# coding: utf-8
# In[1]:
import cv2
import numpy as np
# In[36]:
#sketch generating function
def sketch(img):
    """Convert a BGR frame into an inverted-binary edge mask ("pencil sketch").

    Args:
        img: BGR image as produced by cv2.VideoCapture.read().

    Returns:
        Single-channel uint8 mask: edges are black (0) on a white (255)
        background.
    """
    # Grayscale, then blur to suppress sensor noise before edge detection.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray_blur = cv2.GaussianBlur(gray, (5, 5), 0)
    # Canny edge map (very low thresholds keep faint edges).
    canny = cv2.Canny(gray_blur, -10, 50)
    # NOTE: the original also ran a sharpening filter2D here, but its result
    # (`sharp`) was never used -- the threshold below was applied to `canny` --
    # so the dead computation has been removed.
    # Invert-binarise: edge pixels (> 70) become 0, background becomes 255.
    _, mask = cv2.threshold(canny, 70, 255, cv2.THRESH_BINARY_INV)
    return mask
# Initialise the webcam.  cap.read() returns (ret, frame): ret is a bool
# success flag, frame is the captured image.
cap=cv2.VideoCapture(0)
while True:
    ret,frame=cap.read()
    cv2.imshow('live sketcher',sketch(frame))
    if cv2.waitKey(1)==13: #13 is the enter key
        break
# Release the camera and close all OpenCV windows.
cap.release()
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | noreply@github.com |
c48eddd65d99df02570dcee5cae5f93fb26f50e7 | 3552acf30ba11692df7f138d3ba6686fce07cddd | /assignment4/main_task2.py | 4db759bc8163c20837de092d78ac0d8e9820fd41 | [
"Apache-2.0"
] | permissive | gnayoaixgnaw/Big_Data_Analytics | 07ac247b204122d0c44a841947a231898f86b876 | 4866bbb3de203304edbf107b833539b28be9b35a | refs/heads/main | 2023-03-05T23:55:24.191846 | 2021-02-17T20:41:50 | 2021-02-17T20:41:50 | 325,404,711 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,686 | py | import sys
import re
import numpy as np
from operator import add
from numpy import dot
from numpy.linalg import norm
from pyspark import SparkContext
from pyspark.sql import functions as func
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix
def buildArray(listOfIndices):
    """Build a normalized term-frequency vector of length f (module global).

    Each index in *listOfIndices* increments one slot; the vector is then
    divided by its total so the entries sum to 1.
    """
    counts = np.zeros(f)
    for idx in listOfIndices:
        counts[idx] += 1
    total = np.sum(counts)
    return np.divide(counts, total)
def build_zero_one_array(listOfIndices):
    """Build a length-f (module global) presence vector: slot i is 1 iff i
    appears in *listOfIndices*, else 0."""
    presence = np.zeros(f)
    for idx in listOfIndices:
        if presence[idx] == 0:
            presence[idx] = 1
    return presence
def get_tf_idf(textFile):
    """Build an RDD of (docID, tf-idf vector) pairs for every document.

    Reads the module globals `sc` (SparkContext), `dictionary`
    (word -> feature-position RDD) and `f` (feature-vector length).
    """
    wikiPages = sc.textFile(textFile)
    #wikiCategoryLinks=sc.textFile(sys.argv[2])
    #wikiCats=wikiCategoryLinks.map(lambda x: x.split(",")).map(lambda x: (x[0].replace('"', ''), x[1].replace('"', '') ))
    numberOfDocs = wikiPages.count()
    # Each entry in validLines will be a line from the text file
    validLines = wikiPages.filter(lambda x : 'id' in x and 'url=' in x)
    # Now, we transform it into a set of (docID, text) pairs
    keyAndText = validLines.map(lambda x : (x[x.index('id="') + 4 : x.index('" url=')], x[x.index('">') + 2:][:-6]))
    regex = re.compile('[^a-zA-Z]')
    # remove all non letter characters, lowercase, and split into words
    keyAndListOfWords = keyAndText.map(lambda x : (str(x[0]), regex.sub(' ', x[1]).lower().split()))
    # Next, we get a RDD that has, for each (docID, ["word1", "word2", "word3", ...]),
    # ("word1", docID), ("word2", docId), ...
    allWordsWithDocID = keyAndListOfWords.flatMap(lambda x: ((j, x[0]) for j in x[1]))
    # Now join and link them, to get a set of ("word1", (dictionaryPos, docID)) pairs
    allDictionaryWords = dictionary.join(allWordsWithDocID)
    # Now, we drop the actual word itself to get a set of (docID, dictionaryPos) pairs
    justDocAndPos = allDictionaryWords.map(lambda x:(x[1][1],x[1][0]))
    # Now get a set of (docID, [dictionaryPos1, dictionaryPos2, dictionaryPos3...]) pairs
    allDictionaryWordsInEachDoc = justDocAndPos.groupByKey()
    # Convert the dictionary positions of each doc into a normalized
    # bag-of-words (term-frequency) numpy array.
    allDocsAsNumpyArrays = allDictionaryWordsInEachDoc.map(lambda x: (x[0], buildArray(x[1])))
    # 0/1 presence vectors per doc, summed below into document frequencies.
    zeroOrOne = allDocsAsNumpyArrays.map(lambda x: (x[0],np.where(x[1] > 0, 1, 0)))
    dfArray = zeroOrOne.reduce(lambda x1, x2: ("", np.add(x1[1], x2[1])))[1]
    # NOTE(review): `multiplier` is computed but never used below.
    multiplier = np.full(f, numberOfDocs)
    # Inverse document frequency; the +1 in the denominator avoids
    # division by zero for words that appear in no indexed document.
    idfArray = np.log(np.divide(np.full(f, numberOfDocs), dfArray+1))
    # Finally, convert all of the tf vectors in allDocsAsNumpyArrays to tf * idf vectors
    allDocsAsNumpyArraysTFidf = allDocsAsNumpyArrays.map(lambda x: (x[0], np.multiply(x[1], idfArray)))
    return allDocsAsNumpyArraysTFidf
def convert1(x):
    # Map one (docID, tf-idf vector) pair to the per-document pieces of the
    # logistic-regression objective, evaluated against the module global
    # parameter_vector_current (theta):
    #   (y,                              label: 1 if 'AU' in docID else 0
    #    -y * theta.x,                   first term of the negative log-likelihood
    #    log(1 + e^(theta.x)),           second term of the negative log-likelihood
    #    -x*y + x * e^(theta.x)/(1+e^(theta.x)))   gradient contribution
    temp = []  # NOTE(review): unused
    if 'AU' in x[0]:
        return(1,
            -1*1*np.dot(x[1],parameter_vector_current),
            np.log(1+np.e**np.dot(x[1],parameter_vector_current)),
            -1*x[1]*1+x[1]*(np.e**np.dot(x[1],parameter_vector_current)/(1+np.e**np.dot(x[1],parameter_vector_current)))
            )
    else:
        return(0,
            -1*0*np.dot(x[1],parameter_vector_current),
            np.log(1+np.e**np.dot(x[1],parameter_vector_current)),
            -1*x[1]*0+x[1]*(np.e**np.dot(x[1],parameter_vector_current)/(1+np.e**np.dot(x[1],parameter_vector_current)))
            )
if __name__ == "__main__":
f = 20000
sc = SparkContext(appName="task2")
wikiPages = sc.textFile(sys.argv[1])
#wikiCategoryLinks=sc.textFile(sys.argv[2])
#wikiCats=wikiCategoryLinks.map(lambda x: x.split(",")).map(lambda x: (x[0].replace('"', ''), x[1].replace('"', '') ))
numberOfDocs = wikiPages.count()
# Each entry in validLines will be a line from the text file
validLines = wikiPages.filter(lambda x : 'id' in x and 'url=' in x)
# Now, we transform it into a set of (docID, text) pairs
keyAndText = validLines.map(lambda x : (x[x.index('id="') + 4 : x.index('" url=')], x[x.index('">') + 2:][:-6]))
keyAndText = validLines.map(lambda x : (x[x.index('id="') + 4 : x.index('" url=')], x[x.index('">') + 2:][:-6]))
# Now, we split the text in each (docID, text) pair into a list of words
# After this step, we have a data set with
# (docID, ["word1", "word2", "word3", ...])
# We use a regular expression here to make
# sure that the program does not break down on some of the documents
regex = re.compile('[^a-zA-Z]')
# remove all non letter characters
keyAndListOfWords = keyAndText.map(lambda x : (str(x[0]), regex.sub(' ', x[1]).lower().split()))
# better solution here is to use NLTK tokenizer
# Now get the top 20,000 words... first change (docID, ["word1", "word2", "word3", ...])
# to ("word1", 1) ("word2", 1)...
allWords = keyAndListOfWords.flatMap(lambda x: ((i,1) for i in x[1]))
# Now, count all of the words, giving us ("word1", 1433), ("word2", 3423423), etc.
allCounts = allWords.reduceByKey(lambda x,y: x+y)
# Get the top 20,000 words in a local array in a sorted format based on frequency
# If you want to run it on your laptio, it may a longer time for top 20k words.
topWords = allCounts.top(f,key = lambda x: x[1])
#
#print("Top Words in Corpus:", allCounts.top(10, key=lambda x: x[1]))
# We'll create a RDD that has a set of (word, dictNum) pairs
# start by creating an RDD that has the number 0 through 20000
# 20000 is the number of words that will be in our dictionary
topWordsK = sc.parallelize(range(f))
# Now, we transform (0), (1), (2), ... to ("MostCommonWord", 1)
# ("NextMostCommon", 2), ...
# the number will be the spot in the dictionary used to tell us
# where the word is located
dictionary = topWordsK.map (lambda x : (topWords[x][0], x))
print("Word Postions in our Feature Matrix. Last 20 words in 20k positions: ", dictionary.top(20, lambda x : x[1]))
dictionary.cache()
a = get_tf_idf(sys.argv[1])
a.cache()
learningRate = 0.1
lambdas = 0.001
num_iteration = 0
precision = 0.1
oldCost = 0
mu, sigma = 0, 0.1 # mean and standard deviation
parameter_vector_current = np.random.normal(mu, sigma, f)
result_list = []
while True:
# Calculate the prediction with current regression coefficients.
# We compute costs just for monitoring
cost_list = a.map(convert1).reduce(lambda x, y: (x[0] + y[0], x[1] + y[1], x[2] + y[2], x[3] + y[3]))
cost = cost_list[1] + cost_list[2]+ lambdas*np.square(parameter_vector_current).sum()
# calculate gradients.
l_gradient_list = cost_list[3]+2*lambdas*parameter_vector_current
prevp = np.sqrt(np.square(parameter_vector_current).sum())
parameter_vector_current = parameter_vector_current - learningRate *l_gradient_list
nextp = np.sqrt(np.square(parameter_vector_current).sum())
if abs(nextp - prevp) < 0.001:
break
# update the weights - Regression Coefficients
# Stop if the cost is not descreasing
if cost > oldCost:
learningRate = learningRate * 0.5
oldCost = cost
if cost < oldCost:
learningRate = learningRate * 1.05
oldCost = cost
print("Iteration No.=", num_iteration, " Cost=", cost)
print("parameter : ", parameter_vector_current)
num_iteration+=1
ragulation_parameter = parameter_vector_current
word_dic = dict()
for item in dictionary.collect():
word_dic[item[1]] = item[0]
dic = dict()
for i in range(len(ragulation_parameter)):
dic[i] = ragulation_parameter[i]
top_five =sorted(dic.items(),key=lambda x:x[1],reverse=True)[0:5]
resut_list = []
for j in top_five:
resut_list.append(word_dic[j[0]])
print(word_dic[j[0]]) # task2
z=sc.parallelize(result_list).coalesce(1)
z.collect()
z.saveAsTextFile(sys.argv[2])
sc.stop()
| [
"noreply@github.com"
] | noreply@github.com |
c4ae49b5ef3dff9cda6859483ab61b793df6c6e4 | 90e6860b5370b742f01c0664ac84f14dc1272155 | /src/ziggurat/config/StandardConfigurator.py | fdce59892f864b753760a8508b5a30a278cc7f28 | [] | no_license | sernst/Ziggurat | e63f876b8f2cb3f78c7a7a4dcf79af810a540722 | 4ae09bbd9c467b2ad740e117ed00354c04951e22 | refs/heads/master | 2021-01-17T07:20:17.138440 | 2016-05-27T14:27:43 | 2016-05-27T14:27:43 | 9,278,283 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,944 | py | # StandardConfigurator.py
# (C)2013
# Scott Ernst
from __future__ import print_function, absolute_import, unicode_literals, division
import re
from pyaid.string.StringUtils import StringUtils
from pyramid.config import Configurator
from pyaid.file.FileUtils import FileUtils
#___________________________________________________________________________________________________ StandardConfigurator
class StandardConfigurator(Configurator):
    """Pyramid Configurator with Ziggurat application conventions.

    Adds a reified `request.ziggurat` property exposing the owning app,
    default pyramid/Mako settings, and helpers to register routes/views.
    """

    # Matches a trailing REST-style '*remainder' segment in a route pattern.
    _REST_PATTERN = re.compile('\*[A-Za-z0-9]+$')

    _DEFAULT_SETTINGS = {
        'host':'0.0.0.0',
        'port':6543,
        'pyramid.reload_templates':True,
        'pyramid.debug_authorization':False,
        'pyramid.debug_notfound':False,
        'pyramid.debug_routematch':False,
        'pyramid.debug_templates':True,
        'pyramid.default_locale_name':'en',
        'pyramid.includes':'pyramid_tm',
        'mako.input_encoding':'utf-8' }

    def __init__(self, app, rootViewPackage =None, **kwargs):
        """Creates a new instance of StandardConfigurator."""
        super(StandardConfigurator, self).__init__(**kwargs)
        self._isPopulated = False
        self._app = app
        self._rootViewPackage = rootViewPackage
        # Expose the owning application on every request as `request.ziggurat`.
        self.add_request_method(
            self._getMyAppRequestProperty,
            StringUtils.toStrStr('ziggurat'), reify=True)

    @property
    def rootViewPackage(self):
        # Package against which bare view class names are resolved.
        return self._rootViewPackage

    @property
    def makoRootTemplatePath(self):
        # <app root>/templates/mako
        return FileUtils.createPath(self._app.rootPath, 'templates', 'mako', isDir=True)

    @property
    def makoModuleDirectory(self):
        # <app root>/operations/mako (compiled Mako module cache)
        return FileUtils.createPath(self._app.rootPath, 'operations', 'mako', isDir=True)

    def populateConfigs(self):
        """Populate routes and settings exactly once (idempotent)."""
        if self._isPopulated:
            return
        self._isPopulated = True
        self._populateRoutes()
        settings = dict(self._DEFAULT_SETTINGS.items())
        p = self.makoRootTemplatePath
        if p:
            settings['mako.directories'] = p
        p = self.makoModuleDirectory
        if p:
            settings['mako.module_directory'] = p
        self._populateSettings(settings)
        self.add_settings(settings)

    def addRouteItem(self, name, pattern, className, renderer =None, package =None, subpackage =None):
        """Adds a route to the registry."""
        # Adds optional end slash argument to URLs that don't enforce an end slash themselves
        if not pattern.endswith('/'):
            if self._REST_PATTERN.search(pattern) is None:
                pattern += '{endSlash:[/]*}'
        # Build the dotted import path: package[.subpackage].className.className
        importDef = [className, className]
        if subpackage:
            importDef.insert(0, subpackage)
        importDef.insert(0, package if package else self.rootViewPackage)
        self.add_route(name, pattern)
        self.add_view('.'.join(importDef), route_name=name, renderer=renderer)

    def addStaticRouteItem(self, name, path):
        # Register a static asset view under *name*.
        self.add_static_view(name=name, path=path)

    def _getMyAppRequestProperty(self, request):
        # Backing callable for the `request.ziggurat` property.
        return self._app

    def _populateSettings(self, settings):
        # Hook for subclasses to adjust settings before they are applied.
        pass

    def _populateRoutes(self):
        """Hook for subclasses to register their routes."""
        pass

    def __repr__(self):
        return self.__str__()

    def __unicode__(self):
        return StringUtils.toUnicode(self.__str__())

    def __str__(self):
        return '<%s>' % self.__class__.__name__
| [
"swernst@gmail.com"
] | swernst@gmail.com |
312229298bb7c940f89122dd2cd79fc030c302c7 | 0ca3795da1d3d88ae7d77ea2ab44ce3c95bb49ed | /blog/apps/user/apps.py | 2cb53f03d9978c31a79f9c9a826368829b8184c4 | [] | no_license | DmitriyKhalturin/django-blog | e1a5c1c66a52a141cb034e0957e6001dc31f118c | e30368d75d4d675613a93f4e3e186a5e7811018d | refs/heads/develop | 2023-07-31T22:41:22.381474 | 2020-08-04T19:41:04 | 2020-08-04T19:55:43 | 281,173,512 | 0 | 0 | null | 2021-09-22T19:44:13 | 2020-07-20T16:46:24 | Python | UTF-8 | Python | false | false | 93 | py | from django.apps import AppConfig
class UserConfig(AppConfig):
    """Django AppConfig for the blog's user application."""
    name = 'blog.apps.user'
| [
"dmitry.halturin.86@gmail.com"
] | dmitry.halturin.86@gmail.com |
d2441a8e7cd1131ccb87e1debc5c49e33fc62f90 | 2031771d8c226806a0b35c3579af990dd0747e64 | /pyobjc-framework-SearchKit/setup.py | cfce0b4f572de3e7d3cb839fb83d76b5f34634ee | [
"MIT"
] | permissive | GreatFruitOmsk/pyobjc-mirror | a146b5363a5e39181f09761087fd854127c07c86 | 4f4cf0e4416ea67240633077e5665f5ed9724140 | refs/heads/master | 2018-12-22T12:38:52.382389 | 2018-11-12T09:54:18 | 2018-11-12T09:54:18 | 109,211,701 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | '''
Deprecated wrappers for the "SearchKit" framework on macOS.
Use the CoreServices package instead.
'''
from pyobjc_setup import setup
VERSION="5.1.1"
setup(
name='pyobjc-framework-SearchKit',
description = "Wrappers for the framework SearchKit on macOS",
min_os_level='10.5',
packages = [ "SearchKit" ],
version=VERSION,
install_requires = [
'pyobjc-core>='+VERSION,
'pyobjc-framework-CoreServices>='+VERSION,
],
long_description=__doc__,
)
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
a47ce249f69c56724d0d6a2002329249502be5d7 | 1774936282e03a2e12c589314f2f865181a2de07 | /learning_templates/basic_app/templatetags/my_extras.py | 9dcc56005020c5da817fd1b1c2b7a692569d5c45 | [] | no_license | kimman8/django_deployment-example | 458ee1318d71d01a90842bdccd01ffb5aeb745b4 | 54b417d96c52de37670d31de4ca0fc1bd43e75d4 | refs/heads/master | 2022-12-23T15:26:49.989409 | 2020-10-01T22:30:55 | 2020-10-01T22:30:55 | 300,247,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | from django import template
register = template.Library()
@register.filter(name='cut')
def cut(value, arg):
"""
This cuts out all value of arg from the string
"""
return value.replace(arg,'')
# register.filter('cut',cut)
| [
"KIM@KIM-YUENs-iMac.local"
] | KIM@KIM-YUENs-iMac.local |
2c3045452e184a876d836e4523e0d1bc7d1c826e | b28753c509a011a13c9e4068cbf295808f4f158e | /app/firestore_services.py | a5d65186c94060cb5445934b15199d85a7b4ccd5 | [] | no_license | abecerra0521/flask | a62bf9520be44927412df141079f8c418fb65fd4 | 8e696c9ceb664c29a297df4745e81a9732a03907 | refs/heads/master | 2020-05-21T11:37:37.865189 | 2019-05-22T15:56:06 | 2019-05-22T15:56:06 | 186,030,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,383 | py | import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
credential_app = credentials.ApplicationDefault()
firebase_admin.initialize_app(credential_app, {
'projectId': 'todo-list-flask',
})
db = firestore.client()
def get_users():
return db.collection('users').get()
def get_user_by_id(user_id):
return db.collection('users')\
.document(user_id).get()
def insert_user(user_data):
data = {
'password': user_data.password,
}
db.collection('users').document(user_data.username).set(data)
def get_tasks(user_id):
return db.collection('users')\
.document(user_id)\
.collection('tasks').get()
def post_task(user_id, task):
data = {
'description': task,
'done': False
}
db.collection('users')\
.document(user_id)\
.collection('tasks').add(data)
def delete_task(user_id, task_id):
db.document('users/{}/tasks/{}'.format(user_id, task_id)).delete()
def update_task(user_id, task_id, done):
task_done = not bool(done)
task_ref = db.document('users/{}/tasks/{}'.format(user_id, task_id))
#task_done = not bool(done)
#task_ref = _get_task_ref(user_id, task_id)
task_ref.update({'done': task_done})
# def _get_task_ref(user_id, task_id):
# return db.document('users/{}/tasks/{}'.format(user_id, task_id))
| [
"abecerra.aabg@gmail.com"
] | abecerra.aabg@gmail.com |
9b857faea34313ff6868b7fa5be397c40a4184e6 | 89eeace71191fc929c2d93b7fe876ac7db441200 | /site_scons/site_tools/lib_rc/__init__.py | 7af9a109593330bb56823ceeecd2a948aa4c6847 | [] | no_license | janbraiins/lib-rc | 2b8e2092c23bbe03cfff2121fc22fb38cc715c85 | d5b6b909ca5f9f3d1fbf7404a48e3f3745ba2696 | refs/heads/master | 2022-12-26T18:43:17.007003 | 2017-08-19T09:27:32 | 2017-08-19T09:27:32 | 296,148,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | """Scons Tool for building external sources against lib-rc.
The tool assumes the build configuration is available through
construction environment variable 'CONFIG'. The following
configuration elements are required:
- LIB_RC_DIR - top level directory that contains the source
tree
Copyright (c) 2016 Braiins Systems s.r.o.
"""
import os
def generate(env):
"""Set build environment so that this project is also available to
other projects
"""
config = env['CONFIG']
env.Append(CPPPATH=[config.LIB_RC_DIR,
# Extend search path for generated files under the build directory
os.path.join('#$VARIANT_DIR',
os.path.basename(config.LIB_RC_DIR))])
def exists(env):
return 1
| [
"jan.capek@braiins.cz"
] | jan.capek@braiins.cz |
43bd61d034275b4e72d5fd73ddf6e07f646548ed | 85f68b427bf9c4b8b5c3f8a70dccc217226e9706 | /gam_app/old_migrations/0012_auto_20180619_1641.py | 7de987d6f30f3cd7be49de7d4ff093f9f154c292 | [] | no_license | benrogboe/GAM | ffb87e76a87aa7eaf0d0d33d4df7ddc571399e49 | fbec7cb967252578d4669c5ff91a9b0b9cdfd9d5 | refs/heads/master | 2020-03-30T05:08:38.811097 | 2018-09-28T18:55:24 | 2018-09-28T18:55:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | # Generated by Django 2.0.1 on 2018-06-19 16:41
import ckeditor.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('gam_app', '0011_auto_20180619_1527'),
]
operations = [
migrations.AlterField(
model_name='imagen',
name='archivo',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='gam_app.Archivo'),
),
migrations.AlterField(
model_name='imagen',
name='colección',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='gam_app.Colección'),
),
]
| [
"apjanco@gmail.com"
] | apjanco@gmail.com |
ddf3cb7cd88608065d779da0fa8b72337e5b9206 | 7e91bb1bb499139f00b2f1d3fc4769945f727260 | /math/triangle_quest.py | fdb24f4d8d38c299f05fce89b29474d191897c10 | [] | no_license | neeraj1909/python-hackerrank | 46867ee40d1fd647b049e78be02156c1b962b11b | 6004f5f14878f131bf9f3441af9f5f4ec7c3a2a5 | refs/heads/master | 2021-08-19T14:43:12.460929 | 2017-11-26T17:43:49 | 2017-11-26T17:43:49 | 108,431,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | for i in range(1,int(input())): #More than 2 lines will result in 0 score. Do not leave a blank line also
print(i*(pow(10, i) - 1)//9)
| [
"neeraj1909@gmail.com"
] | neeraj1909@gmail.com |
3e9fbcc82ac9735647bbbf58624023d9b3049086 | 7f368b275cd18a5b7b2eb22b822223252914c8ef | /tensorflow_gan/python/tpu/cross_replica_ops.py | 7ab35260b93b8be210c6cb4f9caf314cc746b313 | [
"Apache-2.0"
] | permissive | nivedwho/gan | 176c624800378d9dfa9f74211c362b62953cc7f1 | 723ce1e3627778b979f048d817f834f253611ff4 | refs/heads/master | 2023-08-01T08:07:34.299917 | 2021-09-14T04:10:38 | 2021-09-14T04:11:37 | 396,680,181 | 0 | 0 | Apache-2.0 | 2021-08-16T07:44:33 | 2021-08-16T07:44:33 | null | UTF-8 | Python | false | false | 3,128 | py | # coding=utf-8
# Copyright 2021 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow operations specific to TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow as tf
from tensorflow.python.tpu import tpu_function # pylint: disable=g-direct-tensorflow-import
__all__ = [
'cross_replica_mean',
'cross_replica_moments',
]
def cross_replica_mean(inputs, group_size=None):
"""Calculates the average value of inputs tensor across TPU replicas."""
num_replicas = tpu_function.get_tpu_context().number_of_shards
if not group_size:
group_size = num_replicas
if group_size == 1:
return inputs
if group_size != num_replicas:
group_assignment = []
assert num_replicas % group_size == 0
for g in range(num_replicas // group_size):
replica_ids = [g * group_size + i for i in range(group_size)]
group_assignment.append(replica_ids)
else:
group_assignment = None
return tf.compat.v1.tpu.cross_replica_sum(inputs, group_assignment) / tf.cast(
group_size, inputs.dtype)
def cross_replica_moments(inputs, axis, parallel=True, group_size=None):
"""Compute mean and variance of the inputs tensor across TPU replicas.
Args:
inputs: A tensor with 2 or more dimensions.
axis: Array of ints. Axes along which to compute mean and variance.
parallel: Use E[x^2] - (E[x])^2 to compute variance. This can be done
in parallel to computing the mean and reducing the communication overhead.
group_size: Integer, the number of replicas to compute moments arcoss.
None or 0 will use all replicas (global).
Returns:
Two tensors with mean and variance.
"""
# Compute local mean and then average across replicas.
mean = tf.math.reduce_mean(input_tensor=inputs, axis=axis)
mean = cross_replica_mean(mean)
if parallel:
# Compute variance using the E[x^2] - (E[x])^2 formula. This is less
# numerically stable than the E[(x-E[x])^2] formula, but allows the two
# cross-replica sums to be computed in parallel, saving communication
# overhead.
mean_of_squares = tf.reduce_mean(input_tensor=tf.square(inputs), axis=axis)
mean_of_squares = cross_replica_mean(mean_of_squares, group_size=group_size)
mean_squared = tf.square(mean)
variance = mean_of_squares - mean_squared
else:
variance = tf.math.reduce_mean(
input_tensor=tf.math.square(inputs - mean), axis=axis)
variance = cross_replica_mean(variance, group_size=group_size)
return mean, variance
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
c4b0fa6a10dd0233f06a512fb6746c6c4f0b86d7 | b17fda8e3a9f360cbab8e8ed0ecd66b03787250a | /.venv/lib/python2.7/site-packages/planemo/templates.py | caa25724a0252f54837ada1ffbff3f78b82341b4 | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | maumauleon/galaxy-irri-dev | 7a4b824c90474da0a2f3a3b858777c5089b9d5cb | 063bf0dca5d465466aefa77edaf47df12c4ff932 | refs/heads/master | 2022-11-16T03:10:18.067196 | 2017-08-23T03:31:01 | 2017-08-23T03:31:01 | 98,497,124 | 1 | 2 | NOASSERTION | 2022-11-01T17:00:32 | 2017-07-27T05:25:40 | Python | UTF-8 | Python | false | false | 478 | py | try:
from jinja2 import Template
except ImportError:
Template = None
NO_JINJA2_MESSAGE = ("This functionality requires Jinja2 but this library is "
"unavailable. Install with `pip install jinja2`.")
def render(template_str, **kwds):
""" Use jinja2 to render a template
"""
if Template is None:
raise Exception(NO_JINJA2_MESSAGE)
template = Template(template_str)
contents = template.render(**kwds)
return contents
| [
"v.juanillas@irri.org"
] | v.juanillas@irri.org |
a697246864c5f020df2a2b5b60c9e4a429c0d160 | 7f53c41182a6d9c5da0c58a15716f01725ac0316 | /2019_2_19_public_test/test.py | e79247064e8a5dfa2e00c40dbbe822ef17f75f3b | [] | no_license | 1286211699/2019_1_23_pub_test | f6b7ee089e78ad673c56b3cd4ccee9b2154581f6 | 3aed7f4941353d48bf3407e9d30ac85c83b0ed7b | refs/heads/master | 2022-12-19T14:41:15.264627 | 2019-03-21T09:46:08 | 2019-03-21T09:46:08 | 167,125,649 | 1 | 0 | null | 2022-12-08T01:33:30 | 2019-01-23T05:54:52 | HTML | UTF-8 | Python | false | false | 666 | py | import _thread
import time
# 为线程定义一个函数
def print_time( threadName, delay):
count = 0
while count < 5:
time.sleep(delay)
count += 1
print ("%s: %s" % ( threadName, time.ctime(time.time()) ))
# 创建两个线程
#但是这个模块我们不推荐,因为底层封装的时候它的主线程不会等待子线程的结束!
#官方以及我们推荐再封装Threading,所以在这里大家了解下
try:
_thread.start_new_thread( print_time,("Thread-1", 2, ) )
_thread.start_new_thread( print_time,("Thread-2", 4, ) )
except:
print ("Error: 无法启动线程")
while True:
pass
| [
"1286211699@qq.com"
] | 1286211699@qq.com |
7e3af968048a42172de4897609638c92a4942170 | bb1fe15070832a508e4fbde929212414f4c10e78 | /felipe.py - Organizar/aula45.py | b30ab173614fd6f05f8b8b5121103ed4407e3874 | [] | no_license | structuredworldcapital/Lab-de-Estudos | 5cfd62414e4811b8170d8349563444f547811b3f | c97421549e8fe8196c321fd453b40ed3fab32cdf | refs/heads/master | 2022-07-25T16:50:56.956945 | 2020-05-04T17:23:09 | 2020-05-04T17:23:09 | 261,243,167 | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 1,011 | py | from random import randint
itens = ('Pedra', 'Papel', 'Tesoura')
print('''Suas opcaoes:
[ 0 ] PEDRA
[ 1 ] PAPEL
[ 2 ] TESOURA''')
jogador = int(input('Qual é a sua jogada?'))
print('-=' * 11)
print('Computador jogou {}'.format(itens[jogador]))
print('Jogador jogou {}'.fomrat(itens[jogador]))
print('-=' * 11)
if computador == 0: # computador jogou PEDRA
if jogador == 0
print('EMPATE')
elif jogador == 1:
print('JOGADOR VENCE')
elif jogador == 2:
print('COMPUTADOR VENCE')
else:
print('JOGADA INVALIDA!')
elif computador == 1: # computador jogou PAPEL
if jogador == 0
print('COMPUTADOR VENCE')
elif jogador == 1:
print('EMPATE')
elif jogador == 2:
print('JOGADOR VENCE')
else:
print('JOGADA INVALIDA!')
elif computador == 2: # computador jogou TESOURA
if jogador == 0
print('JOGADOR VENCE')
elif jogador == 1:
print('COMPUTADOR VENCE')
elif jogador == 2:
print('EMPATE')
else:
print('JOGADA INVALIDA!') | [
"giancarlo.s.graziano@gmail.com"
] | giancarlo.s.graziano@gmail.com |
ddf8e310ace1ebb6773c14c882d812f973ffa1af | 4b4828d3c98d76d7bf38f90a015945acc408ddc5 | /PythonAI/Source/W2D3/src/bmi_web.py | 441f43d452b0cdebf5e3e9a87e8c79e84ae2551b | [] | no_license | Huh-jae-won/Study | cb5d32728e8dcded492e7edb054b500c91ec607c | e4dbc3fef69bb273b62b866fb5ef2a7250222f10 | refs/heads/main | 2023-06-20T13:06:26.691899 | 2021-07-11T07:43:41 | 2021-07-11T07:43:41 | 362,759,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,199 | py | """
# URL : http://localhost:8080/cgi-bin/bmi_web.py
"""
# 모듈 로딩 ---------------------------------------------------
import cgi, sys, codecs, os
import joblib
# WEB 인코딩 설정 ---------------------------------------------
sys.stdout=codecs.getwriter('utf-8')(sys.stdout.detach())
# 함수 선언 --------------------------------------------------
# WEB 페이지 출력 --------------------------------------------
def displayWEB(detect_msg):
print("Content-Type: text/html; charset=utf-8")
print("")
html="""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>비만 여부 판별</title>
</head>
<body align="center">
<h2>[ 비만 상태 체크 ]</h2>
<form>
<div style='text-align:center; background-color:#D5D5D5;border-radius:10px;width:60%; margin: auto;padding:50px;'>
<input id="height" type="text" placeholder="키" name="height">   
<input id="weight" type="text" placeholder="몸무게" name="weight">
<input type="submit" value="판정"></br>
<p><font color='blue'>{}</font></p>
</div>
</form></body></html>""".format(detect_msg)
print(html)
# 판정 --------------------------------------------------------
def detect_bmi(w, h):
w = int(w)
h = int(h)
# 비만도 예측하기
res = clf.predict([[w / 100, h / 200]])
return str(res[0])
# 기능 구현 -----------------------------------------------------
# (1) 학습 데이터 읽기
pklfile = os.path.dirname(__file__) + "/bmi.pkl"
clf = joblib.load(pklfile)
# (2) WEB 페이지 <Form> -> <INPUT> 리스트 가져오기
form = cgi.FieldStorage()
height_value = form.getvalue('height')
weight_value = form.getvalue('weight')
# (3) 판정 하기
if height_value is not None and weight_value is not None:
bmi_dic = {"fat": "과체중", "normal": "정상체중", "thin": "저체중"}
result = detect_bmi(weight_value, height_value)
result = '키 {}, 몸무게 {} => {}입니다.'.format(height_value, weight_value, bmi_dic[result])
else:
result ='측정된 결과가 없습니다.'
# (4) WEB 출력하기
displayWEB(result)
| [
"dfr9034@naver.com"
] | dfr9034@naver.com |
311e9dd78efad9b732440d9f4fae507fa27b1498 | f810888add2af1d8c30cf9920ee6c9becd687c60 | /Beginner/repeat.py | fc6cec1d130fccbc18daa1548b2df28ad7f07ba4 | [] | no_license | deepa045/python-programming | 2a4cc7d42131cc302b3573fe9d866dc62ca480ea | 373aeccbc5e1509f093f78e273052263ee0317fe | refs/heads/master | 2021-09-13T13:29:33.862035 | 2018-04-30T17:56:36 | 2018-04-30T17:56:36 | 125,207,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | string,val=raw_input().split()
for num in range(int(val)):
print string
| [
"noreply@github.com"
] | noreply@github.com |
ce62fce4822d0977d18f15b3bae8d32747436bd3 | 57f6805e4fa5aeb781af2434f41bbafd35ad6023 | /SentinelTime/time_series.py | 51ab51746fd19e95c98cd35a4776956858c24335 | [
"MIT"
] | permissive | marlinmm/SentinelTime | 4c87b0b0c38f92769614ebd6b6e2f85128242864 | 5732f23d508582c6b33bce68967331abe8c5cf9c | refs/heads/master | 2023-04-14T03:40:09.464249 | 2021-03-20T11:42:59 | 2021-03-20T11:42:59 | 291,967,885 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,049 | py | from SentinelTime.data_preprocessing import *
from SentinelTime.mask_stack import *
import rasterio.mask
import matplotlib.pyplot as plt
import pandas as pd
def extract_dates(directory, allowed_orbits=None):
"""
Extracts dates from list of preprocessed S-1 GRD files (need to be in standard pyroSAR exported naming scheme!)
:param directory: string
Path to folder, where files are stored
:return: list
returns list of acquisition dates of S-1 GRD files
"""
file_list = extract_files_to_list(path_to_folder=directory, datatype=".tif", path_bool=False)
new_file_list = []
print(allowed_orbits)
if allowed_orbits is not None:
for orbit in allowed_orbits:
for file in file_list:
if str(orbit) in file[len(file) - 8:len(file)]:
new_file_list.append(file)
date_list = []
for file in new_file_list:
date_list.append(int(file[2:10]))
elif allowed_orbits is None:
date_list = []
print("aaa")
for file in file_list:
date_list.append(int(file[2:10]))
print(len(date_list))
return date_list
def extract_time_series(results_dir, shapefile, buffer_size, point_path, allowed_orbits):
"""
Extracts time series information from patches of pixels using points and a buffer size to specify the size of the
patch
:param allowed_orbits:
:param shapefile: string
Path to point shapefile including name of shapefile
:param results_dir: string
Path to results directory, where layerstacks are stored and csv files will be stored
:param point_path: string
Path to point shapefile directory
:param buffer_size: int
Buffer size specifies the length of the rectangular buffer around the point
"""
# Import Patches for each class and all 4 layerstacks (VH/VV/Asc/Desc)
patches = create_point_buffer(shapefile, buffer_size=buffer_size)
layer_stacks = extract_files_to_list(path_to_folder=results_dir, datatype=".tif", path_bool=True)
# Iterate through all layerstacks:
for file in layer_stacks:
src1 = rio.open(file)
patch_mean = []
# Iterate through all patches of current class
for patch in patches:
pixel_mean = []
out_image, out_transform = rio.mask.mask(src1, [patch], all_touched=1, crop=True, nodata=np.nan)
# Calculate Mean for each patch:
for pixel in out_image:
pixel_mean.append(np.nanmean(pixel))
patch_mean.append(pixel_mean)
print(len(patch_mean[0]))
# Append dates of acquisition to each list (will be stored as float, doesnt matter for processing):
if "VH" in file and "Asc" in file:
patch_mean.append(extract_dates(results_dir + "VH" + "/" + "Asc" + "/", allowed_orbits))
if "VH" in file and "Desc" in file:
patch_mean.append(extract_dates(results_dir + "VH" + "/" + "Desc" + "/", allowed_orbits))
if "VV" in file and "Asc" in file:
patch_mean.append(extract_dates(results_dir + "VV" + "/" + "Asc" + "/", allowed_orbits))
if "VV" in file and "Desc" in file:
patch_mean.append(extract_dates(results_dir + "VV" + "/" + "Desc" + "/", allowed_orbits))
# Rotate array, so csv file will have correct orientation:
patch_mean = np.rot90(patch_mean)
patch_mean = np.rot90(patch_mean)
patch_mean = np.rot90(patch_mean)
patch_mean = patch_mean.tolist()
src1.close()
# Create CSV export directory and create header string with length equal to the number of patcher per class:
csv_result_dir = results_dir + "CSV/"
if not os.path.exists(csv_result_dir):
os.mkdir(csv_result_dir)
if "VH" in file:
pol1 = "VH"
vh_head_string = "VH"
tmp = ","
for i, elem in enumerate(patches):
vh_head_string = vh_head_string + str(i) + tmp + pol1
if "VV" in file:
pol1 = "VV"
vv_head_string = "VV"
tmp = ","
for i, elem in enumerate(patches):
vv_head_string = vv_head_string + str(i) + tmp + pol1
# Export patch means to csv files for each class, polarization and flight direction:
if "VH" in file and "Asc" in file:
np.savetxt(csv_result_dir + shapefile[len(point_path):len(shapefile) - 4] + "_VH_Asc.csv",
patch_mean, delimiter=",", header="date," + vh_head_string[0:len(vh_head_string) - 3], fmt='%f')
if "VH" in file and "Desc" in file:
np.savetxt(csv_result_dir + shapefile[len(point_path):len(shapefile) - 4] + "_VH_Desc.csv",
patch_mean, delimiter=",", header="date," + vh_head_string[0:len(vh_head_string) - 3], fmt='%f')
if "VV" in file and "Asc" in file:
np.savetxt(csv_result_dir + shapefile[len(point_path):len(shapefile) - 4] + "_VV_Asc.csv",
patch_mean, delimiter=",", header="date," + vv_head_string[0:len(vv_head_string) - 3], fmt='%f')
if "VV" in file and "Desc" in file:
np.savetxt(csv_result_dir + shapefile[len(point_path):len(shapefile) - 4] + "_VV_Desc.csv",
patch_mean, delimiter=",", header="date," + vv_head_string[0:len(vv_head_string) - 3], fmt='%f')
def import_time_series_csv(path_to_folder, frost_bool):
"""
Imports csv files from results folder
:param frost_bool:
:param path_to_folder: string
Path to folder, where csv files are stored
:return: tuple
returns tuple of lists containing the dataframe names and the dataframes itself
"""
csv_list = extract_files_to_list(path_to_folder, datatype=".csv", path_bool=False)
df_name_list = []
df_list = []
for csv in csv_list:
df = pd.read_csv(path_to_folder + csv)
df = df.rename({"# date": "date"}, axis=1)
# Change datatype of date from float to date object:
df['date'] = pd.to_datetime(df['date'], format='%Y%m%d')
# if frost_bool:
# df, precip = import_weather_for_fern(radar_df=df)
if frost_bool:
df, weather = import_weather_for_fern(radar_df=df, frost_bool=frost_bool)
if not frost_bool:
weather = import_weather_for_fern(radar_df=df, frost_bool=frost_bool)
df_name_list.append(csv[0:len(csv) - 4])
df_list.append(df)
return df_name_list, df_list, weather
def temporal_statistics(path_to_csv_folder, results_dir, fig_folder, plot_bool, weather_bool, frost_bool):
"""
Function calculates temporal statistics for all classes, polarizations and flight directions
:param fig_folder:
:param frost_bool:
:param path_to_csv_folder:
Path to folder, where csv files are stored
:param results_dir:
:param plot_bool: boolean
If set to True, charts of mean and std.dev. are plotted
:return: dict
Returns dictionary containing dictionaries with the temporal statistics for all classes, polarizations and
flight directions
"""
import csv
from scipy.ndimage.filters import gaussian_filter1d
df_name_list, df_list, weather = import_time_series_csv(path_to_csv_folder, frost_bool)
print(df_name_list)
statistics_dict = {}
# print(df_name_list)
# Iterate through all dataframes and compute temporal statistics
for i, df in enumerate(df_list):
# print(df)
# Temporal Mean:
df["patches_mean"] = df.mean(axis=1)
# print(df_name_list[i])
statistics_dict[df_name_list[i]] = {"Temporal Mean": round(df["patches_mean"].mean(), 3)}
statistics_dict[df_name_list[i]]["Temporal Median"] = round(df["patches_mean"].median(), 3)
# Temporal Standard Deviation:
df["patches_std"] = df.std(axis=1)
statistics_dict[df_name_list[i]]["Temporal Stdev."] = round(df["patches_std"].mean(), 3)
# Max., Min. and Amplitude:
statistics_dict[df_name_list[i]]["Temporal Max."] = round(df["patches_mean"].max(), 3)
statistics_dict[df_name_list[i]]["Temporal Min."] = round(df["patches_mean"].min(), 3)
statistics_dict[df_name_list[i]]["Temporal Amp."] = round(df["patches_mean"].max()
- df["patches_mean"].min(), 3)
print(statistics_dict)
dataframe_list1 = []
dataframe_list2 = []
dataframe_list3 = []
dataframe_list4 = []
tmp = 0
# Iterate through a quarter of the csv files to account for all four possible options of VH/VV/Asc/Desc
for j in range(0, int(len(df_name_list) / 4)):
# Iterate through Mean and Std.Dev.:
for k, elem in enumerate(["patches_mean"]):
# Plot mean of all patches over time if boolean is TRUE
if plot_bool:
# plt.figure(figsize=(11, 15))
plt.rcParams.update({'font.size': 14})
# TODO: make weather data stuff optional!!!!
def make_patch_spines_invisible(ax):
ax.set_frame_on(True)
ax.patch.set_visible(False)
for sp in ax.spines.values():
sp.set_visible(False)
fig, ax1 = plt.subplots()
fig.subplots_adjust(right=0.75)
fig.set_figheight(9)
fig.set_figwidth(16)
if weather_bool:
ax2 = ax1.twinx()
ax3 = ax1.twinx()
ax3.spines["right"].set_position(("axes", 1.1))
make_patch_spines_invisible(ax3)
ax3.spines["right"].set_visible(True)
# plt.figure(figsize=(16, 9))
# plt.rcParams.update({'font.size': 14})
if k == 0:
# ax1.figure(figsize=(16, 9))
if "VV" in df_name_list[tmp]:
title = str(df_name_list[tmp][:df_name_list[tmp].find("VV") - 1])
if "VH" in df_name_list[tmp]:
title = str(df_name_list[tmp][:df_name_list[tmp].find("VH") - 1])
plt.title('Mean of all Patches for class: ' + title)
if k == 1:
# ax1.figure(figsize=(16, 9))
plt.title('Std.Dev. of all Patches for class: ' + str(df_name_list[tmp][0:17]))
ax1.plot('date', elem, data=df_list[tmp], marker='', color='k', linewidth=0.7, label="")
ax1.plot('date', elem, data=df_list[tmp + 1], marker='', color='forestgreen', linewidth=0.7, label="")
# print(df_name_list[tmp + 3])
# print(df_name_list[tmp + 2])
ax1.plot('date', elem, data=df_list[tmp + 2], marker='', color='b', linewidth=0.7, label="")
ax1.plot('date', elem, data=df_list[tmp + 3], marker='', color='firebrick', linewidth=0.7, label="")
# filter time series using gaussian filter:
arr1 = gaussian_filter1d(df_list[tmp]["patches_mean"].to_numpy(), sigma=2)
arr2 = gaussian_filter1d(df_list[tmp + 1]["patches_mean"].to_numpy(), sigma=2)
arr3 = gaussian_filter1d(df_list[tmp + 2]["patches_mean"].to_numpy(), sigma=2)
arr4 = gaussian_filter1d(df_list[tmp + 3]["patches_mean"].to_numpy(), sigma=2)
# append filtered datasets to lists for further use:
dataframe_list1.append(arr1)
dataframe_list2.append(arr2)
dataframe_list3.append(arr3)
dataframe_list4.append(arr4)
# Plot filtered mean of all patches over time if boolean is TRUE
if plot_bool:
if "VH" in df_name_list[tmp]:
start_index = df_name_list[tmp].find("VH")
if "VV" in df_name_list[tmp]:
start_index = df_name_list[tmp].find("VV")
ax1.plot(df_list[tmp]['date'], arr1, marker='', color='k', linewidth=3,
label=df_name_list[tmp][start_index:])
ax1.plot(df_list[tmp + 1]['date'], arr2, marker='', color='forestgreen', linewidth=3,
label=df_name_list[tmp + 1][start_index:])
ax1.plot(df_list[tmp + 2]['date'], arr3, marker='', color='b', linewidth=3,
label=df_name_list[tmp + 2][start_index:])
ax1.plot(df_list[tmp + 3]['date'], arr4, marker='', color='firebrick', linewidth=3,
label=df_name_list[tmp + 3][start_index:])
# TODO: make weather data stuff optional!!!!
print(df_name_list[tmp + 3][18:len(df_name_list[tmp + 3])])
# plt.xlabel("Date")
ax1.set_xlabel('Date')
ax1.set_ylabel('Backscatter (dB)')
# plt.ylabel("Backscatter (dB)")
# ax1.legend(loc='lower left', bbox_to_anchor=(0.5, 1.005),
# ncol=4, fancybox=True, shadow=True)
ax1.legend(loc='upper center', ncol=4, fancybox=True, shadow=True)
ax1.set_ylim(-20, -5)
# plt.ylim((-18, -6))
if weather_bool:
print(weather)
ax2.plot(weather['date'], weather['precip'], color="silver")
# plt.ylabel("Precipitation (mm)")
ax2.set_ylabel('Precipitation (mm)', color="silver", loc='bottom')
# plt.ylim((-10, 50))
ax2.set_ylim(-10, 110)
ax3.plot(weather['date'], weather['temp'], color="orange")
# plt.ylabel("Precipitation (mm)")
ax3.set_ylabel('Avg_Temp (°C)', color="orange", loc='bottom')
# plt.ylim((-10, 110))
ax3.set_ylim(-10, 110)
plt.savefig(fig_folder + "Mean_for_Class_" + title + ".png", dpi=300)
plt.show()
# Increase tmp by 4 to get to the next class
tmp = tmp + 4
# Export temporal statistics to csv file:
with open(results_dir + 'Temp_Statistics.csv', 'w') as csv_file:
writer = csv.writer(csv_file)
for key, value in statistics_dict.items():
# print(value)
writer.writerow([key, value])
return dataframe_list1, dataframe_list2, dataframe_list3, dataframe_list4, df_list
def ratio_calc(path_to_folder, plot_bool, frost_bool):
"""
This function calculates the VH/VV ratio for all classes and flight directions and allows the user to plot the data
:param frost_bool: XXXXXXXXXXXXXXXXXXXXXXXX
:param path_to_folder: string
Path to folder, where csv files are stored
:param plot_bool: boolean
If set to TRUE, the plots are calculated and shown
:return: list
Returns a list of dataframes containing VH/VV ratios for all classes and flight directions
"""
pd.set_option('display.max_columns', None)
pd.set_option('display.expand_frame_repr', False)
pd.set_option('max_colwidth', -1)
df_name_list, df_list, weather = import_time_series_csv(path_to_folder + "CSV/", frost_bool)
tmp = 0
Asc_ratio_list = []
Desc_ratio_list = []
for i in range(int(len(df_list) / 4)):
print(i)
VH_Asc_df = df_list[tmp]
VH_Asc_df["patches_mean"] = df_list[tmp].mean(axis=1)
VH_Desc_df = df_list[tmp + 1]
VH_Desc_df["patches_mean"] = df_list[tmp + 1].mean(axis=1)
VV_Asc_df = df_list[tmp + 2]
VV_Asc_df["patches_mean"] = df_list[tmp + 2].mean(axis=1)
VV_Desc_df = df_list[tmp + 3]
VV_Desc_df["patches_mean"] = df_list[tmp + 3].mean(axis=1)
Asc_ratio = pd.DataFrame()
Asc_ratio["date"] = VH_Asc_df["date"]
Asc_ratio["VH_VV"] = VH_Asc_df["patches_mean"] - VV_Asc_df["patches_mean"]
# print(Asc_ratio)
Desc_ratio = pd.DataFrame()
Desc_ratio["date"] = VH_Desc_df["date"]
Desc_ratio["VH_VV"] = VH_Desc_df["patches_mean"] - VV_Desc_df["patches_mean"]
# print(Desc_ratio)
Asc_ratio_list.append(Asc_ratio)
Desc_ratio_list.append(Desc_ratio)
if plot_bool:
print(df_name_list[tmp])
# plt.title('Std.Dev. of all Patches for class: ' + str(df_name_list[tmp][0:6]))
plt.title('Std.Dev. of all Patches for class: ' + str(df_name_list[tmp]))
plt.plot('date', "VH_VV", data=Asc_ratio, marker='', color='blue', linewidth=2,
label="VH_VV_ratio for Asc")
plt.plot('date', "VH_VV", data=Desc_ratio, marker='', color='black', linewidth=2,
label="VH_VV_ratio for Desc")
plt.legend()
plt.show()
tmp = tmp + 4
return Asc_ratio_list, Desc_ratio_list
def import_weather_for_fern(radar_df, frost_bool):
fern_weather_station_data = "G:/Weather_data/Tageswerte_Lotschen_002.csv"
lotschen_weather_df = pd.read_csv(fern_weather_station_data, sep=";", decimal=',')
lotschen_weather_df = lotschen_weather_df.rename({"Tag": "date"}, axis=1)
lotschen_weather_df['date'] = pd.to_datetime(lotschen_weather_df['date'], format='%d.%m.%Y')
weather_df = pd.DataFrame(columns=['date', 't_min', 'precip'])
weather_df['date'] = lotschen_weather_df['date']
weather_df['t_min'] = lotschen_weather_df['MIN_TA200']
weather_df['precip'] = lotschen_weather_df['SUM_NN050']
weather_df['temp'] = lotschen_weather_df['AVG_TA200']
combine = pd.merge(radar_df, weather_df, on='date')
weather = combine
if frost_bool:
combine = combine.query("t_min >= -1")
combine = combine.reset_index(drop=True)
combine = combine.drop("t_min", axis=1)
combine = combine.drop("precip", axis=1)
combine = combine.drop("temp", axis=1)
return combine, weather
if not frost_bool:
return weather
| [
"marlinmueller.mm@gmail.com"
] | marlinmueller.mm@gmail.com |
c6b2e8b66b536d2ec8d0b0b20ddef5ee4cfd6e91 | 51a57371bca60654001445f75d1c17007a1de04d | /part5.py | 8874a60440882e00226e2d4f8d322c87567c39ce | [] | no_license | rahuljain1310/State-Estimation | 674c263e6526092f5577174ed5ca8fb4e3718eca | 625709dd5d1ee659853ea93ee4c50def0d811a11 | refs/heads/main | 2023-03-26T03:06:50.529939 | 2021-03-24T10:32:43 | 2021-03-24T10:32:43 | 350,550,564 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,694 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : part5.py
# Author : Rahul Jain
# Email : rahuljain13101999@gmail.com
# Date : 23/03/2021
import json
import numpy as np
from math import sin, cos, pi
from filter import KalmanFilter
from model import AirPlaneModel
from config import A, B, C, R
from matplotlib import pyplot as plt
import utils as utl
ACT = list()
EST = list()
listQ = [2, 10, 50]
for Q in listQ:
Q = (Q**2)*np.identity(2)
X_mean = np.array([10., 10., 1., 1.])
X_cov = 0.01*np.identity(4)
X0 = np.random.multivariate_normal(X_mean, X_cov, 1).reshape(-1, 1)
airplane = AirPlaneModel(A, B, C, R, Q, X0)
estimator = KalmanFilter(A, B, C, R, Q, X_mean, X_cov)
Tx, Ty = 30, 30
wx, wy = 2*pi/Tx, 2*pi/Ty
for T in range(200):
U = np.array([[sin(wx*T)], [cos(wy*T)]])
_ , zt = airplane.step(U)
estimator.step(U, zt)
ACT.append(airplane.actual_trajectory)
EST.append(estimator.estimated_trajectory)
fig, axs = plt.subplots(len(listQ))
for i in range(len(listQ)):
legends = ['Actual', f'Estimated σ = {listQ[i]}']
x_act, y_act = utl.get_xy_trajectory(ACT[i])
axs[i].plot(x_act, y_act)
x_est, y_est = utl.get_xy_trajectory(EST[i])
axs[i].plot(x_est, y_est)
axs[i].set_ylabel('Displacement')
axs[i].legend(legends)
plt.xlabel('Timestep, T')
plt.show()
fig, axs = plt.subplots()
for i in range(len(listQ)):
e = [np.linalg.norm(ACT[i][k]-EST[i][k]) for k in range(len(ACT[i])) ]
t = list(range(len(e)))
axs.plot(t, e)
axs.set_title('Actual and Estimated Trajectory')
axs.legend([f'σ = {q}' for q in listQ])
axs.set_title('Estimation Error')
axs.set_xlabel('Timestep, T')
axs.set_ylabel('Error')
plt.show()
| [
"ee1170476@iitd.ac.in"
] | ee1170476@iitd.ac.in |
cc212e89de838c5d797940d26049945d6c484869 | b21b1c9272a42c7c2a74ce871de90e0fe27798d0 | /rename.py | b252208f83c0697272e284511ba2742392aa8f8d | [] | no_license | rafiahmad16/object-detection-yolo-project | 387e47d3099f45ec6da56d9c95f79b0dd6c8f6dc | 8ea7e83377596ecb7bd1884cdf60414c4c3b5e9f | refs/heads/master | 2021-09-13T04:53:51.168185 | 2018-04-25T06:15:00 | 2018-04-25T06:15:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | import os
from shutil import copyfile

# Destination folder for the renamed copies.
imdir = 'v_images'
if not os.path.isdir(imdir):
    os.mkdir(imdir)

# NOTE(review): `folders` is computed but never used below — kept for
# compatibility with the original script; confirm before removing.
folders = [folder for folder in os.listdir('.') if 'video_images' in folder]

# Copy every .jpg from video_images/ into v_images/, renaming files to
# sequential numbers starting at 2008.
number = 2007
for imfile in os.scandir('video_images'):
    filename = imfile.name
    # os.path.splitext is robust to names without a dot and to names with
    # several dots, unlike the previous filename.split('.')[1] indexing
    # (which raised IndexError on extension-less files).
    extension = os.path.splitext(filename)[1]
    if extension == '.jpg':
        number = number + 1
        copyfile(imfile.path, os.path.join(imdir, str(number) + ".jpg"))
| [
"rafi01010010@Rafis-MacBook-Air.local"
] | rafi01010010@Rafis-MacBook-Air.local |
dd722e77341a10ff56977e18b26a3b12366106a6 | 7729ddbb2e4eb03469cd19f2ac6b5670b831923b | /src/seraing/urban/dataimport/__init__.py | 626639e80ca9ee026c47dbf2d06065f7a7893534 | [] | no_license | IMIO/seraing.urban.dataimport_22 | 5cd7bb6e09debeb72145af107b99997ba54f96a3 | db2f3c596572159692fa6cb11050111d1cb0fca5 | refs/heads/master | 2021-05-05T17:01:27.788747 | 2017-09-12T13:45:01 | 2017-09-12T13:45:01 | 103,239,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | # -*- coding: utf-8 -*-
"""Init and utils."""
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('seraing.urban.dataimport')
def initialize(context):
"""Initializer called when used as a Zope 2 product."""
| [
"julien.jaumotte@imio.be"
] | julien.jaumotte@imio.be |
3bd3772edc7f9f005f3716bbebdb9186090e3414 | 8fc36ac61daf5f24e4b3cec32996173eb1b554ec | /fuxi_kubernetes/handlers/retry.py | a5678923881ee0ed49db334321e31ec282723362 | [
"Apache-2.0"
] | permissive | vklonghml/fuxi-kubernetes | 8c1928658f1508efec1288193bed027c058950ff | 899fac8f9a885397b764cec988ee07e608569c81 | refs/heads/master | 2020-05-27T08:01:41.469905 | 2017-03-02T02:08:07 | 2017-03-02T02:08:07 | 82,534,295 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,289 | py | # Copyright (c) 2016 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import random
import time
from oslo_log import log as logging
from oslo_utils import excutils
from fuxi_kubernetes import exceptions
from fuxi_kubernetes.handlers import base
LOG = logging.getLogger(__name__)
# Default total retry budget and minimal back-off unit, both in seconds.
DEFAULT_TIMEOUT = 180
DEFAULT_INTERVAL = 3
class Retry(base.EventHandler):
    """Retries handler on failure.
    `Retry` can be used to decorate another `handler` to be retried whenever
    it raises any of the specified `exceptions`. If the `handler` does not
    succeed within the time limit specified by `timeout`, `Retry` will
    raise the exception risen by `handler`. `Retry` does not interrupt the
    `handler`, so the actual time spent within a single call to `Retry` may
    exceed the `timeout` depending on responsiveness of the `handler`.
    `Retry` implements a variation of exponential backoff algorithm [1] and
    ensures that there is a minimal time `interval` after the failed
    `handler` is retried for the same `event` (expected backoff E(c) =
    interval * 2 ** c / 2).
    [1] https://en.wikipedia.org/wiki/Exponential_backoff
    """
    def __init__(self, handler, exceptions=Exception,
                 timeout=DEFAULT_TIMEOUT, interval=DEFAULT_INTERVAL):
        # handler: the wrapped event handler to invoke.
        # exceptions: exception type (or tuple of types) that triggers a retry.
        # timeout: total time budget in seconds across all attempts.
        # interval: minimal back-off unit in seconds between attempts.
        self._handler = handler
        self._exceptions = exceptions
        self._timeout = timeout
        self._interval = interval
    def __call__(self, event):
        # Keep invoking the wrapped handler until it succeeds or the deadline
        # passes; on final failure the saved exception is re-raised by
        # save_and_reraise_exception.
        deadline = time.time() + self._timeout
        for attempt in itertools.count(1):
            try:
                self._handler(event)
                break
            except self._exceptions:
                with excutils.save_and_reraise_exception() as ex:
                    # _sleep returns 0 once the deadline is exceeded; any
                    # non-zero value means we slept and should retry rather
                    # than re-raise.
                    if self._sleep(deadline, attempt, ex.value):
                        ex.reraise = False
    def _sleep(self, deadline, attempt, exception):
        # Sleep before the next retry and return the seconds slept, or 0 if
        # the deadline was exceeded (telling the caller to re-raise).
        now = time.time()
        seconds_left = deadline - now
        if seconds_left <= 0:
            LOG.debug("Handler %s failed (attempt %s; %s), "
                      "timeout exceeded (%s seconds)",
                      self._handler, attempt, exceptions.format_msg(exception),
                      self._timeout)
            return 0
        # Randomized exponential backoff: interval * randint(1, 2**attempt - 1),
        # then clamped into [self._interval, seconds_left].
        interval = random.randint(1, 2 ** attempt - 1) * self._interval
        if interval > seconds_left:
            interval = seconds_left
        if interval < self._interval:
            interval = self._interval
        LOG.debug("Handler %s failed (attempt %s; %s), "
                  "retrying in %s seconds",
                  self._handler, attempt, exceptions.format_msg(exception),
                  interval)
        time.sleep(interval)
        return interval
| [
"vklonghml@gmail.com"
] | vklonghml@gmail.com |
75a03e1852de81c664b2d63a255fcd114b89401d | c25b974f8c124c3eb3701684ab6d3546368ba99a | /biplane_kine/smoothing/kf_filtering_helpers.py | 63790043b47b8c5c2e7062b6347e3487a7466b32 | [
"MIT"
] | permissive | klevis-a/process-vicon-biplane | 9530ea47b93260a361b318f04dcb1a2dbc7b2492 | f140589b4705f0d6411b80b8e2699add68d08662 | refs/heads/master | 2023-07-05T13:08:10.879630 | 2021-08-03T23:55:46 | 2021-08-03T23:55:46 | 287,614,920 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,832 | py | import numpy as np
import itertools
import distutils.util
from operator import itemgetter
from typing import Tuple, Union, Dict, Any, Sequence
from scipy.signal import butter, sosfiltfilt
from biplane_kine.smoothing.kalman_filtering import (LinearKF1DSimdKalman, FilterStep, StateMeans, CovarianceVec,
CorrVec, extract_corrs)
from biokinepy.np_utils import find_runs
import logging
log = logging.getLogger(__name__)
class Error(Exception):
    """Base error class for this module."""


class InsufficientDataError(Error):
    """Error returned when there is insufficient data to filter/smooth."""


class DoNotUseMarkerError(Error):
    """Error returned when a marker is labeled as do not use."""
def init_point(marker_pos_labeled: np.ndarray, marker_pos_filled: np.ndarray) -> Tuple[int, int]:
    """Determine endpoints [start, stop) for the marker data based on marker visibility in Vicon cameras."""
    # Frames where the marker is present (non-NaN x coordinate).
    visible_labeled = np.flatnonzero(~np.isnan(marker_pos_labeled[:, 0]))
    visible_filled = np.flatnonzero(~np.isnan(marker_pos_filled[:, 0]))
    # Start where both labeled and filled data have become available; stop one
    # past the last frame where the *labeled* data is present.
    start = max(visible_labeled[0], visible_filled[0])
    stop = visible_labeled[-1] + 1
    return start, stop
def pos_lowpass_filter(marker_pos_filled: np.ndarray, start: int, num_points: int) -> np.ndarray:
    """Low-pass filter marker position data from frame start for num_points frames.

    Uses the gap-filled trajectory because it is continuous; filled data is not
    otherwise used during Kalman filtering.
    """
    # 4th-order zero-phase Butterworth, 4 Hz cutoff at a 100 Hz capture rate.
    sos = butter(4, 4, 'lowpass', fs=100, output='sos')
    window = marker_pos_filled[start:start + num_points, :]
    filtered = np.empty((num_points, 3))
    for axis in range(3):
        filtered[:, axis] = sosfiltfilt(sos, window[:, axis])
    return filtered
def x0_guess(marker_pos_labeled: np.ndarray, marker_pos_filled: np.ndarray, dt: float, points_to_filter: int,
             points_to_average: int, min_num_points: int = 20) -> Tuple[np.ndarray, int, int]:
    """Guess marker position, velocity, and acceleration at the beginning of the Vicon capture.

    Raises
    ------
    biplane_kine.smoothing.kalman_filtering.InsufficientDataError
    """
    start_idx, stop_idx = init_point(marker_pos_labeled, marker_pos_filled)
    if stop_idx - start_idx < min_num_points:
        raise InsufficientDataError('Not enough data to make a starting point guess')
    # Never filter past the end of the visible data.
    points_to_filter = min(points_to_filter, stop_idx - start_idx)
    # Smooth positions, then numerically differentiate for velocity/acceleration.
    pos = pos_lowpass_filter(marker_pos_filled, start_idx, num_points=points_to_filter)
    vel = np.gradient(pos, dt, axis=0)
    acc = np.gradient(vel, dt, axis=0)
    # Initial state: first smoothed position plus averaged initial derivatives.
    x0 = np.stack([pos[0, :],
                   np.mean(vel[0:points_to_average, :], axis=0),
                   np.mean(acc[0:points_to_average, :], axis=0)])
    return x0, start_idx, stop_idx
def post_process_raw(marker_pos_labeled: np.ndarray, marker_pos_filled: np.ndarray, dt: float) \
        -> Tuple[FilterStep, FilterStep]:
    """Create FilterSteps from raw (labeled) and filled Vicon marker data."""
    def derive(pos: np.ndarray) -> StateMeans:
        # Position plus numerically differentiated velocity and acceleration.
        vel = np.gradient(pos, dt, axis=0)
        return StateMeans(pos, vel, np.gradient(vel, dt, axis=0))

    # Both steps span the full capture; no covariances/correlations exist for
    # unfiltered data, hence the trailing Nones.
    endpts = (0, marker_pos_labeled.shape[0])
    indices = np.arange(endpts[1])
    raw = FilterStep(endpts, indices, derive(marker_pos_labeled), None, None)
    filled = FilterStep(endpts, indices, derive(marker_pos_filled), None, None)
    return raw, filled
def kf_filter_marker_piece(marker_pos_labeled: np.ndarray, marker_pos_filled: np.ndarray, piece_start: int,
                           piece_stop: Union[int, None], dt: float, white_noise_var: float = 10000)\
        -> Tuple[FilterStep, FilterStep]:
    """Filter raw (labeled) Vicon marker data starting at frame piece_start and ending at frame piece_end."""
    pos_labeled_piece = marker_pos_labeled[piece_start:piece_stop, :]
    pos_filled_piece = marker_pos_filled[piece_start:piece_stop, :]
    # Initial state (pos/vel/acc) guessed from low-pass filtered data; start_idx
    # and stop_idx are relative to the piece.
    x0, start_idx, stop_idx = x0_guess(pos_labeled_piece, pos_filled_piece, dt, 50, 10)
    # guess for initial covariance, showing increasing uncertainty for velocity and acceleration
    p = np.tile(np.diag([1, 100, 1000]), (3, 1, 1))
    kf = LinearKF1DSimdKalman(dt=dt, discrete_white_noise_var=white_noise_var, r=1)
    filtered_means, smoothed_means, filtered_covs, smoothed_covs = \
        kf.filter_marker(pos_labeled_piece[start_idx:stop_idx, :], x0, p)
    # Derive correlation vectors from the covariance matrices.
    filtered_corrs = CorrVec(*extract_corrs(filtered_covs))
    smoothed_corrs = CorrVec(*extract_corrs(smoothed_covs))
    # Express endpoints/indices in whole-trial frame numbers again.
    filtered_endpts = (piece_start + start_idx, piece_start + stop_idx)
    filtered_indices = np.arange(filtered_endpts[0], filtered_endpts[1])
    filtered = FilterStep(filtered_endpts, filtered_indices, filtered_means, filtered_covs, filtered_corrs)
    smoothed = FilterStep(filtered_endpts, filtered_indices, smoothed_means, smoothed_covs, smoothed_corrs)
    return filtered, smoothed
def kf_filter_marker_all(marker_pos_labeled: np.ndarray, marker_pos_filled: np.ndarray, dt: float,
                         white_noise_var: float = 10000) -> Tuple[FilterStep, FilterStep]:
    """Filter raw (labeled) Vicon marker data."""
    # Convenience wrapper: filter the entire capture as one piece (0..end).
    return kf_filter_marker_piece(marker_pos_labeled, marker_pos_filled, 0, None, dt, white_noise_var)
def kf_filter_marker_piecewise(marker_pos_labeled: np.ndarray, marker_pos_filled: np.ndarray, dt: float,
                               max_gap: int = 75, max_gap_secondary: Tuple[int, int] = (30, 10), min_length: int = 75,
                               white_noise_var: float = 10000) -> Tuple[Sequence[FilterStep], Sequence[FilterStep]]:
    """Filter raw (labeled) Vicon marker data, accounting for gaps.
    There are two conditions that create a gap:
    1. The marker is not visible for more than or equal to max_gap frames
    2. Periods where marker is not visible for >= max_gap_secondary[0] frames are separated by an interval where the
    marker is visible for at most max_gap_secondary[1] frames
    Subsequently, all gaps are combined.
    Raises
    ------
    biplane_kine.smoothing.kalman_filtering.InsufficientDataError
    """
    start_idx, stop_idx = init_point(marker_pos_labeled, marker_pos_filled)
    nans_labeled = ~np.isnan(marker_pos_labeled[start_idx:stop_idx, 0])
    # Per usage below: runs[0] = run value (visible?), runs[1] = run start
    # indices, runs[2] = run lengths — TODO confirm against biokinepy.find_runs.
    runs = find_runs(nans_labeled)
    # primary gaps - no data for longer than max_gap
    primary_runs_gaps_idx_start = np.nonzero(((~runs[0]) & (runs[2] >= max_gap)))[0]
    primary_runs_gaps_idx_end = primary_runs_gaps_idx_start + 1
    # secondary gaps - gaps of max_gap_secondary[0] separated by spaces where at most max_gap_secondary[1] data exists
    runs_secondary_gaps_idx = np.nonzero(((~runs[0]) & (runs[2] >= max_gap_secondary[0])))[0]
    secondary_runs_gaps_idx_start = []
    secondary_runs_gaps_idx_end = []
    for i in range(runs_secondary_gaps_idx.size-1):
        # Sum of visible frames between two secondary gaps; if too few, bridge
        # the two gaps into one.
        if np.sum(runs[0][runs_secondary_gaps_idx[i]+1:runs_secondary_gaps_idx[i+1]] *
                  runs[2][runs_secondary_gaps_idx[i]+1:runs_secondary_gaps_idx[i+1]]) < max_gap_secondary[1]:
            secondary_runs_gaps_idx_start.append(runs_secondary_gaps_idx[i])
            secondary_runs_gaps_idx_end.append(runs_secondary_gaps_idx[i+1]+1)
    # now let's combine the gaps
    all_runs_gaps_idx = \
        list(itertools.chain.from_iterable([zip(primary_runs_gaps_idx_start, primary_runs_gaps_idx_end),
                                            zip(secondary_runs_gaps_idx_start, secondary_runs_gaps_idx_end)]))
    def gaps_overlap(gap1: Tuple[int, int], gap2: Tuple[int, int]) -> bool:
        """Do the gaps overlap?"""
        return (gap1[0] < gap2[1]) and (gap2[0] < gap1[1])
    def combine_gaps(gap1: Tuple[int, int], gap2: Tuple[int, int]) -> Tuple[int, int]:
        """Combined the two gaps."""
        min_start = min(gap1, gap2, key=itemgetter(0))
        max_end = max(gap1, gap2, key=itemgetter(1))
        return min_start[0], max_end[1]
    # this only works if the list is sorted by the start index!
    def recursive_combine(initial_gap_list, combined_gap_list):
        # if there are no more gaps to combine then return the combined_gap_list
        if not initial_gap_list:
            return combined_gap_list
        # if we can combine the current gap (combined_gap_list[-1]) with the next gap in the list to process
        # (initial_gap_list[0])
        if gaps_overlap(combined_gap_list[-1], initial_gap_list[0]):
            # combine the gaps and update the current gap
            combined = combine_gaps(combined_gap_list[-1], initial_gap_list[0])
            combined_gap_list[-1] = combined
        else:
            # can't combine so add the considered gap becomes the current gap
            combined_gap_list.append(initial_gap_list[0])
        # either way we have taken care of this gap so remove it from the list of gaps to be considered
        del(initial_gap_list[0])
        # march forward
        return recursive_combine(initial_gap_list, combined_gap_list)
    def recursive_combine_start(initial_gap_list):
        # no gaps
        if not initial_gap_list:
            return []
        # the first combination is easy, it's just the first gap by itself
        initial_gap_list_copy = initial_gap_list.copy()
        combined_gap_list = [initial_gap_list_copy[0]]
        del(initial_gap_list_copy[0])
        return recursive_combine(initial_gap_list_copy, combined_gap_list)
    # first sort by the start index
    all_runs_gaps_idx.sort(key=itemgetter(0))
    all_runs_gaps_ids_merged = recursive_combine_start(all_runs_gaps_idx)
    # break the list of tuples apart into two lists
    runs_gaps_idx_start_final = [gap[0] for gap in all_runs_gaps_ids_merged]
    runs_gaps_idx_end_final = [gap[1] for gap in all_runs_gaps_ids_merged]
    # number of pieces to filter will always be one greater than the number of gaps
    num_pieces = len(all_runs_gaps_ids_merged) + 1
    pieces_end_idx = np.full((num_pieces, ), stop_idx)
    pieces_start_idx = np.full((num_pieces,), start_idx)
    # there may not be any gaps so check first
    if all_runs_gaps_ids_merged:
        # interior pieces run from the end index of a gap to the start index of the next gap
        pieces_end_idx[:-1] = runs[1][runs_gaps_idx_start_final] + start_idx
        pieces_start_idx[1:] = runs[1][runs_gaps_idx_end_final] + start_idx
    filtered_pieces = []
    smoothed_pieces = []
    # filter each piece, skipping pieces shorter than min_length frames
    for i in range(num_pieces):
        if (pieces_end_idx[i] - pieces_start_idx[i]) < min_length:
            log.info('Skipping Filtering piece %d running from %d to %d', i, pieces_start_idx[i], pieces_end_idx[i])
            continue
        log.info('Filtering piece %d running from %d to %d ', i, pieces_start_idx[i], pieces_end_idx[i])
        piece_filtered, piece_smoothed = kf_filter_marker_piece(marker_pos_labeled, marker_pos_filled,
                                                                pieces_start_idx[i], pieces_end_idx[i], dt,
                                                                white_noise_var)
        filtered_pieces.append(piece_filtered)
        smoothed_pieces.append(piece_smoothed)
    if not filtered_pieces:
        raise InsufficientDataError('No resulting segments to filter.')
    return filtered_pieces, smoothed_pieces
def combine_pieces(pieces: Sequence[FilterStep]) -> FilterStep:
    """Combine multiple filtered pieces"""
    # The combined step runs from the start of the first piece to the end of
    # the last; frames in between that belong to no piece stay NaN.
    endpts = (pieces[0].endpts[0], pieces[-1].endpts[1])
    indices = np.arange(*endpts)
    num_frames = endpts[1] - endpts[0]
    # Twelve NaN-initialized channels: 3 means, 6 covariances, 3 correlations.
    channels = [np.full((num_frames, 3), np.nan, dtype=np.float64) for _ in range(12)]
    (pos, vel, acc,
     pos_cov, vel_cov, acc_cov, pos_vel_cov, pos_acc_cov, vel_acc_cov,
     pos_vel_corr, pos_acc_corr, vel_acc_corr) = channels
    for piece in pieces:
        slc = np.s_[piece.endpts[0] - endpts[0]:piece.endpts[1] - endpts[0], :]
        sources = (piece.means.pos, piece.means.vel, piece.means.acc,
                   piece.covars.pos, piece.covars.vel, piece.covars.acc,
                   piece.covars.pos_vel, piece.covars.pos_acc, piece.covars.vel_acc,
                   piece.corrs.pos_vel, piece.corrs.pos_acc, piece.corrs.vel_acc)
        for dest, src in zip(channels, sources):
            dest[slc] = src
    means = StateMeans(pos, vel, acc)
    cov = CovarianceVec(pos_cov, vel_cov, acc_cov, pos_vel_cov, pos_acc_cov, vel_acc_cov)
    corr = CorrVec(pos_vel_corr, pos_acc_corr, vel_acc_corr)
    return FilterStep(endpts, indices, means, cov, corr)
def piecewise_filter_with_exception(marker_exceptions: Dict[str, Any], marker_pos_labeled: np.ndarray,
                                    marker_pos_filled: np.ndarray, dt: float, white_noise_var: float = 10000, **kwargs)\
        -> Tuple[FilterStep, FilterStep, FilterStep, FilterStep]:
    """Filter marker position data (accounting for gaps) and for exceptions specified in in marker_exceptions.
    **kwargs are passed to kf_filter_marker_piecewise once combined with marker_exceptions
    Raises
    ------
    biplane_kine.smoothing.kalman_filtering.DoNotUseMarkerError
    """
    # NOTE(review): distutils is deprecated (removed in Python 3.12); consider
    # replacing strtobool with a local helper when upgrading.
    should_use = bool(distutils.util.strtobool(marker_exceptions.get('use_marker', 'True')))
    if not should_use:
        log.warning('Skipping marker because it is labeled as DO NOT USE.')
        raise DoNotUseMarkerError('Marker has been marked as DO NOT USE')
    smoothing_params = marker_exceptions.get('smoothing_params', {})
    frame_ignores = np.array(marker_exceptions.get('frame_ignores', []))
    # ignore frames (frame_ignores are 1-based frame numbers; the -1 below
    # converts them to 0-based indices before NaN-ing them out of a copy)
    if frame_ignores.size > 0:
        marker_pos_labeled_copy = marker_pos_labeled.copy()
        marker_pos_labeled_copy[frame_ignores - 1, :] = np.nan
    else:
        marker_pos_labeled_copy = marker_pos_labeled
    # Marker-specific smoothing params override the caller-supplied kwargs.
    combined_smoothing_params = {**kwargs, **smoothing_params}
    raw, filled = post_process_raw(marker_pos_labeled, marker_pos_filled, dt)
    filtered_pieces, smoothed_pieces = kf_filter_marker_piecewise(marker_pos_labeled_copy, marker_pos_filled, dt,
                                                                  white_noise_var=white_noise_var,
                                                                  **combined_smoothing_params)
    filtered = combine_pieces(filtered_pieces)
    smoothed = combine_pieces(smoothed_pieces)
    return raw, filled, filtered, smoothed
| [
"klevis.aliaj@utah.edu"
] | klevis.aliaj@utah.edu |
47d15cbb5b377278a0596f903530d487f4f3cc6c | 1b512092052c8fe7f6919ee870431ac7b3a65f66 | /pal/examples/MixedHalidesBE2/run_simple_misokg.py | 77cd6d71fbb4de358efaf37a34fd2962c948ae5b | [] | no_license | ClancyLab/PAL | d7b9dd1caeb62d363041b8e4c7f402d6edbf741e | cb0ef048de37014922b943ae6b5eaffd3d43da63 | refs/heads/master | 2022-02-25T05:31:20.590421 | 2019-10-14T19:47:10 | 2019-10-14T19:47:10 | 210,862,362 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,151 | py | from pal.opt import Optimizer
import pal.utils.strings as pal_strings
from pal.constants.solvents import solvents
from pal.kernels.matern import maternKernel52 as mk52
# from pal.objectives.binding_energy import get_binding_energy as BE
from pal.acquisition.misokg import getNextSample_misokg
from pal.stats.MLE import MLE
from pal.stats.MAP import MAP
import os
import copy
# import random
import numpy as np
import cPickle as pickle
def run_misokg(run_index):
    """Run a single misoKG Bayesian-optimization trial, indexed by run_index.

    Loads two enthalpy information sources (IS0 expensive/accurate, IS1 cheap),
    draws a random 10-sample historical seed set, configures the PAL Optimizer
    (mean/covariance models, hyperparameter bounds, rho coupling between the
    information sources), and runs the optimization, dumping all outputs under
    data_dumps/.  NOTE(review): written for Python 2 (cPickle, text-mode 'r'
    handles for pickle.load).
    """
    # Store data for debugging
    IS0 = pickle.load(open("enthalpy_N1_R3_Ukcal-mol", 'r'))
    IS1 = pickle.load(open("enthalpy_N1_R2_Ukcal-mol", 'r'))
    # Generate the main object
    sim = Optimizer()
    # Assign simulation properties
    #sim.hyperparameter_objective = MAP
    sim.hyperparameter_objective = MLE
    ###################################################################################################
    # File names
    sim.fname_out = "enthalpy_misokg.dat"
    sim.fname_historical = None
    # Information sources, in order from expensive to cheap
    # (negated so the optimizer maximizes; keys are "halides cation solvent")
    sim.IS = [
        lambda h, c, s: -1.0 * IS0[' '.join([''.join(h), c, s])],
        lambda h, c, s: -1.0 * IS1[' '.join([''.join(h), c, s])]
    ]
    sim.costs = [
        1.0,
        0.1
    ]
    sim.logger_fname = "data_dumps/%d_misokg.log" % run_index
    if os.path.exists(sim.logger_fname):
        os.system("rm %s" % sim.logger_fname)
    os.system("touch %s" % sim.logger_fname)
    sim.obj_vs_cost_fname = "data_dumps/%d_misokg.dat" % run_index
    sim.mu_fname = "data_dumps/%d_mu_misokg.dat" % run_index
    sim.sig_fname = "data_dumps/%d_sig_misokg.dat" % run_index
    sim.combos_fname = "data_dumps/%d_combos_misokg.dat" % run_index
    sim.hp_fname = "data_dumps/%d_hp_misokg.dat" % run_index
    sim.acquisition_fname = "data_dumps/%d_acq_misokg.dat" % run_index
    sim.save_extra_files = True
    ########################################
    # Override the possible combinations with the reduced list of IS0
    # Because we do this, we should also generate our own historical sample
    combos_no_IS = [k[1] + "Pb" + k[0] + "_" + k[2] for k in [key.split() for key in IS0.keys()]]
    sim.historical_nsample = 10
    choices = np.random.choice(combos_no_IS, sim.historical_nsample, replace=False)
    tmp_data = pal_strings.alphaToNum(
        choices,
        solvents,
        mixed_halides=True,
        name_has_IS=False)
    # Evaluate every sampled combination at every information source so the
    # history contains [IS index, descriptor..., objective] rows.
    data = []
    for IS in range(len(sim.IS)):
        for i, d in enumerate(tmp_data):
            h, c, _, s, _ = pal_strings.parseName(pal_strings.parseNum(d, solvents, mixed_halides=True, num_has_IS=False), name_has_IS=False)
            c = c[0]
            data.append([IS] + d + [sim.IS[IS](h, c, s)])
    sim.fname_historical = "data_dumps/%d.history" % run_index
    pickle.dump(data, open(sim.fname_historical, 'w'))
    simple_data = [d for d in data if d[0] == 0]
    pickle.dump(simple_data, open("data_dumps/%d_reduced.history" % run_index, 'w'))
    ########################################
    sim.n_start = 10 # The number of starting MLE samples
    sim.reopt = 20
    sim.ramp_opt = None
    sim.parallel = False
    # Possible compositions by default
    sim.A = ["Cs", "MA", "FA"]
    sim.B = ["Pb"]
    sim.X = ["Cl", "Br", "I"]
    sim.solvents = copy.deepcopy(solvents)
    sim.S = list(set([v["name"] for k, v in sim.solvents.items()]))
    sim.mixed_halides = True
    sim.mixed_solvents = False
    # Parameters for debugging and overwritting
    sim.debug = False
    sim.verbose = True
    sim.overwrite = True # If True, warning, else Error
    sim.acquisition = getNextSample_misokg
    # Functional forms of our mean and covariance
    # MEAN: 4 * mu_alpha + mu_zeta
    # COV: sig_alpha * |X><X| + sig_beta * I_N + sig_zeta + MaternKernel(S, weights, sig_m)
    SCALE = [2.0, 4.0][int(sim.mixed_halides)]
    # _1, _2, _3 used as dummy entries
    def mean(X, Y, theta):
        # Constant prior mean across all points.
        mu = np.array([SCALE * theta.mu_alpha + theta.mu_zeta for _ in Y])
        return mu
    sim.mean = mean
    # Legacy single-task covariance; superseded by cov() below and not
    # assigned to sim.
    def cov_old(X, Y, theta):
        A = theta.sig_alpha * np.dot(np.array(X)[:, 1:-3], np.array(X)[:, 1:-3].T)
        B = theta.sig_beta * np.diag(np.ones(len(X)))
        C = theta.sig_zeta
        D = mk52(np.array(X)[:, -3:-1], [theta.l1, theta.l2], theta.sig_m)
        return theta.rho_matrix(X) * (A + B + C + D)
    def cov(X0, Y, theta):
        # Single-source kernel Kx: linear + white-noise + constant + Matern-5/2.
        A = theta.sig_alpha * np.dot(np.array(X0)[:, :-3], np.array(X0)[:, :-3].T)
        B = theta.sig_beta * np.diag(np.ones(len(X0)))
        C = theta.sig_zeta
        D = mk52(np.array(X0)[:, -3:-1], [theta.l1, theta.l2], theta.sig_m)
        Kx = A + B + C + D
        # Lower-triangular rho factors coupling the information sources.
        L = np.array([
            np.array([theta.rho[str(sorted([i, j]))] if i >= j else 0.0 for j in range(theta.n_IS)])
            for i in range(theta.n_IS)
        ])
        # Normalize L to stop over-scaling values small
        if theta.normalize_L:
            L = L / np.linalg.norm(L)
        # Force it to be positive semi-definite
        Ks = L.dot(L.T)
        if theta.normalize_Ks:
            Ks = Ks / np.linalg.norm(Ks)
        e = np.diag(np.array([theta.e1, theta.e2]))
        Ks = e.dot(Ks.dot(e))
        # Full multi-source covariance is the Kronecker product Ks ⊗ Kx.
        return np.kron(Ks, Kx)
    sim.cov = cov
    # Hyperparameters: None means "optimize", with (lower, upper) bounds; the
    # upper bounds may be callables of the observed data.
    sim.theta.bounds = {}
    sim.theta.mu_alpha, sim.theta.bounds['mu_alpha'] = None, (1E-3, lambda _, Y: max(Y))
    sim.theta.sig_alpha, sim.theta.bounds['sig_alpha'] = None, (1E-2, lambda _, Y: 10.0 * np.var(Y))
    sim.theta.sig_beta, sim.theta.bounds['sig_beta'] = None, (1E-2, lambda _, Y: 10.0 * np.var(Y))
    sim.theta.mu_zeta, sim.theta.bounds['mu_zeta'] = None, (1E-3, lambda _, Y: max(Y))
    sim.theta.sig_zeta, sim.theta.bounds['sig_zeta'] = None, (1E-2, lambda _, Y: 10.0 * np.var(Y))
    sim.theta.sig_m, sim.theta.bounds['sig_m'] = None, (1E-2, lambda _, Y: np.var(Y))
    sim.theta.l1, sim.theta.bounds['l1'] = None, (1E-1, 1)
    sim.theta.l2, sim.theta.bounds['l2'] = None, (1E-1, 1)
    sim.theta.e1, sim.theta.bounds['e1'] = None, (1E-1, 1.0)
    sim.theta.e2, sim.theta.bounds['e2'] = None, (1E-1, 1.0)
    # # NOTE! This is a reserved keyword in misoKG. We will generate a list of the same length
    # # of the information sources, and use this for scaling our IS.
    sim.theta.rho = {"[0, 0]": 1.0, "[0, 1]": 0.96, "[1, 1]": 1.0}
    #sim.theta.rho = {"[0, 0]": 1.0}
    #sim.theta.rho = {"[0, 0]": None, "[0, 1]": None, "[1, 1]": None}
    # NOTE(review): this 'rho [0, 0]' bound is immediately overwritten by the
    # (0.1, 1.0) assignments a few lines below.
    sim.theta.bounds['rho [0, 0]'] = (1E-1, 1E1)
    # sim.theta.bounds['rho [0, 1]'] = (1E-1, 1E1)
    # sim.theta.bounds['rho [1, 1]'] = (1E-1, 1E1)
    sim.theta.bounds['rho [0, 0]'] = (0.1, 1.0)
    sim.theta.bounds['rho [0, 1]'] = (0.1, 1.0)
    sim.theta.bounds['rho [1, 1]'] = (0.1, 1.0)
    sim.theta.set_hp_names()
    sim.primary_rho_opt = False
    sim.update_hp_only_with_IS0 = False
    sim.update_hp_only_with_overlapped = False
    sim.theta.normalize_L = False
    sim.theta.normalize_Ks = False
    # This was a test feature that actually over-wrote rho to be PSD
    # sim.force_rho_psd = True
    ###################################################################################################
    # Start simulation
    sim.run()
| [
"hherbol@gmail.com"
] | hherbol@gmail.com |
299c27d535669cc4de79dbbd0196b49da9b5ae69 | a428ca9108f899de7f69503126272a830102e06c | /app.py | 139faa37075b7cbcfa55b596c517bd906a5ca73b | [] | no_license | Hammadali1/detection-fyp | 5c4eefd503f52a22baaaec350fa51c1ee92e79d1 | 572948f3be70ef4082dab2964b0d86b128163adc | refs/heads/master | 2021-06-20T01:25:30.627043 | 2019-07-07T15:40:25 | 2019-07-07T15:40:25 | 193,284,974 | 0 | 0 | null | 2021-03-20T01:12:54 | 2019-06-22T22:35:36 | Python | UTF-8 | Python | false | false | 166 | py |
from flask import Flask
# Flask application object for this module.
app = Flask(__name__)
@app.route("/")
def retrieve():
    """Handle requests to the root URL by returning a fixed string."""
    return "yahoo"
if __name__ == "__main__":
    # Run the Flask development server (default host/port) when executed directly.
    app.run()
| [
"noreply@github.com"
] | noreply@github.com |
00f569ae77237d2d80518fa93f5d1e27c4d3b867 | f9632d4a822f6525a007598a1f67757ac9891749 | /rakuten/playground/extract_category.py | 7b023d96f6665916c58bdc27b25ebb3531856c12 | [] | no_license | rpryzant/marketing | ab055a5ae02ed287cb5a763d3e937efa72d057df | 9f463da0a2e7c9c48951e793d95534963cd19721 | refs/heads/master | 2021-09-07T01:20:31.720381 | 2018-02-14T22:13:40 | 2018-02-14T22:13:40 | 116,716,301 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,074 | py | """
extracts a category from the data
"""
import map_reduce
from collections import defaultdict
# make parent dicts
genre_tsv = '/Volumes/datasets/rakuten_dump/merchandize_data/genres/genres_with_english.tsv'

# Map each genre id to its parent genre id, skipping malformed rows.
# (Uses a context manager so the file is closed, catches only the ValueError
# raised by a bad split, and avoids shadowing the builtin `id`.)
id_to_pid = {}
with open(genre_tsv) as genre_file:
    for line in genre_file:
        line = line.strip()
        try:
            genre_id, ja_name, pid, en_name = line.split('\t')
        except ValueError:
            # Row does not have exactly four tab-separated fields.
            continue
        id_to_pid[genre_id] = pid
# the category you want to extract
# from fashion_categories
# NOTE: a missing comma previously fused '402513' and '402517' into the single
# (nonexistent) id '402513402517' via implicit string concatenation — fixed.
CATEGORIES_TO_EXTRACT = [
    '502456', '100454', '502427', '100472', '110933', '502368', '100939', '100433', '216129',
    '550009', '111103', '207733', '205193', '551648', '551648', '206587', '303816', '206591',
    '206590', '303803', '551441', '551409', '551433', '551403', '551445', '551413', '551682',
    '551668', '551648', '551664', '551644', '551677', '551672', '551652', '205197', '200807',
    '207699', '100542', '100371', '558929', '204994', '402513', '402517', '402515', '508925',
    '508930', '501642', '402087', '201780', '302242', '204982', '201794', '302464', '407933',
    '502027', '402463', '402475', '501965', '501962', '501963', '501976', '506410', '200859'
]
def is_child(id):
    """Return True if `id` is one of CATEGORIES_TO_EXTRACT or a descendant
    of one, walking up the parent chain in id_to_pid."""
    current = id
    # Note: a node is only tested while it still has a parent entry, so the
    # ultimate root (absent from id_to_pid) is never itself checked.
    while current in id_to_pid:
        if current in CATEGORIES_TO_EXTRACT:
            return True
        current = id_to_pid[current]
    return False
def map_fn(path):
    """Append to `path + '.OUT'` every line of `path` whose final
    tab-separated field is a genre under CATEGORIES_TO_EXTRACT."""
    # Context managers ensure both handles are closed even on error; the
    # original leaked the output handle.
    with open(path) as infile, open(path + '.OUT', 'a') as out:
        for l in infile:
            parts = l.strip().split("\t")
            genre_id = parts[-1]
            if is_child(genre_id):
                out.write(l.strip() + '\n')
    return
def reduce_fn(result_list):
    """No-op reduce step: all output is written as a side effect of map_fn."""
    return ''
# Run the extraction over every product TSV shard; matching rows are written
# to per-shard '.OUT' files by map_fn, so the reduce output file stays empty.
map_reduce.mapreduce(
    map_fn=map_fn,
    reduce_fn=reduce_fn,
    # input_re='/scr/rpryzant/marketing/rakuten/data/products_tsv/*.tsv',
    input_re='/Volumes/datasets/rakuten_dump/merchandize_data/products_tsv/*.tsv',
    output_filename='/scr/rpryzant/marketing/rakuten/categories',
    n_cores=2
)
| [
"rapigan@gmail.com"
] | rapigan@gmail.com |
d37c132d0eab9fae1f0a71da0763c2b6382dab48 | b86c1eee6a67adab6d5daf58ff1f6cd3fadda387 | /newBackend/backendapi/migrations/0009_auto_20210520_0222.py | df7a0918cde06011b2af03596de0fdd87667ffd9 | [] | no_license | patrick-simon045/FINAL-YEAR-PROJECT | d5d24e1224d978dabd88928226bfc6f5b188e21a | b5d01e8cfaf52595454cdab85c6b56d340210dfa | refs/heads/master | 2023-06-25T19:28:52.246179 | 2021-07-07T18:21:59 | 2021-07-07T18:21:59 | 383,879,042 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # Generated by Django 3.0 on 2021-05-20 01:22
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: enforce uniqueness of
    # (lecturer, course, academic_year) rows on the lecture_course model.
    dependencies = [
        ('backendapi', '0008_auto_20210520_0220'),
    ]
    operations = [
        migrations.AlterUniqueTogether(
            name='lecture_course',
            unique_together={('lecturer', 'course', 'academic_year')},
        ),
    ]
| [
"77192100+patrick-simon045@users.noreply.github.com"
] | 77192100+patrick-simon045@users.noreply.github.com |
79ceb1608e1615bbf1de6d05391d801f4773fff5 | c8693ab1ea11ed82d7a52b6582f2b6e8caa60417 | /Python/client.py | 65e80e2be46fb58e5c2fa07365b6efe467bffdb6 | [] | no_license | ValentinModan/AttackOnPython | 0b75b2a9d358fa7880b39458c2e0d99899dfec53 | ab029fb46077510913f9c9c50db464275809db8f | refs/heads/master | 2020-04-04T21:00:07.359565 | 2019-02-26T19:30:58 | 2019-02-26T19:30:58 | 156,268,799 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,163 | py |
from bluetooth import *
import sys
import json
import time
# Python 2 compatibility: alias raw_input to input.
if sys.version < '3':
    input = raw_input
# Default server address; can be overridden on the command line.
addr = "A4:50:46:1F:BE:90"
if len(sys.argv) < 2:
    print("no device specified. Searching all nearby bluetooth devices for")
    print("the SampleServer service")
else:
    addr = sys.argv[1]
print("Searching for SampleServer on %s" % addr)
# search for the SampleServer service
uuid = "f0937a74-f0e3-11e8-8eb2-f2801f1b9fd1"
service_matches = find_service( uuid = uuid, address = addr )
if len(service_matches) == 0:
    print("couldn't find the SampleServer service =(")
    sys.exit(0)
# Connect to the first advertised match.
first_match = service_matches[0]
port = first_match["port"]
name = first_match["name"]
host = first_match["host"]
print("connecting to \"%s\" on %s" % (name, host))
# Create the client socket
sock=BluetoothSocket( RFCOMM )
sock.connect((host, port))
print("connected. type stuff")
# NOTE(review): a and b are assigned but never used.
a = "aaa"
b = "bbb"
# Payload sent to the server; the "a" counter is incremented every second.
myDictionary = {
    "a":1,
    "b":2
}
# NOTE(review): this loop never terminates (the break condition is commented
# out), so the sock.close() below is unreachable.
while True:
    #data = input()
    time.sleep(1)
    data_string = json.dumps(myDictionary)
    #if len(data) == 0: break
    sock.send(data_string)
    myDictionary["a"] = myDictionary["a"] + 1
sock.close()
| [
"aiordache22@gmail.com"
] | aiordache22@gmail.com |
d903e132f3f76958023738b2b7dbc6b604ca5f7d | 47bd6a35f8d911d73b3b335ba9f82881b7507efb | /Automatic_download_bounce.py | 4a46d6deb77527cab25f1c40d1308e0f25b60745 | [] | no_license | nandini4189/Pthon-Scripts | 144f5ad670aac7b5aed5030c7eee9c94a405b50d | 8f55bfde98192ec6a042dda3badfa1ef2791c6b7 | refs/heads/main | 2023-08-22T01:43:50.225468 | 2021-10-13T05:45:32 | 2021-10-13T05:45:32 | 416,604,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | import requests
import json
import io
import codecs
import pandas as pd
# api-endpoint
# NOTE(review): SendGrid username and API key are hard-coded in this URL;
# move them to environment variables before sharing or committing this script.
URL = "https://api.sendgrid.com/api/bounces.get.json?api_user=graghavendra&api_key=Diversio3413&date=1"
# sending get request and saving the response as response object
r = requests.get(url = URL)
# extracting data in json format
data= r.json()
# Dump the bounce list to disk as pretty-printed, key-sorted JSON...
with open(r'C:\Users\tkalyan\Desktop\data2.json', 'w') as outfile:
    json.dump(data, outfile, indent=4, sort_keys=True, separators=(",", ':'))
# ...then re-read the same JSON file with pandas and convert it to CSV.
df = pd.read_json (r'C:\Users\tkalyan\Desktop\data2.json')
df.to_csv (r'C:\Users\tkalyan\Desktop\bounces.csv', index = None)
print("The data is stored in the csv format")
"noreply@github.com"
] | noreply@github.com |
b661a1cfef36f0ddc1bcdb83973c0be2589e1ab1 | 28b76b454aca0f46ec28d292c44cd58932d4f924 | /blogging/migrations/0007_auto_20180417_1727.py | 41a39954d34313eebebd708e84c05289f4f69952 | [] | no_license | U-Divya/Django-Mysite | 56a1906abf259c41e806f2e6291754c7cb6cfc1a | 740f87903169e997f16095486516f58305c97b22 | refs/heads/master | 2020-03-15T16:27:08.505545 | 2018-06-08T16:14:40 | 2018-06-08T16:14:40 | 132,234,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-04-17 11:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter Blog.status to an optional Y/N choice field.

    NOTE(review): ``default=('y', 'Yes')`` is a tuple, not one of the choice
    keys ('Y' / 'n'); the model likely intended ``default='Y'`` - verify
    against the current model definition before relying on this default.
    """

    # Must be applied after the previous blogging migration.
    dependencies = [
        ('blogging', '0006_auto_20180417_1724'),
    ]
    operations = [
        migrations.AlterField(
            model_name='blog',
            name='status',
            field=models.CharField(blank=True, choices=[('Y', 'Yes'), ('n', 'No')], default=('y', 'Yes'), max_length=20),
        ),
    ]
| [
"noreply@github.com"
] | noreply@github.com |
30e71e9ab69110f4a8e98185c665f48bca3aaf81 | bdc294f477244b9fff34b8e1bff968057a600a45 | /app/Trading.py | 34dde6d3fbf2311e83e95e16a4092cefaaea954e | [
"MIT"
] | permissive | PaulGG-Code/BinanceBot | e3cd1157d96880a2217955e98e6608bb98b93e20 | 2c4e3a06f2957226086702a2eec47e29799a7d7e | refs/heads/main | 2023-02-25T00:36:13.405857 | 2021-02-01T22:00:11 | 2021-02-01T22:00:11 | 335,093,869 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 21,412 | py | # -*- coding: UTF-8 -*-
# Define Python imports
import os
import sys
import time
import config
import threading
import math
import logging
import logging.handlers
# Define Custom imports
from Database import Database
from Orders import Orders
# Log line layout shared by the handlers created in this module.
formater_str = '%(asctime)s,%(msecs)d %(levelname)s %(name)s: %(message)s'
formatter = logging.Formatter(formater_str)
datefmt="%Y-%b-%d %H:%M:%S"
# Known log destinations; only LOGGER_FILE below is actually used.
LOGGER_ENUM = {'debug':'debug.log', 'trading':'trades.log','errors':'general.log'}
#LOGGER_FILE = LOGGER_ENUM['pre']
LOGGER_FILE = "binance-trader.log"
FORMAT = '%(asctime)-15s - %(levelname)s: %(message)s'
# NOTE(review): logging.basicConfig() returns None, so this module-level
# `logger` is always None; per-symbol loggers come from Trading.setup_logger.
logger = logging.basicConfig(filename=LOGGER_FILE, filemode='a',
                    format=formater_str, datefmt=datefmt,
                    level=logging.INFO)
# Aproximated value to get back the commision for sell and buy
TOKEN_COMMISION = 0.001
BNB_COMMISION = 0.0005
#((eth*0.05)/100)
class Trading():
    """Stateful driver for one buy/sell trading session on a single symbol.

    The class attributes below act as defaults; instances overwrite most of
    them in __init__ from the parsed command-line options.
    """
    # Define trade vars
    order_id = 0          # id of the currently open exchange order (0 = none)
    order_data = None     # last raw order payload returned by the exchange
    buy_filled = True
    sell_filled = True
    buy_filled_qty = 0
    sell_filled_qty = 0
    # percent (When you drop 10%, sell panic.)
    stop_loss = 0
    # Buy/Sell qty
    quantity = 0
    # BTC amount
    amount = 0
    # float(step_size * math.floor(float(free)/step_size))
    step_size = 0
    # Define static vars
    WAIT_TIME_BUY_SELL = 1 # seconds
    WAIT_TIME_CHECK_BUY_SELL = 0.2 # seconds
    WAIT_TIME_CHECK_SELL = 5 # seconds
    WAIT_TIME_STOP_LOSS = 20 # seconds
    MAX_TRADE_SIZE = 7 # int
    # Type of commision, Default BNB_COMMISION
    commision = BNB_COMMISION
    def __init__(self, option):
        """Copy the parsed CLI options onto the instance and set up logging.

        :param option: argparse-style namespace holding orderid, quantity,
            wait_time, stop_loss, increasing, decreasing, amount, commision,
            symbol and debug.
        """
        print("options: {0}".format(option))
        # Get argument parse options
        self.option = option
        # Define parser vars
        self.order_id = self.option.orderid
        self.quantity = self.option.quantity
        self.wait_time = self.option.wait_time
        self.stop_loss = self.option.stop_loss
        self.increasing = self.option.increasing
        self.decreasing = self.option.decreasing
        # BTC amount
        self.amount = self.option.amount
        # Type of commision
        if self.option.commision == 'TOKEN':
            self.commision = TOKEN_COMMISION
        # setup Logger
        self.logger = self.setup_logger(self.option.symbol, debug=self.option.debug)
def setup_logger(self, symbol, debug=True):
"""Function setup as many loggers as you want"""
#handler = logging.FileHandler(log_file)
#handler.setFormatter(formatter)
#logger.addHandler(handler)
logger = logging.getLogger(symbol)
stout_handler = logging.StreamHandler(sys.stdout)
if debug:
logger.setLevel(logging.DEBUG)
stout_handler.setLevel(logging.DEBUG)
#handler = logging.handlers.SysLogHandler(address='/dev/log')
#logger.addHandler(handler)
stout_handler.setFormatter(formatter)
logger.addHandler(stout_handler)
return logger
    def buy(self, symbol, quantity, buyPrice, profitableSellingPrice):
        """Place a limit buy order and record it in the database.

        :return: the exchange order id on success, or None if the order
            could not be created (the error is logged at debug level).
        """
        # Do you have an open order?
        self.check_order()
        try:
            # Create order
            orderId = Orders.buy_limit(symbol, quantity, buyPrice)
            # Database log
            Database.write([orderId, symbol, 0, buyPrice, 'BUY', quantity, self.option.profit])
            #print('Buy order created id:%d, q:%.8f, p:%.8f' % (orderId, quantity, float(buyPrice)))
            self.logger.info('%s : Buy order created id:%d, q:%.8f, p:%.8f, Take profit aprox :%.8f' % (symbol, orderId, quantity, float(buyPrice), profitableSellingPrice))
            self.order_id = orderId
            return orderId
        except Exception as e:
            #print('bl: %s' % (e))
            self.logger.debug('Buy error: %s' % (e))
            # Back off briefly before the caller retries.
            time.sleep(self.WAIT_TIME_BUY_SELL)
            return None
def sell(self, symbol, quantity, orderId, sell_price, last_price):
'''
The specified limit will try to sell until it reaches.
If not successful, the order will be canceled.
'''
buy_order = Orders.get_order(symbol, orderId)
if buy_order['status'] == 'FILLED' and buy_order['side'] == 'BUY':
#print('Buy order filled... Try sell...')
self.logger.info('Buy order filled... Try sell...')
else:
time.sleep(self.WAIT_TIME_CHECK_BUY_SELL)
if buy_order['status'] == 'FILLED' and buy_order['side'] == 'BUY':
#print('Buy order filled after 0.1 second... Try sell...')
self.logger.info('Buy order filled after 0.1 second... Try sell...')
elif buy_order['status'] == 'PARTIALLY_FILLED' and buy_order['side'] == 'BUY':
#print('Buy order partially filled... Try sell... Cancel remaining buy...')
self.logger.info('Buy order partially filled... Try sell... Cancel remaining buy...')
self.cancel(symbol, orderId)
else:
self.cancel(symbol, orderId)
#print('Buy order fail (Not filled) Cancel order...')
self.logger.warning('Buy order fail (Not filled) Cancel order...')
self.order_id = 0
return
sell_order = Orders.sell_limit(symbol, quantity, sell_price)
sell_id = sell_order['orderId']
#print('Sell order create id: %d' % sell_id)
self.logger.info('Sell order create id: %d' % sell_id)
time.sleep(self.WAIT_TIME_CHECK_SELL)
if sell_order['status'] == 'FILLED':
#print('Sell order (Filled) Id: %d' % sell_id)
#print('LastPrice : %.8f' % last_price)
#print('Profit: %%%s. Buy price: %.8f Sell price: %.8f' % (self.option.profit, float(sell_order['price']), sell_price))
self.logger.info('Sell order (Filled) Id: %d' % sell_id)
self.logger.info('LastPrice : %.8f' % last_price)
self.logger.info('Profit: %%%s. Buy price: %.8f Sell price: %.8f' % (self.option.profit, float(sell_order['price']), sell_price))
self.order_id = 0
self.order_data = None
return
'''
If all sales trials fail,
the grievance is stop-loss.
'''
if self.stop_loss > 0:
# If sell order failed after 5 seconds, 5 seconds more wait time before selling at loss
time.sleep(self.WAIT_TIME_CHECK_SELL)
if self.stop(symbol, quantity, sell_id, last_price):
if Orders.get_order(symbol, sell_id)['status'] != 'FILLED':
#print('We apologize... Sold at loss...')
self.logger.info('We apologize... Sold at loss...')
else:
#print('We apologize... Cant sell even at loss... Please sell manually... Stopping program...')
self.logger.info('We apologize... Cant sell even at loss... Please sell manually... Stopping program...')
self.cancel(symbol, sell_id)
exit(1)
while (sell_status != 'FILLED'):
time.sleep(self.WAIT_TIME_CHECK_SELL)
sell_status = Orders.get_order(symbol, sell_id)['status']
lastPrice = Orders.get_ticker(symbol)
#print('Status: %s Current price: %.8f Sell price: %.8f' % (sell_status, lastPrice, sell_price))
#print('Sold! Continue trading...')
self.logger.info('Status: %s Current price: %.8f Sell price: %.8f' % (sell_status, lastPrice, sell_price))
self.logger.info('Sold! Continue trading...')
self.order_id = 0
self.order_data = None
def stop(self, symbol, quantity, orderId, last_price):
# If the target is not reached, stop-loss.
stop_order = Orders.get_order(symbol, orderId)
stopprice = self.calc(float(stop_order['price']))
lossprice = stopprice - (stopprice * self.stop_loss / 100)
status = stop_order['status']
# Order status
if status == 'NEW' or status == 'PARTIALLY_FILLED':
if self.cancel(symbol, orderId):
# Stop loss
if last_price >= lossprice:
sello = Orders.sell_market(symbol, quantity)
#print('Stop-loss, sell market, %s' % (last_price))
self.logger.info('Stop-loss, sell market, %s' % (last_price))
sell_id = sello['orderId']
if sello == True:
return True
else:
# Wait a while after the sale to the loss.
time.sleep(self.WAIT_TIME_STOP_LOSS)
statusloss = sello['status']
if statusloss != 'NEW':
print('Stop-loss, sold')
self.logger.info('Stop-loss, sold')
return True
else:
self.cancel(symbol, sell_id)
return False
else:
sello = Orders.sell_limit(symbol, quantity, lossprice)
print('Stop-loss, sell limit, %s' % (lossprice))
time.sleep(self.WAIT_TIME_STOP_LOSS)
statusloss = sello['status']
if statusloss != 'NEW':
print('Stop-loss, sold')
return True
else:
self.cancel(symbol, sell_id)
return False
else:
print('Cancel did not work... Might have been sold before stop loss...')
return True
elif status == 'FILLED':
self.order_id = 0
self.order_data = None
print('Order filled')
return True
else:
return False
    def check(self, symbol, orderId, quantity):
        """Poll the pending buy order *orderId*; if it stays NEW, cancel it
        and re-buy at market, retrying up to MAX_TRADE_SIZE times.

        Updates self.order_id / self.order_data with the final order state.
        """
        # If profit is available and there is no purchase from the specified price, take it with the market.
        # Do you have an open order?
        self.check_order()
        trading_size = 0
        time.sleep(self.WAIT_TIME_BUY_SELL)
        while trading_size < self.MAX_TRADE_SIZE:
            # Order info
            order = Orders.get_order(symbol, orderId)
            side = order['side']
            price = float(order['price'])
            # TODO: Sell partial qty
            orig_qty = float(order['origQty'])
            self.buy_filled_qty = float(order['executedQty'])
            status = order['status']
            #print('Wait buy order: %s id:%d, price: %.8f, orig_qty: %.8f' % (symbol, order['orderId'], price, orig_qty))
            self.logger.info('Wait buy order: %s id:%d, price: %.8f, orig_qty: %.8f' % (symbol, order['orderId'], price, orig_qty))
            if status == 'NEW':
                # Limit buy has not filled: replace it with a market buy.
                if self.cancel(symbol, orderId):
                    buyo = Orders.buy_market(symbol, quantity)
                    #print('Buy market order')
                    self.logger.info('Buy market order')
                    self.order_id = buyo['orderId']
                    self.order_data = buyo
                    if buyo == True:
                        break
                    else:
                        trading_size += 1
                        continue
                else:
                    break
            elif status == 'FILLED':
                self.order_id = order['orderId']
                self.order_data = order
                #print('Filled')
                self.logger.info('Filled')
                break
            elif status == 'PARTIALLY_FILLED':
                #print('Partial filled')
                self.logger.info('Partial filled')
                break
            else:
                # Any other status (CANCELED, REJECTED, ...): count as a retry.
                trading_size += 1
                continue
def cancel(self, symbol, orderId):
# If order is not filled, cancel it.
check_order = Orders.get_order(symbol, orderId)
if not check_order:
self.order_id = 0
self.order_data = None
return True
if check_order['status'] == 'NEW' or check_order['status'] != 'CANCELLED':
Orders.cancel_order(symbol, orderId)
self.order_id = 0
self.order_data = None
return True
def calc(self, lastBid):
try:
#Estimated sell price considering commision
return lastBid + (lastBid * self.option.profit / 100) + (lastBid *self.commision)
#return lastBid + (lastBid * self.option.profit / 100)
except Exception as e:
print('Calc Error: %s' % (e))
return
def check_order(self):
# If there is an open order, exit.
if self.order_id > 0:
exit(1)
    def action(self, symbol):
        """Run one trading cycle for *symbol*.

        With an open order, spawn a thread that tries to sell it at the
        profitable price; otherwise buy when the profit/range trigger fires.
        Called once per cycle from run().
        """
        #import ipdb; ipdb.set_trace()
        # Order amount
        quantity = self.quantity
        # Fetches the ticker price
        lastPrice = Orders.get_ticker(symbol)
        # Order book prices
        lastBid, lastAsk = Orders.get_order_book(symbol)
        # Target buy price, add little increase #87
        buyPrice = lastBid + self.increasing
        # Target sell price, decrease little
        sellPrice = lastAsk - self.decreasing
        # Spread ( profit )
        profitableSellingPrice = self.calc(lastBid)
        # Check working mode
        if self.option.mode == 'range':
            # Fixed buy/sell prices override the computed ones.
            buyPrice = float(self.option.buyprice)
            sellPrice = float(self.option.sellprice)
            profitableSellingPrice = sellPrice
        # Screen log
        if self.option.prints and self.order_id == 0:
            spreadPerc = (lastAsk/lastBid - 1) * 100.0
            #print('price:%.8f buyp:%.8f sellp:%.8f-bid:%.8f ask:%.8f spread:%.2f' % (lastPrice, buyPrice, profitableSellingPrice, lastBid, lastAsk, spreadPerc))
            self.logger.debug('price:%.8f buyprice:%.8f sellprice:%.8f bid:%.8f ask:%.8f spread:%.2f Originalsellprice:%.8f' % (lastPrice, buyPrice, profitableSellingPrice, lastBid, lastAsk, spreadPerc, profitableSellingPrice-(lastBid *self.commision) ))
        # analyze = threading.Thread(target=analyze, args=(symbol,))
        # analyze.start()
        if self.order_id > 0:
            # Profit mode
            if self.order_data is not None:
                order = self.order_data
                # Last control
                # Recompute the target from the actual fill price when it is better.
                newProfitableSellingPrice = self.calc(float(order['price']))
                if (lastAsk >= newProfitableSellingPrice):
                    profitableSellingPrice = newProfitableSellingPrice
            # range mode
            if self.option.mode == 'range':
                profitableSellingPrice = self.option.sellprice
            '''
            If the order is complete,
            try to sell it.
            '''
            # Perform buy action
            sellAction = threading.Thread(target=self.sell, args=(symbol, quantity, self.order_id, profitableSellingPrice, lastPrice,))
            sellAction.start()
            return
        '''
        Did profit get caught
        if ask price is greater than profit price,
        buy with my buy price,
        '''
        if (lastAsk >= profitableSellingPrice and self.option.mode == 'profit') or \
           (lastPrice <= float(self.option.buyprice) and self.option.mode == 'range'):
            self.logger.info ("MOde: {0}, Lastsk: {1}, Profit Sell Price {2}, ".format(self.option.mode, lastAsk, profitableSellingPrice))
            if self.order_id == 0:
                self.buy(symbol, quantity, buyPrice, profitableSellingPrice)
            # Perform check/sell action
            # checkAction = threading.Thread(target=self.check, args=(symbol, self.order_id, quantity,))
            # checkAction.start()
def logic(self):
return 0
def filters(self):
symbol = self.option.symbol
# Get symbol exchange info
symbol_info = Orders.get_info(symbol)
if not symbol_info:
#print('Invalid symbol, please try again...')
self.logger.error('Invalid symbol, please try again...')
exit(1)
symbol_info['filters'] = {item['filterType']: item for item in symbol_info['filters']}
return symbol_info
def format_step(self, quantity, stepSize):
return float(stepSize * math.floor(float(quantity)/stepSize))
    def validate(self):
        """Validate the configured options against the exchange's symbol
        filters.

        Derives the effective order quantity (from --amount or --quantity,
        falling back to a minNotional-based default), snaps it to the
        LOT_SIZE step, and exits the process if any filter would reject
        the order.
        """
        valid = True
        symbol = self.option.symbol
        filters = self.filters()['filters']
        # Order book prices
        lastBid, lastAsk = Orders.get_order_book(symbol)
        lastPrice = Orders.get_ticker(symbol)
        minQty = float(filters['LOT_SIZE']['minQty'])
        minPrice = float(filters['PRICE_FILTER']['minPrice'])
        minNotional = float(filters['MIN_NOTIONAL']['minNotional'])
        quantity = float(self.option.quantity)
        # stepSize defines the intervals that a quantity/icebergQty can be increased/decreased by.
        stepSize = float(filters['LOT_SIZE']['stepSize'])
        # tickSize defines the intervals that a price/stopPrice can be increased/decreased by
        tickSize = float(filters['PRICE_FILTER']['tickSize'])
        # If option increasing default tickSize greater than
        if (float(self.option.increasing) < tickSize):
            self.increasing = tickSize
        # If option decreasing default tickSize greater than
        if (float(self.option.decreasing) < tickSize):
            self.decreasing = tickSize
        # Just for validation
        lastBid = lastBid + self.increasing
        # Set static
        # If quantity or amount is zero, minNotional increase 10%
        quantity = (minNotional / lastBid)
        quantity = quantity + (quantity * 10 / 100)
        notional = minNotional
        if self.amount > 0:
            # Calculate amount to quantity
            quantity = (self.amount / lastBid)
        if self.quantity > 0:
            # Format quantity step
            # Explicit --quantity wins over both defaults above.
            quantity = self.quantity
        quantity = self.format_step(quantity, stepSize)
        notional = lastBid * float(quantity)
        # Set Globals
        self.quantity = quantity
        self.step_size = stepSize
        # minQty = minimum order quantity
        if quantity < minQty:
            #print('Invalid quantity, minQty: %.8f (u: %.8f)' % (minQty, quantity))
            self.logger.error('Invalid quantity, minQty: %.8f (u: %.8f)' % (minQty, quantity))
            valid = False
        if lastPrice < minPrice:
            #print('Invalid price, minPrice: %.8f (u: %.8f)' % (minPrice, lastPrice))
            self.logger.error('Invalid price, minPrice: %.8f (u: %.8f)' % (minPrice, lastPrice))
            valid = False
        # minNotional = minimum order value (price * quantity)
        if notional < minNotional:
            #print('Invalid notional, minNotional: %.8f (u: %.8f)' % (minNotional, notional))
            self.logger.error('Invalid notional, minNotional: %.8f (u: %.8f)' % (minNotional, notional))
            valid = False
        if not valid:
            exit(1)
def run(self):
cycle = 0
actions = []
symbol = self.option.symbol
print('Auto Trading for Binance.com @yasinkuyu')
print('\n')
# Validate symbol
self.validate()
print('Started...')
print('Trading Symbol: %s' % symbol)
print('Buy Quantity: %.8f' % self.quantity)
print('Stop-Loss Amount: %s' % self.stop_loss)
#print('Estimated profit: %.8f' % (self.quantity*self.option.profit))
if self.option.mode == 'range':
if self.option.buyprice == 0 or self.option.sellprice == 0:
print('Please enter --buyprice / --sellprice\n')
exit(1)
print('Range Mode Options:')
print('\tBuy Price: %.8f', self.option.buyprice)
print('\tSell Price: %.8f', self.option.sellprice)
else:
print('Profit Mode Options:')
print('\tPreferred Profit: %0.2f%%' % self.option.profit)
print('\tBuy Price : (Bid+ --increasing %.8f)' % self.increasing)
print('\tSell Price: (Ask- --decreasing %.8f)' % self.decreasing)
print('\n')
startTime = time.time()
"""
# DEBUG LINES
actionTrader = threading.Thread(target=self.action, args=(symbol,))
actions.append(actionTrader)
actionTrader.start()
endTime = time.time()
if endTime - startTime < self.wait_time:
time.sleep(self.wait_time - (endTime - startTime))
# 0 = Unlimited loop
if self.option.loop > 0:
cycle = cycle + 1
"""
while (cycle <= self.option.loop):
startTime = time.time()
actionTrader = threading.Thread(target=self.action, args=(symbol,))
actions.append(actionTrader)
actionTrader.start()
endTime = time.time()
if endTime - startTime < self.wait_time:
time.sleep(self.wait_time - (endTime - startTime))
# 0 = Unlimited loop
if self.option.loop > 0:
cycle = cycle + 1
| [
"noreply@github.com"
] | noreply@github.com |
d9befda86b06264640487d776569c49952aa341a | 7513ddce2c659687216d79f9790d05a44c8f47b5 | /pyportfolio/utils.py | 9b3a1104f21fadeca279316989e9950f9f54b852 | [
"MIT",
"Python-2.0"
] | permissive | 0xKD/pyportfolio | 02a7a23e593ae80ff0abc6f0df057f51e8d0cde5 | caa721431f2ca4c0bb25d570751ce9d6d99afcee | refs/heads/main | 2023-03-28T22:10:21.965727 | 2021-04-09T04:51:50 | 2021-04-09T04:51:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | import logging
# Configure root logging once, then expose a package-level logger at INFO.
logging.basicConfig()
logger = logging.getLogger('pyportfolio')
logger.setLevel(logging.INFO)
# Filter Utils
def and_filter(filters):
    """Combine *filters* into one predicate that passes only when every
    filter passes for the given value."""
    def combined(value):
        return all(check(value) for check in filters)
    return combined
def or_filter(filters):
    """Combine *filters* into one predicate that passes when at least one
    filter passes for the given value."""
    def combined(value):
        return any(check(value) for check in filters)
    return combined
def filter_dict(mydict, keys):
    """Return a new dict holding only *keys* from *mydict*.

    Raises KeyError if any requested key is missing, like the plain lookup.
    """
    selected = {}
    for key in keys:
        selected[key] = mydict[key]
    return selected
"19279785+kaushiksk@users.noreply.github.com"
] | 19279785+kaushiksk@users.noreply.github.com |
1caac16e5225167efc86a074ade91bb7bb01268c | e051dbc70bbb656892946c3dadea75a859fb1af4 | /skin-generation/xacro-skin-generator-triangle-1.py | ae94c387251fd48860fcad34b10a67d95c0e516b | [] | no_license | matejhof/nao-mirror-self-recog | d5d2d83edd5d088d534e8ac05577f159e02e564c | cdd5433ed68f4c488373363c7d76668101c3a7ae | refs/heads/master | 2020-06-26T02:26:58.243911 | 2019-09-28T20:18:59 | 2019-09-28T20:18:59 | 199,496,349 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,114 | py | # This version creates sub-links connected to casing with fixed joints, with coordinate
# transformations applied to these sub-links
# Status: does not work, contact plugin does not register collisions in sub-links
import numpy as np
from placeSensors import *
# Input files: one vertex list per skin patch. Each row appears to hold the
# taxel centre (x,y,z), surface normal (nx,ny,nz) and a yaw correction angle
# (columns 0:3, 3:6 and 6 below) - TODO confirm against the coordinate files.
VERTICES = [
    "coordinates/torso.txt",
    "coordinates/head.txt",
    "coordinates/left-arm.txt",
    "coordinates/right-arm.txt",
]
# Sensor cylinder dimensions, kept as strings because they are substituted
# verbatim into the URDF template.
SENSOR_LENGTH = "0.001"
SENSOR_RADIUS = "0.0022"
SENSOR_NAMES = [
    "skin_torso",
    "skin_head",
    "skin_l_wrist",
    "skin_r_wrist",
]
# Parent links the generated sensors attach to; index-aligned with VERTICES.
LINK_NAMES = [
    "torso_casing",
    "head_casing",
    "l_wrist_casing",
    "r_wrist_casing",
]
# xacro arguments that enable/disable each body part's contact plugin.
ARG_NAMES = [
    "contact_torso",
    "contact_head",
    "contact_left_arm",
    "contact_right_arm",
]
sensors_per_link = 10
with open('link-template-triangle-1.urdf.txt', 'r') as fid:
    linkTemplate = fid.read()
# The assembled xacro document is emitted on stdout.
print('<?xml version="1.0" ?>')
print('<robot xmlns:xacro="http://www.ros.org/wiki/xacro">')
gazebo_str = '';
for i, file in enumerate(VERTICES):
    vertices = np.loadtxt(file)
    collisionStr = ""
    print(' <xacro:macro name="insert_skin_' + LINK_NAMES[i] + '">')
    for j, vertex in enumerate(vertices):
        name = LINK_NAMES[i] + '_collision_' + str(j+1)
        center = vertex[0:3]
        normal = vertex[3:6]
        # Rotation aligning the template cylinder's +Z axis with the taxel normal.
        rpy = calcRPY(np.array([0,0,1]), normal)
        # str(array)[1:-1] strips the brackets, yielding "x y z" as URDF expects.
        link_str = linkTemplate.replace("SENSOR_XYZ", str(center)[1:-1])
        link_str = link_str.replace("SENSOR_RPY", str(rpy)[1:-1])
        link_str = link_str.replace("SENSOR_LENGTH", str(SENSOR_LENGTH))
        link_str = link_str.replace("SENSOR_RADIUS", str(SENSOR_RADIUS))
        link_str = link_str.replace("COLLISION_NAME", name)
        link_str = link_str.replace("LINK_NAME", LINK_NAMES[i])
        link_str = link_str.replace("YAW_CORRECTION", str(vertex[6]))
        print(link_str)
        # Each taxel contributes sensors_per_link collision entries; assumes the
        # template names them COLLISION_NAME0..COLLISION_NAME9 - TODO confirm.
        for k in range(sensors_per_link):
            collisionStr = collisionStr + "        <collision>" + name + str(k) + "</collision>\n"
    print(' </xacro:macro>')
    # Accumulate one <gazebo> contact-sensor section per body part; the whole
    # accumulated block is printed once, after all macros.
    gazebo_str = gazebo_str + ' <xacro:if value="$(arg ' + ARG_NAMES[i] + ')">\n'
    gazebo_str = gazebo_str + ' <gazebo reference="' + LINK_NAMES[i] + '">\n'
    gazebo_str = gazebo_str + ' <sensor name="' + LINK_NAMES[i] + '_collision_sensor" type="contact">\n'
    gazebo_str = gazebo_str + ' <plugin name="contact_plugin_' + LINK_NAMES[i] + '" filename="/home/user/catkin_ws/src/gazebo_ContactSensor_plugin/build/devel/lib/libcontact.so" />\n'
    gazebo_str = gazebo_str + ' <always_on>1</always_on>\n'
    gazebo_str = gazebo_str + ' <visualize>true</visualize>\n'
    gazebo_str = gazebo_str + ' <update_rate>100</update_rate>\n'
    gazebo_str = gazebo_str + ' <contact>\n'
    gazebo_str = gazebo_str + collisionStr
    gazebo_str = gazebo_str + ' </contact>\n'
    gazebo_str = gazebo_str + ' </sensor>\n'
    gazebo_str = gazebo_str + ' </gazebo>\n'
    gazebo_str = gazebo_str + ' </xacro:if>\n'
print(gazebo_str)
print('</robot>')
| [
"outravoj@fel.cvut.cz"
] | outravoj@fel.cvut.cz |
21a03c5b4c4fdf5f65f8f33de569e2d41869d67b | 325fde42058b2b82f8a4020048ff910cfdf737d7 | /src/account/azext_account/vendored_sdks/subscription/operations/_subscription_operation_operations.py | ff4eaf571ec7c44d355033680ae253d44a157678 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | ebencarek/azure-cli-extensions | 46b0d18fe536fe5884b00d7ffa30f54c7d6887d1 | 42491b284e38f8853712a5af01836f83b04a1aa8 | refs/heads/master | 2023-04-12T00:28:44.828652 | 2021-03-30T22:34:13 | 2021-03-30T22:34:13 | 261,621,934 | 2 | 5 | MIT | 2020-10-09T18:21:52 | 2020-05-06T01:25:58 | Python | UTF-8 | Python | false | false | 4,364 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class SubscriptionOperationOperations(object):
    """SubscriptionOperationOperations operations.
    You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Version of the API to be used with the client request. Current version is 2019-10-01-preview. Constant value: "2019-10-01-preview".
    """
    # NOTE: AutoRest-generated code; change the generator config, not this file.
    # Re-export the generated models module on the operations class.
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2019-10-01-preview"
        self.config = config
    def get(
            self, operation_id, custom_headers=None, raw=False, **operation_config):
        """Get the status of the pending Microsoft.Subscription API operations.
        :param operation_id: The operation ID, which can be found from the
         Location field in the generate recommendation response header.
        :type operation_id: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: SubscriptionCreationResult or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.subscription.models.SubscriptionCreationResult or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'operationId': self._serialize.url("operation_id", operation_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        # 200 carries the result body; 202 means the operation is still running.
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        header_dict = {}
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('SubscriptionCreationResult', response)
            header_dict = {
                'Location': 'str',
                'Retry-After': 'int',
            }
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            client_raw_response.add_headers(header_dict)
            return client_raw_response
        return deserialized
    get.metadata = {'url': '/providers/Microsoft.Subscription/subscriptionOperations/{operationId}'}
| [
"noreply@github.com"
] | noreply@github.com |
6f41487d395ae9b4cdd0b9f9e9a9e493d9089af4 | 83bd62b9ef3d8aa7cbc892b1546704afe5bc5ee8 | /film/migrations/0010_film_review.py | 05bd82d4279e7f5426034e2d230c792bc1768216 | [] | no_license | baraa65/movies-search | fca843a04af2398898313cb38f547d4b5b083c02 | bf43193ce2d3786fd449f085170f4f83f8482b8b | refs/heads/master | 2023-07-17T08:40:32.315088 | 2021-09-01T20:59:29 | 2021-09-01T20:59:29 | 383,206,405 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | # Generated by Django 3.2 on 2021-07-05 10:37
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the many-to-many 'review' relation from Film to
    the user model.

    NOTE(review): related_name and through are both 'review_of_film', and a
    through model is normally referenced by its model name - verify this
    matches the actual through model in film/models.py.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('film', '0009_auto_20210705_1332'),
    ]
    operations = [
        migrations.AddField(
            model_name='film',
            name='review',
            field=models.ManyToManyField(related_name='review_of_film', through='film.review_of_film', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"baraash75@gmail.com"
] | baraash75@gmail.com |
609618bd1fe9be3d32aa26f4ec5cb69297691451 | a2d30e0320b9a15862d1a21659c511064750df02 | /MyBlg/migrations/0002_auto_20170703_1632.py | d65369cef80443c613d4421897d5601a8c141c89 | [] | no_license | wjanaszek/first_blog | 0bdda4ed7e0e08930ede96534181a67ac329b9fa | a1f2b5392e17dc256fac20a0193a21e5eae9891a | refs/heads/master | 2021-03-27T15:59:21.537108 | 2017-07-11T20:16:24 | 2017-07-11T20:16:24 | 96,134,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-03 16:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated: give Post.publish a default of the current time."""

    # Must be applied after the initial MyBlg migration.
    dependencies = [
        ('MyBlg', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='publish',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
| [
"trash.wjanaszek@gmail.com"
] | trash.wjanaszek@gmail.com |
5f16893b288023099588adfdbd4c49d0bf59bceb | 33fff53a9b24e600848fc615769850b4590cecef | /pigprice/pigprice/middlewares.py | a55ae880c97b4f6611f8d54d19853307ca46cee7 | [
"MIT"
] | permissive | wshoo/scrapy-pj | 746b63697e6a4263df6afb15f67c33b929e187e0 | c1cb279e02fd13930b446f021b85ed4be313c5fe | refs/heads/master | 2022-11-25T16:26:32.319524 | 2019-12-25T05:44:13 | 2019-12-25T05:44:13 | 170,986,821 | 0 | 0 | MIT | 2022-11-22T03:26:03 | 2019-02-16T09:39:51 | Python | UTF-8 | Python | false | false | 3,601 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class PigpriceSpiderMiddleware(object):
    """Pass-through spider middleware generated for the pigprice project.

    Every hook forwards its input unchanged; it exists as a scaffold to be
    filled in with project-specific behaviour.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy calls this factory to build the middleware and wire signals.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Let every response reach the spider untouched.
        return None

    def process_spider_output(self, response, result, spider):
        # Forward items/requests produced by the spider unchanged.
        yield from result

    def process_spider_exception(self, response, exception, spider):
        # No special handling; Scrapy's default error behaviour applies.
        return None

    def process_start_requests(self, start_requests, spider):
        # Emit the start requests exactly as configured on the spider.
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class PigpriceDownloaderMiddleware(object):
    """Pass-through downloader middleware for the pigprice project.

    All hooks defer to Scrapy's defaults: requests continue down the chain
    and responses are returned unchanged.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy calls this factory to build the middleware and wire signals.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # Returning None lets the request continue to the next middleware.
        return None

    def process_response(self, request, response, spider):
        # Hand the downloaded response back unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # Defer to other middlewares / the default exception handling.
        return None

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| [
"wsh_oo@live.com"
] | wsh_oo@live.com |
19bbf36ff2037e3a684c244330eb212bbee1e7c9 | abf440dd253331e90e6b5bb3d1486eb240c98cde | /cgan/embeddings/glove_loader.py | c080a9c2d5df468fe56d7ad15c7d77e5fd2a082d | [] | no_license | MdeBoer95/Emoji-Generator | cd7638ebbab4833db2122cf4ebf4e48b0b9d0770 | 6960eb5764838d3971b1dfcf9c7d82995ef81577 | refs/heads/master | 2023-06-01T08:24:51.969005 | 2020-07-14T17:29:06 | 2020-07-14T17:29:06 | 268,293,400 | 1 | 0 | null | 2023-05-01T21:44:04 | 2020-05-31T14:09:34 | Python | UTF-8 | Python | false | false | 3,888 | py | import numpy as np
"""
def download_glove(data_dir_path, to_file_path):
if not os.path.exists(to_file_path):
if not os.path.exists(data_dir_path):
os.makedirs(data_dir_path)
glove_zip = data_dir_path + '/glove.6B.zip'
if not os.path.exists(glove_zip):
print('glove file does not exist, downloading from internet')
urllib.request.urlretrieve(url='http://nlp.stanford.edu/data/glove.6B.zip', filename=glove_zip,
reporthook=reporthook)
print('unzipping glove file')
zip_ref = zipfile.ZipFile(glove_zip, 'r')
zip_ref.extractall('very_large_data')
zip_ref.close()
"""
def load_glove(data_dir_path=None, embedding_dim=None):
    """
    Load a glove embedding table from a text file.

    :param data_dir_path: path of the glove embedding text file (despite the
        name it is used directly as the file path; the download/unzip helpers
        above are disabled)
    :param embedding_dim: the dimension of the word embedding, available
        dimensions are 50, 100, 200, 300, default is 300 (currently only kept
        for API compatibility -- the file's own width wins)
    :return: dict mapping word -> numpy float32 embedding vector
    """
    if embedding_dim is None:
        embedding_dim = 300
    glove_file_path = data_dir_path
    _word2em = {}
    # `with` guarantees the handle is closed even if a line fails to parse;
    # the original open()/close() pair leaked the file on error.
    with open(glove_file_path, mode='rt', encoding='utf8') as glove_file:
        for line in glove_file:
            words = line.strip().split()
            if not words:
                # Robustness: tolerate blank/whitespace-only lines.
                continue
            word = words[0]
            embeds = np.array(words[1:], dtype=np.float32)
            _word2em[word] = embeds
    return _word2em
class GloveModel(object):
    """
    Wraps a glove word->vector table and provides word/document encoders.
    Documents are encoded as the sum of their word embeddings; unknown words
    contribute a zero vector.
    """

    model_name = 'glove-model'

    def __init__(self):
        # Populated by load(); encoders assume both are set.
        self.word2em = None
        self.embedding_dim = None

    def load(self, data_dir_path, embedding_dim=None):
        """Read the glove text file and keep the word -> vector table."""
        self.embedding_dim = 300 if embedding_dim is None else embedding_dim
        self.word2em = load_glove(data_dir_path, self.embedding_dim)

    def encode_word(self, word):
        """
        Return the pre-trained embedding of `word` (case-insensitive), or a
        zero vector if the word is unknown.
        """
        return self.word2em.get(word.lower(), np.zeros(shape=(self.embedding_dim, )))

    def encode_docs(self, docs, max_allowed_doc_length=None):
        """
        Encode every document in `docs` as the sum of its word vectors.
        Documents longer than the (optionally capped) longest document are
        truncated. Returns an array of shape (len(docs), embedding_dim).
        """
        longest = max((len(doc.split(' ')) for doc in docs), default=0)
        if max_allowed_doc_length is not None:
            longest = min(longest, max_allowed_doc_length)
        X = np.zeros(shape=(len(docs), self.embedding_dim))
        for row, doc in enumerate(docs):
            tokens = [w.lower() for w in doc.split(' ')]
            E = np.zeros(shape=(self.embedding_dim, longest))
            for col in range(min(longest, len(tokens))):
                try:
                    E[:, col] = self.word2em[tokens[col]]
                except KeyError:
                    # Unknown word: leave its column at zero.
                    pass
            X[row, :] = np.sum(E, axis=1)
        return X

    def encode_doc(self, doc, max_allowed_doc_length=None):
        """Encode a single document as the sum of its word vectors."""
        tokens = [w.lower() for w in doc.split(' ')]
        limit = len(tokens)
        if max_allowed_doc_length is not None:
            limit = min(limit, max_allowed_doc_length)
        E = np.zeros(shape=(self.embedding_dim, limit))
        X = np.zeros(shape=(self.embedding_dim, ))
        for col in range(limit):
            try:
                E[:, col] = self.word2em[tokens[col]]
            except KeyError:
                pass
        X[:] = np.sum(E, axis=1)
        return X
| [
"marceldeboer@web.de"
] | marceldeboer@web.de |
14aea44dfb3cac5b9c7cdb45d60535220558ea80 | 3f0beb3543f1fcad6b84b6194b0cd70612b1a109 | /app/schemas/credit.py | d507a76338d9639bfa6b5f84228456630c3fa9ac | [] | no_license | Keith-Njagi/ecommerce_app_credit_management | 3e1ec9300ca62d28e6113978ac295cc7012de5e1 | 176f63fb7e33235462391edbe122eb8ad27d782a | refs/heads/master | 2022-12-10T14:55:53.268186 | 2020-09-08T17:35:26 | 2020-09-08T17:35:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | from . import ma
from models.credit import CreditModel
from models.salesman import SalesmanModel
class CreditSchema(ma.SQLAlchemyAutoSchema):
    # Marshmallow auto-schema for CreditModel rows (fields are generated from
    # the SQLAlchemy model).
    class Meta:
        model = CreditModel
        # Accepted on input only -- never serialized back to the client.
        load_only = ('salesman',)
        # Server-managed columns: emitted on output, ignored on input.
        dump_only = ('id', 'created', 'updated',)
        # Expose foreign-key columns in the serialized output.
        include_fk = True
    # HATEOAS-style links added to each serialized credit record.
    # NOTE(review): indentation was lost in this dump; _links is placed at
    # class scope (a schema field, not a Meta option) -- confirm against repo.
    _links = ma.Hyperlinks({
        'self': ma.URLFor('api.credit_credit_detail', id='<id>'),
        'collection': ma.URLFor('api.credit_credit_list')
    })
| [
"kitgiana.keith@gmail.com"
] | kitgiana.keith@gmail.com |
3906e639fd5010cbc85fb6417f0c25ecf4c4001b | 1370ae668a0b5ed55b23d0c52316685a0e481379 | /software/Train KBC Models/Bilinear Decomposed/bilinear - Kopie.py | 0716288203f9058c1592741af70646dc061e7999 | [] | no_license | kavchop/KGC-Thesis | 0fe90bcc823f53ce0bab003edcf48be42ed068dd | ed27b543e52881302a7bbdcb41af9afab9053a8e | refs/heads/master | 2021-01-11T18:26:11.011631 | 2017-01-20T09:29:15 | 2017-01-20T09:29:15 | 79,544,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,128 | py | '''
Author of this implementation: Kavita Chopra (2016, version 1.0)
RUN THE CODE:
TRAINING:
- "python bilinear.py" for training with non-diagonal Mr
- "python bilinear.py diagonal" to train the bilinear diagonal model
EVALUATION:
- "python bilinear.py evaluate" for evaluation with non-diagonal Mr
- "python bilinear.py diagonal evaluate" to evaluate the bilinear diagonal model
Input
- training data consisting of two sets: positive training data (true triples), and
- negative training data, where for every positive triple there are two corrupted
triples: one where head is replaced and the other where tail is corrupted
- Hyperparameters:
factorization rank (dimension to which n is reduced to) n_red, learning rate
Optimization Procedure
- stochastic gradient descent with mini-batches using Adagrad-method
Knowledge Base
- subset of Freebase (FBK15) with frequent entities also present in Wikilinks
Split of Data
- training data: ~480,000 triples
- validation set 50,000 "
- test set 59,000 "
- all sets are disjoint
- validation on valid data set used to measure performance of model during training and
for employment of early stopping before a maximimum epoch-bound (e.g. 1000)
Validation and Evaluation Protocol
- for validation mean ranks of correct triples from a list of corrupted triples are reported
- evaluation on test data after training is complete to measure the quality of the final model
- for evaluation hits at ten (proportion of true triples from top ten ranks) are reported
Implementation Remarks
- before training a new model:
- meta-data-file with the customized configurations is created and saved in 'models/' directory
- meta-data-file is updated each time training is resumed at a different time
- initial embedding is saved to disk for visualization purposes e.g. after dimensionality reduction through PCA
- at customizable intervals current model is saved to disk so that training may be continued in different sessions
- global- and hyper-parameters of the model can be configured in the params.py file
More details can be found in readme file in same directory.
'''
import numpy as np
import tensorflow as tf
from datetime import datetime
import timeit
import pickle
import os
import zipfile
import params
import input
import eval
import sys
os.chdir(os.getcwd())  # NOTE(review): chdir to the current directory is a no-op
# --- command-line flags -----------------------------------------------------
# "python bilinear.py [diagonal] [evaluate]" selects the model variant and
# whether to train or only evaluate an existing model.
diagonal = False
model_name = 'bilinear_decomp'
eval_mode = False
if len(sys.argv) == 2:
    if sys.argv[1] == 'evaluate':
        eval_mode = True
    if sys.argv[1] == 'diagonal':
        diagonal = True
        model_name = 'diagonal'
if len(sys.argv) == 3:
    if sys.argv[1] == 'diagonal' and sys.argv[2] == 'evaluate':
        diagonal = True
        model_name = 'diagonal'
        eval_mode = True
# --- output directories -----------------------------------------------------
dim = params.n_red  # factorization rank of the embedding
dataset = params.dataset
os.chdir(os.getcwd())  # NOTE(review): duplicated no-op chdir
PATH = '../../../data/Trained Models/'+model_name+'/' + dataset + '/dim = '+str(dim) +'/'
if not os.path.exists(PATH):
    os.makedirs(PATH)
PLOT_PATH = '../../../data/Model Validation Results for Plotting/' + dataset + '/dim = '+str(dim) +'/'
if not os.path.exists(PLOT_PATH):
    os.makedirs(PLOT_PATH)
normalize_ent = params.normalize_ent
if not normalize_ent:
    # Tag un-normalized runs so their files do not overwrite normalized ones.
    # NOTE(review): PATH above was built from the untagged name, so the
    # directory itself does not carry this suffix -- confirm that is intended.
    model_name = model_name + ' not normalized'
# File locations for the trained model, its initial state, validation results
# and the human-readable meta-data file.
MODEL_PATH = PATH + model_name
INITIAL_MODEL = PATH + model_name + '_initial_model'
RESULTS_PATH = PATH + model_name + '_results'
MODEL_META_PATH = PATH + model_name + '_model_meta.txt'
PLOT_RESULTS_PATH = PLOT_PATH + model_name + '_results'
PLOT_MODEL_META_PATH = PLOT_PATH + model_name + '_model_meta.txt'
#methods for building the tensor network
#score: x0 * Mr * x1
def bilinear(h, t, h_1, t_1, A, B):
    """Bilinear scores h * (A * B^T) * t^T for positive and corrupted triples.

    Args:
        h, t:     batched head/tail embeddings of the positive triples
                  (assumed shape [batch, 1, n_red] from run_training's
                  reshape -- TODO confirm).
        h_1, t_1: head/tail embeddings of the corrupted counterpart triples.
        A, B:     batched relation factor matrices, so Mr = A * B^T.

    Returns:
        (pos_score, neg_score) tensors with one score per triple pair.
    """
    # A * B^T is identical for the positive and the negative triple of a pair,
    # so build the relation matrix Mr once instead of twice: same scores,
    # half the matmul work and a smaller TF graph.
    Mr = tf.batch_matmul(A, tf.transpose(B, perm=[0, 2, 1]))
    pos_score = tf.batch_matmul(tf.batch_matmul(h, Mr), tf.transpose(t, perm=[0, 2, 1]))
    neg_score = tf.batch_matmul(tf.batch_matmul(h_1, Mr), tf.transpose(t_1, perm=[0, 2, 1]))
    return pos_score, neg_score
def run_training(model_name):
    """Train (or evaluate) the bilinear KBC model called `model_name`.

    Reads all hyper-parameters from the `params` module and the module-level
    globals (MODEL_PATH, eval_mode, diagonal, ...). Depending on `eval_mode`
    this either runs the SGD training loop with periodic validation and
    checkpointing, or a single evaluation pass on the test set and returns.
    """
    #set global- and hyper-parameters, for description of each param see file params.py
    n_red = params.n_red
    a = params.a
    shuffle_data = params.shuffle_data
    check_collision = params.check_collision
    swap = params.swap
    max_epoch = params.max_epoch
    global_epoch = params.global_epoch
    learning_rate = params.learning_rate
    batch_size = params.batch_size
    result_log_cycle = params.result_log_cycle
    embedding_log_cycle = params.embedding_log_cycle
    corrupt_two = params.corrupt_two
    valid_size = params.valid_size
    valid_verbose = params.valid_verbose
    train_verbose = params.train_verbose
    #normalize_ent = params.normalize_ent
    if eval_mode:
        mode = 'evaluated'
    else:
        mode = 'trained'
    if model_name == 'diagonal':
        print "\nBilinear model is {} with diagonal relation matrices.\n".format(mode)
    else:
        print "\nBilinear model is {} with non-diagonal relation matrices.\n".format(mode)
    #load set of all triples, train, valid and test data
    triples, train, valid, test = input.load_data(swap)
    ent_URI_to_int, rel_URI_to_int = input.create_dicts(triples)  #load dicts
    #load input-formats: int-matrices and triples-set for faster search
    triples_set, train_matrix, valid_matrix, test_matrix = input.create_int_matrices(triples, train, valid, test, ent_URI_to_int, rel_URI_to_int)
    n = len(ent_URI_to_int)  #number of all unique entities
    m = len(rel_URI_to_int)  #number of all unique relations
    #load existing model or initialize new and save to disk
    print MODEL_PATH
    if os.path.isfile(MODEL_PATH):
        print "\nExisting model is being loaded...\n"
        bilinear_model = input.pickle_object(MODEL_PATH, 'r')
        W_param = bilinear_model[0]
        print W_param.shape
        # Entity matrix is stored 2-D; add a middle axis so each embedding is a
        # [1, n_red] row vector for the batched matmuls in bilinear().
        W_param = np.reshape(W_param, (W_param.shape[0], 1, W_param.shape[1]))
        A_param = np.array(bilinear_model[1], dtype=np.float32)
        B_param = np.array(bilinear_model[2], dtype=np.float32)
        print A_param.shape, B_param.shape
        entity_embed = input.learned_ent_embed(W_param)
        # if 'evaluate' tag passed when running script, only run evaluation on test_matrix and terminate
        if eval_mode:
            # Materialize the full relation matrices Mr = A * B^T per relation.
            Mr_param = np.array([np.dot(A_param[i], np.transpose(B_param[i])) for i in range(m)])
            eval.run_evaluation(diagonal, triples_set, test_matrix, entity_embed, Mr_param, eval_mode=True, verbose=True, test_size=valid_size)
            return
    else:
        if eval_mode:
            print "\nNo {} model has been trained yet. Please train a model before evaluating.\n".format(model_name)
            return
        #write model configurations to disk
        print "\nNew model is being initialized and saved before training starts...\n"
        input.save_model_meta(model_name, MODEL_META_PATH, PLOT_MODEL_META_PATH, n_red, a, learning_rate, corrupt_two, normalize_ent, check_collision)
        W_param, A_param, B_param = input.init_params(m, n, n_red, a)
        bilinear_model = [W_param, A_param, B_param]
        input.pickle_object(INITIAL_MODEL, 'w', bilinear_model)
        entity_embed = input.learned_ent_embed(W_param)
    #open eval-results table to retrieve the last trained global epoch
    #if it does not exist, create a new result_table
    if os.path.isfile(RESULTS_PATH):
        results_table = input.pickle_object(RESULTS_PATH, 'r')
        global_epoch = int(results_table[-1][0])  #update epoch_num
        input.save_model_meta(model_name, MODEL_META_PATH, PLOT_MODEL_META_PATH, n_red, a, learning_rate, corrupt_two, normalize_ent, check_collision, global_epoch, resumed=True)
    else:
        results_table = np.reshape(np.asarray(['epoch', 'h_mean', 't_mean', 'h_hits', 't_hits', 'total_loss'], dtype=object), (1,6))
        #run evaluation after initialization to get the state before training (at global_epoch 0)
        Mr_param = np.array([np.dot(A_param[i], np.transpose(B_param[i])) for i in range(m)])
        record = eval.run_evaluation(diagonal, triples_set, valid_matrix, entity_embed, Mr_param, test_size=valid_size)
        new_record = np.reshape(np.asarray([global_epoch]+record+[0]), (1,6))
        print "validation result of current embedding: {}\n".format(new_record)
        results_table = np.append(results_table, new_record, axis=0)
        input.pickle_object(RESULTS_PATH, 'w', results_table)
        input.pickle_object(PLOT_RESULTS_PATH, 'w', results_table)
    #config = tf.ConfigProto()
    #config.gpu_options.per_process_gpu_memory_fraction = 0.5
    #with tf.Session(config=config) as sess:
    # launch TF Session and build computation graph
    # start a TF session and build the computation Graph
    g = tf.Graph()
    with g.as_default(), g.device('/cpu:0'), tf.Session() as sess:
        #with tf.Session() as sess:
        # initialize model parameters (TF Variable) with numpy objects
        A = tf.Variable(A_param, name='A')
        B = tf.Variable(B_param, name='B')
        W = tf.Variable(W_param, name='W')
        # placeholders for current input batch in a gradient step
        h_ph = tf.placeholder(tf.int32, shape=(None))  #head (subject)
        l_ph = tf.placeholder(tf.int32, shape=(None))  #label (relation/predicate)
        t_ph = tf.placeholder(tf.int32, shape=(None))  #tail (object)
        h_1_ph = tf.placeholder(tf.int32, shape=(None))  #head from corrupted counterpart triple
        t_1_ph = tf.placeholder(tf.int32, shape=(None))  #tail from corrupted counterpart triple
        # tf.gather-ops for matrix and tensor-slicing on W and M based on current placeholder values
        h = tf.gather(W, h_ph)
        A_ = tf.gather(A, l_ph)
        B_ = tf.gather(B, l_ph)
        t = tf.gather(W, t_ph)
        h_1 = tf.gather(W, h_1_ph)
        t_1 = tf.gather(W, t_1_ph)
        pos_score, neg_score = bilinear(h, t, h_1, t_1, A_, B_)
        # Pairwise margin ranking loss with margin 1.
        loss = tf.reduce_sum(tf.maximum(tf.sub(neg_score, pos_score)+1, 0))
        # building training:
        # SGD with Adagrad opimizer using adaptive learning rates
        trainer = tf.train.AdagradOptimizer(learning_rate).minimize(loss)
        # alternatively
        # trainer=tf.train.AdamOptimizer().minimize(loss)
        #ops for normalizing W
        norm = tf.sqrt(tf.reduce_sum(tf.square(W), 2, keep_dims=True))
        W_new = tf.div(W,norm)
        W_norm = tf.assign(W, W_new)
        # op for Variable initialization
        init_op = tf.global_variables_initializer()
        # vector X_id mirrors indices of train_matrix to allow inexpensive shuffling before each epoch
        X_id = np.arange(len(train_matrix))
        # determine the size of the training data and #iterations for training loop based batch_size and corrupt_two-tag
        x = 1
        if corrupt_two:
            x = 2
        batch_num = (x * len(train_matrix))/batch_size
        # NOTE(review): batch_num is computed but never used below.
        batch_size = int(batch_size/float(x))
        print "\nNumber of Triples in Training data: {}".format(len(train_matrix))
        print "Iteration over training batches by relations (# {}) and maximal batch-size of {}".format(m, batch_size)
        # run initializer-op for variables
        sess.run(init_op)
        print "\nTraining of Bilinear Model starts!"
        for i in range(max_epoch):
            print "\nepoch: {}".format(i)
            if shuffle_data:
                np.random.shuffle(X_id)
            start = timeit.default_timer()
            loss_sum = 0
            train_batches = np.array_split(train_matrix[X_id], len(train_matrix)/batch_size)
            for j in range(len(train_batches)):
                pos_matrix = train_batches[j]
                h_batch, l_batch, t_batch = pos_matrix[:,0], pos_matrix[:,1], pos_matrix[:,2]
                if corrupt_two:
                    # Two corrupted triples per positive one: duplicate the
                    # positive batch so it aligns with the negative batch.
                    h_batch = np.append(h_batch, h_batch, axis=0)
                    t_batch = np.append(t_batch, t_batch, axis=0)
                    l_batch = np.append(l_batch, l_batch, axis=0)
                neg_matrix = input.create_corrupt_matrix(triples_set, corrupt_two, pos_matrix, n, check_collision)
                h_1_batch, t_1_batch = neg_matrix[:,0], neg_matrix[:,2]
                # feed current int-input-batches to placeholders
                feed_dict={h_ph: h_batch, l_ph: l_batch, t_ph: t_batch, h_1_ph: h_1_batch, t_1_ph: t_1_batch}
                _, loss_value = sess.run(([trainer, loss]), feed_dict=feed_dict)
                if train_verbose:
                    print loss_value
                loss_sum += loss_value
            #after completing one epoch print current loss:
            print "total loss of epoch: {}".format(loss_sum)
            if normalize_ent:
                # Project entity embeddings back onto the unit sphere.
                sess.run(W_norm)
            '''
            x = W.eval()
            print x.shape
            x = np.reshape(x, (n, 20))
            print np.linalg.norm(x, axis=1)
            '''
            stop = timeit.default_timer()
            print "time taken for current epoch: {} min".format((stop - start)/60)
            global_epoch += 1
            '''
            #save model after each embedding_log_cycle
            if global_epoch%embedding_log_cycle == 0:
                W_param = W.eval()
                Mr_param = M.eval()
                bilinear_model = [W_param, A_param, B_param]
                input.pickle_object(MODEL_PATH, 'w', bilinear_model)
            '''
            if global_epoch > 500:
                # After 500 epochs: validate on the full valid set, less often.
                valid_size = None
                embedding_log_cycle, result_log_cycle = 25, 25
            print A.eval()[1,:]
            #run validation on current embedding applied on validation set
            if global_epoch == 1 or global_epoch == 10 or global_epoch%result_log_cycle == 0:
                # NOTE(review): validation re-uses the numpy arrays W_param /
                # A_param / B_param captured before the session started; they
                # are not refreshed from the trained TF variables here, so the
                # reported ranks look stale -- confirm intended.
                entity_embed = input.learned_ent_embed(W_param)
                Mr_param = np.array([np.dot(A_param[i], np.transpose(B_param[i])) for i in range(m)])
                record = eval.run_evaluation(diagonal, triples_set, valid_matrix, entity_embed, Mr_param, test_size=valid_size, verbose=valid_verbose)
                new_record = np.reshape(np.asarray([global_epoch]+record+[int(loss_sum)]), (1,6))
                if valid_size == None and min(results_table[1:len(results_table),1]) >= new_record[0,1] and min(results_table[1:len(results_table),2]) >= new_record[0,2]:
                    #input.pickle_object('models/'+model_name+'_best_model', 'w', bilinear_model)
                    W_param = W.eval()
                    # NOTE(review): `M` is never defined in this function --
                    # this line raises NameError if the branch is reached;
                    # likely a leftover from the non-decomposed variant.
                    Mr_param = M.eval()
                    bilinear_model = [W_param, A_param, B_param]
                    input.pickle_object(MODEL_PATH, 'w', bilinear_model)
                #TODO: LINES FOR EARLY STOPPING (a[1:-1,1]).tolist()
                print "validation result of current embedding:\n{}\n{}".format(results_table[0,0:3], new_record[0,0:3])
                results_table = np.append(results_table, new_record, axis=0)
                input.pickle_object(RESULTS_PATH, 'w', results_table)
                input.pickle_object(PLOT_RESULTS_PATH, 'w', results_table)
def main(arg=None):
    """Entry point: train or evaluate the configured bilinear model.

    `arg` is unused; it exists so the signature is compatible with
    tf.app.run(), which passes argv to main.
    """
    run_training(model_name)


if __name__=="__main__":
    #tf.app.run()
    main()
| [
"kavit.chopra@yahoo.com"
] | kavit.chopra@yahoo.com |
411674aed64bba22b2f24cc9fdb1d0237b8dad90 | 10f047c7631b3aad90c7410c567c588993bfa647 | /PythonDispersal/src/testingTheequations/startColsTest.py | 9ad2d596bce5622a28b77d6736df337f57bb8ada | [] | no_license | ruthubc/ruthubc | ee5bc4aa2b3509986e8471f049b320e1b93ce1d5 | efa8a29fcff863a2419319b3d156b293a398c3a9 | refs/heads/master | 2021-01-24T08:05:40.590243 | 2017-08-30T01:37:56 | 2017-08-30T01:37:56 | 34,295,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | '''
Created on Jun 29, 2015
@author: user
'''
from core.StartColsClass import StartCols

# Smoke-test script: build a StartCols configuration (126 colonies, 1 run,
# adult-female fraction ad_fd=0.6 -- presumably; verify against StartCols'
# docstring), generate its colony list and print it.
cols = StartCols(126, 1, ad_fd = 0.6)
cols.make_col_list()
col_list = cols.col_list
print "col_list", col_list
"Amber22Ruby"
] | Amber22Ruby |
efeee94769b83f842bac96bd9d32030c907b7472 | 14be69d424c8f30cab70231d5509df50ccaa2f04 | /tensorflow/python/training/basic_session_run_hooks.py | 99f057e8371aa6d1d5420e5622a79426783ded4b | [
"Apache-2.0"
] | permissive | siemanko/tensorflow | 739847a1b7d44e6d9291da63be0daf3ae8f2d17c | 66e0faf5f2391d8e1b3acf69afbfa0adf609596d | refs/heads/master | 2021-07-08T09:00:56.875610 | 2017-10-05T18:16:17 | 2017-10-05T18:16:17 | 105,937,165 | 0 | 1 | null | 2017-10-05T20:29:58 | 2017-10-05T20:29:58 | null | UTF-8 | Python | false | false | 28,652 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Some common SessionRunHook classes.
@@LoggingTensorHook
@@StopAtStepHook
@@CheckpointSaverHook
@@StepCounterHook
@@NanLossDuringTrainingError
@@NanTensorHook
@@SummarySaverHook
@@GlobalStepWaiterHook
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import six
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training.summary_io import SummaryWriterCache
class _HookTimer(object):
"""Base timer for determining when Hooks should trigger.
Should not be instantiated directly.
"""
def __init__(self):
pass
def reset(self):
"""Resets the timer."""
pass
def should_trigger_for_step(self, step):
"""Return true if the timer should trigger for the specified step."""
raise NotImplementedError
def update_last_triggered_step(self, step):
"""Update the last triggered time and step number.
Args:
step: The current step.
Returns:
A pair `(elapsed_time, elapsed_steps)`, where `elapsed_time` is the number
of seconds between the current trigger and the last one (a float), and
`elapsed_steps` is the number of steps between the current trigger and
the last one. Both values will be set to `None` on the first trigger.
"""
raise NotImplementedError
def last_triggered_step(self):
"""Returns the last triggered time step or None if never triggered."""
raise NotImplementedError
class SecondOrStepTimer(_HookTimer):
  """Timer that triggers at most once every N seconds or once every N steps.

  Exactly one of the two bounds must be supplied.
  """

  def __init__(self, every_secs=None, every_steps=None):
    self.reset()
    self._every_secs = every_secs
    self._every_steps = every_steps
    has_secs = every_secs is not None
    has_steps = every_steps is not None
    if not (has_secs or has_steps):
      raise ValueError("Either every_secs or every_steps should be provided.")
    if has_secs and has_steps:
      raise ValueError("Can not provide both every_secs and every_steps.")
    super(SecondOrStepTimer, self).__init__()

  def reset(self):
    self._last_triggered_step = None
    self._last_triggered_time = None

  def should_trigger_for_step(self, step):
    """Returns whether the timer should fire at `step`.

    True on the very first call, and afterwards whenever either the wall-time
    bound (`every_secs`) or the step bound (`every_steps`) since the last
    trigger has been reached. A repeat of the last triggered step never fires.
    """
    last_step = self._last_triggered_step
    if last_step is None:
      return True
    if last_step == step:
      return False
    if self._every_secs is not None and (
        time.time() >= self._last_triggered_time + self._every_secs):
      return True
    if self._every_steps is not None and step >= last_step + self._every_steps:
      return True
    return False

  def update_last_triggered_step(self, step):
    now = time.time()
    if self._last_triggered_time is None:
      elapsed = (None, None)
    else:
      elapsed = (now - self._last_triggered_time,
                 step - self._last_triggered_step)
    self._last_triggered_time = now
    self._last_triggered_step = step
    return elapsed

  def last_triggered_step(self):
    return self._last_triggered_step
class NeverTriggerTimer(_HookTimer):
  """A `_HookTimer` whose trigger condition is never met."""

  def should_trigger_for_step(self, step):
    del step  # unused
    return False

  def update_last_triggered_step(self, step):
    del step  # unused
    return (None, None)

  def last_triggered_step(self):
    return None
class LoggingTensorHook(session_run_hook.SessionRunHook):
  """Prints the given tensors every N local steps, every N seconds, or at end.

  The tensors will be printed to the log, with `INFO` severity. If you are not
  seeing the logs, you might want to add the following line after your imports:

  ```python
    tf.logging.set_verbosity(tf.logging.INFO)
  ```

  Note that if `at_end` is True, `tensors` should not include any tensor
  whose evaluation produces a side effect such as consuming additional inputs.
  """

  def __init__(self, tensors, every_n_iter=None, every_n_secs=None,
               at_end=False, formatter=None):
    """Initializes a `LoggingTensorHook`.

    Args:
      tensors: `dict` that maps string-valued tags to tensors/tensor names,
          or `iterable` of tensors/tensor names.
      every_n_iter: `int`, print the values of `tensors` once every N local
          steps taken on the current worker.
      every_n_secs: `int` or `float`, print the values of `tensors` once every N
          seconds. Exactly one of `every_n_iter` and `every_n_secs` should be
          provided.
      at_end: `bool` specifying whether to print the values of `tensors` at the
          end of the run.
      formatter: function, takes dict of `tag`->`Tensor` and returns a string.
          If `None` uses default printing all tensors.

    Raises:
      ValueError: if `every_n_iter` is non-positive.
    """
    only_log_at_end = (
        at_end and (every_n_iter is None) and (every_n_secs is None))
    if (not only_log_at_end and
        (every_n_iter is None) == (every_n_secs is None)):
      raise ValueError(
          "either at_end and/or exactly one of every_n_iter and every_n_secs "
          "must be provided.")
    if every_n_iter is not None and every_n_iter <= 0:
      raise ValueError("invalid every_n_iter=%s." % every_n_iter)
    if not isinstance(tensors, dict):
      # An iterable of tensors/names: preserve the given order and key each
      # entry by itself.
      self._tag_order = tensors
      tensors = {item: item for item in tensors}
    else:
      self._tag_order = tensors.keys()
    self._tensors = tensors
    self._formatter = formatter
    # With only `at_end` requested, the periodic timer must never fire.
    self._timer = (
        NeverTriggerTimer() if only_log_at_end else
        SecondOrStepTimer(every_secs=every_n_secs, every_steps=every_n_iter))
    self._log_at_end = at_end

  def begin(self):
    self._timer.reset()
    # Local (per-worker) step counter driving the timer.
    self._iter_count = 0
    # Convert names to tensors if given
    self._current_tensors = {tag: _as_graph_element(tensor)
                             for (tag, tensor) in self._tensors.items()}

  def before_run(self, run_context):  # pylint: disable=unused-argument
    # Ask the timer whether this step should be logged; if so, request the
    # watched tensors so after_run() receives their values.
    self._should_trigger = self._timer.should_trigger_for_step(self._iter_count)
    if self._should_trigger:
      return SessionRunArgs(self._current_tensors)
    else:
      return None

  def _log_tensors(self, tensor_values):
    # Save and restore numpy's print options so `suppress=True` (no
    # scientific notation) does not leak out of this call.
    original = np.get_printoptions()
    np.set_printoptions(suppress=True)
    elapsed_secs, _ = self._timer.update_last_triggered_step(self._iter_count)
    if self._formatter:
      logging.info(self._formatter(tensor_values))
    else:
      stats = []
      # Emit tags in their original order, not dict order.
      for tag in self._tag_order:
        stats.append("%s = %s" % (tag, tensor_values[tag]))
      if elapsed_secs is not None:
        logging.info("%s (%.3f sec)", ", ".join(stats), elapsed_secs)
      else:
        logging.info("%s", ", ".join(stats))
    np.set_printoptions(**original)

  def after_run(self, run_context, run_values):
    _ = run_context
    if self._should_trigger:
      self._log_tensors(run_values.results)

    self._iter_count += 1

  def end(self, session):
    if self._log_at_end:
      # Evaluate the tensors one last time; see the class note about tensors
      # with side effects.
      values = session.run(self._current_tensors)
      self._log_tensors(values)
class StopAtStepHook(session_run_hook.SessionRunHook):
  """Hook that requests stop at a specified step."""

  def __init__(self, num_steps=None, last_step=None):
    """Initializes a `StopAtStepHook`.

    Exactly one of the two arguments must be given: `num_steps` asks for that
    many steps counted from when the session is created, while `last_step`
    names the absolute global step after which to stop.

    Args:
      num_steps: Number of steps to execute.
      last_step: Step after which to stop.

    Raises:
      ValueError: If one of the arguments is invalid.
    """
    has_num = num_steps is not None
    has_last = last_step is not None
    if not has_num and not has_last:
      raise ValueError("One of num_steps or last_step must be specified.")
    if has_num and has_last:
      raise ValueError("Only one of num_steps or last_step can be specified.")
    self._num_steps = num_steps
    self._last_step = last_step

  def begin(self):
    self._global_step_tensor = training_util._get_or_create_global_step_read()  # pylint: disable=protected-access
    if self._global_step_tensor is None:
      raise RuntimeError("Global step should be created to use StopAtStepHook.")

  def after_create_session(self, session, coord):
    if self._last_step is not None:
      return
    # `num_steps` was given: resolve the absolute stopping step now that the
    # session exists and the current global step can be read.
    current = session.run(self._global_step_tensor)
    self._last_step = current + self._num_steps

  def before_run(self, run_context):  # pylint: disable=unused-argument
    return SessionRunArgs(self._global_step_tensor)

  def after_run(self, run_context, run_values):
    # `run_values.results` holds the global step read *before* the run; the
    # run itself may or may not have incremented it, so re-read the latest
    # value before actually requesting a stop.
    if run_values.results + 1 < self._last_step:
      return
    latest_step = run_context.session.run(self._global_step_tensor)
    if latest_step >= self._last_step:
      run_context.request_stop()
class CheckpointSaverListener(object):
  """Interface for listeners that take action before or after checkpoint save.

  A `CheckpointSaverListener` only runs on the steps where the owning
  `CheckpointSaverHook` triggers, and receives callbacks at these points:

    * `begin()` before the session is used (ops may still be added here),
    * `before_save()` just before each call to `Saver.save()`,
    * `after_save()` just after each call to `Saver.save()`,
    * `end()` at the end of the session.

  Implement a subclass and hand an instance to a `CheckpointSaverHook`:

  ```python
  class ExampleCheckpointSaverListerner(CheckpointSaverListener):
    def begin(self):
      # You can add ops to the graph here.
      print('Starting the session.')
      self.your_tensor = ...

    def before_save(self, session, global_step_value):
      print('About to write a checkpoint')

    def after_save(self, session, global_step_value):
      print('Done writing checkpoint.')

    def end(self, session, global_step_value):
      print('Done with the session.')

  ...
  listener = ExampleCheckpointSaverListerner()
  saver_hook = tf.train.CheckpointSaverHook(
      checkpoint_dir, listeners=[listener])
  with tf.train.MonitoredTrainingSession(chief_only_hooks=[saver_hook]):
    ...
  ```

  A listener may simply react to every save, or use `global_step_value` to
  act on its own, less frequent, schedule. In the latter case implement
  `end()` to cover the final checkpoint save, taking care not to act twice
  when `after_save()` already handled it.
  """

  def begin(self):
    pass

  def before_save(self, session, global_step_value):
    pass

  def after_save(self, session, global_step_value):
    pass

  def end(self, session, global_step_value):
    pass
class CheckpointSaverHook(session_run_hook.SessionRunHook):
  """Saves checkpoints every N steps or seconds."""

  def __init__(self,
               checkpoint_dir,
               save_secs=None,
               save_steps=None,
               saver=None,
               checkpoint_basename="model.ckpt",
               scaffold=None,
               listeners=None):
    """Initializes a `CheckpointSaverHook`.

    Args:
      checkpoint_dir: `str`, base directory for the checkpoint files.
      save_secs: `int`, save every N secs.
      save_steps: `int`, save every N steps.
      saver: `Saver` object, used for saving.
      checkpoint_basename: `str`, base name for the checkpoint files.
      scaffold: `Scaffold`, use to get saver object.
      listeners: List of `CheckpointSaverListener` subclass instances.
        Used for callbacks that run immediately before or after this hook saves
        the checkpoint.

    Raises:
      ValueError: One of `save_steps` or `save_secs` should be set.
      ValueError: At most one of saver or scaffold should be set.
    """
    logging.info("Create CheckpointSaverHook.")
    if saver is not None and scaffold is not None:
      raise ValueError("You cannot provide both saver and scaffold.")
    self._saver = saver
    self._checkpoint_dir = checkpoint_dir
    self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
    self._scaffold = scaffold
    # The timer validates that exactly one of save_secs/save_steps is set.
    self._timer = SecondOrStepTimer(every_secs=save_secs,
                                    every_steps=save_steps)
    self._listeners = listeners or []

  def begin(self):
    self._summary_writer = SummaryWriterCache.get(self._checkpoint_dir)
    self._global_step_tensor = training_util._get_or_create_global_step_read()  # pylint: disable=protected-access
    if self._global_step_tensor is None:
      raise RuntimeError(
          "Global step should be created to use CheckpointSaverHook.")
    for l in self._listeners:
      l.begin()

  def before_run(self, run_context):  # pylint: disable=unused-argument
    if self._timer.last_triggered_step() is None:
      # We do write graph and saver_def at the first call of before_run.
      # We cannot do this in begin, since we let other hooks to change graph and
      # add variables in begin. Graph is finalized after all begin calls.
      training_util.write_graph(
          ops.get_default_graph().as_graph_def(add_shapes=True),
          self._checkpoint_dir,
          "graph.pbtxt")
      saver_def = self._get_saver().saver_def if self._get_saver() else None
      graph = ops.get_default_graph()
      meta_graph_def = meta_graph.create_meta_graph_def(
          graph_def=graph.as_graph_def(add_shapes=True),
          saver_def=saver_def)
      self._summary_writer.add_graph(graph)
      self._summary_writer.add_meta_graph(meta_graph_def)
    return SessionRunArgs(self._global_step_tensor)

  def after_run(self, run_context, run_values):
    # The fetched step may be stale: it can be read before the train op runs.
    stale_global_step = run_values.results
    if self._timer.should_trigger_for_step(stale_global_step+1):
      # get the real value after train op.
      global_step = run_context.session.run(self._global_step_tensor)
      if self._timer.should_trigger_for_step(global_step):
        self._timer.update_last_triggered_step(global_step)
        self._save(run_context.session, global_step)

  def end(self, session):
    last_step = session.run(self._global_step_tensor)
    # Make sure the very last step is captured in a checkpoint.
    if last_step != self._timer.last_triggered_step():
      self._save(session, last_step)
    for l in self._listeners:
      l.end(session, last_step)

  def _save(self, session, step):
    """Saves the latest checkpoint."""
    logging.info("Saving checkpoints for %d into %s.", step, self._save_path)

    for l in self._listeners:
      l.before_save(session, step)

    self._get_saver().save(session, self._save_path, global_step=step)
    self._summary_writer.add_session_log(
        SessionLog(
            status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
        step)

    for l in self._listeners:
      l.after_save(session, step)

  def _get_saver(self):
    # Resolution order: explicit saver, then scaffold's saver, then the
    # single saver registered in the SAVERS collection.
    if self._saver is not None:
      return self._saver
    elif self._scaffold is not None:
      return self._scaffold.saver

    # Get saver from the SAVERS collection if present.
    collection_key = ops.GraphKeys.SAVERS
    savers = ops.get_collection(collection_key)
    if not savers:
      raise RuntimeError(
          "No items in collection {}. Please add a saver to the collection "
          "or provide a saver or scaffold.".format(collection_key))
    elif len(savers) > 1:
      raise RuntimeError(
          "More than one item in collection {}. "
          "Please indicate which one to use by passing it to the constructor.".
          format(collection_key))

    # Cache the collection's saver for subsequent calls.
    self._saver = savers[0]
    return savers[0]
class StepCounterHook(session_run_hook.SessionRunHook):
  """Hook that counts steps per second."""

  def __init__(self,
               every_n_steps=100,
               every_n_secs=None,
               output_dir=None,
               summary_writer=None):
    """Initializes a `StepCounterHook`.

    Args:
      every_n_steps: `int`, report the rate every N steps. Exactly one of
        `every_n_steps` and `every_n_secs` should be set.
      every_n_secs: `int`, report the rate every N seconds.
      output_dir: `string`, directory used to create a summary writer when
        no `summary_writer` is supplied.
      summary_writer: `SummaryWriter` receiving the steps/sec summaries.
    """
    if (every_n_steps is None) == (every_n_secs is None):
      raise ValueError(
          "exactly one of every_n_steps and every_n_secs should be provided.")
    self._timer = SecondOrStepTimer(every_steps=every_n_steps,
                                    every_secs=every_n_secs)

    self._summary_writer = summary_writer
    self._output_dir = output_dir

  def begin(self):
    if self._summary_writer is None and self._output_dir:
      self._summary_writer = SummaryWriterCache.get(self._output_dir)
    self._global_step_tensor = training_util._get_or_create_global_step_read()  # pylint: disable=protected-access
    if self._global_step_tensor is None:
      raise RuntimeError(
          "Global step should be created to use StepCounterHook.")
    # Summary tag, e.g. "global_step/sec".
    self._summary_tag = training_util.get_global_step().op.name + "/sec"

  def before_run(self, run_context):  # pylint: disable=unused-argument
    return SessionRunArgs(self._global_step_tensor)

  def after_run(self, run_context, run_values):
    _ = run_context

    # The fetched step may be stale: it can be read before the train op runs.
    stale_global_step = run_values.results
    if self._timer.should_trigger_for_step(stale_global_step+1):
      # get the real value after train op.
      global_step = run_context.session.run(self._global_step_tensor)
      if self._timer.should_trigger_for_step(global_step):
        elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(
            global_step)
        if elapsed_time is not None:
          # Emit the measured rate to both the summary writer and the log.
          steps_per_sec = elapsed_steps / elapsed_time
          if self._summary_writer is not None:
            summary = Summary(value=[Summary.Value(
                tag=self._summary_tag, simple_value=steps_per_sec)])
            self._summary_writer.add_summary(summary, global_step)
          logging.info("%s: %g", self._summary_tag, steps_per_sec)
class NanLossDuringTrainingError(RuntimeError):
  """Raised by `NanTensorHook` when the monitored loss becomes NaN."""

  def __str__(self):
    return "NaN loss during training."
class NanTensorHook(session_run_hook.SessionRunHook):
  """Monitors the loss tensor and stops training if loss is NaN.

  Can either fail with exception or just stop training.
  """

  def __init__(self, loss_tensor, fail_on_nan_loss=True):
    """Initializes a `NanTensorHook`.

    Args:
      loss_tensor: `Tensor`, the loss tensor.
      fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
    """
    self._loss_tensor = loss_tensor
    self._fail_on_nan_loss = fail_on_nan_loss

  def before_run(self, run_context):  # pylint: disable=unused-argument
    # Fetch the loss so after_run can inspect its value.
    return SessionRunArgs(self._loss_tensor)

  def after_run(self, run_context, run_values):
    if np.isnan(run_values.results):
      failure_message = "Model diverged with loss = NaN."
      if self._fail_on_nan_loss:
        logging.error(failure_message)
        raise NanLossDuringTrainingError
      else:
        logging.warning(failure_message)
        # We don't raise an error but we request stop without an exception.
        run_context.request_stop()
class SummarySaverHook(session_run_hook.SessionRunHook):
  """Saves summaries every N steps."""

  def __init__(self,
               save_steps=None,
               save_secs=None,
               output_dir=None,
               summary_writer=None,
               scaffold=None,
               summary_op=None):
    """Initializes a `SummarySaverHook`.

    Args:
      save_steps: `int`, save summaries every N steps. Exactly one of
        `save_secs` and `save_steps` should be set.
      save_secs: `int`, save summaries every N seconds.
      output_dir: `string`, the directory to save the summaries to. Only used
        if no `summary_writer` is supplied.
      summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
        one will be created accordingly.
      scaffold: `Scaffold` to get summary_op if it's not provided.
      summary_op: `Tensor` of type `string` containing the serialized `Summary`
        protocol buffer or a list of `Tensor`. They are most likely an output
        by TF summary methods like `tf.summary.scalar` or
        `tf.summary.merge_all`. It can be passed in as one tensor; if more
        than one, they must be passed in as a list.

    Raises:
      ValueError: Exactly one of scaffold or summary_op should be set.
    """
    if ((scaffold is None and summary_op is None) or
        (scaffold is not None and summary_op is not None)):
      raise ValueError(
          "Exactly one of scaffold or summary_op must be provided.")
    self._summary_op = summary_op
    self._summary_writer = summary_writer
    self._output_dir = output_dir
    self._scaffold = scaffold
    self._timer = SecondOrStepTimer(every_secs=save_secs,
                                    every_steps=save_steps)
    # TODO(mdan): Throw an error if output_dir and summary_writer are None.

  def begin(self):
    if self._summary_writer is None and self._output_dir:
      self._summary_writer = SummaryWriterCache.get(self._output_dir)
    # Step at which the next summary is due; None until the first run.
    self._next_step = None
    self._global_step_tensor = training_util._get_or_create_global_step_read()  # pylint: disable=protected-access
    if self._global_step_tensor is None:
      raise RuntimeError(
          "Global step should be created to use SummarySaverHook.")

  def before_run(self, run_context):  # pylint: disable=unused-argument
    # Always request a summary on the very first run (next_step is None).
    self._request_summary = (
        self._next_step is None or
        self._timer.should_trigger_for_step(self._next_step))
    requests = {"global_step": self._global_step_tensor}
    if self._request_summary:
      if self._get_summary_op() is not None:
        requests["summary"] = self._get_summary_op()

    return SessionRunArgs(requests)

  def after_run(self, run_context, run_values):
    _ = run_context
    if not self._summary_writer:
      return

    # The fetched step may be stale: it can be read before the train op runs.
    stale_global_step = run_values.results["global_step"]
    global_step = stale_global_step + 1
    if self._next_step is None or self._request_summary:
      # Only re-read the true global step when it actually matters.
      global_step = run_context.session.run(self._global_step_tensor)

    if self._next_step is None:
      self._summary_writer.add_session_log(
          SessionLog(status=SessionLog.START), global_step)

    if self._request_summary:
      self._timer.update_last_triggered_step(global_step)
      if "summary" in run_values.results:
        for summary in run_values.results["summary"]:
          self._summary_writer.add_summary(summary, global_step)

    self._next_step = global_step + 1

  def end(self, session=None):
    if self._summary_writer:
      self._summary_writer.flush()

  def _get_summary_op(self):
    """Fetches the summary op either from self._summary_op or self._scaffold.

    Returns:
      Returns a list of summary `Tensor`.
    """
    summary_op = None
    if self._summary_op is not None:
      summary_op = self._summary_op
    elif self._scaffold.summary_op is not None:
      summary_op = self._scaffold.summary_op

    if summary_op is None:
      return None

    # Normalize a single tensor to a one-element list.
    if not isinstance(summary_op, list):
      return [summary_op]
    return summary_op
class GlobalStepWaiterHook(session_run_hook.SessionRunHook):
  """Delays execution until global step reaches `wait_until_step`.

  This hook delays execution until global step reaches to `wait_until_step`. It
  is used to gradually start workers in distributed settings. One example usage
  would be setting `wait_until_step=int(K*log(task_id+1))` assuming that
  task_id=0 is the chief.
  """

  def __init__(self, wait_until_step):
    """Initializes a `GlobalStepWaiterHook`.

    Args:
      wait_until_step: an `int` shows until which global step should we wait.
    """
    self._wait_until_step = wait_until_step

  def begin(self):
    self._worker_is_started = False
    self._global_step_tensor = training_util._get_or_create_global_step_read()  # pylint: disable=protected-access
    if self._global_step_tensor is None:
      raise RuntimeError(
          "Global step should be created to use _GlobalStepWaiterHook.")

  def before_run(self, run_context):
    # Once started, the hook never blocks again.
    if self._worker_is_started:
      return None

    if self._wait_until_step <= 0:
      self._worker_is_started = True
      return None

    logging.info("Waiting for global step %d before starting training.",
                 self._wait_until_step)
    last_logged_step = 0
    while True:
      # Poll the global step; log progress at most every 1000 steps.
      current_step = run_context.session.run(self._global_step_tensor)
      if current_step >= self._wait_until_step:
        self._worker_is_started = True
        return None
      if current_step - last_logged_step > 1000:
        logging.info("Waiting for global step %d before starting training. "
                     "Current step is %d.", self._wait_until_step, current_step)
        last_logged_step = current_step
      time.sleep(0.5)
class FinalOpsHook(session_run_hook.SessionRunHook):
  """A hook which evaluates `Tensors` at the end of a session."""

  def __init__(self, final_ops, final_ops_feed_dict=None):
    """Initializes `FinalOpsHook` with ops to run at the end of the session.

    Args:
      final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of
        names to `Tensors`.
      final_ops_feed_dict: A feed dictionary to use when running
        `final_ops_dict`.
    """
    self._final_ops = final_ops
    self._final_ops_feed_dict = final_ops_feed_dict
    self._final_ops_values = None

  @property
  def final_ops_values(self):
    """Results of the final ops; populated once `end()` has run."""
    return self._final_ops_values

  def end(self, session):
    if self._final_ops is None:
      return
    # Evaluate once at session end and cache the results for the caller.
    self._final_ops_values = session.run(
        self._final_ops, feed_dict=self._final_ops_feed_dict)
class FeedFnHook(session_run_hook.SessionRunHook):
  """Runs `feed_fn` and sets the `feed_dict` accordingly."""

  def __init__(self, feed_fn):
    """Initializes a `FeedFnHook`.

    Args:
      feed_fn: function that takes no arguments and returns `dict` of `Tensor`
        to feed.
    """
    self.feed_fn = feed_fn

  def before_run(self, run_context):  # pylint: disable=unused-argument
    # Refresh the feed dict on every step by invoking the callback.
    feed = self.feed_fn()
    return session_run_hook.SessionRunArgs(fetches=None, feed_dict=feed)
def _as_graph_element(obj):
  """Retrieves Graph element."""
  graph = ops.get_default_graph()
  if not isinstance(obj, six.string_types):
    # A live graph object: just validate it belongs to the current graph.
    if not hasattr(obj, "graph") or obj.graph != graph:
      raise ValueError("Passed %s should have graph attribute that is equal "
                       "to current graph %s." % (obj, graph))
    return obj
  # A string name: resolve it, defaulting to output 0 when no index given.
  if ":" in obj:
    element = graph.as_graph_element(obj)
  else:
    element = graph.as_graph_element(obj + ":0")
    # Reject bare names that are ambiguous because the op has >1 output.
    try:
      graph.as_graph_element(obj + ":1")
    except (KeyError, ValueError):
      pass
    else:
      raise ValueError("Name %s is ambiguous, "
                       "as this `Operation` has multiple outputs "
                       "(at least 2)." % obj)
  return element
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
d914c63daf2fad0c1cedf4f0d70cdcc832c0e729 | f37d490338236d3cf779918efa28599f04ebdbe4 | /Inteligencia Artificial 1/Tarea 13/Programacion Genetica.py | 439d9264fc9262d72f9b262f8c814be5fb1fc4a8 | [] | no_license | sanemiliano/Artificial-Intelligence-Projects | a513a77bf1be53ea561ae931efd4b4fe7a95991c | b9fb0d3fa92d95514518f49bf96a81229b7b1595 | refs/heads/master | 2020-03-27T22:25:59.823400 | 2018-09-07T20:05:55 | 2018-09-07T20:05:55 | 147,230,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,560 | py | import numpy as np
import copy as cp
import random as rd
class Node:
    """Base class for expression-tree nodes of the genetic program."""

    def __init__(self):
        # Euclidean distance to the target values; 0 until computed.
        self.fitness = 0
        # Predicted output values over the sampled x points.
        self.Yc = []

    def calc_fitness(self, Y):
        """Fitness is the Euclidean norm of the prediction error."""
        self.fitness = np.linalg.norm(self.Yc - Y)

    def print(self):
        """Textual representation; overridden by subclasses."""
        pass
class Const(Node):
    """Leaf node holding a random integer constant in [-10, 10]."""

    def __init__(self, x, y):
        Node.__init__(self)
        self.value = rd.randint(-10, 10)
        # A constant predicts the same value at every sample point.
        self.Yc = np.ones(len(x), float) * self.value
        self.calc_fitness(y)

    def print(self):
        return str(self.value)
class Var(Node):
    """Leaf node representing the independent variable x."""

    def __init__(self, x, y):
        Node.__init__(self)
        # The variable node simply predicts x itself.
        self.Yc = np.copy(x)
        self.calc_fitness(y)

    def print(self):
        return 'x'
class Function(Node):
    """Interior node applying an operator to one or two child nodes."""

    def __init__(self, X, Y, symbol, args):
        """Evaluate `symbol` over the children's predictions.

        Args:
            X: sample points (unused directly; children already hold Yc).
            Y: target values, used to compute this node's fitness.
            symbol: one of ' + ', ' - ', ' * ', 'sin', 'cos', 'exp', 'abs'.
            args: child nodes; binary operators use args[0] and args[1],
                unary ones only args[0].
        """
        Node.__init__(self)
        self.symbol = symbol
        self.args = args
        if self.symbol == ' + ':
            self.Yc = self.args[0].Yc + self.args[1].Yc
        elif self.symbol == ' - ':
            self.Yc = self.args[0].Yc - self.args[1].Yc
        elif self.symbol == ' * ':
            self.Yc = self.args[0].Yc * self.args[1].Yc
        elif self.symbol == 'sin':
            self.Yc = np.sin(self.args[0].Yc)
        elif self.symbol == 'cos':
            self.Yc = np.cos(self.args[0].Yc)
        elif self.symbol == 'exp':
            self.Yc = np.exp(self.args[0].Yc)
        elif self.symbol == 'abs':
            self.Yc = np.abs(self.args[0].Yc)
        # NOTE: division and log were deliberately left out (commented out in
        # the original) because they are undefined on parts of the domain.
        self.calc_fitness(Y)

    def print(self):
        # BUG FIX: the original used ``len(self.args) is 1`` - identity
        # comparison against an int literal is implementation-defined and
        # raises a SyntaxWarning; equality is what is meant here.
        if len(self.args) == 1:
            return self.symbol + ' ( ' + self.args[0].print() + ' ) '
        else:
            return ' ( ' + self.args[0].print() + self.symbol + self.args[1].print() + ' ) '
# Main program ***********************************
class GeneticProgramming:
    """Evolves a random expression tree to fit the sample points (X, Y)."""

    def __init__(self, X, Y):
        self.Pob = []
        # Indices 0-2 are binary operators, 3-6 are unary.
        self.symbols = [' + ', ' - ', ' * ', 'sin', 'cos', 'exp', 'abs']
        self.count = 0
        self.elite = None
        # Seed the population with random leaves (variables and constants).
        for i in range(0, 100):
            if rd.random() > 0.5:
                self.Pob.append(Var(X, Y))
            else:
                self.Pob.append(Const(X, Y))
        # BUG FIX: start the elite as the best individual of the initial
        # population; the original kept only the last one (via a redundant
        # ``lastChanged`` alias), ignoring better seeds.
        self.elite = cp.copy(min(self.Pob, key=lambda node: node.fitness))

    def fit(self, X, Y):
        """Run up to 100000 random combinations, keeping improvements."""
        while self.count < 100000:
            i = rd.randint(0, len(self.symbols) - 1)
            args = []
            # BUG FIX: only the first three symbols (+, -, *) are binary.
            # The original used ``i < 4``, which handed a spurious second
            # operand to 'sin' and made it print as a binary expression.
            if i < 3:
                args.append(self.Pob[rd.randint(0, len(self.Pob) - 1)])
            args.append(self.Pob[rd.randint(0, len(self.Pob) - 1)])
            symbol = self.symbols[i]
            aux = Function(X, Y, symbol, args)
            if aux.fitness < self.elite.fitness:
                self.elite = cp.copy(aux)
            # Replace a random individual when the newcomer beats it.
            victim = rd.randint(0, len(self.Pob) - 1)
            if aux.fitness < self.Pob[victim].fitness:
                self.Pob[victim] = aux
            self.count += 1

    def print(self):
        print(self.elite.print())
# Sample the target polynomial y = x**2 - 4 on the integers [-10, 10).
X = np.arange(-10,10,1)
Y = X**2 - 4
# Evolve an expression tree that approximates the samples.
gp = GeneticProgramming(X,Y)
gp.fit(X,Y)
gp.print() | [
"zeus@polispay.org"
] | zeus@polispay.org |
9c45927602a0c2e02348329e4ac8932ab91a4026 | da82e7bff86aeabcdf91cfd0ca3149e5752b780f | /v2mac.py | d846ca44346f16517dcf80e9021e9d8e52b60e08 | [] | no_license | tymancjo/video_process | 92f95b52c420753d5fe86080421eff584e22c9ce | 4775d4c5a420ef9b45127a35836b9187a7000480 | refs/heads/main | 2023-05-28T09:23:11.043293 | 2021-06-07T17:45:27 | 2021-06-07T17:45:27 | 371,156,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,067 | py | import cv2
import copy
import sys
import time
import csv
import numpy as np
import math
def play_videoFile(filePath, data, vid_sync, data_sync, sample_shift=0):
    """Play a video file next to synchronized plots of measurement data.

    Args:
        filePath: path of the video file opened with OpenCV.
        data: normalized array (samples x channels); the last row holds the
            per-channel amplitudes appended by normalize().
        vid_sync: video frame number of the synchronization event.
        data_sync: data sample index of the synchronization event.
        sample_shift: extra frames/samples skipped from the start of both
            streams (keeps them in sync, just trims the beginning).

    Keyboard: j/k/l = play backward / pause / forward, J/L = single step,
    Esc = quit.
    """
    cap = cv2.VideoCapture(filePath)
    if not cap.isOpened():
        print(f"Can't open file {filePath}")
        return
    v_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    v_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    v_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    v_fps = cap.get(cv2.CAP_PROP_FPS)
    print(f"Got video: {v_width}x{v_height} px, len:{v_length}frames, fps:{v_fps}")
    cv2.namedWindow('Video output window',cv2.WINDOW_AUTOSIZE)
    # making a matrix buffer for the video
    vid_buffer = []
    # text related variables
    font = cv2.FONT_HERSHEY_SIMPLEX
    org = (50, 50)
    fontScale = 0.7
    color = (255, 255, 255)
    thickness = 1
    buffer_mem = 500 #[MB]
    buffer_size = 100 #[frames]
    frm = 0
    total_frame = 0
    video_frame = 0
    prev_frm = -1
    frm_step = 0
    # Play-state glyphs indexed by frm_step: 0 -> "[-]", 1 -> "[>]", -1 -> "[<]".
    mark = ["[-]","[>]","[<]"]
    t_start = 0
    t_end = 1
    step = False
    first = True
    # Figuring out the max possible play frame of video or data
    n_samples, n_cols = data.shape
    n_samples -= 1 # to skip the added amplitude
    # Need to analyze here the data sets and figure out the first video and
    # data frame that is possible to display. And as well the last one.
    sample_shift = max(0, sample_shift)
    start_data_sample = data_sync - vid_sync
    if start_data_sample < 0:
        # Video starts earlier than the data: skip video frames to align.
        vid_start = max(0, abs(start_data_sample) - 1) # the -1 is to make a sync perfect
        start_data_sample = 0
        for _ in range(vid_start):
            _, _ = cap.read()
            video_frame += 1
    # Apply the extra user-requested shift to both streams.
    start_data_sample += sample_shift
    if sample_shift > 0:
        for _ in range(sample_shift):
            _, _ = cap.read()
            video_frame += 1
    end_frame = min(n_samples-start_data_sample-1, v_length - video_frame)
    print(f"End frame: {end_frame}")

    # Preparing space for the full plot
    plot_height = 200
    plot_width = v_width
    plot_frame = np.ones((plot_height * n_cols,plot_width,3),np.uint8)*25
    plot_data = end_frame
    data_step = 1
    data_pixel = plot_width - 20
    pixel_step = int(data_pixel / plot_data)
    if pixel_step < 1:
        # we need to make data step bigger
        data_step = int(math.ceil(plot_data / data_pixel))
        pixel_step = 1
    data_pixel = int((plot_data / data_step) * pixel_step)
    pixel_step_f = data_pixel / plot_data
    plot_x0 = int((plot_width - data_pixel) / 2)
    print(f"Plot spec. pix stp:{pixel_step}, data stp:{data_step}, plot dta:{plot_data}")

    # Plotting the full plots in the created frame
    for plot in range(n_cols):
        data_point = start_data_sample
        px = plot_x0
        plot_scale = -10 + plot_height / 2
        y0 = int(plot_height / 2) + plot * plot_height
        # Axes: horizontal zero line and the vertical left edge.
        cv2.line(plot_frame, (px,y0),(v_width,y0),(100,100,100), 1)
        cv2.line(plot_frame, (px,y0 - plot_height),(px,y0 + plot_height),(100,100,100), 1)
        for _ in range(int(plot_data/data_step)):
            y1 = int(y0 - data[data_point,plot] * plot_scale)
            y2 = int(y0 - data[data_point+data_step,plot] * plot_scale)
            x1 = int(px)
            x2 = int(px + pixel_step_f * data_step)
            cv2.line(plot_frame, (x1,y1), (x2,y2), (255,255,0), 1)
            data_point += data_step
            px += pixel_step_f * data_step

    while True:
        # Measure the achieved display rate of the previous iteration.
        if t_end != t_start:
            fps = int(1 / (t_end - t_start))
        else:
            fps = 1
        t_start = time.time()
        # Read a new frame only when playing forward past the buffered head
        # (or on the very first iteration).
        if (frm_step > 0 and frm == len(vid_buffer) and total_frame < end_frame) or first:
            first = False
            ret_val, frame = cap.read()
            vid_buffer.append(frame)
            if total_frame == 0:
                # Size the ring buffer from the first frame's memory footprint.
                frame_mem_size = sys.getsizeof(vid_buffer[-1])
                print(f"Frame memory size: {frame_mem_size} Bytes")
                buffer_size = int( buffer_mem / (frame_mem_size / (1024*1024)) )
                print(f"Buffer set to {buffer_size} frames")
            total_frame += frm_step
        buffer_len = len(vid_buffer)
        if buffer_len > buffer_size:
            # Drop the oldest frame to keep memory bounded.
            del vid_buffer[0]
            buffer_len = len(vid_buffer)
        if frm >= buffer_len:
            frm = buffer_len-1
        display_frame = copy.copy(vid_buffer[frm])
        plot_frame_full = copy.copy(plot_frame)
        # the progress bar stuff
        abs_frm = total_frame+frm-buffer_len+1
        # progress bar
        cv2.rectangle(display_frame, (11,v_height - 19), (int(11 + (v_width-22)*abs_frm/plot_data), v_height-11), (125,125,125), -1)
        # buffer bar
        cv2.rectangle(display_frame, (int(11 + (v_width-22)*(total_frame - buffer_len)/plot_data),v_height - 24), (int(11 + (v_width-22)*total_frame/plot_data), v_height-20), (0,0,255), -1)
        # progress bar frame
        cv2.rectangle(display_frame, (10,v_height - 25), (v_width-10, v_height-10), (255,255,255), 1)
        # Using cv2.putText() method
        txt_string = f"{mark[frm_step]} AbsFrame: {abs_frm} BufferFrm: {frm} HeadPos: {total_frame} FPS: {fps}"
        # Placing play head on the plot window
        play_head_x = round(plot_x0 + abs_frm * pixel_step_f)
        cv2.line(plot_frame_full, (play_head_x,0), (play_head_x,plot_height*n_cols), (0,0,255), 1)
        # Reading and plotting the current values
        for plot in range(n_cols):
            # De-normalize using the amplitude stored in the last data row.
            this_value = data[abs_frm + start_data_sample,plot] * data[-1,plot]
            value_txt = f" : {this_value}"
            cv2.putText(plot_frame_full,value_txt, (plot_x0,20+plot*plot_height), font,
                        0.5, color, thickness, cv2.LINE_AA)
        prev_frm = frm
        frm += frm_step
        if step:
            # Single-step mode: advance one frame, then pause again.
            frm_step = 0
            step = False
        if frm < 0:
            frm = 0
            frm_step = 0
        image = cv2.putText(display_frame, txt_string, org, font,
                            fontScale, color, thickness, cv2.LINE_AA)
        # Stacking images arrays
        # display = np.vstack((display_frame, plot_frame))
        cv2.imshow('Video Frame', display_frame)
        cv2.imshow('Plot Frame', plot_frame_full)
        # cv2.imshow('Stacked', display)
        the_pressed_key = cv2.waitKey(1)
        if the_pressed_key == 27:
            break # esc to quit
        elif the_pressed_key == ord('j'):
            frm_step = -1
        elif the_pressed_key == ord('k'):
            frm_step = 0
        elif the_pressed_key == ord('l'):
            frm_step = 1
        elif the_pressed_key == ord('J'):
            frm_step = -1
            step = True
        elif the_pressed_key == ord('L'):
            frm_step = 1
            step = True
        t_end = time.time()
    cv2.destroyAllWindows()
def get_csv_data(csv_file, skip=8, delimiter=';'):
    """Read a delimited text file into a numpy float matrix.

    The first *skip* rows are treated as a header and reported as skipped.
    Decimal commas are converted to points; non-numeric cells become 0.
    """
    data_set = []
    with open(csv_file, 'r', newline='') as file:
        reader = csv.reader(file, delimiter=delimiter)
        for row_cnt, row in enumerate(reader, start=1):
            if row_cnt <= skip:
                # Header region: report what was dropped.
                print("skipped: ", row)
                continue
            if not row:
                continue
            parsed = []
            for cell in row:
                text = cell.replace(',', '.')
                try:
                    value = float(text)
                except Exception:
                    value = 0
                parsed.append(value)
            data_set.append(parsed)
    return np.array(data_set)
def normalize(data_array):
    """Scale every column of *data_array* into [-1, 1] in place.

    Returns the array with one extra row appended holding each column's
    original amplitude, so the raw values can be recovered later.
    """
    _, n_cols = data_array.shape
    amplitudes = []
    for col in range(n_cols):
        column = data_array[:, col]
        amplitude = max(abs(column.max()), abs(column.min()))
        print(f"Column {col}, Amplitude {amplitude}")
        if amplitude != 0:
            # Scale in place; an all-zero column is left untouched.
            data_array[:, col] /= amplitude
        amplitudes.append(amplitude)
    return np.vstack((data_array, np.array(amplitudes)))
def prepare_to_show(data, plots):
    """Build the plot matrix that will be displayed next to the video.

    Args:
        data: np array of all recorded channels (samples x channels).
        plots: list describing the output plots; each entry is either a
            single column index or a tuple of column indices. A negative
            index means that column is subtracted instead of added.

    Returns:
        np array of shape (samples, len(plots)).
    """
    out_data = []
    x_range = len(data[:, 0])
    for plot in plots:
        this_plot = np.zeros(x_range)
        try:
            for y in plot:
                # BUG FIX: the original computed ``factor = y/abs(y)``,
                # which divides by zero when column 0 appears in a tuple.
                # Column 0 cannot be negated anyway, so treat it as "+".
                factor = 1.0 if y >= 0 else -1.0
                this_plot += factor * data[:, abs(y)]
        except TypeError:
            # ``plot`` is a single column index, not an iterable.
            # (Narrowed from a bare except so real errors propagate.)
            y = plot
            factor = 1.0 if y >= 0 else -1.0
            this_plot += factor * data[:, abs(y)]
        out_data.append(this_plot)
    return np.transpose(np.array(out_data))
def main():
    """Entry point: load the measurement data and play it next to the video."""
    data = get_csv_data('/Users/tymancjo/LocalGit/video/sc_data_example/11435.txt')
    # data = get_csv_data('c:\\x\\11435.txt')
    # Normalize a throwaway copy only to locate the data sync pulse:
    # the first sample where the second-to-last channel hits its amplitude.
    temp_data = normalize(copy.copy(data))
    sync_data_index = np.argmax(temp_data[:,-2] == 1)
    print(f"Sync data index: {sync_data_index}")
    del(temp_data)
    # Combine the raw channels into the plots to display, then normalize.
    data = prepare_to_show(data,[(2,-3),-4,5])
    data = normalize(data)
    print(f"Data max: {data[-1,0]}")
    video_file = '/Users/tymancjo/LocalGit/video/sc_data_example/11435.mp4'
    # video_file = 'c:\\x\\11435.mp4'
    play_videoFile(video_file,data,500,sync_data_index, sample_shift=770+220)
if __name__ == '__main__':
main() | [
"tomasz.tomanek@gmail.com"
] | tomasz.tomanek@gmail.com |
417e4e7fceb9fbbc14196f5a7e374f689dcfecff | 8ebb6c7abc58d1eeb8c7eaa6d6e27b625fc04696 | /2020/python/day20-1.py | fa2e81a2c45364bcc81279d0b2bfb8f1a4341cc0 | [] | no_license | davidcbc/aoc | 3d26f24314e0072315e5047f17d78c4017950a24 | 58bc1b020317ff66e1d46ad6c3f89c2535684ca2 | refs/heads/master | 2023-02-05T18:56:10.527499 | 2020-12-23T20:25:06 | 2020-12-23T20:25:06 | 317,977,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,462 | py | import copy
import math
# Puzzle input: tile descriptions separated by blank lines.
f = open(r"2020/python/day20-input.txt", "r")
input = f.read().split("\n")

# Indices of the edge codes inside a tile tuple
# (tile_id, grid, up, right, down, left); BASE is the offset of the first
# edge, used when indexing bare 4-element side lists.
BASE = 2
UP = 2
RIGHT = 3
DOWN = 4
LEFT = 5
def checkSeamonster(grid, x, y):
    """Return True (marking it with 'O') when a sea monster sits at (x, y)."""
    seamonster = [list(" # "),
                  list("# ## ## ###"),
                  list(" # # # # # # ")]
    # Reject positions where the pattern would run off the grid.
    if len(grid[y]) < x + len(seamonster[0]) or len(grid) < y + 3:
        return False
    # First pass: every '#' cell of the pattern must not be water ('.').
    for dy, row in enumerate(seamonster):
        for dx, cell in enumerate(row):
            if cell != " " and grid[y + dy][x + dx] == ".":
                return False
    # Second pass: mark the monster's cells so they are excluded later.
    for dy, row in enumerate(seamonster):
        for dx, cell in enumerate(row):
            if cell != " ":
                grid[y + dy][x + dx] = 'O'
    return True
def rotateSides(sides):
    """Return the edge codes after a 90-degree tile rotation.

    The left and right edges change reading direction, so their bit
    patterns are reversed; the edges are then reordered so that
    left->up, up->right, right->down, down->left.
    """
    left = rotateInt(sides[LEFT - BASE])
    right = rotateInt(sides[RIGHT - BASE])
    sides[LEFT - BASE] = left
    sides[RIGHT - BASE] = right
    return [left, sides[UP - BASE], right, sides[DOWN - BASE]]
def rotateInt(n):
    """Reverse the 10-bit edge pattern *n* (tile edges are 10 pixels wide)."""
    return int(format(n, "010b")[::-1], 2)
def rotateGrid(original):
    """Return a copy of *original* rotated 90 degrees clockwise."""
    return [list(column) for column in zip(*original[::-1])]
def flipGrid(original):
    """Return a copy of *original* mirrored horizontally (left-right)."""
    return [row[::-1] for row in original]
def printTile(tile):
    """Print the pixel grid of *tile* (a (tile_id, grid, ...) tuple) row by row."""
    for row in tile[1]:
        print(row)
def lineToNumber(line):
    """Encode an edge as an integer: '#' becomes a 1 bit, anything else 0."""
    value = 0
    for ch in line:
        value = (value << 1) | (ch == '#')
    return value
def findCorrectPermutation(tiles, x, y, permutation, used, dimensions):
    """Backtracking search for a tile arrangement whose edges all match.

    Args:
        tiles: list of (id, grid, up, right, down, left) tuples, one entry
            per tile orientation.
        x, y: grid position currently being filled.
        permutation: rows of placed tiles built so far (mutated in place).
        used: set of already-placed tile ids (mutated in place).
        dimensions: width/height of the square tile arrangement.

    Returns:
        The completed arrangement, or None when no tile fits here.
    """
    # (A leftover debug loop that built an unused string from every placed
    # row on each recursive call was removed - it only burned time.)
    if x == dimensions and y == dimensions - 1:
        # Walked past the last cell of the last row: arrangement complete.
        return permutation
    if x == dimensions:
        # Wrap to the start of the next row.
        x = 0
        y += 1
    for tile in tiles:
        # Skip tiles already placed, or whose edges clash with the
        # left/top neighbours.
        if (tile[0] in used or
                (x != 0 and permutation[y][x-1][RIGHT] != tile[LEFT]) or
                (y != 0 and permutation[y-1][x][DOWN] != tile[UP])):
            continue
        if x == 0:
            permutation.append([])
        permutation[-1].append(tile)
        used.add(tile[0])
        fullPermutation = findCorrectPermutation(
            tiles, x + 1, y, permutation, used, dimensions)
        if fullPermutation:
            return fullPermutation
        # Undo the placement before trying the next candidate.
        used.remove(tile[0])
        permutation[-1].pop()
        if x == 0:
            permutation.pop()
    return None
# Parse the input into `tiles`: for every tile, record all 8 orientations
# (4 rotations of the original plus 4 rotations of its mirror image) as
# (tile_id, grid, up, right, down, left) tuples with integer edge codes.
tiles = []
grid = []
tile = ""
i = 0
while i < len(input):
    if input[i][:4] == "Tile":
        tile = int(input[i].split()[1][:-1])
    elif input[i] == "":
        # A blank line ends the current tile: encode its four edges.
        u = lineToNumber(grid[0])
        d = lineToNumber(grid[len(grid)-1])
        l = lineToNumber(row[0] for row in grid)
        r = lineToNumber(row[len(row)-1] for row in grid)
        for j in range(4):
            tiles.append((tile,
                          copy.deepcopy(grid),
                          u,r,d,l))
            u,r,d,l = rotateSides([u,r,d,l])
            grid = rotateGrid(grid)
        # Mirror the tile, fix up the edge codes, then add its 4 rotations.
        u = rotateInt(u)
        t = r
        r = l
        l = t
        d = rotateInt(d)
        grid = flipGrid(grid)
        for j in range(4):
            tiles.append((tile,
                          copy.deepcopy(grid),
                          u,r,d,l))
            u,r,d,l = rotateSides([u,r,d,l])
            grid = rotateGrid(grid)
        tile = ""
        grid = []
    else:
        grid.append([c for c in input[i]])
    i += 1

# Part 1: assemble the square image and multiply the four corner tile ids.
dim = int(math.sqrt(len(tiles)/8))
p = findCorrectPermutation(tiles, 0,0, [], set(), dim)
print(p[0][0][0] * p[0][dim-1][0] * p[dim-1][0][0] * p[dim-1][dim-1][0])

# Part 2: strip the tile borders and glue the interiors into one big grid.
newGrid = []
for i in range(len(p)):
    for k in range(1,len(p[i][0][1])-1):
        line = ""
        for j in range(len(p[i])):
            line += "".join(p[i][j][1][k][1:-1])
        newGrid.append(list(line))

# Mark sea monsters in all 4 rotations, then in all 4 rotations of the flip.
for i in range(4):
    for y in range(len(newGrid)):
        for x in range(len(newGrid[y])):
            checkSeamonster(newGrid, x, y)
    newGrid = rotateGrid(newGrid)
newGrid = flipGrid(newGrid)
for i in range(4):
    for y in range(len(newGrid)):
        for x in range(len(newGrid[y])):
            checkSeamonster(newGrid, x, y)
    newGrid = rotateGrid(newGrid)

# Water roughness: '#' cells that belong to no sea monster.
count = 0
for l in newGrid:
    count += "".join(l).count("#")
print(count)
| [
"davidcbc@gmail.com"
] | davidcbc@gmail.com |
4d2056fec89cacb509ef5c0f41cce953a3b61ea2 | 6e2caad49f10db5ef560549618c884d2dd8f229c | /bili/osPro.py | 9934d02cb624c8d5db7b6b5b1196a5e7cef5475e | [] | no_license | iwen1992/python-learning | a6d9e022f8e7f7fadbc23390d13566881f6df689 | 8f306b86b8e733826a0d5d0e74f04a72891e922a | refs/heads/master | 2023-03-14T01:19:10.857037 | 2021-03-04T09:44:43 | 2021-03-04T09:44:43 | 330,402,840 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | import os
import time
#1、os.name系统的名字window-nt
#2、environ环境变量
#3、sep 分隔符 pathsep 路径分隔符 linesep 换行分割分
print(os.linesep)
#二、文件和目录操作
os.mkdir('stddemo')#当前目录下创建stddemo
os.rmdir('stddemo')#删除目录
print(os.getcwd())#打印当前目录
#三、path模块
#os.path.isabs(path)判断path是否为绝对路径
#os.path.getsize(file) 拿到文件的大小
file = os.getcwd()+'\osPro.py'
print(os.path.isabs(file) )
size = os.path.getsize(file)
print(size) | [
"3066420900@qq.com"
] | 3066420900@qq.com |
8c5e78c2e0950d63c09a8ccf701ea6c4e2067240 | b83ee9db2832b07faa2bd9afae043691922412c3 | /Knapsack/dynamic_knapsack.py | a2e0c1e3a3cc828fe3e8a5be4f04ff03bb24ca26 | [] | no_license | Prudkovskiy/Python | 68996953471292a1da3495adf16ff3b9eadec4b0 | adad850e3a5efa0e78f4329388c1de98fd8ee610 | refs/heads/master | 2021-05-13T18:40:55.942928 | 2018-05-18T07:42:30 | 2018-05-18T07:42:30 | 116,874,806 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,071 | py | """
0/1 knapsack problem solved with dynamic programming.
"""
from sys import stdin
class knapsack:
    """0/1 knapsack solver using dynamic programming.

    Item weights and prices are 1-indexed (index 0 is a dummy zero item).
    All weights are divided by their GCD to shrink the DP table width.
    """

    def __init__(self, max_weight=0):
        self.W = max_weight
        self.weight = [0]
        self.price = [0]
        # Indices of the items that end up in the knapsack.
        self.ans = []
        # max_price[i][w]: best total price choosing among the first i items
        # with total (scaled) weight not exceeding w <= W.
        self.max_price = []
        # GCD of all item weights, used to scale down the table (1 = no scaling).
        self.weight_gcd = 1

    def __iadd__(self, other):
        """Add an item given as [weight, price]."""
        self.weight.append(other[0])
        self.price.append(other[1])
        return self

    def gcd(self, a, b):
        """Euclidean greatest common divisor."""
        while b:
            a, b = b, a % b
        return abs(a)

    def find_gcd(self):
        """Compute the GCD of all item weights (indices 1..n)."""
        if len(self.weight) - 1 < 2:
            # Fewer than two items: keep the neutral scaling factor 1.
            return
        # GCD of the first two item weights.
        self.weight_gcd = self.gcd(self.weight[1], self.weight[2])
        # BUG FIX: the loop previously ran over range(2, len(self.weight)-1),
        # which re-processed item 2 and skipped the LAST item entirely, so
        # scaling could corrupt weights (e.g. weights 4, 4, 6 gave gcd 4 and
        # turned 6 into 6 // 4 == 1).
        for i in range(3, len(self.weight)):
            self.weight_gcd = self.gcd(self.weight_gcd, self.weight[i])

    def dyn_knapsack(self):
        """Solve the problem; return [best_price, used_weight, item_indices]."""
        # Build the DP table using GCD-scaled weights and fill it with zeros.
        self.find_gcd()
        cols = self.W // self.weight_gcd + 1   # capacities 0..W (scaled)
        rows = len(self.weight)                # items 0..n
        for j in range(rows):
            self.weight[j] = self.weight[j] // self.weight_gcd
            self.max_price.append([0 for i in range(cols)])
        for i in range(rows):
            for j in range(cols):
                if self.weight[i] <= j:
                    # Either skip item i, or take it on top of the best
                    # solution for the remaining capacity.
                    self.max_price[i][j] = max(
                        self.max_price[i-1][j],
                        self.max_price[i-1][j-self.weight[i]] + self.price[i])
                else:
                    self.max_price[i][j] = self.max_price[i-1][j]
        self.find_answer(rows - 1, cols - 1)
        return [self.max_price[rows-1][cols-1],
                self.knapsack_weight(self.ans),
                list(reversed(self.ans))]

    def knapsack_weight(self, subjects):
        """Total (unscaled) weight of the given item indices."""
        weight = 0
        for sub in subjects:
            weight += self.weight[sub] * self.weight_gcd
        return weight

    def find_answer(self, k, w):
        """Backtrack through the DP table, collecting chosen items into ans."""
        if self.max_price[k][w] == 0:
            return
        if self.max_price[k][w] == self.max_price[k-1][w]:
            # Item k was not taken; keep the same capacity.
            self.find_answer(k - 1, w)
        else:
            self.ans.append(k)
            self.find_answer(k - 1, w - self.weight[k])
def isint(s):
try:
int(s)
return True
except ValueError:
return False
s = stdin.readline()
pack = knapsack()
while(s):
data = s.split()
if len(data) == 1 and isint(data[0]):
pack = knapsack(int(data[0]))
elif len(data) == 2:
pack += [int(data[0]), int(data[1])]
s = stdin.readline()
pack_weight, pack_price, subjects = pack.dyn_knapsack()
fist_line = str(pack_price) + " " + str(pack_weight)
print(fist_line)
for i in subjects:
print(i) | [
"nickolay.prud@gmail.com"
] | nickolay.prud@gmail.com |
b9d54800be181bc7335ae1361aa6a0240c3e5a88 | 639d6fb7f3be808dddf226d2feb520960a8ee656 | /lstm_helper_kit/lstm_keras_model_class.py | 67a020af72bb480d6acbd632180ffe267c522be8 | [
"MIT"
] | permissive | Treesbark/lstm_helper_kit | 5d2173412c0035dda1772150f579b1b412553f38 | ff95585557985f0aeba27d93159c0cd11e44ad00 | refs/heads/master | 2023-01-07T23:53:55.501840 | 2019-09-28T10:50:46 | 2019-09-28T10:50:46 | 170,912,718 | 0 | 0 | MIT | 2022-12-26T20:47:06 | 2019-02-15T18:49:47 | Python | UTF-8 | Python | false | false | 11,464 | py | # -*- coding: utf-8 -*-
"""Main module."""
# Importing the libraries
import matplotlib.pyplot as plt
import os
import math
import numpy as np
import datetime as dt
from numpy import newaxis
from sklearn.preprocessing import MinMaxScaler
from keras.layers import Dense, Activation, Dropout, LSTM
from keras.models import Sequential, load_model, model_from_json
from keras.callbacks import EarlyStopping, ModelCheckpoint
from pathlib import Path
plt.style.use('fivethirtyeight')
class LSTMKerasModel():
"""A class for abstracting out several functions in a Keras Model"""
def __init__(self):
self.model = Sequential()
self.data_scaler = None
self.training_window_size = None
def load_keras_model(self, complete_model_path):
"""
Method for loading a Keras model
Parameters
----------
complete_model_path : str
the entire path for where the model is saved
Returns
-------
Keras model
a loaded Keras model
"""
from pathlib import Path
# Checks to make sure a '.json' is appended to the end of the string
if complete_model_path.endswith('.json'):
complete_model_path = complete_model_path + '.json'
# Check to see if the model exists
file_checker = Path(complete_model_path)
if file_checker.is_file() == False:
print("Model not found")
else:
# load json and create model
json_file = open(complete_model_path, 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("model.h5")
print("Loaded Keras model from " + complete_model_path)
self.model = loaded_model
def save_keras_model_to_file(self, file_save_path, force_overwrite=False):
"""
Method for saving a Keras model that checks to ensure the file does not already exist
Parameters
----------
file_save_path : str
the entire path for where the model should be saved
force_overwrite : bool
boolean to see if the whole string should be overwritten
Returns
-------
string
a value in a string
"""
from pathlib import Path
# Checks to make sure a '.json' is appended to the end of the string
if file_save_path.endswith('.json'):
file_save_path = file_save_path + '.json'
# Check to see if file is in path to avoid overwriting
file_checker = Path(file_save_path)
if file_checker.is_file() or force_overwrite == False:
print("File already exists")
else:
# Save the model to json
model_json = self.model.to_json()
with open(file_save_path, "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
self.model.save_weights("model.h5")
print("Saved model " + str(file_save_path))
def save_keras_model_to_class(self, model):
"""
Saves a Keras model to the LSTM model object for easy of use and abstraction of functions
Parameters
----------
model : Keras Model
the model that was trained and is to be wrapped in this abstract class
"""
self.model = model
def create_train_and_test_data(self, pandas_series, train_test_split_percentage=0.8):
"""
Method that creates the train and test data using the provided pandas series
Parameters
----------
pandas_series : pandas series
the series from a pandas dataframe that the LSTM will be built off of
train_test_split_percentage : float
the percent, as a decimal, that will be used to split the input data to
train and test the model
Returns
-------
training_set : numpy array
the first item returned is the correctly formatted training set
test_set : numpy array
the second item returned is correctly formatted test set
"""
# Takes the pandas series and coverts it into numpy array
my_array = np.array(pandas_series)
# Reshapes the numpy arrray so element in the array its own array
my_array = np.reshape(my_array, (len(my_array), 1))
# Splits the data as set by the user
train_test_split_point = int((len(my_array) * train_test_split_percentage))
training_set = my_array[:train_test_split_point]
test_set = my_array[train_test_split_point:]
return training_set, test_set
def scale_data(self, training_dataset):
"""
Method that creates and saves a scaler based on the passed training data and
returns a scaled training set
Parameters
----------
training data : numpy array of shape (1,:)
the training data
Returns
-------
scaler :
the scaler for the provided training data that is also saved internally
LSTM class
"""
# Create a scaleer
data_scaler = MinMaxScaler(feature_range=(0, 1))
self.data_scaler = data_scaler
# Scale the data
scaled_training_set = data_scaler.fit_transform(training_dataset)
return scaled_training_set
def format_training_data_for_LSTM(self, scaled_training_set, training_window_size=365, prediction_output_size=1):
"""
Method that prepares the data for ingestion into an LSTM for training
purposes
Parameters
----------
scaled_traing_set : numpy array of shape (1,:)
the scaled (0,1) training set
input_window_size : int
the size of the training window
prediction_output_size : int
the size of the prediction output length
Returns
-------
X_training_data :
the training values that will be fed to the LSTM that it will use to
predict the next value
y_training_data :
the expected values the LSTM will be using to correct its internal weights
"""
# Update the training window size
self.training_window_size = training_window_size
# Instantiate the lists
X_training_data = []
y_training_data = []
# Create the training inputs and expected outouts for back propogation using the
# user specified window size and the length of the training set
for i in range(training_window_size, len(scaled_training_set)):
X_training_data.append(scaled_training_set[i - training_window_size:i, 0])
y_training_data.append(scaled_training_set[i:i+prediction_output_size, 0])
# Convert the values into a numpy array
X_training_data = np.array(X_training_data)
y_training_data = np.array(y_training_data)
# Reshape the training data for efficient processing in the LSTM
X_training_data = np.reshape(X_training_data, (X_training_data.shape[0], X_training_data.shape[1], 1))
return X_training_data, y_training_data
def format_prediction_data_for_LSTM(self, pandas_series, test_set):
"""
Method that prepares the data for ingestion into an LSTM for prediction
Parameters
----------
pandas_series : pandas series
the series from a pandas dataframe that the LSTM will be built off of
test_set : numpy_array
the formatted test set that was used for training the LSTM
Returns
-------
lstm_prediction_inputs : numpy array
the data that is to be fed into the lstm as predictors
"""
# Create a new array based upon the input, test set, and window sizes
lstm_prediction_inputs = np.array(
pandas_series[len(pandas_series) - len(test_set) - self.training_window_size:])
# Reshape the numpy array
lstm_prediction_inputs = lstm_prediction_inputs.reshape(-1, 1)
# Rescale the data
lstm_prediction_inputs = self.data_scaler.transform(lstm_prediction_inputs)
return lstm_prediction_inputs
def predict_point_by_point(self, input_data, window_size, test_data_length, data_scaler):
"""
Predicts the future values point by point with known data
Parameters
----------
input_data : numpy array
the data on which to build prediction - must be as long or longer than window size
window_size : int
the size of the window the model expects to ingest into the LSTM
test_data_length : int
the length of test data on which to predict
Returns
-------
numpy array
the future values that were predicted point-by-point
"""
X_test = []
# Predict the future values step-by-step
for i in range(window_size, test_data_length):
X_test.append(input_data[i - window_size:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
final_output_array = self.model.predict(X_test)
final_output_array = data_scaler.inverse_transform(final_output_array)
return final_output_array
def predict_future_sequence_for_given_time_length(self, input_data, time_to_predict_into_future):
"""
Predicts future sequences of values based upon an initial array array passed to the model
Parameters
----------
input_data : numpy array
the data on which to build prediction - must be as long or longer than window size
time_to_predict_into_future : int
the length of time the LSTM will predict into the future the outputs
Returns
-------
numpy array
the future predicted values
"""
# Instantiate the array that will be used to predict values
predictor_array = input_data[0:self.training_window_size, 0]
# Instantiate the final output array
final_output_array = []
# Predict future value for time perido in future provided
for i in range(time_to_predict_into_future):
# Save the array that is about to be used to predict the outcome but slice the first value
next_step_array = predictor_array[1:]
# Prepare the array to be fed to the LSTM
predictor_array = np.array([predictor_array])
predictor_array = np.reshape(predictor_array,
(predictor_array.shape[0], predictor_array.shape[1], 1))
predicted_output = self.model.predict(predictor_array)
# Append the predicted output to the next array to be used in the NN
predictor_array = np.append(next_step_array, predicted_output)
# Throw the predicted output onto the end of the output array
final_output_array = np.append(final_output_array, predicted_output)
final_output_array = self.data_scaler.inverse_transform([final_output_array])
final_output_array = final_output_array[0][:, newaxis]
return final_output_array | [
"shea.hartley@gmail.com"
] | shea.hartley@gmail.com |
b869872bf72cdc1084a31e866ffa6e18ef429fdc | 1fb82cf0b1fa219ec98ebd58ae53e2edc6dafb1a | /build/exercise2/catkin_generated/pkg.develspace.context.pc.py | 1e42272eda056104f01933099b4654afb9c1e315 | [] | no_license | roberttovornik/FRI-RINS_TurtleBot | ffb877369eb595e1a9f07571c786b729c3526d82 | f18ec20c00f390d20de5b01104ecd7bf83810d2c | refs/heads/master | 2021-01-19T22:49:05.279753 | 2017-05-02T16:03:22 | 2017-05-02T16:03:22 | 88,873,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "exercise2"
PROJECT_SPACE_DIR = "/home/robert/ROS/devel"
PROJECT_VERSION = "0.0.0"
| [
"robert.tovornik@gmail.com"
] | robert.tovornik@gmail.com |
ce5e6f8927e97a6ea173f1e91c3d7ee62877e31a | d30785d3b36051dddfbf0a94c41b0f1cba33b146 | /first_django_app/manage.py | 8cdb6c32d05cd6c541ca5d7eef6e1899646701dd | [] | no_license | jcchouinard/Django-tutorial | f227f5de4b5eeceb4b5fc3f628913ec3a1615c41 | ae85e14c359a3babedf890625faff31249588ff1 | refs/heads/main | 2023-02-03T17:31:53.507380 | 2020-12-23T05:13:27 | 2020-12-23T05:13:27 | 323,791,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'first_django_app.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"jcchouinard.mkt@gmail.com"
] | jcchouinard.mkt@gmail.com |
4f991df0912048ead0e186019f6a15facdca9fcc | b490698632d8a92a9d89072c033458c4d8316fd8 | /mysite/urls.py | 4421deb0fc1c511eb3e56d4069302a5584cfa285 | [] | no_license | kuldeepbista/travelblogs | dec6b764400db88d6c1823969a911b5078ac91a4 | c972caa8d312ba454eb2577cae33f33660cf8ccb | refs/heads/master | 2020-05-20T19:23:14.716956 | 2019-05-09T07:09:57 | 2019-05-09T07:09:57 | 185,720,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('tinymce/', include('tinymce.urls')),
path('', include('main.urls')),
path('admin/', admin.site.urls),
]
| [
"kuldeep@Kuldeeps-MacBook-Pro.local"
] | kuldeep@Kuldeeps-MacBook-Pro.local |
be4b3bc4b91e3948892eb0dfc5bd4d4bfbd7a81a | d3029a1d8a4892b04f21e7de6cfe578808b99efb | /test.py | 0944203c228a25fc75e8328304c941b6f2da02f5 | [] | no_license | johnpolakowski/File_Downloader | fae7c6240bbc9da26ef58ccd96885ec0b66b867c | a8a4b13bdbac7ccebfc5f4ef80aabb6bcc62657f | refs/heads/master | 2023-06-15T18:09:52.033795 | 2021-07-16T06:47:19 | 2021-07-16T06:47:19 | 386,534,484 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,465 | py | WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.ID, "recaptcha-anchor")))
ele = driver.find_element(By.ID, "recaptcha-anchor")
#ActionChains(driver).move_to_element(ele).perform()
ele.click()
driver.switch_to.default_content()
WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.CSS_SELECTOR, "iframe[title=\"recaptcha challenge\"]")))
iframe = driver.find_element(By.CSS_SELECTOR, "iframe[title=\"recaptcha challenge\"]")
driver.switch_to.frame(iframe)
WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.ID, "rc-imageselect")))
if ATTACK_IMAGES:
image_recaptcha(driver)
elif ATTACK_AUDIO:
WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.ID, "recaptcha-audio-button")))
time.sleep(1)
driver.find_element(By.ID, "recaptcha-audio-button").click()
guess_again = True
while guess_again:
init("audio")
WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.ID, "audio-source")))
# Parse table details offline
body = driver.find_element(By.CSS_SELECTOR, "body").get_attribute('innerHTML').encode("utf8")
soup = BeautifulSoup(body, 'html.parser')
link = soup.findAll("a", {"class": "rc-audiochallenge-tdownload-link"})[0]
urllib.urlretrieve(link["href"], TASK_PATH + "/" + TASK + ".mp3")
guess_str = get_numbers(TASK_PATH + "/" + TASK, TASK_PATH + "/")
type_style(driver, "audio-response", guess_str)
# results.append(guess_str)
wait_between(0.5, 3)
driver.find_element(By.ID, "recaptcha-verify-button").click()
wait_between(1, 2.5)
try:
logging.debug("Checking if Google wants us to solve more...")
driver.switch_to.default_content()
driver.switch_to.frame(iframeSwitch)
checkmark_pos = driver.find_element(By.CLASS_NAME, "recaptcha-checkbox-checkmark").get_attribute("style")
guess_again = not (checkmark_pos == "background-position: 0 -600px")
driver.switch_to.default_content()
iframe = driver.find_element(By.CSS_SELECTOR, "iframe[title=\"recaptcha challenge\"]")
driver.switch_to.frame(iframe)
except Exception as e:
print e
guess_again = False | [
"poppy_ski@hotmail.com"
] | poppy_ski@hotmail.com |
0b18446ffc3f619e218bbf4ee27d01ef5704d3cf | 93f650962489a895749be7e385d5a55429662148 | /manage.py | 7c4bc54db099fd70dc33472fd34daa9dcaa3c358 | [] | no_license | Jekrimo/django-user-dashboard | 02029df2825f271cae7114ccfd1d2cbb1772de5e | 0fcefad2b8ef81952270f2da5729e2db0cbd865c | refs/heads/master | 2020-02-26T14:49:15.270862 | 2016-08-19T18:51:56 | 2016-08-19T18:51:56 | 65,822,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "thedashboard.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"jekrimo@gmail.com"
] | jekrimo@gmail.com |
9d9c54c167c1d1608999cca9cd3f8deb88c08f87 | f7a718425de1447836b547f831a120937f1fcf40 | /plumbum/util/datefmt.py | 86b8b0bd0033ccb5a4c68ba1d8edc9352eb63e63 | [
"BSD-3-Clause"
] | permissive | coyotevz/plumbum-old-1 | ad8ce697ffb4cbd0a6f238f66a1c546800e47024 | c0f769ca525298ab190592d0997575d917a4bed4 | refs/heads/master | 2021-01-20T10:50:32.516766 | 2016-11-18T04:20:32 | 2016-11-18T04:20:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | # -*- coding: utf-8 -*-
import os
import time
from datetime import datetime
# date/time utilities
if os.name == 'nt':
raise NotImplementedError("Not yet implemented for this platform")
else:
time_now, datetime_now = time.time, datetime.now
| [
"augusto@rioplomo.com.ar"
] | augusto@rioplomo.com.ar |
c900941111ee8983119ec1d8889a2c249f96f10e | 012451630e60bf17f7e9f2dfe317d88678eb2f48 | /simonsays.py | 5926769a4f3a8bc4f7e1476dd6b4b3e4ebb02542 | [] | no_license | arm15aig/Kattis_Python_Solutions | 452ed66a2720c969038dc1a212157b545054ee22 | 12c3e849f113cf8b23bda63b47b3300f604dc243 | refs/heads/master | 2022-12-07T06:08:16.432878 | 2020-08-25T21:51:15 | 2020-08-25T21:51:15 | 290,331,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | x = input()
for i in range(int(x)):
myInput = input()
command = myInput[11:]
if "Simon says" in myInput:
print(command)
| [
"arm15aig@gmail.com"
] | arm15aig@gmail.com |
c517eec9d829fff97df7e35c813ed564afea1bb4 | 64615f146aba943d7eeb434f7f0ce6ea4f0435a7 | /Img2json/estimator/opt.py | b63951f99789049e86a01e5a5a49f25cea0102a7 | [] | no_license | jie311/Human-Auto-annotation-Tool | 0eb3d6af35f0bea4d2ebcf0077acb4414dce79c9 | d9f8e23f4c6f2e53fb29286b70758ec5f21decb6 | refs/heads/master | 2023-06-01T10:15:21.197269 | 2021-06-25T08:40:20 | 2021-06-25T08:40:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,899 | py | import argparse
parser = argparse.ArgumentParser(description='PyTorch AlphaPose Training')
"----------------------------- General options -----------------------------"
parser.add_argument('--expID', default='default', type=str,
help='Experiment ID')
parser.add_argument('--dataset', default='coco', type=str,
help='Dataset choice: mpii | coco')
parser.add_argument('--nThreads', default=30, type=int,
help='Number of data loading threads')
parser.add_argument('--debug', default=False, type=bool,
help='Print the debug information')
parser.add_argument('--snapshot', default=1, type=int,
help='How often to take a snapshot of the model (0 = never)')
"----------------------------- AlphaPose options -----------------------------"
parser.add_argument('--addDPG', default=False, type=bool,
help='Train with data augmentation')
parser.add_argument('--sp', default=True, action='store_true',
help='Use single process for pytorch')
parser.add_argument('--profile', default=False, action='store_true',
help='add speed profiling at screen output')
"----------------------------- Model options -----------------------------"
parser.add_argument('--netType', default='hgPRM', type=str,
help='Options: hgPRM | resnext')
parser.add_argument('--loadModel', default=None, type=str,
help='Provide full path to a previously trained model')
parser.add_argument('--Continue', default=False, type=bool,
help='Pick up where an experiment left off')
parser.add_argument('--nFeats', default=256, type=int,
help='Number of features in the hourglass')
parser.add_argument('--nClasses', default=33, type=int,
help='Number of output channel')
parser.add_argument('--nStack', default=4, type=int,
help='Number of hourglasses to stack')
"----------------------------- Hyperparameter options -----------------------------"
parser.add_argument('--fast_inference', default=True, type=bool,
help='Fast inference')
parser.add_argument('--use_pyranet', default=True, type=bool,
help='use pyranet')
"----------------------------- Hyperparameter options -----------------------------"
parser.add_argument('--LR', default=2.5e-4, type=float,
help='Learning rate')
parser.add_argument('--momentum', default=0, type=float,
help='Momentum')
parser.add_argument('--weightDecay', default=0, type=float,
help='Weight decay')
parser.add_argument('--crit', default='MSE', type=str,
help='Criterion type')
parser.add_argument('--optMethod', default='rmsprop', type=str,
help='Optimization method: rmsprop | sgd | nag | adadelta')
"----------------------------- Training options -----------------------------"
parser.add_argument('--nEpochs', default=50, type=int,
help='Number of hourglasses to stack')
parser.add_argument('--epoch', default=0, type=int,
help='Current epoch')
parser.add_argument('--trainBatch', default=40, type=int,
help='Train-batch size')
parser.add_argument('--validBatch', default=20, type=int,
help='Valid-batch size')
parser.add_argument('--trainIters', default=0, type=int,
help='Total train iters')
parser.add_argument('--valIters', default=0, type=int,
help='Total valid iters')
parser.add_argument('--init', default=None, type=str,
help='Initialization')
"----------------------------- Data options -----------------------------"
parser.add_argument('--inputResH', default=320, type=int,
help='Input image height')
parser.add_argument('--inputResW', default=256, type=int,
help='Input image width')
parser.add_argument('--outputResH', default=80, type=int,
help='Output heatmap height')
parser.add_argument('--outputResW', default=64, type=int,
help='Output heatmap width')
parser.add_argument('--scale', default=0.25, type=float,
help='Degree of scale augmentation')
parser.add_argument('--rotate', default=30, type=float,
help='Degree of rotation augmentation')
parser.add_argument('--hmGauss', default=1, type=int,
help='Heatmap gaussian size')
"----------------------------- PyraNet options -----------------------------"
parser.add_argument('--baseWidth', default=9, type=int,
help='Heatmap gaussian size')
parser.add_argument('--cardinality', default=5, type=int,
help='Heatmap gaussian size')
parser.add_argument('--nResidual', default=1, type=int,
help='Number of residual modules at each location in the pyranet')
"----------------------------- Distribution options -----------------------------"
parser.add_argument('--dist', dest='dist', type=int, default=1,
help='distributed training or not')
parser.add_argument('--backend', dest='backend', type=str, default='gloo',
help='backend for distributed training')
parser.add_argument('--port', dest='port',
help='port of server')
"----------------------------- Detection options -----------------------------"
parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16 res101]',
default='res152')
parser.add_argument('--indir', dest='inputpath',
help='image-directory', default="")
parser.add_argument('--list', dest='inputlist',
help='image-list', default="")
parser.add_argument('--mode', dest='mode',
help='detection mode, fast/normal/accurate', default="normal")
parser.add_argument('--outdir', dest='outputpath',
help='output-directory', default="examples/res/")
parser.add_argument('--inp_dim', dest='inp_dim', type=str, default='608',
help='inpdim')
parser.add_argument('--conf', dest='confidence', type=float, default=0.05,
help='bounding box confidence threshold')
parser.add_argument('--nms', dest='nms_thesh', type=float, default=0.6,
help='bounding box nms threshold')
parser.add_argument('--save_img', default=False, action='store_true',
help='save result as image')
parser.add_argument('--vis', default=True, action='store_true',
help='visualize image')
parser.add_argument('--matching', default=False, action='store_true',
help='use best matching')
parser.add_argument('--format', type=str,
help='save in the format of cmu or coco or openpose, option: coco/cmu/open')
parser.add_argument('--detbatch', type=int, default=1,
help='detection batch size')
parser.add_argument('--posebatch', type=int, default=80,
help='pose estimation maximum batch size')
"----------------------------- Video options -----------------------------"
parser.add_argument('--video', dest='video',
help='video-name', default="")
parser.add_argument('--webcam', dest='webcam', type=str,
help='webcam number', default=0)
parser.add_argument('--save_video', dest='save_video',
help='whether to save rendered video', default=False, action='store_true')
parser.add_argument('--vis_fast', dest='vis_fast',
help='use fast rendering', action='store_true', default=False)
opt = parser.parse_args()
opt.num_classes = 80
| [
"carolchenyx@gmail.com"
] | carolchenyx@gmail.com |
23394ba5e3d077a01662573c4197b02160b39db2 | 820867acf1db9a60649b115fe1ba480495b9add1 | /Desafios/Desafio 108.py | 9508d83bf78604c41e5683fbdb825d442fbdac41 | [
"MIT"
] | permissive | blopah/python3-curso-em-video-gustavo-guanabara-exercicios | 8486cfd889c1175ffd0b8961095bd17634ac0757 | d86c613ea8bb335c4dd888a34bda456055e4d82e | refs/heads/master | 2022-11-10T04:17:22.992631 | 2020-06-15T23:33:45 | 2020-06-15T23:33:45 | 272,562,814 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | """
Problema: Adapte o código do desafio 107, criando uma função adicional chamada moeda()
que consiga mostrar os valores como um valor monetário formatado.
Resolução do problema:
"""
from ex108_modulo import moeda
valor = float(input('Informe um valor: R$'))
print(f'Aumento de 10%: {moeda.aumentar(valor, 10)}')
print(f'Rezudino 15%: {moeda.diminuir(valor, 15)}')
print(f'O dobro de {moeda.moeda(valor, "$")} é: {moeda.dobro(valor)}')
print(f'A metade de {moeda.moeda(valor)} é: {moeda.metade(valor)}') | [
"pablodplucenahh@gmail.com"
] | pablodplucenahh@gmail.com |
567dc4756fdd77193ba0e7b938617bfbc0333d1a | 74c05c49de443e9c304549e017ddb7fc5b5dfafb | /customuser_project/wsgi.py | 58b0fa11b7edac62a12b544f8a8a36bca297497c | [
"MIT"
] | permissive | aliibsamohammed/customuser_project | 187ec4c85b49bcd807738dba63cddd8f174f0248 | f661f46447ac5144729fac5acf5406c3e87ccc46 | refs/heads/main | 2023-09-05T19:25:03.251279 | 2021-09-25T22:18:34 | 2021-09-25T22:18:34 | 409,338,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | """
WSGI config for customuser_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.conf import settings
from django.core.wsgi import get_wsgi_application
from whitenoise import WhiteNoise
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'customuser_project.settings')
application = get_wsgi_application()
application = WhiteNoise(application, root=settings.STATIC_ROOT) | [
"ibsamohammed@gmail.com"
] | ibsamohammed@gmail.com |
6c3c88c52957533c2d9b96c060ad47523130e927 | 253a92017383ea38d0a4e00097575a5a5fd81cc1 | /pages/migrations/0002_page_order.py | 1e849eaeb98e95c96e1d048659854fb77498938a | [] | no_license | crltsmrtnz/mboHupaWeb | 755ed5042d9738e2725b7e8b2436874e0cb6ad24 | 94e358b6954ab97a2338a2ceff8e343ef062490d | refs/heads/master | 2020-05-16T13:33:08.836312 | 2019-06-20T23:39:20 | 2019-06-20T23:39:20 | 183,076,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # Generated by Django 2.0.2 on 2019-04-25 19:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pages', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='page',
name='order',
field=models.SmallIntegerField(default=0, verbose_name='Orden'),
),
]
| [
"crltsmrtnz@gmail.com"
] | crltsmrtnz@gmail.com |
6830bf43355e90891aa72f7a8937eceacff7dd87 | ad14fb1e1d133668207da93fdedf17d5523dfae5 | /lib/ipmi_client.py | 85a734292322c92b78567340a464fff1a2be12b4 | [
"Apache-2.0"
] | permissive | timlee66/openbmc-test-automation | 8a95187e233d585c1a534adfb1623e88728e3c82 | 43caf70f136a69fe104caa43a2379cc7e0986598 | refs/heads/master | 2022-05-22T19:22:12.546763 | 2021-10-08T01:30:54 | 2021-10-08T01:32:25 | 229,914,924 | 0 | 0 | Apache-2.0 | 2019-12-24T09:43:14 | 2019-12-24T09:43:13 | null | UTF-8 | Python | false | false | 5,474 | py | #!/usr/bin/env python
r"""
A python companion file for ipmi_client.robot.
"""
import collections
import gen_print as gp
import gen_cmd as gc
from robot.libraries.BuiltIn import BuiltIn
# Set default values for required IPMI options.
ipmi_interface = 'lanplus'
ipmi_cipher_suite = BuiltIn().get_variable_value("${IPMI_CIPHER_LEVEL}", '17')
ipmi_timeout = BuiltIn().get_variable_value("${IPMI_TIMEOUT}", '3')
ipmi_port = BuiltIn().get_variable_value("${IPMI_PORT}", '623')
ipmi_username = BuiltIn().get_variable_value("${IPMI_USERNAME}", "root")
ipmi_password = BuiltIn().get_variable_value("${IPMI_PASSWORD}", "0penBmc")
ipmi_host = BuiltIn().get_variable_value("${OPENBMC_HOST}")
# Create a list of the required IPMI options.
ipmi_required_options = ['I', 'C', 'N', 'p', 'U', 'P', 'H']
# The following dictionary maps the ipmitool option names (e.g. "I") to our
# more descriptive names (e.g. "interface") for the required options.
ipmi_option_name_map = {
'I': 'interface',
'C': 'cipher_suite',
'N': 'timeout',
'p': 'port',
'U': 'username',
'P': 'password',
'H': 'host',
}
def create_ipmi_ext_command_string(command, **options):
    r"""
    Create and return an IPMI external command string which is fit to be run
    from a bash command line.

    Example:
    ipmi_ext_cmd = create_ipmi_ext_command_string('power status')

    Result:
    ipmitool -I lanplus -C 3 -p 623 -P ******** -H x.x.x.x power status

    Example:
    ipmi_ext_cmd = create_ipmi_ext_command_string('power status', C='4')

    Result:
    ipmitool -I lanplus -C 4 -p 623 -P ******** -H x.x.x.x power status

    Description of argument(s):
    command                         The ipmitool command (e.g. 'power status').
    options                         Any desired options that are understood by
                                    ipmitool (see iptmitool's help text for a
                                    complete list).  If the caller does NOT
                                    provide any of several required options
                                    (e.g. "P", i.e. password), this function
                                    will include them on the caller's behalf
                                    using default values.
    """
    new_options = collections.OrderedDict()
    for option in ipmi_required_options:
        # This is to prevent boot table "-N 10" vs user input timeout.
        if " -N " in command and option == "N":
            continue
        if option in options:
            # If the caller has specified this particular option, use it in
            # preference to the default value.
            new_options[option] = options[option]
            # Delete the value from the caller's options.
            del options[option]
        else:
            # The caller hasn't specified this required option so specify it
            # for them using the module-level default.  Look the default up
            # by name via globals(); this avoids eval() on a constructed
            # string, which is both slower and unsafe by convention.
            var_name = 'ipmi_' + ipmi_option_name_map[option]
            new_options[option] = globals()[var_name]
    # Include the remainder of the caller's options in the new options
    # dictionary.
    for key, value in options.items():
        new_options[key] = value
    return gc.create_command_string('ipmitool', command, new_options)
def verify_ipmi_user_parm_accepted():
    r"""
    Determine whether the OBMC accepts the '-U' ipmitool option and adjust
    the global ipmi_required_options accordingly (older OBMC firmware
    rejects '-U').
    """
    # Assumption: "U" is in the global ipmi_required_options.
    global ipmi_required_options
    print_output = 0
    # First attempt: run a harmless command with the full required option
    # set (which includes "-U").
    command_string = create_ipmi_ext_command_string('power status')
    rc, stdout = gc.shell_cmd(command_string,
                              print_output=print_output,
                              show_err=0,
                              ignore_err=1)
    gp.qprint_var(rc, 1)
    if rc == 0:
        # The OBMC accepts the ipmitool "-U" option so no further work needs
        # to be done.
        return
    # Remove the "U" option from ipmi_required_options to allow us to create a
    # command string without the "U" option.
    if 'U' in ipmi_required_options:
        del ipmi_required_options[ipmi_required_options.index('U')]
    # Second attempt: same command without "-U".
    command_string = create_ipmi_ext_command_string('power status')
    rc, stdout = gc.shell_cmd(command_string,
                              print_output=print_output,
                              show_err=0,
                              ignore_err=1)
    gp.qprint_var(rc, 1)
    if rc == 0:
        # Success without "-U": leave the "U" option removed from the
        # ipmi_required_options global variable.
        return
    # Neither attempt worked; report the error.
    message = "Unable to run ipmitool (with or without the '-U' option).\n"
    gp.print_error(message)
    # Revert to original ipmi_required_options by inserting 'U' right before
    # 'P'.
    ipmi_required_options.insert(ipmi_required_options.index('P'), 'U')
def ipmi_setup():
    r"""
    Perform all required setup for running ipmitool commands.
    """
    verify_ipmi_user_parm_accepted()
# Run setup once at import time so callers get a correctly adjusted
# ipmi_required_options list.
ipmi_setup()
def process_ipmi_user_options(command):
    r"""
    Return the command buffer with any user-specified IPMI options
    (Robot variable ${IPMI_USER_OPTIONS}) pre-pended.

    Description of argument(s):
    command                         An IPMI command (e.g. "power status").
    """
    user_options = BuiltIn().get_variable_value("${IPMI_USER_OPTIONS}", '')
    # No user options configured: hand the command back untouched.
    return command if user_options == "" else user_options + " " + command
| [
"gkeishin@in.ibm.com"
] | gkeishin@in.ibm.com |
8125f2b59f3c1c6cac4642a27e9d7f074dc09df9 | 0e31fd5b7fb3eb03bd8976f3f1741e0a8025c918 | /company_index/middlewares.py | 49ed499ae617bdb2a6770b41512c5a742f808d23 | [] | no_license | shivvu/End-to-End-Deployment | 9bfa107b2ac21dedc1fe73ba8af6e69cf89748fc | 6f931a74ece8f3be5cf1229f1f7f4310653c6683 | refs/heads/main | 2023-02-07T08:03:14.313160 | 2020-12-19T16:57:55 | 2020-12-19T16:57:55 | 322,823,051 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,658 | py | # Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class IdxcrawlingSpiderMiddleware:
    """Scrapy spider middleware for the idxcrawling project.

    All hooks are pass-throughs, so scrapy behaves as if no middleware
    were installed; they exist as extension points.  Not all methods
    need to be defined — undefined ones are skipped by scrapy.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Instantiate the middleware and subscribe to the spider_opened signal."""
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        """Called for each response entering the spider.

        Returning None lets processing continue; raising an exception
        would abort it.
        """
        return None

    def process_spider_output(self, response, result, spider):
        """Re-emit the spider's results (Requests/items) unchanged."""
        # `yield from` is the idiomatic replacement for a manual
        # `for i in result: yield i` loop.
        yield from result

    def process_spider_exception(self, response, exception, spider):
        """Called when the spider (or earlier middleware) raises.

        Returning None keeps scrapy's default exception handling.
        """
        pass

    def process_start_requests(self, start_requests, spider):
        """Pass through the spider's start requests (no response attached yet)."""
        yield from start_requests

    def spider_opened(self, spider):
        # Log which spider this middleware is attached to.
        spider.logger.info('Spider opened: %s' % spider.name)
class IdxcrawlingDownloaderMiddleware:
    """Scrapy downloader middleware for the idxcrawling project.

    Every hook is a pass-through, so downloads behave exactly as if no
    middleware were installed.  Not all methods need to be defined —
    scrapy skips any that are missing.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Instantiate the middleware and subscribe to spider_opened."""
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        """Called for each outgoing request.

        Returning None continues normal processing; a Response/Request
        return value or an IgnoreRequest exception would short-circuit it.
        """
        return None

    def process_response(self, request, response, spider):
        """Called with each downloaded response; hand it back untouched."""
        return response

    def process_exception(self, request, exception, spider):
        """Called when a download handler or process_request() raises.

        Returning None continues scrapy's default exception processing.
        """
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| [
"noreply@github.com"
] | noreply@github.com |
fe49affdfe86b67c957fb54ead648638e5c4d54b | ff826a0767913591ef1462fc7b6ada3e96427f08 | /Pre_processing/csoText.py | b0564ea5bc565565378ca4a954216704e6242eed | [] | no_license | SoumyaBarikeri/ConferenceQuality | a99785f36d3c8b9738ea56e3f4be5909226cc7ac | 19af39bca2e7619e7dbeb7cb98fd49ab66b698c9 | refs/heads/master | 2022-12-08T23:28:03.721953 | 2020-09-13T13:32:38 | 2020-09-13T13:32:38 | 295,153,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,089 | py | """
This file fetches conference topics using Computer Science Ontology Classifier
"""
import classifier.classifier as CSO
import json
from nltk.stem import PorterStemmer
import pandas as pd
def stemwords(all_words):
    """
    Stem every topic string with the Porter stemmer.

    Parameters
    ----------
    all_words
        Iterable of topic strings (each possibly multi-word).

    Returns
    -------
    list of str
        Each input string with every whitespace-separated token replaced
        by its Porter stem, re-joined with single spaces.
    """
    stemmer = PorterStemmer()
    return [" ".join(stemmer.stem(token) for token in phrase.split())
            for phrase in all_words]
def getTopicsConf(conference):
    """
    Run the CSO classifier on a single conference's title and text.

    Parameters
    ----------
    conference
        Mapping with keys 'eventID', 'title' and 'text'.

    Returns
    -------
    (semantic_topics, syntactic_topics, enhanced_topics)
        Three '|'-joined strings of topics returned by the CSO classifier.
    """
    # NOTE(review): eventID is read but never used below — presumably kept
    # so a missing key fails fast; confirm before removing.
    eventID = conference['eventID']
    title = conference['title']
    print('title is' + title)
    text = conference['text']
    # The classifier expects a JSON document; key names are arbitrary.
    json_string = json.dumps({'key1': title, 'key2': text})
    result = CSO.run_cso_classifier(json_string, modules="both", enhancement="first")
    print(result['semantic'])
    semantic_topics = "|".join(result['semantic'])
    syntactic_topics = "|".join(result['syntactic'])
    enhanced_topics = "|".join(result['enhanced'])
    return semantic_topics, syntactic_topics, enhanced_topics
def getTopics(conference, df2, i):
    """
    Run the CSO classifier on one conference and store its topics in df2.

    Parameters
    ----------
    conference
        Sequence of (title, text) for the conference.
    df2
        Dataframe receiving the 'semantic', 'syntactic' and 'enhanced'
        topic columns (written at the i-th index label).
    i
        Positional iterator over each conference.

    Returns
    -------
    topics
        Concatenated list of semantic and syntactic topics.
    """
    eventID = df2.index[i]
    title = conference[0]
    print('title is' + title)
    text = conference[1]
    # The classifier expects a JSON document; key names are arbitrary.
    json_string = json.dumps({'key1': title, 'key2': text})
    result = CSO.run_cso_classifier(json_string, modules="both", enhancement="first")
    topics = result['semantic'] + result['syntactic']
    print(result['semantic'])
    semantic_topics = "|".join(result['semantic'])
    syntactic_topics = "|".join(result['syntactic'])
    enhanced_topics = "|".join(result['enhanced'])
    # DataFrame.ix was deprecated in pandas 0.20 and removed in 1.0;
    # .loc is the label-based replacement with identical semantics here.
    df2.loc[eventID, 'semantic'] = semantic_topics
    df2.loc[eventID, 'syntactic'] = syntactic_topics
    df2.loc[eventID, 'enhanced'] = enhanced_topics
    return topics
# Load the full WikiCfP dump and the manually labeled subset.
df = pd.read_csv('data/wCfP_data_full_new.csv')
df_Labeled = pd.read_csv('data/wCfP_cs_manual_labels_extended_420.csv', delimiter=';')
df_reduced = df.loc[:, ['eventID', 'title', 'text']]
# Merge WikiCfp data with conferences manually labeled as Computer science or Non Computer science
df_Labeled = pd.merge(df_Labeled, df_reduced, on='eventID')
columns = ['eventID', 'title', 'text','csLabel']
df2 = pd.DataFrame(df_Labeled, columns=columns)
# Create new columns in Dataframe for topics
df2['semantic'] = ""
df2['syntactic'] = ""
df2['enhanced'] = ""
df2['semantic'] = df2['semantic'].astype(object)
df2['syntactic'] = df2['syntactic'].astype(object)
df2['enhanced'] = df2['enhanced'].astype(object)
# NOTE(review): set_index returns a new frame and the result is discarded,
# so df2 keeps its positional RangeIndex.  getTopics then treats positions
# as "eventID" labels — presumably intentional by now; confirm before
# changing to set_index(..., inplace=True).
df2.set_index('eventID')
# Classify every conference; getTopics also writes the topic columns into df2.
result = [getTopics(conf, df2, i) for i, conf in zip(range(df2.shape[0]), df2[['title', 'text']].values)]
all_topics = []
for row in result:
    for word in row:
        all_topics.append(word)
# Get a count of all distinct topics
wordCount = [[x, all_topics.count(x)] for x in set(all_topics)]
df2 = df2.drop(['title', 'text'], axis =1)
# Write the conference topics to csv file
df2.to_csv('data/conftopics.csv')
| [
"noreply@github.com"
] | noreply@github.com |
5895131f82ef26ab807bad6166ae4dfbed390bcd | ec8702ca17e1e73202b2ab19a351092bde619112 | /libs/models/mixins.py | 9d65e05db560e3f9257522e655913121e25126ab | [] | no_license | mygoda/new_ball | fff0846529a22253fcab06a6ec6d04206899b6ea | b2f05f5887758a0a30b50d5049b2736149e52e5c | refs/heads/master | 2021-01-09T19:03:56.411532 | 2016-07-11T00:03:44 | 2016-07-11T00:03:44 | 60,612,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,144 | py | # -*- coding: utf-8 -*-
# __author__ = xutao
from __future__ import division, unicode_literals, print_function
from django.db.models import Q
class QuerysetMixin(object):
    """Convenience lookup helpers to mix into Django model classes.

    All helpers swallow lookup failures and report them as None (single
    lookups) or an empty queryset (filters) instead of raising.
    """

    @classmethod
    def get_by_unique(cls, **kwargs):
        """Return the single instance matching kwargs, or None on any failure."""
        try:
            instance = cls.objects.get(**kwargs)
        # `except Exception as err` replaces the Python-2-only
        # `except Exception, err` syntax; it works on both Python 2.6+ and 3.
        except Exception as err:
            print(err)
            instance = None
        return instance

    @classmethod
    def _build_query(cls, **kwargs):
        """AND together one Q object per keyword argument (shared helper)."""
        query_list = [Q(**{key: value}) for key, value in kwargs.items()]
        query = query_list.pop()
        for query_append in query_list:
            query &= query_append
        return query

    @classmethod
    def get_by_queries(cls, **kwargs):
        """Return the single instance matching all kwargs (ANDed Q objects), or None."""
        query = cls._build_query(**kwargs)
        try:
            item = cls.objects.get(query)
        except Exception:
            item = None
        return item

    @classmethod
    def filter_by_queries(cls, **kwargs):
        """Return the queryset matching all kwargs (ANDed Q objects); empty queryset on error."""
        query = cls._build_query(**kwargs)
        try:
            item = cls.objects.filter(query)
        except Exception:
            item = cls.objects.none()
        return item
"taoxu@163.com"
] | taoxu@163.com |
883987a517479c143a4f47cd570555cb718d5ae8 | 4109e9a3ec14b9ff1670b65b686dbc10b485e1cf | /appi/migrations/0002_remove_skydevice_owner.py | 6443ee60b9e6eededef9b32ebf2f8d30a2427d7f | [] | no_license | bloomsky/bloomsky-widget-id-generator | afc6bdefea0a6f15b53a54054c43c06a64eaeae8 | 084d71b6afd192b4645aca513b36a6302acef1b8 | refs/heads/master | 2021-01-23T10:44:46.882267 | 2017-06-01T19:12:36 | 2017-06-01T19:12:36 | 93,090,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-05-26 18:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the ``Owner`` field from the ``skydevice`` model."""
    # Must run after the app's initial migration.
    dependencies = [
        ('appi', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='skydevice',
            name='Owner',
        ),
    ]
| [
"weisun@wei79.local"
] | weisun@wei79.local |
cb0c7a44b43524123ce006d9b0d794b3060cda65 | e50a4781388ef7682a935986f2abae6fa85f879a | /MM2021/com-train/run_one.py | f4ebf3a94dd4b23d87a18b8e242c39768125e81f | [] | no_license | wojxhr/CG-trans-optimization | 2912156320db958e8aa6f797619acf59931be231 | 2b476b1291b972d22ec8e825eae21cbf4a597653 | refs/heads/master | 2023-08-24T23:05:31.526477 | 2021-10-22T03:01:29 | 2021-10-22T03:01:29 | 419,938,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,007 | py | """
This demo aims to help player running system quickly by using the pypi library simple-emualtor https://pypi.org/project/simple-emulator/.
"""
from simple_emulator import SimpleEmulator, create_emulator
# We provided some function of plotting to make you analyze result easily in utils.py
from simple_emulator import analyze_emulator, plot_rate
from simple_emulator import constant
from simple_emulator import cal_qoe
import os
def run_and_plot(emulator, network_trace, log_packet_file):
    """Run the emulator for a fixed duration, dump debug info and report QoE.

    network_trace / log_packet_file are kept for the optional plotting
    helpers (analyze_emulator / plot_rate) which can be re-enabled to
    produce emulator-analysis.png and rate_changing.png.
    """
    # Run for 15 simulated seconds; without a duration the emulator would
    # run until no packet is left to send.
    emulator.run_for_dur(15)
    # Print the debug information of links and senders.
    emulator.print_debug()
    # Report the quality-of-experience score of this run.
    print("Qoe : %.4f" % (cal_qoe()))
def evaluate(solution_file, block_traces, network_trace, log_packet_file, second_block_file=None):
    """Build an emulator around the given solution module and run it.

    solution_file     dotted module path of the player's solution (must
                      expose a MySolution class)
    block_traces      block trace files for the first (evaluated) sender
    network_trace     network trace file path
    log_packet_file   where the packet log is written
    second_block_file block traces for the background-traffic sender, or
                      None for a single-flow evaluation
    """
    # fixed random seed so runs are reproducible
    import random
    random.seed(1)
    # import the solution
    import importlib
    solution = importlib.import_module(solution_file)
    # Use the object you created above
    my_solution = solution.MySolution()
    # Create the emulator using your solution
    # Set second_block_file=None if you want to evaluate your solution in situation of single flow
    # Specify ENABLE_LOG to decide whether or not output the log of packets. ENABLE_LOG=True by default.
    # You can get more information about parameters at https://github.com/AItransCompetition/simple_emulator/tree/master#constant
    emulator = create_emulator(
        block_file=block_traces,
        second_block_file=second_block_file,
        trace_file=network_trace,
        solution=my_solution,
        # enable logging packet. You can train faster if ENABLE_LOG=False
        ENABLE_LOG=True
    )
    run_and_plot(emulator, network_trace, log_packet_file)
if __name__ == '__main__':
    # Block traces for scenario 1 (three priority levels); the commented
    # alternatives below switch to scenario 2 (video/audio) or scenario 3
    # (deadline-annotated blocks).
    block_traces = ["datasets/scenario_1/blocks/block-priority-0.csv",
                    "datasets/scenario_1/blocks/block-priority-1.csv",
                    "datasets/scenario_1/blocks/block-priority-2.csv"]
    # block_traces = ["datasets/scenario_2/blocks/block_video.csv", "datasets/scenario_2/blocks/block_audio.csv"]
    # block_traces = ["datasets/scenario_3/blocks/block-priority-0-ddl-0.15-.csv",
    #                 "datasets/scenario_3/blocks/block-priority-1-ddl-0.5-.csv",
    #                 "datasets/scenario_3/blocks/block-priority-2-ddl-0.2-.csv"]
    network_trace = "./datasets/scenario_1/networks/traces_102.txt"
    # The file path of packets' log
    log_packet_file = "output/packet_log/packet-0.log"
    # Select the solution file (dotted module path)
    solution_file = 'solution_ours.reno.solution'
    # solution_file = 'solution_ours.reno.bbr'
    # solution_file = 'solution_ours.reno.test'
    # The first sender will use your solution, while the second sender will send the background traffic
    # The block files for the first sender
    first_block_file = block_traces
    # The block files for the second sender
    second_block_file = ["datasets/background_traffic_traces/web.csv"]
    # Remove any stale result file from a previous run.
    if os.path.exists("./result1.csv"):
        os.remove('./result1.csv')
    # Create the emulator and evaluate your solution
    evaluate(solution_file, first_block_file, network_trace, log_packet_file, second_block_file=second_block_file)
    # If only one block_traces is given, it means there will be no background traffic
    # evaluate(solution_file, block_traces, network_trace, log_packet_file)
| [
"391777866@qq.com"
] | 391777866@qq.com |
2e9db7f19f244cd7fee24d13844d3b512eacad9a | 8700c32358fea46addac456927f37920845c8da6 | /ssir_iwae.py | db2bf8c5b2117c81d2936bb5acd7d147741647b3 | [] | no_license | satyam-cyc/SeqIWAE | c1c2bdb208b2ce03a54f35a7c218fccf93554d71 | 4c9fb10d1e87f0ed625f7d547cf5d78218752834 | refs/heads/master | 2020-07-05T01:26:21.774755 | 2017-11-24T19:56:42 | 2017-11-24T19:56:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,708 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 20 19:13:03 2017
@author: Chin-Wei
Sequentialized Sampling Importance Resampling and IWAE training wrapper
"""
import numpy as np
from utils import log_sum_exp_np
def seq_iwae_update(train_func, eval_func, x, spls):
    """
    One sequentialized-IWAE update (external stochasticity: the importance
    samples are drawn by the caller).

    train_func: training step taking the resampled latent batch
    eval_func:  per-datapoint evaluation; arguments are input and samples,
                negated here to serve as the log importance weight
    x:          input batch
    spls:       samples, shape (batchsize, n_iw_samples, dimensions)
    """
    def weight_func(spl):
        return -eval_func(x, spl)

    train_func(ssir(weight_func, spls))
def ssir(weight_func, samples):
    """
    Sequentialized Sampling Importance Resampling.

    weight_func: function returning log importance weights for a batch of
                 candidate samples (independent across instances)
    samples:     array of shape (batchsize, n_candidates, dim)

    Returns one resampled vector per batch instance, built by streaming
    the candidates through `refine` one slice at a time.
    """
    n_batch, n_candidates = samples.shape[0], samples.shape[1]
    dim = samples.shape[2]
    # Accumulated log-weight starts at -inf (no candidate seen yet), so the
    # first candidate is always accepted by `refine`.
    acc_weight = np.full((n_batch,), -np.inf)
    kept = np.ones((n_batch, dim))
    for idx in range(n_candidates):
        acc_weight, kept = refine(weight_func, acc_weight, kept, samples[:, idx])
    return kept
def refine(weight_func, acc_weight, old_spl, new_spl):
    """
    Single SIR refinement step.

    weight_func: log importance weight function, independent across instances
    acc_weight:  accumulated (log-sum of) importance weights, shape (batch,)
    old_spl:     currently kept samples, shape (batch, dim)
    new_spl:     new candidate samples, shape (batch, dim)

    Each instance keeps the new candidate with probability
    exp(weight - new_acc_weight), otherwise retains its old sample.
    """
    weights = weight_func(new_spl)
    # Fold the new weight into the running log-sum-exp accumulator.
    stacked = np.concatenate([acc_weight[:, None], weights[:, None]], axis=1)
    new_acc_weight = log_sum_exp_np(stacked, axis=1)[:, 0]
    # Acceptance test on log scale: log p(new) >= log u.
    log_proba_new = weights - new_acc_weight
    log_unif = np.log(np.random.uniform(size=new_spl.shape[0]))
    get_new = (log_proba_new >= log_unif)[:, None]
    # Per-instance selection between candidate and incumbent.
    selected_spl = np.where(get_new, new_spl, old_spl)
    return new_acc_weight, selected_spl
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    # Demo 1 (disabled): resample from an unnormalized 2-D density U with a
    # wide Gaussian proposal and scatter the accepted points.
    if 0:
        sigmoid = lambda x: 1/(1+np.exp(-x))
        def U(Z):
            # Unnormalized log-density (banana-like mixture of three bumps).
            z1 = Z[:, 0]
            z2 = Z[:, 1]
            w1 = np.sin(2.*np.pi*z1/5.)
            w3 = 3. * sigmoid((z1-1.)/0.3)
            B1 = np.exp(-.5*(((z2-w1)/0.4)**2)+10)
            B2 = np.exp(-.5*(((z2-w1+w3)/0.35)**2)+10)
            B3 = np.exp(-.5*(z1**2 + z2**2/5.)+10)
            return np.log( B1 + B2 + B3 )
        sigma = 10. # sample from gaussian proposal with specified std
        n_iw = 20
        samples = np.random.randn(1000,n_iw,2) * sigma
        # log weight = log target - log proposal (up to constants).
        weight_func = lambda x: U(x) - ((x/sigma) ** 2).sum(1)
        re_samples = ssir(weight_func,samples)
        plt.figure()
        plt.scatter(re_samples[:,0],re_samples[:,1])
    # Demo 2 (enabled): same procedure for a second target density U2.
    if 1:
        sigmoid = lambda x: 1/(1+np.exp(-x))
        def U2(Z):
            # Unnormalized log-density (mixture of three correlated bumps).
            z1 = Z[:, 0]
            z2 = Z[:, 1]
            B1 = 0.30 * np.exp(-(0.2 * z1 - 0.2 * z2 + 2.5)**2)
            B2 = 0.50 * np.exp(-(4.0 * sigmoid(z2) + 2.5 * z1 - 5.0)**2)
            B3 = 0.20 * np.exp(-(0.5 * z1 + 0.5 * z2 + 1)**2 + 0.5 * z1 * z2)
            return np.log( B1 + B2 + B3 )
        sigma = 10. # sample from gaussian proposal with specified std
        n_iw = 50
        samples = np.random.randn(1000,n_iw,2) * sigma
        # log weight = log target - log proposal (up to constants).
        weight_func = lambda x: U2(x) - ((x/sigma) ** 2).sum(1)
        re_samples = ssir(weight_func,samples)
        plt.figure()
        plt.scatter(re_samples[:,0],re_samples[:,1])
        plt.xlim(-25,15)
        plt.ylim(-18,22)
| [
"cw.huang427@gmail.com"
] | cw.huang427@gmail.com |
add361736c36d51ddd27caa4b8b50f6f08de26c6 | bed10dcbc66c0332cc2748be179e3f992e45c124 | /model.py | 24b218f6f670a65386e782de10787d542c01e4e6 | [] | no_license | GZWQ/image-generation | fdbe9214965d41daa19bb8660ecf816fb37d1674 | 3e477ab67e97d608b502ef42f5100b7f33f9d0f8 | refs/heads/master | 2021-01-14T22:26:18.709388 | 2020-02-24T17:23:30 | 2020-02-24T17:23:30 | 242,780,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,609 | py | from keras.models import Model, Input, Sequential,model_from_json
from keras import layers
# from keras_contrib.layers.normalization import InstanceNormalization
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
from keras.layers.advanced_activations import LeakyReLU
import keras.backend as K
from keras.backend import tf as ktf
from utils.stn import SpatialTransformer
import numpy as np
from tqdm import tqdm
from dataloader import Dataloader
import os,keras
from keras.models import load_model
from keras.optimizers import Adam
from utils.pose_transform import AffineTransformLayer
from utils.layer_utils import content_features_model
import cv2
class PoseGAN():
    def __init__(self,cfg):
        """Build the pose-transfer GAN (generator + discriminator) from cfg.

        cfg carries loss weights, image size, warp mode ('full' / 'mask' /
        'stn' / 'none'), training schedule and output paths.
        """
        ########## Loss Setting #########
        self._l1_penalty_weight = cfg.l1_penalty_weight
        self._content_loss_layer = cfg.content_loss_layer
        self._gan_penalty_weight = cfg.gan_penalty_weight
        self._tv_penalty_weight = cfg.tv_penalty_weight
        self._nn_loss_area_size = cfg.nn_loss_area_size
        self._lstruct_penalty_weight = cfg.lstruct_penalty_weight
        self._mae_weight = cfg.mae_weight
        # Frozen pose estimator used by struct_loss.
        self._pose_estimator = load_model(cfg.pose_estimator)
        ##########General Setting########
        self.im_size = cfg.im_size
        self.use_warp = cfg.use_warp
        self.warp_agg = cfg.warp_agg
        self.epochs = cfg.epochs
        self.batch_size = cfg.batch_size
        self.dataset_name = cfg.dataset_name
        self.display_ratio = cfg.display_ratio
        common_path = '{}/l1_{}/tv_{}/struct_{}/mae_{}/'.format(self.dataset_name,self._l1_penalty_weight,self._tv_penalty_weight,self._lstruct_penalty_weight,self._mae_weight)
        self.checkpoint_ratio = cfg.checkpoint_ratio
        # NOTE(review): the two assignments below are immediately overwritten
        # by the common_path-based ones a few lines down — apparent leftovers.
        self.checkpoints_dir = cfg.checkpoints_dir+'{}/'.format(self.dataset_name)
        self.output_dir = cfg.output_dir+'{}/l1_{}/tv_{}/struct_{}/mae_{}'.format(self.dataset_name,
                                                                       self._l1_penalty_weight,
                                                                       #self._tv_penalty_weight,
                                                                       self._nn_loss_area_size,
                                                                       self._lstruct_penalty_weight,
                                                                       self._mae_weight)
        self.checkpoints_dir = cfg.checkpoints_dir+common_path
        self.output_dir = cfg.output_dir+common_path
        print(self.checkpoints_dir)
        os.makedirs(self.checkpoints_dir,exist_ok=True)
        os.makedirs(self.output_dir,exist_ok=True)
        ###########    #############
        # U-Net channel widths per stage.
        self.nfilters_decoder = (512, 512, 512, 256, 128, 3)
        self.nfilters_encoder = (64, 128, 256, 512, 512, 512)
        self.dataset = Dataloader(cfg)
        opt_g = Adam(2e-4,0.5,0.999)
        opt_d = Adam(2e-4,0.5,0.999)
        ############# Train Discriminator ###########
        # Discriminator trains with the generator frozen.
        self.discriminator = self.make_discriminator()
        self._generator = self.make_generator()
        self._set_trainable(self._generator, False)
        self._set_trainable(self.discriminator, True)
        self.discriminator.compile(loss=['binary_crossentropy'],optimizer=opt_d,metrics=['accuracy'])
        ############# Train Generator ###########
        # Combined model trains the generator with the discriminator frozen.
        self._set_trainable(self._generator, True)
        self._set_trainable(self.discriminator, False)
        input_img = Input([self.im_size[0], self.im_size[1], 3])
        input_pose = Input([self.im_size[0], self.im_size[1], 18])
        target_img = Input([self.im_size[0], self.im_size[1], 3])
        target_pose = Input([self.im_size[0], self.im_size[1], 18])
        # Extra warp inputs depend on the configured warp mode.
        if self.use_warp == 'full':
            warp = [Input((1, 8))]
        elif self.use_warp == 'mask':
            warp = [Input((10, 8)), Input((10, self.im_size[0], self.im_size[1]))]
        elif self.use_warp == 'stn':
            warp = [Input((72,))]
        else:
            warp = []
        fake_imgs = self._generator([input_img, input_pose, target_pose]+warp)
        pred = self.discriminator([fake_imgs,input_pose,target_img,target_pose])
        self.generator = Model([input_img, input_pose, target_img,target_pose]+warp,[pred,fake_imgs])
        # self.generator.compile(loss=['binary_crossentropy',self.gene_loss],loss_weights=[1,1],optimizer=opt_g)
        self.generator.compile(loss=['binary_crossentropy', self.gene_loss], loss_weights=[1, 1], optimizer=opt_g)
def _set_trainable(self, net, trainable):
for layer in net.layers:
layer.trainable = trainable
net.trainable = trainable
    def block(self,x,f,down=True,bn=True,dropout=False,leaky=True):
        """One U-Net conv block: activation, then stride-2 (de)convolution.

        x:       input tensor
        f:       number of output filters
        down:    True -> downsample with Conv2D; False -> upsample with
                 Conv2DTranspose followed by a 1-px crop
        bn:      apply InstanceNormalization after the convolution
        dropout: apply Dropout(0.5) at the end
        leaky:   LeakyReLU(0.2) activation instead of plain ReLU
        """
        if leaky:
            x = LeakyReLU(0.2)(x)
        else:
            x = layers.Activation('relu')(x)
        if down:
            x = layers.ZeroPadding2D()(x)
            x = layers.Conv2D(f,kernel_size=4,strides=2,use_bias=False)(x)
        else:
            x = layers.Conv2DTranspose(f,kernel_size=4,strides=2,use_bias=False)(x)
            x = layers.Cropping2D((1,1))(x)
        if bn:
            x = InstanceNormalization()(x)
        if dropout:
            x = layers.Dropout(0.5)(x)
        return x
    def encoder(self,ins,nfilters=(64,128,256,512,512,512)):
        """Encode the (concatenated) inputs, returning every stage's output
        so the decoder can use them as skip connections."""
        _layers = []
        # Multiple inputs are fused channel-wise before encoding.
        if len(ins) != 1:
            x = layers.Concatenate(axis=-1)(ins)
        else:
            x = ins[0]
        for i,nf in enumerate(nfilters):
            if i==0:
                # First stage keeps resolution (3x3 conv, no stride).
                x = layers.Conv2D(nf,kernel_size=3,padding='same')(x)
            elif i==len(nfilters)-1:
                # Deepest stage skips normalization.
                x = self.block(x,nf,bn=False)
            else:
                x = self.block(x,nf)
            _layers.append(x)
        return _layers
    def decoder(self,skips,nfilters=(64,128,256,512,512,512)):
        """Decode from the deepest skip, concatenating encoder skips at each
        stage; final activation is tanh (images in [-1, 1])."""
        x = None
        for i,(skip,nf) in enumerate(zip(skips,nfilters)):
            if 0<i<3:
                # Early upsampling stages use dropout.
                x = layers.Concatenate(axis=-1)([x,skip])
                x = self.block(x,nf,down=False,leaky=False,dropout=True)
            elif i==0:
                # Deepest stage: no previous decoder output to concatenate.
                x = self.block(skip,nf,down=False,leaky=False,dropout=True)
            elif i== len(nfilters)-1:
                # Output stage: full-resolution 3x3 conv instead of upsampling.
                x = layers.Concatenate(axis=-1)([x,skip])
                x = layers.Activation('relu')(x)
                x = layers.Conv2D(nf,kernel_size=3,use_bias=True,padding='same')(x)
            else:
                x = layers.Concatenate(axis=-1)([x,skip])
                x = self.block(x,nf,down=False,leaky=False)
        x = layers.Activation('tanh')(x)
        return x
    def concatenate_skips(self,skips_app,skips_pose,warp):
        """Merge appearance and target-pose skip connections, warping the
        appearance features of the first four (shallow) stages according to
        the configured warp mode before concatenation."""
        skips = []
        if self.use_warp == 'stn':
            # Localization net initialized to the identity transform
            # (bias encodes the 2x3 identity affine matrix, weights zero).
            b = np.zeros((2, 3), dtype='float32')
            b[0, 0] = 1
            b[1, 1] = 1
            W = np.zeros((32, 6), dtype='float32')
            weights = [W, b.flatten()]
            locnet = Sequential()
            locnet.add(layers.Dense(64, input_shape=(72,)))
            locnet.add(LeakyReLU(0.2))
            locnet.add(layers.Dense(32))
            locnet.add(LeakyReLU(0.2))
            locnet.add(layers.Dense(6, weights=weights))
        for i, (sk_app, sk_pose) in enumerate(zip(skips_app, skips_pose)):
            if i < 4:
                # Shallow stages: warp appearance features toward target pose.
                if self.use_warp != 'stn':
                    out = AffineTransformLayer(10 if self.use_warp == 'mask' else 1, self.warp_agg, (self.im_size[0],self.im_size[1]))([sk_app] + warp)
                else:
                    out = SpatialTransformer(locnet, K.int_shape(sk_app)[1:3])(warp + [sk_app])
                out = layers.Concatenate(axis=-1)([out, sk_pose])
            else:
                # Deep stages: concatenate without warping.
                out = layers.Concatenate(axis=-1)([sk_app, sk_pose])
            skips.append(out)
        return skips
    def make_generator(self):
        """Build the U-Net generator mapping (source image, source pose,
        target pose[, warp inputs]) to a generated target image."""
        use_warp_skip = self.use_warp != 'none'
        input_img = Input([self.im_size[0], self.im_size[1], 3])
        input_pose = Input([self.im_size[0], self.im_size[1], 18])
        target_pose = Input([self.im_size[0], self.im_size[1], 18])
        # Warp inputs mirror the modes handled in __init__/concatenate_skips.
        if self.use_warp == 'full':
            warp = [Input((1, 8))]
        elif self.use_warp == 'mask':
            warp = [Input((10, 8)), Input((10, self.im_size[0], self.im_size[1]))]
        elif self.use_warp == 'stn':
            warp = [Input((72,))]
        else:
            warp = []
        if use_warp_skip:
            # Two encoders: appearance (image+pose) and target pose; their
            # skips are merged with warping.
            enc_app_layers = self.encoder([input_img] + [input_pose], self.nfilters_encoder)
            enc_tg_layers = self.encoder([target_pose] , self.nfilters_encoder)
            enc_layers = self.concatenate_skips(enc_app_layers, enc_tg_layers, warp)
        else:
            # Single encoder over the channel-wise concatenation of all inputs.
            enc_layers = self.encoder([input_img] + [input_pose] + [target_pose], self.nfilters_encoder)
        # Decoder consumes skips deepest-first.
        out = self.decoder(enc_layers[::-1],self.nfilters_decoder)
        model = Model([input_img,input_pose,target_pose]+warp,[out])
        # model.summary()
        return model
    def make_discriminator(self):
        """Build the discriminator: a shared (image, pose) feature extractor
        applied to both the input pair and the target pair, followed by a
        dense real/fake classifier on the concatenated features."""
        input_img = Input([self.im_size[0],self.im_size[1],3])
        input_pose = Input([self.im_size[0],self.im_size[1],18])
        target_img = Input([self.im_size[0],self.im_size[1],3])
        target_pose = Input([self.im_size[0],self.im_size[1],18])
        # The triple-quoted string below is an earlier single-branch
        # discriminator kept as dead code (it is a no-op expression).
        '''
        out = layers.Concatenate(axis=-1)([input_img,input_pose,target_img,target_pose])
        out = layers.Conv2D(64,kernel_size=4,strides=2)(out)
        out = self.block(out,128)
        out = self.block(out, 256)
        out = self.block(out, 512)
        out = self.block(out, 1, bn=False)
        out = layers.Activation('sigmoid')(out)
        out = layers.Flatten()(out)
        model = Model([input_img,input_pose,target_img,target_pose],out)
        model.summary()
        '''
        # Shared convolutional trunk over an (image, pose) pair.
        out = layers.Concatenate(axis=-1)([input_img,input_pose])
        out = layers.Conv2D(64,kernel_size=4,strides=2)(out)
        out = self.block(out,128)
        out = self.block(out, 256)
        out = self.block(out, 512)
        m_share = Model([input_img,input_pose],[out])
        # Apply the same trunk to target and input pairs (Siamese style).
        output_feat = m_share([target_img,target_pose])
        input_feat = m_share([input_img,input_pose])
        out = layers.Concatenate(axis=-1)([output_feat,input_feat])
        out = LeakyReLU(0.2)(out)
        out = layers.Flatten()(out)
        out = layers.Dense(1)(out)
        out = layers.Activation('sigmoid')(out)
        model = Model([input_img, input_pose, target_img, target_pose], out)
        # model.summary()
        return model
    def nn_loss(self,reference,target,neighborhood_size=(3,3)):
        """Nearest-neighbour L1 loss: for every target pixel, take the
        minimum channel-summed L1 distance to any reference pixel in the
        surrounding neighborhood window."""
        v_pad = neighborhood_size[0] // 2
        h_pad = neighborhood_size[1] // 2
        # Pad with a large negative constant so border pixels never win the min.
        val_pad = ktf.pad(reference, [[0, 0], [v_pad, v_pad], [h_pad, h_pad], [0, 0]],
                          mode='CONSTANT', constant_values=-10000)
        reference_tensors = []
        for i_begin in range(0, neighborhood_size[0]):
            i_end = i_begin - neighborhood_size[0] + 1
            i_end = None if i_end == 0 else i_end
            for j_begin in range(0, neighborhood_size[1]):
                # NOTE(review): uses neighborhood_size[0] here rather than
                # [1]; correct only for square neighborhoods — confirm.
                j_end = j_begin - neighborhood_size[0] + 1
                j_end = None if j_end == 0 else j_end
                # One shifted view of the padded reference per window offset.
                sub_tensor = val_pad[:, i_begin:i_end, j_begin:j_end, :]
                reference_tensors.append(ktf.expand_dims(sub_tensor, -1))
        reference = ktf.concat(reference_tensors, axis=-1)
        target = ktf.expand_dims(target, axis=-1)
        abs = ktf.abs(reference - target)
        # Sum over channels, then minimize over the window offsets.
        norms = ktf.reduce_sum(abs, reduction_indices=[-2])
        loss = ktf.reduce_min(norms, reduction_indices=[-1])
        return loss
    def total_variation_loss(self,x):
        """Total-variation smoothness penalty (sum over squared horizontal
        and vertical neighbour differences, raised to the 1.25 power)."""
        img_nrows, img_ncols = self.im_size[0],self.im_size[1]
        assert K.ndim(x) == 4
        if K.image_data_format() == 'channels_first':
            a = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, 1:, :img_ncols - 1])
            b = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, :img_nrows - 1, 1:])
        else:
            a = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, 1:, :img_ncols - 1, :])
            b = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, :img_nrows - 1, 1:, :])
        return K.sum(K.pow(a + b, 1.25))
    def gan_loss(self,y_true,y_pred):
        # Non-saturating generator GAN loss: -E[log D]; 1e-7 avoids log(0).
        return -K.mean(K.log(y_pred+1e-7))
    def struct_loss(self,y_true,y_pred):
        """Structure loss: distance between pose-estimator heatmaps (first
        18 channels) of the real and generated images.  Images are
        BGR-flipped and halved before the estimator."""
        target_struct = self._pose_estimator(y_true[...,::-1]/2)[1][...,:18]
        struct = self._pose_estimator(y_pred[...,::-1]/2)[1][...,:18]
        # NOTE(review): this is (mean of difference)^2, not mean squared
        # error K.mean((target_struct - struct)**2) — confirm intent.
        return K.mean(target_struct-struct)**2
    def l1_loss(self,y_true,y_pred):
        # Plain pixelwise mean absolute error.
        return keras.losses.mean_absolute_error(y_true, y_pred)
def gene_loss(self,y_true,y_pred):
return self._l1_penalty_weight*self.nn_loss(y_pred,y_true)+\
self._tv_penalty_weight*self.total_variation_loss(y_pred)+\
self._lstruct_penalty_weight*self.struct_loss(y_true,y_pred)+self.l1_loss(y_true,y_pred)*100
def train(self):
    """Alternating GAN training loop.

    Per batch: train the discriminator on one real and one generated pair,
    then train the generator on its combined objectives.  Checkpoints and
    preview images are written once per qualifying epoch.
    """
    valid = np.ones((self.batch_size,1))
    fake = np.zeros((self.batch_size,1))
    for epoch in tqdm(range(self.epochs)):
        for ite in tqdm(range(self.dataset.number_of_batches_per_epoch())):
            # (Removed debug leftovers that saved the discriminator and wrote
            # sample images unconditionally on every single batch.)
            from_imgs, to_imgs, from_pose, to_pose, warp = self.dataset.next_sample()
            pred,fake_imgs = self.generator.predict([from_imgs, from_pose, to_imgs, to_pose]+warp)
            # Discriminator: real pair labelled 1, generated pair labelled 0.
            d_loss_real = self.discriminator.train_on_batch([from_imgs,from_pose,to_imgs,to_pose], valid)
            d_loss_fake = self.discriminator.train_on_batch([from_imgs,from_pose,fake_imgs,to_pose], fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
            g_loss = self.generator.train_on_batch([from_imgs, from_pose, to_imgs,to_pose]+warp, [valid, to_imgs])
        # Per-epoch bookkeeping.  Previously these ran inside the batch loop,
        # rewriting the identical checkpoint file once per batch.
        if (epoch+1)%self.checkpoint_ratio == 0:
            self.discriminator.save(self.checkpoints_dir+'{}_{}.h5'.format('discriminator',epoch+1))
        if (epoch+1)%self.display_ratio == 0:
            from_imgs, to_imgs, from_pose, to_pose, warp = self.dataset.next_text_sample()
            self.sample_images(epoch,ite,from_imgs, to_imgs, from_pose, to_pose,warp)
def sample_images(self, epoch,iter, from_imgs, to_imgs, from_pose, to_pose, warp,testing=True):
    """Write up to 10 side-by-side comparison rows as PNGs to self.output_dir.

    testing=True rows: source image | generated image | target image.
    testing=False rows: target image | pose map | generated image.
    """
    pred, gen_images = self.generator.predict([from_imgs, from_pose, to_imgs, to_pose]+warp)
    n_rows = gen_images.shape[0]
    if n_rows >= 10:
        n_rows = 10
    pose = to_pose
    for idx in range(n_rows):
        if testing:
            panels = [self.transfrom(from_imgs[idx]),
                      self.transfrom(gen_images[idx]),
                      self.transfrom(to_imgs[idx])]
        else:
            panels = [self.transfrom(to_imgs[idx]),
                      pose[idx],
                      self.transfrom(gen_images[idx])]
        # Stack the panels horizontally and convert to 8-bit for cv2.
        row = np.hstack(panels)
        row = row.astype(np.uint8, copy=False)
        cv2.imwrite(self.output_dir + '{}_{}_{}.png'.format(epoch,iter, idx), row)
def transfrom(self,img):
    # Map a [-1, 1] network output back to the [0, 255] pixel range.
    # (Name kept as-is -- callers elsewhere use the misspelling.)
    return img * 127.5 + 127.5
if __name__ == '__main__':
    from easydict import EasyDict as edict
    # Example configuration: tv/struct penalty weights are 0, so only the
    # remaining loss terms contribute.
    cfg = edict({'batch_size': 16,
                 'im_size': (128, 64, 3),
                 'data_path': '',
                 'use_warp': 'none',
                 'dataset_name': 'cad60',
                 'disc_type': '',
                 'l1_penalty_weight':100,
                 'content_loss_layer':'none',
                 'gan_penalty_weight':1,
                 'tv_penalty_weight':0,
                 'nn_loss_area_size':1,
                 'lstruct_penalty_weight':0})
    # Builds the GAN wrapper from the config; no training is started here.
    model = PoseGAN(cfg)
| [
"wqingdaniel@gmail.com"
] | wqingdaniel@gmail.com |
3ece49f91d19e555e7e128083e0047a507248f9e | fd45d0fb3dedac5948486fa928083d4f4f863ebc | /whatistheplan/models/__init__.py | 570015042da1184e500abb62c5004cc6d759cd0b | [
"MIT"
] | permissive | ccowmu/whatistheplan.com | b81d08124801bfa106c43d53db95601cfb3529ec | 97d4662c9248fc72d9b556b4fef222befcc2df30 | refs/heads/master | 2020-04-15T23:54:02.504073 | 2018-11-02T19:02:41 | 2018-11-02T19:02:41 | 33,387,069 | 5 | 7 | MIT | 2018-10-30T17:08:58 | 2015-04-03T23:12:49 | HTML | UTF-8 | Python | false | false | 199 | py | """Aggregate all database classes for easy importing"""
from whatistheplan.models.userprofile import UserProfile
from whatistheplan.models.game import Game
from whatistheplan.models.team import Team
| [
"dyladan@gmail.com"
] | dyladan@gmail.com |
908203b5cd69481a591c3a236d23ab58bfe761cd | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_321/ch130_2020_04_01_16_55_20_162558.py | b1a491a59ae05eb3b4d84e3ac5803ce5f576c87e | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | def monta_mala(l):
i = 0
mala = []
while sum(mala) <= 23:
if sum(mala) + l[0] > 23:
break
else:
mala.append(l[i])
i +=1
return mala | [
"you@example.com"
] | you@example.com |
f92210930c31e1edaf7d1513a0637aec82175039 | 4f3e02123d20044405638b1baa059c13fca9b1d7 | /python iris.py | 12410ba99fd00f3071596d49227b774c361835f0 | [] | no_license | drb1001/data-fiddles | e5eea14149bf30dcdd8f4296fe0b08feb1c729bc | 6d8b48e5b87bd5ff357bb555dd43c59fb3d4921f | refs/heads/master | 2016-08-11T09:53:07.405255 | 2016-03-21T15:34:25 | 2016-03-21T15:34:25 | 54,401,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,117 | py | import pandas as pd
import numpy as np
#import matplotlib.pyplot as plt
#import pylab as pyl
from sklearn import cross_validation, datasets
from sklearn import linear_model, svm, neighbors
iris_dataset = datasets.load_iris()
# print iris_dataset.data.shape
# print iris_dataset.keys()
iris_data = iris_dataset.data
iris_feature_names = iris_dataset.feature_names
iris_target = iris_dataset.target
iris_target_names = iris_dataset.target_names
n_samples = iris_data.shape[0]
# need to regularise the data
iris_df = pd.DataFrame(data = iris_data, columns=list(iris_feature_names))
iris_df["target"] = iris_target
# scatter plots
#plt.scatter(x = iris_df["sepal length (cm)"], y = iris_df["sepal width (cm)"], c = iris_df["target"]) #plt plot
#iris_df.plot(kind="scatter", x="sepal length (cm)", y="sepal width (cm)", c = iris_df["target"]) #pd plot
# box plots
#plt.boxplot(iris_df, iris_df["target"]) #?? not sure how to get this to work
#iris_df.boxplot(by = "target", layout = (1,4))
#pyl.show()
myseed = 1234
logreg = linear_model.LogisticRegression()
svc_lin = svm.SVC(kernel='linear', random_state=myseed)
svc_rbf = svm.SVC(kernel='rbf', gamma=0.7, random_state=myseed)
svc_pl3 = svm.SVC(kernel='poly', degree=3, random_state=myseed)
knn3 = neighbors.KNeighborsClassifier(n_neighbors=3)
knn5 = neighbors.KNeighborsClassifier(n_neighbors=5)
models = [logreg, svc_lin, svc_rbf, svc_pl3, knn3, knn5]
cv_kf5 = cross_validation.KFold(n=n_samples, n_folds=5, shuffle=True, random_state=myseed)
cv_loo = cross_validation.LeaveOneOut(n=n_samples)
cv_lpo10 = cross_validation.LeavePOut(n=n_samples, p=2)
cvs = [cv_kf5, cv_loo, cv_lpo10]
for model in models:
for validator in cvs:
scores = cross_validation.cross_val_score(model, iris_data, iris_target, cv=validator)
print "Accuracy: %0.2f (+/- %0.2f), cvs: %0.2f" % ( scores.mean(), scores.std() * 2 , len(scores) )
print "--"
# X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris_data, iris_target, test_size=0.4, random_state=myseed)
# logreg.fit(X_train, y_train)
# print logreg.score(X_test, y_test)
| [
"dbrown@podio.com"
] | dbrown@podio.com |
21384b6912ea4b4605f00abd84ad1759212b43ff | c8f6ddd6e4ced85db81d1f95a07c9057bd494eae | /ch04_thermalHydraulics/python/geom_build.py | f244841c53395bba5cb5005f621bd8bfe0489d96 | [
"LPPL-1.3c"
] | permissive | wcdawn/WilliamDawn-thesis | 21c63a16c677107699fd024aeb0022d2e930d6a8 | 2f002a5df1773ff477f0f7423a22f517957d9844 | refs/heads/master | 2022-05-02T20:14:20.080382 | 2022-04-13T00:07:42 | 2022-04-13T00:07:42 | 169,178,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,216 | py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib
LW = 2
FN = "Times New Roman"
FS = 16
plt.rc("lines", lw=LW)
plt.rc("mathtext", fontset="stix")
plt.rc("font", family=FN, size=FS)
# plot wedge --> chunk --> hex
f2f = 1
width = 0.25
height = 1.0
nHex = 3
plt.figure()
ax = plt.gca()
for i in range(nHex):
idx = i + 1
rect = matplotlib.patches.Rectangle(
[0.0, (idx - 1) * height], width, height, fill=False, lw=LW
)
ax.add_patch(rect)
# right
plt.plot(
[width * (1 + np.cos(np.pi / 6.0)), width * (1 + np.cos(np.pi / 6.0))],
[
(idx - 1) * height + width * np.sin(np.pi / 6.0),
height * idx + width * np.sin(np.pi / 6.0),
],
"-k",
)
# left
plt.plot(
[-width * np.cos(np.pi / 6.0), -width * np.cos(np.pi / 6.0)],
[
(idx - 1) * height + width * np.sin(np.pi / 6.0),
height * idx + width * np.sin(np.pi / 6.0),
],
"-k",
)
# bottom right
plt.plot(
[width, width * (1 + np.cos(np.pi / 6.0))],
[(idx - 1) * height, (idx - 1) * height + width * np.sin(np.pi / 6.0)],
"-k",
)
# bottom left
plt.plot(
[0.0, -width * np.cos(np.pi / 6.0)],
[(idx - 1) * height, (idx - 1) * height + width * np.sin(np.pi / 6.0)],
"-k",
)
# top right
plt.plot(
[width, width * (1 + np.cos(np.pi / 6.0))],
[idx * height, idx * height + width * np.sin(np.pi / 6.0)],
"-k",
)
# top left
plt.plot(
[0.0, -width * np.cos(np.pi / 6.0)],
[idx * height, idx * height + width * np.sin(np.pi / 6.0)],
"-k",
)
# add topper
plt.plot(
[-width * np.cos(np.pi / 6.0), 0.0],
[
nHex * height + width * np.sin(np.pi / 6.0),
nHex * height + 2 * width * np.sin(np.pi / 6.0),
],
"-k",
)
plt.plot(
[0.0, width],
[
nHex * height + 2 * width * np.sin(np.pi / 6.0),
nHex * height + 2 * width * np.sin(np.pi / 6.0),
],
"-k",
)
plt.plot(
[width, width * (1 + np.cos(np.pi / 6.0))],
[
nHex * height + 2 * width * np.sin(np.pi / 6.0),
nHex * height + width * np.sin(np.pi / 6.0),
],
"-k",
)
plt.plot(
[width * (1 + np.cos(np.pi / 6.0)), -width * np.cos(np.pi / 6.0)],
[
nHex * height + width * np.sin(np.pi / 6.0),
nHex * height + width * np.sin(np.pi / 6.0),
],
"-k",
)
# asterisk
plt.plot(
[width * (1 + np.cos(np.pi / 6.0)), -width * np.cos(np.pi / 6.0)],
[
nHex * height + width * np.sin(np.pi / 6.0),
nHex * height + width * np.sin(np.pi / 6.0),
],
"-k",
)
plt.plot(
[0.0, width],
[
nHex * height,
nHex * height + 2.0 * width * np.sin(np.pi / 6.0),
],
"-k",
)
plt.plot(
[width, 0.0],
[
nHex * height,
nHex * height + 2.0 * width * np.sin(np.pi / 6.0),
],
"-k",
)
offset = 2.0
rect = matplotlib.patches.Rectangle([-offset, 0.0], width, height, fill=False, lw=LW)
ax.add_patch(rect)
# right
plt.plot(
np.array([width * (1 + np.cos(np.pi / 6.0)), width * (1 + np.cos(np.pi / 6.0))])
- offset,
[
width * np.sin(np.pi / 6.0),
height + width * np.sin(np.pi / 6.0),
],
"-k",
)
# left
plt.plot(
np.array([-width * np.cos(np.pi / 6.0), -width * np.cos(np.pi / 6.0)]) - offset,
[
width * np.sin(np.pi / 6.0),
height + width * np.sin(np.pi / 6.0),
],
"-k",
)
# bottom right
plt.plot(
np.array([width, width * (1 + np.cos(np.pi / 6.0))]) - offset,
[0.0, width * np.sin(np.pi / 6.0)],
"-k",
)
# bottom left
plt.plot(
np.array([0.0, -width * np.cos(np.pi / 6.0)]) - offset,
[0.0, width * np.sin(np.pi / 6.0)],
"-k",
)
# top right
plt.plot(
np.array([width, width * (1 + np.cos(np.pi / 6.0))]) - offset,
[height, height + width * np.sin(np.pi / 6.0)],
"-k",
)
# top left
plt.plot(
np.array([0.0, -width * np.cos(np.pi / 6.0)]) - offset,
[height, height + width * np.sin(np.pi / 6.0)],
"-k",
)
# add topper
plt.plot(
np.array([-width * np.cos(np.pi / 6.0), 0.0]) - offset,
[
height + width * np.sin(np.pi / 6.0),
height + 2 * width * np.sin(np.pi / 6.0),
],
"-k",
)
plt.plot(
np.array([0.0, width]) - offset,
[
height + 2 * width * np.sin(np.pi / 6.0),
height + 2 * width * np.sin(np.pi / 6.0),
],
"-k",
)
plt.plot(
np.array([width, width * (1 + np.cos(np.pi / 6.0))]) - offset,
[
height + 2 * width * np.sin(np.pi / 6.0),
height + width * np.sin(np.pi / 6.0),
],
"-k",
)
plt.plot(
np.array([width * (1 + np.cos(np.pi / 6.0)), -width * np.cos(np.pi / 6.0)])
- offset,
[
height + width * np.sin(np.pi / 6.0),
height + width * np.sin(np.pi / 6.0),
],
"-k",
)
# asterisk
plt.plot(
np.array([width * (1 + np.cos(np.pi / 6.0)), -width * np.cos(np.pi / 6.0)])
- offset,
[
height + width * np.sin(np.pi / 6.0),
height + width * np.sin(np.pi / 6.0),
],
"-k",
)
plt.plot(
np.array([0.0, width]) - offset,
[
height,
height + 2.0 * width * np.sin(np.pi / 6.0),
],
"-k",
)
plt.plot(
np.array([width, 0.0]) - offset,
[
height,
height + 2.0 * width * np.sin(np.pi / 6.0),
],
"-k",
)
rect = matplotlib.patches.Rectangle(
[-2 * offset, 0.0], width, height, fill=False, lw=LW
)
ax.add_patch(rect)
plt.plot(
np.array([0.0, 0.5 * width]) - 2 * offset,
[
height,
height + width * np.sin(np.pi / 6.0),
],
"-k",
)
plt.plot(
np.array([width, width*0.5]) - 2 * offset,
[
height,
height + width * np.sin(np.pi / 6.0),
],
"-k",
)
plt.arrow(-0.7 * offset, 0.5 * height, 0.25 * offset, 0.0, width=0.1)
plt.arrow(-1.7 * offset, 0.5 * height, 0.25 * offset, 0.0, width=0.1)
plt.arrow(4 * width, 0.0, 0.0, height, width=0.05, fc="k")
plt.text(4 * width, -0.25, "$z$")
ax.axis("equal")
plt.axis("off")
# plt.xlim([-0.5 * width, width])
plt.ylim([-height, nHex + 1 * height])
plt.tight_layout()
plt.savefig("../figs/chunk_description.pdf")
| [
"wcdawn@ncsu.edu"
] | wcdawn@ncsu.edu |
1b5cfbe1f3042ab381911ffa943576eb5a6a5208 | 32904d4841d104143ba0f41cc3aeb749e470f546 | /backend/django/apps/memos/migrations/0008_auto_20191025_2003.py | 3fdde9fa6cac56f3d36a37dc33c06ac8382c74cb | [] | no_license | aurthurm/dispatrace-api-vuejs | 20ec5deee015e69bce7a64dc2d89ccae8941b800 | 56d122318af27ff64755fc515345974631d3026f | refs/heads/master | 2023-01-23T23:03:15.438339 | 2020-10-20T22:09:29 | 2020-10-20T22:09:29 | 219,028,985 | 0 | 1 | null | 2022-12-22T18:31:38 | 2019-11-01T17:08:35 | Vue | UTF-8 | Python | false | false | 540 | py | # Generated by Django 2.2.6 on 2019-10-25 18:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: redeclares MemoAttachment.memo as a cascading
    # ForeignKey with related_name 'memoattachment_attachment'.
    dependencies = [
        ('memos', '0007_memoattachment_memocomment'),
    ]
    operations = [
        migrations.AlterField(
            model_name='memoattachment',
            name='memo',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='memoattachment_attachment', to='memos.Memo'),
        ),
    ]
| [
"aurthurmusendame@gmail.com"
] | aurthurmusendame@gmail.com |
33e6fb56b398cd6635d41be61972d9290f4fa7f1 | cdd79cef15bdf6a0b9098e27028bbe38607bc288 | /数論/Combination/mcomb.py | 0f7e575c0fe0a8647ae6beff5f8fa66747094a11 | [] | no_license | nord2sudjp/atcoder | ee35a3eb35717485dc62627172de24c9dac102fb | 6b1cc5102a615492cc7ff8a33813bbb954641782 | refs/heads/master | 2023-08-25T11:27:14.205593 | 2021-09-27T05:43:04 | 2021-09-27T05:43:04 | 302,855,505 | 0 | 0 | null | null | null | null | SHIFT_JIS | Python | false | false | 654 | py | # https://atcoder.jp/contests/abc145/submissions/10775904
def comb(n,r,mod):
    """Binomial coefficient C(n, r) modulo `mod`.

    `mod` must be prime: each division is done via the modular inverse
    obtained from Fermat's little theorem (pow(i, mod-2, mod)).
    """
    if n < r:
        return 0
    # Use the smaller of r and n-r to shorten the product.
    terms = min(r, n - r)
    result = 1
    for step in range(1, terms + 1):
        result = result * (n + 1 - step) % mod
        result = result * pow(step, mod - 2, mod) % mod
    return result
# NOTE(review): dead leftover from the original AtCoder submission -- `com`
# is undefined (likely a typo for `comb`) and n/m/mod are never defined in
# this file, so this line raises NameError if executed.
print(com(n+m,n,mod))
#
def comb_cal(n):
    """Return (2**n - 1, sum of (2**k - 1) for k = 1..n)."""
    power = 1
    total = 0
    for _ in range(n):
        power *= 2
        total += power - 1
    return (power - 1, total)
# Quick demo of comb_cal; expected values are tabulated in the string below.
i,j=comb_cal(3)
print(i,j)
'''
1: 1 : 1
2: 2 1 : 3
3: 3 3 1 : 7
4: 4 6 4 1 : 15
5: 5 10 10 5 1 : 31
6: 6 15 20 15 6 1 : 63
7: 7 21 35 35 21 7 1 : 127
8: 8 28 56 70 56 28 8 1 : 255
9: 9 36 84 126 126 84 36 9 1 : 511
1013
'''
| [
"nord2sudjp@gmail.com"
] | nord2sudjp@gmail.com |
cb9c8805b2401a47ffd0ff9c924fa33c3ef2548c | efcd916d9733ac4d2f4a162ac06d5b1356ec42d0 | /model/octave_unet.py | b487709c95a6bbe1d0895c5fe80e3d434d268c48 | [] | no_license | seandatasci/unet | d4d1b6c3016a1f673023c77c9e4f8ae6d0843e0f | 3be1c69956abf9ddb3d6565436f85e92c6e4e486 | refs/heads/master | 2022-11-05T08:10:03.659948 | 2020-06-16T01:44:54 | 2020-06-16T01:44:54 | 272,579,214 | 0 | 0 | null | 2020-06-16T01:16:41 | 2020-06-16T01:16:40 | null | UTF-8 | Python | false | false | 7,411 | py | # model code all in this cell
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
import tensorflow as tf
from sklearn.utils import class_weight
from tensorflow.keras import layers
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import *
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras_octave_conv import OctaveConv2D
def o_unet(pretrained_weights = None,input_size = (800,600,1)):
    """Octave-convolution U-Net.

    Encoder: four double octave-conv blocks (64/128/256/512 filters) with 2x2
    max pooling; bottleneck: two 1024-filter octave blocks with dropout;
    decoder: transposed octave convs with encoder skip concatenations; head:
    a 32-filter octave conv collapsed to the high branch (ratio_out=0.0)
    followed by a 1x1 sigmoid convolution producing a 1-channel mask.

    NOTE(review): `pretrained_weights` is accepted but never used here --
    confirm whether weights were meant to be loaded before returning.
    """
    def _bn_relu(tensor):
        # Every convolution in this network is followed by BatchNorm + ReLU.
        tensor = layers.BatchNormalization()(tensor)
        return layers.Activation("relu")(tensor)

    def _octave(high, low, filters):
        # One octave convolution on the (high, low) frequency pair.
        high, low = OctaveConv2D(filters)([high, low])
        return _bn_relu(high), _bn_relu(low)

    def _double_octave(high, low, filters):
        # Two stacked octave conv + BN + ReLU blocks (standard U-Net block).
        high, low = _octave(high, low, filters)
        return _octave(high, low, filters)

    def _pool(high, low):
        # 2x2 max pooling applied to both frequency branches.
        return layers.MaxPooling2D(2)(high), layers.MaxPooling2D(2)(low)

    def _up_block(high, low, skip_high, skip_low, filters):
        # 2x transposed octave conv, BN+ReLU, channel-concat with the encoder
        # skip connections, then a double octave block.
        up_high, up_low = OctaveConv2D(filters, use_transpose=True, strides=(2, 2))([high, low])
        up_high, up_low = _bn_relu(up_high), _bn_relu(up_low)
        merged_high = concatenate([skip_high, up_high], axis=3)
        merged_low = concatenate([skip_low, up_low], axis=3)
        return _double_octave(merged_high, merged_low, filters)

    inputs = Input(input_size)
    # The low-frequency branch starts as a 2x downsampled copy of the input.
    low = layers.AveragePooling2D(2)(inputs)

    # Encoder.
    high1, low1 = _double_octave(inputs, low, 64)
    high2, low2 = _double_octave(*_pool(high1, low1), 128)
    high3, low3 = _double_octave(*_pool(high2, low2), 256)
    high4, low4 = _double_octave(*_pool(high3, low3), 512)

    # Bottleneck: two octave blocks, each followed by dropout on both branches.
    high5, low5 = _octave(*_pool(high4, low4), 1024)
    high5, low5 = Dropout(0.4)(high5), Dropout(0.4)(low5)
    high5, low5 = _octave(high5, low5, 1024)
    high5, low5 = Dropout(0.4)(high5), Dropout(0.4)(low5)

    # Decoder with skip connections.
    high6, low6 = _up_block(high5, low5, high4, low4, 512)
    high7, low7 = _up_block(high6, low6, high3, low3, 256)
    high8, low8 = _up_block(high7, low7, high2, low2, 128)
    high9, low9 = _up_block(high8, low8, high1, low1, 64)

    # Collapse to the high-frequency branch only, then the 1x1 sigmoid head.
    conv9 = OctaveConv2D(32, ratio_out=0.0)([high9, low9])
    conv9 = layers.Activation("sigmoid")(conv9)
    conv10 = layers.Conv2D(1, 1, activation = 'sigmoid')(conv9)

    model = Model(inputs=[inputs], outputs=[conv10])
    return model
"noreply@github.com"
] | noreply@github.com |
1ea16f21550a63e6a23e2ef4a9f3e2cf5ea58db1 | 215dbe00272fbc53cc6239e972abce6f2b163003 | /Day5_Exercises/exercise1-2.py | 07a82c9a3d73661020c591f4f453df32fc92ff73 | [] | no_license | kkt8282/IOT_Raspberry | 29579f0793bbe56d9e85c9a6625e6caaba419d19 | 9a56a8974c0a6640d7616614e8a76417ced8931a | refs/heads/master | 2022-12-14T01:41:40.677777 | 2020-08-31T01:44:53 | 2020-08-31T01:44:53 | 291,587,488 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | import RPi.GPIO as GPIO
from time import sleep
LED_1 = 4
LED_2 = 5
LED_3 = 14
LED_4 = 15
LED = [LED_1, LED_2, LED_4, LED_3]
def main():
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(LED, GPIO.OUT, initial=False)
print("main() program running...")
try:
while True:
LEDNum = int(input("LED NUMBER: "))
LEDSet = input("LED SET: ")
if LEDSet == "ON":
LEDSet = GPIO.HIGH
if LEDSet == "OFF":
LEDSet = GPIO.LOW
GPIO.output(LED[LEDNum-1], LEDSet)
except KeyboardInterrupt:
GPIO.cleanup()
if __name__ == '__main__':
main() | [
"63182249+kkt8282@users.noreply.github.com"
] | 63182249+kkt8282@users.noreply.github.com |
a822fbeeb592b742c4ddbe11b82b3ead6703f4e6 | 26e2c68f929ecc8bb5c20c6b8cd200b66d99def5 | /DjangoDopLEsson/products/migrations/0001_initial.py | 0ce5c7a66193f21ead1baaf75f96d6d86c10e249 | [] | no_license | kirigaikabuto/DjangoLessonsPart | ad19c1da0d1da27830c6fdf1b07353632bbc097d | 4442518ae1f0a8641e066c9a63ff4e55e04d5fe5 | refs/heads/master | 2022-11-28T10:29:54.428001 | 2020-08-03T09:26:42 | 2020-08-03T09:26:42 | 273,497,052 | 0 | 0 | null | 2020-08-03T09:26:43 | 2020-06-19T13:11:15 | Python | UTF-8 | Python | false | false | 590 | py | # Generated by Django 3.0.7 on 2020-06-24 05:41
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Product table with an
    # auto-increment primary key plus name/description/price columns.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', models.TextField()),
                ('price', models.IntegerField()),
            ],
        ),
    ]
| [
"ytleugazy@dar.kz"
] | ytleugazy@dar.kz |
4625b7562e6935395144e1da64a15c0b078f999e | 52b5773617a1b972a905de4d692540d26ff74926 | /triangle.py | aabaa8b43b28d0f6063839f8844acbb0a8568919 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | # this function is meant to print a triangle
def triangle():
    """Print a 4-row right triangle of space-separated asterisks."""
    for row in range(4):
        # Row `row` contains row + 1 stars, each followed by a space.
        for _ in range(row + 1):
            print("*", end=" ")
        print("\r")
triangle()
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
e8828d6e1b94af027e4b77ca8d909c5b44902ee8 | 50a087382c8da4db28461644f8e4c61db7a3c230 | /titanicapidata.py | adf49a50749f307cfcc432ed5266563698934369 | [] | no_license | deep2555/Titanic-dataset-project | 91b478df513447d2416dca7f2f947fe02cc9c890 | 25ba4824d6c246c79526ed456447df81168d6707 | refs/heads/main | 2023-05-31T05:11:02.503655 | 2021-06-16T15:53:34 | 2021-06-16T15:53:34 | 377,543,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | #import flask
from flask import Flask , request, json,jsonify
import requests
import pickle
import numpy as np
app = Flask(__name__)
loaded_model = pickle.load(open("titanic.sav", "rb"))
@app.route("/", methods = ["POST"])
def titanic_predict():
    # Predict survival for one passenger from a JSON POST body.
    user_input = request.json
    print(user_input)
    # Feature vector; order must match what the pickled model was trained on.
    input_list = [user_input["PassengerId"], user_input["Pclass"], user_input["Age"], user_input["SibSp"], user_input["Fare"], user_input["male"], user_input["Q"], user_input["S"]]
    prediction = loaded_model.predict([input_list])
    confidence = loaded_model.predict_proba([input_list])
    response = {}
    # Predicted class plus the winning class probability as a percentage.
    response["prediction"] = int(prediction[0])
    response["confidence"] = str(round(np.amax(confidence[0]) *100,2))
    return jsonify(response)
if __name__ == "__main__":
    # NOTE(review): port is passed as a string -- confirm Flask coerces it;
    # the documented form is an int (port=5000).
    app.run(host = "0.0.0.0", port = "5000")
"noreply@github.com"
] | noreply@github.com |
190ccccc1d21c2fef03056f73525f3e2d5d08a51 | 1d123feb5bee8cfcad2267bd2683cd5328a764d8 | /src/LivestockCV/core/transform/rescale.py | 10730d13260807f02474c184a4fea5f112a72265 | [
"MIT"
] | permissive | peschelgroup/LivestockCV | cd7803f74b5c37c04ede6cf1fc6f6da043821174 | e5746af75935d5000ba3ad26d09b6868fae76b76 | refs/heads/main | 2023-06-27T04:52:26.533031 | 2021-08-01T20:01:22 | 2021-08-01T20:01:22 | 379,750,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,289 | py | # Rescale grayscale images to user defined range
import os
import numpy as np
from LivestockCV.core import fatal_error
from LivestockCV.core import print_image
from LivestockCV.core import plot_image
from LivestockCV.core import params
def rescale(gray_img, min_value=0, max_value=255):
    """Linearly stretch a grayscale image onto [min_value, max_value].

    :param gray_img: numpy.ndarray, 2-D grayscale image data
    :param min_value: int, new minimum of the output range (default 0)
    :param max_value: int, new maximum of the output range (default 255)
    :return rescaled_img: numpy.ndarray, uint8 rescaled image
    """
    if np.ndim(gray_img) != 2:
        fatal_error("Image is not grayscale")
    # Map [nanmin, nanmax] of the input onto the requested output range.
    source_range = (np.nanmin(gray_img), np.nanmax(gray_img))
    rescaled_img = np.interp(gray_img, source_range, (min_value, max_value))
    rescaled_img = rescaled_img.astype('uint8')
    # Autoincrement the device counter
    params.device += 1
    if params.debug == 'print':
        debug_name = str(params.device) + "_rescaled.png"
        print_image(rescaled_img, os.path.join(params.debug_outdir, debug_name))
    elif params.debug == 'plot':
        plot_image(rescaled_img, cmap='gray')
    return rescaled_img
| [
"noreply@github.com"
] | noreply@github.com |
1e23cc6f5ebb7e8346c498f65e57c1cb9222b3d2 | f7aa42f7f1677a5db519449c6ffc03f46398f485 | /homepage/migrations/0006_faculty_user.py | 0ac1c599acd7ccb77dbd7fd36a654a614cd5037b | [
"MIT"
] | permissive | abmishra1/IntelligentFacultyPortal | 7ee0c7397221f49f6007998d8c2fd24347b83fe1 | 4aae3a3024c10233acfa19f024fd723fb6ef1336 | refs/heads/master | 2020-07-15T13:29:54.978990 | 2018-05-18T18:51:43 | 2018-05-18T18:51:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-15 17:02
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('homepage', '0005_auto_20171115_0300'),
]
operations = [
migrations.AddField(
model_name='faculty',
name='user',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"nitinkedia7@gmail.com"
] | nitinkedia7@gmail.com |
ece98bcf48efd5bae4412e46e743d79b5fdd7e83 | 33f56fd967f012edd86e5fed2d6992e2cc5c2a1f | /keras_97/keras-simple-model-0-97103-best-public-score.py | a5e71f2060cacfc2e34c67b15ae2263c6c05e228 | [] | no_license | CornellDataScience/IntSys-Seedling | 74571a69ef15dd79f0dd917161de027a11df14ef | b8296a8fca37167d333c263ea732ff9f850035bf | refs/heads/master | 2022-01-10T15:39:58.253066 | 2019-05-15T02:14:11 | 2019-05-15T02:14:11 | 169,326,358 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,756 | py | import pandas as pd
import numpy as np
import os
import imageio
import csv
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Activation
from keras.layers import Dropout
from keras.layers import Maximum
from keras.layers import ZeroPadding2D
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras import regularizers
from keras.layers import BatchNormalization
from keras.optimizers import Adam, SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras.layers.advanced_activations import LeakyReLU
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from skimage.transform import resize as imresize
from tqdm import tqdm
from subprocess import check_output
#print(check_output(["ls", "data"]).decode("utf8"))
BATCH_SIZE = 16
EPOCHS = 30
RANDOM_STATE = 11
CLASS = {
'Black-grass': 0,
'Charlock': 1,
'Cleavers': 2,
'Common Chickweed': 3,
'Common wheat': 4,
'Fat Hen': 5,
'Loose Silky-bent': 6,
'Maize': 7,
'Scentless Mayweed': 8,
'Shepherds Purse': 9,
'Small-flowered Cranesbill': 10,
'Sugar beet': 11
}
INV_CLASS = {
0: 'Black-grass',
1: 'Charlock',
2: 'Cleavers',
3: 'Common Chickweed',
4: 'Common wheat',
5: 'Fat Hen',
6: 'Loose Silky-bent',
7: 'Maize',
8: 'Scentless Mayweed',
9: 'Shepherds Purse',
10: 'Small-flowered Cranesbill',
11: 'Sugar beet'
}
# Dense layers set
def dense_set(inp_layer, n, activation, drop_rate=0.):
    """Dropout -> Dense(n) -> BatchNorm -> Activation building block."""
    x = Dropout(drop_rate)(inp_layer)
    x = Dense(n)(x)
    x = BatchNormalization(axis=-1)(x)
    return Activation(activation=activation)(x)
# Conv. layers set
def conv_layer(feature_batch, feature_map, kernel_size=(3, 3),strides=(1,1), zp_flag=False):
    """Conv2D -> BatchNorm -> LeakyReLU block, with optional 1px zero padding."""
    x = ZeroPadding2D((1,1))(feature_batch) if zp_flag else feature_batch
    x = Conv2D(filters=feature_map, kernel_size=kernel_size, strides=strides)(x)
    x = BatchNormalization(axis=3)(x)
    return LeakyReLU(1/10)(x)
# simple model
def get_model():
    """Build and compile the 51x51x3 CNN that classifies the 12 plant classes."""
    inp_img = Input(shape=(51, 51, 3))
    x = inp_img
    # Three conv stages, each followed by a 3x3/stride-2 max-pool:
    # 2 x Conv(64), 2 x Conv(128), 3 x Conv(256).
    for n_convs, n_filters in ((2, 64), (2, 128), (3, 256)):
        for _ in range(n_convs):
            x = conv_layer(x, n_filters, zp_flag=False)
        x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)
    # Classifier head.
    x = Flatten()(x)
    x = dense_set(x, 128, activation='tanh')
    out = dense_set(x, 12, activation='softmax')

    model = Model(inputs=inp_img, outputs=out)
    # Training recipe: the first 50 epochs used Adam, then 30 epochs of SGD.
    # opt = Adam(lr=2 * 1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    opt = SGD(lr=1 * 1e-1, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    model.summary()
    return model
def get_callbacks(filepath, patience=5):
    """Return [LR reduction on val_acc plateau, best-only model checkpoint]."""
    return [
        ReduceLROnPlateau(monitor='val_acc', factor=0.1, epsilon=1e-5,
                          patience=patience, verbose=1),
        ModelCheckpoint(filepath, save_best_only=True),
    ]
# I trained model about 12h on GTX 950.
def train_model(img, target):
    """Run the SGD fine-tuning phase, warm-starting from the Adam-phase weights."""
    callbacks = get_callbacks(filepath='model_weight_SGD.hdf5', patience=6)
    model = get_model()
    # Continue from the weights produced by the earlier Adam training phase.
    model.load_weights(filepath='model_weight_Adam.hdf5')
    x_train, x_valid, y_train, y_valid = train_test_split(
        img, target, shuffle=True, train_size=0.8, random_state=RANDOM_STATE)
    # Heavy augmentation: seedling photos have no canonical orientation.
    augmenter = ImageDataGenerator(rotation_range=360.,
                                   width_shift_range=0.3,
                                   height_shift_range=0.3,
                                   zoom_range=0.3,
                                   horizontal_flip=True,
                                   vertical_flip=True)
    model.fit_generator(augmenter.flow(x_train, y_train, batch_size=BATCH_SIZE),
                        steps_per_epoch=10*len(x_train)/BATCH_SIZE,
                        epochs=EPOCHS,
                        verbose=1,
                        shuffle=True,
                        validation_data=(x_valid, y_valid),
                        callbacks=callbacks)
def test_model(img, label):
    """Predict classes for *img* and write the Kaggle submission file sub.csv."""
    model = get_model()
    model.load_weights(filepath='../data/plant-weight/model_weight_SGD.hdf5')
    probabilities = model.predict(img, verbose=1)
    predictions = probabilities.argmax(axis=-1)
    species_names = [INV_CLASS[p] for p in predictions]
    submission = pd.DataFrame({"file": label,
                               "species": species_names})
    submission.to_csv("sub.csv", index=False, header=True)
# Resize all image to 51x51
def img_reshape(img):
    """Resize an image to the network's 51x51x3 input shape."""
    return imresize(img, (51, 51, 3))
# get image tag
def img_label(path):
    """Return the file name: the last '/'-separated component of *path*.

    The original wrapped the result in str() twice; str.split already
    yields strings, so both conversions were redundant no-ops.
    """
    return path.split('/')[-1]
# get plant class on image
def img_class(path):
    """Return the plant class, i.e. the parent directory name of *path*."""
    parts = path.split('/')
    return str(parts[-2])
# fill train and test dict
def fill_dict(paths, some_dict):
    """Load every image in *paths* into some_dict['image'/'label'(/'class')].

    The train/test decision is based on the first path only, so it is
    computed once instead of once per loop iteration.
    """
    is_train = 'train' in paths[0]
    if is_train:
        text = 'Start fill train_dict'
    elif 'test' in paths[0]:
        text = 'Start fill test_dict'
    else:
        text = ''
    for p in tqdm(paths, ascii=True, ncols=85, desc=text):
        some_dict['image'].append(img_reshape(imageio.imread(p)))
        some_dict['label'].append(img_label(p))
        # Only the training set carries class (species) labels.
        if is_train:
            some_dict['class'].append(img_class(p))
    return some_dict
# read image from dir. and fill train and test dict
def reader():
    """Walk '../data', collect train/test file paths, and load the images.

    Returns (train_dict, test_dict). Loading of the training set is
    commented out below, so train_dict stays empty unless re-enabled.
    """
    train_path = []
    test_path = []
    for root, dirs, files in os.walk('../data'):
        if dirs != []:
            # Non-leaf directory: just log the layout.
            print('Root:\n'+str(root))
            print('Dirs:\n'+str(dirs))
        else:
            # Leaf directory: bucket each file by whether it is train or test.
            # (A file_ext list was accumulated here originally but never
            # read anywhere -- dead code, removed.)
            for f in files:
                if 'train' in root:
                    train_path.append(os.path.join(root, f))
                elif 'test' in root:
                    test_path.append(os.path.join(root, f))
    train_dict = {
        'image': [],
        'label': [],
        'class': []
    }
    test_dict = {
        'image': [],
        'label': []
    }
    #train_dict = fill_dict(train_path, train_dict)
    test_dict = fill_dict(test_path, test_dict)
    return train_dict, test_dict
# I commented out some of the code for learning the model.
def score_from_csv():
    # NOTE(review): unfinished stub -- the DictReader is created but never
    # consumed and nothing is returned; presumably intended to re-score the
    # generated submission. The file handle is also never closed.
    input_file = csv.DictReader(open("sub.csv"))
def main():
    """Entry point: load the test images and write the submission CSV."""
    train_dict, test_dict = reader()
    # Training path (disabled -- the author notes it is far too slow to
    # train on a Kaggle kernel; ~12h on a GTX 950):
    #X_train = np.array(train_dict['image'])
    #y_train = to_categorical(np.array([CLASS[l] for l in train_dict['class']]))
    #train_model(X_train, y_train)
    test_model(np.array(test_dict['image']), test_dict['label'])


if __name__ == '__main__':
    main()
| [
"bjk224@cornell.edu"
] | bjk224@cornell.edu |
9f944dbc67a1bb892749180712c64565d636b184 | 25bd603e5775558754bf32adbfbbc6c1d8d86db8 | /comments/migrations/0001_initial.py | 9f3652db9d54a856faabfcdf57101f469120b345 | [] | no_license | yeimermolina/django-blog-project | ee75fa05412c8d1194c57cac2b8e8909d4493892 | 3ca64d3f4b8f80f91a4a2527ed781e9cdceebb2d | refs/heads/master | 2021-01-19T09:20:31.212788 | 2017-04-10T00:44:11 | 2017-04-10T00:44:11 | 87,750,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,344 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-02-28 17:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema migration for the `comments` app (auto-generated by
    # Django 1.9.5): creates the Comment table. Later migrations depend on
    # this file, so its operations must stay stable -- do not edit by hand.
    initial = True
    dependencies = [
        # Requires the ContentType table and the (swappable) user model.
        ('contenttypes', '0002_remove_content_type_name'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                # content_type + object_id look like the generic-relation
                # pattern (comment attachable to any model) -- confirm a
                # GenericForeignKey exists on the Comment model itself.
                ('object_id', models.PositiveIntegerField()),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
                # Self-reference: supports threaded replies; null for top-level.
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='comments.Comment')),
                ('user', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Newest comments first by default.
                'ordering': ['-timestamp'],
            },
        ),
    ]
| [
"yeimer.molina@gmail.com"
] | yeimer.molina@gmail.com |
1f125389bc31c3efbfd5132b1e77e427614268ff | 0b5b70b5353e887fb168350bb32ceb6cc23249fa | /homework/week04/sparta/__init__.py | e2dbdc125587bf3013e9e42fa83e6b40d59f28e3 | [] | no_license | diligejy/sparta_d | 2d8604a59899bb21cd97e44e13e3dd91bc9e5e77 | 55a01b61ad420302e53bab327daee2e919b02e73 | refs/heads/main | 2023-05-14T21:01:13.677335 | 2021-05-22T06:05:13 | 2021-05-22T06:05:13 | 358,761,190 | 0 | 0 | null | 2021-05-22T06:07:04 | 2021-04-17T01:49:23 | HTML | UTF-8 | Python | false | false | 601 | py | from flask import Flask
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
import config
# Extension singletons; they are bound to a concrete app inside create_app()
# (the Flask application-factory pattern).
db = SQLAlchemy()
migrate = Migrate()
def create_app():
    """Application factory: build the Flask app, wire the ORM and blueprints."""
    app = Flask(__name__)
    app.config.from_object(config)
    # ORM: attach SQLAlchemy and the migration engine to this app instance.
    db.init_app(app)
    migrate.init_app(app, db)
    # Imported inside the factory -- presumably to avoid circular imports
    # with the module-level `db`; verify against the views/models modules.
    from . import models
    from .views import main_views, memo_views, book_review_views, favorite_views
    # Register each feature's URL routes.
    app.register_blueprint(main_views.bp)
    app.register_blueprint(memo_views.bp)
    app.register_blueprint(book_review_views.bp)
    app.register_blueprint(favorite_views.bp)
    # NOTE(review): the trailing "| [" below is dataset-extraction residue.
    return app | [
"sjy049@gmail.com"
] | sjy049@gmail.com |
4302c2a92d3fd0e16720b5d0bb2c81e469aa422d | 71e8bdddd84338bbb2d77934351d76251c2fd77d | /unique-paths.py | 39eeaaf353b483ccb782eeba05930f71fcbd9851 | [] | no_license | onestarshang/leetcode | 3da20fbec1b42d3565eb95a64ea3f30c29f1e1eb | 0a7aa09a2b95e4caca5b5123fb735ceb5c01e992 | refs/heads/master | 2021-01-09T06:00:06.018037 | 2016-12-17T16:17:49 | 2016-12-17T16:17:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 898 | py | #coding: utf-8
'''
http://oj.leetcode.com/problems/unique-paths/
A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
The robot can only move either down or right at any point in time. The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).
How many possible unique paths are there?
Above is a 3 x 7 grid. How many possible unique paths are there?
Note: m and n will be at most 100.
'''
class Solution:
    # @return an integer
    def uniquePaths(self, m, n):
        """Count distinct right/down-only paths across an m x n grid.

        Same DP recurrence as the full-table version, but kept as a single
        rolling row: after processing row i, row[j] holds the number of
        paths reaching cell (i, j).
        """
        if m == 0 or n == 0:
            return 0
        row = [1] * n
        for _ in range(1, m):
            for j in range(1, n):
                row[j] += row[j - 1]
        return row[-1]
| [
"irachex@gmail.com"
] | irachex@gmail.com |
25f34ab887b47fdda71884e0831b7bf6e036da15 | 5c2ead245750a317ef4bdbfa6d0e17fd21f7aa91 | /rango/tests.py | a6b3519ed7821ba749642478a189f769435dbe62 | [] | no_license | PiotrusWatson/rango | 90386bcbe2b51264dc4884a2a106c0e451717db5 | d8d7bb937e84f7fc225a8a0b1dceee78f2908dd9 | refs/heads/master | 2021-01-12T00:38:16.318824 | 2017-02-10T12:52:35 | 2017-02-10T12:52:35 | 78,750,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,885 | py | from django.test import TestCase
from rango.models import Category
from django.core.urlresolvers import reverse
def add_cat(name, views, likes):
    """Create (or fetch) a Category with the given name and set its counters."""
    category, _ = Category.objects.get_or_create(name=name)
    category.views = views
    category.likes = likes
    category.save()
    return category
class IndexViewTests(TestCase):

    def test_index_view_with_no_categories(self):
        """With no categories in the DB, the index shows a placeholder message."""
        response = self.client.get(reverse('index'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "There are no categories present.")
        self.assertQuerysetEqual(response.context['categories'], [])

    def test_index_view_with_categories(self):
        """With categories present, the index page lists all of them."""
        for name in ('test', 'temp', 'tmp', 'tmp test temp'):
            add_cat(name, 1, 1)
        response = self.client.get(reverse('index'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "tmp test temp")
        self.assertEqual(len(response.context['categories']), 4)
class CategoryMethodTests(TestCase):

    def test_ensure_views_are_positive(self):
        """After save(), a category created with negative views has views >= 0."""
        category = Category(name="test", views=-1, likes=0)
        category.save()
        self.assertEqual((category.views >= 0), True)

    def test_slug_line_creation(self):
        """save() populates the slug, e.g. "Random Category String" -> "random-category-string"."""
        category = Category(name="Random Category String")
        category.save()
        self.assertEqual(category.slug, "random-category-string")
# Create your tests here.
| [
"piotruswatson@gmail.com"
] | piotruswatson@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.