Dataset schema (column, dtype, observed stats):

| column | dtype | stats |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | lengths 2 to 616 |
| content_id | string | length 40 |
| detected_licenses | list | lengths 0 to 69 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 to 118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | lengths 4 to 63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k to 686M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2 to 10.3M |
| extension | string | 246 classes |
| content | string | lengths 2 to 10.3M |
| authors | list | length 1 |
| author_id | string | lengths 0 to 212 |

---

blob_id: 1381ca116c99cc6ea7e9337e43bb5944241029d8
directory_id: fd5b8eb6894f51e17f9d918fb8166162832c36fe
path: /norix/pipelines.py
content_id: a342c2a7ebeb241243985a04c1628472009cf86c
detected_licenses: ["Unlicense"]
license_type: permissive
repo_name: busla/norix-scraper
snapshot_id: 608d2c912ff18c7d7fbe98364b378f10c1281a93
revision_id: 994c23865039eaa949089b680c881697cb504eb8
branch_name: refs/heads/master
visit_date: 2020-12-03T05:10:51.615201
revision_date: 2015-10-01T13:48:14
committer_date: 2015-10-01T13:48:14
github_id: 33,375,845
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,420
extension: py
content:
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
from norix.items import *
from scrapy.conf import settings
from scrapy.exceptions import DropItem
class PlayersPipeline(object):
def __init__(self):
connection = pymongo.MongoClient(
settings['MONGODB_SERVER'],
settings['MONGODB_PORT']
)
self.db = connection[settings['MONGODB_DB']]
def process_item(self, item, spider):
        if not isinstance(item, PlayerItem):
            return item  # return the item so other pipelines can handle it
db = self.db
players = db['players']
player_seminars = db['player_seminars__seminar_players']
        # PyMongo 3+: Collection.update() is deprecated; update_one with $set + upsert
        players.update_one(
            {
                'ssn': item['ssn']
            },
            {'$set': {
                'ssn': item['ssn'],
                'player_name': item['player_name'],
                'email': item['email'],
                'phone': item['phone'],
                'status': item['status']
            }},
            upsert=True)
player_seminars.insert_one(
{
'seminar_players': item['seminars'],
'player_seminars': item['ssn']
})
        valid = True
        for field in item:
            if not item[field]:
                valid = False
                raise DropItem("Missing {0}!".format(field))
if valid:
#self.collection.insert(dict(item))
#self.collection.findAndModify(dict(item), {'upsert':'true'});
spider.logger.info("Player %s added to collection", item['player_name'])
return item
class SeminarPipeline(object):
def __init__(self):
connection = pymongo.MongoClient(
settings['MONGODB_SERVER'],
settings['MONGODB_PORT']
)
self.db = connection[settings['MONGODB_DB']]
def process_item(self, item, spider):
        if not isinstance(item, SeminarItem):
            return item  # return the item so other pipelines can handle it
db = self.db
seminar = db['seminars']
#user_db = db['users']
user_seminars = db['seminar_seminar_has_users__user_user_has_seminars']
        seminar.update_one({'seminar_id': item['seminar_id']}, {'$set': dict(item)}, upsert=True)
#find_user = user_db.find({'username': spider.user, 'club': spider.club})
#spider.logger.info(spider.user_obj['_id'])
        user_seminars.update_one(
            {
                'user_user_has_seminars': spider.user_obj['_id'],
                'seminar_seminar_has_users': item['seminar_id']
            },
            {'$set': {
                'user_user_has_seminars': spider.user_obj['_id'],
                'seminar_seminar_has_users': item['seminar_id']
            }},
            upsert=True)
        valid = True
        for field in item:
            if not item[field]:
                valid = False
                raise DropItem("Missing {0}!".format(field))
if valid:
spider.logger.info("Seminar %s added to collection", item['seminar_name'])
return item
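For these pipelines to run, the Scrapy project must register them and define the MongoDB settings that the constructors read; a minimal sketch (only the setting names are implied by the code above, the values are assumptions):

# settings.py -- hypothetical values; the real project defines its own.
ITEM_PIPELINES = {
    'norix.pipelines.PlayersPipeline': 300,
    'norix.pipelines.SeminarPipeline': 400,
}
MONGODB_SERVER = 'localhost'
MONGODB_PORT = 27017
MONGODB_DB = 'norix'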
authors: ["nonni@nonni.cc"]
author_id: nonni@nonni.cc

---

blob_id: fe737470b74d64cbc5ac466cb42e40ada5666b32
directory_id: ee295a3f82f22d27f2aa4e9697eefa38d3660bd1
path: /coaches/admin.py
content_id: c19ebc5d96cd0347d47c588adf3e64d086a63396
detected_licenses: []
license_type: no_license
repo_name: OlgaChe/pybursa
snapshot_id: d8373c730f5384458f402483a82318ee2d967b4b
revision_id: 5acd1645248444da65f4bc4330f5ea60e4e92c58
branch_name: refs/heads/master
visit_date: 2020-12-11T07:40:24.542960
revision_date: 2015-01-11T16:31:09
committer_date: 2015-01-11T16:31:09
github_id: 28,009,017
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 300
extension: py
content:
from django.contrib import admin
from coaches.models import Coach
@admin.register(Coach)
class CoachAdmin(admin.ModelAdmin):
    list_display = ['name', 'surname', 'email', 'phone']
radio_fields = {'job': admin.HORIZONTAL, 'dossier': admin.HORIZONTAL}
save_as = True
save_on_top = True
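admin.radio_fields only works for fields declared with choices (or ForeignKey), so Coach.job and Coach.dossier must be choice fields; a hypothetical model sketch consistent with this admin (field names taken from the admin, everything else assumed):

# coaches/models.py -- a sketch, not the repository's actual model.
from django.db import models

class Coach(models.Model):
    JOB_CHOICES = [('lecturer', 'Lecturer'), ('assistant', 'Assistant')]  # assumed
    DOSSIER_CHOICES = [('open', 'Open'), ('closed', 'Closed')]            # assumed

    name = models.CharField(max_length=100)
    surname = models.CharField(max_length=100)
    email = models.EmailField()
    phone = models.CharField(max_length=30)
    job = models.CharField(max_length=20, choices=JOB_CHOICES)
    dossier = models.CharField(max_length=20, choices=DOSSIER_CHOICES)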
authors: ["olgache73@gmail.com"]
author_id: olgache73@gmail.com

---

blob_id: ef671cbf550f126fb9ae19a992829f6fc521cdc6
directory_id: a71582e89e84a4fae2595f034d06af6d8ad2d43a
path: /tensorflow/compiler/tests/while_test.py
content_id: 15a31111cb6b51f6d6e501b86d906d9ba53d1c22
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: tfboyd/tensorflow
snapshot_id: 5328b1cabb3e24cb9534480fe6a8d18c4beeffb8
revision_id: 865004e8aa9ba630864ecab18381354827efe217
branch_name: refs/heads/master
visit_date: 2021-07-06T09:41:36.700837
revision_date: 2019-04-01T20:21:03
committer_date: 2019-04-01T20:26:09
github_id: 91,494,603
star_events_count: 3
fork_events_count: 0
gha_license_id: Apache-2.0
gha_event_created_at: 2018-07-17T22:45:10
gha_created_at: 2017-05-16T19:06:01
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 9,093
extension: py
content:
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for while loops in XLA."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class WhileTest(xla_test.XLATestCase):
def testSingletonLoopHandrolled(self):
# Define a function for the loop body
@function.Defun(dtypes.int32)
def loop_body(step):
step_out = step + constant_op.constant(1, dtype=dtypes.int32)
return step_out
# Define a function for the loop condition
@function.Defun(dtypes.int32)
def loop_cond(step):
return step < 10
with self.cached_session() as sess:
init_index = array_ops.placeholder(dtypes.int32, [])
with self.test_scope():
loop_outputs = xla.while_loop([init_index], loop_cond, loop_body)
result = sess.run(loop_outputs, {init_index: 0})
self.assertAllClose(result, [10], rtol=1e-3)
def testCountingLoopHandrolled(self):
# Define a function for the loop body
@function.Defun(dtypes.int32, dtypes.float32)
def loop_body(step, rsum):
step_out = step + constant_op.constant(1, dtype=dtypes.int32)
sum_out = rsum + constant_op.constant(1.5, dtype=dtypes.float32)
return step_out, sum_out
# Define a function for the loop condition
@function.Defun(dtypes.int32, dtypes.float32)
def loop_cond(step, rsum):
del rsum
return step < 10
with self.cached_session() as sess:
init_index = array_ops.placeholder(dtypes.int32, [])
init_sum = array_ops.placeholder(dtypes.float32, [])
with self.test_scope():
loop_outputs = xla.while_loop([init_index, init_sum], loop_cond,
loop_body)
result = sess.run(loop_outputs, {init_index: 0, init_sum: 0.0})
self.assertAllClose(result, [10, 15.0], rtol=1e-3)
no_iters_result = sess.run(loop_outputs, {init_index: 10, init_sum: 0.0})
self.assertAllClose(no_iters_result, [10, 0.0], rtol=1e-3)
def testCountingLoopHandrolledC64(self):
# Define a function for the loop body
@function.Defun(dtypes.int32, dtypes.complex64)
def loop_body(step, rsum):
step_out = step + constant_op.constant(1, dtype=dtypes.int32)
sum_out = rsum + constant_op.constant(1.5 + 2j, dtype=dtypes.complex64)
return step_out, sum_out
# Define a function for the loop condition
@function.Defun(dtypes.int32, dtypes.complex64)
def loop_cond(step, rsum):
del rsum
return step < 10
with self.cached_session() as sess:
init_index = array_ops.placeholder(dtypes.int32, [])
init_sum = array_ops.placeholder(dtypes.complex64, [])
with self.test_scope():
loop_outputs = xla.while_loop([init_index, init_sum], loop_cond,
loop_body)
result = sess.run(loop_outputs, {init_index: 0, init_sum: 0.0})
self.assertAllClose(result[1], np.complex64(15 + 20j), rtol=1e-3)
no_iters_result = sess.run(loop_outputs, {init_index: 10, init_sum: 0.0})
self.assertAllClose(no_iters_result[1], np.complex64(0), rtol=1e-3)
def testLoopWithConstantOutput(self):
# Define a function for the loop body
@function.Defun(dtypes.int32, dtypes.int32)
def loop_body(step, x):
del x
step_out = step + constant_op.constant(1, dtype=dtypes.int32)
return (step_out, 7)
# Define a function for the loop condition
@function.Defun(dtypes.int32, dtypes.int32)
def loop_cond(step, x):
del x
return step < 10
with self.cached_session() as sess:
init_index = array_ops.placeholder(dtypes.int32, [])
with self.test_scope():
loop_outputs = xla.while_loop([init_index, 42], loop_cond, loop_body)
result = sess.run(loop_outputs, {init_index: 0})
self.assertAllClose(result, [10, 7], rtol=1e-3)
def _testMaxItersSimple(self):
if is_compile_on_demand():
self.skipTest("list_ops are not supported in cpu_ondemand")
with self.cached_session() as sess, self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
v = constant_op.constant(1.0)
p = array_ops.placeholder(dtype=dtypes.int32)
def create_while_loop():
iterations = array_ops.size(p, name="iterations")
r = control_flow_ops.while_loop(
lambda *_: True,
lambda i, x: (i + 1, v * x), (0, 1.0),
maximum_iterations=iterations,
name="outer")
return array_ops.identity(r[1])
output = create_while_loop()
output = gradients_impl.gradients(output, v)[0]
result = sess.run(output, feed_dict={p: [0, 0, 0]})
print(result)
xla_context.Exit()
def testMaxItersSimple(self):
self.skipTest("Fails with v1 control flow")
    # This fails with the old (v1) control flow.
# self._testMaxItersSimple()
@test_util.enable_control_flow_v2
def testMaxItersSimpleV2(self):
self._testMaxItersSimple()
def _testNestedWhileLoopWithMaxItersFromOuterContext(self):
if is_compile_on_demand():
self.skipTest("list_ops are not supported in cpu_ondemand")
with self.cached_session() as sess, self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
v = constant_op.constant(1.0)
p = array_ops.placeholder(dtype=dtypes.int32)
def mid_body_builder(iterations):
def mid_body(i, x):
r = control_flow_ops.while_loop(
lambda *_: True,
lambda i, x: (i + 1, v * x), (0, x),
maximum_iterations=iterations,
name="inner")
return (i + 1, gradients_impl.gradients(x + r[1], v)[0])
return mid_body
def outer_body(i, x):
iterations = array_ops.size(p, name="iterations")
return (i + 1, x + control_flow_ops.while_loop(
lambda *_: True,
mid_body_builder(iterations), (0, x),
maximum_iterations=iterations,
name="mid")[1])
def create_while_loop():
r = control_flow_ops.while_loop(
lambda *_: True,
outer_body, (0, 1.0),
maximum_iterations=5,
name="outer")
return array_ops.identity(r[1])
# p:placeholder
# j = 0
# i, x = 0, 1.
# while j++ < 5:
# i1, x1 = 0, x
# while i1++ < len(p):
# i2, x2 = 0, x1
# while i2++ < len(p):
# x2 = v * x2
# x1 = grad(x1 + x2, v)
# x = x1
# output = x
output = create_while_loop()
sess.run(output, feed_dict={p: [0, 0, 0]})
xla_context.Exit()
def testNestedWhileLoopWithMaxItersFromOuterContext(self):
self._testNestedWhileLoopWithMaxItersFromOuterContext()
@test_util.enable_control_flow_v2
def testNestedWhileLoopWithMaxItersFromOuterContextV2(self):
self._testNestedWhileLoopWithMaxItersFromOuterContext()
@test_util.enable_control_flow_v2
def testMap(self):
if is_compile_on_demand():
self.skipTest("list_ops are not supported in cpu_ondemand")
with self.cached_session(), self.test_scope():
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
nums = [1, 2, 3, 4, 5, 6]
elems = constant_op.constant(nums, name="data")
r = map_fn.map_fn(lambda x: math_ops.multiply(math_ops.add(x, 3), 2),
elems)
self.assertAllEqual(r, np.array([(x + 3) * 2 for x in nums]))
xla_context.Exit()
def is_compile_on_demand():
return ("TF_XLA_FLAGS" in os.environ and
"tf_xla_compile_on_demand" in os.environ["TF_XLA_FLAGS"])
if __name__ == "__main__":
os.environ["TF_XLA_FLAGS"] = ("--tf_xla_min_cluster_size=2 " +
os.environ.get("TF_XLA_FLAGS", ""))
test.main()
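For comparison, the semantics exercised by testSingletonLoopHandrolled match a plain TF v1 while_loop; a standalone sketch (not from this file, assumes a TF 1.x-compatible install):

# Plain tf.while_loop equivalent of the singleton XLA loop above: count 0 -> 10.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

i = tf.constant(0)
result = tf.while_loop(lambda i: i < 10, lambda i: i + 1, [i])
with tf.Session() as sess:
    print(sess.run(result))  # 10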
authors: ["gardener@tensorflow.org"]
author_id: gardener@tensorflow.org

---

blob_id: b20d92f82601082dd870674fcdda146fed234b4b
directory_id: 36eb5d43686264e110f0aa6f23ffc725076a067a
path: /train_exp.py
content_id: 39312b4a0240b04d70346fbc1a993bf81b29cfcd
detected_licenses: []
license_type: no_license
repo_name: samsdimko/SMOMI5
snapshot_id: 16edb9535af1759baf5338ea5fad52e4202704cf
revision_id: 21fde8714e5493addb85f8c9bacdeef2bde2e2a6
branch_name: refs/heads/master
visit_date: 2022-04-21T02:30:32.936304
revision_date: 2020-04-21T16:54:38
committer_date: 2020-04-21T16:54:38
github_id: 257,272,795
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,423
extension: py
content:
"""This module implements data feeding and training loop to create model
to classify X-Ray chest images as a lab example for BSU students.
"""
__author__ = 'Alexander Soroka, soroka.a.m@gmail.com'
__copyright__ = """Copyright 2020 Alexander Soroka"""
import argparse
import glob
import numpy as np
import tensorflow as tf
import time
import math
from tensorflow.python import keras as keras
from tensorflow.python.keras.callbacks import LearningRateScheduler
import random
LOG_DIR = 'logs'
SHUFFLE_BUFFER = 10
BATCH_SIZE = 8
NUM_CLASSES = 2
PARALLEL_CALLS = 4
RESIZE_TO = 224
TRAINSET_SIZE = 5216
VALSET_SIZE = 624
def parse_proto_example(proto):
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/class/label': tf.FixedLenFeature([], tf.int64, default_value=tf.zeros([], dtype=tf.int64))
}
example = tf.parse_single_example(proto, keys_to_features)
example['image'] = tf.image.decode_jpeg(example['image/encoded'], channels=3)
example['image'] = tf.image.convert_image_dtype(example['image'], dtype=tf.float32)
example['image'] = tf.image.resize_images(example['image'], tf.constant([RESIZE_TO, RESIZE_TO]))
return example['image'], example['image/class/label']
def normalize(image, label):
return tf.image.per_image_standardization(image), label
def resize(image, label):
return tf.image.resize_images(image, tf.constant([RESIZE_TO, RESIZE_TO])), label
def create_dataset(filenames, batch_size):
"""Create dataset from tfrecords file
:tfrecords_files: Mask to collect tfrecords file of dataset
:returns: tf.data.Dataset
"""
return tf.data.TFRecordDataset(filenames)\
.map(parse_proto_example)\
.map(resize)\
.map(normalize)\
.shuffle(buffer_size=5 * batch_size)\
.repeat()\
.batch(batch_size)\
.prefetch(2 * batch_size)
def create_aug_dataset(filenames, batch_size):
return tf.data.TFRecordDataset(filenames)\
.map(parse_proto_example)\
.map(resize)\
.map(normalize)\
.map(augment)\
.map(resize)\
.shuffle(buffer_size=5 * batch_size)\
.repeat()\
.batch(batch_size)\
.prefetch(2 * batch_size)
def augment(image,label):
degree = 30
dgr = random.uniform(-degree, degree)
image = tf.image.convert_image_dtype(image, tf.float32)
image = tf.image.random_flip_left_right(image)
image = tf.image.random_brightness(image, 0.5, seed=None)
image = tf.image.random_contrast(image, 0.4, 1.4, seed=None)
image = tf.contrib.image.rotate(image, dgr * math.pi / 180, interpolation='BILINEAR')
image = tf.image.random_crop(image, size=[180, 180, 3], seed=None, name=None)
return image,label
# initial_lrate and k were undefined in the original (NameError); assumed values:
initial_lrate = 0.1
k = 0.1
def exp_decay(epoch):
    lrate = initial_lrate * math.exp(-k * epoch)
    return lrate
class Validation(tf.keras.callbacks.Callback):
def __init__(self, log_dir, validation_files, batch_size):
self.log_dir = log_dir
self.validation_files = validation_files
self.batch_size = batch_size
def on_epoch_end(self, epoch, logs=None):
print('The average loss for epoch {} is {:7.2f} '.format(
epoch, logs['loss']
))
validation_dataset = create_dataset(self.validation_files, self.batch_size)
validation_images, validation_labels = validation_dataset.make_one_shot_iterator().get_next()
validation_labels = tf.one_hot(validation_labels, NUM_CLASSES)
result = self.model.evaluate(
validation_images,
validation_labels,
steps=int(np.ceil(VALSET_SIZE / float(BATCH_SIZE)))
)
callback = tf.keras.callbacks.TensorBoard(log_dir=self.log_dir, update_freq='epoch', batch_size=self.batch_size)
callback.set_model(self.model)
callback.on_epoch_end(epoch, {
'val_' + self.model.metrics_names[i]: v for i, v in enumerate(result)
})
def build_model():
model = keras.models.load_model('model.h5')
model.trainable = True
return model
def main():
args = argparse.ArgumentParser()
args.add_argument('--train', type=str, help='Glob pattern to collect train tfrecord files')
args.add_argument('--test', type=str, help='Glob pattern to collect test tfrecord files')
args = args.parse_args()
train_dataset = create_aug_dataset(glob.glob(args.train), BATCH_SIZE)
train_images, train_labels = train_dataset.make_one_shot_iterator().get_next()
train_labels = tf.one_hot(train_labels, NUM_CLASSES)
lrate = LearningRateScheduler(exp_decay)
model = build_model()
model.compile(
        optimizer=keras.optimizers.SGD(lr=0.0, momentum=0.9),
loss=tf.keras.losses.categorical_crossentropy,
metrics=[tf.keras.metrics.categorical_accuracy],
target_tensors=[train_labels]
)
log_dir='{}/xray-{}'.format(LOG_DIR, time.time())
model.fit(
(train_images, train_labels),
epochs=120,
steps_per_epoch=int(np.ceil(TRAINSET_SIZE / float(BATCH_SIZE))),
callbacks=[
lrate,
tf.keras.callbacks.TensorBoard(log_dir),
Validation(log_dir, validation_files=glob.glob(args.test), batch_size=BATCH_SIZE)
]
)
if __name__ == '__main__':
main()
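Note that the optimizer is created with lr=0.0: LearningRateScheduler overwrites the learning rate from exp_decay at every epoch, so the compile-time value is irrelevant. A standalone sketch of that mechanism (toy model and data; the schedule constants are the same assumed values as above):

# Toy demonstration: the scheduler, not SGD's constructor, controls the rate.
import math
import numpy as np
import tensorflow as tf

def schedule(epoch):
    return 0.1 * math.exp(-0.1 * epoch)  # same shape as exp_decay, assumed constants

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.0), loss='mse')
x, y = np.random.rand(8, 4), np.random.rand(8, 1)
model.fit(x, y, epochs=2, verbose=0,
          callbacks=[tf.keras.callbacks.LearningRateScheduler(schedule)])
print(float(model.optimizer.learning_rate))  # ~0.0905 = schedule(1), not 0.0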
authors: ["noreply@github.com"]
author_id: samsdimko.noreply@github.com

---

blob_id: 640f0ac472579660bb1ca6b5856e1f775e2eda85
directory_id: 397e125e94f4f139f2bf5055824d81f24b8b1757
path: /ABC/011/next_month.py
content_id: 2377071939fa14fe0e525012aedc43caaa3a7802
detected_licenses: []
license_type: no_license
repo_name: tails1434/Atcoder
snapshot_id: ecbab6ee238e3f225551297db961b1b502841fa4
revision_id: e7c7fed36be46bbaaf020a70997842240ba98d62
branch_name: refs/heads/master
visit_date: 2021-07-07T00:31:49.235625
revision_date: 2020-09-30T01:42:01
committer_date: 2020-09-30T01:42:01
github_id: 189,009,622
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 67
extension: py
content:
# AtCoder ABC 011: print the month after N; December (12) wraps to January (1)
N = int(input())
if N == 12:
    print(1)
    exit()
print(N + 1)
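The same wrap-around can be written without the branch; an equivalent one-liner sketch:

# Branch-free equivalent: 12 % 12 + 1 == 1, otherwise N % 12 + 1 == N + 1.
N = int(input())
print(N % 12 + 1)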
authors: ["sososo1333@gmail.com"]
author_id: sososo1333@gmail.com

---

blob_id: 17b1c73deb8173d4bb2d925572a319123a08f653
directory_id: 4afaca0d5f87ec6eb7c04f4decba5d3a0deb0cd5
path: /contact/migrations/0001_initial.py
content_id: d42dbc5fc658c00682f7b070050fc8211c165799
detected_licenses: []
license_type: no_license
repo_name: royell1415/mandala
snapshot_id: 9cb84f093d72ebf0faabac83e255095c8b3c3ce6
revision_id: 8e1dfb1f756d19cc7f324c3372b7e74a39cacabe
branch_name: refs/heads/master
visit_date: 2022-11-09T22:45:39.841677
revision_date: 2020-06-26T10:05:58
committer_date: 2020-06-26T10:05:58
github_id: 275,124,698
star_events_count: 0
fork_events_count: 1
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 537
extension: py
content:
# Generated by Django 3.0.7 on 2020-06-21 07:54
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('phone', models.IntegerField()),
],
),
]
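For reference, this initial migration corresponds to roughly the following model state (reconstructed from the operations above; the app's actual models.py may differ, and note that phone as an IntegerField silently drops leading zeros):

# contact/models.py -- reconstruction from the migration, not the repository's file.
from django.db import models

class Contact(models.Model):
    name = models.CharField(max_length=100)
    phone = models.IntegerField()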
authors: ["royell1415@gmail.com"]
author_id: royell1415@gmail.com

---

blob_id: 5529c55961a948d29b811b913d32cdb0d55b2f3a
directory_id: c1dab6818d05c52bdc0347150ce700a73d64fa1d
path: /build/realsense/realsense-2.2.0/realsense2_camera/cmake/realsense2_camera-genmsg-context.py
content_id: 6e81dfdd7ec6dfc93ef19b8847ffe4d715d317f0
detected_licenses: []
license_type: no_license
repo_name: Sinchiguano/Perception_ur10
snapshot_id: de5ee83f6e930679c045f96d4d3b6a87caeab452
revision_id: 40f18dc771bdcc4372d784f4aa8261774bab2b2a
branch_name: refs/heads/master
visit_date: 2022-02-16T05:38:00.578173
revision_date: 2019-09-01T15:16:11
committer_date: 2019-09-01T15:16:11
github_id: 194,514,569
star_events_count: 2
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 867
extension: py
content:
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/casch/ws_moveit/src/realsense/realsense-2.2.0/realsense2_camera/msg/IMUInfo.msg;/home/casch/ws_moveit/src/realsense/realsense-2.2.0/realsense2_camera/msg/Extrinsics.msg"
services_str = ""
pkg_name = "realsense2_camera"
dependencies_str = "sensor_msgs;std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "realsense2_camera;/home/casch/ws_moveit/src/realsense/realsense-2.2.0/realsense2_camera/msg;sensor_msgs;/opt/ros/melodic/share/sensor_msgs/cmake/../msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/melodic/share/geometry_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
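The generated *_str values are semicolon-joined lists, with dep_include_paths_str alternating package name and message directory; a small parsing sketch (illustrative only, using a shortened copy of the value above):

# Illustrative: split the alternating package/path list into a dict.
fields = ("realsense2_camera;/home/casch/ws_moveit/src/realsense/realsense-2.2.0/realsense2_camera/msg;"
          "sensor_msgs;/opt/ros/melodic/share/sensor_msgs/cmake/../msg").split(";")
dep_include_paths = dict(zip(fields[0::2], fields[1::2]))
print(dep_include_paths["sensor_msgs"])  # /opt/ros/melodic/share/sensor_msgs/cmake/../msg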
authors: ["cesarsinchiguano@hotmail.es"]
author_id: cesarsinchiguano@hotmail.es

---

blob_id: 74a69ff594bc5636f84fca27064f3a1d41602740
directory_id: cc284bd22cad0552bf306e4d4f4dc3ca39ddbe61
path: /anderson.py
content_id: e3d2e0f35c53475f7a604200dd09078b4d938bbb
detected_licenses: ["MIT"]
license_type: permissive
repo_name: afcarl/BFS_Sequences
snapshot_id: d97dadcc59a419e5bb0992bf1708de69312e9e5b
revision_id: 75d8ecaedc2050ede120015d8a4b17ed72276ded
branch_name: refs/heads/master
visit_date: 2020-08-22T15:16:29.796535
revision_date: 2019-06-25T18:02:56
committer_date: 2019-06-25T18:02:56
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 930
extension: py
content:
#!/usr/bin/env python
# coding: utf-8
# Anderson Darling
# use future imports for python 3.x forward compatibility
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# other imports
import math
import numpy
import random
def _phi(x):
'Cumulative distribution function for the standard normal distribution'
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
def anderson_darling(x, mean=None):
x = numpy.array(x)
x = sorted(x)
if mean is None:
mean = numpy.mean(x)
var = numpy.var(x)
std = numpy.std(x)
N = len(x)
y = [(xi - mean)/std for xi in x]
# start list at index 1
y = [None] + y
A = 0
for i in range(1, N+1):
A += (2*i - 1)*(math.log(_phi(y[i])) + math.log(1 - _phi(y[N+1-i])))
A = -N - 1/N * A
return A
# Statistic:
# 10.00: 1.760
# 5.000: 2.323
# 2.500: 2.904
# 1.000: 3.690
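A usage sketch appended for illustration (synthetic data; it reuses anderson_darling defined above, so run it in the same module): for roughly normal samples the statistic should stay below the 5% critical value of 2.323 listed in the table.

import random
random.seed(42)
normal_sample = [random.gauss(0.0, 1.0) for _ in range(500)]
print(anderson_darling(normal_sample))  # typically well below 2.323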
authors: ["fja0568@gmail.com"]
author_id: fja0568@gmail.com

---

blob_id: eb0e4c2d2bd361697b98bb51b25ba54661c9b5dd
directory_id: 2031771d8c226806a0b35c3579af990dd0747e64
path: /pyobjc-framework-Cocoa/PyObjCTest/test_cfattributedstring.py
content_id: e11d40062532f71cdc30338ac39d3f6dc3c7a7a0
detected_licenses: ["MIT"]
license_type: permissive
repo_name: GreatFruitOmsk/pyobjc-mirror
snapshot_id: a146b5363a5e39181f09761087fd854127c07c86
revision_id: 4f4cf0e4416ea67240633077e5665f5ed9724140
branch_name: refs/heads/master
visit_date: 2018-12-22T12:38:52.382389
revision_date: 2018-11-12T09:54:18
committer_date: 2018-11-12T09:54:18
github_id: 109,211,701
star_events_count: 2
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,680
extension: py
content:
from PyObjCTools.TestSupport import *
from CoreFoundation import *
from Foundation import NSCFAttributedString
class TestAttributedString (TestCase):
def testTypes(self):
try:
NSCFAttributedString = objc.lookUpClass('__NSCFAttributedString')
except objc.error:
NSCFAttributedString = objc.lookUpClass('NSCFAttributedString')
self.assertIs(CFAttributedStringRef, NSCFAttributedString )
self.assertIs(CFMutableAttributedStringRef, NSCFAttributedString )
def testTypeID(self):
v = CFAttributedStringGetTypeID()
self.assertIsInstance(v, (int, long))
def testCreate(self):
val = CFAttributedStringCreate(None, b"hello".decode('ascii'), {b'foo'.decode('ascii'): 42})
self.assertIsInstance(val, CFAttributedStringRef)
val = CFAttributedStringCreateWithSubstring(None, val, (1,2))
self.assertIsInstance(val, CFAttributedStringRef)
val2 = CFAttributedStringCreateCopy(None, val)
self.assertIs(val2, val)
def testGetting(self):
val = CFAttributedStringCreate(None, b"hello".decode('ascii'), {b'foo'.decode('ascii'): 42, b'bar'.decode('ascii'):b'baz'})
self.assertIsInstance(val, CFAttributedStringRef)
dta = CFAttributedStringGetString(val)
self.assertEqual(dta , b"hello".decode('ascii') )
l = CFAttributedStringGetLength(val)
self.assertEqual(l , 5 )
v, rng = CFAttributedStringGetAttributes(val, 1, None)
self.assertEqual(v , {b'foo'.decode('ascii'): 42, b'bar'.decode('ascii'): b'baz' } )
self.assertEqual(rng , (0, 5) )
v, rng = CFAttributedStringGetAttributes(val, 1, objc.NULL)
self.assertEqual(v , {b'foo'.decode('ascii'): 42, b'bar'.decode('ascii'): b'baz' } )
self.assertEqual(rng , objc.NULL )
v, rng = CFAttributedStringGetAttribute(val, 1, b"foo".decode('ascii'), None)
self.assertEqual(v , 42 )
self.assertEqual(rng , (0, 5) )
v, rng = CFAttributedStringGetAttribute(val, 1, b"foo".decode('ascii'), objc.NULL)
self.assertEqual(v , 42 )
self.assertEqual(rng , objc.NULL )
v, rng = CFAttributedStringGetAttributesAndLongestEffectiveRange(val, 1, (0,5), None)
self.assertEqual(v , {b"foo".decode('ascii'): 42, b"bar".decode('ascii'): b'baz' } )
self.assertEqual(rng , (0, 5) )
v, rng = CFAttributedStringGetAttributesAndLongestEffectiveRange(val, 1, (0,5), objc.NULL)
self.assertEqual(v , {b"foo".decode('ascii'): 42, b"bar".decode('ascii'): b'baz' } )
self.assertEqual(rng , objc.NULL )
v, rng = CFAttributedStringGetAttributeAndLongestEffectiveRange(val, 1, b"bar".decode('ascii'), (0,5), None)
self.assertEqual(v , b'baz' )
self.assertEqual(rng , (0, 5) )
v, rng = CFAttributedStringGetAttributeAndLongestEffectiveRange(val, 1, b"bar".decode('ascii'), (0,5), objc.NULL)
self.assertEqual(v , b'baz' )
self.assertEqual(rng , objc.NULL )
def testMutableCopy(self):
val = CFAttributedStringCreateMutable(None, 0)
self.assertIsInstance(val, CFAttributedStringRef)
orig = CFAttributedStringCreate(None, b"hello".decode("ascii"), {b'foo'.decode("ascii"): 42, b'bar'.decode("ascii"):'baz'})
self.assertIsInstance(orig, CFAttributedStringRef)
val = CFAttributedStringCreateMutableCopy(None, 0, orig)
self.assertIsInstance(orig, CFAttributedStringRef)
self.assertIsNot(val, orig)
CFAttributedStringReplaceString(val, (0,3), "Hal")
dta = CFAttributedStringGetString(val)
self.assertEqual(dta , b"Hallo".decode("ascii") )
v = CFAttributedStringGetMutableString(val)
self.assertIs(v, None )
CFAttributedStringSetAttributes(val, (0, 2), {b'ronald'.decode("ascii"):99}, False)
v, rng = CFAttributedStringGetAttributes(val, 1, None)
self.assertEqual(v , {b'ronald'.decode("ascii"):99, b'foo'.decode("ascii"): 42, b'bar'.decode("ascii"): 'baz' } )
self.assertEqual(rng , (0, 2) )
v, rng = CFAttributedStringGetAttributes(val, 3, None)
self.assertEqual(v , {b'foo'.decode("ascii"): 42, b'bar'.decode("ascii"): 'baz' } )
self.assertEqual(rng , (2, 3) )
self.assertIsInstance(rng, CFRange)
CFAttributedStringSetAttributes(val, (0, 2), {b'ronald'.decode("ascii"):99}, True)
v, rng = CFAttributedStringGetAttributes(val, 1, None)
self.assertEqual(v , {b'ronald'.decode("ascii"):99} )
self.assertEqual(rng , (0, 2) )
CFAttributedStringSetAttribute(val, (1, 3), b'color'.decode("ascii"), b'blue'.decode("ascii"))
v, rng = CFAttributedStringGetAttributes(val, 1, None)
self.assertEqual(v , {b'ronald'.decode("ascii"):99, b'color'.decode("ascii"):b'blue'.decode("ascii")} )
self.assertEqual(rng , (1, 1) )
CFAttributedStringRemoveAttribute(val, (1,3), b'color'.decode("ascii"))
v, rng = CFAttributedStringGetAttributes(val, 3, None)
self.assertEqual(v , {b'foo'.decode("ascii"): 42, b'bar'.decode("ascii"): 'baz' } )
self.assertEqual(rng , (2, 2) )
rep = CFAttributedStringCreate(None, "dummy", {b'attrib'.decode("ascii"): 99} )
CFAttributedStringReplaceAttributedString(val, (1,3), rep)
self.assertEqual(CFAttributedStringGetString(val) , b'Hdummyo'.decode("ascii"))
def testEditing(self):
val = CFAttributedStringCreateMutable(None, 0)
self.assertIsInstance(val, CFAttributedStringRef)
CFAttributedStringBeginEditing(val)
CFAttributedStringEndEditing(val)
if __name__ == "__main__":
main()
authors: ["ronaldoussoren@mac.com"]
author_id: ronaldoussoren@mac.com

---

blob_id: 0b298d38894ef425ad1305607c706ac8c1028a1c
directory_id: 5168da0fb501135a3c86e4e95679f54a825d69d0
path: /openquake/hazardlib/gsim/nshmp_2014.py
content_id: 7414d27cba6bcacae5529a8212acc10c9591c0e2
detected_licenses: ["BSD-3-Clause", "AGPL-3.0-only"]
license_type: permissive
repo_name: GFZ-Centre-for-Early-Warning/shakyground
snapshot_id: 266b29c05ea2cfff6d9d61f21b5114282c6fa117
revision_id: 0da9ba5a575360081715e8b90c71d4b16c6687c8
branch_name: refs/heads/master
visit_date: 2023-06-01T21:41:11.127323
revision_date: 2018-10-09T10:31:48
committer_date: 2018-10-09T10:31:48
github_id: 144,732,068
star_events_count: 1
fork_events_count: 3
gha_license_id: BSD-3-Clause
gha_event_created_at: 2019-11-18T07:58:49
gha_created_at: 2018-08-14T14:32:50
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 15,711
extension: py
content:
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2015-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`AbrahamsonEtAl2014NSHMPUpper`
:class:`AbrahamsonEtAl2014NSHMPLower`
:class:`BooreEtAl2014NSHMPUpper`
:class:`BooreEtAl2014NSHMPLower`
:class:`CampbellBozorgnia2014NSHMPUpper`
:class:`CampbellBozorgnia2014NSHMPLower`
:class:`ChiouYoungs2014NSHMPUpper`
:class:`ChiouYoungs2014NSHMPLower`
:class:`Idriss2014NSHMPUpper`
:class:`Idriss2014NSHMPLower`
"""
import numpy as np
from openquake.hazardlib.gsim.base import _norm_sf, _truncnorm_sf
from openquake.hazardlib import const
# NGA West 2 GMPEs
from openquake.hazardlib.gsim.abrahamson_2014 import AbrahamsonEtAl2014
from openquake.hazardlib.gsim.boore_2014 import BooreEtAl2014
from openquake.hazardlib.gsim.campbell_bozorgnia_2014 import \
CampbellBozorgnia2014
from openquake.hazardlib.gsim.chiou_youngs_2014 import ChiouYoungs2014
from openquake.hazardlib.gsim.idriss_2014 import Idriss2014
def nga_west2_epistemic_adjustment(magnitude, distance):
"""
Applies the "average" adjustment factor for epistemic uncertainty
as defined in Table 17 of Petersen et al., (2014)::
| R < 10. | 10.0 <= R < 30.0 | R >= 30.0
-----------------------------------------------------------
M < 6.0 | 0.37 | 0.22 | 0.22
    6.0 <= M < 7.0 | 0.25 | 0.23 | 0.23
M >= 7.0 | 0.40 | 0.36 | 0.33
"""
if magnitude < 6.0:
adjustment = 0.22 * np.ones_like(distance)
adjustment[distance < 10.0] = 0.37
elif magnitude >= 7.0:
adjustment = 0.36 * np.ones_like(distance)
adjustment[distance < 10.0] = 0.40
adjustment[distance >= 30.0] = 0.33
else:
adjustment = 0.23 * np.ones_like(distance)
adjustment[distance < 10.0] = 0.25
return adjustment
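# Quick numeric check of the table above (illustrative, not part of the
# original module):
#   >>> nga_west2_epistemic_adjustment(6.5, np.array([5.0, 20.0, 50.0]))
#   array([0.25, 0.23, 0.23])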
DEFAULT_WEIGHTING = [(0.185, -1.), (0.63, 0.), (0.185, 1.)]
def get_weighted_poes(gsim, sctx, rctx, dctx, imt, imls, truncation_level,
weighting=DEFAULT_WEIGHTING):
"""
This function implements the NGA West 2 GMPE epistemic uncertainty
adjustment factor without re-calculating the actual GMPE each time.
:param gsim:
Instance of the GMPE
:param list weighting:
Weightings as a list of tuples of (weight, number standard deviations
of the epistemic uncertainty adjustment)
"""
if truncation_level is not None and truncation_level < 0:
raise ValueError('truncation level must be zero, positive number '
'or None')
gsim._check_imt(imt)
adjustment = nga_west2_epistemic_adjustment(rctx.mag, dctx.rrup)
adjustment = adjustment.reshape(adjustment.shape + (1, ))
if truncation_level == 0:
# zero truncation mode, just compare imls to mean
imls = gsim.to_distribution_values(imls)
mean, _ = gsim.get_mean_and_stddevs(sctx, rctx, dctx, imt, [])
mean = mean.reshape(mean.shape + (1, ))
output = np.zeros([mean.shape[0], imls.shape[0]])
for (wgt, fct) in weighting:
output += (wgt *
(imls <= (mean + (fct * adjustment))).astype(float))
return output
else:
# use real normal distribution
assert (const.StdDev.TOTAL
in gsim.DEFINED_FOR_STANDARD_DEVIATION_TYPES)
imls = gsim.to_distribution_values(imls)
mean, [stddev] = gsim.get_mean_and_stddevs(sctx, rctx, dctx, imt,
[const.StdDev.TOTAL])
mean = mean.reshape(mean.shape + (1, ))
stddev = stddev.reshape(stddev.shape + (1, ))
output = np.zeros([mean.shape[0], imls.shape[0]])
for (wgt, fct) in weighting:
values = (imls - (mean + (fct * adjustment))) / stddev
if truncation_level is None:
output += (wgt * _norm_sf(values))
else:
output += (wgt * _truncnorm_sf(truncation_level, values))
return output
class AbrahamsonEtAl2014NSHMPUpper(AbrahamsonEtAl2014):
"""
Implements the positive NSHMP adjustment factor for the Abrahamson et al.
(2014) NGA West 2 GMPE
"""
def get_mean_and_stddevs(self, sctx, rctx, dctx, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# Get original mean and standard deviations
mean, stddevs = super().get_mean_and_stddevs(
sctx, rctx, dctx, imt, stddev_types)
        # Return mean, increased by the adjustment factor,
        # and standard deviation
return mean + nga_west2_epistemic_adjustment(rctx.mag, dctx.rrup),\
stddevs
class AbrahamsonEtAl2014NSHMPLower(AbrahamsonEtAl2014):
"""
Implements the negative NSHMP adjustment factor for the Abrahamson et al.
(2014) NGA West 2 GMPE
"""
def get_mean_and_stddevs(self, sctx, rctx, dctx, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# Get original mean and standard deviations
mean, stddevs = super().get_mean_and_stddevs(
sctx, rctx, dctx, imt, stddev_types)
        # Return mean, decreased by the adjustment factor,
        # and standard deviation
return mean - nga_west2_epistemic_adjustment(rctx.mag, dctx.rrup),\
stddevs
class AbrahamsonEtAl2014NSHMPMean(AbrahamsonEtAl2014):
"""
Implements the Abrahamson et al (2014) GMPE for application to the
weighted mean case
"""
def get_poes(self, sctx, rctx, dctx, imt, imls, truncation_level):
"""
Adapts the original `get_poes()` from the :class:
        openquake.hazardlib.gsim.base.GMPE to call a function that takes the
weighted sum of the PoEs from the epistemic uncertainty adjustment
"""
return get_weighted_poes(self, sctx, rctx, dctx, imt, imls,
truncation_level)
class BooreEtAl2014NSHMPUpper(BooreEtAl2014):
"""
Implements the positive NSHMP adjustment factor for the Boore et al.
(2014) NGA West 2 GMPE
"""
# Originally Boore et al. (2014) requires only Rjb, but the epistemic
# adjustment factors are given in terms of Rrup, so both are required here
REQUIRES_DISTANCES = set(("rjb", "rrup"))
def get_mean_and_stddevs(self, sctx, rctx, dctx, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# Get original mean and standard deviations
mean, stddevs = super().get_mean_and_stddevs(
sctx, rctx, dctx, imt, stddev_types)
        # Return mean, increased by the adjustment factor,
        # and standard deviation
return mean + nga_west2_epistemic_adjustment(
rctx.mag, dctx.rrup), stddevs
class BooreEtAl2014NSHMPLower(BooreEtAl2014):
"""
Implements the negative NSHMP adjustment factor for the Boore et al.
(2014) NGA West 2 GMPE
"""
# See similar comment above
REQUIRES_DISTANCES = set(("rjb", "rrup"))
def get_mean_and_stddevs(self, sctx, rctx, dctx, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# Get original mean and standard deviations
mean, stddevs = super().get_mean_and_stddevs(
sctx, rctx, dctx, imt, stddev_types)
        # Return mean, decreased by the adjustment factor,
        # and standard deviation
return mean - nga_west2_epistemic_adjustment(rctx.mag, dctx.rrup),\
stddevs
class BooreEtAl2014NSHMPMean(BooreEtAl2014):
"""
Implements the Boore et al (2014) GMPE for application to the
weighted mean case
"""
# See similar comment above
REQUIRES_DISTANCES = set(("rjb", "rrup"))
def get_poes(self, sctx, rctx, dctx, imt, imls, truncation_level):
"""
Adapts the original `get_poes()` from the :class:
        openquake.hazardlib.gsim.base.GMPE to call a function that takes the
weighted sum of the PoEs from the epistemic uncertainty adjustment
"""
return get_weighted_poes(self, sctx, rctx, dctx, imt, imls,
truncation_level)
class CampbellBozorgnia2014NSHMPUpper(CampbellBozorgnia2014):
"""
Implements the positive NSHMP adjustment factor for the Campbell and
Bozorgnia (2014) NGA West 2 GMPE
"""
def get_mean_and_stddevs(self, sctx, rctx, dctx, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# Get original mean and standard deviations
mean, stddevs = super().get_mean_and_stddevs(
sctx, rctx, dctx, imt, stddev_types)
        # Return mean, increased by the adjustment factor,
        # and standard deviation
return mean + nga_west2_epistemic_adjustment(rctx.mag, dctx.rrup),\
stddevs
class CampbellBozorgnia2014NSHMPLower(CampbellBozorgnia2014):
"""
Implements the negative NSHMP adjustment factor for the Campbell and
Bozorgnia (2014) NGA West 2 GMPE
"""
def get_mean_and_stddevs(self, sctx, rctx, dctx, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# Get original mean and standard deviations
mean, stddevs = super().get_mean_and_stddevs(
sctx, rctx, dctx, imt, stddev_types)
        # Return mean, decreased by the adjustment factor,
        # and standard deviation
return mean - nga_west2_epistemic_adjustment(rctx.mag, dctx.rrup),\
stddevs
class CampbellBozorgnia2014NSHMPMean(CampbellBozorgnia2014):
"""
Implements the Campbell & Bozorgnia (2014) GMPE for application to the
weighted mean case
"""
def get_poes(self, sctx, rctx, dctx, imt, imls, truncation_level):
"""
Adapts the original `get_poes()` from the :class:
        openquake.hazardlib.gsim.base.GMPE to call a function that takes the
weighted sum of the PoEs from the epistemic uncertainty adjustment
"""
return get_weighted_poes(self, sctx, rctx, dctx, imt, imls,
truncation_level)
class ChiouYoungs2014NSHMPUpper(ChiouYoungs2014):
"""
Implements the positive NSHMP adjustment factor for the Chiou & Youngs
(2014) NGA West 2 GMPE
"""
def get_mean_and_stddevs(self, sctx, rctx, dctx, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# Get original mean and standard deviations
mean, stddevs = super().get_mean_and_stddevs(
sctx, rctx, dctx, imt, stddev_types)
        # Return mean, increased by the adjustment factor,
        # and standard deviation
return mean + nga_west2_epistemic_adjustment(rctx.mag, dctx.rrup),\
stddevs
class ChiouYoungs2014NSHMPLower(ChiouYoungs2014):
"""
Implements the negative NSHMP adjustment factor for the Chiou & Youngs
(2014) NGA West 2 GMPE
"""
def get_mean_and_stddevs(self, sctx, rctx, dctx, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# Get original mean and standard deviations
mean, stddevs = super().get_mean_and_stddevs(
sctx, rctx, dctx, imt, stddev_types)
        # Return mean, decreased by the adjustment factor,
        # and standard deviation
return mean - nga_west2_epistemic_adjustment(rctx.mag, dctx.rrup),\
stddevs
class ChiouYoungs2014NSHMPMean(ChiouYoungs2014):
"""
Implements the Chiou & Youngs (2014) GMPE for application to the
weighted mean case
"""
def get_poes(self, sctx, rctx, dctx, imt, imls, truncation_level):
"""
Adapts the original `get_poes()` from the :class:
        openquake.hazardlib.gsim.base.GMPE to call a function that takes the
weighted sum of the PoEs from the epistemic uncertainty adjustment
"""
return get_weighted_poes(self, sctx, rctx, dctx, imt, imls,
truncation_level)
class Idriss2014NSHMPUpper(Idriss2014):
"""
Implements the positive NSHMP adjustment factor for the Idriss (2014)
NGA West 2 GMPE
"""
def get_mean_and_stddevs(self, sctx, rctx, dctx, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# Get original mean and standard deviations
mean, stddevs = super().get_mean_and_stddevs(
sctx, rctx, dctx, imt, stddev_types)
        # Return mean, increased by the adjustment factor,
        # and standard deviation
return mean + nga_west2_epistemic_adjustment(rctx.mag, dctx.rrup),\
stddevs
class Idriss2014NSHMPLower(Idriss2014):
"""
Implements the negative NSHMP adjustment factor for the Idriss (2014)
NGA West 2 GMPE
"""
def get_mean_and_stddevs(self, sctx, rctx, dctx, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# Get original mean and standard deviations
mean, stddevs = super().get_mean_and_stddevs(
sctx, rctx, dctx, imt, stddev_types)
        # Return mean, decreased by the adjustment factor,
        # and standard deviation
return mean - nga_west2_epistemic_adjustment(rctx.mag, dctx.rrup),\
stddevs
class Idriss2014NSHMPMean(Idriss2014):
"""
Implements the Idriss (2014) GMPE for application to the
weighted mean case
"""
def get_poes(self, sctx, rctx, dctx, imt, imls, truncation_level):
"""
Adapts the original `get_poes()` from the :class:
        openquake.hazardlib.gsim.base.GMPE to call a function that takes the
weighted sum of the PoEs from the epistemic uncertainty adjustment
"""
return get_weighted_poes(self, sctx, rctx, dctx, imt, imls,
truncation_level)
authors: ["mhaas@gfz-potsdam.de"]
author_id: mhaas@gfz-potsdam.de

---

blob_id: 04c6380edc9d403e0daa8af46e8821b04b91781c
directory_id: 4a23d02c58b95dbdd1616de9b738289f97e5bf43
path: /SimpleBatch.py
content_id: 5f507d9b2de8fe97c3edc5f23740e3d8577b1e4c
detected_licenses: []
license_type: no_license
repo_name: ChenKQ/rs_mxnet_reader
snapshot_id: 5123bb2de2106126da6a356db9e7ca17847ba80f
revision_id: ad3710c605114cf027d954e8f75372bef17c1b4a
branch_name: refs/heads/master
visit_date: 2021-08-10T16:10:21.567289
revision_date: 2017-11-12T19:41:39
committer_date: 2017-11-12T19:41:39
github_id: 110,461,772
star_events_count: 4
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 109
extension: py
content:
class SimpleBatch(object):
    """Minimal batch container holding parallel data and label arrays."""
    def __init__(self, data, label):
        self.data = data
        self.label = label
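A usage sketch (hypothetical shapes; MXNet's own io.DataBatch additionally carries provide_data/provide_label metadata, which this minimal container omits):

import numpy as np

batch = SimpleBatch(data=[np.zeros((8, 3, 224, 224), dtype=np.float32)],
                    label=[np.zeros((8,), dtype=np.float32)])
print(batch.data[0].shape, batch.label[0].shape)  # (8, 3, 224, 224) (8,)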
authors: ["chenkaiqiang14@mails.ucas.ac.cn"]
author_id: chenkaiqiang14@mails.ucas.ac.cn

---

blob_id: 832c6f2677b752fed350fdf56b3de06df8dae36d
directory_id: 2d75d4fcc65bbfbc0b160b7205b8fbcb2ecaaf5b
path: /core/dbt/task/rpc/project_commands.py
content_id: e9b3f52c52ea4ebc9d1503c41538f1070eaa13fa
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: carlineng/dbt
snapshot_id: 0c78f35db9e1d052de4bf11f48f80aab2638aeec
revision_id: cf6359803edea6c1c3f4cbe70a4dc02e55f8168e
branch_name: refs/heads/dev/0.15.2
visit_date: 2020-12-20T19:29:45.211945
revision_date: 2020-01-31T19:07:34
committer_date: 2020-01-31T19:07:34
github_id: 236,188,153
star_events_count: 0
fork_events_count: 0
gha_license_id: Apache-2.0
gha_event_created_at: 2020-01-31T19:07:35
gha_created_at: 2020-01-25T15:27:20
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,043
extension: py
content:
from datetime import datetime
from typing import List, Optional, Union
from dbt.contracts.rpc import (
RPCCompileParameters,
RPCDocsGenerateParameters,
RPCRunOperationParameters,
RPCSeedParameters,
RPCTestParameters,
RemoteCatalogResults,
RemoteExecutionResult,
RemoteRunOperationResult,
RPCSnapshotParameters,
RPCSourceFreshnessParameters,
)
from dbt.rpc.method import (
Parameters,
)
from dbt.task.compile import CompileTask
from dbt.task.freshness import FreshnessTask
from dbt.task.generate import GenerateTask
from dbt.task.run import RunTask
from dbt.task.run_operation import RunOperationTask
from dbt.task.seed import SeedTask
from dbt.task.snapshot import SnapshotTask
from dbt.task.test import TestTask
from .base import RPCTask
from .cli import HasCLI
class RPCCommandTask(
RPCTask[Parameters],
HasCLI[Parameters, RemoteExecutionResult],
):
@staticmethod
def _listify(
value: Optional[Union[str, List[str]]]
) -> Optional[List[str]]:
if value is None:
return None
elif isinstance(value, str):
return [value]
else:
return value
def handle_request(self) -> RemoteExecutionResult:
return self.run()
class RemoteCompileProjectTask(
RPCCommandTask[RPCCompileParameters], CompileTask
):
METHOD_NAME = 'compile'
def set_args(self, params: RPCCompileParameters) -> None:
self.args.models = self._listify(params.models)
self.args.exclude = self._listify(params.exclude)
if params.threads is not None:
self.args.threads = params.threads
class RemoteRunProjectTask(RPCCommandTask[RPCCompileParameters], RunTask):
METHOD_NAME = 'run'
def set_args(self, params: RPCCompileParameters) -> None:
self.args.models = self._listify(params.models)
self.args.exclude = self._listify(params.exclude)
if params.threads is not None:
self.args.threads = params.threads
class RemoteSeedProjectTask(RPCCommandTask[RPCSeedParameters], SeedTask):
METHOD_NAME = 'seed'
def set_args(self, params: RPCSeedParameters) -> None:
if params.threads is not None:
self.args.threads = params.threads
self.args.show = params.show
class RemoteTestProjectTask(RPCCommandTask[RPCTestParameters], TestTask):
METHOD_NAME = 'test'
def set_args(self, params: RPCTestParameters) -> None:
self.args.models = self._listify(params.models)
self.args.exclude = self._listify(params.exclude)
self.args.data = params.data
self.args.schema = params.schema
if params.threads is not None:
self.args.threads = params.threads
class RemoteDocsGenerateProjectTask(
RPCCommandTask[RPCDocsGenerateParameters],
GenerateTask,
):
METHOD_NAME = 'docs.generate'
def set_args(self, params: RPCDocsGenerateParameters) -> None:
self.args.models = None
self.args.exclude = None
self.args.compile = params.compile
def get_catalog_results(
self, nodes, generated_at, compile_results
) -> RemoteCatalogResults:
return RemoteCatalogResults(
nodes=nodes,
generated_at=datetime.utcnow(),
_compile_results=compile_results,
logs=[],
)
class RemoteRunOperationTask(
RPCTask[RPCRunOperationParameters],
HasCLI[RPCRunOperationParameters, RemoteRunOperationResult],
RunOperationTask,
):
METHOD_NAME = 'run-operation'
def set_args(self, params: RPCRunOperationParameters) -> None:
self.args.macro = params.macro
self.args.args = params.args
def _get_kwargs(self):
if isinstance(self.args.args, dict):
return self.args.args
else:
return RunOperationTask._get_kwargs(self)
def _runtime_initialize(self):
return RunOperationTask._runtime_initialize(self)
def handle_request(self) -> RemoteRunOperationResult:
success, _ = RunOperationTask.run(self)
result = RemoteRunOperationResult(logs=[], success=success)
return result
def interpret_results(self, results):
return results.success
class RemoteSnapshotTask(RPCCommandTask[RPCSnapshotParameters], SnapshotTask):
METHOD_NAME = 'snapshot'
def set_args(self, params: RPCSnapshotParameters) -> None:
# select has an argparse `dest` value of `models`.
self.args.models = self._listify(params.select)
self.args.exclude = self._listify(params.exclude)
if params.threads is not None:
self.args.threads = params.threads
class RemoteSourceFreshnessTask(
RPCCommandTask[RPCSourceFreshnessParameters],
FreshnessTask
):
METHOD_NAME = 'snapshot-freshness'
def set_args(self, params: RPCSourceFreshnessParameters) -> None:
self.args.selected = self._listify(params.select)
if params.threads is not None:
self.args.threads = params.threads
self.args.output = None
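The _listify helper is what lets every RPC parameter accept either a single selector string or a list of selectors; a standalone restatement of the same logic for illustration:

# Standalone restatement of RPCCommandTask._listify (not imported from dbt).
from typing import List, Optional, Union

def listify(value: Optional[Union[str, List[str]]]) -> Optional[List[str]]:
    if value is None:
        return None
    if isinstance(value, str):
        return [value]
    return value

assert listify(None) is None
assert listify("my_model") == ["my_model"]
assert listify(["a", "b"]) == ["a", "b"]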
authors: ["jake@fishtownanalytics.com"]
author_id: jake@fishtownanalytics.com

---

blob_id: 3566c31f10feda9150fe9811c47702d72682d1a0
directory_id: 06c05314b018bb133f34e7052cc315b3cfdcd311
path: /TP1/data-ex4/mapper4.py
content_id: 76fe6e1ac8201d274be9ddbcfc56665d261ef1b8
detected_licenses: ["MIT"]
license_type: permissive
repo_name: tjaskula/dauphine-slpbd-tp
snapshot_id: 6a29a0e02049998135822de26770b7c5e800958b
revision_id: ba82d2d3eda16ca6380ad7e1ddb30f9fd962b68c
branch_name: refs/heads/master
visit_date: 2020-03-30T10:18:58.181528
revision_date: 2018-10-17T18:30:19
committer_date: 2018-10-17T18:30:19
github_id: 151,114,966
star_events_count: 0
fork_events_count: 0
gha_license_id: MIT
gha_event_created_at: 2018-10-01T15:51:14
gha_created_at: 2018-10-01T15:51:14
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 266
extension: py
content:
#!/usr/bin/env python
import sys

rememberIdc = None
for line in sys.stdin:
    line = line.strip()
    idc, d, name = line.split(',')
    if idc == rememberIdc:
        # print() form works under both Python 2 and 3
        print("%s\t%s\t%s" % (idc, d, name))
    if rememberIdc != idc and name.startswith('G'):
        rememberIdc = idc
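Run as a Hadoop Streaming mapper, this consumes stdin and assumes rows arrive grouped by id: once a group's row whose name starts with 'G' is seen, every later row of that group is echoed. A standalone restatement with hypothetical rows for a quick local check:

# Local check of the mapper logic (hypothetical, pre-grouped input).
def run_mapper(lines):
    remembered, out = None, []
    for line in lines:
        idc, d, name = line.strip().split(',')
        if idc == remembered:
            out.append((idc, d, name))
        if remembered != idc and name.startswith('G'):
            remembered = idc
    return out

rows = ["1,2018-01-01,Gerard", "1,2018-02-01,Alice", "2,2018-01-05,Bob"]
print(run_mapper(rows))  # [('1', '2018-02-01', 'Alice')]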
authors: ["thomasv1000@hotmail.fr"]
author_id: thomasv1000@hotmail.fr

---

blob_id: f4bc7a7de3187d89d91b4c6ee55aafc3a8683e27
directory_id: 380a47268c5975473a2e7c38c747bc3bdbd981b1
path: /benchmark/third_party/transformers/tests/models/bert/test_tokenization_bert.py
content_id: dfbcd266c49917d91429910a65a04ecb6ffbf9b5
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: FMInference/FlexGen
snapshot_id: 07aa9b1918c19b02077e13ad07e76840843810dd
revision_id: d34f7b4b43ed87a374f394b0535ed685af66197b
branch_name: refs/heads/main
visit_date: 2023-07-24T02:29:51.179817
revision_date: 2023-07-21T22:38:31
committer_date: 2023-07-21T22:38:31
github_id: 602,270,517
star_events_count: 6,821
fork_events_count: 411
gha_license_id: Apache-2.0
gha_event_created_at: 2023-07-07T22:59:24
gha_created_at: 2023-02-15T21:18:53
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 13,961
extension: py
content:
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = BertTokenizer
rust_tokenizer_class = BertTokenizerFast
test_rust_tokenizer = True
space_between_special_tokens = True
from_pretrained_filter = filter_non_english
def setUp(self):
super().setUp()
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def get_input_output_texts(self, tokenizer):
input_text = "UNwant\u00E9d,running"
output_text = "unwanted, running"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = self.tokenizer_class(self.vocab_file)
tokens = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = "UNwant\u00E9d,running"
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
# With lower casing
tokenizer = self.get_tokenizer(do_lower_case=True)
rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
sequence = "UNwant\u00E9d,running"
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
def test_chinese(self):
tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
def test_basic_tokenizer_lower(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
def test_basic_tokenizer_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_default(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_no_lower(self):
tokenizer = BasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_respects_never_split_tokens(self):
tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
)
def test_wordpiece_tokenizer(self):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
vocab = {}
for i, token in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize(""), [])
self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def test_is_control(self):
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
def test_clean_text(self):
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
self.assertListEqual(
[rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
)
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_2 + [102]
def test_offsets_with_special_characters(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
tokens = tokenizer_r.encode_plus(
sentence,
return_attention_mask=False,
return_token_type_ids=False,
return_offsets_mapping=True,
add_special_tokens=True,
)
do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
)
self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
def test_change_tokenize_chinese_chars(self):
list_of_commun_chinese_char = ["的", "人", "有"]
text_with_chinese_char = "".join(list_of_commun_chinese_char)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
kwargs["tokenize_chinese_chars"] = True
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
kwargs["tokenize_chinese_chars"] = False
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
# it is expected that only the first Chinese character is not preceded by "##".
expected_tokens = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
]
self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
|
[
"sqy1415@gmail.com"
] |
sqy1415@gmail.com
|
4012a5cd1acbc94fa691959cc75443b63e3e39f1
|
024d71c9addb15f2da3a7dc4b6cceb15d104f53f
|
/facial_landmark_detection.py
|
0b5474b09011117cc2979918d6d83513cd2cea69
|
[] |
no_license
|
Nishant-Ramakuru/Alcohol-Detection-Based-on-Thermal-Images
|
e9414b426573c235c2d57ed311952352d8c77a83
|
f90aafddacd0801a441868975c81581e7d6b639d
|
refs/heads/master
| 2022-08-20T21:51:00.017876
| 2022-07-20T08:29:44
| 2022-07-20T08:29:44
| 251,707,712
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,507
|
py
|
# Face landmarks Detection
# usage:
# python facial_landmark_detection.py --shape-predictor shape_predictor_68_face_landmarks.dat --image images/face1.jpg
# import the necessary packages
from imutils import face_utils
import numpy as np
import argparse
import os
import imutils
import dlib
import cv2
import matplotlib.pyplot as plt
import face_recognition
font = cv2.FONT_HERSHEY_SIMPLEX
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
help="path to facial landmark predictor")
ap.add_argument("-i", "--image", required=True,
help="path to input image")
args = vars(ap.parse_args())
if not os.path.isfile(args["shape_predictor"]):
    # File is not available, so download it. Note the archive must also be
    # decompressed before dlib can load it; this assumes the default file name.
    cmd = "wget -c --progress=bar http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2"
    os.system(cmd)
    os.system("bzip2 -d shape_predictor_68_face_landmarks.dat.bz2")
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])
# load the input image, resize it, and convert it to grayscale
image = face_recognition.load_image_file(args["image"])
orig = image
image = imutils.resize(image, width=500)
print(image.shape)
print(image.dtype)
# face_recognition loads images in RGB order, so convert from RGB (not BGR)
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
print(gray.shape)
print(gray.dtype)
# detect faces in the grayscale image
rects = detector(gray, 1)
# loop over the face detections
for (i, rect) in enumerate(rects):
# determine the facial landmarks for the face region, then
# convert the facial landmark (x, y)-coordinates to a NumPy
# array
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
# convert dlib's rectangle to a OpenCV-style bounding box
# [i.e., (x, y, w, h)], then draw the face bounding box
(x, y, w, h) = face_utils.rect_to_bb(rect)
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
# show the face number
cv2.putText(image, "Face #{}".format(i + 1), (x - 10, y - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
# loop over the (x, y)-coordinates for the facial landmarks
# and draw them on the image
s = [shape[17],shape[26],shape[0],shape[3],shape[5],shape[11],shape[13],shape[16]]
count = 0
avg_g = []
for (x, y) in shape:
cv2.circle(image, (x, y), 1, (0, 0, 255), -1)
count+=1
G = []
print(x,y)
for i in range(-1,2):
for j in range(-1,2):
G.append(gray[y-j][x-i])
avg_g.append(sum(G)/9)
print(len(G))
print(len(avg_g))
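    # avg_g now holds the mean 3x3 gray-level intensity around each landmark;
    # presumably this feeds the thermal-image analysis this repository targets.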
for number, (x, y) in enumerate(shape):
r =1
text = str(number)
(tw, th), bl = cv2.getTextSize(text, font, 0.5, 2) # So the text can be centred in the circle
tw /= 2
th = th / 2 + 2
# draw the circle in the output image, then draw a rectangle
# corresponding to the center of the circle
cv2.rectangle(image,
(int(x - tw), int(y - th)),
(int(x + tw), int(y + th)),
(0, 128, 255),
-1)
# number each circle, centred in the rectangle
cv2.putText(image, text, (int(x-tw), int(y + bl)), font, 0.5, (0,0,0), 2)
# get the average gray value in the specified sample size (3 x 3, computed above)
# show the output image with the face detections + facial landmarks
plt.subplot(121)
plt.imshow(orig)
plt.xticks([])
plt.yticks([])
plt.title("Intput")
plt.subplot(122)
plt.imshow(image)
plt.xticks([])
plt.yticks([])
plt.title("Output")
fname = "result_" + args["image"][1]
#plt.savefig(fname)
plt.show()
|
[
"noreply@github.com"
] |
Nishant-Ramakuru.noreply@github.com
|
11b130a31c10fb9e095dcf07a1785c19c8b7458b
|
a9633fe2a90daf3623047ac0009b54b802d16d0d
|
/HW/HW2/Code/HW2_q1.py
|
f17b663928ece0c75692d0a8a6e41b83c5fc0884
|
[] |
no_license
|
sarasovdat/MMDS
|
485a6712750a5dc4ac16b8f7f9c4692e1817c206
|
f73862fbc0d504a223217b7fbb1490b929c81b73
|
refs/heads/main
| 2023-08-04T16:07:49.133831
| 2021-09-15T08:10:02
| 2021-09-15T08:10:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
import numpy as np
from scipy import linalg
# SVD of M:
M = np.array([[1, 2], [2, 1], [3, 4], [4, 3]])
U, Sigma, V_transpose = linalg.svd(M, full_matrices = False)
V = V_transpose.transpose()
sig = np.array([[7.61577311, 0], [0, 1.41421356]])  # Sigma as a diagonal matrix (hard-coded from the computed singular values)
print("----- U -----")
print(U)
print("----- Sigma -----")
print(Sigma)
print("----- V_transpose -----")
print(V_transpose)
print("----- V -----")
print(V)
# Eigenvalues and eigen vectors of M_transpose x M:
M_transpose = M.transpose()
mult = M_transpose.dot(M)
#print(mult)
Evals, Evecs = linalg.eigh(mult)
print("----------")
print(Evals)
print("----------")
print(Evecs)
print("----------")
print("-------- Evals --------")
print(Evals)
print("-------- Evecs --------")
print(Evecs)
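# Note: eigh returns the eigenvalues in ascending order; the singular values of M
# are the square roots of the eigenvalues of M^T M, so sqrt(Evals)[::-1] matches Sigma.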
"""
# Orthogonality test:
ET = Evecs.transpose()
print(" PRODUCT ")
print(np.dot(V, V_transpose))
print(np.dot(V_transpose, V))
print("_______________________")
print(np.dot(Evecs,ET))
print(np.dot(ET, Evecs))
# Check:
print(np.dot(np.dot(U, sig), V_transpose))
"""
|
[
"sarabizjak97@gmail.com"
] |
sarabizjak97@gmail.com
|
193acb5dc5a44ce5eb4800a26e3b5c3263006b47
|
9f0c84ec676d967af0d3dcebfeb51477797c6f16
|
/evaluating_expressions.py
|
13ef43bd1ed72bf605167e95dfc0271ddb67a66f
|
[] |
no_license
|
monajalal/Python_Playground
|
71169841d62eff5098af862c4443f022518a6be8
|
bb07a973d4459a957ca4fda07a6a55ed14c65067
|
refs/heads/master
| 2020-04-15T00:01:27.225571
| 2020-03-12T03:01:46
| 2020-03-12T03:01:46
| 60,127,186
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 902
|
py
|
class Solution:
# @param A : list of strings
# @return an integer
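    # Example: ["2", "1", "+", "3", "*"] evaluates to (2 + 1) * 3 = 9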
def evalRPN(self, A):
operations = set('+-/*')
operands = []
for item in A:
if item not in operations:
operands.append(item)
else:
operand2 = int(operands.pop())
operand1 = int(operands.pop())
if item == '+':
operands.append(operand1+operand2)
elif item == '-':
operands.append(operand1-operand2)
elif item == '*':
operands.append(operand1*operand2)
elif item == '/':
try:
operands.append(operand1/operand2)
except ZeroDivisionError:
return
return operands.pop()
s = Solution()
print(s.evalRPN(["1", "2", "*", "3", "-", "5", "+"]))  # pass a token list, as the signature expects
|
[
"jalal@wisc.edu"
] |
jalal@wisc.edu
|
06649657ccf0f10d85bfa2c3ddd68a7de6b02f20
|
09d557923f726e5a8301d3489d0b0e20f3ecd9c6
|
/friends.py
|
0c49a2ee04dc44ca55975c0e533eaf39232d4dd7
|
[] |
no_license
|
moragbl/checkio
|
9b3a70301b7195876f0bf176dfdb054f7d6d1092
|
13761610cc651aeea426f4b61d35c28acc3e2bad
|
refs/heads/master
| 2021-01-10T06:47:14.406928
| 2016-01-10T18:24:13
| 2016-01-10T18:24:13
| 49,025,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,044
|
py
|
#!/usr/bin/python
class Friends:
def __init__(self, connections):
self.connections = list(connections)
return None
def add(self, connection):
if connection in self.connections:
return False
else:
self.connections = self.connections + [connection]
return True
def remove(self, connection):
new_connections = []
if connection in self.connections:
for thing in self.connections:
if thing != connection:
new_connections = new_connections + [thing]
self.connections = new_connections
return True
else:
return False
def names(self):
stuff = set()
for thing in self.connections:
stuff = stuff.union(thing)
        return stuff  # already a set; the extra sorted/list/set round-trip was redundant
def connected(self, name):
result = set()
for thing in self.connections:
if name in thing:
result = result.union(thing.difference({name}))
return result
if __name__ == '__main__':
#These "asserts" using only for self-checking and not necessary for auto-testing
letter_friends = Friends(({"a", "b"}, {"b", "c"}, {"c", "a"}, {"a", "c"}))
digit_friends = Friends([{"1", "2"}, {"3", "1"}])
assert letter_friends.add({"c", "d"}) is True, "Add"
assert letter_friends.add({"c", "d"}) is False, "Add again"
assert letter_friends.remove({"c", "d"}) is True, "Remove"
assert digit_friends.remove({"c", "d"}) is False, "Remove non exists"
assert letter_friends.names() == {"a", "b", "c"}, "Names"
assert letter_friends.connected("d") == set(), "Non connected name"
assert letter_friends.connected("a") == {"b", "c"}, "Connected name"
f = Friends([{"1", "2"}, {"3", "1"}])
assert f.add({"2", "4"}) is True, "last add"
f = Friends(({"nikola", "sophia"}, {"stephen", "robot"}, {"sophia", "pilot"}))
assert f.connected("sophia") == {"nikola", "pilot"}, "connected thing"
|
[
"moragbl@users.noreply.github.com"
] |
moragbl@users.noreply.github.com
|
e62afdacebeee507bc7b6ffbe8a9a5216896add4
|
f8a58ae93ff78c59296a136dff721c5ef666790b
|
/Starting small/Open Concept.py
|
fc11dd75f69e0575b469b286f4e4b37d2c6c9d47
|
[] |
no_license
|
Anthonymcqueen21/Python-Programs
|
cb116b36e3c774ef51dba7f1fd29561767f89c7f
|
127d5cbab4e7a2d0009d65075508cbaf5a6b6dc2
|
refs/heads/master
| 2021-06-26T18:03:30.212592
| 2017-09-15T19:59:21
| 2017-09-15T19:59:21
| 79,636,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
message = "Hello World Crash Course reader!"
print(message)
message = "Hello World"
print(message)
message = "Python crash course"
print(message)
|
[
"noreply@github.com"
] |
Anthonymcqueen21.noreply@github.com
|
c0d33eecc47b3c11ae377ec4a317558862c0186b
|
47b4233f1c42a1136f661e2827c185f5a7ae31bf
|
/milestone3-flask.py
|
2de40561b15bc5c867b062b3d450be761ff588ac
|
[] |
no_license
|
RutujaJadhav/Baby-got-Back
|
ff9c7c2d0ea9ec96cf3d896fe63f0fc754cf97d5
|
6adaaacd88125b007e47739090739d6c03bed88d
|
refs/heads/master
| 2020-04-24T08:08:02.379108
| 2019-04-23T05:41:08
| 2019-04-23T05:41:08
| 171,821,179
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,595
|
py
|
from flask import Flask, render_template, jsonify, request
from Adafruit_CCS811 import Adafruit_CCS811
import RPi.GPIO as GPIO
from twilio.rest import Client
import threading
import urllib.request
import urllib.error
import os
import time
import board
import neopixel
import pandas as pd
import numpy as np
import math
import datetime
import json
"""
INTERNAL FUNCTIONS
-------------------------------------------------
"""
textAlerts = False
riskFactor = False
def twilio_alert(msg):
global textAlerts
if textAlerts:
message = client.messages.create(to='+12067909956',from_='+12062080987',body=msg)
def alert_on():
pixels.fill((255,0,0))
pixels.show()
def alert_all(msg):
twilio_alert(msg)
alert_on()
def alert_off():
pixels.fill((0,0,0))
pixels.show()
def init_CCS811():
ccs = Adafruit_CCS811()
temp = ccs.calculateTemperature()
ccs.tempOffset = temp - 23.0
return ccs
def init_NeoPixels():
pixel_pin = board.D18
num_pixels = 1
ORDER = neopixel.GRB
pixels = neopixel.NeoPixel(pixel_pin,
num_pixels,
brightness=0.2,
auto_write=False,
pixel_order=ORDER)
return pixels
def init_StatusLED():
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(23,GPIO.OUT)
def init_twilio():
account_sid = "AC0d464a30a7c6e7fbacd8dab0441ae589"
auth_token = "0753453d03c18573e2c09ba41f925a8f"
client = Client(account_sid, auth_token)
return client
def internet_on():
    try:
        urllib.request.urlopen('http://216.58.192.142', timeout=1)
        return True
    except urllib.error.URLError:
        return False
def lan_job():
threading.Timer(10.0, lan_job).start()
    if internet_on():  # call the function; the bare name was always truthy
GPIO.output(23,GPIO.HIGH)
else:
GPIO.output(23,GPIO.LOW)
def ccs811_measure():
threading.Timer(1.0, ccs811_measure).start()
while not ccs.available():
pass
temp = ccs.calculateTemperature()
if not ccs.readData():
tt = time.time()
co2 = ccs.geteCO2()
tvoc = ccs.getTVOC()
temp = temp
data = [tt,co2,temp,tvoc]
df.loc[len(df)] = data
def getRiskFactors():
global riskFactor
global textAlerts
threading.Timer(1.0, getRiskFactors).start()
with open('static/data.json') as json_data:
d = json.load(json_data)
highrisk = len(d['highrisk'])>0
if highrisk:
if not riskFactor:
if textAlerts:
alert_all("High risk factors in the crib: "+','.join(d['highrisk']))
else:
alert_on()
else:
alert_on()
else:
alert_off()
riskFactor = highrisk
"""
ENDPOINTS
----------------------------------------------------------
"""
app = Flask(__name__)
ccs = init_CCS811()
pixels = init_NeoPixels()
client = init_twilio()
df = pd.DataFrame(columns=['time','co2','temp','tvoc'])
alert_off()
init_StatusLED()
lan_job()
ccs811_measure()
getRiskFactors()
@app.route('/')
def index():
return render_template('dashboard.html')
@app.route('/text_alerts', methods=['GET', 'POST'])
def text_alerts():
global textAlerts
global riskFactor
if request.method == 'POST':
textAlerts = request.values.get('alerts') == 'true'
if riskFactor and textAlerts:
twilio_alert("There are risk factors in the crib")
resp = jsonify(success=True)
return resp
else:
return jsonify(alerts = textAlerts)
@app.route('/reading')
def reading():
return render_template('updatedchart.html')
@app.route('/air_quality')
def air_quality():
if len(df)>0:
return jsonify(df.iloc[-1].to_dict())
return jsonify()
@app.route('/air_quality_history')
def air_quality_history():
lookback = request.args.get('lookback',default = 3600,type=int)
lookback = time.time()-lookback
in_range = df[df['time']>lookback]
num_results = len(in_range)
if num_results>0:
data = in_range.sort_values(by='time')
temp_data = data['temp'].tolist()
temp_data = [round(x,2) for x in temp_data]
tvoc_data = data['tvoc'].tolist()
time_data = data['time'].tolist()
time_data = [datetime.datetime.fromtimestamp(x).strftime("%X") for x in time_data]
return jsonify(temp=temp_data,tvoc=tvoc_data,labels=time_data)
else:
return jsonify()
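# Hedged usage note (not from the original): lookback is given in seconds, so
# e.g. GET /air_quality_history?lookback=600 returns readings from the last 10 minutes.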
if __name__ == '__main__':
app.run(host='10.19.212.93', debug=True)
|
[
"noreply@github.com"
] |
RutujaJadhav.noreply@github.com
|
d41231abad772ae1e1447c830b6edba0dd5e0d56
|
3d66f781359fa22b02764711ce7fefcdf9b7c82a
|
/zab_parser.py
|
72cbf1642bf25aee78511b3540211ab3ee11b51c
|
[] |
no_license
|
lekhasv/parser_airmonitor
|
0dccdc4167b05ee055676066bcd253cecc0ce01d
|
b63e60d2b6fe169b75358ed84862b65e92aa7f8a
|
refs/heads/master
| 2020-07-31T06:19:29.890927
| 2020-02-13T09:19:58
| 2020-02-13T09:19:58
| 210,513,681
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,698
|
py
|
import datetime
import configparser
import os
import sys
import argparse
def loadConfig(path,radio,chas,data):
now = datetime.datetime.now()
hh = 0
log = ""
if data == '0':
name_file = now.strftime("%Y-%m-%d")+".txt"
else:
name_file = data+".txt"
if chas == "now":
chas = now.strftime("%H")
elif chas == "0":
hh = 1
if radio == "":
nf = ""
else:
nf = radio+"\\"
for key in config["radio"]:
if hh == 1:
chas = config[key]["time"]
if radio == "" and config["radio"][key] == "1":
log += parser_log(config[key]["path_dir"],chas,config[key]["name"],name_file)
elif key == radio:
log += parser_log(config[key]["path_dir"],chas,config[key]["name"],name_file)
if namespace.analysis:
err = analysis(log,radio)
#print(err)
else:
print(log)
if not os.path.exists(config["Setting"]["save_path"]+nf):
os.makedirs(config["Setting"]["save_path"]+nf)
fs = open(config["Setting"]["save_path"]+nf+name_file, "w", encoding='utf-8')
fs.write(log)
fs.close()
def analysis(log,radio):
obraz = []
exobraz = []
ok = 0
err = 0
samp = config[radio]["sample"]
for key in config[samp]:
if key[:4] == 'incl':
obraz.append(config[samp][key])
elif key[:4] == 'excl':
exobraz.append(config[samp][key])
log = log.rstrip('\r')
list_log = log.split("\n")
#print(list_log)
for st in list_log:
for obr in obraz:
if st.find(obr) != -1:
ok += 1
for exobr in exobraz:
if st.find(exobr) != -1:
err += 1
return '{"OK": '+str(ok)+', "ERR": '+str(err)+'}'
def parser_log(path_dir, period, radio, file_name):
period = period.replace("-",",")
pr = period.split(",")
name_file = file_name
if not namespace.noname:
txt_log = radio+"\n"
else:
txt_log = ""
prev_str_time = ""
prev_str_time_1 = ""
try:
f = open(path_dir+name_file, "r", encoding='utf-8')
except:
return ""
for line in f.readlines():
line = line.rstrip('\r\n')
if line.find("Опознан") != -1:
hour_period = False
for hr in pr:
if hr == line[11:13]:
hour_period = True
if hour_period:
str_time = line[11:16]
if (prev_str_time != str_time)&(prev_str_time_1 != str_time):
pos_end = line.find(", Ошибок")
txt_log += line[11:16]+" "+ line[29:pos_end]+"\n"
prev_str_time_1 = line[11:14]+str(int(line[14:16])+1)
prev_str_time = str_time
txt_log += "\n"
return txt_log
def createParser ():
parser = argparse.ArgumentParser()
    parser.add_argument ('-r', '--radio', default='', help='Radio name as given in the config')
    parser.add_argument ('-c', '--chas', default='0', help='Hours to check. Example: 07,08')
    parser.add_argument ('-d', '--data', default='0', help='Date in yyyy-mm-dd format')
    parser.add_argument ('-n', '--noname', action='store_true', default=False, help='Do not add the radio name to the log')
    parser.add_argument ('-a', '--analysis', action='store_true', default=False, help='Analyse the log')
    parser.add_argument ('-s', '--show', action='store_true', default=False, help='Show the radio names from the config')
return parser
if __name__ == "__main__":
parser = createParser()
namespace = parser.parse_args()
pathname = os.path.dirname(sys.argv[0]) +'\\'
    name_config = pathname + "settings.ini"  # e.g. D:\\Обмен\\logger\\
    if not os.path.exists(name_config):
        print("invalid config path.")
exit()
config = configparser.ConfigParser()
config.read(name_config, encoding='UTF-8')
if namespace.show:
for key in config["radio"]:
print(key)
exit()
loadConfig(name_config,namespace.radio,namespace.chas,namespace.data)
exit()
|
[
"noreply@github.com"
] |
lekhasv.noreply@github.com
|
0c4fd9e1c44215905b61eecef8c50b3727e7e016
|
17fe4529fd2772b7d046f039bde140768634d028
|
/misc/cases/web/boss/test_login.py
|
e6fe63f8bf3b6493e346ecdf40111d113bcc2878
|
[] |
no_license
|
namesuqi/tapir
|
b9c21f30bf781eec314f0ae4f57c232f167e4734
|
a5d4e9bb45d8cbf7e41d42d9006b43b753f3ecf1
|
refs/heads/master
| 2020-03-07T04:16:45.213561
| 2018-03-29T08:34:46
| 2018-03-29T08:34:46
| 127,261,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,839
|
py
|
# coding=utf-8
# author: zengyuetian
import unittest
import time
from lib.web.boss.login_page import *
from lib.web.boss.summary_page import *
from lib.web.boss.const import *
from lib.common.HTMLTestRunner import HTMLTestRunner
from lib.common.path import *
from lib.driver import chrome_driver
class TestLogin(unittest.TestCase):
def setUp(self):
# global chrome_driver
print "Test Start ...."
opt = webdriver.ChromeOptions()
opt.add_argument("test-type")
self.chrome_driver = webdriver.Chrome(chrome_options=opt)
self.chrome_driver.implicitly_wait(10)
self.chrome_driver.maximize_window()
def tearDown(self):
print "Test End ..."
self.chrome_driver.close()
def test_login(self):
login_page = LoginPage(self.chrome_driver)
login_page.open()
login_page.input_user_name(WASU_USER)
login_page.input_password(WASU_PASSWORD)
login_page.submit()
time.sleep(1)
def test_logout(self):
login_page = LoginPage(self.chrome_driver)
login_page.open()
login_page.input_user_name(WASU_USER)
login_page.input_password(WASU_PASSWORD)
login_page.submit()
time.sleep(1)
summary_page = SummaryPage(self.chrome_driver)
# time.sleep(5)
summary_page.close_exit_btn()
summary_page.logout()
if __name__ == "__main__":
# unittest.main()
suite = unittest.TestSuite()
suite.addTest(TestLogin("test_logout"))
runner = unittest.TextTestRunner()
runner.run(suite)
#
# testunit.addTest(TestLogin("testLogout"))
#
# fp = open(RESULT_PATH + '/result.html', 'wb')
# runner = HTMLTestRunner(stream=fp,
# title='boss report')
#
# runner.run(testunit)
# fp.close()
|
[
"suqi_name@163.com"
] |
suqi_name@163.com
|
f8d9f2cc232063431ad4ba72a77b12166ca49f65
|
f9a96f02fb59ebb320d48ae7d266a1ba1bb2f7cc
|
/ex7.py
|
7d68bff89bed06e79931998e639987c4d066be79
|
[] |
no_license
|
virtualet/LPTHW
|
eb54eca5471c179652b1466e604419601a3a082c
|
e31b703e835640fc9f04ad99b027bcf6d6c1a746
|
refs/heads/master
| 2021-01-13T01:53:50.027232
| 2014-10-06T22:03:27
| 2014-10-06T22:03:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
print "mary had a little lamb"
print "its fleec was white as %s." % 'snow'
print "and everywhere that marry went"
print "." * 10
end1 = "C"
end2 = "h"
end3 = "e"
end4 = "e"
end5 = "s"
end6 = "e"
end7 = "B"
end8 = "u"
end9 = "r"
end10 = "g"
end11 = "e"
end12 = "r"
print end1 + end2 + end3 + end4 + end5 + end6,
print end7 + end8 + end9 + end10 + end11 + end12
|
[
"echoecho@gmail.com"
] |
echoecho@gmail.com
|
640e384f480d6dde5bb173efa0440e4aac09c5ae
|
49cddcbe8f80b80a6c8fcd2d6fea6586c65a710f
|
/face-det-cam.py
|
3cb06fb17a632c964757e1b7a48f6249f48a88b8
|
[] |
no_license
|
agurani/Face_Detection
|
2a9e432c001157ed81cb6dfbca9c341fd528f304
|
9d8a08ca2c6604366f7d2b98a9f7596afab40a4f
|
refs/heads/master
| 2021-06-08T11:45:56.660187
| 2021-05-10T06:45:18
| 2021-05-10T06:45:18
| 166,808,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,182
|
py
|
"""
ÖNEMLİ NOT: Çalıştırdıktan sonra programı kapatmak için "Q" tuşuna basın.
"""
import cv2
# Cascade yükleme
face_cascade = cv2.CascadeClassifier('haarcascade-frontalface-default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade-eye.xml')
# Tanıma yapacak fonksiyon
def detect(frame):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = frame[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray, 1.1, 3)
for (ex, ey, ew, eh) in eyes:
cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
return frame
# Detection runs on the webcam.
# If more than one camera is attached to your computer, change the 0 to 1.
video_capture = cv2.VideoCapture(0)
while True:
_, frame = video_capture.read()
canvas = detect(frame)
cv2.imshow('Video', canvas)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
agurani.noreply@github.com
|
be4c7a95bb2e3dea3e5cef3331037298bb4cf366
|
cbb5355de11afa6679bcc6157148b64c48fb7d62
|
/subfun.py
|
44ca97e1039db74891d53aa462d73a0c46a73129
|
[] |
no_license
|
SimonWang1995/writeJobsheet
|
19e6577955759e073f42e72580193b88dc28ebb8
|
f49968dea46ad5eeffd9172b0fe49c9f138ff1ea
|
refs/heads/master
| 2021-05-20T04:20:51.924380
| 2020-07-20T13:17:25
| 2020-07-20T13:17:25
| 252,182,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,631
|
py
|
from utils.logsave import *
from tkinter.messagebox import *
from threading import Thread
import re
from random import choice
def getuserinfo(root,kaoqian):
try:
logger.info("开始获取用户信息")
user_info = kaoqian.getuserinfo()
logger.info(user_info)
return user_info
except Exception as e:
logger.error(e)
showinfo(message=e)
root.quit()
def getkaoqian(root,kaoqian):
if not kaoqian.table:
try:
logger.info("开始获取考勤信息")
kaoqian_list = kaoqian.get_kaoqian()
logger.info(kaoqian_list)
return kaoqian_list
except Exception as e:
logger.error(e)
showinfo(message=e)
else:
return kaoqian.table
def get_total(root,kaoqian):
work_days = []
kq_list = getkaoqian(root,kaoqian)
for value in kq_list:
if value[3]=="日班" or value[8]=="加班":
pat = re.compile("(\d+).(\d+).(\d+)")
date_tuple = pat.search(value[0]).groups()
date = '/'.join(date_tuple)
work_days.append(date)
showinfo(message="你这个月共上班 %s 天 , 详细请查看考勤" % len(work_days))
return work_days
def get_productlist(root, jobsheet):
if not jobsheet.productList:
try:
product_list = jobsheet.get_productlist()
return product_list
except Exception as e:
showinfo(message=e)
else:
return jobsheet.productList
def create_treeview(Treeview,page,headers,widths):
treeview = Treeview(page, show="headings", columns=headers)
for col,v in zip(headers,widths):
treeview.column(col,width=v,anchor="center")
treeview.heading(col,text=col)
return treeview
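# Hypothetical usage sketch (names are illustrative, not from the original):
#   from tkinter import ttk
#   tree = create_treeview(ttk.Treeview, page, ["date", "product", "qty"], [90, 160, 60])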
def startwrite(startbutton, treeview, jobsheet):
startbutton.config(bg="yellow", text="Ongoing")
for item in treeview.get_children():
value = treeview.item(item, option="value")
try:
jobsheet.readywt()
jobsheet.startwrite(value)
treeview.delete(item)
treeview.update()
except Exception as e:
print(e)
showinfo(message=e)
startbutton.config(bg="red", text="Stop")
startbutton.config(bg="green", text="Start")
def add_value():
pass
def thread_it(func, *args):
    '''Run the given function in a daemon thread'''
    # create the thread
    t = Thread(target=func, args=args)
    # mark it as a daemon thread (t.setDaemon(True) is the older, deprecated spelling)
    t.daemon = True
    # start the thread
    t.start()
|
[
"1069500569@qq.com"
] |
1069500569@qq.com
|
e85679eb12a298bbe947d702485fe04af1f16d37
|
92f9527ce8c91fb78baa1de13142d2d2dd59c4b5
|
/src/dailypoker/poker/migrations/0004_auto_20171025_1323.py
|
327c18a8f422d0db200511f6decda6718f2ad1c4
|
[] |
no_license
|
wislish/Texas-Holdem-Poker
|
869f8874d5b277ff78b51246098222ef881cb2e2
|
4260b3d3b44bbbf56e3faca851e14c12b5f9db8b
|
refs/heads/master
| 2021-04-26T16:47:29.233769
| 2018-04-02T17:35:52
| 2018-04-02T17:35:52
| 123,970,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-25 17:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('poker', '0003_remove_player_hand_card'),
]
operations = [
migrations.RemoveField(
model_name='player',
name='user',
),
migrations.RemoveField(
model_name='game',
name='players',
),
migrations.AlterField(
model_name='game',
name='pot_size',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='game',
name='round_max_bet',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='game',
name='round_status',
field=models.PositiveSmallIntegerField(default=0),
),
migrations.DeleteModel(
name='Player',
),
]
|
[
"maoan119@gmail.com"
] |
maoan119@gmail.com
|
26c7154286d22389cae93c532751367878955fb2
|
024a2f21e8b1b044f4214090471dc02c411861c5
|
/oms/controllers/wx_meta_purchase_controller.py
|
62c26a121d4acb4b4e3d07ba709783c2ed8df2c2
|
[] |
no_license
|
qiaozhizt/OMS
|
1e8d1d2fb8e7510576b635cbd8bbdc4908ed0fc0
|
43d31095b496d7cf0a2cf1ccda05f7891366aae4
|
refs/heads/master
| 2023-02-28T23:50:21.867461
| 2021-02-03T07:57:43
| 2021-02-03T07:57:43
| 335,470,983
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,349
|
py
|
# -*- coding: utf-8 -*-
import logging
import simplejson as json
import time
import requests
from api.controllers.tracking_controllers import tracking_lab_order_controller
from vendor.models import WxMetaProductRelationship, LensSpecmap
from pg_oms.settings import WX_META_SERVICE, WX_META_PURCHASE
from oms.models.order_models import PurchaseOrderRecords
class wx_meta_purchase_controller:
'''
WX Meta Purchase Controller class
'''
def __init__(self):
self.orderType = 'meta'
        self.deliverType = '顺丰寄付' # courier method (SF Express, freight prepaid)
        self.customerLinkman = '李莲英' # contact person
        self.customerTel = '15518639392' # contact phone
        self.customerProvince ='上海市' # province
        self.customerCity = '上海市' # city
        self.customerCounty = '奉贤区' # district
        self.customerAddress='大叶公路4601号伟星工业园' # address
        self.name = 'A100311' # Zhijing customer code, fixed value A100311
        self.pwd = 'zhijin123' # password used to obtain the token
        self.host = WX_META_SERVICE if isinstance(WX_META_SERVICE, str) else WX_META_SERVICE[0] # Weixing system host, taken from the config file
        self.token_url = WX_META_PURCHASE.get('TOKEN_URL') # '/all/account/login', URL for obtaining the token
        self.wx_meta_prd_url = WX_META_PURCHASE.get('WX_META_PRD_URL') # '/api/product/listCustomerProducts', URL for the stock-lens product list
        self.add_order_url = WX_META_PURCHASE.get('ADD_ORDER_URL') # '/api/order/addOrder', URL for creating an order
        self.order_status_url= WX_META_PURCHASE.get('ORDER_STATUS_URL') # '/api/order/getOrderStatus', URL for the order status
        # stock-lens product mapping; should eventually live in a relation table
self.meta_product_relation = {
'KD56L': '00000000000000008736',
'KD56': '00000000000000002750',
'KDB56-C': '00000000000000002443',
'KD61L': '00000000000000004648',
'KDB61-H-SHMC': '00000000000000004048',
'KD61': '00000000000000002549',
}
    # obtain the auth token
    '''
    ### Parameters
    * `name`: account name
    * `pwd`: password
    '''
def get_headers(self,content_type='application/json'):
headers = {
'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8'
}
try:
url = self.host + self.token_url + "?name=%s&pwd=%s"%(self.name,self.pwd)
logging.debug(url)
result = requests.post(url, headers=headers, timeout=60)
account = json.loads(result.text)
if account['code'] == '200' and account['map']['data']['token']:
headers = {
"Content-Type": content_type,
"X-Auth-Token": account['map']['data']['token']
}
return {"code":0,"headers":headers,"msg":"成功获取token"}
else:
return {"code":-1,"token":"","msg":"伟星接口返回信息变化"}
except Exception as e:
return {"code": -1, "token": "", "msg": "get token failed%s"%str(e)}
    # fetch the stock-lens product list
    '''
    /api/product/listCustomerProducts
    ### Parameters
    none
    ### Response
    ```json
    {
        "success": true,
        "code": "200",
        "message": null,
        "map": {
            "data": [{
                productId: `'xxx'`, sales product id
                brand: `'xxx'`, brand
                lenType: `'xxx'`, lens type
                sphStart: `0.00`, sphere start value
                sphEnd: `0.00`, sphere end value
                cylStart: `0.00`, cylinder start value
                cylEnd: `0.00`, cylinder end value
                addStart: `0.00`, addition start value
                addEnd: `0.00`, addition end value
                productName: `'xxx'`, sales product name
                zsl: `'xxx'`, refractive index
                dl: `'xxx'`, category
                price: `100.0`, unit price
                rate: `1.0`, discount
            }]
        }
    }
    ```
    '''
def list_wx_meta_products(self):
content_type = 'application/x-www-form-urlencoded;'
try:
result = self.get_headers(content_type)
logging.debug(result)
            if (result['code'] == -1): # propagate the error
return result
headers = result['headers']
url = self.host + self.wx_meta_prd_url
result = requests.post(url, headers=headers, timeout=60)
response = json.loads(result.text)
if response['code'] == "200":
return {"code": 0, "data": response['map']['data'], "msg": "success"}
else:
return {"code": -1, "msg": response['message']}
except Exception as e:
return {"code": -1, "msg": "获取车房列表失败,异常信息:%s" % str(e)}
    # create an order
    '''
    * `addOrderDTOStr`: JSON string, with this structure:
        * orderType: order type, `meta/house` for stock lens / Rx (surfaced) lens
        * apiOrderNo: third-party order number
        * deliveryDate: `'yyyy-MM-dd'`, requested ship date
        * deliverType: `'圆通'`, courier method
        * customerLinkman: `'张三'`, contact person
        * customerTel: `'152xxxx'`, contact phone
        * customerProvince: `'浙江省'`, province
        * customerCity: `'台州市'`, city
        * customerCounty: `'xxx'`, district
        * customerAddress: `'xxx'`, address
        * note: `'xxx'`, remarks
        * items: list of order line items
            * type: `'meta'`, one of meta/house/frame, for stock lens / Rx lens / frame
            * productId: `'J123'`, sales product id
            * quantity: `1`, quantity
            * brand: `'白包装'`, brand
            * lr: `'l'`, left/right eye
            * sphval: `'1.0'`, sphere
            * cylval: `'1.0'`, cylinder
            * addval: `'1.0'`, addition
            * lentype: `'近视'`, lens type
            * axis: `1`, axis
            * cbase: `'1'`, base curve
            * coloring:`'wf-01'`, tint
            * prismA:`'1'`, prism a
            * directionA:`'内'`, direction a
            * prismB:`'1'`, prism b
            * directionB:`'上'`, direction b
            * isAsse:`'Y'`, whether to assemble
            * isCut:`'N'`, whether to edge/cut
            * framePd:`'65'`, pupillary distance
            * frameIpd:`''`, near pupillary distance
            * framePh:`''`, pupil height
            * glassDistance:`''`, vertex distance
            * frontAngle:`''`, pantoscopic tilt
            * moveIn:`''`, decentration inward
            * moveOut:`''`, decentration outward
            * processes: `{'镀膜': '蓝光'}`, JSON map of `process type: process id`
    ### Response
    ```json
    {
        "success": true,
        "code": "200",
        "message": null,
        "map": {
            "data": null
        }
    }
    ```
    '''
def add_meta_order(self, dict_data, delveryDate="", brand='白包装', isAsse="N", isCut="N"):
result = self.get_headers()
        if(result['code'] == -1): # propagate the error
return result
headers = result['headers']
try:
wx_meta_lens = LensSpecmap.objects.filter(inner_code=dict_data.get('rsku'), active='ACTIVE', vendor=dict_data.get('vendor'))
#wx_meta_lens = WxMetaProductRelationship.objects.filter(sku=dict_data.get('rsku'))
if len(wx_meta_lens) == 0 or len(wx_meta_lens) > 1:
return {"code": -1, "data": '', "msg": "未找到对应关系"}
wx_meta_len = wx_meta_lens[0]
l_product_id = wx_meta_len.outer_code
r_product_id = wx_meta_len.outer_code
            if(not delveryDate): # default: request same-day shipping
delveryDate = time.strftime("%Y-%m-%d", time.localtime())
order_dict = {
"orderType": self.orderType,#默认现片
"apiOrderNo": dict_data.get('order_number', ''),
"deliveryDate": delveryDate,
"deliverType": self.deliverType,
"customerLinkman": self.customerLinkman,
"customerTel": self.customerTel,
"customerProvince": self.customerProvince,
"customerCity": self.customerCity,
"customerCounty":self.customerCounty,
"customerAddress": self.customerAddress,
"note": dict_data.get('comments', ''),
"items": [
{
"type": self.orderType,
"productId": l_product_id,
"quantity": 1,
"brand": brand,
"lr": "l",
"sphval": dict_data.get('lsph', '0'),
"cylval": dict_data.get('lcyl', '0'),
"lentype": dict_data.get('l_lens_type', ''),
"axis": dict_data.get('laxis', '0'),
"addval": "0.00",
"cbase": "",
"coloring": "",
"prismA": 0,
"directionA": "",
"prismB": 0,
"directionB": "",
"isAsse": isAsse, #是否装配
"isCut": isCut, #切边
"framePd": 0,
"frameIpd": "",
"framePh": "",
"glassDistance": "",
"frontAngle": "", #前倾角 无
"moveIn": "",
"moveOut": "",
"processes": {} #工艺,无
},
{
"type": self.orderType,
"productId": r_product_id,
"quantity": 1,
"brand": brand,
"lr": "r",
"sphval": dict_data.get('rsph', '0'),
"cylval": dict_data.get('rcyl', '0'),
"lentype": dict_data.get('r_lens_type', ''),
"axis": dict_data.get('raxis', '0'),
"addval": "0.00",
"cbase": "",
"coloring": "",
"prismA": 0,
"directionA": "",
"prismB": 0,
"directionB": "",
"isAsse": isAsse, # 是否装配
"isCut": isCut, # 切边
"framePd": 0,
"frameIpd": "",
"framePh": "",
"glassDistance": "",
"frontAngle": "", # 前倾角 无
"moveIn": "",
"moveOut": "",
"processes": {} # 工艺,无
},
]
}
#{"code":"500","map":{"data":null},"message":"没有价格!产品ID:00000000000000008736 品牌:白包装 光型:近视 球:1 柱:2 加光:0","success":false}
url = self.host + self.add_order_url
result = requests.post(url, data=json.dumps(order_dict), headers=headers, timeout=60)
response = json.loads(result.text)
if response['code'] == "200":
                # persist the order record
purchase_order_records = PurchaseOrderRecords.objects.filter(lab_number=dict_data.get('order_number', ''))
if len(purchase_order_records) > 0:
pur_order_records = purchase_order_records[0]
pur_order_records.order_data = json.dumps(order_dict)
pur_order_records.vendor = '10'
pur_order_records.save()
else:
pur_order_records = PurchaseOrderRecords()
pur_order_records.lab_number = dict_data.get('order_number', '')
pur_order_records.order_data = json.dumps(order_dict)
pur_order_records.vendor = '10'
pur_order_records.save()
return {"code": 0, "data": response['map']['data'], "msg": "success!"}
else:
return {"code": -1, "data": response['map']['data'], "msg": response['message']}
except Exception as e:
return {"code": -1, "data": "", "msg": "生成订单失败,异常信息:%s"%str(e)}
    # fetch the Weixing production status for a given Weixing order number
    '''
    ### Parameters
    * `orderNo`: order number (not the order ID)
    ### Response
    ```json
    {
        "success": true,
        "code": "200",
        "message": null,
        "map": {
            "data": {
                "status": "1",
                "deliverNo": "123",
                "deliverCompany": "圆通"
            }
        }
    }
    ```
    #### Order status codes
    * "-1": deleted
    * "0": pending import
    * "1": pending review
    * "23": pending confirmation
    * "25": terminated
    * "27": cancelled
    * "30": in production
    * "35": edging
    * "37": document printing
    * "38": partially shipped
    * "40": completed
    '''
def getOrderStatus(self,orderNo):
if(orderNo == ""):
return {"code": -1,"msg": "订单号不能为空"}
content_type ='application/x-www-form-urlencoded;'
try:
result = self.get_headers(content_type)
logging.debug(result)
            if (result['code'] == -1): # propagate the error
return result
headers = result['headers']
url = self.host + self.order_status_url + "?orderNo=%s"%orderNo
result = requests.post(url, headers=headers, timeout=60)
response = json.loads(result.text)
if response['code'] == "200":
return {"code":0,"data":response['map']['data'],"msg":"success"}
else:
return {"code":-1,"msg":response['message']}
except Exception as e:
return {"code": -1, "msg": "获取订单状态错误,异常信息:%s"%str(e)}
    # pack the request data
def pack_request_value(self, lab):
data_dict = {}
try:
            if float(lab.od_sph) <= 0:
                r_lens_type = '近视'  # myopia
            else:
                r_lens_type = '老花'  # presbyopia
            if float(lab.os_sph) <= 0:
                l_lens_type = '近视'  # myopia
            else:
                l_lens_type = '老花'  # presbyopia
if int(lab.vendor) > 9:
act_lens_sku = lab.act_lens_sku[3:]
else:
act_lens_sku = lab.act_lens_sku[2:]
data_dict['order_number'] = lab.lab_number
data_dict['vendor'] = lab.vendor
data_dict['rsku'] = act_lens_sku
data_dict['rsph'] = lab.od_sph
data_dict['rcyl'] = lab.od_cyl
data_dict['raxis'] = lab.od_axis
data_dict['r_lens_type'] = r_lens_type
data_dict['lsku'] = act_lens_sku
data_dict['lsph'] = lab.os_sph
data_dict['lcyl'] = lab.os_cyl
data_dict['laxis'] = lab.os_axis
data_dict['l_lens_type'] = l_lens_type
data_dict['comments'] = lab.comments
return data_dict
except Exception as e:
return data_dict
    # handle the vendor response
def analysis_result(self, request, lbo, purchase_order, res):
stat_dict = {}
try:
if res['code'] == 0:
purchase_order.vendor_order_reference = res['data']['orderNo']
purchase_order.save()
lbo.vendor_order_reference = res['data']['orderNo']
lbo.save()
                # write a tracking log entry
tloc = tracking_lab_order_controller()
tloc.tracking(lbo, request.user, 'LENS_PURCHASE', '镜片采购',
res['data']['orderNo'])
                stat_dict[lbo.lab_number] = {'Success': True, 'Message': 'order placed successfully'}
else:
stat_dict[lbo.lab_number] = {'Success': False, 'Message': res['msg']}
return stat_dict
except Exception as e:
stat_dict[lbo.lab_number] = {'Success': False, 'Message': e}
return stat_dict
|
[
"250849686@qq.com"
] |
250849686@qq.com
|
77485e7d02d6ad9aee29932b1412cd93cec7b922
|
dcc6e06cac54e16e9a4387237f409fbd95c14a07
|
/dotfiles/newsboat/split-opml-by-tags.py
|
454f620ae46f429a6c5b85b719ab3869227491ed
|
[
"MIT"
] |
permissive
|
simshadows/sims-dotfiles
|
09ee62581ca52abd05500cbba9b252d39a2f1cfb
|
ce664af873377d74370f5d1398d2165348de33a3
|
refs/heads/master
| 2023-04-08T19:09:36.028682
| 2023-03-27T06:40:02
| 2023-03-27T06:40:02
| 103,027,299
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,020
|
py
|
#!/usr/bin/env python3
import os
import sys
import shutil
import collections
import re
# Reserved tags
NO_TAGS_TAG = "NO-TAGS"
ORIGINAL_TAG = "ORIGINAL"
src_dir = sys.argv[1] # Good enough arg parsing for now
# We read these:
orig = os.path.join(src_dir, "autogenerated-opml/ORIGINAL.xml")
urls = os.path.join(src_dir, ".newsboat/urls")
# And we write our OPML files to here:
dumpto = os.path.join(src_dir, "autogenerated-opml")
print(" Reading tags...")
# Populate collections of feeds with particular tags
# tags[<tag-name>] = {<feed-url-1>, <feed-url-2>, ...}
tags = collections.defaultdict(set)
no_tags = set()
all_urls = set()
with open(urls, "r") as f:
for line in f:
line = line.strip()
if len(line) == 0:
continue
assert line.startswith("http")
substr1 = line.split(maxsplit=1)
substr1[0] = substr1[0].replace("&", "&") # A HACK.............
if substr1[0] in all_urls:
raise ValueError("Found duplicate: {}".format(substr1[0]))
all_urls.add(substr1[0])
if len(substr1) == 1:
no_tags.add(substr1[0])
elif len(substr1) == 2:
# The .split() op below on:
# "some tag1" "some tag2"
# would return something like:
# ['', 'some tag1', ' ', 'some tag2', '']
skip = True # Skip when empty string expected
has_tags = False
for s in substr1[1].split("\""):
s = s.strip()
assert (skip and len(s) == 0) or (not skip and len(s) > 0)
if not skip and not s.startswith("~"):
s = s.replace(" ", "-").replace("_", "-").lower()
s = re.sub(r'[^a-z0-9-]+', '', s)
assert(len(s) > 0)
tags[s].add(substr1[0])
has_tags = True
skip = not skip
assert not skip # We should finish on an empty string
if not has_tags:
no_tags.add(substr1[0])
else:
raise ValueError
# Sanity check on the set union
assert no_tags.union(*tags.values()) == all_urls
print(" URLs found: {}".format(len(all_urls)))
assert (NO_TAGS_TAG not in tags) and (ORIGINAL_TAG not in tags) # Reserved words
if len(no_tags) > 0:
tags[NO_TAGS_TAG] = no_tags
# Feedback on set sizes
print(" Tags found:")
for k, v in tags.items():
assert len(v) > 0
print(" '{}' with {} URLs.".format(k, len(v)))
# Create the new OPML files
print(" Writing new OPML files...")
with open(orig, "r") as orig_f:
for tag, urls_to_keep in tags.items():
newopml = os.path.join(dumpto, tag + ".xml")
urls_to_remove = all_urls - urls_to_keep
orig_f.seek(0)
print(" {}".format(newopml))
with open(newopml, "w") as new_f:
for line in orig_f:
if all(("xmlUrl=\"{}\"".format(x) not in line) for x in urls_to_remove):
new_f.write(line)
|
[
"contact@simshadows.com"
] |
contact@simshadows.com
|
1d227f48078109297a8c011b71885c3e33db0946
|
8e07b5b7a8dd38e0ef2c7ffc97d0392d886f32e6
|
/venv/Lib/site-packages/mypy/typeshed/third_party/2and3/paramiko/win_pageant.pyi
|
388c2e9d7a61872a1866ea01df0f94007bc377cd
|
[] |
no_license
|
RodrigoNeto/cursopythonyt
|
fc064a2e6106324e22a23c54bdb9c31040ac9eb6
|
279dad531e21a9c7121b73d84fcbdd714f435e7e
|
refs/heads/master
| 2023-07-03T00:54:09.795054
| 2021-08-13T12:42:24
| 2021-08-13T12:42:24
| 395,646,798
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
pyi
|
import ctypes.wintypes
import sys
assert sys.platform == "win32"
win32con_WM_COPYDATA: int
def can_talk_to_agent(): ...
class COPYDATASTRUCT(ctypes.Structure): ...
class PageantConnection:
def __init__(self) -> None: ...
def send(self, data: bytes) -> None: ...
def recv(self, n: int) -> bytes: ...
def close(self) -> None: ...
|
[
"rodrigoneto.forseti@gmail.com"
] |
rodrigoneto.forseti@gmail.com
|
241d12f0589080f83b03b869cccac1c92f023f75
|
5f6fa94e87f7607f85d02482a3fee09d4bf02bc2
|
/db/mysqlclient.py
|
de8e363b2e47404f86b42c51065dc23937dd7a43
|
[] |
no_license
|
13718422048/proxypool
|
802a41335fdc4aa4919cebb4ad56928baa55f6f4
|
eb7e93965c60c1680791fb4f595326566f3207af
|
refs/heads/master
| 2022-04-07T13:14:21.307534
| 2020-03-18T09:25:19
| 2020-03-18T09:25:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
#!/usr/bin/env python
#coding:utf-8
"""
Author: --<>
Purpose:
Created: 2019/7/10
"""
import MySQLdb  # the PyPI "mysqlclient" package installs the MySQLdb module
class CMysqlClient(object):
    def __init__(self, *args, **kwargs):
        # constructor
        self.user = kwargs.get("user", None)       # dict.get takes a positional default, not d=None
        self.password = kwargs.get("password", None)
        self.ip = kwargs.get("ip", None)
        self.database = kwargs.get("database", None)  # was kwargs.get("ip", ...), a copy-paste bug
    def connect(self):
        """
        Connect to the database (minimal sketch; the original body was empty)
        """
        self.conn = MySQLdb.connect(host=self.ip, user=self.user,
                                    passwd=self.password, db=self.database)
    def close(self):
        # hypothetical completion: the original file ended with a dangling "def"
        if getattr(self, "conn", None):
            self.conn.close()
|
[
"1640498323@qq.com"
] |
1640498323@qq.com
|
f0291c27c5720c0abd84b3e5731be796e60a259c
|
d65e041389d251bcdc861bfe6c33c8afad972b0e
|
/src/python/day_four/part_two.py
|
3b35b1c3f6c18e7d53084db7a1de0341e883bca9
|
[] |
no_license
|
DSmedley1989/adventofcode2020
|
674e23df22ccfd75f40592e91e5b57d7cba0d9d3
|
e9a447fc47779e727087fda4005b436f43403725
|
refs/heads/master
| 2023-01-31T21:01:52.736909
| 2020-12-07T22:57:56
| 2020-12-07T22:57:56
| 317,817,992
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,932
|
py
|
from part_one import required_fields, problem_input_raw, parse_passports, validate_passport
import re
invalid_passports_raw = """
eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946
hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2007
"""
valid_passports_raw = """
pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f
eyr:2029 ecl:blu cid:129 byr:1989
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
hcl:#888785
hgt:164cm byr:2001 iyr:2015 cid:88
pid:545766238 ecl:hzl
eyr:2022
iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719
"""
def validate_birth_year(year):
y = int(year)
return y >= 1920 and y <= 2002
def validate_issue_year(year):
y = int(year)
return y >= 2010 and y <= 2020
def validate_expiration_year(year):
y = int(year)
return y >= 2020 and y <= 2030
def validate_height(height):
exp = re.match(r'^([0-9]*)(in|cm)$', height)
if exp is None:
return False
num = int(exp.group(1))
unit = exp.group(2)
if unit == 'cm':
return num >= 150 and num <= 193
elif unit == 'in':
return num >= 59 and num <= 76
else:
return False
def validate_hair_colour(colour):
return re.match(r'^#[0-9a-f]{6}$', colour) is not None
def validate_eye_colour(colour):
return colour in [
'amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'
]
def validate_passport_id(pass_id):
    return re.match(r'[0-9]{9}$', pass_id) is not None
validations = {
'byr': validate_birth_year,
'iyr': validate_issue_year,
'eyr': validate_expiration_year,
'hgt': validate_height,
'hcl': validate_hair_colour,
'ecl': validate_eye_colour,
'pid': validate_passport_id
}
def validate_fields(passport):
for field in required_fields:
if not validations[field](passport[field]):
return False
return True
if __name__ == '__main__':
invalid_passports = parse_passports(invalid_passports_raw)
valid_passports = parse_passports(valid_passports_raw)
test_invalid_count = 0
test_valid_count = 0
for passport in invalid_passports:
if not validate_fields(passport):
test_invalid_count += 1
for passport in valid_passports:
if validate_fields(passport):
test_valid_count += 1
if test_invalid_count != 4 or test_valid_count != 4:
print("TEST FAILURE!")
print(test_invalid_count)
print(test_valid_count)
exit()
valid_count = 0
pass_with_fields = [
passport for passport in parse_passports(problem_input_raw) if validate_passport(passport)
]
for passport in pass_with_fields:
if validate_fields(passport):
valid_count += 1
print(valid_count)
|
[
"daniel.smedley@footballradar.com"
] |
daniel.smedley@footballradar.com
|
3fdfd6e33e65ec14df4bae6e7af73d7016887185
|
54665756c8ef8abb86ea92e927100ae80be60e0f
|
/application.py
|
ce61d7cc3bef74fb06a59578259c2d50724f2ef8
|
[] |
no_license
|
Tamerlan74/eb-app-1
|
e5e667b64b6fa9eb3afd39e4c5395f7ad14bddbf
|
e4db117b0e5cbeebec3ec92a4c660f04ae71fad1
|
refs/heads/main
| 2023-07-16T13:36:04.330482
| 2021-09-02T13:20:59
| 2021-09-02T13:20:59
| 402,420,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,978
|
py
|
import logging.handlers
# Create logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Handler
LOG_FILE = '/tmp/sample-app.log'
handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes=1048576, backupCount=5)
handler.setLevel(logging.INFO)
# Formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Add Formatter to Handler
handler.setFormatter(formatter)
# add Handler to Logger
logger.addHandler(handler)
welcome = """
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<!--
Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>Welcome</title>
<style>
body {
color: #ffffff;
background-color: #E0E0E0;
font-family: Arial, sans-serif;
font-size:14px;
-moz-transition-property: text-shadow;
-moz-transition-duration: 4s;
-webkit-transition-property: text-shadow;
-webkit-transition-duration: 4s;
text-shadow: none;
}
body.blurry {
-moz-transition-property: text-shadow;
-moz-transition-duration: 4s;
-webkit-transition-property: text-shadow;
-webkit-transition-duration: 4s;
text-shadow: #fff 0px 0px 25px;
}
a {
color: #0188cc;
}
.textColumn, .linksColumn {
padding: 2em;
}
.textColumn {
position: absolute;
top: 0px;
right: 50%;
bottom: 0px;
left: 0px;
text-align: right;
padding-top: 11em;
background-color: #1BA86D;
background-image: -moz-radial-gradient(left top, circle, #6AF9BD 0%, #00B386 60%);
background-image: -webkit-gradient(radial, 0 0, 1, 0 0, 500, from(#6AF9BD), to(#00B386));
}
.textColumn p {
width: 75%;
float:right;
}
.linksColumn {
position: absolute;
top:0px;
right: 0px;
bottom: 0px;
left: 50%;
background-color: #E0E0E0;
}
h1 {
font-size: 500%;
font-weight: normal;
margin-bottom: 0em;
}
h2 {
font-size: 200%;
font-weight: normal;
margin-bottom: 0em;
}
ul {
padding-left: 1em;
margin: 0px;
}
li {
margin: 1em 0em;
}
</style>
</head>
<body id="sample">
<div class="textColumn">
<h1>IT WORKED!!</h1>
<p>Your first AWS Elastic Beanstalk Python Application is now running on your own dedicated environment in the AWS Cloud</p>
<p>This environment is launched with Elastic Beanstalk Python Platform</p>
</div>
<div class="linksColumn">
<h2>What's Next?</h2>
<ul>
<li><a href="http://docs.amazonwebservices.com/elasticbeanstalk/latest/dg/">AWS Elastic Beanstalk overview</a></li>
<li><a href="http://docs.amazonwebservices.com/elasticbeanstalk/latest/dg/index.html?concepts.html">AWS Elastic Beanstalk concepts</a></li>
<li><a href="http://docs.amazonwebservices.com/elasticbeanstalk/latest/dg/create_deploy_Python_django.html">Deploy a Django Application to AWS Elastic Beanstalk</a></li>
<li><a href="http://docs.amazonwebservices.com/elasticbeanstalk/latest/dg/create_deploy_Python_flask.html">Deploy a Flask Application to AWS Elastic Beanstalk</a></li>
<li><a href="http://docs.amazonwebservices.com/elasticbeanstalk/latest/dg/create_deploy_Python_custom_container.html">Customizing and Configuring a Python Container</a></li>
<li><a href="http://docs.amazonwebservices.com/elasticbeanstalk/latest/dg/using-features.loggingS3.title.html">Working with Logs</a></li>
</ul>
</div>
</body>
</html>
"""
def application(environ, start_response):
path = environ['PATH_INFO']
method = environ['REQUEST_METHOD']
if method == 'POST':
try:
if path == '/':
request_body_size = int(environ['CONTENT_LENGTH'])
request_body = environ['wsgi.input'].read(request_body_size)
logger.info("Received message: %s" % request_body)
elif path == '/scheduled':
logger.info("Received task %s scheduled at %s", environ['HTTP_X_AWS_SQSD_TASKNAME'],
environ['HTTP_X_AWS_SQSD_SCHEDULED_AT'])
except (TypeError, ValueError):
logger.warning('Error retrieving request body for async work.')
response = ''
else:
response = welcome
start_response("200 OK", [
("Content-Type", "text/html"),
("Content-Length", str(len(response)))
])
return [bytes(response, 'utf-8')]
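# Hedged local-run sketch (not part of the original file): Elastic Beanstalk
# serves this WSGI callable itself, but for a quick local smoke test one could
# use the standard-library wsgiref server:
#
#     from wsgiref.simple_server import make_server
#     make_server("", 8000, application).serve_forever()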
|
[
"noreply@github.com"
] |
Tamerlan74.noreply@github.com
|
41c62ffb7c314661ff39786e0248172dd70cdeda
|
73a5eca1ddee1d74a3c2be9ca4e5e67ebe3d16f7
|
/src/tensortools/regress2d.py
|
3ea96986cf2c264b45bda84fb41642dd8a2b6e8a
|
[
"MIT"
] |
permissive
|
ychnlgy/Chebyshev-Lagrange
|
34346692a2925cde620377e8fbcb8d588623fac7
|
74292e72b83f992d6c42a2f2db04dfdce5a52aea
|
refs/heads/master
| 2020-05-23T06:20:10.831035
| 2020-02-12T16:31:38
| 2020-02-12T16:31:38
| 186,661,893
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,429
|
py
|
import torch
def regress2d(x, y):
'''
Input:
x - torch Tensor of shape (d, n), d different sets of n x-points.
y - torch Tensor of shape (d, n), corresponding y-values.
Output:
w - torch Tensor of shape (d, 1), linearly least square
weights to map x to y.
b - torch Tensor of shape (d, 1), linearly least square
bias to map x to y.
'''
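    # Closed-form simple linear regression, applied independently to each of the
    # d rows: w = sum((x - x̄)(y - ȳ)) / sum((x - x̄)^2) and b = ȳ - w * x̄.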
xm = x.mean(dim=1).unsqueeze(1)
ym = y.mean(dim=1).unsqueeze(1)
w = _calc_w(x, y, xm, ym)
b = _calc_b(x, y, w, xm, ym)
return w, b
# === PRIVATE ===
def _calc_b(x, y, w, xm, ym):
return ym-w*xm
def _calc_w(x, y, xm, ym):
dx = x - xm
dy = y - ym
num = (dx*dy).sum(dim=1).unsqueeze(1)
den = (dx**2).sum(dim=1).unsqueeze(1)
return num/den
if __name__ == "__main__":
# Tests for 1d vectors
wt = [2.5, -0.2]
bt = [-1.25, 50]
def f(x):
y = torch.zeros(2, 100)
y[0] = wt[0]*x +bt[0] + torch.zeros_like(x).normal_(mean=0, std=0.5)
y[1] = wt[1]*x +bt[1] + torch.zeros_like(x).normal_(mean=0, std=0.5)
return y
x = torch.rand(1, 100) * 10 - 5
y = f(x)
w, b = regress2d(x, y)
def similar(v, t, eps=0.1):
print("Output: %.3f, target: %.3f" % (v, t))
return abs(v-t) < eps * abs(t)
for i in range(2):
assert similar(w[i].item(), wt[i])
assert similar(b[i].item(), bt[i])
|
[
"ychnlgy@gmail.com"
] |
ychnlgy@gmail.com
|
9189010707a10a16ab13b7ec406f8795e10748f4
|
80429abf48f2a5cbd93db95416400b0a3c5f12f4
|
/project2/problem_6.py
|
9e014ea8a5bdfd9acdda38ced559af2e810d6843
|
[] |
no_license
|
andrijana-kurtz/Udacity_Data_Structures_and_Algorithms
|
9b8c452c5d065a4c18419a3de14234887468c515
|
6bf39e5c20f9c8adef83d6484b9204bbfcb0717c
|
refs/heads/main
| 2023-04-27T14:37:55.978222
| 2021-05-14T02:38:07
| 2021-05-14T02:38:07
| 367,224,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,532
|
py
|
"""
Union and Intersection of Two Linked Lists
Your task for this problem is to fill out the union and intersection functions. The union of two sets A and B is the set of elements which are in A, in B, or in both A and B. The intersection of two sets A and B, denoted by A ∩ B, is the set of all objects that are members of both the sets A and B.
You will take in two linked lists and return a linked list that is composed of either the union or intersection, respectively. Once you have completed the problem you will create your own test cases and perform your own run time analysis on the code.
"""
class Node:
def __init__(self, value):
self.value = value
self.next = None
def __repr__(self):
return str(self.value)
class LinkedList:
def __init__(self):
self.head = None
def __iter__(self):
self.node = self.head
return self
def __next__(self):
if self.node:
node = self.node
self.node = self.node.next
return node
else:
raise StopIteration
def __str__(self):
cur_head = self.head
out_string = ""
while cur_head:
out_string += str(cur_head.value) + " -> "
cur_head = cur_head.next
return out_string
def append(self, value):
if self.head is None:
self.head = Node(value)
return
node = self.head
while node.next:
node = node.next
node.next = Node(value)
def size(self):
size = 0
node = self.head
while node:
size += 1
node = node.next
return size
def union(llist_1, llist_2):
set1 = {n.value for n in llist_1}
set2 = {n.value for n in llist_2}
union = set1.union(set2)
result_llist = LinkedList()
for value in union:
result_llist.append(value)
return result_llist
def intersection(llist_1, llist_2):
set1 = {n.value for n in llist_1}
set2 = {n.value for n in llist_2}
isec = set1.intersection(set2)
result_llist = LinkedList()
for value in isec:
result_llist.append(value)
return result_llist
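# --- Editor's helper (hypothetical, not part of the original solution): set
# iteration order is implementation-defined, so the test cases below compare
# sorted values instead of relying on one particular " -> " string.
def sorted_values(llist):
    return sorted(node.value for node in llist)
# Runtime analysis: building each set is O(n + m) on average for input lists of
# lengths n and m; set union/intersection and rebuilding the result list are
# also linear, so both functions run in O(n + m) time and space.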
# Test case 1
linked_list_1 = LinkedList()
linked_list_2 = LinkedList()
element_1 = [3,2,4,35,6,65,6,4,3,21]
element_2 = [6,32,4,9,6,1,11,21,1]
for i in element_1:
linked_list_1.append(i)
for i in element_2:
linked_list_2.append(i)
assert sorted_values(union(linked_list_1, linked_list_2)) == [1, 2, 3, 4, 6, 9, 11, 21, 32, 35, 65]
assert sorted_values(intersection(linked_list_1, linked_list_2)) == [4, 6, 21]
# Test case 2
linked_list_3 = LinkedList()
linked_list_4 = LinkedList()
element_1 = [3,2,4,35,6,65,6,4,3,23]
element_2 = [1,7,8,9,11,21,1]
for i in element_1:
linked_list_3.append(i)
for i in element_2:
linked_list_4.append(i)
assert sorted_values(union(linked_list_3, linked_list_4)) == [1, 2, 3, 4, 6, 7, 8, 9, 11, 21, 23, 35, 65]
assert sorted_values(intersection(linked_list_3, linked_list_4)) == []
# Test case 3 - Two empty lists
linked_list_A = LinkedList()
linked_list_B = LinkedList()
element_1 = []
element_2 = []
for i in element_1:
linked_list_A.append(i)
for i in element_2:
linked_list_B.append(i)
assert sorted_values(union(linked_list_A, linked_list_B)) == []
assert sorted_values(intersection(linked_list_A, linked_list_B)) == []
# Test case 4 - Two lists, of which one empty
linked_list_A = LinkedList()
linked_list_B = LinkedList()
element_1 = [1,2,3,4]
element_2 = []
for i in element_1:
linked_list_A.append(i)
for i in element_2:
linked_list_B.append(i)
assert sorted_values(union(linked_list_A, linked_list_B)) == [1, 2, 3, 4]
assert sorted_values(intersection(linked_list_A, linked_list_B)) == []
# Test case 5 - Two same lists
linked_list_A = LinkedList()
linked_list_B = LinkedList()
element_1 = [1,2,3,4]
element_2 = [1,2,3,4]
for i in element_1:
linked_list_A.append(i)
for i in element_2:
linked_list_B.append(i)
assert sorted_values(union(linked_list_A, linked_list_B)) == [1, 2, 3, 4]
assert sorted_values(intersection(linked_list_A, linked_list_B)) == [1, 2, 3, 4]
# Test case 6 - Two lists with repeated elements
linked_list_A = LinkedList()
linked_list_B = LinkedList()
element_1 = [2,2,2]
element_2 = [3,3,3]
for i in element_1:
linked_list_A.append(i)
for i in element_2:
linked_list_B.append(i)
assert sorted_values(union(linked_list_A, linked_list_B)) == [2, 3]
assert sorted_values(intersection(linked_list_A, linked_list_B)) == []
|
[
"andrijana.kurtz@gmail.com"
] |
andrijana.kurtz@gmail.com
|
7d6073ad7cb84d1e921bb255869454481c50ca3d
|
f1cbbbf53d25b9f03134dcbf8ceb0074b0cb3ab9
|
/customuser/forms.py
|
40dfee5d24e2d829f07091de0ce4c1af666b1636
|
[] |
no_license
|
skiboorg/i_temp
|
b8be438a0ca2e17d15ba518188dc9c60d918cf25
|
742366985fe409f0c2ad59025cac9bf4cc63da76
|
refs/heads/master
| 2023-01-04T06:32:53.359662
| 2020-11-02T17:29:12
| 2020-11-02T17:29:12
| 291,662,104
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from .models import *
class SignUpForm(UserCreationForm):
    email = forms.EmailField(max_length=254, help_text='Required. Inform a valid email address.')
    class Meta:
        model = User
        fields = ('email', 'username', 'password1', 'password2', )
        error_messages = {
            'email': {
                'unique': "This email address is already in use by someone else",
            },
        }
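# Editor's sketch (hypothetical view wiring, not part of this app):
#
#   def signup(request):
#       form = SignUpForm(request.POST or None)
#       if request.method == 'POST' and form.is_valid():
#           form.save()  # creates the user with a properly hashed password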
|
[
"11@11.11"
] |
11@11.11
|
ed1e906f2466f24dbf42f890104a0c0f9e20ef61
|
eb4731411b97ba2028682c367005183a8039b2e2
|
/eval_calculator.py
|
d2e433576c91e29b8eff507221125cab20e48db7
|
[] |
no_license
|
rezvanieh/practice_python
|
1f603c8132b7da4e8a78979a53ba4bb7d5bd2887
|
ec7f627e3bb3af9ced8cba6f71f4e70bd1592c70
|
refs/heads/master
| 2023-07-22T08:27:56.151757
| 2021-09-02T07:12:31
| 2021-09-02T07:12:31
| 402,309,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
import re
print("Using Eval Function for Calculation")
print("Type 'quit' to exit\n")
previous = 0
run = True
def perform_math():
global run
global previous
equation = ""
if previous == 0:
equation = input("ٍEnter equation: ")
else:
equation = input(str(previous))
if equation == 'quit':
print('Finished...')
run = False
else:
        # Strip letters, commas, colons, quotes and spaces; keep digits,
        # operators, parentheses and '.' so decimals and grouping survive.
        equation = re.sub('[a-zA-Z,:" ]', '', equation)
if previous == 0:
previous = eval(equation)
else:
previous = eval(str(previous) + equation)
while run:
perform_math()
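# Editor's sketch (not part of the original; Python 3.8+ for ast.Constant):
# eval() will execute any expression that survives the regex above, e.g.
# "9**9**9". A safer variant parses the input and whitelists arithmetic nodes:
import ast
import operator
_OPS = {ast.Add: operator.add, ast.Sub: operator.sub,
        ast.Mult: operator.mul, ast.Div: operator.truediv,
        ast.USub: operator.neg}
def safe_eval(expr):
    def _ev(node):
        if isinstance(node, ast.Expression):
            return _ev(node.body)
        if isinstance(node, ast.BinOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_ev(node.left), _ev(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_ev(node.operand))
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        raise ValueError("unsupported expression")
    return _ev(ast.parse(expr, mode="eval"))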
|
[
"rezvanieh.talebi@gmail.com"
] |
rezvanieh.talebi@gmail.com
|
7375d9c055ddef901788d0d3a34d84e9bcd60115
|
2e5afac9380dc05d71ea8ae92488266ce3cb5c94
|
/Ring/setup.py
|
710611a70cbd4ff428e5fb3a9aeb8de2c765d601
|
[] |
no_license
|
rraj29/RealtimeAudioPython
|
47d692e53f32ad7002df77348329cacccbed4611
|
bab55ae5410bccae27bf66aca148b1fd673d60f8
|
refs/heads/master
| 2022-10-23T10:12:26.388947
| 2020-06-19T13:15:04
| 2020-06-19T13:15:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 830
|
py
|
#!/usr/bin/env python
''' Usage: python setup.py build_ext --inplace '''
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
from Cython.Compiler.Options import get_directive_defaults
directive_defaults = get_directive_defaults()
# Enable line tracing and name binding so profiling/coverage tools can map the
# generated C code back to the .pyx source.
directive_defaults['linetrace'] = True
directive_defaults['binding'] = True
import numpy
setup(
cmdclass = {'build_ext': build_ext},
ext_modules = [Extension("diod_cython",
sources=["diodcython.pyx"],
include_dirs=[numpy.get_include()])],
)
setup(
cmdclass = {'build_ext': build_ext},
ext_modules = [Extension("c_diod",
sources=["c_diod.pyx", "diod_cdef.c"],
include_dirs=[numpy.get_include()])],
)
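# Editor's note (assumes Cython >= 0.29): the same build can be written with
# the higher-level cythonize() API, which also accepts compiler directives:
#
#   from setuptools import setup
#   from Cython.Build import cythonize
#   setup(ext_modules=cythonize(
#       ["diodcython.pyx"],
#       compiler_directives={"linetrace": True, "binding": True},
#   ))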
|
[
"noreply@github.com"
] |
rraj29.noreply@github.com
|
9ee7a1390ae377ee6ccb2031080cf90ddfbd4522
|
0558a02176b4c27b369594d2f2d0070b64b77f32
|
/app/static_typing.py
|
69d669ca097703f4c5f3f4a774860d862809b9bb
|
[] |
no_license
|
marcelh89/projectX
|
57e77e844197ccf728b1a996a7330472210b2110
|
580817316ac8dc5cb9bf84fc8f83b0337075590c
|
refs/heads/master
| 2021-01-10T07:14:49.231907
| 2015-10-28T20:24:58
| 2015-10-28T20:24:58
| 44,829,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
def greeting(name: str) -> str:
return 'Hello ' + name
print(greeting('Peter'))
# Note: the annotation is not enforced at runtime; the TypeError caught below
# comes from evaluating 'Hello ' + 1, not from the type hint itself.
try:
    print(greeting(1))
except TypeError as ter:
    print(ter)
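# Editor's addition: the hints stay available as metadata for static checkers
# and runtime introspection, even though CPython never enforces them:
print(greeting.__annotations__)  # {'name': <class 'str'>, 'return': <class 'str'>}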
|
[
"marcelh89@googlemail.com"
] |
marcelh89@googlemail.com
|
48e96cb4959b50297d7f6413e401936fcd538a85
|
752704eba6977ddaee1c33dc9a8594c0f47eb358
|
/repositories/artist_repository.py
|
87b117296e737fcba05fae0aaae34c6f0d5f6266
|
[] |
no_license
|
JarrodBennie/python-record-shop
|
6dcce6ade10b5624e13a1e2816d9688204965eff
|
6758c4309acd919ee7c92a8b033a87d79b369dd1
|
refs/heads/master
| 2021-02-18T17:43:08.320906
| 2020-04-06T10:39:58
| 2020-04-06T10:39:58
| 245,218,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 952
|
py
|
from models.artist import Artist
from db.run_sql import run_sql
def select_all():
artists = []
sql = "SELECT * FROM artists"
results = run_sql(sql)
for row in results:
name = row["name"]
id = row["id"]
artist = Artist(name, id)
artists.append(artist)
return artists
def select(id):
artist = None
sql = "SELECT * FROM artists WHERE id = %s"
results = run_sql(sql, (id,))
row = results[0]
name = row["name"]
id = row["id"]
artist = Artist(name, id)
return artist
def save(artist):
sql = "INSERT INTO artists (name) VALUES (%s) RETURNING id"
results = run_sql(sql, (artist.name,))
id = results[0]["id"]
artist.id = id
return artist
def update(artist):
    sql = "UPDATE artists SET name = %s WHERE id = %s"
    run_sql(sql, (artist.name, artist.id))
def delete(id):
    sql = "DELETE FROM artists WHERE id = %s"
    run_sql(sql, (id,))
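# Editor's sketch of typical usage, assuming a configured connection behind
# run_sql and that Artist's id argument defaults to None:
#
#   artist = save(Artist("Nina Simone"))   # 'Nina Simone' is illustrative
#   artist.name = "Nina"
#   update(artist)
#   print([a.name for a in select_all()])
#   delete(artist.id)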
|
[
"jarrodbennie@icloud.com"
] |
jarrodbennie@icloud.com
|
fbad7287e306066bb1f74d6162618521239bdabd
|
b8560fcc01d746fde45c2a2c0364cacfc3d6613f
|
/4常用模块/shutil_module.py
|
3552f40f7fd23fcca6b3556a9b9b11d5f4753ce6
|
[] |
no_license
|
RingoSnail/Python_Basic
|
b76e17675ef29e40f544caccaf0c29f1ba0afdf5
|
830c472a9f231e3b7402705f6a02b1028bccfe92
|
refs/heads/master
| 2023-08-19T18:01:45.096762
| 2021-10-15T08:30:10
| 2021-10-15T08:30:10
| 305,307,563
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 53
|
py
|
# A high-level module for working with files, directories and archives.
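# Editor's sketch of the module's typical calls (paths are hypothetical):
import shutil
shutil.copy("notes.txt", "backup/notes.txt")             # copy a single file
shutil.copytree("project", "project_backup")             # copy a directory tree
shutil.make_archive("project_backup", "zip", "project")  # -> project_backup.zip
shutil.rmtree("project_backup")                          # recursively delete a tree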
|
[
"rain_hugo@yahoo.com"
] |
rain_hugo@yahoo.com
|
9882a77c834b182dafafe702c6450be8ddadef23
|
75e4e51e6bfb19f00bffb308074bd492a09ec6e0
|
/NewPic.py
|
e2607ae2c139f75eaf340cb159c3b36e1eaaf4ef
|
[] |
no_license
|
canydream/pyautogui
|
1eba874bf656b6712c7f3e93b3dd28987ce78b2f
|
a0c880dffea0ac7e28bb49ca5ee3447a2916325a
|
refs/heads/master
| 2023-06-21T06:46:32.147508
| 2021-08-01T11:08:40
| 2021-08-01T11:08:40
| 391,299,588
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
import cv2
import numpy as np
img3 = np.random.random((600, 800, 3))
while 1:
    img3 = np.random.random((600, 800, 3))
    img3 *= 50
    img3 = img3.round()
    # imshow interprets float images as values in [0, 1], so after *= 50 most
    # pixels saturate to white noise.
    cv2.imshow('img', img3)
    key = cv2.waitKey(1000)  # -1 if no key was pressed within 1000 ms
    print(key)
    if key != -1:  # exit on any key press
        break
cv2.destroyAllWindows()
|
[
"canydream@qq.com"
] |
canydream@qq.com
|
f923c988e6a1535d7ed8fffd7072ffde4193bc8a
|
bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d
|
/lib/googlecloudsdk/calliope/walker_util.py
|
5ac7827a51c6e386400e44564a8fdd49c740f037
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
05fbb473d629195f25887fc5bfaa712f2cbc0a24
|
392abf004b16203030e6efd2f0af24db7c8d669e
|
refs/heads/master
| 2023-08-31T05:40:41.317697
| 2023-08-23T18:23:16
| 2023-08-23T18:23:16
| 335,182,594
| 9
| 2
|
NOASSERTION
| 2022-10-29T20:49:13
| 2021-02-02T05:47:30
|
Python
|
UTF-8
|
Python
| false
| false
| 19,357
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A collection of CLI walkers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import io
import os
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import cli_tree
from googlecloudsdk.calliope import markdown
from googlecloudsdk.calliope import walker
from googlecloudsdk.core.document_renderers import render_document
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import pkg_resources
import six
_HELP_HTML_DATA_FILES = [
'favicon.ico',
'index.html',
'_menu_.css',
'_menu_.js',
'_title_.html',
]
class DevSiteGenerator(walker.Walker):
"""Generates DevSite reference HTML in a directory hierarchy.
This implements gcloud meta generate-help-docs --manpage-dir=DIRECTORY.
Attributes:
_directory: The DevSite reference output directory.
_need_section_tag[]: _need_section_tag[i] is True if there are section
subitems at depth i. This prevents the creation of empty 'section:' tags
in the '_toc' files.
_toc_root: The root TOC output stream.
_toc_main: The current main (just under root) TOC output stream.
"""
_REFERENCE = '/sdk/gcloud/reference' # TOC reference directory offset.
_TOC = '_toc.yaml'
def __init__(self, cli, directory, hidden=False, progress_callback=None,
restrict=None):
"""Constructor.
Args:
cli: The Cloud SDK CLI object.
directory: The devsite output directory path name.
hidden: Boolean indicating whether to consider the hidden CLI.
progress_callback: f(float), The function to call to update the progress
bar or None for no progress bar.
restrict: Restricts the walk to the command/group dotted paths in this
list. For example, restrict=['gcloud.alpha.test', 'gcloud.topic']
restricts the walk to the 'gcloud topic' and 'gcloud alpha test'
commands/groups.
"""
super(DevSiteGenerator, self).__init__(cli)
self._directory = directory
files.MakeDir(self._directory)
self._need_section_tag = []
toc_path = os.path.join(self._directory, self._TOC)
self._toc_root = files.FileWriter(toc_path)
self._toc_root.write('toc:\n')
self._toc_root.write('- title: "gcloud Reference"\n')
self._toc_root.write(' path: %s\n' % self._REFERENCE)
self._toc_root.write(' section:\n')
self._toc_main = None
def Visit(self, node, parent, is_group):
"""Updates the TOC and Renders a DevSite doc for each node in the CLI tree.
Args:
node: group/command CommandCommon info.
parent: The parent Visit() return value, None at the top level.
      is_group: True if node is a group, otherwise it is a command.
Returns:
The parent value, ignored here.
"""
def _UpdateTOC():
"""Updates the DevSIte TOC."""
depth = len(command) - 1
if not depth:
return
title = ' '.join(command)
while depth >= len(self._need_section_tag):
self._need_section_tag.append(False)
if depth == 1:
if is_group:
if self._toc_main:
# Close the current main group toc if needed.
self._toc_main.close()
# Create a new main group toc.
toc_path = os.path.join(directory, self._TOC)
toc = files.FileWriter(toc_path)
self._toc_main = toc
toc.write('toc:\n')
toc.write('- title: "%s"\n' % title)
toc.write(' path: %s\n' % '/'.join([self._REFERENCE] + command[1:]))
self._need_section_tag[depth] = True
toc = self._toc_root
indent = ' '
if is_group:
toc.write('%s- include: %s\n' % (
indent, '/'.join([self._REFERENCE] + command[1:] + [self._TOC])))
return
else:
toc = self._toc_main
indent = ' ' * (depth - 1)
if self._need_section_tag[depth - 1]:
self._need_section_tag[depth - 1] = False
toc.write('%ssection:\n' % indent)
title = command[-1]
toc.write('%s- title: "%s"\n' % (indent, title))
toc.write('%s path: %s\n' % (indent,
'/'.join([self._REFERENCE] + command[1:])))
self._need_section_tag[depth] = is_group
# Set up the destination dir for this level.
command = node.GetPath()
if is_group:
directory = os.path.join(self._directory, *command[1:])
files.MakeDir(directory, mode=0o755)
else:
directory = os.path.join(self._directory, *command[1:-1])
# Render the DevSite document.
path = os.path.join(
directory, 'index' if is_group else command[-1]) + '.html'
with files.FileWriter(path) as f:
md = markdown.Markdown(node)
render_document.RenderDocument(style='devsite',
title=' '.join(command),
fin=io.StringIO(md),
out=f, command_node=node)
_UpdateTOC()
return parent
def Done(self):
"""Closes the TOC files after the CLI tree walk is done."""
self._toc_root.close()
if self._toc_main:
self._toc_main.close()
class HelpTextGenerator(walker.Walker):
"""Generates help text files in a directory hierarchy.
Attributes:
_directory: The help text output directory.
"""
def __init__(self, cli, directory, hidden=False, progress_callback=None,
restrict=None):
"""Constructor.
Args:
cli: The Cloud SDK CLI object.
directory: The Help Text output directory path name.
hidden: Boolean indicating whether to consider the hidden CLI.
progress_callback: f(float), The function to call to update the progress
bar or None for no progress bar.
restrict: Restricts the walk to the command/group dotted paths in this
list. For example, restrict=['gcloud.alpha.test', 'gcloud.topic']
restricts the walk to the 'gcloud topic' and 'gcloud alpha test'
commands/groups.
"""
super(HelpTextGenerator, self).__init__(
cli, progress_callback=progress_callback, restrict=restrict)
self._directory = directory
files.MakeDir(self._directory)
def Visit(self, node, parent, is_group):
"""Renders a help text doc for each node in the CLI tree.
Args:
node: group/command CommandCommon info.
parent: The parent Visit() return value, None at the top level.
      is_group: True if node is a group, otherwise it is a command.
Returns:
The parent value, ignored here.
"""
# Set up the destination dir for this level.
command = node.GetPath()
if is_group:
directory = os.path.join(self._directory, *command[1:])
else:
directory = os.path.join(self._directory, *command[1:-1])
files.MakeDir(directory, mode=0o755)
# Render the help text document.
path = os.path.join(directory, 'GROUP' if is_group else command[-1])
with files.FileWriter(path) as f:
md = markdown.Markdown(node)
render_document.RenderDocument(style='text', fin=io.StringIO(md),
out=f)
return parent
class DocumentGenerator(walker.Walker):
"""Generates style manpage files with suffix in an output directory.
All files will be generated in one directory.
Attributes:
_directory: The document output directory.
_style: The document style.
_suffix: The output file suffix.
"""
def __init__(self, cli, directory, style, suffix):
"""Constructor.
Args:
cli: The Cloud SDK CLI object.
directory: The manpage output directory path name.
style: The document style.
suffix: The generate document file suffix. None for .<SECTION>.
"""
super(DocumentGenerator, self).__init__(cli)
self._directory = directory
self._style = style
self._suffix = suffix
files.MakeDir(self._directory)
def Visit(self, node, parent, is_group):
"""Renders document file for each node in the CLI tree.
Args:
node: group/command CommandCommon info.
parent: The parent Visit() return value, None at the top level.
      is_group: True if node is a group, otherwise it is a command.
Returns:
The parent value, ignored here.
"""
if self._style == 'linter':
meta_data = actions.GetCommandMetaData(node)
else:
meta_data = None
command = node.GetPath()
path = os.path.join(self._directory, '_'.join(command)) + self._suffix
with files.FileWriter(path) as f:
md = markdown.Markdown(node)
render_document.RenderDocument(style=self._style,
title=' '.join(command),
fin=io.StringIO(md),
out=f,
command_metadata=meta_data)
return parent
class HtmlGenerator(DocumentGenerator):
"""Generates HTML manpage files with suffix .html in an output directory.
  The output directory will contain the HTML manpage files together with the
  _menu_ navigation support files.
"""
def WriteHtmlMenu(self, command, out):
"""Writes the command menu tree HTML on out.
Args:
command: dict, The tree (nested dict) of command/group names.
out: stream, The output stream.
"""
def ConvertPathToIdentifier(path):
return '_'.join(path)
def WalkCommandTree(command, prefix):
"""Visit each command and group in the CLI command tree.
Args:
command: dict, The tree (nested dict) of command/group names.
prefix: [str], The subcommand arg prefix.
"""
level = len(prefix)
visibility = 'visible' if level <= 1 else 'hidden'
indent = level * 2 + 2
name = command.get('_name_')
args = prefix + [name]
out.write('{indent}<li class="{visibility}" id="{item}" '
'onclick="select(event, this.id)">{name}'.format(
indent=' ' * indent, visibility=visibility, name=name,
item=ConvertPathToIdentifier(args)))
commands = command.get('commands', []) + command.get('groups', [])
if commands:
out.write('<ul>\n')
for c in sorted(commands, key=lambda x: x['_name_']):
WalkCommandTree(c, args)
out.write('{indent}</ul>\n'.format(indent=' ' * (indent + 1)))
out.write('{indent}</li>\n'.format(indent=' ' * indent))
else:
        out.write('</li>\n')
out.write("""\
<html>
<head>
<meta name="description" content="man page tree navigation">
<meta name="generator" content="gcloud meta generate-help-docs --html-dir=.">
<title> man page tree navigation </title>
<base href="." target="_blank">
<link rel="stylesheet" type="text/css" href="_menu_.css">
<script type="text/javascript" src="_menu_.js"></script>
</head>
<body>
<div class="menu">
<ul>
""")
WalkCommandTree(command, [])
out.write("""\
</ul>
</div>
</body>
</html>
""")
def _GenerateHtmlNav(self, directory, cli, hidden, restrict):
"""Generates html nav files in directory."""
tree = CommandTreeGenerator(cli).Walk(hidden, restrict)
with files.FileWriter(os.path.join(directory, '_menu_.html')) as out:
self.WriteHtmlMenu(tree, out)
for file_name in _HELP_HTML_DATA_FILES:
file_contents = pkg_resources.GetResource(
'googlecloudsdk.api_lib.meta.help_html_data.', file_name)
files.WriteBinaryFileContents(os.path.join(directory, file_name),
file_contents)
def __init__(self, cli, directory, hidden=False, progress_callback=None,
restrict=None):
"""Constructor.
Args:
cli: The Cloud SDK CLI object.
directory: The HTML output directory path name.
hidden: Boolean indicating whether to consider the hidden CLI.
progress_callback: f(float), The function to call to update the progress
bar or None for no progress bar.
restrict: Restricts the walk to the command/group dotted paths in this
list. For example, restrict=['gcloud.alpha.test', 'gcloud.topic']
restricts the walk to the 'gcloud topic' and 'gcloud alpha test'
commands/groups.
"""
super(HtmlGenerator, self).__init__(
cli, directory=directory, style='html', suffix='.html')
self._GenerateHtmlNav(directory, cli, hidden, restrict)
class ManPageGenerator(DocumentGenerator):
"""Generates manpage files with suffix .1 in an output directory.
The output directory will contain a man1 subdirectory containing all of the
manpage files.
"""
_SECTION_FORMAT = 'man{section}'
def __init__(self, cli, directory, hidden=False, progress_callback=None,
restrict=None):
"""Constructor.
Args:
cli: The Cloud SDK CLI object.
directory: The manpage output directory path name.
hidden: Boolean indicating whether to consider the hidden CLI.
progress_callback: f(float), The function to call to update the progress
bar or None for no progress bar.
restrict: Restricts the walk to the command/group dotted paths in this
list. For example, restrict=['gcloud.alpha.test', 'gcloud.topic']
restricts the walk to the 'gcloud topic' and 'gcloud alpha test'
commands/groups.
"""
# Currently all gcloud manpages are in section 1.
section_subdir = self._SECTION_FORMAT.format(section=1)
section_dir = os.path.join(directory, section_subdir)
super(ManPageGenerator, self).__init__(
cli, directory=section_dir, style='man', suffix='.1')
class LinterGenerator(DocumentGenerator):
"""Generates linter files with suffix .json in an output directory."""
def __init__(self, cli, directory, hidden=False, progress_callback=None,
restrict=None):
"""Constructor.
Args:
cli: The Cloud SDK CLI object.
directory: The linter output directory path name.
hidden: Boolean indicating whether to consider the hidden CLI.
progress_callback: f(float), The function to call to update the progress
bar or None for no progress bar.
restrict: Restricts the walk to the command/group dotted paths in this
list. For example, restrict=['gcloud.alpha.test', 'gcloud.topic']
restricts the walk to the 'gcloud topic' and 'gcloud alpha test'
commands/groups.
"""
super(LinterGenerator, self).__init__(
cli, directory=directory, style='linter', suffix='.json')
class CommandTreeGenerator(walker.Walker):
"""Constructs a CLI command dict tree.
This implements the resource generator for gcloud meta list-commands.
Attributes:
_with_flags: Include the non-global flags for each command/group if True.
_with_flag_values: Include flag value choices or :type: if True.
_global_flags: The set of global flags, only listed for the root command.
"""
def __init__(self, cli, with_flags=False, with_flag_values=False, **kwargs):
"""Constructor.
Args:
cli: The Cloud SDK CLI object.
with_flags: Include the non-global flags for each command/group if True.
with_flag_values: Include flags and flag value choices or :type: if True.
**kwargs: Other keyword arguments to pass to Walker constructor.
"""
super(CommandTreeGenerator, self).__init__(cli, **kwargs)
self._with_flags = with_flags or with_flag_values
self._with_flag_values = with_flag_values
self._global_flags = set()
def Visit(self, node, parent, is_group):
"""Visits each node in the CLI command tree to construct the dict tree.
Args:
node: group/command CommandCommon info.
parent: The parent Visit() return value, None at the top level.
      is_group: True if node is a group, otherwise it is a command.
Returns:
The subtree parent value, used here to construct a dict tree.
"""
name = node.name.replace('_', '-')
info = {'_name_': name}
if self._with_flags:
all_flags = []
for arg in node.GetAllAvailableFlags():
value = None
if self._with_flag_values:
if arg.choices:
choices = sorted(arg.choices)
if choices != ['false', 'true']:
value = ','.join([six.text_type(choice) for choice in choices])
          elif arg.type is int:
            value = ':int:'
          elif arg.type is float:
            value = ':float:'
elif isinstance(arg.type, arg_parsers.ArgDict):
value = ':dict:'
elif isinstance(arg.type, arg_parsers.ArgList):
value = ':list:'
elif arg.nargs != 0:
metavar = arg.metavar or arg.dest.upper()
value = ':' + metavar + ':'
for f in arg.option_strings:
if value:
f += '=' + value
all_flags.append(f)
no_prefix = '--no-'
flags = []
for flag in all_flags:
if flag in self._global_flags:
continue
if flag.startswith(no_prefix):
positive = '--' + flag[len(no_prefix):]
if positive in all_flags:
continue
flags.append(flag)
if flags:
info['_flags_'] = sorted(flags)
if not self._global_flags:
# Most command flags are global (defined by the root command) or
# command-specific. Group-specific flags are rare. Separating out
# the global flags streamlines command descriptions and prevents
# global flag changes (we already have too many!) from making it
# look like every command has changed.
self._global_flags.update(flags)
if is_group:
if parent:
if cli_tree.LOOKUP_GROUPS not in parent:
parent[cli_tree.LOOKUP_GROUPS] = []
parent[cli_tree.LOOKUP_GROUPS].append(info)
return info
if cli_tree.LOOKUP_COMMANDS not in parent:
parent[cli_tree.LOOKUP_COMMANDS] = []
parent[cli_tree.LOOKUP_COMMANDS].append(info)
return None
class GCloudTreeGenerator(walker.Walker):
"""Generates an external representation of the gcloud CLI tree.
This implements the resource generator for gcloud meta list-gcloud.
"""
def Visit(self, node, parent, is_group):
"""Visits each node in the CLI command tree to construct the external rep.
Args:
node: group/command CommandCommon info.
parent: The parent Visit() return value, None at the top level.
      is_group: True if node is a group, otherwise it is a command.
Returns:
The subtree parent value, used here to construct an external rep node.
"""
return cli_tree.Command(node, parent)
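# Editor's note: a hedged usage sketch, based only on the call pattern visible
# in _GenerateHtmlNav above (construct a walker with the CLI object, then walk):
#
#   tree = CommandTreeGenerator(cli, with_flags=True).Walk(False, None)
#   # 'tree' is a nested dict carrying '_name_', '_flags_' and the
#   # cli_tree.LOOKUP_COMMANDS / cli_tree.LOOKUP_GROUPS child lists.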
|
[
"cloudsdk.mirror@gmail.com"
] |
cloudsdk.mirror@gmail.com
|
ff71f224008ee670b97d700b32ef94e4a96165ea
|
33213395f9b7606da83003d9f89966af16a47ed7
|
/proyecto/api/forms.py
|
78e5e6a6e54d6fa4fc77fd4d5730201856690ffa
|
[] |
no_license
|
sofiamanana/proyectoAnalisis
|
e75866d306424e37296c018da9cb7ee34a6450b4
|
3d7914dc2f6ef7813bd4672ada1cd57e01e24e26
|
refs/heads/main
| 2023-02-10T22:14:28.913022
| 2021-01-09T00:59:37
| 2021-01-09T00:59:37
| 311,785,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
from django import forms
from .models import File, Plan, Mensajes
class FileForm(forms.ModelForm):
    class Meta:
        model = File
        fields = ["name", "filepath", "username", "plan"]
class FileForm2(forms.ModelForm):
    class Meta:
        model = File
        fields = ['filepath']
class FormMensaje(forms.ModelForm):
    class Meta:
        model = Mensajes
        fields = ['fiscalizador', 'mensaje']
class FormPlan(forms.ModelForm):
    class Meta:
        model = Plan
        fields = ['nombre', 'reportador']
|
[
"sofia.manana@sansano.usm.cl"
] |
sofia.manana@sansano.usm.cl
|
7ed9ede5a5b610fc38980e7998b759468f5dbce6
|
18899c79fa17d4b15df065db1a83ff25590b9ca0
|
/model_closedloop.py
|
027816de79943003c16e064dcbd575ba2294a88e
|
[
"MIT"
] |
permissive
|
brunopcarv/id4control
|
0ce3082674f9fdc8c68c3b478c17dec2bb00f927
|
a1a78aafce08abc94d008ecb9a53a6de771919bc
|
refs/heads/master
| 2022-11-28T21:52:56.405327
| 2020-08-12T00:10:47
| 2020-08-12T00:10:47
| 278,949,646
| 0
| 0
| null | 2020-08-12T00:10:48
| 2020-07-11T22:10:58
|
Python
|
UTF-8
|
Python
| false
| false
| 4,336
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 vi:noet
# Closed-loop system - Plant and Controller
import numpy as np
class ClosedLoopSystem():
def __init__(self, plant, controller, additive_noise_gen, xo, dt=0.01):
self.plant = plant
self.controller = controller
self.x = xo
self.xo = xo
self.k = 0
self.dt = dt # delta t: sample period
self.additive_noise_gen = additive_noise_gen
def next_points(self):
x_temp = self.x
self.x = self.plant.dynamics(x_temp,
self.controller.action(x_temp),
self.additive_noise_gen,
self.dt)
self.k += 1
return self.x
def reset(self):
self.x = self.xo
self.k = 0
    def run(self, k):
        # Record x_0 .. x_{k-1}: step first, then append, so the initial state
        # is not duplicated and the last computed state is included.
        x_list = [self.x]
        for i in range(1, k):
            self.next_points()
            x_list = np.append(x_list, [self.x], axis=0)
        self.reset()
        return x_list, range(k)
if __name__ == "__main__":
from model_plants import LinearPlant, LinearDCMotorPlant, \
InvertedPendulum, PredatorPreyPlant
# Scalar zero controller
class NoController():
def __init__(self):
pass
def action(self, x):
return 0
controller = NoController()
plant = PredatorPreyPlant(1.0, 1.0, 1.0, 1.0)
xo = np.array([2.0, 1.0]).T
    # Pass None for the additive-noise generator (assumes the plant's dynamics
    # tolerates the absence of noise) so that xo binds to the right argument.
    closed_loop = ClosedLoopSystem(plant, controller, None, xo)
final_time_unit = 2000
half = int(final_time_unit*0.25)
quarter = int(half*0.05)
x, time = closed_loop.run(final_time_unit)
x1 = x[:,0]
x2 = x[:,1]
X = np.array([x1[:half-1], x2[:half-1]]).T
Y = np.array([x1[1:half], x2[1:half]]).T
from id_kernel_ridge_regression import KernelRidgeRegression, \
kernel_rbf_function_M, kernel_rbf_function_N, kernel_linear_function, \
kernel_poly_function, kernel_tanh_function
from id_linear_ridge_regression import LinearRidgeRegression
# RBF kernel id
lambda_reg = 0.00001
regression = KernelRidgeRegression(lambda_reg)
# regression.training(X[:quarter,:], Y[:quarter,:], kernel_rbf_function_M)
regression.training(X[:half,:], Y[:half,:], kernel_rbf_function_M)
Y_ridge = np.array([x1[:half], x2[:half]]).T
for k in range(half,final_time_unit):
y = regression.predict(Y_ridge[-1,:], kernel_rbf_function_N)
Y_ridge = np.append(Y_ridge, [y], axis=0)
# RBF kernel id
random_ids = np.random.choice(half-1, size=quarter, replace=False)
regression_random = KernelRidgeRegression(lambda_reg)
regression_random.training(X[random_ids,:], Y[random_ids,:], kernel_rbf_function_M)
Y_ridge_random = np.array([x1[:half], x2[:half]]).T
for k in range(half,final_time_unit):
y = regression_random.predict(Y_ridge_random[-1,:], kernel_rbf_function_N)
Y_ridge_random = np.append(Y_ridge_random, [y], axis=0)
    # Laplace kernel TODO: update -- as written this computes a squared-distance
    # (Gaussian/RBF) kernel rather than a Laplace kernel, and it ignores Y.
    def kernel_laplace_function_M(X, Y, var=1.0, gamma=1.0):
import numexpr as ne
X_norm = np.sum(X**2, axis=-1)
return ne.evaluate('v * exp(-g * (A + B - 2 * C))', {
'A' : X_norm[:,None],
'B' : X_norm[None,:],
'C' : np.dot(X, X.T),
'g' : gamma,
'v' : var
})
# Similarity Sampling
def similarity_sampling_increment(X, x_candidates, similarity_function):
dets = list()
# M = similarity_function(X, X)
# dets.append(np.linalg.det(M))
for x in x_candidates:
X_extended = X
X_extended = np.append(X_extended, [x], axis=0)
M = similarity_function(X_extended, X_extended)
dets.append(np.linalg.det(M))
        id_maxdet = dets.index(max(dets))
        x_maxdet = x_candidates[id_maxdet]
        return x_maxdet
    def similarity_sampling(X, m_tilde, similarity_function):
        m, n = np.shape(X)
        X_current = X[0:10, :]
        X_sampled = np.zeros((m_tilde, n))
        for i in range(m_tilde):
            # Candidates are drawn from rows of X beyond the initial block;
            # X itself must be indexed here, not the 10-row X_current.
            a = X[10 + i, :]
            b = X[10 + (i + 1) * 2]
            candidates = list([a, b])
            X_sampled[i, :] = similarity_sampling_increment(
                X_current,
                candidates,
                similarity_function,
            )
        return X_sampled
# Plot
import matplotlib.pyplot as plt
fig, axs = plt.subplots(2, 1)
axs[0].plot(time, x1, time, Y_ridge[:,0], time, Y_ridge_random[:,0])
axs[0].set_xlim(0,final_time_unit)
axs[0].set_xlabel('Time units (k)')
axs[0].set_ylabel('Prey: x1 (actual) and x1 (ridge)')
axs[0].grid(True)
axs[1].plot(time, x2, time, Y_ridge[:,1], time, Y_ridge_random[:,1])
axs[1].set_xlim(0,final_time_unit)
axs[1].set_xlabel('Time units (k)')
axs[1].set_ylabel('Pred: x2 (actual) and x2 (ridge)')
axs[1].grid(True)
# cxy, f = axs[1].cohere(x1, x2, 5, 1. / dt)
fig.tight_layout()
plt.show()
|
[
"brunopcarv@gmail.com"
] |
brunopcarv@gmail.com
|
9ee969ac84ea09647d6a185b3e82eab861cc2331
|
4feec0d298181dd982103bf0beafd8c42dff3b51
|
/Prototypes/GUI Prototype/GUI.py
|
cbb6bf107e8b50afde5af2d86141ee10380df798
|
[] |
no_license
|
ashbrindle/Programmable_Dynamic_Turning_Machine_Project
|
1484d23ada220c2ff37ace1972adba2d79a032c6
|
de1249e0b15b76d6913a9e21863fe6602d6c7e29
|
refs/heads/master
| 2021-10-28T03:47:20.271657
| 2019-04-21T18:21:18
| 2019-04-21T18:21:18
| 178,104,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,938
|
py
|
from Tkinter import *
instructions = []  # each entry: [state, symbol, direction, new symbol, new state]
def save():
    # Store one complete instruction per submit.
    instructions.append([entstate.get(),
                         entsymbol.get(),
                         entdirection.get(),
                         entNsymbol.get(),
                         entNstate.get()])
    print instructions
top = Tk()
lblstate = Label(top, text = "State")
lblstate.pack()
entstate = Entry(top, bd = 5) # bd is the border width around the entry (default 2px, set to 5px)
entstate.pack()
lblsymbol = Label(top, text = "Symbol")
lblsymbol.pack()
entsymbol = Entry(top, bd = 5)
entsymbol.pack()
lbldirection = Label(top, text = "Direction")
lbldirection.pack()
entdirection = Entry(top, bd = 5)
entdirection.pack()
lblNsymbol = Label(top, text = "New Symbol")
lblNsymbol.pack()
entNsymbol = Entry(top, bd = 5)
entNsymbol.pack()
lblNstate = Label(top, text = "New State")
lblNstate.pack()
entNstate = Entry(top, bd = 5)
entNstate.pack()
lblTape = Label(top, text = "Tape")
lblTape.pack()
entTape1 = Entry(top, width = 5) # seven 5-character cells form the visible tape
entTape1.pack(side = RIGHT)
entTape2 = Entry(top, width = 5)
entTape2.pack(side = RIGHT)
entTape3 = Entry(top, width = 5)
entTape3.pack(side = RIGHT)
entTape4 = Entry(top, width = 5)
entTape4.pack(side = RIGHT)
entTape5 = Entry(top, width = 5)
entTape5.pack(side = RIGHT)
entTape6 = Entry(top, width = 5)
entTape6.pack(side = RIGHT)
entTape7 = Entry(top, width = 5)
entTape7.pack(side = RIGHT)
AcceptButton = Button(top, text = "Submit", command = save)
AcceptButton.pack(side = BOTTOM)
top.mainloop()
|
[
"ash.brindle97@gmail.com"
] |
ash.brindle97@gmail.com
|
4204bd99b9f1ec5958bd3e6f3840050defbbfb1d
|
317649dde4c3ca8b185d98eda59aff11c3276c88
|
/POWHEG-BOX-RES/vbs-ssww-nloew/.svn/pristine/42/4204bd99b9f1ec5958bd3e6f3840050defbbfb1d.svn-base
|
211da05343c92209feaad97890a2596f11e8ce0a
|
[] |
no_license
|
freejiebao/generator
|
8b5019cdf53be70006405a4d9547d693f813d9bd
|
8e658a44e69d770ac57e6bc9d13a66bcebcab07a
|
refs/heads/master
| 2021-06-16T14:31:10.174653
| 2021-05-19T08:13:41
| 2021-05-19T08:13:41
| 203,158,169
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,992
|
################################################################################
# genplots_shower.py #
################################################################################
"""
File: genplots_shower.py
Author: J.-N. Lang
Email: jlang@physik.uzh.ch
Description: vbs + PS plots
"""
#############
# Globals #
#############
from __future__ import unicode_literals
import os
from matplotlib import ticker
from matplotlib import pyplot as plt
from matplotlib.gridspec import GridSpec
from numpy import linspace, loadtxt, arange
from scipy.interpolate import interp1d
from math import sqrt
fontsize=16
plt.rc('text', usetex=True)
plt.rc('text.latex', unicode=True)
plt.rc('font', family='serif', size=fontsize)
plt.rc('mathtext',
fontset='custom',
rm='Bitstream Vera Sans',
it='Bitstream Vera Sans:italic',
bf='Bitstream Vera Sans:bold')
fig = plt.figure(num=None, figsize=(5, 6), dpi=120, facecolor='w', edgecolor='k')
plot_margin_left = 0.13
plot_margin_right = 0.95
plot_margin_top = 0.98
plot_margin_bottom = 0.1
plt.subplots_adjust(left=plot_margin_left, right=plot_margin_right,
top=plot_margin_top, bottom=plot_margin_bottom)
gridspec_y_ticks = 13
gridspec_x_ticks = 1
gs = GridSpec(gridspec_y_ticks, gridspec_x_ticks)
mid = 9
top_end = 13
up = plt.subplot(gs[1:mid-1, :])
lp = plt.subplot(gs[mid:top_end, :])
yticks = None
ylogscale = None
ylimup = None
powheg_label = '$\\texttt{POWHEG}\;\\mathrm{NLO\; EW}+\\mathrm{PS}$'
mocanlo_label_lo = '$\\texttt{MoCaNLO}\;\\mathrm{LO}$'
mocanlo_label_nlo = '$\\texttt{MoCaNLO}\;\\mathrm{NLO\; EW}$'
###################
# plot settings #
###################
obsmclo = 'histogram_transverse_momentum_j1_born.dat'
obsmcnlo = 'histogram_transverse_momentum_j1_nlo.dat'
obspwhgps = 'tot_j1_pt'
rescale_mc=1
rescale_pwgh=1000
uylabel = '$\mathrm{d}\sigma\mathrm{[fb]}/\mathrm{d} p_{\mathrm{T},\mathrm{j}_1}\mathrm{[GeV]}$'
lylabel = '$\delta[\%]$'
xlabel = '$p_{\\mathrm{T},\mathrm{j}_1} \mathrm{[GeV]}$'
oname = 'ptj1.pdf'
xlim = (0, 700)
ylimup = (0.00001,0.01)
ylimlp = (-50,20)
yticks = arange(-50,20,10)
rebin = 2
bins = 20
ylogscale = True
# obsmclo = 'histogram_transverse_momentum_j2_born.dat'
# obsmcnlo = 'histogram_transverse_momentum_j2_nlo.dat'
# obspwhgps = 'tot_j2_pt'
# rescale_mc=1.
# rescale_pwgh=1000.
# uylabel = '$\mathrm{d}\sigma\mathrm{[pb]}/\mathrm{d} p_{\mathrm{T},j_2}\mathrm{[GeV]}$'
# lylabel = '$\delta[\%]$'
# xlabel = '$p_{\\mathrm{T},j_2} \mathrm{[GeV]}$'
# oname = 'ptj2.pdf'
# xlim = (0, 500)
# ylim = (-50,-10)
# rebin = 3
# bins = 20
# obsmclo = 'histogram_invariant_mass_mjj12_born.dat'
# obsmcnlo = 'histogram_invariant_mass_mjj12_nlo.dat'
# obspwhgps = 'tot_jj_m'
# rescale_mc=1.
# rescale_pwgh=1000.
# uylabel = '$\mathrm{d}\sigma\mathrm{[fb]}/\mathrm{d} m_\mathrm{jj}\mathrm{[GeV]}$'
# lylabel = '$\delta[\%]$'
# xlabel = '$m_\mathrm{jj} \mathrm{[GeV]}$'
# xlim = (500, 2000)
# ylimup = (0.00001,0.003)
# ylimlp = (-35,-0)
# yticks = arange(-50,20,10)
# oname = 'mjj.pdf'
# rebin = 2
# bins = 2000
# ylogscale = True
# obsmclo = 'histogram_transverse_momentum_positron_born.dat'
# obsmcnlo = 'histogram_transverse_momentum_positron_nlo.dat'
# obspwhgps = 'tot_e_pt'
# rescale_mc=10.**3
# rescale_pwgh=10.**6
# uylabel = '$\mathrm{d}\sigma\mathrm{[fb]}/\mathrm{d} p_{\mathrm{T},e^+}\mathrm{[GeV]}$'
# lylabel = '$\delta[\%]$'
# xlabel = '$p_{\\mathrm{T},e^+} \mathrm{[GeV]}$'
# oname = 'pte.pdf'
# xlim = (0, 500)
# ylim = (-50,-10)
# rebin = 3
# bins = 20
# obsmclo = 'histogram_rapidity_positron_born.dat'
# obsmcnlo = 'histogram_rapidity_positron_nlo.dat'
# obspwhgps = 'tot_e_y'
# rescale_mc=10.**3
# rescale_pwgh=10.**6
# uylabel = '$\mathrm{d}\sigma\mathrm{[fb]}/\mathrm{d} y_{e^+}$'
# lylabel = '$\delta[\%]$'
# xlabel = '$y_{e^+}$'
# oname = 'ye.pdf'
# xlim = (-2.4, 2.4)
# ylim = (-50,-10)
# rebin = 2
# bins = 800
# obsmclo = 'histogram_rapidity_j1_born.dat'
# obsmcnlo = 'histogram_rapidity_j1_nlo.dat'
# obspwhgps = 'tot_j1_y'
# rescale_mc=10.**3
# rescale_pwgh=10.**6
# uylabel = '$\mathrm{d}\sigma\mathrm{[fb]}/\mathrm{d} y_{j_1}$'
# lylabel = '$\delta[\%]$'
# xlabel = '$y_{j_1}$'
# oname = 'yj1.pdf'
# xlim = (-2.4, 2.4)
# ylim = (-50,-10)
# rebin = 2
# bins = 800
# obsmclo = 'histogram_rapidity_j2_born.dat'
# obsmcnlo = 'histogram_rapidity_j2_nlo.dat'
# obspwhgps = 'tot_j2_y'
# rescale_mc=10.**3
# rescale_pwgh=10.**6
# uylabel = '$\mathrm{d}\sigma\mathrm{[fb]}/\mathrm{d} y_{j_2}$'
# lylabel = '$\delta[\%]$'
# xlabel = '$y_{j_2}$'
# oname = 'yj2.pdf'
# xlim = (-2.4, 2.4)
# ylim = (-50,-10)
# rebin = 2
# bins = 800
# obsmclo = 'histogram_rapidity_separation_j1j2_born.dat'
# obsmcnlo = 'histogram_rapidity_separation_j1j2_nlo.dat'
# obspwhgps = 'tot_jj_dy'
# rescale_mc=10.**0
# rescale_pwgh=10.**3
# uylabel = '$\mathrm{d}\sigma\mathrm{[fb]}/\mathrm{d} \Delta y_\mathrm{jj}$'
# lylabel = '$\delta[\%]$'
# xlabel = '$\Delta y_\mathrm{jj}$'
# oname = 'dyjj.pdf'
# xlim = (2.4, 7.)
# ylimlp = (-40,-10)
# rebin = 2
# bins = 800
# obsmclo = 'histogram_transverse_momentum_truth_missing_born.dat'
# obsmcnlo = 'histogram_transverse_momentum_truth_missing_nlo.dat'
# obspwhgps = 'tot_miss_pt'
# rescale_mc=1
# rescale_pwgh=10.**3
# uylabel = '$\mathrm{d}\sigma\mathrm{[fb]}/\mathrm{d} p_{\mathrm{T}}^\mathrm{miss}\mathrm{[GeV]}$'
# lylabel = '$\delta[\%]$'
# xlabel = '$p_{\\mathrm{T}}^\mathrm{miss} \mathrm{[GeV]}$'
# oname = 'ptmiss.pdf'
# xlim = (0, 500)
# ylimup = (0.00001,0.02)
# yticks = arange(-50,20,10)
# ylimlp = (-45,-0)
# rebin = 3
# bins = 20
# ylogscale = True
def join_value(d, pos, n):
    """Average n adjacent bin values into a single rebinned value."""
    r = 0.
    for j in range(n):
        r += d[pos+j]
    r = r/n
    return r
def join_err(d, pos, n):
    """Combine n adjacent bin errors in quadrature, scaled by 1/n."""
    r = 0.
    for j in range(n):
        r = sqrt(r**2 + d[pos+j]**2)
    r = r/n
    return r
def join_bounds(d,pos,n):
r = (d['left'][pos] + d['right'][pos+n-1])/2
return r
def parse_data(obs,mcpath, data_structure, rebin=False,
rescale_value=None,
**kwargs):
fname = os.path.join(mcpath, obs)
data = loadtxt(fname)
bins = len(data)
datac = {}
for d in data_structure:
datac[d] = data[:bins, data_structure[d]-1]
if 'left' not in datac and 'right' not in datac:
assert('middle' in datac)
binsize = datac['middle'][1]-datac['middle'][0]
datac['left'] = [u-binsize/2 for u in datac['middle']]
datac['right'] = [u+binsize/2 for u in datac['middle']]
x = [datac['left'][u] for u in range(bins)]
x += [datac['right'][bins-1]]
m = [(datac['left'][u] + datac['right'][u])/2. for u in range(bins)]
if rescale_value:
y = [u*rescale_value for u in datac['value']]
y += [y[-1]]
yerr = [u*rescale_value for u in datac['error']]
else:
y = datac['value']
yerr = datac['error']
if rebin:
nbins = (bins//rebin)*rebin
x = [datac['left'][i] for i in range(0, nbins, rebin)]
# TODO: <09-05-19, J.-N. Lang> #
        # If nbins < bins, the leftover bins would need to be combined by hand
        # (they are smaller than a full rebinned bin). Another (fake) point is
        # appended to continue the line when plotting with the steps-post option.
x += [datac['right'][nbins-1]]
y = [join_value(y, i, rebin) for i in range(0, nbins, rebin)]
y += [y[-1]]
m = [join_bounds(datac, i, rebin) for i in range(0, nbins, rebin)]
m += [m[-1]]
yerr = [join_err(yerr, i, rebin) for i in range(0, nbins, rebin)]
yerr += [yerr[-1]]
return x,y,yerr,m
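# Editor's worked example of the rebinning above: with rebin=2, bins
# [0,1),[1,2),[2,3),[3,4) holding values [a,b,c,d] become [0,2),[2,4) holding
# [(a+b)/2, (c+d)/2]; errors combine in quadrature and are divided by the
# number of merged bins (see join_value/join_err).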
def parse_mocanlo_data(obs,**kwargs):
mcpath = 'data/MoCaNLO/data_16/'
data_structure = {'bin': 1,
'left': 2,
'right': 3,
'value': 4,
'error': 5,
'hits': 6}
return parse_data(obs, mcpath, data_structure, **kwargs)
def parse_pwhg_data(obs, **kwargs):
# pwhgpath = 'data/Powheg/res_pt4_rec01/'
pwhgpath = 'data/Powheg/res_pt4_incl_as/'
data_structure = {'middle': 1,
'value': 2,
'error': 3}
return parse_data(obs, pwhgpath, data_structure, **kwargs)
################
# upper plot #
################
if ylogscale is True:
up.axes.set_yscale('log')
plotstyle = {'color': 'black',
'drawstyle': 'steps-post',
'linewidth': 1}
x,y,yerr,m = parse_mocanlo_data(obsmclo, rebin=rebin,
rescale_value=rescale_mc)
x2 = x[:bins]
y2 = y[:bins]
y2err = yerr[:bins]
up.plot(x2, y2, label=mocanlo_label_lo, **plotstyle)
up.set_xlim(*xlim)
plotstyle = {'color': 'blue',
'drawstyle': 'steps-post',
'linestyle': '--',
'linewidth': 1}
x,y,yerr,m = parse_mocanlo_data(obsmcnlo, rebin=rebin,
rescale_value=rescale_mc)
x2nlo = x[:bins]
y2nlo = y[:bins]
y2errnlo = yerr[:bins]
up.plot(x2nlo, y2nlo, label=mocanlo_label_nlo, **plotstyle)
up.set_xlim(*xlim)
plotstyle = {'color': 'red',
'drawstyle': 'steps-post',
'linestyle': '-.',
'linewidth': 1}
x,y,yerr,m = parse_pwhg_data(obspwhgps, rebin=rebin,
rescale_value=rescale_pwgh)
x1 = x[:bins]
m1 = m[:bins]
y1 = [u for u in y[:bins]]
y1err = [u for u in yerr[:bins]]
up.plot(x1, y1, label=powheg_label, **plotstyle)
################
# lower plot #
################
def compare_bins(y1,y1err,y2,y2err):
ydiff = [100.*(y1[u]-y2[u])/(abs(y2[u])) for u in range(len(y2))]
ydifferr = [100.*sqrt((y1err[u]/y1[u])**2+(y2err[u]/y2[u])**2)
for u in range(len(y2))]
return ydiff, ydifferr
plotstyle = {'color': 'red',
'drawstyle': 'steps-post',
'linewidth': 1}
y3, y3err = compare_bins(y1,y1err,y2,y2err)
lp.plot(x1, y3, **plotstyle)
capsize=2
elinewidth=0.8
lp.errorbar(m1, y3, yerr=y3err, fmt='none',
drawstyle='mid', capsize=capsize, elinewidth=elinewidth, color='black')
plotstyle = {'color': 'blue',
'drawstyle': 'steps-post',
'linewidth': 1}
y4, y4err = compare_bins(y2nlo,y2errnlo,y2,y2err)
lp.plot(x1, y4, **plotstyle)
capsize=2
elinewidth=0.8
lp.errorbar(m1, y4, yerr=y4err, fmt='none',
drawstyle='mid', capsize=capsize, elinewidth=elinewidth,
color='black')
if yticks is not None:
lp.yaxis.set_ticks(yticks)
if ylimup is not None:
up.set_ylim(*ylimup)
lp.set_ylim(*ylimlp)
lp.set_xlim(*xlim)
##############################
# legend,label,annotations #
##############################
handles, labels = up.get_legend_handles_labels()
up.legend(handles, labels, handlelength=1.0, frameon=False, fontsize=fontsize, prop={'size':
fontsize},
loc='best')
plt.text(0.1, 1.05, uylabel,
horizontalalignment='center',
fontsize=fontsize,
transform=up.transAxes)
plt.text(0.0, 1.06, lylabel,
horizontalalignment='center',
fontsize=fontsize,
transform=lp.transAxes)
lp.set_xlabel(xlabel, fontsize=fontsize)
plt.setp(up.axes.get_xticklabels(), visible=False)
# plt.show()
fig.savefig(oname, dpi=120, bbox_inches='tight')
|
[
"jiexiao@pku.edu.cn"
] |
jiexiao@pku.edu.cn
|
|
91e40d0f70f0106885be19ffb1c262f22102ec2f
|
8d8278a835f3fd7f878cf5539f6a09a6346e1812
|
/solution.py
|
e06b8d01cfd59a0982d14a7fbecbf337daec5616
|
[] |
no_license
|
mociadm-w4k2/lab2
|
40c875cb9c93039c657f6c6c033fab7fcd71fbfa
|
2a374dae7565898bf5472259fe1c6f6f84c4280e
|
refs/heads/master
| 2020-04-06T09:37:52.482974
| 2018-11-13T16:26:43
| 2018-11-13T16:26:43
| 157,350,202
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
# Exercise 1
# Fill with code
# Exercise 2
# Fill with code
# Exercise 3
# Fill with code
# Exercise 4
# Fill with code
# Exercise 5
# Fill with code
# Exercise 6
def prepare_data(file_name):
# Fill with code
return X, y
|
[
"noreply@github.com"
] |
mociadm-w4k2.noreply@github.com
|
f48ee1c2fb841d283cf1a9eeda275639a74825e7
|
09dbe6f5f4a01d25df5b5783fcf621d4d4e6acab
|
/python/workflow/__init__.py
|
68b8c99d8054b5ee7de8d79efbde2f07b363598d
|
[] |
no_license
|
cjhopp/scripts
|
b2e925b42e3685713475d15cb4a637e62143778a
|
89cecfe82532e84a739e26a845b9455df0090ca5
|
refs/heads/master
| 2023-05-31T06:35:03.638455
| 2023-05-15T18:14:44
| 2023-05-15T18:14:44
| 71,869,887
| 7
| 5
| null | 2017-07-12T13:59:15
| 2016-10-25T07:15:46
| null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
#!/usr/bin/python
__all__ = ['util', 'data_prep', 'focal_mecs', 'magnitudes', 'obspyck_util',
'process_parties', 'pyfehm_util', 'relocate', 'shelly_focmecs',
'shelly_mags', 'vis_3d_grid']
|
[
"chopp@lbl.gov"
] |
chopp@lbl.gov
|
4f46318710d0faa395f2befd39d9c72fd1a5173d
|
0961b605531fa73cb88640b5978572217bdb6554
|
/excelTitleToNumber.py
|
d9000b22ae72dcd4fcf299fb2d1d19c302278509
|
[] |
no_license
|
LYoung-Hub/Algorithm-Data-Structure
|
e01d8b72c4026d9d4b9788016ca54c4e359e80ba
|
e42ec45d98f990d446bbf4f1a568b70855af5380
|
refs/heads/master
| 2020-07-13T17:17:42.897244
| 2019-11-11T06:15:59
| 2019-11-11T06:15:59
| 205,121,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
class Solution(object):
def titleToNumber(self, s):
"""
:type s: str
:rtype: int
"""
length = len(s)
if length == 0:
return 0
        ans = 0
        # Interpret the title as a bijective base-26 numeral: 'A' -> 1 ... 'Z' -> 26.
        # ord(ch) - 64 assumes uppercase letters, as Excel column titles are.
        for i in range(0, length - 1):
            curr = ord(s[i]) - 64
            ans = pow(26, length - 1 - i) * curr + ans
        ans += ord(s[-1]) - 64
        return ans
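# Editor's demo of the mapping (assumes uppercase titles):
if __name__ == "__main__":
    s = Solution()
    assert s.titleToNumber("A") == 1     # 'A' -> 1
    assert s.titleToNumber("AA") == 27   # 1*26 + 1
    assert s.titleToNumber("ZY") == 701  # 26*26 + 25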
|
[
"yangliu2@caltech.edu"
] |
yangliu2@caltech.edu
|
54bf05de8cba8f49586ed5ab995b396493f49645
|
0178f788d9d8aa9bf1d72b63967e07b20b5981e9
|
/tests/python/kaolin/metrics/test_pointcloud.py
|
050ec4ba10f94cf81aec55d380d09889e697b090
|
[
"Apache-2.0"
] |
permissive
|
nexuslrf/kaolin
|
0ad5a7a92abcea8dc3dc50f8ef3c83d59ea7960d
|
85a78ef931b2b5231322566a25882ba049b01d26
|
refs/heads/master
| 2023-09-06T06:00:20.506722
| 2021-11-25T21:47:59
| 2021-11-25T21:47:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,849
|
py
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from kaolin.metrics import pointcloud as pc
from kaolin.utils.testing import FLOAT_DTYPES, with_seed
@pytest.mark.parametrize('dtype', FLOAT_DTYPES)
@pytest.mark.parametrize('device', ['cuda'])
class TestSidedDistance:
@pytest.fixture(autouse=True)
def get_tol(self, device, dtype):
if dtype == torch.half:
return 1e-3, 1e-3
elif dtype == torch.float:
return 1e-5, 1e-4
elif dtype == torch.double:
return 1e-6, 1e-5
@with_seed(torch_seed=0)
@pytest.fixture(autouse=True)
def input_double_p1(self, device, dtype):
return torch.randn((5, 20, 3), requires_grad=True, device='cuda', dtype=torch.double)
@with_seed(torch_seed=0)
@pytest.fixture(autouse=True)
def input_double_p2(self, device, dtype):
return torch.randn((5, 15, 3), requires_grad=True, device='cuda', dtype=torch.double)
@pytest.fixture(autouse=True)
def get_input(self, device, dtype):
p1 = torch.tensor([[[8.8977, 4.1709, 1.2839],
[8.5640, 7.7767, 9.4214]],
[[0.5431, 6.4495, 11.4914],
[3.2126, 8.0865, 3.1018]]], dtype=dtype, device=device)
p2 = torch.tensor([[[6.9340, 6.1152, 3.4435],
[0.1032, 9.8181, 11.3350]],
[[11.4006, 2.2154, 7.9589],
[4.2586, 1.4133, 7.2606]]], dtype=dtype, device=device)
return p1, p2
@with_seed(torch_seed=0)
@pytest.fixture(autouse=True)
def get_large_input(self, device, dtype):
N = 100
B = 3
M = 50
p1 = torch.randint(0, 100, (B, N, 3), dtype=dtype, device=device)
p2 = torch.randint(0, 100, (B, M, 3), dtype=dtype, device=device)
return p1, p2
@pytest.fixture(autouse=True)
def target_grad_double(self, input_double_p1, input_double_p2):
        # If test_grad_check passes, gradients from torch.double inputs are trustworthy.
outputs = torch.sum(pc.sided_distance(input_double_p1, input_double_p2)[0])
outputs.backward()
return input_double_p1.grad.clone(), input_double_p2.grad.clone()
@pytest.fixture(autouse=True)
def target_grad_double_2(self, get_input):
        # If test_grad_check passes, gradients from torch.double inputs are trustworthy.
p1, p2 = get_input
p1 = p1.detach()
p2 = p2.detach()
p1.requires_grad = True
p2.requires_grad = True
outputs = torch.sum(pc.sided_distance(p1, p2)[0])
outputs.backward()
return p1.grad.clone(), p2.grad.clone()
@pytest.fixture(autouse=True)
def target_grad_double_large(self, get_large_input):
        # If test_grad_check passes, gradients from torch.double inputs are trustworthy.
p1, p2 = get_large_input
p1 = p1.detach()
p2 = p2.detach()
p1.requires_grad = True
p2.requires_grad = True
outputs = torch.sum(pc.sided_distance(p1, p2)[0])
outputs.backward()
return p1.grad.clone(), p2.grad.clone()
def test_sided_distance(self, device, dtype, get_input, get_tol):
p1, p2 = get_input
output_p1, output_idx_p1 = pc.sided_distance(p1, p2)
expected_p1 = torch.tensor([[12.3003, 41.1528], [57.0679, 62.9213]], device=device, dtype=dtype)
expected_idx_p1 = torch.tensor([[0, 0], [1, 1]], device=device, dtype=torch.long)
atol, rtol = get_tol
assert torch.allclose(output_p1, expected_p1, atol=atol, rtol=rtol)
assert torch.equal(output_idx_p1, expected_idx_p1)
def test_sided_distance_large_input(self, device, dtype, get_large_input, get_tol):
p1, p2 = get_large_input
output_p1, output_idx_p1 = pc.sided_distance(p1, p2)
expected_p1 = pc._sided_distance(p1, p2)
atol, rtol = get_tol
assert torch.allclose(output_p1, expected_p1, atol=atol, rtol=rtol)
@with_seed(torch_seed=0)
def test_directed_distance_batch_size(self, device, dtype):
with pytest.raises(RuntimeError,
match=r"Expected tensor of size \[3, 3, 3\], but got tensor "
r"of size \[2, 3, 3\] for argument #2 'p2' "
r"\(while checking arguments for sided_distance_forward_cuda\)"):
p1 = torch.randint(0, 10, (3, 2, 3), dtype=dtype, device=device)
p2 = torch.randint(0, 10, (2, 3, 3), dtype=dtype, device=device)
pc.sided_distance(p1, p2)
@with_seed(torch_seed=0)
def test_directed_distance_dims(self, device, dtype):
with pytest.raises(RuntimeError,
match="Expected 3-dimensional tensor, but got "
"4-dimensional tensor for argument #1 'p1' "
r"\(while checking arguments for sided_distance_forward_cuda\)"):
p1 = torch.randint(0, 10, (3, 2, 3, 4), dtype=dtype, device=device)
p2 = torch.randint(0, 10, (2, 3, 3), dtype=dtype, device=device)
pc.sided_distance(p1, p2)
with pytest.raises(RuntimeError,
match=r"Expected tensor of size \[2, 2, 3\], but got "
r"tensor of size \[2, 2, 2\] for argument #1 'p1' "
r"\(while checking arguments for sided_distance_forward_cuda\)"):
p1 = torch.randint(0, 10, (2, 2, 2), dtype=dtype, device=device)
p2 = torch.randint(0, 10, (2, 3, 3), dtype=dtype, device=device)
pc.sided_distance(p1, p2)
def test_grad_check(self, device, dtype, input_double_p1, input_double_p2):
if dtype != torch.double:
pytest.skip("Gradient check only works in double.")
input_points = (input_double_p1, input_double_p2)
grad_result = torch.autograd.gradcheck(pc.sided_distance, input_points, eps=1e-6, atol=1e-6)
assert grad_result
def test_grad_check_2(self, device, dtype, get_input):
# Test for gradient accumulation w.r.t p2
if dtype != torch.double:
pytest.skip("Gradient check only works in double.")
p1, p2 = get_input
p1.requires_grad = True
p2.requires_grad = True
grad_result = torch.autograd.gradcheck(pc.sided_distance, (p1, p2), eps=1e-6, atol=1e-6)
assert grad_result
def test_grad_check_large(self, device, dtype, get_large_input):
# Test for gradient accumulation w.r.t p2
if dtype != torch.double:
pytest.skip("Gradient check only works in double.")
p1, p2 = get_large_input
p1.requires_grad = True
p2.requires_grad = True
grad_result = torch.autograd.gradcheck(pc.sided_distance, (p1, p2), eps=1e-6, atol=1e-6)
assert grad_result
def test_grad_check_other_type(self, device, dtype, input_double_p1, input_double_p2, target_grad_double):
if dtype == torch.double:
pytest.skip("Gradient check for double already tested.")
p1 = input_double_p1.to(dtype).detach()
p2 = input_double_p2.to(dtype).detach()
p1.requires_grad = True
p2.requires_grad = True
output = pc.sided_distance(p1, p2)[0]
torch.sum(output).backward()
target_grad_p1, target_grad_p2 = target_grad_double
target_grad_p1 = target_grad_p1.to(dtype)
target_grad_p2 = target_grad_p2.to(dtype)
assert torch.allclose(p1.grad, target_grad_p1, rtol=1e-2, atol=1e-2)
assert torch.allclose(p2.grad, target_grad_p2, rtol=1e-2, atol=1e-2)
def test_grad_check_other_type_2(self, device, dtype, get_input, target_grad_double_2):
if dtype == torch.double:
pytest.skip("Gradient check for double already tested.")
p1, p2 = get_input
p1.requires_grad = True
p2.requires_grad = True
output = pc.sided_distance(p1, p2)[0]
torch.sum(output).backward()
target_grad_p1, target_grad_p2 = target_grad_double_2
target_grad_p1 = target_grad_p1.to(dtype)
target_grad_p2 = target_grad_p2.to(dtype)
assert torch.allclose(p1.grad, target_grad_p1, rtol=1e-2, atol=1e-2)
assert torch.allclose(p2.grad, target_grad_p2, rtol=1e-2, atol=1e-2)
def test_grad_check_other_type_large(self, device, dtype, get_large_input, target_grad_double_large):
if dtype == torch.double:
pytest.skip("Gradient check for double already tested.")
p1, p2 = get_large_input
p1.requires_grad = True
p2.requires_grad = True
output = pc.sided_distance(p1, p2)[0]
torch.sum(output).backward()
target_grad_p1, target_grad_p2 = target_grad_double_large
target_grad_p1 = target_grad_p1.to(dtype)
target_grad_p2 = target_grad_p2.to(dtype)
assert torch.allclose(p1.grad, target_grad_p1, rtol=1e-2, atol=1e-2)
assert torch.allclose(p2.grad, target_grad_p2, rtol=1e-2, atol=1e-2)
@pytest.mark.parametrize('dtype', FLOAT_DTYPES)
@pytest.mark.parametrize('device', ['cuda'])
class TestChamferDistance:
@pytest.fixture(autouse=True)
def get_tol(self, device, dtype):
if dtype == torch.half:
return 1e-3, 1e-3
elif dtype == torch.float:
return 1e-5, 1e-4
elif dtype == torch.double:
return 1e-6, 1e-5
@pytest.fixture(autouse=True)
def get_input(self, device, dtype):
p1 = torch.tensor([[[8.8977, 4.1709, 1.2839],
[8.5640, 7.7767, 9.4214]],
[[0.5431, 6.4495, 11.4914],
[3.2126, 8.0865, 3.1018]]], dtype=dtype, device=device)
p2 = torch.tensor([[[6.9340, 6.1152, 3.4435],
[0.1032, 9.8181, 11.3350]],
[[11.4006, 2.2154, 7.9589],
[4.2586, 1.4133, 7.2606]]], dtype=dtype, device=device)
return p1, p2
def test_chamfer_distance1(self, device, dtype, get_input, get_tol):
p1, p2 = get_input
output1 = pc.chamfer_distance(p1, p2)
output2 = pc.chamfer_distance(p2, p1)
expected1 = torch.tensor([72.5838, 151.0809], dtype=dtype, device=device)
atol, rtol = get_tol
        assert torch.allclose(output1, expected1, atol=atol, rtol=rtol)
        # chamfer_distance with the default equal weights is symmetric, so the
        # reversed call computed above must agree with the same expected values
        assert torch.allclose(output2, expected1, atol=atol, rtol=rtol)
def test_chamfer_distance2(self, device, dtype, get_input, get_tol):
p1, p2 = get_input
output2 = pc.chamfer_distance(p1, p2, w1=1.3, w2=0.8)
expected2 = torch.tensor([71.4303, 150.8620], dtype=dtype, device=device)
atol, rtol = get_tol
assert torch.allclose(output2, expected2, atol=atol, rtol=rtol)
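# Hedged cross-check (not part of the original suite): a brute-force chamfer
# distance built from torch.cdist. It reproduces the expected values above under
# the convention that pc.chamfer_distance averages squared nearest-neighbor
# distances per direction and sums the two weighted directions.
# def reference_chamfer(p1, p2, w1=1.0, w2=1.0):
#     d = torch.cdist(p1, p2) ** 2               # (B, N1, N2) squared distances
#     to_p2 = d.min(dim=2).values.mean(dim=1)    # mean over p1 of nearest-p2 distance
#     to_p1 = d.min(dim=1).values.mean(dim=1)    # mean over p2 of nearest-p1 distance
#     return w1 * to_p2 + w2 * to_p1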
@pytest.mark.parametrize('dtype', FLOAT_DTYPES)
@pytest.mark.parametrize('device', ['cuda'])
class TestFScore:
@pytest.fixture(autouse=True)
def get_tol(self, device, dtype):
if dtype == torch.half:
return 1e-3, 1e-3
elif dtype == torch.float:
return 1e-5, 1e-4
elif dtype == torch.double:
return 1e-6, 1e-5
def test_FScore(self, device, dtype, get_tol):
gt_points = torch.tensor([[[8.8977, 4.1709, 1.2839],
[8.5640, 7.7767, 9.4214]],
[[0.5431, 6.4495, 11.4914],
[3.2126, 8.0865, 3.1018]]], dtype=dtype, device=device)
pred_points = torch.tensor([[[8.8914, 4.1788, 1.2176],
[8.5291, 7.5513, 9.5412]],
[[0.4010, 6.4602, 11.5183],
[3.2977, 8.0325, 3.1180]]], dtype=dtype, device=device)
output1 = pc.f_score(gt_points, pred_points, radius=0.2)
output2 = pc.f_score(gt_points, pred_points, radius=0.12)
expected1 = torch.tensor([1, 0.5], device=device, dtype=dtype)
expected2 = torch.tensor([0.5, 0.5], device=device, dtype=dtype)
atol, rtol = get_tol
assert torch.allclose(output1, expected1, atol=atol, rtol=rtol)
assert torch.allclose(output2, expected2, atol=atol, rtol=rtol)
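# Hedged reference sketch (assumed semantics; pc.f_score's exact distance/radius
# convention may differ): the F-score at radius r is the harmonic mean of
# precision (fraction of predicted points within r of some ground-truth point)
# and recall (fraction of ground-truth points within r of some predicted point).
# def reference_f_score(gt, pred, radius):
#     d = torch.cdist(pred, gt)                                      # (B, Np, Ng)
#     precision = (d.min(dim=2).values < radius).float().mean(dim=1)
#     recall = (d.min(dim=1).values < radius).float().mean(dim=1)
#     return 2 * precision * recall / (precision + recall).clamp(min=1e-8)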
|
[
"noreply@github.com"
] |
nexuslrf.noreply@github.com
|
fcb22e747567a162c6efe867fc957b9bc486b356
|
3529744f2a26e567162df37cbc62193670a61a8e
|
/rf/registration/admin.py
|
5d3f7861475c98d2da4bd959fc07e2d09e68bc79
|
[
"BSD-2-Clause"
] |
permissive
|
ooola/ruggerfest
|
7d46081e651c514ec4e65f3e6b1ae0cbb24e08ac
|
20e4b72678a960db4d660d893f82f6cf8c9c9116
|
refs/heads/master
| 2020-04-09T14:57:24.200365
| 2013-08-22T14:30:01
| 2013-08-22T14:30:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 227
|
py
|
from django.contrib import admin
from models import Team
class TeamAdmin(admin.ModelAdmin):
list_display = ('teamname', 'division', 'captain', 'email', 'phone_number', 'date', 'paid')
admin.site.register(Team, TeamAdmin)
|
[
"ola.nordstrom@gmail.com"
] |
ola.nordstrom@gmail.com
|
442e6625ffa0755f8423cb17b85506d4ad09d399
|
1f318aec4903a8a5a024c70b6f2fea1742af3f0c
|
/python3/VisionTherapyReader.py
|
a19a65af4a9d6c2dfe21c22f1fee3a350c5b2527
|
[] |
no_license
|
lightkraken/VisionTherapyReader
|
5a0aed474615030512cb51853ebeffc474acc04b
|
2a6284338b7668dd3a21d64de3920eebfd1dc401
|
refs/heads/master
| 2020-05-16T14:00:37.729650
| 2015-04-22T19:27:55
| 2017-11-13T20:34:31
| 34,411,316
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,203
|
py
|
from tkinter import *
import tkinter.filedialog
import tkinter.ttk
from files.colorreader import *
from files.barreader import *
from files.ptadjust import *
from files.rbadjust import *
from files.ptcalibrate import *
from files.rbcalibrate import *
from files.paster import *
from files import coder
class Root(Tk):
def __init__(self, *args, **kwargs):
Tk.__init__(self, *args, **kwargs)
self.title("Vision Therapy Reader")
self.start()
def start(self):
# give the config file some info
cnfg.SCREEN_WIDTH = self.winfo_screenwidth()
cnfg.SCREEN_HEIGHT = self.winfo_screenheight()
# 1x1 grid
# grid grows in all directions
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
# content frame
# placed at 0,0
# sticky on all sides
self.content = Content(self)
self.content.grid(column=0, row=0, sticky="NSEW")
# menu bar
self.option_add('*tearOff', FALSE)
self.menubar = MenuBar(self)
self.config(menu=self.menubar)
# key bindings
self.bind("<Up>", self.up_press)
self.bind("<Down>", self.down_press)
# close click binding
self.protocol("WM_DELETE_WINDOW", self.on_close)
def on_close(self):
cnfg.END_OPTIONS = cnfg.create_options_dict()
if cnfg.START_OPTIONS == cnfg.END_OPTIONS:
self.destroy()
else:
self.quit_dialogue = cnfg.QuitDialogue(self)
def up_press(self, *args):
if cnfg.CURRENT_TAB == 0:
self.content.barreaderframe.barstextframe.canvas.yview_scroll(-1,UNITS)
elif cnfg.CURRENT_TAB == 1:
self.content.colorreaderframe.colorstextframe.coloredtext.yview_scroll(-1,UNITS)
def down_press(self, *args):
if cnfg.CURRENT_TAB == 0:
self.content.barreaderframe.barstextframe.canvas.yview_scroll(1,UNITS)
elif cnfg.CURRENT_TAB == 1:
self.content.colorreaderframe.colorstextframe.coloredtext.yview_scroll(1,UNITS)
def restart(self):
self.content.destroy()
self.start()
class MenuBar(Menu):
def __init__(self, *args, **kwargs):
Menu.__init__(self, *args, **kwargs)
self.parent = args[0]
# file
self.file_menu = Menu(self)
self.file_menu.add_command(label="Open file...", command=self.on_open)
self.file_menu.add_command(label="Paste text...", command=self.on_paste)
self.file_menu.add_command(label="Close file", command=self.on_close)
self.file_menu.add_command(label="Exit", command=self.parent.on_close)
self.add_cascade(label="File", menu=self.file_menu)
# color
self.color_menu = Menu(self)
self.color_menu.add_command(label="Calibrate colors...", command=self.calib_menu_start)
self.advanced_color_submenu = Menu(self)
self.advanced_color_submenu.add_command(label="Manually adjust colors...", command=self.adjust_menu_start)
self.color_menu.add_cascade(menu=self.advanced_color_submenu, label="Advanced...")
self.add_cascade(menu=self.color_menu, label="Colors")
# view
self.view_menu = Menu(self)
self.show_options_var = IntVar(value=cnfg.SHOW_OPTIONS)
self.view_menu.add_checkbutton(label="Show Options Bar", onvalue=1, offvalue=0, variable=self.show_options_var, command=self.show_options_click)
self.add_cascade(label="View", menu=self.view_menu)
# password
self.password_menu = Menu(self)
self.password_menu.add_command(label="Enter Config Code...", command=self.enter_password)
self.password_menu.add_command(label="Create Config Code", command=self.create_password)
self.add_cascade(menu=self.password_menu, label="Config Code")
def on_paste(self):
self.pastey = Paster(self)
def adjust_menu_start(self):
self.adjust = AdjustMenu(self)
def calib_menu_start(self):
self.calib = CalibrateMenu(self)
def enter_password(self):
self.password_decoder = coder.AskPassword(self)
def create_password(self):
self.password_creator = coder.RetrievePassWord(self)
def show_options_click(self):
cnfg.SHOW_OPTIONS = self.show_options_var.get()
self.parent.content.barreaderframe.barsbottomoptionsframe.show_options()
self.parent.content.colorreaderframe.colorsbottomoptionsframe.show_options()
def calib_purpleteal(self):
self.calibpt_menu = PTCalibrate(self)
def calib_redblue(self):
self.calibrb_menu = RBCalibrate(self)
def adjust_purpleteal(self):
self.adjustpt_menu = PTAdjustMenu(self)
def adjust_redblue(self):
self.adjustrb_menu = RBAdjustMenu(self)
def on_open(self):
dialog = tkinter.filedialog.Open(self, filetypes = [('Text files', '*.txt')])
openfile = dialog.show()
if openfile != "":
cnfg.load_file(openfile)
self.refresh_all()
def on_close(self):
cnfg.TEXT = []
cnfg.TEXT.append("")
cnfg.TEXT_INDEX = 0
self.refresh_all()
def on_quit(self):
self.parent.destroy()
def refresh_all(self):
self.parent.content.barreaderframe.barstextframe.refresh_text()
self.parent.content.barreaderframe.barstextframe.create_bars()
self.parent.content.barreaderframe.barstextframe.refresh_text()
self.parent.content.barreaderframe.barstextframe.update_scroll_region()
self.parent.content.colorreaderframe.colorstextframe.create_new_text()
self.parent.content.colorreaderframe.colorstextframe.refresh_text()
self.parent.content.update_page_buttons()
class Content(tkinter.ttk.Notebook):
def __init__(self, *args, **kwargs):
tkinter.ttk.Notebook.__init__(self, *args, takefocus=False)
self.parent = args[0]
# common variables
self.palette_var = StringVar(value=cnfg.PALETTE_CHOICE)
self.font_chooser_var = StringVar(value=cnfg.FONT)
self.font_size_var = IntVar(value=cnfg.FONT_SIZE)
self.font_bold_var = IntVar()
if cnfg.FONT_BOLD == "":
self.font_bold_var.set(0)
elif cnfg.FONT_BOLD == "bold":
self.font_bold_var.set(1)
self.barreaderframe = BarReaderFrame(self)
self.colorreaderframe = ColorReaderFrame(self)
self.add(self.barreaderframe, text="Bar reader")
self.add(self.colorreaderframe, text="Color reader")
self.bind("<<NotebookTabChanged>>", self.set_current_tab_var)
self.select(cnfg.CURRENT_TAB)
self.update_page_buttons()
self.palette_click()
def set_current_tab_var(self, *args):
cnfg.CURRENT_TAB = self.index(self.select())
def palette_click(self, *args):
if self.palette_var.get() == " ":
self.palette_var.set(cnfg.PALETTE_CHOICE)
cnfg.PALETTE_CHOICE = self.palette_var.get()
cnfg.update_palette()
self.parent.focus()
# bar reader
self.barreaderframe.barstextframe.canvas.config(background=cnfg.PALETTE[3])
self.barreaderframe.barstextframe.font_update()
# color reader
self.colorreaderframe.colorstextframe.create_new_tags()
self.colorreaderframe.colorstextframe.refresh_text()
def font_chooser_click(self, *args):
if self.font_chooser_var.get() == " ":
self.font_chooser_var.set(cnfg.FONT)
cnfg.FONT = self.font_chooser_var.get()
self.parent.focus()
# bar reader
self.barreaderframe.barstextframe.font_update()
# color reader
self.colorreaderframe.colorstextframe.refresh_text()
def font_size_cursor_click(self, *args):
try:
self.font_size_previous = self.font_size_var.get()
except:
pass
def font_size_click(self, *args, **kwargs):
try:
if self.font_size_var.get() < 8:
self.font_size_var.set(8)
elif self.font_size_var.get() > 80:
self.font_size_var.set(80)
except:
self.font_size_var.set(self.font_size_previous)
cnfg.FONT_SIZE = self.font_size_var.get()
self.parent.focus()
self.barreaderframe.barstextframe.font_update()
self.colorreaderframe.colorstextframe.refresh_text()
def font_bold_click(self):
if self.font_bold_var.get() == 0:
cnfg.FONT_BOLD = ""
if self.font_bold_var.get() == 1:
cnfg.FONT_BOLD = "bold"
# bar reader
self.barreaderframe.barstextframe.font_update()
# color reader
self.colorreaderframe.colorstextframe.refresh_text()
def on_prev_press(self):
cnfg.TEXT_INDEX -= 1
# bar reader
self.barreaderframe.barstextframe.font_update()
# color reader
self.colorreaderframe.colorstextframe.delete_tags()
self.colorreaderframe.colorstextframe.create_new_tags()
self.colorreaderframe.colorstextframe.create_new_text()
self.colorreaderframe.colorstextframe.refresh_text()
self.update_page_buttons()
def on_next_press(self):
cnfg.TEXT_INDEX += 1
# bar reader
self.barreaderframe.barstextframe.font_update()
# color reader
self.colorreaderframe.colorstextframe.delete_tags()
self.colorreaderframe.colorstextframe.create_new_tags()
self.colorreaderframe.colorstextframe.create_new_text()
self.colorreaderframe.colorstextframe.refresh_text()
self.update_page_buttons()
def update_page_buttons(self):
if len(cnfg.TEXT) == 1: #if the text is only one page long
self.barreaderframe.barsbottomoptionsframe.prevbutton.configure(state=DISABLED)
self.barreaderframe.barsbottomoptionsframe.nextbutton.configure(state=DISABLED)
self.colorreaderframe.colorsbottomoptionsframe.prevbutton.configure(state=DISABLED)
self.colorreaderframe.colorsbottomoptionsframe.nextbutton.configure(state=DISABLED)
elif len(cnfg.TEXT) > 1: #if the text is more than one page...
if cnfg.TEXT_INDEX == 0: #and at the beginning
self.barreaderframe.barsbottomoptionsframe.prevbutton.configure(state=DISABLED)
self.barreaderframe.barsbottomoptionsframe.nextbutton.configure(state=NORMAL)
self.colorreaderframe.colorsbottomoptionsframe.prevbutton.configure(state=DISABLED)
self.colorreaderframe.colorsbottomoptionsframe.nextbutton.configure(state=NORMAL)
elif cnfg.TEXT_INDEX == len(cnfg.TEXT)-1: #and at the end
self.barreaderframe.barsbottomoptionsframe.prevbutton.configure(state=NORMAL)
self.barreaderframe.barsbottomoptionsframe.nextbutton.configure(state=DISABLED)
self.colorreaderframe.colorsbottomoptionsframe.prevbutton.configure(state=NORMAL)
self.colorreaderframe.colorsbottomoptionsframe.nextbutton.configure(state=DISABLED)
else:
self.barreaderframe.barsbottomoptionsframe.prevbutton.configure(state=NORMAL) #and in the middle
self.barreaderframe.barsbottomoptionsframe.nextbutton.configure(state=NORMAL)
self.colorreaderframe.colorsbottomoptionsframe.prevbutton.configure(state=NORMAL)
self.colorreaderframe.colorsbottomoptionsframe.nextbutton.configure(state=NORMAL)
try:
self.barreaderframe.barstextframe.canvas.yview_moveto(0.0)
except:
pass
self.colorreaderframe.colorsbottomoptionsframe.current_page.set(value="{0} / {1}".format(cnfg.TEXT_INDEX+1, len(cnfg.TEXT)))
self.barreaderframe.barsbottomoptionsframe.current_page.set(value="{0} / {1}".format(cnfg.TEXT_INDEX+1, len(cnfg.TEXT)))
root = Root()
root.mainloop()
|
[
"joshua.behan@gmail.com"
] |
joshua.behan@gmail.com
|
62ea2cf617b2827d170b1172c33ef87418de9112
|
6d967da5fd95aa5e66ddbb211da40041006ca5ec
|
/myvenv/Lib/site-packages/pip/_vendor/idna/package_data.py
|
5c7a356106d74de03558d9b069f42a36e8724c2a
|
[] |
no_license
|
gevorkyannaira/my-first-blog
|
96e4458045a1dd0aa9c1f3ec69f4c829428200e0
|
42ab12a8c2b0e402b5fa1b8e5a7cdd2629d06c16
|
refs/heads/master
| 2022-09-03T21:14:18.946448
| 2020-05-18T18:15:39
| 2020-05-18T18:15:39
| 264,909,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 111
|
py
|
<<<<<<< HEAD
__version__ = '2.8'
=======
__version__ = '2.9'
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
|
[
"gevorkyannaira5@gmail.com"
] |
gevorkyannaira5@gmail.com
|
4a8fc58e050c984be3b4182514e57e8c3d48b934
|
55169883340a5fd9c405156238035afac169cff6
|
/boston_example.py
|
019fa6284578eb1b971dcbf279bf8d73fbcbb840
|
[] |
no_license
|
attakhan/DataScience-and-AI
|
1d16dbb4634c64c162847821d0ebce68bba091d8
|
2037e6bc2412e99831a3c63c0f9e4c4169df755e
|
refs/heads/master
| 2021-09-20T08:42:48.655821
| 2018-08-07T07:41:32
| 2018-08-07T07:41:32
| 111,201,330
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,699
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 10 13:08:55 2018
@author: muhammad.atta
"""
from keras.datasets import boston_housing
from keras import models
from keras import layers
import numpy as np
(train_data,train_targets) , (test_data , test_targets) = boston_housing.load_data()
print(train_data.shape)
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis = 0)
train_data /= std
test_data -= mean
test_data /= std
print(train_data)
def model_method() :
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64,activation='relu'))
model.add(layers.Dense(64,activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
partial_train_data = np.concatenate([train_data[:i * num_val_samples],train_data[(i + 1) * num_val_samples:]],axis=0)
partial_train_targets = np.concatenate([train_targets[:i * num_val_samples], train_targets[(i + 1) * num_val_samples:]],axis=0)
model = model_method()
model.fit(partial_train_data, partial_train_targets,
epochs=num_epochs, batch_size=1, verbose=0)
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
all_scores.append(val_mae)
print(np.mean(all_scores))
|
[
"noreply@github.com"
] |
attakhan.noreply@github.com
|
f5e0381785565c4cf6fa682343001d5bd8f1aeff
|
40ed785aabe76003d6416bcdef858ab25a8b6cd8
|
/back_service/apps.py
|
b095178eafc2c740b69bc7e4e02744c91403bbbb
|
[] |
no_license
|
Takaklas/mobility-offloading-server-side
|
ece5164c51960b1f963763e5fd007d86af7a49ac
|
4acd709a1a702d783769d273f633f6fda20c1282
|
refs/heads/master
| 2020-04-06T09:41:08.428712
| 2019-03-29T20:57:23
| 2019-03-29T20:57:23
| 157,352,448
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 98
|
py
|
from django.apps import AppConfig
class BackServiceConfig(AppConfig):
name = 'back_service'
|
[
"takgate7@gmail.com"
] |
takgate7@gmail.com
|
bfe903ff472179fdfe59c5d045c065182ac0ec97
|
517c863520bbe7652586b3095c0b7359755453ad
|
/main.py
|
09164c211d31e1342476bb8b7859e4db3660f2da
|
[] |
no_license
|
Wallar1/card_chemistry
|
b5309950f6fb266c72d2149d5712eabeb2f7ad11
|
e42509996722ccaa0569b2a7702f0ee08af5285b
|
refs/heads/master
| 2023-03-03T17:09:12.615042
| 2021-02-14T10:23:53
| 2021-02-14T10:23:53
| 326,491,089
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,337
|
py
|
from flask import Flask, jsonify, send_file, make_response, send_from_directory
from player import Player
from scientist import ALL_SCIENTISTS
app = Flask(__name__)
@app.route('/')
def home_page():
return send_file('./static/scientist_scroller.html')
@app.route('/service_worker.js')
def service_worker():
"""
    Normally this would just be served directly from the static directory where it lives. But we need to add this
    'Service-Worker-Allowed' header so that the worker can have access to all requests. That way we can cache all pages.
"""
response = make_response(send_from_directory('static',filename='service_worker.js'))
    # change the Content-Type header explicitly
response.headers['Content-Type'] = 'application/javascript'
response.headers['Service-Worker-Allowed'] = '/'
return response
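# Hedged usage sketch: Flask's built-in test client can confirm the custom header
# is present on the response (route and header names taken from the code above).
# with app.test_client() as client:
#     resp = client.get('/service_worker.js')
#     assert resp.headers['Service-Worker-Allowed'] == '/'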
@app.route('/scientists')
def scientists():
scientists = sorted(ALL_SCIENTISTS, key=lambda s: s.year)
scientists_json = [scientist.toJSON() for scientist in scientists]
return jsonify(scientists_json)
@app.route('/player')
def player():
return jsonify(Player().toJSON())
# def play_game():
# os.system('clear')
# player = Player()
# # TODO: make this some kind of loop
# won = player.start_battle(heat={}, lifelines={})
# if __name__ == '__main__':
# play_game()
|
[
"Robertwallace.naples@gmail.com"
] |
Robertwallace.naples@gmail.com
|
f6fc270193bc949f831597b37c31160cd44d1ebf
|
983dd371747f70473e0445aa810d98f555dc626e
|
/lispypy/__init__.py
|
0edbd20c3b77041467d0673f5d66ad19b041757f
|
[
"BSD-2-Clause"
] |
permissive
|
cg123/lispypy
|
6a2d9f54b5f3b0886b5591d1ff8f9e2fdf6304d8
|
04df5ea55c646342c8d5537787f9c0ce90aaf31b
|
refs/heads/master
| 2020-03-27T22:29:06.479280
| 2013-08-20T00:35:28
| 2013-08-20T00:35:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,462
|
py
|
# Copyright (c) 2013, Charles O. Goddard
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from . import common
from . import tokenizer
from . import parser
from . import lispobj
from . import interpreter
|
[
"Charles.Goddard@students.olin.edu"
] |
Charles.Goddard@students.olin.edu
|
d2c4033b46d5dd98b49f6fb13115f128c2f1873a
|
a3e9f51d0d7c086852421590c3fb8d2412bd013c
|
/test_models.py
|
83b521ccf839dcbf43fb2d65ec8e6d096f9da63a
|
[] |
no_license
|
Sprivideo4/Samsung-Prism-Project
|
db6b32b8f2cec58a756dea5b21b456d7e3329071
|
7e9b69f94ed17a151101c52f05f85efc7d95ce40
|
refs/heads/main
| 2022-12-18T21:21:49.767313
| 2020-10-06T02:45:36
| 2020-10-06T02:45:36
| 301,395,184
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,650
|
py
|
# Code for "TSM: Temporal Shift Module for Efficient Video Understanding"
# arXiv:1811.08383
# Ji Lin*, Chuang Gan, Song Han
# {jilin, songhan}@mit.edu, ganchuang@csail.mit.edu
# Notice that this file has been modified to support ensemble testing
import argparse
import time
import torch.nn.parallel
import torch.optim
from sklearn.metrics import confusion_matrix
from ops.dataset import TSNDataSet
from ops.models import TSN
from ops.transforms import *
from ops import dataset_config_test
from torch.nn import functional as F
# options
parser = argparse.ArgumentParser(description="TSM testing on the full validation set")
parser.add_argument('dataset', type=str)
# may contain splits
parser.add_argument('--weights', type=str, default=None)
parser.add_argument('--test_segments', type=str, default=25)
parser.add_argument('--dense_sample', default=False, action="store_true", help='use dense sample as I3D')
parser.add_argument('--twice_sample', default=False, action="store_true", help='use twice sample for ensemble')
parser.add_argument('--full_res', default=False, action="store_true",
help='use full resolution 256x256 for test as in Non-local I3D')
parser.add_argument('--test_crops', type=int, default=1)
parser.add_argument('--coeff', type=str, default=None)
parser.add_argument('--batch_size', type=int, default=1)
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
help='number of data loading workers (default: 8)')
# for true test
parser.add_argument('--test_list', type=str, default='/content/drive/My Drive/samsung/labels/test_videofolder.txt')
parser.add_argument('--csv_file', type=str, default='/content/drive/My Drive/samsung/labels/CVAE_test_results.csv')
parser.add_argument('--softmax', default=False, action="store_true", help='use softmax')
parser.add_argument('--max_num', type=int, default=-1)
parser.add_argument('--input_size', type=int, default=224)
parser.add_argument('--crop_fusion_type', type=str, default='avg')
parser.add_argument('--gpus', nargs='+', type=int, default=None)
parser.add_argument('--img_feature_dim',type=int, default=256)
parser.add_argument('--num_set_segments',type=int, default=1,help='TODO: select multiply set of n-frames from a video')
parser.add_argument('--pretrain', type=str, default='imagenet')
args = parser.parse_args()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
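# Worked illustration (hand-checked): with output = torch.tensor([[0.1, 0.7, 0.2]])
# and target = torch.tensor([1]), accuracy(output, target, topk=(1,)) returns
# [tensor(100.)], since the top-1 prediction (class 1) matches the target.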
def parse_shift_option_from_log_name(log_name):
if 'shift' in log_name:
strings = log_name.split('_')
for i, s in enumerate(strings):
if 'shift' in s:
break
return True, int(strings[i].replace('shift', '')), strings[i + 1]
else:
return False, None, None
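# Worked illustration (hypothetical checkpoint name): a weights file such as
# 'TSM_somethingv2_RGB_resnet50_shift8_blockres_avg.pth' parses to
# (True, 8, 'blockres'); a name without 'shift' parses to (False, None, None).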
weights_list = args.weights.split(',')
test_segments_list = [int(s) for s in args.test_segments.split(',')]
assert len(weights_list) == len(test_segments_list)
if args.coeff is None:
coeff_list = [1] * len(weights_list)
else:
coeff_list = [float(c) for c in args.coeff.split(',')]
if args.test_list is not None:
test_file_list = args.test_list.split('\n')
else:
test_file_list = [None] * len(weights_list)
data_iter_list = []
net_list = []
modality_list = []
total_num = None
for this_weights, this_test_segments, test_file in zip(weights_list, test_segments_list, test_file_list):
is_shift, shift_div, shift_place = parse_shift_option_from_log_name(this_weights)
if 'RGB' in this_weights:
modality = 'RGB'
else:
modality = 'Flow'
this_arch = this_weights.split('TSM_')[1].split('_')[2]
modality_list.append(modality)
num_class, test_file, root_path, prefix = dataset_config_test.return_dataset(args.dataset,
modality)
print('=> shift: {}, shift_div: {}, shift_place: {}'.format(is_shift, shift_div, shift_place))
net = TSN(num_class, this_test_segments if is_shift else 1, modality,
base_model=this_arch,
consensus_type=args.crop_fusion_type,
img_feature_dim=args.img_feature_dim,
pretrain=args.pretrain,
is_shift=is_shift, shift_div=shift_div, shift_place=shift_place,
non_local='_nl' in this_weights,
)
if 'tpool' in this_weights:
from ops.temporal_shift import make_temporal_pool
make_temporal_pool(net.base_model, this_test_segments) # since DataParallel
checkpoint = torch.load(this_weights)
checkpoint = checkpoint['state_dict']
# base_dict = {('base_model.' + k).replace('base_model.fc', 'new_fc'): v for k, v in list(checkpoint.items())}
base_dict = {'.'.join(k.split('.')[1:]): v for k, v in list(checkpoint.items())}
replace_dict = {'base_model.classifier.weight': 'new_fc.weight',
'base_model.classifier.bias': 'new_fc.bias',
}
for k, v in replace_dict.items():
if k in base_dict:
base_dict[v] = base_dict.pop(k)
net.load_state_dict(base_dict)
input_size = net.scale_size if args.full_res else net.input_size
if args.test_crops == 1:
cropping = torchvision.transforms.Compose([
GroupScale(net.scale_size),
GroupCenterCrop(input_size),
])
    elif args.test_crops == 3:  # full-resolution sampling: 3 crops, do not flip
cropping = torchvision.transforms.Compose([
GroupFullResSample(input_size, net.scale_size, flip=False)
])
elif args.test_crops == 5: # do not flip, so only 5 crops
cropping = torchvision.transforms.Compose([
GroupOverSample(input_size, net.scale_size, flip=False)
])
elif args.test_crops == 10:
cropping = torchvision.transforms.Compose([
GroupOverSample(input_size, net.scale_size)
])
else:
        raise ValueError("Only 1, 3, 5, 10 crops are supported, but got {}".format(args.test_crops))
data_loader = torch.utils.data.DataLoader(
TSNDataSet(root_path, test_file, num_segments=this_test_segments,
new_length=1 if modality == "RGB" else 5,
modality=modality,
image_tmpl=prefix,
test_mode=True,
remove_missing=len(weights_list) == 1,
transform=torchvision.transforms.Compose([
cropping,
Stack(roll=(this_arch in ['BNInception', 'InceptionV3'])),
ToTorchFormatTensor(div=(this_arch not in ['BNInception', 'InceptionV3'])),
GroupNormalize(net.input_mean, net.input_std),
]), dense_sample=args.dense_sample, twice_sample=args.twice_sample),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True,
)
if args.gpus is not None:
devices = [args.gpus[i] for i in range(args.workers)]
else:
devices = list(range(args.workers))
net = torch.nn.DataParallel(net.cuda())
net.eval()
data_gen = enumerate(data_loader)
if total_num is None:
total_num = len(data_loader.dataset)
else:
assert total_num == len(data_loader.dataset)
data_iter_list.append(data_gen)
net_list.append(net)
output = []
def eval_video(video_data, net, this_test_segments, modality):
net.eval()
with torch.no_grad():
i, data, label = video_data
batch_size = label.numel()
num_crop = args.test_crops
if args.dense_sample:
num_crop *= 10 # 10 clips for testing when using dense sample
if args.twice_sample:
num_crop *= 2
if modality == 'RGB':
length = 3
elif modality == 'Flow':
length = 10
elif modality == 'RGBDiff':
length = 18
else:
raise ValueError("Unknown modality "+ modality)
data_in = data.view(-1, length, data.size(2), data.size(3))
if is_shift:
data_in = data_in.view(batch_size * num_crop, this_test_segments, length, data_in.size(2), data_in.size(3))
rst = net(data_in)
rst = rst.reshape(batch_size, num_crop, -1).mean(1)
if args.softmax:
# take the softmax to normalize the output to probability
rst = F.softmax(rst, dim=1)
rst = rst.data.cpu().numpy().copy()
if net.module.is_shift:
rst = rst.reshape(batch_size, num_class)
else:
rst = rst.reshape((batch_size, -1, num_class)).mean(axis=1).reshape((batch_size, num_class))
return i, rst, label
proc_start_time = time.time()
max_num = args.max_num if args.max_num > 0 else total_num
top1 = AverageMeter()
top5 = AverageMeter()
for i, data_label_pairs in enumerate(zip(*data_iter_list)):
with torch.no_grad():
if i >= max_num:
break
this_rst_list = []
this_label = None
for n_seg, (_, (data, label)), net, modality in zip(test_segments_list, data_label_pairs, net_list, modality_list):
rst = eval_video((i, data, label), net, n_seg, modality)
this_rst_list.append(rst[1])
this_label = label
assert len(this_rst_list) == len(coeff_list)
for i_coeff in range(len(this_rst_list)):
this_rst_list[i_coeff] *= coeff_list[i_coeff]
ensembled_predict = sum(this_rst_list) / len(this_rst_list)
for p, g in zip(ensembled_predict, this_label.cpu().numpy()):
output.append([p[None, ...], g])
cnt_time = time.time() - proc_start_time
prec1, prec5 = accuracy(torch.from_numpy(ensembled_predict), this_label, topk=(1, 5))
top1.update(prec1.item(), this_label.numel())
top5.update(prec5.item(), this_label.numel())
if i % 20 == 0:
print('video {} done, total {}/{}, average {:.3f} sec/video, '
'moving Prec@1 {:.3f} Prec@5 {:.3f}'.format(i * args.batch_size, i * args.batch_size, total_num,
float(cnt_time) / (i+1) / args.batch_size, top1.avg, top5.avg))
video_pred = [np.argmax(x[0]) for x in output]
video_pred_top5 = [np.argsort(np.mean(x[0], axis=0).reshape(-1))[::-1][:5] for x in output]
video_labels = [x[1] for x in output]
if args.csv_file is not None:
print('=> Writing result to csv file: {}'.format(args.csv_file))
with open(test_file_list[0].replace('/content/drive/My Drive/samsung/labels/test_videofolder.txt', '/content/drive/My Drive/samsung/labels/category.txt')) as f:
categories = f.readlines()
categories = [f.strip() for f in categories]
with open(test_file_list[0]) as f:
vid_names = f.readlines()
vid_names = [n.split(' ')[0] for n in vid_names]
assert len(vid_names) == len(video_pred)
if args.dataset != 'somethingv2': # only output top1
with open(args.csv_file, 'w') as f:
for n, pred in zip(vid_names, video_pred):
f.write('{};{}\n'.format(n, categories[pred]))
else:
with open(args.csv_file, 'w') as f:
for n, pred5 in zip(vid_names, video_pred_top5):
fill = [n]
for p in list(pred5):
fill.append(p)
f.write('{};{};{};{};{};{}\n'.format(*fill))
cf = confusion_matrix(video_labels, video_pred).astype(float)
np.save('cm.npy', cf)
cls_cnt = cf.sum(axis=1)
cls_hit = np.diag(cf)
cls_acc = cls_hit / cls_cnt
print(cls_acc)
upper = np.mean(np.max(cf, axis=1) / cls_cnt)
print('upper bound: {}'.format(upper))
print('-----Evaluation is finished------')
print('Class Accuracy {:.02f}%'.format(np.mean(cls_acc) * 100))
print('Overall Prec@1 {:.02f}% Prec@5 {:.02f}%'.format(top1.avg, top5.avg))
|
[
"somandra.bhl2015@gmail.com"
] |
somandra.bhl2015@gmail.com
|
46680f13ec78eb01cf69e406dab8be4e25fa0251
|
9538e8a5719ea0d9e4759b357f368b80f0a9c2a4
|
/basic-knowledge-penetration/py_penetration_1_网络基础/client_udp.py
|
66ac22fc686b4cd2934528cb4004f558dc6502d4
|
[] |
no_license
|
BLACKGOATGG/python
|
c7019e0c6b476a9ff6cb46f50237ee7b6a9da16e
|
230eb48153ee6c691e5ffdaec5c7ca1fa6d4254b
|
refs/heads/master
| 2021-06-30T02:47:07.771983
| 2019-04-18T09:18:27
| 2019-04-18T09:18:27
| 144,562,485
| 1
| 0
| null | 2019-02-02T09:58:34
| 2018-08-13T10:09:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,762
|
py
|
"""
python编写的udp客户端和tcp客户端差别不大,仅需要做两处简单的修改,将数据包以udp格式发出
UDP:用户数据报协议,是一个面向无连接的协议。
采用该协议不需要两个应用程序先建立连接。
UDP协议不提供差错恢复,不能提供数据重传,因此该协议传输数据安全性差。
UDP应用于及时通信,而TCP协议用来传送文件、命令等操作,因为这些数据不允许丢失,否则会造成文件错误或命令混乱。
下面代码就是模拟客户端通过命令行操作服务器。
客户端输入命令,服务器执行并且返回结果。
"""
import socket
from lib import *
host_ip = helper.get_host_ip()
target_host = '127.0.0.1' #标准的ipv4地址或主机名
target_port = 80
# 建立一个socket对象
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# 发送一些数据
contents = 'GET / HTTP/1.1\r\nHost:'+ target_host +'\r\n\r\n'
client.sendto(contents.encode(),(target_host,target_port))
# 接受一些数据
data, addr = client.recvfrom(4096)
print(data, addr)
"""
在创建嵌套字对象时,将嵌套字的类型改为SOCK_DGRAM
之后调用sendto将数据传到你想发送的服务器上
因为udp是一个无连接状态的传输协议,所以不需要在此之前调用connect()函数,
最后一步是调用recvfrom()接受返回的udp数据包,
你将接受回传的数据以及远程主机的信息和端口号
"""
"""
注意:
1. sendto() 向服务器端发送编码后的数据(二进制字节),编码方法encode(),解码decode()
:::py3需要如此,py2书上列子没有
python3只能收发二进制数据,需要显式转码
"""
|
[
"hzgjl@tairanchina.com"
] |
hzgjl@tairanchina.com
|
a068aa8f19d1906f4996e1b50e34fdd2d1c23dd3
|
e3875fe665b5090a705d5c14caccb80cae46eaa5
|
/bench/bench.py
|
dbc6a823edec8932d8ccf4b0d1f54184811c8189
|
[
"BSD-3-Clause"
] |
permissive
|
pankajp/pysph
|
0664b4efb069def53f6fa3d1103d300977b14e6a
|
5bb1fc46a9c84aefd42758356a9986689db05454
|
refs/heads/master
| 2021-01-23T13:55:43.269572
| 2011-05-11T09:06:15
| 2011-05-11T09:06:15
| 1,721,136
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,230
|
py
|
#! /usr/bin/env python
"""module to run timings test code
Modules to time-test can be specified in two ways
* bench modules can be automatically collected from directory where this
file is present if they are cython modules with sources having extension
`.pyx`. The modules are compiled if they are not already
* modules (python/cython module name w/o extension) can be passed as
commandline arguments to time-test the specified modules
The bench modules are special modules having a callable `bench` defined
which returns a list of a dict having string (name of bench) keys and
float (time taken) values. The list is only as a way group different tests.
The modules may implement the bench function in whichever way they deem fit.
To run bench modules which need mpi to execute multiple processes,
name the bench module as "mpi<num_procs>_<bench_name>.pyx",
replacing <num_procs> with the number of processes in which to run the bench
and <bench_name> with the name of you would use for the file.
An easy way to run in different number of processes is to create symlinks with
different names.
The result of a parallel bench is that returned by the bench function
of the root process.
The results of all the bench tests are displayed in a tabular format
Any output from the test modules is redirected to file `bench.log`.
Output from mpi runs is redirected to `mpirunner.log.<rank>`.
"""
import os
import sys
import traceback
import subprocess
import pickle
# local relative import
import setup
def list_pyx_extensions(path):
"""list the files in the path having .pyx extension w/o the extension"""
ret = [f[:-4] for f in os.listdir(path) if f[-3:]=='pyx' and f[0]!='_']
ret.sort()
return ret
def mpirun(bench_name, num_procs):
ret = subprocess.check_output(['mpiexec', '-n', str(num_procs), sys.executable,
'mpirunner.py', 'p', bench_name])
return pickle.loads(ret)
def run(extns=None, dirname=None, num_runs=1):
"""run the benchmarks in the modules given
`extns` is names of python modules to benchmark (None => all cython
extensions in dirname)
`dirname` is the directory where the modules are found (None implies
current directory
`num_runs` is the number of times to run the tests, the minimum value
is reported over all the runs
"""
if dirname is None:
dirname = os.path.abspath(os.curdir)
olddir = os.path.abspath(os.curdir)
os.chdir(dirname)
if extns is None:
extns = list_pyx_extensions(os.curdir)
print 'Running benchmarks:', ', '.join(extns)
# this is needed otherwise setup will take arguments and do something else
sys.argvold = sys.argv[:]
sys.argv = sys.argv[:1]
# compile the bench .pyx files
setup.compile_extns(extns, dirname)#, [os.path.join(dirname,'..','..')])
logfile = open('bench.log', 'w')
outtext = ''
for bench_name in extns:
stdout_orig = sys.stdout
stderr_orig = sys.stderr
sys.stdout = sys.stderr = logfile
mpi = False
if bench_name.startswith('mpi'):
mpi = True
num_procs = int(bench_name.lstrip('mpi').split('_')[0])
try:
# bench to be run in mpi
if mpi:
res = mpirun(bench_name, num_procs)
# normal single process bench
else:
bench_mod = __import__(bench_name)
res = bench_mod.bench()
except:
stderr_orig.write('Failure running bench %s\n' %(bench_name))
traceback.print_exc(file=stderr_orig)
continue
# take minimum over `num_runs` runs
for i in range(num_runs-1):
# bench to be run in mpi
if mpi:
r = mpirun(bench_name, num_procs)
# normal single process bench
else:
r = bench_mod.bench()
for jn,j in enumerate(res):
for k,v in j.items():
j[k] = min(v, r[jn].get(k, 1e1000))
sys.stdout = stdout_orig
sys.stderr = stderr_orig
if mpi:
s = bench_name.split('_',1)[1]+' %d\n'%num_procs
s += '#'*len(s)
print s
outtext += s + '\n'
else:
s = bench_name + '\n' + '#'*len(bench_name)
print s
outtext += s + '\n'
for func in res:
for k in sorted(func.keys()):
s = k.ljust(40) + '\t%g'%func[k]
print s
outtext += s + '\n'
print
outtext += '\n'
logfile.write(outtext)
logfile.close()
sys.argv = sys.argvold
os.chdir(olddir)
if __name__ == '__main__':
print sys.argv
if '-h' in sys.argv or '--help' in sys.argv:
print '''usage:
python setup.py [extension1, [extension2, [...]]]
runs the bench extensions present in the current directory
'''
elif len(sys.argv) > 1:
# run specified extensions
run(sys.argv[1:])
else:
# run all extensions found in current directory
run()
|
[
"pankaj86@gmail.com"
] |
pankaj86@gmail.com
|
e5d73f03edf687e56a5bb4d04835d977c7935337
|
a7d41aa056165fc33b0c1d8edd50b8557f642548
|
/Python/List-2/no14.py
|
b1c7bc74951a9915a65187483d298fff9381c562
|
[] |
no_license
|
jemtca/CodingBat
|
3243ec9c5309f8581e1a54fba0b076069cec7d74
|
8545a70348dd621070c8b3efa280ca79a24f9d5a
|
refs/heads/master
| 2023-04-05T03:20:17.416495
| 2023-03-31T06:35:08
| 2023-03-31T06:35:08
| 147,287,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
# given an array of ints, return true if it contains no 1's or it contains no 4's
def no14(nums):
b = True
one = False
four = False
for x in range(len(nums)):
if nums[x] == 1:
one = True
for x in range(len(nums)):
if nums[x] == 4:
four = True
if one and four:
b = False
return b
print(no14([1, 2, 3]))
print(no14([1, 2, 3, 4]))
print(no14([2, 3, 4]))
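# Equivalent one-line formulation (hedged sketch, same truth table as no14):
# def no14_alt(nums):
#     return not (1 in nums and 4 in nums)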
|
[
"30645648+jemtca@users.noreply.github.com"
] |
30645648+jemtca@users.noreply.github.com
|
ecfb2b0c2bbeb0959aaecb984d03c3768a12ee18
|
7e47a0079d2eef72b3bcd4c566b0ee233f0e2fc6
|
/alaska/venv/bin/wheel
|
5a4e689dc3106306caf8f67b03a06ccbdc51a9ae
|
[] |
no_license
|
jlyonthinks/28July15
|
6781b3aee6140d3edcb05aa2e804f3d5429fd8a1
|
cf5c716a67cc2e48dc0cf7d4690859587cf45dfb
|
refs/heads/master
| 2023-01-14T00:43:42.315018
| 2015-07-29T00:40:51
| 2015-07-29T00:40:51
| 39,864,986
| 0
| 0
| null | 2022-12-26T19:49:43
| 2015-07-29T00:45:48
|
Python
|
UTF-8
|
Python
| false
| false
| 237
|
#!/home/jlyonthinks/alaska/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"jlyonthinks@gmail.com"
] |
jlyonthinks@gmail.com
|
|
e4c7fa728fc770c8d90def3d56e9a6d7f2e07661
|
0e228bea5d374ea2a01401908547066932bce8e8
|
/sypt_ptree.py
|
8af00271125bafba927367ba0e483344a5a460db
|
[
"MIT"
] |
permissive
|
marjanhs/phan_style_change
|
c4c2cdcec80fc35f3727e156350f3ebfbde09bfa
|
76c47ff751ec8d4318364b1f3d1272f50a94e06e
|
refs/heads/master
| 2020-03-22T22:41:43.767233
| 2019-02-08T16:56:23
| 2019-02-08T16:56:23
| 140,766,204
| 1
| 2
|
MIT
| 2018-07-19T21:19:45
| 2018-07-12T21:25:05
|
Python
|
UTF-8
|
Python
| false
| false
| 3,392
|
py
|
from pycorenlp import StanfordCoreNLP
from nltk.tree import ParentedTree
import re, os, subprocess
from nltk.tokenize import sent_tokenize, word_tokenize
arrow = ' => '
def leaf_to_root(node, ignoreleaf=True):
rules=[]
while node.label()!="ROOT":
rule= node.label() + arrow
for s in node:
if type(s)== ParentedTree:
rule += s.label() +' '
else:
rule += s +' '
rules.append(rule)
node = node.parent()
if ignoreleaf and len(rules) >1:
return rules[1:]
else:
return rules
def traverse(parsed_sentence, ignoreleaf=True):
t = ParentedTree.fromstring(parsed_sentence)
q = [t]
rules = []
while len(q) > 0:
current = q.pop()
for s in current:
if type(s) == ParentedTree:
q.append(s)
else:
rules.append(leaf_to_root(current, ignoreleaf=ignoreleaf))
return rules
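# Hand-traced illustration: for
# t = ParentedTree.fromstring('(ROOT (S (NP (DT the) (NN cat)) (VP (VBZ sleeps))))')
# each leaf contributes its chain of productions up to ROOT; with ignoreleaf=True
# the POS-level rule is dropped, so the 'the' leaf yields
# ['NP => DT NN ', 'S => NP VP '].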
def get_pt_features_coreNLP(doc, ignoreleaf=True):
en = doc.encode('utf-8')
de = en.decode('utf-8')
doc = de
chars_to_remove = ['{', '}', '(', ')']
rx = '[' + re.escape(''.join(chars_to_remove)) + ']'
doc = re.sub(rx, '', doc)
nlp = StanfordCoreNLP('http://localhost:9000')
sentences = sent_tokenize(doc)
ptree_features = list()
for sentence in sentences:
try:
if sentence != "" and len(word_tokenize(sentence)) <= 80: # less than 50 words
output = nlp.annotate(sentence, properties={
'annotators': 'parse',
'outputFormat': 'json'
})
parsed = (output['sentences'][0]['parse'])
rules = traverse(parsed, ignoreleaf=ignoreleaf)
ptree_features.append(rules)
except:
            print('Problem in parsing sentence = %s' % sentence)
return ptree_features
def get_pt_features_standalone(doc, tmp_path='', ignoreleaf=True):
tmp_path = os.path.join(tmp_path, 'tmp_marjan')
args = ['java','-mx3000m', '-cp', 'stanford-parser-full-2018-02-27/stanford-parser.jar',
'edu.stanford.nlp.parser.lexparser.LexicalizedParser',
'-encoding', 'utf-8',
'-model', 'stanford-parser-full-2018-02-27/englishPCFG.ser.gz',
'-maxLength', '50',
'-sentences', 'newline',
'-outputFormat', 'penn',
tmp_path]
en = doc.encode('utf-8')
de = en.decode('utf-8')
doc = de
chars_to_remove = ['{', '}', '(', ')']
rx = '[' + re.escape(''.join(chars_to_remove)) + ']'
doc = re.sub(rx, '', doc)
sentences = sent_tokenize(doc)
ptree_features = list()
with open(tmp_path, 'wt') as fw:
for sentence in sentences:
if sentence.strip() != '':
fw.write(sentence+'\n')
p = subprocess.Popen(args, stdin=None, stdout=-1, stderr=-1)
outs, err = p.communicate()
outs = outs.decode('utf-8').replace('(())\n', '') # removing output of long sentences
for parsed in outs.split('\n\n'):
if parsed != "":
try:
rules = traverse(parsed, ignoreleaf= ignoreleaf)
ptree_features.append(rules)
except ValueError:
print('Problem in converting parsed sentence = %s ' % (parsed))
raise
return ptree_features
|
[
"ma.hosseinia@gmail.com"
] |
ma.hosseinia@gmail.com
|
74cdeeaefa73433d6ba79843dc152782e3814a80
|
c4e377c125cd02c2cd6a7ac8baaa92498ec0c0b5
|
/light_sensor.py
|
799b6eb435851f140095b6245bc9fc00670961ce
|
[] |
no_license
|
carlhdr/raspberry_pi
|
5f32d4900e153c82cdb20422b9ee569f98dba0a0
|
922e2c61f3028f3ab73350165261ea264b324401
|
refs/heads/master
| 2020-12-14T08:54:27.896296
| 2016-05-03T04:16:11
| 2016-05-03T04:16:11
| 58,165,160
| 0
| 0
| null | 2016-05-05T22:44:30
| 2016-05-05T22:44:30
| null |
UTF-8
|
Python
| false
| false
| 606
|
py
|
#!/usr/bin/env python
# Example for RC timing reading for Raspberry Pi
# Must be used with GPIO 0.3.1a or later - earlier versions
# are not fast enough!
import RPi.GPIO as GPIO, time, os
DEBUG = 1
GPIO.setmode(GPIO.BCM)
def RCtime (RCpin):
reading = 0
GPIO.setup(RCpin, GPIO.OUT)
GPIO.output(RCpin, GPIO.LOW)
time.sleep(0.1)
GPIO.setup(RCpin, GPIO.IN)
# This takes about 1 millisecond per loop cycle
while (GPIO.input(RCpin) == GPIO.LOW):
reading += 1
return reading
print RCtime(18) # Read RC timing using pin #18
|
[
"pannii.orosz@gmail.com"
] |
pannii.orosz@gmail.com
|
68665a1bae1103716df96bc7d8a47e9d199f37f2
|
8535a64935c9f94269029d3c018dd0ed34686b78
|
/Coursera-Machine-Learning/Python-Code/ex2/gradient.py
|
da53eeb0f02f9c81c999121951c60e4fd2a0a218
|
[] |
no_license
|
hosjiu1702/ML-Self-Study-Training-Course
|
929647a73b04dd1529f2882da9cf2187001c32d3
|
71d9f00e0460a6f8f43bf0276ec67d75a7a02936
|
refs/heads/master
| 2022-12-13T11:29:16.343033
| 2019-03-13T07:08:19
| 2019-03-13T07:08:19
| 175,346,159
| 0
| 0
| null | 2022-12-09T15:37:52
| 2019-03-13T04:27:25
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 300
|
py
|
import numpy as np
from sigmoid import sigmoid
def gradient(theta, X, y, lam_bda):
m = len(y)
grad_J = np.zeros((theta.shape))
h = sigmoid(X.dot(theta))
grad_J = (1 / m) * X.T.dot(h-y) + (lam_bda / m) * theta
grad_J[0] = grad_J[0] - (lam_bda / m) * theta[0]
return grad_J
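# The computation above implements the regularized logistic-regression gradient
#   grad = (1/m) * X^T (h - y) + (lambda/m) * theta,
# with the grad_J[0] correction removing the penalty from the bias term theta_0.
# Hedged usage sketch (shapes assumed: X is (m, n+1) with a leading bias column):
# theta = np.zeros(3)
# X = np.array([[1.0, 0.5, 1.5], [1.0, 2.0, 0.3]])
# y = np.array([1.0, 0.0])
# g = gradient(theta, X, y, 1.0)   # returns a (3,) gradient vector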
|
[
"hosjiu1702@gmail.com"
] |
hosjiu1702@gmail.com
|
d33ede4c779f29fb5299d2cd5d13206c63fba933
|
76d319fb1cebc730feb0e730d4fc3ac328bb5e93
|
/backend/settings.py
|
237f324df8acfc60a2f124ef74e0ca13e972d583
|
[] |
no_license
|
RAlanWright/django-react-todo
|
2a801af06eab530a04a15ca0d98d357d34657b34
|
aeebe1ff1d9a5a24e6399f604bc338cbb5b9c983
|
refs/heads/main
| 2023-01-11T18:56:52.600981
| 2020-11-09T16:29:24
| 2020-11-09T16:29:24
| 306,974,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,278
|
py
|
"""
Django settings for backend project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("D_T_D_SECRET")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'rest_framework',
'todo'
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# Whitelist localhost:3000 for the frontend
CORS_ALLOWED_ORIGINS = (
'http://localhost:3000',
'http://localhost:8000',
)
|
[
"wright.alan88@gmail.com"
] |
wright.alan88@gmail.com
|
701da3c11fa30fb3121be3633c9e8150d1062c84
|
0b17f21b3447700e99ad376b88d1440c3546a29b
|
/engine.py
|
c27cd41e816ebea8508ab9faa735ab80de98286b
|
[] |
no_license
|
juniferd/terminal-boggle
|
4580d0253c52b7a38db2d9604a33b62703dbc592
|
c2abbdfae1f881da1c9d0bee476bc2c8613b1061
|
refs/heads/master
| 2021-01-10T09:27:06.046285
| 2016-02-22T23:13:57
| 2016-02-22T23:13:57
| 51,971,610
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,992
|
py
|
import threading
import mechanics
## engine for game play
GAME_TIME=15
class Engine(object):
def __init__(self):
self.__timer_on = True
self.__remaining_time = GAME_TIME
self.__round_score = 0
"""
menu_map = {
'h': full_menu,
'e': exit_round,
's': start_round,
'n': new_board,
'r': rotate_board,
}
"""
# present a menu
def menu(self):
print "\ntype S to start playing"
player_input = raw_input("> ").lower()
## FIX THIS LATER
if player_input == 's':
self.start_round()
elif player_input == 'e':
self.stop_round()
# start a round
def start_round(self):
##FIX THIS LATER
game_words = self.play_game()
print "Here are your valid words from the game: "
for word in game_words:
print word
# show total words
all_words = self.game.find_all_words()
print "You found %s out of %s words" % (len(game_words), all_words)
game_score = self.score_game(game_words)
# score a game
print "Your game score: ", game_score
# add score to round_score
self.__round_score += game_score
print "Your round score: ", self.__round_score
self.game_reset()
print "-------------"
# present the menu
self.menu()
# manually stop a round
def stop_round(self):
self.__round_score = 0
self.menu()
# change board to size nxn
# score
def score_game(self, words):
scoring = {
3 : 1,
4 : 1,
5 : 2,
6 : 3,
7 : 5,
8 : 11
}
score = 0
for word in words:
length = int(words[word])
if length >= 8:
score = score + 11
else:
score = score + scoring[length]
return score
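    # Worked example (hand-checked against the table above):
    # score_game({'cat': 3, 'plane': 5, 'airplanes': 9}) -> 1 + 2 + 11 = 14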
def game_reset(self):
self.__timer_on = True
self.__remaining_time = GAME_TIME
def tick_timer(self):
self.__remaining_time -= 1
if self.__remaining_time <= 0:
self.__timer_on = False
print "%s seconds are up! press enter" % GAME_TIME
if self.__timer_on:
t = threading.Timer(1.0, self.tick_timer)
t.start()
# play a game of boggle
def play_game(self):
checked_words = {}
boggle_game = mechanics.Boggle()
self.game = boggle_game
self.tick_timer()
# play a timed game
while self.__timer_on is True:
boggle_game.show_board()
user_word = raw_input("> ")
if not self.__timer_on:
break
this_word = boggle_game.check_word(user_word.lower(),True)
if this_word is not None:
checked_words[this_word] = len(this_word)
return checked_words
|
[
"jyk@jyk"
] |
jyk@jyk
|
71e18a1184fee5d651e67b275dbcebed8c58628c
|
fcb053bd5cb1e43d6d02818c8683f31c58f5b678
|
/Collections/Collections.OrderedDict().py
|
a6f7fa9957c3a737b6bd8b1f36cee83f5e604ec7
|
[] |
no_license
|
12akshat12/Hackerrank-Python-Practice
|
40b20df8fadd925a0bc01ca52daa9cd3a6052340
|
d0c14da3d32e717b506614216f098a9729545f7a
|
refs/heads/master
| 2022-11-07T05:37:18.368939
| 2020-06-29T05:22:38
| 2020-06-29T05:22:38
| 275,672,136
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
from collections import OrderedDict
def main():
d=OrderedDict()
N=int(input())
for i in range (N):
item_name, space, net_price=input().rpartition(' ')
if(item_name not in d):
d[item_name]=int(net_price)
else:
d[item_name]=d[item_name]+int(net_price)
for i in d.items():
print(i[0], i[1])
main()
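# Hypothetical sample session (input format assumed: "<item name> <net_price>" per
# line; repeat purchases accumulate, and insertion order is preserved):
#   input:  4 / "BANANA FRIES 12" / "POTATO CHIPS 30" / "BANANA FRIES 5" / "APPLE JUICE 10"
#   output: BANANA FRIES 17 / POTATO CHIPS 30 / APPLE JUICE 10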
|
[
"akshat.minocha@quovantis.com"
] |
akshat.minocha@quovantis.com
|
6cd92e60f7754b6ce14be9c99f3185a81c39fc5f
|
74a09ecda56217babdb1e13bece699ca603267f2
|
/blog_api/views.py
|
f201e662cc47703c6e01c215275e98b59d7f485c
|
[] |
no_license
|
shz699/blogapi_django
|
6e2266f35d505c86a687e6c3fd908ceb5debb44a
|
a7b28e4f0d5666c58bbf8b6599d18b4b9c604af3
|
refs/heads/master
| 2023-06-11T15:50:28.360309
| 2021-07-02T15:47:51
| 2021-07-02T15:47:51
| 382,393,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,182
|
py
|
from rest_framework import generics
from blog.models import Post
from .serializers import PostSerializer
# Create your views here.
class PostList(generics.ListCreateAPIView):
queryset = Post.postobjects.all()
serializer_class = PostSerializer
class PostDetail(generics.RetrieveDestroyAPIView):
queryset = Post.objects.all()
serializer_class = PostSerializer
""" Concrete View Classes
#CreateAPIView
Used for create-only endpoints.
#ListAPIView
Used for read-only endpoints to represent a collection of model instances.
#RetrieveAPIView
Used for read-only endpoints to represent a single model instance.
#DestroyAPIView
Used for delete-only endpoints for a single model instance.
#UpdateAPIView
Used for update-only endpoints for a single model instance.
#ListCreateAPIView
Used for read-write endpoints to represent a collection of model instances.
#RetrieveUpdateAPIView
Used for read or update endpoints to represent a single model instance.
#RetrieveDestroyAPIView
Used for read or delete endpoints to represent a single model instance.
#RetrieveUpdateDestroyAPIView
Used for read-write-delete endpoints to represent a single model instance.
"""
|
[
"shahidula699@gmail.com"
] |
shahidula699@gmail.com
|
1b5a79a4089a68bc7d9a2514d8c1f9c8638bf771
|
533d11ec5e56b22ade009213c36cdb5fca1a4073
|
/TD3/agent.py
|
24d9b2bf71c0a538e62a681451aa868ac5c5595f
|
[] |
no_license
|
joelseytre/advanced-machine-learning
|
309735f1fa0cfe6f3cf41f49e5c94853813f9d69
|
579ebece149669673b956914849adcadb941277c
|
refs/heads/master
| 2021-05-12T08:18:01.638194
| 2018-02-11T19:06:59
| 2018-02-11T19:06:59
| 117,275,645
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 116
|
py
|
from agents.RandomAgent import RandomAgent
from agents.QAgent import QAgent
# Agent = RandomAgent
Agent = QAgent
|
[
"joel.seytre@student.ecp.fr"
] |
joel.seytre@student.ecp.fr
|
bd48a7891bb09bfcada77c9f790b924d43f78d56
|
b5d3f6c6da601013b761ae302bfa8beea31852d7
|
/flask_api/modules/meteorological_img.py
|
8be8f522b77b258ad60a9b29fef7d400ef7fd254
|
[] |
no_license
|
is0363hr/ste_api_server
|
113d9cc90a7d6341e5dcd72d972ed529e2c39637
|
63cbced7e3466c667e884d6c6fc9d55897aa6557
|
refs/heads/main
| 2023-05-26T03:39:37.335804
| 2021-06-11T03:30:13
| 2021-06-11T03:30:13
| 348,591,770
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,349
|
py
|
# Fetch Japan Meteorological Agency (JMA) imagery (2021/3/11)
from flask.app import Flask
from modules.line_api import LinePush
import requests
from datetime import datetime, timedelta
import os
import cv2
import base64
def strToDate(year, month, day, hour, minute):
tstr = year + month + day + hour + minute
tdate = datetime.strptime(tstr, '%Y%m%d%H%M')
return tdate
def image_file_to_base64(file_path):
with open(file_path, "rb") as image_file:
data = base64.b64encode(image_file.read())
return data.decode('utf-8')
class MeteImg:
def __init__(
self,
dateTime,
lon,
lat,
zoom,
) -> None:
self.tag = ""
self.dateTime = dateTime
self.lon = lon
self.lat = lat
self.zoom = zoom
self.column = 0
self.line = 0
self.get_time = ''
def get_img(self, datetime_set=False):
if not datetime_set:
date_data = datetime.now()
else:
date_data = self.dateTime
h = sum([int(s) for s in str(date_data.strftime("%H"))])
m = int(date_data.strftime("%M")[-1])
temp_time = date_data - timedelta(minutes=5 + m % 5)
time = list(temp_time.strftime("%Y%m%d%H%M"))
if h < 10:
time[-4:-2] = "0" + str(h)
else:
time[-4:-2] = str(h)
time = "".join(time) + '00'
self.get_time = time
# print(time)
# time = "202101060955"
# self.img_time = temp_time.strftime("%Y年%m月%d日%H時%M分")
self.img_time = temp_time
if self.tag == "map":
base_url = "https://cyberjapandata.gsi.go.jp/xyz/pale/"
elif self.tag == "cloud":
base_url = ("https://www.jma.go.jp/bosai/jmatile/data/nowc/{}/none/{}/surf/hrpns/").format(time, time)
z, x, y = self.get_column_line()
for i in range(z):
for j in range(z):
url = (base_url + "{}/{}/{}.png").format(
self.zoom,
x+i,
y+j,
)
output_path = (SAVE_DIR+"/{}/{}/{}_{}.png").format(
self.tag, self.zoom, x+i, y+j
)
dir = os.path.dirname(output_path) + "/"+ str(date_data.strftime("%Y_%m_%d")) + "/"
os.makedirs(os.path.dirname(output_path), exist_ok=True)
self.save_img(url, output_path)
print(url)
return dir
    # Save the image at the specified URL
def save_img(self, url, output_path):
try:
req = requests.get(url)
with open(output_path, "wb") as w:
w.write(req.content)
w.close()
        except requests.exceptions.HTTPError as request_error:
            print("Failed to access {}".format(url))
            line_push = LinePush()
            line_push.push_scraping_error(request_error)
        except Exception as e:
            print("Access failed!!!")
            line_push = LinePush()
            line_push.push_scraping_error(e)
print(e)
    # Stitch the downloaded tiles into a single image
def img_connect(self, path):
z, x, y = self.get_column_line()
v_list = []
for i in range(z):
for j in range(z):
file = path + str(x+i) + "_" + str(y+j) + ".png"
if not (j==0):
if self.tag == "cloud":
im = cv2.imread(file, -1)
else:
im = cv2.imread(file)
base_img = cv2.vconcat([base_img, im])
elif j==0:
if self.tag == "cloud":
base_img = cv2.imread(file, -1)
else:
base_img = cv2.imread(file)
v_list.append(base_img)
base_img = v_list[0]
for v in v_list[1:]:
base_img = cv2.hconcat([base_img, v])
output_path = path + self.get_time + ".png"
cv2.imwrite(output_path, base_img)
return output_path
    # Composite the transparent cloud image onto the map
def sye(self, cloud_path):
cartopy = False
base_path = SAVE_DIR
if cartopy:
map = "cartopy/"
else:
map = "map/"
map_path = ("{}/{}/{}/result.png").format(base_path, "map", self.zoom)
# cloud_path = ("{}/{}/{}/{}.png").format(base_path, "cloud", self.zoom, self.get_time)
map = cv2.imread(map_path)
# height, width = map.shape[0], map.shape[1]
# map = cv2.resize(map, (512, 512))
cloud = cv2.imread(cloud_path, -1)
        # Paste position; top-left corner for now
x1, y1, x2, y2 = 0, 0, cloud.shape[1], cloud.shape[0]
map[y1:y2, x1:x2] = map[y1:y2, x1:x2] * (1 - cloud[:, :, 3:] / 255) + cloud[
:, :, :3
] * (cloud[:, :, 3:] / 255)
output_path = ("{}/{}/{}/{}.png").format(base_path, "sye", self.zoom, self.get_time)
os.makedirs(os.path.dirname(output_path), exist_ok=True)
cv2.imwrite(output_path, map)
return output_path
def get_column_line(self):
self.line = LINE_INITIALIZE_NUMBER
self.column = COLUMN_INITIALIZE_NUMBER
zoom_range = self.zoom - ZOOM_INITIALIZE + 1
return zoom_range+1, zoom_range*self.line, zoom_range*self.column
def map_create(self):
self.tag = "map"
path = self.get_img()
self.img_connect(path)
return
def cloud_create(self, datetime_set=False):
self.tag = "cloud"
path = self.get_img(datetime_set)
cloud_path = self.img_connect(path)
sye_path = self.sye(cloud_path)
return cloud_path, sye_path
def meteoro_img_create(dateTime, lon, lat, zoom):
meteoro = MeteImg(
dateTime, lon, lat, zoom
)
meteoro.map_create()
return meteoro.cloud_create()
# Initial position of the Japan map
ZOOM_INITIALIZE = 4
LINE_INITIALIZE_NUMBER = 13
COLUMN_INITIALIZE_NUMBER = 5
SAVE_DIR = './static'
def main():
zoom = 4
now = datetime.now()
now = strToDate(
'2021',
'4',
'10',
'22',
'35'
)
mimg = MeteImg(
now,
'34',
'135',
4,
)
print(mimg.cloud_create(True))
if __name__ == "__main__":
main()
|
[
"tan_tan_tanuki-73@softbank.ne.jp"
] |
tan_tan_tanuki-73@softbank.ne.jp
|
0d873babbf620fae68abe2c32e8a1578624ca799
|
ff6248be9573caec94bea0fa2b1e4b6bf0aa682b
|
/log-20190927/132.230.102.123-10.21.9.70/1569573569.py
|
dc5839f67d211337395137ed458b092a279d00b4
|
[] |
no_license
|
LennartElbe/codeEvo
|
0e41b1a7705204e934ef71a5a28c047366c10f71
|
e89b329bc9edd37d5d9986f07ca8a63d50686882
|
refs/heads/master
| 2020-12-21T17:28:25.150352
| 2020-03-26T10:22:35
| 2020-03-26T10:22:35
| 236,498,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,932
|
py
|
import functools
import typing
import string
import random
import pytest
## Solution part 1.
def divisors(n):
    """
    Takes a positive integer and returns the list of all of its divisors without repetition.
    args:
        n: int (positive integer)
    returns:
        result: lst (all divisors, without repetition)
    """
result = []
if n == 0 or type(n) != int:
return "Nanana"
else:
for i in range(1, n + 1):
if n % i == 0 and i not in result:
result.append(i)
return result
######################################################################
## hidden code
def mk_coverage():
covered = set()
target = set(range(6))
count = 0
def coverage(func):
nonlocal covered, target, count
def wrapper(n):
nonlocal covered, count
if n <= 0:
covered.add(0)
if n == 1:
covered.add(1)
r = func (n)
lenr = len (r)
if lenr == 1:
covered.add(2)
if lenr == 2:
covered.add(3)
if (lenr > 2) and ( lenr % 2 == 0):
covered.add(4)
if lenr > 2 and lenr % 2 == 1:
covered.add(5)
count += 1
return r
if func == "achieved": return len(covered)
if func == "required": return len(target)
if func == "count" : return count
if func.__doc__:
wrapper.__doc__ = func.__doc__
wrapper.__hints__ = typing.get_type_hints (func)
return wrapper
return coverage
coverage = mk_coverage()
try:
divisors = coverage(divisors)
except:
pass
## Solution part 2. (Tests)
def test_divisors():
a = 5
b = 12
c = 0
assert divisors(a) == [1, 5]
assert divisors(b) == [1, 2, 3, 4, 6, 12]
assert divisors(c) == "Nanana"
######################################################################
## hidden tests
pytest.main (["-v", "--assert=plain", "-p", "no:cacheprovider"])
from inspect import getfullargspec
class TestNames:
def test_divisors (self):
assert divisors
assert 'n' in getfullargspec(divisors).args
class TestGrades:
def test_docstring_present(self):
assert divisors.__doc__ is not None
def test_typing_present(self):
assert divisors.__hints__ == typing.get_type_hints(self.divisors_oracle)
def test_coverage(self):
assert coverage("achieved") == coverage("required")
def divisors_oracle(self, n:int)->list:
return [ d for d in range (1, n + 1) if n % d == 0 ]
def check_divisors (self, x):
assert set(divisors (x)) == set(self.divisors_oracle (x))
def test_correctness(self):
for i in range (100):
self.check_divisors (i)
n = random.randrange (10000)
self.check_divisors (n)
|
[
"lenni.elbe@gmail.com"
] |
lenni.elbe@gmail.com
|
b36d2685c58c8d2b9e060a1ba82ec9a7e2d959f8
|
a92e506fb1ef8f6cd0ee9a5215c0a402b47920d1
|
/Divisor Game.py
|
8bd673e843645b6928df7b2c2cee00793328ca95
|
[] |
no_license
|
NikitaFir/Leetcode
|
81e6d9cce43a69584279ccdd41f5af833c49cc3f
|
b179f637c7587836cf1047b27514587b4db0776f
|
refs/heads/master
| 2022-06-30T17:47:37.915052
| 2020-05-12T16:21:58
| 2020-05-12T16:21:58
| 263,385,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
class Solution(object):
def divisorGame(self, N):
k = 0
x = 1
while N != x:
if N % x==0:
k += 1
N -= x
x = 1
else:
x += 1
if k % 2 != 0:
return True
else:
return False
print(Solution().divisorGame(3))
|
[
"noreply@github.com"
] |
NikitaFir.noreply@github.com
|
f3a2a0af14fc312520e401d23a530a671f70d031
|
68c92b50fa7eebd4edd93240252ad7cfdaa36d45
|
/semisup/train.py
|
0b19fed904b4cdcefb906781c5e799e91e09a7e4
|
[
"Apache-2.0"
] |
permissive
|
xu-ji/associative_deep_clustering
|
dcda58c1a8a391b91201789f55bd91dd9a041d0b
|
fb324f3749c1dedcde685640dc6143606bc6527b
|
refs/heads/master
| 2020-03-31T15:37:24.337726
| 2018-10-12T21:16:37
| 2018-10-12T21:16:37
| 152,344,260
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,352
|
py
|
#! /usr/bin/env python
"""
Copyright 2016 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Association-based semi-supervised training module.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
from importlib import import_module
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from tensorflow.python.training import saver as tf_saver
import semisup
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset', 'svhn', 'Which dataset to work on.')
flags.DEFINE_string('target_dataset', None,
'If specified, perform domain adaptation using dataset as '
'source domain and target_dataset as target domain.')
flags.DEFINE_string('target_dataset_split', 'unlabeled',
'Which split of the target dataset to use for domain '
'adaptation.')
flags.DEFINE_string('architecture', 'svhn_model', 'Which network architecture '
'from architectures.py to use.')
flags.DEFINE_integer('sup_per_class', 100,
'Number of labeled samples used per class in total.'
' -1 = all')
flags.DEFINE_integer('unsup_samples', -1,
'Number of unlabeled samples used in total. -1 = all.')
flags.DEFINE_integer('sup_seed', -1,
'Integer random seed used for labeled set selection.')
flags.DEFINE_integer('sup_per_batch', 10,
'Number of labeled samples per class per batch.')
flags.DEFINE_integer('unsup_batch_size', 100,
'Number of unlabeled samples per batch.')
flags.DEFINE_integer('emb_size', 128,
'Size of the embeddings to learn.')
flags.DEFINE_float('learning_rate', 1e-4, 'Initial learning rate.')
flags.DEFINE_float('minimum_learning_rate', 1e-6,
'Lower bound for learning rate.')
flags.DEFINE_float('decay_factor', 0.33, 'Learning rate decay factor.')
flags.DEFINE_float('decay_steps', 60000,
'Learning rate decay interval in steps.')
flags.DEFINE_float('visit_weight', 0.0, 'Weight for visit loss.')
flags.DEFINE_string('visit_weight_envelope', None,
'Increase visit weight with an envelope: [None, sigmoid, linear]')
flags.DEFINE_integer('visit_weight_envelope_steps', -1,
'Number of steps (after delay) at which envelope '
'saturates. -1 = follow walker loss env.')
flags.DEFINE_integer('visit_weight_envelope_delay', -1,
'Number of steps at which envelope starts. -1 = follow '
'walker loss env.')
flags.DEFINE_float('walker_weight', 1.0, 'Weight for walker loss.')
flags.DEFINE_string('walker_weight_envelope', None,
'Increase walker weight with an envelope: [None, sigmoid, linear]')
flags.DEFINE_integer('walker_weight_envelope_steps', 100,
'Number of steps (after delay) at which envelope '
'saturates.')
flags.DEFINE_integer('walker_weight_envelope_delay', 3000,
'Number of steps at which envelope starts.')
flags.DEFINE_float('logit_weight', 1.0, 'Weight for logit loss.')
flags.DEFINE_integer('max_steps', 100000, 'Number of training steps.')
flags.DEFINE_bool('augmentation', False,
'Apply data augmentation during training.')
flags.DEFINE_integer('new_size', 0,
'If > 0, resize image to this width/height.')
flags.DEFINE_integer('virtual_embeddings', 0,
'How many virtual embeddings to add.')
flags.DEFINE_string('logdir', '/tmp/semisup', 'Training log path.')
flags.DEFINE_integer('save_summaries_secs', 150,
'How often should summaries be saved (in seconds).')
flags.DEFINE_integer('save_interval_secs', 300,
'How often should checkpoints be saved (in seconds).')
flags.DEFINE_integer('log_every_n_steps', 100,
'Logging interval for slim training loop.')
flags.DEFINE_integer('max_checkpoints', 5,
'Maximum number of recent checkpoints to keep.')
flags.DEFINE_float('keep_checkpoint_every_n_hours', 5.0,
'How often checkpoints should be kept.')
flags.DEFINE_float('batch_norm_decay', 0.99,
'Batch norm decay factor '
                   '(only used for STL-10 at the moment).')
flags.DEFINE_integer('remove_classes', 0,
'Remove this number of classes from the labeled set, '
'starting with highest label number.')
flags.DEFINE_string('master', '',
'BNS name of the TensorFlow master to use.')
flags.DEFINE_integer('ps_tasks', 0,
'The number of parameter servers. If the value is 0, '
'then the parameters '
'are handled locally by the worker.')
flags.DEFINE_integer('task', 0,
'The Task ID. This value is used when training with '
'multiple workers to identify each worker.')
def logistic_growth(current_step, target, steps):
"""Logistic envelope from zero to target value.
This can be used to slowly increase parameters or weights over the course of
training.
Args:
current_step: Current step (e.g. tf.get_global_step())
target: Target value > 0.
steps: Twice the number of steps after which target/2 should be reached.
Returns:
TF tensor holding the target value modulated by a logistic function.
"""
assert target > 0., 'Target value must be positive.'
alpha = 5. / steps
current_step = tf.cast(current_step, tf.float32)
steps = tf.cast(steps, tf.float32)
return target * (tf.tanh(alpha * (current_step - steps / 2.)) + 1.) / 2.
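# Worked check of the formula above (not part of the original file): with
# target = 1.0 and steps = 100, alpha = 5/100 = 0.05, so at current_step = 50
# the tanh argument is 0 and the value is 1.0 * (0 + 1) / 2 = 0.5 -- that is,
# target/2 is reached at steps/2, and the curve saturates towards target.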
def apply_envelope(type, step, final_weight, growing_steps, delay):
assert growing_steps > 0, "Growing steps for envelope must be > 0."
step = tf.cast(step - delay, tf.float32)
final_step = growing_steps + delay
if type is None:
value = final_weight
elif type in ['sigmoid', 'sigmoidal', 'logistic', 'log']:
value = logistic_growth(step, final_weight, final_step)
elif type in ['linear', 'lin']:
m = float(final_weight) / (
growing_steps) if not growing_steps == 0.0 else 999.
value = m * step
else:
raise NameError('Invalid type: ' + str(type))
return tf.clip_by_value(value, 0., final_weight)
def main(argv):
del argv
# Load data.
dataset_tools = import_module('tools.' + FLAGS.dataset)
train_images, train_labels = dataset_tools.get_data('train')
if FLAGS.target_dataset is not None:
target_dataset_tools = import_module('tools.' + FLAGS.target_dataset)
train_images_unlabeled, _ = target_dataset_tools.get_data(
FLAGS.target_dataset_split)
else:
train_images_unlabeled, _ = dataset_tools.get_data('unlabeled')
architecture = getattr(semisup.architectures, FLAGS.architecture)
num_labels = dataset_tools.NUM_LABELS
image_shape = dataset_tools.IMAGE_SHAPE
# Sample labeled training subset.
seed = FLAGS.sup_seed if FLAGS.sup_seed != -1 else None
sup_by_label = semisup.sample_by_label(train_images, train_labels,
FLAGS.sup_per_class, num_labels,
seed)
# Sample unlabeled training subset.
if FLAGS.unsup_samples > -1:
num_unlabeled = len(train_images_unlabeled)
assert FLAGS.unsup_samples <= num_unlabeled, (
'Chose more unlabeled samples ({})'
' than there are in the '
'unlabeled batch ({}).'.format(FLAGS.unsup_samples, num_unlabeled))
rng = np.random.RandomState(seed=seed)
train_images_unlabeled = train_images_unlabeled[rng.choice(
num_unlabeled, FLAGS.unsup_samples, False)]
graph = tf.Graph()
with graph.as_default():
with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks,
merge_devices=True)):
# Set up inputs.
t_unsup_images = semisup.create_input(train_images_unlabeled, None,
FLAGS.unsup_batch_size)
t_sup_images, t_sup_labels = semisup.create_per_class_inputs(
sup_by_label, FLAGS.sup_per_batch)
if FLAGS.remove_classes:
t_sup_images = tf.slice(
t_sup_images, [0, 0, 0, 0],
[FLAGS.sup_per_batch * (
num_labels - FLAGS.remove_classes)] +
image_shape)
# Resize if necessary.
if FLAGS.new_size > 0:
new_shape = [FLAGS.new_size, FLAGS.new_size, image_shape[-1]]
else:
new_shape = None
# Apply augmentation
if FLAGS.augmentation:
# TODO(haeusser) generalize augmentation
def _random_invert(inputs, _):
randu = tf.random_uniform(
shape=[FLAGS.sup_per_batch * num_labels], minval=0.,
maxval=1.,
dtype=tf.float32)
randu = tf.cast(tf.less(randu, 0.5), tf.float32)
randu = tf.expand_dims(randu, 1)
randu = tf.expand_dims(randu, 1)
randu = tf.expand_dims(randu, 1)
inputs = tf.cast(inputs, tf.float32)
return tf.abs(inputs - 255 * randu)
augmentation_function = _random_invert
else:
augmentation_function = None
# Create function that defines the network.
model_function = partial(
architecture,
new_shape=new_shape,
img_shape=image_shape,
augmentation_function=augmentation_function,
batch_norm_decay=FLAGS.batch_norm_decay,
emb_size=FLAGS.emb_size)
# Set up semisup model.
model = semisup.SemisupModel(model_function, num_labels,
image_shape)
# Compute embeddings and logits.
t_sup_emb = model.image_to_embedding(t_sup_images)
t_unsup_emb = model.image_to_embedding(t_unsup_images)
# Add virtual embeddings.
if FLAGS.virtual_embeddings:
t_sup_emb = tf.concat(0, [
t_sup_emb, semisup.create_virt_emb(FLAGS.virtual_embeddings,
FLAGS.emb_size)
])
if not FLAGS.remove_classes:
# need to add additional labels for virtual embeddings
t_sup_labels = tf.concat(0, [
t_sup_labels,
                        (num_labels + tf.range(1, FLAGS.virtual_embeddings + 1,
                                               dtype=tf.int64))
* tf.ones([FLAGS.virtual_embeddings], tf.int64)
])
t_sup_logit = model.embedding_to_logit(t_sup_emb)
# Add losses.
visit_weight_envelope_steps = (
FLAGS.walker_weight_envelope_steps
if FLAGS.visit_weight_envelope_steps == -1
else FLAGS.visit_weight_envelope_steps)
visit_weight_envelope_delay = (
FLAGS.walker_weight_envelope_delay
if FLAGS.visit_weight_envelope_delay == -1
else FLAGS.visit_weight_envelope_delay)
visit_weight = apply_envelope(
type=FLAGS.visit_weight_envelope,
step=model.step,
final_weight=FLAGS.visit_weight,
growing_steps=visit_weight_envelope_steps,
delay=visit_weight_envelope_delay)
walker_weight = apply_envelope(
type=FLAGS.walker_weight_envelope,
step=model.step,
final_weight=FLAGS.walker_weight,
growing_steps=FLAGS.walker_weight_envelope_steps, # pylint:disable=line-too-long
delay=FLAGS.walker_weight_envelope_delay)
tf.summary.scalar('Weights_Visit', visit_weight)
tf.summary.scalar('Weights_Walker', walker_weight)
if FLAGS.unsup_samples != 0:
model.add_semisup_loss(t_sup_emb,
t_unsup_emb,
t_sup_labels,
visit_weight=visit_weight,
walker_weight=walker_weight)
model.add_logit_loss(t_sup_logit,
t_sup_labels,
weight=FLAGS.logit_weight)
# Set up learning rate
t_learning_rate = tf.maximum(
tf.train.exponential_decay(
FLAGS.learning_rate,
model.step,
FLAGS.decay_steps,
FLAGS.decay_factor,
staircase=True),
FLAGS.minimum_learning_rate)
# Create training operation and start the actual training loop.
train_op = model.create_train_op(t_learning_rate)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# config.log_device_placement = True
saver = tf_saver.Saver(max_to_keep=FLAGS.max_checkpoints,
keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours) #
# pylint:disable=line-too-long
slim.learning.train(
train_op,
logdir=FLAGS.logdir + '/train',
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs,
master=FLAGS.master,
is_chief=(FLAGS.task == 0),
startup_delay_steps=(FLAGS.task * 20),
log_every_n_steps=FLAGS.log_every_n_steps,
session_config=config,
trace_every_n_steps=1000,
saver=saver,
number_of_steps=FLAGS.max_steps,
)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run()
|
[
"johannes.plapp@logivations.com"
] |
johannes.plapp@logivations.com
|
e88fe4c04c5aeb05fbd2703796f983a9c004218c
|
2b38f4398a1b54ed45c4e30219b02bec704a71c6
|
/110.平衡二叉树/isBalanced.py
|
f0bccf81238571051c49250bd09866788b0f55e3
|
[] |
no_license
|
johnmaster/Leetcode
|
7a1f6bd39d43d262f4436bf9e34e2fb9a984e475
|
ceecad205a5809036e59c726a1baa42918f97619
|
refs/heads/master
| 2020-07-21T06:00:22.290353
| 2019-12-27T09:19:50
| 2019-12-27T09:19:50
| 206,766,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,069
|
py
|
"""
一个高度平衡的二叉搜索树(平衡二叉搜索树)是在插入和删除任何节点之后
可以自动保持其高度最小。也就是说,有N个节点的平衡二叉搜索树,它的高
度是logN。并且,每个节点的两个子树的高度不会相差超过1。
根据定义,我们可以判断一个二叉搜索树是否是高度平衡的(平衡二叉树)。
正入我们之前提到的,一个有N个节点的平衡二叉搜索树的高度总是LogN。
因此,我们可以计算节点总数和树的高度,以确定这个二叉搜索树是否是高
度平衡的。
同样,在定义中,我们提到了高度平衡的二叉树一个特性:每个节点的两个子
树的深度不会相差超过1。我们也可以根据这个性质,递归的验证树。
为什么需要用到高度平衡的二叉搜索树?
当分析二叉搜索树的相关操作时,我们需要注意的是树的高度是十分重要的,
如果树的高度为h,则时间复杂度为O(h),二叉搜索树的高度的确很重要。
所以,我们来讨论一下树的节点总数N和高度h之间的关系。对于一个平衡二叉树
前文提过,h>logN。但对一个普通的二叉搜索树,在最坏的情况下,它可以退化
为一个链。
因此,具有N个节点的二叉搜索树的高度在logN和N区间变化,也就是说,搜索
操作的时间复杂度可以从logN变化到N。这是一个巨大的性能差异。
所以说,高度平衡的二叉搜索树对提高性能起着重要作用。
"""
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def isBalanced(self, root):
return self.dfs(root) != -1
def dfs(self, root):
if not root:
return 0
left = self.dfs(root.left)
if left == -1: return -1
right = self.dfs(root.right)
if right == -1: return -1
return max(left, right) + 1 if abs(right - left) < 2 else -1
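# A minimal usage sketch (hypothetical tree, not part of the original file):
# a three-node tree is balanced; chaining two more nodes down the left side
# makes the subtree depths differ by 2, so isBalanced returns False.
if __name__ == "__main__":
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    print(Solution().isBalanced(root))  # True
    root.left.left = TreeNode(4)
    root.left.left.left = TreeNode(5)
    print(Solution().isBalanced(root))  # False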
|
[
"noreply@github.com"
] |
johnmaster.noreply@github.com
|
7cc3a8bcd234a170d28a60514da557484369ec59
|
87b8e3090f33e73892ef79b6d52c37d66ba21ea3
|
/src/huffman.py
|
09b5c0fbaf70a1ba80a5d32a97ea3c93d63bb906
|
[
"CC0-1.0"
] |
permissive
|
jwhang627/DigitalImageCompressor
|
97abdb6bb1757b244ae07ff699a0d2eab1253997
|
7d2d20ac98f6c6e8baf2c98410846d7b4a7c4388
|
refs/heads/master
| 2022-07-30T23:16:48.113088
| 2020-05-18T20:37:12
| 2020-05-18T20:37:12
| 261,240,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 997
|
py
|
# src/huffman.py
class NodeTree(object):
def __init__(self, left=None, right=None):
self.left = left
self.right = right
def children(self):
return (self.left, self.right)
def nodes(self):
return (self.left, self.right)
def __str__(self):
return '%s-%s' % (self.left, self.right)
def huffmanCodeTree(node, left=True, binString=''):
if type(node) is str:
return {node: binString}
(l, r) = node.children()
d = dict()
d.update(huffmanCodeTree(l, True, binString + '0'))
d.update(huffmanCodeTree(r, False, binString + '1'))
return d
def huffmanDecode(data,tree):
rev = {}
for v,k in tree.items():
rev[k] = v
start_idx = 0
end_idx = 1
max_idx = len(data)
decode = ''
while start_idx != max_idx:
if data[start_idx : end_idx] in rev:
decode += rev[data[start_idx : end_idx]] + " "
start_idx = end_idx
end_idx += 1
return decode
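# A small usage sketch (hand-built tree, not part of the original file):
# derive codes from a fixed NodeTree, encode "abc", then decode it again.
if __name__ == "__main__":
    tree = NodeTree(NodeTree('a', 'b'), 'c')
    codes = huffmanCodeTree(tree)              # {'a': '00', 'b': '01', 'c': '1'}
    encoded = ''.join(codes[s] for s in "abc")
    print(huffmanDecode(encoded, codes))       # "a b c "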
|
[
"jw8n5@mail.missouri.edu"
] |
jw8n5@mail.missouri.edu
|
73cf0ef2cb9fe2e438f97e2903b8c4922208754c
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_262/ch133_2020_04_01_11_12_20_875320.py
|
d6b43f8de09fc367129e32985f549dbeebf2944e
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 292
|
py
|
fun=input("está funcionando?")
if fun=='n':
fun=input("você sabe corrigir?")
if fun=='n':
fun=input("você precisa corrigir?")
if fun=='n':
print("Apague tudo e tente novamente")
else:
print("sem problemas!")
else:
print("sem problemas!")
|
[
"you@example.com"
] |
you@example.com
|
f532a8bb80293ee3fadcc4683d18bd6e07ffd0cf
|
7224f79adb11e02f17068a48cb0650266db8560c
|
/blog/migrations/0001_initial.py
|
5caeed9db7c25da82dcf89f0ad550e83e62223f2
|
[] |
no_license
|
kirbycbressler/blog_posts
|
5e994490861d31bb22fa235f2d4c2d4ecb2907d5
|
2bbaf6c2de989a9d43914ab7976b8d7d8b7a9adf
|
refs/heads/main
| 2023-07-15T19:37:55.806798
| 2021-08-22T17:07:11
| 2021-08-22T17:07:11
| 397,456,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 788
|
py
|
# Generated by Django 3.2.6 on 2021-08-18 02:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('body', models.TextField()),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"kirbycbressler@yahoo.com"
] |
kirbycbressler@yahoo.com
|
05e555436bf655e00619796225111cd597e83c09
|
5123fb89f99bf6f109f3ac5a64b5ee7473ba8517
|
/CellularAutomata.py
|
6ee51c49a8c168170494acc544962b1c6e22a162
|
[] |
no_license
|
RyanSamman/ElementaryCellularAutomata
|
cf051d00a346a362dcf4cb0959304b76e6c7e187
|
5a7628a0528d66b869896a7055a940d733dadfba
|
refs/heads/master
| 2023-03-30T12:26:55.273514
| 2021-04-07T11:40:21
| 2021-04-07T11:40:21
| 286,540,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,041
|
py
|
from random import randrange
class CellularAutomata:
def __init__(self, width=11, rule=0, initialState=None) -> None:
if initialState is None: # Create an array with stuff in the center
sides = (width - 1) // 2
middle = width - 2 * sides
initialState = [0] * sides + [1] * middle + [0] * sides
self.CELLS_WIDTH = width
self.cellsHistory = [[*initialState]] # Record the history of the cells
self.cells = initialState
self.rule = rule
self.updateRuleArray()
def updateRuleArray(self):
        # Convert the rule number (0-255) into a little-endian list of its 8 bits
self.ruleArray = [self.rule >> i & 1 for i in range(8)]
def generateRuleIndex(self, left, middle, right):
index = 0b000
if self.cells[left]: index ^= 0b001 # 0b000 -> 0b001
if self.cells[middle]: index ^= 0b010 # 0b000 -> 0b010
if self.cells[right]: index ^= 0b100 # 0b000 -> 0b100
return self.ruleArray[index]
def applyRule(self):
newCells = []
for currentPosition, _ in enumerate(self.cells):
left = (currentPosition - 1) % self.CELLS_WIDTH
right = (currentPosition + 1) % self.CELLS_WIDTH
newCells.append(self.generateRuleIndex(left, currentPosition, right))
self.cells = newCells
self.cellsHistory.append(newCells)
return newCells
@staticmethod
def displayCells(cells):
[print("[]" if c else "__", end="") for c in cells]
print()
if __name__ == "__main__":
# Testing in the CLI, the main entrypoint is UI.py
inputRule = randrange(0, 256) # int(input("Enter a rule: "))
print(f"Rule {inputRule}")
width = 61
height = 40
startarr = [randrange(0, 2) for i in range(width)]
print("Starting cells are", startarr)
ca = CellularAutomata(rule=inputRule, width=width, initialState=startarr)
ca.displayCells(ca.cells)
for _ in range(height):
ca.applyRule()
ca.displayCells(ca.cells)
|
[
"RyanSamman@outlook.com"
] |
RyanSamman@outlook.com
|
0bd8081080edffef12ace581f49e14a0cfcdd932
|
c37f5b94eff116e9b2ea3bb17ba3390e12ff8265
|
/drf_shoestore/settings.py
|
7d5e5a831a5fbd2cc2ae3d7649e59b6b36d42576
|
[] |
no_license
|
DJJD2150/drf_shoestore
|
67f74fe7d179934bf723bdd67c387cb350388a65
|
7a66a2cd6b06705c36850a338649fc1595f5f3cf
|
refs/heads/dev2
| 2022-12-19T14:09:01.217579
| 2020-10-03T00:25:36
| 2020-10-03T00:25:36
| 300,011,999
| 0
| 1
| null | 2020-10-03T00:25:37
| 2020-09-30T18:15:19
|
Python
|
UTF-8
|
Python
| false
| false
| 3,280
|
py
|
"""
Django settings for drf_shoestore project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!bu_hs8(0i(zef)c9d%7z2-22rtzi033jh_vi6fq)s4j8loi#@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'corsheaders',
'api',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'drf_shoestore.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'drf_shoestore.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
CORS_ALLOWED_ORIGINS = ["http://localhost:3000"]
CSRF_TRUSTED_ORIGINS = ["http://localhost:3000"]
|
[
"jndetke2150@gmail.com"
] |
jndetke2150@gmail.com
|
130d2e97b46cd60325c75df08f8304805de254a7
|
1f8e6d593740241012941b530faea20f0560880e
|
/summer_pre_assign_package1/summer_pre_assign_package2/module2.py
|
f6ea2b7b5609a4d619d734abff1e4803331ee929
|
[] |
no_license
|
durgaprasad1997/missionrnd_python_course
|
0c576d41985e83f96a0359160fdc606953bc5b1e
|
30e26b2a255a3387d2a2810559050ccb281e3fef
|
refs/heads/master
| 2020-03-22T07:57:33.983821
| 2018-07-04T14:59:38
| 2018-07-04T14:59:38
| 139,735,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
import logging
def f3():
logger = logging.getLogger(__name__)
logger.info('Entering function f3')
logger.warning('This cannot be done using function f3')
logger.error('Error occurred in function f3')
logger.debug('Leaving function f3')
def f4():
logger = logging.getLogger(__name__)
logger.info("Entering method f4")
logger.warning("This is a log from f4 at WARNING level")
logger.error("This is a log from f4 at ERROR level")
logger.debug("Leaving method f4")
|
[
"noreply@github.com"
] |
durgaprasad1997.noreply@github.com
|
5cb372ed429f9cc84f45f4986d19d7db6de2a6ff
|
9f45fe47cf6e6556118d59310af4652292231673
|
/eleventry.py
|
a51e1663f429fa7060f642a9861a9ee625981235
|
[] |
no_license
|
zubairrv/Python-basics-programs-practice
|
683d93d3a1f28bae9246d2bd3c898a7637e96f2b
|
5b0f7b9da56c67b4620e3421973d87c3e5607bd5
|
refs/heads/master
| 2022-12-29T11:48:44.816036
| 2020-10-08T11:38:13
| 2020-10-08T11:38:13
| 287,947,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 77
|
py
|
while True:
    for i in ["/","- ","|","\\","|"]:
        print("%s\r" % i, end="", flush=True)
|
[
"noreply@github.com"
] |
zubairrv.noreply@github.com
|
624671d487dd50b1558eb59e9f858d68e6edc44d
|
21b5d639df0157d4b93780ef12104ed0b3ae3d89
|
/examples/iris.py
|
2c0ee17611d0d8eb4e80903e0192c7f718c0eb3b
|
[] |
no_license
|
lwthatcher/mlp
|
54564e5bd2c3ffbff6c2aa6528c07e158986bd07
|
a57bf621c31884659c0df9bab52e048b82f4776d
|
refs/heads/master
| 2021-01-23T00:34:38.011259
| 2017-04-05T23:00:42
| 2017-04-05T23:00:42
| 85,741,442
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,370
|
py
|
import numpy as np
from mlp import NeuralNet
from mlp import util
from sklearn.preprocessing import normalize
from mlp.activation_functions import ReLU, Sigmoid
from sklearn.model_selection import cross_val_score
def run_iris():
features, labels = util.load_data_file("iris.txt")
features = normalize(features, axis=0)
# train/test set
X = features
Y = util.to_output_vector(labels)
args = [4, 10, 3]
kwargs = {"max_epochs": 100, "a_func": Sigmoid}
cross_fold(X, Y, 10, *args, **kwargs)
def cross_fold(X, Y, n, *model_args, **model_kwargs):
num_examples = len(Y)
# shuffle data first
idx = np.arange(num_examples)
np.random.shuffle(idx)
# split into n sets
splits = np.split(idx, n)
for i in range(n):
# get train/test sets
idx_test = splits[i]
tr1 = splits[:i]
tr2 = splits[i+1:]
tr1.extend(tr2)
idx_train = np.concatenate(tr1)
X_train = X[idx_train,:]
X_test = X[idx_test,:]
Y_train = Y[idx_train]
Y_test = Y[idx_test]
# create new model
model = NeuralNet(*model_args, **model_kwargs)
# train
num_epochs = model.fit(X_train, Y_train)
        # compare accuracy on the held-out fold
        print(i, model.score(X_test, Y_test), num_epochs)
if __name__ == '__main__':
run_iris()
|
[
"lwthatcher@msn.com"
] |
lwthatcher@msn.com
|
6123625e902a2d37a7242f0f539d4af1f68662e8
|
4a040bbd3e463c37095dd955440aa12e3c9f2e1f
|
/exam/4_structure/package/ex2.py
|
9bb227231d3ee712ebfb8a9b3571b18eab8f7c59
|
[] |
no_license
|
ace2267/pythonExam
|
7a06b898237938a6f7036087160fb57efee33b09
|
6b6a4227c711b426114249c4993a738328bb4950
|
refs/heads/master
| 2020-05-07T08:30:59.891136
| 2019-05-25T00:04:33
| 2019-05-25T00:04:53
| 180,332,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 99
|
py
|
# from exam.package.packEx import testPakage
import exam.package.packEx
exam.package.packEx.testPakage(1, 50)
|
[
"ace22672@gmail.com"
] |
ace22672@gmail.com
|
0e450cd56fbe42980d5a7104818eb4074accb3b1
|
5c60daf869242848c7ef2e15e143d470a7e9e50c
|
/pypokerengine/api/game.py
|
1c1bb310697aafe2f121e93e7f38362d279c92db
|
[] |
no_license
|
cfacundus/artificial-intelligence
|
32c30b7f27d112a799c07da2c93cf9c39645b8f2
|
ec4b2d1c41f416f3c73c62d027e20b19fcd0b99f
|
refs/heads/main
| 2023-01-29T21:35:48.976810
| 2020-11-29T22:18:11
| 2020-11-29T22:18:11
| 317,053,124
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,047
|
py
|
from pypokerengine.engine.dealer import Dealer
from pypokerengine.players import BasePokerPlayer
def setup_config(max_round, initial_stack, small_blind_amount, ante=0):
return Config(max_round, initial_stack, small_blind_amount, ante)
def start_poker(config, verbose=2):
config.validation()
dealer = Dealer(config.sb_amount, config.initial_stack, config.ante)
dealer.set_verbose(verbose)
dealer.set_blind_structure(config.blind_structure)
for info in config.players_info:
dealer.register_player(info["name"], info["algorithm"])
result_message = dealer.start_game(config.max_round)
return _format_result(result_message)
def _format_result(result_message):
return {
"rule": result_message["message"]["game_information"]["rule"],
"players": result_message["message"]["game_information"]["seats"]
}
class Config(object):
def __init__(self, max_round, initial_stack, sb_amount, ante):
self.players_info = []
self.blind_structure = {}
self.max_round = max_round
self.initial_stack = initial_stack
self.sb_amount = sb_amount
self.ante = ante
def register_player(self, name, algorithm):
if not isinstance(algorithm, BasePokerPlayer):
base_msg = 'Poker player must be child class of "BasePokerPlayer". But its parent was "%s"'
raise TypeError(base_msg % algorithm.__class__.__bases__)
info = { "name" : name, "algorithm" : algorithm }
self.players_info.append(info)
def set_blind_structure(self, blind_structure):
self.blind_structure = blind_structure
def validation(self):
player_num = len(self.players_info)
if player_num < 2:
detail_msg = "no player is registered yet" if player_num==0 else "you registered only 1 player"
base_msg = "At least 2 players are needed to start the game"
raise Exception("%s (but %s.)" % (base_msg, detail_msg))
|
[
"noreply@github.com"
] |
cfacundus.noreply@github.com
|
79c37e2cdea99cda4a653a7d2446f0cfc4ac29d2
|
58d500012a959a07b9560000886ddcb6140da175
|
/user_interface/neo4j_scene.py
|
9d795edf52b99f4cd7935130f22404b09c504fa4
|
[] |
no_license
|
daisy-ua/chat-application
|
b0f4520d6f4e13b791355b6493223e85a3b0162c
|
377b1d1b07fcb00a48dbfed3dde2fb733b2deb4c
|
refs/heads/master
| 2023-05-27T22:00:02.202621
| 2021-06-07T20:15:42
| 2021-06-07T20:15:42
| 354,841,629
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 861
|
py
|
from services.neo4j_server import neo4j
tags = [
'ads', 'swag', 'kpop', 'vegan', 'solotravel', 'puppylove', 'vr', 'bts', 'bantansonyendan',
'fitfam', 'cardio', 'giveaway', 'weddinghair'
]
OPTIONS = []
def get_users_by_tags():
for tag in tags:
users = neo4j.get_users_by_tag(tag)
users = list(set(users))
if len(users) != 0:
print("For tag '{0}'".format(tag))
for user in users:
print(user['username'])
def get_pairs_by_msg_len(msg_len):
    pairs = neo4j.get_pairs_by_msg_len(msg_len)
pairs = list(set(pairs))
for pair in pairs:
print(pair['u1'] + " : " + pair['u2'])
def load_neo4j_scene():
get_users_by_tags()
# get_pairs_by_msg_len(2)
# users = neo4j.get_unrelated_by_tags('cardio')
# print(len(users))
# for lis in users:
# print(lis)
|
[
"eightnum.81@gmail.com"
] |
eightnum.81@gmail.com
|
c9aa318b9b0c13d3782326990ef227fa50314c45
|
66f7814e54fbae6473d4226ce843257ede1c6724
|
/scripts/websockets.py
|
1826ba78aaacafb8ff70725a322641fa5c70b41d
|
[] |
no_license
|
kgz/Maple
|
cb1b094563467fa6c720193f98e893926683b64b
|
0a801563ebbf9a6cf9bb24f69ee067e81d89661a
|
refs/heads/master
| 2020-04-16T19:33:58.486930
| 2019-01-15T14:40:17
| 2019-01-15T14:40:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,259
|
py
|
"""."""
from PyLog import Log
import json
from quart import websocket, request
class Sock():
"""."""
def __init__(self, app, *args, **kwargs):
"""."""
self.socks = {}
self.clients = []
@app.websocket("/ws")
async def connect():
"""."""
while True:
conn = websocket._get_current_object()
if conn not in self.clients:
self.clients.append(conn)
await self.emit(websocket.host)
data = await websocket.receive()
Log(data)
try:
js = json.loads(data)
for key in js.keys():
if key in self.socks:
self.socks.get(key)(js.get(key))
except Exception as exc:
Log(exc, level=5)
Log(self.socks)
    def on(self, string):
        def decorate(func):
            self.socks[string] = func
            return func
        return decorate
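    # Usage sketch (hypothetical handler, not part of the original file):
    #
    #     sock = Sock(app)
    #     @sock.on("chat")
    #     def handle_chat(payload):
    #         Log(payload)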
async def emit(self, data):
"""."""
        for x in list(self.clients):
            try:
                await x.send(data)
            except Exception:
                # drop the client whose send failed, not the current websocket
                self.clients.remove(x)
|
[
"mat.frayne@gmail.com"
] |
mat.frayne@gmail.com
|
5ad570ef73816ca4ebb5fc3842c9f429ae06ab16
|
2a7c72c37a3ad3b3c839a9363f07baa5bf7e8366
|
/omnizart/models/chord_model.py
|
69b2fa024b8b1c383730a1732c6c287902fadad1
|
[
"MIT"
] |
permissive
|
Hadiaz1/omnizart
|
4bc3be58073e61f9b67650ec365bc10916226076
|
4188e6e193a4d5de6a0bec9f8dc2f0e847310e26
|
refs/heads/master
| 2023-08-16T11:20:26.060034
| 2021-10-02T08:53:18
| 2021-10-02T08:53:18
| 414,891,261
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,730
|
py
|
# pylint: disable=W0102,W0221
import tensorflow as tf
from tensorflow.python.framework import ops
from omnizart.models.t2t import positional_encoding, MultiHeadAttention
from omnizart.models.utils import shape_list
class FeedForward(tf.keras.layers.Layer):
"""Feedfoward layer of the transformer model.
Paramters
---------
n_units: list[int, int]
A two-element integer list. The first integer represents the output embedding size
of the first convolution layer, and the second integer represents the embedding size
of the second convolution layer.
activation_func: str
        Activation function of the first convolution layer. Available options can be found
from the tensorflow.keras official site.
dropout_rate: float
Dropout rate of all dropout layers.
"""
def __init__(self, n_units=[2048, 512], activation_func="relu", dropout_rate=0):
super().__init__()
self.n_units = n_units
self.activation_func = activation_func
self.dropout_rate = dropout_rate
self.conv_1 = tf.keras.layers.Conv1D(n_units[0], kernel_size=1, activation=activation_func)
self.conv_2 = tf.keras.layers.Conv1D(n_units[1], kernel_size=1)
self.dropout = tf.keras.layers.Dropout(dropout_rate)
self.layer_norm = tf.keras.layers.LayerNormalization()
def call(self, inp):
outputs = self.conv_1(inp)
outputs = self.conv_2(outputs)
outputs = self.dropout(outputs)
outputs += inp
return self.layer_norm(outputs)
def get_config(self):
config = super().get_config().copy()
config.update(
{
"activation_func": self.activation_func,
"n_units": self.n_units,
"dropout_rate": self.dropout_rate
}
)
return config
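# A minimal usage sketch of FeedForward (hypothetical shapes, not part of the
# original file): the second conv projects back to n_units[1], and the
# residual add requires the input's last dimension to equal n_units[1]:
#
#     ff = FeedForward(n_units=[2048, 512])
#     out = ff(tf.zeros((4, 100, 512)))  # out.shape == (4, 100, 512)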
class EncodeSegmentTime(tf.keras.layers.Layer):
"""Encode feature along the time axis.
Parameters
----------
n_units: int
Output embedding size.
n_steps: int
Time length of the feature.
segment_width: int
Context width of each frame. Nearby frames will be concatenated to the feature axis.
Default to 21, which means past 10 frames and future 10 frames will be concatenated
        to the current frame, resulting in a feature dimension of *segment_width x freq_size*.
freq_size: int
Feature size of the input representation.
dropout_rate: float
Dropout rate of all dropout layers.
"""
def __init__(self, n_units=512, dropout_rate=0, n_steps=100, freq_size=24, segment_width=21):
super().__init__()
self.n_steps = n_steps
self.freq_size = freq_size
self.segment_width = segment_width
self.n_units = n_units
self.dropout_rate = dropout_rate
self.attn_layer = MultiHeadAttention(
n_units=freq_size,
n_heads=2,
activation_func="relu",
relative_position=True,
max_dist=4,
dropout_rate=dropout_rate
)
self.feed_forward = FeedForward(n_units=[freq_size * 4, freq_size], dropout_rate=dropout_rate)
self.dropout = tf.keras.layers.Dropout(dropout_rate)
self.dense = tf.keras.layers.Dense(n_units, activation="relu")
self.layer_norm = tf.keras.layers.LayerNormalization()
def call(self, inp):
# output dim: [batch_size*n_steps, tonal_size, segment_width]
inp_reshape = tf.reshape(inp, shape=[-1, self.freq_size, self.segment_width])
# output dim: [batch_size*n_steps, segment_width, tonal_size]
inp_permute = tf.transpose(a=inp_reshape, perm=[0, 2, 1])
inp_permute += positional_encoding(
batch_size=shape_list(inp_permute)[0], timesteps=self.segment_width, n_units=self.freq_size
) * 0.01 + 0.01
attn_output = self.attn_layer(q=inp_permute, k=inp_permute, v=inp_permute)
forward_output = self.feed_forward(attn_output)
# restore shape
outputs = tf.transpose(a=forward_output, perm=[0, 2, 1])
outputs = tf.reshape(outputs, shape=[-1, self.n_steps, self.freq_size * self.segment_width])
outputs = self.dropout(outputs)
outputs = self.dense(outputs)
return self.layer_norm(outputs)
def get_config(self):
config = super().get_config().copy()
config.update(
{
"n_steps": self.n_steps,
"n_units": self.n_units,
"dropout_rate": self.dropout_rate,
"freq_size": self.freq_size,
"segment_width": self.segment_width
}
)
return config
class EncodeSegmentFrequency(tf.keras.layers.Layer):
"""Encode feature along the frequency axis.
Parameters
----------
n_units: int
Output embedding size.
n_steps: int
Time length of the feature.
segment_width: int
Context width of each frame. Nearby frames will be concatenated to the feature axis.
Default to 21, which means past 10 frames and future 10 frames will be concatenated
        to the current frame, resulting in a feature dimension of *segment_width x freq_size*.
freq_size: int
Feature size of the input representation.
dropout_rate: float
Dropout rate of all dropout layers.
"""
def __init__(self, n_units=512, dropout_rate=0, n_steps=100, freq_size=24, segment_width=21):
super().__init__()
self.freq_size = freq_size
self.segment_width = segment_width
self.n_steps = n_steps
self.n_units = n_units
self.dropout_rate = dropout_rate
self.attn_layer = MultiHeadAttention(
n_units=segment_width,
n_heads=1,
activation_func="relu",
relative_position=False,
max_dist=4,
dropout_rate=dropout_rate
)
self.feed_forward = FeedForward(n_units=[segment_width * 4, segment_width], dropout_rate=dropout_rate)
self.dropout = tf.keras.layers.Dropout(dropout_rate)
self.out_dense = tf.keras.layers.Dense(n_units, activation="relu")
self.layer_norm = tf.keras.layers.LayerNormalization()
def call(self, inp):
inp_reshape = tf.reshape(inp, [-1, self.freq_size, self.segment_width])
inp_reshape += positional_encoding(
batch_size=shape_list(inp_reshape)[0], timesteps=self.freq_size, n_units=self.segment_width
) * 0.01 + 0.01
attn_output = self.attn_layer(q=inp_reshape, k=inp_reshape, v=inp_reshape)
forward_output = self.feed_forward(attn_output)
# restore shape
outputs = tf.reshape(forward_output, shape=[-1, self.n_steps, self.freq_size * self.segment_width])
outputs = self.dropout(outputs)
outputs = self.out_dense(outputs)
return self.layer_norm(outputs)
def get_config(self):
config = super().get_config().copy()
config.update(
{
"n_steps": self.n_steps,
"n_units": self.n_units,
"dropout_rate": self.dropout_rate,
"freq_size": self.freq_size,
"segment_width": self.segment_width
}
)
return config
def chord_block_compression(hidden_states, chord_changes):
block_ids = tf.cumsum(chord_changes, axis=1)
modify_ids = lambda x: tf.cond(pred=tf.equal(x[0], 0), true_fn=lambda: x, false_fn=lambda: x - 1)
block_ids = tf.map_fn(modify_ids, block_ids)
num_blocks = tf.reduce_max(input_tensor=block_ids, axis=1) + 1
max_steps = tf.reduce_max(input_tensor=num_blocks)
segment_mean_pad = lambda x: tf.pad( # pylint: disable=E1123,E1120
tensor=tf.math.segment_mean(data=x[0], segment_ids=x[1]),
paddings=tf.convert_to_tensor([[0, max_steps - x[2]], [0, 0]])
)
chord_blocks = tf.map_fn(segment_mean_pad, (hidden_states, block_ids, num_blocks), dtype=tf.float32)
return chord_blocks, block_ids
def chord_block_decompression(compressed_seq, block_ids):
gather_chords = lambda x: tf.gather(params=x[0], indices=x[1]) # pylint: disable=E1120
return tf.map_fn(gather_chords, (compressed_seq, block_ids), dtype=compressed_seq.dtype)
def binary_round(inp, cast_to_int=False):
graph = tf.compat.v1.get_default_graph()
with ops.name_scope("BinaryRound") as name:
if cast_to_int:
with graph.gradient_override_map({"Round": "Identity", "Cast": "Identity"}):
return tf.cast(tf.round(inp), tf.int32, name=name)
else:
with graph.gradient_override_map({"Round": "Identity"}):
return tf.round(inp, name=name)
class Encoder(tf.keras.layers.Layer):
"""Encoder layer of the transformer model.
Parameters
----------
    num_attn_blocks: int
Number of attention blocks.
n_steps: int
Time length of the feature.
enc_input_emb_size: int
Embedding size of the encoder's input.
segment_width: int
Context width of each frame. Nearby frames will be concatenated to the feature axis.
Default to 21, which means past 10 frames and future 10 frames will be concatenated
        to the current frame, resulting in a feature dimension of *segment_width x freq_size*.
freq_size: int
Feature size of the input representation.
dropout_rate: float
Dropout rate of all the dropout layers.
**kwargs:
Other keyword parameters that will be passed to initialize keras.layers.Layer.
"""
def __init__(
self,
dropout_rate=0,
num_attn_blocks=2,
n_steps=100,
enc_input_emb_size=512,
freq_size=24,
segment_width=21,
**kwargs
):
super().__init__(**kwargs)
self.n_steps = n_steps
self.num_attn_blocks = num_attn_blocks
self.enc_input_emb_size = enc_input_emb_size
self.dropout_rate = dropout_rate
self.freq_size = freq_size
self.segment_width = segment_width
self.layer_weights = tf.Variable(initial_value=tf.zeros(num_attn_blocks), trainable=True)
self.encode_segment_time = EncodeSegmentTime(
n_units=enc_input_emb_size,
dropout_rate=dropout_rate,
n_steps=n_steps,
freq_size=freq_size,
segment_width=segment_width
)
self.attn_layers = [
MultiHeadAttention(
n_units=enc_input_emb_size, n_heads=8, max_dist=16, dropout_rate=dropout_rate
)
for _ in range(num_attn_blocks)
]
self.ff_layers = [
FeedForward(n_units=[enc_input_emb_size * 4, enc_input_emb_size], dropout_rate=dropout_rate)
for _ in range(num_attn_blocks)
]
self.logit_dense = tf.keras.layers.Dense(1)
self.dropout = tf.keras.layers.Dropout(dropout_rate)
def call(self, inp, slope=1):
segment_encodings = self.encode_segment_time(inp)
segment_encodings += positional_encoding(
batch_size=shape_list(segment_encodings)[0], timesteps=self.n_steps, n_units=self.enc_input_emb_size
)
segment_encodings = self.dropout(segment_encodings)
weight = tf.nn.softmax(self.layer_weights)
weighted_hidden_enc = tf.zeros(shape=shape_list(segment_encodings))
for idx, (attn_layer, feed_forward) in enumerate(zip(self.attn_layers, self.ff_layers)):
segment_encodings = attn_layer(q=segment_encodings, k=segment_encodings, v=segment_encodings)
segment_encodings = feed_forward(segment_encodings)
weighted_hidden_enc += weight[idx] * segment_encodings
chord_change_logits = tf.squeeze(self.logit_dense(weighted_hidden_enc))
chord_change_prob = tf.sigmoid(slope * chord_change_logits)
chord_change_pred = binary_round(chord_change_prob, cast_to_int=True)
return weighted_hidden_enc, chord_change_logits, chord_change_pred
def get_config(self):
config = super().get_config().copy()
config.update(
{
"n_steps": self.n_steps,
"enc_input_emb_size": self.enc_input_emb_size,
"num_attn_blocks": self.num_attn_blocks,
"dropout_rate": self.dropout_rate,
"freq_size": self.freq_size,
"segment_width": self.segment_width
}
)
return config
class Decoder(tf.keras.layers.Layer):
"""Decoder layer of the transformer model.
Parameters
----------
out_classes: int
Number of output classes. Currently supports 26 types of chords.
    num_attn_blocks: int
Number of attention blocks.
n_steps: int
Time length of the feature.
dec_input_emb_size: int
Embedding size of the decoder's input.
segment_width: int
Context width of each frame. Nearby frames will be concatenated to the feature axis.
Default to 21, which means past 10 frames and future 10 frames will be concatenated
        to the current frame, resulting in a feature dimension of *segment_width x freq_size*.
freq_size: int
Feature size of the input representation.
dropout_rate: float
Dropout rate of all the dropout layers.
**kwargs:
Other keyword parameters that will be passed to initialize keras.layers.Layer.
"""
def __init__(
self,
out_classes=26,
dropout_rate=0,
num_attn_blocks=2,
n_steps=100,
dec_input_emb_size=512,
freq_size=24,
segment_width=21,
**kwargs
):
super().__init__(**kwargs)
self.n_steps = n_steps
self.dec_input_emb_size = dec_input_emb_size
self.num_attn_blocks = num_attn_blocks
self.out_classes = out_classes
self.dropout_rate = dropout_rate
self.freq_size = freq_size
self.segment_width = segment_width
self.encode_segment_frequency = EncodeSegmentFrequency(
n_units=dec_input_emb_size,
dropout_rate=dropout_rate,
n_steps=n_steps,
freq_size=freq_size,
segment_width=segment_width
)
self.attn_layers_1 = [
MultiHeadAttention(
n_units=dec_input_emb_size,
n_heads=8,
relative_position=True,
max_dist=16,
self_mask=False,
dropout_rate=dropout_rate
)
for _ in range(num_attn_blocks)
]
self.attn_layers_2 = [
MultiHeadAttention(
n_units=dec_input_emb_size,
n_heads=8,
relative_position=False,
max_dist=16,
self_mask=False,
dropout_rate=dropout_rate
)
for _ in range(num_attn_blocks)
]
        self.ff_layers = [
            FeedForward(n_units=[dec_input_emb_size * 4, dec_input_emb_size], dropout_rate=dropout_rate)
            for _ in range(num_attn_blocks)
        ]
self.dropout = tf.keras.layers.Dropout(dropout_rate)
self.out_dense = tf.keras.layers.Dense(out_classes)
def call(self, inp, encoder_input_emb, chord_change_pred):
segment_encodings = self.encode_segment_frequency(inp)
segment_encodings_blocked, block_ids = chord_block_compression(segment_encodings, chord_change_pred)
segment_encodings_blocked = chord_block_decompression(segment_encodings_blocked, block_ids)
segment_encodings_blocked.set_shape([None, self.n_steps, self.dec_input_emb_size])
decoder_inputs = segment_encodings + segment_encodings_blocked + encoder_input_emb
decoder_inputs += positional_encoding(
batch_size=shape_list(decoder_inputs)[0], timesteps=self.n_steps, n_units=self.dec_input_emb_size
)
decoder_inputs_drop = self.dropout(decoder_inputs)
layer_weights = tf.nn.softmax(tf.zeros((self.num_attn_blocks)))
weighted_hiddens_dec = tf.zeros(shape=shape_list(segment_encodings))
layer_stack = zip(self.attn_layers_1, self.attn_layers_2, self.ff_layers)
for idx, (attn_1, attn_2, feed_forward) in enumerate(layer_stack):
decoder_inputs_drop = attn_1(q=decoder_inputs_drop, k=decoder_inputs_drop, v=decoder_inputs_drop)
decoder_inputs_drop = attn_2(q=decoder_inputs_drop, k=encoder_input_emb, v=encoder_input_emb)
decoder_inputs_drop = feed_forward(decoder_inputs_drop)
weighted_hiddens_dec += layer_weights[idx] * decoder_inputs_drop
logits = self.out_dense(weighted_hiddens_dec)
chord_pred = tf.argmax(input=logits, axis=-1, output_type=tf.int32)
return logits, chord_pred
def get_config(self):
config = super().get_config().copy()
config.update(
{
"n_steps": self.n_steps,
"dec_input_emb_size": self.dec_input_emb_size,
"num_attn_blocks": self.num_attn_blocks,
"out_classes": self.out_classes,
"dropout_rate": self.dropout_rate,
"freq_size": self.freq_size,
"segment_width": self.segment_width
}
)
return config
class ChordModel(tf.keras.Model): # pylint: disable=R0901
"""Chord model in written in keras.
Keras model of ``chord`` submodule. The original implementation is written in
tensorflow 1.11 and can be found `here <https://github.com/Tsung-Ping/Harmony-Transformer>`_.
The model also implements the custom training/test step due to the specialized loss
computation.
Parameters
----------
num_enc_attn_blocks: int
Number of attention blocks in the encoder.
num_dec_attn_blocks: int
Number of attention blocks in the decoder.
segment_width: int
Context width of each frame. Nearby frames will be concatenated to the feature axis.
Default to 21, which means past 10 frames and future 10 frames will be concatenated
        to the current frame, resulting in a feature dimension of *segment_width x freq_size*.
freq_size: int
Feature size of the input representation.
out_classes: int
Number of output classes. Currently supports 26 types of chords.
n_steps: int
Time length of the feature.
enc_input_emb_size: int
Embedding size of the encoder's input.
dec_input_emb_size: int
Embedding size of the decoder's input.
dropout_rate: float
Dropout rate of all the dropout layers.
annealing_rate: float
Rate of modifying the slope value for each epoch.
**kwargs:
Other keyword parameters that will be passed to initialize the keras.Model.
See Also
--------
omnizart.chord.app.chord_loss_func:
The customized loss computation function.
"""
def __init__(
self,
num_enc_attn_blocks=2,
num_dec_attn_blocks=2,
segment_width=21,
freq_size=24,
out_classes=26,
n_steps=100,
enc_input_emb_size=512,
dec_input_emb_size=512,
dropout_rate=0,
annealing_rate=1.1,
**kwargs
):
super().__init__(**kwargs)
self.segment_width = segment_width
self.freq_size = freq_size
self.out_classes = out_classes
self.n_steps = n_steps
self.enc_input_emb_size = enc_input_emb_size
self.dec_input_emb_size = dec_input_emb_size
self.dropout_rate = dropout_rate
self.annealing_rate = annealing_rate
self.slope = 1
self.loss_func_name = "chord_loss_func"
self.encoder = Encoder(
num_attn_blocks=num_enc_attn_blocks,
dropout_rate=dropout_rate,
n_steps=n_steps,
enc_input_emb_size=enc_input_emb_size,
freq_size=freq_size,
segment_width=segment_width
)
self.decoder = Decoder(
num_attn_blocks=num_dec_attn_blocks,
out_classes=out_classes,
dropout_rate=dropout_rate,
n_steps=n_steps,
dec_input_emb_size=dec_input_emb_size,
freq_size=freq_size,
segment_width=segment_width
)
self.loss_tracker = tf.keras.metrics.Mean(name="loss")
def call(self, feature):
encoder_input_emb, chord_change_logits, chord_change_pred = self.encoder(feature, slope=self.slope)
logits, chord_pred = self.decoder(feature, encoder_input_emb, chord_change_pred)
return chord_pred, chord_change_pred, logits, chord_change_logits
def step_in_slope(self):
self.slope *= self.annealing_rate
def train_step(self, data):
# Input feature: (60, 100, 504)
# Chord change: (60, 100)
# Chord: (60, 100)
# Slope: 1.0
feature, (gt_chord, gt_chord_change) = data
with tf.GradientTape() as tape:
chord_pred, chord_change_pred, logits, chord_change_logits = self(feature)
if self.loss_func_name in self.loss.__name__:
loss = self.loss(gt_chord, gt_chord_change, logits, chord_change_logits)
trainable_vars = self.trainable_variables
loss_l2 = 2e-4 * tf.add_n([tf.nn.l2_loss(var) for var in trainable_vars if "bias" not in var.name])
loss += loss_l2
else:
loss_c = self.compiled_loss(gt_chord, chord_pred)
loss_cc = self.compiled_loss(gt_chord_change, chord_change_pred)
loss = loss_c + loss_cc
# Compute gradients
trainable_vars = self.trainable_variables
grads = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(grads, trainable_vars))
# Update the metrics
self.compiled_metrics.update_state(gt_chord, chord_pred)
self.loss_tracker.update_state(loss)
result = {m.name: m.result() for m in self.metrics}
result.update({"loss": self.loss_tracker.result()})
return result
def test_step(self, data):
feature, (gt_chord, gt_chord_change) = data
chord_pred, chord_change_pred, logits, chord_change_logits = self(feature)
if self.loss_func_name in self.loss.__name__:
loss = self.loss(gt_chord, gt_chord_change, logits, chord_change_logits)
trainable_vars = self.trainable_variables
loss_l2 = 2e-4 * tf.add_n([tf.nn.l2_loss(var) for var in trainable_vars if "bias" not in var.name])
loss += loss_l2
else:
loss_c = self.compiled_loss(gt_chord, chord_pred)
loss_cc = self.compiled_loss(gt_chord_change, chord_change_pred)
loss = loss_c + loss_cc
# Update the metrics
self.compiled_metrics.update_state(gt_chord, chord_pred)
self.loss_tracker.update_state(loss)
result = {m.name: m.result() for m in self.metrics}
result.update({"loss": self.loss_tracker.result()})
return result
def get_config(self):
config = {
"encoder": self.encoder,
"decoder": self.decoder,
"loss_tracker": self.loss_tracker
}
return config
class ReduceSlope(tf.keras.callbacks.Callback):
"""Custom keras callback for reducing slope value after each epoch."""
def on_epoch_end(self, epoch, logs=None):
self.model.step_in_slope()
if __name__ == "__main__":
model = ChordModel()
model.compile(optimizer="adam", loss=tf.keras.losses.BinaryCrossentropy())
    output = model(tf.zeros((16, 100, 504)))  # (batch, n_steps, freq_size * segment_width)
|
[
"freedombluewater@gmail.com"
] |
freedombluewater@gmail.com
|
003cadbd97efb17c22d41fca3bf34dc5e842bda4
|
a05f65967b8f7b09d0e56bb7571abfcd25bc2bd6
|
/prime.py
|
37d11a9f6082decf449e2164eb1fcd6c665a8f7c
|
[] |
no_license
|
jyothi1802/python-programs
|
b5d793a1f5540aa1b7311a6c188123d65acc7c74
|
78273a9737973e282de46f83dd1119d3d5a96617
|
refs/heads/master
| 2020-04-14T15:22:32.320671
| 2019-01-03T11:46:39
| 2019-01-03T11:46:39
| 163,924,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
#Write a python program to check whether given number is prime or not
def find_prime():
    try:
        num = int(input("Enter a Number:"))
        if num > 1:
            for i in range(2, num//2 + 1):
                if num % i == 0:
                    print(num, "is not prime")
                    return None
            #no divisor found, so the number is prime
            return num
        else:
            print("Enter a number greater than 1")
    except ValueError:
        print('enter valid number')
def main():
    result = find_prime()
    if result is not None:
        print(result, "is prime")
main()
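# Editor's note -- a common refinement (not part of the original program):
# trial division only needs to test divisors up to the square root of n.
def is_prime(n):
    if n < 2:
        return False
    for i in range(2, int(n ** 0.5) + 1):
        if n % i == 0:
            return False
    return True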
|
[
"noreply@github.com"
] |
jyothi1802.noreply@github.com
|
03213bed7a200f336273927a512d357e0510ab02
|
d6f6f7267fd94fb2b912f667a97519fda7e60743
|
/tragetory/extrator3.py
|
dc839d94564ea827576236643bce74088305bf95
|
[] |
no_license
|
marcosvro/FrankIA
|
7d2af8dcae5c1a530d33a651eb3848bd056a7d91
|
e850b4c8794972b583ed80f64989126596d1ec3d
|
refs/heads/master
| 2021-07-25T09:06:58.079365
| 2017-11-08T03:27:23
| 2017-11-08T03:27:23
| 106,047,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,452
|
py
|
import numpy as np
import math
import threading
#import cv2
import time
import os
import ikpy as ik
from ikpy import plot_utils
#v1 = 250 states
#CONFIGS +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
deslocamentoZpes = 3.
deslocamentoXpes = 5.
deslocamentoYpes = 0.
deslocamentoYpelves = 1.3
periodo = 20.
nEstados = 125
dMovx = deslocamentoXpes/nEstados
frameRate = periodo/nEstados
data_foot = np.zeros((nEstados,8), dtype=np.uint8)
data_pelv = np.zeros((nEstados,8), dtype=np.uint8)
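#Each row of data_foot/data_pelv holds the 8 servo angles of one gait state,
#stored as 90 + offset so the values fit the uint8 range (see the IK threads below).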
#leg - hip = target
link0 = ik.link.URDFLink("calc_lateral", [0,0, 0], [0,0,0], [1,0,0], use_symbolic_matrix=True, bounds=(-90,90))
link1 = ik.link.URDFLink("calc_frontal", [0,0, 0], [0,0,0], [0,1,0], use_symbolic_matrix=True, bounds=(-90,90))
link2 = ik.link.URDFLink("joelho", [0,0,8.24] , [0,0,0], [0,1,0], use_symbolic_matrix=True, bounds=(-90,90))
link3 = ik.link.URDFLink("quadril", [0,0,6.45], [0,0,0], [0,1,0], use_symbolic_matrix=True, bounds=(-90,90))
#link4 = ik.link.URDFLink("pelves", [0, 1.7, 4], [0, 0, 0], [1, 0, 0], use_symbolic_matrix=True, bounds=(-50,50))
#leg - foot = target
#link5 = ik.link.URDFLink("pelves", [0,0,0], [0,0,0], [1,0,0], use_symbolic_matrix=True, bounds = (-50, 50))
link6 = ik.link.URDFLink("quadril", [0, 0, 0], [0,0,0], [0,1,0], use_symbolic_matrix=True, bounds = (-180, 180))
link7 = ik.link.URDFLink("joelho", [0,0,-6.45], [0,0,0], [0,1,0], use_symbolic_matrix=True, bounds = (-180, 180))
link8 = ik.link.URDFLink("calc_frontal", [0,0,-8.24], [0,0,0], [0,1,0], use_symbolic_matrix=True, bounds = (-180, 180))
#link9 = ik.link.URDFLink("calc_lateral", [0,0,-4.48], [0,0,0], [1,0,0], use_symbolic_matrix=True, bounds = (-30, 30))
#link10 = ik.link.URDFLink("pe", [0,0,-2], [0,0,0], [0,0,0], use_symbolic_matrix=True)
#chains
foot2pelv = ik.chain.Chain([link0, link1, link2, link3], [True, True, True, False])
pelv2foot = ik.chain.Chain([link0, link1, link2, link3], [True, True, True, False])
#start joint positions
jointsf2p = np.deg2rad([0., 15.3, -35., 0.])
jointsp2f = np.deg2rad([0., 15.3, -35., 0.])
#jointsp2f = np.deg2rad([-5., 15., 0.])
"""
pos_test = foot2pelv.forward_kinematics(np.deg2rad([0.,23., -22., 0.]))
print (pos_test[:3, 3])
exit()
"""
#start target position
pos_inicial_pelves = [3.33, 0., 14.]
pos_inicial_pe = [3.33, 0., 14.]
frame_target = np.eye(4)
frame_target[:3, 3] = pos_inicial_pelves
ik1 = foot2pelv.inverse_kinematics(frame_target,initial_position=jointsf2p)
frame_target2 = np.eye(4)
frame_target2[:3, 3] = pos_inicial_pe
ik2 = pelv2foot.inverse_kinematics(frame_target2, initial_position=jointsp2f)
jointsf2p = ik1
jointsp2f = ik2
#FUNCTIONS +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
'''Computes the inverse kinematics of the pelvis.
param: indice(int) - which position of the trajectory array to store the kinematics in, i.e. which moment of the trajectory to compute'''
def thread_cinematica_pelves(indice):
pos = pos_inicial_pelves
    p = (deslocamentoXpes/2)*np.tanh((2*(indice-nEstados/2))/50)  #same curve as the explicit exp form, written with tanh
pos[0] = 0.5*p + 3.33
pos[1] = -deslocamentoYpelves*np.sin(indice*np.pi/nEstados)
frame_target = np.eye(4)
frame_target[:3, 3] = pos
lastpos = []
ik = foot2pelv.inverse_kinematics(frame_target,initial_position=jointsf2p)
ik = np.rad2deg(ik)
roll = -ik[0]
aux = 8.24*math.sin(np.deg2rad(ik[1]))
aux = pos[0] - aux
pitch = math.asin(aux/6.45)
ik = ik.astype(np.int8)
pitch = np.rad2deg(pitch).astype(np.int8)
print (indice, " -- position: ", pos)
    #store the data in the trajectory array
#calc_lateral
data_pelv[indice][0] = 90 + ik[0]
#calc_frontal
data_pelv[indice][1] = 90 + ik[1]
#joelho
data_pelv[indice][2] = 90 + ik[2]
#quadril
data_pelv[indice][3] = 90 + pitch
#pelves
data_pelv[indice][4] = 90 + roll
#torso
data_pelv[indice][5] = 90
#braco
data_pelv[indice][6] = 90
#cotovelo
data_pelv[indice][7] = 90
'''Computes the inverse kinematics of the feet.
param: indice(int) - which position of the trajectory array to store the kinematics in, i.e. which moment of the trajectory to compute'''
def thread_cinematica_pe(indice):
pos = pos_inicial_pe
    pos[0] = 3.33 + 0.5*(-deslocamentoXpes/2)*np.tanh((2*(indice-nEstados/2))/50)
pos[2] = 14. - deslocamentoZpes*np.exp(-((indice-nEstados/2)**2)/600)
frame_target = np.eye(4)
frame_target[:3, 3] = pos
lastpos = []
    if indice == 0:
        last_pos = jointsp2f
    else:
        #warm-start the IK from the previous frame's joint angles
        last_pos = [np.deg2rad(float(data_foot[indice-1][0]-90)),np.deg2rad(float(data_foot[indice-1][1]-90)),np.deg2rad(float(data_foot[indice-1][2]-90)),0.]
ik = pelv2foot.inverse_kinematics(frame_target,initial_position=last_pos)
ik = np.rad2deg(ik)
roll = 4*(data_pelv[indice][0]-90)
aux = 8.24*math.sin(np.deg2rad(ik[1]))
aux = pos[0] - aux
pitch = math.asin(aux/6.45)
ik = ik.astype(np.int8)
pitch = np.rad2deg(pitch).astype(np.int8)
print (indice, " -- position: ", pos)
    #store the data in the trajectory array
#calc_lateral
data_foot[indice][0] = 90 + roll
#calc_frontal
data_foot[indice][1] = 90 + ik[1]
#joelho
data_foot[indice][2] = 90 + ik[2]
#quadril
data_foot[indice][3] = 90 + pitch
#pelves
data_foot[indice][4] = 90 - roll
#torso
data_foot[indice][5] = 90
#braco
data_foot[indice][6] = 90
#cotovelo
data_foot[indice][7] = 90
if indice < 2:
return
erro = 0.
for i in range(8):
erro += (float(data_foot[indice-1][i])-float(data_foot[indice][i]))**2
erro = math.sqrt(erro)
print ("Erro : ", erro)
if erro > 60:
for i in range(8):
data_foot[indice][i] = data_foot[indice-1][i]
'''Creates nEstados threads to compute the inverse kinematics, with the execution interval T partitioned into nEstados states.'''
def calculaTragetoria_pelves():
i = 0
while i < nEstados:
        #create threads to compute the pelvis inverse kinematics
thread = threading.Thread(target=thread_cinematica_pelves, args=(i, ))
thread.daemon=True
thread.start()
thread.join()
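        #note: join() immediately after start() makes each worker finish
        #before the next begins, so this loop is effectively sequential
        #(the same pattern appears in calculaTragetoria_pe below)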
i += 1
def calculaTragetoria_pe():
i = 0
while i < nEstados:
        #create threads to compute the foot inverse kinematics
thread = threading.Thread(target=thread_cinematica_pe, args=(i, ))
thread.daemon=True
thread.start()
thread.join()
i += 1
#SETUP +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
print(np.rint(np.rad2deg(jointsf2p)))
print(np.rint(np.rad2deg(jointsp2f)))
#plot_chain(foot2pelv, juntas=jointsf2p)
#plot_chain(pelv2foot, juntas=jointsp2f)
iner = np.array([0., 0., 0., 0.], dtype=np.float64)  #np.float alias was removed in NumPy >= 1.24
#reading file
try:
with open('data_pelv.txt', 'r') as f:
data_pelv = np.loadtxt('data_pelv.txt').reshape((nEstados,8))
print ("File data_pelv loaded!")
except IOError:
print ("Calculando tragetoria.. ")
calculaTragetoria_pelves()
while threading.active_count() != 1:
os.system("clear")
print ("Calculando tragetoria.. (", threading.active_count(),"/",nEstados,")")
np.savetxt('data_pelv.txt', data_pelv)
try:
with open('data_foot.txt', 'r') as f:
data_foot = np.loadtxt('data_foot.txt').reshape((nEstados,8))
print ("File data_foot loaded!")
except IOError:
print ("Calculando tragetoria.. ")
calculaTragetoria_pe()
while threading.active_count() != 1:
os.system("clear")
print ("Calculando tragetoria.. (", threading.active_count(),"/",nEstados,")")
np.savetxt('data_foot.txt', data_foot)
print (data_foot.shape)
print (data_pelv.shape)
#LOOP +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
start = time.time()
t = 0.
t_fps = 0.
t_state = 0.
t_inercial = 0.
state = 0
fps = 0
while 1:
#sending data
    ######################################## include iner in the rotation vector ##############################################
#thread_cinematica_pe(693)
to_send = [666]+[it - 90 for it in data_pelv[state]]+["---"]+[it - 90 for it in data_foot[state]]+[666]
#timers
dTime = time.time() - start
start = time.time()
t += dTime
t_fps += dTime
t_state += dTime
t_inercial += dTime
#change state
if(t_state >= frameRate):
t_state = 0
state = (state+1)%nEstados
print (state, " -- ", to_send)
#END +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
[
"marcos.v.rodrigues@hotmail.com"
] |
marcos.v.rodrigues@hotmail.com
|
c1d2095b70735c2304cbced854cabac380d14209
|
0843e27390fc52a4095931097cf357eebe6fa666
|
/blender_scripts/tools/svg_bobject.py
|
3803574344e5f68041a40a4000259146fcdc995e
|
[] |
no_license
|
DowLucas/primer
|
d087380adbbeffa3b1255b78708a131b4dfd1ba3
|
0b59bdaef5310d52eaa5b7a68e5e4a5ec707263d
|
refs/heads/master
| 2022-01-27T02:21:36.075525
| 2019-05-02T21:52:27
| 2019-05-02T21:52:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 58,480
|
py
|
import imp
from copy import deepcopy
import winsound
import sys
sys.path.append('C:\\Users\\justi\\Documents\\CodeProjects\\Primer\\blender_scripts')
import bobject
imp.reload(bobject)
from bobject import *
import constants
imp.reload(constants)
from constants import *
import helpers
imp.reload(helpers)
from helpers import *
class SVGBobject(Bobject):
"""docstring for ."""
def __init__(self, *filenames, **kwargs):
super().__init__(**kwargs)
if 'vert_align_centers' in kwargs:
self.vert_align_centers = kwargs['vert_align_centers']
else:
self.vert_align_centers = 'x_and_y'
if 'centered' in kwargs:
self.centered = kwargs['centered']
else:
self.centered = False
if 'color' in kwargs:
self.default_color = kwargs['color']
else:
self.default_color = 'color2'
if RENDER_QUALITY == 'medium' or RENDER_QUALITY == 'high':
default_transition_type = 'morph'
else:
default_transition_type = 'instant'
self.transition_type = self.get_from_kwargs('transition_type', default_transition_type)
self.reindex_points_before_morph = \
self.get_from_kwargs('reindex_points_before_morph', True)
self.lazy_morph = self.get_from_kwargs('lazy_morph', True)
self.min_length = self.get_from_kwargs('min_length', 1)
self.get_file_paths(filenames)
self.import_svg_data()
self.align_figures()
if self.transition_type == 'morph':
print("Making morph chains")
self.make_morph_chains()
print("Processing morph chains")
self.process_morph_chains() #Add splines and points for centering
#and smooth morphing
print("Making rendered curve objects")
self.make_rendered_curve_bobjects()
self.make_lookup_table()
elif self.transition_type == 'instant':
self.rendered_bobject_lists = []
for svg in self.paths:
rendered_bobject_list = []
for curve in self.imported_svg_data[svg]['curves']:
new_curve = curve.ref_obj.children[0].copy()
new_curve.data = curve.ref_obj.children[0].data.copy()
apply_material(new_curve, self.default_color)
new_curve_bobj = bobject.Bobject(
objects = [new_curve],
location = curve.ref_obj.location
)
rendered_bobject_list.append(new_curve_bobj)
self.rendered_bobject_lists.append(rendered_bobject_list)
self.make_lookup_table()
print("SVG Bobject initialized " + str(filenames[0]))
self.copyable_null = None #I think this might be unnecessary
self.active_path = self.paths[0]
def add_to_blender(self, **kwargs):
if 'appear_mode' in kwargs:
appear_mode = kwargs['appear_mode']
else:
appear_mode = 'per_curve'
if self.transition_type == 'instant':
initial_shape = self.rendered_bobject_lists[0]
for bobj in initial_shape:
self.add_subbobject(bobj)
super().add_to_blender(**kwargs)
for shape in self.rendered_bobject_lists[1:]:
for bobj in shape:
bobj.ref_obj.parent = self.ref_obj
bobj.superbobject = self
else:
#This part is a bit fragile because it assumes appear_frame is in kwargs
if appear_mode == 'per_curve':
#Convert time args to frames. Need to do this before passing to
#super so timing can be manipulated.
if 'appear_time' in kwargs:
if 'appear_frame' in kwargs:
raise Warning("You defined both start frame and start time." + \
"Just do one, ya dick.")
kwargs['appear_frame'] = kwargs['appear_time'] * FRAME_RATE
kwargs['appear_time'] = None #Avoid passing non-None appear
#time to super
#Bobject appears early but with each curve at size zero, then
#the curves morph to non-zero size, making it look like the
#curves appear independently.
kwargs['appear_frame'] -= DEFAULT_MORPH_TIME
if 'subbobject_timing' in kwargs:
                if isinstance(kwargs['subbobject_timing'], list):
                    #build a new list; '+=' on the loop variable would not
                    #change the entries in place
                    kwargs['subbobject_timing'] = [t + DEFAULT_MORPH_TIME for t in kwargs['subbobject_timing']]
else:
kwargs['subbobject_timing'] = DEFAULT_MORPH_TIME
if 'transition_time' in kwargs:
transition_time = kwargs['transition_time']
else:
transition_time = DEFAULT_MORPH_TIME
super().add_to_blender(**kwargs)
self.morph_figure(
0,
start_frame = self.appear_frame + DEFAULT_MORPH_TIME,
duration = transition_time
)
def disappear(self, **kwargs):
if 'disappear_mode' in kwargs:
            disappear_mode = kwargs['disappear_mode']
else:
disappear_mode = 'per_curve'
if 'animate' in kwargs:
animate = kwargs['animate']
else:
animate = True
if disappear_mode == 'per_curve' and \
self.transition_type == 'morph' and animate == True:
if 'disappear_time' in kwargs:
if 'disappear_frame' in kwargs:
raise Warning("You defined both disappear frame and disappear time." +\
"Just do one, ya dick.")
kwargs['disappear_frame'] = kwargs['disappear_time'] * FRAME_RATE
kwargs['disappear_time'] = None #Avoid passing non-None appear
#time to super
#Bobject appears early but with each curve at size zero, then
#the curves morph to non-zero size, making it look like the
#curves appear independently.
for bobj in self.rendered_curve_bobjects:
bobj.disappear(disappear_frame = kwargs['disappear_frame'])
kwargs['disappear_frame'] += DEFAULT_MORPH_TIME
super().disappear(**kwargs)
else:
super().disappear(**kwargs)
def make_rendered_curve_bobjects(self):
null = new_null_curve(
parent = self.ref_obj,
location = self.ref_obj.location,
rotation = self.ref_obj.rotation_euler
)
#print("Max spline count is " + str(max_spline_count))
equalize_spline_count(null.objects[0], self.max_spline_count)
bpy.context.scene.objects.link(null.objects[0])
add_points_to_curve_splines(null.objects[0], total_points = self.max_point_count)
bpy.context.scene.objects.unlink(null.objects[0])
self.rendered_curve_bobjects = []
for i in range(len(self.morph_chains)):
#Would just deepcopy null, but that doesn't work on Blender data blocks
dup = null.ref_obj.children[0].copy()
dup.data = null.ref_obj.children[0].data.copy()
apply_material(dup, self.default_color)
rendered_curve = bobject.Bobject(objects = [dup], name = 'rendered')
rendered_curve.ref_obj.location = \
self.morph_chains[i][0].ref_obj.location
rendered_curve.ref_obj.rotation_euler = \
self.morph_chains[i][0].ref_obj.rotation_euler
self.add_subbobject(rendered_curve)
self.rendered_curve_bobjects.append(rendered_curve)
def make_lookup_table(self):
#print('Making lookup table')
if self.transition_type == 'morph':
#This function makes it easier to find the bobjects associated with
#individual curves/characters when coding a scene. This would otherwise
#be hard because the imported curves are copied and mixed into morph
#chains.
self.lookup_table = []
for curve_list in self.lists_of_copies:
self.lookup_table.append([])
for cur in curve_list:
#Find the morph chain where cur appears and add the
#corresponding rendered curve to self.lookup_table[-1].
for i, chain in enumerate(self.morph_chains):
if cur in chain:
self.lookup_table[-1].append(self.rendered_curve_bobjects[i])
break
elif self.transition_type == 'instant':
self.lookup_table = self.rendered_bobject_lists
else:
raise Warning('Lookup table not defined for transition type: ' + \
str(self.transition_type))
def import_svg_data(self):
self.imported_svg_data = {} #Build dictionary of imported svgs to use
#shape keys later and to avoid duplicate
#imports
for path in self.paths:
#Import svg and get list of new curves in Blender
if path not in self.imported_svg_data.keys():
self.imported_svg_data[path] = {'curves' : []}
#This is a dict of dicts for metadata, e.g., center and length
#of tex expressions
if path == None:
null = new_null_curve()
cur = null.ref_obj.children[0]
equalize_spline_count(cur, 1)
self.imported_svg_data[path]['curves'].append(cur)
#print(self.imported_svg_data[path]['curves'])
#print('length: ' + str(len(cur.data.splines)))
#print('length: ' + str(len(cur.data.splines[0].bezier_points)))
continue
previous_curves = [x for x in bpy.data.objects if x.type == 'CURVE']
bpy.ops.import_curve.svg(filepath = path)
new_curves = [x for x in bpy.data.objects if \
x.type == 'CURVE' and x not in previous_curves]
#Arrange new curves relative to tex object's ref_obj
scale_up = TEX_LOCAL_SCALE_UP #* self.scale[0]
for curve in new_curves:
for spline in curve.data.splines:
for point in spline.bezier_points:
point.handle_left_type = 'FREE'
point.handle_right_type = 'FREE'
#This needs to be in a separate loop because moving points before
#they're all 'Free' type makes the shape warp.
#It makes a cool "disappear in the wind" visual, though.
for spline in curve.data.splines:
for point in spline.bezier_points:
for i in range(len(point.co)):
point.co[i] *= scale_up
point.handle_left[i] *= scale_up
point.handle_right[i] *= scale_up
bpy.ops.object.select_all(action = 'DESELECT')
curve.select = True
bpy.ops.object.origin_set(type = "ORIGIN_GEOMETRY")
#This part is just meant for tex_objects
if self.vert_align_centers == True:
loc = curve.location
new_y = new_curves[0].location[1]
bpy.context.scene.cursor_location = (loc[0], new_y, loc[2])
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
curve.select = False
bpy.context.scene.objects.unlink(curve)
self.imported_svg_data[path]['curves'] = new_curves
#Make imported curve objects into bobjects
for path in self.imported_svg_data:
for i, curve in enumerate(self.imported_svg_data[path]['curves']):
curve_bobj = bobject.Bobject(objects = [curve])
#Make the bobject's ref_obj handle location
curve_bobj.ref_obj.location = curve.location
curve.location = [0, 0, 0]
#curve_bobj.add_to_blender(appear_frame = 0)
self.imported_svg_data[path]['curves'][i] = curve_bobj
#if path == None:
# print()
# print(self.imported_svg_data[path])
# print()
#if path == None:
#self.add_subbobject(curve_bobj)
#print(self.imported_svg_data)
bpy.context.scene.update()
def get_file_paths(self, filenames):
self.paths = []
for name in filenames:
path = os.path.join(
SVG_DIR,
name
) + ".svg"
if not os.path.exists(path):
raise Warning("Could not find " + name + ".svg")
self.paths.append(path)
def make_morph_chains(self):
#Need to copy curves to avoid reusing them when looping and linking into
#chains below
self.lists_of_copies = []
for path in self.paths:
copies = []
for curve in self.imported_svg_data[path]['curves']:
obj = curve.ref_obj.children[0].copy()
obj.data = curve.ref_obj.children[0].data.copy()
bobj = bobject.Bobject(
objects = [obj],
location = curve.ref_obj.location,
rotation_euler = curve.ref_obj.rotation_euler,
name = 'curve_copy')
copies.append(bobj)
self.lists_of_copies.append(copies)
self.morph_chains = []
for i, path in enumerate(self.paths):
#print("Adding curves to morph chains for shape " + str(i + 1) + " of " + str(len(self.paths)))
#print()
#print('######################################################')
#print('### Morph chains round ' + str(i) + ' ####################')
#print('######################################################')
#print()
try:
#initial = self.imported_svg_data[self.paths[i]]['curves']
#final = self.imported_svg_data[self.paths[i + 1]]['curves']
initial = self.lists_of_copies[i]
final = self.lists_of_copies[i + 1]
except:
if i + 1 == len(self.paths):
#If there's just one path, add the corresponding curves to
#self.morph_chains so the first figure can still be
#morphed to.
if i == 0:
for j in range(len(initial)):
self.morph_chains.append([initial[j]])
#print("That's the last one!")
break
else:
raise Warning('Something went wrong in make_morph_chains')
if self.lazy_morph == True:
destinations = self.find_lazy_morph_plan(initial, final)
                #For convenience, get the inverse of destinations, called 'sources',
#which is from the perspective of the 'final' expression.
sources = []
for j in range(len(final)):
if j in destinations:
sources.append(destinations.index(j))
else:
sources.append(None)
else:
#length = max(len(initial), len(final))
while len(initial) < len(final):
null_curve = new_null_curve(
parent = initial[-1].ref_obj.parent,
location = initial[-1].ref_obj.location,
rotation = initial[-1].ref_obj.rotation_euler
)
initial.append(null_curve)
while len(final) < len(initial):
null_curve = new_null_curve(
parent = final[-1].ref_obj.parent,
location = final[-1].ref_obj.location,
rotation = final[-1].ref_obj.rotation_euler
)
final.append(null_curve)
destinations = range(len(initial))
sources = destinations
#print('Destinations and sources before pairing:')
#print(' Destinations', destinations)
#print(' Sources', sources)
#print()
#print(" Adding curves to chains")
for j, (cur, dest) in enumerate(zip(initial, destinations)):
if dest != None:
self.add_to_or_make_morph_chain(i, cur, final[dest])
else:
k = j
#curves without a destination will look forward to try to
#pair with a curve that has no source, but won't jump past
#other curves with destinations.
while k < len(sources):
#Don't jump past a char with a destination
if k < len(destinations): #Doing this so the next line works
if destinations[k] != None: break
if sources[k] == None:
self.add_to_or_make_morph_chain(i, cur, final[k])
sources[k] = j
dest = destinations[j] = k
break
k += 1
#print('Destinations and sources after dest -> source match:')
#print(' Destinations', destinations)
#print(' Sources', sources)
#print()
for j, (cur, src) in enumerate(zip(final, sources)):
if src == None:
k = j
#curves without a source will look forward to try to
                    #pair with a curve that has no destination, but won't jump past
#other curves with sources.
#max_index = min(len(destinations), len(sources))
while k < len(destinations):
#Don't jump past a char with a destination
if k < len(sources): #Doing this so the next line works
if sources[k] != None: break
if destinations[k] == None:
self.add_to_or_make_morph_chain(i, initial[k], cur)
sources[j] = k
dest = destinations[k] = j
break
k += 1
#bpy.context.scene.update()
#print('Destinations and sources after source -> dest match:')
#print(' Destinations', destinations)
#print(' Sources', sources)
#print()
#print(" Adding null curves for destination-less curves")
#If dest is still None after trying to pair it with a source,
#just insert a zero-size curve for cur to morph to.
#This section is pretty hacky
for j, dest in enumerate(destinations):
if dest == None:
cur = initial[j]
if j > 0:
k = j
while k >= len(final):
k -= 1
loc_cur = final[k]
else:
loc_cur = final[j]
#print("Discontinuing chain ")
null_curve = new_null_curve(
parent = final[0].ref_obj.parent,
location = loc_cur.ref_obj.location,
rotation = loc_cur.ref_obj.rotation_euler
#reuse_object = self.reusable_empty_curve
)
self.add_to_or_make_morph_chain(i, cur, null_curve)
#print(" Adding null curves for sourceless curves")
#If sources[j] is still None after trying to pair final[j] with
#a source, just insert a zero-size curve for final[j] to morph from.
for j, src in enumerate(sources):
if src == None:
cur = final[j]
if j > 0:
k = j
while k >= len(initial):
k -= 1
loc_cur = initial[k]
else:
loc_cur = initial[j]
#Make the null curve if i == 1, because that means the curve
#to morph from is one that is actually rendered. Otherwise,
#reuse the reusable empty curve.
'''if i == 1:
reuse = None
else:
pass'''
#reuse = self.reusable_empty_curve
#bpy.context.scene.update()
null_curve = new_null_curve(
parent = initial[0].ref_obj.parent,
location = loc_cur.ref_obj.location,
rotation = loc_cur.ref_obj.rotation_euler
#reuse_object = reuse
)
#self.expressions[0]['curves'].append(null_curve)
#print(i)
self.add_to_or_make_morph_chain(i, null_curve, cur)
'''print(destinations)
print(sources)
print()'''
#print(" Okay, done with that chain")
#print(" Adding null curves to extend chains")
#Make sure all the chains are the same length. Relevant, e.g., if
#a char in the first expression disappears in the second expression,
#and a third expression exists. We need to extend the chain of
#zero-size curves to keep later functions from tripping.
chain_length = 0
for chain in self.morph_chains:
chain_length = max(len(chain), chain_length)
for chain in self.morph_chains:
while len(chain) < chain_length:
null_curve = new_null_curve(
parent = final[0].ref_obj.parent,
location = chain[-1].ref_obj.location,
rotation = chain[-1].ref_obj.rotation_euler
#reuse_object = self.reusable_empty_curve
)
chain.append(null_curve)
#self.add_to_or_make_morph_chain(i, chain[-1], null_curve)
#Print chain info
'''for i, chain in enumerate(self.morph_chains):
print(
"Chain " + str(i + 1) + " of " + str(len(self.morph_chains)) + \
" which are each of length " + str(len(chain))
)
chain = [x.ref_obj.children[0].name for x in chain]
print(chain)'''
def add_to_or_make_morph_chain(self, index, char1, char2):
for chain in self.morph_chains:
if char1 == chain[-1]:
chain.append(char2)
'''if 'null' in char2.name:
chain_index = self.morph_chains.index(chain)
print('Discontinuing chain ' + str(chain_index) + \
' after inserting null curve at index ' + str(index + 1))
chain_names = [x.ref_obj.children[0].name for x in chain]
print(chain_names)'''
if len(chain) != index + 2:
raise Warning("Chain lengths messed up")
return
#If that doesn't exit the function, we have a new chain
working_chain = []
scavenged = False
#Scavenge for dropped chains that have ended near the right location
for chain in self.morph_chains:
if len(chain) <= index:
working_chain = chain
scavenged = True
chain_index = self.morph_chains.index(chain)
#print("Scavenged chain")
break
for i in range(len(working_chain), index):
#-1 because we're actually adding two curves to the chain, so the
#chain will have length equal to index + 1 at the end of this.
null_curve = new_null_curve(
parent = self.ref_obj,
location = char1.ref_obj.location,
rotation = char1.ref_obj.rotation_euler
#reuse_object = self.reusable_empty_curve
)
working_chain.append(null_curve)
working_chain.append(char1)
working_chain.append(char2)
if scavenged == False:
#print("A new chain, which means a curve had no source ")
self.morph_chains.append(working_chain)
if len(working_chain) != index + 2:
raise Warning("Chain lengths messed up")
def find_lazy_morph_plan(self, expr1, expr2, min_length = None):
#max length of substring we bother keeping
#Increments if shared is still too long
if min_length == None:
min_length = self.min_length #Default = 1
max_shared = 10 #8! is 40320
shared = get_shared_substrings(expr1, expr2)
for i in range(len(shared)):
if shared[-i][2] < min_length:
shared[-i] = None
shared = [sub for sub in shared if sub != None]
while len(shared) > max_shared:
min_length += 1
removed = 0
for i in range(len(shared)):
if len(shared) - removed <= max_shared:
break
if shared[-i][2] <= min_length:
shared[-i] = None
removed += 1
shared = [sub for sub in shared if sub != None]
#raise Warning("Shit's cray")
combos = get_substring_combos(shared)
best_option = [[0, 0, 0]]
highest_total = 0
for combo in combos:
total = 0
for substring in combo:
total += substring[2] ** 2
if total > highest_total:
highest_total = total
best_option = combo
destinations = []
for j in range(len(expr1)):
destination = None
for plan in best_option:
if j in range(plan[0], plan[0] + plan[2]):
destination = j + plan[1] - plan[0]
destinations.append(destination)
#print(best_option)
#print("Here's the plan:")
#print(destinations)
return destinations
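    #Worked example (editor's note): for curve lists standing for '12' and
    #'123', get_shared_substrings finds [[0, 0, 2]] (the shared '12'), the
    #best combo keeps it, and destinations comes out [0, 1]: both initial
    #curves stay put, while the extra '3' later gets a null source curve
    #in make_morph_chains.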
def process_morph_chains(self):
self.max_spline_count = 0
self.max_point_count = CONTROL_POINTS_PER_SPLINE
for chain in self.morph_chains:
for link in chain:
spline_count = len(link.ref_obj.children[0].data.splines)
self.max_spline_count = max(spline_count, self.max_spline_count)
for spline in link.ref_obj.children[0].data.splines:
point_count = len(spline.bezier_points)
self.max_point_count = max(point_count, self.max_point_count)
#print(self.max_spline_count)
self.prep_log = []
count = 0
for chain in self.morph_chains:
count += 1
#print('Processing morph chain ' + str(count) + ' of ' + str(len(self.morph_chains)))
for link in chain:
already_processed = False
#cur = link.ref_obj.children[0]
'''for entry in self.prep_log:
if are_chars_same(link.ref_obj.children[0], entry[0]):
already_processed = True
#Out with the old
old = link.ref_obj.children[0]
try:
link.objects.remove(old)
except:
pass
old.parent = None
#In with the new
new = entry[1].copy()
new.data = entry[1].data.copy()
new.parent = link.ref_obj
link.objects.append(new)
break'''
if already_processed == False:
#print("Processing new curve")
#entry = []
#unprocessed = link.ref_obj.children[0].copy()
#unprocessed.data = link.ref_obj.children[0].data.copy()
#bpy.context.scene.objects.link(unprocessed)
#entry.append(unprocessed)
equalize_spline_count(link.ref_obj.children[0], self.max_spline_count)
bpy.context.scene.objects.link(link.ref_obj.children[0])
#for spline in link.ref_obj.children[0].data.splines:
# print('There were ' + str(len(spline.bezier_points)))
add_points_to_curve_splines(link.ref_obj.children[0], total_points = self.max_point_count)
bpy.context.scene.objects.unlink(link.ref_obj.children[0])
#for spline in link.ref_obj.children[0].data.splines:
# print('Now there are ' + str(len(spline.bezier_points)))
#processed = link.ref_obj.children[0].copy()
#processed.data = link.ref_obj.children[0].data.copy()
#entry.append(processed)
#self.prep_log.append(entry)
def morph_figure(
self,
final_index,
start_time = None,
start_frame = None,
duration = DEFAULT_MORPH_TIME,
transition_type = None
):
if start_time != None:
if start_frame != None:
raise Warning("You defined both start frame and start time." +\
"Just do one, ya dick.")
start_frame = int(start_time * FRAME_RATE)
#print('Morphing ' + str(self.ref_obj.name) + ' to shape ' + str(final_index + 1) + \
# ' of ' + str(len(self.paths)))
self.active_path = self.paths[final_index]
#duration = 60
end_frame = start_frame + duration
morph_pairs = []
#print('Start frame = ' + str(start_frame))
#print('End frame = ' + str(end_frame))
if transition_type == None:
transition_type = self.transition_type
if transition_type == 'morph':
for curve, chain in zip(self.rendered_curve_bobjects, self.morph_chains):
morph_pairs.append([curve, chain[final_index]])
for char1, char2 in morph_pairs:
char1 = char1.objects[0]
char2 = char2.objects[0]
self.morph_curve(char1, char2)
#Keyframes
#Character location relative to parent
#This ensures preservation of overall expression arrangement
char1.parent.keyframe_insert(data_path = "location", frame = start_frame)
char1.parent.location = char2.parent.location
char1.parent.keyframe_insert(data_path = "location", frame = end_frame)
char1.parent.keyframe_insert(data_path = "rotation_euler", frame = start_frame)
char1.parent.rotation_euler = char2.parent.rotation_euler
char1.parent.keyframe_insert(data_path = "rotation_euler", frame = end_frame)
#Shape keys
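                #These are absolute shape keys (use_relative is False), so
                #animating eval_time between key block frames plays the morph.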
eval_time = char1.data.shape_keys.key_blocks[-2].frame
char1.data.shape_keys.eval_time = eval_time
char1.data.shape_keys.keyframe_insert(
data_path = 'eval_time',
frame = start_frame
)
eval_time = char1.data.shape_keys.key_blocks[-1].frame
char1.data.shape_keys.eval_time = eval_time
char1.data.shape_keys.keyframe_insert(
data_path = 'eval_time',
frame = end_frame
)
char1.data.shape_keys.eval_time = 0
else:
initial = self.rendered_bobject_lists[final_index - 1]
for bobj in initial:
bobj.disappear(
animate = False,
disappear_frame = start_frame
)
final = self.rendered_bobject_lists[final_index]
for bobj in final:
bobj.add_to_blender(
animate = False,
appear_frame = start_frame
)
def add_morph_shape_keys(self, initial, final):
if len(initial.data.splines) != len(final.data.splines):
#winsound.MessageBeep(type = MB_ICONEXCLAMATION)
print("#" + str(initial.name) + " has " + str(len(initial.data.splines)) + \
" splines and " + str(final.name) + " has " + \
str(len(final.data.splines)) + " splines, which is not the same number.")
print("#This means something went wrong when processing morph chains.")
print('#I gotchu this time, but you might wanna take a look back and fix the underlying issue.')
if len(initial.data.splines) < len(final.data.splines):
raise Warning("Oh dang. I actually don't gotchu. The rendered " + \
"curve is missing splines, I think.")
bpy.context.scene.objects.link(final)
equalize_spline_count(final, self.max_spline_count)
add_points_to_curve_splines(final)
bpy.context.scene.objects.unlink(final)
was_hidden = False
if initial.hide:
was_hidden = True
initial.hide = False
bpy.context.scene.objects.active = initial
bpy.ops.object.mode_set(mode = 'OBJECT')
#If absolute shape keys exist, set eval_time to zero
try:
initial.data.shape_keys.eval_time = 0
except:
pass
bpy.ops.object.shape_key_add(from_mix=False)
initial.data.shape_keys.use_relative = False
#For some reason, the default 'CARDINAL' interpolation setting caused
#bouncing, which would occasionally enlarge splines that should have
#been size zero, messing with the fill.
initial.data.shape_keys.key_blocks[-1].interpolation = 'KEY_LINEAR'
#bpy.ops.object.shape_key_retime()
#If there's only one shape key, it's the basis shape key.
if len(initial.data.shape_keys.key_blocks) == 1:
#We should add another shape key, which will get a keyframe
bpy.ops.object.shape_key_add(from_mix=False)
initial.data.shape_keys.key_blocks[-1].interpolation = 'KEY_LINEAR'
#initial.data.shape_keys.use_relative = False
#bpy.ops.object.shape_key_retime()
bpy.ops.object.mode_set(mode = 'EDIT')
#This might be a bit confusing, caused by the fact that I mixed up
#length rank and index in my original names and implementation.
#Could probably reimplement or at least change names.
initial_spline_length_ranks = get_list_of_spline_length_ranks(initial)
final_spline_length_ranks = get_list_of_spline_length_ranks(final)
for i in range(len(initial.data.splines)):
#Get the points of the ith spline
initial_points = initial.data.splines[i].bezier_points
#Okay, before we get the final points, we need to find the index of
#the spline with the same length rank as the ith initial spline.
#In the initial char, what is the length rank of the ith spline?
initial_length_rank = initial_spline_length_ranks[i]
#In the final char, what is the index of the corresponding length rank?
final_index = final_spline_length_ranks.index(initial_length_rank)
#Get the points of the final spline with the right length index
final_points = final.data.splines[final_index].bezier_points
#Double check that the splines have the same number of points
if len(initial_points) != len(final_points):
print('#' + str(initial.name) + " has " + str(len(initial_points)) + \
" points in spline " + str(i+1) + " and " + \
str(final.name) + " has " + str(len(final_points)) + \
" points in spline " + str(i+1) + \
" which is not the same number.")
print("#This means something went wrong when processing morph chains.")
print('#I gotchu this time, but you might wanna take a look back and fix the underlying issue.')
num_points = max(len(final_points), len(initial_points))
if len(initial_points) < len(final_points):
#bpy.context.scene.objects.link(initial)
add_points_to_curve_splines(initial, total_points = num_points)
#bpy.context.scene.objects.unlink(initial)
#raise Warning("Oh dang. I actually don't gotchu. The rendered " + \
# "curve is missing points, I think.")
else:
bpy.context.scene.objects.link(final)
add_points_to_curve_splines(final, total_points = num_points)
bpy.context.scene.objects.unlink(final)
#Assign final_points values to initial_points
for j in range(len(initial_points)):
initial_points[j].co = final_points[j].co
initial_points[j].handle_left = final_points[j].handle_left
initial_points[j].handle_right = final_points[j].handle_right
bpy.ops.object.mode_set(mode = 'OBJECT')
if was_hidden:
initial.hide = True
def morph_curve(self, initial, final):
#equalize_spline_count(initial, final)
char_set = [initial, final]
for char in char_set:
if self.reindex_points_before_morph == True:
for spline in char.data.splines:
reindex_to_top_point(spline)
#add_points_to_curve_splines(char, CONTROL_POINTS_PER_SPLINE)
self.add_morph_shape_keys(initial, final)
def calc_lengths(self):
for expr in self.imported_svg_data:
curves = self.get_figure_curves(expr)
right_most_x = -math.inf
for char in curves:
#char is a bobject, so reassign to the contained curve
char = char.objects[0]
for spline in char.data.splines:
for point in spline.bezier_points:
candidate = char.matrix_local.translation[0] + \
char.parent.matrix_local.translation[0] + \
point.co[0] * char.scale[0]
if right_most_x < candidate:
right_most_x = candidate
left_most_x = math.inf
for char in curves:
char = char.objects[0]
for spline in char.data.splines:
for point in spline.bezier_points:
candidate = char.matrix_local.translation[0] + \
char.parent.matrix_local.translation[0] + \
point.co[0] * char.scale[0]
if left_most_x > candidate:
left_most_x = candidate
length = right_most_x - left_most_x
center = left_most_x + length / 2
self.imported_svg_data[expr]['length'] = length * self.scale[0]
#Tbh, I don't remember why only the length is scaled
self.imported_svg_data[expr]['centerx'] = center
self.imported_svg_data[expr]['beginning'] = left_most_x #Untested
self.imported_svg_data[expr]['end'] = right_most_x
#Vertical stuff
top_most_y = -math.inf
for char in curves:
#char is a bobject, so reassign to the contained curve
char = char.objects[0]
for spline in char.data.splines:
for point in spline.bezier_points:
candidate = char.matrix_local.translation[1] + \
char.parent.matrix_local.translation[1] + \
point.co[1] * char.scale[1]
if top_most_y < candidate:
top_most_y = candidate
bottom_most_y = math.inf
for char in curves:
char = char.objects[0]
for spline in char.data.splines:
for point in spline.bezier_points:
candidate = char.matrix_local.translation[1] + \
char.parent.matrix_local.translation[1] + \
point.co[1] * char.scale[1]
if bottom_most_y > candidate:
bottom_most_y = candidate
height = top_most_y - bottom_most_y
center = bottom_most_y + height / 2
self.imported_svg_data[expr]['top'] = top_most_y
self.imported_svg_data[expr]['bottom'] = bottom_most_y
self.imported_svg_data[expr]['height'] = height * self.scale[1]
self.imported_svg_data[expr]['centery'] = (top_most_y + bottom_most_y) / 2
def get_figure_curves(self, fig):
#Really just here to be overridden by tex_bobject
return self.imported_svg_data[fig]['curves']
def align_figures(self):
self.calc_lengths()
for fig in self.imported_svg_data:
self.align_figure(fig)
def align_figure(self, fig):
data = self.imported_svg_data
curve_list = data[fig]['curves']
offset = list(curve_list[0].ref_obj.location)
if self.centered == True:
cen = data[fig]['centerx']
offset[0] = cen
elif self.centered == 'right':
offset[0] = data[fig]['end']
elif self.centered == 'top_centered':
offset[0] = data[fig]['centerx']
offset[1] = data[fig]['top']
elif self.centered == 'x_and_y':
offset[0] = data[fig]['centerx']
offset[1] = data[fig]['centery']
else:
offset[0] = data[fig]['beginning']
for i in range(len(curve_list)):
#For some reason, just subtracting the vector-valued locations
#doesn't work here. I'm baffled. Anyway, it works to convert to
#lists and subtract by element.
loc = list(curve_list[i].ref_obj.location)
new_loc = add_lists_by_element(loc, offset, subtract = True)
curve_list[i].ref_obj.location = new_loc
curve_list[i].ref_obj.parent = self.ref_obj
return curve_list #Used in subclass
class SVGFromBlend(SVGBobject):
def __init__(self, *filenames, **kwargs):
super().__init__(*filenames, **kwargs)
def get_file_paths(self, filenames):
#Should just be one file for SVGFromBlend, prepping to import.
#Might be multiple strings in the format helpers.import_object takes
self.paths = list(filenames)
def import_svg_data(self):
#import from the .blend file and add curves to self.imported_svg_data,
#mimicking the data structure of regular svg bobjects
paths = self.paths
self.imported_svg_data = {}
#For this type of object, the path list items are lists, which can
#have multiple strings to feed to import_objects()
for i, path in enumerate(paths):
if path == None:
name = path
#null = new_null_curve()
#cur = null.ref_obj.children[0]
#equalize_spline_count(cur, 1)
#self.imported_svg_data[path]['curves'].append(cur)
#print(self.imported_svg_data[path]['curves'])
#print('length: ' + str(len(cur.data.splines)))
#print('length: ' + str(len(cur.data.splines[0].bezier_points)))
#continue
else:
name = str(path)
'''if isinstance(name, list):
name = name[0]
name = str(name)
print(name)'''
self.imported_svg_data[name] = {'curves' : []}
new_curve_bobj = self.import_and_modify_curve(i, path)
#self.modify_curves(new_curve_bobj.ref_obj.children[0].children[0])
#self.modify_curves()
new_curves = []
#These will all have container objects because they were likely
#made as regular svgbobjects the first time, so just take the actual
#curves.
for obj in new_curve_bobj.ref_obj.children:
new_curves.append(obj.children[0])
#print(new_curves[-1].type)
#self.imported_svg_data[name]['curves'] = new_curves
#After calling import_objects(), it's best for paths to not be lists
#for i in range(len(self.paths)):
# self.paths[i] = str(self.paths[i])
for j, curve in enumerate(new_curves):
curve_bobj = bobject.Bobject(objects = [curve])
#Make the bobject's ref_obj handle location
curve_bobj.ref_obj.location = curve.location
curve.location = [0, 0, 0]
curve_bobj.ref_obj.rotation_euler = curve.rotation_euler
curve.rotation_euler = [0, 0, 0]
self.imported_svg_data[name]['curves'].append(curve_bobj)
#print(self.imported_svg_data)
#print(self.paths)
def import_and_modify_curve(self, index, path):
#This is unfinished. Something is wrong with the way it makes the
#rendered curve objects out of the imported ones.
#Extended by subclass
#index is needed for subclass implementation to know which curve it's
#modifying.
imported = import_object(path, 'svgblend')
new_curve_bobj = bobject.Bobject(
objects = imported.ref_obj.children[0].children,
name = 'imported_svg_object'
)
#new_curve_bobj.add_to_blender(appear_frame = 0)
return new_curve_bobj
def reindex_to_top_point(spline):
#Make it so the highest control point is at index 0
#This eliminates net rotation of points around the curve as they transition
#from the starting char to the target char
#Rotation would be fine, but they actually just go in a straight line,
#causing the curve to sometimes fold on itself
points = spline.bezier_points
#Find index of highest point in curve
index_highest = 0
for i in range(len(points)):
if points[i].co[1] > points[index_highest].co[1]: #Compare y values
index_highest = i
#copy point data to lists
positions = []
left_handles = []
right_handles = []
for point in points:
positions.append(deepcopy(point.co))
left_handles.append(deepcopy(point.handle_left))
right_handles.append(deepcopy(point.handle_right))
#re-index copied lists
for i in range(index_highest):
positions.append(positions.pop(0))
left_handles.append(left_handles.pop(0))
right_handles.append(right_handles.pop(0))
#Would just do this:
# points.append(points.pop(0))
    #but points has type bpy_prop_collection, which doesn't have
#list methods and is immutable
#assign values to blender bezier points
for i in range(len(points)):
points[i].co = positions[i]
points[i].handle_left = left_handles[i]
points[i].handle_right = right_handles[i]
def add_points_to_curve_splines(
curve,
total_points = CONTROL_POINTS_PER_SPLINE,
closed_loop = True
):
#if len(curve.data.splines[0].bezier_points) < total_points:
was_hidden = False
if curve.hide:
was_hidden = True
curve.hide = False
bpy.context.scene.objects.active = curve
bpy.ops.object.mode_set(mode = 'EDIT')
#Use subdivides to make control points that don't affect shape
for spline in curve.data.splines:
points = spline.bezier_points
while len(spline.bezier_points) < total_points:
#find longest segment to subdivide, ignores curvature
longest = 0
start_index = 0
end_index = 1
for j in range(len(points)):
if closed_loop == True:
k = (j + 1) % len(points)
sep = points[k].co - points[j].co
length = sep.length
else:
if j == len(points) - 1:
k = j
else:
k = j + 1
length = points[k].co[0] - points[j].co[0]
#This is a hacky way of making it work for graph curves
                    #by making it as uniform as possible along x.
#Doesn't make sense in general.
if length > longest:
start_index = j
end_index = k
longest = length
#subdivide longest segments
points[start_index].select_control_point = True
points[end_index].select_control_point = True
#execute_and_time("Get ready to subdivide")
#execute_and_time(
# "Subdivide",
bpy.ops.curve.subdivide()
#)
for point in points:
point.select_control_point = False
bpy.ops.object.mode_set(mode = 'OBJECT')
if was_hidden:
curve.hide = True
def equalize_spline_count(curve1, target):
splines1 = curve1.data.splines
if isinstance(target, int):
spline_count = target
else:
splines2 = target.data.splines
spline_count = max(len(splines1), len(splines2))
while len(splines1) < spline_count:
new_spline = splines1.new('BEZIER')
new_spline.bezier_points.add(count = 2)
new_spline.use_cyclic_u = True
if not isinstance(target, int):
while len(splines2) < spline_count:
new_spline = splines2.new('BEZIER')
new_spline.bezier_points.add(count = 2)
new_spline.use_cyclic_u = True
def get_list_of_spline_length_ranks(curve):
splines = curve.data.splines
curve_splines_ranked_by_length = []
#get a list of splines and sort them by length
#we have to do this because 'splines' is a bpy_prop_collection, not a list
#meaning it doesn't have list methods.
for spline in splines:
curve_splines_ranked_by_length.append(spline)
curve_splines_ranked_by_length.sort(key = lambda x: get_spline_length(x), \
reverse=True)
list_of_length_ranks = []
for spline in splines:
rank = curve_splines_ranked_by_length.index(spline)
list_of_length_ranks.append(rank)
return list_of_length_ranks
def get_spline_length(spline):
points = spline.bezier_points
length = 0
for j in range(len(points)):
k = (j + 1) % len(points)
sep = points[k].co - points[j].co
length += sep.length
return length
def new_null_curve(
parent = None,
location = (0, 0, 0),
rotation = (0, 0, 0),
#color = 'color5',
reuse_object = None
):
#print(" Adding null curve")
#if reuse_object == None:
data = bpy.data.curves.new(name = 'no_curve_data', type = 'CURVE')
obj = bpy.data.objects.new(name = 'no_curve', object_data = data)
#else:
# print('Reusing object!!!!!!')
# obj = reuse_object
bobj = bobject.Bobject(objects = [obj], name = 'null')
#obj.parent = bobj.ref_obj
#bobj.objects.append(obj)
bobj.ref_obj.parent = parent
#print(matrix_local)
bobj.ref_obj.location = location
bobj.ref_obj.rotation_euler = rotation
#print(bobj.ref_obj.matrix_local)
#bpy.context.scene.objects.link(new_null)
#if reuse_object == None:
# bobj.add_to_blender(animate = False)
#apply_material(obj, color)
#bpy.data.scenes[0].update()
#print(' Done adding null curve')
return bobj
def get_shared_substrings(expr1, expr2):
#not actually strings, but a series of curves that represent letters, mostly
curves1 = expr1
curves2 = expr2
shared = []
for i in range(len(curves1)):
j = 0
for j in range(len(curves2)):
length = 0
length = get_match_length(length, i, j, curves1, curves2)
if length > 0:
candidate = [i, j, length]
#Check whether candidate is redundant with a substring we
#already found. E.g., without this, comparing '01' with '012'
#would find the '01' and '1' substrings. We just want the longer
#one.
redundant = False
'''
#Actually, think we want redundancy, at least until speed becomes
#an issue. Without redundancy, morphing '123' to '1223' would
#result in the shared '3' being discarded, since it's redundant
#with the shared '23'. This is usually good, but because the
#original '2' is in two shared substrings, one (the '23') is
#discarded. In this case, the '3' won't be captured, even though
#it's not redundant with any substring that actually gets used.
#The truly redundant substrings will get tossed later when
#choosing the highest-scoring set to actually morph.
#Only smaller redundant substrings toward the right of larger
#substrings will be preserved. That's okay, because when
#substrings overlap, the left-most overlapping string is used.
#Since the left-most strings are never tossed, no redundancy is
#needed for backup.
'''
'''
Aaaaaaaactually, fuck redundancy. Things got slow.
'''
for substring in shared:
start1_diff = candidate[0] - substring[0]
start2_diff = candidate[1] - substring[1]
length_diff = candidate[2] - substring[2]
if start1_diff == start2_diff == -length_diff:
redundant = True
if redundant == False:
shared.append(candidate)
return(shared)
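#Editor's note: each entry of 'shared' is [start1, start2, length]; e.g.
#comparing curve lists standing for '012' and '12x' yields [[1, 0, 2]] for
#the shared '12' (the lone '2' match is dropped as redundant).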
def are_chars_same(char1, char2):
splines1 = char1.data.splines
splines2 = char2.data.splines
if len(splines1) != len(splines2):
return False
for spline1, spline2 in zip(splines1, splines2):
points1 = spline1.bezier_points
points2 = spline2.bezier_points
for point1, point2 in zip(points1, points2):
for coord1, coord2 in zip(point1.co, point2.co):
if round(coord1, 3) == round(coord2, 3):
#When the svg is imported, coords are stored to many decimal
#points. Even in characters we'd call equivalent, there is
#some fluctuation in the less significant digits, so
#rounding here yields the desired behavior.
pass
else:
return False
return True
def get_match_length(length, char1_index, char2_index, curves1, curves2):
if are_chars_same(curves1[char1_index].objects[0], curves2[char2_index].objects[0]):
length += 1
char1_index += 1
char2_index += 1
try:
length = get_match_length(length, char1_index, char2_index, curves1, curves2)
return length
except:
return length
else:
if length > 0:
pass
return length
def get_substring_combos(substrings):
combos = []
combo_in_progress = []
combos = add_non_overlapping_substrings(combo_in_progress, combos, substrings)
return combos
def add_non_overlapping_substrings(combo_in_progress, combos, substrings):
if len(combo_in_progress) > 0:
#Start checking substrings with the one after the last one added.
starting_index = substrings.index(combo_in_progress[-1]) + 1
#starting_index = 0
else:
starting_index = 0
for i in range(starting_index, len(substrings)):
#check if substring works
candidate = substrings[i]
no_overlap = True
        #check if substring overlaps with any substring already
#in combo_in_progress. If so, don't add it to combos.
for sub in combo_in_progress:
#E.g., sub = [0, 0, 1] and candidate = [3, 0, 1] overlap
no_overlap_in_1 = candidate[0] >= sub[0] + sub[2] or \
candidate[0] + candidate[2] <= sub[0]
no_overlap_in_2 = candidate[1] >= sub[1] + sub[2] or \
candidate[1] + candidate[2] <= sub[1]
no_overlap = (no_overlap_in_1 and no_overlap_in_2)
if no_overlap == False:
break
if no_overlap == True:
new_combo = deepcopy(combo_in_progress)
new_combo.append(candidate)
combos.append(new_combo)
combos = add_non_overlapping_substrings(new_combo, combos, substrings)
return combos
def main():
print_time_report()
if __name__ == "__main__":
main()
|
[
"justin.r.helps@gmail.com"
] |
justin.r.helps@gmail.com
|
94142f103a81d05da488cf0826ef22b6aefe9da9
|
43800bc4600961119339c7e1dd6fd9e685d8f991
|
/reto.py
|
e2792bdadc539bf5854a405a72039a1b7f6dc173
|
[] |
no_license
|
KevinMichelle/Repositorio_RSA
|
91219ae5f35c04516dddbf34f86ca4a1f1199ebf
|
c347cee2a50e577039611312773dd79744ed4e24
|
refs/heads/master
| 2016-09-05T15:19:41.881708
| 2014-10-16T10:15:56
| 2014-10-16T10:15:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,055
|
py
|
import sys
import random
def sonNumeros(arreglo):
areNumber = True
for i in xrange(1, len(arreglo)):
        if not arreglo[i].isdigit(): # If any of the arguments is not a number
areNumber = False
break
return areNumber
def f_x(x):
return (x * 2)
def encrip(mensaje, clave, n):
return pow(mensaje, clave, n)
if len(sys.argv) != 5:
print 'El numero de argumentos es invalido'
else:
areNumber = sonNumeros(sys.argv)
if areNumber:
x_servidor = int(sys.argv[1])
fx_servidor = int(sys.argv[2])
e_servidor = int(sys.argv[3])
n_servidor = int(sys.argv[4])
fx_cliente = f_x(x_servidor)
recuperar_mensaje = pow(fx_servidor, e_servidor, n_servidor)
print
print "La f(x) que se espera del servidor es {}.".format(fx_cliente)
print "El mensaje recuperado es {}.".format(recuperar_mensaje)
print
if fx_cliente == recuperar_mensaje:
print 'El servidor es quien dice que es'
else:
print 'El servidor no es quien dice que es, puede ser un sitio peligroso.'
else:
print 'Todos los argumentos deben ser numeros enteros.'
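# Editor's sketch of the protocol this script verifies (interpretation of the
# code above, names as in the script): the client sent the server a challenge
# x; the server replied with f(x) = 2*x "signed" under its RSA private key d,
# so fx_servidor should equal f(x)**d mod n. The client checks with the public
# key (e_servidor, n_servidor):
#   pow(fx_servidor, e_servidor, n_servidor) == f_x(x_servidor)
# which only holds if the responder knows d, authenticating the server.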
|
[
"michelleglz93@gmail.com"
] |
michelleglz93@gmail.com
|
373796447841d57b9fab8651bb3abfafb95db738
|
9b129a2b02072914d34e3c170e4ccc7655d4cdcf
|
/app.py
|
4171781ce3ff2814f010747a825c4da66f913074
|
[] |
no_license
|
Vsevololod/docker-web
|
7b69cafd1af88c8b34f4d94593ce3b9eea7b54e2
|
cdd895590a5218b37a65bbe850057060ce4b6042
|
refs/heads/master
| 2020-03-12T20:44:17.407642
| 2018-04-24T07:28:22
| 2018-04-24T07:28:22
| 130,812,629
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,814
|
py
|
from flask import *
from flask_bootstrap import Bootstrap
from flask_ldap import LDAP
from flask_ldap import login_required
import requests
import docker
app = Flask(__name__)
Bootstrap(app)
app.debug = True
app.config['LDAP_HOST'] = '195.19.252.68'
app.config['LDAP_DOMAIN'] = 'cc.spbu.ru'
app.config['LDAP_SEARCH_BASE'] = 'CN=Users,DC=cc,DC=spbu,DC=ru'
app.config['LDAP_REQUIRED_GROUP'] = 'CN=docker,CN=Users,DC=cc,DC=spbu,DC=ru'
ldap = LDAP(app)
app.secret_key = "welfhwdlhwdlfhwelfhwlehfwlehfelwehflwefwlehflwefhlwefhlewjfhwelfjhweflhweflhwel"
app.add_url_rule('/login', 'login', ldap.login, methods=['GET', 'POST'])
@app.route('/')
@login_required
def index():
if 'username' in session:
if 'volume' in session:
return redirect("/run_containers".format(escape(session['username'])))
else:
return redirect("/virtual_drive_setup")
return 'You are not logged in'
@app.route('/run_containers', methods=['GET'])
@login_required
def run_get():
if 'username' in session:
r = requests.get('https://docker-hub.cc.spbu.ru/v2/_catalog')
cont = r.json()['repositories']
return render_template("settings.html", name=session["username"], volume=session["volume"], containers=cont)
return 'error'
@app.route('/run_containers', methods=['POST'])
@login_required
def run_post():
if 'username' in session:
cli = docker.APIClient(base_url='unix://var/run/docker.sock')
container_name = request.form.get("container")
full_container_name = "docker-hub.cc.spbu.ru:443/" + container_name
pull_res = cli.pull(full_container_name)
command = request.form.get("command")
create_instance = cli.create_container(full_container_name, command,
volumes=["/test"],
name=session['username'] +"_"+ container_name,
host_config=cli.create_host_config(binds={
session["volume"]: {
'bind': '/test',
'mode': 'rw',
}}),
)
cli.start(create_instance["Id"])
return str(create_instance)
return 'error'
@app.route('/virtual_drive_setup', methods=['POST'])
@login_required
def virtual_drive_setup_post():
if 'username' in session:
url2 = request.form.get("url")
name2 = request.form.get("name")
pass2 = request.form.get("pass")
path2 = request.form.get("path")
r2 = requests.request('PROPFIND', url2, auth=(name2, pass2))
if 199 < r2.status_code < 300:
cli = docker.APIClient(base_url='unix://var/run/docker.sock')
volume = cli.create_volume(name=name2 + "_user_cont", driver='fentas/davfs',
driver_opts={
'url': "https://{}:{}@{}/{}".format(name2, pass2, url2[8:], path2),
'uid': '1000',
'gid': '1000',
},
labels={"key": "value"})
session["volume"] = volume["Name"]
return redirect('/run_containers')
return redirect("/virtual_drive_setup")
return 'error'
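# Editor's note on the flow above: a 2xx PROPFIND response confirms the WebDAV
# credentials, then a named volume is created with the fentas/davfs driver so
# that later containers (see run_post) can bind-mount the user's remote drive
# at /test.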
@app.route('/virtual_drive_setup', methods=['GET'])
@login_required
def virtual_drive_setup_get():
if 'username' in session:
return render_template("virtual_drive_setup.html", user=session["mail"])
return 'error'
if __name__ == '__main__':
app.run()
|
[
"vsevolodtachii@gmail.com"
] |
vsevolodtachii@gmail.com
|
2efa64d46e5d91e597a0c42442ffc6eb5ff9156b
|
c925a13ca720d085bb661be1466e0c56456af97e
|
/src/antpy/path.py
|
6a1843628d0dc6d42358145ef94d7c280f34768b
|
[
"MIT"
] |
permissive
|
IvanAntipov/AntPy
|
0e754727e6c090cce554ede6ef07f052ac1eb705
|
361a569141518bebd13a46917267567add8e138e
|
refs/heads/main
| 2023-08-06T04:29:33.149181
| 2021-09-14T10:11:36
| 2021-09-14T10:11:36
| 389,035,321
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
import datetime
import string
import random
def generate_random_filename():
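    # The timestamp prefix keeps names chronologically sortable; the random
    # 5-letter suffix guards against collisions within the same second,
    # e.g. "20240101123456_abcde".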
ts = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
file_name = ''.join(random.choice(string.ascii_lowercase) for i in range(5))
return f"{ts}_{file_name}"
|
[
"imantipov@activebc.ru"
] |
imantipov@activebc.ru
|
252d0b54e5f8b124952be5653984cfe5aa8991e7
|
b2ecdc83c8d8b7038ff293daa442d3162fc3772b
|
/rkWebApp/views.py
|
00df447c7995029f3c1090fae25d3b33db7b7cae
|
[] |
no_license
|
simonrakovic/RakovicKarmenDjango
|
efef88316f45876cf658acd774ece58693099ecc
|
83f88b5a62fc1dcc5a99bddfba91b1af149b7251
|
refs/heads/master
| 2021-01-21T13:17:50.001362
| 2016-05-26T09:27:27
| 2016-05-26T09:27:27
| 45,276,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,123
|
py
|
# -*- coding: utf-8 -*-
from django.contrib import messages
from django.core.mail import EmailMessage
from django.shortcuts import render, render_to_response
from django.template.loader import render_to_string
# Create your views here.
from django.template import RequestContext
from rkWebApp.forms import QuestionForm
from rkWebApp.models import Novica, Files
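# NOTE: render_to_response(..., context_instance=RequestContext(request)) is
# the pre-Django-1.10 idiom; on modern Django these calls would be replaced
# with django.shortcuts.render(request, template, context).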
def home(request):
novice = Novica.objects.all().order_by('-id')[:3]
return render_to_response('home.html', locals(), context_instance=RequestContext(request))
def podjetje(request):
return render_to_response('podjetje.html', locals(), context_instance=RequestContext(request))
def storitve(request):
return render_to_response('storitve.html', locals(), context_instance=RequestContext(request))
def povprasevanje(request):
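    # Show the inquiry form; on a valid POST, render the submitted data into
    # an HTML e-mail template and send it to the site owner.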
form = QuestionForm()
if request.method == 'POST':
form = QuestionForm(request.POST)
if form.is_valid():
ime_kontaktne_osebe = form.cleaned_data['ime_kontaktne_osebe']
email = form.cleaned_data['email']
naziv_podjetja = form.cleaned_data['naziv_podjetja']
sedez_podjetja = form.cleaned_data['sedez_podjetja']
vrsta_dejavnosti = form.cleaned_data['vrsta_dejavnosti']
davcni_zavezanec = form.cleaned_data['davcni_zavezanec']
st_zaposlenih = form.cleaned_data['st_zaposlenih']
st_prejetnih_racunov = form.cleaned_data['st_prejetnih_racunov']
st_izdanih_racunov = form.cleaned_data['st_izdanih_racunov']
dodatna_vprasanja = form.cleaned_data['dodatna_vprasanja']
            msg = render_to_string('povprasevanje_mail.html', {
                'ime_kontaktne_osebe': ime_kontaktne_osebe,
                'email': email,
                'naziv_podjetja': naziv_podjetja,
                'sedez_podjetja': sedez_podjetja,
                'vrsta_dejavnosti': vrsta_dejavnosti,
                'davcni_zavezanec': davcni_zavezanec,
                'st_zaposlenih': st_zaposlenih,
                'st_prejetnih_racunov': st_prejetnih_racunov,
                'st_izdanih_racunov': st_izdanih_racunov,
                'dodatna_vprasanja': dodatna_vprasanja,
            })
            try:
                msg = EmailMessage('Website inquiry', msg, email, ['karmen.rakovic@siol.net'])
                msg.content_subtype = "html"  # Main content is now text/html
                msg.send()
                messages.success(request, 'Your inquiry has been sent successfully.')
                form = QuestionForm()
            except Exception:
                messages.error(request, 'An error occurred while sending!')
                form = QuestionForm()
return render_to_response('povprasevanje.html', locals(), context_instance=RequestContext(request))
def kontakti(request):
return render_to_response('kontakti.html', locals(), context_instance=RequestContext(request))
def novice(request):
novice = Novica.objects.all()
return render_to_response('novice.html', locals(), context_instance=RequestContext(request))
def novica(request, id):
novica = Novica.objects.get(pk=id)
files = Files.objects.filter(novica=id)
return render_to_response('novica.html', locals(), context_instance=RequestContext(request))
|
[
"simonrakovic@gmail.com"
] |
simonrakovic@gmail.com
|
67e6a589520d73111095f3a72afd004de6c4c1f8
|
bfc885bbe60e80fb295ef711666518200889adc1
|
/peyecoder/panels.py
|
5a9e2d2de512910e1618a92a2618cb56f1152cd8
|
[
"MIT"
] |
permissive
|
rholson1/peyecoder
|
470151c1cd189117fb88ffbc40d945facf04a60a
|
8a30b2ae44d0a9be1f4c6b51be0d63a49dff6c6a
|
refs/heads/master
| 2022-11-29T11:55:01.585022
| 2022-08-05T15:36:15
| 2022-08-05T15:36:15
| 242,180,640
| 1
| 0
|
MIT
| 2022-11-22T10:43:44
| 2020-02-21T16:16:03
|
Python
|
UTF-8
|
Python
| false
| false
| 10,332
|
py
|
from PySide2.QtWidgets import QWidget, QLabel, QPushButton, QSpinBox, QComboBox, \
QRadioButton, QVBoxLayout, QHBoxLayout, QTableWidget, QTableWidgetItem, QCheckBox, \
QButtonGroup, QHeaderView, QApplication
from PySide2.QtGui import Qt
from PySide2 import QtGui
from peyecoder.models import Reason, Event
class Prescreen(QWidget):
REASONS = (
'Inattentive',
'Child Talking',
'Parent Talking',
'Parent Interference',
'Child Not Looking Before Sound',
'Equipment Malfunction',
'Experiment Ended Early',
'Other'
)
def __init__(self, callback):
super().__init__()
self.callback = callback
trial_label = QLabel('Trial:')
self.trial_box = QSpinBox()
self.trial_box.setFixedWidth(64)
self.trial_box.setValue(1)
# self.trial_box.setFocusPolicy(Qt.NoFocus)
reason_label = QLabel('Reason:')
self.reason_box = QComboBox()
self.reason_box.addItems(self.REASONS)
self.reason_box.setFocusPolicy(Qt.NoFocus)
self.code_radio = QRadioButton('Code')
self.nocode_radio = QRadioButton('Do Not Code')
self.code_radio.setChecked(True)
self.code_radio.setFocusPolicy(Qt.NoFocus)
self.nocode_radio.setFocusPolicy(Qt.NoFocus)
radio_layout = QVBoxLayout()
radio_layout.addStretch()
radio_layout.addWidget(self.code_radio)
radio_layout.addWidget(self.nocode_radio)
radio_layout.addStretch()
self.group_code = QButtonGroup()
self.group_code.addButton(self.code_radio)
self.group_code.addButton(self.nocode_radio)
self.record_button = QPushButton('Record Reason')
self.record_button.clicked.connect(self.record_reason)
self.record_button.setEnabled(False)
self.record_button.setFocusPolicy(Qt.NoFocus)
self.both_checkbox = QCheckBox('Display both coders')
self.both_checkbox.setFocusPolicy(Qt.NoFocus)
self.radio_primary = QRadioButton('Primary')
self.radio_secondary = QRadioButton('Secondary')
self.radio_primary.setChecked(True)
self.radio_primary.setFocusPolicy(Qt.NoFocus)
self.radio_secondary.setFocusPolicy(Qt.NoFocus)
who_layout = QVBoxLayout()
who_layout.addWidget(self.both_checkbox)
who_layout.addWidget(self.radio_primary)
who_layout.addWidget(self.radio_secondary)
self.group_who = QButtonGroup()
self.group_who.addButton(self.radio_primary, id=1)
self.group_who.addButton(self.radio_secondary, id=2)
layout = QHBoxLayout()
layout.addWidget(trial_label)
layout.addWidget(self.trial_box)
layout.addWidget(reason_label)
layout.addWidget(self.reason_box)
layout.addLayout(radio_layout)
layout.addWidget(self.record_button)
layout.addStretch()
layout.addLayout(who_layout)
self.setLayout(layout)
def record_reason(self):
reason = Reason(trial=self.trial_box.value(),
include=self.code_radio.isChecked(),
reason=self.reason_box.currentText())
self.callback(reason)
def prescreener(self):
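        # Return 0 when both coders are displayed; otherwise the id of the
        # checked radio button (1 = primary, 2 = secondary).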
if self.both_checkbox.isChecked():
return 0
else:
return self.group_who.checkedId()
class Code(QWidget):
TRIAL_STATUS = ('on', 'off')
def __init__(self, callback):
super().__init__()
self.callback = callback
trial_label = QLabel('Trial:')
self.trial_box = QSpinBox()
self.trial_box.setFixedWidth(64)
self.trial_box.setValue(1)
#self.trial_box.setFocusPolicy(Qt.NoFocus)
trial_status_label = QLabel('Trial Status:')
self.trial_status = QComboBox()
self.trial_status.addItems(self.TRIAL_STATUS)
self.trial_status.setFocusPolicy(Qt.NoFocus)
response_label = QLabel('Response:')
self.response_box = QComboBox()
self.response_box.setFocusPolicy(Qt.NoFocus)
self.record_button = QPushButton('Record Event')
self.record_button.clicked.connect(self.record_event)
self.record_button.setEnabled(False)
self.record_button.setFocusPolicy(Qt.NoFocus)
layout = QHBoxLayout()
layout.addWidget(trial_label)
layout.addWidget(self.trial_box)
layout.addWidget(trial_status_label)
layout.addWidget(self.trial_status)
layout.addWidget(response_label)
layout.addWidget(self.response_box)
layout.addStretch()
layout.addWidget(self.record_button)
self.setLayout(layout)
def set_responses(self, responses):
self.response_box.clear()
self.response_box.addItems(responses)
def record_event(self):
event = Event(trial=self.trial_box.value(),
status=self.trial_status.currentText() == 'on',
response=self.response_box.currentText())
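        # An 'off' event closes the current trial, so auto-advance the trial
        # spinner and reset the status dropdown for the next trial.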
if self.trial_status.currentText() == 'off':
self.trial_box.setValue(self.trial_box.value() + 1)
self.trial_status.setCurrentText('on')
self.callback(event)
class LogTable(QTableWidget):
class Labels:
Code = ('Trial #', 'Trial Status', 'Response', 'Time Code')
Prescreen1 = ('Trial #', 'PS 1 Code?', 'PS 1 Reason?')
Prescreen2 = ('Trial #', 'PS 2 Code?', 'PS 2 Reason?')
Prescreen12 = ('Trial #', 'PS 1 Code?', 'PS 1 Reason?', 'PS 2 Code?', 'PS 2 Reason?')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.verticalHeader().setVisible(False)
self.setSelectionBehavior(QTableWidget.SelectRows)
self.setFocusPolicy(Qt.NoFocus)
self.setVerticalScrollMode(self.ScrollPerPixel)
self.data = [] # list of iterables
def keyPressEvent(self, event: QtGui.QKeyEvent):
"""Add repaint call to address failure to repaint on MacOS when using arrows to navigate past top or bottom of
the logtable.
"""
super().keyPressEvent(event)
self.repaint()
def add_entry(self, entry):
""" Append an entry to the table data and to the table"""
self.data.append(entry)
self._add_row(entry)
self.repaint()
def _add_row(self, entry):
# Add a row to the table, given an iterable
new_row = self.rowCount()
self.setRowCount(new_row + 1)
for col, v in enumerate(entry):
item = QTableWidgetItem(str(v))
item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable) # Do not want ItemIsEditable
self.setItem(new_row, col, item)
def load_data(self, data):
self.data = data
# update table from self.data
self.setRowCount(0)
for entry in self.data:
self._add_row(entry)
def redden_selected(self):
for item in self.selectedItems():
item.setForeground(Qt.red)
def redden_rows(self, rows):
for r in rows:
for c in range(self.columnCount()):
self.item(r, c).setForeground(Qt.red)
def delete_selected(self):
"""Delete selected rows"""
deleted_rows = []
for row in reversed(range(self.rowCount())):
if self.item(row, 0).isSelected():
self.data.pop(row)
self.removeRow(row)
deleted_rows.append(row)
self.repaint()
return deleted_rows
def increment_selected(self):
"""Increment the value in the first cell of selected rows"""
for row in range(self.rowCount()):
if self.item(row, 0).isSelected():
self.data[row][0] += 1
self.item(row, 0).setText(str(self.data[row][0]))
def decrement_selected(self):
"""Decrement the value in the first cell of selected rows"""
for row in range(self.rowCount()):
if self.item(row, 0).isSelected():
self.data[row][0] -= 1
self.item(row, 0).setText(str(self.data[row][0]))
def has_selection(self):
return len(self.selectedItems()) > 0
def selected_rows(self):
# Return rows as dictionary with the keys the row numbers
# and the values the text of the first cell in the row
rows = {}
for row in range(self.rowCount()):
if self.item(row, 0).isSelected():
rows[row] = self.item(row, 0).text()
return rows
def select_rows(self, rows):
"""Select rows in the table specified as keys in a dictionary"""
first = True
for r in rows:
for c in range(self.columnCount()):
if self.item(r, c):
self.item(r, c).setSelected(True)
if first:
self.setCurrentCell(r, 0)
first = False
def set_code_labels(self):
self.set_labels(self.Labels.Code)
def set_prescreen_labels(self, ps):
if ps == 0:
self.set_labels(self.Labels.Prescreen12)
elif ps == 1:
self.set_labels(self.Labels.Prescreen1)
elif ps == 2:
self.set_labels(self.Labels.Prescreen2)
def set_labels(self, labels):
self.setColumnCount(len(labels))
self.setHorizontalHeaderLabels(labels)
def scroll_to_row(self, row):
# scrolling to the item doesn't work very well for the last item, so scrollToBottom instead
if row == self.rowCount() - 1:
self.scrollToBottom()
else:
self.scrollToItem(self.item(row, 0), self.PositionAtCenter)
def copy_selection(self):
# copy text from selected rows, or if no rows are selected, the entire table, to the clipboard
rows = []
if self.has_selection():
for r in self.selected_rows():
rows.append('\t'.join([self.item(r, c).text() for c in range(self.columnCount())]))
else:
for r in range(self.rowCount()):
rows.append('\t'.join([self.item(r, c).text() for c in range(self.columnCount())]))
app = QApplication.instance()
app.clipboard().setText('\n'.join(rows))
|
[
"rholson1@wisc.edu"
] |
rholson1@wisc.edu
|
c18c6a803556cb3d4779e0ae99b912b6b15ebaa3
|
c960fe2d453043123db739cc3e5521231fb028d4
|
/units/models.py
|
eb93e96017ad4d60f34e7d7a0b849971baa4616d
|
[] |
no_license
|
RaphaelQuem/bitsweet
|
d74590c366219161c911d21aa05dedf0de258766
|
66e90177544e0754a1dcc71409f16163e530b92f
|
refs/heads/master
| 2020-09-01T22:19:01.012848
| 2019-11-12T18:36:56
| 2019-11-12T18:36:56
| 219,073,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
from django.db import models
class MeasurementUnit(models.Model):
unit_id = models.AutoField(primary_key=True)
unit_name = models.CharField(max_length=100, null=False)
unit_abbreviation = models.CharField(max_length=10,null=True)
def __str__(self):
return self.unit_name
|
[
"raphzmaia@gmail.com"
] |
raphzmaia@gmail.com
|
199a343f605d4cb6ba0cf1a2b56902993154ff5d
|
24e7e0dfaaeaca8f911b40fcc2937342a0f278fd
|
/venv/Lib/site-packages/toolz/recipes.py
|
89de88db2b46d9a50231ffdf18aa0aa280f051f0
|
[
"MIT"
] |
permissive
|
BimiLevi/Covid19
|
90e234c639192d62bb87364ef96d6a46d8268fa0
|
5f07a9a4609383c02597373d76d6b6485d47936e
|
refs/heads/master
| 2023-08-04T13:13:44.480700
| 2023-08-01T08:36:36
| 2023-08-01T08:36:36
| 288,455,446
| 1
| 0
|
MIT
| 2021-01-22T19:36:26
| 2020-08-18T12:53:43
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
import itertools
from .itertoolz import frequencies, pluck, getter
__all__ = ('countby', 'partitionby')
def countby(key, seq):
""" Count elements of a collection by a key function
>>> countby(len, ['cat', 'mouse', 'dog'])
{3: 2, 5: 1}
>>> def iseven(x): return x % 2 == 0
>>> countby(iseven, [1, 2, 3]) # doctest:+SKIP
{True: 1, False: 2}
See Also:
groupby
"""
if not callable(key):
key = getter(key)
return frequencies(map(key, seq))
def partitionby(func, seq):
""" Partition a sequence according to a function
Partition `s` into a sequence of lists such that, when traversing
`s`, every time the output of `func` changes a new list is started
and that and subsequent items are collected into that list.
>>> is_space = lambda c: c == " "
>>> list(partitionby(is_space, "I have space"))
[('I',), (' ',), ('h', 'a', 'v', 'e'), (' ',), ('s', 'p', 'a', 'c', 'e')]
>>> is_large = lambda x: x > 10
>>> list(partitionby(is_large, [1, 2, 1, 99, 88, 33, 99, -1, 5]))
[(1, 2, 1), (99, 88, 33, 99), (-1, 5)]
See also:
partition
groupby
itertools.groupby
"""
return map(tuple, pluck(1, itertools.groupby(seq, key=func)))
|
[
"50989568+BimiLevi@users.noreply.github.com"
] |
50989568+BimiLevi@users.noreply.github.com
|
66106037a9117fd9b932a27781f87dc77cf935c9
|
4afd23a55aad4634db84969f8019eabee7cfa27e
|
/restaurant/user/migrations/0001_initial.py
|
6a26f973f322015ce03ae345861273b9ad65af92
|
[] |
no_license
|
fdfdd12345628/uidd2019_restaurant
|
bb8d9ac72acb047c347cf04567c733d3ced3ac1d
|
3768fbbcaf419ea72661b93b58d5494387e1ab7e
|
refs/heads/master
| 2020-05-02T04:40:15.601828
| 2019-07-09T04:00:25
| 2019-07-09T04:00:25
| 177,755,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,625
|
py
|
# Generated by Django 2.1.7 on 2019-04-15 17:42
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False,
help_text='Designates that this user has all permissions without explicitly assigning them.',
verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'},
help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.',
max_length=150, unique=True,
validators=[django.contrib.auth.validators.UnicodeUsernameValidator()],
verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False,
help_text='Designates whether the user can log into this admin site.',
verbose_name='staff status')),
('is_active', models.BooleanField(default=True,
help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.',
verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True,
help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.',
related_name='user_set', related_query_name='user', to='auth.Group',
verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.',
related_name='user_set', related_query_name='user',
to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
|
[
"fdfdd12345628@gmail.com"
] |
fdfdd12345628@gmail.com
|
707fff8e60eb4a656961574d96ad9a1c5683f7e3
|
f8fbdd49b3af8cd159fab32f5af6f35f6d8a38a6
|
/jeevih14.py
|
92fc8ed1f5ba1fb91d865c139ae6c14d126a8354
|
[] |
no_license
|
JEEVITHAVIJAYAN/jeeviv
|
f2e8393f2a8d45eb781ff054127f561183ac45cc
|
0c6e34788528feeb0d124ef92d6e8e3702563685
|
refs/heads/master
| 2020-07-04T15:03:20.293758
| 2019-08-14T10:37:07
| 2019-08-14T10:37:07
| 202,319,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 185
|
py
|
# Print every distinct permutation of the input string, preserving
# first-appearance order.
from itertools import permutations
p = input()
q = permutations(p)
r = []
for i in list(q):
    s = "".join(i)
    if s not in r:
        r.append(s)
for i in r:
    print(i)
|
[
"noreply@github.com"
] |
JEEVITHAVIJAYAN.noreply@github.com
|