| blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 2-616 chars) | content_id (string, 40 chars) | detected_licenses (list, 0-69 items) | license_type (string, 2 classes) | repo_name (string, 5-118 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (string, 4-63 chars) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, 2-10.3M chars) | authors (list, 1 item) | author_id (string, 0-212 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c3d4834bbc46156f17c4081635c11a08d327dd1
|
710e96b1435bc43cc260512df75af5dd3b2afd13
|
/code/1044.py
|
6b217c61734025d6ab42ff1303588769ee0ced7d
|
[
"MIT"
] |
permissive
|
minssoj/Learning_Algorithm_Up
|
94ca8166c9a5d87917cf033ad8415871684241c4
|
45ec4e2eb4c07c9ec907a74dbd31370e1645c50b
|
refs/heads/main
| 2023-01-08T20:52:32.983756
| 2020-11-05T17:49:45
| 2020-11-05T17:49:45
| 301,926,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
# [Basics - Arithmetic] Read one integer, add 1, and print it (with explanation)
# minso.jeong@daum.net
'''
Problem link: https://www.codeup.kr/problem.php?id=1044
'''
n = int(input())
print(n + 1)
|
[
"minso.jeong@daum.net"
] |
minso.jeong@daum.net
|
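A quick worked example for the snippet above: for input 25 the program prints 26.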
8fa5e8e7b1fb0820c60a8b499bcdaf74e557bd2c
|
ee9910f736a28a229242247c15c8c3b53d0b4c04
|
/TimeTracky/tests/unit/test_handlers.py
|
746813f73b219dcea75181f6c9b44f44a83b0834
|
[] |
no_license
|
toadesina1/TA-Soft-engin
|
85d3590fb8ed71fe6d9d2872af8f21fd9fcd30ae
|
94c76a99e09d233ce11829c37c6a43809d605229
|
refs/heads/main
| 2023-04-15T17:39:04.506827
| 2021-04-23T19:42:37
| 2021-04-23T19:42:37
| 332,826,398
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,257
|
py
|
from __future__ import annotations
from collections import defaultdict
from datetime import date, datetime, timedelta, timezone
from typing import Dict, List
import pytest
from timetrackylib import bootstrap
from timetrackylib.domain import commands
from timetrackylib.services import handlers, unit_of_work
from timetrackylib.adapters import repository
from timetrackylib.adapters.orm import start_mappers
from timetrackylib.services.unit_of_work import FakeUnitOfWork
def boostrap_test_app():
return bootstrap.bootstrap(start_orm=False, uow=FakeUnitOfWork())
class TestAddtime_entry:
def test_add_single_time_entry(self):
bus = boostrap_test_app()
nu: datetime = datetime(2021, 4, 19, 13, 0, 5, 0, tzinfo=timezone.utc)
# add one
bus.handle(
commands.Addtime_entryCommand(
0,
f"Test", # title
f"Glasgow", # projectname
nu.isoformat(), # Task starttime
nu.isoformat(), # Task end time
)
)
assert bus.uow.time_entrys.get_by_title(f"Test") is not None
assert bus.uow.committed
def test_get_time_entry_by_id(self):
bus = boostrap_test_app()
nu: datetime = datetime(2021, 4, 19, 13, 0, 5, 0, tzinfo=timezone.utc)
# add one
bus.handle(
commands.Addtime_entryCommand(
99,
f"Test", # title
f"Glasgow", # projectname
nu.isoformat(), # date added
nu.isoformat(), # date edited
)
)
assert bus.uow.time_entrys.get_by_id(99) is not None
assert bus.uow.committed
def test_get_time_entry_by_url(self):
bus = boostrap_test_app()
nu: datetime = datetime(2021, 4, 19, 13, 0, 5, 0, tzinfo=timezone.utc)
# add one
bus.handle(
commands.Addtime_entryCommand(
99,
f"Test", # title
f"Glasgow", # projectname
nu.isoformat(), # date added
nu.isoformat(), # date edited
)
)
assert bus.uow.time_entrys.get_by_projectname(f"Glasgow") is not None
assert bus.uow.committed
def test_get_all_time_entrys(self):
bus = boostrap_test_app()
nu: datetime = datetime(2021, 4, 19, 13, 0, 5, 0, tzinfo=timezone.utc)
bus.handle(
commands.Addtime_entryCommand(
99,
f"Test", # title
f"Glasgow", # projectname
nu.isoformat(), # task starttime
nu.isoformat(), # task endtime
)
)
nuto = nu + timedelta(days = 3, hours=13)
bus.handle(
commands.Addtime_entryCommand(
999,
f"Test2", # title
f"Glasgow", # projectname
nu.isoformat(), # task starttime
nu.isoformat(), # task endtime
)
)
records = bus.uow.time_entrys.get_all()
assert len(records) == 2
|
[
"noreply@github.com"
] |
toadesina1.noreply@github.com
|
e6389abc5d2da97c1d3db8c67355bd1223f92ff2
|
644ed18c5959b7633605db80e49a805adcac4eae
|
/Yolo.py
|
c170f99c083064525ec257e1eb1bdc8c28e4ac6c
|
[] |
no_license
|
Shaileshraajk/Smart-Waste-Segregation-System-using-Python-and-ML
|
7586e598d9e151353c7598694a600887daaaf986
|
2896f640137030ad16a781ccbaab7997a69fde81
|
refs/heads/main
| 2023-07-16T07:25:05.911955
| 2021-08-10T07:17:38
| 2021-08-10T07:17:38
| 394,559,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,029
|
py
|
#Packages
import cv2 as cv
import numpy as np
import urllib.request
import threading
from pyfirmata import SERVO
import pyfirmata
import time
import paho.mqtt.publish as publish
obj=0 #No. of Objects
flag=0
j=0
k=0
bio=0 # No. of Biodegradable Objects
nbio=0 # No. of Non-Biodegradable Objects
# Robotic-Arm Setup
board = pyfirmata.Arduino('COM3')
servo = board.get_pin('d:11:o')
board.digital[11].mode = SERVO
#Capture Video
cap = cv.VideoCapture(0)
whT = 320
confThreshold = 0.5
nmsThreshold=0.3
## Object Names
classesFile = "coco.names"
classNames = []
with open(classesFile, 'rt') as f:
classNames = f.read().rstrip('\n').split('\n')
print("classNames: ", classNames)
print(len(classNames))
recyclableFile = "recycle.names"
recyclable = []
with open(recyclableFile, 'rt') as f:
recyclable = f.read().rstrip('\n').split('\n')
print("recyclable: ", recyclable)
# Arduino Connection
#arduinoData=serial.Serial('com3',9600)
# YOLO Model Configurations
modelConfiguration = "yolov3.cfg"
modelWeights = "yolov3.weights"
# Creating Deep Neural Network(DNN)
net = cv.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
def findObjects(outputs,img):
global flag
global i
global j
hT, wT, cT = img.shape
bbox = [] # bounding box corner points
classIds = [] # class id with the highest confidence
confs = [] # confidence value of the highest class
nbdg = [] #Non-Biodegradable
bdg = [] #Biodegradable
for output in outputs:
for det in output:
scores = det[5:]
classId = np.argmax(scores) #Find the Maximum Score
confidence = scores[classId]
if confidence > confThreshold:
w, h = int(det[2] * wT), int(det[3] * hT)
x, y = int((det[0] * wT) - w / 2), int((det[1] * hT) - h / 2)
bbox.append([x, y, w, h])
classIds.append(classId)
confs.append(float(confidence))
indices = cv.dnn.NMSBoxes(bbox, confs, confThreshold, nmsThreshold) #Non-Maximum Suppression
for i in indices:
i = i[0]
box = bbox[i]
x, y, w, h = box[0], box[1], box[2], box[3]
cv.rectangle(img, (x, y), (x + w, y + h), (255, 0, 255), 2)
cv.putText(img, f'{classNames[classIds[i]].upper()} {int(confs[i] * 100)}%',
(x, y - 10), cv.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 255), 2)
print(classNames[classIds[i]])
#thingspeak_post(l)
if(classNames[classIds[i]] in recyclable):
flag = 1
bdg.append(classNames[classIds[i]])
else:
flag = 0
nbdg.append(classNames[classIds[i]])
thingspeak_post(len(classIds), flag)  # 'l' is only assigned after this loop, so pass the count directly
Robotic_Arm(flag)
#arduinoData.write(flag)
l = len(classIds)-3
global obj
obj = l
print("Objects Scanned: ", l)
print(bdg)
print(nbdg)
remove_duplicates1(bdg)
remove_duplicates2(nbdg)
#mqtt_publish2(i)
#mqtt_publish3(j)
def remove_duplicates1(bdg):
global bio
bl=[]
for i in bdg:
if i not in bl:
bl.append(i)
bio=len(bl)
mqtt_publish2(bio)
def remove_duplicates2(nbdg):
global nbio
nbl = []
for i in nbdg:
if i not in nbl:
nbl.append(i)
nbio=len(nbl)
mqtt_publish3(nbio)
def mqtt_publish2(obj):
publish.single("Shailesh/Bio-DG/IOT", obj, hostname="test.mosquitto.org")
print("BDG Obj Count Done")
def mqtt_publish3(obj):
publish.single("Shailesh/Non-BDG", obj, hostname="test.mosquitto.org")
print("NBDG Obj Count Done")
def mqtt_publish1(obj):
publish.single("Shailesh/Nobjects", obj, hostname="test.mosquitto.org")
print("Obj Count Done")
def check_flag(flag):
global k
global j
if flag==1:
k=k+1
mqtt_publish2(k)
else:
j=j+1
mqtt_publish3(j)
def Robotic_Arm(f):
if(f == 1):
servo.write(0)
time.sleep(1)
else:
servo.write(180)
time.sleep(1)
def thingspeak_post(val1,val2):
threading.Timer(1,thingspeak_post,[val1, val2]).start()
URl='https://api.thingspeak.com/update?api_key='
KEY='NJM6WXH3J936SEZU'
HEADER='&field1={}&field2={}'.format(val1, val2)
NEW_URL = URl + KEY + HEADER
data = urllib.request.urlopen(NEW_URL)
# Reading Image and converting to blob
while True:
success, img = cap.read()
blob = cv.dnn.blobFromImage(img, 1 / 255, (whT, whT), [0, 0, 0], 1, crop=False)
net.setInput(blob)
layersNames = net.getLayerNames()
outputNames = [(layersNames[i[0] - 1]) for i in net.getUnconnectedOutLayers()]
outputs = net.forward(outputNames)
findObjects(outputs, img)
cv.imshow('Image', img)
cv.waitKey(1)
mqtt_publish1(obj)
|
[
"noreply@github.com"
] |
Shaileshraajk.noreply@github.com
|
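A portability note for the detection loop above: older OpenCV builds return nested arrays from getUnconnectedOutLayers() and NMSBoxes() (hence the i[0] unwrapping), while newer builds (roughly 4.5.4 and later) return flat index arrays, which breaks that indexing. A version-agnostic sketch using the same names as above:
outputNames = [layersNames[i - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]
for i in np.array(indices).flatten():  # NMSBoxes output, nested or flat
x, y, w, h = bbox[i]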
fbd36cd5309b3248e8bf30b421adc6ce2092a674
|
6b46ffdb30a98b3e3f8da1e8f385522b5b48f02a
|
/backend/comment/models.py
|
28fd8ca85a6046abe176cfb0e4da1efb5f701a61
|
[] |
no_license
|
eve-klopfenstein/luna
|
27829f3a3c98368249f58c2b667fa339683bc3fd
|
1b93c639618bfcc1c5c7feaa85627497b3072a2e
|
refs/heads/main
| 2023-05-01T10:59:40.109986
| 2021-04-02T14:14:24
| 2021-04-02T14:14:24
| 367,734,454
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 825
|
py
|
from django.contrib.auth import get_user_model
from django.db import models
from review.models import Review
User = get_user_model()
class Comment(models.Model):
author = models.ForeignKey(to=User, related_name='comments', on_delete=models.CASCADE, blank=True)
review = models.ForeignKey(to=Review, related_name='comments', on_delete=models.CASCADE, null=True, blank=True)
content = models.CharField(max_length=600, null=True, blank=True)
created = models.DateTimeField(auto_now_add=True, null=True, blank=True)
modified = models.DateTimeField(auto_now=True, null=True, blank=True, )
liked_by = models.ManyToManyField(to=User, related_name='liked_comments', blank=True)
def __str__(self):
return f' Comment: "{self.content}", written by: {self.author} for restaurant: {self.review}'
|
[
"radovic_dusko@yahoo.com"
] |
radovic_dusko@yahoo.com
|
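A hypothetical usage sketch for the model above (assumes existing user, other_user, and review objects; the names are illustrative):
comment = Comment.objects.create(author=user, review=review, content='Lovely place!')
comment.liked_by.add(other_user)  # many-to-many "likes"
print(comment)  # rendered via the __str__ above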
0894ab4443b0c20e40c07b66f35c003920e6f84e
|
39735aac6631d2992b47ad7645930266322a4774
|
/tensorflow_probability/python/distributions/distribution_test.py
|
d25be8d069320024879b7b4b5336383f16bf12b7
|
[
"Apache-2.0"
] |
permissive
|
Abdelpakey/probability
|
b96dff53fab9d9405f39d224fa77ff13f871c5db
|
b43d491fade784ae216a026823d2d27d7317264f
|
refs/heads/master
| 2020-04-01T05:26:28.718050
| 2019-02-15T15:47:16
| 2019-02-15T15:47:16
| 152,903,143
| 0
| 0
| null | 2018-10-13T18:39:10
| 2018-10-13T18:39:31
| null |
UTF-8
|
Python
| false
| false
| 13,317
|
py
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
tfd = tfp.distributions
@test_util.run_all_in_graph_and_eager_modes
class DistributionTest(tf.test.TestCase):
def testParamShapesAndFromParams(self):
classes = [
tfd.Normal,
tfd.Bernoulli,
tfd.Beta,
tfd.Chi2,
tfd.Exponential,
tfd.Gamma,
tfd.InverseGamma,
tfd.Laplace,
tfd.StudentT,
tfd.Uniform,
]
sample_shapes = [(), (10,), (10, 20, 30)]
for cls in classes:
for sample_shape in sample_shapes:
param_shapes = cls.param_shapes(sample_shape)
params = dict([(name, tf.random_normal(shape))
for name, shape in param_shapes.items()])
dist = cls(**params)
self.assertAllEqual(sample_shape, self.evaluate(
tf.shape(dist.sample())))
dist_copy = dist.copy()
self.assertAllEqual(sample_shape,
self.evaluate(tf.shape(dist_copy.sample())))
self.assertEqual(dist.parameters, dist_copy.parameters)
def testCopyExtraArgs(self):
# Note: we cannot easily test all distributions since each requires
# different initialization arguments. We therefore spot test a few.
normal = tfd.Normal(loc=1., scale=2., validate_args=True)
self.assertEqual(normal.parameters, normal.copy().parameters)
wishart = tfd.Wishart(df=2, scale=[[1., 2], [2, 5]], validate_args=True)
self.assertEqual(wishart.parameters, wishart.copy().parameters)
def testCopyOverride(self):
normal = tfd.Normal(loc=1., scale=2., validate_args=True)
unused_normal_copy = normal.copy(validate_args=False)
base_params = normal.parameters.copy()
copy_params = normal.copy(validate_args=False).parameters.copy()
self.assertNotEqual(
base_params.pop("validate_args"), copy_params.pop("validate_args"))
self.assertEqual(base_params, copy_params)
def testIsScalar(self):
mu = 1.
sigma = 2.
normal = tfd.Normal(mu, sigma, validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event()))
self.assertTrue(tensor_util.constant_value(normal.is_scalar_batch()))
normal = tfd.Normal([mu], [sigma], validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event()))
self.assertFalse(tensor_util.constant_value(normal.is_scalar_batch()))
mvn = tfd.MultivariateNormalDiag([mu], [sigma], validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event()))
self.assertTrue(tensor_util.constant_value(mvn.is_scalar_batch()))
mvn = tfd.MultivariateNormalDiag([[mu]], [[sigma]], validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event()))
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_batch()))
# We now test every codepath within the underlying is_scalar_helper
# function.
# Test case 1, 2.
x = tf.placeholder_with_default(input=1, shape=[])
# None would fire an exception were it actually executed.
self.assertTrue(normal._is_scalar_helper(x.get_shape(), lambda: None))
self.assertTrue(
normal._is_scalar_helper(tf.TensorShape(None), lambda: tf.shape(x)))
x = tf.placeholder_with_default(input=[1], shape=[1])
# None would fire an exception were it actually executed.
self.assertFalse(normal._is_scalar_helper(x.get_shape(), lambda: None))
self.assertFalse(
normal._is_scalar_helper(tf.TensorShape(None), lambda: tf.shape(x)))
# There's no notion of partially known shapes in eager mode, so exit
# early.
if tf.executing_eagerly():
return
# Test case 3.
x = tf.placeholder_with_default(input=1, shape=None)
is_scalar = normal._is_scalar_helper(x.get_shape(), lambda: tf.shape(x))
self.assertTrue(self.evaluate(is_scalar))
x = tf.placeholder_with_default(input=[1], shape=None)
is_scalar = normal._is_scalar_helper(x.get_shape(), lambda: tf.shape(x))
self.assertFalse(self.evaluate(is_scalar))
def _GetFakeDistribution(self):
class FakeDistribution(tfd.Distribution):
"""Fake Distribution for testing _set_sample_static_shape."""
def __init__(self, batch_shape=None, event_shape=None):
self._static_batch_shape = tf.TensorShape(batch_shape)
self._static_event_shape = tf.TensorShape(event_shape)
super(FakeDistribution, self).__init__(
dtype=tf.float32,
reparameterization_type=tfd.NOT_REPARAMETERIZED,
validate_args=True,
allow_nan_stats=True,
name="DummyDistribution")
def _batch_shape(self):
return self._static_batch_shape
def _event_shape(self):
return self._static_event_shape
return FakeDistribution
def testSampleShapeHints(self):
# In eager mode, all shapes are known, so these tests do not need to
# execute.
if tf.executing_eagerly():
return
fake_distribution = self._GetFakeDistribution()
# Make a new session since we're playing with static shapes. [And below.]
x = tf.placeholder_with_default(
input=np.ones((6, 7, 2, 3, 5), dtype=np.float32), shape=None)
dist = fake_distribution(batch_shape=[2, 3], event_shape=[5])
sample_shape = tf.convert_to_tensor([6, 7], dtype=tf.int32)
y = dist._set_sample_static_shape(x, sample_shape)
# We use as_list since TensorShape comparison does not work correctly for
# unknown values, ie, Dimension(None).
self.assertAllEqual([6, 7, 2, 3, 5], y.get_shape().as_list())
x = tf.placeholder_with_default(
input=np.ones((6, 7, 2, 3, 5), dtype=np.float32), shape=None)
dist = fake_distribution(batch_shape=[None, 3], event_shape=[5])
sample_shape = tf.convert_to_tensor([6, 7], dtype=tf.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertAllEqual([6, 7, None, 3, 5], y.get_shape().as_list())
x = tf.placeholder_with_default(
input=np.ones((6, 7, 2, 3, 5), dtype=np.float32), shape=None)
dist = fake_distribution(batch_shape=[None, 3], event_shape=[None])
sample_shape = tf.convert_to_tensor([6, 7], dtype=tf.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertAllEqual([6, 7, None, 3, None], y.get_shape().as_list())
x = tf.placeholder_with_default(
input=np.ones((6, 7, 2, 3, 5), dtype=np.float32), shape=None)
dist = fake_distribution(batch_shape=None, event_shape=None)
sample_shape = tf.convert_to_tensor([6, 7], dtype=tf.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertTrue(y.get_shape().ndims is None)
x = tf.placeholder_with_default(
input=np.ones((6, 7, 2, 3, 5), dtype=np.float32), shape=None)
dist = fake_distribution(batch_shape=[None, 3], event_shape=None)
# There's no notion of partially known shapes in eager mode, so exit
# early.
sample_shape = tf.convert_to_tensor([6, 7], dtype=tf.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertTrue(y.get_shape().ndims is None)
def testNameScopeWorksCorrectly(self):
x = tfd.Normal(loc=0., scale=1., name="x")
x_duplicate = tfd.Normal(loc=0., scale=1., name="x")
with tf.name_scope("y") as name:
y = tfd.Bernoulli(logits=0., name=name)
x_sample = x.sample(name="custom_sample")
x_sample_duplicate = x.sample(name="custom_sample")
x_log_prob = x.log_prob(0., name="custom_log_prob")
x_duplicate_sample = x_duplicate.sample(name="custom_sample")
self.assertEqual(x.name, "x/")
self.assertEqual(y.name, "y/")
# There's no notion of graph, hence the same name will be reused.
# Tensors also do not have names in eager mode, so exit early.
if tf.executing_eagerly():
return
self.assertTrue(x_sample.name.startswith("x/custom_sample"))
self.assertTrue(x_log_prob.name.startswith("x/custom_log_prob"))
self.assertEqual(x_duplicate.name, "x_1/")
self.assertTrue(x_duplicate_sample.name.startswith(
"x_1/custom_sample"))
self.assertTrue(x_sample_duplicate.name.startswith("x/custom_sample_1"))
def testStrWorksCorrectlyScalar(self):
# Usually we'd write np.float(X) here, but a recent Eager bug would
# erroneously coerce the value to float32 anyway. We therefore use constants
# here, until the bug is resolved in TensorFlow 1.12.
normal = tfd.Normal(loc=tf.constant(0, tf.float16),
scale=tf.constant(1, tf.float16))
self.assertEqual(
str(normal),
"tfp.distributions.Normal("
"\"Normal/\", "
"batch_shape=(), "
"event_shape=(), "
"dtype=float16)")
chi2 = tfd.Chi2(df=np.float32([1., 2.]), name="silly")
self.assertEqual(
str(chi2),
"tfp.distributions.Chi2("
"\"silly/\", " # What a silly name that is!
"batch_shape=(2,), "
"event_shape=(), "
"dtype=float32)")
# There's no notion of partially known shapes in eager mode, so exit
# early.
if tf.executing_eagerly():
return
exp = tfd.Exponential(rate=tf.placeholder_with_default(
input=1., shape=None))
self.assertEqual(
str(exp),
"tfp.distributions.Exponential(\"Exponential/\", "
# No batch shape.
"event_shape=(), "
"dtype=float32)")
def testStrWorksCorrectlyMultivariate(self):
mvn_static = tfd.MultivariateNormalDiag(
loc=np.zeros([2, 2]), name="MVN")
self.assertEqual(
str(mvn_static),
"tfp.distributions.MultivariateNormalDiag("
"\"MVN/\", "
"batch_shape=(2,), "
"event_shape=(2,), "
"dtype=float64)")
# There's no notion of partially known shapes in eager mode, so exit
# early.
if tf.executing_eagerly():
return
mvn_dynamic = tfd.MultivariateNormalDiag(
loc=tf.placeholder_with_default(
input=np.ones((3, 3), dtype=np.float32), shape=[None, 3]),
name="MVN2")
self.assertEqual(
str(mvn_dynamic),
"tfp.distributions.MultivariateNormalDiag("
"\"MVN2/\", "
"batch_shape=(?,), " # Partially known.
"event_shape=(3,), "
"dtype=float32)")
def testReprWorksCorrectlyScalar(self):
# Usually we'd write np.float(X) here, but a recent Eager bug would
# erroneously coerce the value to float32 anyway. We therefore use constants
# here, until the bug is resolved in TensorFlow 1.12.
normal = tfd.Normal(loc=tf.constant(0, tf.float16),
scale=tf.constant(1, tf.float16))
self.assertEqual(
repr(normal),
"<tfp.distributions.Normal"
" 'Normal/'"
" batch_shape=()"
" event_shape=()"
" dtype=float16>")
chi2 = tfd.Chi2(df=np.float32([1., 2.]), name="silly")
self.assertEqual(
repr(chi2),
"<tfp.distributions.Chi2"
" 'silly/'" # What a silly name that is!
" batch_shape=(2,)"
" event_shape=()"
" dtype=float32>")
# There's no notion of partially known shapes in eager mode, so exit
# early.
if tf.executing_eagerly():
return
exp = tfd.Exponential(rate=tf.placeholder_with_default(
input=1., shape=None))
self.assertEqual(
repr(exp),
"<tfp.distributions.Exponential"
" 'Exponential/'"
" batch_shape=<unknown>"
" event_shape=()"
" dtype=float32>")
def testReprWorksCorrectlyMultivariate(self):
mvn_static = tfd.MultivariateNormalDiag(
loc=np.zeros([2, 2]), name="MVN")
self.assertEqual(
repr(mvn_static),
"<tfp.distributions.MultivariateNormalDiag"
" 'MVN/'"
" batch_shape=(2,)"
" event_shape=(2,)"
" dtype=float64>")
# There's no notion of partially known shapes in eager mode, so exit
# early.
if tf.executing_eagerly():
return
mvn_dynamic = tfd.MultivariateNormalDiag(
loc=tf.placeholder_with_default(
input=np.ones((3, 3), dtype=np.float32), shape=[None, 3]),
name="MVN2")
self.assertEqual(
repr(mvn_dynamic),
"<tfp.distributions.MultivariateNormalDiag"
" 'MVN2/'"
" batch_shape=(?,)" # Partially known.
" event_shape=(3,)"
" dtype=float32>")
if __name__ == "__main__":
tf.test.main()
|
[
"copybara-piper@google.com"
] |
copybara-piper@google.com
|
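For context on testParamShapesAndFromParams above: param_shapes(sample_shape) returns, per constructor argument, the shape that makes dist.sample() come out with shape sample_shape. A minimal sketch against the same TF1-era API the test uses (shapes in comments are illustrative):
shapes = tfd.Normal.param_shapes((10, 20))  # roughly {'loc': (10, 20), 'scale': (10, 20)}
params = {name: tf.random_normal(shape) for name, shape in shapes.items()}
dist = tfd.Normal(**params)  # dist.sample() then has shape (10, 20)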
b604a4ebbf17a27ae884d5f13df2ff0d7a3fe205
|
30baf29e5cf882af3e5af64c7f1a9eab6c259b55
|
/ActorCritic-CartPole/SAC/networks.py
|
72d3e7c27c72f94180c4afd5b247ec048c094395
|
[
"MIT"
] |
permissive
|
Frostday/PytorchProjects
|
5cb4cad908504af4ce1a05f718ef1c245c754bf3
|
ddfc19d6c34ce7b519e41f69e9b83ead2789a020
|
refs/heads/master
| 2023-05-10T16:29:52.846854
| 2021-05-28T13:56:44
| 2021-05-28T13:56:44
| 296,635,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,371
|
py
|
import os
import torch as T
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
from torch.distributions.normal import Normal
import numpy as np
class CriticNetwork(nn.Module):
# evaluates the value of a state and action pair
def __init__(self, beta, input_dims, n_actions, fc1_dims=256, fc2_dims=256,
name='critic', chkpt_dir=r'CartPole\SAC\models'):
super(CriticNetwork, self).__init__()
self.input_dims = input_dims
self.fc1_dims = fc1_dims
self.fc2_dims = fc2_dims
self.n_actions = n_actions
self.name = name
self.checkpoint_dir = chkpt_dir
self.checkpoint_file = os.path.join(self.checkpoint_dir, name+'_sac')
self.fc1 = nn.Linear(self.input_dims[0]+n_actions, self.fc1_dims)
self.fc2 = nn.Linear(self.fc1_dims, self.fc2_dims)
self.q = nn.Linear(self.fc2_dims, 1)
self.optimizer = optim.Adam(self.parameters(), lr=beta)
self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')
self.to(self.device)
def forward(self, state, action):
action_value = F.relu(self.fc1(T.cat([state, action], dim=1)))
action_value = F.relu(self.fc2(action_value))
q = self.q(action_value)
return q
def save_checkpoint(self):
T.save(self.state_dict(), self.checkpoint_file)
def load_checkpoint(self):
self.load_state_dict(T.load(self.checkpoint_file))
class ValueNetwork(nn.Module):
# estimates the value of a particular state, regardless of which action was taken or is being taken
def __init__(self, beta, input_dims, fc1_dims=256, fc2_dims=256,
name='value', chkpt_dir=r'CartPole\SAC\models'):
super(ValueNetwork, self).__init__()
self.input_dims = input_dims
self.fc1_dims = fc1_dims
self.fc2_dims = fc2_dims
self.name = name
self.checkpoint_dir = chkpt_dir
self.checkpoint_file = os.path.join(self.checkpoint_dir, name+'_sac')
self.fc1 = nn.Linear(*self.input_dims, self.fc1_dims)
self.fc2 = nn.Linear(self.fc1_dims, self.fc2_dims)
self.v = nn.Linear(self.fc2_dims, 1)
self.optimizer = optim.Adam(self.parameters(), lr=beta)
self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')
self.to(self.device)
def forward(self, state):
state_value = F.relu(self.fc1(state))
state_value = F.relu(self.fc2(state_value))
v = self.v(state_value)
return v
def save_checkpoint(self):
T.save(self.state_dict(), self.checkpoint_file)
def load_checkpoint(self):
self.load_state_dict(T.load(self.checkpoint_file))
class ActorNetwork(nn.Module):
# returns a probability distribution
def __init__(self, alpha, input_dims, max_action, fc1_dims=256,
fc2_dims=256, n_actions=2, name='actor', chkpt_dir=r'CartPole\SAC\models'):
# max_action is multiplied with the sampled action (between -1 and 1) to map it to the real action range
super(ActorNetwork, self).__init__()
self.input_dims = input_dims
self.fc1_dims = fc1_dims
self.fc2_dims = fc2_dims
self.n_actions = n_actions
self.name = name
self.checkpoint_dir = chkpt_dir
self.checkpoint_file = os.path.join(self.checkpoint_dir, name+'_sac')
self.max_action = max_action
self.reparam_noise = 1e-6
self.fc1 = nn.Linear(*self.input_dims, self.fc1_dims)
self.fc2 = nn.Linear(self.fc1_dims, self.fc2_dims)
self.mu = nn.Linear(self.fc2_dims, self.n_actions)
# mean of probability distribution
self.sigma = nn.Linear(self.fc2_dims, self.n_actions)
# standard deviation of probability distribution
self.optimizer = optim.Adam(self.parameters(), lr=alpha)
self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')
self.to(self.device)
def forward(self, state):
prob = F.relu(self.fc1(state))
prob = F.relu(self.fc2(prob))
mu = self.mu(prob)
sigma = self.sigma(prob)
sigma = T.clamp(sigma, min=self.reparam_noise, max=1)
# clamp all values of sigma btw reparam_noise(almost 0) and 1
return mu, sigma
def sample_normal(self, state, reparameterize=True):
# to calculate the actual policy - required for continuous action spaces
# policy is a probability distribution that tells us probability of selecting any action in our action space given some state
mu, sigma = self.forward(state)
probabilities = Normal(mu, sigma)
if reparameterize:
actions = probabilities.rsample()
# just adds some extra noise
else:
actions = probabilities.sample()
# print(actions)
action = T.tanh(actions) * T.tensor(self.max_action).to(self.device)
log_probs = probabilities.log_prob(actions)
# log of probabilities for loss function
log_probs -= T.log(1-action.pow(2)+self.reparam_noise)
log_probs = log_probs.sum(1, keepdim=True)
return action, log_probs
def save_checkpoint(self):
T.save(self.state_dict(), self.checkpoint_file)
def load_checkpoint(self):
self.load_state_dict(T.load(self.checkpoint_file))
|
[
"dhruvgarg01@gmail.com"
] |
dhruvgarg01@gmail.com
|
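A minimal instantiation sketch for the networks above, assuming a CartPole-like 4-dimensional observation and illustrative learning rates:
actor = ActorNetwork(alpha=3e-4, input_dims=[4], max_action=1.0, n_actions=2)
critic = CriticNetwork(beta=3e-4, input_dims=[4], n_actions=2)
state = T.rand(1, 4).to(actor.device)  # fake batch of one observation
action, log_probs = actor.sample_normal(state, reparameterize=True)
q = critic(state, action)  # Q(s, a) estimate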
f14510c92f51411a5b1fbf7e23edff82daf6ca18
|
b18ef775594417725557839e2242916b1ca4cfd6
|
/python/Classes/Classes: Dealing with Complex Number.py
|
dd425c93223e1638470a88b5e16efd229f5c1ab5
|
[] |
no_license
|
sharique-ansari/Hackerrank-solutions
|
567a83ab6bd90bf2fbc0bcbbedd3eec75be5daf8
|
33c85d1671e64ae7d8ed79c03eb85122ee670dc5
|
refs/heads/master
| 2020-03-22T14:15:09.711607
| 2018-07-20T00:42:40
| 2018-07-20T00:42:40
| 140,164,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,489
|
py
|
import math
class Complex(object):
def __init__(self, real, imaginary):
self.real = real
self.imaginary = imaginary
def __add__(self, no):
return Complex(self.real + no.real, self.imaginary + no.imaginary)
def __sub__(self, no):
return Complex(self.real - no.real, self.imaginary - no.imaginary)
def __mul__(self, no):
return Complex(self.real * no.real - self.imaginary * no.imaginary,
self.real * no.imaginary + self.imaginary * no.real)
def __truediv__(self, no):
a = (no.real ** 2 + no.imaginary ** 2)
return self * Complex(no.real / a, -no.imaginary / a)
def mod(self):
return Complex(pow((self.real ** 2 + self.imaginary ** 2), 0.5), 0)
def __str__(self):
if self.imaginary == 0:
result = "%.2f+0.00i" % (self.real)
elif self.real == 0:
if self.imaginary >= 0:
result = "0.00+%.2fi" % (self.imaginary)
else:
result = "0.00-%.2fi" % (abs(self.imaginary))
elif self.imaginary > 0:
result = "%.2f+%.2fi" % (self.real, self.imaginary)
else:
result = "%.2f-%.2fi" % (self.real, abs(self.imaginary))
return result
if __name__ == '__main__':
c = map(float, input().split())
d = map(float, input().split())
x = Complex(*c)
y = Complex(*d)
print(*map(str, [x + y, x - y, x * y, x / y, x.mod(), y.mod()]), sep='\n')
|
[
"sharique098@gmail.com"
] |
sharique098@gmail.com
|
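A worked example for the class above, using the common sample input 2 1 and 5 6 (i.e. x = 2+1i, y = 5+6i); the six printed lines are:
7.00+7.00i (x + y)
-3.00-5.00i (x - y)
4.00+17.00i (x * y)
0.26-0.11i (x / y)
2.24+0.00i (mod of x, i.e. sqrt(5))
7.81+0.00i (mod of y, i.e. sqrt(61))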
7604931d489d0270763c4e39b5a4ad29ac7908aa
|
14688c03b10bba09af01ef6fd7720714ab805ac2
|
/core/migrations/0005_talk.py
|
22215c0f051addb4284395590447ac21c79de111
|
[] |
no_license
|
CodeAkio/eventex
|
440765b72c6a7848f2f140bd09d2c7e0bae91a86
|
3120a70268a375cd6f612c979d797f0f3dafaa16
|
refs/heads/master
| 2023-04-26T23:24:13.466668
| 2019-12-26T23:07:14
| 2019-12-26T23:07:14
| 218,868,491
| 0
| 0
| null | 2023-04-21T20:39:24
| 2019-10-31T21:59:26
|
Python
|
UTF-8
|
Python
| false
| false
| 683
|
py
|
# Generated by Django 2.2.6 on 2019-12-21 17:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20191219_2049'),
]
operations = [
migrations.CreateModel(
name='Talk',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('start', models.TimeField()),
('description', models.TextField()),
('speakers', models.ManyToManyField(to='core.Speaker')),
],
),
]
|
[
"codeakio@Mac-mini-de-Victor.local"
] |
codeakio@Mac-mini-de-Victor.local
|
e5580409284141245e84051d896e22d0fb6f3a7d
|
9e18a644070eb59c4b632b08acf57ef2a6cc4b39
|
/lib/read.py
|
dee895d95673e15a1959870758cf83d790dd2f7f
|
[] |
no_license
|
cwon789/cwon789-Seeing_through_fog_porjection_npz
|
a82339808ad96597c5c658ce26c42439a358c447
|
f00533f749ea122174f714b3d9376fabdcb7c21a
|
refs/heads/main
| 2023-05-25T04:52:19.775202
| 2021-05-21T15:02:54
| 2021-05-21T15:02:54
| 369,570,578
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,589
|
py
|
import numpy as np
import os
import json
from pyquaternion import Quaternion
def read_label(file, label_dir, camera_to_velodyne=None):
"""Read label file and return object list"""
file_name = file.split('.png')[0]
object_list = get_kitti_object_list(os.path.join(label_dir, file_name + '.txt'), camera_to_velodyne=camera_to_velodyne)
return object_list
def decode_visible_labels(value):
if value == "True":
return True
elif value == "False":
return False
else:
return None
def get_kitti_object_list(label_file, camera_to_velodyne=None):
"""Create dict for all objects of the label file, objects are labeled w.r.t KITTI definition"""
kitti_object_list = []
try:
with open(label_file.replace('.png', '.txt'), 'r') as file:
for line in file:
line = line.replace('\n', '') # remove '\n'
kitti_properties = line.split(' ')
object_dict = {
'identity': kitti_properties[0],
'truncated': float(kitti_properties[1]),
'occlusion': float(kitti_properties[2]),
'angle': float(kitti_properties[3]),
'xleft': int(round(float(kitti_properties[4]))),
'ytop': int(round(float(kitti_properties[5]))),
'xright': int(round(float(kitti_properties[6]))),
'ybottom': int(round(float(kitti_properties[7]))),
'height': float(kitti_properties[8]),
'width': float(kitti_properties[9]),
'length': float(kitti_properties[10]),
'posx': float(kitti_properties[11]),
'posy': float(kitti_properties[12]),
'posz': float(kitti_properties[13]),
'orient3d': float(kitti_properties[14]),
'rotx': float(kitti_properties[15]),
'roty': float(kitti_properties[16]),
'rotz': float(kitti_properties[17]),
'score': float(kitti_properties[18]),
'qx': float(kitti_properties[19]),
'qy': float(kitti_properties[20]),
'qz': float(kitti_properties[21]),
'qw': float(kitti_properties[22]),
'visibleRGB': decode_visible_labels(kitti_properties[23]),
'visibleGated': decode_visible_labels(kitti_properties[24]),
'visibleLidar': decode_visible_labels(kitti_properties[25]),
'visibleRadar': decode_visible_labels(kitti_properties[26]),
}
if camera_to_velodyne is not None:
pos = np.asarray([object_dict['posx'], object_dict['posy'], object_dict['posz'], 1])
pos_lidar = np.matmul(camera_to_velodyne, pos.T)
object_dict['posx_lidar'] = pos_lidar[0]
object_dict['posy_lidar'] = pos_lidar[1]
object_dict['posz_lidar'] = pos_lidar[2]
kitti_object_list.append(object_dict)
return kitti_object_list
except:
print('Problem occurred when reading label file!')
return []
def load_velodyne_scan(file):
"""Load and parse velodyne binary file"""
scan = np.fromfile(file, dtype=np.float32)
return scan.reshape((-1, 4))[:, :3]
#return scan
def load_calib_data(path_total_dataset, name_camera_calib, tf_tree):
"""
:param path_total_dataset: Path to dataset root dir
:param name_camera_calib: Camera calib file containing image intrinsic
:param tf_tree: TF (transformation) tree containing translations from velodyne to cameras
:return:
"""
with open(os.path.join(path_total_dataset, name_camera_calib), 'r') as f:
data_camera = json.load(f)
with open(os.path.join(path_total_dataset, tf_tree), 'r') as f:
data_extrinsics = json.load(f)
calib_dict = {
'calib_cam_stereo_left.json': 'cam_stereo_left_optical',
'calib_cam_stereo_right.json': 'cam_stereo_right_optical',
'calib_gated_bwv.json': 'bwv_cam_optical'
}
cam_name = calib_dict[name_camera_calib]
# Scan data extrinsics for transformation from lidar to camera
important_translations = ['lidar_hdl64_s3_roof', 'radar_ars300', cam_name]
translations = []
for item in data_extrinsics:
if item['child_frame_id'] in important_translations:
translations.append(item)
if item['child_frame_id'] == cam_name:
T_cam = item['transform']
elif item['child_frame_id'] == 'lidar_hdl64_s3_roof':
T_velodyne = item['transform']
elif item['child_frame_id'] == 'radar_ars300':
T_radar = item['transform']
# Use pyquaternion to setup rotation matrices properly
R_c_quaternion = Quaternion(w=T_cam['rotation']['w'] * 360 / 2 / np.pi, x=T_cam['rotation']['x'] * 360 / 2 / np.pi,
y=T_cam['rotation']['y'] * 360 / 2 / np.pi, z=T_cam['rotation']['z'] * 360 / 2 / np.pi)
R_v_quaternion = Quaternion(w=T_velodyne['rotation']['w'] * 360 / 2 / np.pi, x=T_velodyne['rotation']['x'] * 360 / 2 / np.pi,
y=T_velodyne['rotation']['y'] * 360 / 2 / np.pi, z=T_velodyne['rotation']['z'] * 360 / 2 / np.pi)
# Setup quaternion values as 3x3 orthogonal rotation matrices
R_c_matrix = R_c_quaternion.rotation_matrix
R_v_matrix = R_v_quaternion.rotation_matrix
# Setup translation Vectors
Tr_cam = np.asarray([T_cam['translation']['x'], T_cam['translation']['y'], T_cam['translation']['z']])
Tr_velodyne = np.asarray([T_velodyne['translation']['x'], T_velodyne['translation']['y'], T_velodyne['translation']['z']])
Tr_radar = np.asarray([T_radar['translation']['x'], T_radar['translation']['y'], T_radar['translation']['z']])
# Setup Translation Matrix camera to lidar -> ROS spans transformation from its children to its parents
# Therefore one inversion step is needed for zero_to_camera -> <parent_child>
zero_to_camera = np.zeros((3, 4))
zero_to_camera[0:3, 0:3] = R_c_matrix
zero_to_camera[0:3, 3] = Tr_cam
zero_to_camera = np.vstack((zero_to_camera, np.array([0, 0, 0, 1])))
zero_to_velodyne = np.zeros((3, 4))
zero_to_velodyne[0:3, 0:3] = R_v_matrix
zero_to_velodyne[0:3, 3] = Tr_velodyne
zero_to_velodyne = np.vstack((zero_to_velodyne, np.array([0, 0, 0, 1])))
zero_to_radar = zero_to_velodyne.copy()
zero_to_radar[0:3, 3] = Tr_radar
# Calculate total extrinsic transformation to camera
velodyne_to_camera = np.matmul(np.linalg.inv(zero_to_camera), zero_to_velodyne)
camera_to_velodyne = np.matmul(np.linalg.inv(zero_to_velodyne), zero_to_camera)
radar_to_camera = np.matmul(np.linalg.inv(zero_to_camera), zero_to_radar)
# Read projection matrix P and camera rectification matrix R
P = np.reshape(data_camera['P'], [3, 4])
# In our case rectification matrix R has to be equal to the identity as the projection matrix P contains the
# R matrix w.r.t KITTI definition
R = np.identity(4)
# Calculate total transformation matrix from velodyne to camera
vtc = np.matmul(np.matmul(P, R), velodyne_to_camera)
return velodyne_to_camera, camera_to_velodyne, P, R, vtc, radar_to_camera, zero_to_camera
|
[
"cwon789@naver.com"
] |
cwon789@naver.com
|
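A hypothetical call sketch for load_calib_data above (name_camera_calib must be one of the keys in calib_dict; the dataset root and TF-tree file name are illustrative):
velodyne_to_camera, camera_to_velodyne, P, R, vtc, radar_to_camera, zero_to_camera = load_calib_data(
'/path/to/dataset', 'calib_cam_stereo_left.json', 'tf_tree.json')
# vtc projects homogeneous lidar points into the left camera image: [u, v, w].T ~ vtc @ [x, y, z, 1].T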
ff22a34e9a956fa4c76ccb221f9d964d39375203
|
6cd3de9d6aa0c52602010aa857966d5dc4d57442
|
/mlprodict/onnxrt/ops_cpu/op_lp_normalization.py
|
b8f5b8b7274ac89c8fb4bd972bdfda60a72b2e4c
|
[
"MIT"
] |
permissive
|
xadupre/mlprodict
|
2307ca96eafeeafff08d5322184399bb5dc1c37e
|
f82c8a26a60104948c67849b1c4af95ca812c153
|
refs/heads/master
| 2022-12-10T18:50:36.953032
| 2020-09-03T08:53:58
| 2020-09-03T08:53:58
| 292,824,744
| 1
| 0
|
NOASSERTION
| 2020-09-04T10:56:45
| 2020-09-04T10:56:44
| null |
UTF-8
|
Python
| false
| false
| 837
|
py
|
# -*- encoding: utf-8 -*-
# pylint: disable=E0203,E1101,C0111
"""
@file
@brief Runtime operator.
"""
import numpy
from ._op import OpRunUnaryNum
class LpNormalization(OpRunUnaryNum):
atts = {'axis': -1, 'p': 2}
def __init__(self, onnx_node, desc=None, **options):
OpRunUnaryNum.__init__(self, onnx_node, desc=desc,
expected_attributes=LpNormalization.atts,
**options)
def _run(self, x): # pylint: disable=W0221
norm = numpy.power(numpy.power(x, self.p).sum(
axis=self.axis), 1. / self.p)
norm = numpy.expand_dims(norm, self.axis)
if self.inplaces.get(0, False):
return self._run_inplace(x, norm)
return (x / norm, )
def _run_inplace(self, x, norm):
x /= norm
return (x, )
|
[
"xavier.dupre@gmail.com"
] |
xavier.dupre@gmail.com
|
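A quick numpy sanity check of what _run above computes with the default atts (p=2, axis=-1): each row is rescaled to unit L2 norm.
import numpy
x = numpy.array([[3., 4.], [0., 5.]])
norm = numpy.expand_dims(numpy.power(numpy.power(x, 2).sum(axis=-1), 1. / 2), -1)  # [[5.], [5.]]
print(x / norm)  # [[0.6, 0.8], [0., 1.]]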
5b38841d59350b4d58e32b73c5ae85f640564118
|
b1eb11a4586ca7478720071f0b4435d97d181f62
|
/squad/src/fused_layer_norm.py
|
b9a96781f90daeef3ab98aa55bf44d5c21272fe3
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
houshuaishuai/course
|
1930db8585a7c12ff656b27a2c7c2543587e032f
|
bd25dbeb20008ab8451186cf7d7a15444a139335
|
refs/heads/master
| 2023-05-07T01:37:38.456039
| 2021-04-01T07:32:59
| 2021-04-01T07:32:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,003
|
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""fused layernorm"""
import numpy as np
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.common.parameter import Parameter
from mindspore.common.initializer import initializer
from mindspore.ops.primitive import constexpr
import mindspore.common.dtype as mstype
from mindspore.nn.cell import Cell
__all__ = ['FusedLayerNorm']
@constexpr
def get_shape_for_norm(x_shape, begin_norm_axis):
print("input_shape: ", x_shape)
norm_shape = x_shape[begin_norm_axis:]
output_shape = (1, -1, 1, int(np.prod(norm_shape)))
print("output_shape: ", output_shape)
return output_shape
class FusedLayerNorm(Cell):
r"""
Applies Layer Normalization over a mini-batch of inputs.
Layer normalization is widely used in recurrent neural networks. It applies
normalization over a mini-batch of inputs for each single training case as described
in the paper `Layer Normalization <https://arxiv.org/pdf/1607.06450.pdf>`_. Unlike batch
normalization, layer normalization performs exactly the same computation at training and
testing times. It can be described using the following formula. It is applied across all channels
and pixels of each single sample, not across the batch.
.. math::
y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
Args:
normalized_shape (Union(tuple[int], list[int])): The normalization is performed over axes
`begin_norm_axis ... R - 1`.
begin_norm_axis (int): The first normalization dimension: normalization will be performed along dimensions
`begin_norm_axis: rank(inputs)`, the value should be in [-1, rank(input)). Default: -1.
begin_params_axis (int): The first parameter (beta, gamma) dimension: scale and centering parameters
will have dimensions `begin_params_axis: rank(inputs)` and will be broadcast with
the normalized inputs accordingly, the value should be in [-1, rank(input)). Default: -1.
gamma_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the gamma weight.
The values of str refer to the function `initializer` including 'zeros', 'ones', 'xavier_uniform',
'he_uniform', etc. Default: 'ones'.
beta_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the beta weight.
The values of str refer to the function `initializer` including 'zeros', 'ones', 'xavier_uniform',
'he_uniform', etc. Default: 'zeros'.
use_batch_norm (bool): Whether to use batch normalization to process the input.
Inputs:
- **input_x** (Tensor) - The shape of 'input_x' is :math:`(x_1, x_2, ..., x_R)`,
and `input_shape[begin_norm_axis:]` is equal to `normalized_shape`.
Outputs:
Tensor, the normalized and scaled offset tensor, has the same shape and data type as the `input_x`.
Examples:
>>> x = Tensor(np.ones([20, 5, 10, 10]), mindspore.float32)
>>> shape1 = x.shape[1:]
>>> m = nn.LayerNorm(shape1, begin_norm_axis=1, begin_params_axis=1)
>>> m(x)
"""
def __init__(self,
normalized_shape,
begin_norm_axis=-1,
begin_params_axis=-1,
gamma_init='ones',
beta_init='zeros',
use_batch_norm=False):
super(FusedLayerNorm, self).__init__()
if not isinstance(normalized_shape, (tuple, list)):
raise TypeError("The type of 'normalized_shape' should be tuple[int] or list[int], but '{}' type is {}."
.format(normalized_shape, type(normalized_shape)))
self.normalized_shape = normalized_shape
self.begin_norm_axis = begin_norm_axis
self.begin_params_axis = begin_params_axis
self.gamma = Parameter(initializer(
gamma_init, normalized_shape), name="gamma")
self.beta = Parameter(initializer(
beta_init, normalized_shape), name="beta")
self.layer_norm = P.LayerNorm(begin_norm_axis=self.begin_norm_axis, begin_params_axis=self.begin_params_axis)
self.batch_norm = P.BatchNorm(is_training=True, epsilon=1e-5)
self.use_batch_norm = use_batch_norm
def construct(self, input_x):
"""Applies Layer Normalization over a mini-batch of inputs"""
if self.use_batch_norm and self.training:
ones = P.Fill()(mstype.float32, F.shape(input_x)[:self.begin_norm_axis], 1.0)
zeros = P.Fill()(mstype.float32, F.shape(input_x)[:self.begin_norm_axis], 0.0)
shape_x = F.shape(input_x)
norm_shape = get_shape_for_norm(shape_x, self.begin_norm_axis)
input_x = F.reshape(input_x, norm_shape)
output, _, _, _, _, _ = self.batch_norm(input_x, ones, zeros, None, None)
output = F.reshape(output, shape_x)
y = output * self.gamma + self.beta
else:
y, _, _ = self.layer_norm(input_x, self.gamma, self.beta)
return y
def extend_repr(self):
"""Display instance object as string."""
s = 'normalized_shape={}, begin_norm_axis={}, begin_params_axis={}, gamma={}, beta={}'.format(
self.normalized_shape, self.begin_norm_axis, self.begin_params_axis, self.gamma, self.beta)
return s
|
[
"zhengnengjin@huawei.com"
] |
zhengnengjin@huawei.com
|
31b45216c88ef58099dcd987e59f7ab88ce816d2
|
f86e28cb032ae88315a4d03b57eb2cf806d294a6
|
/grap_main/apps.py
|
bc4b5b1dd6245b167aeb24d1d13a942c29a08e1c
|
[] |
no_license
|
JjVera96/playbar
|
24398fdecbc12935da49ca0c9abffa4f83eff297
|
ddefdc3df7e24e185c423657a42c38eeb72c0ff1
|
refs/heads/master
| 2022-12-16T05:21:53.183454
| 2020-03-04T19:31:58
| 2020-03-04T19:31:58
| 244,983,743
| 0
| 0
| null | 2022-04-06T17:39:05
| 2020-03-04T19:16:17
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 159
|
py
|
# apps.py
from django.apps.config import AppConfig
class GrapMainConfig(AppConfig):
name = 'grap_main'
def ready(self):
from . import signals
|
[
"jjvera96@gmail.com"
] |
jjvera96@gmail.com
|
f2faa761dae3e182df133a6570d623251e4fa4ff
|
a15f20fec49aff81948abc2a390dcb2131caa1b7
|
/armstrong.py
|
d6d90e49c089c89858e637764d37a413c6b02b11
|
[] |
no_license
|
aarushmagotra/armstrong-number
|
1421640f34c966410de4d6f90625571078c094bc
|
fe21940ee0f857ce3de66af2433482d927e67205
|
refs/heads/main
| 2022-12-21T05:30:09.117396
| 2020-10-02T15:44:32
| 2020-10-02T15:44:32
| 300,637,856
| 0
| 1
| null | 2020-10-02T15:29:44
| 2020-10-02T14:19:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,412
|
py
|
def main ():
print()
print('WELCOME! Here I will help you find out if a number is an Armstrong number for power 3 :)')
print()
print('And if not, then I will tell you the next Armstrong number after the entered value')
print()
n = int(input('Enter the number you want to find the next Armstrong number of: '))
num = n
print()
print('Calculating... Please wait!!!')
print()
while True:
a_lst = []
x = str(n)
for i in range(len(x)):
a = int(x[i])
a_lst.append(a)
total_cube = 0
for i in a_lst:
i_cube = i ** 3
total_cube += i_cube
if total_cube == n:
if num == n:
print('The given number {} is itself an armstrong number for the power 3.'.format(n))
break
else:
print('The next armstrong number of {} is {} for power 3'.format(num, n))
break
else:
n += 1
fchoice()
def fchoice():
print()
choice = input('Would you like to find the Armstrong number for another number (y/n): ')
if choice == 'y':
main()
elif choice == 'n':
print()
print('Bye!!')
quit()
else:
print()
print('Invalid Input')
print()
print('Try again')
print()
fchoice()
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
aarushmagotra.noreply@github.com
|
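A worked example for the script above: entering 150 walks forward to 153, since 1**3 + 5**3 + 3**3 == 153, while entering 153 directly reports that the number is itself an Armstrong number for power 3.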
f0bea7110c4665b40940a96d7809ac081eddf1a6
|
718583bc7567810e3f041f83a65673d643833608
|
/bubble_chart/bubble-chart-exercise.py
|
65bc8f31559982e7a75e2ea1d6cdc028b5e3e074
|
[] |
no_license
|
gbdsantos/plotly-dashboard-with-dash
|
276dec4ddd9de3b3788d8e8768a8fe25559a2996
|
b0af57ee6ebcbff35b9e3fd374933ee3b5ea5f0f
|
refs/heads/master
| 2022-07-09T11:06:50.622408
| 2018-10-18T18:47:07
| 2018-10-18T18:47:07
| 148,810,714
| 0
| 0
| null | 2022-06-21T21:25:26
| 2018-09-14T15:50:59
|
HTML
|
UTF-8
|
Python
| false
| false
| 905
|
py
|
#######
# Objective: Create a bubble chart that compares three other features
# from the mpg.csv dataset. Fields include: 'mpg', 'cylinders', 'displacement'
# 'horsepower', 'weight', 'acceleration', 'model_year', 'origin', 'name'
#######
# Perform imports here
import pandas as pd
import plotly.offline as pyo
import plotly.graph_objs as go
# Create a DataFrame from the .csv file:
df = pd.read_csv('../data/mpg.csv')
# Create data by choosing fields for x, y and marker size attributes
data = go.Scatter(x=df['displacement'], y=df['acceleration'],
text=df['name'],
mode='markers',
marker=dict(size=df['weight']/400))
# Create a Layout with a title and axis labels
layout = go.Layout(title='My Bubble Solution', hovermode='closest')
figure = go.Figure(data=data, layout=layout)
pyo.plot(figure, filename='bubble-chart-exercise.html')
# Create
|
[
"gbsantos.it@gmail.com"
] |
gbsantos.it@gmail.com
|
bd3ca4f5a6607ccb16f36dcb692e8cc31ab821fa
|
48e32d67b984fc7505a9b1556b0273cede2848e4
|
/ske_customization/customizations_for_ske/internal_import/stockgroup_items.py
|
3b94e0b7a97680c7eaa06ff8052437286d1477b7
|
[
"MIT"
] |
permissive
|
akshay83/ske_customization
|
86c776d37000ed97ddee63bb5ee84901d610414a
|
910e8ca88ffc83554ebb23f7480901dba9f08221
|
refs/heads/master
| 2021-01-02T23:09:00.888981
| 2020-05-08T07:41:52
| 2020-05-08T07:41:52
| 98,892,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 603
|
py
|
import frappe
class InternalImportStockGroupItems:
def __init__(self, value):
self.process_node = value
self.process()
def process(self):
#print "DEBUG: PROCESSING: STOCK GROUP:"+self.process_node.stock_group
if not frappe.db.exists({"doctype":"Item Group","item_group_name": self.process_node.stock_group}):
doc = frappe.get_doc({"doctype":"Item Group","item_group_name": self.process_node.stock_group})
doc.parent_item_group = 'All Item Groups'
doc.is_group = 1
doc.insert(ignore_permissions=True)
#print "DEBUG: INSERTED: STOCK GROUP:"+self.process_node.stock_group
|
[
"mehta.akshay@gmail.com"
] |
mehta.akshay@gmail.com
|
10eaeac02a5dcc162ac6889c1a4182414870249d
|
4f49c1de4683bd00f5b831a0c7fd2b431b627be5
|
/object_properties_panel.py
|
36028619ed216e14b535d3acd7be96cd2d144287
|
[] |
no_license
|
PyrokinesisStudio/BlenderArchitectureAppTemplate
|
6ce1c4896b7eee423c24558f10bc32bf3a2bdaac
|
6b18bdca380d658288cd605c2e794473f57a04b0
|
refs/heads/master
| 2020-03-18T17:13:01.424253
| 2017-12-22T07:42:59
| 2017-12-22T07:42:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 47,162
|
py
|
import bpy
import math
from bpy.app.translations import pgettext_iface as iface_ #for decimate modifier
from . import unit, utils
enum_object_tabs = [('INFO'," ","Show the Main Information"),
('DISPLAY',"","Show Options for how the Object is Displayed"),
('MATERIAL',"","Show the materials assign to the object"),
('CONSTRAINTS',"","Show the constraints assigned to the object"),
('MODIFIERS',"","Show the modifiers assigned to the object"),
('MESHDATA',"","Show the Mesh Data Information"),
('CURVEDATA',"","Show the Curve Data Information"),
('TEXTDATA',"","Show the Text Data Information"),
('EMPTYDATA',"","Show the Empty Data Information"),
('LIGHTDATA',"","Show the Light Data Information"),
('CAMERADATA',"","Show the Camera Data Information"),
('DRIVERS',"","Show the Drivers assigned to the Object")]
def draw_modifier(mod,layout,obj):
def draw_show_expanded(mod,layout):
layout.prop(mod,'show_expanded',text="",emboss=False)
def draw_apply_close(layout,mod_name):
layout.operator('object.modifier_apply',text="",icon='EDIT',emboss=False).modifier = mod.name
layout.operator('object.modifier_remove',text="",icon='PANEL_CLOSE',emboss=False).modifier = mod.name
def draw_array_modifier(layout):
col = layout.column(align=True)
box = col.box()
row = box.row()
draw_show_expanded(mod,row)
row.prop(mod,'name',text="",icon='MOD_ARRAY')
draw_apply_close(row,mod.name)
if mod.show_expanded:
box = col.box()
box.prop(mod, "fit_type")
if mod.fit_type == 'FIXED_COUNT':
box.prop(mod, "count")
elif mod.fit_type == 'FIT_LENGTH':
box.prop(mod, "fit_length")
elif mod.fit_type == 'FIT_CURVE':
box.prop(mod, "curve")
box.separator()
split = box.split()
col = split.column()
col.prop(mod, "use_constant_offset")
sub = col.column()
sub.active = mod.use_constant_offset
sub.prop(mod, "constant_offset_displace", text="")
col.separator()
col.prop(mod, "use_merge_vertices", text="Merge")
sub = col.column()
sub.active = mod.use_merge_vertices
sub.prop(mod, "use_merge_vertices_cap", text="First Last")
sub.prop(mod, "merge_threshold", text="Distance")
col = split.column()
col.prop(mod, "use_relative_offset")
sub = col.column()
sub.active = mod.use_relative_offset
sub.prop(mod, "relative_offset_displace", text="")
col.separator()
col.prop(mod, "use_object_offset")
sub = col.column()
sub.active = mod.use_object_offset
sub.prop(mod, "offset_object", text="")
box.separator()
box.prop(mod, "start_cap")
box.prop(mod, "end_cap")
def draw_bevel_modifier(layout):
col = layout.column(align=True)
box = col.box()
row = box.row()
draw_show_expanded(mod,row)
row.prop(mod,'name',text="",icon='MOD_BEVEL')
draw_apply_close(row,mod.name)
if mod.show_expanded:
box = col.box()
split = box.split()
col = split.column()
col.prop(mod, "width")
col.prop(mod, "segments")
col.prop(mod, "profile")
col = split.column()
col.prop(mod, "use_only_vertices")
col.prop(mod, "use_clamp_overlap")
box.label(text="Limit Method:")
box.row().prop(mod, "limit_method", expand=True)
if mod.limit_method == 'ANGLE':
box.prop(mod, "angle_limit")
elif mod.limit_method == 'VGROUP':
box.label(text="Vertex Group:")
box.prop_search(mod, "vertex_group", obj, "vertex_groups", text="")
box.label(text="Width Method:")
box.row().prop(mod, "offset_type", expand=True)
def draw_boolean_modifier(layout):
col = layout.column(align=True)
box = col.box()
row = box.row()
draw_show_expanded(mod,row)
row.prop(mod,'name',text="",icon='MOD_BOOLEAN')
draw_apply_close(row,mod.name)
if mod.show_expanded:
box = col.box()
split = box.split()
col = split.column()
col.label(text="Operation:")
col.prop(mod, "operation", text="")
col = split.column()
col.label(text="Object:")
col.prop(mod, "object", text="")
def draw_curve_modifier(layout):
col = layout.column(align=True)
box = col.box()
row = box.row()
draw_show_expanded(mod,row)
row.prop(mod,'name',text="",icon='MOD_CURVE')
draw_apply_close(row,mod.name)
if mod.show_expanded:
box = col.box()
split = box.split()
col = split.column()
col.label(text="Object:")
col.prop(mod, "object", text="")
col = split.column()
col.label(text="Vertex Group:")
col.prop_search(mod, "vertex_group", obj, "vertex_groups", text="")
box.label(text="Deformation Axis:")
box.row().prop(mod, "deform_axis", expand=True)
def draw_decimate_modifier(layout):
col = layout.column(align=True)
box = col.box()
row = box.row()
draw_show_expanded(mod,row)
row.prop(mod,'name',text="",icon='MOD_DECIM')
draw_apply_close(row,mod.name)
if mod.show_expanded:
box = col.box()
decimate_type = mod.decimate_type
row = box.row()
row.prop(mod, "decimate_type", expand=True)
if decimate_type == 'COLLAPSE':
box.prop(mod, "ratio")
split = box.split()
row = split.row(align=True)
row.prop_search(mod, "vertex_group", obj, "vertex_groups", text="")
row.prop(mod, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
split.prop(mod, "use_collapse_triangulate")
elif decimate_type == 'UNSUBDIV':
box.prop(mod, "iterations")
else: # decimate_type == 'DISSOLVE':
box.prop(mod, "angle_limit")
box.prop(mod, "use_dissolve_boundaries")
box.label("Delimit:")
row = box.row()
row.prop(mod, "delimit")
box.label(text=iface_("Face Count: %d") % mod.face_count, translate=False)
def draw_edge_split_modifier(layout):
col = layout.column(align=True)
box = col.box()
row = box.row()
draw_show_expanded(mod,row)
row.prop(mod,'name',text="",icon='MOD_EDGESPLIT')
draw_apply_close(row,mod.name)
if mod.show_expanded:
box = col.box()
split = box.split()
col = split.column()
col.prop(mod, "use_edge_angle", text="Edge Angle")
sub = col.column()
sub.active = mod.use_edge_angle
sub.prop(mod, "split_angle")
split.prop(mod, "use_edge_sharp", text="Sharp Edges")
def draw_hook_modifier(layout):
col = layout.column(align=True)
box = col.box()
row = box.row()
draw_show_expanded(mod,row)
row.prop(mod,'name',text="",icon='HOOK')
draw_apply_close(row,mod.name)
if mod.show_expanded:
box = col.box()
split = box.split()
col = split.column()
col.label(text="Object:")
col.prop(mod, "object", text="")
if mod.object and mod.object.type == 'ARMATURE':
col.label(text="Bone:")
col.prop_search(mod, "subtarget", mod.object.data, "bones", text="")
col = split.column()
col.label(text="Vertex Group:")
col.prop_search(mod, "vertex_group", obj, "vertex_groups", text="")
layout.separator()
split = box.split()
# col = split.column()
# col.prop(mod, "falloff")
# col.prop(mod, "force", slider=True)
col = split.column()
col.operator("object.hook_reset", text="Reset")
col.operator("object.hook_recenter", text="Recenter")
if obj.mode == 'EDIT':
layout.separator()
row = layout.row()
row.operator("object.hook_select", text="Select")
row.operator("object.hook_assign", text="Assign")
def draw_mask_modifier(layout):
col = layout.column(align=True)
box = col.box()
row = box.row()
draw_show_expanded(mod,row)
row.prop(mod,'name',text="",icon='MOD_MASK')
draw_apply_close(row,mod.name)
if mod.show_expanded:
box = col.box()
split = box.split()
col = split.column()
col.label(text="Mode:")
col.prop(mod, "mode", text="")
col = split.column()
if mod.mode == 'ARMATURE':
col.label(text="Armature:")
col.prop(mod, "armature", text="")
elif mod.mode == 'VERTEX_GROUP':
col.label(text="Vertex Group:")
row = col.row(align=True)
row.prop_search(mod, "vertex_group", obj, "vertex_groups", text="")
sub = row.row(align=True)
sub.active = bool(mod.vertex_group)
sub.prop(mod, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
def draw_mirror_modifier(layout):
col = layout.column(align=True)
box = col.box()
row = box.row()
draw_show_expanded(mod,row)
row.prop(mod,'name',text="",icon='MOD_MIRROR')
draw_apply_close(row,mod.name)
if mod.show_expanded:
box = col.box()
split = box.split(percentage=0.25)
col = split.column()
col.label(text="Axis:")
col.prop(mod, "use_x")
col.prop(mod, "use_y")
col.prop(mod, "use_z")
col = split.column()
col.label(text="Options:")
col.prop(mod, "use_mirror_merge", text="Merge")
col.prop(mod, "use_clip", text="Clipping")
col.prop(mod, "use_mirror_vertex_groups", text="Vertex Groups")
col = split.column()
col.label(text="Textures:")
col.prop(mod, "use_mirror_u", text="U")
col.prop(mod, "use_mirror_v", text="V")
col = box.column()
if mod.use_mirror_merge is True:
col.prop(mod, "merge_threshold")
col.label(text="Mirror Object:")
col.prop(mod, "mirror_object", text="")
def draw_solidify_modifier(layout):
col = layout.column(align=True)
box = col.box()
row = box.row()
draw_show_expanded(mod,row)
row.prop(mod,'name',text="",icon='MOD_SOLIDIFY')
draw_apply_close(row,mod.name)
if mod.show_expanded:
box = col.box()
split = box.split()
col = split.column()
col.prop(mod, "thickness")
col.prop(mod, "thickness_clamp")
col.separator()
row = col.row(align=True)
row.prop_search(mod, "vertex_group", obj, "vertex_groups", text="")
sub = row.row(align=True)
sub.active = bool(mod.vertex_group)
sub.prop(mod, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
sub = col.row()
sub.active = bool(mod.vertex_group)
sub.prop(mod, "thickness_vertex_group", text="Factor")
col.label(text="Crease:")
col.prop(mod, "edge_crease_inner", text="Inner")
col.prop(mod, "edge_crease_outer", text="Outer")
col.prop(mod, "edge_crease_rim", text="Rim")
col = split.column()
col.prop(mod, "offset")
col.prop(mod, "use_flip_normals")
col.prop(mod, "use_even_offset")
col.prop(mod, "use_quality_normals")
col.prop(mod, "use_rim")
col.separator()
col.label(text="Material Index Offset:")
sub = col.column()
row = sub.split(align=True, percentage=0.4)
row.prop(mod, "material_offset", text="")
row = row.row(align=True)
row.active = mod.use_rim
row.prop(mod, "material_offset_rim", text="Rim")
def draw_subsurf_modifier(layout):
col = layout.column(align=True)
box = col.box()
row = box.row()
draw_show_expanded(mod,row)
row.prop(mod,'name',text="",icon='MOD_SUBSURF')
draw_apply_close(row,mod.name)
if mod.show_expanded:
box = col.box()
box.row().prop(mod, "subdivision_type", expand=True)
split = box.split()
col = split.column()
col.label(text="Subdivisions:")
col.prop(mod, "levels", text="View")
col.prop(mod, "render_levels", text="Render")
col = split.column()
col.label(text="Options:")
col.prop(mod, "use_subsurf_uv")
col.prop(mod, "show_only_control_edges")
def draw_skin_modifier(layout):
col = layout.column(align=True)
box = col.box()
row = box.row()
draw_show_expanded(mod,row)
row.prop(mod,'name',text="",icon='MOD_SKIN')
draw_apply_close(row,mod.name)
if mod.show_expanded:
box = col.box()
box.operator("object.skin_armature_create", text="Create Armature")
box.separator()
col = box.column(align=True)
col.prop(mod, "branch_smoothing")
col.prop(mod, "use_smooth_shade")
split = box.split()
col = split.column()
col.label(text="Selected Vertices:")
sub = col.column(align=True)
sub.operator("object.skin_loose_mark_clear", text="Mark Loose").action = 'MARK'
sub.operator("object.skin_loose_mark_clear", text="Clear Loose").action = 'CLEAR'
sub = col.column()
sub.operator("object.skin_root_mark", text="Mark Root")
sub.operator("object.skin_radii_equalize", text="Equalize Radii")
col = split.column()
col.label(text="Symmetry Axes:")
col.prop(mod, "use_x_symmetry")
col.prop(mod, "use_y_symmetry")
col.prop(mod, "use_z_symmetry")
def draw_triangulate_modifier(layout):
col = layout.column(align=True)
box = col.box()
row = box.row()
draw_show_expanded(mod,row)
row.prop(mod,'name',text="",icon='MOD_TRIANGULATE')
draw_apply_close(row,mod.name)
if mod.show_expanded:
box = col.box()
row = box.row()
col = row.column()
col.label(text="Quad Method:")
col.prop(mod, "quad_method", text="")
col = row.column()
col.label(text="Ngon Method:")
col.prop(mod, "ngon_method", text="")
def draw_simple_deform_modifier(layout):
col = layout.column(align=True)
box = col.box()
row = box.row()
draw_show_expanded(mod,row)
row.prop(mod,'name',text="",icon='MOD_SIMPLEDEFORM')
draw_apply_close(row,mod.name)
if mod.show_expanded:
box = col.box()
box.row().prop(mod, "deform_method", expand=True)
split = box.split()
col = split.column()
col.label(text="Vertex Group:")
col.prop_search(mod, "vertex_group", obj, "vertex_groups", text="")
split = box.split()
col = split.column()
col.label(text="Origin:")
col.prop(mod, "origin", text="")
if mod.deform_method in {'TAPER', 'STRETCH', 'TWIST'}:
col.label(text="Lock:")
col.prop(mod, "lock_x")
col.prop(mod, "lock_y")
col = split.column()
col.label(text="Deform:")
if mod.deform_method in {'TAPER', 'STRETCH'}:
col.prop(mod, "factor")
else:
col.prop(mod, "angle")
col.prop(mod, "limits", slider=True)
def draw_wireframe_modifier(layout):
col = layout.column(align=True)
box = col.box()
row = box.row()
draw_show_expanded(mod,row)
row.prop(mod,'name',text="",icon='MOD_WIREFRAME')
draw_apply_close(row,mod.name)
if mod.show_expanded:
box = col.box()
has_vgroup = bool(mod.vertex_group)
split = box.split()
col = split.column()
col.prop(mod, "thickness", text="Thickness")
row = col.row(align=True)
row.prop_search(mod, "vertex_group", obj, "vertex_groups", text="")
sub = row.row(align=True)
sub.active = has_vgroup
sub.prop(mod, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
row = col.row(align=True)
row.active = has_vgroup
row.prop(mod, "thickness_vertex_group", text="Factor")
col.prop(mod, "use_crease", text="Crease Edges")
col.prop(mod, "crease_weight", text="Crease Weight")
col = split.column()
col.prop(mod, "offset")
col.prop(mod, "use_even_offset", text="Even Thickness")
col.prop(mod, "use_relative_offset", text="Relative Thickness")
col.prop(mod, "use_boundary", text="Boundary")
col.prop(mod, "use_replace", text="Replace Original")
col.prop(mod, "material_offset", text="Material Offset")
if mod.type == 'ARRAY':
draw_array_modifier(layout)
elif mod.type == 'BEVEL':
draw_bevel_modifier(layout)
elif mod.type == 'BOOLEAN':
draw_boolean_modifier(layout)
elif mod.type == 'CURVE':
draw_curve_modifier(layout)
elif mod.type == 'DECIMATE':
draw_decimate_modifier(layout)
elif mod.type == 'EDGE_SPLIT':
draw_edge_split_modifier(layout)
elif mod.type == 'HOOK':
draw_hook_modifier(layout)
elif mod.type == 'MASK':
draw_mask_modifier(layout)
elif mod.type == 'MIRROR':
draw_mirror_modifier(layout)
elif mod.type == 'SOLIDIFY':
draw_solidify_modifier(layout)
elif mod.type == 'SUBSURF':
draw_subsurf_modifier(layout)
elif mod.type == 'SKIN':
draw_skin_modifier(layout)
elif mod.type == 'SIMPLE_DEFORM':
draw_simple_deform_modifier(layout)
elif mod.type == 'TRIANGULATE':
draw_triangulate_modifier(layout)
elif mod.type == 'WIREFRAME':
draw_wireframe_modifier(layout)
else:
row = layout.row()
row.label(mod.name + " view ")
def draw_constraint(con,layout,obj):
    def draw_show_expanded(con,layout):
        # both branches drew the same widget, so a single call is enough
        layout.prop(con,'show_expanded',text="",emboss=False)
def space_template(layout, con, target=True, owner=True):
if target or owner:
split = layout.split(percentage=0.2)
split.label(text="Space:")
row = split.row()
if target:
row.prop(con, "target_space", text="")
if target and owner:
row.label(icon='ARROW_LEFTRIGHT')
if owner:
row.prop(con, "owner_space", text="")
def target_template(layout, con, subtargets=True):
layout.prop(con, "target") # XXX limiting settings for only 'curves' or some type of object
if con.target and subtargets:
if con.target.type == 'ARMATURE':
layout.prop_search(con, "subtarget", con.target.data, "bones", text="Bone")
if hasattr(con, "head_tail"):
row = layout.row()
row.label(text="Head/Tail:")
row.prop(con, "head_tail", text="")
elif con.target.type in {'MESH', 'LATTICE'}:
layout.prop_search(con, "subtarget", con.target, "vertex_groups", text="Vertex Group")
def draw_copy_location_constraint(layout):
col = layout.column(align=True)
box = col.template_constraint(con)
if con.show_expanded:
target_template(box, con)
split = box.split()
col = split.column()
col.prop(con, "use_x", text="X")
sub = col.column()
sub.active = con.use_x
sub.prop(con, "invert_x", text="Invert")
col = split.column()
col.prop(con, "use_y", text="Y")
sub = col.column()
sub.active = con.use_y
sub.prop(con, "invert_y", text="Invert")
col = split.column()
col.prop(con, "use_z", text="Z")
sub = col.column()
sub.active = con.use_z
sub.prop(con, "invert_z", text="Invert")
box.prop(con, "use_offset")
space_template(box, con)
if con.type not in {'RIGID_BODY_JOINT', 'NULL'}:
box.prop(con, "influence")
def draw_copy_rotation_constraint(layout):
col = layout.column(align=True)
box = col.template_constraint(con)
if con.show_expanded:
target_template(box, con)
split = box.split()
col = split.column()
col.prop(con, "use_x", text="X")
sub = col.column()
sub.active = con.use_x
sub.prop(con, "invert_x", text="Invert")
col = split.column()
col.prop(con, "use_y", text="Y")
sub = col.column()
sub.active = con.use_y
sub.prop(con, "invert_y", text="Invert")
col = split.column()
col.prop(con, "use_z", text="Z")
sub = col.column()
sub.active = con.use_z
sub.prop(con, "invert_z", text="Invert")
box.prop(con, "use_offset")
space_template(box, con)
if con.type not in {'RIGID_BODY_JOINT', 'NULL'}:
box.prop(con, "influence")
def draw_copy_scale_constraint(layout):
col = layout.column(align=True)
box = col.template_constraint(con)
if con.show_expanded:
target_template(box, con)
row = box.row(align=True)
row.prop(con, "use_x", text="X")
row.prop(con, "use_y", text="Y")
row.prop(con, "use_z", text="Z")
box.prop(con, "use_offset")
space_template(box, con)
if con.type not in {'RIGID_BODY_JOINT', 'NULL'}:
box.prop(con, "influence")
def draw_copy_transforms_constraint(layout):
col = layout.column(align=True)
box = col.template_constraint(con)
if con.show_expanded:
target_template(box, con)
space_template(box, con)
if con.type not in {'RIGID_BODY_JOINT', 'NULL'}:
box.prop(con, "influence")
def draw_limit_distance_constraint(layout):
col = layout.column(align=True)
box = col.template_constraint(con)
if con.show_expanded:
target_template(box, con)
col = box.column(align=True)
col.prop(con, "distance")
col.operator("constraint.limitdistance_reset")
row = box.row()
row.label(text="Clamp Region:")
row.prop(con, "limit_mode", text="")
row = box.row()
row.prop(con, "use_transform_limit")
row.label()
space_template(box, con)
if con.type not in {'RIGID_BODY_JOINT', 'NULL'}:
box.prop(con, "influence")
def draw_limit_location_constraint(layout):
col = layout.column(align=True)
box = col.template_constraint(con)
if con.show_expanded:
split = box.split()
col = split.column()
col.prop(con, "use_min_x")
sub = col.column()
sub.active = con.use_min_x
sub.prop(con, "min_x", text="")
col.prop(con, "use_max_x")
sub = col.column()
sub.active = con.use_max_x
sub.prop(con, "max_x", text="")
col = split.column()
col.prop(con, "use_min_y")
sub = col.column()
sub.active = con.use_min_y
sub.prop(con, "min_y", text="")
col.prop(con, "use_max_y")
sub = col.column()
sub.active = con.use_max_y
sub.prop(con, "max_y", text="")
col = split.column()
col.prop(con, "use_min_z")
sub = col.column()
sub.active = con.use_min_z
sub.prop(con, "min_z", text="")
col.prop(con, "use_max_z")
sub = col.column()
sub.active = con.use_max_z
sub.prop(con, "max_z", text="")
row = box.row()
row.prop(con, "use_transform_limit")
row.label()
row = box.row()
row.label(text="Convert:")
row.prop(con, "owner_space", text="")
if con.type not in {'RIGID_BODY_JOINT', 'NULL'}:
box.prop(con, "influence")
def draw_limit_rotation_constraint(layout):
col = layout.column(align=True)
box = col.template_constraint(con)
if con.show_expanded:
split = box.split()
col = split.column(align=True)
col.prop(con, "use_limit_x")
sub = col.column(align=True)
sub.active = con.use_limit_x
sub.prop(con, "min_x", text="Min")
sub.prop(con, "max_x", text="Max")
col = split.column(align=True)
col.prop(con, "use_limit_y")
sub = col.column(align=True)
sub.active = con.use_limit_y
sub.prop(con, "min_y", text="Min")
sub.prop(con, "max_y", text="Max")
col = split.column(align=True)
col.prop(con, "use_limit_z")
sub = col.column(align=True)
sub.active = con.use_limit_z
sub.prop(con, "min_z", text="Min")
sub.prop(con, "max_z", text="Max")
box.prop(con, "use_transform_limit")
row = box.row()
row.label(text="Convert:")
row.prop(con, "owner_space", text="")
if con.type not in {'RIGID_BODY_JOINT', 'NULL'}:
box.prop(con, "influence")
def draw_limit_scale_constraint(layout):
col = layout.column(align=True)
box = col.template_constraint(con)
if con.show_expanded:
split = box.split()
col = split.column()
col.prop(con, "use_min_x")
sub = col.column()
sub.active = con.use_min_x
sub.prop(con, "min_x", text="")
col.prop(con, "use_max_x")
sub = col.column()
sub.active = con.use_max_x
sub.prop(con, "max_x", text="")
col = split.column()
col.prop(con, "use_min_y")
sub = col.column()
sub.active = con.use_min_y
sub.prop(con, "min_y", text="")
col.prop(con, "use_max_y")
sub = col.column()
sub.active = con.use_max_y
sub.prop(con, "max_y", text="")
col = split.column()
col.prop(con, "use_min_z")
sub = col.column()
sub.active = con.use_min_z
sub.prop(con, "min_z", text="")
col.prop(con, "use_max_z")
sub = col.column()
sub.active = con.use_max_z
sub.prop(con, "max_z", text="")
row = box.row()
row.prop(con, "use_transform_limit")
row.label()
row = box.row()
row.label(text="Convert:")
row.prop(con, "owner_space", text="")
if con.type not in {'RIGID_BODY_JOINT', 'NULL'}:
box.prop(con, "influence")
if con.type == 'COPY_LOCATION':
draw_copy_location_constraint(layout)
elif con.type == 'COPY_ROTATION':
draw_copy_rotation_constraint(layout)
elif con.type == 'COPY_SCALE':
draw_copy_scale_constraint(layout)
elif con.type == 'COPY_TRANSFORMS':
draw_copy_transforms_constraint(layout)
elif con.type == 'LIMIT_DISTANCE':
draw_limit_distance_constraint(layout)
elif con.type == 'LIMIT_LOCATION':
draw_limit_location_constraint(layout)
elif con.type == 'LIMIT_ROTATION':
draw_limit_rotation_constraint(layout)
elif con.type == 'LIMIT_SCALE':
draw_limit_scale_constraint(layout)
else:
row = layout.row()
row.label(con.name + " view ")
def draw_object_properties(layout,obj,context):
props = get_scene_props(bpy.context.scene)
col = layout.column(align=True)
box = col.box()
col = box.column(align=True)
row = col.row(align=True)
draw_object_tabs(row,obj)
box = col.box()
col = box.column()
if props.tabs == 'INFO':
draw_object_info(col,obj)
if props.tabs == 'DISPLAY':
# box = col.box()
row = col.row()
row.prop(obj,'draw_type',expand=True)
box.prop(obj,'hide_select')
box.prop(obj,'hide')
box.prop(obj,'hide_render')
box.prop(obj,'show_x_ray',icon='GHOST_ENABLED',text='Show X-Ray')
box.prop(obj.cycles_visibility,'camera',icon='CAMERA_DATA',text='Show in Viewport Render')
if props.tabs == 'MATERIAL':
draw_object_materials(col,obj,context)
if props.tabs == 'CONSTRAINTS':
# row = col.row()
col.operator_menu_enum("object.constraint_add", "type", text="Add Constraint",icon='CONSTRAINT_DATA')
# row.operator_menu_enum("fd_object.add_constraint", "type", icon='CONSTRAINT_DATA')
# row.operator("fd_object.collapse_all_constraints",text="",icon='FULLSCREEN_EXIT')
for con in obj.constraints:
draw_constraint(con,col,obj)
if props.tabs == 'MODIFIERS':
# row = col.row()
col.operator_menu_enum("object.modifier_add", "type",icon='MODIFIER')
# row.operator("fd_object.collapse_all_modifiers",text="",icon='FULLSCREEN_EXIT')
for mod in obj.modifiers:
draw_modifier(mod,col,obj)
if props.tabs == 'MESHDATA':
pass
if props.tabs == 'CURVEDATA':
pass
if props.tabs == 'TEXTDATA':
pass
if props.tabs == 'EMPTYDATA':
pass
if props.tabs == 'LIGHTDATA':
pass
if props.tabs == 'CAMERADATA':
pass
if props.tabs == 'DRIVERS':
draw_object_drivers(col,obj)
def draw_object_tabs(layout,obj):
props = get_scene_props(bpy.context.scene)
layout.prop_enum(props, "tabs", 'INFO', icon="BLANK1" if props.tabs == 'INFO' else "INFO", text="Info" if props.tabs == 'INFO' else "")
if obj.type == 'MESH':
layout.prop_enum(props, "tabs", 'DISPLAY', icon="BLANK1" if props.tabs == 'DISPLAY' else "RESTRICT_VIEW_OFF", text="Display" if props.tabs == 'DISPLAY' else "")
layout.prop_enum(props, "tabs", 'MATERIAL', icon="BLANK1" if props.tabs == 'MATERIAL' else "MATERIAL", text="Material" if props.tabs == 'MATERIAL' else "")
layout.prop_enum(props, "tabs", 'CONSTRAINTS', icon="BLANK1" if props.tabs == 'CONSTRAINTS' else "CONSTRAINT", text="Constraints" if props.tabs == 'CONSTRAINTS' else "")
layout.prop_enum(props, "tabs", 'MODIFIERS', icon="BLANK1" if props.tabs == 'MODIFIERS' else "MODIFIER", text="Modifiers" if props.tabs == 'MODIFIERS' else "")
layout.prop_enum(props, "tabs", 'MESHDATA', icon="BLANK1" if props.tabs == 'MESHDATA' else "MESH_DATA", text="Data" if props.tabs == 'MESHDATA' else "")
if obj.type == 'CURVE':
layout.prop_enum(props, "tabs", 'DISPLAY', icon='RESTRICT_VIEW_OFF', text="")
layout.prop_enum(props, "tabs", 'MATERIAL', icon='MATERIAL', text="")
layout.prop_enum(props, "tabs", 'CONSTRAINTS', icon='CONSTRAINT', text="")
layout.prop_enum(props, "tabs", 'MODIFIERS', icon='MODIFIER', text="")
layout.prop_enum(props, "tabs", 'CURVEDATA', icon='CURVE_DATA', text="")
if obj.type == 'FONT':
layout.prop_enum(props, "tabs", 'DISPLAY', icon='RESTRICT_VIEW_OFF', text="")
layout.prop_enum(props, "tabs", 'MATERIAL', icon='MATERIAL', text="")
layout.prop_enum(props, "tabs", 'CONSTRAINTS', icon='CONSTRAINT', text="")
layout.prop_enum(props, "tabs", 'MODIFIERS', icon='MODIFIER', text="")
layout.prop_enum(props, "tabs", 'TEXTDATA', icon='FONT_DATA', text="")
if obj.type == 'EMPTY':
layout.prop_enum(props, "tabs", 'DISPLAY', icon='RESTRICT_VIEW_OFF', text="")
layout.prop_enum(props, "tabs", 'CONSTRAINTS', icon='CONSTRAINT', text="")
layout.prop_enum(props, "tabs", 'EMPTYDATA', icon='EMPTY_DATA', text="")
if obj.type == 'LAMP':
layout.prop_enum(props, "tabs", 'DISPLAY', icon='RESTRICT_VIEW_OFF', text="")
layout.prop_enum(props, "tabs", 'CONSTRAINTS', icon='CONSTRAINT', text="")
layout.prop_enum(props, "tabs", 'LIGHTDATA', icon='LAMP_SPOT', text="")
if obj.type == 'CAMERA':
layout.prop_enum(props, "tabs", 'CONSTRAINTS', icon='CONSTRAINT', text="")
layout.prop_enum(props, "tabs", 'CAMERADATA', icon='OUTLINER_DATA_CAMERA', text="")
if obj.type == 'ARMATURE':
layout.prop_enum(props, "tabs", 'DISPLAY', icon='RESTRICT_VIEW_OFF', text="")
layout.prop_enum(props, "tabs", 'CONSTRAINTS', icon='CONSTRAINT', text="")
layout.prop_enum(props, "tabs", 'DRIVERS', icon="BLANK1" if props.tabs == 'DRIVERS' else "AUTO", text="Drivers" if props.tabs == 'DRIVERS' else "")
def draw_object_info(layout,obj):
# box = layout.box()
row = layout.row()
row.prop(obj,'name')
if obj.type in {'MESH','CURVE','LATTICE','TEXT'}:
pass
# row.operator('fd_object.toggle_edit_mode',text="",icon='EDITMODE_HLT').object_name = obj.name
has_hook_modifier = False
for mod in obj.modifiers:
if mod.type == 'HOOK':
has_hook_modifier = True
has_shape_keys = False
if obj.type == 'MESH':
if obj.data.shape_keys:
if len(obj.data.shape_keys.key_blocks) > 0:
has_shape_keys = True
if has_hook_modifier or has_shape_keys:
row = layout.row()
col = row.column(align=True)
col.label("Dimension")
col.label("X: " + str(obj.dimensions.x))
col.label("Y: " + str(obj.dimensions.y))
col.label("Z: " + str(obj.dimensions.z))
col = row.column(align=True)
col.label("Location")
col.label("X: " + str(obj.location.x))
col.label("Y: " + str(obj.location.y))
col.label("Z: " + str(obj.location.z))
col = row.column(align=True)
col.label("Rotation")
col.label("X: " + str(round(math.degrees(obj.rotation_euler.x),4)))
col.label("Y: " + str(round(math.degrees(obj.rotation_euler.y),4)))
col.label("Z: " + str(round(math.degrees(obj.rotation_euler.z),4)))
if has_hook_modifier:
layout.operator("fd_object.apply_hook_modifiers",icon='HOOK').object_name = obj.name
if has_shape_keys:
layout.operator("fd_object.apply_shape_keys",icon='SHAPEKEY_DATA').object_name = obj.name
else:
if obj.type not in {'EMPTY','CAMERA','LAMP'}:
layout.label('Dimensions:')
col = layout.column(align=True)
#X
row = col.row(align=True)
row.prop(obj,"lock_scale",index=0,text="")
if obj.lock_scale[0]:
row.label("X: " + str(obj.dimensions.x))
else:
row.prop(obj,"dimensions",index=0,text="X")
#Y
row = col.row(align=True)
row.prop(obj,"lock_scale",index=1,text="")
if obj.lock_scale[1]:
row.label("Y: " + str(obj.dimensions.y))
else:
row.prop(obj,"dimensions",index=1,text="Y")
#Z
row = col.row(align=True)
row.prop(obj,"lock_scale",index=2,text="")
if obj.lock_scale[2]:
row.label("Z: " + str(obj.dimensions.z))
else:
row.prop(obj,"dimensions",index=2,text="Z")
col1 = layout.row()
if obj:
col2 = col1.split()
col = col2.column(align=True)
col.label('Location:')
#X
row = col.row(align=True)
row.prop(obj,"lock_location",index=0,text="")
if obj.lock_location[0]:
row.label("X: " + str(obj.location.x))
else:
row.prop(obj,"location",index=0,text="X")
#Y
row = col.row(align=True)
row.prop(obj,"lock_location",index=1,text="")
if obj.lock_location[1]:
row.label("Y: " + str(obj.location.y))
else:
row.prop(obj,"location",index=1,text="Y")
#Z
row = col.row(align=True)
row.prop(obj,"lock_location",index=2,text="")
if obj.lock_location[2]:
row.label("Z: " + str(obj.location.z))
else:
row.prop(obj,"location",index=2,text="Z")
col2 = col1.split()
col = col2.column(align=True)
col.label('Rotation:')
#X
row = col.row(align=True)
row.prop(obj,"lock_rotation",index=0,text="")
if obj.lock_rotation[0]:
row.label("X: " + str(round(math.degrees(obj.rotation_euler.x),4)))
else:
row.prop(obj,"rotation_euler",index=0,text="X")
#Y
row = col.row(align=True)
row.prop(obj,"lock_rotation",index=1,text="")
if obj.lock_rotation[1]:
row.label("Y: " + str(round(math.degrees(obj.rotation_euler.y),4)))
else:
row.prop(obj,"rotation_euler",index=1,text="Y")
#Z
row = col.row(align=True)
row.prop(obj,"lock_rotation",index=2,text="")
if obj.lock_rotation[2]:
row.label("Y: " + str(round(math.degrees(obj.rotation_euler.z),4)))
else:
row.prop(obj,"rotation_euler",index=2,text="Z")
# row = box.row()
# row.prop(obj.mv,'comment')
def draw_object_materials(layout,obj,context):
mat = None
ob = context.object
slot = None
space = context.space_data
if ob:
mat = ob.active_material
if ob:
is_sortable = len(ob.material_slots) > 1
rows = 1
if (is_sortable):
rows = 4
row = layout.row()
row.template_list("MATERIAL_UL_matslots", "", ob, "material_slots", ob, "active_material_index", rows=rows)
col = row.column(align=True)
col.operator("object.material_slot_add", icon='ZOOMIN', text="")
col.operator("object.material_slot_remove", icon='ZOOMOUT', text="")
col.menu("MATERIAL_MT_specials", icon='DOWNARROW_HLT', text="")
if is_sortable:
col.separator()
col.operator("object.material_slot_move", icon='TRIA_UP', text="").direction = 'UP'
col.operator("object.material_slot_move", icon='TRIA_DOWN', text="").direction = 'DOWN'
if ob.mode == 'EDIT':
row = layout.row(align=True)
row.operator("object.material_slot_assign", text="Assign")
row.operator("object.material_slot_select", text="Select")
row.operator("object.material_slot_deselect", text="Deselect")
# split = layout.split(percentage=0.65)
if ob:
layout.template_ID(ob, "active_material", new="material.new")
row = layout.row()
if slot:
row.prop(slot, "link", text="")
else:
row.label()
elif mat:
layout.template_preview(mat)
# split.template_ID(space, "pin_id")
# split.separator()
if mat:
layout.template_preview(mat)
if obj.type in {'MESH','CURVE'}:
pass
if obj.mode == 'EDIT':
row = layout.row(align=True)
row.operator("object.material_slot_assign", text="Assign")
row.operator("object.material_slot_select", text="Select")
row.operator("object.material_slot_deselect", text="Deselect")
layout.operator('fd_general.open_new_window',text="Open Material Editor",icon='NODETREE').space_type = 'NODE_EDITOR'
def draw_object_drivers(layout,obj):
if obj:
if not obj.animation_data:
layout.label("There are no drivers assigned to the object",icon='ERROR')
else:
if len(obj.animation_data.drivers) == 0:
layout.label("There are no drivers assigned to the object",icon='ERROR')
for DR in obj.animation_data.drivers:
box = layout.box()
row = box.row()
DriverName = DR.data_path
if DriverName in {"location","rotation_euler","dimensions" ,"lock_scale",'lock_location','lock_rotation'}:
if DR.array_index == 0:
DriverName = DriverName + " X"
if DR.array_index == 1:
DriverName = DriverName + " Y"
if DR.array_index == 2:
DriverName = DriverName + " Z"
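                # obj.path_resolve(DR.data_path) would be an eval()-free way to read this value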
value = eval('bpy.data.objects["' + obj.name + '"].' + DR.data_path)
if type(value).__name__ == 'str':
row.label(DriverName + " = " + str(value),icon='AUTO')
elif type(value).__name__ == 'float':
row.label(DriverName + " = " + str(unit.meter_to_active_unit(value)),icon='AUTO')
elif type(value).__name__ == 'int':
row.label(DriverName + " = " + str(value),icon='AUTO')
elif type(value).__name__ == 'bool':
row.label(DriverName + " = " + str(value),icon='AUTO')
elif type(value).__name__ == 'bpy_prop_array':
row.label(DriverName + " = " + str(value[DR.array_index]),icon='AUTO')
elif type(value).__name__ == 'Vector':
row.label(DriverName + " = " + str(unit.meter_to_active_unit(value[DR.array_index])),icon='AUTO')
elif type(value).__name__ == 'Euler':
row.label(DriverName + " = " + str(unit.meter_to_active_unit(value[DR.array_index])),icon='AUTO')
else:
row.label(DriverName + " = " + str(type(value)),icon='AUTO')
# props = row.operator("fd_driver.add_variable_to_object",text="",icon='ZOOMIN')
# props.object_name = obj.name
# props.data_path = DR.data_path
# props.array_index = DR.array_index
# obj_bp = utils.get_assembly_bp(obj)
# if obj_bp:
# props = row.operator('fd_driver.get_vars_from_object',text="",icon='DRIVER')
# props.object_name = obj.name
# props.var_object_name = obj_bp.name
# props.data_path = DR.data_path
# props.array_index = DR.array_index
utils.draw_driver_expression(box,DR)
# draw_add_variable_operators(box,obj.name,DR.data_path,DR.array_index)
utils.draw_driver_variables(box,DR,obj.name)
class PANEL_object_properties(bpy.types.Panel):
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_label = " "
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
if context.object:
return True
else:
return False
def draw_header(self, context):
layout = self.layout
obj = context.object
layout.label(text="Object: " + obj.name,icon='OBJECT_DATA')
def draw(self, context):
layout = self.layout
obj = context.object
if obj:
draw_object_properties(layout,obj,context)
class OPS_open_new_window(bpy.types.Operator):
bl_idname = "fd_general.open_new_window"
bl_label = "Open New Window"
space_type = bpy.props.StringProperty(name="Space Type")
@classmethod
def poll(cls, context):
return True
def execute(self, context):
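        # Blender has no generic "open a new window showing space X" operator,
        # so open the single-area User Preferences window and retype its area.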
bpy.ops.screen.userpref_show('INVOKE_DEFAULT')
for window in context.window_manager.windows:
if len(window.screen.areas) == 1 and window.screen.areas[0].type == 'USER_PREFERENCES':
window.screen.areas[0].type = self.space_type
return {'FINISHED'}
def get_scene_props(scene):
return scene.obj_panel
class scene_props(bpy.types.PropertyGroup):
tabs = bpy.props.EnumProperty(name="type",
items=enum_object_tabs,
description="Select the Object Type.",
default='INFO')
def register():
bpy.utils.register_class(PANEL_object_properties)
bpy.utils.register_class(scene_props)
bpy.utils.register_class(OPS_open_new_window)
bpy.types.Scene.obj_panel = bpy.props.PointerProperty(type = scene_props)
def unregister():
    bpy.utils.unregister_class(PANEL_object_properties)
    bpy.utils.unregister_class(scene_props)
    bpy.utils.unregister_class(OPS_open_new_window)
    del bpy.types.Scene.obj_panel
|
[
"dev.andrewpeel@gmail.com"
] |
dev.andrewpeel@gmail.com
|
bbbb760b22d3a07d2b3d10445c267f72ed9fcfbd
|
e0b6f5bd451aa8af3273fbc948799637681342e1
|
/scripts/wm_representation/functions/IEM_conditions/IEM_condition.py
|
244e5b35232d3da6732fe524c6e5c3d6790c863a
|
[] |
no_license
|
davidbestue/encoding
|
6b304f6e7429f94f97bd562c7544d1fdccf7bdc1
|
c27319aa3bb652b3bfc6b7340044c0fda057bc62
|
refs/heads/master
| 2022-05-05T23:41:42.419252
| 2022-04-27T08:34:52
| 2022-04-27T08:34:52
| 144,248,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,024
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 1 18:24:32 2019
@author: David Bestue
"""
## Import functions prom the previous path
import sys
import os
previous_path = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
sys.path.insert(1, previous_path)
from model_functions import *
from fake_data_generator import *
from Weights_matrixs import *
from Representation import *
from process_encoding import *
from process_wm import *
from data_to_use import *
from bootstrap_functions import *
from joblib import Parallel, delayed
import multiprocessing
import time
import random
from sklearn.model_selection import KFold
### use the cores so we do not run out of memory
numcores = multiprocessing.cpu_count()
if numcores>20:
    numcores=numcores-10
elif numcores<10:
    numcores=numcores-3
##paths to save the files
path_save_signal ='/home/david/Desktop/Reconstructions/IEM/IEM_target_far_delay.xlsx' #cross_b001_target_mix_octave_1_7_far.xlsx'
path_save_shuffle = '/home/david/Desktop/Reconstructions/IEM/shuff_IEM_target_far_delay.xlsx'
## options (check the filename too!)
decoding_thing = 'Target' #'Distractor' #'Target'
Distance_to_use = 'far' #'close' 'far'
training_time= 'delay' #'stim_p' 'delay' 'respo'
## depending on the options, I will use one condition or the other
if decoding_thing=='Distractor':
cond_t = '2_7'
elif decoding_thing=='Target': ##at some point we can go for the response, though it should be similar
cond_t = '1_7'
# depending on the options, the TRs used for the training will be different
if training_time=='stim_p':
tr_st=3
tr_end=4
elif training_time=='delay':
tr_st=4
tr_end=6
elif training_time=='respo':
if decoding_thing=='Target':
tr_st=8
tr_end=9
elif decoding_thing=='Distractor':
tr_st=11
tr_end=12
## dictionary and list to save the files
Reconstructions={}
Reconstructions_shuff=[]
## elements for the loop
Conditions=['1_0.2', '1_7', '2_0.2', '2_7'] # '1_0.2', '1_7', '2_0.2', '2_7'
Subjects=['d001', 'n001', 'b001', 'r001', 's001', 'l001'] #'d001', 'n001', 'b001', 'r001', 's001', 'l001'
brain_regions = ['visual', 'ips', 'pfc'] # 'visual', 'ips', 'pfc'
ref_angle=180
num_shuffles = 10 #00
for Subject in Subjects:
for Brain_region in brain_regions:
#plt.figure()
### Data to use
enc_fmri_paths, enc_beh_paths, wm_fmri_paths, wm_beh_paths, masks = data_to_use( Subject, 'together', Brain_region)
##### Process training data
training_activity, training_behaviour = preprocess_wm_files(wm_fmri_paths, masks, wm_beh_paths, condition=cond_t,
distance=Distance_to_use, sys_use='unix', nscans_wm=nscans_wm, TR=2.335)
#
#training activity
if training_time=='stim_p':
delay_TR_cond = training_activity[:, tr_st, :]
if training_time=='delay':
delay_TR_cond = np.mean(training_activity[:, tr_st:tr_end, :], axis=1) ## training_activity[:, 8, :]
if training_time=='respo':
delay_TR_cond = training_activity[:, tr_st, :]
#
if decoding_thing=='Distractor':
training_thing = training_behaviour['Dist']
elif decoding_thing=='Target':
training_thing = training_behaviour['T']
##### Train your weigths
WM, Inter = Weights_matrix_LM( delay_TR_cond, training_thing )
WM_t = WM.transpose()
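        # Sketch of the inverted-encoding logic these helpers are assumed to
        # implement: training solves B = C.dot(W) for W by least squares
        # (B: trials x voxels, C: trials x hypothetical channel responses);
        # at test time channel responses are recovered by inverting W.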
for idx_c, Condition in enumerate(Conditions):
if Condition == cond_t:
training_activity, training_behaviour = delay_TR_cond, training_thing
enc_fmri_paths, enc_beh_paths, wm_fmri_paths, wm_beh_paths, masks = data_to_use( Subject, 'together', Brain_region)
testing_activity, testing_behaviour = preprocess_wm_files(wm_fmri_paths, masks, wm_beh_paths,
condition=Condition, distance=Distance_to_use, sys_use='unix', nscans_wm=nscans_wm, TR=2.335)
#
Reconstruction = IEM_cross_condition_kfold(testing_activity= testing_activity, testing_behaviour=testing_behaviour,
decode_item= decoding_thing, WM=WM, WM_t=WM_t, Inter=Inter, tr_st=tr_st, tr_end=tr_end, n_slpits=10)
Reconstructions[Subject + '_' + Brain_region + '_' + Condition]=Reconstruction
shuff = IEM_cross_condition_kfold_shuff(testing_activity=testing_activity, testing_behaviour=testing_behaviour,
decode_item=decoding_thing, WM=WM, WM_t=WM_t, Inter=Inter, condition=Condition, subject=Subject, region=Brain_region,
iterations=num_shuffles, tr_st=tr_st, tr_end=tr_end, ref_angle=180, n_slpits=10)
Reconstructions_shuff.append(shuff)
else:
Reconstruction, shuff = all_process_condition_shuff( Subject=Subject, Brain_Region=Brain_region, WM=WM, WM_t=WM_t,
distance=Distance_to_use, decode_item= decoding_thing, iterations=num_shuffles, Inter=Inter, Condition=Condition,
method='together', heatmap=False) #100
Reconstructions[Subject + '_' + Brain_region + '_' + Condition]=Reconstruction
Reconstructions_shuff.append(shuff)
### Save signal
### Get the signal from the reconstructions (extracted here rather than inside the function, in case you want to save the whole reconstruction)
### If you want to save the whole reconstruction, uncomment the following lines
### Save Reconstructions
# path_save_reconstructions = #
# writer = pd.ExcelWriter(path_save_reconstructions)
# for i in range(len(Reconstructions.keys())):
# Reconstructions[Reconstructions.keys()[i]].to_excel(writer, sheet_name=Reconstructions.keys()[i]) #each dataframe in a excel sheet
# writer.save() #save reconstructions (heatmaps)
#Save just the signal (around the decoding thing)
Decoding_df =[]
for dataframes in Reconstructions.keys():
df = Reconstructions[dataframes]
a = pd.DataFrame(df.iloc[ref_angle*2,:]) ##*2 because there are 720
a = a.reset_index()
a.columns = ['times', 'decoding'] # column names
    a['decoding'] = [sum(df.iloc[:,i] * f2(ref_angle)) for i in range(len(a))] # "population vector" readout: project each timepoint's channel profile onto the tuning function centred on ref_angle
a['times']=a['times'].astype(float)
a['region'] = dataframes.split('_')[1]
a['subject'] = dataframes.split('_')[0]
a['condition'] = dataframes.split('_')[-2] + '_' + dataframes.split('_')[-1]
Decoding_df.append(a)
Df = pd.concat(Decoding_df)
Df['label'] = 'signal' # add the 'signal' label (this df will be concatenated with the shuffle one)
Df.to_excel( path_save_signal ) #save signal
### Save Shuffle (no *2 indexing is needed for the shuffles because it is done inside the function)
Df_shuffs = pd.concat(Reconstructions_shuff)
Df_shuffs['label'] = 'shuffle' ## add the label of shuffle
Df_shuffs.to_excel(path_save_shuffle) #save shuffle
|
[
"davidsanchezbestue@hotmail.com"
] |
davidsanchezbestue@hotmail.com
|
64ced12d14e6ef07689ff4230e0e91e5529ae4b7
|
44849991f507933ebc7ed4e8e37819a529be539e
|
/steps/step09.py
|
f4696c0a213f1cce610937e96f56827da22d84d5
|
[
"MIT"
] |
permissive
|
NukeA/deep-learning-from-scratch-3
|
4ff60e8ac5b157a05079fc3b8a2ea69acec9ece5
|
e48a7b8788827a16cc9f81adc135a3a14989bea5
|
refs/heads/master
| 2022-11-02T00:58:56.621011
| 2020-06-16T04:36:23
| 2020-06-16T04:36:23
| 273,873,741
| 1
| 0
|
MIT
| 2020-06-21T09:34:25
| 2020-06-21T09:34:24
| null |
UTF-8
|
Python
| false
| false
| 1,780
|
py
|
import numpy as np
class Variable:
def __init__(self, data):
if data is not None:
if not isinstance(data, np.ndarray):
raise TypeError('{} is not supported'.format(type(data)))
self.data = data
self.grad = None
self.creator = None
def set_creator(self, func):
self.creator = func
def backward(self):
if self.grad is None:
self.grad = np.ones_like(self.data)
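        # walk the creator graph with an explicit stack instead of recursion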
funcs = [self.creator]
while funcs:
f = funcs.pop()
x, y = f.input, f.output
x.grad = f.backward(y.grad)
if x.creator is not None:
funcs.append(x.creator)
def as_array(x):
if np.isscalar(x):
return np.array(x)
return x
class Function:
def __call__(self, input):
x = input.data
y = self.forward(x)
output = Variable(as_array(y))
output.set_creator(self)
self.input = input
self.output = output
return output
def forward(self, x):
raise NotImplementedError()
def backward(self, gy):
raise NotImplementedError()
class Square(Function):
def forward(self, x):
y = x ** 2
return y
def backward(self, gy):
x = self.input.data
gx = 2 * x * gy
return gx
class Exp(Function):
def forward(self, x):
y = np.exp(x)
return y
def backward(self, gy):
x = self.input.data
gx = np.exp(x) * gy
return gx
def square(x):
return Square()(x)
def exp(x):
return Exp()(x)
x = Variable(np.array(0.5))
y = square(exp(square(x)))
y.backward()
print(x.grad)
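# expected output: ~3.297442541400256
# (y = exp(x**2)**2 = exp(2*x**2), so dy/dx = 4*x*exp(2*x**2);
#  at x = 0.5 that is 2*e**0.5)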
x = Variable(np.array(1.0)) # OK
x = Variable(None) # OK
x = Variable(1.0) # NG: raises TypeError (plain floats are not ndarrays)
|
[
"koki0702@gmail.com"
] |
koki0702@gmail.com
|
a83e3e891b8fdc17c32fdcc50c55461de5f3d5e6
|
02d37a773141af3d37ece2e66d32c51d5365aed3
|
/appengine_hkp/parser.py
|
731e1f777ede8013247a24b7a17cec8246860bbe
|
[
"MIT"
] |
permissive
|
drastrom/appengine-hkp
|
4f61be3474c62fd3d4380224d31d137c78eb0e2c
|
d34affb482f47974316ac0e2a48983fd5ac10b85
|
refs/heads/master
| 2021-01-25T09:20:38.163889
| 2018-12-10T20:53:33
| 2018-12-10T20:53:33
| 93,815,704
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,774
|
py
|
#!/usr/bin/env python
from google.appengine.ext import ndb
import codecs
import datetime
import pgpdump
import pgpdump.packet
import pgpdump.utils
from . import models
from . import uni_utils
def load_key(key_asc):
data = pgpdump.AsciiData(key_asc)
entities = []
pubkey = None
curkey = None
curuid = None
subkey_latest_selfsig = datetime.datetime.utcfromtimestamp(0)
pubkey_latest_selfsig = datetime.datetime.utcfromtimestamp(0)
uid_latest_selfsig = datetime.datetime.utcfromtimestamp(0)
for packet in data.packets():
if isinstance(packet, pgpdump.packet.PublicKeyPacket) and not isinstance(packet, pgpdump.packet.SecretKeyPacket):
if type(packet) == pgpdump.packet.PublicKeyPacket:
pubkey_latest_selfsig = datetime.datetime.utcfromtimestamp(0)
pubkey = models.PublicKey()
curkey = pubkey
# Ugh, BlobProperty wants str, not bytearray
pubkey.key_data = str(data.data)
else:
subkey_latest_selfsig = datetime.datetime.utcfromtimestamp(0)
curkey = models.PublicSubkey()
entities.append(curkey)
curkey.reversed_fingerprint = codecs.decode(packet.fingerprint.decode('ascii'), 'hex')[::-1]
if type(packet) == pgpdump.packet.PublicKeyPacket:
curkey.key = ndb.Key(models.PublicKey, curkey.stringid, namespace='hkp')
else:
curkey.key = ndb.Key(models.PublicSubkey, curkey.stringid, parent=pubkey.key, namespace='hkp')
pubkey.subkeys.append(curkey.key)
curkey.creation_time = packet.creation_time
curkey.expiration_time = packet.expiration_time
curkey.algorithm_type = packet.pub_algorithm_type
curkey.bitlen = packet.modulus_bitlen
elif isinstance(packet, pgpdump.packet.UserIDPacket):
uid_latest_selfsig = datetime.datetime.utcfromtimestamp(0)
curuid = models.Uid()
entities.append(curuid)
curuid.key = ndb.Key(models.Uid, packet.user, parent=pubkey.key, namespace='hkp')
pubkey.uids.append(curuid.key)
curuid.uid = uni_utils.compatibility_casefold(packet.user)
elif isinstance(packet, pgpdump.packet.SignaturePacket):
# self-sig
if packet.key_id == pubkey.keyid:
# At this point only interested in UID, subkey, or sig directly on key
# TODO should record revocation as well
if packet.raw_sig_type in (0x10, 0x11, 0x12, 0x13, 0x18, 0x1F):
# From RFC4880:
# Subpackets that appear in a certification self-signature
# apply to the user name, and subpackets that appear in the subkey
# self-signature apply to the subkey. Lastly, subpackets on the
# direct-key signature apply to the entire key.
#
# NOTE while the certification subpackets should apply to the user name,
# not the entire key, gpg seems to put properties of the public key in the
# certification signature(s). So, no else here...
if packet.raw_sig_type >= 0x10 and packet.raw_sig_type <= 0x13 and uid_latest_selfsig < packet.creation_time:
uid_latest_selfsig = packet.creation_time
curuid.creation_time = packet.creation_time
curuid.expiration_time = packet.expiration_time
if (packet.raw_sig_type == 0x18 and subkey_latest_selfsig < packet.creation_time) or (packet.raw_sig_type != 0x18 and pubkey_latest_selfsig < packet.creation_time):
# Should modify pubkey even if the direct-key sig packet happens after subkeys
modkey = curkey if packet.raw_sig_type == 0x18 else pubkey
for subpack in packet.subpackets:
if subpack.subtype == 9: # Key Expiration Time
modkey.expiration_time = modkey.creation_time + datetime.timedelta(seconds=pgpdump.utils.get_int4(subpack.data, 0))
elif subpack.subtype == 27: # Key Flags
modkey.flags = subpack.data[0]
elif subpack.subtype == 23: # Key Server Preferences (do we need these?)
pass
ndb.put_multi(entities)
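# Minimal usage sketch (hypothetical input; `key_asc` is an ASCII-armored
# public key, e.g. the body of an HKP submission request):
#
#   with open('key.asc', 'rb') as f:
#       load_key(f.read())
#
# load_key parses the packets and persists the PublicKey, PublicSubkey and
# Uid entities in a single batch put.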
|
[
"github@jdrake.com"
] |
github@jdrake.com
|
474f0fa585fe803e09517019b929e36d9d8d6f83
|
f0a3209d739e6ce0a58e70eb4e25442d3012b57c
|
/Backend/getClothesofOutfit/getClothesofOutfit.py
|
d35792dcfdcacb3474fadc34487c6292eb8e2304
|
[] |
no_license
|
js1342/Backend
|
be73ec490369d6a4a12f3b249f14e54b66a28205
|
1856e32d2603f6a9e6cd5ea58e9f1a105748150a
|
refs/heads/master
| 2023-02-03T04:17:07.677284
| 2020-12-24T07:43:56
| 2020-12-24T07:43:56
| 316,378,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,920
|
py
|
import json
import time
import boto3
from boto3.dynamodb.conditions import Key, Attr
############## DB info ##############
dynamodb = boto3.resource('dynamodb', region_name='us-east-2')
outfit_table = dynamodb.Table('Outfit')
clothes_table = dynamodb.Table('Clothes')
user_table = dynamodb.Table('User')
category_table = dynamodb.Table('Categories')
def lambda_handler(event, context):
user_id = int(event['pathParameters']['user-id'])
    # GET: fetch every outfit for this user
    # load the outfit map from the Outfit table
outfit_res = outfit_table.scan(FilterExpression=Attr('user_id').eq(user_id))
print(outfit_res['Items'])
    # convert value types (Decimal -> str)
for item in outfit_res['Items']:
for key in item['outfit']:
res = clothes_table.get_item(Key={'clothes_id':item['outfit'][key]})
print("res", res['Item'])
res['Item']['user_id'] = str(res['Item']['user_id'])
res['Item']['category'] = str(res['Item']['category'])
res['Item']['clothes_id'] = str(res['Item']['clothes_id'])
res['Item']['outfit'] = str(item['outfit'][key])
item['outfit'][key] = res['Item']
# print(item)
item['user_id'] = str(item['user_id'])
item['saved'] = str(item['saved'])
item['outfit_id'] = str(item['outfit_id'])
for i in range (0, len(item['liked_users'])):
item['liked_users'][i] = str(item['liked_users'][i])
print(outfit_res['Items'])
print('-------------------------------')
print(outfit_res['Items'])
print('-------------------------------')
return {
"statusCode":200,
"headers": {
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Methods": "OPTIONS, GET",
},
"body":json.dumps(outfit_res['Items'], ensure_ascii=False)
}
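# Local smoke test with a hypothetical event shape (API Gateway supplies
# 'pathParameters' on real invocations); running this requires AWS
# credentials with read access to the tables above.
if __name__ == '__main__':
    test_event = {'pathParameters': {'user-id': '1'}}
    print(lambda_handler(test_event, None))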
|
[
"32619843+kmj1995kr@users.noreply.github.com"
] |
32619843+kmj1995kr@users.noreply.github.com
|
84fe758f6eacb7fe7424096d7021391445ce414e
|
aa70ce959d0237ed7f2af438c351e93f231d49dc
|
/16-720B-HW1 Spatial Pyramid Matching for Scene Classification/chendil/code/visual_words.py
|
bb8a4535f5ff7e83f855b4caae175012963522b0
|
[] |
no_license
|
c11/16-720-Computer-Vision
|
0fcf9bbacbb94c79da9466a28e9fb7a073c3c4fa
|
ac201820e7be947df350ede0ba9c7eb617acd0aa
|
refs/heads/master
| 2023-03-16T15:29:32.754875
| 2018-11-29T15:02:47
| 2018-11-29T15:02:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,808
|
py
|
import numpy as np
import multiprocessing
import imageio
import scipy.ndimage
import skimage.color
import skimage.io
import sklearn.cluster
import scipy.spatial.distance
import os,time
import matplotlib.pyplot as plt
import util
import random
def extract_filter_responses(image):
'''
Extracts the filter responses for the given image.
[input]
* image: numpy.ndarray of shape (H,W) or (H,W,3)
[output]
* filter_responses: numpy.ndarray of shape (H,W,3F)
'''
    # promote grayscale (H,W) input to three channels so rgb2lab works
    if image.ndim == 2:
        image = np.stack([image, image, image], axis=-1)
    m, n, channel = np.shape(image)
    # make sure that entries in image are float with range [0, 1]
    if np.issubdtype(image.dtype, np.integer) or np.amax(image) > 1.0:
        image = image.astype('float') / 255
    if channel == 1:  # single-channel (H,W,1) image
        image = np.repeat(image, 3, axis=2)
        channel = 3
    if channel == 4:  # RGBA: drop the alpha channel
        image = image[:,:,0:3]
        channel = 3
image = skimage.color.rgb2lab(image)
scale = [1,2,4,8,8 * np.sqrt(2)]
F = len(scale) * 4
response = np.zeros((m, n, 3*F))
#for i in range(channel):
# for j in range (len(scale)):
# response[:,:,i*len(scale)*4+j*4] = scipy.ndimage.gaussian_filter(image[:,:,i],sigma = scale[j],output=np.float64) # guassian
# response[:,:,i*len(scale)*4+j*4+1] = scipy.ndimage.gaussian_laplace(image[:,:,i],sigma = scale[j],output=np.float64) # guassian laplace
# response[:,:,i * len(scale)*4 + j*4+2] = scipy.ndimage.gaussian_filter(image[:,:,i], sigma = scale[j], order = [0,1],output = np.float64) # derivative in x direction
# response[:, :, i * len(scale)*4 + j*4+3] = scipy.ndimage.gaussian_filter(image[:,:,i], sigma = scale[j], order=[1, 0],output = np.float64) # derivative in y direction
# ----- TODO -----
for i in range(channel):
for j in range (len(scale)):
response[:,:,channel*4*j+i] = scipy.ndimage.gaussian_filter(image[:,:,i],sigma = scale[j],output=np.float64) # guassian
response[:,:,channel*4*j+3+i] = scipy.ndimage.gaussian_laplace(image[:,:,i],sigma = scale[j],output=np.float64) # guassian laplace
response[:,:,channel*4*j+6+i] = scipy.ndimage.gaussian_filter(image[:,:,i], sigma = scale[j], order = [0,1],output = np.float64) # derivative in x direction
response[:,:,channel*4*j+9+i] = scipy.ndimage.gaussian_filter(image[:,:,i], sigma = scale[j], order=[1, 0],output = np.float64) # derivative in y direction
return response
def get_visual_words(image,dictionary):
'''
Compute visual words mapping for the given image using the dictionary of visual words.
[input]
* image: numpy.ndarray of shape (H,W) or (H,W,3)
[output]
* wordmap: numpy.ndarray of shape (H,W)
'''
# ----- TODO -----
response = extract_filter_responses(image)
m,n,filnum = np.shape(response)
k,filnum = np.shape(dictionary)
dis = np.zeros(k)
wordmap = np.zeros((m,n))
for i in range(m):
for j in range(n):
pixel = response[i][j][:]
pixel = np.reshape(pixel,(1,filnum))
#print(np.shape(pixel))
# for kk in range(k):
# word = dictionary[kk]
# dis[kk] = scipy.spatial.distance.cdist(pixel,word)
dis = scipy.spatial.distance.cdist(dictionary,pixel)
#print(np.shape(dis))
#print(np.unravel_index(np.argmax(dis,axis = None),dis.shape)[0])
wordmap[i,j] = np.unravel_index(np.argmin(dis,axis = None),dis.shape)[0]
# plt.imshow(wordmap,cmap = 'rainbow')
# plt.show()
return wordmap
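def get_visual_words_vectorized(image, dictionary):
    '''
    Sketch of the same nearest-word mapping without the per-pixel loop,
    assuming the full (H*W, 3F) response matrix fits in memory.
    '''
    response = extract_filter_responses(image)
    m, n, filnum = response.shape
    flat = np.reshape(response, (m * n, filnum))
    dis = scipy.spatial.distance.cdist(flat, dictionary)  # (H*W, K)
    return np.reshape(np.argmin(dis, axis=1), (m, n))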
def compute_dictionary_one_image(args):
'''
Extracts random samples of the dictionary entries from an image.
This is a function run by a subprocess.
[input]
* i: index of training image
* alpha: number of random samples
* image_path: path of image file
* time_start: time stamp of start time
[saved]
* sampled_response: numpy.ndarray of shape (alpha,3F)
'''
i,alpha,image_path = args
#print("../data/" + image_path[i][0])
image = skimage.io.imread("../data/" + image_path[i][0])
#image = image.astype('float') / 255
filter_responses = extract_filter_responses(image)
# ----- TODO -----
m,n,kk = np.shape(filter_responses)
sampled_response = np.reshape(filter_responses,(m*n,kk))
idx = np.random.randint(m*n, size= alpha)
sampled_response = sampled_response[idx,:]
# pick up alpha random pixels
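    # note: every worker writes the same filename, so this file only keeps the
    # most recent sample; the caller uses the returned array instead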
np.save('sampled_response.npy',sampled_response)
return sampled_response
def compute_dictionary(num_workers = 2):
'''
Creates the dictionary of visual words by clustering using k-means.
[input]
* num_workers: number of workers to process in parallel
[saved]
    * dictionary: numpy.ndarray of shape (K,3F)
'''
train_data = np.load("../data/train_data.npz")
#print(np.shape(train_data['image_names']))
#print(train_data['image_names'])
# ----- TODO -----
T = np.shape(train_data['image_names'])[0]
#T = 200
alpha = 250
K = 200
#filter_responses = np.zeros((alpha*T,3*20))
filter_responses = np.array([], dtype=np.int64).reshape(0,3*20)
    for i in range(int(T / num_workers)):
        p = multiprocessing.Pool(num_workers)
        param = []
        for j in range(num_workers):
            param.append((i * num_workers + j, alpha, train_data['image_names']))
        # collect one (alpha, 3F) sample block per worker; this works for any
        # num_workers (the old code unpacked exactly four results)
        fils = p.map(compute_dictionary_one_image, param)
        p.close()
        p.join()
        filter_responses = np.vstack([filter_responses] + fils)
kmeans = sklearn.cluster.KMeans(n_clusters=K, n_jobs = -1).fit(filter_responses)
dictionary = kmeans.cluster_centers_
np.save('dictionary.npy',dictionary)
return dictionary
|
[
"chendilin92@gmail.com"
] |
chendilin92@gmail.com
|
b62ee27024d05328ebb4cf87044e452d5be84b1a
|
34088b8e82bc64a10678a08c03db2732d52f0c1a
|
/Pinbot/app/dash/migrations/0018_auto__add_field_resumedailyreportdata_resume_down_proportion.py
|
ba80829f546552507a844e0a3e81dcce76bb676a
|
[] |
no_license
|
winghou/myFirstProfile
|
757d82f5391f3672e48db4aa5774e26a48a5ecc7
|
8fc5d16de7b6449cba058f4d2459bbb0c8438f77
|
refs/heads/master
| 2020-05-31T13:42:28.554703
| 2016-03-23T11:30:13
| 2016-03-23T11:30:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,426
|
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ResumeDailyReportData.resume_down_proportion'
db.add_column(u'dash_resumedailyreportdata', 'resume_down_proportion',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'ResumeDailyReportData.resume_down_proportion'
db.delete_column(u'dash_resumedailyreportdata', 'resume_down_proportion')
models = {
u'dash.coredailyreportdata': {
'Meta': {'ordering': "['-report_date']", 'object_name': 'CoreDailyReportData'},
'active_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lively_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'lively_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'register_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'repeat_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'report_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'dash.feeddailyreportdata': {
'Meta': {'ordering': "['-report_date']", 'object_name': 'FeedDailyReportData'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lively_feed_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'lively_feed_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'lively_feed_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'new_feed_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'report_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'dash.partnerdailyreportdata': {
'Meta': {'ordering': "['-report_date']", 'object_name': 'PartnerDailyReportData'},
'accept_task_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'accept_task_user_total_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'accusation_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'accusation_total_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'all_extra_reward_coin_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'all_reward_coin_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'do_task_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'do_task_total_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'entered_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'entered_total_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interviewed_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'interviewed_total_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'report_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'resume_download_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resume_download_total_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resume_viewed_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resume_viewed_total_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_accedpted_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_accedpted_count_contrast': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'task_accedpted_total_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_total_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_viewed_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'today_commend_and_check_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'today_commend_and_download_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'today_extra_reward_coin_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'today_reward_coin_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'upload_resume_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'upload_resume_total_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'dash.pinbotdailyreport': {
'Meta': {'object_name': 'PinbotDailyReport'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'login_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pay_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pv': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'register_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'report_date': ('django.db.models.fields.DateField', [], {}),
'total_pay_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'total_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'uv': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'dash.resumedailyreportdata': {
'Meta': {'ordering': "['-report_date']", 'object_name': 'ResumeDailyReportData'},
'company_card_send_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'entered_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interviewed_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'report_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'resume_commends_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resume_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resume_down_proportion': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resume_fav_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resume_view_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'dash.userdailyreportdata': {
'Meta': {'ordering': "['-report_date']", 'object_name': 'UserDailyReportData'},
'all_total_active_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lively_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'new_experience_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'new_manual_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'new_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'new_register_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'new_self_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'repeat_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'report_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'total_experience_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'total_manual_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'total_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'total_register_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'total_self_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'dash.weixindailyreportdata': {
'Meta': {'ordering': "['-report_date']", 'object_name': 'WeixinDailyReportData'},
'feed_notify_send_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'feed_notify_view_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lively_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'lively_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'new_bind_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'new_feed_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'new_feed_favours_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'new_reg_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'report_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'total_bind_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['dash']
|
[
"Newfarming@NewfarmingdeMacBook-Pro.local"
] |
Newfarming@NewfarmingdeMacBook-Pro.local
|
ba41f7aef79a7bcb7d8f12a8308d4d45eacd1ce8
|
6f4e925bf4538d104f1e3e9754d4297c5504ab80
|
/python/recall/app/domain/__init__.py
|
55879d938b278a1a92324ce2cb752388ae90ff9d
|
[
"MIT"
] |
permissive
|
kingreatwill/openself
|
7f02282da3e0b1f328c3627d83ba2b5ed4563dc8
|
8517d24e665b39371835ecd2ed0cd3509a5d9d62
|
refs/heads/master
| 2023-01-23T13:15:49.491402
| 2020-11-19T02:39:52
| 2020-11-19T02:39:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 596
|
py
|
import models
class Domain:
def __init__(self, model: models.db.Document):
self.model = model
    # list (paginated query);
def list(self, size=10, index=1, **kwargs):
size = int(size)
index = int(index)
return self.model.objects(**kwargs).skip((index - 1) * size).limit(size)
    # detail: fetch a single record by its key;
def get(self, id):
return self.model.objects(**{self.model.key(): id}).first()
def update(self, id, **kwargs):
model = self.model.objects(**{self.model.key(): id}).first()
if model:
return model.update(**kwargs)
return True
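# Illustrative usage sketch (hypothetical model: assumes a mongoengine-style
# Document subclass that defines a classmethod key() naming its lookup field):
#   user_domain = Domain(models.User)
#   page = user_domain.list(size=20, index=2, status='active')
#   one = user_domain.get('some-id')
#   user_domain.update('some-id', name='new name')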
|
[
"kingreatwill@qq.com"
] |
kingreatwill@qq.com
|
5dd63b2b9df8c5af5403c212e5f8fa25f11a8055
|
49536aafb22a77a6caf249c7fadef46d63d24dfe
|
/tensorflow/tensorflow/contrib/learn/python/learn/learn_io/generator_io.py
|
784781d7368490a10d5dbc9cd5842f4bed98eda3
|
[
"Apache-2.0"
] |
permissive
|
wangzhi01/deeplearning-1
|
4e5ad93f0d9ecd302b74352f80fe1fa6ae70bf0d
|
46ab82253d956953b8aa98e97ceb6cd290e82288
|
refs/heads/master
| 2020-05-28T03:14:55.687567
| 2018-09-12T16:52:09
| 2018-09-12T16:52:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,499
|
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow generator of dict with numpy arrays."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Container
from types import FunctionType
from types import GeneratorType
from tensorflow.python.estimator.inputs.queues.feeding_functions import _enqueue_data as enqueue_data
def generator_input_fn(x,
target_key=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
pad_value=None):
"""Returns input function that returns dicts of numpy arrays
yielded from a generator.
  It is assumed that every dict of numpy arrays yielded from the generator
represents a single sample. The generator should consume a single epoch of the
data.
This returns a function outputting `features` and `target` based on the dict
of numpy arrays. The dict `features` has the same keys as an element yielded
from x.
Example:
```python
def generator():
for index in range(10):
yield {'height': np.random.randint(32,36),
'age': np.random.randint(18, 80),
'label': np.ones(1)}
with tf.Session() as session:
input_fn = generator_io.generator_input_fn(
generator, target_key="label", batch_size=2, shuffle=False,
num_epochs=1)
```
Args:
x: Generator Function, returns a `Generator` that will yield the data
in `dict` of numpy arrays
target_key: String or Container of Strings, the key or Container of keys of
the numpy arrays in x dictionaries to use as target.
batch_size: Integer, size of batches to return.
num_epochs: Integer, number of epochs to iterate over data. If `None` will
run forever.
shuffle: Boolean, if True shuffles the queue. Avoid shuffle at prediction
time.
queue_capacity: Integer, size of queue to accumulate.
num_threads: Integer, number of threads used for reading and enqueueing.
pad_value: default value for dynamic padding of data samples, if provided.
Returns:
Function, that returns a feature `dict` with `Tensors` and an optional
label `dict` with `Tensors`, or if target_key is `str` label is a `Tensor`
Raises:
TypeError: `x` is not `FunctionType`.
TypeError: `x()` is not `GeneratorType`.
TypeError: `next(x())` is not `dict`.
TypeError: `target_key` is not `str` or `target_key` is not `Container`
of `str`.
KeyError: `target_key` not a key or `target_key[index]` not in next(`x()`).
KeyError: `key` mismatch between dicts emitted from `x()`
"""
if not isinstance(x, FunctionType):
raise TypeError(
'x must be generator function; got {}'.format(type(x).__name__))
generator = x()
if not isinstance(generator, GeneratorType):
raise TypeError(
'x() must be generator; got {}'.format(type(generator).__name__))
data = next(generator)
if not isinstance(data, dict):
raise TypeError('x() must yield dict; got {}'.format(type(data).__name__))
input_keys = sorted(next(x()).keys())
if target_key is not None:
if isinstance(target_key, str):
target_key = [target_key]
elif isinstance(target_key, Container):
for item in target_key:
if not isinstance(item, str):
raise TypeError('target_key must be str or Container of str; got {}'.
format(type(item).__name__))
if item not in input_keys:
raise KeyError(
'target_key not in yielded dict. Expected {} keys; got {}'.format(
input_keys, item))
else:
raise TypeError('target_key must be str or Container of str; got {}'.
format(type(target_key).__name__))
def _generator_input_fn():
"""generator input function."""
queue = enqueue_data(
x,
queue_capacity,
shuffle=shuffle,
num_threads=num_threads,
enqueue_size=batch_size,
num_epochs=num_epochs,
pad_value=pad_value)
features = (queue.dequeue_many(batch_size)
if num_epochs is None else queue.dequeue_up_to(batch_size))
if not isinstance(features, list):
features = [features]
features = dict(zip(input_keys, features))
if target_key is not None:
if len(target_key) > 1:
target = {key: features.pop(key) for key in target_key}
else:
target = features.pop(target_key[0])
return features, target
return features
return _generator_input_fn
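# Usage sketch (illustrative; reuses the `generator` from the docstring above):
#   input_fn = generator_input_fn(
#       generator, target_key='label', batch_size=2, shuffle=False,
#       num_epochs=1)
#   features, target = input_fn()  # tensors, to be evaluated in a tf.Session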
|
[
"hanshuobest@163.com"
] |
hanshuobest@163.com
|
3c2d7ec8c77ae7c529adc37d5a27cb476d8f7a1b
|
67d517b6f8ba789d9086bac9374d6d25cc9a410d
|
/topical/migrations/0016_tag.py
|
fd330c76f41484eabdcc082ad620ab3c659d8f81
|
[] |
no_license
|
Topical-Troupe/topical-backend
|
2f43c83a3b69cbaf7b07aaec30c1d0dec75a794f
|
54be5eb25f4c9d32f67be683ac1b1069e07ba8ab
|
refs/heads/main
| 2022-11-25T00:41:47.478747
| 2020-08-06T15:58:06
| 2020-08-06T15:58:06
| 280,196,461
| 0
| 0
| null | 2020-08-05T03:01:49
| 2020-07-16T15:57:44
|
Python
|
UTF-8
|
Python
| false
| false
| 633
|
py
|
# Generated by Django 3.0.8 on 2020-07-27 22:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('topical', '0015_auto_20200727_1744'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=512, unique=True)),
('products', models.ManyToManyField(blank=True, related_name='tags', to='topical.Product')),
],
),
]
|
[
"britton.wolfe1999@gmail.com"
] |
britton.wolfe1999@gmail.com
|
38911df2e5bdda7b60117af3dd5a796f464a1f0e
|
42171d6536bc649386cffb76e8c722880df23ade
|
/ldi.py
|
d7514e39e7c20bab9b8a2e6d6e8e4fa37e9f9636
|
[
"Apache-2.0"
] |
permissive
|
SLDGroup/NN_Mass
|
11301c3cc4ca8b0bbd42f0633569b0b2373f14f8
|
1de4bb575911a8fc8b06eec7b215ef86c1ac6c4a
|
refs/heads/main
| 2023-05-02T07:42:19.356074
| 2021-05-26T04:03:31
| 2021-05-26T04:03:31
| 352,197,180
| 4
| 3
|
Apache-2.0
| 2021-03-31T23:42:57
| 2021-03-27T23:08:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,298
|
py
|
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.nn.functional as F
from torch.utils.data import Dataset,DataLoader
from torchvision import transforms, datasets
import random
import numpy as np
import argparse
import datetime
import dataset
import model_zoo
#-------------------------------------
# argument parser
parser = argparse.ArgumentParser(description='Training MLP on MNIST/synthetic dataset')
parser.add_argument('--batch_size', type=int, default=100, help='Number of samples per mini-batch')
parser.add_argument('--epochs', type=int, default=1, help='Number of epoch to train')
parser.add_argument('--depth', type=int, default=4, help='the depth (number of FC layers) of the MLP')
parser.add_argument('--width', type=int, default=8, help='the width (number of neurons per layers) of the MLP')
parser.add_argument('--num_seg', type=int, default=2, help='the number of segmentation for the synthetic dataset')
parser.add_argument('--tc', type=int, default=20, help='the number of tc')
parser.add_argument('--dataset', type=str, default='MNIST', help='the type of dataset')
parser.add_argument('--sigma_log_file', type=str, default='logs/mlp_sigma.logs', help='the name of file used to record the LDI record of MLPs')
parser.add_argument('--iter_times', type=int, default=5, help='the number of iteration times to calculate the LDI of the same architecture')
args = parser.parse_args()
### for isometry at initialization
train_loader,test_loader =dataset.mnist_dataloaders()
for iter_times in range(args.iter_times):
model = model_zoo.Dense_MLP(args.width, args.depth, args.tc, input_dims=784, num_classses=10) # model.init_network(func)
sig_mean=0
sig_std=0
for i, (images, labels) in enumerate(test_loader):
images=images
sig_mean_tmp,sig_std_tmp=model.isometry(images.view([args.batch_size,784]))
sig_mean=sig_mean+sig_mean_tmp
sig_std=sig_std+sig_std_tmp
sig_mean=sig_mean/(i+1)
sig_std=sig_std/(i+1)
with open(args.sigma_log_file,'a+') as train_logs:
print(model.nn_mass, sig_mean.item(),sig_std.item(),
model.params, model.flops, args.width, args.depth,
args.tc, args.num_seg,file=train_logs)
|
[
"aonier1995@163.com"
] |
aonier1995@163.com
|
d791a58bb7fbc1afbe5cd755200f3867b52cb84f
|
fb82730f79db7f3f6033c59e5959b51cca10077c
|
/todoapp/migrations/0001_initial.py
|
e83f2362bceb4774a625b1e39aacae1f989d84d5
|
[] |
no_license
|
BhargavaCS/ToDoApp
|
dfd79330c02ab8311387a5d6399cc608f5676ceb
|
9d6be297826115a8799685820e8a909cebc706ca
|
refs/heads/master
| 2021-01-01T04:47:30.416088
| 2017-10-19T07:32:46
| 2017-10-19T07:32:46
| 97,245,520
| 0
| 1
| null | 2017-10-19T17:41:17
| 2017-07-14T15:01:31
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,253
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-13 13:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ToDoItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=512)),
('completed', models.BooleanField(default=False)),
('due_by', models.DateField()),
],
),
migrations.CreateModel(
name='ToDoList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('creation_date', models.DateField()),
],
),
migrations.AddField(
model_name='todoitem',
name='parent',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='todoapp.ToDoList'),
),
]
|
[
"rambhargavasbr@gmail.com"
] |
rambhargavasbr@gmail.com
|
f58f33159e66f1a62c496e69b925b392e7e346bf
|
4b0ad6cd4917a6ce4fda0703515b4733c4cc84ac
|
/ipynb/Script/beamf_fib_fixed.py
|
8d9f7c9bd28d8ae690eae7ff1f6698b9528bdc63
|
[] |
no_license
|
tciodaro/sonar-analysis
|
8f089709590dbe6f329e504c69e72408e48c72bf
|
fd04bd77c63da962ba2b5ea3fab24f113a61a8c9
|
refs/heads/master
| 2021-09-06T00:56:33.825318
| 2018-02-01T04:08:58
| 2018-02-01T04:08:58
| 115,443,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,872
|
py
|
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from scipy import signal
print 'DEFINING ANGLES AND SENSOR ARRAY'
sound_speed = 1482.0 # meter per second
nsensors = 20
Lsensor = 2.14 # in meters
print 'STARTING FIB FIXED CALCULATION'
Fs = 16000
Ts = 1.0/Fs
freq_range = np.arange(100, 3200, 100)
freq_ref = 1700
angles_steering = np.arange(0, 180, 1)
#angles_mainlobe = np.arange(80,110,1)
#angles_sidelobe = np.setdiff1d(angles_mainlobe, angles_steering)
angles_mainlobe = np.array([90])
angles_sidelobe = np.array([0, 180])
M = nsensors # number of sensors
J = 300 # number of taps for each FIR
a = 0.01 # Side lobe gain
b = 0.95 # frequency invariant 'strength'
W = np.zeros((M*J, 1))
j = complex(0,1) # complex variable
# Build Q matrix
Q = np.zeros((M*J, M*J))
A = np.zeros(M*J)
# Build A vector
print 'BUILDING VECTOR A'
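# Space-time structure: W stacks J FIR taps for each of the M sensors, and
# np.kron(Staps, Ssteer) is the matching M*J steering vector for one
# (frequency, angle) pair, so A accumulates the desired main-lobe response
# at the reference frequency.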
Staps = np.array([np.exp(-2*j*np.pi*freq_ref*Ts*itap) for itap in range(J)])
for thetaMainLobe in angles_mainlobe:
Ssteer = np.array([np.exp(-2*j*np.pi*freq_ref*m*Lsensor/sound_speed*np.cos(thetaMainLobe)) for m in range(M)])
A = A + np.kron(Staps, Ssteer)
A = np.array([A]).T
# Build Q Matrix
print 'BUILDING MATRIX Q'
# Adjust the gain for the mainlobe
print 'CALCULATING MAIN LOBE GAIN'
for thetaMainLobe in angles_mainlobe:
Ssteer = np.array([np.exp(-2*j*np.pi*freq_ref*m*Lsensor/sound_speed*np.cos(thetaMainLobe)) for m in range(M)])
S = np.array([np.kron(Staps, Ssteer)]).T
S = S.dot(np.conjugate(S).T)
Q = Q + S
# Adjust the gain for the sidelobe
print 'CALCULATING SIDE LOBE GAIN'
for thetaSideLobe in angles_sidelobe:
Ssteer = np.array([np.exp(-2*j*np.pi*freq_ref*m*Lsensor/sound_speed*np.cos(thetaSideLobe)) for m in range(M)])
S = np.array([np.kron(Staps, Ssteer)]).T
S = S.dot(np.conjugate(S).T)
Q = Q + a*S
# Adjust the invariant response in frequency and angles
print 'ADJUSTING THE INVARIANCE RESPONSE'
for freq in freq_range:
Staps = np.array([np.exp(-2*j*np.pi*freq*Ts*itap) for itap in range(J)])
for theta in angles_steering:
Ssteer_frq = np.array([np.exp(-2*j*np.pi*freq*m*Lsensor/sound_speed*np.cos(theta)) for m in range(M)])
Ssteer_ref = np.array([np.exp(-2*j*np.pi*freq_ref*m*Lsensor/sound_speed*np.cos(theta)) for m in range(M)])
Sfrq = np.array([np.kron(Staps, Ssteer_frq)]).T
Sref = np.array([np.kron(Staps, Ssteer_ref)]).T
S = Sfrq - Sref
S = S.dot(np.conjugate(S).T)
Q = Q + b*S
print 'ESTIMATING W'
Qinv = np.linalg.inv(Q)
W = Qinv.dot(A)
print 'BEAM PATTERN'
freq_range = np.array([1000])
beampattern = np.zeros((freq_range.shape[0], angles_steering.shape[0]))
for ifreq, freq in enumerate(freq_range):
Staps = np.array([np.exp(-2*j*np.pi*freq*Ts*itap) for itap in range(J)])
for itheta, theta in enumerate(angles_steering):
Ssteer = np.array([np.exp(-2*j*np.pi*freq*m*Lsensor/sound_speed*np.cos(theta)) for m in range(M)])
S = np.array([np.kron(Staps, Ssteer)]).T
        beampattern[ifreq, itheta] = (np.conjugate(W).T.dot(S)[0,0]).real  # take the real part explicitly (the array is float; the implicit cast dropped the imaginary part)
raise Exception()  # intentional stop in the original script: inspect the beam pattern here
print 'BEAMFORMING'
beampattern = np.zeros(angles_steering.shape[0])
# Source signal
nsources = 1  # only the first of the listed sources is simulated
source_angle = np.array([30,45, 60, 75, 80]) # in degrees
source_freqs = np.array([1000,1000, 1000, 1000, 1000]) # Hz
source_ampli = np.array([1, 1, 1, 1, 1])
# Simulate received signal
angle_res = angles_steering[1] - angles_steering[0]
delay_max = nsensors * Lsensor / sound_speed # considering the angle of 90 degrees, sin = 1
heap_size = int(delay_max * Fs)*2
heap = np.zeros((angles_steering.shape[0], heap_size))
beamf = np.zeros(angles_steering.shape[0])
#total_samples = int(delay_max * Fs)
total_samples = J
x_time = np.arange(0, total_samples*10*Ts, Ts)
received = np.zeros((nsensors, x_time.shape[0]))
for i in range(nsensors):
    for isrc in range(nsources):  # renamed from j to avoid shadowing the complex unit defined above
        delay = i * Lsensor * np.sin(source_angle[isrc] * np.pi / 180.0) / sound_speed # in seconds
        received[i] = received[i] + source_ampli[isrc] * np.sin((x_time - delay) * 2 * np.pi * source_freqs[isrc])
# BEAMFORMING
for iang, ang in enumerate(angles_steering):
summed_signal = np.zeros(np.max([J,x_time.shape[0]]) - np.min([J,x_time.shape[0]]) + 1)
for i in range(nsensors):
delay = i * Lsensor * np.sin(ang * np.pi / 180.0) / sound_speed
delay_samples = np.abs(int(delay * Fs))
#summed_signal = summed_signal + W[i].dot(received[i][delay_samples:delay_samples+total_samples+1])
summed_signal = summed_signal + np.convolve(W[i], received[i],mode='valid')
beamf[iang] = summed_signal.dot(summed_signal)/float(total_samples)
raise Exception()  # intentional stop in the original script: the plotting below is unreachable as written
plt.figure(figsize=(10,5))
plt.plot(angles_steering, beamf)  # fixed: array_angles was undefined
plt.xticks(angles_steering[::10])
plt.ylabel('Power')
plt.xlabel('DOA [degrees]')
plt.title('Delay and Sum Beamforming')
print 'FIXED FIB ESTIMATED'
|
[
"ciodaro@mymacpro.local"
] |
ciodaro@mymacpro.local
|
f8d8ee4061dbff936f37094f60a8e6e5b2dbd040
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p2DJ/New/program/qiskit/noisy/startQiskit_noisy241.py
|
d59a29805cadc73e57d6ef26c5940d08ffc753ef
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,185
|
py
|
# qubit number=2
# total number=11
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
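# Example: with n = 2 and f(rep) = rep[-1], the target qubit is flipped exactly
# on the bitstrings whose last bit is '1' ('01' and '11'); with the target
# prepared in |->, this realizes the phase oracle O_f^+-.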
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
#for i in range(n):
# prog.measure(input_qubit[i], classicals[i])
prog.swap(input_qubit[1],input_qubit[0]) # number=2
prog.swap(input_qubit[1],input_qubit[0]) # number=3
prog.cx(input_qubit[0],input_qubit[1]) # number=8
prog.x(input_qubit[1]) # number=9
prog.cx(input_qubit[0],input_qubit[1]) # number=10
prog.cx(input_qubit[0],input_qubit[1]) # number=7
prog.rx(-2.73004401596953,input_qubit[1]) # number=6
prog.z(input_qubit[1]) # number=4
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
    sample_shot = 2800
backend = FakeVigo()
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit_noisy241.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
e87cc969beea20d7c66caebfb0bc39aae6e58bb3
|
ceeca29791e36e1a83f7019c60180d1644096939
|
/techparty/event/migrations/0003_auto__add_field_event_can_signup_before.py
|
3cf959fbfa2b55399ecf07d053a6fdff0619c871
|
[] |
no_license
|
gztechparty/techparty
|
0eab101bf3d52000dfe819b645cf337398960238
|
232c063f886e015e3a9af5db364167b7e3044575
|
refs/heads/develop
| 2020-06-08T07:12:37.820849
| 2019-06-04T10:02:26
| 2019-06-04T10:02:26
| 9,009,069
| 4
| 8
| null | 2014-08-05T15:13:40
| 2013-03-25T15:18:23
|
CSS
|
UTF-8
|
Python
| false
| false
| 10,060
|
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Event.can_signup_before'
db.add_column(u'event_event', 'can_signup_before',
self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Event.can_signup_before'
db.delete_column(u'event_event', 'can_signup_before')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'event.event': {
'Meta': {'object_name': 'Event'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'area': ('django.db.models.fields.IntegerField', [], {}),
'can_signup_before': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'create_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'end_time': ('django.db.models.fields.DateTimeField', [], {}),
'fee': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'hashtag': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'need_subject': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'sponsor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['member.User']", 'null': 'True', 'blank': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {}),
'tags': ('tagging.fields.TagField', [], {}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'event.participate': {
'Meta': {'unique_together': "(('user', 'event'),)", 'object_name': 'Participate'},
'checkin_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'confirm_key': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'confirm_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['event.Event']"}),
'focus_on': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'paid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pay_amount': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pay_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'signup_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'topic': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['member.User']"})
},
u'event.photo': {
'Meta': {'object_name': 'Photo'},
'create_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['event.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'source': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'uploader': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['member.User']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'event.topic': {
'Meta': {'object_name': 'Topic'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['member.User']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['event.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slide_file': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slide_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'sub_title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tags': ('tagging.fields.TagField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'member.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'avatar': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'birth_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'company': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'extra_data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gendar': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_lecturer': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'tags': ('tagging.fields.TagField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
}
}
complete_apps = ['event']
|
[
"jeff@fallever.com"
] |
jeff@fallever.com
|
6cfde358feb3d86dc0581bcf306e3b8e4488b215
|
a3779f5bfa96756ab80d12ab2e816c0563f200b2
|
/backendFunctions.py
|
c6bb5d8d2a32bb5d5ea7dd6047d82ad6c19cfa56
|
[] |
no_license
|
ashraychowdhry/splitway
|
141e2ab2738e14f425b52edd00f110f2a408e1b6
|
e344433ed0b6bcab65d4a201fca085dce78c01f6
|
refs/heads/master
| 2020-09-02T11:57:01.357917
| 2019-11-18T15:44:38
| 2019-11-18T15:44:38
| 219,112,355
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,651
|
py
|
import requests
import json
import urllib.request
import datetime
def findCoordinates(address):
address = address.replace(" ", "+")
addressURL = "https://maps.googleapis.com/maps/api/geocode/json?address=" + address + "&key=AIzaSyCdaPXmz8jQexyn-kWR9rmiumUuLn3GgMs"
with urllib.request.urlopen(addressURL) as url:
data = json.loads(url.read().decode())
lat = (data["results"][0]["geometry"]["location"]["lat"])
lng = (data["results"][0]["geometry"]["location"]["lng"])
coordinates = (lng, lat)
return coordinates
def getDistance(address1, address2):
address1 = address1.replace(" ", "+")
address2 = address2.replace(" ", "+")
distanceURL = "https://maps.googleapis.com/maps/api/distancematrix/json?origins=" + address1 + "&destinations=" + address2 + "&units=imperial&mode=walking&language=en-EN&key=AIzaSyANMkW7bIUZCJI1jNM2l5hl1CpmXzVCpJg"
with urllib.request.urlopen(distanceURL) as url:
data = json.loads(url.read().decode())
distance = data["rows"][0]["elements"][0]["distance"]["value"]
distance = distance / 1609.344
return distance
def timeFormatter(strTime1, strTime2):
    pass  # TODO: left unimplemented in the original
def test():
    return print(getDistance("1250 cobblemill way", "1259 cobblemill way"))
numEvents = 0  # module-level event counter
def newEvent(currentAddress, destinationAddress, eventTime, email, phone):
    # Will store a new event in the database
    global numEvents
    numEvents += 1
def searchEvents(currentAddress, destinationAddress, eventTime):  # incomplete: requires DB info
    # EVENTCURRENTADDRESS / EVENTDESTINATIONADDRESS are placeholders for per-event DB fields
    for i in range(numEvents):
        if getDistance(currentAddress, EVENTCURRENTADDRESS) < 0.25 and getDistance(destinationAddress, EVENTDESTINATIONADDRESS) < 0.25:
            pass  # TODO: collect and return matching events
test()
|
[
"peytonhowelle@gmail.com"
] |
peytonhowelle@gmail.com
|
eff958ea898766a48ee4dca2ae3178f7da519a30
|
05172b2f9ac3cc2dc925631fcb49513b3e02ea2c
|
/DynamicProgramming/PartitionEqualSubsetSum.py
|
d1d5db0937b6d583bd53fb5782b3ffef4a7c6914
|
[] |
no_license
|
KickItAndCode/Algorithms
|
cbe1ea95c1e655cbfa0b829d3bd2a2a78d7d862f
|
d41e8a2262cede3154bc5b48c43fb60ac098e385
|
refs/heads/master
| 2020-04-26T07:08:37.127285
| 2019-12-17T03:46:10
| 2019-12-17T03:46:10
| 173,385,735
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,645
|
py
|
# 416. Partition Equal Subset Sum
# Given a non-empty array containing only positive integers, find if the array can be partitioned into two subsets such that the sum of elements in both subsets is equal.
# Note:
# Each of the array element will not exceed 100.
# The array size will not exceed 200.
# Example 1:
# Input: [1, 5, 11, 5]
# Output: true
# Explanation: The array can be partitioned as [1, 5, 5] and [11].
# Example 2:
# Input: [1, 2, 3, 5]
# Output: false
# Explanation: The array cannot be partitioned into equal sum subsets.
def canPartition(nums):
# if this number is not a multiple of two they can't be split
total = sum(nums)
if total % 2 != 0:
return False
return canPartitionHelper(nums, 0, 0, total, {})
def canPartitionHelper(nums, i, curr_sum, total, cache):
    # current state; a tuple key avoids collisions such as (1, 23) vs (12, 3)
    current = (i, curr_sum)
# check cache
if current in cache:
return cache[current]
# base case truthy
if curr_sum * 2 == total:
return True
# base case falsy
if curr_sum > total // 2 or i >= len(nums):
return False
# recursion.. take or don't take a number
found = canPartitionHelper(nums, i + 1, curr_sum, total,
cache) or canPartitionHelper(nums, i + 1, curr_sum + nums[i], total, cache)
# store value in cache
cache[current] = found
return found
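# Note: with memoization there are at most len(nums) * (total // 2 + 1)
# distinct (i, curr_sum) states, so the search runs in pseudo-polynomial time.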
print(canPartition([1, 5, 11, 5]))
# Output: true
# Explanation: The array can be partitioned as [1, 5, 5] and [11].
print(canPartition([1, 2, 3, 5]))
# Output: false
# Explanation: The array cannot be partitioned into equal sum subsets.
|
[
"henderson@Roberts-MacBook-Pro-3.local"
] |
henderson@Roberts-MacBook-Pro-3.local
|
c97b3d8514f6e9fc644921f067cee1140ab673d1
|
f2f9c6c5daacfa5e65ea091cb01b3069ae11dbf0
|
/setup.py
|
abfad6d5d7276fd25bfb17751188a0ed74b093dc
|
[
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
TSL-Mitchell/elyra
|
774bc21ce3f639dd3457e0d84dfdd589d338ba21
|
2a16b9b7343ed2e8e7e667bafe20cf6a73f7297f
|
refs/heads/master
| 2023-01-25T05:30:21.990303
| 2020-11-26T23:04:52
| 2020-11-26T23:04:52
| 316,352,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,790
|
py
|
#
# Copyright 2018-2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from glob import glob
from setuptools import setup, find_packages
long_desc = """
Elyra is a set of AI centric extensions to JupyterLab. It aims to help data scientists,
machine learning engineers and AI developers through the complexities of the model development life cycle.
"""
here = os.path.abspath(os.path.dirname(__file__))
version_ns = {}
with open(os.path.join(here, 'elyra', '_version.py')) as f:
exec(f.read(), {}, version_ns)
npm_packages_path = "./dist/*.tgz"
auto_extension_path = "./etc/config/jupyter_notebook_config.d/*.json"
settings_path = './etc/config/settings/*.json'
metadata_path = './etc/config/metadata/runtime-images/*.json'
setup_args = dict(
name="elyra",
version=version_ns['__version__'],
url="https://github.com/elyra-ai/elyra",
description="Elyra provides AI Centric extensions to JupyterLab",
long_description=long_desc,
author="Elyra Maintainers",
license="Apache License Version 2.0",
data_files=[('etc/jupyter/jupyter_notebook_config.d', glob(auto_extension_path)),
('share/jupyter/lab/settings', glob(settings_path)),
('share/jupyter/metadata/runtime-images', glob(metadata_path))],
packages=find_packages(),
install_requires=[
'autopep8',
'entrypoints>=0.3',
'jinja2>=2.11,<3.0',
'jsonschema>=3.2.0',
'jupyter_core>=4.0,<5.0',
'jupyter_client>=6.1',
'jupyterlab-git==0.22.3',
'jupyterlab>=2.0.0,<3.0.0',
'kfp-notebook>=0.14.0,<0.15.0',
'kfp==1.0.0',
'minio>=5.0.7',
'nbclient>=0.5.1',
'nbconvert>=5.6.1,<6.0',
'nbdime>=2.0.0',
'nbresuse>=0.3.6',
'notebook>=6.0.3',
'papermill>=2.1.3',
'requests>=2.9.1,<3.0',
'rfc3986-validator>=0.1.1',
'traitlets>=4.3.2',
'urllib3>=1.24.2',
'websocket-client',
],
include_package_data=True,
classifiers=(
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
),
entry_points={
'console_scripts': [
'elyra-metadata = elyra.metadata.metadata_app:MetadataApp.main',
],
'elyra.pipeline.processors': [
'local = elyra.pipeline.processor_local:LocalPipelineProcessor',
'kfp = elyra.pipeline.processor_kfp:KfpPipelineProcessor'
],
'papermill.engine': [
'ElyraEngine = elyra.pipeline.elyra_engine:ElyraEngine',
]
},
)
if "--dev" not in sys.argv:
setup_args["data_files"].append(('share/jupyter/lab/extensions', glob(npm_packages_path)))
else:
sys.argv.remove("--dev")
if __name__ == '__main__':
setup(**setup_args)
|
[
"mitchell@truis.com.au"
] |
mitchell@truis.com.au
|
c3e40fb6c21a8b78a1912a6dddd65973f62ce9b6
|
a82dfb61b17fa66b9c75fe871401cff77aa77f56
|
/libmcell/definition/doc.py
|
48cdb0ffba9e2e4089d530fbc690a17b0601d563
|
[
"MIT"
] |
permissive
|
mcellteam/mcell
|
49ca84048a091de8933adccc083d31b7bcb1529e
|
3920aec22c55013b78f7d6483b81f70a0d564d22
|
refs/heads/master
| 2022-12-23T15:01:51.931150
| 2021-09-29T16:49:14
| 2021-09-29T16:49:14
| 10,253,341
| 29
| 12
|
NOASSERTION
| 2021-07-08T01:56:40
| 2013-05-23T20:59:54
|
C++
|
UTF-8
|
Python
| false
| false
| 7,463
|
py
|
"""
Copyright (C) 2021 by
The Salk Institute for Biological Studies
Use of this source code is governed by an MIT-style
license that can be found in the LICENSE file or at
https://opensource.org/licenses/MIT.
"""
import sys
import os
import yaml
from constants import *
from gen import indent_and_fix_rst_chars, yaml_type_to_py_type, get_default_or_unset_value_py
def cat_to_title(cat):
if cat == CATEGORY_CONSTANTS:
return 'Enums and Constants'
else:
return cat.replace('_', ' ').capitalize()
def write_cat_label(f, cat):
f.write('.. _api-' + cat + ':\n\n')
def gen_example_links(base_links):
split_links = base_links.strip().split()
n = len(split_links)
if n == 0:
return ''
res = 'Example' + ('' if n == 1 else 's') + ': '
for l in split_links:
name = os.path.basename(os.path.dirname(l)) + '/' + os.path.basename(l)
res += '`' + name + ' <' + EXAMPLES_BASE_URL + l + '>`_ '
return res
def write_h4(f, text, name, class_name):
f.write('.. _' + class_name + '__' + name + ':\n\n')
f.write(text + '\n')
f.write('-' * len(text) + '\n\n')
def get_method_declaration(method):
res = method[KEY_NAME] + ' ('
if KEY_PARAMS in method:
num_params = len(method[KEY_PARAMS])
for i in range(num_params):
param = method[KEY_PARAMS][i]
t = yaml_type_to_py_type(param[KEY_TYPE])
res += param[KEY_NAME] + ': ' + t
if KEY_DEFAULT in param:
res += '=' + get_default_or_unset_value_py(param)
if i != num_params - 1:
res += ', '
res += ')'
if KEY_RETURN_TYPE in method:
res += ' -> ' + yaml_type_to_py_type(method[KEY_RETURN_TYPE])
return res
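# Example output (for a hypothetical method): "move (x: float, y: float=1.0) -> None"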
def generate_class_documentation(f, class_name, class_def):
f.write(class_name + '\n' + '='*len(class_name) + '\n\n')
if KEY_DOC in class_def:
f.write(class_def[KEY_DOC].strip() + '\n\n')
if KEY_EXAMPLES in class_def:
f.write(gen_example_links(class_def[KEY_EXAMPLES]) + '\n\n')
if KEY_ITEMS in class_def and class_def[KEY_ITEMS]:
f.write('Attributes:\n' + '*'*len('Attributes:') + '\n')
num_items = len(class_def[KEY_ITEMS])
for item in class_def[KEY_ITEMS]:
t = yaml_type_to_py_type(item[KEY_TYPE])
header = item[KEY_NAME] + ': ' + t
write_h4(f, header, item[KEY_NAME], class_name)
if KEY_DOC in item and item[KEY_DOC]:
f.write(' | ' + indent_and_fix_rst_chars(item[KEY_DOC].strip(), ' | ') + '\n')
if KEY_DEFAULT in item:
f.write(' | - default argument value in constructor: ' + get_default_or_unset_value_py(item))
f.write('\n')
if KEY_EXAMPLES in item:
f.write('\n | ' + gen_example_links(item[KEY_EXAMPLES]) + '\n\n')
f.write('\n')
if KEY_METHODS in class_def and class_def[KEY_METHODS]:
        f.write('\nMethods:\n' + '*'*len('Methods:') + '\n')
for method in class_def[KEY_METHODS]:
method_name = method[KEY_NAME]
header = get_method_declaration(method)
write_h4(f, header, method_name, class_name)
if KEY_DOC in method:
f.write('\n | ' + indent_and_fix_rst_chars(method[KEY_DOC].strip(), ' | ') + '\n\n')
if KEY_PARAMS in method:
num_params = len(method[KEY_PARAMS])
for param in method[KEY_PARAMS]:
t = yaml_type_to_py_type(param[KEY_TYPE])
f.write('* | ' + param[KEY_NAME] + ': ' + t)
if KEY_DEFAULT in param:
f.write(' = ' + get_default_or_unset_value_py(param))
if KEY_DOC in param:
f.write('\n | ' + indent_and_fix_rst_chars(param[KEY_DOC].strip(), ' | ') + '\n\n')
else:
f.write('\n')
if KEY_EXAMPLES in method:
f.write(' | ' + gen_example_links(method[KEY_EXAMPLES]) + '\n\n')
f.write('\n')
f.write('\n')
def generate_documentation(data_classes):
# generate constants
with open(os.path.join(DOC_DIRECTORY, CATEGORY_CONSTANTS + EXT_RST), 'w') as f:
write_cat_label(f, CATEGORY_CONSTANTS)
f.write(
'*******************\n' +
cat_to_title(CATEGORY_CONSTANTS) + '\n' +
'*******************\n\n'
)
# generate enums first, then constants
enums = data_classes[KEY_ENUMS]
for enum in enums:
enum_name = enum[KEY_NAME]
f.write(enum_name + '\n' + '='*len(enum_name) + '\n\n')
if KEY_DOC in enum:
f.write('\n | ' + indent_and_fix_rst_chars(enum[KEY_DOC].strip(), ' | ') + '\n\n')
for value in enum[KEY_VALUES]:
f.write('* | **' + value[KEY_NAME] + '** = ' + str(value[KEY_VALUE]) + '\n')
if KEY_DOC in value:
f.write(' | ' + indent_and_fix_rst_chars(value[KEY_DOC].strip(), ' | ') + '\n\n')
f.write('\n')
f.write('\n\n')
c = 'Constants'
f.write(c + '\n' + '='*len(c) + '\n\n')
constants = data_classes[KEY_CONSTANTS]
for const in constants:
const_name = const[KEY_NAME]
f.write('* | **' + const_name + '**: ' + yaml_type_to_py_type(const[KEY_TYPE]) + \
' = ' + str(const[KEY_VALUE]) +'\n')
if KEY_DOC in const:
f.write(' | ' + indent_and_fix_rst_chars(const[KEY_DOC].strip(), ' | ') + '\n\n')
f.write('\n\n')
# then generate classes into files by category
for cat in CATEGORIES:
if cat == CATEGORY_CONSTANTS:
continue
input_file = cat + EXT_RST
with open(os.path.join(DOC_DIRECTORY, input_file), 'w') as f:
write_cat_label(f, cat)
cat_name = cat_to_title(cat)
f.write('*'*len(cat_name) + '\n' + cat_name + '\n' + '*'*len(cat_name) + '\n')
for key, value in sorted(data_classes.items()):
if key != KEY_CONSTANTS and key != KEY_ENUMS and value[KEY_CATEGORY] == cat:
generate_class_documentation(f, key, value)
# and generate api.rst file
with open(os.path.join(DOC_DIRECTORY, API_RST), 'w') as f:
title = 'Python API Reference'
f.write(
title + '\n' +
'='*len(title) + '\n\n'
)
f.write(
'.. toctree::\n'
' :maxdepth: 2\n'
' :hidden:\n'
' :caption: Contents\n\n'
)
for cat in CATEGORIES:
f.write(' ' + cat + '\n')
f.write('\nThis section contains automatically generated documentation on Python classes, enums, '
'and constants provided by MCell.\n\n')
for cat in CATEGORIES:
f.write('- :ref:`api-' + cat + '`\n')
|
[
"ahusar@salk.edu"
] |
ahusar@salk.edu
|
e518747ac7ebdccaa7c58444e65cf3a2287c92f6
|
c2252ab71bddf1438b26104f0959a9d9cde58f21
|
/Pingala/Pingala/wsgi.py
|
eb85e86316dc82844fbb671c162189eec3ace5ea
|
[] |
no_license
|
Dragnoid99/CS253
|
4880ea95cedac51a6e071fcb3e9ca32be60d197a
|
fc3c28e9d5a2af04e88b550571084a0ba759036d
|
refs/heads/master
| 2021-04-18T11:50:00.194863
| 2020-03-23T21:55:24
| 2020-03-23T21:55:24
| 249,541,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
"""
WSGI config for Pingala project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Pingala.settings')
application = get_wsgi_application()
|
[
"noreply@github.com"
] |
Dragnoid99.noreply@github.com
|
a3277c30695fa06eebfb826382d70f019c490e2f
|
bc480c714cc086ed1d7b9df0e8e4e587bc608e6f
|
/Day04/str.py
|
f032ec63e609d7f92ee2ee26427b840bd0cd3ad5
|
[] |
no_license
|
What-After-College/Python-Batch-038
|
a989ad093ea3dba6ce219ad2acf7ec6dc01e7bff
|
441f1c1a4214a928b1f33ed3852b46ee3c4bb492
|
refs/heads/master
| 2022-11-29T05:04:22.914240
| 2020-07-15T08:21:07
| 2020-07-15T08:21:07
| 277,481,214
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 495
|
py
|
hi = "Hello "
name = "Thor"
# print(hi,name)
# print(hi,end="")
# print(name)
# print(hi+name)
# print(hi-name)
# print(hi*name)
# print(hi*3)
# print(len(name))
# print(len(hi))
# batch = "Rhythm Argha Sahil Pooja Aman"
# print(batch[10])
# print(batch[-12])
# print(batch[13:24])
# print(batch)
# batch.insert(0,"Akanshu")
# print(batch)
# ls = input("Enter names here: ").split()
# print(type(ls))
s = "captain america"
# ch = 'q'
# print(s.find(ch))
s = s.capitalize()
print(s)
|
[
"akanshuraj@gmail.com"
] |
akanshuraj@gmail.com
|
7550cdf18ede46735b48149495fb32bd50f1b306
|
8b46f0afd133e001686014582a7cf2efa9b2ae3b
|
/challanges/ll_kth_from_end/conftest.py
|
5c0b48e662693d63056e6fdcbb1e91b61a3abec2
|
[
"MIT"
] |
permissive
|
Patricia888/data-structures-and-algorithms
|
f6af0e88e7445062b8881bfd88783c88dcb546c6
|
8963acf857b9f7069eeeea2884b41376986c3d7c
|
refs/heads/master
| 2018-09-07T17:30:48.099569
| 2018-07-30T23:40:30
| 2018-07-30T23:40:30
| 126,067,848
| 0
| 0
|
MIT
| 2018-05-15T02:08:32
| 2018-03-20T18:55:49
|
Python
|
UTF-8
|
Python
| false
| false
| 172
|
py
|
import pytest
from .ll_kth_from_end import LinkedList as LL
@pytest.fixture
def empty_ll():
return LL()
@pytest.fixture
def small_ll():
return LL([1, 2, 3, 4])
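# Example test using these fixtures (illustrative; assumes the LinkedList in
# ll_kth_from_end exposes a kth_from_end(k) method, as the module name suggests):
#   def test_kth_from_end(small_ll):
#       assert small_ll.kth_from_end(0) == 4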
|
[
"praftery8@gmail.com"
] |
praftery8@gmail.com
|
784fa6caf340114754975f11ff4b7c651ff70421
|
bdd6ab129de61947945b380a487a3ee923f542f3
|
/singleton-less/pipeline/pangene_test_2/analysis.py
|
3ea0807047a8273d00e1ff710ea261d221a92786
|
[] |
no_license
|
InfOmics/pangenes-review
|
1e27c1fd1a93fb7a5fd764c4090f7a4a2a207b0b
|
a74f8f9d615de6a76aa1918c2c4e9d0c1f0c8385
|
refs/heads/master
| 2021-01-07T12:14:37.891227
| 2020-06-01T14:20:27
| 2020-06-01T14:20:27
| 241,686,261
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,352
|
py
|
from pathlib import Path
from subprocess import call
import numpy as np
import pandas as pd
def getGenomeDistribution(path_clus):
    #here we compute the histogram of the "genomes per class" distribution
histogram = dict() #numero di famiglie che toccano X genomi
for line in open(path_clus,'r'):
genes = line.strip().split(' ')
        aux = set() #a set avoids repetitions: paralogs belong to the same genome
for g in genes:
genome = g.split('_')[1]
aux.add(genome)
histogram[len(aux)] = histogram.get(len(aux),0)+1
return histogram
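# Toy example (illustrative): for a .clus file containing the two lines
#   "geneA_1 geneB_1 geneC_2"  -> family touching genomes {1, 2}
#   "geneD_3"                  -> family touching genome {3}
# getGenomeDistribution returns {2: 1, 1: 1}.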
def getClasses(path_alf):
family_number = -1
nof_seqs = 0
classes = dict()
for line in open(path_alf,'r'):
family_number +=1
alf_cluster = [gene for gene in line.strip().split(' ')]
nof_seqs += len(alf_cluster)
classes['family_'+str(family_number)] = set(alf_cluster)
return classes, nof_seqs
#performance analysis with True/False Positives/Negatives
def performance_analysis(path_clus, dbclasses):
name = path_clus.stem
clusters = list()
for line in open(path_clus,'r'):
cc = line.strip().split(' ')
clusters.append( cc )
#print('number of clusters:', len(clusters))
e_classes = set()
for k,vv in dbclasses.items():
for v in [ (i,j) for i in vv for j in vv if i != j] :
e_classes.add(v)
e_clusters = set()
for vv in clusters:
for v in [ (i,j) for i in vv for j in vv if i != j] :
e_clusters.add(v)
tp = len( e_classes & e_clusters ) #True positives
fp = len( e_clusters - e_classes) #False positives
fn = len( e_classes - e_clusters) #False negatives
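    # NOTE: nof_seqs is the module-level total computed in the main section below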
tn = ((nof_seqs * nof_seqs) - nof_seqs) - len(e_classes) #True negatives
precision = tp/(tp + fp)
recall = tp/(tp + fn)
    result = {'classes_links':len(e_classes),
              'cluster_links':len(e_clusters),
              'TP':tp,
              'FP':fp,
              'FN':fn,
              'TN':tn,
              'precision':precision,
              'recall':recall,
              'true_negative_rate':tn/(tn + fp),
              'c-diff':None,
              'f1-score':2*(precision*recall)/(precision+recall) if (precision+recall) != 0 else 0}
#labels = ['classes_links','cluster_links','TP','FP','FN','TN','precision','recall','true_negative_rate','c-diff']
#result = [len(e_classes),len(e_clusters),tp,fp,fn,tn,tp/(tp + fp),tp/(tp + fn),tn/(tn + fp)]
return result
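# Worked example (illustrative): with classes {f0: {a, b}} and one predicted
# cluster [a, b, c], e_classes = {(a,b),(b,a)} and e_clusters has all 6 ordered
# pairs of {a,b,c}, so tp = 2, fp = 4, fn = 0, precision = 1/3, recall = 1.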
##MAIN NEW
#mintree dataset analysis
n_genes_mintree = dict()
mintree_hist_list = dict()
mintree_hist_diff = dict()
analysis_result = Path('analysis')
analysis_result.mkdir(exist_ok=True)
#dataset mintree analysis
mintree_result = Path(analysis_result,'analysis_mintree')
mintree_result.mkdir(exist_ok=True)
clus_mintree = Path('input_datasets', 'dataset_mintree','mintree','mintree.clus')
#analyze dataset_mintree.clus, used as the reference to compare the other pangenome tools
#classes are the families of genes, nof_seqs is the number of sequences/genes
classes, nof_seqs = getClasses(clus_mintree)
#histogram in the form of a dictionary (only values that occur at least once)
mintree_gen_distr = getGenomeDistribution(clus_mintree)
#number of genomes
genomes_list = set()
for key, value in classes.items():
for gene in value:
genomes_list.add(gene.split('_')[1])
nof_genomes = len(genomes_list)
#create the histogram
hist_mintree = [0]*nof_genomes
for k in mintree_gen_distr:
hist_mintree[k-1] = mintree_gen_distr[k]
#saving data
n_genes_mintree[clus_mintree.stem] = nof_seqs
mintree_hist_list[clus_mintree.stem] = hist_mintree
#calculate histograms for all the clus files obtained with the pangenome tools
for clus_file in Path('gene_families','mintree','mintree').glob('*'):
print(clus_file)
f_classes, f_nof_seqs = getClasses(clus_file)
file_gen_distr = getGenomeDistribution(clus_file)
#number of genomes
f_genomes_list = set()
for key, value in classes.items():
for gene in value:
f_genomes_list.add(gene.split('_')[1])
f_nof_genomes = len(f_genomes_list)
print(f_nof_genomes)
print(file_gen_distr)
#create the histogram
f_hist_file = [0]*f_nof_genomes
for k in file_gen_distr:
f_hist_file[k-1] = file_gen_distr[k]
#print(hist_file)
#saving
n_genes_mintree[clus_file.stem] = f_nof_seqs
mintree_hist_list[clus_file.stem] = f_hist_file
hist_aux = list()
for i in range(len(hist_mintree)):
hist_aux.append(abs(hist_mintree[i] - f_hist_file[i]))
""" print(hist_mintree)
print(f_hist_file)
print(hist_aux) """
mintree_hist_diff[clus_file.stem] = hist_aux
#converting into dataframe to have a clear layout of the data
mintree_hist_df = pd.DataFrame(mintree_hist_list)
mintree_hist_df.index +=1
mintree_hist_diff_df = pd.DataFrame(mintree_hist_diff)
mintree_hist_diff_df.index +=1
c_diff = dict()
for k,v in mintree_hist_diff.items():
c_diff[k]=sum(v)
#print(c_diff)
#saving histograms as csv file
mintree_hist_df.to_csv(Path(mintree_result,'histograms.csv'),sep='\t')
mintree_hist_diff_df.to_csv(Path(mintree_result,'histograms_difference.csv'),sep='\t')
with open(Path(mintree_result,'nof_genes.csv'),'w') as f:
f.write(''+'\t'+'nof_genes'+'\t'+'difference\n')
for k,v in n_genes_mintree.items():
if k == clus_mintree.stem:
f.write(k+'\t'+str(v)+'\n')
else:
f.write(k+'\t'+str(v)+'\t'+str(abs(n_genes_mintree[clus_mintree.stem] - v))+'\n')
#PARAMETERS MINTREE DATASET
params_analysis = dict()
#params_analysis['dataset_mintree']=analysis(clus_mintree) #used only to check (false negative/positive is 0, precision is 1.0)
for clus_file in Path('gene_families','mintree','mintree').glob('*'):
print(clus_file)
params_analysis[clus_file.stem] = performance_analysis(clus_file,classes)
params_analysis[clus_file.stem]['c-diff']= c_diff[clus_file.stem]
params_df = pd.DataFrame(params_analysis)
params_df.to_csv(Path(mintree_result,'parameters.csv'),sep='\t')
print(params_df)
#----------------------#
#randomtree dataset analysis
n_genes_randomtree = dict()
randomtree_hist_list = dict()
randomtree_hist_diff = dict()
analysis_result = Path('analysis')
analysis_result.mkdir(exist_ok=True)
#dataset randomtree analysis
randomtree_result = Path(analysis_result,'analysis_randomtree')
randomtree_result.mkdir(exist_ok=True)
clus_randomtree = Path('input_datasets', 'dataset_randomtree','randomtree','randomtree.clus')
#analyze dataset_randomtree.clus, used as the reference to compare the other pangenome tools
#classes are the families of genes, nof_seqs is the number of sequences/genes
classes, nof_seqs = getClasses(clus_randomtree)
#histogram in the form of a dictionary (only values that occur at least once)
randomtree_gen_distr = getGenomeDistribution(clus_randomtree)
#number of genomes
genomes_list = set()
for key, value in classes.items():
for gene in value:
genomes_list.add(gene.split('_')[1])
nof_genomes = len(genomes_list)
print('@',nof_genomes)
#create the histogram
hist_randomtree = [0]*nof_genomes
for k in randomtree_gen_distr:
hist_randomtree[k-1] = randomtree_gen_distr[k]
#saving data
n_genes_randomtree[clus_randomtree.stem] = nof_seqs
randomtree_hist_list[clus_randomtree.stem] = hist_randomtree
#calculate histograms for all the clus files obtained with the pangenome tools
for clus_file in Path('gene_families','randomtree','randomtree').glob('*'):
print('rand',clus_file)
f_classes, f_nof_seqs = getClasses(clus_file)
file_gen_distr = getGenomeDistribution(clus_file)
#number of genomes
f_genomes_list = set()
for key, value in classes.items():
for gene in value:
f_genomes_list.add(gene.split('_')[1])
f_nof_genomes = len(f_genomes_list)
#create the histogram
f_hist_file = [0]*f_nof_genomes
#print(sorted(f_hist_file))
#print(f_nof_genomes)
for k in sorted(file_gen_distr):
f_hist_file[k-1] = file_gen_distr[k]
#print(hist_file)
#saving
n_genes_randomtree[clus_file.stem] = f_nof_seqs
randomtree_hist_list[clus_file.stem] = f_hist_file
hist_aux = list()
for i in range(len(hist_randomtree)):
hist_aux.append(abs(hist_randomtree[i] - f_hist_file[i]))
""" print(hist_randomtree)
print(f_hist_file)
print(hist_aux) """
randomtree_hist_diff[clus_file.stem] = hist_aux
#converting into dataframe to have a clear layout of the data
randomtree_hist_df = pd.DataFrame(randomtree_hist_list)
randomtree_hist_df.index +=1
randomtree_hist_diff_df = pd.DataFrame(randomtree_hist_diff)
randomtree_hist_diff_df.index +=1
c_diff = dict()
for k,v in randomtree_hist_diff.items():
c_diff[k]=sum(v)
print(c_diff)
#saving histograms as csv file
randomtree_hist_df.to_csv(Path(randomtree_result,'histograms.csv'),sep='\t')
randomtree_hist_diff_df.to_csv(Path(randomtree_result,'histograms_difference.csv'),sep='\t')
with open(Path(randomtree_result,'nof_genes.csv'),'w') as f:
f.write(''+'\t'+'nof_genes'+'\t'+'difference\n')
for k,v in n_genes_randomtree.items():
if k == clus_randomtree.stem:
f.write(k+'\t'+str(v)+'\n')
else:
f.write(k+'\t'+str(v)+'\t'+str(abs(n_genes_randomtree[clus_randomtree.stem] - v))+'\n')
#PARAMETERS RANDOMTREE DATASET
params_analysis = dict()
#params_analysis['dataset_randomtree']=analysis(clus_randomtree) #used only to check (false negative/positive is 0, precision is 1.0)
for clus_file in Path('gene_families','randomtree','randomtree').glob('*'):
params_analysis[clus_file.stem] = performance_analysis(clus_file,classes)
params_analysis[clus_file.stem]['c-diff']= c_diff[clus_file.stem]
params_df = pd.DataFrame(params_analysis)
params_df.to_csv(Path(randomtree_result,'parameters.csv'),sep='\t')
print(params_df)
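#Follow-up sketch (paths taken from the code above): the saved parameters table can be
#reloaded later for inspection or plotting, e.g.
#   df = pd.read_csv(Path('analysis', 'analysis_randomtree', 'parameters.csv'), sep='\t', index_col=0)
#   print(df)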
|
[
"vincenzo.bonnici@gmail.com"
] |
vincenzo.bonnici@gmail.com
|
834df0212f5bbb8fa18876bb86326951b7c3b43c
|
f6252f763b46053d81ffcc19919a5adcb0fff069
|
/trax/rl/envs/fake_env_test.py
|
3a16ff7d30443efec3e095e4c9be87d1bdddef79
|
[
"Apache-2.0"
] |
permissive
|
codespeakers/trax
|
ee5da9e39b83b173034ff2638d856dec38e9675a
|
9fc11bca7accda0394d629cac96558f4539d7f61
|
refs/heads/master
| 2020-12-14T15:50:49.634706
| 2020-01-18T20:52:27
| 2020-01-18T20:52:27
| 234,796,218
| 0
| 0
|
Apache-2.0
| 2020-01-18T20:51:52
| 2020-01-18T20:51:51
| null |
UTF-8
|
Python
| false
| false
| 1,942
|
py
|
# coding=utf-8
# Copyright 2019 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for trax.rl.fake_env."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow import test
from trax.rl.envs import fake_env
class FakeEnvTest(test.TestCase):
def test_done_action(self):
env = fake_env.FakeEnv(input_shape=(2, 3),
n_actions=10,
done_time_step=None,
done_action=9)
env.reset()
# Actions 0 to 8
for action in range(9):
_, reward, done, _ = env.step(action)
self.assertFalse(done)
self.assertEqual(-1.0, reward)
_, reward, done, _ = env.step(9)
self.assertTrue(done)
self.assertEqual(1.0, reward)
def test_done_time_step(self):
env = fake_env.FakeEnv(input_shape=(2, 3),
n_actions=10,
done_time_step=10,
done_action=None)
env.reset()
# Take 10 steps.
for _ in range(10):
_, reward, done, _ = env.step(0)
self.assertFalse(done)
self.assertEqual(-1.0, reward)
# Take final time-step, this is the time-step numbered 10 since time-steps
# are 0 indexed.
_, reward, done, _ = env.step(0)
self.assertTrue(done)
self.assertEqual(1.0, reward)
if __name__ == '__main__':
test.main()
|
[
"afrozm@google.com"
] |
afrozm@google.com
|
293abfae62da020ede610808540be26b601231b4
|
73179def098e16d86b34d2c387bd40e34413411c
|
/Ex061.py
|
0904d3968a4b5d33fe3176c6ec369ea93dc3b1b2
|
[] |
no_license
|
RogerMCL/PythonExercises
|
2ad1b8d92ecdd276b282b58681b271c55ddb35e7
|
f5a462636c171aa901157eda04137edce9650f4b
|
refs/heads/main
| 2023-04-30T10:34:28.333885
| 2021-05-06T17:20:47
| 2021-05-06T17:20:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
# EXERCISE 061 (UPDATE of 051):
print('==== 10 TERMOS DE UMA PA ====')
n = int(input('Primeiro termo: '))
r = int(input('Razão: '))
print('')
c = 1
while c != 10:
print(n, end=' -> ')
n += r
c += 1
print(n)
'''for c in range(0, 10):
print(n, end=' -> ')
n += r
print('FIM')'''
|
[
"noreply@github.com"
] |
RogerMCL.noreply@github.com
|
12712297da94e00fbf92f666e30f60c4f50f505e
|
0047e5e17a2212f17be6dbc42b2502663d9de5ee
|
/runner_wine/experiment.py
|
bed70ae8d38b302eed8ee0b964da9720ec9c849d
|
[
"Apache-2.0"
] |
permissive
|
caiyueliang/mlflow-example
|
75a845b1c076725d47a07f656db7d0cdfebc3fb8
|
b65631243098620b27d64cbb950536acfd8922c1
|
refs/heads/master
| 2020-06-09T01:00:13.754282
| 2019-07-25T10:11:06
| 2019-07-25T10:11:06
| 193,338,830
| 0
| 0
| null | 2019-06-23T11:16:22
| 2019-06-23T11:16:22
| null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
from mlflow.tracking import MlflowClient
client = MlflowClient()
experiments = client.list_experiments() # returns a list of mlflow.entities.Experiment
print("[experiments] %s" % experiments)
run = client.create_run(experiments[0].experiment_id) # returns mlflow.entities.Run
client.log_param(run.info.run_id, "hello", "world")
client.set_terminated(run.info.run_id)
client.set_tag(run.info.run_id, "tag_key", "tag_value") # add a tag to the run
|
[
"393900414@qq.com"
] |
393900414@qq.com
|
5a0c7f2232c9b5b9b6aebd0299f3b756198fbcab
|
a1488a281e582373b7270d85059f08330c0b685d
|
/dueros/directive/Display/tag/NewTag.py
|
86f19cc6e51950cde6ea45a11b1821aef4bfab0f
|
[
"Apache-2.0"
] |
permissive
|
xuchengzhi/bot-sdk-python
|
473fb8e7df629a6168983e26de74546bbca32768
|
966d103d55f9f1220c00d806ac13d0754015a31c
|
refs/heads/master
| 2020-12-06T17:00:55.587643
| 2019-10-18T02:54:45
| 2019-10-18T02:54:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
#!/usr/bin/env python3
# -*- encoding=utf-8 -*-
# description:
# author:jack
# create_time: 2018/9/17
"""
desc:pass
"""
from dueros.directive.Display.tag.TagTypeEnum import TagTypeEnum
from dueros.directive.Display.tag.BaseTag import BaseTag
class NewTag(BaseTag):
def __init__(self):
super(NewTag, self).__init__(TagTypeEnum.TAG_TYPE_NEW, '最新')
if __name__ == '__main__':
pass
|
[
"wangninghb@gmail.com"
] |
wangninghb@gmail.com
|
b5e8212fd31a82a108a7519c7d44f6b887b52698
|
a50b92f2a972f6b9094dfadafd0187de74a362d4
|
/form/urls.py
|
f984a3c7b51d60fc07ab5b34becc2560911be6ba
|
[] |
no_license
|
ganievdev/UzTube.uz
|
c2dc20a9f8b13ad61ed333a2552296f922a5ea41
|
776a0750dc88da4a98bcf734b71fcbb3f5b604f2
|
refs/heads/main
| 2023-06-28T05:18:37.066289
| 2021-08-06T07:12:16
| 2021-08-06T07:12:16
| 393,289,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,137
|
py
|
"""content URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.i18n import i18n_patterns
from django.views.generic import TemplateView
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('', include('main.urls')),
path('admin/', admin.site.urls),
path('client/', include('client.urls'))
]
# urlpatterns += i18n_patterns(
# )
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"algoprogram01@gmail.com"
] |
algoprogram01@gmail.com
|
15e2870866bde27351395ad4b2a7a69bc48e1a26
|
ec5f6799611a9b4f3d3a8a62bfb276ba0406c621
|
/Interpolation/bisimulation/step4/checkstep4.py
|
cbc07708aee11080b4b84ec392fd62eec25e9477
|
[] |
no_license
|
formal-verification-research/Modest-Probabilistic-Models-for-NoC
|
50dc14a495fce7e6e0ee622bbb348c8fa8a70e39
|
5c02029296f676fcd6812b4dd5fb97afda454af6
|
refs/heads/master
| 2023-07-25T16:04:05.747355
| 2023-06-23T16:40:16
| 2023-06-23T16:40:16
| 165,334,654
| 4
| 1
| null | 2022-03-07T17:17:36
| 2019-01-12T01:51:34
|
SMT
|
UTF-8
|
Python
| false
| false
| 5,710
|
py
|
import os
import re
probabilityRegex = re.compile(r'(\d\.\d+|0)\n')
probabilityTable = {
"0" : [(1/9), (16/81), (20/81), (4/27), (1/9), (1/27), (1/27), (1/27), (2/81), (2/81), (1/81), (1/81)],
"1" : [(1/9), (2/9), (2/9), (4/27), (1/9), (1/27), (1/27), (1/27), (1/27), 0, (1/27), 0],
"2" : [(1/9), (2/27), (10/27), (4/27), (1/9), (1/27), (2/27), 0, 0, (2/27), 0, 0],
"3" : [(1/9), (8/27), (4/27), (4/27), (1/9), (1/27), 0, (2/27), (1/27), 0, 0, (1/27)],
"4" : [0, 0, (4/9), 0, (1/3), 0, (1/9), 0, 0, (1/9), 0, 0],
"5" : [0, (4/9), 0, 0, (1/3), 0, 0, (1/9), (1/9), 0, 0, 0],
"6" : [(1/9), (2/9), 0, (2/9), (1/9), (1/9), 0, (2/9), 0, 0, 0, 0],
"7" : [(2/9), (1/9), (4/9), (1/9), 0, 0, (1/9), 0, 0, 0, 0, 0],
"8" : [(1/9), (1/9), (2/9), (1/3), 0, (1/9), 0, 0, 0, (1/9), 0, 0],
"9" : [(1/9), (1/3), (2/9), (1/9), (1/9), 0, 0, 0, (1/9), 0, 0, 0],
"10": [(1/9), (1/9), (2/9), (1/3), 0, (1/9), 0, 0, 0, 0, (1/9), 0],
"11": [(1/9), (1/3), (2/9), (1/9), (1/9), 0, 0, 0, 0, 0, 0, (1/9)]
}
stateTable = {
"1000" : "5",
"1030" : "3",
"1210" : "1",
"1300" : "3",
"1330" : "2",
"2010" : "3",
"2200" : "7",
"2230" : "1",
"2310" : "0",
"3000" : "5",
"3030" : "9",
"3210" : "0",
"3300" : "9",
"3330" : "4",
"1001" : "7",
"1031" : "1",
"1211" : "4",
"1301" : "1",
"1331" : "11",
"2011" : "1",
"2201" : "1",
"2231" : "1",
"2311" : "1",
"3001" : "3",
"3031" : "2",
"3211" : "1",
"3301" : "2",
"3331" : "4",
"1002" : "3",
"1032" : "0",
"1212" : "8",
"1302" : "0",
"1332" : "2",
"2012" : "2",
"2202" : "4",
"2232" : "4",
"2312" : "2",
"3002" : "3",
"3032" : "2",
"3212" : "2",
"3302" : "2",
"3332" : "4",
"1010" : "7",
"1200" : "3",
"1230" : "0",
"1310" : "1",
"2000" : "5",
"2030" : "3",
"2210" : "1",
"2300" : "3",
"2330" : "2",
"3010" : "3",
"3200" : "3",
"3230" : "2",
"3310" : "2",
"1011" : "4",
"1201" : "1",
"1231" : "1",
"1311" : "4",
"2001" : "3",
"2031" : "0",
"2211" : "8",
"2301" : "0",
"2331" : "2",
"3011" : "1",
"3201" : "0",
"3231" : "2",
"3311" : "6",
"1012" : "1",
"1202" : "2",
"1232" : "2",
"1312" : "1",
"2002" : "10",
"2032" : "2",
"2212" : "4",
"2302" : "2",
"2332" : "6",
"3012" : "0",
"3202" : "2",
"3232" : "6",
"3312" : "2"
}
print("\nStarting process.....")
for val0 in range (1,4):
for i in range (1,4):
val1 = (i + 1) % 4
for j in range (1,4):
val2 = (j + 2) % 4
for val3 in range (0,3):
outputFile = "dataFiles/output" + str(val0) + str(val1) + str(val2) + str(val3)
command = "mono /mnt/home/benjaylew/tools/Modest/mcsta.exe ./step4CounterExamples.modest -E \"val0=" + str(val0) + ", val1=" + str(val1) + ", val2=" + str(val2) + ", val3=" + str(val3) + "\" > " + str(outputFile)
print("Running: " + command)
os.system(command)
dataFile = open(outputFile)
probabilityList = []
data = dataFile.read()
data = data.split("Probability:")
for item in data:
match = probabilityRegex.search(item)
if match:
probabilityList.append(match.group().split('\n')[0])
dataFile.close()
probabilityList = probabilityList[1:14]
state81 = str(val0) + str(val1) + str(val2) + str(val3)
state = stateTable.get(state81)
abstractProbability = probabilityTable.get(state)
probabilityFile = open("dataFiles/probabilities" + str(val0) + str(val1) + str(val2) + str(val3), "a")
probabilityFile.write(state81)
probabilityFile.write("\n")
for i in range(0, len(probabilityList)):
probabilityFile.write("Abstract probability: ")
probabilityFile.write(str(abstractProbability[i]))
#print("Abstract probability: " + str(abstractProbability[i]))
probabilityFile.write("\t Concrete probability: ")
probabilityFile.write(probabilityList[i])
#print("Concrete probability: " + str(probabilityList[i]))
probabilityFile.write("\t Difference: ")
difference = abstractProbability[i] - float(probabilityList[i])
probabilityFile.write(str(difference))
#print("Difference: " + str(difference))
probabilityFile.write("\n")
if (difference > 0.00000001 or difference < -0.00000001):
print(state81, end="")
print("->", end="")
print(i, end="")
print("\tAbstract probability: ", end= "")
print(str(abstractProbability[i]), end="")
print("\tConcrete probability: ", end="")
print(probabilityList[i], end="")
print("\tDifference: ", end="")
print(difference)
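# Sanity-check sketch (assumption: every row of probabilityTable is a complete
# distribution over the 12 successor states, so its entries should sum to 1):
for check_state, check_probs in probabilityTable.items():
    assert abs(sum(check_probs) - 1.0) < 1e-9, check_state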
|
[
"benjaylew@el176-threadripper.usu.edu"
] |
benjaylew@el176-threadripper.usu.edu
|
e145f747afa99d212bbec96ef87895a4f4b34bec
|
362f8eeb4dad793eb488f4e80acb65ace793c6e8
|
/function/bot_chart.py
|
b2ed9587bcdc6971d9cdb6c1b3bd2f45d379130b
|
[] |
no_license
|
louxinye/lxybot_v2
|
ab3613fab060b9d8d805482a9705fbdaab776cd9
|
8ac3ce319491d81e2ec5dda54c778e317fd56719
|
refs/heads/master
| 2021-04-15T07:59:17.103282
| 2018-12-03T03:01:29
| 2018-12-03T03:01:29
| 126,855,317
| 9
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,406
|
py
|
# -*- coding: utf-8 -*-
# osu!新人群chart系统
import re
from function import bot_osu
from function import bot_SQL
chart_bid = [338646, 808996, 71829]
now_turns = 4
force_mod = []
allow_mod = ['EZ', 'HR', 'HD', 'SD', 'PF', 'DT', 'NC', 'FL', 'SO']
# 提交chart,效果如下:更新用户信息
def submitChart(user_qq):
userinfo = bot_osu.searchUserInfo(user_qq, update=False)
uid = userinfo['uid']
name = userinfo['name']
pp = int(float(userinfo['pp']))
if not uid:
return userinfo['msg']
new_result = bot_osu.getUserRecent(uid, 0, max_num=15)
if not new_result:
msg = '游戏记录查询出错,请稍后再试'
return msg
current_chart = myChart(user_qq)['chart_info']
old_result_list = getOldResult(current_chart)
update_list = []
for recent in new_result:
bid = int(recent['beatmap_id'])
        if bid not in chart_bid: # not a chart map, skip
continue
        if recent['rank'] == 'F': # failed play, skip
continue
(mul, mod_list) = bot_osu.getMultiply(recent['enabled_mods'], EZbuff=1.8, Mtype=2)
        if not calAllowMod(mod_list): # mods do not meet the requirements, skip
continue
index = chart_bid.index(bid)
new_chart_score = calChartScore(recent, pp, mul)
print('uid:%s, bid:%s, old:%.2f, new:%.2f' % (uid, bid, old_result_list[index], new_chart_score))
if new_chart_score > old_result_list[index] + 0.005:
for i in range(len(update_list)):
if update_list[i]['beatmap_id'] == bid:
del update_list[i]
break
update_list.append(recent)
old_result_list[index] = new_chart_score
if not update_list:
msg = '您未更新chart成绩'
return msg
msg = '您更新了下列chart成绩:'
for update in update_list:
        # For each chart record: 0:uid, 1:bid, 2:turns, 3:pp, 4:c300, 5:c100, 6:c50, 7:c0, 8:score, 9:combo, 10:acc, 11:rank, 12:mod, 13:mul, 14:time, 15:mode, 16:result
bid = int(update['beatmap_id'])
acc = bot_osu.getAcc(update['count300'], update['count100'], update['count50'], update['countmiss'])
new_chart_score = old_result_list[chart_bid.index(bid)]
sql = 'SELECT * FROM chart WHERE uid=%s and bid=%s and turns=%s' % (uid, update['beatmap_id'], now_turns)
old_chart_info = bot_SQL.select(sql)
if old_chart_info:
sql = 'UPDATE chart SET current_pp=%s, count300=%s, count100=%s, count50=%s, count0=%s, map_score=%s, map_combo=%s,' \
'map_acc=%s, map_rank=\'%s\', map_mod=%s, map_multiply=%.3f, map_time=\'%s\', chart_score=%.2f, user_name=\'%s\'' \
'WHERE uid=%s and bid=%s and turns=%s' % (pp, update['count300'], update['count100'], update['count50'],
update['countmiss'], update['score'], update['maxcombo'], acc, update['rank'], update['enabled_mods'],
mul, update['date'], new_chart_score, name, uid, bid, now_turns)
msg = msg + '\n\nbid: %s\n得分: %.2f → %.2f' % (bid, old_chart_info[0][16], new_chart_score)
else:
sql = 'INSERT INTO chart VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, \'%s\', %s, %.3f, \'%s\', \'0\', %.2f, \'%s\')'\
% (uid, bid, now_turns, pp, update['count300'], update['count100'], update['count50'], update['countmiss'],update['score'],
update['maxcombo'], acc, update['rank'], update['enabled_mods'], mul, update['date'], new_chart_score, name)
msg = msg + '\n\nbid: %s\n得分: 0 → %.2f' % (bid, new_chart_score)
bot_SQL.action(sql)
return msg
# Check whether the mods the player enabled satisfy the chart's requirements
def calAllowMod(mod_list):
for forcemods in force_mod:
if forcemods not in mod_list:
return False
for mods in mod_list:
if mods not in allow_mod and mods not in force_mod:
return False
return True
# Chart score calculation
def calChartScore(playmsg, user_pp, mod_mul):
acc = float(bot_osu.getAcc(playmsg['count300'], playmsg['count100'], playmsg['count50'], playmsg['countmiss'])) / 100
combo = int(playmsg['maxcombo'])
pp = int(float(user_pp))
miss = int(playmsg['countmiss'])
result = (15 + acc**2.5 * combo**0.5 - pp * 0.004 - miss * 0.2) * mod_mul
return result
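# Worked example with hypothetical numbers: acc = 0.98, combo = 500, pp = 3000,
# 1 miss and a mod multiplier of 1.12 give
# (15 + 0.98**2.5 * 500**0.5 - 3000*0.004 - 1*0.2) * 1.12, which is roughly 27.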
# Get the old chart score for each bid in chart_bid; 0 is used when there is no score
def getOldResult(current_chart):
result = [0, 0, 0]
for i in range(len(chart_bid)):
for oldplay in current_chart:
if oldplay['turns'] == now_turns and oldplay['bid'] == chart_bid[i]:
result[i] = oldplay['result']
break
return result
# Query the current-turn chart records of the given QQ number; returns a list of dicts. If getMsg is True, a detailed text message is also built.
def myChart(user_qq, getMsg=False):
chart_info = []
sql = 'SELECT * FROM user where qq = \'%s\'' % user_qq
result = bot_SQL.select(sql)
if not result:
msg = '您未绑定'
return {'msg': msg, 'chart_info':chart_info}
uid = result[0][1]
name = result[0][2]
sql = 'SELECT * FROM chart WHERE uid = %s AND turns = %s' % (uid, now_turns)
result = bot_SQL.select(sql)
if not result:
msg = '您没有相应chart成绩'
return {'msg': msg, 'chart_info': chart_info}
    # For each chart record: 0:uid, 1:bid, 2:turns, 3:pp, 4:c300, 5:c100, 6:c50, 7:c0, 8:score, 9:combo, 10:acc, 11:rank, 12:mod, 13:mul, 14:time, 15:mode, 16:result
msg = '%s的成绩如下(第%d期)' % (name, now_turns)
for chart in result:
if getMsg:
rankmsg = getRankInfo(chart[2], chart[1])
rank = 0
for i in range(len(rankmsg)):
if rankmsg[i][0] == uid:
rank = i + 1
msg = msg + '\n\nbid: %s\n评分: %s\nchart得分: %.2f\n排名: %s/%s\n时间: %s' % \
(chart[1], chart[11], chart[16], rank, len(rankmsg), chart[14])
chart_info.append({'turns': chart[2], 'bid': chart[1], 'result': chart[16]})
return {'msg': msg, 'chart_info':chart_info}
# Get all players' records for the given chart map, sorted by chart score in descending order
def getRankInfo(turns, bid):
sql = 'SELECT uid, user_name, chart_score FROM chart WHERE bid = %s and turns = %s ORDER BY chart_score DESC' % (bid, turns)
result = bot_SQL.select(sql)
return result
# Build the text ranking of the top players for a chart map; if bid is omitted, all chart maps of the turn are listed
def outputRankMsg(turns, bid=0, single_max_num=10, all_max_num=3):
if bid:
result = getRankInfo(turns, bid)
msg = 'bid: %s' % bid
for i in range(min(single_max_num, len(result))):
msg = msg + '\n%s: %s (%s)' % (i+1, result[i][1], result[i][2])
else:
msg = '第%s期全部chart排名一览' % turns
for bid in chart_bid:
msg = msg + '\n\nbid: %s' % bid
result = getRankInfo(turns, bid)
for i in range(min(all_max_num, len(result))):
msg = msg + '\n%s: %s (%s)' % (i+1, result[i][1], result[i][2])
return msg
# Handle the user's ranking command and produce the final ranking output
def rankChart(content):
if content == '!chart_top':
msg = outputRankMsg(now_turns)
elif '!chart_top ' in content:
check_bid = re.match(r'^!chart_top ([1-9][0-9]*)$', content)
if check_bid:
bid = int(check_bid.group(1))
if bid not in chart_bid:
msg = 'bid: %s\n不是本期chart指定图' % bid
else:
msg = outputRankMsg(now_turns, bid=bid)
else:
msg = '您的!chart_top指令使用错误'
else:
msg = '无法识别,推测您是想使用指令!chart_top x(x为参数)'
return msg
def getChart():
txt = '''本期chart内容如下:
bid: %s
强制Mod: %s
可选Mod: %s
允许fail: 否
得分方式: 太长了懒得写
!submit指令用于提交最近15次成绩,如果有包含本歌曲则进行得分计算''' \
% (printAllow(chart_bid), printAllow(force_mod), printAllow(allow_mod))
return txt
def printAllow(list_m):
msg = ''
for name in list_m:
if not msg:
msg = msg + '%s' % name
else:
msg = msg + ',%s' % name
if not msg:
msg = '无'
return msg
|
[
"1061566571@qq.com"
] |
1061566571@qq.com
|
55d3b7638da30ef127a474aa62d94baf1f6ed049
|
86330954b156feab96469e240dc7eb3be4844b6b
|
/ros2_ws/install/pi_car/lib/python3.8/site-packages/pi_car/pi_car.py
|
6b5defe4aa51d0abfaff6190f189426ebf0473e5
|
[] |
no_license
|
b0hne/pi_ar_project
|
25ccf85b1ed3c74c10ad5e6d3b2f037f1ae5ccae
|
7f3a91638e4d30aa97f433e4e9a8bbd88ca1e1f2
|
refs/heads/master
| 2022-12-08T06:34:13.682564
| 2020-08-13T22:58:55
| 2020-08-13T22:58:55
| 287,394,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,864
|
py
|
import rclpy
from rclpy.node import Node
from geometry_msgs.msg import Twist
import board
import busio
import adafruit_pca9685
import operator
DC_OFF = 0
# DC_MIN = 24575
DC_MIN = 25575
DC_MAX = 65535
DC_DIFF = DC_MAX - DC_MIN
MOTORS = 4
FREQUENCY = 100
# top to bottom, left to right
FORWARD = [13, 15, 2, 0]
BACKWARD = [12, 14, 3, 1]
def setupPCA(self):
self.i2c = busio.I2C(board.SCL, board.SDA)
self.pca = adafruit_pca9685.PCA9685(self.i2c)
self.pca.frequency = FREQUENCY
# print('pca is setup!')
class PiCarDriver(Node):
def __init__(self):
super().__init__('pi_car_driver')
setupPCA(self)
self.subscription = self.create_subscription(
Twist, 'cmd_vel', self.driver_callback, 5)
def driver_callback(self, msg):
x = msg.linear.x
y = msg.angular.z
# print('x = ' , x)
# print('y = ' , y)
if(x == 0 and y == 0):
breaking(self)
else:
xy = abs(x) + abs(y)
# check for > 1
if(xy > 1):
x = x / abs(xy)
y = y / abs(xy)
# print('x = ' , x)
# print('y = ' , y)
acceleration_front = [x, x, x, x]
acceleration_left = [y, y, -y, -y]
acceleration = list(map(operator.sub, acceleration_front, acceleration_left))
print(acceleration)
# acceleration = indexwise_add(acceleration_front, acceleration_left)
# print('acceleration: ', [v /2 for v in acceleration])
# set_acceleration(self, [v /2 for v in acceleration])
set_acceleration(self, acceleration)
def __del__(self):
# destructor
if self.pca is not None:
self.pca.deinit()
if self.i2c is not None:
self.i2c.deinit()
def indexwise_add(a, b):
return [sum(x) for x in zip(a, b)]
def reset_engine(self):
breaking(self)
def breaking(self):
for i in range(MOTORS):
self.pca.channels[(FORWARD[i])].duty_cycle = DC_OFF
self.pca.channels[(BACKWARD[i])].duty_cycle = DC_OFF
def set_acceleration(self, acc):
for i in range(MOTORS):
if (acc[i] >= 0):
self.pca.channels[FORWARD[i]].duty_cycle = DC_MIN + int(acc[i]*DC_DIFF)
self.pca.channels[BACKWARD[i]].duty_cycle = DC_OFF
else:
self.pca.channels[FORWARD[i]].duty_cycle = DC_OFF
self.pca.channels[BACKWARD[i]].duty_cycle = DC_MIN - int(acc[i]*DC_DIFF)
def main(args=None):
    print('pi_car is setting up!')
rclpy.init(args=args)
pi_car_driver = PiCarDriver()
rclpy.spin(pi_car_driver)
pi_car_driver.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
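# Worked example of the mixing in driver_callback (a sketch; it assumes the first two
# channel indices drive the left-hand motors): with linear.x = 0.5 and angular.z = 0.5,
# acceleration_front = [0.5, 0.5, 0.5, 0.5] and acceleration_left = [0.5, 0.5, -0.5, -0.5],
# so acceleration = [0.0, 0.0, 1.0, 1.0]: the left pair stops while the right pair runs
# at full forward duty cycle, turning the robot to the left.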
|
[
"sebastian.benkel@tum.de"
] |
sebastian.benkel@tum.de
|
23458d70bd4f9ae696d8d81fa5c01f56971f7da7
|
3b7474148c07df7f4755106a3d0ada9b2de5efdc
|
/training/c25_flask/examples/world_api/original/tools.py
|
1dc1684dc799c4417844116947e284621a8d0bee
|
[] |
no_license
|
juancsosap/pythontraining
|
7f67466846138f32d55361d64de81e74a946b484
|
1441d6fc9544042bc404d5c7efffd119fce33aa7
|
refs/heads/master
| 2021-08-26T05:37:15.851025
| 2021-08-11T22:35:23
| 2021-08-11T22:35:23
| 129,974,006
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,890
|
py
|
import pymysql
from flask import request, jsonify, render_template, make_response, abort
def xmlify(template, value):
text = render_template(template, value=value)
response = make_response(text)
response.headers['Content-Type'] = 'application/xml'
return response
def prepare_response(template, info):
if len(info) > 0:
formats = ['application/json', 'application/xml']
accept = request.accept_mimetypes.best_match(formats)
if accept == 'application/json':
return jsonify(info)
elif accept == 'application/xml':
return xmlify(template, info)
else:
abort(406)
return make_response(jsonify({}), 204)
class MySQLDBManager:
def __init__(self, **kwargs):
self.host = kwargs['host'] if 'host' in kwargs else 'localhost'
self.port = kwargs['port'] if 'port' in kwargs else 3306
self.user = kwargs['user'] if 'user' in kwargs else 'root'
self.password = kwargs['password']
self.db = kwargs['db']
def connect(self):
self.conn = pymysql.connect(host=self.host,
port=self.port,
db=self.db,
user=self.user,
password=self.password)
self.cursor = self.conn.cursor()
def disconnect(self):
if self.conn:
self.conn.close()
def execute(self, sql, *args):
if len(args) > 0:
self.cursor.execute(sql, args)
else:
self.cursor.execute(sql)
result = self.cursor.fetchall()
return result
dbman = MySQLDBManager(password='roottoor', db='world')
module_name = 'tools.tools'
if __name__ == '__main__':
print('Loading {} module'.format(module_name))
else:
print('Importing {} module'.format(module_name))
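# A minimal usage sketch (hypothetical route; assumes a Flask app and a 'city.xml'
# template exist elsewhere in the project):
#
# from flask import Flask
# from tools import dbman, prepare_response
#
# app = Flask(__name__)
# dbman.connect()
#
# @app.route('/cities/<int:city_id>')
# def city(city_id):
#     rows = dbman.execute('SELECT * FROM city WHERE ID = %s', city_id)
#     return prepare_response('city.xml', rows)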
|
[
"user.nuage@gmail.com"
] |
user.nuage@gmail.com
|
65f46d501c7c32b10b708b08a9717deeef8c4ff4
|
460fb7cd2f60dedfa65f93521f8d9126fc68a420
|
/6-a-knapsack/knapsack.py3
|
f093114b54e75f357da5595cc5643e8a441a2324
|
[] |
no_license
|
mahdihasnat/Competitive-Programming-Core-Skills
|
71c9c06907058f8715616a817ac6e6db94bf2eab
|
648e69338a4262d6d9d64a0970409077a17868a6
|
refs/heads/master
| 2022-11-06T18:53:08.553890
| 2020-06-18T19:28:43
| 2020-06-18T19:28:43
| 272,723,478
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,093
|
py3
|
# -*- coding: utf-8 -*-
import sys
def main():
n, W = map(int, input().split())
items = [list(map(int, input().split())) for _ in range(n)]
result = []
dp = [[0] * (W +1) for _ in range(n+1)]
for i in range(1, n+1):
w = items[i-1][0]
v = items[i-1][1]
        for j in range(1, W+1):
            dp[i][j] = dp[i-1][j]
            if j >= w:
                dp[i][j] = max(dp[i][j], dp[i-1][j-w] + v)
        for j in range(1, W+1):
            dp[i][j] = max(dp[i][j], dp[i][j-1])
# for i in range(n+1):
# print( " ".join(map(str , dp[i])))
now = W
cost = dp[n][W]
    for i in range(n, 0, -1):
        # print(now, end=' ')
        # print(cost)
        w = items[i-1][0]
        v = items[i-1][1]
        if now >= w and dp[i-1][now - w] + v == dp[i][now] and cost > 0:
            result.append(i)
            now -= w
            cost -= v
assert(dp[0][now] == 0)
result.reverse()
print(len(result))
print(" ".join(map(str, result)))
if __name__ == '__main__':
main()
|
[
"mahdibuet3@gmail.com"
] |
mahdibuet3@gmail.com
|
6c7175ef0bf5c454553094b3f009ebac86114775
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/violin/_metasrc.py
|
466d497ebca4399340d12c5b16b62c2cd713264a
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994
| 2021-12-21T03:49:19
| 2021-12-21T03:49:19
| 234,146,634
| 0
| 0
|
MIT
| 2020-01-15T18:33:43
| 2020-01-15T18:33:41
| null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
import _plotly_utils.basevalidators
class MetasrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="metasrc", parent_name="violin", **kwargs):
super(MetasrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
[
"noreply@github.com"
] |
hugovk.noreply@github.com
|
f429f737cd6c9f3cb23fa464de6ec7f9685132fd
|
0469618e876fb9c027cafd909246ba899a30c4d2
|
/tests/models/test_optimade_json.py
|
548ae189793cd2f8bd1cf448662e973a52239d37
|
[
"MIT"
] |
permissive
|
JuDFTteam/optimade-python-tools
|
ed88ab9e23bb6a5440956a7d831215fb00386d2a
|
96a4ff9cdb148fd39b024beda2e02667f99547d9
|
refs/heads/master
| 2023-02-18T22:06:24.844480
| 2021-01-15T18:39:11
| 2021-01-15T18:39:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,406
|
py
|
from optimade.models import DataType
def test_convert_python_types():
"""Convert various Python types to OPTIMADE Data types"""
from datetime import datetime
expected_data_type = [
DataType.STRING,
DataType.INTEGER,
DataType.FLOAT,
DataType.LIST,
DataType.DICTIONARY,
DataType.UNKNOWN,
DataType.TIMESTAMP,
]
python_types_as_strings = [
"str",
"int",
"float",
"list",
"dict",
"None",
"datetime",
]
python_types_as_types = [str, int, float, list, dict, None, datetime]
test_none = None
python_types_as_objects = [
str("Test"),
42,
42.42,
["Test", 42],
{"Test": 42},
test_none,
datetime.now(),
]
for list_of_python_types in [
python_types_as_strings,
python_types_as_types,
python_types_as_objects,
]:
for index, python_type in enumerate(list_of_python_types):
assert isinstance(
DataType.from_python_type(python_type), DataType
), f"python_type: {python_type}"
assert DataType.from_python_type(python_type) == expected_data_type[index]
def test_convert_json_types():
"""Convert various JSON and OpenAPI types to OPTIMADE Data types"""
json_types = [
("string", DataType.STRING),
("integer", DataType.INTEGER),
("number", DataType.FLOAT),
("array", DataType.LIST),
("object", DataType.DICTIONARY),
("null", DataType.UNKNOWN),
]
openapi_formats = [
("date-time", DataType.TIMESTAMP),
("email", DataType.STRING),
("uri", DataType.STRING),
]
for list_of_schema_types in [json_types, openapi_formats]:
for schema_type, optimade_type in list_of_schema_types:
assert isinstance(
DataType.from_json_type(schema_type), DataType
), f"json_type: {schema_type}"
assert DataType.from_json_type(schema_type) == optimade_type
def test_get_values():
"""Check all data values are returned sorted with get_values()"""
sorted_data_types = [
"boolean",
"dictionary",
"float",
"integer",
"list",
"string",
"timestamp",
"unknown",
]
assert DataType.get_values() == sorted_data_types
|
[
"noreply@github.com"
] |
JuDFTteam.noreply@github.com
|
71de969b7a916bf5eab2c78a574d3186cf9d290b
|
365186abceefc51e811706ad325a2f53a63a25f8
|
/tests/scripts/comparisonPlots.py
|
e0fbf4cda1982e9d1ea7945d69f32ca47e3b51b7
|
[
"MIT"
] |
permissive
|
tboudreaux/pytopsscrape
|
a857bdca8558bf86f7afd5f8f3e6d2e5ca90fa64
|
c9f95e6a6419debb0b6a22f22d1574a8bbf73bd0
|
refs/heads/master
| 2023-04-06T17:48:08.812425
| 2023-04-04T00:01:13
| 2023-04-04T00:01:13
| 532,559,997
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,499
|
py
|
import matplotlib.pyplot as plt
from pyTOPSScrape.parse import load_opal
import os
import datetime
def make_comparison_plot():
TargetPath = "./GS98Target.opac"
TestPath = "./GS98TestResult.opac"
OPALPath = "./GS98OPAL.opac"
targetTime = datetime.datetime.fromtimestamp(os.path.getmtime(TargetPath))
testTime = datetime.datetime.fromtimestamp(os.path.getmtime(TestPath))
OPALTime = datetime.datetime.fromtimestamp(os.path.getmtime(OPALPath))
print(f"Target File Last Modified at: {targetTime}")
print(f"Test File Last Modified at: {testTime}")
print(f"OPAL Comp File Last Modified at: {OPALTime}")
Target = load_opal(TargetPath)
Test = load_opal(TestPath)
OPAL = load_opal(OPALPath)
fig, ax = plt.subplots(1,1,figsize=(10,7))
ax.plot(Target[0], Target[2][75, :, 13], label="Current Test Target")
ax.plot(Test[0], Test[2][75, :, 13], label="Test Result")
ax.plot(OPAL[0], OPAL[2][75, :, 13], label="OPAL")
ax.legend()
ax.set_xlabel("Log T")
ax.set_ylabel("Opacity")
ax.set_title("Comparision made at log(R)=-1.5")
plt.savefig("comparison.pdf", bbox_inches='tight')
fig, ax = plt.subplots(1,1,figsize=(10,7))
ax.plot(Target[0], Target[2][75, :, 13] - Test[2][75, :, 13])
ax.set_xlabel("Log T")
ax.set_ylabel("Opacity")
ax.set_title("Target - Result Residuals made at log(R)=-1.5")
plt.savefig("TRResid.pdf", bbox_inches='tight')
if __name__ == "__main__":
    make_comparison_plot()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
d37148f04674bda7b996896bf1686b7c6b7db8c4
|
a8b0599af76b5393039431f876be00d628a1fe43
|
/backend/kangas/server/__init__.py
|
4f348efc3c7c9d70831cec668eae3966b7210d79
|
[
"Apache-2.0"
] |
permissive
|
comet-ml/kangas
|
c951f648d890dca5a66cbab405d3437be2f3e9e3
|
df0c1a495032cc4f1c367c74fcb0ef6e5a2063be
|
refs/heads/main
| 2023-06-12T23:38:43.068259
| 2023-06-05T18:38:34
| 2023-06-05T19:28:33
| 550,324,241
| 944
| 41
|
Apache-2.0
| 2023-06-05T19:28:35
| 2022-10-12T15:10:04
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,379
|
py
|
# -*- coding: utf-8 -*-
######################################################
# _____ _____ _ _ #
# (____ \ _ | ___) (_) | | #
# _ \ \ ____| |_ ____| | ___ ___ _ _ | | #
# | | | )/ _ | _)/ _ | |(_ / __) |/ || | #
# | |__/ ( ( | | | ( ( | | |__| | | | ( (_| | #
# |_____/ \_||_|___)\_||_|_____/|_| |_|\____| #
# #
# Copyright (c) 2023 Kangas Development Team #
# All rights reserved #
######################################################
import os
from .queries import KANGAS_ROOT # noqa
def start_tornado_server(port, debug_level=None, max_workers=None):
"""
Args:
        port: (int) the port to start the backend server
        debug_level: (str) None means suppress output from servers
        max_workers: (int) max number of threads for the request executor
    """
import asyncio
from concurrent.futures import ThreadPoolExecutor
import tornado
import tornado.log
import tornado.options
import tornado.web
from .tornado_server import datagrid_handlers
async def main():
if debug_level is not None:
tornado.options.options["logging"] = debug_level
tornado.log.enable_pretty_logging()
# set max_workers
executor = ThreadPoolExecutor(max_workers=max_workers)
print(
"Kangas tornado backend server starting with %s max workers"
% executor._max_workers
)
for handler in datagrid_handlers:
handler[1].executor = executor
app = tornado.web.Application(datagrid_handlers)
app.listen(port)
await asyncio.Event().wait()
try:
asyncio.run(main())
except KeyboardInterrupt:
print()
print("Exiting Kangas tornado backend server")
def start_flask_server(host, port, debug_level=None, max_workers=None):
from .flask_server import run
if max_workers is None:
max_workers = min(32, os.cpu_count() + 4)
print("Kangas flask backend server starting with %s max workers" % max_workers)
try:
run(
host=host,
port=port,
debug_level=debug_level,
max_workers=max_workers,
)
except KeyboardInterrupt:
print()
print("Exiting Kangas flask backend server")
|
[
"doug.blank@gmail.com"
] |
doug.blank@gmail.com
|
6c06a62093cbb8abc57c9f55f9cf82b806b88996
|
f10df3fa468d46d7ac43580cb193784c200dca86
|
/Ethererum Developers Interviews/ethereum_interviews.py
|
f2768b6ceef8ee297e872709e502afb0f16b7b9a
|
[
"MIT"
] |
permissive
|
lenamax2355/Kaggle-Compendium
|
5f517e994e69aee54eca6fa0144a373e6a73500b
|
61634ba742f9a0239f2d1e45973c4bb477ac6306
|
refs/heads/master
| 2022-11-21T19:17:15.483561
| 2020-07-15T18:40:28
| 2020-07-15T18:40:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
import pandas as pd
import generic_topic_detector
filepath = "C:\\Users\\rupachak\\Desktop\\Kaggle Data\\Ethereum Developer Interviews\\interview.csv"
interview_frame = pd.read_csv(filepath)
text_list = interview_frame['Who are you and what are you working on?'].values
text_list = list(map(lambda x:str(x),text_list))
lda_html = generic_topic_detector.get_formatted_html_data(text_list)
with open('eth_topics.html','w') as lda_topic:
lda_topic.write(lda_html)
|
[
"rupachak@adobe.com"
] |
rupachak@adobe.com
|
b6ae5f7abc5a2801d14fbb1bfd2ac98248a07ad6
|
f4d7937547c8d25649c3078b9a799f966b96399d
|
/AlphaZero/train/sequential/selfplay.py
|
656945db38dfb55be766f5bd227cb8bd468aafab
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
water-vapor/AlphaZero
|
eec7ec9359efa2e6d7560cc41f3ac6956524c07d
|
920162071c7a1557cbf45ffdecd840ee2b25b88f
|
refs/heads/master
| 2021-03-19T10:38:46.775914
| 2018-06-05T09:23:47
| 2018-06-05T09:23:47
| 97,711,015
| 9
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,485
|
py
|
import os
import numpy as np
from AlphaZero.game.gameplay_go import Game
from AlphaZero.train.sequential.nn_eval_seq import NNEvaluator
def selfplay(best_player_name, base_dir='data', num_games=25000):
""" Generate self play data and search probabilities.
Results are stored in data/selfplay/<best_player_name>/
Game records are stored as sgf files.
Search probabilities are stored as pickle files.
Args:
        best_player_name: the name of the best player
        base_dir: base directory holding the models and self-play data (default 'data')
        num_games: number of games to play
Returns:
None
"""
best_player = NNEvaluator(os.path.join(base_dir, 'models', best_player_name))
# This can be parallelized
state_dataset = np.zeros((0, 17, 19, 19))
probs_dataset = np.zeros((0, 362))
result_dataset = np.zeros(0)
for num_game in range(num_games):
# TODO: indicate this is a selfplay, not yet implemented in gameplay.Game
match = Game(best_player, best_player)
result = match.start()
state_np, probs_np, result_np = match.get_history()
state_dataset = np.concatenate([state_dataset, state_np])
probs_dataset = np.concatenate([probs_dataset, probs_np])
result_dataset = np.concatenate([result_dataset, result_np])
# TODO: auto resignation should be implemented
with open(os.path.join(base_dir, 'selfplay', best_player_name, 'train.npy'), 'wb+') as f:
np.save(f, (state_dataset, probs_dataset, result_dataset))
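# A minimal usage sketch (hypothetical player name; assumes a trained model exists
# under data/models/ and data/selfplay/<name>/ has been created):
#
# if __name__ == '__main__':
#     selfplay('best_player_v1', base_dir='data', num_games=2)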
|
[
"zzhaoah@connect.ust.hk"
] |
zzhaoah@connect.ust.hk
|
14f2874208906f621bdbf9f032deefe4c6b5337e
|
e90a5ab7a29babfc2f4c0de6958a3897a4901d34
|
/Array/Plus One.py
|
a3648118dc325284fe3d3ad7decde76e435f1d8b
|
[] |
no_license
|
Aleena-Mishra-10/LeetCode-Problems
|
031dfe5ce589d26373321c05bda7a7b5bd712fdf
|
af3685a30892c9da9335dc95449f2b2081cc0a11
|
refs/heads/main
| 2023-08-04T04:10:30.288583
| 2021-09-17T05:51:19
| 2021-09-17T05:51:19
| 328,077,151
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
stri = " "
for i in range(len(digits)):
stri=stri+str(digits[i])
print(int(stri)+1)
ans=[int(x) for x in str(int(stri)+1)]
print(ans)
return ans
|
[
"noreply@github.com"
] |
Aleena-Mishra-10.noreply@github.com
|
1215168ddbc7f8a6ee183800bdd85d6cce9fbc46
|
dd5a986ef4bc625e07b23ff5c98c3296f43a8ec2
|
/mysite/settings.py
|
771010c4ac70bdf1b82f02c36d258802bb248e87
|
[] |
no_license
|
maheshlangote1/my-first-blog
|
dafe1d7dcca08a0b20c6c999bc711fd21825474a
|
6f69be64727f6501f89e47467df2874172922725
|
refs/heads/master
| 2021-09-02T09:31:02.716380
| 2018-01-01T13:02:40
| 2018-01-01T13:02:40
| 115,900,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,119
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yxzzojkm8%-&313849$9)f&-5*7#7hly8-eo+8ho#5&x6^f1w0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"maheshlangote1@gmail.com"
] |
maheshlangote1@gmail.com
|
caac4b906ea5a48d94d2730fe65597e6501777c8
|
48858c685f1c60755b2eaa4aa4005336cd061fee
|
/server.py
|
9a3edcf5a5224ca4f0f37fc2d67f6b9c1529877a
|
[] |
no_license
|
jinky0ung/MyProject
|
c8052aa75a129f2a6180a1cc5009d553b1e1bd0b
|
3430a4521ce0b9d69be43fd83c7eebebd6937b90
|
refs/heads/master
| 2023-05-23T22:39:39.398447
| 2020-02-28T03:33:03
| 2020-02-28T03:33:03
| 232,560,873
| 0
| 0
| null | 2023-05-22T22:17:27
| 2020-01-08T12:41:58
|
Python
|
UTF-8
|
Python
| false
| false
| 4,089
|
py
|
# Packages to install: requests, flask, pymongo, beautifulsoup4
import requests # used to send requests to a URL
import openpyxl
from bs4 import BeautifulSoup # crawling; makes HTML easy to search through
from flask import Flask # serves the right response to API/HTML requests (server framework)
from flask import render_template # renders HTML nicely to the browser
from flask import jsonify # returns a dictionary nicely from an API
from flask import request # holds the values sent from the front end
from pymongo import MongoClient # helps Python connect to MongoDB
client = MongoClient('localhost', 27017) # MongoDB runs on port 27017.
db = client.dbbook # create a database named 'dbbook'.
app = Flask(__name__)
@app.route('/')
def home():
return render_template('index.html')
# POST requests cannot be sent from the browser via a URL alone -> only through JavaScript Ajax
# when a POST request arrives at the URL localhost:5000/post
@app.route('/post', methods=['POST'])
def saving():
    # Store the values sent by the browser (JavaScript AJAX) in variables.
    isbn_receive = request.form['isbn_give'] # ISBN received from the client
    startdate_receive = request.form['startdate_give'] # start date received from the client
    enddate_receive = request.form['enddate_give'] # end date received from the client
star_receive = request.form['star_give']
channel_receive = request.form['channel_give']
shreview_receive = request.form['shreview_give']
lgreview_receive = request.form['lgreview_give']
    # headers identify who is sending the request when using the requests library (not important)
    #headers = {
    #    'User Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko)'
    #    'Chrome/73.0.3683.86 Safari/537.36'}
    headers = {
        'Authorization': f'KakaoAK 88b58d8c44794be513f1a1261960236d'
    }
    data = requests.get(f"https://dapi.kakao.com/v3/search/book?&query={isbn_receive}", headers=headers)
    response = data.json()
    # Read the book info from the 0th entry of 'documents' and store each field in a variable.
    datetime = response['documents'][0]['datetime'].split('T')[0]
    author = response['documents'][0]['authors']
    translator = response['documents'][0]['translators']
    publisher = response['documents'][0]['publisher']
    title = response['documents'][0]['title']
    bookimage = response['documents'][0]['thumbnail']
    # Pack the data into a dictionary (to make saving easy).
bookinfo = dict(isbn=isbn_receive,
시작일=startdate_receive,
종료일=enddate_receive,
나의평점=star_receive,
추천경로=channel_receive,
한줄리뷰=shreview_receive,
나의리뷰=lgreview_receive,
발행일=datetime,
저자 = author,
역자 = translator,
출판사 = publisher,
책제목 = title,
책이미지 = bookimage)
    # Save the dictionary into the bookinfos collection.
db.bookinfos.insert_one(bookinfo)
return jsonify({'result': 'success'})
# when a GET request arrives at the URL localhost:5000/post
@app.route('/post', methods=['GET'])
def listing():
    # Find all saved records & exclude the _id field from the output.
    result = list(db.bookinfos.find({}, {'_id': 0}))
    # Return the book info under the 'bookinfos' key.
    # Repackage the data as a dictionary so it can be passed to jsonify.
response = {
'result': 'success',
'bookinfos': result
}
return jsonify(response)
if __name__ == '__main__':
app.run('localhost', port=5000, debug=True)
|
[
"noreply@github.com"
] |
jinky0ung.noreply@github.com
|
8fc7949fc9a35ac9fe1f07a7a6aad32c1d35a8bf
|
f87e1a6712041174046f26fdb8464e5d871babd4
|
/basics.py
|
ea6b3b35bfec36f0430b04d080fad1278fbe066b
|
[
"MIT"
] |
permissive
|
jdidi-boujneh/xyz
|
fe6a8ce73ab9d4b8925a34a4639d63058e603e00
|
714cd5cb90d6e37d7fc819e3fa2cb996bff2c890
|
refs/heads/master
| 2022-12-26T14:53:47.294185
| 2020-09-14T14:44:32
| 2020-09-14T14:44:32
| 295,428,778
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/home')
def home():
    return render_template('home.html')
if __name__ == "__main__":
    app.run(debug=True)
|
[
"noreply@github.com"
] |
jdidi-boujneh.noreply@github.com
|
1e1ae4854016d822cbf704f310b243729c7e3e4a
|
21acc25dd3969318dd8476e364fe2fb4eabfe4f4
|
/podcastninja/migrations/0005_auto_20150423_1005.py
|
5c94549f4db338ebc255bd4a0a32cb7727b5426a
|
[] |
no_license
|
monty5811/podcastninja
|
72dc98375974b1714a8457b09126981a76166b9a
|
94a55536270f3e1c4e4f2160e0a24e79c9f40b7f
|
refs/heads/master
| 2020-05-17T01:24:57.312486
| 2015-05-25T15:18:03
| 2015-05-25T15:18:03
| 35,883,288
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('podcastninja', '0004_podcastitem_s3_url'),
]
operations = [
migrations.AlterField(
model_name='podcastitem',
name='s3_url',
field=models.TextField(blank=True, null=True, verbose_name=b's3 url', validators=[django.core.validators.URLValidator()]),
),
]
|
[
"montgomery.dean97@gmail.com"
] |
montgomery.dean97@gmail.com
|
abe4a8aa610e86e4477086253dd53fe9bd29e75a
|
ac20cab9371639dc3b722914c7a7033c9164d574
|
/Indian_Liver_Problem/demo.py
|
4e0f430790215f5368a91200c9e635328ed0dd2d
|
[] |
no_license
|
Dishant1997/Electronic-Health-Record
|
ba6c0fe5f9ae94cbf1f11f07108eb3dc1861bcdc
|
199cae790e503d9e50b8ab13b492a3fe040bdc89
|
refs/heads/master
| 2020-08-13T01:07:21.621527
| 2019-10-13T19:40:49
| 2019-10-13T19:40:49
| 214,879,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 8 10:53:52 2019
@author: abd360
"""
from pycm import *
y_actu = [2, 0, 2, 2, 0, 1, 1, 2, 2, 0, 1, 2] # or y_actu = numpy.array([2, 0, 2, 2, 0, 1, 1, 2, 2, 0, 1, 2])
y_pred = [0, 0, 2, 1, 0, 2, 1, 0, 2, 0, 2, 2] # or y_pred = numpy.array([0, 0, 2, 1, 0, 2, 1, 0, 2, 0, 2, 2])
cm = ConfusionMatrix(actual_vector=y_actu, predict_vector=y_pred) # Create CM From Data
print(cm)
|
[
"noreply@github.com"
] |
Dishant1997.noreply@github.com
|
7854099507fd19c58fb6b7397b65f1a9f46d8286
|
20a645c0a25c63592a6042da79d0ca9bdcfd8992
|
/2010_11_09_Tennis/test_tennis.py
|
597123e586ad8f06d0c7ba7bea4cdbacaa4a1409
|
[] |
no_license
|
beatorizu/dojo-campinas
|
470b8d9f8d3cb6147793dce97aea08a31638b75d
|
db5c4869ddb7dccb8fc05c9a3e452fa8727a4e2c
|
refs/heads/master
| 2020-12-27T15:35:50.109185
| 2011-08-09T14:03:48
| 2011-08-09T14:03:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,892
|
py
|
import unittest
from tennis import Game
class TennisTestCase(unittest.TestCase):
def pontuar_jogador(self, jogador, n):
        for i in range(n):
self.game.pontuar(jogador)
def setUp(self):
self.game = Game()
def test_se_placar_inicial_eh_zero(self):
self.assertEquals(self.game.placar(), (0, 0, "em andamento"))
def test_jogador1_pontuando(self):
self.game.pontuar('jogador1')
self.assertEquals(self.game.placar(), (15, 0, "em andamento"))
self.game.pontuar('jogador1')
self.assertEquals(self.game.placar(), (30, 0, "em andamento"))
self.game.pontuar('jogador1')
self.assertEquals(self.game.placar(), (40, 0, "em andamento"))
def test_jogador2_pontuando(self):
self.game.pontuar('jogador2')
self.assertEquals(self.game.placar(), (0, 15, "em andamento"))
def test_jogador1_pontua_e_depois_jogador2_pontua(self):
self.game.pontuar('jogador1')
self.game.pontuar('jogador2')
self.assertEquals(self.game.placar(), (15, 15, "em andamento"))
def test_jogador1_vencedor(self):
self.pontuar_jogador('jogador1', 4)
self.assertEquals(self.game.placar()[2], "jogador1 venceu")
def test_jogador2_vencedor(self):
self.pontuar_jogador('jogador2', 4)
self.assertEquals(self.game.placar()[2], "jogador2 venceu")
def test_empate(self):
self.pontuar_jogador('jogador1', 3)
self.pontuar_jogador('jogador2', 3)
self.assertEquals(self.game.placar(), (40, 40, "deuce"))
def test_jogador1_pontua_no_empate(self):
self.pontuar_jogador('jogador1', 3)
self.pontuar_jogador('jogador2', 3)
self.pontuar_jogador('jogador1', 1)
self.assertEquals(self.game.placar(), ('A', 40, "vantagem jogador1"))
if __name__ == '__main__':
unittest.main()
|
[
"rennerocha@gmail.com"
] |
rennerocha@gmail.com
|
184e2752b2c8730b9ad69aadca3ff1754883ae36
|
92d6a36f97ba4691b8b75d471b251bb373015e38
|
/development/myprojects/prof/manage.py
|
ccd13e8c1c03620f469bae6de6f50a905caa39e2
|
[] |
no_license
|
aghee/cashconvert
|
7e5224c06fa53f2e8abbddf95a6e1d5315202a56
|
654bf952664e5df4e012650dc122b3fe9d9c3c5d
|
refs/heads/master
| 2020-03-16T23:40:35.467388
| 2018-05-13T20:06:09
| 2018-05-13T20:06:09
| 107,349,826
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "prof.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"mutembeiagnes@gmail.com"
] |
mutembeiagnes@gmail.com
|
224fabcf24e1332e5108b87f8f986a1011a7f344
|
c5b415c78fb047b6de191842d09573f07a2f3e6e
|
/media.py
|
beeb5fc5834c40779236274237c12d27e025b01c
|
[] |
no_license
|
EsraaQandel/ud036_StarterCode
|
72832f87c876691c3efe165c7d0998088770a9bb
|
72235f1653237391bdf63e263c97d6ee80e2389c
|
refs/heads/master
| 2021-07-06T02:40:13.258769
| 2017-09-28T20:11:57
| 2017-09-28T20:11:57
| 105,177,783
| 0
| 0
| null | 2017-09-28T17:20:08
| 2017-09-28T17:20:08
| null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
import webbrowser
class Movie():
""" A class the presents a movie with its title,storyline,image and a youtube trailer"""
def __init__ (self,movie_title, movie_storyline,poster_images,trailer_youtube):
self.title= movie_title
self.storyline = movie_storyline
self.poster_image_url = poster_images
self.trailer_youtube_url = trailer_youtube
def show_trailer(self):
webbrowser.open(self.trailer_youtube_url)
|
[
"noreply@github.com"
] |
EsraaQandel.noreply@github.com
|
a18876069899c3a874b97e8d190a3a3cc46169c8
|
069134fadf91de5c97a0360c924f4bbc3e885765
|
/manage.py
|
d6fa798186327a5bc28b63c4b76e6e671dad691b
|
[] |
no_license
|
sparkxgd/stu1803
|
be963fc0cd29c53171ea570e1e423d22bdbb665e
|
5c0637692fde1292f6611827737f0bd043b6e0d0
|
refs/heads/master
| 2023-01-14T15:42:34.435236
| 2020-11-18T07:23:25
| 2020-11-18T07:23:25
| 311,538,366
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'stu1803.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"472036660@qq.com"
] |
472036660@qq.com
|
0d7eff129f2e264e1c25ff638064a0a4cba89488
|
6b6630afb6a00636f5a89f14a3f6c45d15b91536
|
/todo_dj/todo_dj/asgi.py
|
34ccdfe7db5c2e09fb49ce1a7983b0c66839492c
|
[] |
no_license
|
python0909/todoapp_dj
|
0d3ec3947c652e2e37cd4e168d70f52064c1cd3e
|
910c33cb5a07bbfde1c460e581e4df1943c43638
|
refs/heads/main
| 2023-07-09T06:30:58.965985
| 2021-08-04T12:37:03
| 2021-08-04T12:37:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
"""
ASGI config for todo_dj project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'todo_dj.settings')
application = get_asgi_application()
|
[
"sainiboyms143@gmail.com"
] |
sainiboyms143@gmail.com
|
a39e8ae6b206d991cd0614e1fbc9302b6931c6c5
|
6fc7722c7c6ec3a33b294e812f0401c42b785059
|
/Camera Tilt Pan/Servo.py
|
76e43c070cb1141e3f97b2b4ff9b58de754520c4
|
[] |
no_license
|
bitoffabyte/Vr-Bot
|
cfd975e8142e262a3201e21fae6639c711fc7fda
|
2bbad78ec497adf8af9954b27a617a05f7c485ea
|
refs/heads/master
| 2022-11-25T00:26:41.503407
| 2020-07-29T22:06:00
| 2020-07-29T22:06:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7,GPIO.OUT)
pwm = GPIO.PWM(7,50)
pwm.start(5)
pwm.ChangeDutyCycle(2)
# convert an angle in degrees to a PWM duty cycle
def cfa(y):
    return y/18 + 2
n = 0
while n != 200:
    n = int(input('Enter the angle '))
    pwm.ChangeDutyCycle(cfa(n))
|
[
"nr.rnarayan@gmail.com"
] |
nr.rnarayan@gmail.com
|
857a86a9d1aefc15857f4de3339d34b9a66cbe76
|
ccb9c57b6587060bb0715bcf5a5850a617f0526d
|
/code/client/make_0_1_map.py
|
125a9e2b992dbdef562f9d32899d02fc56e9f9b9
|
[] |
no_license
|
KodamaSakuno/uw2ol
|
fbbe7f0c8ad779f6665a5fb695fc2d845c0f5575
|
38818fba1c9af399077c130de99032e4b846a6cb
|
refs/heads/master
| 2023-08-28T07:13:37.271390
| 2021-11-02T16:51:25
| 2021-11-02T16:51:25
| 390,922,353
| 1
| 1
| null | 2021-07-30T03:52:42
| 2021-07-30T03:52:41
| null |
UTF-8
|
Python
| false
| false
| 1,562
|
py
|
from map_maker import MapMaker
import numpy
import pickle
# add relative directory to python_path
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
# import from common(dir)
import constants as c
map_maker = MapMaker()
map_maker.set_world_piddle()
matrix = map_maker.world_map_piddle
matrix = matrix.astype('int')
size = 5
x = 960
y = 244
print(matrix[(y-size):(y+size), (x-size):(x+size)])
list_2d = matrix.tolist()
last_collumn_id = c.WORLD_MAP_COLUMNS - 1
last_row_id = c.WORLD_MAP_ROWS - 1
for collumn in range(0,c.WORLD_MAP_COLUMNS):
for row in range(0,c.WORLD_MAP_ROWS):
# last row or collumn
if collumn == last_collumn_id or row == last_row_id:
list_2d[row][collumn] = 0
# others
else:
# 4 tiles covering ship image
v = list_2d[row][collumn]
v_right = list_2d[row][(collumn+1)]
v_right_down = list_2d[(row + 1)][(collumn + 1)]
v_down = list_2d[(row + 1)][collumn]
# all 4 tiles must be sailable
can_sail_index = 1
for value in [v, v_right, v_right_down, v_down]:
if value in c.SAILABLE_TILES or (value >= 117 and value <= 120):
pass
else:
can_sail_index = 0
break
list_2d[row][collumn] = can_sail_index
new_matrix = numpy.array(list_2d)
pickle.dump(new_matrix, open('map_0_1_matrix', "wb"))
# print(new_matrix[(y-size):(y+size), (x-size):(x+size)])
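# Verification sketch (coordinates reused from the debug print above): a ship-sized
# block anchored at column x=960, row y=244 is sailable iff the saved matrix holds a 1.
# loaded = pickle.load(open('map_0_1_matrix', 'rb'))
# print(loaded[244][960] == 1)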
|
[
"515441483@qq.com"
] |
515441483@qq.com
|
a5d672f7f6cc59caef2feca5f7485331ae4c796a
|
d215553c9da0edfbe849c8820e4806ec57f17f7e
|
/week-2/is-even.py
|
0483c3a2097f54460f1576958c9a72a0f5a04c2a
|
[] |
no_license
|
mikoada/CA116
|
4237ae8c7f0676999ad532e3e68bc4b57d9c541c
|
b9eb410591301430a4f77d3320ebc52e631e144a
|
refs/heads/master
| 2022-04-30T08:57:15.688065
| 2018-01-12T22:53:59
| 2018-01-12T22:53:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 34
|
py
|
n = int(input())
print((n // 2) * 2 == n)
|
[
"mixeradamski@gmail.com"
] |
mixeradamski@gmail.com
|
b99b28711ae0a414e635cb2d218084f25031f62f
|
d5844448e05b884c4ede2374797b42aaa9c4f22c
|
/utils.py
|
801dbd8024035a04f9d58ec5146e5d87b31b60cc
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] |
permissive
|
jb55/lightning-qt
|
41bf4abcba1b41e4e1d26c45562f5498d75761a9
|
34d6c990a74a5d6d8a10eb75a98d15a6c7eb1891
|
refs/heads/master
| 2021-05-22T21:22:08.279518
| 2019-12-24T17:31:41
| 2019-12-24T17:31:41
| 253,102,137
| 0
| 0
|
BSD-3-Clause-Clear
| 2020-04-04T21:28:55
| 2020-04-04T21:28:54
| null |
UTF-8
|
Python
| false
| false
| 1,016
|
py
|
import os
import signal
import _thread
import threading
def exit_after(seconds):
"""Exits if the function takes longer than `seconds` to execute.
    Actually it simulates a SIGINT, so it is well suited to RPC calls.
Taken and adapted from this very clever gist by aaronchall:
https://gist.github.com/aaronchall/6331661fe0185c30a0b4
"""
def outer(fn):
def inner(*args, **kwargs):
timer = threading.Timer(seconds, lambda _: os.kill(os.getpid(), signal.SIGINT),
args=[fn.__name__])
timer.start()
try:
result = fn(*args, **kwargs)
finally:
timer.cancel()
return result
return inner
return outer
def timeout_bool(seconds, fn, *args, **kwargs):
"""Convenient function that return False if function timed out, True otherwise.
"""
try:
exit_after(seconds)(fn)(*args, **kwargs)
except KeyboardInterrupt:
return False
return True
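# --- hypothetical usage sketch (not part of the original module) ---
if __name__ == '__main__':
    import time
    # a 3-second sleep inside a 5-second budget finishes normally...
    print(timeout_bool(5, time.sleep, 3))   # True
    # ...but a 1-second budget triggers the simulated SIGINT (Unix-like systems)
    print(timeout_bool(1, time.sleep, 3))   # False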
|
[
"darosior@protonmail.com"
] |
darosior@protonmail.com
|
df30b6ba3650907388e04031d1f8d41baf23208a
|
17aa83a6a1183557165d069fd3e99bef5fd5200e
|
/internalToEevee.py
|
48f62109d7ccaee5bdbd71e45c01e026628dfffa
|
[] |
no_license
|
stansu/blender_script
|
f0e4c4009c1a273bdb4dc53268830f240b97dbd6
|
f2a49508780f89d6b5cc62055f81a48d8c65edcf
|
refs/heads/master
| 2022-02-05T21:49:36.629749
| 2022-01-05T01:05:50
| 2022-01-05T01:05:50
| 31,686,823
| 9
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
import bpy
# Meant to run inside Blender (it relies on the bpy module): replace every
# material's node tree with a flat emission shader that reproduces the old
# Blender Internal diffuse colour, so the scene renders the same in Eevee.
mats = bpy.data.materials
for m in mats:
    m.use_nodes = True
    mTree = m.node_tree
    mNodes = mTree.nodes
    mNodes.clear()
    # the tree was just cleared, so a fresh output node is always needed
    matOutput = mNodes.new("ShaderNodeOutputMaterial")
    emitMat = mNodes.new("ShaderNodeEmission")
    emitMat.inputs[0].default_value = m.diffuse_color
    mTree.links.new(emitMat.outputs[0], matOutput.inputs[0])
|
[
"noreply@github.com"
] |
stansu.noreply@github.com
|
03f4d01b5685fdef317ea928949d4aec520f1672
|
2a122fef580d64605d9a7405e9a06175b6ac684b
|
/tests/test_skeleton.py
|
6e2b0294b71eb99926dbba5cd0d9b308bf37e1f6
|
[
"Apache-2.0"
] |
permissive
|
jaggernaut007/margipose-1
|
37688f235e028e912d37bcd3c36ff762783b2597
|
f0540aeb6dad40650b97b53835002f9616c38104
|
refs/heads/master
| 2022-08-23T03:45:48.617093
| 2019-05-09T03:18:34
| 2019-05-09T03:18:34
| 266,761,551
| 0
| 0
|
Apache-2.0
| 2020-05-25T11:28:08
| 2020-05-25T11:28:07
| null |
UTF-8
|
Python
| false
| false
| 2,391
|
py
|
import numpy as np
import torch
from torch.testing import assert_allclose
from margipose.data.skeleton import spherical_to_cartesian, cartesian_to_spherical, \
absolute_to_root_relative, absolute_to_parent_relative, parent_relative_to_absolute, \
CanonicalSkeletonDesc, canonicalise_orientation
def test_spherical_to_cartesian():
spherical = torch.Tensor([[4 * np.sqrt(3), np.deg2rad(30), np.deg2rad(60)]])
expected = torch.Tensor([[np.sqrt(3), 3, 6]])
actual = spherical_to_cartesian(spherical)
assert_allclose(actual, expected)
def test_cartesian_to_spherical():
cartesian = torch.Tensor([[np.sqrt(3), 3, 6]])
expected = torch.Tensor([[4 * np.sqrt(3), np.deg2rad(30), np.deg2rad(60)]])
actual = cartesian_to_spherical(cartesian)
assert_allclose(actual, expected)
def test_absolute_to_root_relative():
joints = torch.Tensor([
[1, 1, 1],
[1, 2, 1],
[1, 2, 2],
])
root_joint = 0
expected = torch.Tensor([
[0, 0, 0],
[0, 1, 0],
[0, 1, 1],
])
actual = absolute_to_root_relative(joints, root_joint)
assert_allclose(actual, expected)
def test_absolute_to_parent_relative():
joints = torch.Tensor([
[1, 1, 1],
[1, 2, 1],
[1, 2, 2],
])
joint_tree = [0, 0, 1]
expected = torch.Tensor([
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
])
actual = absolute_to_parent_relative(joints, joint_tree)
assert_allclose(actual, expected)
def test_parent_relative_to_absolute():
relative = torch.Tensor([
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
])
joint_tree = [0, 0, 1]
expected = torch.Tensor([
[0, 0, 0],
[0, 1, 0],
[0, 1, 1],
])
actual = parent_relative_to_absolute(relative, joint_tree)
assert_allclose(actual, expected)
def test_canonicalise_orientation(skeleton_canonical_univ):
skel_desc = CanonicalSkeletonDesc
new_skel = canonicalise_orientation(skel_desc, skeleton_canonical_univ)
pelvis = new_skel[skel_desc.joint_names.index('pelvis')]
lshoulder = new_skel[skel_desc.joint_names.index('left_shoulder')]
rshoulder = new_skel[skel_desc.joint_names.index('right_shoulder')]
assert_allclose(pelvis, torch.Tensor([0, 0, 0, 1]))
assert_allclose(lshoulder[2].item(), 0)
assert_allclose(rshoulder[2].item(), 0)
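def test_spherical_cartesian_roundtrip():
    # extra sanity check (not in the original file): per the two conversion
    # tests above the functions are mutual inverses, so a round trip is identity
    cartesian = torch.Tensor([[np.sqrt(3), 3, 6]])
    assert_allclose(spherical_to_cartesian(cartesian_to_spherical(cartesian)), cartesian)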
|
[
"fungalberto@gmail.com"
] |
fungalberto@gmail.com
|
094fbc3e03007c535e1a2c7772cfc4741c9e240b
|
2af520bec5efc61c0b65bbcf09e4ad3a747e6a63
|
/1004/main.py
|
605d07a0f383bb746b3dd016b61fa2a273caee54
|
[] |
no_license
|
Luning644182206/leetcode
|
87aa46b65737439156f52a071a81dd3479d24e12
|
63da89183f7309716d18f969481cf82155e679c4
|
refs/heads/master
| 2020-07-22T02:39:41.040141
| 2019-09-08T02:28:21
| 2019-09-08T02:28:21
| 207,050,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,124
|
py
|
# coding=utf-8
class Solution(object):
    def longestOnes(self, A, K):
        """
        :type A: List[int]
        :type K: int
        :rtype: int
        """
        # sliding window: grow the right edge and keep at most K zeros
        # inside the window, shrinking from the left when that is exceeded
        left = 0
        zeros = 0
        best = 0
        for right, value in enumerate(A):
            if value == 0:
                zeros += 1
            while zeros > K:
                if A[left] == 0:
                    zeros -= 1
                left += 1
            best = max(best, right - left + 1)
        return best
if __name__ == '__main__':
    a = [1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0]
    b = Solution()
    print(b.longestOnes(a, 2))  # expected: 6
# Alternative C++ solution (prefix sums over the zeros plus a binary search per
# start index), kept for reference as a string so the file stays valid Python:
'''
class Solution {
public:
    std::vector<int> psum;
    int cal(int l, int r) {
        if (!l) return psum[r];
        else return psum[r] - psum[l-1];
    }
    int longestOnes(vector<int>& A, int K) {
        if (A[0]) psum.push_back(0);
        else psum.push_back(1);
        for (int i = 1; i < A.size(); ++i) psum.push_back(psum.back() + 1-A[i]);
        int ret = 0;
        for (int i = 0; i < A.size(); ++i) {
            int low = i, high = A.size()-1, ans = -1;
            while (low <= high) {
                int mid = (low + high) >> 1;
                int cnt = cal(i, mid);
                if (cnt <= K) {
                    ans = mid - i + 1;
                    low = mid + 1;
                } else high = mid - 1;
            }
            ret = max(ret, ans);
        }
        return ret;
    }
};
'''
|
[
"luning04@baidu.com"
] |
luning04@baidu.com
|
dc9bc77e75ec86cb2ad265207209d03d37bf69a4
|
7950c4faf15ec1dc217391d839ddc21efd174ede
|
/leetcode-cn/1929.0_Concatenation_of_Array.py
|
d8ab060fd5948df008b621e9dca0f8d6bf0d9362
|
[] |
no_license
|
lixiang2017/leetcode
|
f462ecd269c7157aa4f5854f8c1da97ca5375e39
|
f93380721b8383817fe2b0d728deca1321c9ef45
|
refs/heads/master
| 2023-08-25T02:56:58.918792
| 2023-08-22T16:43:36
| 2023-08-22T16:43:36
| 153,090,613
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
'''
Runtime: 36 ms, beats 38.78% of all Python3 submissions
Memory: 15.1 MB, beats 51.26% of all Python3 submissions
'''
class Solution:
def getConcatenation(self, nums: List[int]) -> List[int]:
return nums + nums
'''
Runtime: 36 ms, beats 38.78% of all Python3 submissions
Memory: 15.1 MB, beats 47.15% of all Python3 submissions
'''
class Solution:
def getConcatenation(self, nums: List[int]) -> List[int]:
return nums * 2
|
[
"838255715@qq.com"
] |
838255715@qq.com
|
83aa2286d7ceded9df2768dbe64446908408c2eb
|
d8ec4e5f59291a0c2fb5058177d631d06af1af4e
|
/__init__.py
|
3639adbeca82ca6f598acd7acde760ed46f5b4f9
|
[] |
no_license
|
kevin808/wfz_academy
|
928b96f56ab50333416d120556643533d7074847
|
54b5abd6e592386307a9cc3dfa75e9d59ca9d8ee
|
refs/heads/master
| 2020-06-07T12:15:11.472817
| 2019-06-28T07:14:53
| 2019-06-28T07:14:53
| 193,020,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 88
|
py
|
from . import controllers
from . import models
from . import person
from . import wizard
|
[
"kevin@Kevins-MBP.lan"
] |
kevin@Kevins-MBP.lan
|
3668163b33ba19dd7eff00d702f7712c5fd93349
|
8a41a7f9340cfa784cb36d35dca1ecb1630e4097
|
/Programming/Python/Databases/mongodb_practice/mongodb_with_docker_container_class_based.py
|
2b5256a980b7d9de036f2423af2cae13cf65bfc6
|
[] |
no_license
|
anishst/Learn
|
02e6b6cce43cf21621d328ef0fc25168267a9a3d
|
a1aed8b78b19acdb23e20be57b67fb242e0aefc5
|
refs/heads/master
| 2022-05-13T10:17:40.293640
| 2022-03-30T12:44:21
| 2022-03-30T12:44:21
| 173,595,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,266
|
py
|
# UNDER DEV NOT FULLY WORKING
import uuid
import pymongo
class Database(object):
URI = "mongodb://192.168.1.50:27017"
DATABASE = None
@staticmethod
def initialize():
client = pymongo.MongoClient(Database.URI)
Database.DATABASE = client['items_test']
@staticmethod
def insert(collection, data):
        Database.DATABASE[collection].insert_one(data)  # insert_one supersedes the deprecated insert()
@staticmethod
def find(collection, query):
return Database.DATABASE[collection].find(query)
@staticmethod
def find_one(collection, query):
return Database.DATABASE[collection].find_one(query)
@staticmethod
def update(collection, query, data):
        Database.DATABASE[collection].replace_one(query, data, upsert=True)  # replace_one supersedes the deprecated update()
@staticmethod
def remove(collection, query):
        return Database.DATABASE[collection].delete_many(query)  # delete_many supersedes the deprecated remove()
class Items:
def __init__(self, store, url, desc, target_price, _id=None):
self._id = uuid.uuid4().hex if _id is None else _id
self.store = store
self.url = url
self.desc = desc
self.target_price = target_price
def __repr__(self):
return "<Item {} with URL {}>".format(self.store, self.url)
def save_to_mongo(self):
Database.update("items_test", {'_id': self._id}, self.json())
def json(self):
return {
"_id": self._id,
"name": self.store,
"url": self.url,
"desc": self.desc,
"target_price": self.target_price
}
def delete(self):
Database.remove('items_test', {'_id': self._id})
@staticmethod
def get_all_items():
return [elem for elem in Database.find('items_test', {})]
@staticmethod
def get_by_id(id):
return Database.find_one('items_test', {"_id": id})
Database.initialize()
# add new item
# new_item = Items('amazon', 'url', 'desc1', '30')
# new_item.save_to_mongo()
# print(len(new_item.get_all_items()))
all_items = Database.find('items_test',{})
for item in all_items:
print(item["_id"])
print(item["name"])
print(item["url"])
# get by id
print(Items.get_by_id('67913520e1af4ca2b0ed7f9abb5b5019'))
# delete item (Items.delete is an instance method, so use the Database helper directly here)
Database.remove('items_test', {'_id': '67913520e1af4ca2b0ed7f9abb5b5019'})
# total count
print(len(Items.get_all_items()))
|
[
"anishst@hotmail.com"
] |
anishst@hotmail.com
|
d9bd6eeef9a6a4e5b9aecea2a28bb6bd45001a4b
|
fe62edbc1914e7d40c5b7a0f1004d48c2a13ae82
|
/Euler_047.py
|
74c2249839d0c9ee3d3cb24fb69542dcec0383d3
|
[] |
no_license
|
cavandervoort/Project-Euler-001-to-100
|
ef854469adc36f0596803aa7cd1b36297c94d595
|
6caa8f98c100954b40e10502011d5ad1e5b08a54
|
refs/heads/main
| 2023-04-25T09:56:11.109190
| 2021-05-19T20:19:46
| 2021-05-19T20:19:46
| 366,872,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
py
|
# Problem 47
# Distinct primes factors
primes = [2]
int_list = [-8, -6, -4, -2]  # seed values so the consecutive-run check below is valid from the start
for num in range(3, 1000000):
    num_check = num
    is_prime = True
    count_distinct = 0
    for prime in primes:
        if num_check % prime == 0:
            is_prime = False
            count_distinct += 1
            while num_check % prime == 0:
                num_check /= prime
        if count_distinct > 4:
            break
        if prime > num_check ** 0.5:
            if num_check > 1:
                count_distinct += 1  # the remaining cofactor is itself prime
            break
    if count_distinct == 4:
        int_list.append(num)
        if int_list[-1] - int_list[-4] == 3:
            print(f'The consecutive numbers are {int_list[-4]}, {int_list[-3]}, {int_list[-2]}, and {int_list[-1]}.')
            break
    elif is_prime == True:
        primes.append(num)
|
[
"61097283+cavandervoort@users.noreply.github.com"
] |
61097283+cavandervoort@users.noreply.github.com
|
385d4ab16bbc106afda4775ca7680ffe0f015eda
|
a6d224f77793fab5cb84b1dca189cd3524a90eff
|
/easycalc.py
|
adf14d08142568fd8b10210f3382b040486c1b9f
|
[] |
no_license
|
hmlinux/python-easycalc
|
05f1be6c3cce0a191cf1565bd070f20f54c25336
|
a8a48e9fb2e18913e5173e6cfc6bf0e4da2706ef
|
refs/heads/master
| 2020-03-14T02:06:33.273181
| 2018-04-28T08:59:35
| 2018-04-28T08:59:35
| 131,392,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,506
|
py
|
#!/usr/bin/env python3
# _*_ coding: utf-8 _*_
import re
def matching_formula_value(num):
    # check whether a token from the formula is a valid number; tokens with
    # letters or other special characters get special handling by the callers
    try:
        num = float(num)
        return True
    except (ValueError, TypeError):
        pass
def formatting_formula(formula):  # collapse redundant "+-" sign pairs in the formula
    formula = formula.replace("++", "+")
    formula = formula.replace("+-", "-")
    formula = formula.replace("-+", "-")
    formula = formula.replace("--", "+")
    return formula
def matching_plusminus_operator_and_multiplydivide_expression_sets(parenthesises_formula):  # extract the +/- operators and the multiply/divide sub-expressions
    # Collect all "+"/"-" signs in the parenthesised expression as a list, e.g. ['-', '+', '+'].
    # Then split on those signs to get the multiply/divide sub-expressions,
    # e.g. ['9', '2*5/3', '7/3*99/4*2998', '10*568/14'].
    parenthesises_formula = re.sub("[()]", "", parenthesises_formula)
    plusminus_operator_list = re.findall("[+-]", parenthesises_formula)
    plusminus_expression_list = re.split("[+-]", parenthesises_formula)
    if plusminus_expression_list[0] == "":
        # An empty first element means the expression starts with "-", i.e. a
        # negative number; merge that leading "-" into the first real element.
        plusminus_expression_list[1] = plusminus_operator_list[0] + plusminus_expression_list[1]
        del plusminus_expression_list[0]
        del plusminus_operator_list[0]
    for i, e in enumerate(plusminus_expression_list):
        # Handle a negative second operand inside a multiply/divide expression.
        # E.g. for "1 * -2 + 3 * -5 - 6/-3" the first split gives
        # ['1 * ', '2 ', ' 3 * ', '5 ', ' 6/', '3']; here it is repaired to
        # the correct form ['1 * -2', '3 * -5', '6/-3'].
        e = e.strip()
        if e.endswith("*") or e.endswith("/"):
            try:
                plusminus_expression_list[i] = plusminus_expression_list[i] + plusminus_operator_list[i] + plusminus_expression_list[i + 1]
                del plusminus_expression_list[i + 1]
                del plusminus_operator_list[i]
            except IndexError:
                pass
    return plusminus_operator_list, plusminus_expression_list
def matching_multiply_divide_operator_and_expression_sets(plusminus_equations):  # extract the * and / operators and their operands
    operator_list = re.findall("[*/]", plusminus_equations)
    value_list = re.split("[*/]", plusminus_equations)
    return operator_list, value_list
def plus_minus_calc(plusminus_operator_list, plusminus_expression_list):  # addition/subtraction
    '''Apply the +/- operators to the expression list and return the result.'''
    plusminus_result = None
    for i, e in enumerate(plusminus_expression_list):
        match = matching_formula_value(e)
        if match == True:
            # compare against None: a running total of 0.0 is a valid result
            if plusminus_result is not None:
                if plusminus_operator_list[i - 1] == "+":
                    plusminus_result += float(e)
                elif plusminus_operator_list[i - 1] == "-":
                    plusminus_result -= float(e)
            else:
                plusminus_result = float(e)
        else:
            print("\33[33;0mThe formula contains non-numeric characters!\33[0m")
            print("\33[33;0mTrying to evaluate: %s\33[0m" % e)
            e = re.sub(r"\D", "", e)
            if e == "": e = 0
            if plusminus_result is not None:
                if plusminus_operator_list[i - 1] == "+":
                    plusminus_result += float(e)
                elif plusminus_operator_list[i - 1] == "-":
                    plusminus_result -= float(e)
            else:
                try:
                    plusminus_result = float(e)
                except ValueError:
                    print("\33[33;1mInvalid input!\33[0m")
    return plusminus_result
def multiply_divide_calc(multiply_divide_operator_list, multiply_divide_value_list):  # multiplication/division
    '''Apply the * and / operators to the value list and return the result.'''
    multiply_divide_result = None
    for i, num in enumerate(multiply_divide_value_list):
        match = matching_formula_value(num)
        if match == True:
            # compare against None: a running total of 0.0 is a valid result
            if multiply_divide_result is not None:
                if multiply_divide_operator_list[i - 1] == "*":
                    multiply_divide_result *= float(num)
                elif multiply_divide_operator_list[i - 1] == "/":
                    try:
                        multiply_divide_result /= float(num)
                    except ZeroDivisionError:
                        multiply_divide_result = 0
                        print("\33[33;0mDivision by zero in the formula, please try again!\33[0m")
            else:
                multiply_divide_result = float(num)
        else:
            print("\33[33;0mThe formula contains non-numeric characters!\33[0m")
            print("\33[33;0mTrying to evaluate: %s\33[0m" % num)
            num = re.sub(r"\D", "", num)
            if num == "": num = 1
            if multiply_divide_result is not None:
                if multiply_divide_operator_list[i - 1] == "*":
                    multiply_divide_result *= float(num)
                elif multiply_divide_operator_list[i - 1] == "/":
                    multiply_divide_result /= float(num)
            else:
                try:
                    multiply_divide_result = float(num)
                except ValueError:
                    print("\33[33;1mInvalid input!\33[0m")
    return multiply_divide_result
def calculating_priority_formulas(priority_formula):  # evaluate one parenthesised sub-expression
    plusminus_operator_list, plusminus_expression_list = matching_plusminus_operator_and_multiplydivide_expression_sets(priority_formula)
    print("-----------")
    print(plusminus_operator_list, plusminus_expression_list)
    for index, equations in enumerate(plusminus_expression_list):
        if "*" in equations or "/" in equations:
            multiply_divide_operator_list, multiply_divide_value_list = matching_multiply_divide_operator_and_expression_sets(equations)
            multiply_divide_result = multiply_divide_calc(multiply_divide_operator_list, multiply_divide_value_list)  # evaluate the multiply/divide sub-expression
            plusminus_expression_list[index] = multiply_divide_result
    plus_minus_result = plus_minus_calc(plusminus_operator_list, plusminus_expression_list)  # apply +/- to the multiply/divide results
    print("%s evaluates to: %s" % (priority_formula, plus_minus_result))
    return plus_minus_result
def start_mathematical_operations(formula):
    """Entry point: repeatedly find the innermost parenthesised sub-expression,
    evaluate it, and substitute the result back until no parentheses remain."""
    formula = formula.replace(" ", "")  # strip extra whitespace from the formula
    formula = formatting_formula(formula)  # collapse redundant "+-" signs
    print(formula)
    parenthesises_flag = True
    while parenthesises_flag:
        formula = formatting_formula(formula)
        parenthesis_formula = re.search(r"\(([^()]+)\)", formula)
        if parenthesis_formula:
            parenthesis_formula = parenthesis_formula.group()
            parenthesis_calc_result = calculating_priority_formulas(parenthesis_formula)
            formula = formula.replace(parenthesis_formula, str(parenthesis_calc_result))
            print("parenthesis_calc_result: %s" % formula)
        else:
            calc_result = calculating_priority_formulas(formula)
            parenthesises_flag = False
    print("Final result: \33[31;1m%s\33[0m" % calc_result)
def myCalcMain():
    prompt = '''Welcome to the MyCalc monitor.
    Server Version: MyCalc 1.0
    Type a formula and the calculator will print the result; exit with "exit" or "quit".
    Example formula: 1 - 2 * ( (60-30 +(-40/5) * (9-2*5/3 + 7 /3*99/4*2998 +10 * 568/14 )) - (-4*3)/ (16-3*2) )
    Expected result: 2776672.6952380952380952380952381
    '''
    print(prompt)
    while True:
        formula = input("MyCalc> ").strip()
        if formula == "exit" or formula == "quit":
            exit("Bye.")
        elif formula == "":
            continue
        else:
            start_mathematical_operations(formula)
if __name__ == '__main__':
    myCalcMain()
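# One-shot sanity check (hypothetical, not in the original file) using the example
# formula from the prompt; uncomment to evaluate it non-interactively:
# start_mathematical_operations("1 - 2 * ( (60-30 +(-40/5) * (9-2*5/3 + 7 /3*99/4*2998 +10 * 568/14 )) - (-4*3)/ (16-3*2) )")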
|
[
"741616710@qq.com"
] |
741616710@qq.com
|
0f025f1ff3bbc046a84647844b691ff948daa0f2
|
408f8bd2776200443867b92cc94c1f74fa2dea3d
|
/tutorial/overset/mesh/run_pyhyp.py
|
d847d5a27c95cea5d8cbc4bf9bc665290cd031b6
|
[] |
no_license
|
mdolab/MACH-Aero
|
58cb966da6b816597ba3f3d93c865fb0598a02b0
|
bb96a16fa7cb1889293a6b51fe20becefd24f74e
|
refs/heads/main
| 2023-09-03T23:32:11.923582
| 2023-08-22T20:21:51
| 2023-08-22T20:21:51
| 211,174,339
| 85
| 56
| null | 2023-09-12T17:32:18
| 2019-09-26T20:18:39
|
Python
|
UTF-8
|
Python
| false
| false
| 5,456
|
py
|
# ======================================================================
# Import modules
# ======================================================================
# rst Imports (beg)
from collections import OrderedDict
from mpi4py import MPI
from pyhyp import pyHypMulti
from pyhyp.utils import simpleOCart
from cgnsutilities.cgnsutilities import readGrid, combineGrids
import argparse
# rst Imports (end)
# ======================================================================
# Init stuff
# ======================================================================
# rst Init (beg)
rank = MPI.COMM_WORLD.rank
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", default=".")
parser.add_argument("--output_dir", default=".")
parser.add_argument("--level", default="L1")
args = parser.parse_args()
# rst Init (end)
# ======================================================================
# Specify parameters for extrusion
# ======================================================================
# rst parameters (beg)
# Near-Field
# reference first off wall spacing for L2 level meshes
s0 = 1.4e-7
# number of Levels in the near-Field
nNearfield = {"L3": 31, "L2": 61, "L1": 121}[args.level]
# Farfield
# background mesh spacing
dhStar = {"L3": 0.178, "L2": 0.09, "L1": 0.045}[args.level]
nFarfield = {"L3": 13, "L2": 25, "L1": 49}[args.level]
# General
# factor for spacings
fact = {"L3": 1.0, "L2": 2.0, "L1": 4.0}[args.level]
# levels of coarsening for the surface meshes
coarsen = {"L1": 1, "L2": 2, "L3": 3}[args.level]
# rst parameters (end)
# ======================================================================
# Common PyHyp options
# ======================================================================
# rst common_options (beg)
commonOptions = {
# ---------------------------
# Input Parameters
# ---------------------------
"unattachedEdgesAreSymmetry": False,
"outerFaceBC": "overset",
"autoConnect": True,
"fileType": "CGNS",
# ---------------------------
# Grid Parameters
# ---------------------------
"N": nNearfield,
"s0": s0 / fact,
"marchDist": 2.5 * 0.8,
"coarsen": coarsen,
"nConstantEnd": 2,
# ---------------------------
# Pseudo Grid Parameters
# ---------------------------
"ps0": -1.0,
"pGridRatio": -1.0,
"cMax": 1.0,
# ---------------------------
# Smoothing parameters
# ---------------------------
"epsE": 1.0,
"epsI": 2.0,
"theta": 1.0,
"volCoef": 0.5,
"volBlend": 0.00001,
"volSmoothIter": int(100 * fact),
}
# rst common_options (end)
# ======================================================================
# Individual PyHyp options
# ======================================================================
# rst individual_options (beg)
# wing options
wing_dict = {
"inputFile": "%s/near_wing.cgns" % (args.input_dir),
"outputFile": "%s/near_wing_vol_%s.cgns" % (args.output_dir, args.level),
"BC": {1: {"iLow": "ySymm"}, 2: {"iLow": "ySymm"}, 3: {"iLow": "ySymm"}},
"families": "near_wing",
}
# tip options
tip_dict = {
"inputFile": "%s/near_tip.cgns" % (args.input_dir),
"outputFile": "%s/near_tip_vol_%s.cgns" % (args.output_dir, args.level),
"families": "near_tip",
"splay": 0.0,
}
# rst individual_options (end)
# ======================================================================
# Generate Near-Field
# ======================================================================
# rst near_field (beg)
# assemble the options for the grids we will generate
options = OrderedDict()
options["wing"] = wing_dict
options["tip"] = tip_dict
# Run pyHypMulti
hyp = pyHypMulti(options=options, commonOptions=commonOptions)
MPI.COMM_WORLD.barrier()
# rst near_field (end)
# ======================================================================
# Combine Near-Field
# ======================================================================
# rst combine_near_field (beg)
# read the grids
wing = "%s/near_wing_vol_%s.cgns" % (args.output_dir, args.level)
tip = "%s/near_tip_vol_%s.cgns" % (args.output_dir, args.level)
wingGrid = readGrid(wing)
tipGrid = readGrid(tip)
gridList = [wingGrid, tipGrid]
# combine grids
combinedGrid = combineGrids(gridList)
# move to y=0
combinedGrid.symmZero("y")
# Write nearfield mesh
nearfield = "%s/near_%s.cgns" % (args.output_dir, args.level)
if rank == 0:
combinedGrid.writeToCGNS(nearfield)
MPI.COMM_WORLD.barrier()
# rst combine_near_field (end)
# ======================================================================
# Generate Far-Field
# ======================================================================
# rst far_field (beg)
farfield = "%s/far_%s.cgns" % (args.output_dir, args.level)
simpleOCart(nearfield, dhStar, 40.0, nFarfield, "y", 1, farfield)
# rst far_field (end)
# ======================================================================
# Combine all Grids
# ======================================================================
# rst combine (beg)
# the remaining steps can run on a single proc
if rank == 0:
# read the grids
farfieldGrid = readGrid(farfield)
gridList.append(farfieldGrid)
finalGrid = combineGrids(gridList)
# write the final file
finalGrid.writeToCGNS("%s/ONERA_M6_%s.cgns" % (args.output_dir, args.level))
# rst combine (end)
|
[
"noreply@github.com"
] |
mdolab.noreply@github.com
|
dc4203b08a720fc5f960f3746692110481e6e020
|
bdfa087d82fc8d760d2adce82c95b6e47dba674a
|
/Argparse/arg10.py
|
4e962671da87a755c7c9a909ef57e557b13c3afc
|
[] |
no_license
|
UppaLouva/python-library
|
74f03718a0bb8ad2957cb371b9cfb98d45c7129d
|
9031ddd3ead01d64dc0f33ef7ceeb85138a8bc94
|
refs/heads/master
| 2022-12-18T03:13:56.623621
| 2020-09-23T18:55:17
| 2020-09-23T18:55:17
| 298,060,941
| 0
| 0
| null | 2020-09-23T18:45:12
| 2020-09-23T18:33:48
| null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-i', metavar='in-file', type=argparse.FileType('rt'))
parser.add_argument('-o', metavar='out-file', type=argparse.FileType('wt'))
try:
    results = parser.parse_args()
    print('Input file:', results.i)
    print('Output file:', results.o)
except IOError as msg:
    parser.error(str(msg))
|
[
"noreply@github.com"
] |
UppaLouva.noreply@github.com
|
54025d4983956a4b8d617a830a51fb747d21b130
|
15a86fc7c53ec19d62cc6f12bcd411b6e87db960
|
/IT Essentials/IT-essentials Oefeningen/H4_Iteraties/Oef4.5.py
|
cc515d417d61b5985909bf0b1acafa963328f7bb
|
[] |
no_license
|
MiloszBoghe/School-Y1-
|
2fc6733e725714d76c1b2967baa2bb70ad89c69d
|
2715ee13bf46ff7ca9b5d079fe5bdb6b77d2b8af
|
refs/heads/master
| 2022-05-14T10:10:55.471842
| 2020-02-17T19:52:55
| 2020-02-17T19:52:55
| 241,193,640
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
getal = int(input("enter a number: "))
while getal <= 1 or getal >= 100:
    if getal >= 100:
        print("Error! The number must be less than 100")
    else:
        print("Error! The number must be greater than 1")
    getal = int(input("enter a number: "))
print("The number is: ", getal)
|
[
"11800460@student.pxl.be"
] |
11800460@student.pxl.be
|
a84668d5b3ffbba89c08659fdd78fd2de45b1132
|
a495475386da0d019504ddbcb1880dc3443ac83d
|
/savedata.py
|
430ecc5cdc4d55ad074bec5f73610080678d4d0b
|
[] |
no_license
|
ninedotnine/funtimes
|
d5b03ea35e1d54b4412230ea21a957f94f497cff
|
b8c6e2f37498f557c869c16200909053de31077a
|
refs/heads/master
| 2021-01-13T02:23:04.269799
| 2013-06-29T20:29:52
| 2013-06-29T20:29:52
| 10,260,672
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,482
|
py
|
# savedata.py
# keeps track of the save profile
from settings import datadir
def populateDictionary(name):
    try:
        with open(datadir + name + '.dat', 'r') as fp:
            dictionary = {}
            for line in fp:
                if line.strip().startswith('#') or line.strip() == '':
                    continue
                dictionary[line.strip()] = False
    except FileNotFoundError:
        print("\ncould not find '" + name + "' in data directory\n")
        raise SystemExit # should be safe since this runs before anything else
    return dictionary
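# For illustration (hypothetical file contents, not shipped with the game),
# a prefs.dat such as:
#     # comment lines and blank lines are skipped
#     autosave
#     fullscreen
# would produce {'autosave': False, 'fullscreen': False}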
profile = {
'firstname' : "Default",
'lastname': "Namington",
'gender' : "boy",
'friendname' : "Ron",
'girlname' : "Katie",
'weet' : 0,
'posts' : 0,
'money' : 0,
'love' : 0,
'flash' : 0,
'sexy' : 0,
'energy' : 15,
# general-purpose garbage variable
'progress' : 0,
# stats
'strongth' : 10,
'dexterity' : 10,
'charisma' : 10,
'intellect' : 10,
'predicament' : 'tutorial',
'latestPredmap' : 'none',
'latestMapname' : 'none',
}
# moving certain settings here so they can be saved and loaded
# also negating them because that's consistent with the other dictionaries
prefs = populateDictionary('prefs')
items = populateDictionary('items')
quests = populateDictionary('quests')
savedata = (profile, items, quests)
|
[
"brianna@tassaron.com"
] |
brianna@tassaron.com
|
61d9a4aaf739dcc9eb9d67ff61a87ee112ed5f7d
|
0bd3aa75bbd55e4e04262e14a9a3f6e2d5ff6739
|
/neko.py
|
091fec581ac4db4221f7467e4d66ccc1ff896fa9
|
[] |
no_license
|
chagama-g/RandomASCIIArt
|
a5edf222cbf77e3de1c3f520c41e6b1453d4af6f
|
47bd0e27ce381d966217dbcecef16f9e888a86fd
|
refs/heads/master
| 2023-03-06T14:17:53.192353
| 2021-02-13T03:25:25
| 2021-02-13T03:25:25
| 338,485,460
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,868
|
py
|
import random
# original: https://2ch-aa.blogspot.com/2017/10/1010.html
neko = []
neko.append("""
,-、 ,.-、
./:::::\ /::::::ヽ
/::::::::::::;ゝ--──-- 、._/::::::::::::::|
/,.-‐''"´ \:::::::::::|
/ ヽ、::::|
/ ● ヽ|
l , , , ● l
.| (_人__丿 、、、 |
l l
` 、 /
`ー 、__ /
/`'''ー‐‐──‐‐‐┬'''""´ """)
neko.append(
""" (・ω・´)⌒ゝ
とと二~⌒つ ~
 ̄
━━━━
""")
neko.append(
""" ∧ ∧
(_(,, ・∀・) 高猫♪
⊆__つつ
彡
"""
)
neko.append("""ミ
∧ ∧
)_(,, 'A`) 安猫
⊆__つつ""")
neko.append(""" ∧,,∧
(=・ω・) 安猫にゃん
(,, uuノ""")
neko.append(""" | 彡⌒ミ
\ (・ω・` ):: ふさふさだね…
(| |)::::
(γ /:::::::
∧_∧ し \:::
.ミ,,・_・ミ \
ヾ(,_uuノ
∧_∧
| ミ・_・,,ミ
| (uu._)~
\ (・ω・` ):: あ…ありがとう
(| |)::::
(γ /:::::::
し \:::
\
""")
neko.append(""" |\_/|
|― ― |
∧_∧_ノ___//
(・ω・ ) /
O旦⊂| _ ヽ
OOノ_/」/_/\」 ))))""")
neko.append("""
わんわんお ./\___/ヽ
, , - ー -、 / |||| \
, -'l´ ' 、 / ヽ
/ l /ヽ /ヽ ヽ l /ヽ /ヽ l
l l⊂⊃ ⊂⊃ | 三 三 | にゃんにゃんお
l l (__人__) ,' '、 (__人__) /
ヽ__/,,,,, ,,,,,,,,ノ `;,,,,,,,, ,,,,,,,,'
/ ,、,,)) ((_,,、 ゙ヽ / ,、,,)) ((_,,、 ゙ヽ
 ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄""")
neko.append(""" 彡⌒ミ
('(゚∀゚*)∩ おもいよ!
/⌒⌒⌒⌒⌒ヽ
/ ハ_ハ ♪... / おやすみー♪
/ (,,・д・),,)~/
(_____ _ノ
彡⌒ミ
(ーωー ) ZZZ・・・
/⌒⌒⌒⌒∪ヽ
/ ハ_ハ zzz... /
/ (,,-д-),,)~/
(_____ _ノ""")
neko.append(""" ∧,,∧
(,,・∀・)ニャー
~(_u,uノ
彡 ⌒ ミ
∩´・ω・`∩
ヽ ノ かわいい
| | |
(__)_)""")
neko.append(""" 彡⌒ ミ
(・ω・´)⌒ゝ
とと二~⌒つ ~
 ̄
━━━
| ☆
|〃
|彡⌒ ミ
|(>ω<´)⌒ゝ
|とと二~⌒つ ~
|  ̄
|
━━━""")
neko.append("""
∧,,∧
(,,・∀・)
~(_u,uノ""")
neko.append("""
,-、 ,.-、
./:::::\ /::::::ヽ
/::::::::::::;ゝ--──-- 、._/::::::::::::::|
/,.-‐''"´ \:::::::::::|
/ ヽ、::::|
/ ヽ|
l l
. | ● |
l , , , ● l
` 、 (__人__丿 、、、 /
`ー 、__ /
/`'''ー‐‐──‐‐‐┬'''""´
/, |
(_/ | |
, ヽ、_) ∩
l |\ ノ |
. | ヘ |`ヽ二 ノ
. | / \ /
`ー‐' `ー‐'""")
if __name__ == "__main__":
    # random.choice picks uniformly and avoids the rare IndexError when
    # random.uniform(0, len(neko)) returns exactly len(neko)
    print(random.choice(neko))
|
[
"55901504+chagama-g@users.noreply.github.com"
] |
55901504+chagama-g@users.noreply.github.com
|
0453ce5cd997d1dcb2f655bdd06348d9781bf042
|
5bffff249e8d264a0e0361cdef4c0196c1c5c352
|
/ffnn.py
|
deecb1b949f17de5be11d77e9ecdabf35c524829
|
[] |
no_license
|
Fer0xIsLit/school-project-thingy
|
00791543671bfd492234e105e19ed1eef14b1f70
|
9f37867b8e1715f4693aa734fb1a5feb5cf82b9e
|
refs/heads/master
| 2020-09-14T13:48:09.076098
| 2019-11-21T10:21:32
| 2019-11-21T10:21:32
| 223,146,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,907
|
py
|
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
s = 'cuda:0' if torch.cuda.is_available() else 'cpu'
device = torch.device(s)
print(s)
fw = open('images', 'rb')
train_img = fw.read()
fw.close()
fw = open('labels', 'rb')
train_lab = fw.read()
fw.close()
fw = open('test images', 'rb')
test_img = fw.read()
fw.close()
fw = open('test labels', 'rb')
test_lab = fw.read()
fw.close()
class Net(nn.Module):
def __init__(self):
super().__init__()
self.fc0 = nn.Linear(784, 18)
self.fc1 = nn.Linear(18, 18)
self.fc2 = nn.Linear(18, 10)
def forward(self, x):
x = torch.sigmoid(self.fc0(x))
x = torch.sigmoid(self.fc1(x))
x = torch.sigmoid(self.fc2(x))
return x
def bytes_to_int(byte):
s = ''
for b in byte:
m = hex(b).split('x')[1]
if len(m) == 1:
m = '0'+m
s += m
return int(s, 16)
def get_image_test(i):
img = np.array([j for j in test_img[16+784*i:16+784*(i+1)]]).reshape(28, 28) / 255.0
lab = test_lab[8+i]
return img, lab
def get_image(i):
img = np.array([j for j in train_img[16+784*i:16+784*(i+1)]]).reshape(28, 28) / 255.0
lab = train_lab[8+i]
return img, lab
def make_training_data():
X, y = [], []
for i in tqdm(range(60000)):
img = get_image(i)
X.append(img[0])
y.append(np.eye(10)[img[1]])
    # build one numpy array first: converting a list of arrays element-wise is much slower
    return torch.tensor(np.array(X)), torch.tensor(np.array(y))
def make_test_data():
X, y = [], []
for i in tqdm(range(10000)):
img = get_image_test(i)
X.append(torch.tensor(img[0]))
y.append(torch.tensor(np.eye(10)[img[1]]))
return X, y
#return torch.Tensor(X), torch.Tensor(y)
def train(net, epochs=3, batch_size=10):
optimizer = optim.Adam(net.parameters(), lr=1e-2)
loss_function = nn.MSELoss()
for epoch in range(epochs):
for i in tqdm(range(0, len(train_x), batch_size)):
x = train_x[i:i+batch_size].float().to(device)
y = train_y[i:i+batch_size].float().to(device)
net.zero_grad()
outputs = net(x.view(-1, 784))
loss = loss_function(outputs, y)
loss.backward()
optimizer.step()
print(loss)
def test(net):
correct = 0
total = 0
with torch.no_grad():
for i, img in tqdm(list(enumerate(test_x))):
guess = torch.argmax(net(img.view(1, 784).float().to(device)))
if guess == torch.argmax(test_y[i].to(device)):
correct += 1
total += 1
return correct/total
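# --- hypothetical driver (not in the original file): train() and test() read the
# module-level train_x/train_y/test_x/test_y, so a minimal sketch to wire it up:
if __name__ == '__main__':
    train_x, train_y = make_training_data()
    test_x, test_y = make_test_data()
    net = Net().to(device)
    train(net)
    print('accuracy:', test(net))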
|
[
"noreply@github.com"
] |
Fer0xIsLit.noreply@github.com
|
49f9db31b951b1dbfa1be4501372698254be6474
|
1922a719746ec5dc77478477c2af6acd5197f686
|
/faceapi/views.py
|
3434ee892586d10e223c2fd424ddf5b31df7a9ed
|
[] |
no_license
|
Velezer/facewebapi
|
ef0e6865988094d3d18043abc0fcbb2e27ab4e8c
|
931a70fb195f83947c2853d76a8b5ac4aa334ff7
|
refs/heads/main
| 2023-06-10T22:16:20.096082
| 2021-06-26T09:16:44
| 2021-06-26T09:16:44
| 375,943,312
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,818
|
py
|
from django.shortcuts import render
from django.http import JsonResponse, HttpResponse, HttpRequest
from .logic import *
import time
import asyncio
# Create your views here.
def index(request):
template = 'faceapi/index.html'
context = {}
pickling_images()
return render(request, template, context)
async def upload(request):
'''http://localhost:8000/faceapi/upload?name={Person_Name}&img={filename.jpg}'''
img = request.GET['img']
name = request.GET['name']
try:
filename = await download_image(img, name)
except Exception:
response = JsonResponse({
'status': 'error',
'data': {'name': name, 'img': img},
'message': 'Upload failed. Maybe you uploaded a non image file.'
})
response.status_code = 400
return response
compress_img(filename, size=(400, 400), quality=40)
images = list_server_images(excludes=['test'])
try:
pickling_images(images)
except Exception:
delete_image(name)
response = JsonResponse({
'status': 'error',
'data': {'name': name, 'img': img},
'message': "Can't pickle the image"
})
response.status_code = 400
return response
return JsonResponse({
'status': 'success',
'data': {'name': name, 'img': img},
'message': 'Image uploaded in server'
})
async def compare(request):
'''http://localhost:8000/faceapi/compare?excludes={Person}&excludes={Person}&img={filename.jpg}'''
start_time = time.perf_counter()
img = request.GET['img']
    excludes = ['test']
    # QueryDict.getlist returns [] for a missing key, so no exception handling is needed
    for exclude in request.GET.getlist('excludes'):
        excludes.append(exclude)
server_images = list_server_images(excludes=excludes)
results = await asyncio.gather(download_image(img, 'test.jpg'), get_pickled_images(server_images))
try:
compress_img(results[0], size=(200, 200), quality=24)
except Exception as e:
print(e)
response = JsonResponse({
'status': 'error',
'message': "Maybe the image file is corrupt or the server can't download that"
})
response.status_code = 400
return response
test_img = encode_faces(results[0])
if len(test_img) == 0:
response = JsonResponse({
'status': 'error',
'message': 'No face detected.'
})
response.status_code = 400
return response
encoded_faces = results[1]
data = classify_face(test_img, encoded_faces)
total = time.perf_counter() - start_time
return JsonResponse({
'status': 'success',
'data': data,
'excludes': excludes[1:],
'response_time': total
})
|
[
"asvelezer@gmail.com"
] |
asvelezer@gmail.com
|
c389303e3146bc35ff821cb1d46e512bb30de237
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AnttechMorseMarketingSrtaConsultModel.py
|
ed15c1198c9962d4b975315a6f97b110b2a9d905
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,452
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AnttechMorseMarketingSrtaConsultModel(object):
def __init__(self):
self._anonymous_mobile_sha_256_list = None
self._blind_mobile_sha_256 = None
self._extend_params = None
self._order_amount = None
self._resource_id = None
@property
def anonymous_mobile_sha_256_list(self):
return self._anonymous_mobile_sha_256_list
@anonymous_mobile_sha_256_list.setter
def anonymous_mobile_sha_256_list(self, value):
self._anonymous_mobile_sha_256_list = value
@property
def blind_mobile_sha_256(self):
return self._blind_mobile_sha_256
@blind_mobile_sha_256.setter
def blind_mobile_sha_256(self, value):
self._blind_mobile_sha_256 = value
@property
def extend_params(self):
return self._extend_params
@extend_params.setter
def extend_params(self, value):
self._extend_params = value
@property
def order_amount(self):
return self._order_amount
@order_amount.setter
def order_amount(self, value):
self._order_amount = value
@property
def resource_id(self):
return self._resource_id
@resource_id.setter
def resource_id(self, value):
self._resource_id = value
def to_alipay_dict(self):
params = dict()
if self.anonymous_mobile_sha_256_list:
if hasattr(self.anonymous_mobile_sha_256_list, 'to_alipay_dict'):
params['anonymous_mobile_sha_256_list'] = self.anonymous_mobile_sha_256_list.to_alipay_dict()
else:
params['anonymous_mobile_sha_256_list'] = self.anonymous_mobile_sha_256_list
if self.blind_mobile_sha_256:
if hasattr(self.blind_mobile_sha_256, 'to_alipay_dict'):
params['blind_mobile_sha_256'] = self.blind_mobile_sha_256.to_alipay_dict()
else:
params['blind_mobile_sha_256'] = self.blind_mobile_sha_256
if self.extend_params:
if hasattr(self.extend_params, 'to_alipay_dict'):
params['extend_params'] = self.extend_params.to_alipay_dict()
else:
params['extend_params'] = self.extend_params
if self.order_amount:
if hasattr(self.order_amount, 'to_alipay_dict'):
params['order_amount'] = self.order_amount.to_alipay_dict()
else:
params['order_amount'] = self.order_amount
if self.resource_id:
if hasattr(self.resource_id, 'to_alipay_dict'):
params['resource_id'] = self.resource_id.to_alipay_dict()
else:
params['resource_id'] = self.resource_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AnttechMorseMarketingSrtaConsultModel()
if 'anonymous_mobile_sha_256_list' in d:
o.anonymous_mobile_sha_256_list = d['anonymous_mobile_sha_256_list']
if 'blind_mobile_sha_256' in d:
o.blind_mobile_sha_256 = d['blind_mobile_sha_256']
if 'extend_params' in d:
o.extend_params = d['extend_params']
if 'order_amount' in d:
o.order_amount = d['order_amount']
if 'resource_id' in d:
o.resource_id = d['resource_id']
return o
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
c89f1e925348210ada55438f3e47f2b3572cbe03
|
0412893529999de784ab9cb914f385ba788a3684
|
/test/test_ack_collector_down.py
|
af6a6ac2ccc5999ccfadc2c84f1e1ec9cacdf9c9
|
[
"Apache-2.0"
] |
permissive
|
JeremyTangCD/lm-sdk-python
|
0326bf034c16b022b760600dc18fe7aaad42fa26
|
2a15e055e5a3f72d2f2e4fb43bdbed203c5a9983
|
refs/heads/master
| 2020-04-15T15:39:59.276224
| 2019-01-09T09:55:36
| 2019-01-09T09:55:36
| 164,803,314
| 0
| 0
|
Apache-2.0
| 2019-01-09T09:58:55
| 2019-01-09T06:33:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,154
|
py
|
# coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import logicmonitor_sdk
from logicmonitor_sdk.models.ack_collector_down import AckCollectorDown # noqa: E501
from logicmonitor_sdk.rest import ApiException
class TestAckCollectorDown(unittest.TestCase):
"""AckCollectorDown unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAckCollectorDown(self):
"""Test AckCollectorDown"""
# FIXME: construct object with mandatory attributes with example values
# model = logicmonitor_sdk.models.ack_collector_down.AckCollectorDown() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"jeremy.tang@logicmonitor.com"
] |
jeremy.tang@logicmonitor.com
|
ad83aa4ab4266c792ad4a348e3344ecc1d9fec0c
|
4997d460a2fc64171191a8cb03ac5967c3761772
|
/if_hex.py
|
2008be8df67f34f027ea338093c2ddbc71957222
|
[] |
no_license
|
Otsgolyak/lastchance
|
3b7344863debc75619866bfacc0d620078c9296e
|
efc4f2662acdea974638a9321067084eece0f22e
|
refs/heads/master
| 2021-01-24T01:41:00.770847
| 2018-04-23T11:37:14
| 2018-04-23T11:37:14
| 122,818,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
import re
outputfile = '../file.txt'
my_text = '0x012345,0xa1b2c3,0xdeadbeef,0x0x0x0x,0xabcdefg,0123abcd'
# match comma-terminated hex literals such as 0x012345,
text_look_for = r"0x[0-9a-fA-F]+,"
all_results = re.findall(text_look_for, my_text)
print(all_results)  # ['0x012345,', '0xa1b2c3,', '0xdeadbeef,']
# a with-block guarantees the file is flushed and closed
with open(outputfile, mode='w', encoding='latin_1') as my_file:
    my_file.write(str(all_results))
|
[
"36776260+Otsgolyak@users.noreply.github.com"
] |
36776260+Otsgolyak@users.noreply.github.com
|
54d35aced507702b82a95b2a8626758792f245cd
|
ffbbb74911f01e83194fbc87c0f13cf19d0d202d
|
/cod/textClassifierHATT_Only.py
|
5ff424a21846f6c38353d7957b6019c414f90aae
|
[] |
no_license
|
aliwagdy2580/READ
|
11c86c33cb159bda569209b69d6f15ec17eddb8d
|
582560e9202169650cf914892a952ffcc37084eb
|
refs/heads/master
| 2022-01-22T16:49:48.089526
| 2019-05-25T15:39:29
| 2019-05-25T15:39:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,721
|
py
|
# author - Richard Liao
# Dec 26 2016
import numpy as np
import pandas as pd
import cPickle
from collections import defaultdict
import re
from bs4 import BeautifulSoup
import sys
import os
os.environ['KERAS_BACKEND']='tensorflow'
from keras.preprocessing.text import Tokenizer, text_to_word_sequence
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Embedding
from keras.layers import Dense, Input, Flatten
from keras.layers import Conv1D, MaxPooling1D, Embedding, Merge, Dropout, LSTM, GRU, Bidirectional, TimeDistributed
from keras.models import Model
from keras import backend as K
from keras.engine.topology import Layer, InputSpec
from keras import initializations
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
MAX_SENT_LENGTH = 100
MAX_SENTS = 15
MAX_NB_WORDS = 20000
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.2
def clean_str(string):
    """
    Tokenization/string cleaning for the dataset:
    strip backslashes and quotes, then lower-case the string.
    """
    string = re.sub(r"\\", "", string)
    string = re.sub(r"\'", "", string)
    string = re.sub(r"\"", "", string)
    return string.strip().lower()
data_train = pd.read_csv('../data/imdb/labeledTrainData.tsv', sep='\t')
print data_train.shape
from nltk import tokenize
reviews = []
labels = []
texts = []
for idx in range(data_train.review.shape[0]):
    print 'Parsing review ', idx
text = BeautifulSoup(data_train.review[idx])
text = clean_str(text.get_text().encode('ascii','ignore'))
texts.append(text)
sentences = tokenize.sent_tokenize(text)
reviews.append(sentences)
labels.append(data_train.sentiment[idx])
tokenizer = Tokenizer(nb_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(texts)
data = np.zeros((len(texts), MAX_SENTS, MAX_SENT_LENGTH), dtype='int32')
for i, sentences in enumerate(reviews):
for j, sent in enumerate(sentences):
print 'Processing review ',i,' sentence ', j
if j< MAX_SENTS:
wordTokens = text_to_word_sequence(sent)
k=0
for _, word in enumerate(wordTokens):
if k<MAX_SENT_LENGTH and tokenizer.word_index[word]<MAX_NB_WORDS:
data[i,j,k] = tokenizer.word_index[word]
k=k+1
word_index = tokenizer.word_index
print('Total %s unique tokens.' % len(word_index))
labels = to_categorical(np.asarray(labels))
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_val = data[-nb_validation_samples:]
y_val = labels[-nb_validation_samples:]
print('Number of positive and negative reviews in training and validation set')
print y_train.sum(axis=0)
print y_val.sum(axis=0)
GLOVE_DIR = "../data/glove"
embeddings_index = {}
f = open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt'))
for line in f:
    values = line.split()
    word = values[0]
    coefs = np.asarray(values[1:], dtype='float32')
    embeddings_index[word] = coefs
f.close()
# report the embedding dimensionality once, after the whole file has been read
print('Embedding size %s.' % len(coefs))
print('Total %s word vectors.' % len(embeddings_index))
'''
embedding_matrix = np.random.random((len(word_index) + 1, EMBEDDING_DIM))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
print 'Processing word ',word,' vector ', i
embedding_matrix[i] = embedding_vector
embedding_layer = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SENT_LENGTH,
trainable=True)
sentence_input = Input(shape=(MAX_SENT_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sentence_input)
l_lstm = Bidirectional(LSTM(100))(embedded_sequences)
sentEncoder = Model(sentence_input, l_lstm)
review_input = Input(shape=(MAX_SENTS,MAX_SENT_LENGTH), dtype='int32')
review_encoder = TimeDistributed(sentEncoder)(review_input)
l_lstm_sent = Bidirectional(LSTM(100))(review_encoder)
preds = Dense(2, activation='softmax')(l_lstm_sent)
model = Model(review_input, preds)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
print("model fitting - Hierachical LSTM")
print model.summary()
model.fit(x_train, y_train, validation_data=(x_val, y_val),
nb_epoch=10, batch_size=50)
'''
# building Hierarchical Attention network
embedding_matrix = np.random.random((len(word_index) + 1, EMBEDDING_DIM))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
embedding_layer = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SENT_LENGTH,
trainable=True)
class AttLayer(Layer):
def __init__(self, **kwargs):
self.init = initializations.get('normal')
#self.input_spec = [InputSpec(ndim=3)]
super(AttLayer, self).__init__(**kwargs)
def build(self, input_shape):
assert len(input_shape)==3
self.W = self.init((input_shape[-1],1))
#self.W = self.init((input_shape[-1],))
#self.input_spec = [InputSpec(shape=input_shape)]
self.trainable_weights = [self.W]
super(AttLayer, self).build(input_shape) # be sure you call this somewhere!
def call(self, x, mask=None):
#print tf.shape(x)
#print tf.shape(self.W)
eij = K.tanh(K.dot(x, self.W))
ai = K.exp(eij)
#weights = ai/K.sum(ai, axis=1).dimshuffle(0,'x')
#weights = ai/K.sum(ai, axis=1)
weights = ai/tf.expand_dims(K.sum(ai, axis=1), 1)
#weighted_input = x*weights.dimshuffle(0,1,'x')
#weighted_input = x*tf.expand_dims(weights, 1)
#weighted_input = x*tf.expand_dims(weights, 2)
weighted_input = x*tf.expand_dims(weights, -1)
#return weighted_input.sum(axis=1)
        return K.sum(weighted_input, axis=1)  # K.sum works on symbolic tensors; np.sum does not
def get_output_shape_for(self, input_shape):
return (input_shape[0], input_shape[-1])
sentence_input = Input(shape=(MAX_SENT_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sentence_input)
l_lstm = Bidirectional(GRU(100, return_sequences=True))(embedded_sequences)
l_dense = TimeDistributed(Dense(200))(l_lstm)
l_att = AttLayer()(l_dense)
sentEncoder = Model(sentence_input, l_att)
review_input = Input(shape=(MAX_SENTS,MAX_SENT_LENGTH), dtype='int32')
review_encoder = TimeDistributed(sentEncoder)(review_input)
l_lstm_sent = Bidirectional(GRU(100, return_sequences=True))(review_encoder)
l_dense_sent = TimeDistributed(Dense(200))(l_lstm_sent)
l_att_sent = AttLayer()(l_dense_sent)
preds = Dense(2, activation='softmax')(l_att_sent)
model = Model(review_input, preds)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
print("model fitting - Hierachical attention network")
model.fit(x_train, y_train, validation_data=(x_val, y_val),
nb_epoch=10, batch_size=50)
|
[
"ahmad@friendlycares.com"
] |
ahmad@friendlycares.com
|
d0636d524d7e53a4de326695d54a9b0b63be670b
|
baf9f1db80e8502d287d1f220dd3659f59a1059b
|
/base3.py
|
816d5cc4309cde225d92f109ddf8fbfdb9534533
|
[] |
no_license
|
zhangman3187/homework
|
099a0cee343f69b552e5f239a5cab283e7aa3596
|
008c77f93043e8ffcb88dd7f80e71d3cf42ffa45
|
refs/heads/master
| 2021-01-17T22:49:56.018166
| 2017-03-07T14:59:37
| 2017-03-07T14:59:37
| 84,204,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
# -*- coding:utf-8 -*-
import requests, json
url = 'http://android.kuchuan.com/ranklatest?packagename=com.tencent.mm&market=360&date=1487690224850'
r = requests.get(url).json()
print(r)
print(type(r))
print(r[u'msg'])
|
[
"heqiang@wandoujia.com"
] |
heqiang@wandoujia.com
|
50f13043c20772e09dcd42f04fa833947e2231c1
|
dd2e23e401fbfbc65eacc0fdc4000130cca11a98
|
/rename_file_duration.py
|
6249954f01376ddeb7cd9a5783657fdffb09cadb
|
[] |
no_license
|
franarama/Scripts
|
675b99946a3d509df49fc90393218411787bd67a
|
b0a3b9115601b8507d1e4f99f7b9e1a0adac1f54
|
refs/heads/master
| 2020-06-05T11:35:03.452869
| 2019-06-18T00:49:43
| 2019-06-18T00:49:43
| 192,425,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,637
|
py
|
"""
Renames all files in a given folder (MY_PATH)
to the form "filename_XhXmXs"
so it includes the duration of the media file
(h = hours, m = minutes, s = seconds)
"""
import subprocess
from os import listdir
from os.path import isfile, join
import os
import datetime
def getLength(filename):
    # ffprobe prints its metadata to stderr; capture both streams and keep the Duration line
    result = subprocess.Popen(["ffprobe", filename],
        stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
    return [x for x in result.stdout.readlines() if b"Duration" in x]
try:
    import subprocess32 as sp  # Python 2 backport of the modern subprocess module
except ImportError:
    import subprocess as sp  # on Python 3 the stdlib module is equivalent
import json
def probe(vid_file_path):
''' Give a json from ffprobe command line
@vid_file_path : The absolute (full) path of the video file, string.
'''
    if not isinstance(vid_file_path, str):
        raise Exception('Give ffprobe a full file path of the video')
command = ["ffprobe",
"-loglevel", "quiet",
"-print_format", "json",
"-show_format",
"-show_streams",
vid_file_path
]
pipe = sp.Popen(command, stdout=sp.PIPE, stderr=sp.STDOUT)
out, err = pipe.communicate()
return json.loads(out)
def duration(vid_file_path):
''' Video's duration in seconds, return a float number
'''
_json = probe(vid_file_path)
if 'format' in _json:
if 'duration' in _json['format']:
return float(_json['format']['duration'])
if 'streams' in _json:
# commonly stream 0 is the video
for s in _json['streams']:
if 'duration' in s:
return float(s['duration'])
    # if none of the branches above returned, the file has no readable duration
    raise Exception('I found no duration')
def formatDuration(hour_mins_sec):
hours = hour_mins_sec[0]
mins = hour_mins_sec[1]
secs = hour_mins_sec[2]
if mins[0] == '0':
mins = mins[1:]
if secs[0] == '0':
secs = secs[1:]
if int(hours[len(hours)-1]) > 0:
return hours + "h" + mins + "m" + secs + "s"
elif int(mins[len(mins)-1]) > 0:
return mins + "m" + secs + "s"
else:
return secs + "s"
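# e.g. (hypothetical sanity checks, not in the original file):
#   formatDuration(['0', '03', '07']) -> '3m7s'
#   formatDuration(['1', '02', '05']) -> '1h2m5s'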
MY_PATH = "/Users/framunno/Downloads/rename"
onlyfiles = [f for f in listdir(MY_PATH) if isfile(join(MY_PATH, f))]
for file in onlyfiles:
file_name = join(MY_PATH, file)
try:
file_duration = duration(file_name)
reformat_duration = str(datetime.timedelta(seconds=round(file_duration)))
hour_mins_sec = reformat_duration.split(":")
        base, ext = os.path.splitext(file_name)
        new_file_name = base + "_" + formatDuration(hour_mins_sec) + ext
os.rename(file_name, new_file_name)
    except Exception as e:
        print("Exception found on:", file_name, "-", e)
|
[
"noreply@github.com"
] |
franarama.noreply@github.com
|
0ae9aaea48fd9efd898ab1f13648756ef229672f
|
c8e403f868e4e2cd536a1e41bcdc341ad80eb066
|
/start_second_hands.py
|
0dbbfd13c9916e5f14aff981de60259822c73bec
|
[] |
no_license
|
BHBSA/hider_deal_price
|
28e6a13cd237e88f7c8d290289cb4560f6ff1bd3
|
3ddf9c3b53b696d1baba8f1cc1089e885780aef2
|
refs/heads/master
| 2022-12-09T01:20:26.660549
| 2018-07-10T01:53:07
| 2018-07-10T01:53:07
| 141,073,585
| 0
| 0
| null | 2022-07-06T19:50:06
| 2018-07-16T01:51:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,474
|
py
|
from crawler.centaline import Centaline
from crawler.fangtu import Fangtu
from crawler.goufangwang import Goufangwang
from crawler.kufangwang import Kufangwang
from crawler.leju import Leju
from crawler.leyoujia import Leyoujia
from crawler.lianjiazaixian import Lianjiazaixian
from crawler.maitian import Maitian
from crawler.qfangwang import Qfangwang
from crawler.taiwuwang import Taiwuwang
from crawler.woai import Woai
from crawler.fangtianxia import Fangtianxia
from multiprocessing import Process
if __name__ == '__main__':
    # instantiate one crawler per second-hand housing site
    crawlers = [
        Centaline(),
        Fangtu(),
        Goufangwang(),
        Kufangwang(),
        Leju(),
        Leyoujia(),
        Lianjiazaixian(),
        Maitian(),
        Qfangwang(),
        Taiwuwang(),
        Woai(),
        Fangtianxia(),
    ]
    # launch each crawler in its own process
    for crawler in crawlers:
        Process(target=crawler.start_crawler).start()
|
[
"jijunyu@fangjia.com"
] |
jijunyu@fangjia.com
|
f81bc8c805a75b8b1b20cfd10b98ebbd4f5ed99e
|
96ea95f05aa1806d10668364ca5a26827bdd0eb7
|
/main.py
|
102777f43818291c554dbdfc63393bd279ee066f
|
[] |
no_license
|
SheyonFN/test-1
|
083a79f29d44f9a8eb64ce829973edb2c9f15ec7
|
8b4b1b3268df206ec0a9fedc55688de698a3b13a
|
refs/heads/master
| 2023-04-08T06:03:35.084117
| 2021-04-22T08:44:24
| 2021-04-22T08:44:24
| 360,451,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
def on_forever():
pass
basic.forever(on_forever)
def on_logo_event_pressed():
pass
input.on_logo_event(TouchButtonEvent.PRESSED, on_logo_event_pressed)
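# Note: this is MakeCode (micro:bit) Python -- the `basic` and `input`
# namespaces are provided by the MakeCode runtime rather than standard Python,
# so the file only runs inside the MakeCode editor/simulator.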
|
[
"83003954+SheyonFN@users.noreply.github.com"
] |
83003954+SheyonFN@users.noreply.github.com
|
116bf8d0af389f0ecb875e112d746e771ad16e0a
|
ec67023382a81849ed624b5274f38cd656aa85e3
|
/PracticeBasicThings/LAB05/TEST.py
|
8e252792e6316ea4485be5922716ca239c529ae8
|
[] |
no_license
|
Natthapolmnc/Python-basic-project
|
cb4a5e40ace4fe7b49dbb16f24ddb39112dbb54c
|
c1348bd1450058104d3c12f8a3843a7e85a5dbc1
|
refs/heads/master
| 2020-04-21T00:24:16.825259
| 2019-02-05T18:53:03
| 2019-02-05T18:53:03
| 169,195,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
num = int(input())
smallest = largest = num  # avoid shadowing the built-in min/max
for i in range(14):
    num = int(input())
    if num < smallest:
        smallest = num
    if num > largest:
        largest = num
print(largest, smallest)
|
[
"natthapol3011@gmail.com"
] |
natthapol3011@gmail.com
|
65e6ac1ce7bc66e2698c5af850183ab8830213e4
|
5b814be169e0f0917ec927743d574fbce04fa5b1
|
/getWeb.py
|
e966b5cc9ad5b0c2fd9ac2932db88400cecf27d5
|
[] |
no_license
|
Hep-dog/Test
|
d680695f25a7f15e59434dcdbb449da422c8236c
|
49041b1fe2661f790b3b329afcb7e29e9fda4787
|
refs/heads/master
| 2021-06-30T01:21:08.983692
| 2017-09-16T15:51:02
| 2017-09-16T15:51:02
| 103,758,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
#!/usr/bin/python
# vim: set fileencoding=utf-8 :
import requests
import bs4
from bs4 import BeautifulSoup
def getHTMLText(url):
try:
r = requests.get(url, timeout=30)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text
except:
return ""
def fillUnivList(ulist, html):
soup = BeautifulSoup(html, "html.parser")
for tr in soup.find('tbody').children:
if isinstance(tr, bs4.element.Tag):
tds = tr('td')
ulist.append([tds[0].string, tds[1].string, tds[3].string])
pass
def printUnivList(ulist, num):
    tplt = "{0:^10}\t{1:{3}<10}\t{2:<10}"
    # chr(12288) is the full-width CJK space, used as the fill character so
    # that columns containing Chinese university names stay aligned
    print(tplt.format("Rank", "University", "Total Score", chr(12288)))
    for i in range(num):
        u = ulist[i]
        print(tplt.format(u[0], u[1], u[2], chr(12288)))
def main():
uinfo = []
url = 'http://www.zuihaodaxue.cn/zuihaodaxuepaiming2016.html'
html = getHTMLText(url)
fillUnivList(uinfo, html)
printUnivList(uinfo, 20) # 20 Universities
if __name__ == '__main__':
    main()
|
[
"shenpx91@gmail.com"
] |
shenpx91@gmail.com
|
c5197a79386ec28ef354380fd30a7a275021810a
|
0a5c472821a05cd6d0264a8b7ee80e47b52cb7e9
|
/backendapi/backendapi/urls.py
|
90cccb52f8c89a305c732eb810b0e951e7ca0675
|
[] |
no_license
|
notsojatin/klaarAssessment
|
92dbeae608a6ebc6c9cf7374d57292b357682cef
|
3003f012293cfcf213d7f95b30980dcb7139a5f8
|
refs/heads/main
| 2023-06-28T05:12:49.917710
| 2021-07-31T11:34:51
| 2021-07-31T11:34:51
| 391,099,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 805
|
py
|
"""backendapi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
# path('admin', admin.site.urls),
path('',include('bank_branches.urls'))
]
|
[
"awstatic@Jatins-MacBook-Air.local"
] |
awstatic@Jatins-MacBook-Air.local
|
7000d765cb9ee3d2b521f0360d99992610fa3561
|
27ef726d974817fb39b448feec827ec4e17cbfa4
|
/Test_twisted/lib/python3.5/rlcompleter.py
|
3c1d9ec5087d55923d6ce22d4b7a843fb4fb4bb4
|
[] |
no_license
|
red-one-dataviz/fil_rouge
|
e6f99e02c1fdd3d7e010e214133c6f1e9d372fef
|
edb71cbe05f6be65b50f65ffa41edbdaec0786e3
|
refs/heads/master
| 2021-09-11T15:52:59.010096
| 2018-01-23T11:17:27
| 2018-01-23T11:17:27
| 110,855,607
| 3
| 1
| null | 2018-02-09T20:37:58
| 2017-11-15T16:05:10
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 60
|
py
|
/Users/thaianthantrong/anaconda/lib/python3.5/rlcompleter.py
|
[
"thaian.tt@gmail.com"
] |
thaian.tt@gmail.com
|
2416f4eb0dd8a8ade535334bf828d63bea818bb7
|
d3acfb43d8b81d33e5e6cac1619a1d91d063ea99
|
/tests/test_prep.py
|
609c99dad583270dfb9ef9bf2a37f1ef06b7c7bd
|
[] |
no_license
|
ann-cooper/choose-entities
|
6e93439f95859ef7b78687356773a6758d61fb0c
|
9d8cd658321f9a4ca98dc41864c3bf76e09ef36c
|
refs/heads/master
| 2023-05-25T15:37:45.759599
| 2023-02-07T20:35:29
| 2023-02-07T20:35:29
| 153,012,082
| 0
| 0
| null | 2023-05-23T02:45:36
| 2018-10-14T19:41:51
|
Python
|
UTF-8
|
Python
| false
| false
| 476
|
py
|
import pytest
import spacy

from choose_entities.prep_docs import PrepDocs


@pytest.fixture(scope="function")
def setup_docs():
    # Expect exactly one prepared document; return None otherwise so the
    # assertions below fail loudly instead of testing an arbitrary doc.
    docs = list(PrepDocs("tests/sample_pdfs").prep_docs())
    return docs[0] if len(docs) == 1 else None


@pytest.mark.parametrize("vocab_len, type_check", [(1164, spacy.tokens.doc.Doc)])
def test_prep_docs(setup_docs, vocab_len, type_check):
    assert setup_docs.vocab.length == vocab_len
    assert isinstance(setup_docs, type_check)
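# Run from the repository root so the relative "tests/sample_pdfs" path
# resolves, e.g.:
#   pytest tests/test_prep.py -q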
|
[
"cooperannc@gmail.com"
] |
cooperannc@gmail.com
|
0fed27224f23341fd6ebe1d76b42ab9373db1d53
|
df1f359a6284e45a884aca5791ee87db2232b164
|
/python/security/utl/cache/__init__.py
|
740258c936a681316676400cc155db2a845f562d
|
[] |
no_license
|
wruibo/tools
|
abb086a32ed3f710676eda24b73ba17735a1cb7c
|
9f452b6c57ff211b38ca8ce971396e94c0b2194b
|
refs/heads/master
| 2020-06-28T23:08:27.688380
| 2017-11-03T06:09:53
| 2017-11-03T06:09:53
| 74,459,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,368
|
py
|
"""
cache for data
"""
class vendor:
@staticmethod
def file(name='default', dirpath=None):
"""
open a new file cache
:param name: str, name of the cache
:param dirpath: str or None, cache directory path
:return: obj, file cache
"""
from . import filec
return filec.FileCache(name, dirpath)
    @staticmethod
    def gnudbm(name='default', dirpath=None):
        """
        open a new gnu dbm cache
        :param name: str, name of the cache
        :param dirpath: str or None, cache directory path
        :return: obj, gnu dbm cache
        """
        from . import gnuc
        return gnuc.GNUDBMCache(name, dirpath)


# global default cache object
__default_cache = vendor.file()
def default(cache=None):
"""
change the default cache type
:param cache: object, FileCache or GNUDBMCache object
:return:
"""
global __default_cache
if cache is not None:
__default_cache = cache
else:
return __default_cache
def save(key, content, wantold=False, encoding='utf-8'):
"""
save text content with key into cache
:param key: str, key for content
:param content: str, content for cache
:param wantold: bool, return old content if want
:param encoding: str, encoding of content
:return: str, old content or None
"""
return default().save(key, content, wantold, encoding)
def take(key, maxage=None, encoding='utf-8'):
"""
take text content with key from cache
:param key: str, key for content
:param maxage: int, max age for cache in seconds
:param encoding: str, encoding of content
:return: str, content, or None
"""
return default().take(key, maxage, encoding)
def saveb(key, content, wantold=False):
"""
save binary content with key into cache
:param key: str, key for content
:param content: bytes, content for cache
:param wantold: bool, return old content if want
:return: bytes, old content or None
"""
return default().saveb(key, content, wantold)
def takeb(key, maxage=None):
"""
take binary content with key from cache
:param key: str, key for content
:param maxage: int, max age for cache in seconds
:return: bytes, content, or None
"""
return default().takeb(key, maxage)
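# Usage sketch (illustrative; the import path depends on where security/utl
# sits on sys.path, and the default file-backed cache directory must be
# writable):
#
#   import cache
#   cache.save('greeting', 'hello world')             # text round trip
#   assert cache.take('greeting') == 'hello world'
#   cache.saveb('blob', b'\x00\x01')                  # binary round trip
#   assert cache.takeb('blob') == b'\x00\x01'
#   cache.default(cache.vendor.gnudbm('mycache'))     # swap in the GNU dbm backend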
|
[
"polly@polly.local"
] |
polly@polly.local
|
60142cf4829b73accbc5a591d9054455638e196f
|
08295531e7b00ad49282815fdd43d98a8b8547b6
|
/nids_models/NIDS_RNN.py
|
00b0f9ac0d16967fb77efa8211dea262b8674bc5
|
[] |
no_license
|
SergioArroni/VulnerGAN-py
|
c2162c5ccd63e55b74c8cf5ce376fc4f64c5aa11
|
5aebfb6056cbb25df1d9d6fbfe0718ad5dbde54d
|
refs/heads/master
| 2023-07-14T01:53:29.666409
| 2021-08-24T02:31:39
| 2021-08-24T02:31:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,436
|
py
|
import os
import shutil
import sys

sys.path.append(os.path.dirname(sys.path[0]))

from data_process.my_dataset import Dataset_adv, Dataset, Dataset_mix, Dataset_adv_1, Dataset_shadow
from tensorflow.keras.models import Sequential, load_model, Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Dense, Activation, Dropout, SimpleRNN
import numpy as np
from poisoning.save_model import save_model
import tensorflow as tf
import keras
# TF1-style GPU setup: grow GPU memory on demand instead of reserving it all.
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.compat.v1.Session(config=config)
# keras.backend.tensorflow_backend only exists in older Keras releases.
keras.backend.tensorflow_backend.set_session(sess)
class my_RNN():
def __init__(self, x_train):
self.model = Sequential()
self.model.add(SimpleRNN(120, input_shape=(x_train.shape[1], x_train.shape[2]), return_sequences=True))
self.model.add(Dropout(0.2))
self.model.add(SimpleRNN(120, return_sequences=True))
self.model.add(Dropout(0.2))
self.model.add(SimpleRNN(120, return_sequences=False))
self.model.add(Dropout(0.2))
# binary
self.model.add(Dense(1))
self.model.add(Activation('sigmoid'))
self.model.summary()
# optimizer
adam = Adam(lr=0.0001)
# binary
self.model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
def get_test(model, X_test, Y_test):
    """Binary accuracy: round the sigmoid outputs and compare to the labels."""
correct = 0
acc = 0
# x_test_re = X_test.reshape(X_test.shape[0], 1, X_test.shape[1])
y_pred = model.predict(X_test)
y_pred = np.array(y_pred)
y_pred = [np.round(x) for x in y_pred]
for i in range(X_test.shape[0]):
if Y_test[i] == 1 and y_pred[i] == 1:
correct += 1
if Y_test[i] == 0 and y_pred[i] == 0:
correct += 1
cnt = X_test.shape[0]
acc = correct / cnt
print('Test set: Accuracy: {}/{} ({:.6f}%)\n'.format(correct, cnt, 100. * correct / cnt))
return acc
def get_test_result(model, X_test, Y_test):
    """Same as get_test, but also dumps the rounded predictions to a CSV file."""
correct = 0
acc = 0
# x_test_re = X_test.reshape(X_test.shape[0], 1, X_test.shape[1])
y_pred = model.predict(X_test)
y_pred = np.array(y_pred)
y_pred = [np.round(x) for x in y_pred]
file_path = "data_record/0.1_NIDS_RNN_result.csv"
if os.path.exists(file_path):
os.unlink(file_path)
# os.mkdir(file_path)
with open(file_path, "a") as f:
items = np.array(y_pred)
np.savetxt(f, items, fmt='%d', delimiter=',')
for i in range(X_test.shape[0]):
if Y_test[i] == 1 and y_pred[i] == 1:
correct += 1
if Y_test[i] == 0 and y_pred[i] == 0:
correct += 1
cnt = X_test.shape[0]
acc = correct / cnt
print('Test set: Accuracy: {}/{} ({:.6f}%)\n'.format(correct, cnt, 100. * correct / cnt))
return acc
# hyper-parameter
epoch = 1
model_path = "model_record/GRU_RNN/"
model_name = 'NIDS_GRU_RNN'
if __name__ == '__main__':
reuse_model = False
is_train = True
loop_exit = False
while not loop_exit:
print("----------- Welcome to NIDS Poison Detector! -----------")
print("Menu:")
print("\t1: start NIDS training")
print("\t2: NIDS test")
print("\t3: get NIDS performances")
c = input("Enter you choice: ")
if c == '1':
reuse_model = False
is_train = True
loop_exit = True
if c == '2':
reuse_model = True
is_train = True
loop_exit = True
if c == '3':
reuse_model = True
is_train = False
loop_exit = True
test_s = Dataset("../data/cic_2017/data_sets/1.0_test.csv")
x_test, y_test = test_s.items, test_s.label
x_test = x_test.reshape(x_test.shape[0], 1, x_test.shape[1])
# reshape input to be [samples, timesteps, features]
# x_train = x_train.reshape(x_train.shape[0], 1, x_train.shape[1])
val_s = Dataset("../data/cic_2017/data_sets/1.0_test.csv")
x_val, y_val = val_s.items, val_s.label
x_val = x_val.reshape(x_val.shape[0], 1, x_val.shape[1])
# dataset_n_len = 20000
# dataset_a_len = 5
# start training
if not reuse_model and is_train:
# clear all previously saved model checkpoints
if os.path.exists(model_path):
shutil.rmtree(model_path)
os.mkdir(model_path)
if not os.path.exists(model_path):
# shutil.rmtree(model_path)
os.mkdir(model_path)
# train_s = Dataset("../data/cic_2017/data_sets/1.0_train.csv")
train_s = Dataset_shadow("../data/cic_2017/data_sets/0.1_val.csv", "data_record/0.1_NIDS_GRU_result.csv")
x_train, y_train = train_s.items, train_s.label
# print(x_train.shape)
# print(y_train.shape)
x_train = x_train.reshape(x_train.shape[0], 1, x_train.shape[1])
# print(x_train.shape)
model = my_RNN(x_train).model
i = 0
while i < epoch:
print('----------- epoch: %d -----------' % (i + 1))
model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=1, batch_size=32)
val = get_test(model, x_val, y_val)
save_model(model, i+1, model_name,model_path)
i += 1
save_model(model, -1, model_name)
print('----------- Model training has been completed! -----------\n\n')
elif reuse_model and is_train:
test_s = Dataset("../data/cic_2017/data_sets/0.1_val.csv")
x_test, y_test = test_s.items, test_s.label
x_test = x_test.reshape(x_test.shape[0], 1, x_test.shape[1])
model_name = 'NIDS_RNN'
model_p = 'model_record/RNN/1_' + model_name + ".hdf5"
if os.path.exists(model_p):
model = load_model(model_p)
acc_min = get_test_result(model, x_test, y_test)
else:
print('No saved model, try start NIDS training!')
# test
elif reuse_model and not is_train:
test_s = Dataset("../data/cic_2017/data_sets/1.0_test_set.csv")
x_test, y_test = test_s.items, test_s.label
x_test = x_test.reshape(x_test.shape[0], 1, x_test.shape[1])
model_p = 'model_record/RNN/1_' + model_name + ".hdf5"
if os.path.exists(model_p):
model = load_model(model_p)
acc_min = get_test(model, x_test, y_test)
else:
print('No saved model, try start NIDS training!')
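# Note: menu choices 2 and 3 expect a checkpoint under model_record/RNN/,
# while choice 1 saves under model_record/GRU_RNN/, so a checkpoint may need
# to be copied across; the ../data/cic_2017/ dataset paths are likewise
# fixed assumptions about the repository layout.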
|
[
"liuguangrui.hit@gmail.com"
] |
liuguangrui.hit@gmail.com
|
ab7534158f0b002b76e280da30abc773c277e06d
|
978855208e71e8b37fcc27989d0d119e9d1c9d83
|
/decorator_patterns/class_decorator_with_arguments.py
|
3bb45559f97fbf3a6512b57378b77085cc5d1c43
|
[] |
permissive
|
jbrt/python-decorator-patterns
|
837a14725f52e395bbec6c22a90741b05f85938e
|
f12cf820bcc4c6be9c810dfd222ca0845131debf
|
refs/heads/master
| 2022-12-15T13:36:32.012764
| 2020-08-30T14:13:48
| 2020-08-30T14:13:48
| 291,475,448
| 0
| 0
|
MIT
| 2020-08-30T14:13:49
| 2020-08-30T13:27:33
| null |
UTF-8
|
Python
| false
| false
| 1,199
|
py
|
#!/usr/bin/env python3
class ClassBasedDecoratorWithParams:
def __init__(self, arg1, arg2):
"""
Initialization (takes the arguments of the decorator)
:param arg1: argument one
:param arg2: argument two
"""
print("Initialization of the decorator")
print(f'Arguments for decorator: {arg1}, {arg2}')
def __call__(self, fn, *args, **kwargs):
"""
        Because the decorator itself takes arguments, __call__ receives the
        FUNCTION TO DECORATE (this is the difference from a parameterless
        class-based decorator, whose __call__ receives the call arguments).
        :param fn: function to decorate
        :param args: unused here; the call arguments go to inner_function
        :param kwargs: unused here; the call keyword arguments go to inner_function
        :return: the decorated function
"""
print("__call__ method")
def inner_function(*args, **kwargs):
# Something before
print("Function has been decorated. Congratulations.")
response = fn(*args, **kwargs)
# Something after
return response
return inner_function
@ClassBasedDecoratorWithParams("arg1", "arg2")
def print_arguments(*args):
for arg in args:
print(arg)
if __name__ == '__main__':
print_arguments(1, 2, 3)
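# Expected output when run as a script; note that the first three lines are
# printed at import time, when the decorator is applied, not when
# print_arguments is called:
#
#   Initialization of the decorator
#   Arguments for decorator: arg1, arg2
#   __call__ method
#   Function has been decorated. Congratulations.
#   1
#   2
#   3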
|
[
"julien@toshokan.fr"
] |
julien@toshokan.fr
|