blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
be3c912ba0ddc4bcff9d69253f8d074868443909 | 640d26baa9322b92ea5d247280668b4ad7475f8d | /robot_assignment_ws/build/kobuki_keyop/catkin_generated/pkg.develspace.context.pc.py | 1b919eef351d66946b71d3613d93749a8c481f04 | [] | no_license | JulianVJacobs/Robotics-Project-2021 | 6baa5a6423a28cc278b84d831f2d8c9f5239da90 | 18a58cee8e2793bd05e5e158c0c998099fc62d5c | refs/heads/main | 2023-06-03T02:47:15.579120 | 2021-06-25T19:56:32 | 2021-06-25T19:56:32 | 374,733,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Values substituted by catkin (CMake) into this devel-space pkg-config stub.
CATKIN_PACKAGE_PREFIX = ""
# CMake lists are semicolon-separated; split into a Python list (or empty).
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/julian/robot_assignment_ws/src/kobuki_keyop/include".split(';') if "/home/julian/robot_assignment_ws/src/kobuki_keyop/include" != "" else []
# Space-separated dependency list as pkg-config expects it.
PROJECT_CATKIN_DEPENDS = "geometry_msgs;std_srvs;std_msgs;roscpp;ecl_exceptions;ecl_threads;ecl_time;kobuki_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lkobuki".split(';') if "-lkobuki" != "" else []
PROJECT_NAME = "kobuki_keyop"
PROJECT_SPACE_DIR = "/home/julian/robot_assignment_ws/devel"
PROJECT_VERSION = "0.7.6"
| [
"1605267@students.wits.ac.za"
] | 1605267@students.wits.ac.za |
cbe426147ed87586dbfc67eeba8b4e4cbf5046b4 | d2a2546165b3db6295a3f21972dda8ab9aab7846 | /src/vehicles/towerhouse_flat.py | ce5fba774fa35c9f0aba0561dfddf9b98fb69324 | [] | no_license | andythenorth/road-hog | bab12b133dd674f0e6d7ae87498675f8da96b982 | 1800d57d4ce904e7041f24646c393b37903d9466 | refs/heads/main | 2022-09-26T19:57:31.006800 | 2022-09-17T10:09:37 | 2022-09-17T10:09:37 | 214,848,659 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | from road_vehicle import FlatHauler, DieselRoadVehicle
# 'Towerhouse' semi-truck flat hauler consist (tractor + trailer).
consist = FlatHauler(id='towerhouse_flat',
                     base_numeric_id=650,
                     name='Towerhouse',
                     semi_truck_so_redistribute_capacity=True,
                     vehicle_life=40,
                     intro_date=1968)
# Tractor unit: carries no cargo itself.
consist.add_unit(type=DieselRoadVehicle,
                 vehicle_length=2,
                 semi_truck_shift_offset_jank=2,
                 always_use_same_spriterow=True)
# Trailer unit: holds the payload; cargo sprite shorter than the vehicle.
consist.add_unit(capacity=40,
                 vehicle_length=7,
                 cargo_length=4)  # some cargo overlap eh?
| [
"mail@andythenorth.co.uk"
] | mail@andythenorth.co.uk |
a3bc86651ce830bbe7ddc395698eb5b9c2155f34 | 73fa26bff99b5caef6697769b6d53a3630c5afb3 | /portofolio/migrations/0001_initial.py | 5874a261e23cfdde461ad562ea7b4c1980b13252 | [] | no_license | handole/handofolio | 9ecb1a9359717f0b18e1c0f0ca3616cc365d8100 | 6190ed4a5d614d929489a62fb503a3434eec5349 | refs/heads/master | 2020-06-27T21:57:27.398249 | 2017-06-13T21:30:53 | 2017-06-13T21:30:53 | 94,252,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-13 17:31
from __future__ import unicode_literals
from django.db import migrations, models
import portofolio.models
class Migration(migrations.Migration):
    """Initial migration: creates the Portofol model (title, content,
    uploaded image, unique slug, created/updated timestamps), ordered
    newest-first."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Portofol',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('content', models.TextField()),
                # upload_location computes the storage path per instance.
                ('image', models.ImageField(upload_to=portofolio.models.upload_location)),
                ('slug', models.SlugField(unique=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                # Newest entries first.
                'ordering': ['-timestamp', '-updated'],
            },
        ),
    ]
| [
"denihhandoko@gmail.com"
] | denihhandoko@gmail.com |
3e284e6af35d735275c78c58377428a8021d456b | 8574094da8e8ad4bd65e1bbe67dd1abd5003d6a9 | /zopen.plone.filerepos/src/zopen/plone/filerepos/utils.py | d06855e83e9d0c2a375e78d2e19d46f23de1bceb | [] | no_license | madfrog2018/everydo-project | 5b948f8b04c04773163eb5193e45604b1fe4a74e | d2ea5b83513cf4191e29ba70a1fc8b1d9950599f | refs/heads/master | 2021-01-17T06:33:26.229787 | 2010-07-15T01:45:19 | 2010-07-15T01:45:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | #-*- coding:utf-8 -*-
def getMaxAttachmentSize(user_id=''):
    """Return the per-attachment size cap in bytes (a flat 30 MiB).

    ``user_id`` is accepted for API symmetry but does not affect the limit.
    """
    one_mib = 1024 * 1024
    return 30 * one_mib
def getQuota(user_id=''):
    """Return the total storage quota in bytes (a flat 500 MiB).

    ``user_id`` is accepted for API symmetry but does not affect the quota.
    """
    one_mib = 1024 * 1024
    return 500 * one_mib
| [
"yan5yang@gmail.com@4c72a389-b037-68f1-009d-6f17fb46af5f"
] | yan5yang@gmail.com@4c72a389-b037-68f1-009d-6f17fb46af5f |
e1a323d444a2e947bff196013ec3c3a287d0fd63 | bf73b244a116a4fa01b3a91d11025a0cb29c1374 | /ecomapp/views.py | 0a2759458713f09de0c5fd4afa5ff3e922927189 | [] | no_license | brahim024/django-ecommerce-web-app | f88b3586d04bba59019322638e90b98d11033ba9 | f817eda9ab273b001fedc9a78d0aee3a13aa767c | refs/heads/master | 2023-03-01T12:05:30.259028 | 2021-02-10T12:11:21 | 2021-02-10T12:11:21 | 289,489,478 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,641 | py | from django.shortcuts import render, get_object_or_404,redirect
from django.http import HttpResponse,HttpResponseRedirect
from .models import Category, Product
from cart.forms import CartAddProductForm
from .forms import CommentForm
from .filters import ProductFilter
# Create your views here.
def product_list(request,category_slug=None):
    """List available products, optionally restricted to one category.

    Renders 'list.html' with the category (or None), all categories,
    the filtered product queryset, and a ProductFilter form.
    """
    category=None
    categories=Category.objects.all()
    # Only products flagged as available are listed.
    product=Product.objects.filter(available=True)
    if category_slug:
        category=get_object_or_404(Category,slug=category_slug)
        product=product.filter(category=category)
    # NOTE(review): the filter form is built over *all* products, not the
    # available/category-restricted queryset above -- confirm intended.
    myfilter=ProductFilter(request.GET,queryset=Product.objects.all())
    return render(request,'list.html',
                  {'category':category,
                   'categories':categories,
                   'products':product,'myfilter':myfilter})
def product_detail(request,id,slug):
    """Render one product's detail page with add-to-cart and comment forms.

    On POST, validates and saves a new comment attached to the product;
    on GET, shows an empty comment form. Only active comments are listed.
    """
    product=get_object_or_404(Product,id=id,slug=slug,available=True)
    cart_product_form=CartAddProductForm()
    # NOTE(review): related manager is `product.comment` (singular) --
    # presumably the Comment model's related_name; confirm.
    comments=product.comment.filter(active=True)
    new_comment=None
    if request.method=='POST':
        form=CommentForm(request.POST)
        if form.is_valid():
            # Attach the comment to this product before saving.
            new_comment=form.save(commit=False)
            new_comment.product=product
            new_comment.save()
            # NOTE(review): no redirect after a successful POST (line below
            # left commented out), so a page refresh can resubmit the form.
            #return HttpResponseRedirect('stor/product_list_by_category')
    else:
        form=CommentForm()
    context={'form':form,'product':product,
             'cart_product_form':cart_product_form,
             'new_comment':new_comment,
             'comments':comments,}
    return render(request,'details.html',context)
| [
"ifninos168@gmail.com"
] | ifninos168@gmail.com |
e62bf5693b8310c80b29d8becdb2a5943dfa324f | c113158bf6ce1edeb24298d711fddc56af2d62ee | /heltour/tournament/migrations/0084_gamenomination_pairing.py | c3ea3fca48dc9aab4cd2bf0f78af016d6fa6ae79 | [
"MIT"
] | permissive | elvisaronsp/heltour | 44d05b6e195b5fd939304ac776167e85762aec83 | f5444552293ee4d51cbc7cf025857ed23d7d03dd | refs/heads/master | 2020-06-19T06:16:54.312479 | 2019-05-26T15:13:33 | 2019-05-26T15:13:33 | 196,594,361 | 1 | 0 | MIT | 2019-07-12T14:34:58 | 2019-07-12T14:34:58 | null | UTF-8 | Python | false | false | 540 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-01 20:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds an optional FK from GameNomination to PlayerPairing.

    SET_NULL keeps the nomination row alive if its pairing is deleted.
    """
    dependencies = [
        ('tournament', '0083_auto_20160901_1905'),
    ]
    operations = [
        migrations.AddField(
            model_name='gamenomination',
            name='pairing',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='tournament.PlayerPairing'),
        ),
    ]
| [
"ben.cyanfish@gmail.com"
] | ben.cyanfish@gmail.com |
619df83ed8d14e7f8c40459b49bf4ef7e0e7a49a | c6e885e317915496c655dca38d8b7b830f848d64 | /worker.py | eae0a90ea4979b663fcd33ddea68627d83040a2c | [] | no_license | Kelvinson/tensorflow-a3c | 2bac0d9226b49ec604a98e76fdfadd55c402abc9 | 50258c4c5f4abe16d2df4bfd411fb5f3fd24ccbb | refs/heads/master | 2021-08-22T06:02:21.320069 | 2017-11-29T12:43:17 | 2017-11-29T12:43:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,826 | py | from collections import deque
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import gym
from network import create_network
from train_ops import *
from utils import *
G = 0.99
N_ACTIONS = 3
ACTIONS = np.arange(N_ACTIONS) + 1
N_FRAMES_STACKED = 4
N_MAX_NOOPS = 30
def list_set(l, i, val):
    """Append *val* to list *l*, requiring that it land at index *i*.

    Acts as an append that documents/enforces the expected position.
    Raises AssertionError if len(l) != i (same exception type as the
    original `assert`, but not silently stripped under `python -O`).
    """
    if len(l) != i:
        raise AssertionError(
            'list_set expected index %d but list has length %d' % (i, len(l)))
    l.append(val)
class Worker:
    """One A3C worker: owns a private environment and a scoped copy of the
    network, accumulates policy/value gradients locally, and applies them
    to the shared 'global' network (TF1-style graph API).
    """
    def __init__(self, sess, worker_n, env_name, summary_writer):
        self.sess = sess
        # NOTE(review): EnvWrapper/prepro2 presumably come from `utils`
        # (star import) -- frameskip 4 plus frame stacking below; confirm.
        self.env = EnvWrapper(gym.make(env_name), prepro2=prepro2, frameskip=4)
        worker_scope = "worker_%d" % worker_n
        self.network = create_network(worker_scope)
        self.summary_writer = summary_writer
        self.scope = worker_scope
        # Scalar variable + summary op used to log per-episode reward sums.
        self.reward = tf.Variable(0.0)
        self.reward_summary = tf.summary.scalar('reward', self.reward)
        policy_optimizer = tf.train.AdamOptimizer(learning_rate=0.0005)
        value_optimizer = tf.train.AdamOptimizer(learning_rate=0.0005)
        # Each create_train_ops call returns ops to (1) accumulate this
        # worker's gradients into buffers, (2) apply the buffers to the
        # 'global' scope, (3) zero the buffers, plus the buffers themselves.
        self.update_policy_gradients, self.apply_policy_gradients, self.zero_policy_gradients, self.grad_bufs_policy = \
            create_train_ops(self.network.policy_loss,
                             policy_optimizer,
                             update_scope=worker_scope,
                             apply_scope='global')
        self.update_value_gradients, self.apply_value_gradients, self.zero_value_gradients, self.grad_bufs_value = \
            create_train_ops(self.network.value_loss,
                             value_optimizer,
                             update_scope=worker_scope,
                             apply_scope='global')
        self.init_copy_ops()
        self.frame_stack = deque(maxlen=N_FRAMES_STACKED)
        self.reset_env()
        self.t_max = 10000
        self.steps = 0
        self.episode_rewards = []
        self.render = False
        self.value_log = deque(maxlen=100)
        self.fig = None
    def reset_env(self):
        # Restart the episode with a random number of no-op actions
        # (standard Atari evaluation trick), then ensure the frame stack
        # holds N_FRAMES_STACKED observations before stepping begins.
        self.frame_stack.clear()
        self.env.reset()
        n_noops = np.random.randint(low=0, high=N_MAX_NOOPS+1)
        print("%d no-ops..." % n_noops)
        for i in range(n_noops):
            o, _, _, _ = self.env.step(0)
            self.frame_stack.append(o)
        while len(self.frame_stack) < N_FRAMES_STACKED:
            print("One more...")
            o, _, _, _ = self.env.step(0)
            self.frame_stack.append(o)
        print("No-ops done")
    def log_rewards(self):
        # Push the episode's reward sum through the TF summary writer.
        reward_sum = sum(self.episode_rewards)
        print("Reward sum was", reward_sum)
        self.sess.run(tf.assign(self.reward, reward_sum))
        summ = self.sess.run(self.reward_summary)
        self.summary_writer.add_summary(summ, self.steps)
    def init_copy_ops(self):
        # Build assign ops that copy every trainable variable from the
        # 'global' scope into this worker's scope, matched by name.
        from_tvs = tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope='global')
        to_tvs = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                   scope=self.scope)
        from_dict = {var.name: var for var in from_tvs}
        to_dict = {var.name: var for var in to_tvs}
        copy_ops = []
        for to_name, to_var in to_dict.items():
            from_name = to_name.replace(self.scope, 'global')
            from_var = from_dict[from_name]
            op = to_var.assign(from_var.value())
            copy_ops.append(op)
        self.copy_ops = copy_ops
    def sync_network(self):
        # Pull the latest global weights into this worker's network.
        self.sess.run(self.copy_ops)
    def value_graph(self):
        # Live matplotlib plot of the most recent value estimates
        # (lazy figure creation on first call, then blit-style redraws).
        if self.fig is None:
            self.fig, self.ax = plt.subplots()
            self.fig.set_size_inches(2, 2)
            self.ax.set_xlim([0, 100])
            self.ax.set_ylim([0, 2.0])
            self.line, = self.ax.plot([], [])
            self.fig.show()
            self.fig.canvas.draw()
            self.bg = self.fig.canvas.copy_from_bbox(self.ax.bbox)
        self.fig.canvas.restore_region(self.bg)
        ydata = list(self.value_log)
        xdata = list(range(len(self.value_log)))
        self.line.set_data(xdata, ydata)
        self.ax.draw_artist(self.line)
        self.fig.canvas.update()
        self.fig.canvas.flush_events()
    def run_step(self):
        # Run up to t_max environment steps, then do one n-step
        # actor-critic update: accumulate gradients over the trajectory
        # and apply them to the global network.
        states = []
        actions = []
        rewards = []
        i = 0
        self.sess.run([self.zero_policy_gradients,
                       self.zero_value_gradients])
        self.sync_network()
        list_set(states, i, self.frame_stack)
        done = False
        while not done and i < self.t_max:
            #print("Step %d" % i)
            # Network expects channels-last stacked frames.
            s = np.moveaxis(self.frame_stack, source=0, destination=-1)
            feed_dict = {self.network.s: [s]}
            a_p = self.sess.run(self.network.a_softmax, feed_dict=feed_dict)[0]
            # Sample an action from the policy distribution.
            a = np.random.choice(ACTIONS, p=a_p)
            list_set(actions, i, a)
            o, r, done, _ = self.env.step(a)
            if self.render:
                self.env.render()
            feed_dict = {self.network.s: [s]}
            v = self.sess.run(self.network.graph_v, feed_dict=feed_dict)[0]
            self.value_log.append(v)
            self.value_graph()
            if r != 0:
                print("Got reward", r)
            self.frame_stack.append(o)
            self.episode_rewards.append(r)
            list_set(rewards, i, r)
            list_set(states, i + 1, np.copy(self.frame_stack))
            i += 1
        if done:
            print("Episode done")
            self.log_rewards()
            self.episode_rewards = []
        # Calculate initial value for R
        if done:
            # Terminal state
            r = 0
        else:
            # Non-terminal state
            # Estimate the value of the current state using the value network
            # (states[i]: the last state)
            s = np.moveaxis(states[i], source=0, destination=-1)
            feed_dict = {self.network.s: [s]}
            r = self.sess.run(self.network.graph_v, feed_dict=feed_dict)[0]
        # i - 1 to 0
        # (Why start from i - 1, rather than i?
        # So that we miss out the last state.)
        for j in reversed(range(i)):
            s = np.moveaxis(states[j], source=0, destination=-1)
            # Discounted n-step return, built backwards.
            r = rewards[j] + G * r
            feed_dict = {self.network.s: [s],
                         # map from possible actions (1, 2, 3) -> (0, 1, 2)
                         self.network.a: [actions[j] - 1],
                         self.network.r: [r]}
            self.sess.run([self.update_policy_gradients,
                           self.update_value_gradients],
                          feed_dict)
        self.sess.run([self.apply_policy_gradients,
                       self.apply_value_gradients])
        self.sess.run([self.zero_policy_gradients,
                       self.zero_value_gradients])
        self.steps += 1
        return done
| [
"matthew.rahtz@gmail.com"
] | matthew.rahtz@gmail.com |
e707dff5bcaa52d36f0d56604c629444190857e3 | a88a99fb3f754649db06ad86d22b5cb0d2d1e19c | /scholariumat/studies/admin.py | a7b9bd11bef2d7c203c61c32189fdf63a81e8547 | [
"MIT"
] | permissive | valuehack/scholariumat | 91ec59647948759d917ce7077d06b0aa9618c807 | 47c13f3429b95b9ad5ca59b45cf971895260bb5c | refs/heads/master | 2022-12-07T22:20:23.967854 | 2020-04-09T22:05:52 | 2020-04-09T22:05:52 | 135,466,121 | 0 | 3 | MIT | 2022-12-06T18:38:22 | 2018-05-30T15:55:14 | JavaScript | UTF-8 | Python | false | false | 165 | py | from django.contrib import admin
from products.admin import ProductBaseAdmin
from .models import StudyProduct
admin.site.register(StudyProduct, ProductBaseAdmin)
| [
"merlin.buczek@gmail.com"
] | merlin.buczek@gmail.com |
f64efc42f03f469457ffaf5dd385b81fe2ed4704 | 0c5dd1b89b686d23b536c9f51b30b2f5e69ff399 | /edit_form.py | adc3ab8524f18d52cfe4e708bf3488e3bbda4a5a | [] | no_license | blazprog/codesnips | 23fc57233fc197866c8c539df280d8792de098a4 | 1c307b74b5a00cbe339c86b3e37b101ad0921fcb | refs/heads/master | 2021-01-21T10:45:48.507153 | 2018-11-19T11:32:58 | 2018-11-19T11:32:58 | 101,984,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,615 | py | # -*- coding: utf-8 -*-
import sys
import PyQt5.QtWidgets as qtw
# from PyQt5.QtWidgets import QWidget, QApplication, QDialog, \
### QMdiSubWindow
from PyQt5.QtSql import *
conn = QSqlDatabase.database()
class WordBrowse(QSqlTableModel):
    """Table model bound to the `ozegov` table, populated on construction."""
    def __init__(self, parent=None):
        super().__init__(parent)
        self.setTable('ozegov')
        self.select()
class EditForm(qtw.QDialog):
    """Dialog that edits one `ozegov` row at a time via QDataWidgetMapper."""
    def __init__(self, parent=None):
        super().__init__(parent)
        self.initUI()
    def initUI(self):
        # Build the form widgets and map them onto the model's columns.
        self.setWindowTitle('New Edit Form')
        lblWord = qtw.QLabel('Word')
        lbl = qtw.QLabel("My Label")
        self.txtWord = qtw.QLineEdit()
        lblDescription = qtw.QLabel('Description')
        self.txtDescription = qtw.QTextEdit()
        self.main_layout = qtw.QVBoxLayout()
        #self.main_layout = qtv.QLabel()
        self.main_layout.addWidget(lblWord)
        self.main_layout.addWidget(lbl)
        self.main_layout.addWidget(self.txtWord)
        self.main_layout.addWidget(lblDescription)
        self.main_layout.addWidget(self.txtDescription)
        self.setLayout(self.main_layout)
        self.model = WordBrowse()
        self.mapper = qtw.QDataWidgetMapper(self)
        # Writes go back to the model only on explicit submit().
        self.mapper.setSubmitPolicy(qtw.QDataWidgetMapper.ManualSubmit)
        self.mapper.setModel(self.model)
        # Column 0 -> word line edit, column 1 -> description text edit.
        self.mapper.addMapping(self.txtWord, 0)
        self.mapper.addMapping(self.txtDescription, 1)
        self.mapper.toFirst()
        self.show()
if __name__ == '__main__':
app = qtw.QApplication(sys.argv)
ef = EditForm()
ef.show()
sys.exit(app.exec_())
| [
"blaz.korosec@mentis.si"
] | blaz.korosec@mentis.si |
c0fb1b2ae995c53cd776a5dd076d79a17da945dc | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/189060_Using_Berkeley_DB_Database/recipe-189060.py | c21520218466957b07c688f8c45e5007591b0523 | [
"Python-2.0",
"MIT"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 1,025 | py | #!/usr/bin/python
from bsddb3 import db # the Berkeley db data base
# Part 1: Create database and insert 4 elements
#
filename = 'fruit'
# Get an instance of BerkeleyDB
fruitDB = db.DB()
# Create a database in file "fruit" with a Hash access method
# There are also, B+tree and Recno access methods
fruitDB.open(filename, None, db.DB_HASH, db.DB_CREATE)
# Print version information
print '\t', db.DB_VERSION_STRING
# Insert new elements in database
fruitDB.put("apple","red")
fruitDB.put("orange","orange")
fruitDB.put("banana","yellow")
fruitDB.put("tomato","red")
# Close database
fruitDB.close()
# Part 2: Open database and write its contents out
#
fruitDB = db.DB()
# Open database
# Access method: Hash
# set isolation level to "dirty read (read uncommited)"
fruitDB.open(filename, None, db.DB_HASH, db.DB_DIRTY_READ)
# get database cursor and print out database content
cursor = fruitDB.cursor()
rec = cursor.first()
while rec:
print rec
rec = cursor.next()
fruitDB.close()
| [
"betty@qburst.com"
] | betty@qburst.com |
1531ec9143a6b9a180d6b727f62af76a9f8dd0c3 | 33d7d66b287f61e280ba18a6d24de3d7f437665e | /src/hp3par_exporter/prometheus_metrics.py | a83689554a68b37871cffe19ec742516b020e050 | [] | no_license | ycyr/hp3par-exporter | 13578a1ff61699ff91c52ddfd3ec8d3f506af8c3 | fef3dd8cdce96a0327583d03943a72cbb40c86b5 | refs/heads/master | 2020-09-15T11:53:58.833163 | 2019-08-29T13:19:38 | 2019-08-29T13:54:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | from prometheus_client import Gauge
from prometheus_client import REGISTRY
registry = REGISTRY
gauge_hp3par_total_capacity_mib = Gauge('hp3par_totalCapacityMiB', 'Total system capacity in MiB', ["id", "hp3par_name"])
gauge_hp3par_allocated_capacity_mib = Gauge('hp3par_allocatedCapacityMiB',
'Total allowed capacity in MiB', ["id", "hp3par_name"])
gauge_hp3par_free_capacity_mib = Gauge('hp3par_freeCapacityMiB',
'Total free capacity in MiB', ["id", "hp3par_name"])
gauge_hp3par_failed_capacity_mib = Gauge('hp3par_failedCapacityMiB',
'Total failed capacity in MiB', ["id", "hp3par_name"])
| [
"nico.marcq@gmail.com"
] | nico.marcq@gmail.com |
9c53ee59254457ca1ebe3696b9eec8d479047f35 | f2a5680231e205dc49a083578d9bd90e4603036c | /Grokking-Coding-Interview-Patterns/1. Sliding Window/smallestWindowContainingSubString.py | 28ac6fe6c76af028ec7572eff2a802f078411d8d | [] | no_license | flogothetis/Technical-Coding-Interviews-Algorithms-LeetCode | d592451f7d297fd52395e33dc67686e9990a663c | 7c8473fce4b5b5affbfde5ed8c39fdb89cbc77d4 | refs/heads/master | 2023-01-13T15:56:07.706164 | 2020-11-18T18:54:52 | 2020-11-18T18:54:52 | 281,101,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py |
def smallestWindowContainingSubString(array, pattern):
    """Return the length of the smallest substring of *array* that contains
    every character of *pattern* (with multiplicity), or 0 if none exists.

    Sliding-window technique, O(len(array) + len(pattern)) time.
    """
    if not pattern:
        # Guard: the original looped forever / raised IndexError here.
        return 0
    # Remaining count still needed for each pattern character.
    needed = {}
    for ch in pattern:
        needed[ch] = needed.get(ch, 0) + 1
    window_start = 0
    best = len(array) + 1
    matched = 0  # number of DISTINCT chars whose requirement is fully met
    for window_end in range(len(array)):
        ch = array[window_end]
        if ch in needed:
            needed[ch] -= 1
            if needed[ch] == 0:
                matched += 1
        # BUG FIX: compare against the number of distinct required chars,
        # not len(pattern) -- the original returned 0 for any pattern with
        # repeated characters (e.g. "aab"), since matched could never
        # reach len(pattern).
        while matched == len(needed):
            best = min(best, window_end - window_start + 1)
            left = array[window_start]
            if left in needed:
                if needed[left] == 0:
                    matched -= 1
                needed[left] += 1
            window_start += 1
    return best if best <= len(array) else 0
def main():
    """Demo: print the minimum window lengths (expected output: 5, 3, 0)."""
    print(smallestWindowContainingSubString("aabdec", "abc"))
    print(smallestWindowContainingSubString("abdbca", "abc"))
    print(smallestWindowContainingSubString("adcad", "abc"))
main()
| [
"flogothetis95@gmail.com"
] | flogothetis95@gmail.com |
228481d618f5cf5de30e9d3ca069a890fdb834ce | 56b1569a62c6a155ce9cf4b8059bd085848dd859 | /Python/camera_calibration/undistort_and_transform.py | a9261ec389a83b3a764bd25733b6ee15bbf2c4c2 | [] | no_license | Marius-Juston/Advanced-Autonomous-Vehicule | 1485ccc1a3dafbb875f845b2ba00cb05c6d6ca40 | 7f188428aafe0c0dfff75dd8567199c7067be17d | refs/heads/master | 2022-11-15T04:52:19.713449 | 2020-07-10T22:09:04 | 2020-07-10T22:09:04 | 266,609,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,760 | py | import pickle
import cv2
import matplotlib.pyplot as plt
import numpy as np
# Read in the saved camera matrix and distortion coefficients
# These are the arrays you calculated using cv2.calibrateCamera()
dist_pickle = pickle.load(open("wide_dist_pickle.p", "rb"))
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]
# Read in an image
img = cv2.imread('test_image2.png')
nx = 8 # the number of inside corners in x
ny = 6 # the number of inside corners in y
# MODIFY THIS FUNCTION TO GENERATE OUTPUT
# THAT LOOKS LIKE THE IMAGE ABOVE
def corners_unwarp(img, nx, ny, mtx, dist):
    """Undistort *img* and warp the detected chessboard to a top-down view.

    Args:
        img: BGR image containing a chessboard pattern.
        nx, ny: number of inside corners per row / column.
        mtx, dist: camera matrix and distortion coefficients from
            cv2.calibrateCamera().

    Returns:
        (warped_image, M): the top-down view and the perspective matrix.

    Raises:
        ValueError: if the chessboard corners cannot be detected
            (the original fell through and raised a confusing NameError).
    """
    # 1) Undistort using the calibration results.
    undistorted = cv2.undistort(img, mtx, dist, None, mtx)
    # 2) Grayscale for corner detection.
    gray = cv2.cvtColor(undistorted, cv2.COLOR_BGR2GRAY)
    # 3) Find the inside corners. BUG FIX: use the nx/ny parameters
    #    instead of the previously hard-coded (8, 6) grid.
    grid_size = (nx, ny)
    ret, corners = cv2.findChessboardCorners(gray, grid_size, None)
    if not ret:
        raise ValueError('Chessboard corners not found')
    cv2.drawChessboardCorners(undistorted, grid_size, corners, ret)
    # The four outermost detected corners define the source rectangle
    # (corners are returned row by row, left to right).
    up_left = 0
    up_right = nx - 1
    down_left = nx * (ny - 1)
    down_right = down_left + nx - 1
    source_points = np.array([corners[up_left][0], corners[up_right][0], corners[down_left][0], corners[down_right][0]],
                             dtype=np.float32)
    # Destination rectangle: full image minus a fixed margin.
    offset = 100
    h, w = gray.shape
    dist_points = np.array([[offset, offset], [w - offset, offset], [offset, h - offset], [w - offset, h - offset]], dtype=np.float32)
    M = cv2.getPerspectiveTransform(source_points, dist_points)
    perspective = cv2.warpPerspective(undistorted, M, (w, h), flags=cv2.INTER_LINEAR)
    return perspective, M
top_down, perspective_M = corners_unwarp(img, nx, ny, mtx, dist)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=50)
ax2.imshow(top_down)
ax2.set_title('Undistorted and Warped Image', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
plt.show()
| [
"Marius.juston@hotmail.fr"
] | Marius.juston@hotmail.fr |
fa15c3be548b8dc694d4faf4e585ba491143fd5c | cf7b3522b6fa8765b3f12dec06fd2868712d4e9a | /cristianoronaldoyopmailcom_282/wsgi.py | a1d3ebed57f67b9882524a591c63040c549c940e | [] | no_license | payush/cristianoronaldoyopmailcom-282 | 367d381b440343a8c75b4f8ae557fbf4c34b419e | 06e7f0fa63e863f11181bbde515574eda9844891 | refs/heads/master | 2020-03-23T13:10:53.127490 | 2018-07-19T16:16:59 | 2018-07-19T16:16:59 | 141,604,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | """
WSGI config for cristianoronaldoyopmailcom_282 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cristianoronaldoyopmailcom_282.settings")
application = get_wsgi_application()
| [
"ayushpuroheet@gmail.com"
] | ayushpuroheet@gmail.com |
7bdb7f5fdf1b70740ee3bf3595aca652f7137168 | 5ce77901781240f5c42539a471b27bbc8cbe444f | /Analysis/plot_in_degree.py | 13b954779fd0e2a1556bc72dddbb124fa3de5a0a | [] | no_license | jcate6/Bitcoin-fdac17 | a70f5d37adf8fa887602e25b83fcfe8fa970b4a7 | 0098a5f3a8b22a1dad69f1a38722836dfcc7108f | refs/heads/master | 2021-08-24T12:19:24.309279 | 2017-12-09T20:03:50 | 2017-12-09T20:03:50 | 107,714,261 | 0 | 4 | null | 2017-12-09T18:12:54 | 2017-10-20T18:44:21 | TeX | UTF-8 | Python | false | false | 520 | py | from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv("In_degree_distros\\2017-11-15.csv")
data = data.as_matrix()
x = data[:,0]
y = data[:,1]
x = x[x<=100]
y = y[:x.shape[0]]
fig = plt.figure()
ax = fig.add_subplot(1,2,1)
ax.plot(x, y)
ax.set_xlabel("In-degree")
ax.set_ylabel("Frequency")
ax.set_title("In-Degree over Time")
ax.set_xscale("log")
ax.set_yscale("log")
fig.savefig("Plots\\2017-11-15_in_degree.png", bbox_inches='tight')
#plt.show()
| [
"agrawald@goldmail.etsu.edu"
] | agrawald@goldmail.etsu.edu |
f72e767999eef2f1d01d990eed01d56b07395a50 | 2f2d4571066a443121a1930b40b4045b7c284306 | /SimpleEventCounter/SimpleEventCounter/python/ConfFile_reco_cfg.py | fbdfe0916b529df061fbc439bc52a344f588d344 | [] | no_license | rvenditti/Tau3MuSearch | fb99397dfabb2d307535d80b374b86295df7de12 | 36f699e265164829c9843c787f6c9a9dfbbff696 | refs/heads/master | 2021-07-11T17:02:37.833567 | 2020-07-15T13:23:14 | 2020-07-15T13:23:14 | 177,627,204 | 1 | 4 | null | 2020-07-15T13:23:15 | 2019-03-25T16:48:07 | C | UTF-8 | Python | false | false | 1,012 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("Demo")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.load('Configuration.StandardSequences.Services_cff')
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.source = cms.Source("PoolSource",
# replace 'myfile.root' with the source file you want to use
fileNames = cms.untracked.vstring(
'file:/lustre/cms/store/user/rosma/SingleMuon/crab_SingleMuonRun2016B_MyZMuSkim_CMSSW_8_0_10_v4/170108_161635/0000/skims_SKIM_854.root'
# 'file:./Run2016B_SingleMuon_RAWRECO_ZMuPromptReco.root'
)
)
process.recoMuAna = cms.EDAnalyzer('RecoMuonAnalyzer',
muonsInputTag = cms.InputTag("muons"),
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string("histoSingleMu_reco.root")
)
process.p = cms.Path(process.recoMuAna)
| [
"rosamaria.venditti@gmail.com"
] | rosamaria.venditti@gmail.com |
1f5525b4232b289bdbea2755bdbdd2b8336c6741 | de74a2af11962af7a8ef3dfb16fa130d35580f3a | /pylib/mailutils.py | b67e832b62cb800308e9238eee823b39470f78de | [] | no_license | vieyahn/winterpy | 4d46fa196bd7517ce4adc785ec803d9fc9aad0a0 | 5a54bcd4dd6d1c6c41b971d6049bcd94e9fb0e70 | refs/heads/master | 2021-01-14T12:57:26.515877 | 2016-08-26T10:38:33 | 2016-08-26T10:39:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,365 | py | # vim:fileencoding=utf-8
import re
import datetime
import codecs
from email import header
from email.header import Header
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
addr_re = re.compile(r'(.*?)\s+(<[^>]+>)($|,\s*)')
def decode_multiline_header(s):
    """Decode a possibly folded RFC 2047 header into one unicode string."""
    # Unfold continuation lines first, then decode each encoded word.
    unfolded = re.sub(r'\n\s+', ' ', s)
    parts = []
    for chunk, charset in header.decode_header(unfolded):
        if charset:
            # Windows mislabels gbk/gb18030 content as gb2312.
            codec = 'gb18030' if charset.lower() == 'gb2312' else charset
            decoded = chunk.decode(codec)
        elif isinstance(chunk, bytes):
            decoded = chunk.decode('ascii')
        else:
            decoded = chunk
        parts.append(decoded)
    return ''.join(parts)
def get_datetime(m):
    """Parse the message's Date header and return it as naive local time."""
    # Header format example: "Wed, 18 Jun 2014 04:09:18 +0000"
    parsed = datetime.datetime.strptime(m['Date'], '%a, %d %b %Y %H:%M:%S %z')
    # Re-interpret the aware timestamp in the machine's local timezone,
    # dropping the tzinfo.
    return datetime.datetime.fromtimestamp(parsed.timestamp())
def decode_payload(m):
    """Return the message body as text, undoing the transfer encoding.

    Falls back to utf-8 when the Content-Type carries no charset.
    """
    charset = get_charset_from_ctype(m['Content-Type']) or 'utf-8'
    # Undo the Content-Transfer-Encoding (e.g. base64, quoted-printable)
    # via the codecs machinery, then decode the resulting bytes.
    raw = codecs.decode(m.get_payload().encode(), m['Content-Transfer-Encoding'])
    return raw.decode(charset)
def assemble_mail(subject, to, from_, html=None, text=None):
    """Build a MIME message with the given subject/addresses and body.

    Provide `html`, `text`, or both; with both, a multipart/alternative
    message is built (text part first, so HTML is preferred by clients).
    `to` may be a single address string or a list/tuple of them.
    Raises TypeError when neither body is given.
    """
    if html is None and text is None:
        raise TypeError('no message given')
    if html:
        html = MIMEText(html, 'html', 'utf-8')
    if text:
        text = MIMEText(text, 'plain', 'utf-8')
    if html and text:
        msg = MIMEMultipart('alternative', _subparts = [text, html])
    else:
        msg = html or text
    # Headers are RFC 2047-encoded only when they contain non-ASCII.
    msg['Subject'] = encode_header(subject)
    msg['From'] = encode_header_address(from_)
    if isinstance(to, (list, tuple)):
        msg['To'] = ', '.join(encode_header_address(x) for x in to)
    else:
        msg['To'] = encode_header_address(to)
    return msg
def encode_header_address(s):
    # Encode only the display-name part of each "Name <addr>" entry;
    # the angle-bracketed address itself must stay ASCII.
    return addr_re.sub(_addr_submatch, s)
def encode_header(s):
    """RFC 2047-encode *s* for use in a header unless it is pure ASCII."""
    if eight_bit_clean(s):
        return s
    return Header(s, 'utf-8').encode()
def _addr_submatch(m):
    # Regex groups: (1) display name, (2) "<address>", (3) trailing
    # separator (empty or ", "). Only the name may need encoding.
    return encode_header(m.group(1)) + ' ' + m.group(2) + m.group(3)
def eight_bit_clean(s):
    """Return True if every character of *s* fits in 7-bit ASCII."""
    return not any(ord(ch) > 127 for ch in s)
def get_charset_from_ctype(ctype):
    """Extract the charset parameter from a Content-Type header value.

    Returns the charset name (with two Windows-specific mislabels
    normalized to codecs Python knows), or None when the header carries
    no charset parameter.
    """
    pos = ctype.find('charset=')
    if pos == -1:
        # No charset parameter. (The original tested `pos > 0`, which also
        # mis-handled a value that *starts* with "charset=".)
        return None
    charset = ctype[pos + 8:]
    # Drop any following parameters and surrounding quotes,
    # e.g. 'charset="utf-8"; format=flowed' -> 'utf-8'.
    charset = charset.split(';', 1)[0].strip().strip('"\'')
    if charset.lower() == 'gb2312':
        # Windows misleadingly uses gb2312 when it's gbk or gb18030
        charset = 'gb18030'
    elif charset.lower() == 'windows-31j':
        # cp932's IANA name (Windows-31J), extended shift_jis
        # https://en.wikipedia.org/wiki/Code_page_932
        charset = 'cp932'
    return charset
| [
"lilydjwg@gmail.com"
] | lilydjwg@gmail.com |
ce745e580227ed4af6b01b2899b374cfe1fa8c09 | d332507e59e0abb0315401e687638f62f9341a74 | /src/openpal/AllHeaders.h | 91b97a63fb778880bbaa13128732c50ee3ac5754 | [
"Apache-2.0"
] | permissive | txjmb/pydnp3 | 151591634181e63582ac2a9479a6286730d7a48d | fff9835d5ce9a75bd89a585942d3fbd3ad3de923 | refs/heads/master | 2022-04-21T11:09:48.319612 | 2020-04-23T20:06:46 | 2020-04-23T20:06:46 | 257,956,527 | 0 | 0 | Apache-2.0 | 2020-04-22T16:23:23 | 2020-04-22T16:23:22 | null | UTF-8 | Python | false | false | 3,008 | h | /*
* -*- coding: utf-8 -*- {{{
* vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
*
* Copyright 2018, Kisensum.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Neither Kisensum, nor any of its employees, nor any jurisdiction or
* organization that has cooperated in the development of these materials,
* makes any warranty, express or implied, or assumes any legal liability
* or responsibility for the accuracy, completeness, or usefulness or any
* information, apparatus, product, software, or process disclosed, or
* represents that its use would not infringe privately owned rights.
* Reference herein to any specific commercial product, process, or service
* by trade name, trademark, manufacturer, or otherwise does not necessarily
* constitute or imply its endorsement, recommendation, or favoring by Kisensum.
* }}}
*/
#ifndef PYDNP3_OPENPAL_ALLHEADERS_H
#define PYDNP3_OPENPAL_ALLHEADERS_H

#define PYDNP3_OPENPAL

// Forward declaration only; the full definition comes from the openpal
// serialization headers included below.
namespace openpal {
    class UInt48Type;
}

// DNPTime is opendnp3's alias for openpal's 48-bit unsigned integer type.
namespace opendnp3 {
    typedef openpal::UInt48Type DNPTime;
}
// ---------- OPENPAL HEADERS ----------
//#include "channel/IPhysicalLayer.h" //missing "ChannelStatistics.h" file
#include "channel/IPhysicalLayerCallbacks.h"
#include "container/Array.h"
#include "container/ArrayView.h"
#include "container/Buffer.h"
#include "container/HasSize.h"
#include "container/Pair.h"
#include "container/RSlice.h"
#include "container/Settable.h"
#include "container/StaticBuffer.h"
#include "container/WSlice.h"
#include "executor/IExecutor.h"
#include "executor/IMonotonicTimeSource.h"
#include "executor/ITimer.h"
#include "executor/IUTCTimeSource.h"
#include "executor/MonotonicTimestamp.h"
#include "executor/TimeDuration.h"
#include "executor/TimerRef.h"
#include "executor/UTCTimestamp.h"
#include "logging/ILogHandler.h"
#include "logging/LogEntry.h"
#include "logging/LogFilters.h"
#include "logging/Logger.h"
#include "logging/LogLevels.h"
#include "logging/StringFormatting.h"
#include "serialization/DoubleFloat.h"
#include "serialization/FloatByteOrder.h"
#include "serialization/Format.h"
#include "serialization/Parse.h"
#include "serialization/Serialization.h"
#include "serialization/SerializationTemplatesLE.h"
#include "serialization/Serializer.h"
#include "serialization/SingleFloat.h"
#include "util/Comparisons.h"
#include "util/Finally.h"
#include "util/Limits.h"
#include "util/SequenceNum.h"
#include "util/ToHex.h"
#include "util/Uncopyable.h"
#include "Configure.h"
#endif
| [
"anhhng141@gmail.com"
] | anhhng141@gmail.com |
b34912f55df79ef6fca6df9cc4125e84a58ff2d4 | c459f4dd7b198ec8d8db8379726a5b2650be6636 | /appl/migrations/0031_projectapplication_verification_number.py | 42ff161f342dcf374df751e7b6ac04d39b7aafad | [] | no_license | jittat/admapp | 4c712182cd06e82efab6c2513fb865e5d00feae8 | 38bf299015ae423b4551f6b1206742ee176b8b77 | refs/heads/master | 2023-06-10T03:23:41.174264 | 2023-06-09T19:41:03 | 2023-06-09T19:41:03 | 101,953,724 | 10 | 4 | null | 2023-04-21T22:48:55 | 2017-08-31T03:12:04 | Python | UTF-8 | Python | false | false | 483 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-11 04:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional (blank-able) ``verification_number`` char field to
    the ``ProjectApplication`` model."""

    dependencies = [
        ('appl', '0030_auto_20171011_0416'),
    ]

    operations = [
        migrations.AddField(
            model_name='projectapplication',
            name='verification_number',
            field=models.CharField(blank=True, max_length=20),
        ),
    ]
| [
"jittat@gmail.com"
] | jittat@gmail.com |
ca864943b93a2b6e37b700762d4dd9f484604ac2 | 0e478f3d8b6c323c093455428c9094c45de13bac | /src/OTLMOW/PostenMapping/Model/Post050407019.py | 4569171ae4b997901f889b207ead5e7a75b37585 | [
"MIT"
] | permissive | davidvlaminck/OTLMOW | c6eae90b2cab8a741271002cde454427ca8b75ba | 48f8c357c475da1d2a1bc7820556843d4b37838d | refs/heads/main | 2023-01-12T05:08:40.442734 | 2023-01-10T15:26:39 | 2023-01-10T15:26:39 | 432,681,113 | 3 | 1 | MIT | 2022-06-20T20:36:00 | 2021-11-28T10:28:24 | Python | UTF-8 | Python | false | false | 2,916 | py | # coding=utf-8
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping
# Generated with PostenCreator. To modify: extend, do not edit
class Post050407019(StandaardPost):
    """Standaardpost 0504.07019: fundering van zandcement, dikte 19 cm.

    Maps the tender item onto OTL ``Onderbouw`` attributes: ``laagRol``,
    ``type`` and ``dikte`` carry fixed default values, while ``oppervlakte``
    is the measured (meetstaat) quantity in m2.
    """

    def __init__(self):
        super().__init__(
            nummer='0504.07019',
            beschrijving='Fundering van zandcement volgens 5-4.7, dikte 19 cm',
            meetstaateenheid='M2',
            mappings=[StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.laagRol',
                dotnotation='laagRol',
                defaultWaarde='fundering',
                range='',
                usagenote='',
                isMeetstaatAttr=0,
                isAltijdInTeVullen=0,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0504.07019')
                , StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw.type',
                dotnotation='type',
                defaultWaarde='zandcement',
                range='',
                usagenote='',
                isMeetstaatAttr=0,
                isAltijdInTeVullen=0,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0504.07019')
                , StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#LaagDikte.dikte',
                dotnotation='dikte',
                defaultWaarde='19',
                range='',
                usagenote='cm^^cdt:ucumunit',
                isMeetstaatAttr=0,
                isAltijdInTeVullen=0,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0504.07019')
                , StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.oppervlakte',
                dotnotation='oppervlakte',
                defaultWaarde='',
                range='',
                usagenote='m2^^cdt:ucumunit',
                isMeetstaatAttr=1,
                isAltijdInTeVullen=1,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0504.07019')])
| [
"david.vlaminck@mow.vlaanderen.be"
] | david.vlaminck@mow.vlaanderen.be |
840e8e3b7c7e142ac1748f12224d4f410b3f3df9 | 3c56b08398d4478328ecaf5e70599e1b7e23d70b | /ml_infra_template.py | a76a21aebbc87e8ad51a139be4f6605dc76f245c | [] | no_license | sidneyriffic/ml_infra_template | 3e33f4a8ddabd6ac1e07f747fba902e0df27b703 | d2681964165be04f1a30b5a4436755e44724090f | refs/heads/master | 2022-12-27T10:07:55.735571 | 2020-10-07T20:17:28 | 2020-10-07T20:17:28 | 302,149,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | #!/usr/bin/env python3
"""Command line entry point for data/model interactions"""
import importlib
import argparse
import preprocess_ex as ppex
print(ppex)
description = 'Define model and data operations'
parser = argparse.ArgumentParser(description)
parser.add_argument('-m', '--model', dest='make_model_path',
help='Model build path')
parser.add_argument('-p', '--preprocess', dest='pre_path',
help='Preprocess folder path')
parser.add_argument('-s', '--serialized', dest='serial_model',
help='Use a saved serialized model')
parser.add_argument('-t', '--train', dest='train_path',
help='Train a model')
args = parser.parse_args()
print(args)
pre_path = args.pre_path[1:-1]
preprocess = importlib.import_module(pre_path + '.preprocess')
print(preprocess)
preprocess.preprocess(pre_path + '/')
| [
"sidneyriffic@gmail.com"
] | sidneyriffic@gmail.com |
9fae75716351829fb089c9d7e5195ade93a0258a | 381fd0a6f1f716f68bb2d5ef6340cee59e770065 | /advent_of_code/2020/day7.py | 048b6df7150076ade05fd62922a0f8c2756193b5 | [
"Unlicense"
] | permissive | netotz/codecamp | 4ec5ca4f8cf0adcdcbf99cd5533304ddd3d060d2 | 208fd1c929a85397ecdaa0c6cc74b8c6d99d76c7 | refs/heads/master | 2023-02-09T08:23:32.778882 | 2023-01-25T01:15:45 | 2023-01-25T01:15:45 | 253,117,424 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,783 | py | import re
def parse_input(rawrules):
    """Parse bag rules into ``{outer colour: {inner colour: count}}``.

    Fixes two defects of the regex/walrus one-liner it replaces:
    - counts were read with ``int(s[0])``, i.e. only the first digit, so
      a count like ``12`` was parsed as ``1``;
    - the ``bag(s*)`` substitution would also mangle any colour name that
      happened to contain the substring "bag".
    Blank lines are skipped instead of raising ``IndexError``.
    """
    rules = {}
    for line in rawrules.splitlines():
        line = line.strip()
        if not line:
            continue
        outer, _, contents = line.partition(' bags contain ')
        inner = {}
        for part in contents.rstrip('.').split(', '):
            if part == 'no other bags':
                continue
            number, name = part.split(' ', 1)
            # drop the trailing " bag" / " bags"
            name = name.rsplit(' bag', 1)[0]
            inner[name] = int(number)
        rules[outer] = inner
    return rules
# Import-time side effect: parse the real puzzle input from disk.
with open('inputs/input7.txt') as file:
    input7 = parse_input(file.read())

# The bag colour both puzzle parts revolve around.
MYBAG = 'shiny gold'
def get_containers(rules, bag=None):
    """Return the set of every bag colour that can hold ``bag`` (default
    ``MYBAG``), directly or through any number of intermediate bags.

    Rewritten as a reverse-edge BFS: the original recursion never cached
    negative results, so shared branches were re-explored (exponential in
    the worst case); this pass is linear in the size of the rules.  The
    target colour is now a parameter (default preserves old behaviour).
    """
    if bag is None:
        bag = MYBAG
    # Invert the rules: inner colour -> colours that directly contain it.
    held_by = {}
    for outer, contents in rules.items():
        for inner in contents:
            held_by.setdefault(inner, set()).add(outer)
    containers = set()
    frontier = [bag]
    while frontier:
        for parent in held_by.get(frontier.pop(), ()):
            if parent not in containers:
                containers.add(parent)
                frontier.append(parent)
    return containers
def count_required(rules, bag=None):
    """Return the total number of bags required inside ``bag``
    (default ``MYBAG``).

    Bug fix: in the original, an empty (leaf) bag placed *directly* inside
    MYBAG returned a 0 subtotal which was then multiplied by its count,
    so those bags were dropped from the total.  The uniform recursion
    below counts every inner bag as itself (+1) plus its own contents.
    The target colour is now a parameter (default preserves old usage).
    """
    if bag is None:
        bag = MYBAG

    def total_inside(outer):
        # each inner bag counts itself (the +1) plus everything inside it
        return sum(n * (1 + total_inside(inner))
                   for inner, n in rules[outer].items())

    return total_inside(bag)
# Part 1: how many colours can eventually contain MYBAG.
answer1 = len(get_containers(input7))
# Part 2: how many bags MYBAG itself must contain.
answer2 = count_required(input7)
def test():
    """Self-check against the two worked examples from the day-7 puzzle text."""
    raw = '''light red bags contain 1 bright white bag, 2 muted yellow bags.
dark orange bags contain 3 bright white bags, 4 muted yellow bags.
bright white bags contain 1 shiny gold bag.
muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.
shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.
dark olive bags contain 3 faded blue bags, 4 dotted black bags.
vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.
faded blue bags contain no other bags.
dotted black bags contain no other bags.'''
    sample = parse_input(raw)
    # four colours can hold shiny gold; it must contain 32 bags
    assert len(get_containers(sample)) == 4
    assert count_required(sample) == 32
    raw = '''shiny gold bags contain 2 dark red bags.
dark red bags contain 2 dark orange bags.
dark orange bags contain 2 dark yellow bags.
dark yellow bags contain 2 dark green bags.
dark green bags contain 2 dark blue bags.
dark blue bags contain 2 dark violet bags.
dark violet bags contain no other bags.'''
    sample = parse_input(raw)
    # the doubling chain from the part-2 example
    assert count_required(sample) == 126
| [
"neto.otz@hotmail.com"
] | neto.otz@hotmail.com |
ff753b9ca5453fbb8d40b0c432ca3478c3c2c751 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/DS8200v2-TC-MIB.py | 9508e7b6cfa965606170f2e85cae511187b5a04d | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 2,044 | py | #
# PySNMP MIB module DS8200v2-TC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DS8200v2-TC-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:54:27 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Counter64, Counter32, Bits, Unsigned32, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, MibIdentifier, Gauge32, IpAddress, ObjectIdentity, ModuleIdentity, enterprises, TimeTicks, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "Counter32", "Bits", "Unsigned32", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "MibIdentifier", "Gauge32", "IpAddress", "ObjectIdentity", "ModuleIdentity", "enterprises", "TimeTicks", "iso")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
verilink = ModuleIdentity((1, 3, 6, 1, 4, 1, 321))
if mibBuilder.loadTexts: verilink.setLastUpdated('0011150000Z')
if mibBuilder.loadTexts: verilink.setOrganization('Verilink Corporation')
if mibBuilder.loadTexts: verilink.setContactInfo('Bob Ray bray@verilink.com 1-256-774-2380')
if mibBuilder.loadTexts: verilink.setDescription('DS8200v2 TC MIB.')
hbu = MibIdentifier((1, 3, 6, 1, 4, 1, 321, 100))
mibBuilder.exportSymbols("DS8200v2-TC-MIB", hbu=hbu, PYSNMP_MODULE_ID=verilink, verilink=verilink)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
d7fbc3caf38b5d1e8c88ba18cd5ba590516ea044 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_176/ch62_2019_03_29_14_32_02_630978.py | c677efb4ff50461d54b43e6c822ed6442ba27cd1 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | def filtra_positivos(n):
positivos=[]
i=0
while i<len(n):
if n[i]>0:
positivos.append(n[i])
i+=1
return positivos | [
"you@example.com"
] | you@example.com |
ec80c963f307638e5a6e9a96460f5ff51ef02556 | 81b438781ecc307225fcc6141238f8a8ef03bd64 | /Project/src/Modules/House/Hvac/_test/test_hvac.py | 8953a34e781e131dba4abbe5483197e7479b8deb | [] | permissive | DBrianKimmel/PyHouse | b7d61be4dc6ce9e3332228a6c633e81fdfd8a908 | a100fc67761a22ae47ed6f21f3c9464e2de5d54f | refs/heads/develop | 2021-01-23T09:30:08.722975 | 2020-02-29T16:30:08 | 2020-02-29T16:30:08 | 4,125,178 | 3 | 1 | MIT | 2020-07-19T22:07:18 | 2012-04-24T13:53:33 | Python | UTF-8 | Python | false | false | 2,518 | py | """
@name: PyHouse/Project/src/Modules/Housing/Hvac/_test/test_hvac.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2015-2019 by D. Brian Kimmel
@license: MIT License
@note: Created on Jul 12, 2015
@Summary:
Passed all 5 tests - DBK - 2019-06-04
"""
__updated__ = '2019-10-06'
# Import system type stuff
import xml.etree.ElementTree as ET
from twisted.trial import unittest
# Import PyMh files and modules.
from _test.testing_mixin import SetupPyHouseObj
from Modules.Core.data_objects import ThermostatData
from Modules.Housing.Hvac.hvac import Api as hvacApi
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
class SetupMixin(object):
    """Shared fixture: builds the PyHouse object, the test XML tree, the
    Hvac Api and a fresh ThermostatData for each test case.
    """
    def setUp(self, p_root):
        # p_root: parsed XML root element that the helpers build from
        self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj(p_root)
        self.m_xml = SetupPyHouseObj().BuildXml(p_root)
        self.m_api = hvacApi(self.m_pyhouse_obj)
        self.m_thermostat_obj = ThermostatData()
class A0(unittest.TestCase):
    """Smoke test that simply identifies this module in the test output."""
    def setUp(self):
        pass

    def test_00_Print(self):
        print('Id: test_hvac')
class A1_XML(SetupMixin, unittest.TestCase):
    """Checks the raw XML layout and loading of thermostat entries."""
    # NOTE(review): XML_LONG and TESTING_PYHOUSE are not defined or imported
    # in the visible part of this file -- confirm where they come from.

    def setUp(self):
        SetupMixin.setUp(self, ET.fromstring(XML_LONG))

    def test_01_Tags(self):
        """ Test to be sure the compound object was built correctly - Rooms is an empty dict.
        """
        # print(PrettyFormatAny.form(self.m_xml, 'A1-01-A - Tags'))
        self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)
        self.assertEqual(self.m_xml.house_div.tag, 'HouseDivision')
        self.assertEqual(self.m_xml.hvac_sect.tag, 'HvacSection')
        self.assertEqual(self.m_xml.thermostat_sect.tag, 'ThermostatSection')
        self.assertEqual(self.m_xml.thermostat.tag, 'Thermostat')

    def test_02_Load(self):
        """Two thermostats load from the long test XML.
        """
        l_obj = self.m_api.LoadXml(self.m_pyhouse_obj)
        # print(PrettyFormatAny.form(l_obj, 'A1-02-A - Thermostats', 105))
        self.assertEqual(len(l_obj.Thermostats), 2)
class A2_EmptyXML(SetupMixin, unittest.TestCase):
    """Behaviour when the configuration XML contains no thermostat data."""
    # NOTE(review): XML_EMPTY is not defined or imported in the visible part
    # of this file -- confirm where it comes from.

    def setUp(self):
        SetupMixin.setUp(self, ET.fromstring(XML_EMPTY))

    def test_01_BuildObjects(self):
        """ Test to be sure the compound object was built correctly - Rooms is an empty dict.
        """
        self.assertEqual(self.m_pyhouse_obj.House.Rooms, {})

    def test_02_Load(self):
        """Loading empty XML yields zero thermostats.
        """
        l_obj = self.m_api.LoadXml(self.m_pyhouse_obj)
        self.assertEqual(len(l_obj.Thermostats), 0)
# ## END DBK
| [
"d.briankimmel@gmail.com"
] | d.briankimmel@gmail.com |
037b3c1da16ce4174e61a9c521b97380a06d93ef | 34c57c605eba40b67e2de338c1e101a1c4cb6b72 | /nn_iris.py | c2a412be2309b194453bb81384e3c2e5fc81088d | [] | no_license | youngsoul/pyimagesearch-python-machine-learning | 9af38980e9e408855f4457de82fc8ffd1fd00837 | 1efeb3035efb24348489d36f8db551a395afd144 | refs/heads/master | 2023-07-19T18:27:00.158079 | 2021-01-06T01:52:32 | 2021-01-06T01:52:32 | 166,071,588 | 0 | 2 | null | 2023-07-06T22:29:48 | 2019-01-16T16:20:48 | Jupyter Notebook | UTF-8 | Python | false | false | 1,517 | py | # USAGE
# python nn_iris.py
# import the necessary packages
from keras.models import Sequential
from keras.layers.core import Dense
from keras.optimizers import SGD
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, accuracy_score
from sklearn.datasets import load_iris
# load the Iris dataset and perform a training and testing split,
# using 75% of the data for training and 25% for evaluation
print("[INFO] loading data...")
dataset = load_iris()
(trainX, testX, trainY, testY) = train_test_split(dataset.data,
    dataset.target, test_size=0.25, random_state=32)

# encode the labels as 1-hot vectors
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

# define the 4-3-3-3 architecture using Keras
# (4 input features -> two sigmoid hidden layers of 3 -> 3-way softmax)
model = Sequential()
model.add(Dense(3, input_shape=(4,), activation="sigmoid"))
model.add(Dense(3, activation="sigmoid"))
model.add(Dense(3, activation="softmax"))

# train the model using SGD
print("[INFO] training network...")
# decay=0.1/250 anneals the learning rate over the 250 training epochs
opt = SGD(lr=0.1, momentum=0.9, decay=0.1 / 250)
model.compile(loss="categorical_crossentropy", optimizer=opt,
    metrics=["accuracy"])
H = model.fit(trainX, trainY, validation_data=(testX, testY),
    epochs=250, batch_size=16)

# evaluate the network
print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=16)
# argmax converts one-hot rows back to class indices for the report
print(classification_report(testY.argmax(axis=1),
    predictions.argmax(axis=1), target_names=dataset.target_names))
"theyoungsoul@gmail.com"
] | theyoungsoul@gmail.com |
87ebecbb428b65919bd35b4d33ef229bda00f646 | 7c15f211adc9e9eb9f66ccdd570c9f38dff7ea8d | /packages/autorest.python/test/vanilla/legacy/Expected/AcceptanceTests/SecurityKeySwaggerCredentialFlag/securitykeyswaggercredentialflag/aio/__init__.py | 9ad4814fd5f1de73db95b78dbb4a74561e35acb3 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/autorest.python | cc4bfbf91ae11535731cad37cedd6b733edf1ebd | a00d7aaa3753ef05cb5a0d38c664a90869478d44 | refs/heads/main | 2023-09-03T06:58:44.246200 | 2023-08-31T20:11:51 | 2023-08-31T20:11:51 | 100,315,955 | 47 | 40 | MIT | 2023-09-14T21:00:21 | 2017-08-14T22:58:33 | Python | UTF-8 | Python | false | false | 885 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._security_key_swagger_credential_flag import SecurityKeySwaggerCredentialFlag

# Allow hand-written customizations from an optional sibling _patch module
# (standard AutoRest generated-package pattern).
try:
    from ._patch import __all__ as _patch_all
    from ._patch import *  # pylint: disable=unused-wildcard-import
except ImportError:
    _patch_all = []
# NOTE(review): this import is unguarded -- the generated template assumes
# a _patch module always ships with the package.
from ._patch import patch_sdk as _patch_sdk

__all__ = [
    "SecurityKeySwaggerCredentialFlag",
]
# Re-export any extra public names the patch module declares.
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
f7cddd4b937b32d86f7d5852110289a46243c052 | aa06473d26ee952278eada3713c92114f317be73 | /aoc2020/11/main.py | 17c4b2dda92cccdcb61a22eb73c3b4938f213ce3 | [] | no_license | allefant/aoc | 1888b3434379dbee5faf982fcdcf7e4a61b2ca3c | 861421794ac0b57c037a593776fb0dcb9458f1aa | refs/heads/master | 2023-01-28T12:18:36.992292 | 2020-12-11T01:13:24 | 2020-12-11T22:08:01 | 319,518,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,090 | py | #!/usr/bin/env python3
import sys
import re
def step(m, w, h):
    """One generation of the part-1 seating rules.

    Scans the full 3x3 window (the cell itself included), so an occupied
    seat with 4 occupied neighbours sees a count of 5 and empties.
    Border cells are marked "B" and are copied through unchanged.
    """
    nxt = []
    for y in range(h):
        src = m[y]
        dst = []
        for x in range(w):
            cell = src[x]
            if cell in ("L", "#"):
                occ = sum(
                    m[y + dy][x + dx] == "#"
                    for dy in (-1, 0, 1)
                    for dx in (-1, 0, 1)
                )
                if cell == "L" and occ == 0:
                    cell = "#"
                elif cell == "#" and occ >= 5:
                    cell = "L"
            dst.append(cell)
        nxt.append(dst)
    return nxt
def count(m, w, h, what):
    """Total occurrences of `what` across the first h rows of the grid."""
    return sum(m[y].count(what) for y in range(h))
def part1(input):
    """Part 1: iterate `step` until the seating map stops changing, then
    print the number of occupied seats.  `input` is the puzzle file path.
    """
    m0 = []
    for row_ in open(input):  # NOTE(review): file handle is never closed
        row = row_.strip()
        m0.append(row)
    h = len(m0)
    w = len(m0[0])
    print(w, h)
    # Surround the map with a ring of "B" border cells so neighbour scans
    # never index outside the grid.
    m = []
    m.append("B" * (w + 2))
    for i in range(h):
        m.append("B" + m0[i] + "B")
    m.append("B" * (w + 2))
    w += 2
    h += 2
    while True:
        m2 = step(m, w, h)
        if m == m2:  # fixed point reached
            print(count(m, w, h, "#"))
            break
        m = m2
def step2(m, w, h):
    """One generation of the part-2 rules: each seat looks along the 8
    directions for the first *visible* seat (floor "." is looked past),
    and occupied seats empty out at >= 5 visible occupied seats.
    The "B" border ring stops every ray, so no bounds checks are needed.
    """
    nxt = []
    for y in range(h):
        src = m[y]
        dst = []
        for x in range(w):
            cell = src[x]
            if cell in ("L", "#"):
                visible = 0
                for dy in (-1, 0, 1):
                    for dx in (-1, 0, 1):
                        if dy == 0 and dx == 0:
                            continue
                        ry, rx = y, x
                        while True:
                            ry += dy
                            rx += dx
                            seen = m[ry][rx]
                            if seen == ".":
                                continue  # look past floor
                            if seen == "#":
                                visible += 1
                            break  # any seat or the border stops the ray
                if cell == "L" and visible == 0:
                    cell = "#"
                elif cell == "#" and visible >= 5:
                    cell = "L"
            dst.append(cell)
        nxt.append(dst)
    return nxt
def part2(input):
    """Part 2: same fixed-point loop as part1, but with the line-of-sight
    rules of `step2`.  Prints the stable occupied-seat count.
    """
    m0 = []
    for row_ in open(input):  # NOTE(review): file handle is never closed
        row = row_.strip()
        m0.append(row)
    h = len(m0)
    w = len(m0[0])
    print(w, h)
    # Add the "B" border ring; it also terminates every sight ray.
    m = []
    m.append("B" * (w + 2))
    for i in range(h):
        m.append("B" + m0[i] + "B")
    m.append("B" * (w + 2))
    w += 2
    h += 2
    while True:
        m2 = step2(m, w, h)
        if m == m2:  # fixed point reached
            print(count(m, w, h, "#"))
            break
        m = m2
if __name__ == "__main__":
if sys.argv[1] == "1": part1(sys.argv[2])
if sys.argv[1] == "2": part2(sys.argv[2])
| [
"elias@users.sourceforge.net"
] | elias@users.sourceforge.net |
c3b05651a69dbc6cf08cd1913dbb82a5cc28b69f | bfe6c95fa8a2aae3c3998bd59555583fed72900a | /repeatedSubstringPattern.py | b817bc879b45fdd26baf685fb791f702126c8410 | [] | no_license | zzz136454872/leetcode | f9534016388a1ba010599f4771c08a55748694b2 | b5ea6c21bff317884bdb3d7e873aa159b8c30215 | refs/heads/master | 2023-09-01T17:26:57.624117 | 2023-08-29T03:18:56 | 2023-08-29T03:18:56 | 240,464,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py |
class Solution:
    def repeatedSubstringPattern(self, s: str) -> bool:
        """True iff s equals some proper substring repeated 2+ times.

        Classic trick: s is a repetition exactly when s occurs inside
        s + s at an index strictly between 0 and len(s).
        """
        doubled = s + s
        first_shifted_match = doubled.find(s, 1)
        return first_shifted_match != len(s)
# Quick manual check: "aba" is not a repeated pattern -> prints False.
s = 'aba'
sl = Solution()
print(sl.repeatedSubstringPattern(s))
| [
"zzz136454872@163.com"
] | zzz136454872@163.com |
733761a83dcb9ee584718dff751838456c8c948a | 3cf41d1510239ce6987b878aabae1bcc8013fa33 | /account/migrations/0006_auto_20180815_0913.py | 6bb424fd8ef1354f07822ded3d51bf1a78465f34 | [] | no_license | djangogirlsbh/food | cc28bd23aff5593260aeab83b595cafc6ddf7d63 | bd809df79a373b33d12b489b3a5e468dc8cde4d5 | refs/heads/master | 2020-03-29T20:42:29.959148 | 2018-08-29T12:49:47 | 2018-08-29T12:49:47 | 150,326,438 | 0 | 1 | null | 2018-09-25T20:38:45 | 2018-09-25T20:38:44 | null | UTF-8 | Python | false | false | 3,541 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-15 09:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
    """Create the BusinessUnit (MPTT tree), Employee, Employment and
    Position models for the account app.

    The ``lft``/``rght``/``tree_id``/``level`` columns are django-mptt's
    internal tree bookkeeping fields.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('account', '0005_auto_20180421_1237'),
    ]

    operations = [
        migrations.CreateModel(
            name='BusinessUnit',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='business unit name', max_length=100, unique=True)),
                ('lft', models.PositiveIntegerField(db_index=True, editable=False)),
                ('rght', models.PositiveIntegerField(db_index=True, editable=False)),
                ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
                ('level', models.PositiveIntegerField(db_index=True, editable=False)),
                ('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='account.BusinessUnit')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Employee',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='employee name', max_length=100, unique=True)),
                ('email', models.EmailField(max_length=254)),
                ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='employee', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Employment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('employment_date', models.DateField(null=True)),
                ('lft', models.PositiveIntegerField(db_index=True, editable=False)),
                ('rght', models.PositiveIntegerField(db_index=True, editable=False)),
                ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
                ('level', models.PositiveIntegerField(db_index=True, editable=False)),
                ('businessunit', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.BusinessUnit')),
                ('employee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.Employee')),
                ('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='reportees', to='account.Employment')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Position',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.AddField(
            model_name='employment',
            name='position',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.Position'),
        ),
    ]
| [
"palashpatidar51@gmail.com"
] | palashpatidar51@gmail.com |
30792d92890cd2cc84ffe53925265f4a6dd48b76 | 1adf769cf9234f9b6c619f808d2723b99451d679 | /rusentrel/classic_cv/common.py | af55d7f1421214a903c7b1ec094ed9a00b6797a6 | [
"MIT"
] | permissive | DAVMARROS/attitude-extraction-with-attention-and-ds | 4e85fa154ead0cd9499aaedf5d752ac565f37b92 | fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d | refs/heads/master | 2023-02-09T04:56:24.090380 | 2020-12-30T10:09:34 | 2020-12-30T10:09:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | from rusentrel.classic.common import classic_common_callback_modification_func
# Cross-validation fold count; CV_NAME_PREFIX presumably prefixes per-fold
# experiment names -- confirm at the call sites.
CV_COUNT = 3
CV_NAME_PREFIX = u'cv_'


def classic_cv_common_callback_modification_func(callback):
    """
    This function describes configuration setup for all model callbacks.
    """
    # Start from the shared (non-CV) classic configuration ...
    classic_common_callback_modification_func(callback)
    # ... then apply the CV-specific cancellation bounds and flags.
    callback.set_cancellation_acc_bound(0.981)
    callback.set_cancellation_f1_train_bound(0.85)
    callback.set_key_save_hidden_parameters(False)
    callback.set_key_stop_training_by_cost(True)
| [
"kolyarus@yandex.ru"
] | kolyarus@yandex.ru |
043fc7966bba39396a2d0d6a71cca52101550d54 | 17f3568e0be991636501970fb76c4c53a71ab38d | /opsgenie_sdk/api/alert/list_saved_searches_response_all_of.py | 54af2fc57517f94a7a9510294093791705444e21 | [
"Apache-2.0"
] | permissive | jkinred/opsgenie-python-sdk | 7b79ed8c7518de117887e6b76a3fbb5800b94020 | 69bbd671d2257c6c3ab2f3f113cb62bd1a941c02 | refs/heads/master | 2020-07-10T00:24:19.583708 | 2019-08-24T06:35:31 | 2019-08-24T06:35:31 | 204,118,572 | 0 | 0 | NOASSERTION | 2019-08-24T06:29:25 | 2019-08-24T06:29:24 | null | UTF-8 | Python | false | false | 3,077 | py | # coding: utf-8
"""
Python SDK for Opsgenie REST API
Python SDK for Opsgenie REST API # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: support@opsgenie.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ListSavedSearchesResponseAllOf(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # NOTE: the following triple-quoted string is a plain expression, not
    # part of the class docstring (generated-template quirk).
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'data': 'list[SavedSearchMeta]'
    }

    attribute_map = {
        'data': 'data'
    }

    def __init__(self, data=None):  # noqa: E501
        """ListSavedSearchesResponseAllOf - a model defined in OpenAPI"""  # noqa: E501

        self._data = None
        self.discriminator = None  # no polymorphic subtypes for this model

        if data is not None:
            self.data = data

    @property
    def data(self):
        """Gets the data of this ListSavedSearchesResponseAllOf.  # noqa: E501


        :return: The data of this ListSavedSearchesResponseAllOf.  # noqa: E501
        :rtype: list[SavedSearchMeta]
        """
        return self._data

    @data.setter
    def data(self, data):
        """Sets the data of this ListSavedSearchesResponseAllOf.


        :param data: The data of this ListSavedSearchesResponseAllOf.  # noqa: E501
        :type: list[SavedSearchMeta]
        """

        self._data = data

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk the declared attributes, recursively serialising anything
        # that itself exposes a to_dict() (models nested in lists/dicts too).
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListSavedSearchesResponseAllOf):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"zafer@opsgenie.com"
] | zafer@opsgenie.com |
b512df2f51179c5b81503c7fb4d3ef456da692d3 | 7365f2410c139c5f4bf5ba0777ed0321322c92d9 | /python/二叉树中和为某一值的路径.py | 5c2c04737f7db1ddc05645e8c66ed1f70822e455 | [] | no_license | EvanJamesMG/Point-to-the-offer | 956a17a3c2a0d99a11428765f6af9f4ebbbe5fc3 | cc9b6b7572cf819f0e53a800899e1ebd9fd6cf9d | refs/heads/master | 2021-01-10T17:11:06.125860 | 2016-04-21T03:47:15 | 2016-04-21T03:47:15 | 52,489,364 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,845 | py | # coding=utf-8
__author__ = 'EvanJames'
class ListNode:
    """Singly linked list node."""
    def __init__(self, x):
        self.val = x
        self.next = None
class TreeNode:
    """Binary tree node with left/right children."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
'''
题目描述
输入一颗二叉树和一个整数,打印出二叉树中结点值的和为输入整数的所有路径。
路径定义为从树的根结点开始往下一直到叶结点所经过的结点形成一条路径。
解题思路:DFS
注意坑:编程时候,valuelist.append(1)是对自己赋值,此时tem =valulist.append(1) 是不对的,
要想重新生成新的数组,应写为 valuelist+[1]
'''
class Solution:
    # Returns a list of lists; each inner list is one root-to-leaf path
    # whose node values sum to the requested total.
    def FindPath(self, root, expectNumber):
        """Collect every root-to-leaf path summing to expectNumber."""
        if root is None:
            return []
        self.res = []
        self.DFS(root, expectNumber - root.val, [root.val])
        return self.res

    def DFS(self, root, expectNumber, valuelist):
        """Depth-first walk; expectNumber is the remaining sum still needed."""
        if root.left is None and root.right is None and expectNumber == 0:
            self.res.append(valuelist)
        # visit left before right, extending the path with a fresh list
        for child in (root.left, root.right):
            if child is not None:
                self.DFS(child, expectNumber - child.val, valuelist + [child.val])
if __name__ == '__main__':
    # Tiny smoke test: tree 1-(2,3); only the path 1->2 sums to 3.
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    res = Solution().FindPath(root, 3)
    print(res)
'''
The fucking Java code!
import java.util.ArrayList;
public class test {
public static void main(String[] args){
ArrayList<ArrayList<Integer>> res= new ArrayList<ArrayList<Integer>>();
TreeNode root = new TreeNode(1);
root.left = new TreeNode(2);
root.right = new TreeNode(3);
res= FindPath(root,4);
System.out.println(res);
}
public static ArrayList<ArrayList<Integer>> FindPath(TreeNode root,int target) {
ArrayList<ArrayList<Integer>> res= new ArrayList<ArrayList<Integer>>();
if(root == null)
return res;
ArrayList<Integer> valuelist = new ArrayList<Integer>() ;
valuelist.add(root.val);
DFS(root,target-root.val,valuelist,res);
return res;
}
private static void DFS(TreeNode root, int sum, ArrayList<Integer> valuelist, ArrayList<ArrayList<Integer>> res) {
// TODO Auto-generated method stub
if(root.left==null && root.right ==null && sum==0){
res.add(valuelist);
}
if(root.left!=null){
ArrayList<Integer> temlist = new ArrayList<Integer>(valuelist);
temlist.add(root.left.val);
DFS(root.left,sum-root.left.val,temlist,res);
}
if(root.right!=null){
ArrayList<Integer> temlist = new ArrayList<Integer>(valuelist);
temlist.add(root.right.val);
DFS(root.right,sum-root.right.val,temlist,res);
}
}
}
'''
| [
"Evan123mg@gmail.com"
] | Evan123mg@gmail.com |
75c97fc7b38cb2373276f35754ce6cf90e46de18 | 91863555b2bf1044a420c62a2f7e696724d5ca80 | /models/pointnet_cls.py | 12b466d2155e566df8fea15d3f7db5abf765ebc0 | [] | no_license | SIlvaMFPedro/pointcloud-networks | dd90f6767d7e5dcbffe6f719118450929ca06d91 | 1369d4f74e4f6e964465d6e39157031bd83aac97 | refs/heads/master | 2020-12-06T22:48:01.012743 | 2020-02-04T17:47:24 | 2020-02-04T17:47:24 | 232,573,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,113 | py | # ------------------------
# IMPORTS
# ------------------------
# Import the necessary packages
import tensorflow as tf
import numpy as np
import math
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '..\\utils'))
import utils.tf_util
from models.transform_nets import input_transform_net, feature_transform_net
# ------------------------
# FUNCTIONS
# ------------------------
def placeholder_inputs(batch_size, num_point):
    """Create the graph placeholders for one batch of point clouds.

    Returns a (pointclouds_pl, labels_pl) pair: a BxNx3 float32 tensor of
    xyz coordinates and a length-B int32 vector of class labels.
    """
    clouds = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
    labels = tf.placeholder(tf.int32, shape=batch_size)
    return clouds, labels
def get_model(point_cloud, is_training, bn_decay=None):
    """Classification PointNet.

    Args:
        point_cloud: BxNx3 float tensor of xyz points.
        is_training: bool tensor toggling batch-norm/dropout behaviour.
        bn_decay: optional batch-norm decay tensor, forwarded to all layers.

    Returns:
        (net, end_points): Bx40 class logits and a dict carrying the 64x64
        feature transform (consumed by get_loss's orthogonality regularizer).
    """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    # Learn a 3x3 spatial transform and apply it to the raw points.
    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    # Add a singleton channel dim so conv2d treats points as a BxNx3x1 image.
    input_image = tf.expand_dims(point_cloud_transformed, -1)
    net = utils.tf_util.conv2d(input_image, 64, [1, 3], padding='VALID', stride=[1, 1], bn=True,
                               is_training=is_training, scope='conv1', bn_decay=bn_decay)
    net = utils.tf_util.conv2d(net, 64, [1,1], padding='VALID', stride=[1,1], bn=True, is_training=is_training,
                               scope='conv2', bn_decay=bn_decay)
    # Second learned transform, this time in 64-d feature space; kept in
    # end_points so the loss can regularize it towards orthogonality.
    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])
    net = utils.tf_util.conv2d(net_transformed, 64, [1, 1], padding='VALID', stride=[1, 1], bn=True,
                               is_training=is_training, scope='conv3', bn_decay=bn_decay)
    net = utils.tf_util.conv2d(net, 128, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training,
                               scope='conv4', bn_decay=bn_decay)
    net = utils.tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training,
                               scope='conv5', bn_decay=bn_decay)
    # Symmetric function: max pooling
    # (pooling over all N points makes the model permutation invariant)
    net = utils.tf_util.max_pool2d(net, [num_point, 1], padding='VALID', scope='maxpool')
    net = tf.reshape(net, [batch_size, -1])
    # Classification head: two FC+dropout stages, then 40-way logits.
    net = utils.tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = utils.tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')
    net = utils.tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = utils.tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp2')
    net = utils.tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')
    return net, end_points
def get_loss(pred, label, end_points, reg_weight=0.001):
    """Classification loss plus feature-transform regularization.

    Args:
        pred: B x NUM_CLASSES logits from get_model.
        label: length-B int labels.
        end_points: dict from get_model; must contain 'transform' (BxKxK).
        reg_weight: weight of the orthogonality penalty.

    Returns:
        Scalar tensor: mean cross-entropy + reg_weight * ||T*T^t - I||^2 / 2.
    """
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
    classify_loss = tf.reduce_mean(loss)
    tf.summary.scalar('classify loss', classify_loss)
    # Enforce the transformation as orthogonal matrix:
    # penalize the deviation of T*T^t from the identity.
    transform = end_points['transform'] # BxKxK
    K = transform.get_shape()[1].value
    mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0, 2, 1]))
    mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
    mat_diff_loss = tf.nn.l2_loss(mat_diff)
    tf.summary.scalar('mat loss', mat_diff_loss)
    return classify_loss + mat_diff_loss * reg_weight
# ------------------------
# MAIN FUNCTION
# ------------------------
if __name__ == '__main__':
    # Graph-construction smoke test: build the model on a dummy batch of
    # 32 clouds x 1024 points and print the resulting output tensors.
    with tf.Graph().as_default():
        inputs = tf.zeros((32, 1024, 3))
        outputs = get_model(inputs, tf.constant(True))
        print(outputs)
| [
"silva.mfpedro@gmail.com"
] | silva.mfpedro@gmail.com |
037740a786e00e1013b215f7045a3547f1c59296 | 7d90d2ce27c6ee0af74391b09909edbd45fdc2f0 | /renix_py_api/api_gen/IsisPortRateConfig_Autogen.py | 1609c4903b9fb7ae49ff15d9a4f7e2266edacdc0 | [] | no_license | gaoxingyu-hub/54testframework-master-e284 | d7ea0d4a715b65c8652430e963a86b9522a7237a | 57dd2197e7d91b8ad8fb2bd0e3503f10afa08544 | refs/heads/master | 2023-04-30T05:50:41.542402 | 2021-05-28T09:19:37 | 2021-05-28T09:19:37 | 309,922,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,902 | py | """
Auto-generated File
Create Time: 2019-12-27 02:33:27
"""
from .ROMEnum_Autogen import *
from renix_py_api.renix_common_api import *
from renix_py_api import rom_manager
from .ROMObject_Autogen import ROMObject
@rom_manager.rom
class IsisPortRateConfig(ROMObject):
    """Auto-generated ROM wrapper for the IS-IS port rate configuration.

    NOTE(review): this file is auto-generated; prefer regenerating over
    hand-editing.
    """
    def __init__(self, UpdateRoutesTransmitRate=None, **kwargs):
        self._UpdateRoutesTransmitRate = UpdateRoutesTransmitRate # IS-IS Tx Hello Rate(messages/second)
        properties = kwargs.copy()
        if UpdateRoutesTransmitRate is not None:
            properties['UpdateRoutesTransmitRate'] = UpdateRoutesTransmitRate
        # call base class function, and it will send message to renix server to create a class.
        super(IsisPortRateConfig, self).__init__(**properties)
    def delete(self):
        """
        call to delete itself
        """
        return self._finalize()
    def edit(self, UpdateRoutesTransmitRate=None, **kwargs):
        # Push any changed properties to the server via the base edit().
        properties = kwargs.copy()
        if UpdateRoutesTransmitRate is not None:
            self._UpdateRoutesTransmitRate = UpdateRoutesTransmitRate
            properties['UpdateRoutesTransmitRate'] = UpdateRoutesTransmitRate
        super(IsisPortRateConfig, self).edit(**properties)
    @property
    def UpdateRoutesTransmitRate(self):
        """
        get the value of property _UpdateRoutesTransmitRate
        """
        # When force_auto_sync is set, re-fetch the value from the server first.
        if self.force_auto_sync:
            self.get('UpdateRoutesTransmitRate')
        return self._UpdateRoutesTransmitRate
    @UpdateRoutesTransmitRate.setter
    def UpdateRoutesTransmitRate(self, value):
        # Setting the property also propagates the change to the server.
        self._UpdateRoutesTransmitRate = value
        self.edit(UpdateRoutesTransmitRate=value)
    def _set_updateroutestransmitrate_with_str(self, value):
        # Parse a decimal string; fall back to hex for values like "0x1f".
        # NOTE(review): the hex branch stores a *string* (hex() returns str),
        # not an int — presumably intentional for the autogen framework; confirm.
        try:
            self._UpdateRoutesTransmitRate = int(value)
        except ValueError:
            self._UpdateRoutesTransmitRate = hex(int(value, 16))
| [
"gaoxingyu@example.com"
] | gaoxingyu@example.com |
25ee11f1d07c9473ad9c0c5898e66388e55da898 | 7b034caedfa49de09c3883401afa001ce234dea7 | /utils/queue_utils/output_job_queue.py | 411660283561f473f5ec4913ea5a30c2f548de20 | [
"MIT"
] | permissive | Brown-University-Library/usep_gh_handler_app | 36cbc81d3233838ba0d511b27a050884d3b61baa | b271b8f26a3c27910445f1c0b55f9fbf6648865a | refs/heads/main | 2023-05-28T13:44:46.124344 | 2021-11-08T16:06:13 | 2021-11-08T16:06:13 | 19,741,222 | 0 | 2 | MIT | 2023-05-22T23:19:37 | 2014-05-13T14:00:38 | Python | UTF-8 | Python | false | false | 757 | py | # -*- coding: utf-8 -*-
import os, pprint
import redis, rq
# Inspection script: dump every pending job in the local RQ queue `usep`.
# Connects to a Redis instance on localhost with default settings.
queue_name = 'usep'
q = rq.Queue( queue_name, connection=redis.Redis() )
print( '- number of jobs in queue `%s`: %s' % (queue_name, len(q.jobs)) )
for job in q.jobs:
    # Collect the interesting public and private job attributes for display.
    job_d = {
        '_args': job._args,
        '_kwargs': job._kwargs,
        '_func_name': job._func_name,
        'description': job.description,
        'dt_created': job.created_at,
        'dt_enqueued': job.enqueued_at,
        'dt_ended': job.ended_at,
        'origin': job.origin,
        'id': job._id,
        'traceback': job.exc_info,
        'meta': job.meta,
        '_result': job._result,
        '_status': job._status,
    }
    print( '- job info...' )
    pprint.pprint( job_d )
    print( '---' )
| [
"birkin.diana@gmail.com"
] | birkin.diana@gmail.com |
b0960bc21f347c5f635065af13576341dc352f87 | 499a78ab760d0cd052acb3a3abd87e22b7075fc4 | /XOR/3_Favorite_byte.py | 036d42fc93345cdf565afb94dba0a751389db805 | [] | no_license | keithrozario/crypto_challenges | 5a4588db9c25ea25e86ef18d60ee144d40dec8b9 | 44083440d7d9713830a2d0854d1763eb82eb78cc | refs/heads/master | 2023-07-17T19:23:58.174041 | 2021-08-29T03:58:14 | 2021-08-29T03:58:14 | 391,328,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | """
single_byte_xor courtesy of
https://www.codementor.io/@arpitbhayani/deciphering-single-byte-xor-ciphertext-17mtwlzh30
"""
def single_byte_xor(text: bytes, key: int) -> bytes:
    """XOR every byte of *text* with *key* (a byte value in [0, 256)).

    XOR is its own inverse, so the same call both encrypts and decrypts.
    """
    return bytes(key ^ byte for byte in text)
# Hex-encoded ciphertext known to be single-byte-XOR encrypted.
data = "73626960647f6b206821204f21254f7d694f7624662065622127234f726927756d"

ciphertext = bytes.fromhex(data)
print(ciphertext)

# Brute-force all 256 keys; the plaintext is known to start with "crypto".
for key in range(256):
    candidate = single_byte_xor(text=ciphertext, key=key)
    try:
        if candidate.decode('utf-8')[:6] == "crypto":
            print(f"key:{key}, decrypted: {candidate}")
    except UnicodeDecodeError:
        # Most wrong keys produce invalid UTF-8; just skip them.
        pass
| [
"keithjosephrozario@gmail.com"
] | keithjosephrozario@gmail.com |
ad47c84e3d814504a9b83adc133a2ed4f63c124d | c676bf5e77ba43639faa6f17646245f9d55d8687 | /tests/st/ops/gpu/test_reciprocal_op.py | fb422a94cfeced98b8012bf36b72af6c9cc3b0ce | [
"Apache-2.0",
"BSD-3-Clause-Open-MPI",
"MPL-2.0-no-copyleft-exception",
"LGPL-2.1-only",
"BSD-3-Clause",
"MPL-2.0",
"MPL-1.0",
"Libpng",
"AGPL-3.0-only",
"MPL-1.1",
"LicenseRef-scancode-proprietary-license",
"MIT",
"IJG",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"GPL-2.0-only",
"BSL-1.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | zhengnengjin/mindspore | 1e2644e311f54a8bd17010180198a46499e9c88f | 544b859bb5f46611882749088b44c5aebae0fba1 | refs/heads/master | 2022-05-13T05:34:21.658335 | 2020-04-28T06:39:53 | 2020-04-28T06:39:53 | 259,522,589 | 2 | 0 | Apache-2.0 | 2020-04-28T03:35:33 | 2020-04-28T03:35:33 | null | UTF-8 | Python | false | false | 2,302 | py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
from mindspore import Tensor
from mindspore.ops import operations as P
import mindspore.nn as nn
import numpy as np
import mindspore.context as context
class NetReciprocal(nn.Cell):
    """Minimal MindSpore cell wrapping the element-wise Reciprocal op."""
    def __init__(self):
        super(NetReciprocal, self).__init__()
        self.reciprocal = P.Reciprocal()

    def construct(self, x):
        # Element-wise 1/x.
        return self.reciprocal(x)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_Reciprocal():
    """Check Reciprocal against numpy on GPU in both execution modes.

    Improvement over the original: the identical PYNATIVE/GRAPH check
    sequence was duplicated verbatim; it is now a single loop over modes.
    """
    x0_np = np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(np.float32)
    x1_np = np.random.uniform(-2, 2, 1).astype(np.float32)
    x0 = Tensor(x0_np)
    x1 = Tensor(x1_np)
    expect0 = np.reciprocal(x0_np)
    error0 = np.ones(shape=expect0.shape) * 1.0e-5
    expect1 = np.reciprocal(x1_np)
    error1 = np.ones(shape=expect1.shape) * 1.0e-5

    # The same tolerance checks must hold in both PyNative and Graph mode.
    for mode in (context.PYNATIVE_MODE, context.GRAPH_MODE):
        context.set_context(mode=mode, device_target="GPU")
        reciprocal = NetReciprocal()
        for x, expect, error in ((x0, expect0, error0), (x1, expect1, error1)):
            output = reciprocal(x)
            diff = output.asnumpy() - expect
            assert np.all(diff < error)
            assert (output.shape() == expect.shape)
"leon.wanghui@huawei.com"
] | leon.wanghui@huawei.com |
5b266c3c679033a85b2f0ff641d253f9095b0cad | 7e5d7f35551e72cc98f3b8c10ec0dc4cfb032d95 | /python/tests/test_decay.py | 7affd6b2bd115c537b8e9602edd1a3da020fc616 | [
"Apache-2.0"
] | permissive | ijindal/baseline | 3fdf7bbff483f8b5093f90f3c8b2eb0059cd67b2 | 2261abfb7e770cc6f3d63a7f6e0015238d0e11f8 | refs/heads/master | 2020-03-19T15:16:05.757374 | 2019-06-28T18:50:09 | 2019-06-28T18:50:09 | 136,663,537 | 0 | 3 | Apache-2.0 | 2019-07-10T13:16:02 | 2018-06-08T20:32:29 | Python | UTF-8 | Python | false | false | 5,407 | py | import six
import pytest
import numpy as np
from mock import patch, MagicMock
import baseline
from baseline.train import (
create_lr_scheduler,
CosineDecayScheduler,
CyclicLRScheduler,
ExponentialDecayScheduler,
WarmupLinearScheduler,
ConstantScheduler,
PiecewiseDecayScheduler,
ZarembaDecayScheduler,
InverseTimeDecayScheduler,
CompositeLRScheduler,
)
@pytest.fixture
def piecewise():
    # Build a random piecewise spec: two ascending boundaries (max_ is at
    # least min_+2 so a strictly interior step exists) and len(bounds)+1
    # random rates, one per segment.
    min_ = np.random.randint(1, 5)
    max_ = np.random.randint(min_ + 2, min_ + 7)
    bounds = [min_, max_]
    vals = np.random.uniform(size=len(bounds) + 1)
    return bounds, vals
def test_zaremba_with_nones():
    """Without boundary/decay arguments, Zaremba must act as a constant LR."""
    eta = np.random.rand()
    zd = ZarembaDecayScheduler(lr=eta)
    steps = np.random.randint(0, 1000000, size=100)
    assert all(zd(step) == eta for step in steps)
def test_piecewise_start(piecewise):
    # Step 0 lies before the first boundary, so the first rate applies.
    b, v = piecewise
    p = PiecewiseDecayScheduler(b, v)
    lr = p(0)
    assert lr == v[0]
def test_piecewise_mid(piecewise):
    # A step strictly between the two boundaries selects the middle rate.
    b, v = piecewise
    p = PiecewiseDecayScheduler(b, v)
    step = np.random.randint(np.min(b) + 1, np.max(b))
    lr = p(step)
    assert lr == v[1]
def test_piecewise_lsat(piecewise):
    # A step past the last boundary selects the final rate.
    # NOTE(review): "lsat" looks like a typo for "last" — renaming would
    # change the collected test id, so it is only flagged here.
    b, v = piecewise
    p = PiecewiseDecayScheduler(b, v)
    step = np.random.randint(np.max(b) + 3, np.max(b) + 100)
    lr = p(step)
    assert lr == v[-1]
def test_staircase_decay_flat():
    # With staircase=True the LR is constant within one stair and changes
    # only once a boundary is crossed.
    steps = np.random.randint(900, 1001)
    sd = ExponentialDecayScheduler(steps, np.random.rand(), lr=np.random.rand(), staircase=True)
    stair_one_one = sd(np.random.randint(steps - 100, steps))
    stair_one_two = sd(np.random.randint(steps - 100, steps))
    stair_two = sd(np.random.randint(steps + 1, steps + 10))
    assert stair_one_one == stair_one_two
    assert stair_one_two != stair_two
def test_staircase_value():
    # Exact stair values: lr * decay_rate ** floor(step / decay_steps).
    sd = ExponentialDecayScheduler(1000, 0.9, lr=1.0, staircase=True)
    gold = 1.0
    test = sd(100)
    np.testing.assert_allclose(test, gold)
    gold = 0.9
    test = sd(1001)
    np.testing.assert_allclose(test, gold)
def test_exp_values():
    # Smooth (non-staircase) exponential decay: lr * rate ** (step / steps).
    sd = ExponentialDecayScheduler(1000, 0.9, lr=1.0)
    gold = 0.9895192582062144
    test = sd(100)
    np.testing.assert_allclose(test, gold)
    gold = 0.8999051805311098
    test = sd(1001)
    np.testing.assert_allclose(test, gold)
def test_warmup_peaks(): 
    # Linear warmup reaches the target lr exactly at warmup_steps and
    # stays there afterwards.
    steps = np.random.randint(100, 1000)
    lr = np.random.rand()
    wls = WarmupLinearScheduler(steps, lr=lr)
    peak = wls(steps)
    assert peak == lr
    past = wls(steps + np.random.randint(100, 10000))
    assert past == lr
def test_warmup_increases():
    """During warmup the learning rate must be strictly increasing."""
    steps = np.random.randint(100, 1000)
    lr = np.random.rand()
    wls = WarmupLinearScheduler(steps, lr=lr)
    rates = [wls(step) for step in range(steps)]
    # Original seeded the comparison with -1, so the first value must beat it.
    assert rates[0] > -1
    assert all(earlier < later for earlier, later in zip(rates, rates[1:]))
def test_cyclic_lr():
    # One full triangular cycle spans 2*bounds steps: min -> max -> min,
    # then the pattern repeats.
    bounds = 1000
    min_eta = 1e-5
    max_eta = 1e-2
    clr = CyclicLRScheduler(max_eta, bounds, lr=min_eta)
    start = clr(0)
    up = clr(bounds / 2.)
    mid = clr(bounds)
    down = clr(bounds + (bounds / 2.))
    end = clr(2 * bounds)
    late = clr(3 * bounds)  # one half-cycle into the second triangle
    assert start == min_eta
    assert up > start
    assert up < mid
    assert mid == max_eta
    assert down < mid
    assert down > end
    assert end == min_eta
    assert late == max_eta
def test_cosine_lr():
    """Spot-check cosine decay values, including saturation to 0 at the end."""
    cd = CosineDecayScheduler(1000, lr=0.1)
    expected = {0: 0.1, 100: 0.09755283, 900: 0.002447176, 1000: 0.0, 1001: 0.0}
    for step, gold in expected.items():
        np.testing.assert_allclose(cd(step), gold, rtol=1e-6)
def test_constant_lr():
    """A constant scheduler returns the same lr at every step."""
    lr = np.random.rand()
    scheduler = ConstantScheduler(lr=lr)
    n_checks = np.random.randint(100, 1000)
    for step in np.random.randint(0, 10000000, size=n_checks):
        assert scheduler(step) == lr
def test_inverse_time_values():
    # Inverse time decay: lr / (1 + rate * step / steps); with rate=1 the
    # value at k*steps is exactly eta / (k + 1).
    eta = 1.0
    steps = np.random.randint(1, 100)
    ti = InverseTimeDecayScheduler(steps, 1.0, lr=eta)
    for i in range(1, 5):
        lr = ti(i * steps)
        assert lr == eta / (i + 1)
def test_inverse_time_is_flat():
    # With staircase=True the LR is piecewise constant: it differs across a
    # boundary but is identical for any two steps inside the same stair.
    steps = np.random.randint(1, 100)
    ti = InverseTimeDecayScheduler(steps, np.random.rand(), staircase=True, lr=np.random.rand())
    before = steps - np.random.randint(1, steps)
    after = steps + np.random.randint(1, steps)
    after2 = steps + np.random.randint(1, steps)
    lr_before = ti(before)
    lr_after = ti(after)
    lr_after2 = ti(after2)
    assert lr_before != lr_after
    assert lr_after == lr_after2
def test_composite_calls_warm():
    # Steps at or below warmup_steps must be routed to the warm scheduler
    # only, with the raw step value.
    warmup_steps = np.random.randint(50, 101)
    warm = MagicMock()
    warm.warmup_steps = warmup_steps
    rest = MagicMock()
    lr = CompositeLRScheduler(warm=warm, rest=rest)
    step = np.random.randint(0, warmup_steps)
    _ = lr(step)
    warm.assert_called_once_with(step)
    rest.assert_not_called()
def test_composite_calls_rest():
    # Steps past warmup must go to the rest scheduler only, shifted so the
    # rest scheduler starts counting from zero.
    warmup_steps = np.random.randint(50, 101)
    warm = MagicMock()
    warm.warmup_steps = warmup_steps
    rest = MagicMock()
    lr = CompositeLRScheduler(warm=warm, rest=rest)
    step = np.random.randint(warmup_steps + 1, six.MAXSIZE)
    _ = lr(step)
    warm.assert_not_called()
    rest.assert_called_once_with(step - warmup_steps)
def test_composite_error():
    # Building a composite from two non-warmup schedulers must be rejected.
    pytest.importorskip('torch')
    # NOTE(review): the import below is unused by name — presumably it is
    # needed for its registration side effects; confirm before removing.
    from baseline.pytorch.optz import CompositeLRSchedulerPyTorch
    with pytest.raises(AssertionError):
        _ = create_lr_scheduler(**{"lr_scheduler_type": ["exponential", "zaremba"]})
| [
"dpressel@gmail.com"
] | dpressel@gmail.com |
9b549beccb6cedac47258ab75915d34cdb08a1a2 | eb0b328aabcaea4c65f50776efacc7ffeb4f0f00 | /pages/migrations/0001_initial.py | 1c29a4a665a46ce318302e89489b20d335d60819 | [] | no_license | skiboorg/webtouch | 06dc03b614d7b51ff3ee330e5d60649f80cd903c | 32f16f78a4bb437c33c5363fa528bf2325bced7b | refs/heads/master | 2022-03-13T01:50:34.266663 | 2019-10-21T18:37:21 | 2019-10-21T18:37:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,232 | py | # Generated by Django 2.2.6 on 2019-10-05 12:49
import ckeditor_uploader.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated migration for the `pages` app.

    Creates Filter, PortfolioItem, Status and PortfolioItemImage, then wires
    PortfolioItem.status as a nullable FK (added last because Status is
    created after PortfolioItem). Auto-generated by Django 2.2.6 — do not
    hand-edit; create a new migration instead.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Filter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=100, verbose_name='Название фильтра')),
                ('name_slug', models.CharField(blank=True, max_length=255, null=True)),
            ],
            options={
                'verbose_name': 'Фильтр',
                'verbose_name_plural': 'Фильтры',
            },
        ),
        migrations.CreateModel(
            name='PortfolioItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=255, verbose_name='Название')),
                ('name_slug', models.CharField(blank=True, max_length=255, null=True)),
                ('image', models.ImageField(blank=True, upload_to='portfolio_img/', verbose_name='Изображение')),
                ('client', models.CharField(default='', max_length=255, verbose_name='Клиент')),
                ('date', models.CharField(default='', max_length=100, verbose_name='Дата')),
                ('url', models.CharField(default='', max_length=100, verbose_name='Ссылка')),
                ('wishes', ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True, verbose_name='Пожелания')),
                ('technical', ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True, verbose_name='Решения')),
                ('progressBarBackEnd', models.IntegerField(default=0, verbose_name='Прогресс-бар BackEnd')),
                ('progressBarFrontEnd', models.IntegerField(default=0, verbose_name='Прогресс-бар FrontEnd')),
                ('progressBarProduction', models.IntegerField(default=0, verbose_name='Прогресс-бар Production')),
                ('progressBarSEO', models.IntegerField(default=0, verbose_name='Прогресс-бар SEO')),
                ('is_active', models.BooleanField(db_index=True, default=True, verbose_name='Отображать ?')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('filter', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='pages.Filter', verbose_name='Фильтр')),
            ],
        ),
        migrations.CreateModel(
            name='Status',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=100, verbose_name='Статус')),
                ('name_slug', models.CharField(blank=True, max_length=255, null=True)),
                ('color', models.CharField(default='#', max_length=100, verbose_name='Цвет в виде #000000')),
            ],
            options={
                'verbose_name': 'Статус',
                'verbose_name_plural': 'Статусы',
            },
        ),
        migrations.CreateModel(
            name='PortfolioItemImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='portfolio_img/', verbose_name='Картинка')),
                ('item', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='pages.PortfolioItem', verbose_name='Кейс')),
            ],
        ),
        migrations.AddField(
            model_name='portfolioitem',
            name='status',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='pages.Status', verbose_name='Текущий статус'),
        ),
    ]
| [
"ddnnss.i1@gmail.com"
] | ddnnss.i1@gmail.com |
2c41dced7d5f2643e2a0b5a13d1489e2dcfedae6 | 19da1a56f137a08772c347cf974be54e9c23c053 | /lib/adafruit_st7789.py | 48646518e9885771e3c8fc07206f09314fa59323 | [] | no_license | mk53202/mk53202-timeclock-pyportal | d94f45a9d186190a4bc6130077baa6743a816ef3 | 230a858d429f8197c00cab3e67dcfd3b295ffbe0 | refs/heads/master | 2021-02-04T05:38:25.533292 | 2020-02-27T22:45:56 | 2020-02-27T22:45:56 | 243,626,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,370 | py | # The MIT License (MIT)
#
# Copyright (c) 2019 Melissa LeBlanc-Williams for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_st7789`
====================================================
Displayio driver for ST7789 based displays.
* Author(s): Melissa LeBlanc-Williams
Implementation Notes
--------------------
**Hardware:**
* Adafruit 1.54" 240x240 Wide Angle TFT LCD Display with MicroSD:
https://www.adafruit.com/product/3787
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import displayio
__version__ = "1.0.1"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_ST7789.git"
# Panel initialization command stream consumed by displayio.Display.
# NOTE(review): each entry appears to be <command> <flag/arg-count> <args...>,
# where the 0x80 flag marks a trailing delay byte — confirm against the
# displayio init-sequence documentation before editing.
_INIT_SEQUENCE = (
    b"\x01\x80\x96"  # _SWRESET and Delay 150ms
    b"\x11\x80\xFF"  # _SLPOUT and Delay 500ms
    b"\x3A\x81\x55\x0A"  # _COLMOD and Delay 10ms
    b"\x36\x01\x08"  # _MADCTL
    b"\x21\x80\x0A"  # _INVON Hack and Delay 10ms
    b"\x13\x80\x0A"  # _NORON and Delay 10ms
    b"\x36\x01\xC0"  # _MADCTL
    b"\x29\x80\xFF"  # _DISPON and Delay 500ms
)
# pylint: disable=too-few-public-methods
class ST7789(displayio.Display):
    """displayio driver for ST7789-based TFT panels.

    :param bus: the display bus (e.g. ``displayio.FourWire``) the panel
        is attached to. All other keyword arguments (width, height,
        rotation, ...) are forwarded unchanged to ``displayio.Display``.
    """
    def __init__(self, bus, **kwargs):
        # The whole panel bring-up is driven by the module-level
        # _INIT_SEQUENCE command stream.
        super().__init__(bus, _INIT_SEQUENCE, **kwargs)
| [
"mkoster@stack41.com"
] | mkoster@stack41.com |
b441203e8ef873ce091b06bce5476c06a40a47c3 | 46349356d4812a6bf04a1dff4ee3311864f8b7ff | /ma_py/_main_plt_corr.py | 6cb586b4594b4b189e47762f33f86382e29e4968 | [] | no_license | alexdoberman/ma | 1ca9d20f64d0e8c87feff9f7bb04d09d3088aeb3 | 219e5e87b80c6a795c0d4161b3ad22b9973ed745 | refs/heads/master | 2022-07-17T13:15:21.672335 | 2020-05-12T15:10:40 | 2020-05-12T15:10:40 | 263,365,873 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py | # -*- coding: utf-8 -*-
import numpy as np
import soundfile as sf
import matplotlib.pyplot as plt
def determine_lag(x, y, max_lag):
    """Find the circular shift of *y*, within +/-max_lag samples, that best
    aligns it with *x* by maximizing the cross-correlation.

    Returns (best_lag, shifted_y) where shifted_y == np.roll(y, best_lag).
    Ties resolve to the smallest lag, matching ascending scan order.
    """
    best_lag = -max_lag
    best_corr = -np.inf
    for lag in range(-max_lag, max_lag + 1):
        corr = np.sum(x * np.roll(y, lag))
        if corr > best_corr:
            best_lag, best_corr = lag, corr
    return best_lag, np.roll(y, best_lag)
if __name__ == '__main__':
    # Align a "speech" and an "interference" delay-and-sum signal, subtract
    # the aligned interference and plot the residual.
    ds_sp_path = r'D:\REP\svn_MicArrAlgorithm2\MA_PY\out\result_corr_null\ds_sp.wav'
    ds_inf_path = r'D:\REP\svn_MicArrAlgorithm2\MA_PY\out\result_corr_null\ds_inf.wav'
    # Load signal
    x1, rate = sf.read(ds_sp_path)
    x2, rate = sf.read(ds_inf_path)
    # Compensate the inter-signal delay before subtracting (max +/-512 samples).
    lag, x2_shift = determine_lag(x1,x2, max_lag = 512)
    # x1 = x1[:16000]
    # x2_shift = x2_shift[:16000]
    y = x1-x2_shift
    plt.plot(y)
    plt.show()
    '''
    corr1 = np.correlate(x1, x2, 'full')
    corr2 = np.correlate(y1, y2, 'full')
    print (corr1.shape)
    plt.plot(corr1)
    plt.plot(corr2)
    plt.show()
    '''
| [
"lavrentyev@speechpro.com"
] | lavrentyev@speechpro.com |
3fd23346262334fba48dee9f799388d525f000d3 | e9e6d21b802240944537298687f5327fca4390a1 | /biomass/models/nfkb_pathway/reaction_network.py | b995a39682410883e5f4cf070c547cb1749794c9 | [
"Apache-2.0"
] | permissive | biomass-dev/biomass | dda8be0e4d481cf8d6378c5631443f625afe8804 | 2cc3ee62feab23d9224f82f0d15a3fed7c970a11 | refs/heads/master | 2023-08-03T04:42:33.192893 | 2023-06-20T10:03:27 | 2023-06-20T10:03:27 | 215,932,388 | 9 | 6 | Apache-2.0 | 2023-08-30T20:10:18 | 2019-10-18T03:16:39 | Python | UTF-8 | Python | false | false | 289 | py | from typing import Dict, List
class ReactionNetwork(object):
    """Reaction indices grouped according to biological processes.

    Used for sensitivity analysis with target='reaction'; concrete models
    populate :attr:`reactions` with their own process groupings.
    """

    def __init__(self) -> None:
        # Mapping: process name -> list of reaction indices in that process.
        self.reactions: Dict[str, List[int]] = dict()
| [
"31299606+himoto@users.noreply.github.com"
] | 31299606+himoto@users.noreply.github.com |
a07fe7f010b5e816c382596e87dd1d9a64e75a29 | ab08ed332d23aa5c098a67588676bf6752ff99b9 | /semantic_segmentation/cli_interface.py | a26688eb42539860c0d3f31c25209d9aeefb9068 | [] | no_license | Mulham91/Deep-Learning-based-Pixel-wise-Lesion-Segmentationon-Oral-Squamous-Cell-Carcinoma-Images | 9bffb448265da755220961081dc21f2ae97c8694 | 17cf7751825fb755fcf77eb2b41317965a1a8189 | refs/heads/master | 2023-01-15T11:25:19.816351 | 2020-11-12T11:04:32 | 2020-11-12T11:04:32 | 312,130,175 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,043 | py | #!/usr/bin/env python
import sys
import argparse
from train import train
from data_utils.data_loader import verify_segmentation_dataset
def train_action(command_parser):
    """Register the `train` sub-command and bind it to train()."""
    parser = command_parser.add_parser('train')
    parser.add_argument("--model_name", type=str, required=True)
    parser.add_argument("--train_images", type=str, required=True)
    parser.add_argument("--train_annotations", type=str, required=True)
    parser.add_argument("--n_classes", type=int, required=True)
    parser.add_argument("--input_height", type=int, default=None)
    parser.add_argument("--input_width", type=int, default=None)
    # store_false: dataset verification is ON by default; passing this flag
    # disables it (hence the forwarding to verify_dataset below).
    parser.add_argument('--not_verify_dataset', action='store_false')
    parser.add_argument("--checkpoints_path", type=str, default=None)
    parser.add_argument("--epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=2)
    parser.add_argument('--validate', action='store_true')
    parser.add_argument("--val_images", type=str, default="")
    parser.add_argument("--val_annotations", type=str, default="")
    parser.add_argument("--val_batch_size", type=int, default=2)
    parser.add_argument("--load_weights", type=str, default=None)
    parser.add_argument('--auto_resume_checkpoint', action='store_true')
    parser.add_argument("--steps_per_epoch", type=int, default=512)
    parser.add_argument("--optimizer_name", type=str, default="adam")
    def action(args):
        # Forward every parsed CLI value to the training entry point.
        return train(model=args.model_name,
                     train_images=args.train_images,
                     train_annotations=args.train_annotations,
                     input_height=args.input_height,
                     input_width=args.input_width,
                     n_classes=args.n_classes,
                     verify_dataset=args.not_verify_dataset,
                     checkpoints_path=args.checkpoints_path,
                     epochs=args.epochs,
                     batch_size=args.batch_size,
                     validate=args.validate,
                     val_images=args.val_images,
                     val_annotations=args.val_annotations,
                     val_batch_size=args.val_batch_size,
                     auto_resume_checkpoint=args.auto_resume_checkpoint,
                     load_weights=args.load_weights,
                     steps_per_epoch=args.steps_per_epoch,
                     optimizer_name=args.optimizer_name)
    parser.set_defaults(func=action)
def predict_action(command_parser):
    """Register the `predict` sub-command.

    Dispatches on the input path extension: a single image file goes to
    predict(), anything else is treated as a directory for predict_multiple().

    NOTE(review): `predict` and `predict_multiple` are not imported anywhere
    in this file, so invoking this command raises NameError — the imports
    need to be added at the top of the module.
    """
    parser = command_parser.add_parser('predict')
    parser.add_argument("--checkpoints_path", type=str, required=True)
    parser.add_argument("--input_path", type=str, default="", required=True)
    parser.add_argument("--output_path", type=str, default="", required=True)
    def action(args):
        input_path_extension = args.input_path.split('.')[-1]
        if input_path_extension in ['jpg', 'jpeg', 'png']:
            return predict(inp=args.input_path, out_fname=args.output_path,
                           checkpoints_path=args.checkpoints_path)
        else:
            return predict_multiple(inp_dir=args.input_path,
                                    out_dir=args.output_path,
                                    checkpoints_path=args.checkpoints_path)
    parser.set_defaults(func=action)
def predict_video_action(command_parser):
    """Register the `predict_video` sub-command.

    NOTE(review): `predict_video` is not imported in this file, so invoking
    this command raises NameError — the import needs to be added.
    """
    parser = command_parser.add_parser('predict_video')
    # Default 0 selects the default camera when no input file is given.
    parser.add_argument("--input", type=str, default=0, required=False)
    parser.add_argument("--output_file", type=str, default="", required=False)
    parser.add_argument("--checkpoints_path", required=True)
    parser.add_argument("--display", action='store_true', required=False)
    def action(args):
        return predict_video(inp=args.input,
                             output=args.output_file,
                             checkpoints_path=args.checkpoints_path,
                             display=args.display,
                             )
    parser.set_defaults(func=action)
def verify_dataset_action(command_parser):
    """Register the `verify_dataset` sub-command, which sanity-checks that an
    image directory and its annotation directory form a valid dataset."""
    sub = command_parser.add_parser('verify_dataset')
    for flag, kind in (("--images_path", str),
                       ("--segs_path", str),
                       ("--n_classes", int)):
        sub.add_argument(flag, type=kind)
    def run(args):
        # Delegate to the dataset checker with the parsed CLI values.
        verify_segmentation_dataset(
            args.images_path, args.segs_path, args.n_classes)
    sub.set_defaults(func=run)
def visualize_dataset_action(command_parser):
    """Register the `visualize_dataset` sub-command.

    Bug fix: the original left a bare module-level `action(args)` that
    referenced an undefined module-level `parser` (and was never registered),
    so it could never run. It is now a proper command-registration function
    mirroring the other *_action helpers.

    NOTE(review): `visualize_segmentation_dataset` is not imported at the top
    of this file — add it to the data_utils.data_loader import.
    """
    parser = command_parser.add_parser('visualize_dataset')
    parser.add_argument("--images_path", type=str)
    parser.add_argument("--segs_path", type=str)
    parser.add_argument("--n_classes", type=int)
    parser.add_argument('--do_augment', action='store_true')
    def action(args):
        visualize_segmentation_dataset(args.images_path, args.segs_path,
                                       args.n_classes,
                                       do_augment=args.do_augment)
    parser.set_defaults(func=action)
def main():
    """CLI entry point: build the sub-command parser and dispatch.

    NOTE(review): only `train` and `verify_dataset` are registered here —
    the predict/predict_video commands defined above are never wired up;
    confirm whether that is intentional. There is also no
    `if __name__ == '__main__':` guard in this file, so main() is only
    reachable via an external console-script entry point.
    """
    assert len(sys.argv) >= 2, \
        "python -m keras_segmentation <command> <arguments>"
    main_parser = argparse.ArgumentParser()
    command_parser = main_parser.add_subparsers()
    # Add individual commands
    train_action(command_parser)
    verify_dataset_action(command_parser)
    args = main_parser.parse_args()
    # argparse stored the chosen sub-command's handler in args.func.
    args.func(args)
| [
"you@example.com"
] | you@example.com |
e24ab27b4f8c46be37dd4e0f1d28b3e80022a1e2 | 13ba35a1b41f56a6791f65ff06aa6a7c6a34b60a | /tests/tests/test_helpers.py | 92e262de402ec9360cf567d1ab20276edeb45599 | [
"BSD-3-Clause"
] | permissive | dldevinc/django-spectrum | ea60b63feec313c87efc19effe31d455b243c99e | 66e69ace7d508219eb69aee3b1ed421df2bf3013 | refs/heads/main | 2023-09-04T04:52:48.530659 | 2023-08-18T05:40:23 | 2023-08-18T05:40:23 | 166,020,131 | 3 | 0 | BSD-3-Clause | 2023-08-18T05:15:10 | 2019-01-16T10:22:37 | Python | UTF-8 | Python | false | false | 8,064 | py | from decimal import Decimal
import pytest
from spectrum.exceptions import InvalidColorTypeError, InvalidColorValueError
from spectrum.helpers import (
format_color,
format_color_byte,
format_color_bytes,
format_hexa,
format_rgba,
fraction_to_color_byte,
re_hexa,
re_rgba,
)
class TestHexRegex:
def test_hex_rgb(self):
match = re_hexa.fullmatch("CB0")
assert match is not None
assert match.group(1) == "CB0"
match = re_hexa.fullmatch("#bd8")
assert match is not None
assert match.group(1) == "bd8"
def test_hex_rgba(self):
match = re_hexa.fullmatch("da88")
assert match is not None
assert match.group(1) == "da88"
match = re_hexa.fullmatch("#FF00")
assert match is not None
assert match.group(1) == "FF00"
def test_hex_rrggbb(self):
match = re_hexa.fullmatch("BACCEF")
assert match is not None
assert match.group(1) == "BACCEF"
match = re_hexa.fullmatch("#808080")
assert match is not None
assert match.group(1) == "808080"
def test_hex_rrggbbaa(self):
match = re_hexa.fullmatch("2fcb60ff")
assert match is not None
assert match.group(1) == "2fcb60ff"
match = re_hexa.fullmatch("#ba200060")
assert match is not None
assert match.group(1) == "ba200060"
class TestRGBRegex:
def test_rgb(self):
match = re_rgba.fullmatch("rgb(255, 255, 0)")
assert match is not None
assert match.groups() == ("255", "255", "0", None)
def test_rgba(self):
match = re_rgba.fullmatch("rgba(64, 128, 192, 0.5)")
assert match is not None
assert match.groups() == ("64", "128", "192", "0.5")
def test_rgba_new_notation(self):
match = re_rgba.fullmatch("rgba(64 128 192 / 52.5%)")
assert match is not None
assert match.groups() == ("64", "128", "192", "52.5%")
class TestFractionToColorByte:
def test_opaque(self):
assert fraction_to_color_byte(1) == 255
def test_transparent(self):
assert fraction_to_color_byte(0) == 0
def test_float(self):
assert fraction_to_color_byte(0.7) == 178
def test_string(self):
assert fraction_to_color_byte("0.7") == 179 # no precision loss
def test_decimal(self):
assert fraction_to_color_byte(Decimal("0.7")) == 179
class TestFormatColorByte:
def test_none(self):
with pytest.raises(TypeError):
format_color_byte(None)
def test_empty_string(self):
with pytest.raises(ValueError):
format_color_byte("")
def test_nondigit_string(self):
with pytest.raises(ValueError):
format_color_byte("FF")
def test_string(self):
assert format_color_byte("64") is 64
def test_int(self):
assert format_color_byte(64) is 64
def test_float(self):
with pytest.raises(TypeError):
format_color_byte(64.5)
def test_min_value(self):
assert format_color_byte("0") is 0
def test_max_value(self):
assert format_color_byte("255") is 255
def test_below_bounds(self):
with pytest.raises(OverflowError):
format_color_byte("-1")
def test_above_bounds(self):
with pytest.raises(OverflowError):
format_color_byte("256")
class TestFormatColorBytes:
def test_insufficient_length(self):
with pytest.raises(OverflowError):
format_color_bytes([128, 192])
def test_excessive_length(self):
with pytest.raises(OverflowError):
format_color_bytes([128, 192, 64, 0, 128])
def test_below_bounds(self):
with pytest.raises(OverflowError):
format_color_bytes([0, -1, 0])
def test_above_bounds(self):
with pytest.raises(OverflowError):
format_color_bytes([0, 256, 0])
def test_non_numeric_value(self):
with pytest.raises(ValueError):
format_color_bytes([128, "abc", 64, 0, 128])
def test_opacity_added(self):
assert format_color_bytes([128, "92", 64]) == (128, 92, 64, 255)
def test_stability(self):
input = ["192", "128", "64"]
output = format_color_bytes(input)
assert format_color_bytes(output) == output == (192, 128, 64, 255)
class TestFormatRGBA:
def test_short(self):
assert format_rgba(["192", "128", "64"]) == (192, 128, 64, 255)
def test_transparent(self):
assert format_rgba(["192", "128", "64", "0.2"]) == (192, 128, 64, 51)
def test_opaque(self):
assert format_rgba([94, 72, 156]) == (94, 72, 156, 255)
assert format_rgba([94, 72, 156, 1]) == (94, 72, 156, 255)
def test_fraction_opacity(self):
assert format_rgba([92, 40, 128, 0.5]) == (92, 40, 128, 128)
def test_percentage(self):
assert format_rgba([92, 40, 128, '70%']) == (92, 40, 128, 179)
class TestFormatHEXA:
def test_rgb(self):
assert format_hexa("bda") == (187, 221, 170, 255)
def test_rgba(self):
assert format_hexa("4fcd") == (68, 255, 204, 221)
def test_rrggbb(self):
assert format_hexa("60B0C4") == (96, 176, 196, 255)
def test_rrggbbaa(self):
assert format_hexa("2BEA40D0") == (43, 234, 64, 208)
class TestFormatColor:
def test_short_hex(self):
assert format_color("aac") == (170, 170, 204, 255)
assert format_color("#da0") == (221, 170, 0, 255)
def test_short_hexa(self):
assert format_color("cde0") == (204, 221, 238, 0)
assert format_color("#ff08") == (255, 255, 0, 136)
def test_hex(self):
assert format_color("DDA0C4") == (221, 160, 196, 255)
assert format_color("#2F4BEF") == (47, 75, 239, 255)
def test_hexa(self):
assert format_color("C0B0D080") == (192, 176, 208, 128)
assert format_color("#4B6D321A") == (75, 109, 50, 26)
def test_rgb(self):
assert format_color("rgb(75, 109, 26)") == (75, 109, 26, 255)
def test_rgba(self):
assert format_color("rgba(98, 212, 204, 0.89)") == (98, 212, 204, 227)
def test_short_iterable(self):
assert format_color(["67", "120", "64"]) == (67, 120, 64, 255)
def test_iterable(self):
assert format_color([32, 64, 128, 72]) == (32, 64, 128, 72)
def test_insufficient_hex_length(self):
with pytest.raises(InvalidColorValueError):
format_color("FF")
def test_excessive_hex_length(self):
with pytest.raises(InvalidColorValueError):
format_color("FFAABBDDEE")
def test_non_hex(self):
with pytest.raises(InvalidColorValueError):
format_color("XYZ")
def test_insufficient_rgb_length(self):
with pytest.raises(InvalidColorValueError):
format_color("rgb(128, 192)")
def test_excessive_rgb_length(self):
with pytest.raises(InvalidColorValueError):
format_color("rgb(32, 64, 92, 128, 255)")
def test_rgb_negative(self):
with pytest.raises(InvalidColorValueError):
format_color("rgb(128, -32, 60)")
def test_rgb_overbound(self):
with pytest.raises(InvalidColorValueError):
format_color("rgb(128, 192, 999)")
def test_rgba_negative_opacity(self):
with pytest.raises(InvalidColorValueError):
format_color("rgb(128, 32, 60, -0.5)")
def test_rgba_opacity_overbound(self):
with pytest.raises(InvalidColorValueError):
format_color("rgba(128, 192, 0, 1.5)")
def test_insufficient_iterable_length(self):
with pytest.raises(InvalidColorValueError):
format_color([64, 128])
def test_excessive_iterable_length(self):
with pytest.raises(InvalidColorValueError):
format_color([128, 96, 48, 255, 255])
def test_invalid_type(self):
with pytest.raises(InvalidColorTypeError):
format_color(None)
with pytest.raises(InvalidColorTypeError):
format_color(192)
| [
"pix666@ya.ru"
] | pix666@ya.ru |
0ddceaa4205fd6cdf94a419ca62cfc4e9c5534dd | 22ff0921aee459abd0a3c15281de80ba6b4035bf | /March/day 10 filefand/writebinaryfile.py | 79f9f619a263d7cf34ccffa69cb421d7ec836a30 | [] | no_license | BhushanTayade88/Core-Python | b0516f234b866682931af95b723adb1269fb946a | f687e4029e3a3aaf751538604dfd06386084252b | refs/heads/master | 2023-08-03T02:09:38.536580 | 2021-10-05T17:25:20 | 2021-10-05T17:25:20 | 413,910,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | f = open("emo.jpg","rb")
print("file opened")
copy=f.read()
f.close()
print("file is closedd")
print("file closed")
nf=open("emo2.jpg","wb")
print("new file open")
nf.write(copy)
nf.close()
print("new file is closed ")
| [
"tayadebhushan55@gmail.com"
] | tayadebhushan55@gmail.com |
f91a4af0af8738551ddd58b1d20701c183c3fca4 | b57b0a14df5c6841f04cccb7b02ad04afbca18f8 | /tokumx/tests/conftest.py | 07db4ebe9b173d851efc0f9d166f5c6401272b64 | [
"AFL-3.0",
"BSD-3-Clause-Modification",
"LGPL-3.0-only",
"Unlicense",
"LGPL-2.1-only",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | zeroc0d3/integrations-core | d9c99803c049668b7f9f9c796d338e343d3d46ee | 634d567f3c38d32aabb3f4c16b50bcfa8a4ae0fb | refs/heads/master | 2021-09-28T18:37:00.650406 | 2021-09-13T11:59:45 | 2021-09-13T11:59:45 | 199,758,958 | 0 | 0 | BSD-3-Clause | 2019-07-31T02:01:25 | 2019-07-31T02:01:24 | null | UTF-8 | Python | false | false | 1,526 | py | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
from copy import deepcopy
import pytest
from datadog_checks.dev import docker_run
from datadog_checks.tokumx import TokuMX
from datadog_checks.tokumx.vendor import pymongo
from . import common
@pytest.fixture(scope="session")
def dd_environment():
"""
Start a cluster with one master, one replica and one unhealthy replica and
stop it after the tests are done.
If there's any problem executing docker-compose, let the exception bubble
up.
"""
compose_dir = os.path.join(common.HERE, 'compose')
with docker_run(
compose_file=os.path.join(compose_dir, 'docker-compose.yaml'),
log_patterns='admin web console waiting for connections',
env_vars={'COMPOSE_DIR': compose_dir},
):
set_up_tokumx()
yield common.INSTANCE
@pytest.fixture
def check():
return TokuMX('tokumx', {}, {})
@pytest.fixture
def instance():
return deepcopy(common.INSTANCE)
def set_up_tokumx():
cli = pymongo.MongoClient(
common.TOKUMX_SERVER, socketTimeoutMS=30000, read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED
)
foos = []
for _ in range(70):
foos.append({'1': []})
foos.append({'1': []})
foos.append({})
bars = []
for _ in range(50):
bars.append({'1': []})
bars.append({})
db = cli['test']
db.foo.insert_many(foos)
db.bar.insert_many(bars)
| [
"noreply@github.com"
] | zeroc0d3.noreply@github.com |
9ee45cef427b92dc2368563e25db132f1046a80f | d08cf46d3e16ab8e6a958731168469ba38daf069 | /sandbox/kdv.py | 7c7536cbb6e3c7b1398b0a73cc5dac09cb962490 | [
"BSD-2-Clause"
] | permissive | spectralDNS/shenfun | ce808edc5258c896f2cccfbd88e67153e3f621c9 | bcda39d8d8e4741df1cafe719d81733cc1024def | refs/heads/master | 2023-07-27T20:29:57.075970 | 2023-07-11T12:33:04 | 2023-07-11T12:33:04 | 79,914,066 | 190 | 46 | BSD-2-Clause | 2022-05-11T19:10:33 | 2017-01-24T13:29:02 | Python | UTF-8 | Python | false | false | 2,876 | py | import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import PolyCollection
from shenfun import *
from mpl_toolkits.mplot3d import axes3d
N = 256
T = FunctionSpace(N, 'F', dtype='d')
#Tp = T
Tp = T.get_dealiased()
x = T.points_and_weights()[0]
u = TrialFunction(T)
v = TestFunction(T)
k = T.wavenumbers(scaled=True, eliminate_highest_freq=True)
u_ = Array(T)
Up = Array(Tp)
u_hat = Function(T)
def LinearRHS(self, u, **params):
return -Dx(u, 0, 3)
def NonlinearRHS(self, u, u_hat, rhs, **params):
rhs.fill(0)
Up[:] = Tp.backward(u_hat, Up)
rhs = Tp.forward(-0.5*Up**2, rhs)
rhs *= 1j*k
return rhs
# initialize
A = 25.
B = 16.
u_[:] = 3*A**2/np.cosh(0.5*A*(x-np.pi+2))**2 + 3*B**2/np.cosh(0.5*B*(x-np.pi+1))**2
u_hat = T.forward(u_, u_hat)
data = []
tdata = []
plt.figure()
def update(self, u, u_hat, t, tstep, plot_step, **params):
if tstep % plot_step == 0 and plot_step > 0:
u = T.backward(u_hat, u)
plt.plot(x, u)
plt.draw()
plt.pause(1e-6)
data.append(u.copy())
dt = 0.01/N**2
end_time = 0.006
par = {'plot_step': int(end_time/25/dt)}
integrator = ETDRK4(T, L=LinearRHS, N=NonlinearRHS, update=update, **par)
integrator.setup(dt)
u_hat = integrator.solve(u_, u_hat, dt, (0, end_time))
t = end_time
s = []
for d in data:
s.append(np.vstack((x, d)).T)
N = len(data)
tdata = np.linspace(0, end_time, N)
ddata = np.array(data)
fig = plt.figure(figsize=(8, 3))
#ax = axes3d.Axes3D(fig)
ax = fig.add_subplot(projection='3d')
X, Y = np.meshgrid(x, tdata)
ax.plot_wireframe(X, Y, ddata, cstride=1000)
ax.set_xlim(0, 2*np.pi)
ax.set_ylim(0, t)
ax.set_zlim(0, 2000)
ax.view_init(65, -105)
ax.set_zticks([0, 2000])
ax.grid()
fig2 = plt.figure(figsize=(8,3))
ax2 = fig2.add_subplot(projection='3d')
poly = PolyCollection(s, facecolors=(1, 1, 1, 1), edgecolors='b')
ax2.add_collection3d(poly, zs=tdata, zdir='y')
ax2.set_xlim3d(0, 2*np.pi)
ax2.set_ylim3d(0, t)
ax2.set_zlim3d(0, 2000)
ax2.view_init(65, -105)
ax2.set_zticks([0, 2000])
ax2.grid()
fig3 = plt.figure(figsize=(8, 3))
ax3 = fig3.add_subplot(projection='3d')
X, Y = np.meshgrid(x, tdata)
ax3.plot_surface(X, Y, ddata, cstride=1000, rstride=1, color='w')
ax3.set_xlim(0, 2*np.pi)
ax3.set_ylim(0, t)
ax3.set_zlim(0, 2000)
ax3.view_init(65, -105)
ax3.set_zticks([0, 2000])
ax3.grid()
fig4 = plt.figure(figsize=(8,3))
ax4 = fig4.add_subplot(projection='3d')
for i in range(len(tdata)):
ax4.plot(x, ddata[i], tdata[i])
ax4.view_init(65, -105)
ax4.set_zticks([0, 2000])
ax4.grid()
fig5 = plt.figure(facecolor='k')
ax5 = fig5.add_subplot(111, facecolor='k')
N = len(tdata)
for i in range(N):
offset = (N-i-1)*200
ax5.plot(x, ddata[N-i-1]+offset, 'w', lw=2, zorder=(i+1)*2)
ax5.fill_between(x, ddata[N-i-1]+offset, offset, facecolor='k', lw=0, zorder=(i+1)*2-1)
fig5.savefig('KdV.png')
plt.show()
| [
"mikaem@math.uio.no"
] | mikaem@math.uio.no |
e2c2d430af0dbd4f42ca76c107638345f45fd6b9 | 90a7efad0e02634fe46602cf6a9c42ce72af1823 | /udify/dataset_readers/universal_dependencies.py | cbf693061fe0bcae8de295ecbaadcd082e7efca9 | [
"MIT"
] | permissive | foxik/udify | 563237686d1b0833ed636e48ba5cb20dced49be6 | 99f9a8d220edf808c7f2d7e32112227f21c7084a | refs/heads/master | 2020-09-02T16:26:05.757090 | 2019-11-02T23:21:03 | 2019-11-02T23:21:03 | 219,258,945 | 0 | 0 | MIT | 2019-11-03T06:03:58 | 2019-11-03T06:03:58 | null | UTF-8 | Python | false | false | 6,063 | py | """
A Dataset Reader for Universal Dependencies, with support for multiword tokens and special handling for NULL "_" tokens
"""
from typing import Dict, Tuple, List, Any, Callable
from overrides import overrides
from udify.dataset_readers.parser import parse_line, DEFAULT_FIELDS
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, TextField, SequenceLabelField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token
from udify.dataset_readers.lemma_edit import gen_lemma_rule
import logging
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def lazy_parse(text: str, fields: Tuple[str, ...]=DEFAULT_FIELDS):
for sentence in text.split("\n\n"):
if sentence:
# TODO: upgrade conllu library
yield [parse_line(line, fields)
for line in sentence.split("\n")
if line and not line.strip().startswith("#")]
@DatasetReader.register("udify_universal_dependencies")
class UniversalDependenciesDatasetReader(DatasetReader):
def __init__(self,
token_indexers: Dict[str, TokenIndexer] = None,
lazy: bool = False) -> None:
super().__init__(lazy)
self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
with open(file_path, 'r') as conllu_file:
logger.info("Reading UD instances from conllu dataset at: %s", file_path)
for annotation in lazy_parse(conllu_file.read()):
# CoNLLU annotations sometimes add back in words that have been elided
# in the original sentence; we remove these, as we're just predicting
# dependencies for the original sentence.
# We filter by None here as elided words have a non-integer word id,
# and are replaced with None by the conllu python library.
multiword_tokens = [x for x in annotation if x["multi_id"] is not None]
annotation = [x for x in annotation if x["id"] is not None]
if len(annotation) == 0:
continue
def get_field(tag: str, map_fn: Callable[[Any], Any] = None) -> List[Any]:
map_fn = map_fn if map_fn is not None else lambda x: x
return [map_fn(x[tag]) if x[tag] is not None else "_" for x in annotation if tag in x]
# Extract multiword token rows (not used for prediction, purely for evaluation)
ids = [x["id"] for x in annotation]
multiword_ids = [x["multi_id"] for x in multiword_tokens]
multiword_forms = [x["form"] for x in multiword_tokens]
words = get_field("form")
lemmas = get_field("lemma")
lemma_rules = [gen_lemma_rule(word, lemma)
if lemma != "_" else "_"
for word, lemma in zip(words, lemmas)]
upos_tags = get_field("upostag")
xpos_tags = get_field("xpostag")
feats = get_field("feats", lambda x: "|".join(k + "=" + v for k, v in x.items())
if hasattr(x, "items") else "_")
heads = get_field("head")
dep_rels = get_field("deprel")
dependencies = list(zip(dep_rels, heads))
yield self.text_to_instance(words, lemmas, lemma_rules, upos_tags, xpos_tags,
feats, dependencies, ids, multiword_ids, multiword_forms)
@overrides
def text_to_instance(self, # type: ignore
words: List[str],
lemmas: List[str] = None,
lemma_rules: List[str] = None,
upos_tags: List[str] = None,
xpos_tags: List[str] = None,
feats: List[str] = None,
dependencies: List[Tuple[str, int]] = None,
ids: List[str] = None,
multiword_ids: List[str] = None,
multiword_forms: List[str] = None) -> Instance:
fields: Dict[str, Field] = {}
tokens = TextField([Token(w) for w in words], self._token_indexers)
fields["tokens"] = tokens
names = ["upos", "xpos", "feats", "lemmas"]
all_tags = [upos_tags, xpos_tags, feats, lemma_rules]
for name, field in zip(names, all_tags):
if field:
fields[name] = SequenceLabelField(field, tokens, label_namespace=name)
if dependencies is not None:
# We don't want to expand the label namespace with an additional dummy token, so we'll
# always give the 'ROOT_HEAD' token a label of 'root'.
fields["head_tags"] = SequenceLabelField([x[0] for x in dependencies],
tokens,
label_namespace="head_tags")
fields["head_indices"] = SequenceLabelField([int(x[1]) for x in dependencies],
tokens,
label_namespace="head_index_tags")
fields["metadata"] = MetadataField({
"words": words,
"upos_tags": upos_tags,
"xpos_tags": xpos_tags,
"feats": feats,
"lemmas": lemmas,
"lemma_rules": lemma_rules,
"ids": ids,
"multiword_ids": multiword_ids,
"multiword_forms": multiword_forms
})
return Instance(fields)
| [
"hyperparticle@gmail.com"
] | hyperparticle@gmail.com |
89e171105b6e4fd444900f215d2561e8181b50c7 | 5a628296aea2e3b908e634f8ad7f0d9d49750cf2 | /3dod/configs/car_cfg20_eval_ebm3_test_seq0012.py | 4a7f9f54fd90c0ce2f278918004038ba74020d10 | [
"MIT"
] | permissive | TianhaoFu/ebms_3dod | 521cf16946abaef77d005e7ee2e5b0a86d1a36fd | b8a33577c079d9a587bca289a707a8b1b3cb4834 | refs/heads/main | 2023-06-03T18:34:00.670739 | 2021-06-21T13:51:53 | 2021-06-21T13:51:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,720 | py | model = dict(
type='SingleStageDetector20',
backbone=dict(
type='SimpleVoxel',
num_input_features=4,
use_norm=True,
num_filters=[32, 64],
with_distance=False),
neck=dict(
type='SpMiddleFHD',
output_shape=[40, 1600, 1408],
num_input_features=4,
num_hidden_features=64 * 5,),
bbox_head=dict(
type='SSDRotateHead',
num_class=1,
num_output_filters=256,
num_anchor_per_loc=2,
use_sigmoid_cls=True,
encode_rad_error_by_sin=True,
use_direction_classifier=True,
box_code_size=7,),
extra_head=dict(
type='PSWarpHead',
grid_offsets = (0., 40.),
featmap_stride=.4,
in_channels=256,
num_class=1,
num_parts=28,)
)
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
pos_iou_thr=0.6,
neg_iou_thr=0.45,
min_pos_iou=0.45, # this one is to limit the force assignment
ignore_iof_thr=-1,
similarity_fn ='NearestIouSimilarity'
),
nms=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
nms_thr=0.7,
min_bbox_size=0
),
allowed_border=0,
pos_weight=-1,
smoothl1_beta=1 / 9.0,
debug=False),
extra=dict(
assigner=dict(
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1,
similarity_fn ='RotateIou3dSimilarity'
)
)
)
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=100,
nms_thr=0.7,
min_bbox_size=0
),
extra=dict(
score_thr=0.3, nms=dict(type='nms', iou_thr=0.1), max_per_img=100, EBM_guided=False, EBM_refine=True, EBM_refine_steps=10)
)
# # dataset settings
# dataset_type = 'KittiLiDAR'
# data_root = '/root/ebms_3dod/3dod/data/KITTI/'
# img_norm_cfg = dict(
# mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# data = dict(
# imgs_per_gpu=2,
# # workers_per_gpu=4,
# workers_per_gpu=1,
# train=dict(
# type=dataset_type,
# root=data_root + 'object/training/',
# ann_file=data_root + 'ImageSets/train.txt',
# img_prefix=None,
# img_scale=(1242, 375),
# img_norm_cfg=img_norm_cfg,
# size_divisor=32,
# flip_ratio=0.5,
# with_mask=False,
# with_label=True,
# with_point=True,
# class_names = ['Car', 'Van'],
# augmentor=dict(
# type='PointAugmentor',
# root_path=data_root,
# info_path=data_root + 'kitti_dbinfos_trainval.pkl',
# sample_classes=['Car'],
# min_num_points=5,
# sample_max_num=15,
# removed_difficulties=[-1],
# global_rot_range=[-0.78539816, 0.78539816],
# gt_rot_range=[-0.78539816, 0.78539816],
# center_noise_std=[1., 1., .5],
# scale_range=[0.95, 1.05]
# ),
# generator=dict(
# type='VoxelGenerator',
# voxel_size=[0.05, 0.05, 0.1],
# point_cloud_range=[0, -40., -3., 70.4, 40., 1.],
# max_num_points=5,
# max_voxels=20000
# ),
# anchor_generator=dict(
# type='AnchorGeneratorStride',
# sizes=[1.6, 3.9, 1.56],
# anchor_strides=[0.4, 0.4, 1.0],
# anchor_offsets=[0.2, -39.8, -1.78],
# rotations=[0, 1.57],
# ),
# anchor_area_threshold=1,
# out_size_factor=8,
# test_mode=False),
#
# val=dict(
# type=dataset_type,
# root=data_root + 'object/testing/',
# ann_file=data_root + 'ImageSets/test.txt',
# img_prefix=None,
# img_scale=(1242, 375),
# img_norm_cfg=img_norm_cfg,
# size_divisor=32,
# flip_ratio=0,
# with_mask=False,
# with_label=False,
# with_point=True,
# class_names = ['Car'],
# generator=dict(
# type='VoxelGenerator',
# voxel_size=[0.05, 0.05, 0.1],
# point_cloud_range=[0., -40., -3., 70.4, 40., 1.],
# max_num_points=5,
# max_voxels=20000
# ),
# anchor_generator=dict(
# type='AnchorGeneratorStride',
# sizes=[1.6, 3.9, 1.56],
# anchor_strides=[0.4, 0.4, 1.0],
# anchor_offsets=[0.2, -39.8, -1.78],
# rotations=[0, 1.57],
# ),
# anchor_area_threshold=1,
# out_size_factor=8,
# test_mode=True),
# )
# dataset settings
dataset_type = 'KittiVideo'
data_root = '/root/ebms_3dod/3dod/data/KITTI/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=2,
# workers_per_gpu=4,
workers_per_gpu=1,
val=dict(
type=dataset_type,
root=data_root + 'tracking/testing/',
calib_dir = 'calib/0012.txt',
img_dir = 'image_02/0012',
lidar_dir = 'velodyne/0012',
ann_file=data_root + 'ImageSets/test.txt',
img_prefix=None,
img_scale=(1242, 375),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_label=False,
with_point=True,
class_names = ['Car'],
generator=dict(
type='VoxelGenerator',
voxel_size=[0.05, 0.05, 0.1],
point_cloud_range=[0., -40., -3., 70.4, 40., 1.],
max_num_points=5,
max_voxels=20000
),
anchor_generator=dict(
type='AnchorGeneratorStride',
sizes=[1.6, 3.9, 1.56],
anchor_strides=[0.4, 0.4, 1.0],
anchor_offsets=[0.2, -39.8, -1.78],
rotations=[0, 1.57],
),
anchor_area_threshold=1,
out_size_factor=8,
test_mode=True),
)
# optimizer
optimizer = dict(
type='adam_onecycle', lr=0.003, weight_decay=0.01,
grad_clip=dict(max_norm=10, norm_type=2)
)
# learning policy
lr_config = dict(
policy='onecycle',
moms = [0.95, 0.85],
div_factor = 10,
pct_start = 0.4
)
checkpoint_config = dict(interval=5)
log_config = dict(interval=50)
total_epochs = 80
dist_params = dict(backend='nccl')
log_level = 'INFO'
# work_dir = '../saved_model_vehicle
work_dir = '/root/ebms_3dod/3dod/saved_model_vehicle20'
load_from = None
resume_from = None
workflow = [('train', 1)]
SA_SSD_pretrained = True
SA_SSD_fixed = True
USE_EBM = True
| [
"fregu856@gmail.com"
] | fregu856@gmail.com |
754a312303ebd319014000b3257ab320ff38a7ee | 57c13a2500561e72e382489c23e9c0b8347be605 | /concurrency/simple_interval_sum_example.py | c4ccc0e400215264a4ef1daac9bc80a5f4004a70 | [] | no_license | linheimx/python_master | 7403d7af639e31810c90b2fba14972a6d3dcfcec | 7fb7c467bedaff1515975807552a0ba05e30f15e | refs/heads/master | 2021-01-21T21:54:55.537994 | 2016-12-23T15:05:14 | 2016-12-23T15:05:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | """
1 부터 200000000 더하기
real 0m16.303s
user 0m16.271s
sys 0m0.019s
"""
TOTAL_VALUE = 0
def interval_sum(start_num, last_num):
global TOTAL_VALUE
total = 0
for x in range(start_num, last_num + 1):
total += x
TOTAL_VALUE += total
if __name__ == "__main__":
interval_sum(1, 200000000)
print(TOTAL_VALUE)
| [
"bonwho09@gmail.com"
] | bonwho09@gmail.com |
7c9abd888ec8c97b3f20c8e59ec550f5a3f2fd02 | 1362bc36e86f8216d405b547f5f45874ac332b1e | /Google/wordBreak2.py | c134aff4c0d863141d8216c688860f6d1559f892 | [] | no_license | zhuolikevin/Algorithm-Practices-Python | ed5ca06758e35d910ffbea011b414b3c57fd6c7a | 1df8d93a8ecb8627899aadddb5dd5c5d0b144cdf | refs/heads/master | 2021-01-22T01:04:31.536327 | 2016-01-15T13:31:07 | 2016-01-15T13:31:07 | 32,602,632 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | class Solution(object):
def wordBreak(self, s, wordDict):
if not wordDict:
return []
# self.find = False
words = self.helper(wordDict, s)
res = []
for word in words:
temp = ' '.join(word)
res.append(temp)
return res
def helper(self, dic, s):
if not dic: return [[]]
if not s:
# self.find = True
return [[]]
res = []
words = []
for i in dic:
words.append(i)
for word in words:
# if self.find:
# break
i = 0
while i < len(word):
if i >= len(s) or s[i] != word[i]:
break
i += 1
if i == len(word):
temp = [word]
dic.remove(word)
remain = self.helper(dic, s[i:])
for solu in remain:
res.append(temp + solu)
dic.append(word)
return res
solution = Solution()
s = 'catsanddog'
wordDict = ["cat", "cats", "and", "sand", "dog"]
print solution.wordBreak(s, wordDict)
| [
"lizhuogo@gmail.com"
] | lizhuogo@gmail.com |
5b32f01d57e592dc5d14905ec3b40ccd35e0ed5d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_caretakers.py | 74fcd5b95fc2e398572ed29030a5ed02a77a231b | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py |
from xai.brain.wordbase.nouns._caretaker import _CARETAKER
#calss header
class _CARETAKERS(_CARETAKER, ):
def __init__(self,):
_CARETAKER.__init__(self)
self.name = "CARETAKERS"
self.specie = 'nouns'
self.basic = "caretaker"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
a6687b56092e9d3c2f38e3fbb4e8ddac55c5f439 | 40ba3112a116b361673732efc10402a067322ad1 | /PycharmProjects/untitled/OO/newClass.py | 939b5ed323117618000643bb46f2be85ab8e298b | [] | no_license | oumingwang/----Python | 622be90adffefcab1696bb145b171fa9a8bff5b7 | 003c0b7880de2b9e0737120bc15bf6eaeb7a644f | refs/heads/master | 2020-07-03T03:16:13.687118 | 2017-04-08T16:55:01 | 2017-04-08T16:55:01 | 74,200,264 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | class MyClass (object):
"hello world "
version = 1.1
def MyVersion(self):
pass
c = MyClass()
print c.__class__.__name__
print c.__doc__
print c.__dict__
print c.__module__
print c.__class__.__base__
| [
"474978390@qq.com"
] | 474978390@qq.com |
808087af7bfe146f810c4487b617e92f6a2db462 | 5237e7939a668261d573c56d300101742b4dfe0d | /38-Abstraction adn Encapsulation.py | 9bc9763b8fa0371e64f2c0c938e11137f6f15bca | [] | no_license | imAtulSharma/Python-Tutorial-Files | fa5dcf855c4fc6008028e680bfe4c7691bd13f25 | 2245a171b9d7146f349f84027f413d796fc99a89 | refs/heads/master | 2022-12-02T00:13:53.231523 | 2020-07-23T05:27:15 | 2020-07-23T05:27:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 721 | py |
class Employee:
no_of_leaves = 8
def __init__(self, aname, asalary, arole):
self.name = aname
self.salary = asalary
self.role = arole
def printdetails(self):
return f"The Name is {self.name}. Salary is {self.salary} and role is {self.role}"
@classmethod
def change_leaves(cls, newleaves):
cls.no_of_leaves = newleaves
@classmethod
def from_dash(cls, string):
return cls(*string.split("-"))
@staticmethod
def printgood(string):
print("This is good " + string)
atul = Employee("atul", 255, "Instructor")
rohan = Employee("Rohan", 455, "Student")
karan = Employee.from_dash("Karan-480-Student")
Employee.printgood("Rohan")
| [
"atulsharma20may@gmail.com"
] | atulsharma20may@gmail.com |
67334df6315dcded9d30edef4d02cb7d9a0f739c | b509ef07d752e987f4cb84d1abd4c3a98488a6c7 | /resources/lib/streamlink/plugins/tamago.py | 0b6dc7197643d4c8de27269ff87d6ea19785b867 | [
"BSD-2-Clause"
] | permissive | Twilight0/script.module.streamlink.base | d91245d1a43d6b3191b62a6eb4b1cf70598ed23e | c1e4628715a81806586b10323b8cb01424bbb6fc | refs/heads/master | 2021-01-21T04:32:41.658823 | 2020-09-07T20:56:29 | 2020-09-07T20:56:29 | 101,915,967 | 6 | 4 | BSD-2-Clause | 2018-01-14T15:20:47 | 2017-08-30T18:31:47 | Python | UTF-8 | Python | false | false | 1,571 | py | import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
from streamlink.stream import HTTPStream
from streamlink import NoStreamsError
class Tamago(Plugin):
_url_re = re.compile(r"https?://(?:player\.)?tamago\.live/w/(?P<id>\d+)")
_api_url_base = "https://player.tamago.live/api/rooms/{id}"
_api_response_schema = validate.Schema({
u"status": 200,
u"message": u"Success",
u"data": {
u"room_number": validate.text,
u"stream": {validate.text: validate.url()}
}
})
_stream_qualities = {
u"150": "144p",
u"350": "360p",
u"550": "540p",
u"900": "720p",
}
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url) is not None
def _get_streams(self):
user_id = self._url_re.match(self.url).group('id')
try:
api_response = self.session.http.get(self._api_url_base.format(id=user_id))
streams = self.session.http.json(api_response, schema=self._api_response_schema)['data']['stream']
except Exception:
raise NoStreamsError(self.url)
unique_stream_urls = []
for stream in streams.keys():
if streams[stream] not in unique_stream_urls:
unique_stream_urls.append(streams[stream])
quality = self._stream_qualities[stream] if stream in self._stream_qualities.keys() else "720p+"
yield quality, HTTPStream(self.session, streams[stream])
__plugin__ = Tamago
| [
"twilight@freemail.gr"
] | twilight@freemail.gr |
95932558356e481f28e177b43d77b26fe17a4990 | 8f3336bbf7cd12485a4c52daa831b5d39749cf9b | /Python/remove-duplicates-from-sorted-list.py | 720a0711be4121ce4774e70948432116eff69861 | [] | no_license | black-shadows/LeetCode-Topicwise-Solutions | 9487de1f9a1da79558287b2bc2c6b28d3d27db07 | b1692583f7b710943ffb19b392b8bf64845b5d7a | refs/heads/master | 2022-05-30T22:16:38.536678 | 2022-05-18T09:18:32 | 2022-05-18T09:18:32 | 188,701,704 | 240 | 110 | null | 2020-05-08T13:04:36 | 2019-05-26T15:41:03 | C++ | UTF-8 | Python | false | false | 912 | py | # Time: O(n)
# Space: O(1)
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
cur = head
while cur:
runner = cur.next
while runner and runner.val == cur.val:
runner = runner.next
cur.next = runner
cur = runner
return head
def deleteDuplicates2(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head: return head
if head.next:
if head.val == head.next.val:
head = self.deleteDuplicates2(head.next)
else:
head.next = self.deleteDuplicates2(head.next)
return head
| [
"noreply@github.com"
] | black-shadows.noreply@github.com |
013dba1a446b2feb94b19ec9b2abcba9d5432b7e | 002c14cd622b4890cce1c243065cebe39e2302ec | /LeetCode/105-Construct-Binary-Tree-from-Preorder-and-Inorder-Traversal/Construct-Binary-Tree-from-Preorder-and-Inorder-Traversal.py | e8b16cfce36d0ee1affa25b7da3c6475768bcd33 | [
"MIT"
] | permissive | hscspring/The-DataStructure-and-Algorithms | 6200eba031eac51b13e320e1fc9f204644933e00 | e704a92e091f2fdf5f27ec433e0e516ccc787ebb | refs/heads/master | 2022-08-29T18:47:52.378884 | 2022-08-25T16:22:44 | 2022-08-25T16:22:44 | 201,743,910 | 11 | 3 | MIT | 2021-04-20T18:28:47 | 2019-08-11T09:26:34 | Python | UTF-8 | Python | false | false | 597 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
        """Rebuild a binary tree from its preorder and inorder traversals.

        preorder[0] is always the subtree root; its position in ``inorder``
        splits the remaining values into left and right subtrees.
        NOTE(review): index() plus slicing makes this O(n^2) worst case; a
        value->index map over ``inorder`` would make it O(n).
        """
        if not preorder or not inorder:
            return None
        # Position of the root value inside the inorder sequence.
        root_index = inorder.index(preorder[0])
        root = TreeNode(preorder[0])
        # Left subtree: next root_index preorder values / inorder prefix.
        root.left = self.buildTree(preorder[1: root_index+1], inorder[: root_index])
        # Right subtree: everything after the root in both sequences.
        root.right = self.buildTree(preorder[root_index+1: ], inorder[root_index+1: ])
return root | [
"haoshaochun@gmail.com"
] | haoshaochun@gmail.com |
ed9b903ef6ff142ea75af2d4c6f31beb3fee10d2 | b0eef0efd10556a4b054574fdd2d43124cb0856b | /npbench/benchmarks/polybench/durbin/durbin_cupy.py | 1a84a9ed2fc45b03004a56129ddb6dfd0aa73f2a | [
"BSD-3-Clause"
] | permissive | learning-chip/npbench | 140d38be2095b54393de6e0008264b54b7cf686b | f2f545afe3603d5c8f1771f26d660f25ce4a3cda | refs/heads/main | 2023-05-10T09:54:52.719759 | 2021-05-31T12:09:48 | 2021-05-31T12:09:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | import cupy as np
def kernel(r):
    """Levinson-Durbin recursion (PolyBench ``durbin``).

    Solves the Yule-Walker-style Toeplitz system for the 1-D input
    vector ``r`` and returns the solution vector ``y``.
    """
    n = r.shape[0]
    y = np.empty_like(r)
    y[0] = -r[0]
    alpha = -r[0]   # current reflection coefficient
    beta = 1.0      # prediction error accumulator
    for k in range(1, n):
        beta = beta * (1.0 - alpha * alpha)
        alpha = -(r[k] + np.dot(np.flip(r[:k]), y[:k])) / beta
        # Update the prefix in place, then append the new coefficient.
        y[:k] = y[:k] + alpha * np.flip(y[:k])
        y[k] = alpha
    return y
| [
"alexandros.ziogas@inf.ethz.ch"
] | alexandros.ziogas@inf.ethz.ch |
fbdbcf9c89f5f3f1b99414a21d346ac275bb88aa | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/omninote/testcase/firstcases/testcase6_022.py | c47b59270d60bcc1b7d1a8506d1cc9cb435a6e82 | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,354 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
# Appium capabilities: target the OmniNotes app on an Android 4.4 emulator,
# with JaCoCo instrumentation enabled so coverage can be dumped afterwards.
desired_caps = {
    'platformName' : 'Android',
    'deviceName' : 'Android Emulator',
    'platformVersion' : '4.4',
    'appPackage' : 'it.feio.android.omninotes',
    'appActivity' : 'it.feio.android.omninotes.MainActivity',
    'resetKeyboard' : True,
    'androidCoverage' : 'it.feio.android.omninotes/it.feio.android.omninotes.JacocoInstrumentation',
    'noReset' : True
}
def command(cmd, timeout=5):
    """Run a shell command, wait ``timeout`` seconds, then terminate it.

    Output is captured (and discarded); the command is killed after the
    fixed wait regardless of whether it has finished.
    """
    p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
    time.sleep(timeout)
    p.terminate()
    return
def getElememt(driver, str) :
    """Find an element by UiAutomator selector, retrying up to 5 times.

    Sleeps 1s between attempts; as a last resort taps (50, 50) to dismiss
    possible overlays, then tries once more, letting
    NoSuchElementException propagate if it still fails.
    """
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str)
    return element
def getElememtBack(driver, str1, str2) :
    """Find an element by primary selector ``str1``, falling back to ``str2``.

    Tries ``str1`` twice, then ``str2`` five times (1s sleep between
    attempts); finally taps (50, 50) and retries ``str2`` once more.
    """
    # Primary selector: two quick attempts.
    for i in range(0, 2, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str1)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    # Fallback selector: five attempts.
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str2)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str2)
    return element
def swipe(driver, startxper, startyper, endxper, endyper) :
    """Swipe between two points given as fractions of the screen size.

    Retries once after a WebDriverException (transient driver hiccup).
    """
    size = driver.get_window_size()
    width = size["width"]
    height = size["height"]
    try:
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=2000)
    except WebDriverException:
        time.sleep(1)
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=2000)
    return
# testcase022
# Generated UI test (Python 2): long-press the FAB, open the attachment
# menu and pick "Camera"; afterwards dump JaCoCo coverage and force-stop
# the app if the test navigated away from OmniNotes.
try :
    starttime = time.time()
    driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    element = getElememt(driver, "new UiSelector().resourceId(\"it.feio.android.omninotes:id/fab_expand_menu_button\").className(\"android.widget.ImageButton\")")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"it.feio.android.omninotes:id/menu_attachment\").className(\"android.widget.TextView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Camera\")", "new UiSelector().className(\"android.widget.TextView\")")
    TouchAction(driver).tap(element).perform()
except Exception, e:
    print 'FAIL'
    print 'str(e):\t\t', str(e)
    print 'repr(e):\t', repr(e)
    print traceback.format_exc()
else:
    print 'OK'
finally:
    # Always dump coverage and shut the driver down, pass or fail.
    cpackage = driver.current_package
    endtime = time.time()
    print 'consumed time:', str(endtime - starttime), 's'
    command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"6_022\"")
    jacocotime = time.time()
    print 'jacoco time:', str(jacocotime - endtime), 's'
    driver.quit()
    if (cpackage != 'it.feio.android.omninotes'):
        cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage) | [
"prefest2018@gmail.com"
] | prefest2018@gmail.com |
bff42ee4b49a59c4a1c91ef65285fd2eafdf4ea4 | 1b8d162160f5ab6d6a6b8940b8ab83b482abb409 | /tests/query/test_wildcard.py | 3613314b733134f56f4e05918918bba4a6c1ca75 | [
"Apache-2.0"
] | permissive | jlinn/pylastica | f81e438a109dfe06adc7e9b70fdf794c5d01a53f | 0fbf68ed3e17d665e3cdf1913444ebf1f72693dd | refs/heads/master | 2020-05-19T14:07:38.794717 | 2014-07-23T23:43:00 | 2014-07-23T23:43:00 | 10,442,284 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,650 | py | __author__ = 'Joe Linn'
import unittest
import pylastica
from tests.base import Base
class WildcardTest(unittest.TestCase, Base):
    """Integration test for wildcard queries against a lowercased keyword field."""

    def test_search_with_analyzer(self):
        client = self._get_client()
        index = client.get_index('test')
        # Custom analyzer 'lw': treat the whole value as one token, lowercased,
        # so wildcard patterns like 'sa*' match "San ...".
        index_params = {
            'analysis': {
                'analyzer': {
                    'lw': {
                        'type': 'custom',
                        'tokenizer': 'keyword',
                        'filter': ['lowercase']
                    }
                }
            }
        }
        index.create(index_params, True)
        doc_type = index.get_doc_type('test')
        # BUG FIX: the mapping referenced analyzer 'la', which is never
        # defined anywhere; it must use the 'lw' analyzer declared above.
        mapping = pylastica.doc_type.Mapping(doc_type, {
            'name': {'type': 'string', 'store': 'no', 'analyzer': 'lw'}
        })
        doc_type.mapping = mapping
        doc_type.add_document(pylastica.Document(1, {'name': 'San Diego'}))
        doc_type.add_document(pylastica.Document(2, {'name': 'San Luis Obispo'}))
        doc_type.add_document(pylastica.Document(3, {'name': 'San Francisco'}))
        doc_type.add_document(pylastica.Document(4, {'name': 'Chicago'}))
        doc_type.add_document(pylastica.Document(5, {'name': 'London'}))
        index.refresh()
        # 'sa*' matches the three "San ..." documents.
        query = pylastica.query.Wildcard()
        query.set_value('name', 'sa*')
        result_set = doc_type.search(query)
        self.assertEqual(3, len(result_set))
        # 'ch*' matches only "Chicago".
        query = pylastica.query.Wildcard()
        query.set_value('name', 'ch*')
        result_set = doc_type.search(query)
        self.assertEqual(1, len(result_set))
        index.delete()
if __name__ == '__main__':
unittest.main()
| [
"joe@venturocket.com"
] | joe@venturocket.com |
20eb97d8a227b49c674e29cf693eee401713bdc7 | 05263538c3ad0f577cdbbdb9bac87dcf450230ce | /alexa/ask-sdk/ask_sdk_model/services/directive/error.py | 5bd50a4d2a1931a4237389af00b0942f568d9058 | [] | no_license | blairharper/ISS-GoogleMap-project | cea027324fc675a9a309b5277de99fc0265dcb80 | 3df119036b454a0bb219af2d703195f4154a2471 | refs/heads/master | 2020-03-21T16:47:21.046174 | 2018-10-24T08:05:57 | 2018-10-24T08:05:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,520 | py | # coding: utf-8
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
class Error(object):
    """
    NOTE: This class is auto generated.
    Do not edit the class manually.

    :param code: error code to find more information in developer.amazon.com. # noqa: E501
    :type code: (optional) int
    :param message: Readable description of error. # noqa: E501
    :type message: (optional) str
    """
    # Attribute name -> type string used by the SDK (de)serializer.
    deserialized_types = {
        'code': 'int',
        'message': 'str'
    }
    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        'code': 'code',
        'message': 'message'
    }

    def __init__(self, code=None, message=None):  # noqa: E501
        # type: (Optional[int], Optional[str]) -> None
        """
        :param code: error code to find more information in developer.amazon.com. # noqa: E501
        :type code: (optional) int
        :param message: Readable description of error. # noqa: E501
        :type message: (optional) str
        """
        # Used by the serializer for polymorphic models; no subtype here.
        self.__discriminator_value = None
        self.code = code
        self.message = message

    def to_dict(self):
        # type: () -> Dict[str, object]
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models, enums, lists and dicts.
        for attr, _ in six.iteritems(self.deserialized_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else
                    x.value if isinstance(x, Enum) else x,
                    value
                ))
            elif isinstance(value, Enum):
                result[attr] = value.value
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else
                    (item[0], item[1].value)
                    if isinstance(item[1], Enum) else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        # type: () -> str
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        # type: () -> str
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are equal"""
        if not isinstance(other, Error):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are not equal"""
        return not self == other
| [
"blair.harper@gmail.com"
] | blair.harper@gmail.com |
36246774e46c9b6cd0ae0f29d7f7be2713617944 | b500996a0b29829fde6afe8b23178ca9df4a239d | /rydinfap/src/apps/assetpartpurch.py | 8aaafda1429311c01fc8ec79204b265b934722ab | [] | no_license | eocampo2000/test-code | 48c4d444e323eef5e6fe7e61b018952ef3cd4134 | 49328664243e1a9daf9c567d1aaaa19fd4654c02 | refs/heads/master | 2016-08-11T07:35:31.346464 | 2016-02-13T12:33:55 | 2016-02-13T12:33:55 | 51,642,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,303 | py | '''
Created on Jan 6, 2015
@author: eocampo
'''
'''
Created on Aug 20, 2014
@author: eocampo
'''
__version__ = '20150102'
import sys
import utils.strutils as su
import procdata.procinfa as pi
import procjobs.procsched as psc
import utils.fileutils as fu
from apps.infbaseapp import _InfaBaseApp
# Mandatory to define self.cmdStep
# method _getNextRunDate is sensitive to schedule changes !
RUN_PER_DAY = 1 # Daily runs.
DP_LEN = len('YYYYMM')
# Schedules
SCH_FREQ = 'Mthly'
sch = ()
cur_dayr = su.getTodayDtStr('%Y%m')
class AssetPartPurch(_InfaBaseApp):
    """Monthly load of the Asset part-purchasing dimension via Informatica.

    Drives the wkf_part_purchasing_dim_monthly workflow and tracks the
    last successful load date in a control file.
    """
    exitOnError = True

    def __init__(self):
        super(AssetPartPurch,self).__init__()
        self.landDir = ''
        self.incFileSet = []  # Incoming Files. Contains full path name.
        self.incFiles = []
        self.workFiles = []   # Files that were moved to the working dir (ideally same than incSetFile).
        self.trigFiles = []   # Incoming Trigger File.
        self.fileDate = ''
        self.FILE_SET_LEN = 1
        self.ts = su.getTimeSTamp()
        # Allowable commands for this application. Make sure to Set
        # (each letter selects a processing step on the command line).
        self.cmdStep = { 'A' : self.getLock ,
                         'B' : self.isWorkDayWarn ,
                         'C' : self.chkNextRunFlg ,
                         'D' : self.procAssetPartPurch ,
                       }
        # Infa Environmental variables/
        # Maps env-var names to the attribute expressions that supply them.
        self.infaEnvVar = {
            'PMCMD'         : 'mg.pmcmd' ,
            'INFA_USER'     : 'self.ib.rep_user' ,
            'INFA_XPWD'     : 'self.ib.rep_xpwd' ,
            'DOMAIN'        : 'self.ib.dom_name' ,
            'INT_SERV'      : 'self.ib.IS' ,
            'INFA_SHARE'    : 'self.ib.shareDir' ,
            'INFA_APP_CFG'  : 'self.ib.cfgDir' ,
            'INFA_APP_LCK'  : 'self.ib.lckDir' ,
            'INFA_APP_CTL'  : 'self.ib.ctlDir' ,
        }

    def _setDataDir(self) : return 0  # no landing-dir setup needed for this app

    def _wkf_asst_part_purch(self):
        """Run the monthly part-purchasing workflow and log the outcome."""
        self.ib.fld = 'Asset'
        self.ib.wkf = 'wkf_part_purchasing_dim_monthly'
        rc = pi.runWkflWait(self.ib,self.log)
        if rc != 0 :
            self.log.error('Running %s.%s rc = %s' % (self.ib.fld,self.ib.wkf,rc))
        else :
            self.log.info('Running %s.%s rc = %s' % (self.ib.fld,self.ib.wkf,rc))
        return rc

    def procAssetPartPurch(self):
        """Main step: check schedule, run the workflow, update the control file.

        Returns 0 on success, -1 on missing control data, 1 on workflow
        failure, or the rc from the schedule/file helpers.
        """
        ctlFile = '%s/%s.ctl' % (self.ib.ctlDir,self.appName)
        self.log.debug('self.checkNextRunFlg is %s' % self.checkNextRunFlg)
        prev_dayr = self._getCtlFile()
        if self.checkNextRunFlg is True:
            if prev_dayr is None or prev_dayr.strip() == '':
                self.log.error("Could not find control file or No Data")
                return -1
            # Only proceed when the monthly schedule says a run is due.
            rc = psc.getNextRunDate(prev_dayr, cur_dayr, SCH_FREQ, self.log,sch)
            if rc != 0 :
                self.log.error("self._chkNextRun rc = %s" % rc)
                return rc
        # Run workflows
        if self._wkf_asst_part_purch() != 0 : return 1
        # Loading Staging Succeeded. Update the control file.
        rc = fu.updFile(ctlFile,cur_dayr)
        if rc == 0 :
            if self.checkNextRunFlg: self.log.info('Updated Cur Load Date from %s to %s , Control File %s' % (prev_dayr,cur_dayr, ctlFile))
            else : self.log.info('Overwriting Cur Load Date from %s to %s , Control File %s' % (prev_dayr,cur_dayr, ctlFile))
        else :
            self.log.error('Could not Update Load Date %s, Control File %s rc = %s' % (cur_dayr,ctlFile,rc))
        return rc
def main(Args):
    """Instantiate the application and delegate to its main entry point."""
    application = AssetPartPurch()
    return application.main(Args)
if __name__ == '__main__':
from setwinenv import setEnvVars # Remove in UX
setEnvVars() # Remove in UX
rc= main(sys.argv)
| [
"eocampo1000@hotmail.com"
] | eocampo1000@hotmail.com |
d72f03c6696ae620de2f185352ac0ee64c52ce40 | 8bd6b0784de9a1e6a39d0f5f23f2d8fb50c73d49 | /MethodRefine/logistics/MethodRefine/logistics_benchmark-high/validating/validating_33.py | 6bd1c737b7c18109ca58e18e42baaa3f5f355f85 | [] | no_license | sysulic/MethodRefine | a483d74e65337dff4bc2539ce3caa3bf83748b48 | adbb22d4663041d853d3132f75032b7561bf605c | refs/heads/master | 2020-09-14T10:45:55.948174 | 2020-05-01T09:13:59 | 2020-05-01T09:13:59 | 223,104,986 | 3 | 2 | null | 2020-04-27T11:01:36 | 2019-11-21T06:33:16 | Python | UTF-8 | Python | false | false | 1,654 | py | #!/usr/bin/env python
# coding=utf-8
import sys
sys.path.insert(0, './')
from logistic import *
import new_tihtn_planner
# Initial planning state: truck/plane/package locations per (city, loc).
state0 = new_tihtn_planner.State('state0')
allow = False  # disallow task interleaving in the planner
state0.loc = {'truck1':('city1','loc1'),'truck2':('city2','loc1'),'truck3':('city3','loc2'),'truck4':('city4','loc1'),'truck5':('city5','loc1'),'plane1':('city3','loc1'),'pkg1':('city3','loc1'),'pkg2':('city5','loc1'),'pkg3':('city2','loc1'),'pkg4':('city2','loc1'),}
state0.load = {'truck1':False,'truck2':False,'truck3':False,'truck4':False,'truck5':False,'plane1':False,}
state0.plane_nums = 1
# Register the object universe and the primitive operators with the planner.
new_tihtn_planner.declare_types({'location':[('city1','loc1'),('city1','loc2'),('city2','loc1'),('city2','loc2'),('city3','loc1'),('city3','loc2'),('city4','loc1'),('city4','loc2'),('city5','loc1'),('city5','loc2'),],'truck':['truck1','truck2','truck3','truck4','truck5',],'plane':['plane1',],'pkg':['pkg1','pkg2','pkg3','pkg4',]})
new_tihtn_planner.declare_funs({load_plane:['pkg', 'location', 'plane'],load_truck:['pkg', 'location', 'truck'],by_plane:['plane', 'location'],drive_truck:['truck', 'location'], unload_truck:['pkg', 'location', 'truck'],unload_plane:['pkg', 'location', 'plane']})
new_tihtn_planner.instance()

def execute(completable):
    """Run the planner on the four delivery goals with ordering constraints."""
    return new_tihtn_planner.pyhop(completable, allow, state0,[('delievery','pkg1',('city5','loc2')),('delievery','pkg2',('city5','loc2')),('delievery','pkg3',('city5','loc2')),('delievery','pkg4',('city4','loc1')),],[[0, 1],[1, 2],[2, 3],], 9)

def add_methods(fun_obj_list):
    # Method names are the part of the function name before the '__' suffix.
    for fun in fun_obj_list:
        new_tihtn_planner.add_method(fun.func_name.split('__')[0], fun)

def reverse_methods():
new_tihtn_planner.reverse_methods() | [
"526552330@qq.com"
] | 526552330@qq.com |
a6bb68f32efef496538748681b5a0a3d34d2fd67 | 4d7d2d44410ce1787ce3255dff2be9e5317535a7 | /apps/users/api/api.py | f0d5bf10e43dbea45b24f37110811e50a65fc02e | [] | no_license | Noeuclides/palindrome_api | 931533148cc2a2d4a5155d717ecb9559c1c30d12 | cb5bc8873f953121d4785fe62ef6b49ec2fdd996 | refs/heads/master | 2023-03-17T01:37:59.429450 | 2021-03-17T22:17:20 | 2021-03-17T22:17:20 | 348,148,575 | 0 | 0 | null | 2021-03-17T15:23:41 | 2021-03-15T23:10:58 | Python | UTF-8 | Python | false | false | 2,849 | py | from django.http import Http404, response
from django.contrib.auth import authenticate
from rest_framework import status, generics, permissions
from rest_framework.response import Response
from rest_framework.request import Request
from rest_framework.views import APIView
from rest_framework_jwt.settings import api_settings
from apps.users.models import User
from apps.users.api.serializers import UserSerializer, UserListSerializer, LoginSerializer
VALUES = ['id', 'name', 'last_name', 'username', 'email', 'password']
class UserAPIView(APIView):
    """List all users or register a new one."""

    def get(self, request: Request) -> Response:
        queryset = User.objects.all().values(*VALUES)
        serializer = UserListSerializer(queryset, many=True)
        return Response(serializer.data)

    def post(self, request: Request) -> Response:
        serializer = UserSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response({"user": serializer.data}, status=status.HTTP_201_CREATED)
class UserDetailAPIView(APIView):
    """Retrieve, update or delete a single user by primary key."""

    def get_object(self, pk: int) -> User:
        # 404 instead of a server error when the pk does not exist.
        try:
            return User.objects.get(pk=pk)
        except User.DoesNotExist:
            raise Http404

    def get(self, request: Request, pk: int) -> Response:
        serializer = UserSerializer(self.get_object(pk))
        return Response(serializer.data)

    def put(self, request: Request, pk: int) -> Response:
        serializer = UserSerializer(self.get_object(pk), data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request: Request, pk: int) -> Response:
        self.get_object(pk).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class LoginAPIView(generics.GenericAPIView):
    """Authenticate a user and return a JWT token."""
    serializer_class = LoginSerializer

    def post(self, request):
        serializer = self.serializer_class(data=request.data)
        serializer.is_valid(raise_exception=True)  # 400 on bad credentials
        # NOTE(review): 'success' is the *string* 'True', not a boolean --
        # confirm clients expect a string here before changing it.
        response = {
            'success' : 'True',
            'status code' : status.HTTP_200_OK,
            'message': 'User logged in successfully',
            'token' : serializer.data['token'],
            }
        status_code = status.HTTP_200_OK
        return Response(response, status=status_code)
class UserRetrieveView(generics.RetrieveAPIView):
    """Return the profile of the currently authenticated user."""
    permission_classes = [
        permissions.IsAuthenticated,
    ]
    serializer_class = UserSerializer

    def get_object(self):
        # The "detail" object is always the requesting user; no pk lookup.
        return self.request.user
| [
"euclidesnoeuclides@gmail.com"
] | euclidesnoeuclides@gmail.com |
8d733a6f9844f95ae270ebba18d3ce7204c182df | 7833e3f6e979dac7fd5f321ec8ba63fe1db188d6 | /srecanje2/matematika.py | d4dd8e48de6d8cee05392cd7c543a6da52caa95d | [] | no_license | jO-Osko/Krozek-python | 93865fd79d06ef5890e99c10f38bd94d308d4a70 | 787861fdeff625fc64b9ef0532a341992495713d | refs/heads/master | 2023-04-03T08:47:00.414804 | 2021-04-06T14:52:27 | 2021-04-06T14:52:27 | 305,398,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | # + - / * **
# // %
# // -> Celštevilsko deljenje
# % -> ostanek pri deljenju (modulo, modulus)
# vpisano <- int(input())
vpisano = int(input("Vnesi število"))
if vpisano % 2 != 0:
print("Vnesel si liho število")
else:
print("Vnesel si sodo število")
| [
"koprivec.filip@gmail.com"
] | koprivec.filip@gmail.com |
8a16ba48d7f52c945a9074f8d6397b88610d3699 | 74984afb8ac988ad56cb887cf1ae76e0580ceaf4 | /transposition.py | 853d98622852161725859684d7a471b899718f99 | [] | no_license | eBLDR/Criptography | e440786f1a8d2c2bc5d24a1e6d7f005fae6fd28a | f08974d8d2dd95087afb3d2f1b91419df0959371 | refs/heads/master | 2020-03-26T06:19:30.809445 | 2018-12-27T11:33:29 | 2018-12-27T11:33:29 | 144,599,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,479 | py | """
Transposition cipher (rearrangement method) - by BLDR 2018
"""
from math import ceil
from cipher import Cipher
class TranspositionCipher(Cipher):
    """Columnar transposition cipher: permutes character positions only."""

    def __init__(self):
        super().__init__()
        self.possible_modes.update({'E': 'Encryption', 'D': 'Decryption'})

    @staticmethod
    def cipher_info():
        """Print a short description of the method."""
        print("Transposition cipher is a method of encryption by which the positions "
              "held by units of plaintext (which are commonly characters or groups of "
              "characters) are shifted according to a regular system, so that the ciphertext "
              "constitutes a permutation of the plaintext.")

    def run(self):
        """Interactive entry point: gather settings, then encrypt/decrypt."""
        print('=== Transposition cipher method ===\n')
        self.initialise(accept_numbers=True)
        self.main()

    def set_key(self):
        """Prompt until a non-zero integer key is supplied."""
        while not self.key:
            entered = input('Insert key (any integer): ')
            if entered.isdigit():
                self.key = int(entered)

    def process_message(self, key, decrypt=False):
        """Transpose self.input_message and return the result.

        Encryption reads every ``key``-th character; decryption uses
        ceil(len / key) as the stride, which inverts the mapping.
        """
        text = self.input_message
        stride = ceil(len(text) / key) if decrypt else key
        # Column `start` collects characters start, start+stride, ...
        return ''.join(text[start::stride] for start in range(stride))
if __name__ == '__main__':
transposition_crypt = TranspositionCipher()
transposition_crypt.run()
| [
"ed.bldr@gmail.com"
] | ed.bldr@gmail.com |
84ab98771c4d8d46a86328fa9598e1a9fb82ae88 | 8e429e825cd28b74e18ac18f8a48f748e89ccb1b | /webapp/models.py | 96169d8af49ab8e63decd2d396e7c03a691877b2 | [] | no_license | gibsonx/AnsibleGUI | 0de7bd40259950ede14fe452f43f99a568af3ed2 | 00ff984e9d2385bfae68e7de82201b6fae336d48 | refs/heads/master | 2023-03-12T07:40:39.930422 | 2023-02-19T07:19:00 | 2023-02-19T07:19:00 | 168,816,549 | 1 | 0 | null | 2023-01-24T23:20:08 | 2019-02-02T09:39:48 | Jupyter Notebook | UTF-8 | Python | false | false | 1,150 | py | from django.db import models
# Create your models here.
class Host(models.Model):
    """A managed machine plus the credentials used to reach it."""
    hostname = models.CharField(max_length=16)
    ip = models.GenericIPAddressField(null=True,blank=True)
    port = models.IntegerField(null=True,blank=True)
    username = models.CharField(max_length=16,null=True,blank=True)
    password = models.CharField(max_length=16,null=True,blank=True)
    ssh_key = models.TextField(max_length=30,null=True,blank=True)
    mod_date = models.DateTimeField('最后修改日期', auto_now = True)  # last-modified timestamp

    def __str__(self):
        return self.hostname
class Group(models.Model):
    """A named inventory group of hosts."""
    groupname = models.CharField(max_length=16)
    hosts = models.ManyToManyField(Host)

    def __str__(self):
        return self.groupname
class GroupVar(models.Model):
    """A key/value variable attached to one inventory group."""
    key = models.CharField(max_length=16)
    value = models.CharField(max_length=16)
    group = models.ForeignKey(Group,on_delete=models.CASCADE,default='')

    def __str__(self):
        return self.key
class Tag(models.Model):
usage = models.ManyToManyField(Host)
name = models.CharField(max_length=50)
def __unicode__(self):
return self.name | [
"you@example.com"
] | you@example.com |
e5138030c49c45efb963e43ee9fff85323b8bdc4 | e9eed586eb25a8805411a0c1069f79fb70be957d | /Course/migrations/0002_course_link.py | b356e7f70326435ad5679cbf93d6ad5b4e14bfef | [
"MIT"
] | permissive | jay1999ke/PureQPA | 61d250f85889867502a46f87385d825b764bab0c | c5ba6d7998d5fb1544b81bc076dbd19c3017fa9e | refs/heads/master | 2020-04-24T18:05:00.321716 | 2019-06-21T17:39:51 | 2019-06-21T17:39:51 | 172,169,063 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | # Generated by Django 2.1 on 2018-10-16 13:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the optional `link` text field to Course."""

    dependencies = [
        ('Course', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='course',
            name='link',
            field=models.CharField(blank=True, max_length=512),
        ),
    ]
| [
"jay1999ke@gmail.com"
] | jay1999ke@gmail.com |
0fec4a68aaa4b8c693563d5a6f693b2a76e69cd4 | 6c2d219dec81b75ac1aef7f96f4e072ed7562f81 | /scenes/siteAbbeyMaley.py | 9e59ff21c8d8c837c9a9feb11472785a774722e5 | [] | no_license | SFTEAM/scrapers | 7e2b0a159cb19907017216c16a976d630d883ba5 | 778f282bf1b6954aa06d265fdb6f2ecc2e3c8e47 | refs/heads/main | 2023-08-15T18:21:41.922378 | 2021-09-24T22:24:29 | 2021-09-24T22:24:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,729 | py | import re
import scrapy
import tldextract
from tpdb.BaseSceneScraper import BaseSceneScraper
### Abbiemaley.com has all scenes hidden behind a paywall.
### Sexyhub seems to have recent updates, and is getting current ones as
### well, so I'm pulling from there.
class siteAbbieMaleySpider(BaseSceneScraper):
    """Scrapes scene metadata for this studio from the mirror site."""
    name = 'AbbieMaley'
    network = "Abbie Maley"
    parent = "Abbie Maley"

    start_urls = [
        'https://www.sexyhub.org',
    ]

    selector_map = {
        'title': '//h1[@class="title"]/text()',
        'description': '//div[contains(text(),"Description")]/following-sibling::div[1]/text()',
        'date': '//div[contains(text(),"Release Date")]/following-sibling::text()',
        'date_formats': ['%d %b %Y'],
        'image': '//meta[@property="og:image"]/@content',
        'performers': '//div[@class="models"]/a/text()',
        'tags': '//div[contains(text(),"Categories")]/following-sibling::span/a/text()',
        'external_id': '.*\/\d+-(.*)-abbiemaley',
        'trailer': '',
        'pagination': '/xfsearch/site/AbbieMaley.com/page/%s/'
    }

    def get_scenes(self, response):
        """Yield a parse request for every scene link that matches this site."""
        scenes = response.xpath('//h2[@class="title"]/a/@href').getall()
        for scene in scenes:
            if re.search(self.get_selector_map('external_id'), scene):
                yield scrapy.Request(url=self.format_link(response, scene), callback=self.parse_scene)

    def get_performers(self, response):
        """Return performer names, stripping any " aka ..." alias suffix."""
        performers = self.process_xpath(response, self.get_selector_map('performers')).getall()
        if performers:
            performerlist = []
            for performer in performers:
                performer = performer.lower()
                if " aka " in performer:
                    performer = re.search('(.*) aka ', performer).group(1)
                if performer:
                    performerlist.append(performer.strip().title())
            return list(map(lambda x: x.strip().title(), performerlist))
        return []

    def get_tags(self, response):
        """Return category tags, excluding performer names and alias entries."""
        if self.get_selector_map('tags'):
            tags = self.process_xpath(response, self.get_selector_map('tags')).getall()
            if tags:
                performers = self.process_xpath(response, self.get_selector_map('performers')).getall()
                if performers:
                    for performer in performers:
                        if performer in tags:
                            tags.remove(performer)
                # BUG FIX: the original removed items from `tags` while
                # iterating over it, which skips the element after each
                # removal. Iterate over a copy instead.
                for tag in list(tags):
                    if " aka " in tag.lower():
                        tags.remove(tag)
                return list(map(lambda x: x.strip(), tags))
        return []

    def get_site(self, response):
        return "Abbie Maley"
| [
"briadin@yahoo.com"
] | briadin@yahoo.com |
25c8ba0696006727d611416a6a6c00cc64b65b15 | e942a7c881afd8bf83e6fef348e8b6aab2fb63b5 | /testProjects/testCoinCeeper/tcp/Costs/models.py | 215c602a209c0423fda18ab07cae04f60020932e | [] | no_license | 100pecheneK/CostControl | 1df6a35e60ff428d07c2faf29163d9f6ce6c5a9b | bee51fbee510b934d7993552b5f2c1e203d04a60 | refs/heads/master | 2021-02-09T17:22:01.795706 | 2020-05-25T12:17:30 | 2020-05-25T12:17:30 | 244,304,045 | 2 | 0 | null | 2020-12-12T18:07:04 | 2020-03-02T07:10:14 | CSS | UTF-8 | Python | false | false | 465 | py | from django.db import models
from django.contrib.auth import get_user_model
from .choices.cost_type import COST_TYPE
User = get_user_model()
class Cost(models.Model):
    """A single expense entry recorded by a user."""
    money = models.IntegerField(verbose_name='money')
    cost_type = models.IntegerField(choices=COST_TYPE, verbose_name='costType', default=1)
    date = models.DateField(auto_now=True, verbose_name='costDate')  # refreshed on every save
    user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='user')
| [
"mistermihail23@gmail.com"
] | mistermihail23@gmail.com |
fa55ad8b8c0619a04460e3d5cecf31e3dd06b6f7 | 8b25a7984bd18fc356232083da0bb2f829a1dbd4 | /ineco_sms/wizard/send_sms_by_saleorder.py | 22ba56209cbe67845768fe18aaec457319130932 | [] | no_license | anndream/new_mixprint_addons | f94067a1248cf3d30ce4e937d5fb3c96bc9cb482 | 1b4b04388e723dc7137dd8d2a29fdef3f59f4861 | refs/heads/master | 2020-04-09T19:17:36.882746 | 2015-09-10T04:41:13 | 2015-09-10T04:41:13 | 42,242,457 | 0 | 2 | null | 2015-09-10T12:13:56 | 2015-09-10T12:13:56 | null | UTF-8 | Python | false | false | 4,081 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 - INECO PARTNERSHIP LIMITE (<http://www.ineco.co.th>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
#import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
from openerp import tools
class sms_send_by_saleorder(osv.osv_memory):
    """Transient wizard: send an SMS to the customer of a sale order."""
    _name = "sms.send.by.saleorder"
    _description = "Send SMS in sale order."
    _columns = {
        'server_id' : fields.many2one('ineco.sms.server', 'Server', required=True),
        'phone': fields.char('Mobile No', size=64, required=True),
        'message': fields.text('Message'),
    }

#    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
#        if context is None: context = {}
#        fvg = super(sms_send_by_saleorder, self).fields_view_get(cr, uid, view_id, view_type, context, toolbar, submenu)
#        sale_id = context and context.get('active_id', False) or False
#
#        if view_type == 'form' and (context.get('active_model') == 'sale.order') and sale_id:
#            sale_obj = self.pool.get('sale.order').browse(cr, uid, sale_id, context=context)
#            fvg['fields']['Mobile No'] = sale_obj.partner_id.mobile
#
#        return fvg

    def default_get(self, cr, uid, fields, context):
        """ To get default values for the object.
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param fields: List of fields for which we want default values
        @param context: A standard dictionary
        @return: A dictionary which of fields with values.
        """
        res = super(sms_send_by_saleorder, self).default_get(cr, uid, fields, context=context)
        # Pre-select the SMS server flagged as default, if any.
        server_ids = self.pool.get('ineco.sms.server').search(cr, uid,[('is_default','=',True)])
        if server_ids:
            res.update({'server_id': server_ids[0]})
        # Prefill the phone number from the sale order's customer.
        sale_id = context and context.get('active_id', False) or False
        if (context.get('active_model') == 'sale.order') and sale_id:
            sale_obj = self.pool.get('sale.order').browse(cr, uid, sale_id, context=context)
            if 'phone' in fields:
                res.update({'phone': sale_obj.partner_id.mobile or False})
        return res

    def send_sms(self, cr, uid, ids, context=None):
        """ Changes the Product Quantity by making a Physical Inventory.
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        @return:
        """
        if context is None:
            context = {}
        rec_id = context and context.get('active_id', False)
        assert rec_id, _('Active ID is not set in Context')
        for data in self.browse(cr, uid, ids, context=context):
            # Refuse to send when the server has no credit left.
            if data.server_id.balance < 1:
                raise osv.except_osv(_('Warning!'), _('Balance limited.'))
            data.server_id.send_message(data.phone,data.message)
        return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:: | [
"thitithup@gmail.com"
] | thitithup@gmail.com |
f082c3e62020b6f49a23c0b4937155dba618d6e4 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5686275109552128_0/Python/Skywalker8921/B.py | 9326ba2218d9c5558c6e1d74f1c318f84c8792e4 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,434 | py | import sys,math
import collections
import functools
# https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
class memoized(object):
    '''Decorator. Caches a function's return value each time it is called.
    If called later with the same arguments, the cached value is returned
    (not reevaluated).
    '''

    def __init__(self, func):
        self.func = func
        self.cache = {}
        # Make the wrapper introspect like the wrapped function
        # (__name__, __doc__, __module__, ...).  This replaces the old
        # __name__/__doc__ *methods*, which returned bound-method objects
        # instead of the expected attribute values.
        functools.update_wrapper(self, func)

    def __call__(self, *args):
        try:
            if args in self.cache:
                return self.cache[args]
        except TypeError:
            # Uncacheable arguments (e.g. a list).  The original test
            # `isinstance(args, collections.Hashable)` never fired: the
            # *args tuple is always an instance of Hashable even when its
            # elements are not, and the alias was removed in Python 3.10.
            # Probing the cache and catching TypeError is correct on all
            # versions: better to not cache than blow up.
            return self.func(*args)
        value = self.func(*args)
        self.cache[args] = value
        return value

    def __repr__(self):
        '''Return the wrapped function's repr (the original returned the
        bound __repr__ method object, not a string).'''
        return repr(self.func)

    def __get__(self, obj, objtype):
        '''Support instance methods.'''
        return functools.partial(self.__call__, obj)
class debugged(object):
    """Decorator that traces every call to the wrapped function on stderr."""

    def __init__(self, func):
        self.func = func

    def __call__(self, *args):
        name = self.func.__name__
        print("[{}({}) = ? ".format(name, args), file=sys.stderr)
        result = self.func(*args)
        print("{}({}) = {}]".format(name, args, result), file=sys.stderr)
        return result
def main_small(D,P):
    """Solve one small-input case (GCJ-style): minimum minutes needed.

    D is read from the input but unused here; P holds the per-plate counts.
    Plates are processed as a tuple sorted in decreasing order so memoized
    states are canonical.
    """
    P.sort()
    P.reverse()
    Pin = tuple(P)
    @memoized
    #@debugged
    def aux(P):
        # P is a decreasing tuple; P[0] is the fullest plate.
        p = P[0]
        if p <= 2:
            # Just eat: with no fuller plate, everything finishes in p minutes.
            return p
        else:
            # PP1 = [pp - 1 for pp in P];
            # v1 = main_small(D,PP1);
            # Baseline: eat without splitting -- res+1 below yields p minutes.
            res = P[0]-1
            # Alternative: spend one minute splitting the fullest plate into
            # i and p-i, then solve the resulting configuration recursively.
            for i in range(p//2,p):
                PP = list(P)
                PP[0] = i
                PP.append(p - i)
                PP.sort()
                PP.reverse()
                PPin = tuple(PP)
                v2 = aux(PPin)
                res = min(res,v2)
            return res+1
    return aux(Pin);
if __name__ == "__main__":
    # Standard GCJ driver: first line is the number of test cases.
    T = int(input())
    for c in range(T):
        # D on one line, then the list of pancake counts P.
        D = int(input())
        P = [int(i) for i in input().split()]
        res = main_small(D,P)
        #res = main_large(smax,si)
        # Mirror each answer on stderr so progress is visible even when
        # stdout is redirected to the answer file.
        print("Case #{}: {}".format(c+1,res),file=sys.stderr)
        print("Case #{}: {}".format(c+1,res))
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
a5bf17d2fc298a295b4fce9f49b18f68c79ac34e | 5189b657618c4041041836e6697b69caa965229f | /blockit/migrations/0099_auto_20200904_1757.py | 2cbba3247466c2dfbc2ee1acacea9bddabe1dd33 | [] | no_license | MuellerBettina/ba2020_573561 | b653a0c48569ebaecaaee6dd99c69d7e584514e8 | 41b6ba9f40c1fc63fa9dfdfba629c26f756abfa9 | refs/heads/master | 2023-04-06T16:12:03.229605 | 2020-09-10T16:26:12 | 2020-09-10T16:26:12 | 269,595,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | # Generated by Django 2.2.13 on 2020-09-04 15:57
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated schema migration: re-declares the defaults on
    # Action.start_time / Action.end_time.  NOTE(review): the defaults were
    # frozen as fixed datetimes at makemigrations time, so they do not track
    # "now" -- harmless for applying the migration, but worth knowing.

    dependencies = [
        ('blockit', '0098_auto_20200904_1756'),
    ]

    operations = [
        migrations.AlterField(
            model_name='action',
            name='end_time',
            field=models.DateTimeField(default=datetime.datetime(2020, 9, 4, 15, 57, 45, 419620, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='action',
            name='start_time',
            field=models.DateTimeField(default=datetime.datetime(2020, 9, 4, 15, 57, 45, 419602, tzinfo=utc)),
        ),
    ]
| [
"BettinaMueller@pm.me"
] | BettinaMueller@pm.me |
fa8ea4af2a6244024d62ba80865d2b08b198f9fc | 958f972d273e314ae29aa5c8287925972f32816e | /univers/migrations/0003_auto_20201208_1951.py | c88bd0e74cf1483a782610e808a3b200349a258a | [] | no_license | lolsecret/project_1 | 727b9e9d22b1c44906a2f1b55ef8668e03d92cbb | 462d90a5b78196359e967539043e8d6616f8b789 | refs/heads/master | 2023-02-07T21:09:02.381298 | 2020-12-29T10:33:15 | 2020-12-29T10:33:15 | 322,527,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | # Generated by Django 3.1.3 on 2020-12-08 13:51
import datetime
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    # Auto-generated: adds a 'test' UUID field to GroupSpec and switches
    # GroupSpec.start_date to a callable default.  NOTE(review): the UUID
    # default is a single fixed value frozen at generation time, so every
    # existing row receives the same UUID.

    dependencies = [
        ('univers', '0002_auto_20201208_1116'),
    ]

    operations = [
        migrations.AddField(
            model_name='groupspec',
            name='test',
            field=models.UUIDField(default=uuid.UUID('bf36e4b1-038a-4556-b32e-569be75fbce0')),
        ),
        migrations.AlterField(
            model_name='groupspec',
            name='start_date',
            field=models.DateField(default=datetime.datetime.now),
        ),
    ]
| [
"lucallonso@gmail.com"
] | lucallonso@gmail.com |
cb039be2894ef83559a1e11d1859c65872352644 | 54934cfe32ce5aa5c2e718b0c5c2afa4b458fe75 | /33ch/convex_hull.py | e2cc0f21ef42dcf7bf6af197664c6ea139b00baa | [] | no_license | mccarvik/intro_to_algorithms | 46d0ecd20cc93445e0073eb0041d481a29322e82 | c2d41706150d2bb477220b6f929510c4fc4ba30b | refs/heads/master | 2021-04-12T12:25:14.083434 | 2019-11-09T05:26:28 | 2019-11-09T05:26:28 | 94,552,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py | from functools import reduce
def convex_hull_graham(points):
    '''
    Returns points on convex hull in CCW order according to Graham's scan algorithm.
    By Tom Switzer <thomas.switzer@gmail.com>.
    '''
    TURN_LEFT = 1

    def turn(p, q, r):
        # Sign of the cross product (q - p) x (r - p).
        cross = (q[0] - p[0]) * (r[1] - p[1]) - (r[0] - p[0]) * (q[1] - p[1])
        return (cross > 0) - (cross < 0)

    def half_hull(ordered):
        # Monotone-chain pass: keep only points that make a left turn.
        hull = []
        for r in ordered:
            while len(hull) > 1 and turn(hull[-2], hull[-1], r) != TURN_LEFT:
                hull.pop()
            if not hull or hull[-1] != r:
                hull.append(r)
        return hull

    ordered = sorted(points)
    lower = half_hull(ordered)
    upper = half_hull(reversed(ordered))
    # Join the chains, dropping the duplicated endpoints of the upper chain.
    lower.extend(upper[1:-1])
    return lower
# Smoke test: the interior point (2, 2) must not appear in the hull.
pts = [(0,0), (0,5), (5,5), (5,0), (2,2)]
print(convex_hull_graham(pts))
"ec2-user@ip-172-31-91-31.ec2.internal"
] | ec2-user@ip-172-31-91-31.ec2.internal |
a9dea402482c68f2644aae9ac12a2e0058a422be | f889d26fec0c4da86c2b857191564e5ee57430a8 | /Python_advanced/advanced/stacks_and_ques/06_Balanced_Parenthesis.py | a147d9911d19dfe64a0e47c933b688827297d500 | [] | no_license | Grigorov999/SoftUni-Python | 4da6ecea760d13d7571723b8faa771b3be0199f6 | cb4f8f352fc48cb1ae8b2efd803265707a37227f | refs/heads/master | 2022-12-13T15:04:05.303204 | 2020-09-11T20:32:29 | 2020-09-11T20:32:29 | 294,784,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | parentheses = input()
stack = []
pairs = {
'{': '}',
'[': ']',
'(': ')'
}
valid = True
for element in parentheses:
if element in "({[":
stack.append(element)
elif element in ")}]":
if stack:
current = stack[-1]
if pairs[current] == element:
stack.pop()
else:
valid = False
break
else:
valid = False
if valid:
print("YES")
else:
print("NO")
| [
"noreply@github.com"
] | Grigorov999.noreply@github.com |
946f10273f525a0798af550bfa1ecc7df04d3e18 | fffb732290af97687ea3221ce4a6ce4d95640aff | /courses/w04_py/source/networkig/mysocket1.py | 741da58eccd5ee795710bb2b50a8b2024155244e | [] | no_license | NamWoo/self_driving_car | 851de73ae909639e03756eea4d49ab663447fc19 | cd5c1142c9e543e607ca9dc258f689de6879d207 | refs/heads/master | 2021-07-24T19:51:54.459485 | 2021-07-06T13:58:19 | 2021-07-06T13:58:19 | 186,267,543 | 9 | 7 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | from socket import *
#import socket
mysock = socket(AF_INET, SOCK_STREAM)
print(mysock)
#myip_info = gethostbyname("google.com")
#print(myip_info) | [
"pre3ice@gmail.com"
] | pre3ice@gmail.com |
35c5216c087f41577a8a794e4c2777f16369f243 | 1180c0bfe29959d95f3c131e6e839950e528d4ee | /28/shibasisp/app.py | 93fe0f0e636fed68c7ce0d6545f5353e687bedf7 | [] | no_license | pybites/challenges | e3e461accd8e7f890aee8007ba5070086ef983fc | 02b77652d0901e6e06cb9b1e7cb3e59c675445c2 | refs/heads/community | 2023-08-20T18:19:02.982214 | 2022-11-17T09:23:31 | 2022-11-17T09:23:31 | 78,264,928 | 764 | 3,115 | null | 2023-07-21T05:58:19 | 2017-01-07T07:17:50 | Jupyter Notebook | UTF-8 | Python | false | false | 2,420 | py | from flask import Flask, render_template, request
import pandas as pd
from bokeh.plotting import figure, output_file
from bokeh.embed import components
app = Flask(__name__)

# Load the Gapminder data set (the old "Iris" wording here was stale) and
# keep only rows from 1950 onwards.
data = pd.read_csv('data/gapminder.csv')
data = data[(data.Year >= 1950)]
# Dropdown choices for the form.  NOTE(review): assumes the first two CSV
# columns are Country/Year and the last column is excluded -- confirm.
country_names = list(set(data.Country))
attribute_names = data.columns[2:-1].values.tolist()
# Create the main plot
def create_figure(first_country='India',
                  second_country='Pakistan',
                  selected_attribute='income'):
    """Build a Bokeh line figure comparing *selected_attribute* for two
    countries across all years in the loaded data set."""
    # filter datasets according to country
    first_country_data = data[(data.Country == first_country)]
    second_country_data = data[(data.Country == second_country)]

    first_country_data_attribute = list(first_country_data[selected_attribute])
    second_country_data_attribute = list(second_country_data[selected_attribute])
    # NOTE(review): the x axis is taken from the first country only --
    # assumes both countries cover the same year range; confirm.
    years = list(first_country_data["Year"])

    # output to static HTML file
    output_file("gapminder.html")

    # create a new plot: blue line for the first country, green for the second
    p = figure(title="Country Data Analysis", x_axis_label='Years',width=1280, height=720)
    p.line(years, first_country_data_attribute, legend=first_country, line_color="blue", line_width=3)
    p.line(years, second_country_data_attribute, legend=second_country, line_color="green", line_width=3)

    return p
# Index page
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the comparison page; a POST re-plots with the submitted
    country and attribute selections."""
    # Defaults shown on the first (GET) visit.
    first_country = "India"
    second_country = "Pakistan"
    selected_attribute = "income"
    if request.method == 'POST':
        first_country = request.form["first_country"]
        second_country = request.form["second_country"]
        selected_attribute = request.form["selected_attribute"]

    # Create the plot
    plot = create_figure(first_country, second_country, selected_attribute)

    # Embed plot into HTML via Flask Render
    script, div = components(plot)
    return render_template("index.html",
                           script=script,
                           div=div,
                           country_names=country_names,
                           attribute_names=attribute_names,
                           selected_attribute=selected_attribute,
                           first_country=first_country,
                           second_country=second_country)
# With debug=True, Flask server will auto-reload
# when there are code changes
if __name__ == '__main__':
    # NOTE(review): debug mode exposes the Werkzeug debugger -- development only.
    app.run(port=5000, debug=True)
| [
"pybites@projects.bobbelderbos.com"
] | pybites@projects.bobbelderbos.com |
29065c85f253e986f0247830667b223126e205ca | e36a4b7fdcff6e769455502a2cde6ede03c09c96 | /leetcode/sum_2.py | 80a43e364fe4d242343d5ccc3e3d5e9213002c11 | [] | no_license | SmallPuddingComing/Python-learn | 5c09fec5054887b9723c230527697a39642105fc | 49f154fa523c574aed44e440606a494680bd6ef7 | refs/heads/master | 2020-04-06T06:51:36.031265 | 2016-06-14T08:43:02 | 2016-06-14T08:43:02 | 57,961,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | #coding:utf8
class Solution:
    # @return a list of 1-based index tuples (index1, index2)
    def twoSum(self, num, target):
        """Return every pair of 1-based indices (i, j), i < j, such that
        num[i-1] + num[j-1] == target, in order of discovery.

        Fixes from the original: no longer shadows the builtin `dict`,
        replaces the `== None` comparison with a membership test, and drops
        the always-true `if mylist is not None` guard.
        """
        seen = {}   # value -> index of its most recent unmatched occurrence
        pairs = []
        for i, value in enumerate(num):
            complement = target - value
            if complement in seen:
                pairs.append((seen[complement] + 1, i + 1))
            else:
                # Record the value; later duplicates overwrite the index,
                # matching the original behaviour.
                seen[value] = i
        return pairs
if __name__ == '__main__':
    solution = Solution()
    num = [1,3,4,6,5,8]
    # print() as a function works on both Python 2 and 3; the original
    # `print expr` statement was a SyntaxError under Python 3.
    print(solution.twoSum(num, 9))
"1076643147@qq.com"
] | 1076643147@qq.com |
0820fcde3e782e1246cd0d3b958c53095226c1a1 | b2024047dfb29398787aacc4c12a76d99d477479 | /Probleme_22_BECKER_Justine.py | cedbe0f550ef49b613d33cc5f85f8b3e223d0247 | [] | no_license | mines-nancy-tcss5ac-2018/td1-becker261u | 0d5e5b146acbbec2d9d4315a77706f5acf81546f | 6b2ebaa3e6c673a377e5383f2412cebf52cad079 | refs/heads/master | 2020-03-31T22:05:12.325798 | 2018-10-12T10:58:57 | 2018-10-12T10:58:57 | 152,605,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | from math import*
import numpy as np
def ouvre():
    """Read the Project Euler p022 names file and return the names as a list.

    NOTE(review): the path is machine-specific.  The file is one line of
    comma-separated quoted names, so splitting on '","' leaves a stray
    quote on the first and last entries -- they are patched by hand below
    (assumes the file starts with "MARY" and ends with "ALONSO").
    """
    fichier=open('C:/Users/Justi/OneDrive/Documents/Mines Nancy/Informatique/p022_names.txt', 'r')
    L=[]
    for line in fichier.readlines():
        L+=line.split('","')
    # Strip the leading '"' of the first name and the trailing '"' of the last.
    L[0]='MARY'
    L[-1]='ALONSO'
    return L
def convertionalpha(mot):
    """Return the alphabetical value of *mot*: the sum of letter positions
    (A=1, B=2, ..., Z=26).

    Assumes *mot* consists of uppercase ASCII letters, as in the Euler
    p022 names file.  Replaces the original per-letter linear scan of a
    26-entry alphabet list with a direct ord() computation.
    """
    return sum(ord(letter) - ord('A') + 1 for letter in mot)
def solve():
    """Project Euler 22: sum of name_score * (1-based alphabetical rank)
    over the whole name list."""
    L=ouvre()
    L=sorted(L)
    S=0
    for i in range(len(L)):
        # Score = alphabetical value of the name times its rank (i+1).
        X=convertionalpha(L[i])
        S+=(X*(i+1))
    return S
# Prints the p022 answer when run as a script (requires the local data file).
print(solve())
| [
"noreply@github.com"
] | mines-nancy-tcss5ac-2018.noreply@github.com |
070dff76c457f1874707620fb31fec5cf5729171 | 052943e74057f62064e1a0574790696304056f5e | /matplotlib/histogram.py | cf3c77d9d621fb8b036a203b3454e298c7f9eaf5 | [] | no_license | PragayanParamitaMohapatra/Basic_python | 0e9861bdb48f0f7e61f479fef5a3a501b5bd0ae7 | 69c83369a4facbc8d1829c163bc24871124dfff0 | refs/heads/master | 2022-11-30T21:52:06.841696 | 2020-08-06T17:36:43 | 2020-08-06T17:36:43 | 284,977,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 587 | py | import matplotlib.pyplot as plt
blood_sugar_men=[113,85,90,150,149,88,93,115,135,80,77,82,129]
blood_sugar_women=[67,98,89,120,133,150,84,69,89,79,120,112,100]
blood_sugar_can=[113,85,90,150,149,88,93,115,135,80,77,82,129]
blood_sugar_wan=[67,98,89,120,133,150,84,69,89,79,120,112,100]
plt.xlabel("sugar range")
plt.ylabel('Total no. of patients')
plt.title('Blood sugar analysis')
print(plt.hist([blood_sugar_men,blood_sugar_women,blood_sugar_can,blood_sugar_wan],bins=[80,100,125,150],rwidth=0.50,color=["green","yellow","blue","orange"],label=['men','women']))
plt.legend()
plt.show() | [
"pragayanparamitaguddi111@gmail.com"
] | pragayanparamitaguddi111@gmail.com |
603fe7c5e0cc47a01c1d5faccc532aaeb43fdae8 | 6bc94d794a83a9b7e859a7f615197fc564c29761 | /oop_basic/animals.py | 45f97a37c4dc6dee8cdc50834b7cf319361d4b58 | [] | no_license | huchangchun/learn-python3 | b735a4477d5b7b96e8791aedf8424faed8487c3c | f154f80edf91c20e8b596e29e4e9f904c6a3f2bc | refs/heads/master | 2022-03-27T14:21:00.964729 | 2019-12-20T02:03:45 | 2019-12-20T02:03:45 | 115,346,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | # -*- coding:utf-8 -*-
class Animal():
    """Base class: an animal with a name and a favourite food."""

    def __init__(self,name,food):
        self.name = name
        self.food = food

    def eat(self):
        # Announce what this animal likes to eat.
        print('%s like %s'%(self.name,self.food))
def __init__(self,name,food,drink):
#加载父类构造方法
super(Dog,self).__init__(name,food)
self.drink = drink
def drinks(self):
print('%s 爱喝 %s' %(self.name,self.drink))
# Demo: build a Dog and exercise its own and inherited behaviour.
kitty = Dog('kt','骨头','牛奶')
kitty.eat()
kitty.drinks()
# A Dog instance is also an Animal (subclass relationship).
print('kitty is Animal?',isinstance(kitty,Animal))
print('kitty is dog1?',isinstance(kitty,Dog))

# Expected output:
# kt like 骨头
# kt 爱喝 牛奶
# kitty is Animal? True
# kitty is dog1? True
"hu_changchun@126.com"
] | hu_changchun@126.com |
79fbdfec2a57b56432000656e9547fc28d08a855 | fd2ceefb34ed0d9d16fa77ce3f8b8f91096f2c1a | /anyrl/tests/test_players.py | 6b28bf21ba7302d14a738ddc58177fe68026ed3c | [] | no_license | decoderkurt/anyrl-py | d4e433e6e7920b00f8487734ff688ad6e757706b | 94a0d7c2083312358f6c754d79d921a563f8237a | refs/heads/master | 2020-03-14T22:48:01.972856 | 2018-04-25T22:02:50 | 2018-04-25T22:02:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,199 | py | """
Test various Player implementations.
"""
import numpy as np
from anyrl.envs import batched_gym_env
from anyrl.rollouts import BasicPlayer, NStepPlayer, BatchedPlayer
from anyrl.tests.util import SimpleEnv, SimpleModel
def test_nstep_one_step():
    """
    Test an NStepPlayer in the trivial, 1-step case.
    """
    # With n=1 the NStepPlayer must be a transparent wrapper: identical env
    # seeds and models mean both players should emit identical transitions.
    make_env = lambda: SimpleEnv(15, (1, 2, 3), 'float32')
    make_agent = lambda: SimpleModel((1, 2, 3), stateful=True)
    make_basic = lambda: BasicPlayer(make_env(), make_agent(), batch_size=3)
    player1 = make_basic()
    player2 = NStepPlayer(make_basic(), 1)
    for _ in range(100):
        transes1 = player1.play()
        transes2 = player2.play()
        assert len(transes1) == len(transes2)
        for trans1, trans2 in zip(transes1, transes2):
            assert _transitions_equal(trans1, trans2)
def test_nstep_multi_step():
    """
    Test an NStepPlayer in the multi-step case.
    """
    make_env = lambda: SimpleEnv(9, (1, 2, 3), 'float32')
    make_agent = lambda: SimpleModel((1, 2, 3), stateful=True)
    make_basic = lambda: BasicPlayer(make_env(), make_agent(), batch_size=1)
    player1 = make_basic()
    player2 = NStepPlayer(make_basic(), 3)
    raw_trans = [t for _ in range(40) for t in player1.play()]
    nstep_trans = [t for _ in range(40) for t in player2.play()]
    for raw, multi in zip(raw_trans, nstep_trans):
        # Per-step bookkeeping must be untouched by n-step aggregation.
        for key in ['episode_step', 'episode_id', 'is_last']:
            assert raw[key] == multi[key]
        assert np.allclose(raw['model_outs']['actions'][0], multi['model_outs']['actions'][0])
        assert np.allclose(raw['obs'], multi['obs'])
        # The n-step reward list starts with the single-step reward, and the
        # appended future rewards account for the total_reward gap.
        assert raw['rewards'] == multi['rewards'][:1]
        assert raw['total_reward'] + sum(multi['rewards'][1:]) == multi['total_reward']
    for raw, multi in zip(raw_trans[3:], nstep_trans):
        # new_obs of an n-step transition is the observation 3 steps later,
        # unless the episode ended in between (then it is None).
        if multi['new_obs'] is not None:
            assert np.allclose(multi['new_obs'], raw['obs'])
        else:
            assert multi['episode_id'] != raw['episode_id']
def test_nstep_batch_invariance():
    """
    Test that the batch size of the underlying
    Player doesn't affect the NStepPlayer.
    """
    make_env = lambda: SimpleEnv(9, (1, 2, 3), 'float32')
    make_agent = lambda: SimpleModel((1, 2, 3), stateful=True)
    def _gather_transitions(batch_size):
        # Collect at least 50 transitions, then canonicalize their order.
        player = NStepPlayer(BasicPlayer(make_env(), make_agent(), batch_size=batch_size), 3)
        transitions = []
        while len(transitions) < 50:
            transitions.extend(player.play())
        # The NStepPlayer is not required to preserve
        # the order of transitions.
        return sorted(transitions, key=lambda t: (t['episode_id'], t['episode_step']))[:50]
    expected = _gather_transitions(1)
    for batch_size in range(2, 52):
        actual = _gather_transitions(batch_size)
        for trans1, trans2 in zip(expected, actual):
            assert _transitions_equal(trans1, trans2)
def test_single_batch():
    """
    Test BatchedPlayer when the batch size is 1.
    """
    # A batched env with a single sub-env must behave exactly like a plain
    # BasicPlayer over the same (seeded) environment and model.
    make_env = lambda: SimpleEnv(9, (1, 2, 3), 'float32')
    make_agent = lambda: SimpleModel((1, 2, 3), stateful=True)
    basic_player = BasicPlayer(make_env(), make_agent(), 3)
    batched_player = BatchedPlayer(batched_gym_env([make_env]), make_agent(), 3)
    for _ in range(50):
        transes1 = basic_player.play()
        transes2 = batched_player.play()
        assert len(transes1) == len(transes2)
        for trans1, trans2 in zip(transes1, transes2):
            assert _transitions_equal(trans1, trans2)
def test_mixed_batch():
    """
    Test a batch with a bunch of different
    environments.
    """
    # The lambda binds each seed via a default argument to avoid the
    # late-binding closure pitfall.
    env_fns = [lambda s=seed: SimpleEnv(s, (1, 2, 3), 'float32')
               for seed in [3, 3, 3, 3, 3, 3]] #[5, 8, 1, 9, 3, 2]]
    make_agent = lambda: SimpleModel((1, 2, 3), stateful=True)
    for num_sub in [1, 2, 3]:
        batched_player = BatchedPlayer(batched_gym_env(env_fns, num_sub_batches=num_sub),
                                       make_agent(), 3)
        expected_eps = []
        for player in [BasicPlayer(env_fn(), make_agent(), 3) for env_fn in env_fns]:
            transes = [t for _ in range(50) for t in player.play()]
            expected_eps.extend(_separate_episodes(transes))
        actual_transes = [t for _ in range(50) for t in batched_player.play()]
        actual_eps = _separate_episodes(actual_transes)
        assert len(expected_eps) == len(actual_eps)
        # Greedy matching: every expected episode must pair with a distinct
        # actual episode (episode order across envs is unspecified).
        for episode in expected_eps:
            found = False
            for i, actual in enumerate(actual_eps):
                if _episodes_equivalent(episode, actual):
                    del actual_eps[i]
                    found = True
                    break
            assert found
def _separate_episodes(transes):
res = []
for ep_id in set([t['episode_id'] for t in transes]):
res.append([t for t in transes if t['episode_id'] == ep_id])
return res
def _episodes_equivalent(transes1, transes2):
    """Episodes match if they have equal length and every aligned pair of
    transitions is equal (episode IDs ignored)."""
    return (len(transes1) == len(transes2) and
            all(_transitions_equal(t1, t2, ignore_id=True)
                for t1, t2 in zip(transes1, transes2)))
def _transitions_equal(trans1, trans2, ignore_id=False):
for key in ['episode_step', 'total_reward', 'is_last', 'rewards']:
if trans1[key] != trans2[key] and (key != 'episode_id' or not ignore_id):
return False
if trans1['new_obs'] is None:
if trans2['new_obs'] is not None:
return False
else:
if not np.allclose(trans1['new_obs'], trans2['new_obs']):
return False
if (not np.allclose(trans1['model_outs']['actions'][0], trans2['model_outs']['actions'][0]) or
not _states_equal(trans1['model_outs']['states'], trans2['model_outs']['states'])):
return False
if not np.allclose(trans1['obs'], trans2['obs']):
return False
return True
def _states_equal(states1, states2):
if isinstance(states1, tuple):
if not isinstance(states2, tuple):
return False
return all(np.allclose(x, y) for x, y in zip(states1, states2))
else:
return np.allclose(states1, states2)
| [
"unixpickle@gmail.com"
] | unixpickle@gmail.com |
87b976bab6630f39bbfb3e6f0c0d66644899a06b | caaf04a58abe96563df1dbc88abe8594047fded9 | /medium/problem_1492_the_kth_factor_of_n.py | eb8691727c67157af4e5788cac0c6c8138370f2a | [] | no_license | EricMontague/Leetcode-Solutions | f1b09781b0afd60c79d55f65fe0552c80a928ac7 | fd1e40ace51fe2a3cc6dadb3fe5872c7fa149188 | refs/heads/master | 2021-01-09T20:00:15.580735 | 2020-12-14T22:24:24 | 2020-12-14T22:24:24 | 242,441,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,428 | py | """This file contains my solution to Leetcode problem 1492: The kth factor of N."""
# Max Heap Solution
# time complexity: O(sqrt(n) * logk), where 'n' is num and 'k' is the variable 'k'
# space complexity: O(k)
import heapq
import math
class Solution:
    def kthFactor(self, num: int, k: int) -> int:
        """Return the k-th smallest factor of num, or -1 if num has fewer
        than k factors.

        Keeps a max-heap of at most k factors (values are negated because
        heapq implements a min-heap); its root ends up as the k-th factor.
        """
        heap = []
        limit = math.floor(math.sqrt(num))
        for small in range(1, limit + 1):
            if num % small:
                continue
            heapq.heappush(heap, -small)
            large = num // small
            if large != small:
                heapq.heappush(heap, -large)
            # Trim: anything larger than k candidates cannot be the answer.
            while len(heap) > k:
                heapq.heappop(heap)
        if len(heap) < k:
            return -1
        return -heap[0]
# Min Heap Solution
# time complexity: O(sqrt(n)* log sqrt(n))
# space complexity: O(sqrt(n))
import heapq
import math
class Solution:
    def kthFactor(self, num: int, k: int) -> int:
        """Collect every factor of num, heapify, then pop k times."""
        factors = []
        root = math.floor(math.sqrt(num))
        for candidate in range(1, root + 1):
            if num % candidate:
                continue
            factors.append(candidate)
            paired = num // candidate
            if paired != candidate:
                factors.append(paired)
        heapq.heapify(factors)
        return self.get_kth_factor(factors, k)

    def get_kth_factor(self, min_heap, k):
        """Pop the k smallest entries from min_heap and return the last one,
        or -1 if the heap holds fewer than k items."""
        if len(min_heap) < k:
            return -1
        smallest = None
        for _ in range(k):
            smallest = heapq.heappop(min_heap)
        return smallest
# Simple iterative solution
# time complexity: O(sqrt(n))
# space complexity: O(sqrt(n))
class Solution:
    def kthFactor(self, num: int, k: int) -> int:
        """Enumerate divisors up to sqrt(num): the small divisors arrive in
        increasing order and their complements in decreasing order, so the
        k-th factor can be read off by index without sorting."""
        small = []
        large = []
        for d in range(1, int(num ** 0.5) + 1):
            if num % d:
                continue
            small.append(d)
            partner = num // d
            if partner != d:
                large.append(partner)
        if k > len(small) + len(large):
            return -1
        if k <= len(small):
            return small[k - 1]
        # 'large' is in decreasing order; count backwards from its tail.
        return large[-(k - len(small))]
"eric.g.montague@gmail.com"
] | eric.g.montague@gmail.com |
50012f51af28fc60aeaa999055ab4075bff3bb29 | 035730cf12c43f59b76d9809e444b9070c3e5732 | /BOJ_15652.py | b2f8733e3e279a3b26cc9d3056624057379bfe3e | [] | no_license | kimhaggie/Coding_practice | e18153838425874b80a683094369a6dfb8836c93 | a4f2732e5d7a63adae990226073333b88324765a | refs/heads/master | 2023-08-01T11:33:54.071564 | 2021-09-07T14:40:56 | 2021-09-07T14:40:56 | 310,264,349 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | #15652
import sys
import math
def func(i, j, m):
    """Return every non-decreasing length-m list with entries from [i, j]."""
    if m == 1:
        return [[value] for value in range(i, j + 1)]
    result = []
    for first in range(i, j + 1):
        # Tails start at `first` so the sequence never decreases.
        for suffix in func(first, j, m - 1):
            result.append([first] + suffix)
    return result
# BOJ 15652: read N and M, then print every non-decreasing M-tuple over
# 1..N, one per line, space-separated.
n,m = map(int,sys.stdin.readline().rstrip('\n').split(' '))
ans = func(1,n,m)
for x in ans:
    print(' '.join(map(str,x)))
"kimhaggie@gmail.com"
] | kimhaggie@gmail.com |
b0718572e15181513d4f6940c68e176e3433a69f | f0a4ba1f1f941092e68e4b1ef9cff0d3852199ef | /Do_it!/3.검색 알고리즘/해시/chained_hash.py | 304cce40e99b6dc0f40b07abb5cd4a9ee238b869 | [] | no_license | lsb530/Algorithm-Python | d41ddd3ca7675f6a69d322a4646d75801f0022b2 | a48c6df50567c9943b5d7218f874a5c0a85fcc6d | refs/heads/master | 2023-06-18T04:36:09.221769 | 2021-06-28T16:49:35 | 2021-06-28T16:49:35 | 367,775,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,187 | py | # 체인법(chaining)으로 해시 함수 구현하기
# 체인법이란 해시값이 같은 데이터를 체인(chain) 모양의 연결 리스트로 연결하는 방법
# 오픈 해시법(open hashing)이라고도 한다.
from __future__ import annotations
import hashlib
from typing import Any
class Node:
    """A single key/value node in a hash bucket's linked chain."""

    def __init__(self, key: Any, value: Any, next: Node) -> None:
        """Initialize the node."""
        self.key = key      # the key
        self.value = value  # the stored value
        self.next = next    # reference to the next node in the chain
class ChainedHash:
    """Hash table implemented with separate chaining (open hashing)."""

    def __init__(self, capacity: int) -> None:
        """Initialize an empty table with *capacity* buckets."""
        self.capacity = capacity                # number of buckets
        self.table = [None] * self.capacity     # bucket array (chain heads)

    def hash_value(self, key: Any) -> int:
        """Map *key* to a bucket index in [0, capacity)."""
        if isinstance(key, int):
            return key % self.capacity
        # Non-integer keys: hash the SHA-256 digest of their string form.
        return (int(hashlib.sha256(str(key).encode()).hexdigest(), 16) % self.capacity)

    # Search for an element by key
    def search(self, key: Any) -> Any:
        """Return the value stored under *key*, or None if absent."""
        hash = self.hash_value(key)  # bucket index of the key
        p = self.table[hash]         # current node in the chain

        while p is not None:
            if p.key == key:
                return p.value  # found
            p = p.next          # advance along the chain

        return None  # not found

    # Insert an element
    def add(self, key: Any, value: Any) -> bool:
        """Insert (key, value); return False if *key* already exists."""
        hash = self.hash_value(key)  # bucket index of the key
        p = self.table[hash]         # current node in the chain

        while p is not None:
            if p.key == key:
                return False  # duplicate key: insertion rejected
            p = p.next        # advance along the chain

        # Prepend a new node to the bucket's chain.
        temp = Node(key, value, self.table[hash])
        self.table[hash] = temp  # node inserted
        return True              # insertion succeeded

    # Remove an element
    def remove(self, key: Any) -> bool:
        """Delete the node stored under *key*; return True on success."""
        hash = self.hash_value(key)  # bucket index of the key
        p = self.table[hash]         # current node
        pp = None                    # node just before p

        while p is not None:
            if p.key == key:  # key found: unlink p from the chain
                if pp is None:
                    self.table[hash] = p.next
                else:
                    pp.next = p.next
                return True  # removal succeeded
            pp = p
            p = p.next       # advance along the chain
        return False         # removal failed (key not present)

    # Print all elements
    def dump(self) -> None:
        """Print every bucket index followed by its chain."""
        for i in range(self.capacity):
            p = self.table[i]
            print(i, end='')
            while p is not None:
                print(f' -> {p.key} ({p.value})', end='')
                p = p.next
            print()
| [
"lsb530@naver.com"
] | lsb530@naver.com |
bd4162975e1ee1079c46ef6c8b1fc3c18b01a5c1 | 3ba18755bbf53a2e918a79e1c57a48f44ac1e670 | /venv/bin/isort | c3a1d2046d4613f88dd5974e0f230d1d122c8129 | [] | no_license | celaltas/FlaskProject | d57bddf99d807a97981d477048a3a5eb4a97d5a5 | 8a9fe33c970f99b09fcc565391a4f31861780468 | refs/heads/master | 2022-12-25T09:30:22.407644 | 2020-10-10T18:03:35 | 2020-10-10T18:03:35 | 302,960,750 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | #!/home/celal/VSCProjects/FlaskProject/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from isort.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"celal.tas123@gmail.com"
] | celal.tas123@gmail.com | |
84435aed610560af56cf4755bfdce4e91530bb4a | f3693916a8b118bf139364604dac3f51235ed613 | /functional/Components/Distributions/Distribution_POST_Optional/test_TC_43677_2_Distributions_POST_Distributions_Vnet_Pptx_Slide_Option.py | bc58905895d05cefd139b0850212850500682fef | [] | no_license | muktabehera/QE | e7d62284889d8241d22506f6ee20547f1cfe6db1 | 3fedde591568e35f7b80c5bf6cd6732f8eeab4f8 | refs/heads/master | 2021-03-31T02:19:15.369562 | 2018-03-13T02:45:10 | 2018-03-13T02:45:10 | 124,984,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,675 | py | # -*- coding: UTF-8 -*-
"""PFE Component Tests - Distributions.
* TC-43677 - Distributions POST:
Verify that user is able to send slides(PPT, PPTx) for distribution using request POST "/distributions" with VideoNet as public delivery system.
Equivalent test CURL command:
curl -H "Host: <client_host>" -H "Authorization: Bearer <valid_token>"
-X POST -d @<JSON_data_file> -H "Content-Type: application/json"
"<PF_host>://<client_host>/distributions"
Same, with test data:
curl -H "Host: <client_host>" -H "Authorization: Bearer <valid_token>"
-X POST -d @<JSON_data_file> -H "Content-Type: application/json"
"<PF_host>://<client_host>/distributions"
JSON data sent to PathFinder in this test:
{'activationDate': '2017-09-20T07:36:46.542Z',
'distributionPolicy': 'OPTIONAL',
'files': [{'id': 'vnetPPTXOpt',
'sourceUrl': 'qedorigin://Auto_storage/slidex.pptx',
'streamMetadata': {'bitrateKbps': 100,
'contentType': 'UNSPECIFIED',
'height': 5,
'mimeType': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'width': 10}}],
'id': 'vnetSlidePPTXOpt',
'name': 'Distribution with vnet PPTX Slide Opt',
'targetAudiences': [{'id': 'Broadcast_Videonet_Audience'}]}
"""
import pytest
from qe_common import *
logger = init_logger()
@pytest.mark.draft # remove this after script passed unit tests successfuly
@pytest.mark.components
@pytest.allure.story('Distributions')
@pytest.allure.feature('POST')
class Test_PFE_Components(object):
    """PFE Distributions test cases."""

    @pytest.allure.link('https://jira.qumu.com/browse/TC-43677')
    @pytest.mark.Distributions
    @pytest.mark.POST
    def test_TC_43677_POST_Distributions_Distributions_Vnet_Pptx_Slide(self, context):
        """TC-43677 - Distributions-POST
           Verify that user is able to send slides(PPT, PPTx) for distribution using request POST "/distributions" with VideoNet as public delivery system."""
        # Define a test step
        with pytest.allure.step("""Verify that user is able to send slides(PPT, PPTx) for distribution using request POST "/distributions" with VideoNet as public delivery system."""):
            ### Positive test example

            # Test case configuration
            distributionDetails = context.sc.DistributionDetails(
                activationDate='2017-09-20T07:36:46.542Z',
                distributionPolicy='OPTIONAL',
                expirationDate=None,
                files=[{
                    'id': 'vnetPPTXOpt',
                    'sourceUrl': 'qedorigin://Auto_storage/slidex.pptx',
                    'streamMetadata': {
                        'bitrateKbps':
                        100,
                        'width':
                        10,
                        'height':
                        5,
                        'mimeType':
                        'application/vnd.openxmlformats-officedocument.presentationml.presentation',
                        'contentType':
                        'UNSPECIFIED'
                    }
                }],
                id='vnetSlidePPTXOpt',
                name='Distribution with vnet PPTX Slide Opt',
                status=None,
                tags=None,
                targetAudiences=[{
                    'id': 'Broadcast_Videonet_Audience'
                }])

            # createEntity the Distributions.
            # The `check` call validates return code
            # and some of the swagger schema.
            # Most schema checks are disabled.
            response = check(
                context.cl.Distributions.createEntity(
                    body=distributionDetails
                )
            )

            ### Can add tests here to validate the response content

        # Negative variant of the same step: the same payload is sent again
        # and is expected to be rejected (e.g. duplicate ID / bad token).
        with pytest.allure.step("""Verify that user is able to send slides(PPT, PPTx) for distribution using request POST "/distributions" with VideoNet as public delivery system."""):
            ### Negative test example

            # Test case configuration
            distributionDetails = context.sc.DistributionDetails(
                activationDate='2017-09-20T07:36:46.542Z',
                distributionPolicy='OPTIONAL',
                expirationDate=None,
                files=[{
                    'id': 'vnetPPTXOpt',
                    'sourceUrl': 'qedorigin://Auto_storage/slidex.pptx',
                    'streamMetadata': {
                        'bitrateKbps':
                        100,
                        'width':
                        10,
                        'height':
                        5,
                        'mimeType':
                        'application/vnd.openxmlformats-officedocument.presentationml.presentation',
                        'contentType':
                        'UNSPECIFIED'
                    }
                }],
                id='vnetSlidePPTXOpt',
                name='Distribution with vnet PPTX Slide Opt',
                status=None,
                tags=None,
                targetAudiences=[{
                    'id': 'Broadcast_Videonet_Audience'
                }])

            # prepare the request, so we can modify it
            request = context.cl.Distributions.createEntity(
                body=distributionDetails
            )

            ### Invalid JSON Error injection example
            ### Errors that result in valid JSON can be configured above.
            ### Otherwise, uncomment the code below (request.future....)

            # Get the generated payload and corrupt the metric
            # request.future.request.data = request.future.request.data.replace(
            #     '"metric": 1,', '"metric":,'
            # )

            # createEntity the Distributions, and check we got the error we expect
            try:
                client, response = check(
                    request,
                    quiet=True, returnResponse=True
                )
            except (HTTPBadRequest, HTTPForbidden) as e:  # 400, 403 error
                get_error_message(e) | expect.any(
                    should.start_with('may not be empty'),
                    should.start_with('Invalid page parameter specified'),
                    should.contain('Invalid Authorization Token')
                )
            else:
                # No exception means the server accepted the request -- fail.
                raise Exception(
                    "Expected error message, got {} status code instead.".format(
                        response.status_code))
| [
"mbehera@qumu.com"
] | mbehera@qumu.com |
440374b2f3813c91ed4c3c16714460c04352ff1d | 1ba58b17f33122abf4236e9e430a51d375e0eb53 | /km73/Ruban_Yehor/4/task1.py | 3b5e7ca9dacb7e460a733371cb9aa90330097712 | [] | no_license | igortereshchenko/amis_python | c4f8d86b88ab036d08ff0ce35c9b42ebeabecc42 | c6f0f2a70c82d5f269b3078eb296f82271b5bb10 | refs/heads/master | 2021-10-22T16:21:19.990650 | 2017-11-01T07:26:54 | 2017-11-01T07:26:54 | 104,785,028 | 0 | 139 | null | 2020-04-21T21:27:09 | 2017-09-25T18:11:42 | Python | UTF-8 | Python | false | false | 221 | py | a = float(input("Введите первое число - "))
b = float(input("Введите второе число - "))
if a > b:
ans = b
else:
ans = a
print("меньшее число -",ans)
input()
| [
"noreply@github.com"
] | igortereshchenko.noreply@github.com |
9a49844ddc8e2920e88c79f6f3cc8dc15536f458 | cb2ddcde8311d06f99e2308e94c58036a393f592 | /src/byro/members/migrations/0010_memberbalance.py | 8501edb4909802ca93bfb93b0e0de14116c680fc | [
"Apache-2.0"
] | permissive | Lagertonne/byro | 9eea069709a7eeb8a80e024af97bc93fb019efa8 | d2d05b96d75f94848bd8b9af1a556a4a1e080320 | refs/heads/master | 2022-11-19T11:39:29.699413 | 2020-07-14T20:56:16 | 2020-07-14T20:56:16 | 279,690,577 | 0 | 0 | Apache-2.0 | 2020-07-14T20:52:09 | 2020-07-14T20:52:08 | null | UTF-8 | Python | false | false | 2,136 | py | # Generated by Django 2.1.8 on 2019-04-15 12:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: introduces the ``MemberBalance`` model.

    A balance is an amount owed by a member for a time span [start, end),
    tracked through the payment states paid/partial/unpaid.
    """
    dependencies = [("members", "0009_auto_20180512_1810")]
    operations = [
        migrations.CreateModel(
            name="MemberBalance",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                # External identifier, e.g. an invoice number; optional but
                # unique when present.
                (
                    "reference",
                    models.CharField(
                        blank=True,
                        help_text="For example an invoice number or a payment reference",
                        max_length=50,
                        null=True,
                        unique=True,
                        verbose_name="Reference",
                    ),
                ),
                (
                    "amount",
                    models.DecimalField(
                        decimal_places=2, max_digits=8, verbose_name="Amount"
                    ),
                ),
                ("start", models.DateTimeField(verbose_name="Start")),
                ("end", models.DateTimeField(verbose_name="End")),
                (
                    "state",
                    models.CharField(
                        choices=[
                            ("paid", "paid"),
                            ("partial", "partially paid"),
                            ("unpaid", "unpaid"),
                        ],
                        default="unpaid",
                        max_length=7,
                    ),
                ),
                # PROTECT: a member with balances cannot be deleted.
                (
                    "member",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="balances",
                        to="members.Member",
                    ),
                ),
            ],
        )
    ]
| [
"r@rixx.de"
] | r@rixx.de |
0e2c57e01d581a53bb2452113c588ec91aa41688 | 93cc6795fc1b7f6a06b08480ad4fbde46fa02c7c | /base/files_for_ssh/update_or_create_bonus_template.py | 74007cf86b0fcbe1dbbc57ee7b22107730a02253 | [] | no_license | A-Zorg/msw_api | 4a04eae9738e77b528e79496b6653d3a07109ca5 | ccd73b7675f3d477a2eec30808eff975a247e70c | refs/heads/master | 2023-08-16T08:51:47.506514 | 2021-09-20T19:21:46 | 2021-09-20T19:21:46 | 325,813,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | from index.models import CustomUser
from reconciliation.models import Bonus
from accounting_system.models import AccountType
# Template executed over SSH; the {ACC_ID} and {VALUE} placeholders are
# substituted with concrete values before the script is run.
# Update the Bonus row for the given account type, creating it if missing.
acc_type = AccountType.objects.get(id={ACC_ID})
try:
    bonus_object = Bonus.objects.get(account_type=acc_type)
    bonus_object.decimal_percentage = {VALUE}
    bonus_object.save()
except Bonus.DoesNotExist:
    # Previously a bare `except:` hid real failures (DB errors,
    # MultipleObjectsReturned, typos) behind a silent create attempt.
    # `.create()` already persists the row, so no extra save() is needed.
    bonus_object = Bonus.objects.create(
        account_type=acc_type,
        decimal_percentage={VALUE}
    )
| [
"you@example.com"
] | you@example.com |
4ef44e5d20b17902e764ba4b376780476f7c678e | d3efc82dfa61fb82e47c82d52c838b38b076084c | /ETF/Creation_SA/YW_ETFSS_SZSG_060.py | 37d438d6681057abde41659bd9c6bf133b8f17ed | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,234 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
import time
sys.path.append("/home/yhl2/workspace/xtp_test/ETF")
from import_common import *
sys.path.append("/home/yhl2/workspace/xtp_test/ETF/etf_service")
from ETF_Basket_Add_Real import etf_basket_add_real
from ETF_GetComponentShare import etf_get_all_component_stk
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_ETFSS_SZSG_060(xtp_test_case):
    """Shenzhen ETF creation case: order fully fills at max unit quantity."""
    def test_YW_ETFSS_SZSG_060(self):
        # ----------- ETF creation (purchase) -----------
        # Title (logged, kept in Chinese): "Shenzhen ETF creation -- fully
        # traded (max unit quantity & fee > min)".
        title = '深圳ETF申购--全部成交(数量最大单位&费用>min)'
        # Expected values for this test case.
        # The '期望状态' (expected status) field may be one of: initial, not
        # traded, partially traded, fully traded, partial-cancel reported,
        # partially cancelled, reported pending cancel, cancelled, rejected,
        # cancel-rejected, internally cancelled.
        # xtp_ID and cancel_xtpID default to 0 and need no change here.
        case_goal = {
            'case_ID': 'ATC-202-59',
            '期望状态': '全成',
            'errorID': 0,
            'errorMSG': '',
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title + ', case_ID=' + case_goal['case_ID'])
        unit_info = {
            'ticker': '169165',  # ETF ticker code
            'etf_unit': 9,  # number of ETF creation units to purchase
        }
        # ----------- Query component-stock holdings before the creation ----
        component_stk_info = etf_get_all_component_stk(unit_info['ticker'])
        # ----------- Place the ETF creation order -----------
        # Args: ticker, market, security type, security status, trading
        # status, side ('B' = buy, 'S' = sell), expected status, Api.
        stkparm = QueryEtfQty(unit_info['ticker'], '2', '14', '2', '0',
                              'B', case_goal['期望状态'], Api)
        # Build the order request parameters.
        # If fetching the order parameters failed, the test case fails.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '用例错误原因': '获取下单参数失败, ' + stkparm['错误原因'],
            }
            etf_query_log(case_goal, rs)
            self.assertEqual(rs['用例测试结果'], True)
        else:
            # Quantity = creation units * minimum creation/redemption unit.
            wt_reqs = {
                'business_type':
                Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_ETF'],
                'market':
                Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
                'ticker':
                stkparm['证券代码'],
                'side':
                Api.const.XTP_SIDE_TYPE['XTP_SIDE_PURCHASE'],
                'price_type':
                Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
                'quantity':
                int(unit_info['etf_unit'] * stkparm['最小申赎单位']),
            }
            EtfParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = etfServiceTest(Api, case_goal, wt_reqs, component_stk_info)
            etf_creation_log(case_goal, rs)
if __name__ == '__main__':
unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
a3a2cfa8d5e7e73ab92e6d6d8daef6b1f6fadefe | f4efa1e5ef98616b0044a96e08fede1584a5a5a5 | /atom-editor/lilac.py | 1305d1741a2f23ae0165465e8471cd80acbc652c | [] | no_license | LawrenceCWC/repo | fcafda53570c03703a8988b1bce17a2184a4f6bc | c2d614e11a9ea1b9c4bc9bd08b041bf9ee8ce2cd | refs/heads/master | 2020-12-25T04:19:43.803869 | 2016-06-22T12:28:55 | 2016-06-22T12:28:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | #!/usr/bin/env python3
#
# This is a complex version of lilac.py for building
# a package from AUR.
#
# You can do something before/after building a package,
# including modify the 'pkgver' and 'md5sum' in PKBUILD.
#
# This is especially useful when a AUR package is
# out-of-date and you want to build a new one, or you
# want to build a package directly from sourceforge but
# using PKGBUILD from AUR.
#
# See also:
# [1] ruby-sass/lilac.py
# [2] aufs3-util-lily-git/lilac.py
# [3] octave-general/lilac.py
#
from lilaclib import *
# Build inside the 'extra' x86_64 chroot.
build_prefix = 'extra-x86_64'
# Reuse lilaclib's standard AUR hooks for the pre-/post-build steps.
pre_build = aur_pre_build
post_build = aur_post_build
# do some cleanup here after building the package, regardless of result
# def post_build_always(success):
#     pass
if __name__ == '__main__':
    single_main(build_prefix)
| [
"farseerfc@gmail.com"
] | farseerfc@gmail.com |
0abeef728ce5d91b2a87d84cc216dc46f6697521 | 830f50885bbf7cdeffc08097b55f662a498cf518 | /python/downloader/FileSelectionDialog.py | 00f5a2ffa23e3a11bbed65bf96d9e9293f090628 | [] | no_license | precimilo/mcandre | 86b2e77e28e3bd14d02e40eb9978ae4b7ccf9fcd | e9ab9e3fce7aba93b6528c40e06bde4ae0d461a7 | refs/heads/master | 2020-12-30T17:32:37.406612 | 2013-05-09T18:46:46 | 2013-05-09T18:47:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,019 | py | # FileSelectionDialog.py - John Finlay (http://www.pygtk.org/pygtk2tutorial/index.html)
# Andrew Pennebaker
import gtk
class FileSelectionDialog:
    """Modal wrapper around ``gtk.FileSelection``.

    The constructor blocks (pumping the GTK event loop) until the user
    clicks OK or Cancel; the outcome is then available via ``state`` and
    ``getFileName()``.
    """
    # Possible values of `state`.
    PENDING="Pending"
    OK="OK"
    CANCEL="Cancel"
    def __init__(self, titleText="File Selecion", selectionText=""):
        """Show the dialog and block until the user dismisses it.

        titleText: window title for the dialog.
        selectionText: initial text of the file-selection entry.
        """
        self.state=self.PENDING
        # Fixed: fileName was previously only set in okEvent, so calling
        # getFileName() after a cancel raised AttributeError.
        self.fileName=None
        self.fileSelection=gtk.FileSelection(title=titleText)
        self.fileSelection.selection_entry.set_text(selectionText)
        self.fileSelection.ok_button.connect("clicked", self.okEvent)
        self.fileSelection.cancel_button.connect("clicked", self.cancelEvent)
        self.fileSelection.show_all()
        # Pump the GTK main loop until a button handler changes `state`.
        # NOTE(review): this spins when no events are pending — consider a
        # blocking gtk.main_iteration(); left unchanged to preserve behavior.
        while self.state==self.PENDING:
            while gtk.events_pending():
                gtk.main_iteration()
    def okEvent(self, widget=None, event=None, data=None):
        """OK clicked: remember the chosen file name and close the dialog."""
        self.fileName=self.fileSelection.get_filename()
        self.state=self.OK
        self.fileSelection.destroy()
    def cancelEvent(self, widget=None, event=None, data=None):
        """Cancel clicked: close the dialog without recording a selection."""
        self.state=self.CANCEL
        self.fileSelection.destroy()
    def getFileName(self):
        """Return the selected path, or None if the dialog was cancelled."""
        return self.fileName
"andrew.pennebaker@gmail.com"
] | andrew.pennebaker@gmail.com |
48283e3e193ef9a2997ee9f3474a16b8b385f713 | b5f38cc8a97f67ba1df0bea0e111ad0d3f14dc13 | /test/asyncio_tests/test_asyncio_bulk.py | 6cb0336378f57f8d9eaf68d25394b666a1aa11ff | [
"Apache-2.0"
] | permissive | yutiansut/motor | 10cb8e68e8c776fa33262608e13b611941fbdb13 | 132352beda3e4215e68991c5165b4ccd06e71a2c | refs/heads/master | 2021-09-24T07:11:36.995179 | 2018-01-12T10:05:58 | 2018-01-12T10:05:58 | 106,511,806 | 1 | 0 | Apache-2.0 | 2018-10-05T02:19:35 | 2017-10-11T05:59:51 | Python | UTF-8 | Python | false | false | 3,292 | py | # Copyright 2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Motor's bulk API with asyncio."""
import unittest
from pymongo.errors import BulkWriteError
from motor.motor_asyncio import AsyncIOMotorBulkOperationBuilder
from test.asyncio_tests import asyncio_test, AsyncIOTestCase
class TestAsyncIOBulk(AsyncIOTestCase):
    # Little testing is needed: Most of the logic is in PyMongo, and Motor's
    # bulk operations are lightly tested with Tornado already.
    @asyncio_test(timeout=30)
    def test_multiple_error_ordered_batch(self):
        """An ordered bulk op stops at the first unique-key violation.

        Only the first insert lands; the duplicate-key upsert aborts the
        batch (expects error code 65) and nothing after it is applied.
        """
        yield from self.collection.delete_many({})
        # Unique index on 'a' is what makes the later upserts conflict.
        yield from self.collection.create_index('a', unique=True)
        try:
            bulk = self.collection.initialize_ordered_bulk_op()
            self.assertTrue(isinstance(bulk,
                                       AsyncIOMotorBulkOperationBuilder))
            bulk.insert({'b': 1, 'a': 1})
            bulk.find({'b': 2}).upsert().update_one({'$set': {'a': 1}})
            bulk.find({'b': 3}).upsert().update_one({'$set': {'a': 2}})
            bulk.find({'b': 2}).upsert().update_one({'$set': {'a': 1}})
            bulk.insert({'b': 4, 'a': 3})
            bulk.insert({'b': 5, 'a': 1})
            try:
                yield from bulk.execute()
            except BulkWriteError as exc:
                result = exc.details
                # 65 == MultipleErrorsOccurred for a failed bulk write.
                self.assertEqual(exc.code, 65)
            else:
                self.fail("Error not raised")
            # Ordered semantics: only the first insert succeeded.
            self.assertEqual(1, result['nInserted'])
            self.assertEqual(1, len(result['writeErrors']))
            cursor = self.collection.find({}, {'_id': False})
            docs = yield from cursor.to_list(None)
            self.assertEqual([{'a': 1, 'b': 1}], docs)
        finally:
            yield from self.collection.drop()
    @asyncio_test
    def test_single_unordered_batch(self):
        """An unordered batch of mixed inserts/updates/removes succeeds.

        Verifies the upsert is reported at operation index 2 and that the
        final distinct values of 'a' reflect the remove of {'a': 3}.
        """
        yield from self.collection.delete_many({})
        bulk = self.collection.initialize_unordered_bulk_op()
        self.assertTrue(isinstance(bulk,
                                   AsyncIOMotorBulkOperationBuilder))
        bulk.insert({'a': 1})
        bulk.find({'a': 1}).update_one({'$set': {'b': 1}})
        bulk.find({'a': 2}).upsert().update_one({'$set': {'b': 2}})
        bulk.insert({'a': 3})
        bulk.find({'a': 3}).remove()
        result = yield from bulk.execute()
        self.assertEqual(0, len(result['writeErrors']))
        upserts = result['upserted']
        self.assertEqual(1, len(upserts))
        # The upsert was the third queued operation (index 2).
        self.assertEqual(2, upserts[0]['index'])
        self.assertTrue(upserts[0].get('_id'))
        a_values = yield from self.collection.distinct('a')
        self.assertEqual(
            set([1, 2]),
            set(a_values))
if __name__ == '__main__':
unittest.main()
| [
"jesse@mongodb.com"
] | jesse@mongodb.com |
48aa07668d158a84f581a1cc6189dcb51bb02fd9 | eaa71d0669f9f161c15dc45c37fadb1ce2bcea9e | /Pagina Web/CP_S10/app/rutas.py | b6b347366bc23ec9a3952df3657ed7d6bd53528b | [] | no_license | luiskar268/Ciclo-3 | 566f4bec8af5f05ff458d698c384238579e095d6 | 5a076959c1c0958290133197f9dde8d0e7f1a388 | refs/heads/master | 2023-08-13T14:43:06.925887 | 2021-10-08T02:37:26 | 2021-10-08T02:37:26 | 407,919,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | from app import app
from flask import render_template
from app.forms import FormInicio
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page with placeholder user and comment data."""
    # Stub data until real models are wired in.
    current_user = {'usuario': '...'}
    comment_list = [
        {'autor': {'usuario': '...'}, 'comentario': '...'},
        {'autor': {'usuario': '...'}, 'comentario': '...'},
    ]
    return render_template(
        'index.html',
        titulo="Inicio",
        usuario=current_user,
        comentarios=comment_list,
    )
@app.route('/login')
def login():
    """Render the login page with a fresh FormInicio instance."""
    form = FormInicio()
    return render_template('iniciar_sesion.html',titulo='Iniciar Sesión', form=form)
"you@example.com"
] | you@example.com |
5f9004f06c556e9677e12b41dec0c7c9d095e410 | 3ffeeae8a9a3245d8998d94aa08f680f00056cad | /26.删除排序数组中的重复项.py | caabdfab8cced80dd2fba20d79a5e03d77e43c4d | [] | no_license | Ezi4Zy/leetcode | 6e293e5c07a7d8c3e38f9445ff24330134ef6c48 | 9d394cd2862703cfb7a7b505b35deda7450a692e | refs/heads/master | 2022-04-09T14:11:36.957861 | 2022-03-09T10:30:30 | 2022-03-09T10:30:30 | 57,290,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | #
# @lc app=leetcode.cn id=26 lang=python
#
# [26] 删除排序数组中的重复项
#
# @lc code=start
class Solution(object):
    def removeDuplicates(self, nums):
        """Remove duplicates from a sorted list in place.

        :type nums: List[int]
        :rtype: int -- count k of unique elements; nums[:k] holds them
        """
        # Fixed: the original returned begin + 1 == 1 for an empty list
        # instead of 0.
        if not nums:
            return 0
        # Two pointers: `begin` indexes the last unique element written so
        # far, `end` scans ahead for the next distinct value.
        begin = 0
        end = 1
        while end < len(nums):
            if nums[begin] != nums[end]:
                begin += 1
                nums[begin] = nums[end]
            end += 1
        return begin+1
# @lc code=end
| [
"Ezi4zy@163.com"
] | Ezi4zy@163.com |
be24d8d03ccdda316fc996f645d73db8ce92e3b6 | 9f387c703dbf4d970d0259424c7b299108c369f5 | /dd_sdk_1_0/dd_sdk_1_0/models/snmp_username.py | 0999f08689f3dea29ff5b00fe9032335d786557e | [] | no_license | gcezaralmeida/datadomain_sdk_python | c989e6846bae9435c523ab09e230fc12d020f7f1 | e102ec85cea5d888c8329626892347571832e079 | refs/heads/main | 2023-08-23T22:42:47.083754 | 2021-10-25T21:52:49 | 2021-10-25T21:52:49 | 370,805,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,753 | py | # coding: utf-8
"""
DataDomain Rest API Documentation
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from dd_sdk_1_0.configuration import Configuration
class SnmpUsername(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Both mappings are empty: this model declares no properties of its own.
    swagger_types = {
    }
    attribute_map = {
    }
    def __init__(self, _configuration=None):  # noqa: E501
        """SnmpUsername - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        self.discriminator = None
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models found in lists and dicts.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Include raw dict entries if the generated model subclasses dict.
        if issubclass(SnmpUsername, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SnmpUsername):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, SnmpUsername):
            return True
        return self.to_dict() != other.to_dict()
| [
"root@s6006st157.petrobras.biz"
] | root@s6006st157.petrobras.biz |
de1022e98235b9d739417fb743639087694beff0 | cd921f57b4ea51f8cb598c76e6766dc530909b8a | /tensorflow_federated/python/tensorflow_libs/tensor_utils_test.py | e928317481ee4b261d976b9a17a77600e5ef5f55 | [
"Apache-2.0"
] | permissive | Catherineylp/federated | f4d30d8eb7fa718ac5d1a62549f244d03120cc73 | 7a5549f3fb0eb2e3b5cdcb4788a8856cbfa17416 | refs/heads/master | 2021-07-12T19:01:44.935095 | 2020-09-21T02:24:28 | 2020-09-21T02:24:28 | 202,102,353 | 0 | 0 | Apache-2.0 | 2019-08-13T08:45:24 | 2019-08-13T08:45:23 | null | UTF-8 | Python | false | false | 7,678 | py | # Lint as: python3
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensor_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
from tensorflow_federated.python.common_libs import test
from tensorflow_federated.python.tensorflow_libs import tensor_utils
class TensorUtilsTest(test.TestCase):
  """Unit tests for the helpers in `tensor_utils`."""
  def test_check_nested_equal(self):
    """check_nested_equal accepts equal structures and rejects mismatches."""
    nested_dict = {
        'KEY1': {
            'NESTED_KEY': 0
        },
        'KEY2': 1,
    }
    nested_list = [('KEY1', ('NESTED_KEY', 0)), ('KEY2', 1)]
    flat_dict = {
        'KEY1': 0,
        'KEY2': 1,
    }
    nested_dtypes = {
        'x': [tf.int32, tf.float32],
        'y': tf.float32,
    }
    nested_shapes = {
        # N.B. tf.TensorShape([None]) == tf.TensorShape([None])
        # returns False, so we can't use a None shape here.
        'x': [[1], [3, 5]],
        'y': [1],
    }
    # Should not raise an exception.
    tensor_utils.check_nested_equal(nested_dict, nested_dict)
    tensor_utils.check_nested_equal(nested_list, nested_list)
    tensor_utils.check_nested_equal(flat_dict, flat_dict)
    tensor_utils.check_nested_equal(nested_dtypes, nested_dtypes)
    tensor_utils.check_nested_equal(nested_shapes, nested_shapes)
    with self.assertRaises(TypeError):
      tensor_utils.check_nested_equal(nested_dict, nested_list)
    with self.assertRaises(ValueError):
      # Different nested structures.
      tensor_utils.check_nested_equal(nested_dict, flat_dict)
    # Same as nested_dict, but using float values. Equality still holds for
    # 0 == 0.0 despite different types.
    nested_dict_different_types = {
        'KEY1': {
            'NESTED_KEY': 0.0
        },
        'KEY2': 1.0,
    }
    tf.nest.assert_same_structure(nested_dict, nested_dict_different_types)
    # Same as nested_dict but with one different value
    nested_dict_different_value = {
        'KEY1': {
            'NESTED_KEY': 0.5
        },
        'KEY2': 1.0,
    }
    with self.assertRaises(ValueError):
      tensor_utils.check_nested_equal(nested_dict, nested_dict_different_value)
    tensor_utils.check_nested_equal([None], [None])
    def always_neq(x, y):
      del x, y
      return False
    # A custom comparison function decides element equality.
    with self.assertRaises(ValueError):
      tensor_utils.check_nested_equal([1], [1], always_neq)
  def test_to_var_dict(self):
    """to_var_dict maps variable names to variables, rejecting non-lists."""
    v1 = tf.Variable(0, name='v1')
    v2 = tf.Variable(0, name='v2')
    d0 = tensor_utils.to_var_dict([])
    self.assertIsInstance(d0, collections.OrderedDict)
    self.assertEmpty(d0)
    d1 = tensor_utils.to_var_dict([v1])
    self.assertIsInstance(d1, collections.OrderedDict)
    self.assertLen(d1, 1)
    self.assertEqual(d1['v1'], v1)
    d2 = tensor_utils.to_var_dict([v1, v2])
    self.assertIsInstance(d2, collections.OrderedDict)
    self.assertLen(d2, 2)
    self.assertEqual(d2['v1'], v1)
    self.assertEqual(d2['v2'], v2)
    with self.assertRaises(TypeError):
      tensor_utils.to_var_dict(v1)
    with self.assertRaises(TypeError):
      tensor_utils.to_var_dict([tf.constant(1)])
  def test_to_var_dict_preserves_order(self):
    """The returned OrderedDict keeps the input variable order."""
    a = tf.Variable(0, name='a')
    b = tf.Variable(0, name='b')
    c = tf.Variable(0, name='c')
    var_dict = tensor_utils.to_var_dict([c, a, b])
    self.assertEqual(['c', 'a', 'b'], list(var_dict.keys()))
  def test_to_var_dict_duplicate_names(self):
    """Duplicate Python-level names are rejected even if tf names differ."""
    v1 = tf.Variable(0, name='foo')
    v2 = tf.Variable(0, name='foo')
    assert v1.name == v2.name
    with self.assertRaisesRegexp(ValueError, 'multiple.*foo'):
      tensor_utils.to_var_dict([v1, v2])
  def test_to_odict(self):
    """to_odict converts plain dicts with string keys to OrderedDicts."""
    d1 = {'b': 2, 'a': 1}
    odict1 = tensor_utils.to_odict(d1)
    self.assertIsInstance(odict1, collections.OrderedDict)
    self.assertCountEqual(d1, odict1)
    odict2 = tensor_utils.to_odict(odict1)
    self.assertEqual(odict1, odict2)
    with self.assertRaises(TypeError):
      tensor_utils.to_odict({1: 'a', 2: 'b'})
  def test_zero_all_if_any_non_finite(self):
    """Structures are passed through unless any leaf is NaN/inf, then zeroed."""
    def expect_ok(structure):
      # Finite input: expect the structure back unchanged and error flag 0.
      with tf.Graph().as_default():
        result, error = tensor_utils.zero_all_if_any_non_finite(structure)
        with self.session() as sess:
          result, error = sess.run((result, error))
        try:
          tf.nest.map_structure(np.testing.assert_allclose, result, structure)
        except AssertionError:
          self.fail('Expected to get input {} back, but instead got {}'.format(
              structure, result))
        self.assertEqual(error, 0)
    expect_ok([])
    expect_ok([(), {}])
    expect_ok(1.1)
    expect_ok([1.0, 0.0])
    expect_ok([1.0, 2.0, {'a': 0.0, 'b': -3.0}])
    def expect_zeros(structure, expected):
      # Non-finite input: expect the zeroed structure and error flag 1.
      with tf.Graph().as_default():
        result, error = tensor_utils.zero_all_if_any_non_finite(structure)
        with self.session() as sess:
          result, error = sess.run((result, error))
        try:
          tf.nest.map_structure(np.testing.assert_allclose, result, expected)
        except AssertionError:
          self.fail('Expected to get zeros, but instead got {}'.format(result))
        self.assertEqual(error, 1)
    expect_zeros(np.inf, 0.0)
    expect_zeros((1.0, (2.0, np.nan)), (0.0, (0.0, 0.0)))
    expect_zeros((1.0, (2.0, {
        'a': 3.0,
        'b': [[np.inf], [np.nan]]
    })), (0.0, (0.0, {
        'a': 0.0,
        'b': [[0.0], [0.0]]
    })))
  def test_is_scalar_with_list(self):
    """Non-tensor inputs such as lists raise TypeError."""
    self.assertRaises(TypeError, tensor_utils.is_scalar, [10])
  def test_is_scalar_with_bool(self):
    """Plain Python bools are not tensors and raise TypeError."""
    self.assertRaises(TypeError, tensor_utils.is_scalar, True)
  def test_is_scalar_with_tf_constant(self):
    """A rank-0 tf.constant is a scalar."""
    self.assertTrue(tensor_utils.is_scalar(tf.constant(10)))
  def test_is_scalar_with_scalar_tf_variable(self):
    """A rank-0 tf.Variable is a scalar."""
    self.assertTrue(tensor_utils.is_scalar(tf.Variable(0.0, 'scalar')))
  def test_is_scalar_with_nonscalar_tf_variable(self):
    """A rank-1 tf.Variable is not a scalar."""
    self.assertFalse(
        tensor_utils.is_scalar(tf.Variable([0.0, 1.0], 'notscalar')))
  def test_same_shape(self):
    """same_shape compares TensorShapes, treating unknown dims as equal."""
    self.assertTrue(
        tensor_utils.same_shape(tf.TensorShape(None), tf.TensorShape(None)))
    self.assertTrue(
        tensor_utils.same_shape(tf.TensorShape([None]), tf.TensorShape([None])))
    self.assertTrue(
        tensor_utils.same_shape(tf.TensorShape([1]), tf.TensorShape([1])))
    self.assertTrue(
        tensor_utils.same_shape(
            tf.TensorShape([None, 1]), tf.TensorShape([None, 1])))
    self.assertTrue(
        tensor_utils.same_shape(
            tf.TensorShape([1, 2, 3]), tf.TensorShape([1, 2, 3])))
    self.assertFalse(
        tensor_utils.same_shape(tf.TensorShape(None), tf.TensorShape([1])))
    self.assertFalse(
        tensor_utils.same_shape(tf.TensorShape([1]), tf.TensorShape(None)))
    self.assertFalse(
        tensor_utils.same_shape(tf.TensorShape([1]), tf.TensorShape([None])))
    self.assertFalse(
        tensor_utils.same_shape(tf.TensorShape([1]), tf.TensorShape([2])))
    self.assertFalse(
        tensor_utils.same_shape(tf.TensorShape([1, 2]), tf.TensorShape([2, 1])))
if __name__ == '__main__':
test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.