blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2f38302532a1db3c972e4413e332da4c2144cc11 | f7cb41fe0859d80574e412f51280376c8e3c9e0f | /yard/skills/66-python/cookbook/yvhai/demo/std/collections/dict.py | f4194347689f807351b82b4ba8185b92fb9885ed | [
"Apache-2.0"
] | permissive | bbxyard/bbxyard | 7ca771af10237ae9a6d758baf26d78d110e8c296 | 5bf32150fa5cade9e4d9063037e5a03e587f9577 | refs/heads/master | 2023-01-14T17:39:04.191739 | 2020-08-18T06:56:36 | 2020-08-18T06:56:36 | 11,716,364 | 1 | 2 | Apache-2.0 | 2022-12-30T01:25:47 | 2013-07-28T06:48:59 | C | UTF-8 | Python | false | false | 2,910 | py | # 实现multdict
from collections import defaultdict, OrderedDict, Counter
from yvhai.demo.base import YHDemo
class OrderedDictDemo(YHDemo):
"""OrderedDict"""
def __init__(self):
super(OrderedDictDemo, self).__init__("OrderDict")
@staticmethod
def demo(args=[]):
od = OrderedDict()
od['foo'] = 1
od['bar'] = 2
od['barfoo'] = 3
od['foobar'] = 4
print(od)
class DefaultDictDemo(YHDemo):
"""multdict"""
def __init__(self):
super(DefaultDictDemo, self).__init__('DefaultDict')
@staticmethod
def demo_mult_dict_by_set():
md = defaultdict(set)
md["apple"].add("苹果")
md["apple"].add("iPhone")
md["apple"].add("乔布斯")
md["windows"].add("窗户")
md["windows"].add("MS")
md["windows"].add("Bill Gates")
return md
@staticmethod
def demo_mult_dict_by_list():
md1 = DefaultDictDemo.demo_mult_dict_by_set()
md2 = defaultdict(list)
for key, vset in md1.items():
for value in vset:
md2[key].append(value)
print(md1)
print(md2)
@staticmethod
def demo(args=[]):
DefaultDictDemo.demo_mult_dict_by_set()
DefaultDictDemo.demo_mult_dict_by_list()
class WordCount(YHDemo):
"""词频统计"""
@classmethod
def stat_by_dict(cls, word_list):
cls.mark_section("WC: 通过d.setdefault, 规避判空")
d = {}
for word in word_list:
d.setdefault(word, 0)
d[word] += 1
print(d)
@classmethod
def stat_by_default_dict(cls, word_list):
cls.mark_section("WC: 通过defaultdict")
d = defaultdict(int)
for word in word_list:
d[word] += 1
print(d)
@classmethod
def stat_by_counter(cls, word_list):
cls.mark_section("WC: By Counter")
d = Counter(word_list)
print(d)
@classmethod
def demo(cls):
word_list = ["Linux", "Windows", "MacOS", "Linux", "Linux", "Cygwin", "MacOS", "Windows"]
cls.stat_by_dict(word_list)
cls.stat_by_default_dict(word_list)
cls.stat_by_counter(word_list)
class NestedDict(YHDemo):
"""嵌套dict"""
@classmethod
def gen_default(cls):
return {"name": "", "nums": 0}
@classmethod
def demo(cls, args=[]):
d = defaultdict(cls.gen_default)
d["group1"]["name"] = "g1"
d["group1"]["nums"] += 1
d["group1"]["nums"] += 1
d["group2"]["name"] = "g2"
d["group2"]["nums"] += 10
d["group2"]["nums"] += 1
print(d)
class DictDemo(YHDemo):
def __init__(self):
super(DictDemo, self).__init__('Dict')
@staticmethod
def demo(args=[]):
OrderedDictDemo.demo()
DefaultDictDemo.demo()
WordCount.demo()
NestedDict.demo()
| [
"bbxyard@gmail.com"
] | bbxyard@gmail.com |
6d9ad99200ba4e3cdf7b88a7da2787de0df12c8b | afde521f50b6be4be9e5c3071ed6459419fb5edb | /env/lib/python3.6/site-packages/pyecharts/charts/scatter3D.py | 35198ac9849fd58ec18641bff9956994438195d7 | [] | no_license | guhongcheng/myblog | ddef4aa0888dedfb70933b34bfd0c5da5bb5d5cd | b11f5ee26125b9551b1f27814b96a845dd4e6a76 | refs/heads/master | 2022-12-18T20:26:46.596014 | 2018-07-26T02:46:07 | 2018-07-26T02:46:07 | 134,683,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,712 | py | # coding=utf-8
from pyecharts.chart import Chart
import pyecharts.constants as constants
class Scatter3D(Chart):
"""
<<< 3D 散点图 >>>
"""
def __init__(self, title="", subtitle="", **kwargs):
kwargs["renderer"] = constants.CANVAS_RENDERER
super(Scatter3D, self).__init__(title, subtitle, **kwargs)
self._js_dependencies.add("echartsgl")
def add(self, *args, **kwargs):
self.__add(*args, **kwargs)
def __add(self, name, data, grid3d_opacity=1, **kwargs):
"""
:param name:
系列名称,用于 tooltip 的显示,legend 的图例筛选
:param data:
数据项,数据中,每一行是一个『数据项』,每一列属于一个『维度』
:param grid3d_opacity:
3D 笛卡尔坐标系组的透明度(点的透明度),默认为 1,完全不透明。
:param kwargs:
"""
kwargs.update(
xaxis3d_type="value", yaxis3d_type="value", zaxis3d_type="value"
)
chart = self._get_all_options(**kwargs)
self._option.get("legend")[0].get("data").append(name)
self._option.update(
xAxis3D=chart["xaxis3D"],
yAxis3D=chart["yaxis3D"],
zAxis3D=chart["zaxis3D"],
grid3D=chart["grid3D"],
)
self._option.get("series").append(
{
"type": "scatter3D",
"name": name,
"data": data,
"label": chart["label"],
"itemStyle": {"opacity": grid3d_opacity},
}
)
self._config_components(**kwargs)
| [
"1051712303@qq.com"
] | 1051712303@qq.com |
dba87a9b580d39b7e8694daed7b9a5cb06a8db56 | 998a180e5c974d89c9ad33532d4fd33298c806a4 | /chapter1_arrays_and_strings/palindrome_permutation_1_4.py | 9fff45e7b07a41cdbf04a5422ddc172fcfa0d501 | [] | no_license | Taycode/cracking-the-coding-interview-solutions | c542a047a37b5af406469ba3f912b4bbdc326b05 | 0c2dcc4d4558dc4766b5ddcce470a60986eb39a6 | refs/heads/master | 2023-02-08T16:31:59.683541 | 2020-12-27T16:59:12 | 2020-12-27T16:59:12 | 324,807,557 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 957 | py | """
Given a string, write a function to check if it is a permutation of a palin
drome. A palindrome is a word or phrase that is the same forwards and backwards. A permutation
is a rearrangement of letters. The palindrome does not need to be limited to just dictionary words.
EXAMPLE
Input: Tact Coa
Output: True (permutations: "taco cat", "atco eta", etc.)
"""
def palindrome_permutation(string):
"""
:param string: string
:return: boolean
"""
the_dict = {}
string = string.replace(' ', '')
string = string.lower()
for _ in string:
if _ not in the_dict.keys():
the_dict[_] = 1
else:
the_dict[_] += 1
values = list(the_dict.values())
length = len(string)
if length % 2 == 0:
for _ in values:
if _ % 2 != 0:
return False
else:
return True
else:
count = 0
for _ in values:
if _ % 2 != 0:
count += 1
if count > 1:
return False
else:
return True
print(palindrome_permutation('Tact Coa'))
| [
"tay2druh@gmail.com"
] | tay2druh@gmail.com |
2aa1f7f97663abbd042a034c1570074ff9f985b1 | d496e9b71a9cdc226c8005b7d1be53a0c9154a36 | /guessNumber.py | 8be67a889479f08dae7d291e69ea215d59321b7b | [] | no_license | Shubham-S-Yadav/Python | 1f83e4a28e304679e16829613d845e8eae6f3921 | 830537dd02b60bb9f00a9079556a0c9323a26c17 | refs/heads/master | 2021-07-03T06:39:06.150795 | 2017-09-22T14:05:01 | 2017-09-22T14:05:01 | 104,320,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | '''
Generate a random number between 1 and 9 (including 1 and 9). Ask the user to guess the number, then tell them whether they guessed too low, too high, or exactly right.
'''
import random
while True:
number = int(input("Enter a number between 1 to 10: "))
rand = random.randrange(1, 10)
if (number == rand):
print("You guessed it right")
break
elif number > rand:
print("Guessed too high.")
else:
print("Guessed too low.") | [
"31968975+Shubham-S-Yadav@users.noreply.github.com"
] | 31968975+Shubham-S-Yadav@users.noreply.github.com |
e97d3591d81411fa559ed7e6451f00eadeff503d | 7ee8abf70257e885215a1abd4838845ffce0a626 | /hello.py | 04c61710ba08d106ff3895cc62b2c2843a31686a | [] | no_license | diorich/firstapp | b52610118d3eabe752ab7fee4860f91cf5a9be22 | 62dddc6863b2b6c258b4536567a43a5dafd8ca12 | refs/heads/master | 2021-01-17T18:08:15.500815 | 2016-10-17T18:42:15 | 2016-10-17T18:42:15 | 71,169,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | from flask import Flask
app = Flask(__name__)
@app.route("/")
def index():
return "Hello World!"
if __name__=='__main__':
app.run(port=5000, debug=True)
| [
"d.rich@live.co.uk"
] | d.rich@live.co.uk |
0f715b9465ba1a0dcc750d969a70815c72610331 | c5ea70bb1337b9c9c72a5dd80cebac8f1ba52a4d | /articleapp/migrations/0001_initial.py | af823e9e2455f6c04a5a7bb967af3293be69d4cd | [] | no_license | leesh9069/Django_Project_LSH | 8466f0a8e6670e3357f23e64437ec9452ae23c33 | 9fb647cc2586f3244a80f65ed31d4d646a4cac6f | refs/heads/master | 2023-07-19T16:29:54.855919 | 2021-09-23T06:41:58 | 2021-09-23T06:41:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | # Generated by Django 3.2.5 on 2021-08-01 15:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateField(auto_created=True, null=True)),
('title', models.CharField(max_length=200, null=True)),
('image', models.ImageField(upload_to='article/')),
('content', models.TextField(null=True)),
('writer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='article', to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"shlee9069@naver.com"
] | shlee9069@naver.com |
33864e4c4e10988ab56cdf4ba1f15fbbd344f0e0 | d2f50124ff3bec70b9b3139ecb063b06e526781d | /despachos_mercancias/migrations/0012_auto_20170113_1639.py | 17429c15b3153edf6a6c8081aaad3e0199999d20 | [] | no_license | odecsarrollo/odecopack-componentes | e8d993f089bf53bbf3c53d1265e70ac5c06b59b8 | b583a115fb30205d358d97644c38d66636b573ff | refs/heads/master | 2022-12-12T00:33:02.874268 | 2020-08-13T18:45:01 | 2020-08-13T18:45:01 | 189,262,705 | 0 | 0 | null | 2022-12-08T11:23:46 | 2019-05-29T16:37:21 | Python | UTF-8 | Python | false | false | 618 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-13 21:39
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('despachos_mercancias', '0011_enviotransportadoratcc_ciudad'),
]
operations = [
migrations.AlterField(
model_name='enviotransportadoratcc',
name='ciudad',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='mis_envios_tcc', to='geografia_colombia.Ciudad'),
),
]
| [
"fabio.garcia.sanchez@gmail.com"
] | fabio.garcia.sanchez@gmail.com |
a395c82a89fc6e3b2e18e384f6d821681a2b2481 | 6c38e5f5e43e47f83b34581b0f3a785ee5b5768b | /old_version/src_server/handler/care_handler.py | 76abda847be279c69728bf1dd5f353e49d16bc03 | [] | no_license | 340StarObserver/deepnote | bd3a8765222e550bec4cd05e894b3fa8ba6f1303 | 643a5252ceeb6e5d633ae1b0691121136135fbbe | refs/heads/master | 2021-01-17T01:50:02.731339 | 2017-03-23T08:49:12 | 2017-03-23T08:49:12 | 63,515,784 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,363 | py | #!/usr/bin/python
# -*- coding:utf-8 -*-
# Author : Lv Yang
# Created : 05 October 2016
# Modified : 22 October 2016
# Version : 1.0
"""
This script used to care somebody or cancel care
"""
import time
import sys
sys.path.append("../model")
from base_handler import BaseHandler
from mongoconn_model import getclient
from interact_model import exist_care
class CareHandler(BaseHandler) :
def __init__(self,post_data,post_files,usr_session,server_conf):
super(CareHandler,self).__init__(post_data,post_files,usr_session,server_conf)
def perform(self):
# accept parameters
cared_id = self._post_data['cared_id']
# if not login
if 'user_id' not in self._usr_session:
return {'result':False,'reason':1}
# connect to mongo
db_conn = getclient(self._server_conf['mongo']['hosts'],\
self._server_conf['mongo']['replset'],self._server_conf['mongo']['db_name'],\
self._server_conf['mongo']['db_user'],self._server_conf['mongo']['db_pwd'])
record_id = exist_care(db_conn,self._server_conf['mongo']['db_name'],\
self._usr_session['user_id'],cared_id)
if record_id is None:
# if ever has not cared this person
cur_t = int(time.time())
# add a care record
care_doc = {}
care_doc['carer_id'] = self._usr_session['user_id']
care_doc['time'] = cur_t
care_doc['cared_id'] = cared_id
db_conn[self._server_conf['mongo']['db_name']]['care_record'].insert_one(care_doc)
# add a message to remind the cared person
msg_doc = {}
msg_doc['user_ids'] = [cared_id]
msg_doc['time'] = cur_t
msg_doc['who_id'] = self._usr_session['user_id']
msg_doc['who_nick'] = self._usr_session['nick']
msg_doc['note_id'] = ''
msg_doc['note_title'] = ''
msg_doc['action_id'] = 4
msg_doc['content'] = ''
db_conn[self._server_conf['mongo']['db_name']]['message_record'].insert_one(msg_doc)
else:
# if ever has cared this person
db_conn[self._server_conf['mongo']['db_name']]['care_record'].delete_one({'_id':record_id})
# return result
db_conn.close()
return {'result':True}
| [
"lvyang@ippclub.org"
] | lvyang@ippclub.org |
ce37c43e76430750154401851a00fca84140d317 | abd9537f8b90a990e195ded5f9fafdcc108d2a48 | /swea/d4/1486/1486_shelf_powerset.py | 487056a865e5b4c70509b2d17d0851b107ba7e2c | [] | no_license | ohdnf/algorithms | 127171744631406c1d08cc2583aa569a094fa2cd | 6f286753dab827facc436af4f2130f11dad2d44f | refs/heads/master | 2023-08-09T11:19:56.445351 | 2021-08-31T13:11:46 | 2021-08-31T13:11:46 | 236,180,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | import sys
sys.stdin = open('input.txt')
t = int(input())
for test_case in range(1, t+1):
n, b = map(int, input().split()) # 점원 수, 선반 높이
clerks = list(map(int, input().split()))
# clerks.sort(reverse=True)
heights = list()
for i in range(1<<n):
tmp = 0
for j in range(n+1):
if i & (1<<j):
tmp += clerks[j]
if tmp >= b:
heights.append(tmp - b)
heights.sort()
print('#{} {}'.format(test_case, heights[0]))
| [
"jupyohong7@gmail.com"
] | jupyohong7@gmail.com |
08580dffeacca70d2adf0088f5b03bd8a89be49a | 115e96b9de17f0040333c6d375b45b6c4df8765e | /even_odd.py | f805324ec9be827edd335c3b5b65a41b0189131c | [] | no_license | NdunguGP/pythonApps | e4deab3db2c7dcec58a1f3bf5ed228df4724c963 | 98cb3150e66d66622e8be8f0ebb99a4e076bacc8 | refs/heads/master | 2021-07-23T16:37:11.543607 | 2017-10-29T19:33:10 | 2017-10-29T19:33:10 | 104,220,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | import random
def even_odd(num):
# If % 2 is 0, the number is even.
# Since 0 is falsey, we have to invert it with not.
return not num % 2
start = 5
while start:
number = random.randint(1, 99)
if even_odd(number):
print("{} is even".format(number))
else:
print("{} is odd".format(number))
start -=1
| [
"noreply@github.com"
] | noreply@github.com |
e5a492b919dd3469c6ccf1a0e1b91198ebd9a81a | b3ad87a5556c4cb46953db552517090163c79f4b | /extract_feat.py | a38d5c9497da1bf6d87ff7f76a4fe026f3e03e3c | [] | no_license | martinhoang11/Image-classification-with-cnn-and-svm-model | c9b8e0222c2472b16d8239c0c46c9aebc4308ff1 | 1ed4e6e2130d8771f0f7122697f202c5b85ce4ef | refs/heads/master | 2020-07-01T15:33:41.092036 | 2019-08-08T08:43:37 | 2019-08-08T08:43:37 | 201,212,042 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,660 | py | import numpy as np
import tensorflow as tf
import vgg16
import cv2
import os
# Change this path
DATASET_DIR = './Images/run/'
if not os.path.exists('./npydataset'):
os.mkdir('./npydataset')
def load_image(PATH):
batch = []
folder_name = []
img = [cv2.imread(PATH + file) for file in os.listdir(PATH)]
img = [cv2.resize(file,(224,224),3) for file in img]
folder_name = [file.replace(file.split('.')[1],"npy") for file in os.listdir(PATH)]
batch = [file.reshape((224, 224, 3)) for file in img] # 1 image resized in 224x224x3
return batch, folder_name
# Neu batch nay ma truyen vao mot luc 20 tam hinh
# thi no se tra ve fc6 kich thuoc 20
# with tf.Session(config=tf.ConfigProto(gpu_options=(tf.GPUOptions(per_process_gpu_memory_fraction=0.7)))) as sess:
def extract_data_feature(path):
batch, folder_name = load_image(path)
batch_size = len(batch)
i = 1
with tf.device('/cpu:0'):
with tf.Session() as sess:
images = tf.placeholder("float", [batch_size, 224, 224, 3])
feed_dict = {images: batch}
print('Loading model...')
vgg = vgg16.Vgg16()
with tf.name_scope("content_vgg"):
vgg.build(images)
print("Extracting feature...")
fc6 = sess.run(vgg.fc6, feed_dict=feed_dict)
print('FC6 feature: ', fc6)
for x in fc6:
np.save('./npydataset' + folder_name[i],x)
print('Saved ' , i )
i = i + 1
print('Number of input: ', len(fc6))
print('Feature length of FC6: ', len(fc6[0]))
extract_data_feature(DATASET_DIR)
| [
"39638263+martinhoang11@users.noreply.github.com"
] | 39638263+martinhoang11@users.noreply.github.com |
b6909e37a238b47e2e8dba721836e7fc248f5a2f | af4024de1e85735b88de94392934c75295e4bfb6 | /aula04/conta_digitos.py | ba7b5cb7571f5389e2154ec64a0d09d3e86fd4e6 | [] | no_license | felipenatividade/curso-python | 82f26e82e27377392684a6ece0e446eb4ee73e8c | 63820cd76471492f4d3742beafc78818273f0430 | refs/heads/master | 2020-04-05T01:42:13.393989 | 2018-11-28T00:00:49 | 2018-11-28T00:00:49 | 156,445,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | n = int(input('Digite um numero'))
d = int(input('Digite um digito'))
versaoStr = str(n)
def conta_digitos(versaoStr):
for i<
| [
"felipecmro@hotmail.com"
] | felipecmro@hotmail.com |
997203f940c4b6476c12df8f5e3f8b791bb4cc9b | 39d183b0a113148a6bbc7cbd5ac6fa522f06f700 | /5手写数字识别/mnist_test.py | 264524a799b6c2799e7040855cf2cf3b95ce7332 | [] | no_license | gdgf/MoocTF | 0311ddab6f69bf9e570e43f3395f43b013fd47d6 | 8cfdfcd0b6397967f7346f7db898cc19bac19a81 | refs/heads/master | 2022-04-17T00:54:56.896890 | 2020-02-16T02:52:17 | 2020-02-16T02:52:17 | 240,673,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,954 | py | # coding:utf-8
# 去掉警告
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# 测试浮现了
import time # 为了延迟
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import mnist_backward
TEST_INTERVAL_SECS = 5 # 程序循环的额时间
def test(mnist):
with tf.Graph().as_default() as g: # 复现计算图
x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
y_ = tf.placeholder(tf.float32, [None, mnist_forward.OUTPUT_NODE])
y = mnist_forward.forward(x, None) # 计算出y的值
# 实例化带滑动平均的saver对象
ema = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
ema_restore = ema.variables_to_restore()
saver = tf.train.Saver(ema_restore)
# 计算正确率
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
while True:
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
if ckpt and ckpt.model_checkpoint_path:
# 如果有的haunted恢复参数
saver.restore(sess, ckpt.model_checkpoint_path)
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
accuracy_score = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
print("After %s training step(s), test accuracy = %g" % (global_step, accuracy_score))
else:
# 没有模型和参数,
print('No checkpoint file found')
return
time.sleep(TEST_INTERVAL_SECS)
def main():
mnist = input_data.read_data_sets("./data/", one_hot=True)
test(mnist)
if __name__ == '__main__':
main()
| [
"794428738@qq.com"
] | 794428738@qq.com |
14575f006fa799ab2f3698289711bf9ad024a62a | 86813bf514f3e0257f92207f40a68443f08ee44b | /0338 比特位计数/0338 比特位计数.py | 96ff96f4ec3d4463f1b79f1a90552912d3e21da3 | [] | no_license | Aurora-yuan/Leetcode_Python3 | 4ce56679b48862c87addc8cd870cdd525c9d926c | 720bb530850febc2aa67a56a7a0b3a85ab37f415 | refs/heads/master | 2021-07-12T13:23:19.399155 | 2020-10-21T03:14:36 | 2020-10-21T03:14:36 | 212,998,500 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,031 | py | #label: math/dynamic programming difficulty: medium
"""
思路一:
麻瓜思想,每个数转成二进制计数
"""
class Solution:
def countBits(self, num: int) -> List[int]:
res = list()
for i in range(num+1):
res.append(bin(i).count('1'))
return res
“”“
思路二:
《剑指Offer》里提到的结论:如果一个数 i 和 i - 1 做与运算,那么 i 的二进制表示形式中的最右边一个 1 会变成0 。
利用动态规划的思想。
如果我们已经知道了 i & i -1 这个数字的1的个数cnt,那么根据上面的提到的结论, i 这个数字中 1 的个数就是 cnt + 1。
所以不难得到状态转移方程: dp[i] = dp[i & (i - 1)] + 1
”“”
class Solution(object):
def countBits(self, num):
"""
:type num: int
:rtype: List[int]
"""
dp = [0 for i in range(num + 1)]
for i in range(1, num + 1):
dp[i] = dp[i & (i - 1)] + 1
return dp
| [
"noreply@github.com"
] | noreply@github.com |
c0c8b5cd76ab1211715caf9c7860677b20a6a49e | dfadee3252841bae4f1065919d794557832245a9 | /exercise-3/ex3.py | b6994faa4de7370b7f4aaa52829e708297765462 | [
"MIT"
] | permissive | Queky/advent-code-2020 | 7c74b4e91f62fd7ab2475c96d8e5f4799c9c9bd7 | 1a1dbc208464b28e813f4743423c7fbacd5a4d0d | refs/heads/main | 2023-02-01T14:38:04.023310 | 2020-12-13T18:51:47 | 2020-12-13T18:51:47 | 320,059,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py |
class Map:
maxWidth = 0
maxHeight = 0
positions = []
def __init__(self, lines):
self.maxWidth = len(lines[0])
self.maxHeight = len(lines)
for line in lines:
if line:
self.positions.append(list(line))
def is_tree(self, x, y):
return self.positions[y][self.normalize_x(x)] == '#'
def normalize_x(self, x):
return x % self.maxWidth
def is_end(self, y):
return y >= self.maxHeight
class Player:
encounteredTrees = 0
posX = 0
posY = 0
map: Map
def __init__(self, map):
self.map = map
def run(self):
while not self.map.is_end(self.posY):
self.check_if_player_found_tree(self.posX, self.posY)
self.move_forward()
print('Encountered trees -> ' + str(self.encounteredTrees))
def check_if_player_found_tree(self, x, y):
if self.map.is_tree(x, y):
self.encounteredTrees += 1
def move_forward(self):
self.posX += 3
self.posY += 1
class LoadFile:
lines = []
def __init__(self):
pass
def read_file(self):
file = open("map.txt", "r")
for line in file:
self.lines.append(line.strip())
def get_file_entries(self):
return self.lines
if __name__ == "__main__":
file = LoadFile()
file.read_file()
Player(Map(file.get_file_entries())).run() | [
"inakisanchez_92@hotmail.com"
] | inakisanchez_92@hotmail.com |
49ada2472ec71775a9376b00c9ae37ee34f38b0a | 15e3bdc0af8247f8f4f6d4932336e8fcba94e239 | /string/format.py | bb537793cf27719c4358d7183adc6edfa377f444 | [] | no_license | yuii-code/_workshop02 | e3b38c53ffa130d9a0fb2947af53708dbeca2264 | 7cdd901ec0728b442a37ffaaa4df0b29f019e027 | refs/heads/master | 2023-03-23T02:24:35.477521 | 2021-03-13T10:41:43 | 2021-03-13T10:41:43 | 328,414,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | age = 20
txt = "My name is yui, and I am {}"
result = txt.format(age)
print("result", result)
| [
"onthicha.s@ku.th"
] | onthicha.s@ku.th |
a095922b84d873ba110a5c151c41ad135874eb8c | c370c85aaa470a9dcfef38de2356083ce1b41b41 | /src/main.py | da4494bdd58941e0265ad7a602b47ae31e8c602e | [] | no_license | mlytle4218/pod-kast | cd51979d16c322fddab2b2638922d9ebb5eded82 | e519add9cf4b2e5eeca53485c5431a9988bb1466 | refs/heads/main | 2023-08-24T05:00:52.949984 | 2021-10-09T06:49:36 | 2021-10-09T06:49:36 | 411,491,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,053 | py | # python imports
import os, subprocess
# local imports
import utils
from data_accessor import DataAccessor
from menu import Menu
from sqlite_database_creation import Category, Podcast
from sql_category import CategoryControls
from sql_podcast import PodcastControls
import config
from log import logging as log
class Main:
def __init__(self):
dataAccessor = DataAccessor()
self.session = dataAccessor.get_session()
width = config.width
height = config.height
try:
width = int(subprocess.check_output(['tput', 'cols']))
height = int(subprocess.check_output(['tput', 'lines'])) - 1
except Exception as e:
log.error(e)
self.menu = Menu(width, height)
self.category_controls = CategoryControls()
self.podcast_controls = PodcastControls()
self.start()
def start(self):
try:
while True:
try:
result = self.menu.main_menu()
result = int(result)
if result == 1:
self.add_category()
elif result == 2:
self.edit_category()
elif result == 3:
self.delete_category()
elif result == 4:
self.add_new_podcast()
elif result == 5:
self.edit_existing_podcast()
elif result == 6:
self.delete_existing_podcast()
elif result == 7:
self.choose_episodes_to_download()
elif result == 8:
self.start_downloads()
elif result == 9:
self.search()
elif result == 10:
self.delete_from_download_queue()
elif result == 11:
self.update_all_episodes()
elif result == 12:
self.list_archived_episodes()
except ValueError as e:
if result == 'q':
break
except KeyboardInterrupt as e:
log.error(e)
pass
def add_category(self):
os.system('clear')
category_name = self.menu.get_input('Enter category name: ')
if category_name:
category = Category(category_name)
self.category_controls.add_new_category(self.session, category)
self.result_print(category, 'added')
def edit_category(self):
os.system('clear')
categories = self.category_controls.get_all_categories(self.session)
category = self.menu.print_out_menu_options_categories(objects=categories)
if category:
category.title = utils.rlinput('category name: ', category.title).strip()
result = self.category_controls.update_category(self.session, category)
self.result_print(category, 'updated')
def delete_category(self):
os.system('clear')
categories = self.category_controls.get_all_categories(self.session)
category = self.menu.print_out_menu_options_categories(objects=categories)
if category:
self.category_controls.remove_category(self.session, category)
self.result_print(category, 'deleted')
def add_new_podcast(self):
os.system('clear')
podcast = Podcast()
podcast.title = self.menu.get_input('Enter name: ')
podcast.url = self.menu.get_input('Enter url name: ')
podcast.audio = self.menu.get_input('Enter audio directory: ')
podcast.video = self.menu.get_input('Enter video directory: ')
categories = self.category_controls.get_all_categories(self.session)
category = self.menu.print_out_menu_options_categories(objects=categories)
podcast.category = category.category_id
result = self.podcast_controls.add_new_podcast(self.session,podcast)
if result:
self.result_print(podcast, 'added')
# TODO: add episode creation
# self.title = title
# self.url = url
# self.audio = audio
# self.video = video
# self.catgory = category
# podcast = enter_podcast_info(podcast)
# if podcast != None:
# episodes = backend.get_episodes_from_feed(podcast.url)
# sql.insert_podcast(podcast, episodes)
# pass
def edit_existing_podcast(self):
os.system('clear')
podcasts = self.podcast_controls.get_all_podcasts(self.session)
podcast = self.menu.display_items_podcasts(objects=podcasts)
# if podcast:
# podcast.title = utils.rlinput('podcast name: ', podcast.title).strip()
# result = self.podcast_controls.update_podcast(self.session, podcast)
# self.result_print(podcast, 'updated')
pass
def delete_existing_podcast(self):
os.system('clear')
podcasts = self.podcast_controls.get_all_podcasts(self.session)
podcast = self.menu.display_items_podcasts(objects=podcasts)
if podcast:
self.podcast_controls.delete_podcast_id(self.session, podcast.podcast_id)
self.result_print(podcast, 'deleted')
pass
def choose_episodes_to_download(self):
pass
def start_downloads(self):
pass
def search(self):
pass
def delete_from_download_queue(self):
pass
def update_all_episodes(self):
pass
def list_archived_episodes(self):
pass
def result_print(self, result, message_text):
if result:
print('{} was {}'.format(result, message_text))
input('press enter to acknowledge')
else:
print('{} was not {}'.format(result, message_text))
input('press enter to acknowledge')
# Script entry point: instantiate the Main controller, which drives the UI.
if __name__ == "__main__":
    Main()
| [
"mlytle4218@gmail.com"
] | mlytle4218@gmail.com |
d49b8dd94000b3cb7de0d0de656972db01f76896 | 8015f1c62a2cb4efd21aa8938336913bf8117868 | /bamap/ba0150.pngMap.py | 12eae37b66b5a10110de9f226bbdc8418a2818d0 | [] | no_license | GamerNoTitle/Beepers-and-OLED | 675b5e3c179df0f0e27b42bf594c43860d03b9af | afe1340e5394ae96bda5f9022a8a66824368091e | refs/heads/master | 2020-04-20T00:09:47.122471 | 2019-04-29T04:59:35 | 2019-04-29T04:59:35 | 168,515,579 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,468 | py | ba0150.pngMap = [
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000011111110000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000011111100000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111110000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000001011111111111111000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011011111111111111111111110000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000010111111111111111111111111111111100000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111100000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111110000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111110000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111100000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111110000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000101111111111111111111111111111111111111111000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111110000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111100000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111110000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111110000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111110000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111110000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111011111000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000001011111111111111111111111111101111100000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111100011000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111001101100000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000010111111111111111111111111111100000010000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111100010000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111110000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111010100000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111100000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111000000000000000000000000000',
'00000000000000000000000000000000000000000011111000000000000000000000000000101111111111111111111110011100000000000000000000000000',
'00000000000000000000000000000000000000000011111110000000000000000000000000001111111111111111111111011100000000000000000000000000',
'00000000000000000000000000000000000001011111111111000000000000000000000011111111111111111111111111001000000000000000000000000000',
'00000000000000000000000000000000000011111111111111000000000000000000000011111111111111111111111111011100000000000000000000000000',
'00000000000000000000000000000000000011111111111110000000000000000000000011111111111111111111111111111100000000000000000000000000',
'00000000000000000000000000000000000011111111111110000000000000000000001011111111111111111111111111111100000000000000000000000000',
'00000000000000000000000000000000000011111111111111000000000000000000000111111111111111111111111111111100000000000000000000000000',
'00000000000000000000000000000000000011111111111111110000000000000000001111111111111111111111111111111110000000000000000000000000',
'00000000000000000000000000000000000000111111111111111111111111111100111111111111111111111111111111111100000000000000000000000000',
'00000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111111111110000000000000000000000000',
'00000000000000000000000000000000000000000011111011111111111111111111111111111111111111111111111111111100001100000000000000000000',
'00000000000000000000000000000000000000000000011011111111111111111111111111111111111111111111111111111110011000000000000000000000',
'00000000000000000000000000000000000000000000000001111111111111111111111111111111111111111111111111111111100000000000000000000000',
'00000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111000000000000000000000000',
'00000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111100000000000000000000000',
'00000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111110000000000000000000000',
'00000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111111000000000000000000000',
'00000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111111100000000000000000000',
'00000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111111110000000000000000000',
'00000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111111111000000000000000000',
]
| [
"bili33@87ouo.top"
] | bili33@87ouo.top |
7f2df471b94bb54376e154486267ebd828d91fe3 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_deepens.py | 5ee1f3387e1e9e28ab6fb75803b9751b7df84712 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
#calss header
class _DEEPENS():
def __init__(self,):
self.name = "DEEPENS"
self.definitions = deepen
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['deepen']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
523ebba8af98a2d50fc0e4ed662799dd8673bccc | 59020ea258d38e876b39a4a5d79a85ad4d014084 | /rpi/withImgReg/testCamera.py | fae929747cdd4b24b55abfae99836b64bdc18abf | [
"MIT"
] | permissive | Joash-JW/CZ3004-MDP-Image-Recognition | eb4db054cf6a74b9269a6b5b4af271d311ef7f86 | 75a12aa7b44ace049ac8885fb832135083b26c9b | refs/heads/master | 2022-12-28T10:35:16.947227 | 2020-09-28T16:28:07 | 2020-09-28T16:28:07 | 246,212,331 | 0 | 0 | MIT | 2020-09-28T16:28:08 | 2020-03-10T05:01:00 | Jupyter Notebook | UTF-8 | Python | false | false | 53 | py | from sendImg import *
# Smoke test: take a single picture via the project-local sendImg helper.
obj = sendImg()  # sendImg comes from the project's `sendImg` module (star-imported)
obj.takePic()
| [
"32333304+Joash-JW@users.noreply.github.com"
] | 32333304+Joash-JW@users.noreply.github.com |
a5cfcab1ab85afbc722c0382948b7d599a5f384b | 6ee14bd6b0f7c71d7d81249f24b2472b7af58dad | /scripts/page_utils.py | f5e6cccd218fdae6bafcbee38f2611707e71aa24 | [] | no_license | matplo/lmdweb | ae7124d508f3630647457c77e4791d15f44172ab | d95a40dd61a3862d301cbbf5d4ba613759e5081c | refs/heads/master | 2020-04-30T12:40:57.709765 | 2019-06-11T18:49:28 | 2019-06-11T18:49:28 | 176,832,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | def safe_meta_get(page, what_meta, default_val=True):
retval = default_val
try:
retval = page.meta.get(what_meta)
except:
retval = default_val
page.meta = { what_meta : default_val}
if retval is None:
retval = default_val
page.meta[what_meta] = default_val
return retval
| [
"mploskon@lbl.gov"
] | mploskon@lbl.gov |
809cb39be1c498b2dc3381f28cb369e0fa000dd1 | d404fb72dee51f8c2791bf21cc5d9ee91d2d6a45 | /ch03_if/0118_grade.py | 994ed4ab4c6a92af05a7cb316a7605ce19cac7b7 | [] | no_license | kangwonlee/18pf_welcome_template | 6c5c997e7aac08d8a7d94d4a146037c2d3b4a813 | 9279559c7cde37a18b8e1d5e596f161087493218 | refs/heads/master | 2021-04-12T07:52:29.577562 | 2018-03-18T21:29:28 | 2018-03-18T21:29:28 | 125,769,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | score = int(input("성적을 입력하시오: "))
# Map the numeric score to a letter grade ("학점" = "grade"):
# 90+ -> A, 80+ -> B, 70+ -> C, 60+ -> D, otherwise F.
if score >= 90:
    print("학점 A")
elif score >= 80:
    print("학점 B")
elif score >= 70:
    print("학점 C")
elif score >= 60:
    print("학점 D")
else:
    print("학점 F")
| [
"kangwon.lee@kpu.ac.kr"
] | kangwon.lee@kpu.ac.kr |
c757bd9966acd9a987e30e914ce32a7e7481d6da | 9ee9d0aa7d924e7d0500f88f2d5a566a31857360 | /stylegan-encoder-master/encode_images_fk.py | 361ee0eacd88487356f25f28066cc64cd8dc42fa | [] | no_license | Fenkail/py-torch | 044302f71b3254cca9da3308697d14c048253675 | 91e5eedb8df124753bcd13081416cafdd1ee2f48 | refs/heads/master | 2020-06-29T14:17:19.206952 | 2019-10-12T08:20:23 | 2019-10-12T08:20:23 | 200,559,248 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,975 | py | import os
import argparse
import pickle
from tqdm import tqdm
import PIL.Image
import numpy as np
import dnnlib
import dnnlib.tflib as tflib
import config
from encoder.generator_model import Generator
from encoder.perceptual_model import PerceptualModel, load_images
from keras.models import load_model
import time
import logging
def split_to_batches(l, n):
    """Yield consecutive slices of *l* of length *n* (last one may be shorter)."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
def styleGAN_encoder(args):
    """Project every image in ``args.src_dir`` into StyleGAN dlatent space.

    For each batch of reference images the dlatents are optimized so the
    generator's output matches the image under a perceptual loss; the best
    reconstruction (PNG) and its dlatents (.npy) are written to the output
    directories.  Optionally records the optimization as a video per image.
    """
    start_ = time.time()
    args.decay_steps *= 0.01 * args.iterations # Calculate steps as a percent of total iterations
    if args.output_video:
        import cv2
    synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=False), minibatch_size=args.batch_size)
    # Collect the reference images (files only) to be projected.
    ref_images = [os.path.join(args.src_dir, x) for x in os.listdir(args.src_dir)]
    ref_images = list(filter(os.path.isfile, ref_images))
    if len(ref_images) == 0:
        raise Exception('%s is empty' % args.src_dir)
    # Ensure all output directories exist.
    os.makedirs(args.data_dir, exist_ok=True)
    os.makedirs(args.mask_dir, exist_ok=True)
    os.makedirs(args.generated_images_dir, exist_ok=True)
    os.makedirs(args.dlatent_dir, exist_ok=True)
    os.makedirs(args.video_dir, exist_ok=True)
    # Initialize generator and perceptual model
    tflib.init_tf()
    with dnnlib.util.open_url(args.model_url, cache_dir=config.cache_dir) as f:
        generator_network, discriminator_network, Gs_network = pickle.load(f)
    generator = Generator(Gs_network, args.batch_size, clipping_threshold=args.clipping_threshold, tiled_dlatent=args.tile_dlatents, model_res=args.model_res, randomize_noise=args.randomize_noise)
    if (args.dlatent_avg != ''):
        generator.set_dlatent_avg(np.load(args.dlatent_avg))
    perc_model = None
    if (args.use_lpips_loss > 0.00000001):
        # Download the pretrained LPIPS network used by the perceptual loss.
        with dnnlib.util.open_url('https://drive.google.com/uc?id=1N2-m9qszOeVC9Tq77WxsLnuWwOedQiD2', cache_dir=config.cache_dir) as f:
            perc_model = pickle.load(f)
    perceptual_model = PerceptualModel(args, perc_model=perc_model, batch_size=args.batch_size)
    perceptual_model.build_perceptual_model(generator)
    ff_model = None
    # Optimize (only) dlatents by minimizing perceptual loss between reference and generated images in feature space
    for images_batch in tqdm(split_to_batches(ref_images, args.batch_size), total=len(ref_images)//args.batch_size):
        names = [os.path.splitext(os.path.basename(x))[0] for x in images_batch]
        if args.output_video:
            # One progress-video writer per image in the batch.
            video_out = {}
            for name in names:
                video_out[name] = cv2.VideoWriter(os.path.join(args.video_dir, f'{name}.avi'),cv2.VideoWriter_fourcc(*args.video_codec), args.video_frame_rate, (args.video_size,args.video_size))
        perceptual_model.set_reference_images(images_batch)
        dlatents = None
        if (args.load_last != ''): # load previous dlatents for initialization
            for name in names:
                dl = np.expand_dims(np.load(os.path.join(args.load_last, f'{name}.npy')),axis=0)
                if (dlatents is None):
                    dlatents = dl
                else:
                    dlatents = np.vstack((dlatents,dl))
        else:
            # Otherwise try to bootstrap dlatents with a feed-forward
            # encoder: ResNet first, then EfficientNet as a fallback.
            if (ff_model is None):
                if os.path.exists(args.load_resnet):
                    print("Loading ResNet Model:")
                    ff_model = load_model(args.load_resnet)
                    from keras.applications.resnet50 import preprocess_input
            if (ff_model is None):
                if os.path.exists(args.load_effnet):
                    import efficientnet
                    print("Loading EfficientNet Model:")
                    ff_model = load_model(args.load_effnet)
                    from efficientnet import preprocess_input
            if (ff_model is not None): # predict initial dlatents with ResNet model
                dlatents = ff_model.predict(preprocess_input(load_images(images_batch,image_size=args.resnet_image_size)))
        if dlatents is not None:
            generator.set_dlatents(dlatents)
        op = perceptual_model.optimize(generator.dlatent_variable, iterations=args.iterations)
        pbar = tqdm(op, leave=False, total=args.iterations)
        vid_count = 0
        best_loss = None
        best_dlatent = None
        # Optimization loop: track the dlatents with the lowest loss seen.
        for loss_dict in pbar:
            pbar.set_description(" ".join(names) + ": " + "; ".join(["{} {:.4f}".format(k, v)
                    for k, v in loss_dict.items()]))
            if best_loss is None or loss_dict["loss"] < best_loss:
                best_loss = loss_dict["loss"]
                best_dlatent = generator.get_dlatents()
            if args.output_video and (vid_count % args.video_skip == 0):
                batch_frames = generator.generate_images()
                for i, name in enumerate(names):
                    video_frame = PIL.Image.fromarray(batch_frames[i], 'RGB').resize((args.video_size,args.video_size),PIL.Image.LANCZOS)
                    video_out[name].write(cv2.cvtColor(np.array(video_frame).astype('uint8'), cv2.COLOR_RGB2BGR))
            generator.stochastic_clip_dlatents()
        print(" ".join(names), " Loss {:.4f}".format(best_loss))
        if args.output_video:
            for name in names:
                video_out[name].release()
        # Generate images from found dlatents and save them
        generator.set_dlatents(best_dlatent)
        generated_images = generator.generate_images()
        generated_dlatents = generator.get_dlatents()
        for img_array, dlatent, img_name in zip(generated_images, generated_dlatents, names):
            img = PIL.Image.fromarray(img_array, 'RGB')
            img.save(os.path.join(args.generated_images_dir, f'{img_name}.png'), 'PNG')
            np.save(os.path.join(args.dlatent_dir, f'{img_name}.npy'), dlatent)
        generator.reset_dlatents()
    end_ = time.time()
    logging.info('图像的StyleEncoder编码耗费时间: %.2fs' % (end_ - start_))
| [
"fengkai1996@foxmail.com"
] | fengkai1996@foxmail.com |
84178264efd8e2f1fc7382e20d6fb767bbb79b3d | 70205375cd8d98c6f7f7a214fc7f9db8abab75f5 | /WEEK-9/1-Money-In-The-Bank/The-Alchemy-Way/base.py | 78fc905756046d9a3ffe2c88dbfcab01d946fc5b | [] | no_license | presianbg/HackBG-Programming101 | 81f006b938663f982e182cdfef312cd7f9f33263 | 1a16953d4ac12766828daaf62a6290a68043a7bc | refs/heads/master | 2021-01-18T22:33:27.515738 | 2016-05-26T04:54:47 | 2016-05-26T04:54:47 | 32,878,278 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | '''
Share Base to other files - main idea
'''
from sqlalchemy.ext.declarative import declarative_base
# Shared SQLAlchemy declarative base class for all model modules.
Base = declarative_base()
| [
"presianbg@gmail.com"
] | presianbg@gmail.com |
600a302ed2fe494a20e8295bf50c9fec01ba19fb | b857397e77f2804b80a5e7a39c1434230889986b | /orders/migrations/0016_auto_20180614_1907.py | 58c4f6e5dc62487230bf19ecdd3ad5dd110652ab | [] | no_license | Gxrjan/OnlineOrder-on-Django | 38af12489ab95ca705b5e8a32f6bd886cbbdf718 | 7a2901cbd613b378505dc22a2522f5e5d20bcbf5 | refs/heads/master | 2020-03-22T17:11:18.958217 | 2018-07-10T05:02:22 | 2018-07-10T05:02:22 | 140,379,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | # Generated by Django 2.0.5 on 2018-06-14 16:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make Template.name unique with a custom
    duplicate-name error message (Russian: "a template with this name
    already exists")."""
    dependencies = [
        ('orders', '0015_auto_20180614_1457'),
    ]
    operations = [
        migrations.AlterField(
            model_name='template',
            name='name',
            # NOTE: the error message string is user-facing and must stay as-is.
            field=models.CharField(error_messages={'unique': 'Шаблон с таким названием уже существует'}, max_length=255, unique=True),
        ),
    ]
| [
"Gxrjan@gmail.com"
] | Gxrjan@gmail.com |
78bce8b6106795876ff11fd8725ce110034391de | 7dce2aa5fbbf74dedb51d2a4792a9ccbf29a7430 | /pybin/cpu.py | 91de011c898c5ed3800ffbc1926d2a0f9c6b6239 | [] | no_license | robert-e-roy/spork | 4e4e1fec3d056fc33d5c42f77bd87bbfa3c3885d | bbc475ffe86cee9b6131365857365acf62314fe9 | refs/heads/master | 2023-02-02T08:49:48.114545 | 2020-12-20T21:45:54 | 2020-12-20T21:45:54 | 45,711,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | #!/usr/bin/env python
import os
import sys
import pdb
import time
import array
def main():
    """Crude CPU speed test: time ten million list appends and report
    the elapsed wall-clock time.

    NOTE: Python 2 source (print statements) — do not run under Python 3.
    """
    tsize=10000000
    print "cpu speed test\n"
    # pdb.set_trace()
    myArray=[0]
    start=time.time()
    msize=0
    # Busy loop: append tsize items one at a time to burn CPU.
    while (msize < tsize):
        msize += 1
        myArray.append(1)
    now=time.time()
    print now
    print start
    diff= now-start
    # Same result reported twice: legacy comma-print and str.format forms.
    print 'total time to count', tsize, 'took', diff
    print 'total time to count {} took {} seconds '.format(tsize,diff)
# Here's our payoff idiom! (standard script entry-point guard)
if __name__ == '__main__':
    main()
| [
"me@robert-e-roy.com"
] | me@robert-e-roy.com |
5c4f2ddf4459e23dcfd43b94dcfb8c19ba95d495 | 2518db90aeef9130856081ec6600ab261be25741 | /Test/stats_python/stats.py | 8107c1874b760fcb1b3ecc6914108c32db1bf894 | [] | no_license | MalcolmMielle/RSI | aec03084da4326fdab111773dd8e83242ff50f8a | 5a1eec8d749ac71060e5814e9b51ed823550b09f | refs/heads/master | 2020-09-17T05:25:52.439905 | 2019-11-25T17:34:40 | 2019-11-25T17:34:40 | 224,004,604 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | import numpy
import scipy.stats
import matplotlib.pyplot as plt
def mean(list):
    """Return the arithmetic mean of the values in *list*.

    Raises ZeroDivisionError for an empty sequence.
    (Parameter name kept as ``list`` — it shadows the builtin, but
    renaming it would break keyword callers.)
    """
    # Built-in sum() replaces the manual accumulator loop, which also
    # shadowed the builtin ``sum`` with a local variable.
    return sum(list) / len(list)
def variance(list, mean):
    """Return the sample variance of *list* around the given *mean*.

    Uses the n-1 (Bessel-corrected) denominator, matching the original;
    raises ZeroDivisionError for a single-element sequence.
    """
    # Generator expression replaces the manual squared-difference loop.
    return sum((element - mean) ** 2 for element in list) / (len(list) - 1)
def sd(variance):
    """Return the standard deviation corresponding to *variance*."""
    return numpy.sqrt(variance)
# Read precision/recall pairs (columns 1 and 2) from the results file,
# then print the mean and standard deviation of each column.
precision = []
recall = []
with open('sketchezlec.dat') as results:
    for row in results:
        data = row.split()
        print(data)
        precision.append(float(data[1]))
        recall.append(float(data[2]))
mean_v = mean(precision)
sd_v = sd(variance(precision, mean_v))
print("Precision mean and sd " + str(mean_v) + " "+ str(sd_v))
mean_recall = mean(recall)
sd_recall = sd(variance(recall, mean_recall))
print("Recall mean and sd " + str(mean_recall) + " "+ str(sd_recall))
| [
"malcolm.mielle@protonmail.com"
] | malcolm.mielle@protonmail.com |
aa416bdfc7a7201c4451bbc32abf96628417d47f | 67e1c52432a4d6a35ad1609a072b1d0c19b9f24b | /osi_django_app/manage.py | 604a4509ef9904cd98b5258bbca348a45d6b0529 | [
"MIT",
"LicenseRef-scancode-free-unknown"
] | permissive | tapaswenipathak/Open-Source-Programs | d7c017b70e89e81e19054e4c1f503deb325678db | ab31f6307338905a76635656ed2dc4dffb789bac | refs/heads/master | 2023-08-24T07:39:42.232907 | 2023-01-28T15:11:56 | 2023-01-28T15:11:56 | 46,958,040 | 2,527 | 569 | MIT | 2023-08-14T10:48:14 | 2015-11-27T03:37:32 | JavaScript | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django's entry point: select the settings module, then hand the
    command line to Django's management machinery.

    Raises ImportError with a setup hint if Django is not importable.
    """
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'osi_django_app.settings')
    try:
        # Imported lazily so the friendly error below can fire first.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
# Standard script entry-point guard.
if __name__ == '__main__':
    main()
| [
"tapaswenipathak@gmail.com"
] | tapaswenipathak@gmail.com |
0eefce36ea159a3ee01e3a8648d44a932052a570 | 679e31fe16e92e1d0bc3448c25845103f19a622f | /web_flask/3-python_route.py | 96ec3a910bab88bd161ba28e53c1573167ff9a05 | [] | no_license | Gikaro/AirBnB_clone_v2 | ab7d63ce3e942253ded54d30d68c631eb055308c | 5744e747f2fdb722d7e6843bd1e4a67abf9c8243 | refs/heads/master | 2023-03-20T01:34:49.172584 | 2020-09-02T23:22:39 | 2020-09-02T23:22:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | #!/usr/bin/python3
"""WebFlask module"""
from flask import Flask
from os import environ
# Flask application instance shared by all routes in this module.
app = Flask(__name__)
@app.route('/', strict_slashes=False)
def hello_route():
    """Root route: return the literal greeting 'Hello HBNB!'."""
    return 'Hello HBNB!'
@app.route('/hbnb', strict_slashes=False)
def hbnb_route():
    """/hbnb route: return the literal string 'HBNB'."""
    return 'HBNB'
@app.route('/c/<text>', strict_slashes=False)
def c_route(text):
    """Display 'C ' followed by *text* with underscores shown as spaces."""
    return 'C {}'.format(text.replace('_', ' '))
@app.route('/python/', strict_slashes=False)
@app.route('/python/<text>', strict_slashes=False)
def python_route(text='is cool'):
    """Display 'Python ' followed by *text* (underscores become spaces);
    defaults to 'is cool' when no text segment is given."""
    return 'Python {}'.format(text.replace('_', ' '))
if __name__ == '__main__':
    # Register this file as the Flask app and listen on all interfaces.
    environ['FLASK_APP'] = __file__
    app.run(host='0.0.0.0', port=5000)
| [
"sebri.issam@gmail.com"
] | sebri.issam@gmail.com |
12e01a17bd141b1e82d95006b641e5bb0343d272 | 484c462c29e3c2f8ac280b79c11db6982c6a8ca6 | /neurolab-0.2.3/neurolab/__init__.py | 1c9445506114073da6f75ac4c16ca8634f996e27 | [] | no_license | thelma1944/Python_Stuff | b5fa53bf008bb5e865204201b144fe20e7f87565 | 077131a2c9f247396dca86fdf18933d38ae8d501 | refs/heads/master | 2021-06-05T12:25:35.779070 | 2020-10-03T18:20:16 | 2020-10-03T18:20:16 | 16,077,931 | 0 | 1 | null | 2021-03-26T00:30:14 | 2014-01-20T17:36:16 | Python | UTF-8 | Python | false | false | 1,754 | py | # -*- coding: utf-8 -*-
"""
Neurolab is a simple and powerful Neural Network Library for Python.
Contains basic neural networks, training algorithms and a flexible framework
to create and explore other neural network types.
:Features:
- Pure python + numpy
- API like Neural Network Toolbox (NNT) from MATLAB
- Interface to use train algorithms form scipy.optimize
    - Flexible network configurations and learning algorithms. You may change: training, error, initialization and activation functions
- Unlimited number of neural layers and number of neurons in layers
- Variety of supported types of Artificial Neural Network and learning algorithms
:Example:
>>> import numpy as np
>>> import neurolab as nl
>>> # Create train samples
>>> input = np.random.uniform(-0.5, 0.5, (10, 2))
>>> target = (input[:, 0] + input[:, 1]).reshape(10, 1)
>>> # Create network with 2 inputs, 5 neurons in input layer and 1 in output layer
>>> net = nl.net.newff([[-0.5, 0.5], [-0.5, 0.5]], [5, 1])
>>> # Train process
>>> err = net.train(input, target, show=15)
Epoch: 15; Error: 0.150308402918;
Epoch: 30; Error: 0.072265865089;
Epoch: 45; Error: 0.016931355131;
The goal of learning is reached
>>> # Test
>>> net.sim([[0.2, 0.1]]) # 0.2 + 0.1
array([[ 0.28757596]])
:Links:
- `Home Page <http://code.google.com/p/neurolab/>`_
- `PyPI Page <http://pypi.python.org/pypi/neurolab>`_
- `Documentation <http://packages.python.org/neurolab/>`_
- `Examples <http://packages.python.org/neurolab/example.html>`_
"""
import net
from tool import load
# Package version string (exposed as a module attribute).
__version__ = '0.2.3'

# Development Status :: 1 - Planning, 2 - Pre-Alpha, 3 - Alpha,
# 4 - Beta, 5 - Production/Stable
__status__ = '4 - Beta'
| [
"thelma1944@gmail.com"
] | thelma1944@gmail.com |
666833e38405142de2789716d45b14fc5149cbc6 | 53451bc76fea13b592fbe3d885123d002ea8da14 | /run.py | 4244c8c2101e90d75bd2e8c245813d872224a886 | [] | no_license | jicius/fake_proxy | a640476ad3d6cdcbd3d81741bd66bcaf9083ecec | 882c25b7da6657793bd32963aa2509f99d10cda9 | refs/heads/master | 2021-01-23T01:40:28.358157 | 2017-09-10T03:03:37 | 2017-09-10T03:03:37 | 85,923,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Jicius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from fake_proxy import app
# Enable Flask debug mode (development only — never deploy with DEBUG=True).
app.config.update(DEBUG=True)
if __name__ == '__main__':
app.run() | [
"bq_ji@yahoo.com"
] | bq_ji@yahoo.com |
b74a713408aefee074e4a9a91ccb1e88510edabe | 38c753e495cd362086777d3e78330325580cb5a3 | /q10.py | ddc209bcdf18eff788f55bb82962785d86a2db3f | [] | no_license | Sandesh-Thapa/Assignment-II-Control-Structure | 578d6841f1e186f1700d3abe8185089d35ace76b | a1c7e7b66a95e38ba7267180970c56f047b114c2 | refs/heads/master | 2023-01-30T08:06:50.304445 | 2020-12-13T15:12:07 | 2020-12-13T15:12:07 | 321,084,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,247 | py | # Write a function that takes camel-cased strings (i.e. ThisIsCamelCased), and converts them to snake case (i.e. this_is_camel_cased). Modify the function by adding an argument, separator, so it will also convert to the kebab case (i.e.this-is-camel-case) as well.
def convertStringCase(camel, separator):
    """Convert a CamelCased string to snake_case or kebab-case.

    :param camel: input string; must start with an uppercase letter
    :param separator: None for snake_case ('_'), '-' for kebab-case;
        any other separator is ignored (matching the original behavior)
    :return: the converted string (also printed), or None on bad input
        — returning the value is new and backward-compatible, and makes
        the function reusable/testable instead of print-only.

    Fixes: the two near-identical branches are merged, and an empty
    input no longer raises IndexError on ``camel[0]``.
    """
    if separator is None:
        sep, style_name = '_', 'camel-cased'
    elif separator == '-':
        sep, style_name = separator, 'camel-case'
    else:
        # Unsupported separator: original silently did nothing.
        return None
    if not camel or not camel[0].isupper():
        print('Enter string in {} format !!'.format(style_name))
        return None
    pieces = [camel[0].lower()]
    for ch in camel[1:]:
        # Each interior uppercase letter starts a new separated word.
        pieces.append(sep + ch.lower() if ch.isupper() else ch)
    output = ''.join(pieces)
    print(output)
    return output
# Demonstrate both conversions on one user-supplied camel-cased string.
string = input("Enter string in camel-case format: ")
convertStringCase(string, None)
convertStringCase(string, '-') | [
"sandeshthapa426@gmail.com"
] | sandeshthapa426@gmail.com |
974fac91bb980b5ccc2df0d561cd18c2080e6149 | 5c22a1b4af40a008681eb2a6b7829a3c135b7d78 | /Coding_and_Evaluation/detection/vgg/cal_comp_rate.py | b5ee3e9a698cafe80874b41eae55353e40b2bd50 | [] | no_license | ZoomChen/DeepFeatureCoding | 67c595a54859b8262d6397fe8ebef6d7c813a7b9 | 93dab42cfbfe9a579ae5f7d88e34e2c612d66b80 | refs/heads/master | 2020-08-20T09:16:34.107215 | 2019-11-05T01:31:21 | 2019-11-05T01:31:21 | 216,005,759 | 13 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,918 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import sys
import pickle
# Root directory holding per-image quantized-feature encoder outputs.
quant_feat_dir = '/abs_path/coding/detection/vgg'
# HEVC quantization parameters and VGG feature layers to evaluate.
Qp_list = [12, 22, 32, 42]
feat_list = ['conv1', 'pool1', 'conv2', 'pool2', 'conv3', 'pool3', 'conv4', 'pool4', 'conv5']
def read_log(file_name):
    """Sum the per-POC bit counts reported in an HEVC encoder log.

    Every line starting with 'POC' contains '<count> bits'; the integer
    immediately before the literal token 'bits' is accumulated.  A fixed
    metadata overhead is added to the total: pad_h/pad_w (3 bits each),
    maxLog2Val (32 bits), feat_h/feat_w (12 bits each, values to 4096).

    :param file_name: path to the encoder log file
    :return: total payload bits plus 62 bits of side-channel metadata
    """
    total_volume = 0
    with open(file_name, 'r') as f:
        # Iterate lazily instead of materializing readlines().
        for str_line in f:
            if not str_line.startswith('POC'):
                continue
            # split() collapses whitespace runs, replacing the manual
            # filtering of empty tokens from split(' ').
            tokens = str_line.split()
            bits_index = tokens.index('bits')
            total_volume += int(tokens[bits_index - 1])
    # add meta data: [pad_h 3 bit, pad_w 3 bit], maxLog2Val 32 bit, [feat_h 12 bit(to 4096) feat_w 12 bit]
    return total_volume + 6 + 32 + 24
def pickle_save(fname, data):
    """Serialize *data* to the file *fname* using pickle."""
    with open(fname, 'wb') as out_file:
        pickle.dump(data, out_file)
def pickle_load(fname):
    """Deserialize and return the pickled object stored in *fname*."""
    with open(fname, 'rb') as in_file:
        return pickle.load(in_file)
# For every (feature layer, QP) pair, compute each image's compression
# ratio = encoded bits / raw 32-bit feature volume, then print mean/std.
subdirs = os.listdir(quant_feat_dir)
subdirs = sorted(subdirs)
total_amount = len(subdirs)
for feat_type in feat_list:
    for Qp in Qp_list:
        log_type = 'enc_{}_Qp{}.log'.format(feat_type, Qp)
        comp_rate_list = np.zeros([total_amount,])
        for (idx,subdir) in enumerate(subdirs):
            log_dir = os.path.join(quant_feat_dir, subdir, 'encode_log', log_type)
            compressed_bits = read_log(log_dir)
            # Meta pickle holds (?, pad (h,w), padded YUV shape (h,w,c)).
            _, pad_size, yuv_size = pickle_load(os.path.join(quant_feat_dir, subdir, feat_type+'_meta.pkl'))
            # Raw size: unpadded spatial extent x channels x 32-bit floats.
            ori_bits = (yuv_size[0]-pad_size[0])*(yuv_size[1]-pad_size[1])*yuv_size[2]*32
            comp_rate_list[idx] = compressed_bits / ori_bits
        print('{} @ {}: mean {}, std {}'.format(feat_type, Qp, np.mean(comp_rate_list), np.std(comp_rate_list)))
        sys.stdout.flush()
| [
"chenzhuo.zoom@gmail.com"
] | chenzhuo.zoom@gmail.com |
488cf30ffd994fadeab06641083eb00fa3b153e5 | 4eff9d232c5dcae30232de53e7de2d7a7902b9fd | /authapp/forms.py | 9af4ac083b66639cb24a899a5698fa19c64c42d9 | [] | no_license | Volhen/geekshop | 56728b195797de18c25061f0a5bb4881eff15a08 | c9226486fb9c9b13faea434e3fba2b7e6a0dd4f9 | refs/heads/master | 2020-05-23T08:31:32.562412 | 2019-05-20T10:41:22 | 2019-05-20T10:41:22 | 184,905,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,566 | py | import random
import hashlib
from django.contrib.auth.forms import AuthenticationForm, UserChangeForm, UserCreationForm
from authapp.models import GeekUser
from django import forms
from .models import GeekUserProfile
class GeekUserLoginForm(AuthenticationForm):
    """Login form for GeekUser; every widget gets Bootstrap styling."""
    class Meta:
        model = GeekUser
        fields = ('username', 'password')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Apply the Bootstrap form-control class to each field's widget.
        for field in self.fields.values():
            field.widget.attrs['class'] = 'form-control'
class GeekUserRegisterForm(UserCreationForm):
    """Registration form: new accounts start inactive with an activation key."""
    class Meta:
        model = GeekUser
        fields = ('username', 'first_name', 'last_name', 'password1', 'password2', 'email', 'age', 'avatar')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Bootstrap styling and no inline help text on any field.
        for field in self.fields.values():
            field.widget.attrs['class'] = 'form-control'
            field.help_text = ''

    def clean_age(self):
        age = self.cleaned_data['age']
        if age < 18:
            raise forms.ValidationError("Вы слишком молоды!")
        return age

    def save(self):
        new_user = super().save()
        new_user.is_active = False
        # Derive the e-mail activation key from the address plus a random salt.
        salt = hashlib.sha1(str(random.random()).encode('utf8')).hexdigest()[:6]
        new_user.activation_key = hashlib.sha1((new_user.email + salt).encode('utf8')).hexdigest()
        new_user.save()
        return new_user
class GeekUserEditForm(UserChangeForm):
    """Account edit form: hides the password hash and enforces age >= 18."""
    class Meta:
        model = GeekUser
        fields = ('username', 'first_name', 'last_name', 'email', 'age', 'avatar', 'password')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for name, field in self.fields.items():
            field.widget.attrs['class'] = 'form-control'
            field.help_text = ''
            if name == 'password':
                # The stored password must not be shown or edited here.
                field.widget = forms.HiddenInput()

    def clean_age(self):
        age = self.cleaned_data['age']
        if age < 18:
            raise forms.ValidationError("Вы слишком молоды!")
        return age
class GeekUserProfileEditForm(forms.ModelForm):
    """Extra-profile edit form (tagline, phone, about-me text, gender)."""
    class Meta:
        model = GeekUserProfile
        fields = ('tagline', 'phone', 'aboutMe', 'gender')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for field in self.fields.values():
            field.widget.attrs['class'] = 'form-control'
| [
"zver2485@mail.ru"
] | zver2485@mail.ru |
dbaa16ecbdad9bbf872d8aacc6e0c934db1fa839 | dd34d98b9301a1bea1daaa85f567310b5663847e | /chapter06/6-7.py | 9e4a777d2d537cd70c7bbed2365705c18c69962b | [] | no_license | wngq/PythonCrashCourse | a4d4b34c3380b2ea3d5773331b5bf79a7e1b0abd | f6679319cd560ef336ba452eb28eebf272584b62 | refs/heads/master | 2023-04-03T19:56:29.995690 | 2021-04-14T03:07:01 | 2021-04-14T03:07:01 | 349,893,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | person_1 = {
'first_name': 'bin',
'last_name': 'huang',
'age': 29,
'city': 'nanjing',
}
person_2 = {
    'first_name': 'ziyu',
    'last_name': 'zhang',
    'age': 28,
    'city': 'shenzhen',
    }
person_3 = {
    'first_name': 'chao',
    'last_name': 'sun',
    'age': 27,
    'city': 'shandong',
    }
# Print a one-line, title-cased description for each person record.
people = [person_1, person_2, person_3]
for pp in people:
    # print(pp)
    fullname = pp['last_name'] + " " + pp['first_name']
    age = str(pp['age'])
    city = pp['city']
    print(fullname.title() + ", who is " + age + " years old, " + "is living in " + city.title() + ".")
| [
"wqi1203@gmail.com"
] | wqi1203@gmail.com |
aa46e0d853383325897465db18e112db394bcc2d | 27ee4952728504e85d37d47d1c3cba87b550c12f | /htmlsession.py | 57b9ae4d349bb430294af93cb7e15846aa3d6790 | [] | no_license | Minux13/testVerbs | 43bc30eb177a1f85a0c11b638d117e406a3373fd | d79dc4ac428681eb9b5e91880492d5903fa2460a | refs/heads/master | 2021-06-27T15:58:17.745450 | 2019-04-17T16:50:47 | 2019-04-17T16:50:47 | 133,498,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | #!/usr/bin/env python
from requests_html import HTMLSession
# Open a requests-html session and fetch the Python homepage.
session = HTMLSession()
r = session.get('https://python.org/')
| [
"neurona13@gmail.com"
] | neurona13@gmail.com |
84183f7c48cf28f0fa8308a8d63844f013db01fe | 27158575cad2b13ba578f0864172dd667dca3daa | /proj1.py | 1e2378b137cb94c44b0dee3501a24e3ec414c153 | [] | no_license | pcuste1/cmsc441-c9-repo | 6b6aad35dff35fc9d61a467ba01d4133e39bfbe9 | 522bb4a49985e14031ad54093890441188f56624 | refs/heads/master | 2020-06-12T22:03:06.785767 | 2016-12-03T21:24:56 | 2016-12-03T21:24:56 | 75,501,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | from __future__ import print_function
import sys
import time
sys.setrecursionlimit(1500)  # OPT() recurses deeply; the default limit is too low
# Complementary character pairs that are allowed to bind (H<->G, W<->T).
matches = {'H':'G', 'G':'H', 'W':'T','T':'W'}
r = []  # memoization table for OPT(); '-' marks an entry not yet computed
line = []  # current input sequence as a list of single characters
def OPT(i,j):
    """Max number of non-crossing pairs in line[i..j] (Nussinov-style DP).

    A pair (x, j) is only legal when x <= j-5 (at least four characters
    between the two ends) and matches[line[x]] == line[j]. Results are
    memoized in the global table r, where '-' means "not computed yet";
    the globals matches, r and line must be set up by the caller.
    """
    # Base case: intervals too short to hold any legal pair score 0.
    if not i < j-4:
        return 0
    if r[i][j] != '-':
        return r[i][j]
    q = -1
    # Case 1: position j pairs with some x in [i, j-5]; the pair splits the
    # interval into two independent subproblems.
    for x in range(i, j-4):
        temp = OPT(i,x-1) + OPT(x+1, j-1)
        if matches[line[x]] == line[j]:
            q = max(temp + 1, q)
    #k = q
    # Case 2: position j stays unpaired.
    q = max(q, OPT(i, j-1))
    #if k < q: optimal[j-1] = [j, q]
    r[i][j] = q
    return q
def main():
    """Read 12-character sequences from test.txt; print OPT plus the memo table."""
    global r
    global line
    n = 11  # index of the last character (sequences are n+1 = 12 chars long)
    r = [['-' for i in range(n+1)] for i in range(n+1)]
    # NOTE(review): the memo table is initialized once, OUTSIDE the file loop,
    # so every line after the first reuses stale entries - confirm whether
    # test.txt is expected to contain a single sequence.
    infile = open('test.txt', 'r')
    for i in infile:
        line = list(i.strip())
        start_time = time.time()
        opt = OPT(0,n)
        deltaT = time.time() - start_time
        print(opt)
        # Header row: the sequence characters across the top.
        print(" ", end="")
        for i in range(n+1):  # NOTE(review): shadows the file-line variable i
            print(line[i], end= " ")
        print()
        # Dump the memo table with the sequence down the left edge.
        for i in range(n+1):
            for j in range(n+1):
                if j == 0:
                    print(line[i], end= " ")
                print(r[i][j], end = " ")
            print()
        # LaTeX table row: size & optimum & __ & runtime.
        print(str(n+1) + " & " + str(opt) + " & __ & " + str(deltaT) + "\\\\")
        print(opt)
main()
"pcuste1@umbc.edu"
] | pcuste1@umbc.edu |
05f8987371f5ddb15e977ff76f10a537b712f7bd | 4703810393f666b8f6abd9658d1a8903ee554fe8 | /images360/spiders/images.py | 8f78ec0faca3514e3788514c6ec7042ff1c00fe4 | [] | no_license | wangch230/360Picture | 0ec8a6012c54c43eab2d0ca8a1bf5a6aa08285e2 | 27e90c6dc14cff44faa4310d0ca0f0b213b67426 | refs/heads/master | 2020-04-18T21:22:44.905200 | 2019-01-27T03:02:23 | 2019-01-27T03:02:23 | 167,763,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,427 | py | # -*- coding: utf-8 -*-
from scrapy import Request, Spider
from urllib.parse import urlencode
import json
from images360.items import Images360Item
class ImagesSpider(Spider):
name = 'images'
allowed_domains = ['images.so.com']
start_urls = ['http://images.so.com/']
def parse(self, response):
result = json.loads(response.text)
for image in result.get('list'):
item = Images360Item()
item['id'] = image.get('imageid')
item['url'] = image.get('qhimg_url')
item['title'] = image.get('group_title')
item['thumb'] = image.get('qhimg_thumb_url')
yield item
# def parse(self, response):
# result = json.loads(response.text)
# for image in result.get('list'):
# item = ImageItem()
# item['id'] = image.get('imageid')
# item['url'] = image.get('qhimg_url')
# item['title'] = image.get('group_title')
# item['thumb'] = image.get('qhimg_thumb_url')
# yield item
def start_requests(self):
data = {'ch': 'beauty', 'listtype': 'new'}
base_url = 'http://images.so.com/zj?'
for i in range(1, self.settings.get('MAX_PAGE') + 1):
data['sn'] = i * 30
params = urlencode(data)
url = base_url + params
yield Request(url, self.parse)
| [
"wangch30@qq.com"
] | wangch30@qq.com |
f9412f6cc667dfbdacaae362e4edfd002c3afe40 | 5e8ad98800aeffd7a63f3469ac27b6bd46bb7be4 | /prime.py | 29d6e8087139b5651c8d01b64aba2496b667c37a | [] | no_license | V-Kiruthika/GuviBeginner | 4d41f35074f290a90559974fdd4b70904cd858a5 | da43fbb7cfac1ea3c56f3cb35dbbe983f9e9e1f6 | refs/heads/master | 2020-06-29T20:44:33.936875 | 2019-08-15T10:29:22 | 2019-08-15T10:29:22 | 200,619,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | d=int(input())
# Primality test for d (read from stdin on the line above). The original loop
# only ever inspected i == 2 - both branches break immediately - so e.g. 9
# printed "yes" and 2 printed nothing at all.
if d < 2:
    print("no")
else:
    for i in range(2, int(d ** 0.5) + 1):
        if d % i == 0:
            print("no")
            break
    else:
        # No divisor found up to sqrt(d): d is prime.
        print("yes")
| [
"noreply@github.com"
] | noreply@github.com |
c36eed7135bf3ee45450a5e984b86bfbba0868c1 | cfbbf8406ca014f27b1467ac359e1b14560c67ba | /routes/admin.py | aeaec483a36234fc519e88e80219d5f975a8be5d | [] | no_license | Sajad321/Alamjad_api | abe035c29f8f65b31dff29026a7ce123b15c04a0 | bc44fa6a83a51205f7a00b28605a220f2b6949a1 | refs/heads/master | 2023-02-24T22:22:38.789663 | 2021-01-22T11:24:24 | 2021-01-22T11:24:24 | 287,375,723 | 0 | 1 | null | 2020-09-02T16:14:44 | 2020-08-13T20:39:19 | Python | UTF-8 | Python | false | false | 24,486 | py | from flask import jsonify, abort, Blueprint, request
import json
from .auth import requires_auth
from models import user, report, history_of_pharmacy, history_of_user_activity, doctor, zone, pharmacy, doctor_pharmacies, company, item, acceptance_of_item, order, availability_of_item, notification, item_order
from math import ceil
AdminRoutes = Blueprint('admin', __name__)  # all admin-facing endpoints hang off this blueprint
THINGS_PER_PAGE = 50  # page size used by paginate() for every listing endpoint
def paginate(request, selection):
    """Return the items of the page requested via the ?page= query arg.

    *selection* must be a Flask-SQLAlchemy query; pages hold
    THINGS_PER_PAGE rows each (page numbering starts at 1).
    """
    page_number = request.args.get('page', 1, type=int)
    return selection.paginate(page_number, THINGS_PER_PAGE, False).items
@AdminRoutes.route("/main-admin", methods=['GET'])
@requires_auth("all:role")
def get_main_admin(token):
    """Dashboard counters: role-3 users, doctors, pharmacies, reports, orders, items."""
    counts = {
        'users_count': user.query.filter(user.role == 3).count(),
        'doctors_count': doctor.query.count(),
        'pharmacies_count': pharmacy.query.count(),
        'reports_count': report.query.count(),
        'orders_count': order.query.count(),
        'items_count': item.query.count(),
    }
    return jsonify({"success": True, **counts}), 200
@AdminRoutes.route("/notifications", methods=['GET'])
@requires_auth("admin:role")
def get_notifications(token):
    """Return every notification (joined to its report), newest first."""
    rows = (notification.query
            .join(report, report.id == notification.report_id)
            .order_by(notification.id.desc())
            .all())
    return jsonify({
        "success": True,
        'notifications': [row.format() for row in rows],
    }), 200
@AdminRoutes.route("/orders", methods=['GET'])
@requires_auth("admin:role")
def get_orders(token):
    """Paginated order listing with optional search filters.

    Query args: page, checkbox ("true" limits to approved orders),
    searchType (1 date range, 2 user, 3 zone, 4 pharmacy, 5 company, 6 item),
    search (text term) and search1/search2 (date-range bounds). Each order
    row is expanded with its doctor's name, its item lines and a client-side
    "seeMore" toggle.
    """
    checkbox = request.args.get("checkbox")
    search_type = request.args.get("searchType", None, type=int)
    search = request.args.get("search", None)
    search1 = request.args.get("search1", None)
    search2 = request.args.get("search2", None)
    # The JS client sends the literal string "undefined" for absent params.
    if (search == None or search == "undefined") and (search1 == None or search1 == "undefined") and (search2 == None or search2 == "undefined"):
        query = order.query.join(user, user.id == order.user_id).join(zone, zone.id == order.zone_id).join(
            pharmacy, pharmacy.id == order.pharmacy_id).join(company, company.id == order.company_id)
    else:
        query = order.query.join(user, user.id == order.user_id).join(zone, zone.id == order.zone_id).join(
            pharmacy, pharmacy.id == order.pharmacy_id).join(company, company.id == order.company_id)
        if search_type == 1:
            query = query.filter(order.date_of_order >= search1,
                                 order.date_of_order <= search2)
        elif search_type == 2:
            # NOTE(review): "x or x == 'undefined'" is truthy for ANY non-empty
            # string, including "undefined" itself - the == clause is redundant
            # and '!=' may have been intended. Confirm against the client.
            if search2 or search2 == "undefined":
                query = query.filter(user.name.ilike('%{}%'.format(search)), order.date_of_order >= search1,
                                     order.date_of_order <= search2)
            elif search1 or search1 == "undefined":
                query = query.filter(user.name.ilike(
                    '%{}%'.format(search)), order.date_of_order >= search1)
            elif search or search == "undefined":
                query = query.filter(user.name.ilike('%{}%'.format(search)))
        elif search_type == 3:
            if search2 or search2 == "undefined":
                query = query.filter(zone.zone.ilike('%{}%'.format(search)), order.date_of_order >= search1,
                                     order.date_of_order <= search2)
            elif search1 or search1 == "undefined":
                query = query.filter(zone.zone.ilike(
                    '%{}%'.format(search)), order.date_of_order >= search1)
            elif search or search == "undefined":
                query = query.filter(zone.zone.ilike('%{}%'.format(search)))
        elif search_type == 4:
            if search2 or search2 == "undefined":
                query = query.filter(pharmacy.name.ilike('%{}%'.format(search)), order.date_of_order >= search1,
                                     order.date_of_order <= search2)
            elif search1 or search1 == "undefined":
                query = query.filter(pharmacy.name.ilike(
                    '%{}%'.format(search)), order.date_of_order >= search1)
            elif search or search == "undefined":
                query = query.filter(
                    pharmacy.name.ilike('%{}%'.format(search)))
        elif search_type == 5:
            if search2 or search2 == "undefined":
                query = query.filter(company.name.ilike('%{}%'.format(search)), order.date_of_order >= search1,
                                     order.date_of_order <= search2)
            elif search1 or search1 == "undefined":
                query = query.filter(company.name.ilike(
                    '%{}%'.format(search)), order.date_of_order >= search1)
            elif search or search == "undefined":
                query = query.filter(company.name.ilike('%{}%'.format(search)))
        elif search_type == 6:
            query = query.filter(item.name.ilike('%{}%'.format(search)))
    if checkbox == "true":
        query = query.filter(order.approved == 1)
    # Newest orders first, then paginate.
    query = query.order_by(order.id.desc())
    orders = paginate(request, query)
    current_orders = [o.detail() for o in orders]
    for date in current_orders:
        date['date_of_order'] = str(
            date['date_of_order'])
        doctor_query = doctor.query.get(date['doctor_id'])
        if doctor_query:
            date['doctor'] = doctor.d_name(doctor_query)
        # Attach the order's item lines.
        items_query = item_order.query.join(order, order.id == item_order.order_id).join(
            item, item.id == item_order.item_id).filter(item_order.order_id == date['id']).all()
        date['items'] = [i.detail() for i in items_query]
        date["seeMore"] = {"order_id": date['id'], "see": False}
    pages = ceil(query.count() / THINGS_PER_PAGE)
    results = {
        "success": True,
        'orders': current_orders,
        "pages": pages,
    }
    return jsonify(results), 200
@AdminRoutes.route("/orders/<int:order_id>", methods=['PATCH'])
@requires_auth("admin:role")
def patch_orders(token, order_id):
    """Set an order's approval flag from the JSON body {"approved": <int>}.

    Returns 404 for an unknown order id, 400 for a missing or non-numeric
    "approved" value, and 500 when persisting the change fails. (The old
    bare except collapsed all three cases into a generic 500.)
    """
    data = json.loads(request.data)
    order_data = order.query.get(order_id)
    if order_data is None:
        abort(404)  # unknown order id
    try:
        approved = int(data['approved'])
    except (KeyError, TypeError, ValueError):
        abort(400)  # client sent no usable "approved" value
    try:
        order_data.approved = approved
        order.update(order_data)
    except Exception:
        abort(500)
    return jsonify({"success": True}), 200
@AdminRoutes.route("/reports-detail", methods=["GET"])
@requires_auth("admin:role")
def get_reports_detail(token):
    """Paginated report listing with optional search filters and sorting.

    Query args: page, searchType (1 date range, 2 doctor, 3 zone, 4 user,
    5 company, 6 item), search/search1/search2 (term and date bounds),
    sorting ("ascending", anything else sorts descending) and sortingColumn
    (history/user/zone/doctor/pharmacy). Each report row is also given the
    date of its pharmacy's most recent order.
    """
    sorting = request.args.get("sorting", None)
    sortingColumn = request.args.get("sortingColumn", None)
    search_type = request.args.get("searchType", None, type=int)
    search = request.args.get("search", None)
    search1 = request.args.get("search1", None)
    search2 = request.args.get("search2", None)
    # The JS client sends the literal string "undefined" for absent params.
    if (search == None or search == "undefined") and (search1 == None or search1 == "undefined") and (search2 == None or search2 == "undefined"):
        query = report.query.join(user, user.id == report.user_id).join(doctor, doctor.id == report.doctor_id).join(zone, zone.id == report.zone_id).join(
            pharmacy, pharmacy.id == report.pharmacy_id).join(company, company.id == report.company_id).join(item, item.id == report.item_id).join(acceptance_of_item, acceptance_of_item.id == report.acceptance_of_item_id)
    else:
        query = report.query.join(user, user.id == report.user_id).join(doctor, doctor.id == report.doctor_id).join(zone, zone.id == report.zone_id).join(
            pharmacy, pharmacy.id == report.pharmacy_id).join(company, company.id == report.company_id).join(item, item.id == report.item_id).join(acceptance_of_item, acceptance_of_item.id == report.acceptance_of_item_id)
        if search_type == 1:
            query = query.filter(report.date >= search1,
                                 report.date <= search2)
        elif search_type == 2:
            # NOTE(review): "x or x == 'undefined'" is truthy for ANY non-empty
            # string (the == clause is redundant; '!=' may have been intended).
            if search2 or search2 == "undefined":
                query = query.filter(doctor.name.ilike('%{}%'.format(search)), report.date >= search1,
                                     report.date <= search2)
            elif search1 or search1 == "undefined":
                query = query.filter(doctor.name.ilike(
                    '%{}%'.format(search)), report.date >= search1)
            elif search or search == "undefined":
                query = query.filter(doctor.name.ilike('%{}%'.format(search)))
        elif search_type == 3:
            if search2 or search2 == "undefined":
                query = query.filter(zone.zone.ilike('%{}%'.format(search)), report.date >= search1,
                                     report.date <= search2)
            elif search1 or search1 == "undefined":
                query = query.filter(zone.zone.ilike(
                    '%{}%'.format(search)), report.date >= search1)
            elif search or search == "undefined":
                query = query.filter(zone.zone.ilike('%{}%'.format(search)))
        elif search_type == 4:
            if search2 or search2 == "undefined":
                query = query.filter(user.name.ilike('%{}%'.format(search)), report.date >= search1,
                                     report.date <= search2)
            elif search1 or search1 == "undefined":
                query = query.filter(user.name.ilike(
                    '%{}%'.format(search)), report.date >= search1)
            elif search or search == "undefined":
                query = query.filter(user.name.ilike('%{}%'.format(search)))
        elif search_type == 5:
            if search2 or search2 == "undefined":
                query = query.filter(company.name.ilike('%{}%'.format(search)), report.date >= search1,
                                     report.date <= search2)
            elif search1 or search1 == "undefined":
                query = query.filter(company.name.ilike(
                    '%{}%'.format(search)), report.date >= search1)
            elif search or search == "undefined":
                query = query.filter(company.name.ilike('%{}%'.format(search)))
        elif search_type == 6:
            query = query.filter(item.name.ilike('%{}%'.format(search)))
    # Default ordering: newest report first.
    if (sorting == None or sorting == "undefined" or sorting == ""):
        query = query.order_by(report.id.desc())
    else:
        if sorting == "ascending":
            if sortingColumn == "history":
                query = query.order_by(report.date.asc())
            elif sortingColumn == "user":
                query = query.order_by(user.name.asc())
            elif sortingColumn == "zone":
                query = query.order_by(zone.zone.asc())
            elif sortingColumn == "doctor":
                query = query.order_by(doctor.name.asc())
            elif sortingColumn == "pharmacy":
                query = query.order_by(pharmacy.name.asc())
        else:
            if sortingColumn == "history":
                query = query.order_by(report.date.desc())
            elif sortingColumn == "user":
                query = query.order_by(user.name.desc())
            elif sortingColumn == "zone":
                query = query.order_by(zone.zone.desc())
            elif sortingColumn == "doctor":
                query = query.order_by(doctor.name.desc())
            elif sortingColumn == "pharmacy":
                query = query.order_by(pharmacy.name.desc())
    reports = paginate(request, query)
    current_reports = [r.detail() for r in reports]
    for date in current_reports:
        # Attach the date of this pharmacy's most recent order, if any.
        last_pharmacy_order_query = history_of_pharmacy.query.join(order, order.id == history_of_pharmacy.order_id).filter(
            history_of_pharmacy.pharmacy_id == date['pharmacy_id']).order_by(order.date_of_order.desc()).first()
        if last_pharmacy_order_query:
            data = history_of_pharmacy.format(last_pharmacy_order_query)
            date['last_pharmacy_order_date'] = str(
                data['last_pharmacy_order_date'])
        date['history'] = str(
            date['history'])
    pages = ceil(query.count() / THINGS_PER_PAGE)
    result = {
        "success": True,
        "reports": current_reports,
        "pages": pages,
        "sorting": sorting,
        "sortingColumn": sortingColumn,
    }
    return jsonify(result), 200
@AdminRoutes.route("/users-detail", methods=["GET"])
@requires_auth("admin:role")
def get_users_detail(token):
    """List every role-3 user with zone info, report count and joining date."""
    rows = user.query.join(zone, zone.id == user.zone_id).filter(
        user.role == 3).all()
    detailed = [row.detail() for row in rows]
    for entry in detailed:
        entry['reports_count'] = report.query.filter(
            report.user_id == entry['id']).count()
        entry['date_of_joining'] = str(entry['date_of_joining'])
    return jsonify({"success": True, "users": detailed}), 200
@AdminRoutes.route("/salesmen-detail", methods=["GET"])
@requires_auth("admin:role")
def get_salesmen_detail(token):
    """List role-3 salesmen with report/order counts and total order value.

    Optional ?from= / ?to= query args restrict the counted reports and
    orders (dates strictly greater than "from" and strictly less than "to",
    matching the original comparison operators). The three duplicated query
    branches (from+to / from only / unfiltered) are collapsed by building
    the extra filters once; as a robustness fix, a "to" sent without "from"
    no longer produces a comparison against None.
    """
    date_from = request.args.get("from")
    date_to = request.args.get("to")

    # Optional date-range filters, shared by all three per-salesman queries.
    report_filters = []
    order_filters = []
    if date_from:
        report_filters.append(report.date > date_from)
        order_filters.append(order.date_of_order > date_from)
    if date_to:
        report_filters.append(report.date < date_to)
        order_filters.append(order.date_of_order < date_to)

    rows = user.query.filter(
        user.role == 3).all()
    salesmen = [s.table() for s in rows]
    for s in salesmen:
        s['reports_count'] = report.query.filter(
            report.user_id == s['id'], *report_filters).count()
        s['orders_count'] = order.query.filter(
            order.user_id == s['id'], *order_filters).count()
        # Total monetary value of the matching orders.
        prices = [value for (value,) in order.query.with_entities(order.price).filter(
            order.user_id == s['id'], *order_filters).all()]
        s['orders_price'] = sum(prices)
    result = {
        "success": True,
        "salesmen": salesmen
    }
    return jsonify(result), 200
@AdminRoutes.route("/doctors-detail", methods=["GET"])
@requires_auth("admin:role")
def get_doctors_detail(token):
    """List every doctor (with zone info) plus the pharmacies linked to each."""
    rows = doctor.query.join(zone, zone.id == doctor.zone_id).all()
    doctors = [row.detail() for row in rows]
    for entry in doctors:
        # Resolve this doctor's pharmacy links through the join table.
        links = doctor_pharmacies.query.join(
            doctor, doctor.id == doctor_pharmacies.doctor_id).join(
            pharmacy, pharmacy.id == doctor_pharmacies.pharmacy_id).filter(
            doctor_pharmacies.doctor_id == entry['id']).all()
        entry["pharmacies"] = [link.detail() for link in links]
        entry['date_of_joining'] = str(entry['date_of_joining'])
    return jsonify({"success": True, "doctors": doctors}), 200
@AdminRoutes.route("/doctors-form", methods=["GET"])
@requires_auth("admin:role")
def get_doctors_form(token):
    """Form data for creating a doctor: all zones plus the pharmacy short list."""
    zones = [z.format() for z in zone.query.all()]
    pharmacies = [p.short() for p in pharmacy.query.all()]
    return jsonify({"zones": zones,
                    "pharmacies": pharmacies,
                    "success": True}), 200
@AdminRoutes.route("/doctors", methods=["POST"])
@requires_auth("admin:role")
def post_doctors_form(token):
    """Create a doctor plus its doctor/pharmacy link rows from the JSON body.

    Expects name, date_of_joining, email, zone_id, phone, speciality,
    d_class, support and a list "pharmacies" of {pharmacy_id} objects.
    """
    data = json.loads(request.data)
    try:
        name = data['name']
        date_of_joining = data['date_of_joining']
        email = data['email']
        zone_id = data['zone_id']
        phone = data['phone']
        speciality = data['speciality']
        d_class = data['d_class']
        support = data['support']
        new_doctor = doctor(
            name=name,
            date_of_joining=date_of_joining,
            email=email,
            zone_id=zone_id,
            phone=phone,
            speciality=speciality,
            d_class=d_class,
            support=support
        )
        # The id returned by insert() keys the pharmacy link rows below.
        id_doctor = doctor.insert(new_doctor)
        pharmacies = data['pharmacies']
        for p in pharmacies:
            new_doctor_pharmacy = doctor_pharmacies(
                doctor_id=id_doctor, pharmacy_id=p['pharmacy_id'])
            doctor_pharmacies.insert(new_doctor_pharmacy)
        return jsonify({
            'success': True,
        }), 201
    except:
        # NOTE(review): the bare except maps every failure (including missing
        # JSON keys, which would better be a 400) to a 500 and can leave a
        # doctor inserted without its links - confirm rollback behavior.
        abort(500)
@AdminRoutes.route("/doctors/<int:doctor_id>", methods=["PATCH"])
@requires_auth("admin:role")
def edit_doctor_form(token, doctor_id):
    """Update a doctor's fields and upsert its doctor/pharmacy link rows."""
    data = json.loads(request.data)
    doctor_data = doctor.query.get(doctor_id)
    # NOTE(review): doctor_data is None for an unknown id; the bare except
    # below then turns the AttributeError into a generic 500 (404 would fit).
    try:
        name = data['name']
        # phone/email are optional: only overwritten when a truthy value is sent.
        if data['phone']:
            phone = data['phone']
            doctor_data.phone = phone
        if data['email']:
            email = data['email']
            doctor_data.email = email
        zone_id = data['zone_id']
        speciality = data['speciality']
        d_class = data['d_class']
        support = data['support']
        date_of_joining = data['date_of_joining']
        doctor_data.name = name
        doctor_data.zone_id = zone_id
        doctor_data.speciality = speciality
        doctor_data.d_class = d_class
        doctor_data.support = support
        doctor_data.date_of_joining = date_of_joining
        doctor.update(doctor_data)
        pharmacies = data['pharmacies']
        for p in pharmacies:
            if p['id']:
                # Existing link row: repoint it at the submitted pharmacy.
                doctor_pharmacies_data = doctor_pharmacies.query.get(p['id'])
                doctor_pharmacies_data.pharmacy_id = p['pharmacy_id']
                doctor_pharmacies.update(doctor_pharmacies_data)
            else:
                # No id supplied: create a fresh link row.
                new_doctor_pharmacy = doctor_pharmacies(
                    doctor_id=doctor_id, pharmacy_id=p['pharmacy_id'])
                doctor_pharmacies.insert(new_doctor_pharmacy)
        return jsonify({
            'success': True,
        }), 201
    except:
        abort(500)
@AdminRoutes.route("/pharmacies-detail", methods=["GET"])
@requires_auth("admin:role")
def get_pharmacies_detail(token):
    """List every pharmacy (joined with its zone); joining dates stringified."""
    rows = pharmacy.query.join(zone, zone.id == pharmacy.zone_id).all()
    pharmacies = [row.detail() for row in rows]
    for entry in pharmacies:
        entry['date_of_joining'] = str(entry['date_of_joining'])
    return jsonify({"success": True, "pharmacies": pharmacies}), 200
@AdminRoutes.route("/pharmacies-form", methods=["GET"])
@requires_auth("admin:role")
def get_pharmacies_form(token):
    """Form data for creating a pharmacy: the list of zones."""
    zones = [z.format() for z in zone.query.all()]
    return jsonify({"zones": zones, "success": True}), 200
@AdminRoutes.route("/pharmacies", methods=["POST"])
@requires_auth("admin:role")
def post_pharmacies_form(token):
    """Create a pharmacy from the JSON request body."""
    data = json.loads(request.data)
    try:
        new_pharmacy = pharmacy(
            name=data['name'],
            date_of_joining=data['date_of_joining'],
            phone_number=data['phone_number'],
            zone_id=data['zone_id'],
            address=data['address'],
            support=data['support']
        )
        pharmacy.insert(new_pharmacy)
        return jsonify({
            'success': True,
        }), 201
    except:
        abort(500)
@AdminRoutes.route("/pharmacies/<int:pharmacy_id>", methods=["PATCH"])
@requires_auth("admin:role")
def edit_pharmacy_form(token, pharmacy_id):
    """Update a pharmacy's fields from the JSON request body.

    phone_number is optional and only overwritten when a truthy value is
    sent; all other fields are required. Returns 404 for an unknown id and
    500 on any other failure.
    """
    data = json.loads(request.data)
    pharmacy_data = pharmacy.query.get(pharmacy_id)
    if pharmacy_data is None:
        abort(404)  # unknown pharmacy id (previously a generic 500)
    try:
        # BUG FIX: the old code assigned pharmacy_data.phone_number a second
        # time, unconditionally, after the truthiness guard - raising
        # NameError (-> 500) whenever phone_number was empty or falsy.
        if data['phone_number']:
            pharmacy_data.phone_number = data['phone_number']
        pharmacy_data.name = data['name']
        pharmacy_data.zone_id = data['zone_id']
        pharmacy_data.address = data['address']
        pharmacy_data.support = data['support']
        pharmacy_data.date_of_joining = data['date_of_joining']
        pharmacy.update(pharmacy_data)
        return jsonify({
            'success': True,
        }), 201
    except Exception:
        abort(500)
@AdminRoutes.route("/companies", methods=["GET"])
@requires_auth("admin:role")
def get_companies(token):
    """Return every company."""
    companies = [c.format() for c in company.query.all()]
    return jsonify({"success": True, "companies": companies}), 200
@AdminRoutes.route("/companies", methods=["POST"])
@requires_auth("admin:role")
def post_companies_form(token):
    """Create a company from the JSON body {"name": ...}."""
    data = json.loads(request.data)
    try:
        company.insert(company(name=data['name']))
        return jsonify({
            'success': True,
        }), 201
    except:
        abort(500)
@AdminRoutes.route("/companies/<int:company_id>", methods=["PATCH"])
@requires_auth("admin:role")
def edit_companies_form(token, company_id):
    """Rename an existing company."""
    data = json.loads(request.data)
    company_data = company.query.get(company_id)
    try:
        company_data.name = data['name']
        company.update(company_data)
        return jsonify({
            'success': True,
        }), 201
    except:
        abort(500)
@AdminRoutes.route("/items-detail", methods=["GET"])
@requires_auth("admin:role")
def get_items_detail(token):
    """List every item (joined with its company); expiry dates stringified."""
    rows = item.query.join(company, company.id == item.company_id).all()
    items = [row.detail() for row in rows]
    for entry in items:
        entry['expire_date'] = str(entry['expire_date'])
    return jsonify({"success": True, "items": items}), 200
@AdminRoutes.route("/items-form", methods=["GET"])
@requires_auth("admin:role")
def get_items_form(token):
    """Form data for creating an item: the list of companies."""
    companies = [c.format() for c in company.query.all()]
    return jsonify({"companies": companies, "success": True}), 200
@AdminRoutes.route("/items", methods=["POST"])
@requires_auth("admin:role")
def post_items_form(token):
    """Create an item (name, company, expiry date, price) from the JSON body."""
    data = json.loads(request.data)
    try:
        new_item = item(
            name=data['name'],
            company_id=data['company_id'],
            expire_date=data['expire_date'],
            price=data['price']
        )
        item.insert(new_item)
        return jsonify({
            'success': True,
        }), 201
    except:
        abort(500)
@AdminRoutes.route("/items/<int:item_id>", methods=["PATCH"])
@requires_auth("admin:role")
def edit_items_form(token, item_id):
    """Update an item's name, company, expiry date and price."""
    data = json.loads(request.data)
    item_data = item.query.get(item_id)
    try:
        item_data.name = data['name']
        item_data.company_id = data['company_id']
        item_data.expire_date = data['expire_date']
        item_data.price = data['price']
        item.update(item_data)
        return jsonify({
            'success': True,
        }), 201
    except:
        abort(500)
| [
"saj99h@hotmail.com"
] | saj99h@hotmail.com |
84d0c392bf79f9dc6b5aabeae0dcb651c2507545 | 18574df53001f3b6f717a15289495f4f94cb4389 | /examples/run_ensemble_nowcast.py | 3cc0f10cffa265c0b33d2b8d7c06683ff35e04b8 | [
"BSD-3-Clause"
] | permissive | savelov/nowcast | 6fa789b66724cc65b8d4e611cb6b7c98eb4756a0 | 9c1168b1ba642f15bc4ffb000bdbca6db27c29b1 | refs/heads/gimet | 2021-06-22T15:17:52.527381 | 2020-03-29T16:06:55 | 2020-03-29T16:06:55 | 157,089,594 | 6 | 2 | BSD-3-Clause | 2020-05-03T13:11:31 | 2018-11-11T14:49:03 | Python | UTF-8 | Python | false | false | 7,249 | py | #!/bin/env python
"""Stochastic ensemble precipitation nowcasting
The script shows how to run a stochastic ensemble of precipitation nowcasts with
pysteps.
More info: https://pysteps.github.io/
"""
import datetime
import matplotlib.pylab as plt
import numpy as np
import pickle
import os
import pysteps as stp
import config as cfg
# List of case studies that can be used in this tutorial
#+-------+--------------+-------------+----------------------------------------+
#| event | start_time | data_source | description |
#+=======+==============+=============+========================================+
#| 01 | 201701311030 | mch | orographic precipitation |
#+-------+--------------+-------------+----------------------------------------+
#| 02 | 201505151630 | mch | non-stationary field, apparent rotation|
#+-------+--------------+------------------------------------------------------+
#| 03 | 201609281530 | fmi | stratiform rain band |
#+-------+--------------+-------------+----------------------------------------+
#| 04 | 201705091130 | fmi | widespread convective activity |
#+-------+--------------+-------------+----------------------------------------+
#| 05 | 201806161100 | bom | bom example data |
#+-------+--------------+-------------+----------------------------------------+
# Set parameters for this tutorial
## input data (copy/paste values from table above)
startdate_str = "201810071540"  # analysis time of the newest radar field (YYYYmmddHHMM)
data_source = "gimet"
## methods
oflow_method = "lucaskanade" # lucaskanade, darts, None
nwc_method = "steps"
adv_method = "semilagrangian" # semilagrangian, eulerian
noise_method = "nonparametric" # parametric, nonparametric, ssft
bandpass_filter = "gaussian"
decomp_method = "fft"
## forecast parameters
n_prvs_times = 3 # use at least 9 with DARTS
n_lead_times = 12  # number of forecast steps (each one data timestep long)
n_ens_members = 3  # ensemble size
n_cascade_levels = 6
ar_order = 2
r_threshold = 0.1 # rain/no-rain threshold [mm/h]
adjust_noise = True
prob_matching = True
precip_mask = True
mask_method = "incremental" # sprog, obs or incremental
conditional = False
unit = "mm/h" # mm/h or dBZ
transformation = "dB" # None or dB
adjust_domain = None # None or square
seed = 42 # for reproducibility
# Read-in the data
print('Read the data...')
startdate = datetime.datetime.strptime(startdate_str, "%Y%m%d%H%M")
## import data specifications
ds = stp.rcparams.data_sources[data_source]
## find radar field filenames (n_prvs_times fields before startdate, none after)
input_files = stp.io.find_by_date(startdate, ds["root_path"], ds["path_fmt"], ds["fn_pattern"],
                                  ds["fn_ext"], ds["timestep"], n_prvs_times, 0)
## read radar field files
importer = stp.io.get_method(ds.importer, type="importer")
R, _, metadata = stp.io.read_timeseries(input_files, importer, **ds.importer_kwargs)
Rmask = np.isnan(R)  # remember the radar coverage mask before filling NaNs
# Prepare input files
print("Prepare the data...")
## if requested, make sure we work with a square domain
reshaper = stp.utils.get_method(adjust_domain)
R, metadata = reshaper(R, metadata, method="pad")
## if necessary, convert to rain rates [mm/h]
converter = stp.utils.get_method("mm/h")
R, metadata = converter(R, metadata)
## threshold the data
R[R<r_threshold] = 0.0
metadata["threshold"] = r_threshold
## convert the data
converter = stp.utils.get_method(unit)
R, metadata = converter(R, metadata)
## transform the data
transformer = stp.utils.get_method(transformation)
R, metadata = transformer(R, metadata)
## set NaN equal to zero
R[~np.isfinite(R)] = metadata["zerovalue"]
# Compute motion field
oflow_method = stp.motion.get_method(oflow_method)
UV = oflow_method(R)
# Perform the nowcast (STEPS stochastic ensemble)
nwc_method = stp.nowcasts.get_method(nwc_method)
R_fct = nwc_method(R, UV, n_lead_times, n_ens_members,
                   n_cascade_levels, kmperpixel=metadata["xpixelsize"]/1000,
                   timestep=ds.timestep, R_thr=metadata["threshold"],
                   extrap_method=adv_method, decomp_method=decomp_method,
                   bandpass_filter_method=bandpass_filter,
                   noise_method=noise_method, noise_stddev_adj=adjust_noise,
                   ar_order=ar_order, conditional=conditional,
                   use_precip_mask=precip_mask, mask_method=mask_method,
                   use_probmatching=prob_matching, seed=seed)
## if necessary, transform back all data (invert the dB transform)
R_fct, _ = transformer(R_fct, metadata, inverse=True)
R, metadata = transformer(R, metadata, inverse=True)
## convert all data to mm/h
converter = stp.utils.get_method("mm/h")
R_fct, _ = converter(R_fct, metadata)
R, metadata = converter(R, metadata)
## readjust to initial domain shape (undo the square padding)
R_fct, _ = reshaper(R_fct, metadata, inverse=True)
R, metadata = reshaper(R, metadata, inverse=True)
## plot the nowcast..
R[Rmask] = np.nan # reapply radar mask
stp.plt.animate(R, nloops=2, timestamps=metadata["timestamps"],
                R_fct=R_fct, timestep_min=ds.timestep,
                UV=UV, motion_plot=cfg.motion_plot,
                geodata=metadata, colorscale=cfg.colorscale,
                plotanimation=True, savefig=True, path_outputs=cfg.path_outputs,
                probmaps=True,probmap_thrs=[0.1,1.0])
# Forecast verification
print("Forecast verification...")
## find the verifying observations (n_lead_times fields after startdate)
input_files_verif = stp.io.find_by_date(startdate, ds.root_path, ds.path_fmt, ds.fn_pattern,
                                        ds.fn_ext, ds.timestep, 0, n_lead_times)
## read observations
R_obs, _, metadata_obs = stp.io.read_timeseries(input_files_verif, importer,
                                                **ds.importer_kwargs)
# Drop the analysis-time field so observations align with the forecast steps.
R_obs = R_obs[1:,:,:]
metadata_obs["timestamps"] = metadata_obs["timestamps"][1:]
## if necessary, convert to rain rates [mm/h]
R_obs, metadata_obs = converter(R_obs, metadata_obs)
## threshold the data
R_obs[R_obs<r_threshold] = 0.0
metadata_obs["threshold"] = r_threshold
## compute the average continuous ranked probability score (CRPS) per lead time
scores = np.zeros(n_lead_times)*np.nan
for i in range(n_lead_times):
    scores[i] = stp.vf.CRPS(R_fct[:,i,:,:].reshape((n_ens_members, -1)).transpose(),
                            R_obs[i,:,:].flatten())
## if already exists, load the figure object to append the new verification results
filename = "%s/%s" % (cfg.path_outputs, "verif_ensemble_nwc_example")
if os.path.exists("%s.dat" % filename):
    # NOTE(review): unpickling arbitrary .dat files executes their payload;
    # only safe because this file is produced by this script itself.
    ax = pickle.load(open("%s.dat" % filename, "rb"))
    print("Figure object loaded: %s.dat" % filename)
else:
    fig, ax = plt.subplots()
## plot the scores, one labelled line per accumulated run
nplots = len(ax.lines)
x = (np.arange(n_lead_times) + 1)*ds.timestep
ax.plot(x, scores, color="C%i"%(nplots + 1), label = "run %02d" % (nplots + 1))
ax.set_xlabel("Lead-time [min]")
ax.set_ylabel("CRPS")
plt.legend()
## dump the figure object
pickle.dump(plt.gca(), open("%s.dat" % filename, "wb"))
print("Figure object saved: %s.dat" % filename)
# remove the pickle object to plot a new figure
plt.show()
| [
"savelov@gmail.com"
] | savelov@gmail.com |
1c6412b50843364b2655f72d593b23b9af5b1d5d | 2c556a8907579f05e8ab91a4939868d3c4693c2b | /apps/tasks/migrations/0027_auto_20200705_1710.py | 8c73f7d01cf8b2c8e42d9bacc32d9a8abaa13a75 | [] | no_license | TaoHuangNa/YinProject | a6d5e86b080192bef855d31c60201193158e3b1d | facb86596dc6ba0be6d09bec5de3dcdf50cb2a1e | refs/heads/master | 2022-11-18T11:03:00.263466 | 2020-07-13T13:46:30 | 2020-07-13T13:46:30 | 279,313,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | # Generated by Django 2.1.1 on 2020-07-05 17:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tasks', '0026_delete_taskstype'),
]
operations = [
migrations.AlterField(
model_name='tasks',
name='state',
field=models.CharField(choices=[('0', '未支付'), ('1', '已支付'), ('2', '审核通过,进行中'), ('3', '审核不通过'), ('4', '已完成')], default='0', help_text='状态 0:未支付,1:已支付,2:审核通过, 3:审核不通过, 4:已完成', max_length=10, verbose_name='状态'),
),
]
| [
"1416501312@qq.com"
] | 1416501312@qq.com |
8c49147cf82f3595fd7da33c4712b0584be9560e | d5ce1d654153baea4177e7bd5e6bb1ff5515ad79 | /dadWeather.py | 1cc63c0945682aea9e385f0cbab4f9cb062094fc | [] | no_license | dennis15926/DadWeather | 177bc10156772c75983a633c89030788abaabcfd | 3170d0e6642f0ef53b2ff2b8e4548345b5041ab3 | refs/heads/master | 2021-01-10T11:55:30.999334 | 2016-03-12T20:56:46 | 2016-03-12T20:56:46 | 53,753,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,158 | py | #coding: utf-8
import webget
import myFileIO
from datetime import timezone,timedelta
import datetime
URL = 'http://www.cwb.gov.tw/V7/observe/rainfall/Rain_Hr/7.htm'
rain_str = webget.split_text(webget.url_to_text(URL))
days_no_rain = myFileIO.get_days_no_rain()
print (rain_str)
current_time = datetime.datetime.now(timezone(timedelta(hours=8)))
lookup_time = current_time-timedelta(days=1)
rain_list = myFileIO.read_rain_month(lookup_time.month,lookup_time.year)
#insert rain_str into list, filling all missing values with ?
date = lookup_time.day
if len(rain_list) < date:
rain_list = rain_list + ['?']*(date - len(rain_list))
rain_list[date-1]=rain_str
#assert len(rain_list)==date
myFileIO.write_rain_month(lookup_time.month,lookup_time.year,rain_list)
if rain_str == "-":
days_no_rain = days_no_rain + 1
else:
days_no_rain = 0
myFileIO.set_days_no_rain(days_no_rain)
webget.send_email('dennis15926@gmail.com','RainProgram',rain_str)
text = """爸,昨天霧峰有下雨喔!
累計"""+str(rain_str)+"毫米"
if days_no_rain == 0:
webget.send_email(['dennis15926@gmail.com','anchen0102@yahoo.com.tw'],"昨天霧峰有下雨",text)
| [
"dennis15926@gmail.com"
] | dennis15926@gmail.com |
4c682a791c45a4e6ed232c2a6b448d06c18f76d6 | 094115d2aaaa02f738e5a57d7b443d3f030a8c13 | /pki/__init__.py | 1b88213288cf3d87f8303d7453ef63d34157ecd0 | [
"Apache-2.0"
] | permissive | 245754954/pki-1 | 034af83f0dd796b9994888142c01bab61d9078dd | 8e6c17b8719f11a7def15ea66ea39092d9397635 | refs/heads/master | 2023-03-23T05:21:35.718203 | 2021-02-18T14:53:32 | 2021-02-18T14:53:32 | 353,202,531 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,780 | py | import os
import logging.config
from flask import Flask, redirect
from flask_bootstrap import Bootstrap
from mongoengine import connect
logging.root.setLevel(logging.DEBUG)
logging.config.fileConfig(os.path.abspath(os.path.join(os.path.dirname(__file__), 'logging.conf')))
logger = logging.getLogger(__name__)
def create_app():
app = Flask(__name__)
app.config.from_mapping(
BOOTSTRAP_SERVE_LOCAL=True,
SECRET_KEY=os.environ.get("SECRET_KEY") or "catfish",
MONGODB_URL=os.environ.get("MONGODB_URL") or "mongodb://localhost:27017",
TOTP_BASE=os.environ.get("TOTP_BASE") or "dogbird",
WEBAUTH_HEADER=os.environ.get('WEBAUTH_HEADER'),
DEFAULT_DURATION=os.environ.get("DEFAULT_DURATION") or "365",
DEFAULT_OCSP_URL=os.environ.get("DEFAULT_OCSP_URL") or "http://127.0.0.1:5000",
DEFAULT_CA_ISSUER_URL=os.environ.get("DEFAULT_CA_ISSUER_URL") or "http://127.0.0.1:5000",
DEFAULT_POLICY_URL=os.environ.get("DEFAULT_POLICY_URL") or "http://127.0.0.1:5000/repository",
DEFAULT_POLICY_OID=os.environ.get("DEFAULT_POLICY_OID") or "1.3.6.1.4.1",
)
connect("pki", host=app.config.get("MONGODB_URL"))
Bootstrap(app)
@app.route("/")
def home():
return redirect("/certificates")
from pki import certificates, repository, ocsp
from pki.auth import check_permission
if app.config.get("WEBAUTH_HEADER"):
logger.info(f"auth is enabled with {app.config.get('WEBAUTH_HEADER')} header")
certificates.bp.before_request(check_permission)
app.register_blueprint(certificates.bp, url_prefix="/certificates")
app.register_blueprint(repository.bp, url_prefix="/repository")
app.register_blueprint(ocsp.bp, url_prefix="/ocsp")
return app
| [
"wyvernnot@gmail.com"
] | wyvernnot@gmail.com |
1e80f050379c3620ecae456eef6480ff547b77d4 | 13f5c66af02a64aa8c5d988e9560b82bcf058fd0 | /learning_sql/views.py | 8cec81152d26fe1d92d386c49f799cf9269d320b | [] | no_license | heitorchang/reading-list | a1090b969d0f16cbc7c0e371671e85dca0bde201 | 3dcfd68cb02179e75216ff459fda693ec1fb8684 | refs/heads/master | 2023-04-27T03:04:28.122341 | 2023-04-21T14:04:20 | 2023-04-21T14:04:20 | 67,825,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | # p. 249
def create_totals_vw():
cursor = cnx.cursor()
cursor.execute("""
CREATE VIEW customer_totals_vw
(cust_id,
cust_type_cd,
cust_name,
num_accounts,
tot_deposits
)
AS
SELECT cst.cust_id, cst.cust_type_cd,
CASE
WHEN cst.cust_type_cd = 'B' THEN
(SELECT bus.name FROM business AS bus WHERE bus.cust_id = cst.cust_id)
ELSE
(SELECT concat(ind.fname, ' ', ind.lname)
FROM individual AS ind
WHERE ind.cust_id = cst.cust_id)
END AS cust_name,
SUM(CASE WHEN act.status = 'ACTIVE' THEN 1 ELSE 0 END) AS tot_active_accounts,
SUM(CASE WHEN act.status = 'ACTIVE' THEN act.avail_balance ELSE 0 END) AS tot_balance
FROM customer AS cst INNER JOIN account AS act
ON act.cust_id = cst.cust_id
GROUP BY cst.cust_id, cst.cust_type_cd;""")
cursor.close()
def create_totals_tbl():
cursor = cnx.cursor()
# NOTE: creating this table freezes data; new data will not be reflected
cursor.execute("""
CREATE TABLE customer_totals
AS
SELECT * FROM customer_totals_vw;""")
cursor.close()
| [
"heitorchang@gmail.com"
] | heitorchang@gmail.com |
77c1a1e2a26adc6671d02a8748df8ead36fdde04 | 60ff6aa653ba0b68e362152268a5fa8e27ddc20a | /recommenders/Exploratory_data_analysis.py | f19637a1a574866a7505cfa0a550860f64e613bd | [] | no_license | Ndu3000/unsupervised-predict-streamlit-template | 9ce03635849c680f887439ac4b575392b5a4ca2a | ddde14972164164ef651806253061db1a34118fd | refs/heads/master | 2022-11-19T20:49:02.995279 | 2020-07-28T10:37:48 | 2020-07-28T10:37:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,304 | py |
import pandas as pd
# To create plots
import matplotlib.pyplot as plt # data visualization library
import seaborn as sns
sns.set_style('whitegrid')
from wordcloud import WordCloud, STOPWORDS #used to generate world cloud
#define a function that counts the number of times each genre appear:
def count_word(df, ref_col, lister):
keyword_count = dict()
for s in lister: keyword_count[s] = 0
for lister_keywords in df[ref_col].str.split('|'):
if type(lister_keywords) == float and pd.isnull(lister_keywords): continue
for s in lister_keywords:
if pd.notnull(s): keyword_count[s] += 1
# convert the dictionary in a list to sort the keywords by frequency
keyword_occurences = []
for k,v in keyword_count.items():
keyword_occurences.append([k,v])
keyword_occurences.sort(key = lambda x:x[1], reverse = True)
return keyword_occurences, keyword_count
tone = 100
# Function that control the color of the words
def random_color_func(word=None, font_size=None, position=None,
orientation=None, font_path=None, random_state=None):
h = int(360.0 * tone / 255.0)
s = int(100.0 * 255.0 / 255.0)
l = int(100.0 * float(random_state.randint(70, 120)) / 255.0)
return "hsl({}, {}%, {}%)".format(h, s, l) | [
"menzi639@gmail.com"
] | menzi639@gmail.com |
34e03ed4d72971d6ba7816fbfd77917897ceb6db | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_103/ch81_2020_04_08_14_08_30_221505.py | 3d1850c8883fa9998cbf766d0212133a37e9b36c | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | def interseccao_valores(dicio1,dicio2):
lista=[]
for valor in dicio1.values():
for valor2 in dicio2.values():
if valor==valor2:
lista.append(valor)
return lista
| [
"you@example.com"
] | you@example.com |
ff7a3fc726a311d22688ac9d93394bb9217d0333 | c61876227baed49f79332dfc4549744dafaf1e44 | /bot.py | 8df2c1b84e733b4144106e766e60e156dbc33d60 | [] | no_license | TheTrueShell/QueueBot | 5c8ff4e143cdf937884eda4406435440514d9b53 | 033848527e71fc27a718b8585ef6b65874d2ff96 | refs/heads/master | 2023-03-09T19:20:43.287528 | 2021-02-17T18:19:24 | 2021-02-17T18:19:24 | 338,392,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,501 | py | from logging import warning
import discord
from discord import channel
from discord import guild
from discord.ext import commands
import asyncio
class MyClient(discord.Client):
waitingQueue = [] #Holds all members waiting in the queue
roomIDs = [809454410543530014, 809454540504039424, 809454553790545950] #The IDs of the Lab "Rooms"
staffList = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.bg_task = self.loop.create_task(self.loopQueue())
async def on_ready(self): #Runs on ready
print('Logged in as')
print(self.user.name)
print(self.user.id)
print('------')
await self.getStaff()
async def loopQueue(self):
await self.wait_until_ready()
while not self.is_closed():
for cID in self.roomIDs:
if len(self.waitingQueue) == 0:
break
for member in self.waitingQueue:
channel = discord.utils.get(client.guild.voice_channels, id=cID)
if len(channel.members) == 1:
for staffmember in channel.members:
if self.staffList.__contains__(staffmember):
await member.move_to(channel)
self.waitingQueue.pop(0)
await asyncio.sleep(60)
async def getStaff(self):
self.staffList.clear()
for member in self.get_all_members():
for role in member.roles:
if str(role) == "Helper" and not self.staffList.__contains__(member):
self.staffList.append(member)
async def checkPermissions(self, message):
return message.author.id == 618876946830589963
#Can be made so it checks for a role
async def on_message(self, message):
if message.author.id == self.user.id: #Stops the bot responding to itself
return
if message.channel.id != 809454274392227870: #Restrains commands to a single channel
return
if message.content.startswith("!queue") or message.content.startswith("!q"): #Queues members
if not self.waitingQueue.__contains__(message.author):
if message.author.voice is None:
await message.channel.send((message.author.mention + ": Please join ``🔈 waiting-room`` to queue."))
elif message.author.voice.channel.id == 809454365164568626:
self.waitingQueue.append(message.author)
await message.add_reaction("✅")
else:
await message.channel.send((message.author.mention + ": Please join ``🔈 waiting-room`` to queue."))
elif self.waitingQueue.__contains__(message.author):
await message.delete()
if message.content.startswith("!pos") or message.content.startswith("!position") or message.content.startswith("!p"): #Shows those queued their position
if self.waitingQueue.__contains__(message.author):
await message.channel.send(message.author.mention + ": You are position ``" + str(self.waitingQueue.index(message.author) + 1) + "``.")
else:
await message.channel.send((message.author.mention + ": You are not in the queue."))
if message.content.startswith("!vc") and await self.checkPermissions(message): #Temporary command. Will become automatic
await self.wait_until_ready()
while not self.is_closed():
for cID in self.roomIDs:
if len(self.waitingQueue) == 0:
break
for member in self.waitingQueue:
channel = discord.utils.get(message.guild.voice_channels, id=cID)
if len(channel.members) == 1:
for staffmember in channel.members:
if self.staffList.__contains__(staffmember):
await member.move_to(channel)
self.waitingQueue.pop(0)
await asyncio.sleep(60)
if message.content.startswith("!clear") and await self.checkPermissions(message): #Clears the channel and shows the help dialog box at the start of the channel.
await message.channel.send("Deleting... Please wait, depending on the number of messages this may take a minute.")
await message.channel.purge()
embed=discord.Embed(title="Commands", description="QueueBot Commands", color=0xedcf07)
embed.add_field(name="!queue / !q", value="Enters you into the queue for labs", inline=False)
embed.add_field(name="!position / !p", value="Shows current position in the queue", inline=False)
await message.channel.send(embed=embed)
if message.content.startswith("!help"): #Shows the help dialogue
embed=discord.Embed(title="Commands", description="QueueBot Commands", color=0xedcf07)
embed.add_field(name="!queue / !q", value="Enters you into the queue for labs", inline=False)
embed.add_field(name="!position / !p", value="Shows current position in the queue", inline=False)
await message.channel.send(embed=embed)
client=MyClient()
ftoken = open("token", "r") #Reads in the token needed for bot authentication
token = ftoken.read().strip("\n")
client.run(token)
| [
"1906816@swansea.ac.uk"
] | 1906816@swansea.ac.uk |
48022a7ebdfbe5fa95fc2ddeea52e546eebfab4b | 6daba5f780eb4e0a10b92da5838eda97925cd649 | /lesson6_step5.py | 75a7f6cc85827d6abc14b99986d3f63b7817f2d1 | [] | no_license | infinity71/test_repository_stepik | 48d05d9e5692d474fb99cf2c7c6b1958917a3594 | 4fcc8f2ed42375a37a1edc058a6f257f838b506e | refs/heads/master | 2022-12-20T08:12:47.267905 | 2020-10-01T18:20:16 | 2020-10-01T18:20:16 | 299,732,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | from selenium import webdriver
import time
import math
link = "http://suninjuly.github.io/find_link_text"
try:
browser = webdriver.Chrome()
browser.get(link)
text = str(math.ceil(math.pow(math.pi, math.e) * 10000))
print(text)
buttonlink = browser.find_element_by_link_text(text)
buttonlink.click()
input1 = browser.find_element_by_tag_name("input")
input1.send_keys("Ivan")
input2 = browser.find_element_by_name("last_name")
input2.send_keys("Petrov")
input3 = browser.find_element_by_class_name("city")
input3.send_keys("Smolensk")
input4 = browser.find_element_by_id("country")
input4.send_keys("Russia")
button = browser.find_element_by_css_selector("button.btn")
button.click()
finally:
# успеваем скопировать код за 30 секунд
time.sleep(30)
# закрываем браузер после всех манипуляций
browser.quit()
# не забываем оставить пустую строку в конце файла | [
"m.devlikamova@gmail.com"
] | m.devlikamova@gmail.com |
e923e894f298de0501742da399ba096237404c13 | 6aa9fdff566a2ca384ed1b1db6933a415581bc22 | /backend/isasatec_23315/wsgi.py | 0442c9c343131871e14516b7d974bda02284aece | [] | no_license | crowdbotics-apps/isasatec-23315 | ae10c4ecde97b30cde72a627d65354b666ddb32c | 6a0a3bfddbba71ac7ee6256ffd1de0f7b3e565e7 | refs/heads/master | 2023-01-28T16:38:25.528446 | 2020-12-11T15:56:50 | 2020-12-11T15:56:50 | 320,553,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for isasatec_23315 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "isasatec_23315.settings")
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
44b4fec55b74c691af090b8774c0c386e6cc5dc1 | 6effd19fb11796892b52a95c274eaa0dd13126bc | /Server Module/server/gettrafficlights/migrations/0004_auto_20170703_2109.py | 89977b7804880e8f3177e14cc5b20fe446bc5fca | [] | no_license | NourhanEssam/graduation-project | 759c79f63422b777ff5781ab71f3cade3a50f700 | f3f11412a775ef49b63091605e334090b6a6341b | refs/heads/master | 2021-06-21T07:50:07.918818 | 2017-07-14T12:41:08 | 2017-07-14T12:41:08 | 72,365,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-03 19:09
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('gettrafficlights', '0003_auto_20170703_2107'),
]
operations = [
]
| [
"eslamsamir232@gmail.com"
] | eslamsamir232@gmail.com |
cc8073a006724d4c3a463c9da8af11bbef0e2d5c | 19136335b7e88324546fdfed45b4d0b22042202c | /rplugin/python3/deoplete/filter/converter_truncate_menu.py | 90a331d0f918b01825f96575468fc8be3376b89e | [
"MIT"
] | permissive | nholik/deoplete.nvim | 3074fa3cdd5a8a2df5f300d0ac74fedde6555fdf | 614cd3ddf1f352c977f3405e809d967093571117 | refs/heads/master | 2020-05-27T18:05:59.540419 | 2019-05-26T22:26:41 | 2019-05-26T22:26:41 | 188,736,112 | 0 | 0 | NOASSERTION | 2019-05-26T22:06:01 | 2019-05-26T22:06:01 | null | UTF-8 | Python | false | false | 1,034 | py | # ============================================================================
# FILE: converter_truncate_menu.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
from deoplete.base.filter import Base
from deoplete.util import truncate_skipping
class Filter(Base):
def __init__(self, vim):
super().__init__(vim)
self.name = 'converter_truncate_menu'
self.description = 'truncate menu converter'
def filter(self, context):
max_width = context['max_menu_width']
if not context['candidates'] or 'menu' not in context[
'candidates'][0] or max_width <= 0:
return context['candidates']
footer_width = max_width / 3
for candidate in context['candidates']:
candidate['menu'] = truncate_skipping(
candidate.get('menu', ''),
max_width, '..', footer_width)
return context['candidates']
| [
"Shougo.Matsu@gmail.com"
] | Shougo.Matsu@gmail.com |
c55f365e169d1106b279c90b35267dee292c835b | acd749424ec557eb2c0aed20333131eeb738b27a | /pyart/io/nexrad_cdm.py | 2df276d06cb14dcb08a6247e4e97c50c44c7cd4b | [
"BSD-3-Clause"
] | permissive | zxdawn/pyart | c5b8cc505e4eea0db01af40bdd3a796ff11020b2 | fc51a68dfb488392217b2093ed593f07016e793b | refs/heads/CNRAD | 2020-03-17T23:14:30.526023 | 2019-05-19T13:39:00 | 2019-05-19T13:39:00 | 134,036,631 | 9 | 0 | null | 2018-05-22T14:07:30 | 2018-05-19T06:34:09 | Python | UTF-8 | Python | false | false | 13,609 | py | """
pyart.io.nexrad_cdm
===================
Functions for accessing Common Data Model (CDM) NEXRAD Level 2 files.
.. autosummary::
:toctree: generated/
read_nexrad_cdm
_scan_info
_populate_scan_dic
_get_moment_data
"""
import os
from datetime import datetime, timedelta
import netCDF4
import numpy as np
from .nexrad_common import get_nexrad_location
from ..config import FileMetadata, get_fillvalue
from ..core.radar import Radar
from .common import make_time_unit_str, _test_arguments
def read_nexrad_cdm(filename, field_names=None, additional_metadata=None,
file_field_names=False, exclude_fields=None,
station=None, **kwargs):
"""
Read a Common Data Model (CDM) NEXRAD Level 2 file.
Parameters
----------
filename : str
File name or URL of a Common Data Model (CDM) NEXRAD Level 2 file.
File of in this format can be created using the NetCDF Java Library
tools [1]_. A URL of a OPeNDAP file on the UCAR THREDDS Data
Server [2]_ is also accepted the netCDF4 library has been compiled
with OPeNDAP support.
field_names : dict, optional
Dictionary mapping NEXRAD moments to radar field names. If a
data type found in the file does not appear in this dictionary or has
a value of None it will not be placed in the radar.fields dictionary.
A value of None, the default, will use the mapping defined in the
metadata configuration file.
additional_metadata : dict of dicts, optional
Dictionary of dictionaries to retrieve metadata from during this read.
This metadata is not used during any successive file reads unless
explicitly included. A value of None, the default, will not
introduct any addition metadata and the file specific or default
metadata as specified by the metadata configuration file will be used.
file_field_names : bool, optional
True to use the NEXRAD field names for the field names. If this
case the field_names parameter is ignored. The field dictionary will
likely only have a 'data' key, unless the fields are defined in
`additional_metadata`.
exclude_fields : list or None, optional
List of fields to exclude from the radar object. This is applied
after the `file_field_names` and `field_names` parameters.
station : str
Four letter ICAO name of the NEXRAD station used to determine the
location in the returned radar object. This parameter is only
used when the location is not contained in the file, which occur
in older NEXRAD files. If the location is not provided in the file
and this parameter is set to None the station name will be determined
from the filename.
Returns
-------
radar : Radar
Radar object containing all moments and sweeps/cuts in the volume.
Gates not collected are masked in the field data.
References
----------
.. [1] http://www.unidata.ucar.edu/software/netcdf-java/documentation.htm
.. [2] http://thredds.ucar.edu/thredds/catalog.html
"""
# test for non empty kwargs
_test_arguments(kwargs)
# create metadata retrieval object
filemetadata = FileMetadata('nexrad_cdm', field_names,
additional_metadata, file_field_names,
exclude_fields)
# open the file
dataset = netCDF4.Dataset(filename)
dattrs = dataset.ncattrs()
dvars = dataset.variables
if 'cdm_data_type' not in dattrs or dataset.cdm_data_type != 'RADIAL':
raise IOError('%s is not a valid CDM NetCDF file' % (filename))
# determine the scan information
scan_info = _scan_info(dvars)
radials_per_scan = [max(s['nradials']) for s in scan_info]
ngates_per_scan = [max(s['ngates']) for s in scan_info]
ngates = max(ngates_per_scan)
nrays = sum(radials_per_scan)
nsweeps = len(scan_info)
# extract data which changes depending on scan,
# specifically time, azimuth, elevation and fixed angle data, as well as
# the moment data.
time_data = np.empty((nrays, ), dtype='float64')
azim_data = np.empty((nrays, ), dtype='float32')
elev_data = np.empty((nrays, ), dtype='float32')
fixed_agl_data = np.empty((nsweeps, ), dtype='float32')
fdata = {
'Reflectivity':
np.ma.masked_equal(np.ones((nrays, ngates), dtype='float32'), 1),
'RadialVelocity':
np.ma.masked_equal(np.ones((nrays, ngates), dtype='float32'), 1),
'SpectrumWidth':
np.ma.masked_equal(np.ones((nrays, ngates), dtype='float32'), 1),
'DifferentialReflectivity':
np.ma.masked_equal(np.ones((nrays, ngates), dtype='float32'), 1),
'CorrelationCoefficient':
np.ma.masked_equal(np.ones((nrays, ngates), dtype='float32'), 1),
'DifferentialPhase':
np.ma.masked_equal(np.ones((nrays, ngates), dtype='float32'), 1),
}
ray_i = 0
for scan_index, scan_dic in enumerate(scan_info):
var_index = scan_dic['index'][0]
nradials = scan_dic['nradials'][0]
time_var = scan_dic['time_vars'][0]
azimuth_var = scan_dic['azimuth_vars'][0]
elevation_var = scan_dic['elevation_vars'][0]
nradials = scan_dic['nradials'][0]
end = ray_i + nradials
time_data[ray_i:end] = dvars[time_var][var_index][:nradials]
azim_data[ray_i:end] = dvars[azimuth_var][var_index][:nradials]
elev_data[ray_i:end] = dvars[elevation_var][var_index][:nradials]
fixed_agl_data[scan_index] = np.mean(
dvars[elevation_var][var_index][:nradials])
for i, moment in enumerate(scan_dic['moments']):
moment_index = scan_dic['index'][i]
m_ngates = scan_dic['ngates'][i]
m_nradials = scan_dic['nradials'][i]
if moment.endswith('_HI'):
fdata_name = moment[:-3]
else:
fdata_name = moment
sweep = _get_moment_data(dvars[moment], moment_index, m_ngates)
fdata[fdata_name][ray_i:ray_i + m_nradials, :m_ngates] = (
sweep[:m_nradials, :m_ngates])
ray_i += nradials
# time
time = filemetadata('time')
first_time_var = scan_info[0]['time_vars'][0]
time_start = datetime.strptime(dvars[first_time_var].units[-20:],
"%Y-%m-%dT%H:%M:%SZ")
time_start = time_start + timedelta(seconds=int(time_data[0]/1000))
time['data'] = time_data/1000. - int(time_data[0]/1000)
time['units'] = make_time_unit_str(time_start)
# range
_range = filemetadata('range')
max_ngates_scan_index = ngates_per_scan.index(ngates)
scan_dic = scan_info[max_ngates_scan_index]
max_ngates_moment_index = scan_dic['ngates'].index(ngates)
distance_var = scan_dic['distance_vars'][max_ngates_moment_index]
_range['data'] = dvars[distance_var][:]
_range['meters_to_center_of_first_gate'] = _range['data'][0]
_range['meters_between_gates'] = _range['data'][1] - _range['data'][0]
# fields
fields = {}
for moment_name, moment_data in fdata.items():
field_name = filemetadata.get_field_name(moment_name)
field_dic = filemetadata(field_name)
field_dic['_FillValue'] = get_fillvalue()
field_dic['data'] = moment_data
fields[field_name] = field_dic
# metadata
metadata = filemetadata('metadata')
metadata['original_container'] = 'NEXRAD Level II'
# scan_type
scan_type = 'ppi'
# latitude, longitude, altitude
latitude = filemetadata('latitude')
longitude = filemetadata('longitude')
altitude = filemetadata('altitude')
# use the locations in the NetCDF file is available
if ((hasattr(dataset, 'StationLatitude') and
hasattr(dataset, 'StationLongitude') and
hasattr(dataset, 'StationElevationInMeters'))):
lat = dataset.StationLatitude
lon = dataset.StationLongitude
alt = dataset.StationElevationInMeters
else:
# if no locations in the file look them up from station name.
if station is None:
# determine the station name from the filename
# this will fail in some cases, in which case station
# should be implicitly provided in the function call.
station = os.path.basename(filename)[:4].upper()
lat, lon, alt = get_nexrad_location(station)
latitude['data'] = np.array([lat], dtype='float64')
longitude['data'] = np.array([lon], dtype='float64')
altitude['data'] = np.array([alt], dtype='float64')
# sweep_number, sweep_mode, fixed_angle, sweep_start_ray_index
# sweep_end_ray_index
sweep_number = filemetadata('sweep_number')
sweep_mode = filemetadata('sweep_mode')
sweep_start_ray_index = filemetadata('sweep_start_ray_index')
sweep_end_ray_index = filemetadata('sweep_end_ray_index')
sweep_number['data'] = np.arange(nsweeps, dtype='int32')
sweep_mode['data'] = np.array(
nsweeps * ['azimuth_surveillance'], dtype='S')
rays_per_scan = list(radials_per_scan)
sweep_end_ray_index['data'] = np.cumsum(rays_per_scan, dtype='int32') - 1
rays_per_scan.insert(0, 0)
sweep_start_ray_index['data'] = np.cumsum(rays_per_scan[:-1],
dtype='int32')
# azimuth, elevation, fixed_angle
azimuth = filemetadata('azimuth')
elevation = filemetadata('elevation')
fixed_angle = filemetadata('fixed_angle')
azimuth['data'] = azim_data
elevation['data'] = elev_data
fixed_angle['data'] = fixed_agl_data
dataset.close()
return Radar(
time, _range, fields, metadata, scan_type,
latitude, longitude, altitude,
sweep_number, sweep_mode, fixed_angle, sweep_start_ray_index,
sweep_end_ray_index,
azimuth, elevation,
instrument_parameters=None)
def _scan_info(dvars):
""" Return a list of information on the scans in the volume. """
# determine the time of the sweep start
time_variables = [k for k in dvars.keys() if k.startswith('time')]
scan_start_times = set([])
for var in time_variables:
for time in dvars[var][:, 0]:
scan_start_times.add(time)
scan_start_times = list(scan_start_times)
scan_start_times.sort()
# build the scan_info list
time_var_to_moment = { # time variable to moment conversion
'timeR': 'Reflectivity',
'timeV': 'RadialVelocity',
'timeD': 'DifferentialReflectivity',
'timeC': 'CorrelationCoefficient',
'timeP': 'DifferentialPhase',
'timeR_HI': 'Reflectivity_HI',
'timeV_HI': 'RadialVelocity_HI',
'timeD_HI': 'DifferentialReflectivity_HI',
'timeC_HI': 'CorrelationCoefficient_HI',
'timeP_HI': 'DifferentialPhase_HI',
}
scan_info = [{'start_time': t, 'time_vars': [], 'moments': [],
'nradials': [], 'ngates': [], 'elevation_vars': [],
'azimuth_vars': [], 'distance_vars': [], 'index': []}
for t in scan_start_times]
for time_var in time_variables:
for time_var_i, time in enumerate(dvars[time_var][:, 0]):
scan_index = scan_start_times.index(time)
scan_dic = scan_info[scan_index]
moment = time_var_to_moment[time_var]
_populate_scan_dic(scan_dic, time_var, time_var_i, moment, dvars)
# corner cases, timeV is a dimension for RadialVelocity AND
# SpectrumWidth
if time_var == 'timeV':
_populate_scan_dic(scan_dic, time_var, time_var_i,
'SpectrumWidth', dvars)
if time_var == 'timeV_HI':
_populate_scan_dic(scan_dic, time_var, time_var_i,
'SpectrumWidth_HI', dvars)
return scan_info
def _populate_scan_dic(scan_dic, time_var, time_var_i, moment, dvars):
""" Populate a dictionary in the scan_info list. """
if time_var.endswith('HI'):
var_suffix = time_var[-4:]
else:
var_suffix = time_var[-1:]
ngates = dvars['numGates' + var_suffix][time_var_i]
nradials = dvars['numRadials' + var_suffix][time_var_i]
scan_dic['time_vars'].append(time_var)
scan_dic['index'].append(time_var_i)
scan_dic['moments'].append(moment)
scan_dic['elevation_vars'].append('elevation' + var_suffix)
scan_dic['azimuth_vars'].append('azimuth' + var_suffix)
scan_dic['distance_vars'].append('distance' + var_suffix)
scan_dic['ngates'].append(ngates)
scan_dic['nradials'].append(nradials)
return
def _get_moment_data(moment_var, index, ngates):
""" Retieve moment data for a given scan. """
# mask, scale and offset
moment_var.set_auto_maskandscale(False)
raw_moment_data = moment_var[index][:, :ngates]
if '_Unsigned' in moment_var.ncattrs():
if raw_moment_data.dtype == np.int8:
raw_moment_data = raw_moment_data.view('uint8')
if raw_moment_data.dtype == np.int16:
raw_moment_data = raw_moment_data.view('uint16')
raw_moment_data = np.ma.masked_less_equal(raw_moment_data, 1)
if 'scale_factor' in moment_var.ncattrs():
scale = moment_var.scale_factor
else:
scale = 1.0
if 'add_offset' in moment_var.ncattrs():
add_offset = moment_var.add_offset
else:
add_offset = 0.0
return raw_moment_data * scale + add_offset
| [
"jjhelmus@gmail.com"
] | jjhelmus@gmail.com |
332b13da0a1f4b13b5645e39b38500507e8151e3 | 80f817608976451f032a0c1410cdfe34cd339d39 | /encyclopedia_builder.py | 1946247572660b3466e98eb33a2fca7d79ea0c51 | [] | no_license | erik-roger-fuller/NLP-art-articles | 413fd398b3685059178fd876e2b852cbcb2ceb40 | 616b5cfe8bda5ac52aa83e7111a4ae68cdcae9b5 | refs/heads/main | 2023-06-03T22:43:48.912717 | 2021-06-15T15:52:08 | 2021-06-15T15:52:08 | 346,487,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,942 | py | import pandas as pd
import numpy as np
import json
import csv
import os
import re
from spacy.kb import KnowledgeBase
import spacy
def museum_classify(desc1, desc2, name):
if desc1 == "ART":
desc1= ["Art Museum"]
elif desc1 == "BOT":
desc1= ["Arboretum", "Botanical Garden", "Nature Center"]
elif desc1 == "CMU":
desc1= ["Children Museum"]
elif desc1 == "HST":
desc1= ["History Museum"]
elif desc1 == "NAT":
desc1= ["Natural History Museum","Natural Science Museum"]
elif desc1 == "SCI":
desc1= ["Science Museum", " Technology Museum" ,"Planetariums"]
elif desc1 == "ZAW":
desc1= ["Zoo", "Aquarium", "Wildlife Conservation"]
elif desc1 == "GMU":
desc1= ["Museum", "Uncategorized General Museums"]
elif desc1 == "HSC":
desc1= ["Historical Society", "Historic Preservation"]
else:
desc1 = [desc1]
school = ["school", "college", "university", "univ"]
if any(x in desc2.lower() for x in school):
desc1.append("academic institution")
if name != desc2.title():
name = (name + " at " + desc2.title())
return desc1, name
def ents_encyc_builder(path, begin, spec=None):
    """Build an entity "encyclopedia" DataFrame from an IMLS-style CSV export.

    path: CSV filename under the hard-coded entity-resources directory.
    begin: first integer qid to assign; ids increment per accepted row.
    spec: fallback discipline code when the CSV has no DISCIPL column.
    Returns a DataFrame indexed by qid with name/city/loc/desc columns.

    Fixes vs. the original:
    - rows are collected in a list and the DataFrame is built once at the
      end; per-row DataFrame.append was quadratic and is removed in
      pandas >= 2.0.
    - set_index returns a new frame; the original discarded the result, so
      the qid index was never actually applied.
    """
    entities_loc = os.path.join('/home/erik/Desktop/Datasets/art/art_writing/entity resources', path)
    rows = []
    intid = begin
    with open(entities_loc, "r", encoding='unicode_escape') as csvfile:
        csvreader = csv.DictReader(csvfile, delimiter=",")
        for row in csvreader:
            try:
                name = row['COMMONNAME'].title()
                try:
                    desc1 = row['DISCIPL']
                except KeyError:
                    # CSV without a discipline column: use the caller's hint
                    desc1 = spec
                desc2 = row['LEGALNAME']
                desc, name = museum_classify(desc1, desc2, name)
                city = row['ADCITY'].title()
                state = row['ADSTATE']
                city = f"{city}, {state}"
                loc = "usa"
                identity = {"qid": intid, "name": name, "city": city, "loc": loc, "desc": desc}
                print(identity)
                rows.append(identity)
                intid += 1
            except UnicodeDecodeError:
                # skip rows with undecodable bytes, matching prior behavior
                pass
    encyc = pd.DataFrame(rows)
    if not encyc.empty:
        encyc = encyc.set_index("qid")
    return encyc
"""
school = ["school" , "college" ,"university"]
if any(x in name.lower() for x in school):
desc = "academic institution"
else:
desc = "art gallery"
def spacy_importer_prepper(data):
entities = []
for i in range(data.shape[0]):
ents_found = []
# print(i)
# index, article in data.iterrows()
article = data.iloc[int(i)]
# article = article[0]
para = article["para"]
meta_dict = dict([("unique_id", article["unique_id"]), ("title", article["title"]), ("author", article["author"]),
("pubtime", article["pubtime"])])
#print(meta_dict)
try:
doc = nlp(para)
for ent in doc.ents:
#ent_dict = dict([("entity", str(ent.text)), ("label", str(ent.label_))])
text = ent.text
label = ent.label_
print((text, label), end=', ')
ent_dict = {"txt": str(text), "label": str(text)}
ents_found.append(ent_dict)
#print(ent_dict)
except TypeError:
print(article['unique_id'])
pass
for found_ent in ents_found:
all_dict = found_ent.update(meta_dict)
#print(all_dict, end=', ')
entities.append(all_dict)
print(" \n")
return entities
""" | [
"erik.roger.fuller@gmail.com"
] | erik.roger.fuller@gmail.com |
102f709bebff12b32c93c321b66bd7327cd6e92b | b15d2787a1eeb56dfa700480364337216d2b1eb9 | /accelbyte_py_sdk/api/matchmaking/models/models_query_mock_by.py | 8e41cf6eec7f84d441d5c2d4e272a292a791f88e | [
"MIT"
] | permissive | AccelByte/accelbyte-python-sdk | dedf3b8a592beef5fcf86b4245678ee3277f953d | 539c617c7e6938892fa49f95585b2a45c97a59e0 | refs/heads/main | 2023-08-24T14:38:04.370340 | 2023-08-22T01:08:03 | 2023-08-22T01:08:03 | 410,735,805 | 2 | 1 | MIT | 2022-08-02T03:54:11 | 2021-09-27T04:00:10 | Python | UTF-8 | Python | false | false | 3,879 | py | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: ags_py_codegen
# AccelByte Gaming Services Matchmaking Service (2.25.7)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
class ModelsQueryMockBy(Model):
    """Models query mock by (models.QueryMockBy)

    Properties:
        timestamp_after: (timestamp_after) REQUIRED int

    NOTE: this file is code-generated; the only deviation from the generator
    output is the bug fix in create_many_from_dict (see comment there).
    """

    # region fields

    timestamp_after: int  # REQUIRED

    # endregion fields

    # region with_x methods

    def with_timestamp_after(self, value: int) -> ModelsQueryMockBy:
        self.timestamp_after = value
        return self

    # endregion with_x methods

    # region to methods

    def to_dict(self, include_empty: bool = False) -> dict:
        result: dict = {}
        if hasattr(self, "timestamp_after"):
            result["timestamp_after"] = int(self.timestamp_after)
        elif include_empty:
            result["timestamp_after"] = 0
        return result

    # endregion to methods

    # region static methods

    @classmethod
    def create(cls, timestamp_after: int, **kwargs) -> ModelsQueryMockBy:
        instance = cls()
        instance.timestamp_after = timestamp_after
        return instance

    @classmethod
    def create_from_dict(
        cls, dict_: dict, include_empty: bool = False
    ) -> ModelsQueryMockBy:
        instance = cls()
        if not dict_:
            return instance
        if "timestamp_after" in dict_ and dict_["timestamp_after"] is not None:
            instance.timestamp_after = int(dict_["timestamp_after"])
        elif include_empty:
            instance.timestamp_after = 0
        return instance

    @classmethod
    def create_many_from_dict(
        cls, dict_: dict, include_empty: bool = False
    ) -> Dict[str, ModelsQueryMockBy]:
        # Bug fix: iterating a dict yields keys only, so `for k, v in dict_`
        # raised at runtime; iterate .items() to get (key, value) pairs.
        return (
            {
                k: cls.create_from_dict(v, include_empty=include_empty)
                for k, v in dict_.items()
            }
            if dict_
            else {}
        )

    @classmethod
    def create_many_from_list(
        cls, list_: list, include_empty: bool = False
    ) -> List[ModelsQueryMockBy]:
        return (
            [cls.create_from_dict(i, include_empty=include_empty) for i in list_]
            if list_
            else []
        )

    @classmethod
    def create_from_any(
        cls, any_: any, include_empty: bool = False, many: bool = False
    ) -> Union[
        ModelsQueryMockBy, List[ModelsQueryMockBy], Dict[Any, ModelsQueryMockBy]
    ]:
        if many:
            if isinstance(any_, dict):
                return cls.create_many_from_dict(any_, include_empty=include_empty)
            elif isinstance(any_, list):
                return cls.create_many_from_list(any_, include_empty=include_empty)
            else:
                raise ValueError()
        else:
            return cls.create_from_dict(any_, include_empty=include_empty)

    @staticmethod
    def get_field_info() -> Dict[str, str]:
        return {
            "timestamp_after": "timestamp_after",
        }

    @staticmethod
    def get_required_map() -> Dict[str, bool]:
        return {
            "timestamp_after": True,
        }

    # endregion static methods
| [
"elmernocon@gmail.com"
] | elmernocon@gmail.com |
cb1e240271c4c122a05c64729f1e9330f325aff6 | aab0c5e931fa5c63a1e11ad107e785d8205e2adf | /lista7jordana/Ex052.py | 948f562d066232eec9d714324bf76760f5d4ebaa | [] | no_license | AtilioA/Python-20191 | 0cba16de6dd5e637920677dfe59ce572ccbca4cc | c1ba4280fab9965ccf47fbf0271cd9efacb86da4 | refs/heads/master | 2020-04-28T16:54:34.641561 | 2019-06-27T15:03:53 | 2019-06-27T15:03:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | from Ex048 import *
def printaMaisProxMedia(lista):
    """Print the element of `lista` closest to the list's mean.

    Bug fix: the original print used a plain string literal with a brace
    placeholder, so the text "{mpm}" was printed verbatim; it is now an
    f-string that interpolates the computed value.
    """
    mpm = maisProxMedia(lista)
    print(f"Valor mais próximo da média = {mpm}")
| [
"atiliodadalto@hotmail.com"
] | atiliodadalto@hotmail.com |
25be5070981b8513e439d196c2468bff17d82ecc | 053c59a8ef72644c5cf6c909450e1856ae3ea833 | /pinocchio/vercomp.py | 488bcbe90799846065ec9dcbf3f54b8c95254836 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | Ethsnarks/ethsnarks-pinocchio | 5576f29843736b918b0ab1d6897aecc32e2122d7 | 4001749cfd350a5e9ae1608dc06103bf0e384cd8 | refs/heads/master | 2020-04-04T06:41:00.997812 | 2018-11-01T23:37:23 | 2018-11-01T23:37:23 | 155,753,201 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,534 | py | #!/usr/bin/python
from __future__ import print_function
import pickle
import json
import argparse
import sys
import subprocess
import os
import tempfile
import traceback
from .ArithFactory import ArithFactory
from .BooleanFactory import BooleanFactory
from .Timing import Timing
import pycparser
from pycparser import c_ast
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from .Symtab import Symtab, UndefinedSymbol
from .DFG import Undefined, Input, NIZKInput, Constant, DFGFactory, CmpEQ, CmpLT, CmpLEQ, Conditional, Add, Multiply, Negate, Subtract, LeftShift, RightShift, BitOr, BitNot, BitAnd, Xor, LogicalAnd, LogicalNot, Divide, Modulo, UndefinedExpression, NonconstantExpression, NonconstantArrayAccess, StorageRef
from .Struct import StructType, ArrayType, PtrType, IntType, UnsignedType, Field
from . import BitWidth
from .Collapser import Collapser
from .Storage import StorageKey, Storage, Symbol, PseudoSymbol, Null
sys.setrecursionlimit(10000)
class Void(object):
    # Marker type for "no value": decode_funccall returns an instance when
    # the callee never assigned the "return" pseudo-symbol.
    pass
def ast_show(ast, oneline=False):
    """Render a pycparser AST node to a string via its .show() method.

    When `oneline` is True, newlines are replaced with spaces so the dump
    fits on a single line (handy inside log messages).
    """
    buffer = StringIO()
    ast.show(buf=buffer)
    text = buffer.getvalue()
    if oneline:
        text = text.replace('\n', ' ')
    return text
class StaticallyInfiniteLoop(Exception):
    """Raised when static loop unrolling exceeds the configured sanity limit."""
    def __init__(self, sanity_limit):
        # the --loop-sanity-limit value that was exceeded
        self.sanity_limit = sanity_limit
    def __repr__(self):
        # Bug fix: the original built this message but never returned it, so
        # repr()/str() raised "__repr__ returned non-string (type NoneType)".
        return "Loop exceeded %s iters; insane?" % self.sanity_limit
    def __str__(self):
        return repr(self)
class VoidFuncUsedAsExpression(Exception):
    # Raised when a function that produced no return value appears in
    # expression context.
    pass
class ConstantArrayIndexOutOfRange(Exception):
    """A constant array subscript fell outside the bounds of the array."""
    def __init__(self, s):
        self.s = s
    def __str__(self):
        return self.s
class ParameterListMismatchesDeclaration(Exception):
    """A call site supplied a different number of arguments than declared."""
    def __init__(self, s):
        self.s = s
    def __str__(self):
        return self.s
class EarlyReturn(Exception):
    """The compiled function returned before its final statement."""
    def __str__(self):
        return "Function returns before last statement."
class FuncNotDefined(Exception):
    """A called function has no definition in the translation unit."""
    def __init__(self, s):
        self.s = s
    def __str__(self):
        return self.s
class FuncMultiplyDefined(Exception):
    """A function was defined more than once in the translation unit."""
    def __init__(self, s):
        self.s = s
    def __str__(self):
        return self.s
class MalformedOutsourceParameters(Exception): pass  # the outsource() entry point has an unexpected signature
class NoInputSpecDefined(Exception): pass  # no input specification was supplied for the compiled program
class State(object):
    """Pairs a decoded DFG expression with the symbol table that resulted
    from decoding it; most decode_*/transform_* steps produce one."""
    def __init__(self, expr, symtab):
        self.expr = expr
        self.symtab = symtab
    def __repr__(self):
        return "State[%s,%s]" % (self.expr, self.symtab)
class TypeState(object):
    """Pairs a decoded type object with the (possibly updated) symbol table."""
    def __init__(self, type, symtab):
        self.type = type
        self.symtab = symtab
#class ProfileNugget:
# def __init__(self, collapser, out_expr):
# self.collapser = collapser
# self.out_expr = out_expr
#
# def run(self):
# self.result = self.collapser.collapse_tree(self.out_expr)
class Vercomp(object):
def __init__(self, filename, args, timing, verbose=False):
    """Compile the C source in `filename` down to a DFG expression.

    args carries the command-line options (cpp_arg, loop_sanity_limit,
    ignore_overflow, bit_width, progress); timing is a Timing collector.
    The result is left in self.output.
    """
    self.cpp_arg = args.cpp_arg
    self.timing = timing
    self.loop_sanity_limit = int(args.loop_sanity_limit)
    # NOTE: ignore_overflow arrives as the *string* "True"/"False"
    ignore_overflow = (args.ignore_overflow=="True")
    print("ignore_overflow=%s" % ignore_overflow)
    self.bit_width = BitWidth.BitWidth(int(args.bit_width), ignore_overflow)
    self.progress = args.progress
    # memoizer for collapsing exprs to scalar constants
    # NOTE(review): ExprEvaluator is not imported in the visible module
    # scope -- presumably defined later in this file; confirm.
    self.expr_evaluator = ExprEvaluator()
    self.verbose = verbose
    self.loops = 0  # current loop-unrolling nesting depth (see loop_msg)
    self.factory = DFGFactory()
    # x = self.dfg(Constant, 7)
    # y = self.dfg(Constant, 7)
    # z = self.dfg(Constant, 8)
    # p = self.dfg(Add, x, y)
    # q = self.dfg(Add, x, y)
    # print (x, y, z, p, q)
    # print map(id, (x, y, z, p, q))
    # Preprocess with gcc, then strip directives/comments line by line.
    tmpname = self.gcc_preprocess(filename, self.cpp_arg)
    with open(tmpname, 'r') as handle:
        lines = handle.read().split("\n")
    os.unlink(tmpname)
    lines = self.preprocess(lines)
    cfile = "\n".join(lines)
    parser = pycparser.CParser()
    ast = parser.parse(cfile)
    self.root_ast = ast
    # create_expression is defined later in this class (past this view).
    self.output = self.create_expression()
def gcc_preprocess(self, filename, cpp_arg):
    """Run `filename` through gcc's preprocessor; return the output file path.

    cpp_arg entries use '_' in place of '-' (argparse quirk) and are
    translated back before being handed to gcc. The caller is responsible
    for unlinking the returned temp file.

    Fixes vs. the original: tempfile.mkstemp replaces the race-prone,
    deprecated tempfile.mktemp, and a nonzero gcc exit status now raises
    instead of being silently ignored (previously it surfaced as a
    confusing parse error downstream).
    """
    out_fd, tmpname = tempfile.mkstemp()
    os.close(out_fd)  # gcc reopens the path itself; keep only the name
    cpp_args = []
    if cpp_arg is not None:
        cpp_args = [_.replace('_', '-') for _ in cpp_arg]
    # I used to call gcc-4; I'm not sure what machine had two versions
    # or what the issue was, but I leave this comment here as a possible
    # solution to the next sucker who runs into that problem.
    cmd_list = (["gcc"] + cpp_args +
                ['-D', 'BIT_WIDTH=' + str(self.bit_width.width),
                 '-nostdinc', "-E", filename, "-o", tmpname])
    print("cmd_list %s" % (" ".join(cmd_list)))
    sp = subprocess.Popen(cmd_list)
    rc = sp.wait()
    if rc != 0:
        raise RuntimeError("gcc preprocessing failed with exit code %d" % rc)
    return tmpname
def preprocess_line(self, line):
    """Blank out empty lines and cpp directives; strip // comments."""
    if line == '' or line.startswith('#'):
        return ''
    return line.split("//")[0]
def preprocess(self, lines):
    # Strip directives and // comments from every line.
    # NOTE: returns a lazy map object under Python 3; the sole caller in
    # __init__ consumes it exactly once via "\n".join.
    return map(self.preprocess_line, lines)
def dfg(self, *args):
    # Shorthand: build (and memoize) a DFG node through the shared factory.
    return self.factory.create(*args)
def decode_scalar_type(self, names):
    """Map a list of C type-specifier tokens to the scalar type object.

    Only `int` and `unsigned int` are supported; anything else raises
    RuntimeError.
    """
    if names == ["unsigned", "int"]:
        return UnsignedType()
    if names == ["int"]:
        return IntType()
    raise RuntimeError("Unknown scalar type: " + str(names))
def decode_type(self, node, symtab, skip_type_decls=False):
    """Decode a pycparser type node into a TypeState.

    May also declare a new type (struct declarations are entered into the
    symbol table as a side effect). skip_type_decls is set on recursive
    calls where an inner TypeDecl wrapper is expected and harmless.
    """
    # this can also declare a new type; returns TypeState
    if isinstance(node, c_ast.IdentifierType):
        result = TypeState(self.decode_scalar_type(node.names), symtab)
    elif isinstance(node, c_ast.Struct):
        fields = []
        if node.decls is not None:
            # A struct is being declared here.
            for field_decl in node.decls:
                type_state = self.decode_type(field_decl.type, symtab, skip_type_decls=True)
                symtab = type_state.symtab
                fields.append(Field(field_decl.name, type_state.type))
            struct_type = StructType(node.name, fields)
            symtab.declare(Symbol(node.name), struct_type)
        else:
            # look up the struct
            struct_type = symtab.lookup(Symbol(node.name))
        result = TypeState(struct_type, symtab)
    elif isinstance(node, c_ast.ArrayDecl):
        # Array dimension must fold to a compile-time constant.
        dim_state = self.decode_expression_val(node.dim, symtab)
        symtab = dim_state.symtab
        dimension = self.evaluate(dim_state.expr)
        type_state = self.decode_type(node.type, symtab, skip_type_decls=True)
        symtab = type_state.symtab
        array_type = ArrayType(type_state.type, dimension)
        result = TypeState(array_type, symtab)
    elif isinstance(node, c_ast.PtrDecl):
        type_state = self.decode_type(node.type, symtab, skip_type_decls=True)
        symtab = type_state.symtab
        result = TypeState(PtrType(type_state.type), symtab)
    elif isinstance(node, c_ast.TypeDecl):
        if self.verbose:
            print(node.__class__)
            print(node.__dict__)
            print(ast_show(node))
        assert(skip_type_decls)
        result = self.decode_type(node.type, symtab)
    else:
        print(node.__class__)
        print(node.__dict__)
        print(ast_show(node))
        assert(False)
    assert(result.type is not None)
    return result
def create_storage(self, name, store_type, initial_values, symtab):
    """Allocate a Storage of sizeof(store_type) cells and declare each cell.

    initial_values, when given, must have exactly one entry per cell
    (indexable with len()); otherwise every cell starts as Constant(0).
    Returns a State whose expr is a StorageRef to cell 0.
    """
    # returns State
    storage = Storage(name, store_type.sizeof())
    if initial_values is not None:
        #print "iv %s st %s" % (len(initial_value), store_type.sizeof())
        if len(initial_values) != store_type.sizeof():
            print("iv %s st %s" % (len(initial_values), store_type.sizeof()))
            assert(False)
        for idx in range(store_type.sizeof()):
            iv = initial_values[idx]
            symtab.declare(StorageKey(storage, idx), iv)
    else:
        for idx in range(store_type.sizeof()):
            symtab.declare(StorageKey(storage, idx), self.dfg(Constant, 0))
    return State(self.dfg(StorageRef, store_type, storage, 0), symtab)
def declare_variable(self, decl, symtab, initial_value=None):
    """Declare (or re-declare) the variable in `decl`, returning the symtab.

    Handles the declared type (possibly declaring a struct as a side
    effect), decodes any initializer, allocates backing Storage for
    non-pointer objects, and binds the name. initial_value is used when the
    caller (parameter binding) already has the value/reference.

    Fix vs. the original: the StorageRef-copy initializer used map(), which
    under Python 3 yields a lazy object that create_storage cannot len() or
    index; a list comprehension restores the Python 2 list semantics.
    """
    if isinstance(decl.type, c_ast.TypeDecl):
        decl_type = decl.type.type
    else:
        decl_type = decl.type
    # declare the type
    type_state = self.decode_type(decl_type, symtab)
    symtab = type_state.symtab
    if decl.name is not None:
        # a variable is being declared;
        store_type = type_state.type
        if isinstance(store_type, ArrayType):
            # int w[10] acts like int *w when you access it.
            var_type = PtrType(store_type.type)
        else:
            var_type = store_type
        # decode an initializer
        initial_values = None
        if decl.init is not None:
            assert(initial_value==None) # Two sources of initialization?
            if isinstance(store_type, ArrayType):
                initial_values = []
                for expr in decl.init.exprs:
                    state = self.decode_expression_val(expr, symtab)
                    symtab = state.symtab
                    initial_values.append(state.expr)
            elif isinstance(store_type, IntType):
                initial_values = []
                state = self.decode_expression_val(decl.init, symtab)
                symtab = state.symtab
                initial_values.append(state.expr)
            elif isinstance(store_type, PtrType):
                state = self.decode_ref(decl.init, symtab)
                symtab = state.symtab
                initial_value = state.expr
            else:
                print("type %s" % store_type)
                print(ast_show(decl))
                print(decl.init.__class__)
                print(decl.init.__dict__)
                assert(False)
        elif initial_value:
            if isinstance(initial_value, StorageRef) and not isinstance(store_type, PtrType):
                # Copy each cell out of the referenced storage.
                def offset(k, i):
                    return StorageKey(k.storage, k.idx + i)
                initial_values = [
                    symtab.lookup(offset(initial_value.key(), i))
                    for i in range(store_type.sizeof())]
            else:
                initial_values = [initial_value]
        # allocate storage for the new object
        if not isinstance(store_type, PtrType):
            #print "initial_values for %s is %s" % (decl.name, initial_values)
            state = self.create_storage(
                decl.name, store_type, initial_values, symtab)
            symbol_value = self.dfg(StorageRef, var_type, state.expr.storage, state.expr.idx)
            symtab = state.symtab
        else:
            if initial_value is not None:
                symbol_value = initial_value
            else:
                # uninitialized pointer: points at Null storage
                symbol_value = self.dfg(StorageRef, store_type, Null(), 0)
        # point the name at it, with appropriate referenciness.
        sym = Symbol(decl.name)
        if symtab.is_declared(sym):
            # A duplicate declaration should match in type.
            # But that doesn't exactly work, because there may be a
            # pointer involved.
            if (self.verbose):
                print("Second declaration of %s" % sym)
            value = symtab.lookup(sym)
            if (value.type != symbol_value.type):
                print(ast_show(decl))
                print("sym %s value type %s symbol_value type %s" % (
                    sym, value.type, symbol_value.type))
                assert(False)
            # rewrite the assignment to point at a new storage
            symtab.assign(sym, symbol_value)
        else:
            symtab.declare(sym, symbol_value)
    return symtab
def create_global_symtab(self):
    """Build the global symbol table from top-level declarations.

    Function declarations/definitions are skipped here (they are resolved
    lazily at call sites); global variables are declared, and the _unroll
    pseudo-symbol used by dynamic loop unrolling starts as Undefined.
    """
    symtab = Symtab()
    for (iname, obj) in self.root_ast.children():
        if isinstance(obj, c_ast.Decl):
            # print
            # print obj.__dict__
            # print ast_show(obj)
            if isinstance(obj.type, c_ast.FuncDecl):
                if self.verbose:
                    print("Ignoring funcdecl %s for now" % (obj.name))
                pass
            else:
                if self.verbose:
                    print()
                    print(ast_show(obj))
                symtab = self.declare_variable(obj, symtab)
        elif isinstance(obj, c_ast.FuncDef):
            if self.verbose:
                print("Ignoring funcdef %s for now" % (obj.decl.name))
            pass
        else:
            print(obj.__class__)
            assert(False)
    symtab.declare(PseudoSymbol("_unroll"), self.dfg(Undefined))
    return symtab
def declare_scalar(self, name, value, symtab):
    # Declare a single-cell storage bound to `name`.
    # NOTE(review): ArrayVal is not defined or imported anywhere in the
    # visible module scope, so calling this raises NameError -- this looks
    # like dead code from an older revision; confirm before relying on it.
    print("decl scalar: %s" % (name,))
    storage = Storage(name, 1)
    symtab.declare(StorageKey(storage, 0), value)
    symtab.declare(Symbol(name), ArrayVal(storage, 0))
def create_scope(self, param_decls, param_exprs, symtab):
    """Create a child symbol table binding each formal parameter to its
    actual argument expression; raises when the counts differ."""
    scope_symtab = Symtab(symtab, scope=set())
    if len(param_decls) != len(param_exprs):
        raise ParameterListMismatchesDeclaration(
            "declared with %d parameters; called with %d parameters" %
            (len(param_decls), len(param_exprs)))
    # Lengths are equal here, so zip pairs every decl with its expression.
    for param_decl, param_expr in zip(param_decls, param_exprs):
        #print "create_scope declares %s as %s" % (ast_show(param_decl, oneline=True), param_expr)
        scope_symtab = self.declare_variable(
            param_decl, scope_symtab, initial_value=param_expr)
    if self.verbose:
        print("scope symtab is: %s" % scope_symtab)
    return scope_symtab
def decode_ptr_key(self, node):
    """Map an lvalue node that names a pointer to its symbol-table key.

    Runtime Storage cells cannot hold pointers in this language subset, so
    a pointer assignment target must be a bare identifier.
    """
    assert isinstance(node, c_ast.ID)
    return Symbol(node.name)
def decode_ref(self, node, symtab):
    """Decode an lvalue-ish node into a State (usually holding a StorageRef).

    Dispatches on the AST node class: identifiers are looked up directly,
    struct/array references go through their dedicated decoders, and unary
    ops (e.g. `*p`) fall through to full expression decoding.
    """
    # returns State
    if isinstance(node, c_ast.ID):
        result = State(symtab.lookup(Symbol(node.name)), symtab)
    elif isinstance(node, c_ast.StructRef):
        result = self.decode_struct_ref(node, symtab)
    elif isinstance(node, c_ast.ArrayRef):
        result = self.decode_array_ref(node, symtab)
    elif isinstance(node, c_ast.UnaryOp):
        result = self.decode_expression(node, symtab)
    else:
        print()
        print(ast_show(node))
        print(node.__class__)
        print(node.__dict__)
        assert(False)
    if self.verbose:
        print("decode_ref %s %s giving %s" % (node.__class__, ast_show(node), result.expr))
    return result
def decode_struct_ref(self, structref, symtab):
    """Decode `s.field` or `p->field` into a StorageRef at the field offset.

    For `->` the base reference is dereferenced first; the result points
    into the same Storage, shifted by the struct's offsetof(field).
    """
    # returns State
    sref_state = self.decode_ref(structref.name, symtab)
    storageref = sref_state.expr
    symtab = sref_state.symtab
    if structref.type == "->":
        prior_storageref = storageref
        storageref = prior_storageref.deref()
        if self.verbose:
            print("decode_struct_ref %s turns %s into %s" % (
                ast_show(structref), prior_storageref, storageref))
    elif structref.type==".":
        pass
    else:
        raise RuntimeError("Unknown structref type!")
    struct = storageref.type
    assert isinstance(struct, StructType)
    assert isinstance(structref.field, c_ast.ID)
    field = struct.get_field(structref.field.name)
    fieldstorage = self.dfg(StorageRef,
        field.type,
        storageref.storage,
        storageref.idx + struct.offsetof(field.name));
    return State(fieldstorage, symtab)
def decode_array_ref(self, arrayref, symtab):
    """Decode `a[i]` into a StorageRef at a statically-computed offset.

    The subscript must fold to a compile-time constant; a dynamic
    subscript raises NonconstantArrayAccess.
    """
    # returns State(StorageRef, Symtab)
    subscript_state = self.decode_expression_val(arrayref.subscript, symtab)
    symtab = subscript_state.symtab
    try:
        subscript_val = self.evaluate(subscript_state.expr)
    except NonconstantExpression:
        msg = "Array subscript isn't a constant expression at %s;\nexpr is %s" % (
            arrayref.subscript.coord, subscript_state.expr)
        raise NonconstantArrayAccess(msg)
    #print "subscript_val expr %s == %s" % (subscript_state.expr, subscript_val)
    state = self.decode_ref(arrayref.name, symtab)
    if self.verbose:
        print("array_ref got %s" % state.expr)
    name_storageref = state.expr
    if not isinstance(name_storageref, StorageRef):
        print(ast_show(arrayref))
        print("name: %s" % arrayref.name.name)
        print("storageref: %s" % name_storageref)
        assert(False)
    symtab = state.symtab
    if self.verbose:
        print(ast_show(arrayref))
        print("name_storageref: %s %s" % (name_storageref.type, name_storageref.type.__class__))
    # Both arrays and pointers index by their element type.
    if isinstance(name_storageref.type, ArrayType):
        element_type = name_storageref.type.type
    elif isinstance(name_storageref.type, PtrType):
        element_type = name_storageref.type.type
    else:
        print("name_storageref is %s" % name_storageref)
        assert(False)
    array_storageref = self.dfg(StorageRef,
        element_type,
        name_storageref.storage,
        name_storageref.idx + subscript_val*element_type.sizeof())
    if self.verbose:
        print("arrayref --> %s" % array_storageref)
    return State(array_storageref, symtab)
def eager_lookup(self, key, symtab):
    # Look up `key`, and when the stored value is itself a StorageRef,
    # chase it one level to the referenced cell's value.
    val = symtab.lookup(key)
    if isinstance(val, StorageRef):
        newval = symtab.lookup(val.key())
        #print "eager_lookup converts %s to %s" % (val, newval)
        val = newval
    #print "eager_lookup returns %s" % val
    return val
def coerce_value(self, expr, symtab):
    """Collapse a StorageRef expr to the value currently stored at its key;
    non-reference exprs pass through unchanged."""
    # TODO is this correct? The expr may be a ref storage acquired early
    # in the eval process, then the symtab changed to update
    # that location. Hmm.
    # This is getting super-brittle. I really have no idea
    # which symtab the expr should be evaluated against to coerce
    # it down to a value. Probably should have bundled that into a
    # wrapper object. Ugh.
    if isinstance(expr, StorageRef):
        key = expr.key()
        expr = symtab.lookup(key)
    return expr
# look up an expression, but evaluate away any StorageRef,
# so that the resulting expr can be incorporated as arguments into
# other exprs.
def decode_expression_val(self, expr, symtab, void=False):
    """Like decode_expression, but coerce a StorageRef result to its value."""
    state = self.decode_expression(expr, symtab, void=void)
    state = State(self.coerce_value(state.expr, symtab), state.symtab)
    #print "decode_expression_val returns %s" % state.expr
    return state
def decode_expression(self, expr, symtab, void=False):
    """Decode a C expression AST node into a State holding a DFG expr.

    May return a StorageRef for lvalue-ish results; callers that need a
    plain value use decode_expression_val. `void` permits a void-returning
    function call in statement context.
    """
    # returns State
    if isinstance(expr, c_ast.UnaryOp):
        if (expr.op=="*"):
            state = self.decode_ref(expr.expr, symtab)
            return State(state.expr.deref(), state.symtab)
        else:
            if expr.op=="-":
                state = self.decode_expression_val(expr.expr, symtab)
                return State(self.dfg(Negate, state.expr), state.symtab)
            elif expr.op=="~":
                state = self.decode_expression_val(expr.expr, symtab)
                return State(self.dfg(BitNot, state.expr, self.bit_width), state.symtab)
            elif expr.op=="!":
                state = self.decode_expression_val(expr.expr, symtab)
                return State(self.dfg(LogicalNot, state.expr), state.symtab)
            elif expr.op=="&":
                sub_state = self.decode_ref(expr.expr, symtab)
                symtab = sub_state.symtab
                if self.verbose:
                    print("for %s got expr %s %s" % (expr.expr, sub_state.expr, type(sub_state.expr)))
                ref = sub_state.expr.ref()
                if self.verbose:
                    print("&-op decodes to %s" % ref)
                return State(ref, symtab)
            print("expr.op == %s" % expr.op)
            assert(False)
    elif isinstance(expr, c_ast.BinaryOp):
        left_state = self.decode_expression_val(expr.left, symtab)
        right_state = self.decode_expression_val(expr.right, left_state.symtab)
        #print "right_state is %s" % right_state.expr
        if expr.op=="+":
            expr = self.dfg(Add, left_state.expr, right_state.expr)
        elif expr.op=="-":
            # Hmm. Have to start thinking about representations vs. overflows...
            expr = self.dfg(Subtract, left_state.expr, right_state.expr)
        elif expr.op=="*":
            expr = self.dfg(Multiply, left_state.expr, right_state.expr)
        elif expr.op=="<":
            expr = self.dfg(CmpLT, left_state.expr, right_state.expr)
        elif expr.op=="<=":
            expr = self.dfg(CmpLEQ, left_state.expr, right_state.expr)
        elif expr.op=="==":
            expr = self.dfg(CmpEQ, left_state.expr, right_state.expr)
        elif expr.op==">":
            # NB the argument order is swapped.
            expr = self.dfg(CmpLT, right_state.expr, left_state.expr)
        elif expr.op==">=":
            # NB the argument order is swapped.
            expr = self.dfg(CmpLEQ, right_state.expr, left_state.expr)
        elif expr.op=="/":
            expr = self.dfg(Divide, left_state.expr, right_state.expr)
        elif expr.op=="%":
            expr = self.dfg(Modulo, left_state.expr, right_state.expr)
        elif expr.op=="^":
            expr = self.dfg(Xor, left_state.expr, right_state.expr)
        elif expr.op=="<<":
            expr = self.dfg(LeftShift, left_state.expr, right_state.expr, self.bit_width)
        elif expr.op==">>":
            expr = self.dfg(RightShift, left_state.expr, right_state.expr, self.bit_width)
        elif expr.op=="|":
            expr = self.dfg(BitOr, left_state.expr, right_state.expr)
        elif expr.op=="&":
            expr = self.dfg(BitAnd, left_state.expr, right_state.expr)
        elif expr.op=="&&":
            expr = self.dfg(LogicalAnd, left_state.expr, right_state.expr)
        else:
            print()
            print(ast_show(expr))
            print(expr.__class__)
            print(expr.__dict__)
            assert(False) # unimpl
        return State(expr, right_state.symtab)
    elif isinstance(expr, c_ast.Constant):
        assert expr.type=="int"
        if expr.value.startswith("0x"):
            literal = int(expr.value, 16)
            #print "parsed %s as 0x %x" % (expr.value, literal)
        else:
            literal = int(expr.value, 10)
        return State(self.dfg(Constant, literal), symtab)
    elif isinstance(expr, c_ast.ArrayRef) or isinstance(expr, c_ast.StructRef):
        ref_val_state = self.decode_ref(expr, symtab)
        ref_val = ref_val_state.expr
        assert(isinstance(ref_val, StorageRef))
        # And now it's okay to return StorageRefs; the caller
        # must coerce to a value if that's what he needs.
        return State(ref_val, ref_val_state.symtab)
    elif isinstance(expr, c_ast.ID):
        return State(symtab.lookup(Symbol(expr.name)), symtab)
    elif isinstance(expr, c_ast.FuncCall):
        state = self.decode_funccall(expr, symtab)
        if not void and isinstance(state.expr, Void):
            print(ast_show(expr))
            raise VoidFuncUsedAsExpression()
        return state
    else:
        pass
    # Fallthrough: unhandled expression node class.
    print(Constant)
    print(expr.__class__)
    print(expr.__dict__)
    assert(False)
def decode_funccall(self, expr, symtab):
    """Inline a function call: bind args, transform the body, merge effects.

    Returns a State whose expr is the callee's "return" pseudo-symbol value,
    or Void() when the callee never assigned one.

    Fix vs. the original: the two bare `except:` clauses (which also
    swallowed KeyboardInterrupt and unrelated bugs) are narrowed to
    AttributeError, the only expected failure when `expr.args` or the
    declared parameter list is None.
    """
    func_arg_exprs = []
    prev_symtab = symtab
    try:
        exprs = expr.args.exprs
    except AttributeError:
        # call with no argument list (expr.args is None)
        exprs = []
    for arg_expr in exprs:
        state = self.decode_expression(arg_expr, prev_symtab)
        # NB we can allow exprs here that are lvalues (eg ptr StorageRefs),
        # as we'll be writing them back into a symtab like an assignment.
        #print "state %s type %s" % (state, type(state))
        if (self.verbose):
            print("arg %s expands to %s" % (ast_show(arg_expr), state.expr))
        func_arg_exprs.append(state.expr)
        prev_symtab = state.symtab
    func_def = self.find_func(expr.name.name)
    try:
        param_decls = func_def.decl.type.args.params
    except AttributeError:
        # parameterless declaration
        param_decls = []
    call_symtab = self.create_scope(
        param_decls, func_arg_exprs, prev_symtab)
    side_effect_symtab = self.transform_compound(
        func_def.body, call_symtab, func_scope=True)
    result_symtab = call_symtab.apply_changes(side_effect_symtab, prev_symtab)
    #print "side_effect_symtab: %s"%side_effect_symtab
    try:
        result_expr = side_effect_symtab.lookup(PseudoSymbol("return"))
    except UndefinedSymbol:
        result_expr = Void()
    return State(result_expr, result_symtab)
def transform_assignment(self, stmt, symtab):
    """Apply an assignment statement (=, +=, -=, *=, |=) to the symtab.

    Decodes the rvalue (compound ops read the lvalue first), then routes
    the store: the _unroll pseudo-symbol, a pointer rebinding, a multi-cell
    struct/array copy, or a plain scalar cell write.
    """
    # returns symtab
    if stmt.op == "=":
        right_state = self.decode_expression(stmt.rvalue, symtab)
        symtab = right_state.symtab
        expr = right_state.expr
        #print "ta: %s decodes to %s" % (ast_show(stmt.rvalue, 1), expr)
    elif len(stmt.op) == 2 and stmt.op[1] == '=':
        # These tricksy assignments won't work for pointer values just yet.
        left_state = self.decode_expression_val(stmt.lvalue, symtab)
        right_state = self.decode_expression_val(stmt.rvalue, left_state.symtab)
        if stmt.op == "+=":
            if self.verbose:
                print("stmt.rvalue %s" % ast_show(stmt.rvalue))
                print("left_state %s right_state %s" % (left_state, right_state))
            expr = self.dfg(Add, left_state.expr, right_state.expr)
        elif stmt.op == "-=":
            expr = self.dfg(Subtract, left_state.expr, right_state.expr)
        elif stmt.op == "*=":
            expr = self.dfg(Multiply, left_state.expr, right_state.expr)
        elif stmt.op == "|=":
            expr = self.dfg(BitOr, left_state.expr, right_state.expr)
        else:
            assert(False)
        symtab = right_state.symtab
    else:
        assert(False)
    #print "transform_assignment symtab is %s" % symtab
    lvalue = stmt.lvalue
    if isinstance(lvalue, c_ast.ID) and lvalue.name == "_unroll":
        # special compiler hint variable controlling dynamic unrolling
        sym = PseudoSymbol(lvalue.name)
        symtab.assign(sym, expr)
    else:
        lvalue_state = self.decode_ref(lvalue, symtab)
        symtab = lvalue_state.symtab
        #print "node is %s expr is %s" % (ast_show(lvalue), lvalue_state.expr)
        #print "type is %s" % lvalue_state.expr.type
        #print "transform_assignment lvalue %s type %s" % (lvalue_state.expr, lvalue_state.expr.type)
        if lvalue_state.expr.is_ptr():
            #print "lvalue was a ptr, it now points elsewhere"
            sym = self.decode_ptr_key(lvalue)
            symtab.assign(sym, expr)
        else:
            sym = lvalue_state.expr.key()
            sizeof = lvalue_state.expr.type.sizeof()
            if sizeof > 1:
                #print "Transferring %d values to %s" % (sizeof, lvalue_state.expr)
                lkey = lvalue_state.expr
                rkey = expr
                for i in range(lvalue_state.expr.type.sizeof()):
                    symtab.assign(lkey.offset_key(i),
                        symtab.lookup(rkey.offset_key(i)))
            else:
                # int-valued expression
                #print "sym is %s (%s)" % (sym, sym.__class__)
                expr = self.coerce_value(expr, symtab)
                symtab.assign(sym, expr)
        #print "transform_assignment %s" % ast_show(stmt, oneline=1)
        #print "+transform_assignment sym %s val %s" % (sym, expr)
    return symtab
def transform_if(self, statement, symtab):
    """Transform an if statement.

    A statically-evaluable condition executes exactly one branch; a
    dynamic condition executes both branches in child scopes and merges
    every modified binding through a Conditional DFG node.
    """
    # returns symtab
    cond_state = self.decode_expression_val(statement.cond, symtab)
    symtab = cond_state.symtab
    try:
        # If condition is statically evaluable. Just run one branch.
        cond_val = self.evaluate(cond_state.expr)
        if cond_val:
            return self.transform_statement(statement.iftrue, symtab)
        elif statement.iffalse is not None:
            return self.transform_statement(statement.iffalse, symtab)
        else:
            # no-op.
            return symtab
    except NonconstantExpression:
        # If condition is dynamic. Run both branches (in dedicated scopes)
        # and make resulting storage updates conditional on the
        # dynamic condition.
        then_scope = Symtab(symtab, scope=set())
        then_symtab = self.transform_statement(
            statement.iftrue, then_scope)
        else_scope = Symtab(symtab, scope=set())
        if statement.iffalse is not None:
            else_symtab = self.transform_statement(
                statement.iffalse, else_scope)
        else:
            else_symtab = else_scope
        #new_symtab = Symtab(symtab) # DEAD
        new_symtab = symtab
        modified_idents = then_scope.scope.union(else_scope.scope)
        #print "If modifies %s" % modified_idents
        for key in modified_idents:
            expr = self.dfg(Conditional, cond_state.expr, then_symtab.lookup(key), else_symtab.lookup(key))
            new_symtab.assign(key, expr)
        return new_symtab
def transform_while(self, while_stmt, symtab):
    """Lower a C `while` statement onto the generic loop transform."""
    condition = while_stmt.cond
    body = while_stmt.stmt
    return self.transform_loop(condition, body, symtab)
def transform_for(self, for_stmt, symtab):
    """Lower a C `for` loop: run the init statement, then treat
    (body; increment) as the body of a generic condition-driven loop."""
    working_symtab = self.transform_statement(for_stmt.init, symtab)
    return self.transform_loop(
        for_stmt.cond,
        c_ast.Compound([for_stmt.stmt, for_stmt.next]),
        working_symtab)
def loop_msg(self, m):
    """Print a progress message, indented by loop-nesting depth, but only
    when progress/verbose output is on and nesting is shallow (< 2)."""
    noisy = self.progress or self.verbose
    if noisy and self.loops < 2:
        print("%s%s" % (" " * self.loops, m))
def unroll_static_loop(self, cond, body_compound, symtab):
    """Unroll a loop whose condition folds to a constant each iteration.

    Repeats the body until the condition evaluates false, raising
    StaticallyInfiniteLoop once loop_sanity_limit iterations are exceeded.
    """
    self.loops += 1
    sanity = -1
    working_symtab = symtab
    #self.loop_msg("___start___")
    cond_state = None
    cond_val = None
    while True:
        sanity += 1
        if sanity > self.loop_sanity_limit:
            print(cond_val)
            print(cond_state.expr)
            print(ast_show(cond))
            raise StaticallyInfiniteLoop(self.loop_sanity_limit)
        cond_state = self.decode_expression_val(cond, working_symtab)
        # once a condition is statically evaluable, we assume it
        # always is.
        cond_val = self.evaluate(cond_state.expr)
        if not cond_val:
            break
        if self.verbose:
            print("loop body is:")
            print(ast_show(body_compound))
        # periodic progress report every 64 iterations
        if sanity>0 and (sanity & 0x3f)==0:
            self.loop_msg("static iter %d" % sanity)
        working_symtab = self.transform_statement(
            body_compound, cond_state.symtab)
    #self.loop_msg("___end___")
    self.loops-=1
    return working_symtab
def unroll_dynamic_loop(self, cond, body_compound, symtab):
    """Unroll a loop with a dynamic condition using the _unroll hint.

    The body is expanded _unroll times, each iteration in its own child
    scope; the scopes are then folded back innermost-first, guarding every
    modified binding with a Conditional on that iteration's condition.
    """
    self.loops+=1
    try:
        _unroll_val = self.evaluate(symtab.lookup(PseudoSymbol("_unroll")))
    except UndefinedExpression:
        print("At line %s:" % cond.coord.line)
        raise
    # build up nested conditional scopes
    working_symtab = symtab
    cond_stack = []
    scope_stack = []
    for i in range(_unroll_val):
        self.loop_msg("dyn iter %s" % i)
        cond_state = self.decode_expression_val(cond, working_symtab)
        scope = Symtab(cond_state.symtab, scope=set())
        cond_stack.append(cond_state.expr)
        scope_stack.append(scope)
        #print "rep %d; k is: %s" % (i, self.eager_lookup(Symbol("k"), scope))
        working_symtab = self.transform_statement(body_compound, scope)
    assert _unroll_val == len(cond_stack)
    while len(cond_stack) > 0:
        cond = cond_stack.pop()
        scope = scope_stack.pop()
        modified_idents = scope.scope
        #applied_symtab = Symtab(working_symtab) #DEAD
        applied_symtab = working_symtab
        for ref in modified_idents:
            if (self.verbose):
                print()
                print("cond: %s" % cond)
                print("iftrue: %s" % working_symtab.lookup(ref))
                print("iffalse: %s" % scope.parent.lookup(ref))
            applied_symtab.assign(ref,
                self.dfg(Conditional, cond, working_symtab.lookup(ref), scope.parent.lookup(ref)))
        working_symtab = applied_symtab
    self.loops-=1
    return working_symtab
def transform_loop(self, cond, body_compound, symtab):
working_symtab = symtab
cond_state = self.decode_expression_val(cond, working_symtab)
try:
try:
cond_val = self.evaluate(cond_state.expr)
except NonconstantExpression:
pass
except Exception:
print("expr is %s" % cond_state.expr)
raise
try:
return self.unroll_static_loop(cond, body_compound, symtab)
except NonconstantExpression as unexpected_nce:
traceback.print_exc(unexpected_nce)
print("\n---------\n")
raise Exception("Unexpected NonconstantExpression; it leaked up from some subexpression evaluation?")
except NonconstantExpression:
#print "Condition is dynamic (ex %s):" % repr(ex)
#print ":: ",ast_show(cond)
#print ":: ",cond_state.expr
# condition can't be evaluated statically at top of loop.
# [NB In principle, it might become statically-evaluable later
# (eg while (x<5 and x<size)), but it's not clear we really
# want to encourage programming that way just to get the
# automatic unroll limit, since it would involve emitting
# a runtime evaluation for x<5.]
# So, we assume that the only way to limit the loop is to
# use the _unroll hint.
return self.unroll_dynamic_loop(cond, body_compound, symtab)
def transform_compound(self, body, outer_symtab, func_scope=False):
# func_scope says that it's okay to return a value
# from (the end of) this compound block.
# returns Symtab
working_symtab = outer_symtab
#print "compound %s is %s" % (body.__class__, ast_show(body))
assert isinstance(body, c_ast.Compound)
children = body.children()
for stmt_idx in range(len(children)):
(name, statement) = children[stmt_idx]
return_allowed = (func_scope and stmt_idx == len(children)-1)
working_symtab = self.transform_statement(statement, working_symtab, return_allowed = return_allowed)
return working_symtab
    def transform_statement(self, statement, working_symtab, return_allowed=False):
        """Dispatch on the statement's AST node type and transform it.

        return_allowed: a Return node is only accepted when this flag is
        set (last statement of a function body); otherwise EarlyReturn is
        raised.  Returns the updated symtab.  Unhandled node types dump
        diagnostics and abort via assert.
        """
        if isinstance(statement, c_ast.Assignment):
            working_symtab = self.transform_assignment(
                statement, working_symtab)
        elif isinstance(statement, c_ast.If):
            working_symtab = self.transform_if(statement, working_symtab)
        elif isinstance(statement, c_ast.Return):
            if not return_allowed:
                raise EarlyReturn()
            return_state = self.decode_expression_val(
                statement.expr, working_symtab)
            #working_symtab = Symtab(return_state.symtab) # DEAD
            working_symtab = return_state.symtab
            # Record the return value under the pseudo-symbol "return".
            working_symtab.declare(PseudoSymbol("return"), return_state.expr)
        elif isinstance(statement, c_ast.Decl):
            # NB modify in place. I think that's actually okay,
            # when we're not forking them. Which I guess we never
            # do, since we use them and discard them.
            working_symtab = self.declare_variable(statement, working_symtab)
        elif isinstance(statement, c_ast.FuncCall):
            # an expression whose result is discarded; only side-effects
            # matter.
            state = self.decode_expression_val(statement, working_symtab, void=True)
            working_symtab = state.symtab
        elif isinstance(statement, c_ast.For):
            working_symtab = self.transform_for(statement, working_symtab)
        elif isinstance(statement, c_ast.While):
            working_symtab = self.transform_while(statement, working_symtab)
        elif isinstance(statement, c_ast.Compound):
            working_symtab = self.transform_compound(statement, working_symtab)
        elif isinstance(statement, c_ast.EmptyStatement):
            pass
        else:
            # Unimplemented statement type: dump diagnostics before aborting.
            print("class: ",statement.__class__)
            print("dict: ",statement.__dict__)
            print("ast: ",ast_show(statement))
            assert False # unimpl statement type
        if self.verbose:
            print("after statement %s, symtab:" % ast_show(statement, oneline=True))
            print(" %s"%working_symtab)
        return working_symtab
#DEAD
# def decode_label_hacks(self, func_name):
# try:
# input = self.find_func(func_name)
# except:
# raise NoInputSpecDefined();
# iv = IDVisitor()
# iv.visit(input)
# return iv.ids
    def print_expression(self):
        """Print the final (storage key, expression) pairs from self.output."""
        print("Final expr assignments:")
        for (name, value) in self.output:
            print("%s => %s" % (name, value))
    def make_global_storage(self, node, symtab):
        """Create storage for a pointer-typed outsource() parameter.

        node must be a c_ast.Decl whose decoded type is a PtrType; storage
        is allocated for the pointed-to type and a StorageRef to slot 0 is
        returned.
        """
        # returns State
        assert isinstance(node, c_ast.Decl)
        type_state = self.decode_type(node.type, symtab)
        ptr_type = type_state.type
        assert isinstance(ptr_type, PtrType)
        store_type = ptr_type.type
        state = self.create_storage(node.name, store_type, None, symtab)
        symtab = state.symtab
        symbol_value = self.dfg(StorageRef, ptr_type, state.expr.storage, state.expr.idx)
        return State(symbol_value, symtab)
    def root_funccall(self, symtab):
        """Set up and symbolically execute the outsource() entry point.

        Allocates global storage for the input, the optional NIZK input
        (3-parameter form) and the output parameter, marks every input
        storage slot as an Input/NIZKInput leaf (recorded in self.inputs /
        self.nizk_inputs), transforms the function body, and returns a
        State whose expr is the output StorageRef.
        """
        # returns a StorageRef
        func_def = self.find_func("outsource")
        param_decls = func_def.decl.type.args.params
        if len(param_decls) == 3:
            has_nizk = True
        elif len(param_decls) == 2:
            has_nizk = False
        else:
            raise MalformedOutsourceParameters()
        INPUT_ARG_IDX = 0
        NIZK_ARG_IDX = 1
        OUTPUT_ARG_IDX = -1 # last, regardless of if there's a nizk input
        # Allocate storage for each parameter, threading the symtab through.
        arg_exprs = []
        input_state = self.make_global_storage(param_decls[INPUT_ARG_IDX], symtab)
        state = input_state
        arg_exprs.append(input_state.expr)
        if has_nizk:
            nizk_state = self.make_global_storage(param_decls[NIZK_ARG_IDX], state.symtab)
            state = nizk_state
            arg_exprs.append(nizk_state.expr)
        output_state = self.make_global_storage(param_decls[OUTPUT_ARG_IDX], state.symtab)
        state = output_state
        arg_exprs.append(state.expr)
        symtab = state.symtab
        symtab = self.create_scope(
            param_decls, arg_exprs, symtab)
        def create_input_keys(param_decl_idx, class_):
            # Mark every slot of the given parameter's storage as an input
            # leaf of type class_ (Input or NIZKInput); returns the leaves.
            in_sym = Symbol(param_decls[param_decl_idx].name)
            #print "sym %s" % in_sym
            input_storage_ref = symtab.lookup(in_sym).deref()
            #print "input_storage_ref %s" % input_storage_ref
            #print "root_funccall symtab: %s" % symtab
            assert(input_storage_ref.idx==0)
            input_list = []
            for idx in range(input_storage_ref.type.sizeof()):
                #print "create_input_keys(%s)[%d]" % (class_.__name__, idx)
                sk = StorageKey(input_storage_ref.storage, idx)
                input = self.dfg(class_, sk)
                input_list.append(input)
                symtab.assign(sk, input)
            return input_list
        self.inputs = create_input_keys(INPUT_ARG_IDX, Input)
        self.nizk_inputs = []
        if has_nizk:
            self.nizk_inputs = create_input_keys(NIZK_ARG_IDX, NIZKInput)
        #print "root_funccall symtab: %s" % symtab
        result_symtab = self.transform_compound(
            func_def.body, symtab, func_scope=True)
        #print "root_funccall result_symtab:"
        #result_symtab.dbg_dump_path()
        out_sym = Symbol(param_decls[OUTPUT_ARG_IDX].name)
        output = result_symtab.lookup(out_sym).deref()
        return State(output, result_symtab)
    def create_expression(self):
        """Compile the program into a list of (StorageKey, expression)
        pairs, one per output storage slot, with constant subexpressions
        collapsed away."""
        global_symtab = self.create_global_symtab()
        # print "Setting up inputs; symtab: %s" % global_symtab
        # # mark all input storage locations as non-constant inputs
        # for name in self.decode_label_hacks("_input"):
        #     v = global_symtab.lookup(Symbol(name))
        #     assert(isinstance(v, ArrayOp))
        #     assert(v.idx == 0)
        #     for idx in range(v.storage.size):
        #         sk = StorageKey(v.storage, idx)
        #         print "assigning %s" % sk
        #         global_symtab.assign(sk, Input(sk))
        # Result unused; find_func raises if outsource() is absent, so this
        # acts as an early validity check.
        outsource_func = self.find_func("outsource")
        if self.verbose:
            print("global_symtab: %s" % global_symtab)
        self.timing.phase("root_funccall")
        out_state = self.root_funccall(global_symtab)
        output_storage_ref = out_state.expr
        # memoizer for collapsing exprs to minimal expressions (with
        # no expressions purely of constant terms).
        collapser = ExprCollapser(self.expr_evaluator)
        self.timing.phase("collapse_output")
        if self.progress:
            print("collapsing output")
        output = []
        for idx in range(output_storage_ref.type.sizeof()):
            #print "working on output %d" % idx
            self.timing.phase("collapse_output %d" % idx)
            sk = StorageKey(output_storage_ref.storage, idx)
            try:
                out_expr = self.eager_lookup(sk, out_state.symtab)
                # global profile_nugget
                # profile_nugget = ProfileNugget(collapser, out_expr)
                # if (idx in []): #[3, 47]):
                #     outfn = "/tmp/profiler.%d" % idx
                #     cProfile.run('profile_nugget.run()', outfn)
                # else:
                #     profile_nugget.run()
                # value = profile_nugget.result
                value = collapser.collapse_tree(out_expr)
            except TypeError:
                #print out_expr
                raise
            output.append((sk, value))
        return output
def find_func(self, goal_name):
def is_match(pr):
(name,node) = pr
try:
return node.decl.name==goal_name
except:
return False
matches = filter(is_match, self.root_ast.children())
if len(matches) == 0:
raise FuncNotDefined(goal_name)
elif len(matches) > 1:
raise FuncMultiplyDefined(goal_name)
return matches[0][1]
    def evaluate(self, expr):
        """Collapse expr to a constant via the ExprEvaluator; evaluation of
        anything touching an input raises NonconstantExpression."""
        return self.expr_evaluator.collapse_tree(expr)
class ExprEvaluator(Collapser):
    """Collapser that evaluates an expression tree down to a constant.

    Any reference to an input value makes evaluation fail with
    NonconstantExpression (see get_input).
    """
    def __init__(self):
        Collapser.__init__(self)
    def get_dependencies(self, expr):
        # Child expressions that must be collapsed before expr itself.
        return expr.collapse_dependencies()
    def collapse_impl(self, expr):
        return expr.evaluate(self)
    def get_input(self, key):
        # Inputs are runtime values, so any expression reaching one is
        # not a compile-time constant.
        raise NonconstantExpression()
class ExprCollapser(Collapser):
    """Collapser that rewrites an expression tree with purely-constant
    subexpressions folded to their values; non-constant parts survive.

    Delegates constant evaluation to the supplied ExprEvaluator.
    """
    def __init__(self, evaluator):
        Collapser.__init__(self)
        self.evaluator = evaluator
    def get_dependencies(self, expr):
        # timing = Timing("ExprCollapser.get_dependencies")
        result = expr.collapse_dependencies()
        # timing.phase("done")
        return result
    def collapse_impl(self, expr):
        # timing = Timing("ExprCollapser.collapse_impl")
        result = expr.collapse_constants(self)
        # timing.phase("done")
        # if (timing.prev_elapsed_sec > 0.005):
        #     print "Slow expression to collapse was: %s" % expr
        return result
    def evaluate_as_constant(self, expr):
        # Fold a fully-constant subexpression to its value.
        return self.evaluator.collapse_tree(expr)
def main(argv):
    """Command-line entry point: parse options, compile the C file, then
    emit whichever output formats were requested.

    argv: the argument list WITHOUT the program name (passed straight to
    argparse).  Exits with an argparse error (SystemExit) on bad usage.
    """
    parser = argparse.ArgumentParser(description='Compile C to QSP/QAP')
    parser.add_argument('cfile', metavar='<cfile>',
        help='a C file to compile')
    parser.add_argument('--print', dest='print_exprs', action='store_true', default=False,
        help="print output expressions on stdout")
    parser.add_argument('--verbose', dest='verbose', action='store_true', default=False,
        help="enable extra verbose output")
    parser.add_argument('--il', dest='il_file',
        help='intermediate circuit output file')
    parser.add_argument('--json', dest='json_file',
        help='json version of intermediate circuit output file')
    parser.add_argument('--arith', dest='arith_file',
        help='arithmetic circuit output file')
    parser.add_argument('--bit-width', dest='bit_width', type=int,
        help='bit width -- affects bitwise operator semantics and arithmetic circuit output', default=32)
    parser.add_argument('--bool', dest='bool_file',
        help='boolean circuit output file')
    parser.add_argument('--ignore-overflow', dest='ignore_overflow', action='store_true',
        help='ignore field-P overflows; never truncate', default=False)
    parser.add_argument('--cpparg', dest='cpp_arg', nargs="*",
        help='extra arguments to C preprocessor')
    parser.add_argument('--loop-sanity-limit', dest='loop_sanity_limit', type=int,
        help='limit on statically-measured loop unrolling', default=1000000)
    parser.add_argument('--progress', dest='progress', action='store_true', default=False,
        help='print progress messages during compilation')
    args = parser.parse_args(argv)
    timing = Timing(args.cfile, enabled=False)
    try:
        vercomp = Vercomp(args.cfile, args, timing, args.verbose)
    except Exception as ex:
        # One-line summary before the re-raised traceback.
        print(repr(ex))
        raise
    if args.print_exprs:
        timing.phase("print_expression")
        vercomp.print_expression()
    if args.il_file is not None:
        timing.phase("emit_il")
        # BUGFIX: pickle requires a binary-mode file; opening with "w"
        # raised TypeError on Python 3.  `with` also guarantees the handle
        # is closed even if dumping fails.
        with open(args.il_file, "wb") as fp:
            pickle.dump(vercomp.output, fp)
    if args.json_file is not None:
        timing.phase("emit_json")
        with open(args.json_file, "w") as fp:
            json.dump(vercomp.output, fp)
    if args.arith_file is not None:
        timing.phase("emit_arith")
        if vercomp.progress:
            print("Compilation complete; emitting arith.")
        ArithFactory(args.arith_file, vercomp.inputs, vercomp.nizk_inputs, vercomp.output, vercomp.bit_width)
    if args.bool_file is not None:
        timing.phase("emit_bool")
        if vercomp.progress:
            print("Compilation complete; emitting bool.")
        BooleanFactory(args.bool_file, vercomp.inputs, vercomp.nizk_inputs, vercomp.output, vercomp.bit_width)
    timing.phase("done")
def testcase():
    """Developer smoke test: compile the one-matrix example.

    NOTE(review): 'True' after --ignore-overflow looks like a stray
    positional (the flag is store_true) -- confirm argparse accepts it.
    """
    cmdline = ("one-matrix.c --bit-width 32 --cpparg Ibuild/ DPARAM=3 "
               "DBIT_WIDTH=32 --ignore-overflow True")
    main(cmdline.split(' '))
| [
"HarryR@noreply.users.github.com"
] | HarryR@noreply.users.github.com |
799f27d7bd6278066b4a0c11259c76d826704d80 | 48e9d0e84238daf0de290551e3588e9ff3f49549 | /calculadora.py | 43fadb96364fb7c2a09a91ee806895c89e916e0c | [] | no_license | celord/PythonGreencore | 9606af569738703b66d80bce6e423c9a313fa539 | 259aadcc346203f8092f6c6d286e3fca2e9fc550 | refs/heads/master | 2020-05-30T23:18:15.542876 | 2019-06-19T14:39:59 | 2019-06-19T14:39:59 | 190,014,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | def Menu():
print("""*****************
Calculadora
************
Menu
1) Suma
2) Resta
3) Multiplicacion
4) Division
""")
def Calculadora():
    """Interactive arithmetic calculator (Spanish prompts).

    Shows the menu once, then loops while the selected option is 1-4,
    reading two integers and printing the result of the chosen operation.
    Division by zero is reported instead of crashing.
    """
    Menu()
    opc = int(input("Seleccion Opcion \n"))
    while 0 < opc < 5:
        x = int(input("Ingrese Numero\n"))
        y = int(input("Ingrese otro numero\n"))
        if opc == 1:
            print("La suma es: ", x + y)
        elif opc == 2:
            print("La resta es:", x - y)
        elif opc == 3:
            print("La multiplicacion es:", x * y)
        else:  # opc == 4 (only remaining value inside the while guard)
            try:
                print("La division es: ", x / y)
            except ZeroDivisionError:
                print("No se permite la division entre 0")
        # Every branch re-prompted with the same text; hoisted here to
        # remove the five duplicated input() calls.
        opc = int(input("Seleccione Opcion"))
Calculadora()
| [
"celord@gmail.com"
] | celord@gmail.com |
d874442d2b3aae643694eac7e754a09f39b2094b | c74d19de038a0ede9c91e3c357b5a8854e8698ad | /07-Dynamic Programming/coins.py | 175eaf2519923f2d9cb5d90c104f534803dede37 | [
"MIT"
] | permissive | sajjadm624/competitive-programmer-handbook-python | 1e026824faaaed0f6dc1cbf05225f9d7b9e4cf7e | 8b33f4cff7ce5cb20f69b5f1cb67ee8154826981 | refs/heads/master | 2022-06-17T08:37:00.217521 | 2020-05-08T10:33:35 | 2020-05-08T10:33:35 | 260,295,899 | 0 | 0 | MIT | 2020-04-30T19:06:24 | 2020-04-30T19:06:24 | null | UTF-8 | Python | false | false | 1,442 | py | import math
coins = [1,3,4]
#recursive solution to minimum number of coins problem
def c_solve(x):
    """Plain recursion: minimum number of coins from `coins` summing to x.

    Returns math.inf when x is negative/unreachable.  Exponential time --
    only practical for small x (see the demo prints below it).
    """
    if x < 0:
        return math.inf
    if x == 0:
        return 0
    return min((c_solve(x - c) + 1 for c in coins), default=math.inf)
print(c_solve(10)) #solves quickly
print(c_solve(30)) #takes several seconds
#print(c_solve(100)) #times out
#======================================================================
#optimized recursive solution using dynamic programming and memoization
value = {}
def dp_solve(x):
    """Memoized recursion: minimum coins from `coins` summing to x.

    Results are cached in the module-level `value` dict; returns math.inf
    when x is negative/unreachable.
    """
    if x < 0: return math.inf
    elif x == 0: return 0
    # BUGFIX: was `try: return value[x]` with a bare `except: pass`, which
    # would also swallow unrelated errors (NameError, KeyboardInterrupt...).
    if x in value:
        return value[x]
    best = math.inf
    for c in coins:
        best = min(best, dp_solve(x-c)+1)
    value[x] = best
    return best
print(dp_solve(30)) #solves quickly
print(dp_solve(100)) #solves quickly
#========================================================================
#iterative version of dynamic programming solution, with solution printed
value = {0: 0}
first = {}
def it_solve(n):
    """Bottom-up DP over the module-level `value`/`first` tables.

    Fills value[x] (minimum coin count for sum x, math.inf if unreachable)
    and first[x] (a coin achieving it) for x = 1..n; relies on `value`
    being pre-seeded with {0: 0} (done just above this function).
    Returns (value[n], first[n]).
    """
    for x in range(1,n+1):
        value[x] = math.inf
        for c in coins:
            if x-c >= 0 and value[x-c]+1 < value[x]:
                value[x] = value[x-c]+1
                first[x] = c
    return value[n], first[n]
n = 33
it_solve(n)
# Walk back through first[] to print one optimal coin selection for n.
while n > 0:
    print(first[n])
    n -= first[n]
#===========================================
#total number of solutions for each coin sum
# count[x] = number of ordered ways (compositions) to write x as a sum of
# coins.  count[0] is seeded to 1; the append inside the loop creates the
# zeroed slot for the NEXT x, while count[x] itself (created on the
# previous pass) is accumulated from count[x-c].
count = [1]
n = 33
for x in range(n+1):
    count.append(0)
    for c in coins:
        if x-c >=0:
            count[x] += count[x-c]
print(count[33])
| [
"noreply@github.com"
] | noreply@github.com |
150caba5e2ce7bbe0f2fac7228a73b1c48b130e8 | 35be15b1dc120a256750cf66305f169974c2e55c | /ecommerce/jan2020/blog/admin.py | 2f847da382c95e6db6daed732a0c2fa22da850e2 | [] | no_license | tejendrapatel/Ecommerce_Logics | 2f10a5e216691add092f37a8186a9940b905f173 | 16ad13672c275e1be3ee6b3f5cd84d09f1600496 | refs/heads/main | 2023-08-27T00:53:10.438777 | 2021-10-23T11:50:36 | 2021-10-23T11:50:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | from django.contrib import admin
from blog.models import *
# Register the blog app's models so they are editable in the Django admin.
admin.site.register(college)
admin.site.register(contact)
| [
"info.prospectias@gmail.com"
] | info.prospectias@gmail.com |
6c542f2791ff8cf70bd64f3ba980fb51f4749655 | 3c383f520c80c97e215753efc76a83aa0a8ad7b1 | /inventory/inventory/urls.py | baf70a45a65d5ccdf831ff3ed905b88fae1ad1f2 | [] | no_license | vermamayank5455/stock_inventory | a75244098042b7148300e88075c884fa55878b88 | e97ca7fee3126427db58de222aa57226c9855c30 | refs/heads/main | 2023-07-15T17:13:22.590562 | 2021-08-18T11:13:06 | 2021-08-18T11:13:06 | 337,970,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | """inventory URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Top-level URL routing table.
# NOTE(review): 'admin-se/' and 'store/' both include the same 'store.url'
# URLconf -- confirm this duplication is intentional.
urlpatterns = [
    path('admin-se/',include('store.url')),
    path('admin/',admin.site.urls),
    path('store/', include('store.url')),
]
| [
"vermamayank5455@gmail.com"
] | vermamayank5455@gmail.com |
59f257e74467edf2e02f1c12f63bef4bc528fd7e | 085488720112922ff3aed15f99f3c93911425c4a | /vesper/signal/tests/test_s3_byte_sequence.py | c6f1f484a9b415154c8d517bf998a2ab6d8b4200 | [
"MIT"
] | permissive | HaroldMills/Vesper | 0b61d18bc241af22bfc251088fc87d72add6367b | ec92fe5231f54336499db189a3bbc6cb08a19e61 | refs/heads/master | 2023-07-05T22:45:27.316498 | 2023-07-04T11:58:14 | 2023-07-04T11:58:14 | 19,112,486 | 49 | 6 | MIT | 2023-02-14T16:09:19 | 2014-04-24T14:55:34 | Python | UTF-8 | Python | false | false | 909 | py | import unittest
import warnings
from vesper.signal.tests.byte_sequence_tests import ByteSequenceTests
from vesper.signal.s3_byte_sequence import S3ByteSequence
from vesper.tests.test_case import TestCase
REGION_NAME = 'us-east-2'
BUCKET_NAME = 'vesper-test'
OBJECT_KEY = 'Bytes 00-FF.dat'
OBJECT_LENGTH = 256
# TODO: Look into ResourceWarning issue mentioned below. Is it safe to
# ignore the warnings?
class S3ByteSequenceTests(TestCase, ByteSequenceTests):
    """Runs the shared ByteSequenceTests suite against an S3-backed
    byte sequence (the fixed test object named in the module constants)."""
    @property
    def sequence(self):
        # A fresh S3ByteSequence for the test object on every access.
        return S3ByteSequence(REGION_NAME, BUCKET_NAME, OBJECT_KEY)
    def setUp(self):
        # Without the following, the `S3ByteSequence` unit tests
        # output a ResourceWarning about an unclosed transport to the
        # console.
        warnings.filterwarnings(
            action="ignore", message="unclosed", category=ResourceWarning)
if __name__ == '__main__':
unittest.main()
| [
"harold.mills@gmail.com"
] | harold.mills@gmail.com |
2a2d1d8830e835a1494087e94fb849e401876cc4 | bf21cd0ef7a94fa106ccd9f91a4bbfdcda7f94ed | /python-basic/chapter04/ex01_2.py | 2b0d435813f0cc5b511a07e9e93529dd676c29ef | [] | no_license | juneglee/Deep_Learning | fdf8cae1b962aaa0ce557cb53f78a22b6d5ae1e8 | 17a448cf6a7c5b61b967dd78af3d328d63378205 | refs/heads/master | 2023-07-15T03:02:55.739619 | 2021-08-19T14:04:55 | 2021-08-19T14:04:55 | 273,253,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | # 리스트 연선자 : 연결(+) , 반복(*), len()
# 리스트 연산자
list_a = [1, 2, 3]
list_b = [4, 5, 6]
print("# 리스트")
print("list_a = ", list_a)
print("list_b = ", list_b)
print()
# 기본 연산자 : 연결(+) , 반복(*)
print("# 리스트 기본 연산자")
print("list_a + list_b =", list_a + list_b)
print("list_a * 3 =", list_a * 3)
print()
# 길이 구하기 : len()
print("# 길이 구하기")
print("len(list_a) = ", len(list_a))
| [
"klcpop1@gmail.com"
] | klcpop1@gmail.com |
ac59204e9abd021cf11fc1a250525dc4f0c141d8 | 2be1efe0bc2d4e0a3a98936ad203a1d7080b5136 | /internal software/test/test.py | c3bf5baf43199d9d1a38f7c0152a5c8b235c944e | [] | no_license | LC-Sat/internal-software | 06433bf58a27dca7613c01df3ae0419a18eb2542 | 2e74914be1b1fd3fccdffc60adc9b4cdf7247095 | refs/heads/main | 2023-08-13T13:15:25.307593 | 2021-10-16T13:51:46 | 2021-10-16T13:51:46 | 417,841,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | import unittest
import os
import sys
from unittest.mock import patch
import threading
mock_dir = os.path.join(os.path.dirname(__file__), "mock_libs")
assert(os.path.exists(mock_dir))
sys.path.insert(0, mock_dir)
from src import cansat
class FakeSat(threading.Thread):
    """Background thread that builds a cansat.Cansat and runs its main loop."""
    def __init__(self, mode=cansat.MODE_DELAY):
        # Construct the satellite with a fixed test key, then start the
        # thread immediately (run() is entered via start()).
        self.mode = mode
        super(FakeSat, self).__init__()
        self.sat = cansat.Cansat(key="123456", mode=self.mode)
        self.start()
    def run(self):
        # Thread body: hand control to the satellite's main loop.
        self.sat.main()
    def set_wifi(self, wifi):
        # Set the cansat module-level WIFI flag.
        cansat.WIFI = wifi
def wifi_switch():
    """Interactive helper: toggle cansat.WIFI each time Enter is pressed.

    Loops until interrupted (Ctrl-C / InterruptedError), which it absorbs.
    """
    try:
        while 1:
            print("Wifi switch :", cansat.WIFI)
            input()
            cansat.WIFI = not cansat.WIFI
    except (InterruptedError, KeyboardInterrupt):
        pass
if __name__ == "__main__":
th = FakeSat(mode=cansat.MODE_MANUAL)
| [
"noreply@github.com"
] | noreply@github.com |
e955c3e29e09ef198d1c10665adeac55d4a8f4db | 337b532bad7432f9719735f2e3cd305cff6264b3 | /sina/sina/spiders/sinaguider.py | c84f94835d5e7c6bbdddbd7909bf8d20a5056da1 | [] | no_license | Mr-chen-Linker/douban | 279e13fdb55f75f868bf4cf77f32af3c06f85f75 | bfaba89fe0d8f03db222db019e28de776ca3cb3d | refs/heads/master | 2020-03-18T14:18:45.907343 | 2018-05-25T10:36:33 | 2018-05-25T10:36:33 | 134,841,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,490 | py | # -*- coding: utf-8 -*-
import scrapy
import os
from ..items import SinaItem
class SinaguiderSpider(scrapy.Spider):
    """Crawls the sina news guide page, mirrors its category/subcategory
    structure as local directories, then scrapes article pages."""
    name = 'sinaguider'
    allowed_domains = ['news.sina.com.cn']
    start_urls = ['http://news.sina.com.cn/guide/']
    def parse(self, response):
        # Temporary holding list for the items built below
        items = []
        # Parent category titles
        parentTitel = response.xpath("//div[@id='tab01']//h3/a/text()").extract()
        # Link of each parent category
        parentUrls = response.xpath("//div[@id='tab01']//h3/a/@href").extract()
        # Subcategory titles
        subTitel = response.xpath("//div[@id='tab01']//ul/li/a/text()").extract()
        # Subcategory urls
        subUrls = response.xpath("//div[@id='tab01']//ul/li/a/@href").extract()
        # Walk every parent category, handling its url and title -- mainly so
        # posts can be saved under a folder named after the title
        for i in range(0, len(parentTitel)):
            # Directory path/name for this parent category (hard-coded root)
            parentfilename = r"D:/DATA/python/Scrapy_Spiser/douban/douban/data/" + parentTitel[i]
            print(parentfilename)
            # Create the directory if it does not already exist
            if (not os.path.exists(parentfilename)):
                os.makedirs(parentfilename)
            # Walk every subcategory
            for j in range(len(subTitel)):
                item = SinaItem()
                # Record the parent category's title and url
                item["parentTitel"] = parentTitel[i]
                item["parentUrls"] = parentUrls[i]
                # A subcategory belongs to this parent when its url starts
                # with the parent's url
                if_belong = subUrls[j].startswith(item["parentUrls"])
                # If the subcategory belongs to the parent
                if if_belong:
                    subFilename = parentfilename + "/" + subTitel[j]
                    if (not os.path.exists(subFilename)):
                        os.makedirs(subFilename)
                    # Record the subcategory's title, url and directory path
                    item["subTitel"] = subTitel[j]
                    item["subUrls"] = subUrls[j]
                    item["subFilename"] = subFilename
                    items.append(item)
        # Request each subcategory url out here so the parent data travels
        # along; the Response plus the meta data goes to second_parse
        for item in items:
            yield scrapy.Request(item["subUrls"], meta={"meta_1": item}, callback=self.second_parse)
    def second_parse(self, response):
        '''Handles the responses of the subcategory link requests'''
        # Item data passed along with the original request
        meta_1 = response.meta["meta_1"]
        # Every article link found in the subcategory page
        sonUrls = response.xpath('//a/@href').extract()
        items = []
        for i in range(0, len(sonUrls)):
            # Drop invalid links: must start with the parent url and end
            # with .shtml
            if_belong = sonUrls[i].endswith(".shtml") and sonUrls[i].startswith(meta_1["parentUrls"])
            if if_belong:
                item = SinaItem()
                item["parentTitel"] = meta_1["parentTitel"]
                item["parentUrls"] = meta_1["parentUrls"]
                item["subTitel"] = meta_1["subTitel"]
                item["subUrls"] = meta_1["subUrls"]
                item["subFilename"] = meta_1["subFilename"]
                item["sonUrls"] = sonUrls[i]
                items.append(item)
        # Request every article link of this subcategory, passing the item
        # data along; content_parse handles the response
        for item in items:
            yield scrapy.Request(item["sonUrls"], meta={"meta_2": item}, callback=self.content_parse)
    def content_parse(self, response):
        '''Extracts each article's title and body'''
        # Item data passed along with the request
        item = response.meta["meta_2"]
        # Article title
        # NOTE(review): this is a selector, not extracted text --
        # .extract_first() is probably intended; confirm downstream usage.
        title = response.xpath("//h1[@class='main-title']/text()")
        content_list = response.xpath("//div[@class='article']/p/text()").extract()
        content = ""
        # Concatenate the paragraph list into a single string
        for content_one in content_list:
            content += content_one
        item["title"] = title
        item["content"] = content
        yield item
| [
"39607153+Mr-chen-Linker@users.noreply.github.com"
] | 39607153+Mr-chen-Linker@users.noreply.github.com |
5af4b4a8df574aaa5ac607d9afdff9e1919791bb | 69528c8872d4b67fd5a3ee4206a48deae7a6113c | /HaploManager/find_misassemblies.py | 0f05d1814aa0e9f53d5fbc50f294286fa8159774 | [] | no_license | Firefly007-2018/scripts | 60692ad7dfc450a79edf18f0947d13519595764c | 686a3148169cfd2ef47ffe87709d93e2db6d73d5 | refs/heads/master | 2020-12-04T08:18:54.078319 | 2018-06-19T10:03:25 | 2018-06-19T10:03:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,156 | py | #!/usr/bin/python3 -u
import os
import sys
import pathlib
import argparse
import re
import string
import sqlite3 as sql
from pprint import pprint
from Bio import SeqIO
from collections import defaultdict, namedtuple
from operator import itemgetter
from termcolor import colored
Gap = namedtuple('Gap', 'scaffold start end')
Marker = namedtuple('Marker', 'maternal paternal intercross')
Gene = namedtuple('Gene', 'start end attributes')
Portion = namedtuple('Portion', 'scaffold start end strand length')
class MapPart:
    """One mapped piece of a scaffold: an inclusive coordinate range tagged
    with the chromosome / cM position it maps to and a part type."""

    def __init__(self, scaffold, start, end, chromosome, cm, parttype, comment=""):
        self.scaffold = scaffold
        self.start, self.end = int(start), int(end)
        self.chromosome = chromosome
        self.cm = cm
        self.parttype = parttype
        self.comment = comment

    @property
    def length(self):
        # Coordinates are inclusive, hence the +1.
        return 1 + self.end - self.start

    def __repr__(self):
        fields = (self.scaffold, self.start, self.end, self.length,
                  self.chromosome, self.cm, self.parttype, self.comment)
        return '{}:{:>8}-{:>8} {:>8}bp\t{}\t{}\t{}\t{}'.format(*fields)
class Links:
    """Neighbour pointers for a cM position; -1 means "no neighbour"."""

    def __init__(self):
        self.next_cm = self.prev_cm = -1
class Correction:
    """A manual correction record for a scaffold region (breaktype plus
    optional free-text details)."""

    def __init__(self, scaffold, start, end, breaktype, details=""):
        self.scaffold = scaffold
        self.start = int(start)
        self.end = int(end)
        self.breaktype = breaktype
        self.details = details

    def __repr__(self):
        columns = (self.scaffold, self.start, self.end, self.breaktype, self.details)
        return '\t'.join(str(col) for col in columns)
def load_corrections(correctfile, scaffolds):
    """Load manual correction records into {scaffold: {start: Correction}}.

    An 'End' end coordinate is resolved to the last AGP part's end for
    that scaffold (via `scaffolds`).  Exits on an unreadable file.
    NOTE(review): a falsy filename returns a list, other paths a dict --
    confirm callers handle both.
    """
    if not correctfile:
        return []
    # NOTE(review): Correction as a default_factory would fail if ever
    # invoked (Correction() needs arguments); entries are only assigned
    # explicitly below, so the factory is never actually called.
    corrections = defaultdict(lambda:defaultdict(Correction))
    try:
        with open(correctfile, 'r') as c:
            for line in c:
                mode, scaffold, start, end, breaktype, *args = line.rstrip().split('\t')
                details = ""
                if args:
                    details = args[0]
                if end == 'End':
                    # Resolve 'End' to the end of the scaffold's last AGP part.
                    end = scaffolds[scaffold][max(scaffolds[scaffold])].end
                corrections[scaffold][int(start)] = Correction(scaffold, start, end, breaktype, details)
        return corrections
    except IOError:
        print("Can't open corrections file", correctfile)
        sys.exit()
class AGP:
    """A single AGP component: an inclusive coordinate range on a scaffold
    plus a part identifier and part type."""

    def __init__(self, scaffold, start, end, part, parttype):
        self.scaffold = scaffold
        self.start = int(start)
        self.end = int(end)
        self.part = part
        self.parttype = parttype

    @property
    def length(self):
        # Inclusive range length.
        return self.end - self.start + 1

    def __repr__(self):
        return '\t'.join(map(str, (self.scaffold, self.start, self.end,
                                   self.part, self.parttype, self.length)))
def load_agp(agp):
    """Load an AGP file.

    Returns (scaffolds, broken_scaffolds): scaffolds maps scaffold ->
    {start: AGP}; broken_scaffolds maps an original scaffold name (taken
    from a trailing 'old:oldstart:oldend:oldpart' column) to the new
    scaffolds derived from it, each with a part count.  Exits if the file
    can't be opened.
    """
    scaffolds = defaultdict(lambda: defaultdict(AGP))
    broken_scaffolds = {}
    try:
        with open(agp, 'r') as a:
            for line in a:
                scaffold, start, end, part, parttype, *args = line.rstrip().split('\t')
                if ':' in args[-1]:
                    # Trailing provenance column: old_scaffold:start:end:part.
                    old_scaffold, oldstart, oldend, oldpart = args[-1].split(':')
                    if old_scaffold not in broken_scaffolds:
                        broken_scaffolds[old_scaffold] = {}
                    if scaffold not in broken_scaffolds[old_scaffold]:
                        broken_scaffolds[old_scaffold][scaffold] = 0
                    broken_scaffolds[old_scaffold][scaffold] += 1
                scaffolds[scaffold][int(start)] = AGP(scaffold, start, end, part, parttype)
        return scaffolds, broken_scaffolds
    except IOError:
        print("Can't open AGP file", agp)
        sys.exit()
Haplotype = namedtuple("Haplotype", "hapname hapstart hapend hapstrand name start end strand")
class Merge:
    """A merged-scaffold mapping: coordinates on the old scaffold mapped to
    coordinates on the new one, optionally annotated with a haplotype."""

    def __init__(self, scaffold, start, end, new, newstart, newend, strand, parttype, hap=None):
        self.scaffold = scaffold
        self.start, self.end = int(start), int(end)
        self.new = new
        self.newstart, self.newend = int(newstart), int(newend)
        self.strand = strand
        self.parttype = parttype
        # Only the first four haplotype fields are supplied; the remaining
        # Haplotype slots stay None.  A falsy hap means no annotation.
        self.hap = Haplotype(hap[0], hap[1], hap[2], hap[3],
                             None, None, None, None) if hap else None

    def __repr__(self):
        text = '{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}'.format(
            self.scaffold, self.start, self.end, self.new,
            self.newstart, self.newend, self.strand, self.parttype)
        if self.hap:
            text += '\t{}\t{}\t{}\t{}'.format(
                self.hap.hapname, self.hap.hapstart,
                self.hap.hapend, self.hap.hapstrand)
        return text
def load_merged(merged):
    """Load a merged-scaffold TSV into {scaffold: {start: Merge}}.

    Columns beyond the eighth are handed to Merge as haplotype fields.
    Exits if the file can't be opened.
    """
    merged_scaffolds = defaultdict(lambda: defaultdict(Merge))
    try:
        with open(merged, 'r') as m:
            for line in m:
                scaffold, start, end, new, newstart, newend, strand, parttype, *args = line.rstrip().split('\t')
                merged_scaffolds[scaffold][int(start)] = Merge(scaffold, start, end, new, newstart, newend, strand, parttype, args)
        return merged_scaffolds
    except IOError:
        print("Can't open merged TSV file", merged)
        sys.exit()
def load_gff(gff):
    """Parse gene features from a GFF file into {scaffold: [Gene, ...]}.

    Only 'gene' feature rows are kept.  A falsy path, and a file that
    can't be read (reported, not raised), both yield an empty mapping.
    """
    genes = defaultdict(list)
    if not gff:
        return genes
    try:
        with open(gff) as g:
            for line in g:
                if line.startswith('#'):
                    continue
                f = line.rstrip().split('\t')
                scaffold, source, featuretype, start, end, score, strand, phase, attributes = f
                if featuretype == 'gene':
                    genes[scaffold].append(Gene(int(start), int(end), attributes))
    except IOError:
        print("Failed to load GFF file {}".format(gff))
    return genes
def open_input_database(database):
    """Open an existing SQLite database and return (connection, cursor).

    The explicit existence check matters because sqlite3.connect would
    silently create a new empty database file.  Any failure prints a
    message and exits the program with status 1.
    """
    try:
        if not os.path.exists(database):
            raise IOError
        conn = sql.connect(database)
        cursor = conn.cursor()
        return conn, cursor
    except IOError:
        print("Can't open database {}".format(database))
        sys.exit(1)
    except sql.Error:
        print("SQL error")
        sys.exit(1)
def load_linkage_map(database):
    """Load the scaffold_map table from a linkage-map SQLite database.

    Returns (genome, linkage_map): genome maps scaffold ->
    [MapPart, ...] in position order; linkage_map maps chromosome ->
    {cm: Links()} for every mapped (cm != -1) position.
    """
    conn_in, ci = open_input_database(database)
    genome = {}
    linkage_map = {}
    for chromosome, cm, scaffold, start, end, length, parttype, comment in ci.execute('select * from scaffold_map order by scaffold, start'):
        if not scaffold in genome:
            genome[scaffold] = []
        genome[scaffold].append(MapPart(scaffold, start, end, chromosome, cm, parttype, comment))
        if not chromosome in linkage_map:
            linkage_map[chromosome] = {}
        if cm != -1 and not cm in linkage_map[chromosome]:
            linkage_map[chromosome][cm] = Links()
    return genome, linkage_map
def merge_broken(broken_scaffolds, genome, corrections, scaffolds):
    """Report scaffolds broken into multiple new scaffolds, cross-checking
    the linkage map, manual corrections and AGP parts (stdout report,
    colourised with termcolor)."""
    broken = 0  # number of multi-piece broken scaffolds seen
    for old in sorted(broken_scaffolds):
        if len(broken_scaffolds[old]) > 1:
            broken += 1
            # Is at least one of the pieces present in the linkage map?
            carryon = False
            for new in broken_scaffolds[old]:
                if new in genome:
                    carryon = True
            # NOTE(review): this `return` aborts the whole report at the
            # first group with no mapped piece; `continue` may have been
            # intended -- confirm.
            if not carryon:
                return
            print(old)
            for new in sorted(broken_scaffolds[old]):
                print("\t", new)
                if new in genome:
                    for p in genome[new]:
                        print("\t\t", p)
                        # Highlight corrections overlapping this part start.
                        if new in corrections and p.start in corrections[new]:
                            print(colored("\t\t\t" + repr(corrections[new][p.start]), 'red', attrs=['bold']))
                else:
                    # Piece absent from the map: show its AGP parts instead.
                    print("\t\tMissing!")
                    for start in sorted(scaffolds[new]):
                        print(colored("\t\t\t" + repr(scaffolds[new][start]), 'blue', attrs=['bold']))
            for new in sorted(broken_scaffolds[old]):
                if new in corrections:
                    for start in corrections[new]:
                        print("\t\t", corrections[new][start])
def set_up_links(linkage_map):
    """Chain each chromosome's cM positions together in sorted order.

    For every adjacent pair of cM keys, point the earlier entry's next_cm
    forward and the later entry's prev_cm back; the two endpoints keep
    their existing sentinel values.
    """
    for chromosome, cm_links in linkage_map.items():
        ordered = sorted(cm_links)
        for earlier, later in zip(ordered, ordered[1:]):
            cm_links[later].prev_cm = earlier
            cm_links[earlier].next_cm = later
class Part:
    """One piece of an HM new scaffold, pairing the active portion (the
    sequence that was kept) with its passive counterpart."""

    def __init__(self, scaffold, start, end, active, passive):
        self.scaffold = scaffold
        self.start = start
        self.end = end
        self.active = active
        self.passive = passive

    @staticmethod
    def _portion_text(portion):
        # Tab-separated summary of one portion's fields.
        return "{}\t{}\t{}\t{}\t{}".format(portion.scaffold, portion.start,
                                           portion.end, portion.strand,
                                           portion.length)

    def __repr__(self):
        return "{}\t{}\t{}\t{}\t{}".format(self.scaffold, self.start, self.end,
                                           self._portion_text(self.active),
                                           self._portion_text(self.passive))
def load_hm_new_scaffolds(pacbio):
    """Load HaploMerger/PacBio new-scaffold definitions from a TSV file.

    Returns a defaultdict mapping new scaffold number -> list of Part,
    where each Part carries cumulative coordinates on the new scaffold and
    the active/passive Portion objects parsed from fixed columns of the
    input line.  Returns an empty mapping when no path is given; exits the
    process if the file cannot be opened.
    """
    new_scaffolds = defaultdict(list)
    if not pacbio:
        return new_scaffolds
    try:
        with open(pacbio, 'r') as pb:
            # curid starts as int -1 (sentinel), then holds the str scaffold
            # id from column 0 — the first `curid == -1` test only matches
            # the sentinel because str never equals int.
            curid = -1
            scfnum = 0
            part_start = 1
            for line in pb:
                # Skip comment and blank lines.
                if line.startswith(("#","\n")):
                    continue
                f = line.rstrip().split('\t')
                if curid == -1:
                    curid = f[0]
                # A new input scaffold id starts the next output scaffold.
                if f[0] != curid:
                    scfnum += 1
                    curid = f[0]
                    part_start = 1
                # Column -2 says which portion is active; '0' defers to column 4.
                active_portion = f[-2] if f[-2] != '0' else f[4]
                if active_portion not in ['1', '2']:
                    print("Invalid active portion!")
                    print(line)
                # Portion(scaffold, start, end, strand, length) from fixed columns.
                portion1 = Portion(f[5], int(f[8]), int(f[9]), int(f[10]), int(f[11]))
                portion2 = Portion(f[12], int(f[15]), int(f[16]), int(f[17]), int(f[18]))
                if active_portion == '1':
                    active, passive = portion1, portion2
                else:
                    active, passive = portion2, portion1
                # The active portion's length advances the running coordinate.
                part_end = part_start + active.length - 1
                new_scaffolds[scfnum].append(Part(scfnum, part_start, part_end, active, passive))
                part_start = part_end + 1
        return new_scaffolds
    except IOError:
        print("Cannot open PacBio new scaffolds file!", pacbio)
        sys.exit()
def get_linkage_trio(parts, i):
    """Collect the next three distinct mapped (chromosome, cm) pairs after index i.

    Walks ``parts[i+1:]``, skipping unmapped entries (cm == -1) and entries
    whose cm merely repeats the previously collected pair's cm.  Returns the
    three pairs in order, or an empty list when fewer than three exist.
    """
    trio = []
    for candidate in parts[i + 1:]:
        if len(trio) == 3:
            break
        if candidate.cm == -1:
            continue
        if trio and candidate.cm == trio[-1][1]:
            continue
        trio.append((candidate.chromosome, candidate.cm))
    return trio if len(trio) == 3 else []
def find_linkage_errors(scaffold, genome, linkage_map):
    """Print any A-B-A linkage pattern along a scaffold's map parts.

    A trio whose first and third (chromosome, cm) pairs match while the
    middle one differs suggests a marker out of order.  Scanning stops at
    the first position lacking a complete trio.  Consecutive identical
    trios are reported once.  ``linkage_map`` is unused but kept so the
    call signature matches the sibling finders.
    """
    parts = genome[scaffold]
    reported = []
    for idx in range(len(parts) - 2):
        trio = get_linkage_trio(parts, idx)
        if not trio:
            return
        sandwiched = trio[0] == trio[2] and trio[0] != trio[1]
        if sandwiched and trio != reported:
            print("Linkage error:", scaffold, trio)
            reported = trio
    return
def find_scaffold_misassemblies(scaffold, genome, linkage_map, corrections):
    """Scan one scaffold for adjacent mapped parts that disagree with the linkage map.

    For each mapped part, looks ahead to the next mapped part (chromosome != 0
    and cm != -1).  The pair is a misassembly candidate when the chromosomes
    differ, or when both cMs are set yet the second is neither the predecessor
    nor the successor of the first on the linkage map.  A candidate is kept
    only if no correction exists for the scaffold, or a correction at the
    second part's start has breaktype '?', 'L', or 'R' with multi-part details.

    :returns: (misparts, misgroups) where misparts maps scaffold -> start ->
        part for every part in a kept group, and misgroups is the list of
        kept part runs (first mapped part through the conflicting one).
    """
    misparts = defaultdict(lambda: defaultdict(MapPart))
    misgroups = []
    i = 0
    while i < len(genome[scaffold])-1:
        pi = genome[scaffold][i]
        if pi.chromosome == 0:
            i += 1
            continue
        j = i + 1
        parts = [pi]
        while j < len(genome[scaffold]):
            pj = genome[scaffold][j]
            parts.append(pj)
            if pj.chromosome != 0 and pj.cm != -1:
                misassembly = False
                if pi.chromosome != pj.chromosome:
                    misassembly = True
                elif pi.cm != -1 and pj.cm != -1 and pi.cm != pj.cm:
                    # Adjacent cMs on the same chromosome must be neighbours
                    # in the sorted linkage order (see set_up_links).
                    lmi = linkage_map[pi.chromosome][pi.cm]
                    if lmi.prev_cm != pj.cm and lmi.next_cm != pj.cm:
                        misassembly = True
                if misassembly:
                    if (scaffold not in corrections or (scaffold in corrections and
                        parts[1].start in corrections[scaffold] and
                        (corrections[scaffold][parts[1].start].breaktype in ['?', 'L'] or
                        corrections[scaffold][parts[1].start].breaktype == 'R' and ',' in corrections[scaffold][parts[1].start].details))):
                        misgroups.append(parts)
                        for p in parts:
                            misparts[p.scaffold][p.start] = p
                # Stop at the first mapped part after pi, misassembled or not.
                break
            j += 1
        i += 1
    return misparts, misgroups
def output_scaffold(parts, misparts):
    """Print every map part of a scaffold, highlighting in bold red any part
    recorded in misparts (keyed by scaffold then start position)."""
    for part in parts:
        flagged = part.scaffold in misparts and part.start in misparts[part.scaffold]
        if flagged:
            print(colored(part, 'red', attrs=['bold']))
        else:
            print(part)
def output_agp_parts(gap, scaffolds):
    """Print the gap header line and the AGP parts fully contained in the gap.

    :returns: the contained parts, in ascending start order.
    """
    print("M\t{}\t{}\t{}\t".format(gap.scaffold, gap.start, gap.end))
    parts_by_start = scaffolds[gap.scaffold]
    contained = []
    for start in sorted(parts_by_start):
        part = parts_by_start[start]
        if start >= gap.start and part.end <= gap.end:
            print(part)
            contained.append(part)
    return contained
def output_merged_parts(gap, merged):
    """Print merged-assembly parts for the gap's scaffold in start order,
    highlighting in bold blue any part that overlaps the gap interval."""
    for start in sorted(merged[gap.scaffold]):
        part = merged[gap.scaffold][start]
        overlaps = (gap.start <= start <= gap.end
                    or gap.start <= part.end <= gap.end)
        if overlaps:
            print(colored(part, 'blue', attrs=['bold']))
        else:
            print(part)
def output_haplotypes(gap, merged, misparts):
    """Print each distinct haplotype part of the merged scaffolds referenced
    by the misassembled parts covering this gap.

    Each mispart's ``comment`` field is tab-separated with the merged
    scaffold name in the first field; parts of that merged scaffold whose
    ``hap`` attribute is truthy are printed once each, in first-seen order.

    Cleanup vs. the previous version: the loop variable ``start`` was
    shadowed twice (by the comment unpack and by the inner loop) and seven
    unpacked fields were unused; only the scaffold name is needed.
    """
    seen = {}      # repr(part) -> 1; deduplicates while preserving order
    haps = []
    for mis_start in sorted(misparts[gap.scaffold]):
        # Only the first tab-separated field (the merged scaffold name) is
        # used; the remaining coordinate fields are ignored.
        merged_scaffold = misparts[gap.scaffold][mis_start].comment.split('\t')[0]
        if merged_scaffold not in merged:
            continue
        for part_start in sorted(merged[merged_scaffold]):
            part = merged[merged_scaffold][part_start]
            if not part.hap:
                continue
            hap = repr(part)
            if hap not in seen:
                seen[hap] = 1
                haps.append(hap)
    for hap in haps:
        print(hap)
def output_genes(gap, genes):
    """Print genes overlapping the gap interval.

    Scaffolds absent from ``genes`` print nothing at all; a scaffold with
    entries but no overlap prints "No genes".  A gene overlaps when its
    start or end falls inside [gap.start, gap.end].
    """
    if gap.scaffold not in genes:
        return
    found = False
    for gene in genes[gap.scaffold]:
        starts_inside = gap.start <= gene.start <= gap.end
        ends_inside = gap.start <= gene.end <= gap.end
        if starts_inside or ends_inside:
            print("Gene", gene.start, gene.end, gene.attributes)
            found = True
    if not found:
        print("No genes")
def get_intercross(maternal, paternal):
    """Derive the intercross pattern from maternal and paternal genotype strings.

    Colour/formatting characters are first stripped from ``paternal``,
    keeping only 'A', 'B' and 'H'.  At each maternal position, a matching
    cleaned paternal call is kept; a mismatch becomes 'H'.  The cleaned
    paternal string is assumed to be at least as long as ``maternal``.
    """
    cleaned = ''.join(ch for ch in paternal if ch in 'ABH')
    return ''.join(
        cleaned[pos] if maternal[pos] == cleaned[pos] else 'H'
        for pos in range(len(maternal))
    )
def load_chromosome_map(ci):
    """Load chromosome marker patterns from the `chromosome_map` table.

    Returns a nested mapping chromosome -> cm -> Marker(maternal, paternal,
    intercross).  Each chromosome also gets a cm == -1 placeholder Marker
    whose paternal/intercross strings are blanks of the maternal's length.

    :param ci: open sqlite3 connection/cursor for the SNP database
    """
    # NOTE(review): `namedtuple` as the inner default factory looks like a
    # leftover — it is never actually invoked because every access assigns
    # explicitly; a plain dict factory would behave the same.
    markers = defaultdict(lambda: defaultdict(namedtuple))
    for chromosome, maternal, cm, paternal in ci.execute('select distinct chromosome, print, cm, clean from chromosome_map order by chromosome, cm'):
        intercross = get_intercross(maternal, paternal)
        markers[chromosome][cm]=Marker(maternal, paternal, intercross)
        # Seed the -1 placeholder once per chromosome, on first sight.
        if -1 not in markers[chromosome]:
            empty = " " * len(maternal)
            markers[chromosome][-1]=Marker(maternal, empty, empty)
    return markers
def print_marker(part, markers):
    """Print a part's maternal, paternal and intercross marker patterns,
    one labelled tab-separated line each, followed by a blank line."""
    chrom, cm = part.chromosome, part.cm
    marker = markers[chrom][cm]
    print("{}\t{}\t{}\tMaternal".format(chrom, cm, marker.maternal))
    print("{}\t{}\t{}\tPaternal".format(chrom, cm, marker.paternal))
    print("{}\t{}\t{}\tIntercross".format(chrom, cm, marker.intercross))
    print()
def output_markers(group, gap, markers, agp_parts, ci):
    """Print the SNP markers lying inside the gap.

    Re-prints the flanking map markers (first and last part of ``group``)
    at the top, whenever the scan crosses an AGP part boundary, and again
    at the bottom.  Does nothing when no SNP database connection is given.

    Cleanup vs. the previous version: two dead locals (``top_marker`` and
    ``bottom_marker``) that were assigned but never used are gone.
    """
    if not ci:
        return
    print_marker(group[0], markers)
    print_marker(group[-1], markers)
    # NOTE(review): values are interpolated straight into the SQL; fine for
    # this trusted local database but not parameterised.
    statement = 'select position, pattern, consensus, marker_type, parent_gt, parent_gqs, parent_dps, mq, fs from markers where scaffold=\"{}\" and position>={} and position<={} order by position'.format(
                gap.scaffold, gap.start-1, gap.end+1)
    current_position = 0
    for position, pattern, consensus, markertype, parent_gt, parent_gqs, parent_dps, mq, fq in ci.execute(statement):
        for part in agp_parts:
            # Crossing into a new AGP part: print its header and the flanks.
            if part.start > current_position and part.end < position:
                print()
                print(part)
                print_marker(group[0], markers)
                print_marker(group[-1], markers)
        current_position = position
        print('\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}'.format(position, pattern, consensus, markertype, parent_gt, parent_gqs, parent_dps, mq, fq))
    print_marker(group[0], markers)
    print_marker(group[-1], markers)
def output_new_scaffolds(g, new_scaffolds):
    """Print break/removal suggestions derived from the new-scaffold parts
    overlapping a gap.

    The gap's scaffold name is expected to match ``(.+)Sc(\\d+)``; the
    numeric suffix selects the entry in ``new_scaffolds`` (as built by
    load_hm_new_scaffolds).  Scaffold names containing 'x' are skipped.
    """
    if 'x' in g.scaffold:
        return
    scaffold_num = -1
    name_match = re.compile(r'(.+)Sc(\d+)').match(g.scaffold)
    if name_match:
        scaffold_num = int(name_match.group(2))
    # Parts of the new scaffold that overlap the gap interval.
    misparts = []
    if scaffold_num in new_scaffolds:
        for part in new_scaffolds[scaffold_num]:
            if g.start <= part.start <= g.end or g.start <= part.end <= g.end:
                misparts.append(part)
    i = 0
    while i < len(misparts)-1:
        # This guard is always true inside the while (i < len-1 implies
        # i <= len-2), so j is always bound before the second pattern below.
        if i <= len(misparts)-2:
            j = i+1
            # Pattern 1: active scaffold changes while the passive scaffold
            # stays the same -> suggest a break ('B') in the passive scaffold.
            if misparts[i].active.scaffold != misparts[j].active.scaffold and misparts[i].passive.scaffold == misparts[j].passive.scaffold:
                print(misparts[i])
                print(misparts[j])
                # Break point depends on the passive portion's orientation.
                breakpoint = misparts[j].passive.end if misparts[j].passive.strand == -1 else misparts[j].passive.start+1
                print("P\t{}\t{}\t{}".format(misparts[j].passive.scaffold, 'B', breakpoint))
        if i <= len(misparts)-3:
            k = i+2
            # Pattern 2: middle part's active scaffold sandwiched between two
            # parts passive on the same scaffold -> suggest a removal ('R')
            # of the middle part's active region.
            if misparts[i].passive.scaffold == misparts[j].active.scaffold == misparts[k].passive.scaffold:
                print(misparts[i])
                print(misparts[j])
                print(misparts[k])
                print("P\t{}\t{}\t{}-{}".format(misparts[j].active.scaffold, 'R', misparts[j].active.start+1, misparts[j].active.end))
        i += 1
def find_misassemblies(genome, linkage_map, scaffolds, merged, genes, corrections, snps, new_scaffolds):
    """Drive the misassembly report over every scaffold in the genome.

    For each scaffold: report linkage (A-B-A) errors, then locate
    misassembled part groups; for every group print the scaffold map,
    the gap's AGP parts, merged parts, haplotypes, genes, SNP markers
    (when a SNP database path is given) and new-scaffold suggestions,
    separated by a divider line.
    """
    if snps:
        snpdb, snpio = open_input_database(snps)
        markers = load_chromosome_map(snpio)
    else:
        snpio = None
        markers = []
    for scaffold in sorted(genome):
        find_linkage_errors(scaffold, genome, linkage_map)
        misparts, misgroups = find_scaffold_misassemblies(scaffold, genome, linkage_map, corrections)
        if misgroups:
            output_scaffold(genome[scaffold], misparts)
            for group in misgroups:
                # If the second part directly abuts the second-to-last, the
                # gap is just the first two parts; otherwise span the group.
                if group[1].start == group[-2].end + 1:
                    g = Gap(scaffold, group[0].start, group[1].end)
                else:
                    g = Gap(scaffold, group[0].start, group[-1].end)
                agp_parts = output_agp_parts(g, scaffolds)
                output_merged_parts(g, merged)
                output_haplotypes(g, merged, misparts)
                output_genes(g, genes)
                output_markers(group, g, markers, agp_parts, snpio)
                output_new_scaffolds(g, new_scaffolds)
                print("-------")
def get_args():
    """Parse command-line options for the misassembly report script.

    database (-d), agp (-a) and merged (-m) are mandatory; corrections,
    gff, snps and pacbio are optional and default to None.
    """
    parser = argparse.ArgumentParser(description='''Output new database containing old linkage information on new HaploMerger output
    -d database
    -s snps
    -a AGP
    -m merged
    -g gff
    -c corrections
    -p pacbio_new_scaffolds
    ''')
    mandatory = {'database', 'agp', 'merged'}
    for short, name in (('-d', 'database'), ('-a', 'agp'), ('-m', 'merged'),
                        ('-c', 'corrections'), ('-g', 'gff'), ('-s', 'snps'),
                        ('-p', 'pacbio')):
        parser.add_argument(short, '--' + name, type=str,
                            required=name in mandatory)
    return parser.parse_args()
if __name__ == '__main__':
    # Parse options, load every input layer, link the map, then report.
    args = get_args()
    # AGP: per-scaffold parts plus old->new broken-scaffold mapping.
    scaffolds, broken_scaffolds = load_agp(args.agp)
    corrections = load_corrections(args.corrections, scaffolds)
    genes = load_gff(args.gff)
    merged = load_merged(args.merged)
    genome, linkage_map = load_linkage_map(args.database)
    # Diagnostic report on scaffolds broken across the new assembly.
    merge_broken(broken_scaffolds, genome, corrections, scaffolds)
    # Wire prev_cm/next_cm neighbours before misassembly checks rely on them.
    set_up_links(linkage_map)
    new_scaffolds = load_hm_new_scaffolds(args.pacbio)
    find_misassemblies(genome, linkage_map, scaffolds, merged, genes, corrections, args.snps, new_scaffolds)
"johnomics@gmail.com"
] | johnomics@gmail.com |
d572a7d14711cfe9547737215020f5f428c3d827 | 2e359b259d3071a97930a1944cb6b7ff336fc369 | /account/views.py | 5b89b2f62c061a1f92ec28b30b5bca035fce1d52 | [] | no_license | skalizzo/Django_Social_Media_Project | bb4171ad3e7b1968361301d0c87cf4c0ac8e9f4e | 18d3695cadb96fc54baeededa8e965e001d7f583 | refs/heads/master | 2023-02-20T18:15:19.199821 | 2021-01-27T20:19:59 | 2021-01-27T20:19:59 | 333,534,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,131 | py | from django.http import HttpResponse
from django.shortcuts import render
from django.contrib.auth import authenticate, login
from .forms import LoginForm, UserRegistrationForm, UserEditForm, ProfileEditForm
from django.contrib.auth.decorators import login_required
from .models import Profile
from django.contrib import messages
def user_login(request):
    """Log a user in from the login form.

    GET renders an empty form; POST authenticates the submitted credentials
    and reports the outcome as a plain response.  An invalid form falls
    through to re-render the login page with its errors.
    """
    if request.method != 'POST':
        return render(request, 'account/login.html', {'form': LoginForm()})
    form = LoginForm(request.POST)
    if form.is_valid():
        cd = form.cleaned_data
        user = authenticate(request,
                            username=cd['username'],
                            password=cd['password'])
        if user is None:
            return HttpResponse('Invalid login')
        if not user.is_active:
            return HttpResponse('Disabled account')
        login(request, user)
        return HttpResponse('Authenticated successfully')
    return render(request, 'account/login.html', {'form': form})
@login_required
def dashboard(request):
    """Render the dashboard page for the signed-in user."""
    context = {'section': 'dashboard'}
    return render(request, 'account/dashboard.html', context)
def register(request):
    """Sign up a new user and create their empty profile.

    A valid POST saves the user with a properly hashed password and shows
    the registration-done page; otherwise the (possibly bound) form is
    rendered again.
    """
    if request.method == 'POST':
        user_form = UserRegistrationForm(request.POST)
        if user_form.is_valid():
            # Build the user object without hitting the database yet so the
            # password can be hashed before the first save.
            new_user = user_form.save(commit=False)
            new_user.set_password(user_form.cleaned_data['password'])
            new_user.save()
            # Every user gets an (initially empty) profile row.
            Profile.objects.create(user=new_user)
            return render(request,
                          'account/register_done.html',
                          {'new_user': new_user})
    else:
        user_form = UserRegistrationForm()
    return render(request,
                  'account/register.html',
                  {'user_form': user_form})
@login_required
def edit(request):
    """Let the signed-in user edit their account and profile details.

    POST validates and saves both forms together, flashing a success or
    error message; GET pre-fills them from the current user and profile.
    """
    if request.method == 'POST':
        user_form = UserEditForm(instance=request.user, data=request.POST)
        profile_form = ProfileEditForm(instance=request.user.profile,
                                       data=request.POST,
                                       files=request.FILES)
        if user_form.is_valid() and profile_form.is_valid():
            user_form.save()
            profile_form.save()
            messages.success(request, 'Profile updated successfully')
        else:
            messages.error(request, 'Error updating your profile')
    else:
        user_form = UserEditForm(instance=request.user)
        profile_form = ProfileEditForm(instance=request.user.profile)
    return render(request,
                  'account/edit.html',
                  {'user_form': user_form,
                   'profile_form': profile_form})
| [
"christian00richter@gmail.com"
] | christian00richter@gmail.com |
4d4579bfc8296b07953246830efeaecae7e6e0ef | c5d15be7f68e8120c7c710b090df21d6f50bf48e | /First Partial/Ejer/Ej2_A01630738.py | 117270a27a76bf54dce99c99ec1610b492222ad1 | [] | no_license | arath11/Fundamentos_programacion_PYTHON | 13f2fa15f3f854ab880ab463f865c656c02384a0 | 4f426f4e29a1d101005e0a038290f9a2e98b7306 | refs/heads/master | 2020-04-23T22:24:28.149248 | 2019-02-19T16:18:52 | 2019-02-19T16:18:52 | 171,500,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | #Julio Arath Rosales Oliden
#A01630738
import math

# Read the triangle side length and the prism height from the user.
side_length = float(input("Inserta la medida del lado L \n"))
height = float(input("Inserta la altura del prisma \n"))

# Surface area of a regular triangular prism: two equilateral-triangle
# bases plus three rectangular side faces.
base_area = (math.sqrt(3) / 4) * side_length ** 2
face_area = height * side_length
total_area = 2 * base_area + 3 * face_area

print("El area de todo el prisma triangular regular es " + str(total_area) + " cm.")
# Reference: http://www.universoformulas.com/matematicas/geometria/area-prisma-triangular/
"noreply@github.com"
] | noreply@github.com |
6aa411c0ab67854ce1c59337301b2e5b1d25c752 | c918d30d0d026a35acae0253cc4c545cdea90e8e | /python_exercise_classification.py | ac58aeacc654208e33eef82a6d9d99c6d37df4b4 | [] | no_license | zhangwen464/pytorch_studying | d1f397bb29cd2c2af55b1f64cdb37e9ecc668315 | e56566e643222f74b86a212970143c68f0ae2b49 | refs/heads/master | 2020-03-30T00:18:08.446860 | 2018-09-27T02:14:27 | 2018-09-27T02:14:27 | 150,515,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,783 | py | # author :momo
#time :2018.09.20
# classification network
#prepare training data
import torch
import matplotlib.pyplot as plt
import torch.nn.functional as F
from torch.autograd import Variable
n_data=torch.ones(100,2)             # base tensor: 100 samples, 2 features
x0=torch.normal(2*n_data,1)          # class 0: x data (tensor), shape=(100,2), centred at (2,2)
y0=torch.zeros(100)                  # class 0: y labels (tensor), shape=(100,)
x1=torch.normal(-2*n_data,1)         # class 1: x data (tensor), shape=(100,2), centred at (-2,-2)
y1=torch.ones(100)                   # class 1: y labels (tensor), shape=(100,)
# Inputs must be FloatTensor and class labels LongTensor for CrossEntropyLoss.
x=torch.cat((x0,x1),0).type(torch.FloatTensor)
y=torch.cat((y0,y1),).type(torch.LongTensor)
# Old-style PyTorch: wrap tensors in Variable before training.
x,y=Variable(x),Variable(y)
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
plt.show()
#plt.scatter(x.data.numpy(),y.data.numpy())
#plt.show()
class Net(torch.nn.Module):
    """Two-layer classifier: a ReLU hidden layer feeding a linear output.

    The forward pass returns raw scores (logits); apply softmax outside
    the network to obtain class probabilities.
    """

    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)
        self.out = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        # Hidden linear layer + ReLU activation, then the output layer.
        activated = F.relu(self.hidden(x))
        return self.out(activated)
net=Net(n_feature=2,n_hidden=10,n_output=2) # n_feature = input dimensionality; n_output = number of classes
# print (net)
optimizer=torch.optim.SGD(net.parameters(),lr=0.02) # pass all of net's parameters; set the learning rate
# When computing the loss, note the targets are NOT one-hot: they are a
# 1-D LongTensor of class indices, shape (batch,).
# The predictions, however, are a 2-D tensor of shape (batch, n_classes).
loss_func=torch.nn.CrossEntropyLoss()
plt.ion() # interactive plotting
plt.show()
for t in range(100):
    out=net(x)                  # feed training data x; get raw class scores
    loss=loss_func(out,y)
    optimizer.zero_grad()       # clear gradients left over from the previous step
    loss.backward()             # backpropagate; compute parameter gradients
    optimizer.step()            # apply the gradient update to net's parameters
    if t%2==0:
        plt.cla()
        # The predicted class is the argmax AFTER the softmax activation.
        prediction = torch.max(F.softmax(out),1)[1] # index of the max along dim 1: 0 or 1
        pred_y = prediction.data.numpy().squeeze()
        target_y = y.data.numpy() # reference labels y
        plt.scatter(x.data.numpy()[:,0],x.data.numpy()[:,1],c=pred_y,s=100,lw=0,cmap='RdYlGn')
        # 200 is the total sample count (100 per class) hard-coded above.
        accuracy=sum(pred_y==target_y)/200
        plt.text(1.5,-4,'Accuracy=%.2f' % accuracy,fontdict={'size':20,'color':'red'})
        plt.pause(0.2)
plt.ioff()
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
a19d147dc3ac0dc1389f80f703d9bfdb9880730f | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/281/66059/submittedfiles/testes.py | 4f5702282440b083bcb9303e18bb7339f798841e | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
print("Hello Word")
print("olá Mundo") | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
39a8bcf785f7ae33a4d0d62d9cb566110b1b5440 | a0fc566d29d8429f9ecc765e59d8d3718f6b60a1 | /Python Core/2-6.py | 92840063b942019d67d2fe87ea25b90193049d7d | [] | no_license | muyicui/pylearn | 0f3ca863cf2629ea246271f81ea2f43a9cbd4dff | 1130bc57f629c31994709ba6bfe176ab220f30c1 | refs/heads/master | 2021-01-10T06:49:48.457891 | 2019-03-15T02:26:20 | 2019-03-15T02:26:20 | 54,449,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | #!/usr/bin/env python
# Python 2 script: classify a user-entered integer as positive, zero or negative.
num = int(raw_input('please enter a number:'))
if num > 0:
    print '%d is positive' % (num)
elif num == 0:
    print '%d is zero' % (num)
else:
    print '%d is negative' % (num)
| [
"muyi.cui@outlook.com"
] | muyi.cui@outlook.com |
95dda09cdcc2204cf56cce764f5d795178e6963d | d2f3de8bd68260f4e333be3b812a627f7f2ec10a | /course-65389-DL2020advanced/lesson-08-step-02-pytorch-autograd.py | e98504e22899d5c0e48d9b2e93dffe2f3c206c3e | [] | no_license | sidorkinandrew/stepik | 2b915e1a959d98fd338372ba88d231323a905da5 | 3513846036df036d3ffdea15ad0d7a856a3ec77f | refs/heads/master | 2021-11-23T02:09:18.265495 | 2021-11-20T23:24:36 | 2021-11-20T23:24:36 | 245,150,396 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | from matplotlib import pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
import torch
def find_x_derivative(x, y):
    """Return dz/dx at the given point via torch autograd, where
    z = sin(tan(x)*x^2/y + log(exp(-x^2+3) + x^3*y)) * tan(x^2*exp(x^9)).
    y is treated as a constant (no gradient is tracked for it).
    """
    x_t = torch.tensor([x], dtype=float, requires_grad=True)
    y_t = torch.tensor([y], dtype=float)
    inner = torch.tan(x_t) * (x_t * x_t / y_t) + torch.log(torch.exp(-x_t * x_t + 3) + x_t * x_t * x_t * y_t)
    z = torch.sin(inner) * torch.tan(x_t * x_t * torch.exp(x_t ** 9))
    z.backward()
    return x_t.grad
find_x_derivative(1, 21) | [
"sidorkina.oksana.19@gmail.com"
] | sidorkina.oksana.19@gmail.com |
31ed4f5f60a390cfa304795d62ffcae162a4cedf | 5eb953194b0dc0d95c9f557a6cc32ab70bf18a26 | /calc2.py | ef03a6646529d3580b98c7a2a0051289aec41202 | [
"MIT"
] | permissive | voodoopeople42/Vproject | 786543460ebe298b0881dbec7b26db98699ee1d7 | 349e80a0d3cfd590cb9dbe667acfbdb7393308e3 | refs/heads/master | 2021-06-25T04:55:23.694415 | 2019-11-29T12:05:14 | 2019-11-29T12:05:14 | 217,142,513 | 0 | 0 | MIT | 2021-03-20T02:23:28 | 2019-10-23T19:58:37 | Python | UTF-8 | Python | false | false | 404 | py | import math
# Tiny interactive calculator: apply cos, sin, log or sqrt to one integer.
what = input ( "what u want? (cos, sin, log, sqrt): " )
a = int(input("Input a number: "))
if what == "cos":
    a = math.cos (a)
    print ("result: " + str(a))
elif what == "sin":
    a = math.sin (a)
    print ("result: " + str(a))
elif what == "log":
    a = math.log (a)
    print("result: " + str(a))
elif what == "sqrt":
    # Bug fix: the result used to be stored in an unused variable `c`
    # while the UNMODIFIED input was printed; store it back into `a`.
    a = math.sqrt (a)
    print("result: " + str(a))
# Unknown choices print nothing but still reach the sign-off below.
print("Thx for using myCalc" )
"s1kwood2033@gmail.com"
] | s1kwood2033@gmail.com |
d342732ac3b6fe72d50a5b8e94fc6365d7766d2f | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/network/v20200301/route_table.py | 0f7b2c604bdb6750a43a91e17b7ebf103a368267 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,631 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['RouteTableInitArgs', 'RouteTable']
@pulumi.input_type
class RouteTableInitArgs:
    """The set of input arguments for constructing a RouteTable resource.

    Auto-generated by the Pulumi SDK Generator; regenerate rather than
    editing by hand.  Values are stored via pulumi.set/pulumi.get so the
    engine can track inputs.
    """
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 disable_bgp_route_propagation: Optional[pulumi.Input[bool]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 route_table_name: Optional[pulumi.Input[str]] = None,
                 routes: Optional[pulumi.Input[Sequence[pulumi.Input['RouteArgs']]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a RouteTable resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[bool] disable_bgp_route_propagation: Whether to disable the routes learned by BGP on that route table. True means disable.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] route_table_name: The name of the route table.
        :param pulumi.Input[Sequence[pulumi.Input['RouteArgs']]] routes: Collection of routes contained within a route table.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        """
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional arguments are only recorded when explicitly supplied.
        if disable_bgp_route_propagation is not None:
            pulumi.set(__self__, "disable_bgp_route_propagation", disable_bgp_route_propagation)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if route_table_name is not None:
            pulumi.set(__self__, "route_table_name", route_table_name)
        if routes is not None:
            pulumi.set(__self__, "routes", routes)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="disableBgpRoutePropagation")
    def disable_bgp_route_propagation(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to disable the routes learned by BGP on that route table. True means disable.
        """
        return pulumi.get(self, "disable_bgp_route_propagation")
    @disable_bgp_route_propagation.setter
    def disable_bgp_route_propagation(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disable_bgp_route_propagation", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter(name="routeTableName")
    def route_table_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the route table.
        """
        return pulumi.get(self, "route_table_name")
    @route_table_name.setter
    def route_table_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "route_table_name", value)
    @property
    @pulumi.getter
    def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouteArgs']]]]:
        """
        Collection of routes contained within a route table.
        """
        return pulumi.get(self, "routes")
    @routes.setter
    def routes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouteArgs']]]]):
        pulumi.set(self, "routes", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class RouteTable(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
disable_bgp_route_propagation: Optional[pulumi.Input[bool]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_table_name: Optional[pulumi.Input[str]] = None,
routes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Route table resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] disable_bgp_route_propagation: Whether to disable the routes learned by BGP on that route table. True means disable.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] route_table_name: The name of the route table.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteArgs']]]] routes: Collection of routes contained within a route table.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RouteTableInitArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Route table resource.
:param str resource_name: The name of the resource.
:param RouteTableInitArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RouteTableInitArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
disable_bgp_route_propagation: Optional[pulumi.Input[bool]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_table_name: Optional[pulumi.Input[str]] = None,
routes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RouteTableInitArgs.__new__(RouteTableInitArgs)
__props__.__dict__["disable_bgp_route_propagation"] = disable_bgp_route_propagation
__props__.__dict__["id"] = id
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["route_table_name"] = route_table_name
__props__.__dict__["routes"] = routes
__props__.__dict__["tags"] = tags
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["subnets"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:network:RouteTable"), pulumi.Alias(type_="azure-native:network/v20150501preview:RouteTable"), pulumi.Alias(type_="azure-native:network/v20150615:RouteTable"), pulumi.Alias(type_="azure-native:network/v20160330:RouteTable"), pulumi.Alias(type_="azure-native:network/v20160601:RouteTable"), pulumi.Alias(type_="azure-native:network/v20160901:RouteTable"), pulumi.Alias(type_="azure-native:network/v20161201:RouteTable"), pulumi.Alias(type_="azure-native:network/v20170301:RouteTable"), pulumi.Alias(type_="azure-native:network/v20170601:RouteTable"), pulumi.Alias(type_="azure-native:network/v20170801:RouteTable"), pulumi.Alias(type_="azure-native:network/v20170901:RouteTable"), pulumi.Alias(type_="azure-native:network/v20171001:RouteTable"), pulumi.Alias(type_="azure-native:network/v20171101:RouteTable"), pulumi.Alias(type_="azure-native:network/v20180101:RouteTable"), pulumi.Alias(type_="azure-native:network/v20180201:RouteTable"), pulumi.Alias(type_="azure-native:network/v20180401:RouteTable"), pulumi.Alias(type_="azure-native:network/v20180601:RouteTable"), pulumi.Alias(type_="azure-native:network/v20180701:RouteTable"), pulumi.Alias(type_="azure-native:network/v20180801:RouteTable"), pulumi.Alias(type_="azure-native:network/v20181001:RouteTable"), pulumi.Alias(type_="azure-native:network/v20181101:RouteTable"), pulumi.Alias(type_="azure-native:network/v20181201:RouteTable"), pulumi.Alias(type_="azure-native:network/v20190201:RouteTable"), pulumi.Alias(type_="azure-native:network/v20190401:RouteTable"), pulumi.Alias(type_="azure-native:network/v20190601:RouteTable"), pulumi.Alias(type_="azure-native:network/v20190701:RouteTable"), pulumi.Alias(type_="azure-native:network/v20190801:RouteTable"), pulumi.Alias(type_="azure-native:network/v20190901:RouteTable"), pulumi.Alias(type_="azure-native:network/v20191101:RouteTable"), pulumi.Alias(type_="azure-native:network/v20191201:RouteTable"), 
pulumi.Alias(type_="azure-native:network/v20200401:RouteTable"), pulumi.Alias(type_="azure-native:network/v20200501:RouteTable"), pulumi.Alias(type_="azure-native:network/v20200601:RouteTable"), pulumi.Alias(type_="azure-native:network/v20200701:RouteTable"), pulumi.Alias(type_="azure-native:network/v20200801:RouteTable"), pulumi.Alias(type_="azure-native:network/v20201101:RouteTable"), pulumi.Alias(type_="azure-native:network/v20210201:RouteTable"), pulumi.Alias(type_="azure-native:network/v20210301:RouteTable"), pulumi.Alias(type_="azure-native:network/v20210501:RouteTable")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(RouteTable, __self__).__init__(
'azure-native:network/v20200301:RouteTable',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'RouteTable':
"""
Get an existing RouteTable resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = RouteTableInitArgs.__new__(RouteTableInitArgs)
__props__.__dict__["disable_bgp_route_propagation"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["routes"] = None
__props__.__dict__["subnets"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return RouteTable(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="disableBgpRoutePropagation")
def disable_bgp_route_propagation(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to disable the routes learned by BGP on that route table. True means disable.
"""
return pulumi.get(self, "disable_bgp_route_propagation")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        Resource location.
        """
        # Read-only output resolved by the Pulumi engine after deployment.
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        # Read-only output resolved by the Pulumi engine after deployment.
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the route table resource.
        """
        # Read-only output resolved by the Pulumi engine after deployment.
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def routes(self) -> pulumi.Output[Optional[Sequence['outputs.RouteResponse']]]:
        """
        Collection of routes contained within a route table.
        """
        # Read-only output resolved by the Pulumi engine after deployment.
        return pulumi.get(self, "routes")
    @property
    @pulumi.getter
    def subnets(self) -> pulumi.Output[Sequence['outputs.SubnetResponse']]:
        """
        A collection of references to subnets.
        """
        # Read-only output resolved by the Pulumi engine after deployment.
        return pulumi.get(self, "subnets")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        # Read-only output resolved by the Pulumi engine after deployment.
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        # Read-only output resolved by the Pulumi engine after deployment.
        return pulumi.get(self, "type")
| [
"noreply@github.com"
] | noreply@github.com |
566ddef51d09647542ab85a39035216d08bc0846 | 7b2e267d7f7dccce8d57c37dba772d778923d05d | /pendant_injection.py | f7e1e2bc9ee08a3cab52ee99b1ce6d1bdcd7d7ca | [] | no_license | Posfay/Zero-Forcing | 88ebe7706104b513ccc7064bfcab71c586226fbd | b18e1c97e13c46e70d4379c74d1e5933d8f4cf5a | refs/heads/master | 2023-04-17T04:21:06.437817 | 2021-04-17T16:11:13 | 2021-04-17T16:11:13 | 286,028,300 | 1 | 1 | null | 2020-10-07T21:34:49 | 2020-08-08T11:14:53 | Python | UTF-8 | Python | false | false | 6,588 | py | import os
import datetime
import networkx as nx
import graph_utils
import zero_forcing_process as zf
def inject_pendant_to_edge(edge_list, edge):
    """
    Build a new graph by injecting a pendant structure into ``edge``.

    The original graph is assumed 3-regular, so its node count is recovered
    as ``2/3`` of the edge count. Six new nodes are added: a K4 minus one
    edge re-routed through node ``n0 + 4`` (the pendant), and node ``n0 + 5``
    which subdivides ``edge`` and connects to the pendant.

    :param edge_list: list(list()) - edges of the original graph
    :param edge: list(int) (node1, node2) - the edge to inject into
    :return: Graph
    """
    n0 = int(len(edge_list) * 2 / 3)

    # Pendant: complete graph on n0..n0+3 minus the (n0, n0+1) edge,
    # re-routed through the extra node n0+4.
    pendant = nx.complete_graph(range(n0, n0 + 4))
    pendant.remove_edge(n0, n0 + 1)
    pendant_edges = [(n0, n0 + 4), (n0 + 4, n0 + 1)] + [e for e in pendant.edges]

    # Subdivide ``edge`` with the new node n0+5.
    kept_edges = edge_list.copy()
    kept_edges.remove(edge)
    kept_edges += [(edge[0], n0 + 5), (edge[1], n0 + 5)]

    # Assemble the final graph: kept edges + pendant + connector edge.
    graph = nx.Graph()
    graph.add_nodes_from(range(n0 + 6))
    graph.add_edges_from(kept_edges + pendant_edges + [(n0 + 4, n0 + 5)])
    return graph
def inject_pendants(edge_list):
    """
    Inject a pendant into every edge, keeping one graph per isomorphism class.

    :param edge_list: list(list(int)) - edges of the original graph
    :return: list(Graph) - pairwise non-isomorphic injected graphs
    """
    results = []
    total = len(edge_list)
    for edge in edge_list:
        candidate = inject_pendant_to_edge(edge_list, edge)
        # Keep the candidate only if it is new up to isomorphism.
        if not any(nx.is_isomorphic(known, candidate) for known in results):
            results.append(candidate)
    print(f"Finished pendant injection, created {len(results)}/{total} graphs")
    return results
def simulate_zf(graphs, n, zfn, origin_graph_path, results_core_path):
    """
    Simulates zero forcing on the resulting graphs of pendant injection and saves the graphs
    :param graphs: list(Graph) - graphs produced by pendant injection, each with n nodes
    :param n: int - node count of the injected graphs (original count + 6)
    :param zfn: int - zero forcing number of the original graph
    :param origin_graph_path: str - file name of the original graph
    :param results_core_path: str - root directory for all result files
    """
    # Results for this origin graph go to <core>\<n>\<origin name without extension>.
    dir_path = f"{origin_graph_path[:-4]}"
    final_path = f"{results_core_path}\\{n}\\{dir_path}"
    d = 0
    o = len(graphs)
    max_zfn = 0
    # Node count before pendant injection (6 nodes were added per graph).
    orig_n = n - 6
    for graph in graphs:
        t1 = datetime.datetime.now()
        zf_number, init_black_nodes_successful = zf.simulate_zero_forcing_on_graph(graph)
        # Node n-1 subdivides the original edge; its largest-index neighbor is
        # the pendant connector, the remaining two are the original endpoints.
        connecting_node = n - 1
        # NOTE(review): the comprehension variable shadows the parameter `n`;
        # harmless in Python 3 (comprehensions have their own scope) but confusing.
        neighbors = [n for n in graph.neighbors(connecting_node)]
        neighbors.remove(max(neighbors))
        pendant_edge = tuple(neighbors)
        graph_utils.write_pendant_graph_to_file(graph, zf_number, init_black_nodes_successful, pendant_edge, final_path)
        if zf_number > max_zfn:
            max_zfn = zf_number
        # Compare zf ratios of the new graph and its origin.
        orig_zf_ratio = zfn / orig_n
        current_zf_ratio = zf_number / n
        if current_zf_ratio >= orig_zf_ratio:
            save_stats(results_core_path, new_graphs_reached_ratio=1)
        save_stats(results_core_path, new_graphs_generated=1)
        d += 1
        t2 = datetime.datetime.now()
        print(f"{graph_utils.timestamp()} {d}/{o}."
              f" (n={n}, zf={zf_number}, ratio={round(zf_number / n, 2)})"
              f" done in {graph_utils.time_diff(t1, t2)}")
    # Record whether any injected graph reached the original zf ratio.
    orig_zf_ratio = zfn / orig_n
    max_zf_ratio = max_zfn / n
    if max_zf_ratio >= orig_zf_ratio:
        save_reached_ratio(results_core_path, origin_graph_path, orig_zf_ratio, max_zf_ratio)
        save_stats(results_core_path, original_graphs_reached_ratio=1)
    save_stats(results_core_path, original_graphs_processed=1)
    save_processed_graph(results_core_path, origin_graph_path)
def save_stats(results_core_path, original_graphs_processed=0,
               new_graphs_generated=0, original_graphs_reached_ratio=0, new_graphs_reached_ratio=0):
    """
    Accumulate statistics about the graph generation into ``stats.txt``.

    Each counter passed in is added to the totals already stored on disk
    (if any) and the four running totals are written back, one per line,
    in the parameter order.

    :param results_core_path: str - directory holding ``stats.txt``
    :param original_graphs_processed: int - Number of graphs that were inputted for pendant injection
    :param new_graphs_generated: int - Number of graphs generated from pendant injection
    :param original_graphs_reached_ratio: int - Number of original graphs that produced at least one graph which
                                               reached the original one's zero forcing ratio
    :param new_graphs_reached_ratio: int - Number of new graphs that reached their respective original graph's zf ratio
    """
    stats_path = f"{results_core_path}\\stats.txt"
    # Add the previously stored totals, if the stats file already exists.
    # 'with' guarantees the handles are closed even if reading/writing fails.
    if os.path.exists(stats_path):
        with open(stats_path, "r") as stats_file:
            original_graphs_processed += int(stats_file.readline())
            new_graphs_generated += int(stats_file.readline())
            original_graphs_reached_ratio += int(stats_file.readline())
            new_graphs_reached_ratio += int(stats_file.readline())
    with open(stats_path, "w") as stats_file:
        stats_file.write(f"{original_graphs_processed}\n")
        stats_file.write(f"{new_graphs_generated}\n")
        stats_file.write(f"{original_graphs_reached_ratio}\n")
        stats_file.write(f"{new_graphs_reached_ratio}\n")
def save_processed_graph(results_core_path, original_graph_name):
    """
    Mark an original graph as processed by creating a placeholder file.

    :param results_core_path: str - root directory for the results
    :param original_graph_name: str - file name of the processed graph
    """
    processed_dir_path = f"{results_core_path}\\Processed Graphs"
    # exist_ok replaces the former bare try/except around makedirs, which
    # also silently hid real failures (e.g. permission errors).
    os.makedirs(processed_dir_path, exist_ok=True)
    processed_graph_path = f"{processed_dir_path}\\{original_graph_name}"
    # 'with' ensures the marker file handle is always closed.
    with open(processed_graph_path, "w") as processed_graph_file:
        processed_graph_file.write(" ")
def save_reached_ratio(results_core_path, reached_graph_name, orig_zf_ratio, new_zf_ratio):
    """
    Record a graph that reached its original graph's zero forcing ratio by
    creating a placeholder file whose name encodes both ratios.

    :param results_core_path: str - root directory for the results
    :param reached_graph_name: str - file name of the original graph
    :param orig_zf_ratio: float - zf ratio of the original graph
    :param new_zf_ratio: float - best zf ratio among the injected graphs
    """
    reached_dir_path = f"{results_core_path}\\Graphs Reached Original Ratio"
    # exist_ok replaces the former bare try/except around makedirs.
    os.makedirs(reached_dir_path, exist_ok=True)
    new_graph_reached_path = f"{reached_dir_path}\\{reached_graph_name[:-4]} - " \
                             f"{str(round(new_zf_ratio, 3))}_new_{str(round(orig_zf_ratio, 3))}_orig.txt"
    # 'with' ensures the marker file handle is always closed.
    with open(new_graph_reached_path, "w") as new_graph_reached_file:
        new_graph_reached_file.write(" ")
| [
"benedek@posfay.com"
] | benedek@posfay.com |
74bc608d4b97f1b7c0df621bcabed6cd361d7dbc | dd0b0df88a08a4f4ab249c76cf0ea82482ff37bb | /sfepy/terms/terms.py | 41157ad7b967a814fae63a5692dccc27c5c414df | [
"BSD-3-Clause"
] | permissive | mfkiwl/sfepy | 43e3a2cbed240b8ef387a8ab9037c1f6fd19e0fe | 67275845da49e772b2f8faaa48df165893f2be16 | refs/heads/master | 2021-01-18T07:59:19.289053 | 2013-10-31T09:54:55 | 2013-10-31T09:54:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,287 | py | import re
from copy import copy
import numpy as nm
from sfepy.base.base import (as_float_or_complex, get_default, assert_,
Container, Struct, basestr, goptions)
from sfepy.base.compat import in1d
# Used for imports in term files.
from sfepy.terms.extmods import terms
from sfepy.linalg import split_range
_match_args = re.compile('^([^\(\}]*)\((.*)\)$').match
_match_virtual = re.compile('^virtual$').match
_match_state = re.compile('^state(_[_a-zA-Z0-9]+)?$').match
_match_parameter = re.compile('^parameter(_[_a-zA-Z0-9]+)?$').match
_match_material = re.compile('^material(_[_a-zA-Z0-9]+)?$').match
_match_material_opt = re.compile('^opt_material(_[_a-zA-Z0-9]+)?$').match
_match_material_root = re.compile('(.+)\.(.*)').match

def get_arg_kinds(arg_types):
    """
    Translate `arg_types` of a Term to a canonical form.

    Parameters
    ----------
    arg_types : tuple of strings
        The term argument types, as given in the `arg_types` attribute.

    Returns
    -------
    arg_kinds : list of strings
        The argument kinds - one of 'virtual_variable', 'state_variable',
        'parameter_variable', 'opt_material', 'user'.
    """
    # Matchers are tried in order; the first hit decides the kind.
    matchers = [(_match_virtual, 'virtual_variable'),
                (_match_state, 'state_variable'),
                (_match_parameter, 'parameter_variable'),
                (_match_material, 'material'),
                (_match_material_opt, 'opt_material')]

    arg_kinds = []
    for ii, arg_type in enumerate(arg_types):
        for matcher, kind in matchers:
            if matcher(arg_type):
                break
        else:
            kind = 'user'

        # An optional material is only allowed as the very first argument.
        if (kind == 'opt_material') and (ii > 0):
            msg = 'opt_material at position %d, must be at 0!' % ii
            raise ValueError(msg)

        arg_kinds.append(kind)

    return arg_kinds
def get_shape_kind(integration):
    """
    Get data shape kind for given integration type.
    """
    # 'surface_extra' uses volume data shapes.
    shape_kinds = {'surface' : 'surface',
                   'volume' : 'volume',
                   'surface_extra' : 'volume',
                   'point' : 'point'}
    try:
        return shape_kinds[integration]

    except KeyError:
        raise NotImplementedError('unsupported term integration! (%s)'
                                  % integration)
def split_complex_args(args):
    """
    Split complex arguments to real and imaginary parts.

    Returns
    -------
    newargs : dictionary
        Dictionary with lists corresponding to `args` such that each
        argument of numpy.complex128 data type is split to its real and
        imaginary part. The output depends on the number of complex
        arguments in 'args':

        - 0: list (key 'r') identical to input one
        - 1: two lists with keys 'r', 'i' corresponding to real
          and imaginary parts
        - 2: output dictionary contains four lists:

          - 'r' - real(arg1), real(arg2)
          - 'i' - imag(arg1), imag(arg2)
          - 'ri' - real(arg1), imag(arg2)
          - 'ir' - imag(arg1), real(arg2)
    """
    complex_indices = [ii for ii, arg in enumerate(args)
                       if isinstance(arg, nm.ndarray)
                       and (arg.dtype == nm.complex128)]

    if not complex_indices:
        return {'r' : args[:]}

    if len(complex_indices) > 2:
        raise NotImplementedError('more than 2 complex arguments! (%d)'
                                  % len(complex_indices))

    newargs = {'r' : list(args[:]), 'i' : list(args[:])}

    i1 = complex_indices[0]
    newargs['r'][i1] = args[i1].real.copy()
    newargs['i'][i1] = args[i1].imag.copy()

    if len(complex_indices) == 2:
        i2 = complex_indices[1]
        newargs['r'][i2] = args[i2].real.copy()
        newargs['i'][i2] = args[i2].imag.copy()

        # Mixed real/imaginary combinations share the arrays built above.
        ri = list(args[:])
        ri[i1], ri[i2] = newargs['r'][i1], newargs['i'][i2]
        ir = list(args[:])
        ir[i1], ir[i2] = newargs['i'][i1], newargs['r'][i2]
        newargs['ri'], newargs['ir'] = ri, ir

    return newargs
def vector_chunk_generator(total_size, chunk_size, shape_in,
                           zero=False, set_shape=True, dtype=nm.float64):
    """
    Iterate over `total_size` in chunks of at most `chunk_size` elements,
    yielding ``(out, chunk)`` pairs: `out` is a (optionally zeroed) work
    array of shape `shape_in` (first dimension set to the chunk length when
    `set_shape` is True), `chunk` holds the int32 indices of the chunk.
    """
    if not chunk_size:
        chunk_size = total_size

    shape = list(shape_in)
    make = nm.zeros if zero else nm.empty

    offset = nm.array(0, dtype=nm.int32)
    for size in split_range(total_size, chunk_size):
        chunk = nm.arange(size, dtype=nm.int32) + offset
        if set_shape:
            shape[0] = size
        yield make(shape, dtype=dtype), chunk
        offset += size
def create_arg_parser():
    """
    Build the pyparsing grammar for a term argument string: a comma
    separated list of variables (with optional history index ``[i]``),
    time derivatives ``dvar/dt`` and traces ``tr(var)``.
    """
    from pyparsing import Literal, Word, delimitedList, Group, \
         StringStart, StringEnd, Optional, nums, alphas, alphanums
    inumber = Word("+-"+nums, nums)
    # Optional '[i]' history specification, defaulting to step 0.
    history = Optional(Literal('[').suppress() + inumber
                       + Literal(']').suppress(), default=0)("history")
    history.setParseAction(lambda str, loc, toks: int(toks[0]))
    # Variable names may contain dots (material roots) and underscores.
    variable = Group(Word(alphas, alphanums + '._') + history)
    derivative = Group(Literal('d') + variable\
                       + Literal('/').suppress() + Literal('dt'))
    trace = Group(Literal('tr') + Literal('(').suppress() + variable \
                  + Literal(')').suppress())
    generalized_var = derivative | trace | variable
    args = StringStart() + delimitedList(generalized_var) + StringEnd()
    return args
# 22.01.2006, c
class CharacteristicFunction(Struct):
    """
    Helper generating chunk-wise work arrays together with the cell indices
    of a region, for the current element group `ig`.
    """

    def __init__(self, region):
        self.igs = region.igs
        self.region = region
        self.local_chunk = None
        self.ig = None

    def __call__(self, chunk_size, shape_in, zero=False, set_shape=True,
                 ret_local_chunk=False, dtype=nm.float64):
        """
        Yield ``(out, indices)`` pairs; `indices` are either local chunk
        indices (`ret_local_chunk`) or the region cells they select.
        """
        cells = self.region.get_cells(self.ig)
        generator = vector_chunk_generator(cells.shape[0], chunk_size,
                                           shape_in, zero, set_shape, dtype)
        for out, chunk in generator:
            # Remember the local chunk for get_local_chunk() during iteration.
            self.local_chunk = chunk
            yield out, (chunk if ret_local_chunk else cells[chunk])

        self.local_chunk = None

    def set_current_group(self, ig):
        self.ig = ig

    def get_local_chunk(self):
        return self.local_chunk
class ConnInfo(Struct):
    """
    DOF connectivity information of a single term, linking its virtual,
    state and primary variables to a region and element groups.
    """

    def get_region(self, can_trace=True):
        """
        Return the term region, or its mirror region for trace terms.
        """
        if self.is_trace and can_trace:
            return self.region.get_mirror_region()[0]

        return self.region

    def get_region_name(self, can_trace=True):
        """
        Return the (mirror) region name, or None if no region is set.
        """
        region = self.get_region(can_trace=can_trace)
        return None if region is None else region.name

    def iter_igs(self):
        """
        Yield ``(row ig, column ig)`` pairs for all region groups; both are
        None when no region is set.
        """
        if self.region is None:
            yield None, None
            return

        for ig in self.region.igs:
            if self.virtual_igs is not None:
                pos = self.virtual_igs.tolist().index(ig)
                rig = self.virtual_igs[pos]
            else:
                rig = None

            # For trace terms the column group is taken from the mirror map.
            if self.is_trace:
                ii = self.region.get_mirror_region()[2][ig]
            else:
                ii = ig

            if self.state_igs is not None:
                pos = self.state_igs.tolist().index(ii)
                cig = self.state_igs[pos]
            else:
                cig = None

            yield rig, cig
class Terms(Container):
    """
    Container of Term instances.

    Keeps a parsed `expression` list in sync with its contents and supports
    building equations with ``+``, ``-`` and scalar multiplication.
    """

    @staticmethod
    def from_desc(term_descs, regions, integrals=None):
        """
        Create terms, assign each term its region.
        """
        from sfepy.terms import term_table
        terms = Terms()
        for td in term_descs:
            try:
                constructor = term_table[td.name]
            except KeyError:
                # Was a bare 'except:' - catch only the missing term name.
                msg = "term '%s' is not in %s" % (td.name,
                                                  sorted(term_table.keys()))
                raise ValueError(msg)
            try:
                region = regions[td.region]
            except IndexError:
                raise KeyError('region "%s" does not exist!' % td.region)
            term = Term.from_desc(constructor, td, region, integrals=integrals)
            terms.append(term)
        return terms

    def __init__(self, objs=None):
        Container.__init__(self, objs=objs)
        self.update_expression()

    def insert(self, ii, obj):
        Container.insert(self, ii, obj)
        self.update_expression()

    def append(self, obj):
        Container.append(self, obj)
        self.update_expression()

    def update_expression(self):
        """
        Rebuild the `expression` attribute: one
        [sign, name, arguments, integral, region] record per term.
        """
        self.expression = []
        for term in self:
            aux = [term.sign, term.name, term.arg_str,
                   term.integral_name, term.region.name]
            self.expression.append(aux)

    def __mul__(self, other):
        # Multiply each term by the scalar `other`.
        out = Terms()
        for name, term in self.iteritems():
            out.append(term * other)
        return out

    def __rmul__(self, other):
        return self * other

    def __add__(self, other):
        if isinstance(other, Term):
            out = self.copy()
            out.append(other)
        elif isinstance(other, Terms):
            out = Terms(self._objs + other._objs)
        else:
            raise ValueError('cannot add Terms with %s!' % other)
        return out

    def __radd__(self, other):
        return self + other

    def __sub__(self, other):
        # Both branches delegate to __add__ with the negated operand, which
        # handles Term and Terms alike.
        if isinstance(other, Term):
            out = self + (-other)
        elif isinstance(other, Terms):
            out = self + (-other)
        else:
            raise ValueError('cannot subtract Terms with %s!' % other)
        return out

    def __rsub__(self, other):
        return -self + other

    def __pos__(self):
        return self

    def __neg__(self):
        return -1.0 * self

    def setup(self):
        for term in self:
            term.setup()

    def assign_args(self, variables, materials, user=None):
        """
        Assign all term arguments.
        """
        for term in self:
            term.assign_args(variables, materials, user)

    def get_variable_names(self):
        # Union of variable names over all terms.
        out = []
        for term in self:
            out.extend(term.get_variable_names())
        return list(set(out))

    def get_material_names(self):
        # Union of material names over all terms.
        out = []
        for term in self:
            out.extend(term.get_material_names())
        return list(set(out))

    def get_user_names(self):
        # Union of user data names over all terms.
        out = []
        for term in self:
            out.extend(term.get_user_names())
        return list(set(out))

    def set_current_group(self, ig):
        for term in self:
            term.char_fun.set_current_group(ig)
class Term(Struct):
name = ''
arg_types = ()
arg_shapes = {}
integration = 'volume'
geometries = ['2_3', '2_4', '3_4', '3_8']
    @staticmethod
    def new(name, integral, region, **kwargs):
        """
        Create a Term instance from a term-call string.

        `name` has the form ``'<term_name>(<comma separated arguments>)'``;
        the part before the parentheses is looked up in the global term
        table and the matching class is instantiated.

        Raises ValueError for a malformed string or an unknown term name.
        """
        from sfepy.terms import term_table
        arg_str = _match_args(name)
        if arg_str is not None:
            name, arg_str = arg_str.groups()
        else:
            raise ValueError('bad term syntax! (%s)' % name)
        if name in term_table:
            constructor = term_table[name]
        else:
            msg = "term '%s' is not in %s" % (name, sorted(term_table.keys()))
            raise ValueError(msg)
        obj = constructor(name, arg_str, integral, region, **kwargs)
        return obj
    @staticmethod
    def from_desc(constructor, desc, region, integrals=None):
        """
        Create a term instance from a parsed term description `desc`,
        assigning it the given `region`, the integral looked up in
        `integrals` (created on demand when not given) and the sign.
        """
        from sfepy.fem import Integrals
        if integrals is None:
            integrals = Integrals()
        obj = constructor(desc.name, desc.args, None, region)
        obj.set_integral(integrals.get(desc.integral, obj.get_integral_info()))
        obj.sign = desc.sign
        return obj
    def __init__(self, name, arg_str, integral, region, **kwargs):
        """
        Store the term name, argument string, region and keyword arguments,
        set the default sign and assign the integral (checking its kind).
        """
        self.name = name
        self.arg_str = arg_str
        self.region = region
        self._kwargs = kwargs
        # Keep the class-level integration; the instance attribute may be
        # specialized later in setup_integration().
        self._integration = self.integration
        self.sign = 1.0
        self.set_integral(integral)
def __mul__(self, other):
try:
mul = as_float_or_complex(other)
except ValueError:
raise ValueError('cannot multiply Term with %s!' % other)
out = self.copy(name=self.name)
out.sign = mul * self.sign
return out
    def __rmul__(self, other):
        # Scalar * term is commutative - delegate to __mul__().
        return self * other
def __add__(self, other):
if isinstance(other, Term):
out = Terms([self, other])
else:
out = NotImplemented
return out
def __sub__(self, other):
if isinstance(other, Term):
out = Terms([self, -1.0 * other])
else:
out = NotImplemented
return out
    def __pos__(self):
        # Unary plus is a no-op.
        return self
def __neg__(self):
out = -1.0 * self
return out
def set_integral(self, integral):
"""
Set the term integral.
"""
self.integral = integral
if self.integral is not None:
self.integral_name = self.integral.name
kind = self.get_integral_info()
if kind != integral.kind:
msg = "integral kind for term %s must be '%s'! (is '%s')" \
% (self.name, kind, integral.kind)
raise ValueError(msg)
    def setup(self):
        """
        Initialize runtime attributes: the characteristic function, time
        stepping data, the raw integration type parsed from the term name
        prefix (e.g. 'dw', 'd', 'ev'), and the actual arguments.
        """
        self.char_fun = CharacteristicFunction(self.region)
        self.function = Struct.get(self, 'function', None)
        self.step = 0
        self.dt = 1.0
        self.is_quasistatic = False
        self.has_integral = True
        self.has_region = True
        # The term name prefix before the first underscore is the raw
        # integration type; the final itype is set in classify_args().
        self.itype = itype = None
        aux = re.compile('([a-z]+)_.*').match(self.name)
        if aux:
            itype = aux.group(1)
        self.raw_itype = itype
        self.setup_formal_args()
        if self._kwargs:
            self.setup_args(**self._kwargs)
        else:
            self.args = []
    def setup_formal_args(self):
        """
        Parse the argument string into argument names and their history
        steps, time derivatives and trace flags.
        """
        self.arg_names = []
        self.arg_steps = {}
        self.arg_derivatives = {}
        self.arg_traces = {}
        parser = create_arg_parser()
        self.arg_desc = parser.parseString(self.arg_str)
        for arg in self.arg_desc:
            trace = False
            derivative = None
            # Plain variables parse as (name, step); derivatives and traces
            # as ('d', (name, step), 'dt') and ('tr', (name, step)).
            if isinstance(arg[1], int):
                name, step = arg
            else:
                kind = arg[0]
                name, step = arg[1]
                if kind == 'd':
                    derivative = arg[2]
                elif kind == 'tr':
                    trace = True
            # Material arguments 'mat.attr' become (root, attribute) tuples.
            match = _match_material_root(name)
            if match:
                name = (match.group(1), match.group(2))
            self.arg_names.append(name)
            self.arg_steps[name] = step
            self.arg_derivatives[name] = derivative
            self.arg_traces[name] = trace
def setup_args(self, **kwargs):
self._kwargs = kwargs
self.args = []
for arg_name in self.arg_names:
if isinstance(arg_name, basestr):
self.args.append(self._kwargs[arg_name])
else:
self.args.append((self._kwargs[arg_name[0]], arg_name[1]))
self.classify_args()
self.check_args()
    def __call__(self, diff_var=None, chunk_size=None, **kwargs):
        """
        Subclasses either implement __call__ or plug in a proper _call().
        """
        return self._call(diff_var, chunk_size, **kwargs)
    def _call(self, diff_var=None, chunk_size=None, **kwargs):
        # Fallback signalling a missing implementation in a subclass.
        msg = 'base class method "_call" called for %s' \
              % self.__class__.__name__
        raise RuntimeError(msg)
def assign_args(self, variables, materials, user=None):
"""
Check term argument existence in variables, materials, user data
and assign the arguments to terms. Also check compatibility of
field and term subdomain lists (igs).
"""
if user is None:
user = {}
kwargs = {}
for arg_name in self.arg_names:
if isinstance(arg_name, basestr):
if arg_name in variables.names:
kwargs[arg_name] = variables[arg_name]
elif arg_name in user:
kwargs[arg_name] = user[arg_name]
else:
raise ValueError('argument %s not found!' % arg_name)
else:
arg_name = arg_name[0]
if arg_name in materials.names:
kwargs[arg_name] = materials[arg_name]
else:
raise ValueError('material argument %s not found!'
% arg_name)
self.setup_args(**kwargs)
    def classify_args(self):
        """
        Classify types of the term arguments and find matching call
        signature.
        A state variable can be in place of a parameter variable and
        vice versa.
        """
        self.names = Struct(name='arg_names',
                            material=[], variable=[], user=[],
                            state=[], virtual=[], parameter=[])
        # Prepare for 'opt_material' - just prepend a None argument if needed.
        if isinstance(self.arg_types[0], tuple):
            arg_types = self.arg_types[0]
        else:
            arg_types = self.arg_types
        if len(arg_types) == (len(self.args) + 1):
            self.args.insert(0, (None, None))
            self.arg_names.insert(0, (None, None))
        # Multiple signatures: choose the one whose variable kinds match.
        if isinstance(self.arg_types[0], tuple):
            assert_(len(self.modes) == len(self.arg_types))
            # Find matching call signature using variable arguments - material
            # and user arguments are ignored!
            matched = []
            for it, arg_types in enumerate(self.arg_types):
                arg_kinds = get_arg_kinds(arg_types)
                if self._check_variables(arg_kinds):
                    matched.append((it, arg_kinds))
            if len(matched) == 1:
                i_match, arg_kinds = matched[0]
                arg_types = self.arg_types[i_match]
                self.mode = self.modes[i_match]
            elif len(matched) == 0:
                msg = 'cannot match arguments! (%s)' % self.arg_names
                raise ValueError(msg)
            else:
                msg = 'ambiguous arguments! (%s)' % self.arg_names
                raise ValueError(msg)
        else:
            arg_types = self.arg_types
            arg_kinds = get_arg_kinds(self.arg_types)
            self.mode = Struct.get(self, 'mode', None)
            if not self._check_variables(arg_kinds):
                raise ValueError('cannot match variables! (%s)'
                                 % self.arg_names)
        # Set actual argument types.
        self.ats = list(arg_types)
        # Sort argument names into the name lists by kind.
        for ii, arg_kind in enumerate(arg_kinds):
            name = self.arg_names[ii]
            if arg_kind.endswith('variable'):
                names = self.names.variable
                if arg_kind == 'virtual_variable':
                    self.names.virtual.append(name)
                elif arg_kind == 'state_variable':
                    self.names.state.append(name)
                elif arg_kind == 'parameter_variable':
                    self.names.parameter.append(name)
            elif arg_kind.endswith('material'):
                names = self.names.material
            else:
                names = self.names.user
            names.append(name)
        self.n_virtual = len(self.names.virtual)
        # NOTE(review): the message below contains a typo ('virtial');
        # kept as-is since it is a runtime string.
        if self.n_virtual > 1:
            raise ValueError('at most one virtial variable is allowed! (%d)'
                             % self.n_virtual)
        self.set_arg_types()
        self.setup_integration()
        # A 'dw' term in evaluation mode behaves as a 'd' term.
        if (self.raw_itype == 'dw') and (self.mode == 'eval'):
            self.itype = 'd'
        else:
            self.itype = self.raw_itype
def _check_variables(self, arg_kinds):
for ii, arg_kind in enumerate(arg_kinds):
if arg_kind.endswith('variable'):
var = self.args[ii]
check = {'virtual_variable' : var.is_virtual,
'state_variable' : var.is_state_or_parameter,
'parameter_variable' : var.is_state_or_parameter}
if not check[arg_kind]():
return False
else:
return True
    def set_arg_types(self):
        # Hook overridden in subclasses to set actual argument data types.
        pass
    def check_args(self):
        """
        Common checking to all terms.
        Check compatibility of field and term subdomain lists (igs).
        """
        vns = self.get_variable_names()
        for name in vns:
            field = self._kwargs[name].get_field()
            if field is None:
                continue
            # All region vertices must belong to the field region.
            if not nm.all(in1d(self.region.vertices,
                               field.region.vertices)):
                msg = ('%s: incompatible regions: (self, field %s)'
                       + '(%s in %s)') %\
                       (self.name, field.name,
                        self.region.vertices, field.region.vertices)
                raise ValueError(msg)
    def get_variable_names(self):
        # Names of all variable arguments (virtual, state and parameter).
        return self.names.variable
def get_material_names(self):
out = []
for aux in self.names.material:
if aux[0] is not None:
out.append(aux[0])
return out
    def get_user_names(self):
        # Names of user-supplied (non-variable, non-material) arguments.
        return self.names.user
def get_virtual_name(self):
if not self.names.virtual:
return None
var = self.get_virtual_variable()
return var.name
def get_state_names(self):
"""
If variables are given, return only true unknowns whose data are of
the current time step (0).
"""
variables = self.get_state_variables()
return [var.name for var in variables]
    def get_parameter_names(self):
        # Return a shallow copy so callers cannot mutate self.names.
        return copy(self.names.parameter)
def get_conn_key(self):
"""The key to be used in DOF connectivity information."""
key = (self.name,) + tuple(self.arg_names)
key += (self.integral_name, self.region.name)
return key
    def get_conn_info(self):
        """
        Return a list of ConnInfo records describing the DOF connectivities
        required by the term: one per state variable, one per parameter
        variable and, when only the virtual variable exists, a single record
        for it.
        """
        vvar = self.get_virtual_variable()
        svars = self.get_state_variables()
        pvars = self.get_parameter_variables()
        all_vars = self.get_variables()
        dc_type = self.get_dof_conn_type()
        tgs = self.get_geometry_types()
        v_igs = v_tg = None
        if vvar is not None:
            field = vvar.get_field()
            if field is not None:
                v_igs = field.igs
                if vvar.name in tgs:
                    v_tg = tgs[vvar.name]
                else:
                    v_tg = None
        else:
            # No virtual variable -> all unknowns are in fact known parameters.
            pvars += svars
            svars = []
        region = self.get_region()
        if region is not None:
            # A trace argument requires the mirror region to be set up.
            is_any_trace = reduce(lambda x, y: x or y,
                                  self.arg_traces.values())
            if is_any_trace:
                region.setup_mirror_region()
                self.char_fun.igs = region.igs
        vals = []
        aux_pvars = []
        for svar in svars:
            # Allow only true state variables.
            if not svar.is_state():
                aux_pvars.append(svar)
                continue
            field = svar.get_field()
            if field is not None:
                s_igs = field.igs
            else:
                s_igs = None
            is_trace = self.arg_traces[svar.name]
            if svar.name in tgs:
                ps_tg = tgs[svar.name]
            else:
                ps_tg = v_tg
            val = ConnInfo(virtual=vvar, virtual_igs=v_igs,
                           state=svar, state_igs=s_igs,
                           primary=svar, primary_igs=s_igs,
                           has_virtual=True,
                           has_state=True,
                           is_trace=is_trace,
                           dc_type=dc_type,
                           v_tg=v_tg,
                           ps_tg=ps_tg,
                           region=region,
                           all_vars=all_vars)
            vals.append(val)
        # Non-state variables encountered above act as parameters.
        pvars += aux_pvars
        for pvar in pvars:
            field = pvar.get_field()
            if field is not None:
                p_igs = field.igs
            else:
                p_igs = None
            is_trace = self.arg_traces[pvar.name]
            if pvar.name in tgs:
                ps_tg = tgs[pvar.name]
            else:
                ps_tg = v_tg
            val = ConnInfo(virtual=vvar, virtual_igs=v_igs,
                           state=None, state_igs=[],
                           primary=pvar.get_primary(), primary_igs=p_igs,
                           has_virtual=vvar is not None,
                           has_state=False,
                           is_trace=is_trace,
                           dc_type=dc_type,
                           v_tg=v_tg,
                           ps_tg=ps_tg,
                           region=region,
                           all_vars=all_vars)
            vals.append(val)
        if vvar and (len(vals) == 0):
            # No state, parameter variables, just the virtual one.
            val = ConnInfo(virtual=vvar, virtual_igs=v_igs,
                           state=vvar.get_primary(), state_igs=v_igs,
                           primary=vvar.get_primary(), primary_igs=v_igs,
                           has_virtual=True,
                           has_state=False,
                           is_trace=False,
                           dc_type=dc_type,
                           v_tg=v_tg,
                           ps_tg=v_tg,
                           region=region,
                           all_vars=all_vars)
            vals.append(val)
        return vals
def get_args_by_name(self, arg_names):
"""
Return arguments by name.
"""
out = []
for name in arg_names:
try:
ii = self.arg_names.index(name)
except ValueError:
raise ValueError('non-existing argument! (%s)' % name)
out.append(self.args[ii])
return out
    def get_args(self, arg_types=None, **kwargs):
        """
        Return arguments by type as specified in arg_types (or
        self.ats). Arguments in **kwargs can override the ones assigned
        at the term construction - this is useful for passing user data.
        """
        ats = self.ats
        if arg_types is None:
            arg_types = ats
        args = []
        # NOTE(review): iname (the integral name) is unpacked but unused;
        # the stored self.integral_name is used below instead.
        iname, region_name, ig = self.get_current_group()
        for at in arg_types:
            ii = ats.index(at)
            arg_name = self.arg_names[ii]
            if isinstance(arg_name, basestr):
                # Variable/user argument, possibly overridden via kwargs.
                if arg_name in kwargs:
                    args.append(kwargs[arg_name])
                else:
                    args.append(self.args[ii])
            else:
                # Material argument: fetch the data for the current group,
                # or None for an unset optional material.
                mat, par_name = self.args[ii]
                if mat is not None:
                    mat_data = mat.get_data((region_name, self.integral_name),
                                            ig, par_name)
                else:
                    mat_data = None
                args.append(mat_data)
        return args
def get_kwargs(self, keys, **kwargs):
"""Extract arguments from **kwargs listed in keys (default is
None)."""
return [kwargs.get(name) for name in keys]
def get_arg_name(self, arg_type, full=False, join=None):
"""
Get the name of the argument specified by `arg_type.`
Parameters
----------
arg_type : str
The argument type string.
full : bool
If True, return the full name. For example, if the name of a
variable argument is 'u' and its time derivative is
requested, the full name is 'du/dt'.
join : str, optional
Optionally, the material argument name tuple can be joined
to a single string using the `join` string.
Returns
-------
name : str
The argument name.
"""
try:
ii = self.ats.index(arg_type)
except ValueError:
return None
name = self.arg_names[ii]
if full:
# Include derivatives.
if self.arg_derivatives[name]:
name = 'd%s/%s' % (name, self.arg_derivatives[name])
if (join is not None) and isinstance(name, tuple):
name = join.join(name)
return name
def get_integral_info(self):
"""
Get information on the term integral.
Returns
-------
kind : 'v' or 's'
The integral kind.
"""
if self.integration:
if self.integration == 'volume':
kind = 'v'
elif 'surface' in self.integration:
kind = 's'
elif self.integration == 'point':
kind = None
else:
raise ValueError('unsupported term integration! (%s)'
% self.integration)
else:
kind = None
return kind
    def setup_integration(self):
        """
        Determine the geometry (integration) type of each variable argument
        and the resulting DOF connectivity type of the term.
        """
        self.has_geometry = True
        self.geometry_types = {}
        # A single string applies to all variables; a dict (possibly chosen
        # by the current mode) maps argument types to geometry types.
        if isinstance(self.integration, basestr):
            for var in self.get_variables():
                self.geometry_types[var.name] = self.integration
        else:
            if self.mode is not None:
                self.integration = self._integration[self.mode]
            if self.integration is not None:
                for arg_type, gtype in self.integration.iteritems():
                    var = self.get_args(arg_types=[arg_type])[0]
                    self.geometry_types[var.name] = gtype
        # 'surface_extra' anywhere forces volume DOF connectivities.
        gtypes = list(set(self.geometry_types.itervalues()))
        if 'surface_extra' in gtypes:
            self.dof_conn_type = 'volume'
        elif len(gtypes):
            self.dof_conn_type = gtypes[0]
    def get_region(self):
        # The region the term is integrated over.
        return self.region
    def get_geometry_types(self):
        """
        Returns
        -------
        out : dict
            The required geometry types for each variable argument.
        """
        return self.geometry_types
    def get_current_group(self):
        # (integral name, region name, current element group) triple.
        return (self.integral_name, self.region.name, self.char_fun.ig)
    def get_dof_conn_type(self):
        # DOF connectivity descriptor: type + region name.
        return Struct(name='dof_conn_info', type=self.dof_conn_type,
                      region_name=self.region.name)
    def set_current_group(self, ig):
        # Delegate to the characteristic function, which tracks the group.
        self.char_fun.set_current_group(ig)
    def igs(self):
        # Element group indices of the term region.
        return self.char_fun.igs
def get_assembling_cells(self, shape=None):
"""
According to the term integration type, return either the term
region cell indices or local index sequence.
"""
shape_kind = get_shape_kind(self.integration)
ig = self.char_fun.ig
cells = self.region.get_cells(ig, true_cells_only=False)
if shape_kind == 'surface':
cells = nm.arange(cells.shape[0], dtype=nm.int32)
elif shape_kind == 'point':
cells = nm.arange(shape[0], dtype=nm.int32)
else:
cells = cells.astype(nm.int32)
return cells
    def iter_groups(self):
        """
        Iterate over the term element groups, setting the current group
        before yielding it. Empty volume groups are skipped; point terms
        use only the first group.
        """
        if self.dof_conn_type == 'point':
            igs = self.igs()[0:1]
        else:
            igs = self.igs()
        for ig in igs:
            if self.integration == 'volume':
                if not len(self.region.get_cells(ig)): continue
            self.set_current_group(ig)
            yield ig
def time_update(self, ts):
if ts is not None:
self.step = ts.step
self.dt = ts.dt
self.is_quasistatic = ts.is_quasistatic
    def advance(self, ts):
        """
        Advance to the next time step. Implemented in subclasses.
        """
        # Intentionally a no-op in the base class.
    def get_vector(self, variable):
        """Get the vector stored in `variable` according to self.arg_steps
        and self.arg_derivatives. Supports only the backward difference w.r.t.
        time."""
        # Evaluate the variable at the step/derivative parsed from arg_str.
        name = variable.name
        return variable(step=self.arg_steps[name],
                        derivative=self.arg_derivatives[name])
    def get_approximation(self, variable, get_saved=False):
        """
        Return approximation corresponding to `variable`. Also return
        the corresponding geometry (actual or saved, according to
        `get_saved`).
        """
        geo, _, key = self.get_mapping(variable, get_saved=get_saved,
                                       return_key=True)
        # The mapping key's third item is the element group index.
        ig = key[2]
        ap = variable.get_approximation(ig)
        return ap, geo
def get_variables(self, as_list=True):
if as_list:
variables = self.get_args_by_name(self.names.variable)
else:
variables = {}
for var in self.get_args_by_name(self.names.variable):
variables[var.name] = var
return variables
def get_virtual_variable(self):
aux = self.get_args_by_name(self.names.virtual)
if len(aux) == 1:
var = aux[0]
else:
var = None
return var
def get_state_variables(self, unknown_only=False):
variables = self.get_args_by_name(self.names.state)
if unknown_only:
variables = [var for var in variables
if (var.kind == 'unknown') and
(self.arg_steps[var.name] == 0)]
return variables
    def get_parameter_variables(self):
        """Return the parameter variable arguments of the term."""
        return self.get_args_by_name(self.names.parameter)
def get_materials(self, join=False):
materials = self.get_args_by_name(self.names.material)
for mat in materials:
if mat[0] is None:
materials.remove(mat)
if join:
materials = list(set(mat[0] for mat in materials))
return materials
    def get_qp_key(self):
        """
        Return a key identifying uniquely the term quadrature points.

        The key combines the region name with the integral name.
        """
        return (self.region.name, self.integral.name)
def get_physical_qps(self):
"""
Get physical quadrature points corresponding to the term region
and integral.
"""
from sfepy.fem.mappings import get_physical_qps, PhysicalQPs
if self.integration == 'point':
phys_qps = PhysicalQPs(self.region.igs)
else:
phys_qps = get_physical_qps(self.region, self.integral)
return phys_qps
def get_mapping(self, variable, get_saved=False, return_key=False):
"""
Get the reference mapping from a variable.
Notes
-----
This is a convenience wrapper of Field.get_mapping() that
initializes the arguments using the term data.
"""
integration = self.geometry_types[variable.name]
is_trace = self.arg_traces[variable.name]
if is_trace:
region, ig_map, ig_map_i = self.region.get_mirror_region()
ig = ig_map_i[self.char_fun.ig]
else:
region = self.region
ig = self.char_fun.ig
out = variable.field.get_mapping(ig, region,
self.integral, integration,
get_saved=get_saved,
return_key=return_key)
return out
def get_data_shape(self, variable):
"""
Get data shape information from variable.
Notes
-----
This is a convenience wrapper of FieldVariable.get_data_shape() that
initializes the arguments using the term data.
"""
integration = self.geometry_types[variable.name]
is_trace = self.arg_traces[variable.name]
if is_trace:
region, ig_map, ig_map_i = self.region.get_mirror_region()
ig = ig_map_i[self.char_fun.ig]
else:
region = self.region
ig = self.char_fun.ig
out = variable.get_data_shape(ig, self.integral,
integration, region.name)
return out
def get(self, variable, quantity_name, bf=None, integration=None,
step=None, time_derivative=None):
"""
Get the named quantity related to the variable.
Notes
-----
This is a convenience wrapper of Variable.evaluate() that
initializes the arguments using the term data.
"""
name = variable.name
step = get_default(step, self.arg_steps[name])
time_derivative = get_default(time_derivative,
self.arg_derivatives[name])
integration = get_default(integration, self.geometry_types[name])
data = variable.evaluate(self.char_fun.ig, mode=quantity_name,
region=self.region, integral=self.integral,
integration=integration,
step=step, time_derivative=time_derivative,
is_trace=self.arg_traces[name], bf=bf)
return data
    def check_shapes(self, *args, **kwargs):
        """
        Default implementation of function to check term argument shapes
        at run-time.

        The base class accepts any shapes; subclasses override this to
        validate their argument shapes.
        """
        pass
    def standalone_setup(self):
        """
        Minimal setup needed to evaluate the term outside an Equations
        object: set up the DOF connectivities and update the term
        materials (with no time stepper).
        """
        from sfepy.fem import setup_dof_conns
        conn_info = {'aux' : self.get_conn_info()}
        setup_dof_conns(conn_info)
        materials = self.get_materials(join=True)
        for mat in materials:
            mat.time_update(None, [Struct(terms=[self])])
def call_get_fargs(self, args, kwargs):
try:
fargs = self.get_fargs(*args, **kwargs)
except RuntimeError:
terms.errclear()
raise ValueError
return fargs
    def call_function(self, out, fargs):
        """
        Call the low-level term function, filling `out` in place.

        A ``RuntimeError`` raised by the extension code, or a nonzero
        return status, is translated into ``ValueError`` after clearing
        the extension module error state.  On success the (zero) status
        is returned.
        """
        try:
            status = self.function(out, *fargs)
        except RuntimeError:
            terms.errclear()
            raise ValueError
        if status:
            terms.errclear()
            raise ValueError('term evaluation failed! (%s)' % self.name)
        return status
def eval_real(self, shape, fargs, mode='eval', term_mode=None,
diff_var=None, **kwargs):
out = nm.empty(shape, dtype=nm.float64)
if mode == 'eval':
status = self.call_function(out, fargs)
# Sum over elements but not over components.
out1 = nm.sum(out, 0).squeeze()
return out1, status
else:
status = self.call_function(out, fargs)
return out, status
    def eval_complex(self, shape, fargs, mode='eval', term_mode=None,
                     diff_var=None, **kwargs):
        """
        Evaluate a complex-valued term by calling the real-valued term
        function separately on the real/imaginary parts of the arguments
        (as split by split_complex_args()) and recombining the results.

        The number of entries in the split arguments determines which
        combinations are computed: 'r'/'i' parts always, plus the mixed
        'ir'/'ri' parts in 'eval' mode.
        """
        rout = nm.empty(shape, dtype=nm.float64)
        fargsd = split_complex_args(fargs)
        # Assuming linear forms. Then the matrix is the
        # same both for real and imaginary part.
        rstatus = self.call_function(rout, fargsd['r'])
        if (diff_var is None) and len(fargsd) >= 2:
            iout = nm.empty(shape, dtype=nm.float64)
            istatus = self.call_function(iout, fargsd['i'])
            if mode == 'eval' and len(fargsd) >= 4:
                irout = nm.empty(shape, dtype=nm.float64)
                irstatus = self.call_function(irout, fargsd['ir'])
                riout = nm.empty(shape, dtype=nm.float64)
                ristatus = self.call_function(riout, fargsd['ri'])
                # Real part from (r - i), imaginary part from the mixed
                # (ri + ir) contributions.
                out = (rout - iout) + (riout + irout) * 1j
                status = rstatus or istatus or ristatus or irstatus
            else:
                out = rout + 1j * iout
                status = rstatus or istatus
        else:
            out, status = rout, rstatus
        if mode == 'eval':
            # Sum over elements but not over components.
            out1 = nm.sum(out, 0).squeeze()
            return out1, status
        else:
            return out, status
    def evaluate(self, mode='eval', diff_var=None,
                 standalone=True, ret_status=False, **kwargs):
        """
        Evaluate the term.

        Parameters
        ----------
        mode : 'eval' (default), 'el_avg', 'el', 'qp' or 'weak'
            The term evaluation mode.
        diff_var : str, optional
            The name of the variable to differentiate with respect to
            (used in 'weak' mode).
        standalone : bool
            If True, run standalone_setup() first so the term can be
            evaluated outside an Equations object.
        ret_status : bool
            If True, append the evaluation status to the output.
        **kwargs : keyword arguments
            The term argument values; 'term_mode' is popped out and
            passed on separately.

        Returns
        -------
        val : float or array
            In 'eval' mode, the term returns a single value (the
            integral, it does not need to be a scalar), while in 'weak'
            mode it returns an array for each element.
        status : int, optional
            The flag indicating evaluation success (0) or failure
            (nonzero). Only provided if `ret_status` is True.
        iels : array of ints, optional
            The local elements indices in 'weak' mode. Only provided in
            non-'eval' modes.
        """
        if standalone:
            self.standalone_setup()
        # Do not mutate the caller's kwargs when popping 'term_mode'.
        kwargs = kwargs.copy()
        term_mode = kwargs.pop('term_mode', None)
        if mode == 'eval':
            # Accumulate the integral value over all element groups.
            val = 0.0
            status = 0
            for ig in self.iter_groups():
                args = self.get_args(**kwargs)
                self.check_shapes(*args)
                _args = tuple(args) + (mode, term_mode, diff_var)
                fargs = self.call_get_fargs(_args, kwargs)
                shape, dtype = self.get_eval_shape(*_args, **kwargs)
                if dtype == nm.float64:
                    _v, stat = self.eval_real(shape, fargs, mode, term_mode,
                                              **kwargs)
                elif dtype == nm.complex128:
                    _v, stat = self.eval_complex(shape, fargs, mode, term_mode,
                                                 **kwargs)
                else:
                    raise ValueError('unsupported term dtype! (%s)' % dtype)
                val += _v
                status += stat
            val *= self.sign
        elif mode in ('el_avg', 'el', 'qp'):
            # Per-element / per-qp values: stack the group results and
            # record (group, cell) index pairs for each row.
            vals = None
            iels = nm.empty((0, 2), dtype=nm.int32)
            status = 0
            for ig in self.iter_groups():
                args = self.get_args(**kwargs)
                self.check_shapes(*args)
                _args = tuple(args) + (mode, term_mode, diff_var)
                fargs = self.call_get_fargs(_args, kwargs)
                shape, dtype = self.get_eval_shape(*_args, **kwargs)
                if dtype == nm.float64:
                    val, stat = self.eval_real(shape, fargs, mode, term_mode,
                                               **kwargs)
                elif dtype == nm.complex128:
                    val, stat = self.eval_complex(shape, fargs, mode, term_mode,
                                                  **kwargs)
                if vals is None:
                    vals = val
                else:
                    vals = nm.r_[vals, val]
                _iels = self.get_assembling_cells(val.shape)
                aux = nm.c_[nm.repeat(ig, _iels.shape[0])[:,None],
                            _iels[:,None]]
                iels = nm.r_[iels, aux]
                status += stat
            vals *= self.sign
        elif mode == 'weak':
            # Residual (diff_var is None) or tangent matrix
            # contributions, one array per element group.
            vals = []
            iels = []
            status = 0
            varr = self.get_virtual_variable()
            if diff_var is not None:
                varc = self.get_variables(as_list=False)[diff_var]
            for ig in self.iter_groups():
                args = self.get_args(**kwargs)
                self.check_shapes(*args)
                _args = tuple(args) + (mode, term_mode, diff_var)
                fargs = self.call_get_fargs(_args, kwargs)
                n_elr, n_qpr, dim, n_enr, n_cr = self.get_data_shape(varr)
                n_row = n_cr * n_enr
                if diff_var is None:
                    # Residual vector: one column.
                    shape = (n_elr, 1, n_row, 1)
                else:
                    n_elc, n_qpc, dim, n_enc, n_cc = self.get_data_shape(varc)
                    n_col = n_cc * n_enc
                    shape = (n_elr, 1, n_row, n_col)
                if varr.dtype == nm.float64:
                    val, stat = self.eval_real(shape, fargs, mode, term_mode,
                                               diff_var, **kwargs)
                elif varr.dtype == nm.complex128:
                    val, stat = self.eval_complex(shape, fargs, mode, term_mode,
                                                  diff_var, **kwargs)
                else:
                    raise ValueError('unsupported term dtype! (%s)'
                                     % varr.dtype)
                vals.append(self.sign * val)
                iels.append((ig, self.get_assembling_cells(val.shape)))
                status += stat
        # Setup return value.
        if mode == 'eval':
            out = (val,)
        else:
            out = (vals, iels)
        if goptions['check_term_finiteness']:
            # Guard against NaN/inf values leaking into the assembly.
            assert_(nm.isfinite(out[0]).all(),
                    msg='%+.2e * %s.%d.%s(%s) term values not finite!'
                    % (self.sign, self.name, self.integral.order,
                       self.region.name, self.arg_str))
        if ret_status:
            out = out + (status,)
        if len(out) == 1:
            out = out[0]
        return out
    def assemble_to(self, asm_obj, val, iels, mode='vector', diff_var=None):
        """
        Assemble the per-group evaluation results into a global vector
        or CSR matrix, in place.

        Parameters
        ----------
        asm_obj : array or sparse (CSR) matrix
            The global object to assemble into.
        val : list of arrays
            Per-group element contributions, as returned by evaluate()
            in 'weak' mode.
        iels : list of (ig, cells) tuples
            The element group index and local cell indices matching the
            entries of `val`.
        mode : 'vector' or 'matrix'
            The assembling mode.
        diff_var : variable, optional
            The differentiated (column) variable; required in 'matrix'
            mode (its name indexes arg_traces/arg_derivatives).
        """
        import sfepy.fem.extmods.assemble as asm
        vvar = self.get_virtual_variable()
        dc_type = self.get_dof_conn_type()
        if mode == 'vector':
            if asm_obj.dtype == nm.float64:
                assemble = asm.assemble_vector
            else:
                assert_(asm_obj.dtype == nm.complex128)
                assemble = asm.assemble_vector_complex
                # Promote any real contributions so the complex
                # assembler can consume them.
                for ii in range(len(val)):
                    if not(val[ii].dtype == nm.complex128):
                        val[ii] = nm.complex128(val[ii])
            for ii, (ig, _iels) in enumerate(iels):
                vec_in_els = val[ii]
                dc = vvar.get_dof_conn(dc_type, ig, active=True)
                assert_(vec_in_els.shape[2] == dc.shape[1])
                assemble(asm_obj, vec_in_els, _iels, 1.0, dc)
        elif mode == 'matrix':
            if asm_obj.dtype == nm.float64:
                assemble = asm.assemble_matrix
            else:
                assert_(asm_obj.dtype == nm.complex128)
                assemble = asm.assemble_matrix_complex
            svar = diff_var
            # CSR storage triple filled in place by the C assembler.
            tmd = (asm_obj.data, asm_obj.indptr, asm_obj.indices)
            for ii, (ig, _iels) in enumerate(iels):
                mtx_in_els = val[ii]
                if ((asm_obj.dtype == nm.complex128)
                    and (mtx_in_els.dtype == nm.float64)):
                    mtx_in_els = mtx_in_els.astype(nm.complex128)
                rdc = vvar.get_dof_conn(dc_type, ig, active=True)
                is_trace = self.arg_traces[svar.name]
                cdc = svar.get_dof_conn(dc_type, ig, active=True,
                                        is_trace=is_trace)
                assert_(mtx_in_els.shape[2:] == (rdc.shape[1], cdc.shape[1]))
                sign = 1.0
                if self.arg_derivatives[svar.name]:
                    # Backward-difference time derivative: scale by
                    # 1/dt, except in the first quasistatic step where
                    # the contribution is zeroed out.
                    if not self.is_quasistatic or (self.step > 0):
                        sign *= 1.0 / self.dt
                    else:
                        sign = 0.0
                assemble(tmd[0], tmd[1], tmd[2], mtx_in_els,
                         _iels, sign, rdc, cdc)
        else:
            raise ValueError('unknown assembling mode! (%s)' % mode)
| [
"cimrman3@ntc.zcu.cz"
] | cimrman3@ntc.zcu.cz |
195f63b7fb6b96f891292d287a89d7dccdcbe323 | 0866c751f954ac0799dc1edf88c5294791a2c5a9 | /order/urls.py | 3c56ff433a1e3339c99a2a2748434cee65e83a07 | [] | no_license | ChubakSopubekov/plovo | cd4b3bb3721b9460d32d0a093e69d6fb2d655abe | af103fe20c4e7dbebbc26cd06592163009c1d37a | refs/heads/main | 2023-04-27T14:37:44.100420 | 2021-05-17T14:17:36 | 2021-05-17T14:17:36 | 361,718,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | from django.urls import path
# URL routes for the order app: list/create at the collection root and a
# detail view keyed by the order's integer primary key.
from .views import OrderListCreateView, OrderView
urlpatterns = [
    path('', OrderListCreateView.as_view(), name='order-list-create'),
    path('<int:pk>/', OrderView.as_view(), name='order'),
] | [
"sopubekov96@gmail.com"
] | sopubekov96@gmail.com |
24d3f7246c7d2d37f6a7c022003cc680690ab32e | f0b5d51f23c6bab2e01e353141ea89d1f07f8b82 | /catkin_ws/build/rosserial/rosserial_msgs/cmake/rosserial_msgs-genmsg-context.py | c0023794e87a9060bb15a6573580190b0274e95a | [] | no_license | rignitc/semi_humanoid_robot | 84a3a540707392149c380d751838b7ec0a98735d | d6e04258ae57e7627bfd56952bc717e4b069ad1f | refs/heads/master | 2020-08-07T08:26:28.988420 | 2019-10-07T12:01:10 | 2019-10-07T12:01:10 | 213,360,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
# ';'-separated lists of message/service definition files for the package.
messages_str = "/home/nvidia/rigguv2/catkin_ws/src/rosserial/rosserial_msgs/msg/Log.msg;/home/nvidia/rigguv2/catkin_ws/src/rosserial/rosserial_msgs/msg/TopicInfo.msg"
services_str = "/home/nvidia/rigguv2/catkin_ws/src/rosserial/rosserial_msgs/srv/RequestMessageInfo.srv;/home/nvidia/rigguv2/catkin_ws/src/rosserial/rosserial_msgs/srv/RequestParam.srv;/home/nvidia/rigguv2/catkin_ws/src/rosserial/rosserial_msgs/srv/RequestServiceInfo.srv"
pkg_name = "rosserial_msgs"
# No message dependencies on other packages.
dependencies_str = ""
# Code generators to run for these definitions.
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "rosserial_msgs;/home/nvidia/rigguv2/catkin_ws/src/rosserial/rosserial_msgs/msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
# '' == 'TRUE' evaluates to False, i.e. no static sources.
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"rig@nitc.ac.in"
] | rig@nitc.ac.in |
f0bc2d2a2876743d3b1f731be6382a0665dacae3 | 6a16e5e56f38ff4ed1fc02b5a6497a8ac28e51f0 | /__files archives/_helena_final_script-3.py | bf602bbfbbeb4bd872c44b2aeabd780e54252058 | [] | no_license | aslinurtaskin/SHEDIO | 32a3e9a927432e376e8ec38e279dd61bbc5050ce | 659a768ed985843d8499340bfbbb06474945dc69 | refs/heads/master | 2023-03-23T04:49:13.914458 | 2021-03-20T13:02:08 | 2021-03-20T13:02:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,359 | py | import csv
import sys
import cv2 as cv
import numpy as np
import argparse
import random as rng
import math
import matplotlib.pyplot as plt
import turtle
from PIL import Image
from numpy import savetxt
# Fixed seed so the (random) drawing colors are reproducible between runs.
rng.seed(12345)
# Define functions ___________________________________________________
# NOTE(review): absolute, machine-specific path - consider a CLI argument.
img_path = ('/Users/helenahomsi/Desktop/IAAC/07 TERM 02/HARDWARE II/SHEDIO/Chapter 02 - final/image-01.jpg')
def load_image(img_path):
    """
    Read the image at `img_path` with OpenCV and return it.

    Exits the script when the file cannot be read - ``cv.imread``
    returns ``None`` on failure instead of raising.
    """
    img = cv.imread(img_path)
    if img is None:
        # The original referenced ``args.input`` (a NameError - no
        # ``args`` exists here) and exited with status 0; report the
        # actual path and signal failure with a non-zero exit code.
        print('Image not found:', img_path)
        sys.exit(1)
    # Only announce success after the None check.
    print('\n', 'Image is loaded!', '\n')
    return img
def process_image(img):
    """Convert `img` to a slightly blurred black/white binary image."""
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    blurred = cv.blur(gray, (3, 3))
    # cv.threshold returns (threshold_value, image); keep the image.
    _, binary = cv.threshold(blurred, 128, 255, cv.THRESH_BINARY)
    return binary
def contours(img_binary, i):
    """
    Detect, simplify and display the contours of `img_binary`.

    `i` is the Canny edge threshold.  Shows the original image and the
    drawn contours in OpenCV windows (blocks until a key is pressed)
    and returns ``(list_contours, points_in_X, points_in_Y)`` - the
    simplified contours as nested lists plus the per-contour x and y
    coordinate lists.
    """
    threshold = i
    # Detect edges using Canny
    canny_output = cv.Canny(img_binary, threshold, threshold * 2)
    # Find contours
    contours, hierarchy = cv.findContours(canny_output, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    # Draw contours
    drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)
    # print('Contours are in the form of a: ', type(contours))
    # print('\n')
    # Simplify contours
    # Douglas-Peucker simplification with a tolerance proportional to
    # each contour's perimeter.
    simp_contours = []
    for c in contours:
        epsilon = 0.001*cv.arcLength(c,True)
        approx_cnt = cv.approxPolyDP(c,epsilon,True)
        simp_contours.append(approx_cnt)
    # print('Simplified contours are: ', simp_contours)
    print(type(simp_contours))
    print('\n')
    # NOTE(review): randint(255, 255) always returns 255, so every
    # contour is drawn white - was randint(0, 255) intended?
    for i in range(len(simp_contours)):
        color = (rng.randint(255,255), rng.randint(255,255), rng.randint(255,255))
        cv.drawContours(drawing, simp_contours, i, color, 1, cv.LINE_8, hierarchy, 0)
    # NOTE(review): displays the module-global ``img``, not a parameter.
    cv.imshow('Image', img)
    cv.imshow('Contours', drawing)
    cv.waitKey()
    # Convert the numpy contour arrays into plain nested lists.
    list_contours = []
    for l in simp_contours:
        sublists = l.tolist()
        list_contours.append(sublists)
    # print('List of contours is: ', list_contours)
    # print('Type of contours is: ', type(list_contours))
    # print('Length of contours is: ', len(list_contours))
    # print('\n')
    # List of points in X
    points_in_X = []
    for i in list_contours:
        coordinates_X =[]
        for a in i:
            coordinates_X.append(a[0][0])
        points_in_X.append(coordinates_X)
    print('Points in X are: ', points_in_X)
    print('\n')
    # List of points in Y
    points_in_Y = []
    for i in list_contours:
        coordinates_Y =[]
        for a in i:
            coordinates_Y.append(a[0][1])
        points_in_Y.append(coordinates_Y)
    print('Points in Y are: ', points_in_Y)
    print('\n')
    return list_contours, points_in_X, points_in_Y
def calculate(points_in_X, points_in_Y):
    """
    Convert per-contour pixel coordinates into plotter motion data.

    For each contour the per-segment distances (between each point and
    the next, with a sentinel value of 100 appended at the end) are
    computed, scaled by ``pixel_ratio`` (pixels per mm) and rounded,
    and an angle value is derived per segment.  Pen-up travel entries
    are interleaved between contours and prepended before the first
    one.  Returns ``(full_list_D, full_list_A)`` - the rounded
    distances and angles, as lists of per-contour lists.
    """
    # NOTE(review): unused variable.
    current_pt = [0,0]
    pixel_ratio = 2 # in mm
    # NOTE(review): these two copies are immediately discarded below.
    X_next_coordinates = points_in_X.copy()
    Y_next_coordinates = points_in_Y.copy()
    # print('Starting list for X is: ', points_in_X)
    # print('\n')
    # print('Starting list for Y is: ', points_in_Y)
    # print('\n')
    # For points in X
    # Shift each contour left by one and append a sentinel (100) so the
    # "next point" lists align with the original lists.
    X_next_coordinates = []
    for i in points_in_X:
        i = i[1:]
        i.append(100)
        X_next_coordinates.append(i)
    # print('Next points are: ', X_next_coordinates)
    # print('\n')
    # For points in Y
    Y_next_coordinates = []
    for i in points_in_Y:
        i = i[1:]
        i.append(100)
        Y_next_coordinates.append(i)
    # print('New points in Y: ', Y_next_coordinates)
    # print('\n')
    # Calculate distances
    # Distances in X
    distance_X = []
    for a in range(len(points_in_X)):
        distances = []
        for b in range(len(points_in_X[a])):
            new_coordinates = abs(points_in_X[a][b] - X_next_coordinates[a][b])
            distances.append(new_coordinates)
        distance_X.append(distances)
    # print('Distance in X: ', distance_X)
    # print('\n')
    # Zero deltas replaced by 1 to avoid division by zero later.
    distance_X_Error = []
    for dis_x in distance_X:
        items = []
        for value in dis_x:
            if value == 0:
                value = 1
            items.append(value)
        distance_X_Error.append(items)
    # print('Distance X Error: ', distance_X_Error)
    # Distances in Y
    distance_Y = []
    for a in range(len(points_in_Y)):
        new_coordinates = []
        for b in range(len(points_in_Y[a])):
            new_coordinate = abs(points_in_Y[a][b] - Y_next_coordinates[a][b])
            new_coordinates.append(new_coordinate)
        distance_Y.append(new_coordinates)
    # print('Distance in Y: ', distance_Y)
    # print('\n')
    distance_Y_Error = []
    for dis_y in distance_Y:
        items = []
        for value in dis_y:
            if value == 0:
                value = 1
            items.append(value)
        distance_Y_Error.append(items)
    # print('Distance Y Error: ', distance_Y_Error)
    # Distances squared in X
    distance_X_P2 = []
    for a in range(len(distance_X)):
        sq = []
        for b in range(len(distance_X[a])):
            new_coordinates = abs(distance_X[a][b]**2)
            sq.append(new_coordinates)
        distance_X_P2.append(sq)
    # print('Distances X squared are: ', distance_X_P2)
    # print('\n')
    # Distances squared in Y
    distance_Y_P2 = []
    for a in range(len(distance_Y)):
        sq = []
        for b in range(len(distance_Y[a])):
            new_coordinates = abs(distance_Y[a][b]**2)
            sq.append(new_coordinates)
        distance_Y_P2.append(sq)
    # print('Distances Y squared are: ', distance_Y_P2)
    # print('\n')
    # Distances squared added
    # dx^2 + dy^2 per segment - the Euclidean distance squared.
    distance_root = []
    for a in range(len(distance_X_P2)):
        sq_add = []
        for b in range(len(distance_X_P2[a])):
            new_coordinates = abs(distance_X_P2[a][b] + distance_Y_P2[a][b])
            sq_add.append(new_coordinates)
        distance_root.append(sq_add)
    # print('Distances squared added are: ', distance_root)
    # print('\n')
    # Distances to move Forward
    distance_d = []
    for d in distance_root:
        distances = []
        for i in d:
            distances.append(math.sqrt(i))
        distance_d.append(distances)
    # print('Distances to move F in image are : ', distance_d)
    # print('\n')
    # Scaled distances to move Forward
    distances = []
    for d_r in distance_d:
        move = []
        for i in d_r:
            move.append((i/pixel_ratio))
        distances.append(move)
    # print('Real distances to move F are : ', distances)
    # print('\n')
    # Calculate angles
    # Slope magnitude dy/dx per segment, falling back to the
    # zero-protected dx values on division by zero.
    distance_a = []
    for a in range(len(distance_X)):
        angle_value = []
        for b in range(len(distance_X[a])):
            try:
                angle = abs(distance_Y[a][b] / distance_X[a][b])
            except ZeroDivisionError:
                angle = abs(distance_Y[a][b] / distance_X_Error[a][b])
                pass
            angle_value.append(angle)
        distance_a.append(angle_value)
    # Angles to Rotate
    # NOTE(review): math.tan() of the slope ratio - the segment angle
    # would normally be math.atan(dy/dx); verify the intent.
    angles_tan = []
    for a in range(len(distance_a)):
        tan_angle = []
        for b in range(len(distance_a[a])):
            value = abs(math.tan(distance_a[a][b]))
            tan_angle.append(value)
        angles_tan.append(tan_angle)
    # print('Angles for rotations in R are: ', angles_tan)
    # print('\n')
    # Rounded values for distances
    rounded_distances = []
    for i in distances:
        distances_round = [round(num) for num in i]
        rounded_distances.append(distances_round)
    # print('Rounded distances are: ', rounded_distances)
    # print('\n')
    # Rounded values for angle
    rounded_angles = []
    for a in angles_tan:
        angles_round_tan = [round(num) for num in a]
        rounded_angles.append(angles_round_tan)
    # print('Rounded angles are: ', rounded_angles)
    # print('\n')
    # Add in between distances to travel with Pen UP
    # NOTE(review): two entries are appended per contour (the value and
    # a trailing [0]), so in_betweens_d is twice as long as the contour
    # list; the indexing loop below only consumes the first half.
    in_betweens_d = []
    last_values_dx = [dx[-1] for dx in points_in_X]
    last_values_dy = [dy[-1] for dy in points_in_Y]
    zipped_dxdy = zip(last_values_dx, last_values_dy)
    for x, y in zipped_dxdy:
        extra = [0]
        empty = []
        # NOTE(review): x**2 - y**2 where a Euclidean distance would use
        # x**2 + y**2 (cf. Foperation1 below) - confirm.
        operation1 = (abs(x**2 - y**2))
        operation2 = math.sqrt(operation1)
        operation3 = operation2 / pixel_ratio
        operation4 = round(operation3)
        empty.append(operation4)
        in_betweens_d.append(empty)
        in_betweens_d.append(extra)
    print('In betweens D are: ', in_betweens_d)
    print('\n')
    full_list_D = []
    for d in range(len(rounded_distances)):
        full_list_D.append(rounded_distances[d])
        full_list_D.append(in_betweens_d[d])
    # Add FIRST distance to travel with Pen UP
    # Distance from the origin to the first contour point, in mm.
    first_d = []
    first_value_dx = points_in_X[0][0]
    first_value_dy = points_in_Y[0][0]
    Foperation1 = (abs(first_value_dx**2 + first_value_dy**2))
    Foperation2 = math.sqrt(Foperation1)
    Foperation3 = Foperation2 / pixel_ratio
    Foperation4 = round(Foperation3)
    first_d.append(Foperation4)
    print('First Value to travel is: ', first_d)
    print('\n')
    full_list_D.insert(0, first_d)
    print('Full list of Distances is: ', full_list_D)
    print('\n')
    # Add in between angles to rotate with Pen UP
    # NOTE(review): last_values_ax/ay are computed but the zip below
    # reuses last_values_dx/dy - likely a copy-paste slip.
    in_betweens_a = []
    last_values_ax = [dx[-1] for dx in points_in_X]
    last_values_ay = [dy[-1] for dy in points_in_Y]
    zipped_dxdy = zip(last_values_dx, last_values_dy)
    for x, y in zipped_dxdy:
        extra = [0]
        empty = []
        operation1 = (x / y)
        operation2 = math.tan(operation1)
        operation3 = round(abs(operation2))
        empty.append(operation3)
        in_betweens_a.append(empty)
        in_betweens_a.append(extra)
    print('In betweens A are: ', in_betweens_a)
    print('\n')
    full_list_A = []
    for a in range(len(rounded_angles)):
        full_list_A.append(rounded_angles[a])
        full_list_A.append(in_betweens_a[a])
    print('Full List of Angles is: ', full_list_A)
    print('\n')
    # Add FIRST angle to rotate with Pen UP
    first_a = []
    first_value_ax = points_in_X[0][0]
    first_value_ay = points_in_Y[0][0]
    operation1_ = first_value_ay / first_value_ax
    operation2_ = math.tan(operation1_)
    operation3_ = round(operation2_)
    first_a.append(operation3_)
    print('First Value to rotate is: ', first_a)
    print('\n')
    full_list_A.insert(0, first_a)
    print('Full list of Angles is: ', full_list_A)
    print('\n')
    return full_list_D, full_list_A
def get_instructions(rounded_distances, rounded_angles):
    """
    Turn rounded distances and angles into plotter commands.

    Each distance d becomes 'F<d>' (move forward, mm) and each angle a
    becomes 'R<a>' (turn right, degrees).  Per contour the commands are
    interleaved as R, F, R, F, ... and finally flattened into a single
    list of instruction strings, which is returned.
    """
    # Decide on common language with robot (check with Jeo)
    # F = Forward (takes distance in mm)
    # R = Turn right (takes angle in degrees)
    # U = PenUp
    # D = PenDown
    strings_d = [['F' + str(num) for num in contour]
                 for contour in rounded_distances]
    print('Distances strings are: ', strings_d)
    print('\n')
    strings_a = [['R' + str(num) for num in contour]
                 for contour in rounded_angles]
    print('Angles strings are: ', strings_a)
    print('\n')
    # Interleave rotation and move commands per contour.
    combined = []
    for idx in range(len(strings_a)):
        interleaved = []
        for pair in zip(strings_a[idx], strings_d[idx]):
            interleaved.extend(pair)
        combined.append(interleaved)
    print ('Combined List is: ', combined)
    print('\n')
    instructions = [cmd for contour in combined for cmd in contour]
    print ('Flat List is: ', instructions)
    return instructions
# Run code ____________________________________________________________
# Load Image
img = load_image(img_path)
# Process Image
img_binary = process_image(img)
# Contours
# Canny threshold 0: every gradient edge is kept.
list_contours, points_in_X, points_in_Y = contours(img_binary,0)
# Calculations
distances_round, angle_round_tan = calculate(points_in_X, points_in_Y)
# Get Instructions
get_instructions(distances_round, angle_round_tan)
print('\n', 'Ma chérie tout va bien', '\n') | [
"helenahomsi17@gmail.com"
] | helenahomsi17@gmail.com |
792b61efe2adbe81bfa8e2d488a1dbf4bd884444 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-das/huaweicloudsdkdas/v3/model/export_top_sql_templates_details_response.py | 1286a8c3db19b4ec0f54ea95567708a585fd8a62 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,619 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ExportTopSqlTemplatesDetailsResponse(SdkResponse):
    """
    Response model for exporting top SQL template details.

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    sensitive_list = []
    openapi_types = {
        'top_sql_templates': 'list[TopSqlTemplate]',
        'total_count': 'int'
    }
    attribute_map = {
        'top_sql_templates': 'top_sql_templates',
        'total_count': 'total_count'
    }
    def __init__(self, top_sql_templates=None, total_count=None):
        """ExportTopSqlTemplatesDetailsResponse

        The model defined in huaweicloud sdk

        :param top_sql_templates: List of SQL templates.
        :type top_sql_templates: list[:class:`huaweicloudsdkdas.v3.TopSqlTemplate`]
        :param total_count: Total number of SQL templates.
        :type total_count: int
        """
        super(ExportTopSqlTemplatesDetailsResponse, self).__init__()
        self._top_sql_templates = None
        self._total_count = None
        self.discriminator = None
        if top_sql_templates is not None:
            self.top_sql_templates = top_sql_templates
        if total_count is not None:
            self.total_count = total_count
    @property
    def top_sql_templates(self):
        """Gets the top_sql_templates of this ExportTopSqlTemplatesDetailsResponse.

        List of SQL templates.

        :return: The top_sql_templates of this ExportTopSqlTemplatesDetailsResponse.
        :rtype: list[:class:`huaweicloudsdkdas.v3.TopSqlTemplate`]
        """
        return self._top_sql_templates
    @top_sql_templates.setter
    def top_sql_templates(self, top_sql_templates):
        """Sets the top_sql_templates of this ExportTopSqlTemplatesDetailsResponse.

        List of SQL templates.

        :param top_sql_templates: The top_sql_templates of this ExportTopSqlTemplatesDetailsResponse.
        :type top_sql_templates: list[:class:`huaweicloudsdkdas.v3.TopSqlTemplate`]
        """
        self._top_sql_templates = top_sql_templates
    @property
    def total_count(self):
        """Gets the total_count of this ExportTopSqlTemplatesDetailsResponse.

        Total number of SQL templates.

        :return: The total_count of this ExportTopSqlTemplatesDetailsResponse.
        :rtype: int
        """
        return self._total_count
    @total_count.setter
    def total_count(self, total_count):
        """Sets the total_count of this ExportTopSqlTemplatesDetailsResponse.

        Total number of SQL templates.

        :param total_count: The total_count of this ExportTopSqlTemplatesDetailsResponse.
        :type total_count: int
        """
        self._total_count = total_count
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts;
        # sensitive attributes are masked.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 as the default encoding so
            # non-ASCII content can be serialized.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ExportTopSqlTemplatesDetailsResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
64c2c471974ea13afac4cdef5cd02c11de405ea3 | b49df6b6ed1ef168f7cf69d60da58b06d0b69853 | /dowhileloop.py | 2838b65ef0a93109fbe0f14a86a0350bb720b647 | [] | no_license | justus-migosi/do-while-loop | cefd6cd5c4baa75a5158cfc382c31f2d7385ecaf | 097c5f4201c4ec676eda5e1c17a0d96dbd1e7ae3 | refs/heads/main | 2023-03-23T14:37:01.878627 | 2021-03-07T14:55:47 | 2021-03-07T14:55:47 | 345,373,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | x=1
while True:
print(x)
x=x+1
if(x>10):
break | [
"noreply@github.com"
] | noreply@github.com |
19fff9aea639d7c8ef19c6c6ba38178f7a3e5a7a | 0e6c8439bfbe6bb55638e2c50de516146729a8a3 | /website/addons/dataverse/tests/test_provider.py | cef5164276c62e550e11ddaa98492b40a5e9bbc0 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | sf2ne/Playground | 5385e2cb0aa5172e1037ec99eaf36c5239707ff4 | 95b2d222d7ac43baca0249acbfc34e043d6a95b3 | refs/heads/testWebFlowers | 2022-12-15T07:44:08.371300 | 2016-03-11T16:32:06 | 2016-03-11T16:32:06 | 53,675,768 | 0 | 0 | Apache-2.0 | 2022-11-22T00:27:34 | 2016-03-11T15:17:39 | Python | UTF-8 | Python | false | false | 1,545 | py | import httplib as http
from nose.tools import * # noqa
import mock
from framework.exceptions import HTTPError
from website.addons.dataverse.tests.utils import (
create_mock_connection, DataverseAddonTestCase, create_external_account,
)
from website.addons.dataverse.provider import DataverseProvider
class TestDataverseSerializerConfig(DataverseAddonTestCase):
    """Tests for DataverseProvider account handling."""
    def setUp(self):
        super(TestDataverseSerializerConfig, self).setUp()
        self.provider = DataverseProvider()
    def test_default(self):
        # A fresh provider has no external account attached.
        assert_is_none(self.provider.account)
    @mock.patch('website.addons.dataverse.client._connect')
    def test_add_user_auth(self, mock_connect):
        # The Dataverse connection is mocked out - no network access.
        mock_connect.return_value = create_mock_connection()
        external_account = create_external_account()
        self.user.external_accounts.append(external_account)
        self.user.save()
        self.provider.add_user_auth(
            self.node_settings,
            self.user,
            external_account._id,
        )
        # The node settings must now reference both the linked account
        # and the user's addon settings.
        assert_equal(self.node_settings.external_account, external_account)
        assert_equal(self.node_settings.user_settings, self.user_settings)
    def test_add_user_auth_not_in_user_external_accounts(self):
        # Linking an account the user does not own must be forbidden.
        external_account = create_external_account()
        with assert_raises(HTTPError) as e:
            self.provider.add_user_auth(
                self.node_settings,
                self.user,
                external_account._id,
            )
        assert_equal(e.status_code, http.FORBIDDEN)
| [
"rliebz@gmail.com"
] | rliebz@gmail.com |
8d06459eaeb52a36c67f79167e259238fa29ad11 | 2b2464cd7a9c83bc1dd2c3bef144a8421fcd8659 | /61a-su20-practice-mt/question_3/tests/question_3.py | b80bc4fc23584abbd11add72b2b6c742f45848f8 | [] | no_license | royh02/Past-Exams | 6f63853a44341187a0fce2cd7cda9b73e812df91 | 3884fbe45b210cba85427e6005054d1dbf96f8a7 | refs/heads/master | 2022-11-20T19:30:18.356589 | 2020-07-16T03:48:50 | 2020-07-16T03:48:50 | 277,603,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | test = {
'name': 'question_3',
'points': 6,
'suites': [
{
'cases': [
{
'code': r"""
>>> close(123)
123
>>> close(153)
153
>>> close(1523)
153
>>> close(15123)
1123
>>> close(11111111)
11
>>> close(985357)
557
>>> close(14735476)
143576
>>> close(812348567)
1234567
""",
'hidden': False
}
],
'scored': True,
'setup': 'from question_3 import *',
'teardown': '',
'type': 'doctest'
}
]
}
| [
"65890703+xroid-x@users.noreply.github.com"
] | 65890703+xroid-x@users.noreply.github.com |
685eea7db453f95d3b09c7e014f28eeee0ba4439 | a8123a86db99b9365b10ba76dd509d58caa7bc10 | /python/practice/start_again/2021/05182021/Day18.3_Darw_a_spriograph.py | 8814fcac3b147a6c0f49245cd49b4fbe21a8a16f | [] | no_license | smohapatra1/scripting | c0404081da8a10e92e7c7baa8b540acc16540e77 | 3628c9109204ad98231ae8ee92b6bfa6b27e93cd | refs/heads/master | 2023-08-22T20:49:50.156979 | 2023-08-22T20:43:03 | 2023-08-22T20:43:03 | 147,619,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #Draw a Spirograph
from turtle import Turtle, Screen
import turtle as t
import random
tim = t.Turtle()
# Allow RGB color tuples with integer channels in the 0-255 range.
t.colormode(255)
tim.speed("fastest")
#Random Color
def random_color():
    """Return a random (r, g, b) color with each channel in 0..255."""
    channels = [random.randint(0, 255) for _ in range(3)]
    return tuple(channels)
def draw_spirograph(size_of_gap):
    """
    Draw a spirograph: repeated circles of radius 100, each tilted by
    `size_of_gap` degrees from the previous one, until a full 360-degree
    sweep is covered.  Each circle gets a random color.
    """
    n_circles = int(360 / size_of_gap)
    for _ in range(n_circles):
        tim.color(random_color())
        tim.circle(100)
        # Rotate the heading so the next circle is tilted.
        tim.setheading(tim.heading() + size_of_gap)
# 10-degree gap -> 36 circles for the full sweep.
draw_spirograph(10)
screen = t.Screen()
# Keep the window open until it is clicked.
screen.exitonclick()
"samarendra.mohapatra121@gmail.com"
] | samarendra.mohapatra121@gmail.com |
bce74c6a733e3a6ec747ca11da5381c33c922752 | 7125244993e00d1663b3752622fbed7ad2b6e5c0 | /locustFiles_basics/MultipleTask.py | 82728d91bcf5fc5f0e56f7bb41ea2aa014875b9f | [] | no_license | dipanjanmt/LocustProject_2 | c0ecd8c1faeca6e88012d0b641eec18c6c6295da | 6c9205636ce0a38e305fcfee0e20669ae9234ece | refs/heads/master | 2023-05-13T06:46:39.761001 | 2021-06-08T18:41:02 | 2021-06-08T18:41:02 | 375,044,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | from locust import User, between, task, SequentialTaskSet
class SearchProduct(SequentialTaskSet):
    """Sequential task set simulating a user searching product categories."""
    @task
    def search_men_products(self):
        # Stand-in for a real request; only logs the simulated action.
        print("Searching men products")
    @task
    def search_kids_products(self):
        print("Searching kids products")
    @task
    def exit_task_execution(self):
        # Stop this task set and hand control back to the parent user.
        self.interrupt()
class ViewCart(SequentialTaskSet):
    """Sequential task set simulating a user inspecting the shopping cart."""
    @task
    def get_cart_items(self):
        # Stand-in for a real request; only logs the simulated action.
        print("Get all cart items")
    @task
    def search_cart_item(self):
        print("Searching item from cart")
    @task
    def exit_task_execution(self):
        # Stop this task set and hand control back to the parent user.
        self.interrupt()
class MyUser(User):
    """Simulated user: waits 1-2s between tasks and picks task sets by weight."""
    wait_time = between(1, 2)
    # Weighted choice: SearchProduct is selected 4x as often as ViewCart.
    tasks = {
        SearchProduct: 4,
        ViewCart: 1
    }
    # @task
    # def exit_task_execution(self):
    #     self.interrupt()
"dipanjankundu@Dipanjant-Kundu.local"
] | dipanjankundu@Dipanjant-Kundu.local |
d994f4b20a182b9c9b4b26dea314bed2f83d5097 | da52951c32b37aa75765b718707ce08c0a6208d1 | /ReinforcementLearning/PolicyGradient/PPO/tf2/main.py | b3a0d38e4986d6a9da18c87322ee6faa32643f1d | [] | no_license | philtabor/Youtube-Code-Repository | 08c1a0210f80976df50b01a91f1936a7d5c7b302 | eb3aa9733158a4f7c4ba1fefaa812b27ffd889b6 | refs/heads/master | 2023-08-08T05:28:11.712470 | 2023-03-27T16:07:29 | 2023-03-27T16:07:29 | 144,081,173 | 811 | 568 | null | 2023-07-24T20:00:37 | 2018-08-09T00:21:29 | Python | UTF-8 | Python | false | false | 1,575 | py | import gym
import numpy as np
from agent import Agent
from utils import plot_learning_curve
if __name__ == '__main__':
    env = gym.make('CartPole-v0')
    # N: number of environment steps between PPO learning updates.
    N = 20
    batch_size = 5
    n_epochs = 4
    alpha = 0.0003  # learning rate handed to the agent
    agent = Agent(n_actions=env.action_space.n, batch_size=batch_size,
                    alpha=alpha, n_epochs=n_epochs,
                    input_dims=env.observation_space.shape)
    n_games = 300
    figure_file = 'plots/cartpole.png'
    # Initialize the best-score tracker to the environment's minimum reward
    # so the first completed episode always becomes the new best.
    best_score = env.reward_range[0]
    score_history = []
    learn_iters = 0
    avg_score = 0
    n_steps = 0
    for i in range(n_games):
        observation = env.reset()
        done = False
        score = 0
        while not done:
            action, prob, val = agent.choose_action(observation)
            observation_, reward, done, info = env.step(action)
            n_steps += 1
            score += reward
            agent.store_transition(observation, action,
                                   prob, val, reward, done)
            # Learn every N environment steps, not at episode boundaries.
            if n_steps % N == 0:
                agent.learn()
                learn_iters += 1
            observation = observation_
        score_history.append(score)
        # Running average over the most recent 100 episodes.
        avg_score = np.mean(score_history[-100:])
        if avg_score > best_score:
            best_score = avg_score
            agent.save_models()  # checkpoint whenever the average improves
        print('episode', i, 'score %.1f' % score, 'avg score %.1f' % avg_score,
                'time_steps', n_steps, 'learning_steps', learn_iters)
    x = [i+1 for i in range(len(score_history))]
    plot_learning_curve(x, score_history, figure_file)
| [
"ptabor@gmail.com"
] | ptabor@gmail.com |
7ae123aa82822cebe97dbca6eea26e9c58d5e5fd | 016ea541673c9cd795b7270e1793e76482478134 | /1Basics/3IfStatements/3-07.py | a4fae230281f4d98b9f88b1f3cc814c75636ddac | [] | no_license | zako16/heinold_exercises | 5e77983fc715aa7dc7367561b2a4dae420db2b35 | 5551161e2d16d740df2e9ee7c2a53b235d627046 | refs/heads/master | 2021-06-10T23:34:33.516166 | 2017-02-06T07:24:57 | 2017-02-06T07:24:57 | 70,688,144 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | """
Write a program that asks the user for two numbers and prints !Close! if the numbers are
within .001 of each other and !Not close! otherwise.
"""
# NOTE(review): eval() on raw input executes arbitrary expressions typed by the
# user; float(input(...)) would be safer for plain numbers -- confirm intent
# (the textbook exercise may rely on eval accepting expressions).
numberOne = eval(input("Enter 1 number:"))
numberTwo = eval(input("Enter 2 number:"))
# The two cases are complementary, so use a plain else instead of an elif
# that recomputes the difference (the original elif also left NaN unhandled).
if abs(numberOne - numberTwo) <= .001:
    print("Close")
else:
    print("Not close")
| [
"pojiratelnica@gmail.com"
] | pojiratelnica@gmail.com |
fe7ef14c8f1f1bd5d70ed77e431a9ed651889e4c | dbc5e86162d0fc97a2c40f91c181762f4c904cde | /photologue/admin.py | 4bb65371d920b247f783e12a1661857367ce650d | [
"BSD-3-Clause"
] | permissive | iberben/django-photologue | 7739df7c6bb2401a5c89587b803dbfef2e673cb4 | cf546ce05172240068aa7d01b41cecf75c3e03e9 | refs/heads/master | 2021-01-19T05:49:14.504577 | 2010-11-12T14:48:21 | 2010-11-12T14:48:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,595 | py | """ Newforms Admin configuration for Photologue
"""
from django.contrib import admin
from models import *
class SetAdmin(admin.ModelAdmin):
    """Admin options for Set: list columns, filters, and slug auto-fill from name."""
    list_display = ('name', 'date_added', 'is_public')
    list_filter = ('date_added', 'is_public')
    date_hierarchy = 'date_added'
    prepopulated_fields = {'slug': ('name',)}
class GalleryAdmin(admin.ModelAdmin):
    """Admin options for Gallery, with a horizontal widget for picking photos."""
    list_display = ('title', 'date_added', 'photo_count', 'is_public')
    list_filter = ['date_added', 'is_public']
    date_hierarchy = 'date_added'
    prepopulated_fields = {'title_slug': ('title',)}
    filter_horizontal = ('photos',)
class PhotoAdmin(admin.ModelAdmin):
    """Admin options for Photo: searchable list with thumbnails, 10 rows per page."""
    list_display = ('title', 'date_taken', 'date_added', 'is_public', 'tags', 'view_count', 'admin_thumbnail')
    list_filter = ['date_added', 'is_public']
    search_fields = ['title', 'title_slug', 'caption']
    list_per_page = 10
    prepopulated_fields = {'title_slug': ('title',)}
class PhotoEffectAdmin(admin.ModelAdmin):
    """Admin options for PhotoEffect, grouping fields into themed fieldsets."""
    list_display = ('name', 'description', 'color', 'brightness', 'contrast', 'sharpness', 'filters', 'admin_sample')
    # Group the edit form: basic info, image adjustments, filters,
    # reflection settings, and transposition.
    fieldsets = (
        (None, {
            'fields': ('name', 'description')
        }),
        ('Adjustments', {
            'fields': ('color', 'brightness', 'contrast', 'sharpness')
        }),
        ('Filters', {
            'fields': ('filters',)
        }),
        ('Reflection', {
            'fields': ('reflection_size', 'reflection_strength', 'background_color')
        }),
        ('Transpose', {
            'fields': ('transpose_method',)
        }),
    )
class PhotoSizeAdmin(admin.ModelAdmin):
    """Admin options for PhotoSize, grouping dimensions, options, and enhancements."""
    list_display = ('name', 'width', 'height', 'crop', 'pre_cache', 'effect', 'increment_count')
    fieldsets = (
        (None, {
            'fields': ('name', 'width', 'height', 'quality')
        }),
        ('Options', {
            'fields': ('upscale', 'crop', 'pre_cache', 'increment_count')
        }),
        ('Enhancements', {
            'fields': ('effect', 'watermark',)
        }),
    )
class WatermarkAdmin(admin.ModelAdmin):
    """Admin options for Watermark: show name, opacity, and style in the list."""
    list_display = ('name', 'opacity', 'style')
class GalleryUploadAdmin(admin.ModelAdmin):
    """Admin for GalleryUpload; change permission is disabled (upload-only model)."""
    def has_change_permission(self, request, obj=None):
        return False # To remove the 'Save and continue editing' button
# Register each photologue model with its customized ModelAdmin.
admin.site.register(Set, SetAdmin)
admin.site.register(Gallery, GalleryAdmin)
admin.site.register(GalleryUpload, GalleryUploadAdmin)
admin.site.register(Photo, PhotoAdmin)
admin.site.register(PhotoEffect, PhotoEffectAdmin)
admin.site.register(PhotoSize, PhotoSizeAdmin)
admin.site.register(Watermark, WatermarkAdmin)
"ingo.berben@city-live.be"
] | ingo.berben@city-live.be |
aaf7c07df0a3a79d0aa83017aa4a3142f7911d98 | dec5c1416279178c23e81794789ed27e7e806faf | /profiles_api/models.py | 921345fafbd8fe1b8cb4afa2e7952b8838987617 | [
"MIT"
] | permissive | amitarvindpatil/profiles-rest-api | 44c7555888e654a2a64362d21834f5a67aeab07a | c2092bdc13c77e2f1f3cd4940740f752cc2b180f | refs/heads/master | 2022-09-15T06:53:40.777169 | 2020-05-31T09:01:43 | 2020-05-31T09:01:43 | 260,257,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,032 | py | from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from django.conf import settings
# Create your models here.
class UserProfileManager(BaseUserManager):
    """Manager for UserProfile: creates regular users and superusers."""
    def create_user(self,email,name,password=None):
        """Create and save a new user profile; raises ValueError without an email."""
        if not email:
            raise ValueError('User Must have an email address')
        # Normalize the address (lowercases the domain part) for consistent lookups.
        email = self.normalize_email(email)
        user = self.model(email=email,name=name)
        # Stores a salted hash, never the raw password.
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_superuser(self,email,name,password):
        """Create and save a new superuser with staff and superuser flags set."""
        user = self.create_user(email,name,password)
        user.is_superuser = True
        user.is_staff = True
        user.save(using=self._db)
        return user
class UserProfile(AbstractBaseUser,PermissionsMixin):
    """Database model for a user in the system; email is the login identifier."""
    email = models.EmailField(max_length=255,unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    objects = UserProfileManager()
    # Authenticate with email rather than a separate username.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['name']
    def get_full_name(self):
        """Retrieve full name of user."""
        return self.name
    def get_short_name(self):
        """Retrieve short name of user (same as full name here)."""
        return self.name
    def __str__(self):
        """Retrieve string representation of user (the email address)."""
        return self.email
class ProfileFeedItem(models.Model):
    """Profile status update posted by a user."""
    # Each feed item belongs to one user; deleting the user removes their items.
    user_profile = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )
    status_text = models.CharField(max_length=255)
    created_on = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        """Return the status text as the readable representation."""
        return self.status_text
| [
"amitpatil04041993@gmail.com"
] | amitpatil04041993@gmail.com |
8740e21ca2700f49a46ae97ee6a65222d9aaedd6 | 5494c7990e07e9ff465dd78c642e939abcb2f3ba | /lab/lab07/lab07.py | c84914c7b7645b8ad0e759e7f724519fbca39ee9 | [] | no_license | minleminzui/ucb_cs61a_21summer | cd816dc1874f308f723914aea2874d9bee72d4e8 | 0269ef7f7b3c6180e4c56862bc6c8e8a60743ed5 | refs/heads/main | 2023-08-15T14:40:10.002607 | 2021-10-06T13:57:12 | 2021-10-06T13:57:12 | 414,231,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | def naturals():
"""A generator function that yields the infinite sequence of natural
numbers, starting at 1.
>>> m = naturals()
>>> type(m)
<class 'generator'>
>>> [next(m) for _ in range(10)]
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
"""
i = 1
while True:
yield i
i += 1
def scale(it, multiplier):
    """Lazily yield every element of the iterable it multiplied by multiplier.

    >>> list(scale([1, 5, 2], 5))
    [5, 25, 10]
    >>> m = scale(naturals(), 2)
    >>> [next(m) for _ in range(5)]
    [2, 4, 6, 8, 10]
    """
    # Delegate to a generator expression; works on infinite iterables too.
    yield from (element * multiplier for element in it)
def hailstone(n):
    """Yield the elements of the hailstone (Collatz) sequence starting at n.

    >>> list(hailstone(10))
    [10, 5, 16, 8, 4, 2, 1]
    """
    while True:
        yield n
        if n == 1:
            return
        # Halve even values; odd values map to 3n + 1.
        n = 3 * n + 1 if n % 2 else n // 2
| [
"2969413251@qq.com"
] | 2969413251@qq.com |
7228fe09f21e47607f8b4d813784fbe298fefdb7 | 1080cd26378ef9070c61360211dc113b428216f6 | /.venv/lib/python3.7/bisect.py | 7ed693cbdb8fa88e018a5e0d72e6a3588aba86ce | [
"Apache-2.0"
] | permissive | ricklon/circleciplatformio | b616ba829a1de179830b7979c9864dff17e9a54d | 3b832e3705e0a0b627adb8afd8a4b687c833d0be | refs/heads/master | 2022-12-09T13:03:46.716842 | 2019-08-17T22:56:33 | 2019-08-17T22:56:33 | 202,931,258 | 0 | 0 | Apache-2.0 | 2022-12-08T06:02:49 | 2019-08-17T21:04:38 | Python | UTF-8 | Python | false | false | 58 | py | /home/gitpod/.pyenv/versions/3.7.2/lib/python3.7/bisect.py | [
"rick.rickanderson@gmail.com"
] | rick.rickanderson@gmail.com |
96ecd307f055f3b68969e8d57c8d8d5d0247f15a | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/4/i5u.py | ddabd582980b6c53e4d7b8520dbd5e019a933eac | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'i5U':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
9bfef8ea5ee76a8076e4ae4927c5af7803deeabb | dc5675b4e32cac0807ea07f70ff75d53497d903e | /causal/convert_to_npy.py | 95245c92657dc0b18ded4d24ed9332b9228f1c2e | [] | no_license | jeff-da/cracking-the-commonsense-code | 7ea8d17fb56541c19f860d498be22d8641c38d22 | 8a54f01888959245f066da8c7c921d949adce93e | refs/heads/master | 2020-10-01T15:40:32.213604 | 2019-12-12T09:31:10 | 2019-12-12T09:31:10 | 227,568,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | """
Convert the GloVe co-occurrence data to a numpy matrix.
"""
from argparse import ArgumentParser
import struct
import numpy as np
from scipy.sparse import coo_matrix, lil_matrix
from tqdm import trange
p = ArgumentParser()
p.add_argument("--vocab_file", required=True)
p.add_argument("--cooccur_file", required=True)
p.add_argument("--out_file", default="cooccur.npz")
args = p.parse_args()
vocab = []
with open(args.vocab_file, "r") as vocab_f:
for line in vocab_f:
vocab.append(line.strip().split()[0])
print("Read a vocabulary with %i tokens from %s." % (len(vocab), args.vocab_file))
v2i = {v: i for i, v in enumerate(vocab)}
with open(args.cooccur_file, "rb") as cooccur_f:
data = []
i, j = [], []
for word1, word2, val in struct.iter_unpack("iid", cooccur_f.read()):
data.append(val)
i.append(word1 - 1)
j.append(word2 - 1)
# if crec.word1 - 1 == v2i["foot"]:
# print("foot", vocab[crec.word2 - 1])
matrix = coo_matrix((data, (i, j)), shape=(len(vocab), len(vocab)))
def save_coo(filename, coo):
np.savez(filename, row=coo.row, col=coo.col, data=coo.data, shape=coo.shape)
save_coo(args.out_file, matrix)
| [
"jexeld@gmail.com"
] | jexeld@gmail.com |
6277d8e8a2d1b27b8540502c24b2e0c1977f2425 | d4b80f29bbfa914159c9bddd6615a625abdecff7 | /PFChargedHadronAnalyzer/python/CfiFile_cfi.py | 064661e1be2b7b70e8b7ac153c209625c4d57764 | [] | no_license | bkansal/PFCalibration | 36153346f8048642be07c26b3c4398829b161a2e | 12a301266ba0031cb82cdfbbf8797b12d6caef7b | refs/heads/master | 2023-05-11T20:09:47.363140 | 2016-11-29T09:28:40 | 2016-11-29T09:28:40 | 124,348,268 | 0 | 4 | null | 2018-03-08T06:36:58 | 2018-03-08T06:36:58 | null | UTF-8 | Python | false | false | 92 | py | import FWCore.ParameterSet.Config as cms
demo = cms.EDAnalyzer('PFChargedHadronAnalyzer'
)
| [
"shubham.pandey@cern.ch"
] | shubham.pandey@cern.ch |
d5af0b0faa18fdfc639b31b41dfbdb93a890659b | 085a6c4ac532bd4f46980f340890659b0cd03824 | /two_sigma_problems/problem_9.py | f2c2e10d39d7f5185f1a978013c9b743178ba7e5 | [
"MIT"
] | permissive | thinhnguyennt7/Daily-Coding-Problem | c66aa51422dc79ee912fbd042fefb2b2cf37a94f | 16d42e33af1de08aac1d888be518e398b4674bc8 | refs/heads/master | 2021-04-04T02:10:52.800504 | 2020-03-18T17:29:44 | 2020-03-18T17:30:01 | 248,416,248 | 1 | 1 | MIT | 2020-03-19T05:13:37 | 2020-03-19T05:13:36 | null | UTF-8 | Python | false | false | 223 | py | """This problem was asked by Two Sigma.
Using a function rand5() that returns an integer from 1 to 5 (inclusive) with
uniform probability, implement a function rand7() that returns an integer
from 1 to 7 (inclusive).
""" | [
"mxcsyounes@gmail.com"
] | mxcsyounes@gmail.com |
bcf00fd1fcede042853180e550443fed81814591 | 4e86bcea2789d983207561889f902efdae99a4a7 | /3.WebDev/django/mysite/main/views.py | ac68f8e3557ed6128c80c33345220109909987f9 | [] | no_license | bloedlink/python | 6a6204222745730eeb010a11f1fc0588208aaf8d | a3be0ddbf5e65d986e3b039cf2059348225f0a0f | refs/heads/master | 2023-08-03T23:28:11.684917 | 2021-03-12T19:59:36 | 2021-03-12T19:59:36 | 254,076,131 | 1 | 0 | null | 2023-07-23T11:18:27 | 2020-04-08T12:04:20 | HTML | UTF-8 | Python | false | false | 313 | py | from django.shortcuts import render
# Create your views here.
def home(request):
return render(request,'index.html')
def master(request):
return render(request,'master.html')
def carpool(request):
return render(request,'carpool.html')
def test(request):
return render(request,'test.html') | [
"nielsjanssen.1990@gmail.com"
] | nielsjanssen.1990@gmail.com |
adc05494fa63bf6c34d7e85ba4b3674a26b19617 | a5e9150f12fe0de60fe8851e1ed9f7b45ca01a00 | /keras_cifar10.py | dc386e3cff981333990ceaead537f870e2d54d02 | [] | no_license | dain5832/DL4CV | d73b9b8cd1807d01b6d3e56b75ce536c56078cc8 | 2af85f03e9bf87e6f9ff27554f7ec02c95d40f03 | refs/heads/master | 2022-11-14T15:23:58.049282 | 2020-07-06T07:15:51 | 2020-07-06T07:15:51 | 277,467,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,588 | py | # import the necessary packages
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import classification_report
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.datasets import cifar10
import matplotlib.pyplot as plt
import numpy as np
import argparse
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-o", "--output", required=True,\
help="path to the output loss/accuracy plot")
args = vars(ap.parse_args())
# load the training and testing data, scale it into the range [0, 1],
# then reshape the design matrix
print("[INFO] loading CIFAR-10 data...")
((trainX, trainY), (testX, testY)) = cifar10.load_data()
trainX = trainX.astype("float32") / 255.0
testX = testX.astype("float32") / 255.0
trainX = trainX.reshape((trainX.shape[0], 32 * 32 * 3))
testX = testX.reshape((testX.shape[0], 32 * 32 * 3))
# convert the labels from integers to vectors
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)
# initialize the label names for the CIFAR-10 dataset
labelNames = ["airplane", "automobile", "bird", "cat", "dear",\
"dog", "frog", "horse", "ship", "truck"]
# define the 3072-1054-512-10 architecture using Keras
model = Sequential()
model.add(Dense(1054, activation="relu", input_shape=(3072,)))
model.add(Dense(512, activation="relu"))
model.add(Dense(10, activation="softmax"))
# train the model using SGD
print("[INFO] training network...")
sgd = SGD(0.01)
model.compile(loss="categorical_crossentropy", optimizer="sgd",\
metrics=["accuracy"])
H = model.fit(trainX, trainY, validation_data=(testX, testY),\
epochs=100, batch_size=32)
# evaluate the network
print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=32)
print(classification_report(testY.argmax(axis=1),\
predictions.argmax(axis=1), target_names=labelNames))
# plot the training loss and accuracy
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, 100), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, 100), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, 100), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, 100), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.savefig(args["output"]) | [
"noreply@github.com"
] | noreply@github.com |
6a2420036fa088bc0c7d614f108b8106288d4953 | f6a9b4b571d58078fc15e65ec1f50828b64f3b09 | /pulp-client/handlers/user.py | 9e5d40643c0e32bf8f75c05f3f51efff9f196e78 | [] | no_license | AyushGupta-Code/pulp-client | f36451fa320f27da83d63defcb3e497ccfa1565c | dc25ad8d87d7bc542955bdb5a54d4294d3dcc034 | refs/heads/master | 2023-04-09T14:41:56.062154 | 2021-04-18T16:11:59 | 2021-04-18T16:11:59 | 350,825,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,050 | py | from db.user import User
from flask_login import login_required
from flask import Flask, redirect, request, url_for, Blueprint
from flask_login import current_user
import requests
import json
#creates a page
userpage = Blueprint('userpage', __name__, template_folder='templates')
# userid is a variable and "users" is constant
@userpage.route("/dashboard/users/<userid>", methods=['GET'])
def get(userid):
#here the user is verified and we have his address as well
if current_user.is_authenticated and current_user.address :
return ("<center>"
"<p>Hello, {}! You're logged in! Email: {} Address: {}</p>"
"<div><p>Google Profile Picture:</p>"
'<img src="{}" alt="Google profile pic"></img></div>'
'<a class="button" href="/logout">Logout</a>'
'''<p><button onclick="window.location.href='/dashboard/users/{}/orders'">Show Orders</button></p></center>'''.format(
current_user.name, current_user.email, current_user.address, current_user.profile_pic, current_user.id
)
)
#here if we dont have address, redirect to this
elif current_user.is_authenticated :
return (
'''
<center><form action="/dashboard/users/{}" method="post">
<label for="address">Please Add your address to continue:</label><br>
<input type="text" id="address" name="address"><br>
<input type="submit" value="Submit">
<p><a class="button" href="/logout">Logout</a></p>
</form></center> '''.format(current_user.id)
)
#here if we have nither authenticated user or address (cant have address if you dont have authenticated user)
else :
return redirect("/")
# userid is a variable and "users" is constant
@login_required
@userpage.route("/dashboard/users/<userid>", methods=['POST'])
def update(userid):
address = request.form['address']
User.update(userid, address)
return redirect("/dashboard/users/" + current_user.id)
| [
"ayushgupta20011@gmail.com"
] | ayushgupta20011@gmail.com |
40fc3a0276b1fc14e4cbde317b29b1d2aaf23308 | c3cc02f10e8e6e892da1c033d8f9f20efc01213a | /upload-login-registration-django/AuthProject/templates/EPIC/Joining/views.py | b6fd731b2fbebcffcd0697b079d2dc0e2d3764e3 | [] | no_license | benchmarketdev/Python-Django | 43d2b28a8538a8c43aad9b37e23488c325ed9f1b | 20e6ac34c3f010a0ab36fd2d6bf8114a20d158f7 | refs/heads/master | 2020-04-01T04:32:42.320898 | 2018-10-13T18:35:28 | 2018-10-13T18:35:28 | 152,867,922 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,805 | py | from django.shortcuts import render, redirect
from models import *
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from datetime import datetime
import datetime, decimal
from Employee.models import *
from Library.emailer import *
from Users.models import *
from django.db.models import Q
# Create your views here.
@login_required
def newjoining(request):
urlNewJoining = True
if request.method =="POST":
# print request.POST
firstName = request.POST.get('fname')
lastName = request.POST.get('lname')
email = request.POST.get('email')
mobile = request.POST.get('mobile')
# empCode = request.POST.get('empcode')
# location = request.POST.get('location')
# department = request.POST.get('department')
# designation = request.POST.get('designation')
# band = request.POST.get('band')
try:
cUser = Users.objects.filter(Q(email=email) | Q(mobile=mobile))
if not cUser:
userObj = Users(email = email, firstName = firstName, lastName = lastName, mobile = mobile, isPassChanged = False )
userObj.save()
empObj = EmployeeMaster.objects.create( userId = userObj, surName = lastName, givenName = firstName, personalMobile = mobile, personalEmail = email, motherName = '', fatherName = '', birthPlace = '', motherTongue = '', otherLanguages = '', stateDomicile = '', religion = '', bloodGroup = '' )
userObj.set_password(mobile)
userObj.save()
link = settings.SOFTWARE_URL
data = {'name': userObj.firstName, 'email': userObj.email, 'url': settings.SOFTWARE_URL, 'link': link}
account_created_emailer(data)
estatus = 100
else:
estatus = 102
except:
traceback.print_exc()
estatus = 104
pass
return render(request, 'Joining/new.html', locals())
| [
"eagle@gmail.com"
] | eagle@gmail.com |
351cca2054fb8641c34017b3bc190680a699b824 | 4b44a299bafbd4ca408ce1c89c9fe4a449632783 | /python3/10_Modules/Parallel_Processing/a_get_cpu_count.py | 0a0464db866ec3a6c8aa2be9e3d728d2be413a38 | [] | no_license | umunusb1/PythonMaterial | ecd33d32b2de664eaaae5192be7c3f6d6bef1d67 | 1e0785c55ccb8f5b9df1978e1773365a29479ce0 | refs/heads/master | 2023-01-23T23:39:35.797800 | 2020-12-02T19:29:00 | 2020-12-02T19:29:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | import multiprocessing as mp
result = '''There are {} processors, in number, in this \
computer'''.format(mp.cpu_count())
print(result)
print(dir(mp))
print(mp.current_process())
| [
"uday3prakash@gmail.com"
] | uday3prakash@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.