blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ffac357dac34a1696e3b5e068e702f602d08121c | fab7d74f1d89d11d7a0381d88f5c09a801378b47 | /node_modules/bufferutil/build/config.gypi | fa302a130ff195fe3d9e90ac940023749ba2ba1e | [
"MIT"
] | permissive | Fen747/battleProject_as | a5739a8db2225dcc23c604eb83302a67924fa26d | cc0bce269f295b6722db43a454c7f954861b0306 | refs/heads/master | 2016-08-12T23:16:42.385015 | 2016-02-28T17:49:41 | 2016-02-28T17:49:41 | 47,924,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,764 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"gas_version": "2.23",
"host_arch": "x64",
"icu_data_file": "icudt56l.dat",
"icu_data_in": "../../deps/icu/source/data/in/icudt56l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "./deps/icu",
"icu_small": "true",
"icu_ver_major": "56",
"node_byteorder": "little",
"node_install_npm": "true",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"python": "/home/iojs/bin/python",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "false",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": 1,
"want_separate_host_toolset": 0,
"nodedir": "/root/.node-gyp/5.1.0",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"user_agent": "npm/3.3.12 node/v5.1.0 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "",
"force": "",
"only": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"userconfig": "/root/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"cafile": "",
"progress": "true",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"dry_run": "",
"prefix": "/home/sylchauf/node",
"browser": "",
"cache_lock_wait": "10000",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"scope": "",
"searchopts": "",
"versions": "",
"cache": "/root/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "0022",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"access": "",
"also": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "",
"node_version": "5.1.0",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/home/sylchauf/node/etc/npmrc",
"init_module": "/root/.npm-init.js",
"parseable": "",
"globalignorefile": "/home/sylchauf/node/etc/npmignore",
"cache_lock_retries": "10",
"save_prefix": "^",
"group": "",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": ""
}
}
| [
"contact@sylchauf.net"
] | contact@sylchauf.net |
bd0f26dfcbc2fc9c5bd816cf90b8636627754b36 | 2406f8136970bc9430cff45998b703805b19d633 | /src/manage.py | b9f85b7c7df682ff24ea9aa81aad82ebe29fd41d | [] | no_license | deRerumNatura/django-practice | 8d437361a6aba58d2cbb6ce4d7a73e0bd916928b | 4c9c50c2e14ab5db35f325236832b91dc9d77d2c | refs/heads/master | 2020-03-31T04:46:22.365991 | 2018-10-17T15:25:05 | 2018-10-17T15:25:05 | 151,919,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | #!/usr/bin/env python
import os
import sys
# Django's command-line entry point (manage.py): forwards the CLI arguments
# (runserver, migrate, ...) to Django's management framework.
if __name__ == "__main__":
    # Use this project's settings unless DJANGO_SETTINGS_MODULE is already set
    # by the environment.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "yourenv.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # Dispatch the actual management command.
    execute_from_command_line(sys.argv)
| [
"pollindrom@gmail.com"
] | pollindrom@gmail.com |
a62e6416e6dfaff7d10856cd1a0734e290a41ac2 | ec806b30be1814403f9336c98bb92b33455a9173 | /manage.py | 9b0ad245aaf68e05c3bc2d34dd15711d1c695298 | [] | no_license | MircaGheorghe/MaklerParser | 2b324884f44747229bc50459606b75fc28c630c4 | 639b93fb120445a1a3e690d97643264afa237c59 | refs/heads/master | 2020-12-23T14:50:23.984839 | 2020-01-24T08:54:10 | 2020-01-24T08:54:10 | 237,182,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point of Django's command-line utility for administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'maklerParser.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Django itself is missing (or not on PYTHONPATH); re-raise with a
        # friendlier hint while keeping the original cause chained.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"giony.mirca@gmail.com"
] | giony.mirca@gmail.com |
6c6cd89c3c9e5b783c78eba63d85736272909e6b | 6d4d58298c6eb4a84c0de58bd5907cc897fc9ee7 | /Tkinter/scale.py | b76f2d8503f39378cf01cf80aa973c405907257f | [] | no_license | quadrant26/python | 41d4789f9846770d60502350e2afe873dbde3c0b | a0b5796beea04a9bb44c58d888dd1aaa4f211087 | refs/heads/master | 2021-01-12T18:24:15.982628 | 2016-10-28T15:07:08 | 2016-10-28T15:07:08 | 69,418,452 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | from tkinter import *
# Demo of Tk Scale (slider) widgets: two sliders plus a button that prints
# their current positions to stdout.
master = Tk()
'''
Scale
tickinterval=5 # 刻度表
resolution=5 # 精度 == 步长
length=200 # 长度
'''
# (The string above lists the Scale options used below: tickinterval = the
# spacing of the tick-mark labels, resolution = the step size, length = the
# widget length in pixels.)
# Vertical slider from 0 to 40 with a tick label every 5 units.
s1 = Scale(master, from_=0, to=40, tickinterval=5, length=200)
s1.pack()
# Horizontal slider from 0 to 200 that moves in steps of 15.
s2 = Scale(master, from_=0, to=200, orient=HORIZONTAL, resolution=15, length=600)
s2.pack()
def show():
    # Print the current value of both sliders (the button text means
    # "get position").
    print(s1.get(), s2.get())
Button(master, text="获取位置", command=show).pack()
master.mainloop() | [
"quadrant26@163.com"
] | quadrant26@163.com |
4b3cfa868a095f3bb7908511aa432d27b6fcef9b | e7c03bfa0571852c070b4380913e7f07c9ea23cb | /Programs/Subarray sort.py | 2e2309bdbc3b4a38009b6c158463065a953e7adf | [] | no_license | shashilsravan/Programming | 3a8946505f278e7cf8574e4508192060696eef81 | 5e422de074a145b04b67d46bf8d9258f9894b742 | refs/heads/main | 2023-07-19T00:41:07.602123 | 2021-08-18T17:21:02 | 2021-08-18T17:21:02 | 371,697,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,442 | py | # O(N)
# def isOutOfOrder(i, num, array):
# if i == 0:
# return num > array[i+1]
# elif i == len(array) - 1:
# return num < array[i-1]
# return num > array[i+1] or num < array[i-1]
#
#
# def subArraySort(array):
#
# if len(array) == 1:
# return 0
#
# minOutOfOrder = float("inf")
# maxOutOfOrder = float("-inf")
#
# for i, num in enumerate(array):
# if isOutOfOrder(i, num, array):
# minOutOfOrder = min(minOutOfOrder, num)
# maxOutOfOrder = max(maxOutOfOrder, num)
#
# if minOutOfOrder == float("inf"):
# return 0
#
# subarrayLeftIdx = 0
# while minOutOfOrder >= array[subarrayLeftIdx]:
# subarrayLeftIdx += 1
#
# subarrayRightIdx = len(array) - 1
# while maxOutOfOrder <= array[subarrayRightIdx]:
# subarrayRightIdx -= 1
#
# # return subarrayRightIdx - subarrayLeftIdx + 1
# return [subarrayLeftIdx, subarrayRightIdx]
def subArraySort(array):
    """Return the length of the shortest contiguous subarray that, once
    sorted, leaves the whole array sorted; 0 if the array is already sorted.
    """
    n = len(array)
    start, end = 0, -1
    running_max = float("-inf")
    running_min = float("inf")
    # Single pass scanning from both ends at once: `end` remembers the last
    # position that is smaller than something before it, `start` the first
    # position that is larger than something after it.
    for i in range(n):
        j = n - 1 - i
        if array[i] >= running_max:
            running_max = array[i]
        else:
            end = i
        if array[j] <= running_min:
            running_min = array[j]
        else:
            start = j
    # With no out-of-order element, end - start + 1 == -1 - 0 + 1 == 0.
    return end - start + 1
print(subArraySort([2, 6, 4, 8, 10, 9, 15])) | [
"shashilsravan.ss.ss@gmail.com"
] | shashilsravan.ss.ss@gmail.com |
f78583af982448696bb392d8d15a60c5d8e44ee9 | 710922bec0ca285713c6a1cf1f0ed0e9a8896a61 | /orders/forms.py | bb30d7fb71ad8e58718d9fd486f90f382c5934f7 | [] | no_license | jian9ang-git/Majorproject | 171c35f75c7aef6f014478e77275603d64ba3d47 | 406b4ea20c4ed095a5b16434eb133e4f38542549 | refs/heads/master | 2023-04-20T18:29:05.488195 | 2021-05-05T13:54:04 | 2021-05-05T13:54:04 | 361,968,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | from django import forms
from .models import Order
class OrderCreateForm(forms.ModelForm):
    """Checkout form for creating an Order with its delivery details."""

    class Meta:
        model = Order
        # Only the address/delivery fields are exposed to the user here.
        fields = ['country', 'city', 'street', 'house', 'post_index', 'delivery_time', 'comment']
| [
"jian9ang1@ya.ru"
] | jian9ang1@ya.ru |
097564ed9fd327dba9f517082c02f5f2cdfcfe91 | 583551e0b268cb35b4ab8fe46288eea9d9f17aa6 | /main/config/user_config.py | 3e61bc92c69d2574a728503731faa11fc0cda497 | [
"MIT"
] | permissive | TonyLiu-TL/baby-name | 9feb13b47a90b42337df2d11b4413f318a9da736 | 21a655072d16054fd9e5a766b592217168be9679 | refs/heads/master | 2022-02-17T19:08:20.496767 | 2017-06-19T15:08:51 | 2017-06-19T15:08:51 | null | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 1,089 | py | #!/usr/bin/env python
# -*- coding: GB18030 -*-
"""
Name : user_config.py
Created on : 2017/06/18 11:28
Author : Liuker <liu@liuker.xyz>
Version : 1.0.0
Copyright : Copyright (C) 2013 - 2017, Liuker's Blog, https://liuker.org.
Description : 用户配置。
"""
import os
# Project root: the parent directory of this config package.
ROOT_PATH = os.path.join(os.path.dirname(__file__), os.pardir)
setting = {}
# Fixed character: if this is set, the single-character dictionary is used,
# otherwise the multi-character dictionary is used.
setting["limit_world"] = "嘉"
# Family name (surname).
setting["name_prefix"] = "刘"
# Sex: the value is 男 (male) or 女 (female).
setting["sex"] = "男"
# Province.
setting["area_province"] = "北京"
# City / district.
setting["area_region"] = "海淀"
# Gregorian year of birth.
setting['year'] = "2017"
# Gregorian month of birth.
setting['month'] = "6"
# Gregorian day of birth.
setting['day'] = "18"
# Gregorian hour of birth.
setting['hour'] = "18"
# Gregorian minute of birth.
setting['minute'] = "18"
# File name for the generated results.
setting['output_fname'] = "example.txt"
setting['output_fpath'] = os.path.abspath(os.path.join(ROOT_PATH, "outputs", setting['output_fname']))
| [
"root@liuker.xyz"
] | root@liuker.xyz |
f5116227b8af628572f510cdbe1c3851987dd5f9 | 7c0caf6d501d89d0f68e209056263524314d01da | /100.py | 615f48db9a65e7dbfeb1d69f52d1d1b98fb60610 | [] | no_license | aarongertler/euler | 1c885a0550909f1036b3486170ba67b29ff47f21 | c7d880a1b645e587104fdbb6aaab39f6ddf10f22 | refs/heads/master | 2018-10-05T17:53:33.403932 | 2018-06-21T06:41:01 | 2018-06-21T06:41:01 | 83,462,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,622 | py | # If a box contains twenty-one coloured discs, composed of fifteen blue discs and six red discs,
# and two discs were taken at random, it can be seen that the probability of taking two blue discs,
# P(BB) = (15/21)×(14/20) = 1/2.
# The next such arrangement, for which there is exactly 50% chance of taking two blue discs at random,
# is a box containing eighty-five blue discs and thirty-five red discs.
# By finding the first arrangement to contain over 10^12 = 1,000,000,000,000 discs in total,
# determine the number of blue discs that the box would contain.
# We're looking for pairs of numbers a, b such that a/(a+b) * (a-1)/(a+b-1) = 1/2
# That is, (a^2 - a)/(a^2 +ab -a + ab + b^2 -b) = 1/2
# 2a^2 - 2a = a^2 + b^2 + 2ab - a - b
# a^2 - a = b^2 + 2ab - b
# a^2 - a = b(b + 2a - 1)
# There are certain values of a for which b is a whole number.
# It would be nice if I knew enough number theory to know how to look
# up ways to track those numbers down. Instead, let's look for patterns.
# 15/21 and 85/120 are extremely similar numbers, with values close to .71
# Put another way, b/a is very close to 0.4
# Let's explore, and see whether grabbing a few more examples will help us find some kind of central value these converge to
# limit = 1000000
# for a in range(500000, limit): # Red discs
# for b in range(int(4141*a / 10000), int(4143*a / 10000)): # Blue discs
# if a**2 - a == b*(b + 2*a - 1):
# print("a =", a, "b =", b)
# After running some ranges with smaller numbers,
# I notice that we tend to zero in on a particular value of a/b.
# And the pattern "0.4142..." reminds me of sqrt(2) -- we're actually converging there!
from math import sqrt
limit = 10**7
constant = sqrt(2) - 1
# for a in range(10**5, limit): # Red discs
# for b in range(int(a * constant) - 1, int(a * constant) + 1): # Blue discs
# if a**2 - a == b*(b + 2*a - 1):
# print("a =", a, "b =", b)
# Yep, this one hits all of our pairs when I test it on various ranges. Let's close the case.
# flag = False
# start = int(10**12 * sqrt(2)/2) # a / a+b converges to sqrt(2)/2
# a = start
# while flag == False: # This is too slow, let's try it without the range
# for b in range(int(a * constant) - 1, int(a * constant) + 1):
# if a**2 - a == b*(b + 2*a - 1):
# print("a =", a, "b =", b)
# flag = True
# a += 1
# The above solution was checking too many numbers. Looking for more patterns,
# I see that the ratio of each solution's a to the previous a is the same.
# 85/15 ~= 493/85 ~= 2871/493... ~= 5.8284 (can't find any special relationship of this number to the square root of 2)
# Trouble is, as the numbers get bigger, we need to keep fine-tuning our ratio, lest it lose precision
# However! We're always about 12 away from the next a if we take (present a)^2 / (previous a)
# And if we do the same thing with b, we are always right to the nearest integer (getting closer and closer every time)
# Exact integer square root for recovering the blue count without float error.
from math import isqrt

# The red-disc counts r of the valid arrangements satisfy
# r[n+1] * r[n-1] = r[n]**2 - 1, so the next term is int(r[n]**2 / r[n-1]);
# the sequence starts 6, 35, 204, 1189, ...
b, next_b = 6, 35
# In a valid arrangement, for every two discs sqrt(2) are blue and
# 2 - sqrt(2) are red, so the total disc count is b / constant.
constant = (2 - sqrt(2)) / 2
# Advance until the total disc count first exceeds 10**12.
while b / constant <= 10 ** 12:
    next_b, b = int(next_b ** 2 / b), next_b
print("b =", b)
# Recover the blue count a exactly instead of rounding a float (the float
# answer was off by one): P(two blues) = 1/2 gives
#     a**2 - (2*b + 1)*a - (b**2 - b) = 0,
# whose discriminant (2*b + 1)**2 + 4*(b**2 - b) = 8*b**2 + 1 is a perfect
# square for every valid red count b (e.g. b=6 -> 289=17**2, a=15), so the
# positive root is an exact integer.
a = (2 * b + 1 + isqrt(8 * b * b + 1)) // 2
print("a =", a)
# Other solutions:
# You can't speed up this solution much, but another option is to use the Diophantine quadratic method, as Dreamshire did.
# That works with a proven pattern, at least, rather than relying on the random recognition I had to use
| [
"aaronlgertler@gmail.com"
] | aaronlgertler@gmail.com |
148cce93ba4cbf3c3aee72d8ef727c2cd6db0f07 | 68d7796724784cf0fc9f08135b5f8c085456cfe5 | /LoadHeadlinesMediametrics.py | 3dc596f3a9598efd867c02435f68e42a076e2ab2 | [] | no_license | AlexanderButakov/scripts-from-work | db26140473ad619a5f4bd84468325e3712b7d9db | 3c0802b917853b46ec87238937c155af95c446de | refs/heads/master | 2020-12-24T15:41:26.570087 | 2015-09-12T07:08:59 | 2015-09-12T07:08:59 | 42,265,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,441 | py | # -*- coding: utf-8 -*-
import urllib2
import sys
from datetime import date
"""
Скрипт скачивает указанные страницы и их количество
с сайта mediametrics.ru, формирует список новостных заголовков,
собранных с этих страниц.
Запускать скрипт можно с разными параметрами, в зависимости от этого
будут скачиваться заголовки по разным тематикам: бизнес, спорт, IT,
Великобритания, США и пр.
"""
# Date stamp (YYYY_MM_DD) appended to the output file name.
today = date.today()
d = today.strftime("%Y_%m_%d")
# Topic selector taken from the command line, e.g. --day / --tech / --sport.
option = sys.argv[1]
def download_html():
    """Download up to three rating pages for the topic selected on the
    command line and return the concatenated raw headline payload.

    Each page embeds its data in a JavaScript variable ``tsv = "..."``;
    this function slices that payload out of the raw HTML of every page.
    (Python 2 code: uses urllib2 and dict.iteritems.)
    """
    HTML_NAME = "http://mediametrics.ru/rating/"
    # If no recognised option were given at launch, this page would be used.
    # NOTE(review): DEFAULT_NAME is assigned but never actually used below.
    DEFAULT_NAME = "http://mediametrics.ru/rating/ru/day.html"
    pages = 3          # number of result pages to download
    titles_join = ""   # accumulator for the raw payload of all pages
    index_p = 1        # 1-based page number used in the ?page= query string
    global option
    # Depending on the command-line option, the matching path is appended to
    # HTML_NAME to select the topic/region rating.
    options = {'--day':'ru/day.html','--tech':'hitech/ru/day.html','--sport':'sport/ru/day.html',
               '--biz':'business/ru/day.html','--gb':'gb/day.html','--us':'us/month.html'
    }
    for key, value in options.iteritems():
        if option == key:
            HTML_NAME = HTML_NAME + value
    while pages > 0:
        html_temp = ""
        titles_raw = ""
        load_source = urllib2.urlopen(HTML_NAME + "?page=" + str(index_p))
        html_temp = load_source.read()
        # The headline data sits between 'tsv = "' and the closing '\n";'.
        start_titles = html_temp.find('tsv = "')
        end_titles = html_temp.find(r'\n";')
        # +13 skips past the marker; the marker itself is only 7 characters,
        # so presumably the live pages carry extra prefix characters —
        # verify against the actual page source.
        titles_raw = html_temp[start_titles+13:end_titles]
        titles_join = titles_join + titles_raw
        pages = pages - 1
        index_p = index_p + 1
    return titles_join
# Instead of this hand-rolled function it would be better to use HTMLParser().unescape()
def convert_entityrefs(string):
    """Replace the HTML entity references listed in ``html_chars`` with
    their literal characters and return the converted text.

    Works character by character: ``s``/``lst_s`` remember where the most
    recent '&' was seen (as an index into the input string and into the
    output list respectively) and every '&...;' span found since then is
    compared against the known entities.
    """
    html_chars = {
        '»':'»','«':'«',' ':' ','–':'–',
        '"':'"','′':'′','″':'″','‘':'‘',
        '’':'’','—':'—','‚':'‚','“':'“',
        '”':'”','„':'„','€':'€','£':'£',
        ' ':' ','…':'...','­':'',"'":"'",
        '&':'&'
    }
    flag = False          # becomes True after the first replacement is made
    char_converted = []   # output characters (a list so slices can be spliced)
    s = 0                 # string index of the most recent '&'
    lst_s = 0             # list index of the most recent '&'
    lst_i = 0             # list index of the most recent ';'
    for i in range(len(string)):
        if string[i] == '&':
            s = i
            char_converted.append(string[i])
            lst_s = len(char_converted)-1  # index in the list for '&'
        else:
            char_converted.append(string[i])
        if string[i] == ';':
            tag = string[s:i+1]
            lst_i = len(char_converted)-1  # index in the list for ';'
            if flag == False:
                # Before the first replacement, string and list indices still
                # coincide, so the string-based slice positions can be used.
                for key, value in html_chars.iteritems():
                    if tag == key:
                        tag = value
                        char_converted[s:i+1] = tag
                        flag = True
            else:
                # After a replacement the list is shorter than the input, so
                # the separately tracked list indices must be used instead.
                for key, value in html_chars.iteritems():
                    if tag == key:
                        tag = value
                        char_converted[lst_s:lst_i+1] = tag
    text_converted = ''.join(char_converted)
    return text_converted
# Build the list of headline rows
def split_titles(titles):
    """Split the raw payload on every literal two-character '\\n' sequence
    and return the resulting list of headline rows.

    A payload ending exactly in '\\n' yields a trailing empty string, and an
    input whose last character is a lone backslash raises IndexError — both
    quirks of the original implementation are kept.
    """
    pieces = []
    segment_start = 0
    for pos in range(len(titles)):
        if titles[pos] == '\\' and titles[pos + 1] == 'n':
            # Close the current segment just before the backslash and skip
            # over the two separator characters.
            pieces.append(titles[segment_start:pos])
            segment_start = pos + 2
        elif pos == len(titles) - 1:
            # Last character and not a separator start: flush the tail.
            pieces.append(titles[segment_start:])
    return pieces
# The headline rows are stripped of extraneous information
# so that only the text fields remain
def get_clean_titles(lst):
    """Split each raw headline row into its first three fields.

    Each element of ``lst`` is one row of the separated payload; fields are
    collected (each wrapped in its own single-item list) until the third
    separator has been seen, then the group is stored and the row is done.
    """
    titles_grouped = []
    for item in lst:
        titles_clean = []
        tab_count = 0   # separators seen so far in this row
        s = 0           # start index of the current field
        for i in range(len(item)-1):
            if item[i] == ' ':  # field separator — assumed tab-like; verify against the raw data
                tab_count += 1
                if len(titles_clean) > 0:
                    titles_clean.append([item[s:i]])
                    s = i + 1
                else:
                    titles_clean.append([item[:i]])
                    s = i + 1
            if tab_count == 3:
                # Keep only the first three fields of the row.
                titles_grouped.append(titles_clean)
                break
    return titles_grouped
def main():
    """Download, clean and persist the headlines for the selected topic.

    The output file is named ``<topic-prefix>news_titles_<YYYY_MM_DD>.txt``
    and receives one headline per line.
    """
    global option
    titles_join = download_html()
    converted_entities = convert_entityrefs(titles_join)
    list_of_titles = split_titles(converted_entities)
    titles_clean = get_clean_titles(list_of_titles)
    # Map the CLI option to the output file-name prefix.
    prefixes = {'--day':'general_','--tech':'hitech_','--sport':'sport_',
                '--biz':'business_','--gb':'britain_','--us':'USA_'
    }
    for key, value in prefixes.iteritems():
        if option == key:
            prefix = value
    # NOTE(review): with an unrecognised option, `prefix` is unbound here and
    # this raises NameError; the output file is also never explicitly closed.
    filename = prefix + r"news_titles" +"_" + d + ".txt"
    file_w = open(filename,'w')
    for title in titles_clean:
        # title[1] is the second field of the row — presumably the headline
        # text; verify against the raw tsv layout.
        file_w.write(''.join(title[1]) + '\n')


if __name__ == '__main__':
    main()
| [
"t-conspectus@list.ru"
] | t-conspectus@list.ru |
423caa8880968132a474f1107cfd9fc617ce5cba | 5e2dddce9c67d5b54d203776acd38d425dbd3398 | /spacy/lang/da/examples.py | 4072777ec0c61e1fd6a4be512fed4b332319de09 | [
"MIT"
] | permissive | yuxuan2015/spacy_zh_model | 8164a608b825844e9c58d946dcc8698853075e37 | e89e00497ab3dad0dd034933e25bc2c3f7888737 | refs/heads/master | 2020-05-15T11:07:52.906139 | 2019-08-27T08:28:11 | 2019-08-27T08:28:11 | 182,213,671 | 1 | 0 | null | 2019-04-19T06:27:18 | 2019-04-19T06:27:17 | null | UTF-8 | Python | false | false | 507 | py | # coding: utf8
from __future__ import unicode_literals
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.da.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
# Danish example sentences used to test spaCy and its language models
# (see the module docstring above).
sentences = [
    "Apple overvejer at købe et britisk startup for 1 milliard dollar",
    "Selvkørende biler flytter forsikringsansvaret over på producenterne",
    "San Francisco overvejer at forbyde udbringningsrobotter på fortov",
    "London er en stor by i Storbritannien"
]
| [
"yuxuan2015@example.com"
] | yuxuan2015@example.com |
0277f072fe7b759a3ace2500f5ff736d17edf182 | ed8fb89843b31e7943f074bb6a0eb877ddf6a3d1 | /gui/move_syncfolder_dialog.py | 90488eb36808d09865590edebe37f2ea2761deec | [] | no_license | manuelVo/fuzzy-robot | 83105f61f93589976048d0e60f484b1b2ce22d6d | f9651ef066620fddd0ee4932efcb65549b264454 | refs/heads/master | 2021-07-15T21:48:25.850484 | 2021-06-29T12:27:40 | 2021-06-29T12:43:45 | 32,819,674 | 1 | 1 | null | 2015-05-01T16:25:28 | 2015-03-24T19:17:42 | Python | UTF-8 | Python | false | false | 925 | py | from PySide2.QtWidgets import *
from enum import Enum
class MoveSyncfolderDialog(QDialog):
    """Modal dialog asking whether existing savegames should be moved into
    the newly chosen sync folder or left where they are.

    After the dialog is accepted, ``self.move`` holds the user's choice
    (True = move the savegames into the new folder).
    """

    def __init__(self):
        super(MoveSyncfolderDialog, self).__init__()
        # Default choice; this is also the value if the dialog is dismissed
        # without pressing either button.
        self.move = True
        # Fixed user-visible typo: was "Chnage sync folder".
        self.setWindowTitle("Change sync folder")
        self.init_ui()

    def init_ui(self):
        """Build the question label and the two choice buttons."""
        layout = QVBoxLayout(self)
        label = QLabel("Savegames exist in the old sync folder. What do you want to do with them?", self)
        layout.addWidget(label)
        button_move = QPushButton("Move them to the new sync folder", self)
        button_move.clicked.connect(lambda: self.accept(True))
        layout.addWidget(button_move)
        button_leave = QPushButton("Leave them where they are", self)
        button_leave.clicked.connect(lambda: self.accept(False))
        layout.addWidget(button_leave)

    def accept(self, move):
        """Record the user's choice and close the dialog as accepted."""
        self.move = move
        super(MoveSyncfolderDialog, self).accept()
| [
"develop@manuel-voegele.de"
] | develop@manuel-voegele.de |
dc6b0ca2de15895b5c7c35dbdacc769b6f7738e5 | df6ff5c2e0a782a6a1edad73a320040c05282d10 | /test.py | fbe0407ea3bbbe090be76f31ee11c81e2a2c3462 | [] | no_license | adrift00/Siamtracker | eb2ec8117fa58c94360e74539c0417eb34ac6c5d | 637d32827d7a74ec9b7b9fae00d5675763aaa987 | refs/heads/master | 2022-12-07T19:23:07.356089 | 2020-08-25T13:47:19 | 2020-08-25T13:47:19 | 227,269,435 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,266 | py | import os
import argparse
import cv2
import logging
import torch
from pruning_model import prune_model
from toolkit.datasets import get_dataset
from utils.model_load import load_pretrain
from models import get_model
from configs.config import cfg
from trackers import get_tracker
from utils.visual import show_double_bbox
from toolkit.utils.region import vot_overlap
from utils.log_helper import init_log
# Command-line interface of the evaluation script.
parser = argparse.ArgumentParser(description='test tracker')
parser.add_argument('--tracker', default='', type=str, help='which tracker to use')
parser.add_argument('--dataset', default='', type=str, help='which dataset to test')
parser.add_argument('--cfg', default='', type=str, help='cfg file to use')
parser.add_argument('--snapshot', default='', type=str, help='base snapshot for track')
parser.add_argument('--video', default='', type=str, help='choose one special video to test')
parser.add_argument('--vis', action='store_true', help='whether to visual')
args = parser.parse_args()
# Pin evaluation to a single GPU and a single CPU thread.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
torch.set_num_threads(1)  # use only one threads to test the real speed
def vot_evaluate(dataset, tracker):
    """VOT-style evaluation with restarts: run `tracker` over every video in
    `dataset`; whenever the prediction no longer overlaps the ground truth the
    tracker counts as lost and is re-initialised 5 frames later. Writes one
    result file per video and prints per-video and total lost counts.
    """
    tracker_name = args.tracker
    backbone_name = args.cfg.split('/')[-1].split('_')[0]
    snapshot_name = args.snapshot.split('/')[-1].split('.')[0]
    total_lost = 0
    for v_idx, video in enumerate(dataset):
        if args.video != '':  # if test special video
            if video.name != args.video:
                continue
        frame_count = 0   # next frame index at which to (re)initialise
        lost_number = 0
        pred_bboxes = []  # per-frame entries: 1=init, 2=lost, 0=skipped, else a bbox
        toc = 0
        for idx, (frame, gt_bbox) in enumerate(video):
            tic = cv2.getTickCount()
            if idx == frame_count:
                tracker.init(frame, gt_bbox)  # cx,cy,w,h
                pred_bboxes.append(1)
            elif idx > frame_count:
                track_result = tracker.track(frame)
                bbox = track_result['bbox']  # cx,cy,w,h
                score = track_result['score']
                # Convert centre-based boxes to corner-based x,y,w,h.
                bbox_ = [bbox[0] - bbox[2] / 2, bbox[1] - bbox[3] / 2, bbox[2], bbox[3]]  # x,y,w,h
                gt_bbox_ = [gt_bbox[0] - (gt_bbox[2] - 1) / 2,
                            gt_bbox[1] - (gt_bbox[3] - 1) / 2,
                            gt_bbox[2],
                            gt_bbox[3]]
                overlap = vot_overlap(bbox_, gt_bbox_, (frame.shape[1], frame.shape[0]))
                # print('idx: {}\n pred: {}\n gt: {}\n overlap: {}\n'.format(idx, bbox_, gt_bbox_, overlap))
                if overlap > 0:
                    pred_bboxes.append(bbox_)
                else:
                    # Target lost: mark the frame and schedule re-initialisation
                    # five frames further on.
                    # print('lost idx: {}'.format(idx))
                    pred_bboxes.append(2)
                    frame_count = idx + 5
                    lost_number += 1
            else:
                # Frames between a loss and the next re-init are skipped.
                pred_bboxes.append(0)
            toc += cv2.getTickCount() - tic
            if args.vis and idx > frame_count:
                show_double_bbox(frame, bbox, score, gt_bbox, idx, lost_number)
        toc /= cv2.getTickFrequency()
        result_dir = os.path.join(cfg.TRACK.RESULT_DIR, args.dataset, tracker_name, backbone_name, snapshot_name)
        if not os.path.isdir(result_dir):
            os.makedirs(result_dir)
        result_path = '{}/{}.txt'.format(result_dir, video.name)
        with open(result_path, 'w') as f:
            for x in pred_bboxes:
                if isinstance(x, int):
                    f.write('{:d}\n'.format(x))
                else:
                    f.write(','.join(['{:.4f}'.format(i) for i in x]) + '\n')
        # log
        total_lost += lost_number
        print('[{:d}/{:d}] | video: {:12s} | time: {:4.1f}s | speed: {:3.1f}fps | lost_number: {:d} ' \
              .format(v_idx + 1, len(dataset), video.name, toc, idx / toc, lost_number))
    print('total_lost: {}'.format(total_lost))
def ope_evaluate(dataset, tracker):
    """One-pass evaluation (OPE): initialise on the first frame only, then
    track every following frame without restarts. Writes per-video bounding
    boxes and per-frame runtimes (used for the GOT-10k dataset in main()).
    """
    tracker_name = args.tracker
    backbone_name = args.cfg.split('/')[-1].split('_')[0]
    snapshot_name = args.snapshot.split('/')[-1].split('.')[0]
    for v_idx, video in enumerate(dataset):
        if args.video != '':  # if test special video
            if video.name != args.video:
                continue
        pred_bboxes = []
        runtime = []  # per-frame processing time in seconds
        toc = 0
        for idx, (frame, gt_bbox) in enumerate(video):
            tic = cv2.getTickCount()
            if idx == 0:
                # First frame: initialise on the ground truth, then track.
                tracker.init(frame, gt_bbox)  # cx,cy,w,h
                track_result = tracker.track(frame)
                bbox = track_result['bbox']  # cx,cy,w,h
                score = track_result['score']
                bbox_ = [bbox[0] - bbox[2] / 2, bbox[1] - bbox[3] / 2, bbox[2], bbox[3]]  # x,y,w,h
                gt_bbox_ = [gt_bbox[0] - gt_bbox[2] / 2, gt_bbox[1] - gt_bbox[3] / 2, gt_bbox[2], gt_bbox[3]]
                pred_bboxes.append(bbox_)
            else:
                track_result = tracker.track(frame)
                bbox = track_result['bbox']  # cx,cy,w,h
                score = track_result['score']
                bbox_ = [bbox[0] - bbox[2] / 2, bbox[1] - bbox[3] / 2, bbox[2], bbox[3]]  # x,y,w,h
                gt_bbox_ = [gt_bbox[0] - gt_bbox[2] / 2, gt_bbox[1] - gt_bbox[3] / 2, gt_bbox[2], gt_bbox[3]]
                pred_bboxes.append(bbox_)
            toc += cv2.getTickCount() - tic
            runtime.append((cv2.getTickCount() - tic) / cv2.getTickFrequency())
            if args.vis and idx > 0:
                show_double_bbox(frame, bbox, score, gt_bbox, idx, 0)
        toc /= cv2.getTickFrequency()
        result_dir = os.path.join(cfg.TRACK.RESULT_DIR, args.dataset, tracker_name, backbone_name, snapshot_name,
                                  video.name)
        if not os.path.isdir(result_dir):
            os.makedirs(result_dir)
        result_path = '{}/{}_001.txt'.format(result_dir, video.name)
        runtime_path = '{}/{}_time.txt'.format(result_dir, video.name)
        # write result
        with open(result_path, 'w') as f:
            for x in pred_bboxes:
                if isinstance(x, int):
                    f.write('{:d}\n'.format(x))
                else:
                    f.write(','.join(['{:.4f}'.format(i) for i in x]) + '\n')
        # write runtime
        with open(runtime_path, 'w') as f:
            for time in runtime:
                f.write('{:.6f}\n'.format(time))
        # log
        print('[{:d}/{:d}] video: {}, time: {:.1f}s, speed: {:.1f}fps'.format(v_idx + 1,
                                                                              len(dataset),
                                                                              video.name,
                                                                              toc, idx / toc))
def seed_torch(seed=0):
    """Seed every RNG in play (Python, NumPy, PyTorch CPU and CUDA) and force
    deterministic cuDNN behaviour so evaluation runs are reproducible."""
    import random
    import numpy as np
    # Recorded for completeness; hash randomisation itself is fixed at
    # interpreter start-up.
    os.environ['PYTHONHASHSEED'] = str(seed)
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def main():
    """Build the model and tracker from the CLI arguments and run the
    evaluation protocol that matches the chosen dataset."""
    seed_torch(123456)
    cfg.merge_from_file(args.cfg)
    init_log('global', logging.INFO)
    base_model = get_model(cfg.MODEL_ARC)
    base_model = load_pretrain(base_model, args.snapshot).cuda().eval()
    # # if want test model pruned
    # base_model = prune_model(base_model).cuda().eval() # refine the model
    # if want to test real pruning
    # base_model = get_model(cfg.MODEL_ARC)
    # base_model = load_pretrain(base_model, cfg.PRUNING.FINETUNE.PRETRAIN_PATH) # load the mask
    # base_model = prune_model(base_model) # refine the model
    # base_model=load_pretrain(base_model,args.snapshot).cuda().eval() # load the finetune weight
    tracker = get_tracker(args.tracker, base_model)
    data_dir = os.path.join(cfg.TRACK.DATA_DIR, args.dataset)
    dataset = get_dataset(args.dataset, data_dir)
    # VOT datasets use the restart-based protocol, GOT-10k the one-pass one.
    if args.dataset in ['VOT2016', 'VOT2018']:
        vot_evaluate(dataset, tracker)
    elif args.dataset == 'GOT-10k':
        ope_evaluate(dataset, tracker)


if __name__ == '__main__':
    main()
| [
"1219660880@qq.com"
] | 1219660880@qq.com |
e97d8a632c0887ab2a14515b303ea9c83fa90c3e | 50874fc1e32260a5cc48c11c23e077ff4171ef34 | /test1.py | f5326091f5ac9db7061cf51f9a7d24a9c3345afd | [] | no_license | huhao45/gs_lstm | ca47e3b1e6527507fdb74963ff58dbcb4823e500 | 7a653f9702499d6f3f8aae0a7a335af4df005975 | refs/heads/main | 2023-06-16T01:45:15.114334 | 2021-07-09T02:21:51 | 2021-07-09T02:21:51 | 384,297,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,033 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 9 15:23:16 2020
@author: 59654
"""
# from flask import Flask
# # 实例化,可视为固定格式
# app = Flask(__name__)
# # route()方法用于设定路由;类似spring路由配置
# @app.route('/helloworld')
# def hello_world():
# return 'Hello, World!!!!'
# if __name__ == '__main__':
# # app.run(host, port, debug, options)
# # 默认值:host="127.0.0.1", port=5000, debug=False
# app.run(host="0.0.0.0", port=5000)
# from flask import Flask
# app = Flask(__name__)
# @app.route('/')
# def hello_world():
# return 'Hello, World!'
# app.run()
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
    """Request handler for '/': answers GET with a plain-text greeting."""
    def get(self):
        self.write("Hello, world")
def make_app():
    """Build the Tornado application with only the root route registered."""
    return tornado.web.Application([
        (r"/", MainHandler),
    ])
if __name__ == "__main__":
    # Serve HTTP on port 5000 until the IOLoop is stopped.
    app = make_app()
    app.listen(5000)
tornado.ioloop.IOLoop.current().start() | [
"noreply@github.com"
] | huhao45.noreply@github.com |
3d38aed1e39f09568c500227e5f17a5affc96519 | 8b839132f4f4efef9dd246312e95b65f64230f63 | /Unidad3/eslh_videos/iterator.py | 0f1b925d6bed6f390e596e55692e44a281e0a7ce | [] | no_license | EfrenLanderos/Unidad3 | 05e03594c998b7ad19bede35b3d7487282b2c13a | 7ab08834e82b9efb7401b832c395dedd5f9fc2c2 | refs/heads/master | 2021-05-06T17:35:22.586636 | 2017-11-24T08:01:23 | 2017-11-24T08:01:23 | 111,891,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | """Efrén Santiago Landeros Hernández"""
def count_to(count):
    """Generator yielding the first `count` German number words (at most
    five, since only the words for 1..5 are known)."""
    german_words = ["eins", "zwei", "drei", "vier", "funf"]
    # zip stops at the shorter sequence, so this caps the output at
    # min(count, len(german_words)) items.
    for word, _index in zip(german_words, range(count)):
        yield word
# Let´s test the generator returned by our iterator
for num in count_to(3):
print("{}".format(num))
for num in count_to(4):
print("{}".format(num)) | [
"noreply@github.com"
] | EfrenLanderos.noreply@github.com |
0d9a11675df1403976c9cc36cc9cb85bf8bfbbad | 1865e1966c490497798115a4d4ec3a7177dbef98 | /Data_structures/binary_trees/binaryTree.py | ff6be891f60b6a155ca17beae177585aca56f9f2 | [] | no_license | gauravpore/Data-structures-algorithms | fa9fa56c366c103af8f5978d111088f52b396cc3 | 70a2180fa009dd91f9fe80d8d0a45f376f76729c | refs/heads/master | 2023-05-27T12:29:11.993345 | 2021-06-17T06:41:37 | 2021-06-17T06:41:37 | 326,462,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | class Node:
def __init__(self,data):
self.data = data
self.right = None
self.left = None
def insert(self,data):
if self.data:
if data<self.data:
if self.left is None:
self.left = Node(data)
else:
self.left.insert(data)
if data>self.data:
if self.right is None:
self.right = Node(data)
else:
self.right.insert(data)
else:
self.data = data
def findval(self,val):
if val<self.data:
if self.left is None:
return "Not Found in left"
return self.left.findval(val)
elif val>self.data:
if self.right is None:
return "Not Found in right"
return self.right.findval(val)
else:
return ("Found")
def printTree(self):
if self.left:
self.left.printTree()
print (self.data,end=" "),
if self.right:
self.right.printTree()
def printInorder(root):
    """Print every value of the tree in ascending order, one per line."""
    if root is None:
        return
    printInorder(root.left)
    print(root.data)
    printInorder(root.right)
# Build a small sample tree and look up a value that is absent.
root = Node(1)
for value in (2, 3, 4, 6):
    root.insert(value)
print(root.findval(5))
| [
"67472558+gauravpore@users.noreply.github.com"
] | 67472558+gauravpore@users.noreply.github.com |
daa4907ec87d49590659e7b0bcf5576c027f6b47 | 26e60aaccb4483babe89157cbe215414bc7df18c | /src/services/security_service.py | 905e27d565b9242f6f82eb1cd64a42b2e8ec1aa0 | [
"MIT"
] | permissive | Serious-and-Pro-Gamers/SPG-Backend | 5f241b993ac8ab2bbecc7a24007ad198c0da7f81 | 567a068619867ce5579b75f8d39a2c36fdb8a737 | refs/heads/master | 2022-12-25T08:36:45.291271 | 2019-11-10T23:20:05 | 2019-11-10T23:20:05 | 215,707,624 | 0 | 0 | MIT | 2022-12-08T06:47:36 | 2019-10-17T05:08:00 | Python | UTF-8 | Python | false | false | 1,536 | py | import hashlib
import requests
# Query the "Have I Been Pwned" range API for the number of known
# breaches that contain the given password.
def get_password_breaches(password):
    """Return how many known breaches *password* appears in.

    Uses the k-anonymity range endpoint: only the first five characters
    of the SHA-1 digest are sent over the wire; the matching suffix is
    looked up locally in the returned candidate list.
    """
    pwned_uri = "https://api.pwnedpasswords.com/range/"
    # The API is keyed on the upper-case hex SHA-1 digest of the password.
    digest = hashlib.sha1(password.encode()).hexdigest()
    prefix = digest[:5].upper()
    suffix = digest[5:].upper()
    # One "SUFFIX:COUNT" pair per line for every hash sharing the prefix.
    response_lines = requests.get(pwned_uri + prefix).text.splitlines()
    # Take the count from the first line that carries our suffix.
    for line in response_lines:
        if suffix in line:
            breach_count = line[line.find(":") + 1:]
            break
    else:
        breach_count = 0
    return int(breach_count)
def process_password(password):
    """Return the pwned-API breach count for *password*.

    TODO: Finish encrypting the password and send it to the database.
    """
    return get_password_breaches(password)
| [
"lsaplan97@gmail.com"
] | lsaplan97@gmail.com |
73e464745176c4eaa3b172eea37f4f6adda896e3 | fec54c7785cd486256706bfe4b8b365399390742 | /13_expo_2018/wnd_composite_estado2_NovFeb_HMM3.py | 89e9293a1ec5944cb872e1340623a7bbbfa63320 | [] | no_license | yordanarango/MASTER_THESIS | 71f08bcf304f201ed3fcf54e93ca4757c35b46ca | 2310d32965a877174b9662bbd98ce4040be6f4f5 | refs/heads/master | 2021-04-15T18:28:07.140558 | 2018-08-05T16:06:47 | 2018-08-05T16:06:47 | 126,349,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,184 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 18 13:34:25 2018
@author: yordan
"""
from hmmlearn.hmm import GaussianHMM
import numpy as np
import netCDF4 as nc
from netcdftime import utime
from mpl_toolkits.basemap import Basemap
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import pickle
"FUNCIONES"
def ciclo_diurno_anual(matriz, fechas, len_lat, len_lon):
    """Mean diurnal cycle of each calendar month.

    matriz : numpy array (time, lat, lon); layer k corresponds to fechas[k].
    fechas : pandas DatetimeIndex with one timestamp per layer of *matriz*.
    len_lat: pixels in the meridional direction (kept for interface
             compatibility; the output shape now comes from *matriz*).
    len_lon: pixels in the zonal direction (idem).
    return : dict mapping 'Mon_HH' (e.g. 'Ene_18') to the mean (lat, lon)
             field over all timestamps of that month at that hour.
    """
    meses = ['Ene', 'Feb', 'Mar', 'Abr', 'May', 'Jun',
             'Jul', 'Ago', 'Sep', 'Oct', 'Nov', 'Dic']
    Dict_ciclo = {}
    for i, mes in enumerate(meses):
        for hora in ('0', '6', '12', '18'):
            pos = np.where((fechas.month == i + 1) & (fechas.hour == int(hora)))[0]
            # Fancy indexing replaces the original element-by-element
            # copy loop; the mean over the time axis is unchanged.
            Dict_ciclo[mes + '_' + hora] = np.mean(matriz[pos], axis=0)
    return Dict_ciclo
def plotear(lllat, urlat, lllon, urlon, dist_lat, dist_lon, Lon, Lat, mapa, bar_min, bar_max, unds, titulo, path, C_T='k', wind=False, mapa_u=None, mapa_v=None):
    """Draw a filled-contour Mercator map and save it as <path>.png.

    lllat/urlat         : latitudes of the lower-left / upper-right corners.
    lllon/urlon         : longitudes of the corners (negative-east coords).
    dist_lat/dist_lon   : spacing of the parallel / meridian grid lines.
    Lon, Lat            : 1-D coordinate arrays of the field to plot.
    mapa                : 2-D field plotted with contourf.
    bar_min/bar_max     : colourbar limits (20 levels in between).
    unds                : units label for the colourbar.
    titulo, path        : figure title and output file path (no extension).
    C_T                 : title colour.
    wind                : if True, overlay quiver arrows built from
                          mapa_u / mapa_v (every 2nd grid point).
    """
    fig = plt.figure(figsize=(8,8), edgecolor='W',facecolor='W')
    ax = fig.add_axes([0.1,0.1,0.8,0.8])
    map = Basemap(projection='merc', llcrnrlat=lllat, urcrnrlat=urlat, llcrnrlon=lllon, urcrnrlon=urlon, resolution='i')
    map.drawcoastlines(linewidth = 0.8)
    map.drawcountries(linewidth = 0.8)
    map.drawparallels(np.arange(lllat, urlat, dist_lat), labels=[1,0,0,1])
    map.drawmeridians(np.arange(lllon, urlon, dist_lon), labels=[1,0,0,1])
    lons,lats = np.meshgrid(Lon,Lat)
    x,y = map(lons,lats)
    bounds = np.linspace(bar_min, bar_max, 20)
    bounds = np.around(bounds, decimals=2)
    # NOTE(review): MidpointNormalize is neither defined nor imported in
    # this file, so the wind == False branch raises NameError; only the
    # wind=True path is exercised by the script below.  Confirm where
    # MidpointNormalize should come from.
    if wind == False:
        CF1 = map.contourf(x,y,mapa, 20, norm=MidpointNormalize(midpoint=0), cmap= plt.cm.viridis, levels=bounds, extend='max')#plt.cm.rainbow , plt.cm.RdYlBu_r
        CF2 = map.contourf(x,y,mapa, 20, norm=MidpointNormalize(midpoint=0), cmap= plt.cm.viridis, levels=bounds, extend='min')#plt.cm.rainbow, plt.cm.RdYlBu_r
    else:
        CF1 = map.contourf(x,y,mapa, 20, cmap= plt.cm.rainbow, levels=bounds, extend='max')#plt.cm.rainbow , plt.cm.RdYlBu_r
        CF2 = map.contourf(x,y,mapa, 20, cmap= plt.cm.rainbow, levels=bounds, extend='min')#plt.cm.rainbow, plt.cm.RdYlBu_r
    cb1 = plt.colorbar(CF1, orientation='horizontal', pad=0.05, shrink=0.8, boundaries=bounds)
    cb1.set_label(unds)
    ax.set_title(titulo, size='15', color = C_T)
    if wind == True:
        # Quiver arrows every second grid point, with a 2 m/s key.
        Q = map.quiver(x[::2,::2], y[::2,::2], mapa_u[::2,::2], mapa_v[::2,::2], scale=15)
        plt.quiverkey(Q, 0.93, 0.05, 2, '2 m/s' )
    #map.fillcontinents(color='white')
    plt.savefig(path+'.png', bbox_inches='tight', dpi=300)
    plt.close('all')
"############################################# CICLO DIURNO DE CICLO ANUAL ################################################"
"Se leen datos de viento a resolución de 0.25 grados"
# archivo = nc.Dataset('/home/yordan/YORDAN/UNAL/TRABAJO_DE_GRADO/DATOS_Y_CODIGOS/DATOS/UyV_1979_2016_res025.nc')
# lat = archivo.variables['latitude'][:]; lon = archivo.variables['longitude'][:]-365
"Fechas"
# time = archivo['time'][:]
# cdftime = utime('hours since 1900-01-01 00:00:0.0', calendar='gregorian')
# fechas = [cdftime.num2date(x) for x in time]
# DATES = pd.DatetimeIndex(fechas)[:]
"Viento"
# v = archivo['v10'][:]
# u = archivo['u10'][:]
# wnd = np.sqrt(v*v+u*u)
"Se calcula ciclo anual de ciclo diurno"
# CICLO_WIND = ciclo_diurno_anual(wnd, DATES, len(lat), len(lon))
"Si no se tiene buen computador, léase dictionario con los ciclos que ya se han calculado anteriormente"
a = open('/home/yordan/YORDAN/UNAL/TESIS_MAESTRIA/13_expo_2018/ciclo_diurno_anual_wind_025_6h.bin', 'rb')
b = open('/home/yordan/YORDAN/UNAL/TESIS_MAESTRIA/13_expo_2018/ciclo_diurno_anual_U_025_6h.bin', 'rb')
c = open('/home/yordan/YORDAN/UNAL/TESIS_MAESTRIA/13_expo_2018/ciclo_diurno_anual_V_025_6h.bin', 'rb')
CICLO_WIND = pickle.load(a)
CICLO_U = pickle.load(b)
CICLO_V = pickle.load(c)
"################################################## COMPUESTOS ##########################################################"
"Se lee matriz de estados"
state_matrix_3st = pickle.load(open('/home/yordan/YORDAN/UNAL/TESIS_MAESTRIA/13_expo_2018/StimateEstados_HHM3st_NovFeb.bin','rb'))
"Se leen datos de viento a resolución de 0.25 grados"
archivo = nc.Dataset('/home/yordan/YORDAN/UNAL/TRABAJO_DE_GRADO/DATOS_Y_CODIGOS/DATOS/UyV_1979_2016_res025.nc')
"Fechas"
time = archivo['time'][:]
cdftime = utime('hours since 1900-01-01 00:00:0.0', calendar='gregorian')
fechas = [cdftime.num2date(x) for x in time]
DATES = pd.DatetimeIndex(fechas)[3::4]
DATES = DATES[:-61] # Para quedar con las mismas fechas del archivo U y V, 6 horas, 10 m, 1979-2016.nc, con el que se hizo el HMM
"Se extraen fechas en estado 2, para un modelo con tres estados"
normal_years = np.array([x for x in set(DATES[~DATES.is_leap_year].year)]) #Años normales. NO bisiestos
DT_st2 = []
year_cualquiera = pd.date_range('2001-11-01', '2002-02-28', freq='D') #año cualquiera (bisiesto) para poder extraer mes y día
for i, d in enumerate(normal_years[1:]-1):
for j in range(state_matrix_3st.shape[1]):
if state_matrix_3st[i, j] == 2:
MONTH = year_cualquiera[j].month
if MONTH <= 2:
DT_st2.append(str(d+1)+'-'+str(year_cualquiera[j].month)+'-'+str(year_cualquiera[j].day)+' -18:00:00')
if MONTH >= 11:
DT_st2.append(str(d)+'-'+str(year_cualquiera[j].month)+'-'+str(year_cualquiera[j].day)+' -18:00:00')
DT = pd.DatetimeIndex(DT_st2) #Vuelvo fechas de pandas
"Se seleccionan fechas entre Diciembre 1 a Febrero 28, en estado 3"
dt_DicFeb = [] #fechas de Diciembre 1 a Febrero 28
for d in normal_years[1:]-1:
pos_DF = np.where((str(d)+'-12-01' <= DT) & (DT <= str(d+1)+'-02-28'))[0] # posiciones de fechas de Noviembre 1 a Noviembre 30 en estado tres
dt_DicFeb.append(DT[pos_DF])
Dt_DicFeb = pd.DatetimeIndex(np.concatenate(dt_DicFeb))
"Se hacen compuestos para Diciembre-Marzo"
lat = archivo.variables['latitude'][:] # va desde 7°N hasta 25°N
lon = archivo.variables['longitude'][:]-365 # va desde -64.5°W hasta -101°W
time = archivo['time'][:]
cdftime = utime('hours since 1900-01-01 00:00:0.0', calendar='gregorian')
fechas = [cdftime.num2date(x) for x in time]
Dates = pd.DatetimeIndex(fechas)[:]
CompU_DicFeb = np.zeros((len(Dt_DicFeb), len(lat), len(lon)))
CompV_DicFeb = np.zeros((len(Dt_DicFeb), len(lat), len(lon)))
MESES = ['Ene', 'Feb', 'Mar', 'Abr', 'May', 'Jun', 'Jul', 'Ago', 'Sep', 'Oct', 'Nov', 'Dic']
for i, d in enumerate(Dt_DicFeb):
pos_date = np.where(Dates == d)[0][0]
u = archivo.variables['u10'][pos_date]
v = archivo.variables['v10'][pos_date]
mes = d.month
cc_U = CICLO_U[MESES[mes-1]+'_18'] # Porque los estados se hicieron para la hora de las 18 horas que es cuando se da la mayor velocidad en el día, y fue con lo que se hicieron los HMM
cc_V = CICLO_V[MESES[mes-1]+'_18'] # Porque los estados se hicieron para la hora de las 18 horas que es cuando se da la mayor velocidad en el día, y fue con lo que se hicieron los HMM
CompU_DicFeb[i] = u - cc_U
CompV_DicFeb[i] = v - cc_V
"Se calcula la velocidad de las anomalías del viento para los estados 1 entre Ene-Abr, Nov-Dic y Nov-Abr. Se plotea"
COMPU_st1_DicFeb = np.mean(CompU_DicFeb, axis = 0); COMPV_st1_DicFeb = np.mean(CompV_DicFeb, axis = 0); COMPWnd_st1_DicFeb = np.sqrt(COMPU_st1_DicFeb*COMPU_st1_DicFeb + COMPV_st1_DicFeb*COMPV_st1_DicFeb); min_DF = np.min(COMPWnd_st1_DicFeb); max_DF = np.max(COMPWnd_st1_DicFeb); Ttl_DF = 'Dicember-February Wind Composites - State 2 (HMM 3)'; path_DF = '/home/yordan/YORDAN/UNAL/TESIS_MAESTRIA/13_expo_2018/CompWind_DF_2st_HMM3'
MIN = np.min([0])
MAX = np.max([0.48])
plotear(lat[-1], lat[0], lon[0], lon[-1], 4, 7, lon[::2], lat[::2], COMPWnd_st1_DicFeb[::2, ::2], MIN, MAX, 'm/s', Ttl_DF, path_DF, C_T='k', wind=True, mapa_u=COMPU_st1_DicFeb[::2, ::2], mapa_v=COMPV_st1_DicFeb[::2, ::2])
| [
"yuarangoj@unal.edu.co"
] | yuarangoj@unal.edu.co |
5501bb7a3a3861187b57d877ffb4df27e04815fa | 9d0d01fcae352e9a7d48d7a8035be775118a556e | /sample/increase_user_profile_category.py | f90f79cb43079b6302a78aae6c0d6fd1114ab931 | [] | no_license | BlueLens/stylelens-user | b278faef0fd32b36355f190e4cd13b95b6e7e57c | aa3698d35c237dd022fb16824945636b0b3660e7 | refs/heads/master | 2021-09-04T20:47:27.881925 | 2018-01-22T09:28:14 | 2018-01-22T09:28:14 | 117,768,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | from __future__ import print_function
from stylelens_user.users import Users
from pprint import pprint
# Exercise the Users API client with a hard-coded device id.
api_instance = Users()
device_id = 'xxxx'
try:
    # Bump the 'blouse' category counter in this device's user profile.
    api_response = api_instance.increase_user_profile_category(device_id, 'blouse')
    pprint(api_response)
except Exception as e:
    print("Exception when calling increase_user_profile_category: %s\n" % e)
| [
"master@bluehack.net"
] | master@bluehack.net |
3823d7cb2ea5d0f991c5d3bad05a63649b06b08e | 8db334107ab95ef3872a4817e455cfcb13541424 | /src/train_single.py | 456c7812c9d2a08ec2f62774b0cddeed99c6efce | [] | no_license | ykwon0407/isles2017_deprecated | 98a2248fca22dfd9f8bb1f9fa35ee252b0ba5a3d | b5ebb4ae0746f27746072b80682e8b2a7258ae54 | refs/heads/master | 2020-04-25T01:42:14.685753 | 2019-02-25T02:36:40 | 2019-02-25T02:36:40 | 172,416,347 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,043 | py | from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.metrics import confusion_matrix
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping
import numpy as np
import pandas as pd
np.random.seed(1004)
from glob import glob
import gc, os, click, sys, time, logging, shutil
import settings, models
from utils import *
from data import *
# Training/geometry constants centralised in settings.py: iteration,
# epoch and early-stopping budgets, time points per sample, patch
# strides, number of CV repeats, and the padded volume size.
N_ITERS=settings.N_ITERS
N_EPOCHS=settings.N_EPOCHS
N_EPOCHS_FINE=settings.N_EPOCHS_FINE
PATIENCE=settings.PATIENCE
PATIENCE_FINE=settings.PATIENCE_FINE
TIME_POINT=settings.TIME_POINT
ROW_STRIDE=settings.ROW_STRIDE
CHA_STRIDE=settings.CHA_STRIDE
N_REPEAT=settings.N_REPEAT
FIXED_WIDTH=settings.FIXED_WIDTH
FIXED_DEPTH=settings.FIXED_DEPTH
@click.command()
@click.option('--cnf', default='c_single_model', show_default=True,
              help="Model configuration files")
def main(cnf):
    """Repeated 5-fold cross-validated training of the patch-based model.

    Loads ``configs/<cnf>.py`` and injects its entries into ``globals()``;
    the body relies on config globals such as ``name``, ``data_dir``,
    ``row_size``, ``channel_size``, ``num_patch``, ``proportion``,
    ``model_name``, ``n_filter``, ``filter_size``, ``lr``, ``batch_size``
    and ``finetune``.  Per-sample validation Dice coefficients are logged
    and written to ``weights/<name>/result.csv``.
    """
    start = time.time()

    # Load configuration and expose its entries as module globals.
    CONFIG_DICT = load_module('configs/{}.py'.format(cnf))
    globals().update(CONFIG_DICT)

    # Set logging: fresh log file per run, mirrored to the console.
    if os.path.exists('loggings/{}.log'.format(name)):
        os.remove('loggings/{}.log'.format(name))
    logging.basicConfig(filename='loggings/{}.log'.format(name),
                        level=logging.INFO, stream=sys.stdout)
    stderrLogger = logging.StreamHandler()
    stderrLogger.setFormatter(
        logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s > %(message)s'))
    logging.getLogger().addHandler(stderrLogger)
    logging.info(CONFIG_DICT)

    # Cross-validation settings: one sample per ground-truth (*OT*) volume.
    file_path = data_dir + '/*/*/*OT*nii'
    N_sample = len(glob(file_path))

    # Start from an empty weights directory for this experiment name.
    if os.path.exists('weights/{}'.format(name)):
        shutil.rmtree('weights/{}'.format(name), ignore_errors=True)
    os.mkdir('weights/{}'.format(name))

    # result[sample, repeat] holds the validation Dice coefficient.
    result = np.zeros((N_sample, N_REPEAT))
    for seed in xrange(N_REPEAT):
        logging.info('-' * 50)
        logging.info("Seed {}".format(seed + 1))
        logging.info('-' * 50)

        count_folds = 0
        kf = KFold(n_splits=5, shuffle=True, random_state=1004 + seed)
        for tr_list, te_list in kf.split(np.arange(N_sample)):
            count_folds += 1
            logging.info('-' * 50)
            logging.info("Train {}-Fold".format(count_folds))
            logging.info('-' * 50)

            # Training patches are oversampled around lesions.
            logging.info("Load Train")
            X_train, y_train, lesion_indicator_train = extract_patches_from_mri(
                tr_list, data_dir,
                is_test=False, is_oversampling=True, row_size=row_size,
                channel_size=channel_size, num_patch=num_patch,
                proportion=proportion,
                fixed_width=FIXED_WIDTH, fixed_depth=FIXED_DEPTH)
            # Normalisation statistics (mean/std) come from the training set.
            X_train, y_train, mean, std = preprocess(X_train, y_train, None, None)

            logging.info("Load Validation")
            X_val, y_val, lesion_indicator_val = extract_patches_from_mri(
                te_list, data_dir,
                is_test=False, is_oversampling=False, row_size=row_size,
                channel_size=channel_size, num_patch=num_patch,
                proportion=proportion,
                fixed_width=FIXED_WIDTH, fixed_depth=FIXED_DEPTH)
            X_val, y_val = preprocess(X_val, y_val, mean, std)

            logging.info("Load Model")
            model_class = find_class_by_name(model_name, models)()
            model = model_class.create_model(
                channel_size=channel_size, row_size=row_size,
                n_filter=n_filter, filter_size=filter_size, lr=lr,
                TIME_POINT=TIME_POINT)

            logging.info('-' * 50)
            logging.info('Fitting : compile.....')
            logging.info('-' * 50)

            # Callbacks: exponential LR decay, early stopping and
            # best-loss checkpointing.
            SCHEDULER = lambda epoch: lr * (0.99 ** epoch)
            info_check_string = 'weights/{}/{}_{}.hdf5'.format(name, seed, count_folds)
            early_stopping = EarlyStopping(monitor='val_loss', patience=PATIENCE)
            model_checkpoint = ModelCheckpoint(info_check_string, monitor='loss', save_best_only=True)
            change_lr = LearningRateScheduler(SCHEDULER)

            b_generator = balance_generator(X_train, y_train, lesion_indicator_train, batch_size)
            model.fit_generator(b_generator, steps_per_epoch=N_ITERS, epochs=N_EPOCHS,
                                validation_data=({'main_input': X_val}, y_val),
                                callbacks=[early_stopping, model_checkpoint, change_lr])
            model.load_weights(info_check_string)

            if finetune is True:
                logging.info('-' * 50)
                logging.info('Finetuning')
                logging.info('-' * 50)
                # Fine-tune on non-oversampled patches with a 15x smaller LR.
                X_train, y_train, lesion_indicator_train = extract_patches_from_mri(
                    tr_list, data_dir,
                    is_test=False, is_oversampling=False, row_size=row_size,
                    channel_size=channel_size, num_patch=num_patch,
                    proportion=proportion,
                    fixed_width=FIXED_WIDTH, fixed_depth=FIXED_DEPTH)
                X_train, y_train = preprocess(X_train, y_train, mean, std)

                SCHEDULER_FINE = lambda epoch: lr * (0.99 ** epoch) / 15.0
                info_check_string_fine = 'weights/{}/fine_{}_{}.hdf5'.format(name, seed, count_folds)
                # NOTE(review): PATIENCE_FINE exists in settings but the
                # original code used PATIENCE here; behaviour kept unchanged.
                early_stopping_fine = EarlyStopping(monitor='val_loss', patience=PATIENCE)
                model_checkpoint_fine = ModelCheckpoint(info_check_string_fine, monitor='loss', save_best_only=True)
                change_lr_fine = LearningRateScheduler(SCHEDULER_FINE)

                b_generator_fine = balance_generator(X_train, y_train, lesion_indicator_train, batch_size)
                model.fit_generator(b_generator_fine, steps_per_epoch=N_ITERS, epochs=N_EPOCHS_FINE,
                                    validation_data=({'main_input': X_val}, y_val),
                                    callbacks=[early_stopping_fine, model_checkpoint_fine, change_lr_fine])
                model.load_weights(info_check_string_fine)

            logging.info('-' * 50)
            logging.info('Validating')
            logging.info('-' * 50)
            # BUGFIX: the original call read
            #   load_mri_from_directory(te_list, , FIXED_WIDTH, ...)
            # with a stray comma (a syntax error); the extra comma is
            # removed here.  Confirm against load_mri_from_directory's
            # actual signature.
            _, label_list = load_mri_from_directory(
                te_list, FIXED_WIDTH, FIXED_DEPTH,
                is_test=False, data_dir=data_dir, is_fixed_size=False)
            X_val_patch, cache = extract_patches_from_mri(
                te_list, data_dir,
                is_test=True, is_oversampling=False, row_size=row_size,
                channel_size=channel_size, num_patch=num_patch,
                patch_r_stride=row_size / ROW_STRIDE,
                patch_c_stride=channel_size / CHA_STRIDE,
                proportion=proportion, is_fixed_size=True,
                fixed_width=FIXED_WIDTH, fixed_depth=FIXED_DEPTH)

            N_val = len(X_val_patch)
            for i in xrange(N_val):
                list_sum_of_GT_by_depth_axis = []
                # Reorder to (patches, time, channel, row, row) for the model.
                X_val_patch_i = np.transpose(
                    X_val_patch[i].reshape(TIME_POINT, -1,
                                           channel_size, row_size, row_size),
                    (1, 0, 2, 3, 4))
                X_val_patch_i = preprocess(X_val_patch_i, None, mean, std)

                y_val_patch_pred_i = model.predict({'main_input': X_val_patch_i},
                                                   batch_size=batch_size)
                # Reassemble the overlapping patch predictions into a
                # brain volume and average the overlaps.
                y_val_patch_pred = make_brain_from_patches(
                    y_val_patch_pred_i, cache[i],
                    patch_r_stride=row_size / ROW_STRIDE,
                    patch_c_stride=channel_size / CHA_STRIDE)
                y_val_patch_pred /= ((ROW_STRIDE ** 2) * 1.0 * CHA_STRIDE)

                y_val_patch = np.transpose(np.array(label_list[i]), (2, 0, 1))
                # Resize the prediction to the ground-truth shape, then binarise.
                zoomRate = [float(ai) / bi for ai, bi in
                            zip(y_val_patch.shape, y_val_patch_pred.shape)]
                y_val_patch_pred = transform_shrink(y_val_patch_pred, zoomRate)
                y_val_patch_pred = (y_val_patch_pred > 0.5)

                logging.info('data:{}, pred: {}, GT: {}'.format(
                    (i + 1), np.mean(y_val_patch_pred), np.mean(y_val_patch)))
                for j in xrange(y_val_patch_pred.shape[0]):
                    list_sum_of_GT_by_depth_axis.append(
                        [np.sum(y_val_patch_pred[j]), np.sum(y_val_patch[j])])
                logging.info(list_sum_of_GT_by_depth_axis)

                dice_coef = cal_dice_coef(y_val_patch_pred.reshape(-1),
                                          y_val_patch.reshape(-1))
                logging.info('Dice Coef: {}'.format(dice_coef))
                result[te_list[i], seed] = dice_coef

                del X_val_patch_i
                del y_val_patch_pred, y_val_patch_pred_i
                gc.collect()

            logging.info("Number of parameters: {}".format(model.count_params()))

            # Free the per-fold data and model before the next fold.
            del X_train, y_train
            del X_val, y_val
            del X_val_patch
            del model
            gc.collect()

        logging.info("RESULT: \n {}".format(result[:, seed]))
        logging.info("MEAN: {}".format(np.mean(result[:, seed])))
        logging.info("STD: {}".format(np.std(result[:, seed])))

    logging.info("-" * 50)
    logging.info("RESULT")
    logging.info("-" * 50)
    logging.info("MEAN: {}".format(np.mean(result)))
    logging.info("STD: {}".format(np.std(result)))
    pd.DataFrame(result).to_csv('weights/{}/result.csv'.format(name), index=False)

    end = time.time()
    logging.info("Elapsed time: {}".format(end - start))
    logging.info(CONFIG_DICT)
if __name__ == "__main__":
    # click parses the --cnf option and dispatches to main().
    main()
| [
"ykwon0407@snu.ac.kr"
] | ykwon0407@snu.ac.kr |
26ee9a0a71afd0f137d3fe89d042334439657b9d | ed331352dc925c321d89647024362ed0bb0d2445 | /02_tensorflow/05_forward_placeHolder.py | 0a9cce309c2d87e2018ba05c47194d07c9adb4dd | [] | no_license | zzy1120/tensorflow_mooc | 9eb01a4d47b648cffd00fb921ff2730a1d6d93cf | 06943cb92d5585535a89d9f4f939133d192dcbb8 | refs/heads/master | 2021-09-14T04:19:35.160150 | 2018-05-08T12:16:47 | 2018-05-08T12:16:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | # coding:utf8
# Two-layer simple neural network (fully connected)
import tensorflow as tf
# Define the input and the weights.
# The input is declared as a placeholder and fed one batch via sess.run().
x = tf.placeholder(tf.float32, shape=(1,2))
w1 = tf.Variable(tf.random_normal([2,3], stddev = 1, seed = 1))
w2 = tf.Variable(tf.random_normal([3,1], stddev = 1, seed = 1))
# Define the forward pass: two chained matrix multiplications.
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)
# Evaluate the graph in a session.
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    # Python 2 print statement; feed_dict supplies the single input row.
    print "y is :\n", sess.run(y, feed_dict={x: [[0.7, 0.5]]})
| [
"719668276@qq.com"
] | 719668276@qq.com |
0b37d096d0e2783b06ebe88f7dbbe19d847274d7 | a139b3745c1fed0da0641f823c6841bde86235fc | /tigerevents/users/forms.py | b134b40888029fdf56a3666e355507dc49ce9657 | [] | no_license | moinmir/Tiger-Events | 3f02365be9bc8be6d1f56cfe54d255fd56f42532 | a18c274fca63dca7589902768ac38761d5fc47ae | refs/heads/main | 2023-07-11T23:23:16.982061 | 2021-08-03T13:37:24 | 2021-08-03T13:37:24 | 305,765,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,722 | py | from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from wtforms import StringField, PasswordField, SubmitField, BooleanField, RadioField
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
from flask_login import current_user
from tigerevents.models import User
class RegistrationForm(FlaskForm):
    """Sign-up form: email plus password with confirmation."""

    email = StringField("Email", validators=[DataRequired(), Email()])
    password = PasswordField("Password", validators=[DataRequired()])
    confirm_password = PasswordField(
        "Confirm Password", validators=[DataRequired(), EqualTo("password")]
    )
    submit = SubmitField("Sign Up")

    def validate_email(self, email):
        """WTForms inline validator: reject an email that is already registered."""
        user = User.query.filter_by(email=email.data).first()
        if user:
            raise ValidationError("Email already exists.")
class LoginForm(FlaskForm):
    """Login form: email/password with a remember-me flag."""

    email = StringField("Email", validators=[DataRequired(), Email()])
    password = PasswordField("Password", validators=[DataRequired()])
    remember = BooleanField("Remember Me")
    submit = SubmitField("Login")
class RequestResetForm(FlaskForm):
    """Form requesting a password-reset email for an existing account."""

    email = StringField("Email", validators=[DataRequired(), Email()])
    submit = SubmitField("Request")

    def validate_email(self, email):
        """WTForms inline validator: the email must belong to an existing account."""
        user = User.query.filter_by(email=email.data).first()
        if user is None:
            raise ValidationError("No account with that email. You must register first.")
class ResetPasswordForm(FlaskForm):
    """Form setting a new password (with confirmation) after a reset request."""

    password = PasswordField("Password", validators=[DataRequired()])
    confirm_password = PasswordField(
        "Confirm Password", validators=[DataRequired(), EqualTo("password")]
    )
    submit = SubmitField("Update Password")
| [
"moin@princeton.edu"
] | moin@princeton.edu |
db5991eca1442f93f1470222ae7820bb0ff745f9 | a1f2b5290ae1c44e4fb04be6dc827c9f3f424b8b | /iis-3rdparty-madis/src/main/resources/eu/dnetlib/iis/3rdparty/scripts/madis/lib/pymysql/converters.py | f08fdf013fc784417f0b5a321c595b6ac175f9f3 | [
"Apache-2.0",
"Zlib",
"LicenseRef-scancode-free-unknown"
] | permissive | openaire/iis | c45cd3cf98114605453ba3f42bb3c63d3da6e033 | e807df7bd5f0c5127f9c295243558e9b33884e63 | refs/heads/master | 2023-08-18T14:54:35.485327 | 2023-07-21T15:21:05 | 2023-08-08T15:02:21 | 42,290,369 | 23 | 81 | Apache-2.0 | 2023-09-07T12:44:24 | 2015-09-11T05:55:08 | Java | UTF-8 | Python | false | false | 10,620 | py | import re
import datetime
import time
import sys
from constants import FIELD_TYPE, FLAG
from charset import charset_by_id
# True when running on Python 3.x; escape_item uses this to decide
# whether incoming bytes must be decoded before the encoder lookup.
PYTHON3 = sys.version_info[0] > 2

# Pre-2.4 compatibility: fall back to the ``sets`` module when the
# built-in ``set`` type does not exist.
try:
    set
except NameError:
    try:
        from sets import BaseSet as set
    except ImportError:
        from sets import Set as set
# Characters that must be backslash-escaped inside a MySQL string
# literal (NUL, newline, CR, Ctrl-Z, both quotes, backslash) and their
# replacement sequences.
ESCAPE_REGEX = re.compile(r"[\0\n\r\032\'\"\\]")
ESCAPE_MAP = {'\0': '\\0', '\n': '\\n', '\r': '\\r', '\032': '\\Z',
              '\'': '\\\'', '"': '\\"', '\\': '\\\\'}
def escape_item(val, charset):
    """Encode *val* as a SQL literal using connection encoding *charset*.

    Dispatch order: tuples/lists/sets go to escape_sequence, dicts to
    escape_dict, everything else through the per-type encoder from the
    module-level ``encoders`` table.  The lookup is by exact type, so a
    type not present in ``encoders`` raises KeyError.  Results that are
    not already str/int/unicode are encoded to *charset*.
    """
    if type(val) in [tuple, list, set]:
        return escape_sequence(val, charset)
    if type(val) is dict:
        return escape_dict(val, charset)
    if PYTHON3 and hasattr(val, "decode") and not isinstance(val, unicode):
        # deal with py3k bytes (decode before the type-based lookup)
        val = val.decode(charset)
    encoder = encoders[type(val)]
    val = encoder(val)
    if type(val) in [str, int, unicode]:
        return val
    val = val.encode(charset)
    return val
def escape_dict(val, charset):
    """Escape every value of *val*; keys are left untouched."""
    return dict((key, escape_item(value, charset))
                for key, value in val.items())
def escape_sequence(val, charset):
    """Render a tuple/list/set as a parenthesised SQL value list."""
    parts = [escape_item(item, charset) for item in val]
    return "(" + ",".join(parts) + ")"
def escape_set(val, charset):
    """Render a set as a comma-separated list of escaped members."""
    return ','.join(escape_item(element, charset) for element in val)
def escape_bool(value):
    """Encode a boolean as the SQL literal '1' or '0' (via int conversion)."""
    return str(int(value))
def escape_object(value):
    """Fallback encoder: the str() rendering of the value."""
    return str(value)


def escape_int(value):
    """Integers need no quoting; pass them through unchanged."""
    return value


# Longs render exactly like generic objects (their str() form).
escape_long = escape_object
def escape_float(value):
    """Format a float with up to 15 significant digits."""
    return '%.15g' % value
def escape_string(value):
    """Quote *value* for MySQL, backslash-escaping the dangerous characters."""
    def _replace(match):
        return ESCAPE_MAP.get(match.group(0))
    return "'%s'" % ESCAPE_REGEX.sub(_replace, value)
def escape_unicode(value):
    """Escape a unicode string exactly like a byte string (see escape_string)."""
    return escape_string(value)
def escape_None(value):
    """NoneType encodes as the unquoted SQL NULL keyword."""
    return 'NULL'
def escape_timedelta(obj):
    """Encode a timedelta as a quoted 'HH:MM:SS' literal.

    Days are folded into the hour count; microseconds are dropped.
    """
    secs = int(obj.seconds)
    hours = secs // 3600 % 24 + int(obj.days) * 24
    minutes = secs // 60 % 60
    seconds = secs % 60
    return escape_string('%02d:%02d:%02d' % (hours, minutes, seconds))
def escape_time(obj):
    """Encode a datetime.time as a quoted 'HH:MM:SS[.ffffff]' literal.

    Fix: the original appended ``".%f" % obj.microsecond``, which
    formats the integer as a float (e.g. '.123.000000') and yields an
    invalid MySQL time literal; the fraction is now rendered as six
    zero-padded digits with '%06d'.
    """
    s = "%02d:%02d:%02d" % (int(obj.hour), int(obj.minute),
                            int(obj.second))
    if obj.microsecond:
        # microsecond is an int in 0..999999 -> six zero-padded digits.
        s += ".%06d" % obj.microsecond
    return escape_string(s)
def escape_datetime(obj):
    """Encode a datetime as a quoted 'YYYY-MM-DD HH:MM:SS' literal.

    Sub-second precision is discarded.
    """
    formatted = obj.strftime("%Y-%m-%d %H:%M:%S")
    return escape_string(formatted)
def escape_date(obj):
    """Encode a date as a quoted 'YYYY-MM-DD' literal."""
    formatted = obj.strftime("%Y-%m-%d")
    return escape_string(formatted)
def escape_struct_time(obj):
    """Encode a time.struct_time via its datetime equivalent."""
    year, month, day, hour, minute, second = obj[:6]
    as_datetime = datetime.datetime(year, month, day, hour, minute, second)
    return escape_datetime(as_datetime)
def convert_datetime(connection, field, obj):
    """Convert a DATETIME or TIMESTAMP column value to datetime.datetime.

    Accepts both space- and 'T'-separated forms, e.g.
    '2007-02-25 23:06:20' or '2007-02-25T23:06:20' ->
    datetime.datetime(2007, 2, 25, 23, 6, 20).

    Values without a time separator, or illegal ones such as
    '2007-02-31 23:06:20' and '0000-00-00 00:00:00', are delegated to
    convert_date, which returns None for illegal dates.
    """
    if not isinstance(obj, unicode):
        # Python 2: decode raw bytes with the connection charset.
        obj = obj.decode(connection.charset)
    if ' ' in obj:
        sep = ' '
    elif 'T' in obj:
        sep = 'T'
    else:
        # No time part at all -- treat as a bare date.
        return convert_date(connection, field, obj)

    try:
        ymd, hms = obj.split(sep, 1)
        return datetime.datetime(*[ int(x) for x in ymd.split('-')+hms.split(':') ])
    except ValueError:
        return convert_date(connection, field, obj)
def convert_timedelta(connection, field, obj):
    """Convert a TIME column value to datetime.timedelta.

    '25:06:17'  -> timedelta(days=1, seconds=3977)
    '-25:06:17' -> timedelta(days=-2, seconds=83177)

    Unparseable input yields None.  MySQL returns TIME columns as
    (+|-)HH:MM:SS; the (+|-)DD HH:MM:SS input form is NOT handled here.

    NOTE(review): a leading '-' is applied to the hours field only, so
    '-25:06:17' parses as (-25h + 6m + 17s) rather than -(25h 6m 17s);
    this matches the examples above, but confirm it is the intended
    MySQL semantics.
    """
    try:
        microseconds = 0
        if not isinstance(obj, unicode):
            # Python 2: decode raw bytes with the connection charset.
            obj = obj.decode(connection.charset)
        if "." in obj:
            # Split off fractional seconds (taken verbatim as an int).
            (obj, tail) = obj.split('.')
            microseconds = int(tail)
        hours, minutes, seconds = obj.split(':')
        tdelta = datetime.timedelta(
            hours = int(hours),
            minutes = int(minutes),
            seconds = int(seconds),
            microseconds = microseconds
            )
        return tdelta
    except ValueError:
        return None
def convert_time(connection, field, obj):
    """Convert a TIME column value to a datetime.time object.

    '15:06:17' -> datetime.time(15, 6, 17).  Unparseable or
    out-of-range input (negative hours, garbage) yields None.

    MySQL's TIME is really a signed offset; mapping it to a time-of-day
    only makes sense when the column is used that way -- otherwise keep
    convert_timedelta as the FIELD_TYPE.TIME converter.

    NOTE(review): the fractional part is passed to int() verbatim, so
    '...:17.5' becomes 5 microseconds rather than 500000 -- confirm
    whether that is intended.
    """
    try:
        microseconds = 0
        if "." in obj:
            # Split off fractional seconds.
            (obj, tail) = obj.split('.')
            microseconds = int(tail)
        hours, minutes, seconds = obj.split(':')
        return datetime.time(hour=int(hours), minute=int(minutes),
                             second=int(seconds), microsecond=microseconds)
    except ValueError:
        return None
def convert_date(connection, field, obj):
    """Convert a DATE column value to a datetime.date object.

    '2007-02-26' -> datetime.date(2007, 2, 26).  Illegal values such as
    '2007-02-31' or '0000-00-00' yield None.
    """
    try:
        if not isinstance(obj, unicode):
            # Python 2: decode raw bytes with the connection charset.
            obj = obj.decode(connection.charset)
        return datetime.date(*[ int(x) for x in obj.split('-', 2) ])
    except ValueError:
        return None
def convert_mysql_timestamp(connection, field, timestamp):
    """Convert a MySQL TIMESTAMP column value to datetime.datetime.

    MySQL >= 4.1 sends TIMESTAMP in the DATETIME format
    ('2007-02-25 22:32:17'); MySQL < 4.1 sends a bare digit string
    ('20070225223217').  Both are handled; illegal values such as
    '2007-02-31 22:32:17' or '00000000000000' yield None.
    """
    if not isinstance(timestamp, unicode):
        # Python 2: decode raw bytes with the connection charset.
        timestamp = timestamp.decode(connection.charset)

    if timestamp[4] == '-':
        # A dash at index 4 means the >= 4.1 DATETIME-style format.
        return convert_datetime(connection, field, timestamp)
    timestamp += "0"*(14-len(timestamp)) # padding
    year, month, day, hour, minute, second = \
        int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[6:8]), \
        int(timestamp[8:10]), int(timestamp[10:12]), int(timestamp[12:14])
    try:
        return datetime.datetime(year, month, day, hour, minute, second)
    except ValueError:
        return None
def convert_set(s):
    """Split a MySQL SET column value on commas into a Python set."""
    return {member for member in s.split(",")}
def convert_bit(connection, field, b):
    """Return a BIT column value unchanged (MySQLdb-compatible behaviour)."""
    #b = "\x00" * (8 - len(b)) + b # pad w/ zeroes
    #return struct.unpack(">Q", b)[0]
    #
    # the snippet above is right, but MySQLdb doesn't process bits,
    # so we shouldn't either
    return b
def convert_characters(connection, field, data):
    """Decode a textual column according to the field's charset and flags.

    SET columns become Python sets, BINARY columns stay raw bytes, other
    text is decoded (and possibly re-encoded) per the connection settings.
    """
    field_charset = charset_by_id(field.charsetnr).name
    if field.flags & FLAG.SET:
        return convert_set(data.decode(field_charset))
    if field.flags & FLAG.BINARY:
        # Binary data must not be charset-decoded.
        return data
    if connection.use_unicode:
        # Caller wants unicode objects back.
        data = data.decode(field_charset)
    elif connection.charset != field_charset:
        # Transcode into the connection's charset (Python 2 byte strings).
        data = data.decode(field_charset)
        data = data.encode(connection.charset)
    return data
def convert_int(connection, field, data):
    """Decode an integer column value into a Python int."""
    value = int(data)
    return value
def convert_long(connection, field, data):
    # Arbitrary-precision integer via the Python 2 ``long`` builtin
    # (merged into ``int`` in Python 3).
    return long(data)
def convert_float(connection, field, data):
    """Decode a FLOAT/DOUBLE column value into a Python float."""
    return float(data)
# Maps Python types to escaping functions used when sending values to MySQL.
encoders = {
    bool: escape_bool,
    int: escape_int,
    long: escape_long,
    float: escape_float,
    str: escape_string,
    unicode: escape_unicode,
    tuple: escape_sequence,
    list:escape_sequence,
    set:escape_sequence,
    dict:escape_dict,
    type(None):escape_None,
    datetime.date: escape_date,
    datetime.datetime : escape_datetime,
    datetime.timedelta : escape_timedelta,
    datetime.time : escape_time,
    time.struct_time : escape_struct_time,
    }
# Maps MySQL column type codes to converter functions for result decoding.
decoders = {
    FIELD_TYPE.BIT: convert_bit,
    FIELD_TYPE.TINY: convert_int,
    FIELD_TYPE.SHORT: convert_int,
    FIELD_TYPE.LONG: convert_long,
    FIELD_TYPE.FLOAT: convert_float,
    FIELD_TYPE.DOUBLE: convert_float,
    # DECIMAL defaults to float; replaced with exact Decimal below when the
    # decimal module is importable.
    FIELD_TYPE.DECIMAL: convert_float,
    FIELD_TYPE.NEWDECIMAL: convert_float,
    FIELD_TYPE.LONGLONG: convert_long,
    FIELD_TYPE.INT24: convert_int,
    FIELD_TYPE.YEAR: convert_int,
    FIELD_TYPE.TIMESTAMP: convert_mysql_timestamp,
    FIELD_TYPE.DATETIME: convert_datetime,
    FIELD_TYPE.TIME: convert_timedelta,
    FIELD_TYPE.DATE: convert_date,
    FIELD_TYPE.SET: convert_set,
    FIELD_TYPE.BLOB: convert_characters,
    FIELD_TYPE.TINY_BLOB: convert_characters,
    FIELD_TYPE.MEDIUM_BLOB: convert_characters,
    FIELD_TYPE.LONG_BLOB: convert_characters,
    FIELD_TYPE.STRING: convert_characters,
    FIELD_TYPE.VAR_STRING: convert_characters,
    FIELD_TYPE.VARCHAR: convert_characters,
    #FIELD_TYPE.BLOB: str,
    #FIELD_TYPE.STRING: str,
    #FIELD_TYPE.VAR_STRING: str,
    #FIELD_TYPE.VARCHAR: str
    }
conversions = decoders # for MySQLdb compatibility
# Prefer exact Decimal conversion for DECIMAL/NEWDECIMAL columns when the
# decimal module exists; otherwise the float converters registered above stay.
try:
    # python version > 2.3
    from decimal import Decimal
    def convert_decimal(connection, field, data):
        # Decode server bytes, then parse losslessly.
        data = data.decode(connection.charset)
        return Decimal(data)
    decoders[FIELD_TYPE.DECIMAL] = convert_decimal
    decoders[FIELD_TYPE.NEWDECIMAL] = convert_decimal
    def escape_decimal(obj):
        return unicode(obj)
    encoders[Decimal] = escape_decimal
except ImportError:
    pass
| [
"l.dumiszewski@icm.edu.pl"
] | l.dumiszewski@icm.edu.pl |
a059d52d4f12dd8142d0e4d940e2f755487388c8 | 353a50bd7b0bb9df9e1380d778cc3e2c192740f5 | /hello world.py | c2adcdf1105aa6c958a6ee6784f2a034b5620e31 | [] | no_license | standrewscollege2018/2019-year-13-classwork-LewisEdmond | 564fc8926afcc72b301484b704bca49e8c219657 | 99d7e2f4f4a22171b334fbbc9b566c56b0db37f2 | refs/heads/master | 2020-05-28T07:39:21.368374 | 2019-05-28T00:30:05 | 2019-05-28T00:30:05 | 188,924,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | # intro to Github and print
# Print a greeting (GitHub/print intro exercise).
print("Hello World")
| [
"led5423@stacmail.net"
] | led5423@stacmail.net |
c7404805651e066294db154117b46ee5c5a6a21a | ff97da14c5c2a47b8b212587df46400f36489023 | /regex_to_enfa/epsnfa.py | 8f53e13821067677a92247fa077e6d08ff815371 | [] | no_license | vivekimsit/Automata-theory | bad66b5244c380dcf2f034c073fc6ffccfb47519 | 4608551f34542e5e0cfce84cda0085a0f89d0624 | refs/heads/master | 2021-01-20T10:10:55.943938 | 2014-10-01T12:37:02 | 2014-10-01T12:37:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,961 | py | import sys,traceback
import os
import string
# Global automaton storage shared by all functions below.
maxn = 200 #maximum number of states
symbol = 2 #number of symbols ('0','1')
epssymbol = 2
'''g[s1][i][s2]=True if and only if there's an edge with symbol i from state s1 to s2
i: 0 is '0', 1 is '1', 2 is epsilon
For fixed state s1 and a symbol c, it is not necessary to exist s2 such that
g[s1][c][s2]=True. If no such s2 exists, we deem that getting c at state s1 will
make the Epsilon-NFA go into a non-final "dead" state and will directly make the
the string not accepted.'''
g = [[[False] * maxn for i in range(symbol+1)] for j in range(maxn)]
''' closure[s1][s2] is True if and only if s2 is in CL(s1)'''
closure = [[False]*maxn for i in range(maxn)]
'''nextpa[i]=i if the regular expression at position i is not '('
nextpa[i]=j if the regular expression at position i is '(' and jth position holds the corresponding ')'
'''
nextpa = [0]*100
state = 0 #current number of states
def addEdge(s1, c, s2):
    """Record a transition s1 --c--> s2 in the global adjacency tensor g."""
    global g
    g[s1][c][s2] = True
def incCapacity():
    """Add one fresh state to the NFA, clear its edges, and return its index."""
    global state
    global g
    fresh = state
    # Wipe any stale edges touching the new state in either direction.
    for other in range(fresh + 1):
        for sym in range(symbol + 1):
            g[other][sym][fresh] = False
            g[fresh][sym][other] = False
    state = fresh + 1
    return fresh
def union(s1, t1, s2, t2):
    """Union of two Epsilon-NFAs (s1,t1) and (s2,t2).

    Returns [start, final] of the combined machine: a new start state with
    epsilon edges into both old starts, and a new final state reached by
    epsilon edges from both old finals.
    """
    start = incCapacity()
    final = incCapacity()
    addEdge(start, epssymbol, s1)
    addEdge(start, epssymbol, s2)
    addEdge(t1, epssymbol, final)
    addEdge(t2, epssymbol, final)
    return [start, final]
def concat(s1, t1, s2, t2):
    """Concatenate two Epsilon-NFAs by an epsilon edge from t1 into s2.

    Returns [start, final] of the combined machine, i.e. [s1, t2].
    """
    addEdge(t1, epssymbol, s2)
    return [s1, t2]
def clo(s, t):
    """Kleene closure of the Epsilon-NFA (s, t); returns [start, final]."""
    start = incCapacity()
    final = incCapacity()
    addEdge(start, epssymbol, s)      # enter the inner machine
    addEdge(t, epssymbol, final)      # exit after a pass
    addEdge(t, epssymbol, s)          # loop back for repetition
    addEdge(start, epssymbol, final)  # or skip entirely (empty string)
    return [start, final]
#Calculate the closure: CL()
def calc_closure():
    """Fill the global table so closure[i][j] is True iff state j is
    reachable from state i via epsilon edges alone (i itself included).
    Runs one breadth-first search per state over g[...][symbol][...].
    """
    global closure
    global symbol
    # Fixed-size queue; at most `state` entries are ever enqueued per BFS.
    queue = [0]*maxn
    for i in range(state):
        for j in range(state):
            closure[i][j]=False
        #Breadth First Search
        head=-1
        tail=0
        queue[0]=i
        closure[i][i]=True
        while (head<tail):
            head=head+1
            j=queue[head]
            #search along epsilon edge
            for k in range(state):
                if ((not closure[i][k]) and (g[j][symbol][k])):
                    tail=tail+1
                    queue[tail]=k
                    closure[i][k]=True
'''parse a regular expression from position s to t, returning the corresponding
Epsilon-NFA. The array of length 2 contains the start state at the first position
and the final state at the second position'''
def parse(re, s, t):
    # Recursive-descent over the fully parenthesised grammar:
    # single symbol | (RE) | RE1+RE2 | RE1.RE2 | (RE)*
    #single symbol
    if (s==t):
        st=[0]*2
        st[0]=incCapacity()
        st[1]=incCapacity()
        #epsilon
        if (re[s]=='e'):
            addEdge(st[0],symbol,st[1])
        else:
            # '0' or '1': edge labelled with the digit's numeric value.
            addEdge(st[0],string.atoi(re[s]),st[1])
        return st
    #(....)
    if ((re[s]=='(')and(re[t]==')')):
        if (nextpa[s]==t):
            # The whole span is one parenthesised group; strip the parens.
            return parse(re,s+1,t-1)
    #RE1+RE2  -- scan for a top-level '+' (nextpa jumps over groups)
    i=s
    while (i<=t):
        i=nextpa[i]
        if ((i<=t)and(re[i]=='+')):
            st1=parse(re,s,i-1)
            st2=parse(re,i+1,t)
            st = union(st1[0],st1[1],st2[0],st2[1])
            return st
        i=i+1
    #RE1.RE2  -- otherwise scan for a top-level '.'
    i=s
    while (i<=t):
        i=nextpa[i]
        if ((i<=t) and (re[i]=='.')):
            st1=parse(re,s,i-1)
            st2=parse(re,i+1,t)
            st = concat(st1[0],st1[1],st2[0],st2[1])
            return st
        i=i+1
    #(RE)*  -- remaining case: trailing '*' applied to everything before it
    st1=parse(re,s,t-1)
    st=clo(st1[0],st1[1])
    return st
#calculate the corresponding ')' of '('
def calc_next(re):
    """Populate the global nextpa: for each '(' the index of its matching
    ')'; every other position maps to itself. Uses a single stack pass
    instead of rescanning forward from every '('.
    """
    global nextpa
    nextpa = [0] * len(re)
    open_positions = []
    for i, ch in enumerate(re):
        nextpa[i] = i
        if ch == '(':
            open_positions.append(i)
        elif ch == ')':
            nextpa[open_positions.pop()] = i
def test(cur, finalstate, level, length, num):
    """Simulate the NFA on the binary string encoded in the bits of `num`
    (bit `level` upward, `length` bits total). `cur` is the current
    epsilon-closed state set as a boolean vector; returns True iff the
    string is accepted (ends with `finalstate` reachable).
    """
    global closure
    global g
    nextone = [False]*state
    if (level>=length):
        # Whole string consumed: accept iff the final state is in the set.
        return cur[finalstate]
    # Bit `level` of num selects the next input symbol ('0' or '1').
    if ((num&(1<<level))>0):
        c=1
    else:
        c=0
    # Move on symbol c from every active state, then epsilon-close.
    for i in range(state):
        if (cur[i]):
            for j in range(state):
                if (g[i][c][j]):
                    for k in range(state):
                        nextone[k]=(nextone[k] or closure[j][k])
    empty=True #test if the state set is already empty
    for i in range(state):
        if (nextone[i]):
            empty=False
    if (empty):
        # Dead: no state can consume the rest of the string.
        return False
    return test(nextone,finalstate,level+1,length,num)
def Start(filename):
    """Read regular expressions (one per line) from `filename`, build an
    Epsilon-NFA for each, and return all accepted 0/1 strings of length
    1..6, newline-separated. Appends 'error' on any failure.
    Python 2 code: uses print statements.
    """
    global state
    global g
    result=''
    #read data case line by line from file
    try:
        br=open(filename,'r')
        for re in br:
            print 'Processing '+re+'...'
            re=re.strip()
            calc_next(re)
            state=0
            nfa=parse(re,0,len(re)-1)
            #calculate closure
            calc_closure()
            #test 01 string of length up to 6
            for length in range(1,6+1):
                for num in range(0,(1<<length)):
                    # Start from the epsilon-closure of the start state.
                    if (test(closure[nfa[0]],nfa[1],0,length,num)):
                        # Decode the accepted string from num's bits.
                        for i in range(length):
                            if ((num&(1<<i))>0):
                                result=result+'1'
                            else:
                                result=result+'0'
                        result=result+"\n"
        #Close the input stream
        br.close()
    except:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        print "*** print_exception:"
        traceback.print_exception(exc_type, exc_value, exc_traceback,limit=2, file=sys.stdout)
        result=result+'error'
    return result
def main(filepath):
    """Run the Epsilon-NFA tester on `filepath`.

    Bug fix: the argument was previously ignored and 'testRE.in' was always
    read, so `python epsnfa.py somefile` silently processed the wrong file.
    """
    return Start(filepath)

if __name__ == '__main__':
    main(sys.argv[1])
| [
"vivekimsit@gmail.com"
] | vivekimsit@gmail.com |
2b58f093901c18130be53de2b768f49d4d31a0be | d723a27ac4674a8b04004f9c883a4addb7a25532 | /StructuresInPython/src/root/nested/TuplesExercise.py | 2ad10a3cfba6cdc7403ae6e016ace55bb787f14a | [
"MIT"
] | permissive | jlmurphy3rd/code-outhouse | bf8f0c2da73edac249764cd10f98f0a396f0c5be | c18d1fc01299cb70545ebdc8b928023401c25078 | refs/heads/master | 2020-04-06T07:03:22.744281 | 2016-10-16T01:57:50 | 2016-10-16T01:58:23 | 52,179,752 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | '''
Created on May 23, 2016
@author: John
'''
# Count 'From' lines in an mbox file by hour of day (Python 2 script).
name = raw_input("Enter file:")
if len(name) < 1 : name = "mbox-short.txt"
handle = open(name)
counts = dict()
for line in handle:
    words = line.split()
    # NOTE(review): the guard checks < 5 but words[5] is read below, so a
    # line with exactly 5 tokens would raise IndexError -- confirm the
    # expected 'From sender weekday month day time year' format.
    if len(words) < 5 : continue
    if words[0] != "From" : continue
    when = words[5]
    tics = when.split(":")
    if len(tics) != 3 : continue
    hour = tics[0]
    counts[hour] = counts.get(hour,0) + 1
# Python 2: dict.items() returns a list, sorted in place by hour key.
lst = counts.items()
lst.sort()
for key, val in lst :
print key, val | [
"Murphy"
] | Murphy |
03a7916c88e7faf3ef06da2e8c41664e43d31aef | d8234481e6e22dc04e38260168d3fd0a2be3ce7b | /gameClasses.py | 707d149d14f4d097f1a35b3a0e932b1fc18142d0 | [] | no_license | jimiez/pygame_rps | b21d13005815cdf7b7822b18acdd923f5e0acbb1 | 677e61a5c0d9babf5ca2546dd58d4a88b25031fb | refs/heads/master | 2021-03-26T11:33:13.923063 | 2020-03-26T07:40:29 | 2020-03-26T07:40:29 | 247,700,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,697 | py | # Classes pertaining to game operations and logic are contained within this source file
import pygame
import random
from uiClasses import *
from constants import *
class GameLogic:
    """
    Complete Rock, Paper, Scissors game logic: bookkeeping of rounds and
    results plus a lightweight opponent AI.
    """
    def __init__(self):
        self.rounds = 0
        self.wins = 0
        self.losses = 0
        self.ties = 0
        self.playerChoices = []
        self.computerChoices = []
        self.listOfChoices = ["rock", "paper", "scissors"]
        self.winStats = []  # per round: -1 computer won, 0 tie, 1 player won
        self.aiRandomness = 5  # % chance the AI plays a fully random hand

    def _computerLogic(self):
        """
        Decide the computer's next move from the round history.
        """
        # Opening round: nothing to react to yet.
        if self.rounds < 1:
            return random.choice(self.listOfChoices)
        # Occasionally play completely at random to stay unpredictable.
        if random.randint(1, 100) < self.aiRandomness:
            return random.choice(self.listOfChoices)
        last = self.winStats[-1]
        if last == 1:
            # Player won: people tend to repeat a winner, so counter it.
            return self._oppositeChoice(self.playerChoices[-1])
        if last == -1:
            # Computer won: play whatever beats our own previous hand.
            return self._oppositeChoice(self.computerChoices[-1])
        # Draw last round: no signal, go random.
        return random.choice(self.listOfChoices)

    def _oppositeChoice(self, choice):
        """
        Return the action that BEATS `choice` (None for unknown input).

        Parameters:
         - choice - str, action to counter
        """
        beats = {"rock": "paper", "paper": "scissors", "scissors": "rock"}
        return beats.get(choice)

    def playRound(self, playerChoice):
        """
        Play one round given the player's action.

        Parameters:
         - playerChoice - string, one of "rock", "paper", "scissors"
        Raises ValueError on any other input.
        """
        if playerChoice not in self.listOfChoices:
            print("Invalid choice!")
            raise ValueError
        computerChoice = self._computerLogic()
        # Record both hands in the history.
        self.playerChoices.append(playerChoice)
        self.computerChoices.append(computerChoice)
        # Decide the outcome and update the tallies.
        if playerChoice == computerChoice:
            outcome = 0
            self.ties += 1
        elif playerChoice == self._oppositeChoice(computerChoice):
            outcome = 1
            self.wins += 1
        else:
            outcome = -1
            self.losses += 1
        self.winStats.append(outcome)
        self.rounds += 1
class Sequence:
    """
    Abstract base for one scene of the game loop. Subclasses implement
    input/update/render; `next` points at the scene to run next frame
    (itself by default).
    """
    def __init__(self):
        self.next = self

    def input(self, events, keys):
        """
        Process pygame events and pressed keys for this scene.
        """
        raise NotImplementedError

    def update(self):
        """
        Advance the scene's game logic by one frame.
        """
        raise NotImplementedError

    def render(self, screen):
        """
        Draw this scene onto the given surface.
        """
        raise NotImplementedError

    def nextSequence(self, sequence):
        """
        Switch to `sequence` on the next frame (None terminates the loop).
        """
        self.next = sequence
class SequenceStart(Sequence):
    """
    Just a start-up screen
    Parameters:
     - scoreboard - Scoreboard object, passed between scenes to maintain consistency.
    """
    def __init__(self, scoreboard):
        Sequence.__init__(self)
        self.scoreboard = scoreboard
        # Title and prompt text, pre-rendered once and centered on screen.
        self.mainfont = pygame.font.SysFont(FONT_NAME, FONT_HUGE)
        self.maintext = self.mainfont.render("ROCK, PAPER & SCISSORS!", 1, COLOR_BLACK)
        self.maintext_rect = self.maintext.get_rect()
        self.maintext_rect.center = (SIZE_SCREEN[0] / 2, SIZE_SCREEN[1] / 2 - 100)
        self.subfont = pygame.font.SysFont(FONT_NAME, FONT_NORMAL)
        self.subtext = self.subfont.render("Press any key to start", 1, COLOR_BLUE)
        self.subtext_rect = self.subtext.get_rect()
        self.subtext_rect.center = (SIZE_SCREEN[0] / 2, SIZE_SCREEN[1] / 2 + 100)
        # Timer event that swaps the decorative images every 750 ms
        # (the original comment said "quarter of a second", but 750 is used).
        self.changeimage = pygame.USEREVENT
        pygame.time.set_timer(self.changeimage, 750)
        self.newimages = False
        # Load up two random images
        self.leftimage = random.choice(list(DICT_IMAGES.values()))
        self.rightimage = random.choice(list(DICT_IMAGES.values()))

    def input(self, events, keys):
        # See if any key is pressed to start the actual game.
        if sum(keys) > 0:
            self.nextSequence(SequenceSelection(self.scoreboard))
        for e in events:
            if e.type == self.changeimage:
                # Defer the actual swap to update().
                self.newimages = True

    def update(self):
        if self.newimages:
            leftoldimage = self.leftimage
            rightoldimage = self.rightimage
            # Make sure that the same images aren't repeated.
            while self.leftimage == leftoldimage:
                self.leftimage = random.choice(list(DICT_IMAGES.values()))
            while self.rightimage == rightoldimage:
                self.rightimage = random.choice(list(DICT_IMAGES.values()))
            self.newimages = False

    def render(self, screen):
        screen.fill(COLOR_WHITE)
        screen.blit(self.maintext, self.maintext_rect)
        screen.blit(self.subtext, self.subtext_rect)
        # Two images flank the screen center, 70 px left and right.
        leftimg_rect = self.leftimage.get_rect()
        leftimg_rect.center = (SIZE_SCREEN[0] / 2 - 70, SIZE_SCREEN[1] / 2)
        screen.blit(self.leftimage, leftimg_rect)
        rightimg_rect = self.rightimage.get_rect()
        rightimg_rect.center = (SIZE_SCREEN[0] / 2 + 70, SIZE_SCREEN[1] / 2)
        screen.blit(self.rightimage, rightimg_rect)
class SequenceSelection(Sequence):
    """
    This sequence handles the part of the game where player chooses their next move
    Parameters:
     - scoreboard - scoreboard object passed between scenes
    """
    def __init__(self, scoreboard):
        Sequence.__init__(self)
        # Images to be loaded along with offsets
        images = ("rock", "paper", "scissors")
        offsets = [-150, 0, 150]
        self.selections = []
        self.choice = None
        # Initialize and load the game selections (i.e. rock, paper, scissors).
        for i in range(0, 3):
            self.selections.append(Selection(images[i], DICT_IMAGES[images[i]], (offsets[i], 0)))
        # Init rest
        self.scoreboard = scoreboard
        self.gamelogic = self.scoreboard.gamelogic

    def input(self, events, pressed_keys):
        for event in events:
            if event.type == pygame.MOUSEBUTTONUP:
                pos = pygame.mouse.get_pos()
                # First hit wins: once a choice is taken, later clicks
                # in the same event batch are ignored.
                for s in self.selections:
                    if self.choice == None:
                        self.choice = s.getClick(pos)

    def update(self):
        if self.choice != None:
            # Play the round, then hand over to the result scene.
            self.gamelogic.playRound(self.choice)
            self.choice = None
            self.nextSequence(SequenceResolve(self.scoreboard))

    def render(self, screen):
        screen.fill(COLOR_WHITE)
        for s in self.selections:
            s.update(screen)
        self.scoreboard.update(screen)
class SequenceResolve(Sequence):
    """
    Shows the outcome of the last round and offers "New round" / "Quit".

    Parameters:
     - scoreboard - scoreboard object passed between scenes
    """
    def __init__(self, scoreboard):
        Sequence.__init__(self)
        self.scoreboard = scoreboard
        logic = scoreboard.gamelogic
        # Display helper fed with the most recent hands and outcome.
        self.result = ResultDisplayer(
            logic.playerChoices[-1],
            logic.computerChoices[-1],
            logic.winStats[-1])
        # Buttons to replay or leave the game.
        self.button_newround = Button((-120, 150), (150, 50), "New round")
        self.button_quit = Button((120, 150), (150, 50), "Quit")

    def input(self, events, pressed_keys):
        for ev in events:
            if ev.type != pygame.MOUSEBUTTONUP:
                continue
            self.button_newround.getClick(pygame.mouse.get_pos())
            self.button_quit.getClick(pygame.mouse.get_pos())

    def update(self):
        if self.button_newround.clicked:
            self.nextSequence(SequenceSelection(self.scoreboard))
        if self.button_quit.clicked:
            # None ends the scene loop.
            self.nextSequence(None)

    def render(self, screen):
        screen.fill(COLOR_WHITE)
        self.scoreboard.update(screen)
        self.result.update(screen)
        self.button_newround.update(screen)
self.button_quit.update(screen) | [
"jimiz666@gmail.com"
] | jimiz666@gmail.com |
e4516d18fb4cccb863d378065317d8e0e3ed5dbc | 51630d56d313c538c59086fa605a4753adebe3de | /vj_site/songs/models.py | 4208668a04b4e647c84e4af84e27636bd8faabc5 | [] | no_license | vjoshi19/djangoProjects | 610634b2e5d3f2b5743d02d295547e045119b48f | b986fd832889dab964681d5fd9946f6eb4e0c297 | refs/heads/master | 2021-07-12T09:28:11.969577 | 2017-10-13T21:52:22 | 2017-10-13T21:52:22 | 106,876,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 828 | py | from django.db import models
# * Performer model should:
# * have a name
# * return the name when turned into a string
class Performer(models.Model):
    """A performer who sings a song for karaoke."""

    # Display name. Fixed typo CharFeild -> CharField: the misspelled
    # attribute raised AttributeError the moment the module was imported.
    name = models.CharField(max_length=255)

    def __str__(self):
        return self.name
# * Song model should:
# * have a title
# * have an artist (original performer)
# * have a performer (who's singing it for karaoke) (make this another model)
# * have a length (number of seconds in duration)
# * return '<title> by <artist>' when turned into a string
class Song(models.Model):
    """A karaoke song: original artist plus the performer singing it."""

    # Fixed typos CharFeild -> CharField and IntegerFeild -> IntegerField:
    # the misspelled names raised AttributeError at import time.
    title = models.CharField(max_length=255)
    artist = models.CharField(max_length=255)
    # Duration in seconds.
    length = models.IntegerField()
    # on_delete is mandatory on Django 2.0+; CASCADE deletes a performer's
    # songs together with the performer.
    performer = models.ForeignKey(Performer, on_delete=models.CASCADE)

    class Meta:
        # The model has no 'order' field, so ordering = ['order',] made
        # every queryset raise FieldError; order by title instead.
        ordering = ['title',]

    def __str__(self):
        return (self.title + " by " + self.artist)
| [
"vjoshi@biolifesolutions.com"
] | vjoshi@biolifesolutions.com |
3957622a3eae4aa075c7ec57bfa91cc2ce15cf99 | 63477b7a6e34d76885f39b4e6e303f28d89020d2 | /submissions/abc122/a.py | a41680eba6f8f106954078bebfb777add22fcb90 | [] | no_license | yosuke-ippo/Atcoder | d1087382759fe3ada5cfff5e370b3d3f33934e46 | 2f198a38791536d16ab87f172dc12d7066d0a939 | refs/heads/main | 2023-08-23T16:24:03.995969 | 2021-10-03T20:27:50 | 2021-10-03T20:27:50 | 375,315,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | b = input()
# AtCoder ABC122 A: print the DNA base pairing with b (A<->T, G<->C).
# Any other input falls through all branches and prints nothing.
if b =='A':
    print('T')
elif b =='T':
    print('A')
elif b =='G':
    print('C')
elif b =='C':
    print('G')
| [
"yoskey.programming@gmail.com"
] | yoskey.programming@gmail.com |
1dd45a57f2bb7a7b8e25b9c3b80f32e5ff2efcf2 | c01f125f70f35f4e9a0252b09030015d0e797c70 | /tests/test_plugin.py | 6081fc833e4104535f491f7416c757eee834d1be | [
"Apache-2.0"
] | permissive | manheim/eds | e48b543cbb32d8476f8e05be3690568834b574e1 | 20398c2585bf666a2d909be52f314116a41061a5 | refs/heads/main | 2023-07-31T12:49:07.689976 | 2021-08-30T13:51:59 | 2021-08-30T13:51:59 | 367,961,136 | 4 | 4 | Apache-2.0 | 2021-09-23T02:39:02 | 2021-05-16T18:49:40 | Python | UTF-8 | Python | false | false | 706 | py | from eds.plugin import BasePlugin
class PluginChild(BasePlugin):
    # Leaf plugin: inherits BasePlugin's defaults (no children of its own).
    pass
class PluginParent(BasePlugin):
    @property
    def children(self):
        # One nesting level: a single PluginChild built from empty yaml.
        return [PluginChild({})]
class PluginGrandParent(BasePlugin):
    @property
    def children(self):
        # Two nesting levels: PluginParent, which in turn owns PluginChild.
        return [PluginParent({})]
def test_get_child_plugins():
    """descendants flattens the tree depth-first: grandchild before child."""
    plugin = PluginGrandParent({})
    assert len(plugin.descendants) == 2
    kinds = [type(d).__name__ for d in plugin.descendants]
    assert kinds == ['PluginChild', 'PluginParent']
def test_id_property():
    """The 'id' yaml key is exposed through the id property."""
    assert PluginChild({'id': 'my_id'}).id == 'my_id'
def test_yaml_property():
    """The raw yaml dict is exposed unchanged through the yaml property."""
    assert PluginChild({'some': 'yaml'}).yaml == {'some': 'yaml'}
| [
"qwcode@gmail.com"
] | qwcode@gmail.com |
1f8bd2536759070a7ec3e9072c675f5e2455004e | 047043b58b420169834ca2211bab784d2631c69c | /MatchZoo/matchzoo/models/sbdecatten.py | 31b3037f4f5de06af52bd31bda57fe9ec5cda1a1 | [
"Apache-2.0"
] | permissive | thiziri/lates_MZ_and_data_pre | a27b9ddbe867eac8793f2a273e59586c28a62798 | c5c47af4b00451dd597a1d45a459fe7ca46e860b | refs/heads/master | 2020-03-24T06:51:11.054727 | 2019-03-12T15:10:33 | 2019-03-12T15:10:33 | 142,544,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,454 | py | # -*- coding=utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import keras
import keras.backend as K
from keras.models import Sequential, Model
from keras.layers import *
from keras.layers import Reshape, Embedding, Dot
from keras.optimizers import Adam
from model import BasicModel
from layers.DynamicMaxPooling import *
from layers.BiLSTM import BiLSTM
from layers.MultiPerspectiveMatch import MultiPerspectiveMatch
#from layers.Attention import MultiPerspectiveAttention
from layers.SequenceMask import SequenceMask
from utils.utility import *
from keras.activations import softmax
class SBDecAtten(BasicModel):
    """implementation of a siamese decomposeable attention

    Keras graph following the Parikh et al. decomposable-attention recipe:
    project -> soft-align -> compare -> aggregate -> classify/score.
    """
    def __init__(self, config):
        super(SBDecAtten, self).__init__(config)
        self.__name = 'SBDecAtten'
        # Config keys validated later by self.check().
        self.check_list = ['text1_maxlen', 'text2_maxlen',
                           'embed', 'embed_size', 'vocab_size',
                           'text1_attention', 'text2_attention',
                           'dropout_rate']
        self.initializer_gate = keras.initializers.RandomUniform(minval=-0.01, maxval=0.01, seed=11) # attention init
        self.embed_trainable = config['train_embed']
        self.setup(config)
        if not self.check():
            raise TypeError('[SBDecAtten] parameter check wrong')
        print('[SBDecAtten] init done', end='\n')

    def setup(self, config):
        """Apply defaults, then overlay the user config."""
        if not isinstance(config, dict):
            raise TypeError('parameter config should be dict:', config)
        self.set_default('dropout_rate', 0)
        self.set_default('text1_attention', False)
        self.set_default('text2_attention', False)
        self.config.update(config)

    def build(self):
        """Assemble and return the Keras Model (inputs: query, doc)."""
        # Hard-coded architecture hyper-parameters.
        self.projection_dim=300
        self.compare_dim=300
        self.compare_dropout=0.2
        self.projection_hidden = 0
        query = Input(name='query', shape=(self.config['text1_maxlen'],))
        show_layer_info('Input', query)
        doc = Input(name='doc', shape=(self.config['text2_maxlen'],))
        show_layer_info('Input', doc)
        # NOTE(review): leaks_input/leaks_dense are built but never wired
        # into the model (inputs are only [query, doc]) -- confirm intended.
        leaks_input = Input(name='leaks_input', shape=(1,))
        show_layer_info('Input', leaks_input)
        leaks_dense = Dense(int(self.config['number_dense_units']/2), activation='relu')(leaks_input)
        show_layer_info('Dense', leaks_dense)
        # Shared embedding applied to both sides (siamese weight tying).
        embedding = Embedding(self.config['vocab_size'], self.config['embed_size'], weights=[self.config['embed']], trainable=self.embed_trainable)
        q_embed = embedding(query)
        show_layer_info('Embedding', q_embed)
        d_embed = embedding(doc)
        show_layer_info('Embedding', d_embed)
        # ########## compute attention weights for the query words: better then mvlstm alone
        if self.config["text1_attention"]:
            q_w = Dense(1, kernel_initializer=self.initializer_gate, use_bias=False)(
                q_embed)  # use_bias=False to simple combination
            show_layer_info('Dense', q_w)
            q_w = Lambda(lambda x: softmax(x, axis=1), output_shape=(self.config['text1_maxlen'],))(q_w)
            show_layer_info('Lambda-softmax', q_w)
            # ########## add attention weights for Q_words
            q_w_layer = Lambda(lambda x: K.repeat_elements(q_w, rep=self.config['embed_size'], axis=2))(q_w)
            show_layer_info('repeat', q_w_layer)
            q_embed = Multiply()([q_w_layer, q_embed])
            show_layer_info('Dot-qw', q_embed)
        # ####################### attention text1
        # ########## compute attention weights for the document words:
        if self.config['text2_attention']:
            d_w = Dense(1, kernel_initializer=self.initializer_gate, use_bias=False)(d_embed)
            show_layer_info('Dense', d_w)
            d_w = Lambda(lambda x: softmax(x, axis=1), output_shape=(self.config['text2_maxlen'],))(d_w)
            show_layer_info('Lambda-softmax', d_w)
            # ########## add attention weights for D_words
            d_w_layer = Lambda(lambda x: K.repeat_elements(d_w, rep=self.config['embed_size'], axis=2))(d_w)
            d_embed = Multiply()([d_w_layer, d_embed])
            show_layer_info('Dot-qw', d_embed)
        # ####################### attention text2
        # Projection (shared TimeDistributed stack on both sides)
        projection_layers = []
        if self.projection_hidden > 0:
            projection_layers.extend([
                Dense(self.projection_hidden, activation='elu'),
                Dropout(rate=self.config['rate_drop_dense']),
            ])
        projection_layers.extend([
            Dense(self.projection_dim, activation=None),
            Dropout(rate=self.config['rate_drop_dense']),
        ])
        q1_encoded = self.time_distributed(q_embed, projection_layers)
        q2_encoded = self.time_distributed(d_embed, projection_layers)
        # Attention: soft-align each side against the other
        q1_aligned, q2_aligned = self.soft_attention_alignment(q1_encoded, q2_encoded)
        # Compare: concat encoding, alignment, and their sub/mult features
        q1_combined = Concatenate()([q1_encoded, q2_aligned, self.submult(q1_encoded, q2_aligned)])
        q2_combined = Concatenate()([q2_encoded, q1_aligned, self.submult(q2_encoded, q1_aligned)])
        compare_layers = [
            Dense(self.compare_dim, activation='elu'),
            Dropout(self.compare_dropout),
            Dense(self.compare_dim, activation='elu'),
            Dropout(self.compare_dropout),
        ]
        q1_compare = self.time_distributed(q1_combined, compare_layers)
        q2_compare = self.time_distributed(q2_combined, compare_layers)
        # Aggregate: avg+max pooling over time on each side
        q1_rep = self.apply_multiple(q1_compare, [GlobalAvgPool1D(), GlobalMaxPool1D()])
        q2_rep = self.apply_multiple(q2_compare, [GlobalAvgPool1D(), GlobalMaxPool1D()])
        # Classifier MLP
        merged = Concatenate()([q1_rep, q2_rep])
        dense = BatchNormalization()(merged)
        dense = Dense(self.config['number_dense_units'], activation='elu')(dense)
        dense = Dropout(self.config['rate_drop_dense'])(dense)
        dense = BatchNormalization()(dense)
        dense = Dense(self.config['number_dense_units'], activation='elu')(dense)
        dense = Dropout(self.config['rate_drop_dense'])(dense)
        # Output head depends on the task target mode.
        if self.config['target_mode'] == 'classification':
            out_ = Dense(2, activation='softmax')(dense)
        elif self.config['target_mode'] in ['regression', 'ranking']:
            out_ = Dense(1, activation='sigmoid')(dense)
        show_layer_info('Dense', out_)
        model = Model(inputs=[query, doc], outputs=out_)
        return model

    def unchanged_shape(self, input_shape):
        "Function for Lambda layer"
        return input_shape

    def substract(self, input_1, input_2):
        "Substract element-wise"
        # Negate then add, since Keras has no Subtract merge here.
        neg_input_2 = Lambda(lambda x: -x, output_shape=self.unchanged_shape)(input_2)
        out_ = Add()([input_1, neg_input_2])
        return out_

    def submult(self, input_1, input_2):
        "Get multiplication and subtraction then concatenate results"
        mult = Multiply()([input_1, input_2])
        sub = self.substract(input_1, input_2)
        out_= Concatenate()([sub, mult])
        return out_

    def apply_multiple(self, input_, layers):
        "Apply layers to input then concatenate result"
        if not len(layers) > 1:
            raise ValueError('Layers list should contain more than 1 layer')
        else:
            agg_ = []
            for layer in layers:
                agg_.append(layer(input_))
            out_ = Concatenate()(agg_)
        return out_

    def time_distributed(self, input_, layers):
        "Apply a list of layers in TimeDistributed mode"
        out_ = []
        node_ = input_
        for layer_ in layers:
            node_ = TimeDistributed(layer_)(node_)
        out_ = node_
        return out_

    def soft_attention_alignment(self, input_1, input_2):
        "Align text representation with neural soft attention"
        # Pairwise similarity, then row/column softmax for each direction.
        attention = Dot(axes=-1)([input_1, input_2])
        w_att_1 = Lambda(lambda x: softmax(x, axis=1),
                         output_shape=self.unchanged_shape)(attention)
        w_att_2 = Permute((2,1))(Lambda(lambda x: softmax(x, axis=2),
                                        output_shape=self.unchanged_shape)(attention))
        in1_aligned = Dot(axes=1)([w_att_1, input_1])
        in2_aligned = Dot(axes=1)([w_att_2, input_2])
        return in1_aligned, in2_aligned
| [
"belkacemthiziri@gmail.com"
] | belkacemthiziri@gmail.com |
ae28174baee504ac103c17d3339d23ea2b076e85 | 7bce6308cfab20992aeaa8a95bcecf2320deb360 | /test_bokeh.py | 29c58d3afc4954e59e8ec3f3a376666b51f913f1 | [
"MIT"
] | permissive | uetke/UTrack | 565bfeb0c176d17cc438f8a5144e954e5159fcdf | efab70bf2e1dddf76e1b7e3a0efbdd611ea856de | refs/heads/master | 2020-03-21T14:03:17.339757 | 2018-08-06T14:33:25 | 2018-08-06T14:33:25 | 138,639,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | import numpy as np
from bokeh.io import curdoc, show
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, Slider
from bokeh.plotting import figure

# Precompute N frames of a 200x200 scalar field; the slider scrubs frames.
N = 100
x_ = np.linspace(0, 10, 200)
y_ = np.linspace(0, 10, 200)
z_ = np.linspace(0, 10, N)
x, y, z = np.meshgrid(x_, y_, z_, indexing='xy')
data = np.sin(x+z)*np.cos(y)

# The data source holds only the currently displayed frame.
source = ColumnDataSource(data=dict(image=[data[:, :, 0]]))

p = figure(x_range=(0, 10), y_range=(0, 10))
p.image(image='image', x=0, y=0, dw=10, dh=10, source=source, palette="Spectral11")

slider = Slider(start=0, end=(N-1), value=0, step=1, title="Frame")

def update(attr, old, new):
    # Swap in the frame selected by the slider.
    source.data = dict(image=[data[:, :, slider.value]])

slider.on_change('value', update)

# NOTE(review): on_change callbacks only fire under `bokeh serve`; the
# trailing show(p) renders a static page where the slider does nothing.
curdoc().add_root(column(p, slider))
show(p) | [
"aquiles@aquicarattino.com"
] | aquiles@aquicarattino.com |
4d84b90578d98e4a3679ca655fd6ad90a1de52b4 | 7d759adfd57925a0306fb097df6fcdbacd6a4ff6 | /base/hw01/dmia/gradient_check.py | a70c55f2f6d5c78173ef6324ff2ff3b5f816e152 | [] | no_license | Dkotlukov/MIPT_Data_Mining_In_Action_2016 | 6c57a30b673f6e54f9f622d8e79131c9470fa268 | 5d054cd147bc57f3d98ce5768aa72d380e760204 | refs/heads/master | 2021-01-12T11:37:04.042744 | 2016-12-04T00:17:39 | 2016-12-04T00:17:39 | 72,231,122 | 0 | 0 | null | 2016-10-28T18:13:04 | 2016-10-28T18:13:04 | null | UTF-8 | Python | false | false | 1,709 | py | import numpy as np
from random import randrange
def eval_numerical_gradient(f, x):
    """
    a naive implementation of numerical gradient of f at x
    - f should be a function that takes a single argument
    - x is the point (numpy array) to evaluate the gradient at

    Uses a one-sided (forward) difference with step h. x is perturbed in
    place but restored before the next element. Fix: removed the leftover
    debug print that emitted one line per element of x.
    """

    fx = f(x)  # evaluate function value at original point
    grad = np.zeros(x.shape)
    h = 0.00001

    # iterate over all indexes in x
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        # evaluate function at x+h
        ix = it.multi_index
        x[ix] += h  # increment by h
        fxh = f(x)  # evalute f(x + h)
        x[ix] -= h  # restore to previous value (very important!)

        # compute the partial derivative
        grad[ix] = (fxh - fx) / h  # the slope
        it.iternext()  # step to next dimension

    return grad
def grad_check_sparse(f, x, analytic_grad, num_checks):
    """
    Sample a few random coordinates of x and print the numerical gradient
    (central difference, step h) next to the analytic one, with their
    relative error. x is perturbed in place but restored each time.
    """
    h = 1e-5
    for _ in range(num_checks):
        # Pick a random multi-index into x.
        ix = tuple(randrange(dim) for dim in x.shape)

        x[ix] += h          # evaluate f(x + h)
        fxph = f(x)
        x[ix] -= 2 * h      # evaluate f(x - h)
        fxmh = f(x)
        x[ix] += h          # restore original value

        grad_numerical = (fxph - fxmh) / (2 * h)
        grad_analytic = analytic_grad[ix]
        rel_error = abs(grad_numerical - grad_analytic) / (
            abs(grad_numerical) + abs(grad_analytic))
        print('numerical: %f analytic: %f, relative error: %e' % (
            grad_numerical, grad_analytic, rel_error))
| [
"Dkotlukov@gmail.com"
] | Dkotlukov@gmail.com |
369453e2f55a70a8ed11391d3ecbc71559a5fdf6 | 0825d6cdc2482cac3b365b42e4eb0eb88c4865b6 | /public_html/flask.fcgi | cb1b551732f7df4bb7c38ffc05e1a9808d5fd1fe | [] | no_license | mtbthebest/flask | f3e321350c27cb5b880b059f0b865afd4f33cc94 | 731b41ccd917d771d9190e47b8f64f1bd34edd50 | refs/heads/master | 2020-04-17T14:52:15.558787 | 2019-01-20T15:28:31 | 2019-01-20T15:28:31 | 166,675,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | fcgi | #!/var/www/myapp/public_html/venv/bin/python3.4
import sys
# Make the virtualenv's site-packages importable before importing the app.
sys.path.append('/var/www/myapp/public_html/venv/lib/python3.4/site-packages')
from flup.server.fcgi import WSGIServer
from myapp import app
if __name__ == '__main__':
    # flup bridges FastCGI <-> WSGI: serve the Flask app over FastCGI.
    WSGIServer(app).run()
| [
"mtbthebest11@gmail.com"
] | mtbthebest11@gmail.com |
1b4aa17bdea1665d06d03f6e5ef54cf78353f7df | c3c43fee5267e38fe5e04456115e3a9444ec251e | /StreamSwitch/utilities.py | 2f0b187a1fbe516941fd088377502605e78ecad5 | [
"MIT"
] | permissive | KMNR/Webstream | dfe0dac1363932320f766b7338e58da48df7fff0 | 61a204c7938c60fa9b9757edb082800362dd4de6 | refs/heads/master | 2020-05-20T16:18:16.238989 | 2017-03-07T07:01:19 | 2017-03-07T07:01:19 | 10,764,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | def getstatus():
import os
f = os.popen("/etc/init.d/darkice status")
try:
pid = f.readlines()[0].rstrip('\n')
except:
pid = 0
return pid
| [
"engineering@kmnr.org"
] | engineering@kmnr.org |
22b3b0d8d5613807e44feec6971ca657d16ba514 | d1868bc506b7bf5039bbb7906dfc552fd67bda49 | /src/server.py | 94cba5d79caeb704f093ee44aa84eda65ba6a950 | [
"MIT"
] | permissive | jackvandrunen/strongpad | 71e491ce3f124723c267595636c489d1d27caa79 | 2f684bfdbc43843481ab311ccc8bbfede46bb1ce | refs/heads/master | 2021-05-27T12:38:57.984565 | 2014-12-16T04:56:57 | 2014-12-16T04:56:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,517 | py | from SocketServer import ThreadingMixIn
from wsgiref.simple_server import WSGIServer, make_server
import json
import os
import sys
import sessions
try:
import bottle
except ImportError:
print 'Missing dependency: bottle'
sys.exit(1)
try:
import markdown2
except ImportError:
print 'Missing dependency: markdown2'
sys.exit(1)
try:
import password
except ImportError:
print 'Missing dependency: scrypt'
sys.exit(1)
@bottle.route('/')
@bottle.view('login')
@sessions.start
def login_page(session):
    # GET /: show the login form; already-authenticated sessions go to /index.
    if session.get('in'):
        bottle.redirect('/index')
    return {}
@bottle.route('/', method='POST')
@sessions.start
def process_login(session):
    # POST /: verify the submitted password against the hash in config.json.
    if session.get('in'):
        bottle.redirect('/index')
    p = bottle.request.forms.get('password')
    with open('config.json') as f:
        passw, salt = json.load(f)['password']
    # Re-hash the candidate with the stored salt and compare.
    # NOTE(review): plain == is not a constant-time comparison.
    hashed = password.encrypt(p, salt)[0]
    if hashed == passw:
        session['in'] = True
        bottle.redirect('/index')
    bottle.redirect('/')
@bottle.route('/index')
@bottle.view('index')
@sessions.start
def serve_index(session):
    # List all pads, most recently modified first.
    # NOTE(review): a second function named serve_index further down rebinds
    # this module attribute; routing still works because bottle captured the
    # callable at decoration time, but the duplicate name is confusing.
    if not session.get('in'):
        bottle.redirect('/')
    files = os.listdir('pads/')
    files.sort(key=lambda f: os.stat(os.path.join('pads', f)).st_mtime, reverse=True)
    return {'files': files}
@bottle.route('/p/<pad>')
@bottle.view('editor')
@sessions.start
def pad_editor(session, pad):
    # Edit view for a pad; anonymous visitors are bounced to the read-only
    # viewer URL (trailing slash) instead of the login page.
    if not session.get('in'):
        bottle.redirect('/p/{0}/'.format(pad))
    path = 'pads/{0}.md'.format(pad)
    response = {}
    if os.path.exists(path):
        with open(path, 'r') as f:
            # Escape backslashes/quotes/newlines so the markdown source can
            # be embedded inside a JavaScript string literal in the template.
            response['data'] = f.read().replace('\\', '\\\\').replace('"', '\\"').replace('\n', '\\n')
        with open('config.json') as f:
            published = json.load(f)['published']
        response['published'] = pad in published
    else:
        # New pad: empty editor, not published.
        response['data'] = ''
        response['published'] = False
    return response
@bottle.route('/p/<pad>/')
@bottle.view('viewer')
@sessions.start
def pad_viewer(session, pad):
    # Read-only view; only published pads are visible to anonymous users.
    with open('config.json', 'r') as f:
        published = json.load(f)['published']
    if pad in published:
        path = 'pads/{0}.md'.format(pad)
        with open(path, 'r') as f:
            # Same JS-string escaping as the editor view uses.
            return {'data': f.read().replace('\\', '\\\\').replace('"', '\\"').replace('\n', '\\n')}
    elif session.get('in'):
        # Logged-in users get the editor for unpublished pads.
        bottle.redirect('/p/{0}'.format(pad))
    else:
        bottle.redirect('/')
@bottle.route('/p/<pad>/<mode:re:(md|html)>')
@sessions.start
def md_viewer(session, pad, mode):
    # Export a pad as raw markdown (mode == 'md') or rendered HTML.
    with open('config.json', 'r') as f:
        published = json.load(f)['published']
    if pad in published or session.get('in'):
        path = 'pads/{0}.md'.format(pad)
        if mode == 'html':
            return markdown2.markdown_path(path)
        else:
            with open(path, 'r') as f:
                bottle.response.content_type = 'text/plain'
                return f.read()
    else:
        # Hide unpublished pads from anonymous users.
        bottle.abort(404)
@bottle.route('/p/<pad>/save', method='POST')
@sessions.start
def save_pad(session, pad):
    # AJAX endpoint: overwrite the pad's markdown source with the posted data.
    if not session.get('in'):
        return 'You are not logged in, or your session expired!'
    data = bottle.request.forms.get('data')
    path = 'pads/{0}.md'.format(pad)
    if data is not None:
        with open(path, 'w') as f:
            f.write(data)
        return 'The operation succeeded!'
    return 'The operation failed!'
@bottle.route('/p/<pad>/rename/<newpad>', method='POST')
@sessions.start
def rename_pad(session, pad, newpad):
    # Rename by copy + delete (refuses to overwrite an existing pad), then
    # keep the published list in config.json in sync with the new name.
    if not session.get('in'):
        return 'You are not logged in, or your session expired!'
    oldpath = 'pads/{0}.md'.format(pad)
    newpath = 'pads/{0}.md'.format(newpad)
    if not os.path.exists(newpath) and os.path.exists(oldpath):
        with open(oldpath, 'r') as f_in:
            with open(newpath, 'w') as f_out:
                f_out.write(f_in.read())
        os.unlink(oldpath)
        with open('config.json', 'r') as f:
            config = json.load(f)
        if pad in config['published']:
            # Carry the published flag over to the new name.
            config['published'].remove(pad)
            config['published'].append(newpad)
        with open('config.json', 'w') as f:
            json.dump(config, f, indent=2)
        return 'The operation succeeded!'
    return 'The operation failed!'
@bottle.route('/p/<pad>/delete')
@sessions.start
def delete_pad(session, pad):
    # Delete the pad file, unpublish it, and go back to the index.
    if not session.get('in'):
        bottle.redirect('/')
    path = 'pads/{0}.md'.format(pad)
    if os.path.exists(path):
        os.unlink(path)
    with open('config.json', 'r') as f:
        config = json.load(f)
    if pad in config['published']:
        config['published'].remove(pad)
    with open('config.json', 'w') as f:
        json.dump(config, f, indent=2)
    bottle.redirect('/index')
@bottle.route('/p/<pad>/publish', method='POST')
@sessions.start
def publish_pad(session, pad):
    # Add the pad to config.json's published list (idempotent).
    if not session.get('in'):
        return 'You are not logged in, or your session expired!'
    with open('config.json', 'r') as f:
        config = json.load(f)
    if pad not in config['published']:
        config['published'].append(pad)
    with open('config.json', 'w') as f:
        json.dump(config, f, indent=2)
    return 'This pad is now available for viewing!'
@bottle.route('/p/<pad>/unpublish', method='POST')
@sessions.start
def unpublish_pad(session, pad):
    # Remove the pad from config.json's published list (idempotent).
    if not session.get('in'):
        return 'You are not logged in, or your session expired!'
    with open('config.json', 'r') as f:
        config = json.load(f)
    if pad in config['published']:
        config['published'].remove(pad)
    with open('config.json', 'w') as f:
        json.dump(config, f, indent=2)
    return 'This pad is no longer available for viewing!'
@bottle.route('/upload', method='POST')
@sessions.start
def upload_image(session):
    # Save an uploaded file under ./uploads with a caller-chosen name.
    if not session.get('in'):
        bottle.abort(404)
    upload = bottle.request.files.get('upload')
    # SECURITY(review): the filename comes straight from the form; a value
    # containing path separators could escape ./uploads -- sanitize it.
    upload.filename = bottle.request.forms.get('filename')
    upload.save('./uploads')
    return 'Done!'
@bottle.route('/uploads')
@bottle.route('/uploads/')
@bottle.view('uploads')
@sessions.start
def serve_index(session):
    # List uploaded files, newest first.
    # NOTE(review): rebinds the module-level name serve_index (also defined
    # above for /index); routing still works because bottle captured each
    # callable at decoration time, but the duplicate name should be renamed.
    if not session.get('in'):
        bottle.redirect('/')
    files = os.listdir('uploads/')
    files.sort(key=lambda f: os.stat(os.path.join('uploads', f)).st_mtime, reverse=True)
    return {'files': files}
@bottle.route('/uploads/<filename>/delete')
@sessions.start
def delete_pad(session, filename):
    # Delete an uploaded file and return to the uploads listing.
    # NOTE(review): rebinds the module-level name delete_pad defined above;
    # routes are unaffected (bottle captured the callable at decoration time).
    if not session.get('in'):
        bottle.redirect('/')
    path = 'uploads/{0}'.format(filename)
    if os.path.exists(path):
        os.unlink(path)
    bottle.redirect('/uploads')
@bottle.route('/logout')
@sessions.start
def logout(session):
    # Destroy the server-side session and return to the login page.
    sessions.destroy()
    bottle.redirect('/')
@bottle.route('/static/<filename>')
def serve_static(filename):
    # Static assets (CSS/JS) bundled with the templates.
    return bottle.static_file(filename, root='./views/static')
@bottle.route('/uploads/<filename>')
def serve_static(filename):
    # Serve user-uploaded files.
    # NOTE(review): rebinds serve_static defined above; routes unaffected.
    return bottle.static_file(filename, root='./uploads')
@bottle.error(404)
def not_found(e):
    # Plain-text 404 page.
    return '404: Not Found'
@bottle.error(403)
def forbidden(e):
    # Plain-text 403 page.
    return '403: Forbidden'
@bottle.error(500)
def server_error(e):
    # Plain-text 500 page.
    return '500: Internal Server Error'
class ThreadingWSGIServer(ThreadingMixIn, WSGIServer):
    """WSGI server that handles each request in its own thread.

    BUG FIX: ThreadingMixIn must come *before* WSGIServer in the base-class
    list.  With the original order ``(WSGIServer, ThreadingMixIn)`` the MRO
    resolves ``process_request`` to BaseServer's serial implementation, so
    the mixin's threaded ``process_request`` was never used and requests
    were handled one at a time.
    """
def start(host='localhost', port=3031):
server = make_server(host, port, bottle.default_app(), ThreadingWSGIServer)
print 'Serving on http://{0}:{1}...'.format(host, port)
server.serve_forever()
| [
"jack@fallingduck.net"
] | jack@fallingduck.net |
d72a863ea60149ccb7af9b2726b96bac90c558e0 | ceada1c46e6540f9066f7111f6a0387c24722f5c | /笔试题目/Tencent/腾讯2018笔试试题/小Q的歌单.py | ff48ecece7b61e40ddbed08b1fd00f436a8afaed | [] | no_license | w5802021/leet_niuke | 556c966791e6a5e9a1a8eec61f67973aec9e56ca | 199f2b62101480b963e776c07c275b789c20a413 | refs/heads/master | 2020-06-24T03:30:19.983671 | 2019-12-08T15:46:26 | 2019-12-08T15:46:26 | 180,187,921 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | k = int(input().strip())
a, x, b, y = list(map(int, input().split()))
mod = 1000000007
# dp[i]表示总歌单长度为i时,一共有多少组组成歌单的方法
dp = [0] * (k+1)
dp[0] = 1
#在x首长度为A的歌中选,总歌单长度i的歌单组成方法
for i in range(1, x + 1):
for j in range(k, a - 1, -1):
dp[j] = dp[j] + dp[j-a]
#在y首长度为B的歌中选,总歌单长度i的歌单组成方法
for i in range(1, y + 1):
for j in range(k, b - 1, -1):
dp[j] = dp[j] + dp[j - b]
print(dp[k]) | [
"w5802022@gmail.com"
] | w5802022@gmail.com |
60e5e7acffc54ff9fb0926289c44d554e8655cc6 | 2db1a0038d26ccb6adc572b536cb5cd401fd7498 | /lib/python2.7/dist-packages/oauthlib/oauth2/rfc6749/endpoints/revocation.py | 3c517fca372d4a1f5e5733b02e0006a238b03c3f | [] | no_license | syurk/labpin | e795c557e7d7bcd4ff449cb9a3de32959a8c4968 | 04070dd5ce6c0a32c9ed03765f4f2e39039db411 | refs/heads/master | 2022-12-12T02:23:54.975797 | 2018-11-29T16:03:26 | 2018-11-29T16:03:26 | 159,692,630 | 0 | 1 | null | 2022-11-19T12:15:55 | 2018-11-29T16:04:20 | Python | UTF-8 | Python | false | false | 5,505 | py | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.endpoint.revocation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An implementation of the OAuth 2 `Token Revocation`_ spec (draft 11).
.. _`Token Revocation`: http://tools.ietf.org/html/draft-ietf-oauth-revocation-11
"""
from __future__ import absolute_import, unicode_literals
import logging
from oauthlib.common import Request
from ..errors import InvalidClientError, UnsupportedTokenTypeError
from ..errors import InvalidRequestError, OAuth2Error
from .base import BaseEndpoint, catch_errors_and_unavailability
log = logging.getLogger(__name__)
class RevocationEndpoint(BaseEndpoint):

    """Token revocation endpoint.

    Endpoint used by authenticated clients to revoke access and refresh tokens.
    Commonly this will be part of the Authorization Endpoint.
    """

    # Token type hints defined by the spec (draft-ietf-oauth-revocation-11).
    valid_token_types = ('access_token', 'refresh_token')

    def __init__(self, request_validator, supported_token_types=None,
                 enable_jsonp=False):
        BaseEndpoint.__init__(self)
        self.request_validator = request_validator
        # Default to supporting both spec-defined token types.
        self.supported_token_types = (
            supported_token_types or self.valid_token_types)
        self.enable_jsonp = enable_jsonp

    @catch_errors_and_unavailability
    def create_revocation_response(self, uri, http_method='POST', body=None,
                                   headers=None):
        """Revoke supplied access or refresh token.

        The authorization server responds with HTTP status code 200 if the
        token has been revoked successfully or if the client submitted an
        invalid token.

        Note: invalid tokens do not cause an error response since the client
        cannot handle such an error in a reasonable way. Moreover, the purpose
        of the revocation request, invalidating the particular token, is
        already achieved.

        The content of the response body is ignored by the client as all
        necessary information is conveyed in the response code.

        An invalid token type hint value is ignored by the authorization server
        and does not influence the revocation response.
        """
        request = Request(
            uri, http_method=http_method, body=body, headers=headers)
        try:
            self.validate_revocation_request(request)
            log.debug('Token revocation valid for %r.', request)
        except OAuth2Error as e:
            log.debug('Client error during validation of %r. %r.', request, e)
            response_body = e.json
            if self.enable_jsonp and request.callback:
                # Wrap the error JSON in the requested JSONP callback.
                response_body = '%s(%s);' % (request.callback, response_body)
            return {}, response_body, e.status_code
        self.request_validator.revoke_token(request.token,
                                            request.token_type_hint, request)
        # Successful revocation: empty body (or a bare JSONP callback call).
        response_body = ''
        if self.enable_jsonp and request.callback:
            response_body = request.callback + '();'
        return {}, response_body, 200

    def validate_revocation_request(self, request):
        """Ensure the request is valid.

        The client constructs the request by including the following parameters
        using the "application/x-www-form-urlencoded" format in the HTTP
        request entity-body:

        token (REQUIRED).  The token that the client wants to get revoked.

        token_type_hint (OPTIONAL).  A hint about the type of the token
        submitted for revocation.  Clients MAY pass this parameter in order to
        help the authorization server to optimize the token lookup.  If the
        server is unable to locate the token using the given hint, it MUST
        extend its search across all of its supported token types.  An
        authorization server MAY ignore this parameter, particularly if it is
        able to detect the token type automatically.  This specification
        defines two such values:

        * access_token: An Access Token as defined in [RFC6749],
          `section 1.4`_

        * refresh_token: A Refresh Token as defined in [RFC6749],
          `section 1.5`_

        Specific implementations, profiles, and extensions of this
        specification MAY define other values for this parameter using
        the registry defined in `Section 4.1.2`_.

        The client also includes its authentication credentials as described in
        `Section 2.3`_. of [`RFC6749`_].

        .. _`section 1.4`: http://tools.ietf.org/html/rfc6749#section-1.4
        .. _`section 1.5`: http://tools.ietf.org/html/rfc6749#section-1.5
        .. _`section 2.3`: http://tools.ietf.org/html/rfc6749#section-2.3
        .. _`Section 4.1.2`: http://tools.ietf.org/html/draft-ietf-oauth-revocation-11#section-4.1.2
        .. _`RFC6749`: http://tools.ietf.org/html/rfc6749
        """
        if not request.token:
            raise InvalidRequestError(request=request,
                                      description='Missing token parameter.')
        if self.request_validator.client_authentication_required(request):
            if not self.request_validator.authenticate_client(request):
                raise InvalidClientError(request=request)
        # A hint that is spec-valid but not supported by this endpoint is an
        # error; any other (unknown) hint value is simply ignored.
        if (request.token_type_hint and
                request.token_type_hint in self.valid_token_types and
                request.token_type_hint not in self.supported_token_types):
            raise UnsupportedTokenTypeError(request=request)
| [
"syurk738@students.bju.edu"
] | syurk738@students.bju.edu |
ae1a407e9631295a60cb2e2e27a9a53b9148115a | b35a624709024007025a1664683076cafef28b52 | /RevDock/revdock/get_models.py | 18a3630e0805a70ec1b43ea0fd5f08f6e02a6cbf | [] | no_license | Percud/Rev_Docking | d0efdb8efe78d7a8fb8a11d0e11bd602b6c22bde | a60bb74e08f3e74b2939ae827c2df0897e569baa | refs/heads/master | 2022-05-15T23:44:08.715948 | 2022-04-21T08:02:23 | 2022-04-21T08:02:23 | 235,093,711 | 0 | 1 | null | 2020-10-23T09:19:55 | 2020-01-20T12:12:45 | Python | UTF-8 | Python | false | false | 3,183 | py | from revdock.revdocking import *
cwd=os.getcwd()

## HUMAN PLPome
# Fetch the human accession list from the B6 database, map RefSeq protein
# accessions to UniProt ACs, then download SWISS-MODEL models for taxon 9606.
ncbi_acc=pd.read_csv('http://bioinformatics.unipr.it/B6db/tmp/Homo_sapiens.tab',sep='\t',header=None)[2].tolist()
accession=convert_ac(ncbi_acc,'P_REFSEQ_AC','ACC').To.tolist()
get_models(accession,'9606','Human_PLP_swissmodel')

## Get coord from catalytic lysine
os.chdir('Human_PLP_swissmodel_9606/')
output=[]
for pdb in glob.glob('*.pdb'):
    ## LLP residue ##
    # Rewrite the PLP-lysine adduct (LLP) HETATM records as plain LYS ATOM
    # records, then strip any remaining LLP lines from the model.
    print(re.sub(r'HETATM(.*\s*[N|C|O|CA|CB|CG|CE|CD|NZ]\s*)LLP',
    r'ATOM \1LYS',open(pdb).read()),
    file=open(pdb,'w'))
    print(re.sub(r'(?m).*LLP.*\n?', '', open(pdb).read()),
    file=open(pdb,'w'))
    fa = pdb.split('.pdb')[0]+'.fa'
    id = os.path.basename(fa).split('_')[0]
    pdb2fasta(pdb, fa)
    structure = PDB().get_structure(pdb, pdb)[0]
    try:
        # Look up the PLP-binding lysine (the 'pyridoxal' PTM feature) in
        # UniProt, map it onto the model sequence, and record the NZ-atom
        # coordinates of that residue.
        uni = getfeatures(id)
        features = pd.DataFrame(uni.features[0])
        print('>'+id+'\n'+uni['sequence.sequence'].values[0], file=open(id+'.fasta', 'w'))
        lys = features[(features.category == 'PTM')&(features.description.str.contains('pyridoxal'))].begin.dropna().tolist()[0]
        for row in match_fasta_position(id+'.fasta', fa, [lys]):
            coord=tuple(structure[row['sequence']][row['hit_num']]['NZ'].get_coord())
            output.append([id,pdb,lys,row['sequence'],row['hit_num'],*coord])
    except:
        # NOTE(review): bare except -- any failure (missing feature, parse
        # error, even Ctrl-C) silently records a row without coordinates.
        output.append([id,pdb])
    os.remove(fa)
    os.remove(id+'.fasta')
print(output)
pd.DataFrame(output, columns=['uniprot_ac','pdb','lys','chain','res','x','y','z']).to_csv('Human_coord.csv',sep='\t',index=False)
os.chdir(cwd)
## MOUSE PLPome
# Same pipeline as the human section above, for Mus musculus (taxon 10090).
# NOTE(review): near-verbatim duplication -- a shared helper parameterised by
# species/taxon would remove the copy-paste.
ncbi_acc=pd.read_csv('http://bioinformatics.unipr.it/B6db/tmp/Mus_musculus.tab',sep='\t',header=None)[2].tolist()
accession=convert_ac(ncbi_acc,'P_REFSEQ_AC','ACC').To.tolist()
get_models(accession,'10090','Mouse_PLP_swissmodel')

## Get coord from catalytic lysine
os.chdir('Mouse_PLP_swissmodel_10090/')
output=[]
for pdb in glob.glob('*.pdb'):
    ## LLP residue ##
    # Convert LLP HETATM records to LYS ATOM records, then drop leftovers.
    print(re.sub(r'HETATM(.*\s*[N|C|O|CA|CB|CG|CE|CD|NZ]\s*)LLP',
    r'ATOM \1LYS',open(pdb).read()),
    file=open(pdb,'w'))
    print(re.sub(r'(?m).*LLP.*\n?', '', open(pdb).read()),
    file=open(pdb,'w'))
    fa = pdb.split('.pdb')[0]+'.fa'
    id = os.path.basename(fa).split('_')[0]
    pdb2fasta(pdb, fa)
    structure = PDB().get_structure(pdb, pdb)[0]
    try:
        # Map the UniProt 'pyridoxal' PTM lysine onto the model and record
        # its NZ-atom coordinates.
        uni = getfeatures(id)
        features = pd.DataFrame(uni.features[0])
        print('>'+id+'\n'+uni['sequence.sequence'].values[0], file=open(id+'.fasta', 'w'))
        lys = features[(features.category == 'PTM')&(features.description.str.contains('pyridoxal'))].begin.dropna().tolist()[0]
        for row in match_fasta_position(id+'.fasta', fa, [lys]):
            coord=tuple(structure[row['sequence']][row['hit_num']]['NZ'].get_coord())
            output.append([id,pdb,lys,row['sequence'],row['hit_num'],*coord])
    except:
        # NOTE(review): bare except, as in the human section above.
        output.append([id,pdb])
    os.remove(fa)
    os.remove(id+'.fasta')
pd.DataFrame(output, columns=['uniprot_ac','pdb','lys','chain','res','x','y','z']).to_csv('Mouse_coord.csv',sep='\t',index=False)
os.chdir(cwd)
| [
"noreply@github.com"
] | Percud.noreply@github.com |
3acf9cf9c28886823a1327ff209b2ba424538bf0 | 6bc63fdcb0a443f7f72c2cf1ae39b77d45275567 | /lists/urls.py | 2099dc5e596e00dbc7d7b6cc45d07390d134993a | [] | no_license | a-watkin/django-tdd | f7afdfa553189e03d5eefe7e38cb33e73b472a03 | 8ce872a71947e3456992e8111a7f8aa03fc75071 | refs/heads/master | 2021-01-20T05:26:23.419071 | 2018-01-29T13:01:32 | 2018-01-29T13:01:32 | 89,781,514 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,577 | py | """superlists URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from lists import views
urlpatterns = [
    # url(r'^admin/', admin.site.urls),
    # url(r'^$', views.home_page, name='home'),
    #
    # These patterns are mounted under a /lists/ prefix by the project-level
    # URLconf, so r'^new$' matches /lists/new.
    url(r'^new$', views.new_list, name='new_list'),
    # (\d+) is a capture group: the matched text is passed to the view as an
    # extra positional string argument after request.  /lists/1/ therefore
    # calls view_list(request, "1"); with (.+) below, /lists/users/foo/
    # calls my_lists(request, "foo").
    url(r'^(\d+)/$', views.view_list, name='view_list'),
    # pattern -> view (logic) -> name used for reverse lookup in templates
    url(r'^users/(.+)/$', views.my_lists, name='my_lists'),
    # url(r'^(\d+)/add_item$', views.add_item, name='add_item'),
]
| [
"atomicpenguiens@gmail.com"
] | atomicpenguiens@gmail.com |
69d1ddd13797217c84720b0a5736e6c9b3caff7e | 629bde72db103849451401ed09a589027d36a1ce | /com/pyutil/tbk/wxbot.py | e2cdc3c43ef3b312c3e8f65f506129e5b2ef31fd | [] | no_license | benniaogithub/pyUtil | c5fac1010b5d2af4370e7577e3c6991035661c2c | eeefb777105b0845beeef24f4c0d5578164cc00b | refs/heads/master | 2021-01-13T03:02:48.586674 | 2018-11-13T18:23:51 | 2018-11-13T18:23:51 | 77,036,550 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | #-*-coding:utf-8-*-
# Time:2018/11/12 23:40
__author__ = 'liuqin212173'
import itchat | [
"715570634@qq.com"
] | 715570634@qq.com |
0349a805c84acec95aceef67fa4cb2d1b4fb7947 | a69d47690d7b383fce880270ee06091531e05987 | /tests/fast_cpp_proto_test.py | 2ba4a8a16091757a0239be5bc12c1bfc0c8737fb | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla"
] | permissive | scal444/pybind11_protobuf | 9215e608f31f78cc14115be5cbf1bb44ce8be411 | 0f9d7e967cb20e3875be04115e7340ab3211ffe2 | refs/heads/main | 2023-07-07T20:26:14.946720 | 2021-05-11T20:55:46 | 2021-05-11T20:55:46 | 366,458,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,114 | py | # Copyright (c) 2019 The Pybind Development Team. All rights reserved.
#
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""Tests for protobuf casters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import parameterized
from pybind11_protobuf.tests import fast_cpp_proto_example as proto_example
from pybind11_protobuf.tests import test_pb2
from google3.net.proto2.contrib.pyutil import compare
def get_fully_populated_test_message():
    """Returns a TestMessage with all fields set."""
    # This tests initializing a proto by keyword argument, covering scalar,
    # message, repeated, enum and map fields in one constructor call.
    return test_pb2.TestMessage(
        string_value='test',
        int_value=4,
        double_value=4.5,
        int_message=test_pb2.IntMessage(value=5),
        repeated_int_value=[6, 7],
        repeated_int_message=[test_pb2.IntMessage(value=8)],
        enum_value=test_pb2.TestMessage.TestEnum.ONE,
        repeated_enum_value=[test_pb2.TestMessage.TestEnum.TWO],
        string_int_map={'k': 5},
        int_message_map={1: test_pb2.IntMessage(value=6)})
def get_message_references():
    """Returns a parameter list of shared proto2 messages."""
    # Each entry is a (name, callable) pair whose callable returns a handle
    # to the *same* underlying message; the unsafe reference-returning
    # accessors are only included when the extension was built with them.
    x = test_pb2.IntMessage()
    l = [('lambda', lambda: x)]
    if proto_example.PYBIND11_PROTOBUF_UNSAFE:
        l.extend([
            # ('int_shared_ptr', proto_example.get_int_message_shared_ptr),
            ('int_message_ref', proto_example.get_int_message_ref),
            ('int_message_raw_ptr', proto_example.get_int_message_raw_ptr),
            ('message_ref', proto_example.get_message_ref),
            ('message_raw_ptr', proto_example.get_message_raw_ptr)
        ])
    if proto_example.PYBIND11_PROTOBUF_UNSAFE and proto_example.REFERENCE_WRAPPER:
        l.extend([('int_message_ref_wrapper',
                   proto_example.get_int_message_ref_wrapper),
                  ('message_ref_wrapper', proto_example.get_message_ref_wrapper)])
    return l
def get_message():
    """Returns a parameter list of shared proto2 messages."""
    # (name, callable) pairs producing an IntMessage for parameterized tests;
    # REFERENCE_WRAPPER entries are included only when that feature is built.
    l = [
        ('native_proto', test_pb2.IntMessage),
        ('pybind11_wrapper', proto_example.make_int_message),
        ('int_message_const_ref', proto_example.get_int_message_const_ref),
        ('int_message_const_ptr', proto_example.get_int_message_const_raw_ptr),
        ('int_message_unique_ptr', proto_example.get_int_message_unique_ptr),
        ('int_message_ptr_copy', proto_example.get_int_message_ptr_copy),
        ('int_message_ptr_take', proto_example.get_int_message_ptr_take),
        ('int_message_ref_copy', proto_example.get_int_message_ref_copy),
        # functions that return proto2::Message in C++ convert to the concrete
        # type in python.
        ('message_const_ref', proto_example.get_message_const_ref),
        ('message_const_ptr', proto_example.get_message_const_raw_ptr),
        ('message_unique_ptr', proto_example.get_message_unique_ptr)
    ]
    if proto_example.REFERENCE_WRAPPER:
        l.extend([
            ('int_message_const_ref_wrapper',
             proto_example.get_int_message_const_ref_wrapper),
            ('int_message_ref_wrapper_copy',
             proto_example.get_int_message_ref_wrapper_copy),
        ])
    return l
class ProtoTest(compare.Proto2Assertions):
    """Tests passing/returning proto messages across the pybind11 boundary."""

    def test_type(self):
        # These are both seen as the concrete type.
        self.assertEqual(
            str(type(proto_example.make_int_message())),
            "<class 'google3.third_party.pybind11_protobuf.tests.test_pb2.IntMessage'>"
        )
        self.assertEqual(
            str(type(proto_example.make_test_message())),
            "<class 'google3.third_party.pybind11_protobuf.tests.test_pb2.TestMessage'>"
        )

    def test_keep_alive_message(self):
        message = proto_example.make_test_message()
        field = message.int_message
        # message should be kept alive until field is also deleted.
        del message
        field.value = 5
        self.assertEqual(field.value, 5)

    def test_return_wrapped_message(self):
        message = proto_example.make_test_message()
        self.assertEqual(message.DESCRIPTOR.full_name, 'pybind11.test.TestMessage')
        self.assertEqual(message.__class__.DESCRIPTOR.full_name,
                         'pybind11.test.TestMessage')

    def test_get_message_none(self):
        # A null raw pointer must surface in Python as None (unsafe mode only).
        if proto_example.PYBIND11_PROTOBUF_UNSAFE:
            self.assertIsNone(proto_example.get_int_message_raw_ptr_none())

    @parameterized.named_parameters(
        ('native_proto', test_pb2.TestMessage),
        ('pybind11_wrapper', proto_example.make_test_message),
        ('string', lambda: 'not a proto'))
    def test_pass_proto_wrong_type(self, get_message_function):
        # Anything that is not an IntMessage must be rejected with TypeError.
        message = get_message_function()
        self.assertRaises(TypeError, proto_example.check_int_message, message, 5)

    @parameterized.named_parameters(
        ('native_proto', test_pb2.IntMessage),
        ('pybind11_wrapper', proto_example.make_int_message))
    def test_check_int_message(self, get_message_function):
        message = get_message_function()
        message.value = 5
        self.assertTrue(proto_example.check_int_message(message, 5))

    @parameterized.named_parameters(
        ('native_proto', test_pb2.IntMessage),
        ('pybind11_wrapper', proto_example.make_int_message))
    def test_check_int_message_safe(self, get_message_function):
        # const-ptr / by-value / rvalue parameter passing.
        message = get_message_function()
        message.value = 5
        self.assertTrue(proto_example.check_int_message_const_ptr(message, 5))
        self.assertTrue(proto_example.check_int_message_value(message, 5))
        self.assertTrue(proto_example.check_int_message_rvalue(message, 5))

    @parameterized.named_parameters(
        ('native_proto', test_pb2.IntMessage),
        ('pybind11_wrapper', proto_example.make_int_message))
    def test_check_int_message_unsafe(self, get_message_function):
        # mutable-ptr / mutable-ref passing (unsafe mode only).
        message = get_message_function()
        message.value = 5
        if proto_example.PYBIND11_PROTOBUF_UNSAFE:
            self.assertTrue(proto_example.check_int_message_ptr(message, 5))
            self.assertTrue(proto_example.check_int_message_ref(message, 5))

    @parameterized.named_parameters(
        ('native_proto', test_pb2.IntMessage),
        ('pybind11_wrapper', proto_example.make_int_message))
    def test_check_message(self, get_message_function):
        # Passing as the abstract proto2::Message type.
        message = get_message_function()
        message.value = 5
        self.assertTrue(
            proto_example.check_message(message, message.DESCRIPTOR.full_name))
        self.assertTrue(
            proto_example.check_message_const_ptr(message,
                                                  message.DESCRIPTOR.full_name))
        if proto_example.PYBIND11_PROTOBUF_UNSAFE:
            self.assertTrue(
                proto_example.check_message_ptr(message,
                                                message.DESCRIPTOR.full_name))
            self.assertTrue(
                proto_example.check_message_ref(message,
                                                message.DESCRIPTOR.full_name))

    @parameterized.named_parameters(
        ('native_proto', test_pb2.IntMessage),
        ('pybind11_wrapper', proto_example.make_int_message))
    def test_mutate_message(self, get_message_function):
        # C++-side mutation must be visible through the Python handle.
        if proto_example.PYBIND11_PROTOBUF_UNSAFE:
            message = get_message_function()
            proto_example.mutate_int_message_ref(5, message)
            self.assertEqual(message.value, 5)
            proto_example.mutate_int_message_ptr(6, message)
            self.assertEqual(message.value, 6)

    @parameterized.named_parameters(
        ('native_proto', test_pb2.IntMessage),
        ('pybind11_wrapper', proto_example.make_int_message))
    def test_consume_int_message(self, get_message_function):
        message = get_message_function()
        message.value = 5
        proto_example.consume_int_message(message)  # makes a copy
        self.assertEqual(message.value, 5)
        proto_example.consume_message(message)  # makes another copy
        self.assertEqual(message.value, 5)

    @parameterized.named_parameters(get_message_references())
    def test_get_int_message_reference(self, get_message_function):
        message_1 = get_message_function()
        message_1.value = 5
        self.assertEqual(message_1.value, 5)
        message_2 = get_message_function()
        message_2.value = 6
        self.assertEqual(message_2.value, 6)
        # get_message_function always returns a reference to the same static
        # object, so message_1 and message_2 should always be equal.
        self.assertEqual(message_1.value, message_2.value)
        # test passing the message as a concrete type.
        self.assertTrue(proto_example.check_int_message(message_1, 6))
        # test passing the message as an abstract type.
        self.assertTrue(
            proto_example.check_message(message_1, message_1.DESCRIPTOR.full_name))

    @parameterized.named_parameters(get_message())
    def test_get_message_fns(self, get_message_function):
        message = get_message_function()
        message.value = 5
        self.assertEqual(message.value, 5)
        self.assertTrue(proto_example.check_int_message(message, 5))
        self.assertTrue(
            proto_example.check_message(message, message.DESCRIPTOR.full_name))

    def test_overload_fn(self):
        # Overload resolution picks the branch matching the concrete type.
        self.assertEqual(proto_example.fn_overload(test_pb2.IntMessage()), 2)
        self.assertEqual(proto_example.fn_overload(test_pb2.TestMessage()), 1)
if __name__ == '__main__':
    # Run the whole test suite when executed directly.
    unittest.main()
| [
"mchinen@google.com"
] | mchinen@google.com |
5d7f4b9d3c274dae71c1eebe713848a9dfbba5e3 | 8bcb8333bc8d2e23416b3c8428b6fe24d0e82654 | /second.py | 63731aa868e83c9b57b424621df0339cb1096b70 | [] | no_license | Alantan389/if-__name__-__main__- | 7b5ff3818a1e075466b032b4f9dcf797f4bbd099 | fc7c4d3fdb0ff82daf507c5f29af323e2e5226c3 | refs/heads/main | 2023-04-04T22:36:24.803967 | 2021-04-07T02:26:28 | 2021-04-07T02:26:28 | 355,387,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | import first
first.main()
# __name__ is 'second' when this module is imported (only '__main__' when
# run directly) -- this line demonstrates that distinction.
print ("second module's name : {}".format(__name__))
"noreply@github.com"
] | Alantan389.noreply@github.com |
43e03ffe1443acb71b3ff1349f72dbc4635872d0 | 1393305d4573ac73d159d9efd2d58ade00f17dc8 | /Python/Projects/PythonBot/Tests/IMG iterations.py | b4bea19d6eb6b50f982de88f961e20fe7158dff5 | [] | no_license | ShutDownMan/UniProjects | f865136169b4626fc79e92c36e5c6590496216d2 | 40704eb1f51f82df50fb7497985c2ab55a3aff26 | refs/heads/master | 2022-06-02T22:38:04.137699 | 2022-05-12T00:19:40 | 2022-05-12T00:19:40 | 113,806,338 | 3 | 2 | null | 2022-04-04T23:04:57 | 2017-12-11T03:11:54 | C++ | UTF-8 | Python | false | false | 364 | py | import win32ui
import time  # BUG FIX: time.time() was used below without ever being imported

name = "Tribal Wars 2 (1.66)" #just an example of a window I had open at the time
w = win32ui.FindWindow( None, name )

# Benchmark: how many times per second can we grab the window DC and read
# one pixel?  Each iteration acquires and releases the DC.
t1 = time.time()
count = 0
while count < 1000:
    dc = w.GetWindowDC()
    dc.GetPixel(1,1)
    dc.DeleteDC()  # release the DC each round to avoid leaking GDI handles
    count +=1
t2 = time.time()

tf = t2-t1
it_per_sec = int(count/tf)
print (str(it_per_sec) + " iterations per second")
"jedson_gabriel@hotmail.com"
] | jedson_gabriel@hotmail.com |
ed6d85d5c549b82c6b2d6991ffb75c0ca4929d9b | 6cb1b63846e818255945cdf1e8faf4f3e353c735 | /venv/datafountain/guangfudianzhan/tensor_forest.py | ec556138745ab05ce6912bb9f3de3796e550ba80 | [] | no_license | LuckyHandsomeCat/deep_learning | 3eb2bec1133f8e547436a8625b40e8bfa8bc7572 | 8c37912069a06a58f80034fe1be7ba5fbc0865d4 | refs/heads/master | 2020-08-08T02:50:10.278517 | 2018-11-30T11:11:34 | 2018-11-30T11:11:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,615 | py | # !/usr/bin/env python3
# -*-coding:utf8 -*-
# @TIME :2018/6/21 下午1:27
# @Author :hwwu
# @File :PricePredictor.py
import numpy as np
import sys
path = '/Users/liyangyang/PycharmProjects/mypy/venv/datafountain/guangfudianzhan/'
sys.path.append(path)
import read_data
dis = [1, 190, 379, 567, 755, 940, 1123, 1314, 1503, 1505, 1694, 1879,
2070, 2257, 2444, 2632, 2823, 3013, 3202, 3379, 3567, 3746, 3927, 4089,
4278, 4459, 4648, 4652, 4821, 5010, 5013, 5017, 5059, 5061, 5069, 5074,
5077, 5281, 5285, 5287, 5292, 5508, 5703, 5911, 5913, 5916, 5918, 6121,
6337, 6524, 6528, 6531, 6534, 6723, 6923, 7116, 7326, 7535, 7740, 7937,
8146, 8245, 8258, 8310, 8488, 8705, 8711, 8878, 9088, 9296, 9505, 9719,
9916, 10124, 10335, 10544, 10736, 10914, 10917, 11119, 11331, 11540,
11753, 11963, 12170, 12381, 12592, 12802, 13009, 13214, 13426, 13617,
13830, 14032, 14243, 14457, 14666, 14882, 15091, 15299, 15508, 15719,
15937, 16144, 16348, 16540, 16747, 16925, 17133, 17342,
17527, 17543, 17745, 17876]
dic = [22, 135, 591, 592, 593, 594, 595, 737, 948, 1070, 1173, 1175, 1286,
1362, 1451, 1519, 1565, 1666, 1717, 1894, 2137, 2223, 2271, 2414,
2579, 2797, 2875, 2916, 2986, 2684, 3723, 3597, 3599, 3603, 3605,
3607, 3610, 3601, 3602, 3421, 3393, 3538, 3539, 3540, 5521, 6016,
7437, 11832, 15355, 3152, 3612, 3611]
# def character(id,train_x):
# r =[]
# r.append(id)
# r.append(train_x[10]*0.6+train_x[11]*0.15+train_x[12]*0.25)
# r.append(train_x[13]*0.6+train_x[14]*0.15+train_x[15]*0.25)
# r.append(train_x[16]**(1/2))
# for i in [0,1,2,4,5,6,10,11,12,13,14,15,17,18]:
# r.append(train_x[i])
# for j in range(i,19):
# r.append(train_x[i]+train_x[j])
# r.append(train_x[i]-train_x[j])
# r.append(train_x[i]*train_x[j])
# r.append(train_x[i]/(train_x[j]+0.1))
#
# return r
from sklearn import preprocessing
def load_train_data():
    """Load 'public.train.csv' and build the engineered feature matrix.

    Returns:
        (x, y): x is a list of 13-element feature rows, y a list of
        target values (absolute power output).

    NOTE(review): relies on module-level `read_data`, `dis` (segment
    boundaries over the id column) and `dic` (ids to exclude).
    """
    # Min-max scale the 19 raw feature columns to [0, 1].
    min_max_scaler = preprocessing.MinMaxScaler()
    train_ = read_data.read_result_data('public.train.csv')
    train_x = train_[:, 2:21]
    train_y = train_[:, 21]
    train_x = min_max_scaler.fit_transform(train_x)
    train_z = train_[:, 1]  # record id column, used for segment position
    train_len = len(train_y)
    train_y.shape = (1, train_len)
    train_y = np.transpose(train_y)
    x, y = [], []
    for i in range(train_len):
        # Skip rows whose first two (rounded, scaled) features match a
        # sentinel pattern. NOTE(review): `|` is a bitwise OR here; it
        # works on booleans but `or` would be clearer.
        if ((round(train_x[i][0], 2) != 0.01) | (round(train_x[i][1], 1) != 0.1)):
            # `id` shadows the builtin; it encodes the row's relative
            # position inside its [dis[j-1], dis[j]) segment, folded so
            # segment ends map to 0 and the middle to 0.5.
            id = 0.0
            for j in range(len(dis)):
                if (train_z[i] < dis[j]):
                    id = 0.5 - np.abs((int(train_z[i]) - dis[j - 1]) / (dis[j] - dis[j - 1]) - 0.5)
                    break
            if (train_z[i] not in dic):  # drop known-bad record ids
                # x.append(character(id,train_x[i]))
                x.append([id,
                          train_x[i][0],
                          train_x[i][1],
                          train_x[i][3],
                          train_x[i][2] * train_x[i][4],
                          train_x[i][2] * train_x[i][5],
                          train_x[i][2] * train_x[i][6],
                          train_x[i][13],
                          train_x[i][14],
                          train_x[i][15],
                          train_x[i][16],
                          train_x[i][17],
                          train_x[i][18]
                          ])
                y.append(abs(train_y[i]))
    print(len(x))
    # for i in range(10):
    #     print(x[i])
    return x, y
def load_test_data(file='public.test.csv'):
    """Load a test CSV and build (x, y) analogously to load_train_data().

    NOTE(review): the feature construction (`x.append(...)`) is commented
    out below, so `x` stays empty and only the id column `y` is filled.

    :param file: CSV filename passed to read_data.read_result_data
    :return: (x, y) — x: engineered feature rows (currently empty),
             y: absolute record ids of the retained rows
    """
    # train_ = read_data.read_result_data('test_data_all.csv')
    train_ = read_data.read_result_data(file)
    train_x = train_[:, 2:21]   # raw feature columns
    train_y = train_[:, 1]      # record id column
    train_len = len(train_y)
    train_y.shape = (1, train_len)
    train_y = np.transpose(train_y)
    x, y = [], []
    for i in range(train_len):
        # Same sentinel-row filter as in load_train_data().
        if ((round(train_x[i][0], 2) != 0.01) | (round(train_x[i][1], 1) != 0.1)):
            id = 0.0
            for j in range(len(dis)):
                if (train_y[i] < dis[j]):
                    # Relative position of the id inside its segment,
                    # folded so segment ends map to 0, middle to 0.5.
                    id = 0.5 - np.abs((int(train_y[i]) - dis[j - 1]) / (dis[j] - dis[j - 1]) - 0.5)
                    break
            if (train_y[i] not in dic):
                # x.append(character(id, train_x[i]))
                y.append(abs(train_y[i]))
    # BUG FIX: the original did `for i in range(1): print(x[i])`, which
    # always raised IndexError because x is never populated above.
    for i in range(min(1, len(x))):
        print(x[i])
    print(len(x))
    return x, y
# 对训练集和测试集分别进行交叉验证,得到error measure for official scoring : RMSE
x, y = load_train_data()
X_train = x[0:8000:1]
y_train = y[0:8000:1]
X_test = x[8000:8905:1]
y_test = y[8000:8905:1]
#
# x1, y1 = load_test_data()
# X_test = x1[0::1]
# y_test = y1[0::1]
# x2, y2 = load_test_data(file='test_data_all.csv')
# X_test_1 = x2[0::1]
# y_test_1 = y2[0::1]
from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
import xgboost
n_folds = 5
def rmse_cv(model):
    """Cross-validated RMSE of `model` on the module-level training split."""
    splits = KFold(n_folds, shuffle=True, random_state=42).get_n_splits(X_train)
    neg_mse = cross_val_score(model, X_train, y_train,
                              scoring="neg_mean_squared_error", cv=splits)
    return np.sqrt(-neg_mse)
def rmse_cv_test(model):
    """Cross-validated RMSE of `model` on the module-level held-out split."""
    # Split count is derived from X_train, mirroring rmse_cv; get_n_splits
    # simply returns the integer n_folds, so the value is identical.
    splits = KFold(n_folds, shuffle=True, random_state=42).get_n_splits(X_train)
    neg_mse = cross_val_score(model, X_test, y_test,
                              scoring="neg_mean_squared_error", cv=splits)
    return np.sqrt(-neg_mse)
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error
import xgboost as xgb
import lightgbm as lgb
class AveragingModels(BaseEstimator, RegressorMixin, TransformerMixin):
    """Ensemble regressor: fit clones of several base models, predict their mean."""

    def __init__(self, models):
        self.models = models

    def fit(self, X, y):
        """Fit a fresh clone of every base model on (X, y); return self."""
        # Clone first so the caller's estimator objects are never mutated.
        self.models_ = [clone(estimator) for estimator in self.models]
        for estimator in self.models_:
            estimator.fit(X, y)
        return self

    def predict(self, X):
        """Column-stack the per-model predictions and average across models."""
        per_model = [estimator.predict(X) for estimator in self.models_]
        return np.column_stack(per_model).mean(axis=1)
class StackingAveragedModels(BaseEstimator, RegressorMixin, TransformerMixin):
    """Two-level stacking ensemble: out-of-fold predictions of the base
    models become the training features of a meta-model.

    NOTE(review): fit() indexes X/y with numpy index arrays
    (X[train_index]), so it requires array-like inputs; the module-level
    X_train here is a plain Python list, which would fail.
    """

    def __init__(self, base_models, meta_model, n_folds=5):
        # Estimators are stored unfitted; clones are trained in fit().
        self.base_models = base_models
        self.meta_model = meta_model
        self.n_folds = n_folds

    # We again fit the data on clones of the original models
    def fit(self, X, y):
        # One list of fitted clones per base model (one clone per fold).
        self.base_models_ = [list() for x in self.base_models]
        self.meta_model_ = clone(self.meta_model)
        kfold = KFold(n_splits=self.n_folds, shuffle=True, random_state=156)

        # Train cloned base models then create out-of-fold predictions
        # that are needed to train the cloned meta-model
        out_of_fold_predictions = np.zeros((len(X), len(self.base_models)))
        for i, model in enumerate(self.base_models):
            for train_index, holdout_index in kfold.split(X, y):
                instance = clone(model)
                self.base_models_[i].append(instance)
                instance.fit(X[train_index], y[train_index])
                # Each row of column i is predicted by the clone that did
                # NOT see that row during training (out-of-fold).
                y_pred = instance.predict(X[holdout_index])
                out_of_fold_predictions[holdout_index, i] = y_pred

        # Now train the cloned meta-model using the out-of-fold predictions as new feature
        self.meta_model_.fit(out_of_fold_predictions, y)
        return self

    # Do the predictions of all base models on the test data and use the averaged predictions as
    # meta-features for the final prediction which is done by the meta-model
    def predict(self, X):
        meta_features = np.column_stack([
            np.column_stack([model.predict(X) for model in base_models]).mean(axis=1)
            for base_models in self.base_models_])
        return self.meta_model_.predict(meta_features)
lasso = make_pipeline(RobustScaler(), Lasso(alpha=0.0005, random_state=1, max_iter=100000))
ENet = make_pipeline(RobustScaler(), ElasticNet(alpha=0.0005, l1_ratio=.9, random_state=3, max_iter=100000))
KRR = KernelRidge(alpha=0.6, kernel='linear', degree=2, coef0=2.5)
GBoost = GradientBoostingRegressor(n_estimators=30000, learning_rate=0.05,
max_depth=4, max_features='sqrt',
min_samples_leaf=15, min_samples_split=10,
loss='huber', random_state=5)
model_xgb = xgb.XGBRegressor(colsample_bytree=0.4603, gamma=0.0468,
learning_rate=0.005, max_depth=23,
max_delta_step=100000,
min_child_weight=1.7817, n_estimators=2200,
reg_alpha=0.4640, reg_lambda=0.8571,
subsample=0.5213, silent=1,
random_state=7, nthread=-1)
model_lgb = lgb.LGBMRegressor(num_leaves=31,
learning_rate=0.03,
n_estimators=1000,
subsample=.9,
colsample_bytree=.9,
random_state=666)
averaged_models = AveragingModels(models=(lasso, ENet, KRR, model_lgb, model_xgb))
stacked_averaged_models = StackingAveragedModels(base_models=(lasso, ENet, model_lgb),
meta_model=model_xgb)
#
# score = rmse_cv(averaged_models)
# score_test = rmse_cv_test(averaged_models)
averaged_models.fit(X_train, y_train)
y_ = averaged_models.predict(X_test)
# y_1 = model_xgb.predict(X_test_1)
# y_1= averaged_models.predict(X_test_1)
# stacked_averaged_models.fit(X_train, y_train)
# y_1 = stacked_averaged_models.predict(X_test)
# stacked_averaged_models.fit(X_train, y_train)
# y_2 = stacked_averaged_models.predict(X_test)
# # y_1= stacked_averaged_models.predict(X_test_1)
#
# y_ = []
# for i in range(len(y_2)):
# y_.append([y_2[i] * 0 + y_3[i] * 1])
# r = []
# for i in range(8338):
# id = y_test[i][0]
# p = y_[i]
# r.append([id, p])
# np.savetxt('/Users/liyangyang/Downloads/datafountain/guangdianfute/test_data_3', r)
#
#
# r1 = []
# for i in range(17243):
# id = y_test_1[i][0]
# p = y_1[i]
# r1.append([id, p])
# np.savetxt('/Users/liyangyang/Downloads/datafountain/guangdianfute/test_data_all_1', r1)
# for i in range(10):
# print(y_test[i], y_[i])
#
#
def rmse_my(y_test, y_, s):
    """Print residual diagnostics: count of large errors, sorted squared
    errors, MSE and RMSE, each line prefixed where applicable by label `s`."""
    from math import sqrt

    residuals = [y_test[i] - y_[i] for i in range(len(y_test))]
    # Count predictions whose squared residual exceeds 1.
    n = sum(1 for r in residuals if r * r > 1)
    print('n', n)
    squaredError = [r * r for r in residuals]
    print(s, "Square Error: ", sorted(squaredError, reverse=True))
    mse = sum(squaredError) / len(squaredError)
    print(s, "MSE = ", mse)          # mean squared error
    print(s, "RMSE = ", sqrt(mse))   # root mean squared error
#
rmse_my(y_test, y_, 'y_')
# rmse_my(y_train,y_t,'y_t')
| [
"wuhongwei@videopls.com"
] | wuhongwei@videopls.com |
b5ad6bd34ba4c41275c12176798a9ccc96c3d0a1 | b61eec74a5adbe066813281d351b09412aa2b464 | /get_ipaddress.py | 37fdc02396d81534befd077ada0dc178ce8d792a | [] | no_license | aarongo/Cheetah | 3bbc5013e9f31e74687b9a3438ce6a61faf8ddf4 | 5037005b97721c38ebd162469d6952575bbe8228 | refs/heads/master | 2020-12-24T06:58:24.269802 | 2016-08-15T08:02:05 | 2016-08-15T08:02:05 | 49,059,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | #!/usr/bin/env python
# _*_coding:utf-8_*_
# Author: "Edward.Liu"
# Author-Email: lonnyliu@126.com
import socket
import fcntl
import struct
def get_ip_address(ifname):
    """Return the IPv4 address bound to network interface `ifname` (Linux only).

    Uses the SIOCGIFADDR ioctl on a throwaway UDP socket.

    :param ifname: interface name, e.g. "eth0" (str or bytes)
    :raises OSError: if the interface does not exist or has no IPv4 address
    """
    if isinstance(ifname, str):
        # BUG FIX: struct.pack('256s', ...) requires bytes on Python 3;
        # passing a str raised TypeError.
        ifname = ifname.encode('utf-8')
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        packed = fcntl.ioctl(
            s.fileno(),
            0x8915,  # SIOCGIFADDR (Linux)
            struct.pack('256s', ifname[:15])  # struct ifreq; name max 15 bytes
        )
        # The address sits at bytes 20..24 of the returned ifreq buffer.
        return socket.inet_ntoa(packed[20:24])
    finally:
        s.close()  # BUG FIX: the original leaked the socket
| [
"lonnyliu@126.com"
] | lonnyliu@126.com |
786c1418cb68abdd4a7357ad6d0e666fa49ece82 | 2ee6babe4b5048c6d256aedf8cb580ce6de0717e | /download_data.py | 252abdf1b7bef5b16ad0afc31a0ded8bc48a3f16 | [] | no_license | fmazzasc/Hypertriton_pPb | 887aa8418f3d64d6dd54cff2730cf7d5a42631eb | 6bd465c6c9f6121ef56037666ba0d33179c78066 | refs/heads/main | 2023-06-26T15:32:56.557100 | 2021-07-23T16:00:59 | 2021-07-23T16:00:59 | 306,633,321 | 0 | 1 | null | 2021-01-10T16:38:27 | 2020-10-23T12:41:21 | Python | UTF-8 | Python | false | false | 2,363 | py | import os
# Create the local directory layout expected by the analysis, if missing.
if not os.path.exists("Trees"):
    os.makedirs("Trees")
if not os.path.exists("Tables"):
    os.makedirs("Tables")
if not os.path.exists("Utils"):
    os.makedirs("Utils")
if not os.path.exists("Utils/ProdModels"):
    os.makedirs("Utils/ProdModels")
# Fetch each input file from CERN EOS via scp, skipping files that are
# already present locally (each download is idempotent).
# NOTE(review): requires ssh access to lxplus.cern.ch; os.system return
# codes are not checked, so failed transfers go unnoticed.
if not os.path.exists("Trees/HyperTritonTree_13bc.root"):
    os.system("scp lxplus.cern.ch:/eos/user/h/hypertriton/trees/2Body/HyperTritonTree_13bc.root Trees/.")
if not os.path.exists("Trees/HyperTritonTree_13bc_LS.root"):
    os.system("scp lxplus.cern.ch:/eos/user/h/hypertriton/trees/2Body/HyperTritonTree_13bc_LS.root Trees/.")
if not os.path.exists("Trees/HyperTritonTree_16qt.root"):
    os.system("scp lxplus.cern.ch:/eos/user/h/hypertriton/trees/2Body/HyperTritonTree_16qt.root Trees/.")
if not os.path.exists("Trees/HyperTritonTree_16qt_LS.root"):
    os.system("scp lxplus.cern.ch:/eos/user/h/hypertriton/trees/2Body/HyperTritonTree_16qt_LS.root Trees/.")
if not os.path.exists("Trees/HyperTritonTree_17d.root"):
    os.system("scp lxplus.cern.ch:/eos/user/h/hypertriton/trees/2Body/HyperTritonTree_17d.root Trees/.") ##G3 based MC
if not os.path.exists("Trees/HyperTritonTree_20l2.root"):
    os.system("scp lxplus.cern.ch:/eos/user/h/hypertriton/trees/2Body/HyperTritonTree_20l2.root Trees/.") #G4 based MC
if not os.path.exists("Utils/AnalysisResults_pPb.root"):
    os.system("scp lxplus.cern.ch:/eos/user/h/hypertriton/trees/2Body/AnalysisResults_pPb.root Utils/.")
# Production-model inputs.
if not os.path.exists("Utils/ProdModels/s3_3body.csv"):
    os.system("scp lxplus.cern.ch:/eos/user/h/hypertriton/trees/pPbProdModels/s3_3body.csv Utils/ProdModels/.")
if not os.path.exists("Utils/ProdModels/s3_2body.csv"):
    os.system("scp lxplus.cern.ch:/eos/user/h/hypertriton/trees/pPbProdModels/s3_2body.csv Utils/ProdModels/.")
if not os.path.exists("Utils/ProdModels/FullCSM-S3.dat"):
    os.system("scp lxplus.cern.ch:/eos/user/h/hypertriton/trees/pPbProdModels/FullCSM-S3.dat Utils/ProdModels/.")
if not os.path.exists("Utils/ProdModels/CSM_predictions_S3_T155MeV_Vc3dNdy.root"):
    os.system("scp lxplus.cern.ch:/eos/user/h/hypertriton/trees/pPbProdModels/CSM_predictions_S3_T155MeV_Vc3dNdy.root Utils/ProdModels/.")
# Whole-directory copy (hence scp -r).
if not os.path.exists("Utils/AbsorptionHe3/"):
    os.system("scp -r lxplus.cern.ch:/eos/user/h/hypertriton/trees/AbsorptionHe3/ Utils/.")
| [
"fmazzasc@cern.ch"
] | fmazzasc@cern.ch |
5a0b2c9ad37f0d78fa4e8b821ad6b27da80fa96a | a268e5e948c0a73e8bac90461ce1ee0eb7541537 | /save.py | ce1f5d0dd9612c97669af23346103fa2260eb03b | [] | no_license | edwardhsu/ABCLearningCentre | cd0d3d87a4642a1b0269d64af76afdd709a056df | 43981fd40f80d2124d89ac956a79a10707b43a0a | refs/heads/master | 2021-01-22T09:57:56.115601 | 2017-09-04T08:07:58 | 2017-09-04T08:07:58 | 102,333,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | #!/Python27/python
# Python 2 CGI script: emit the required HTTP Content-type header first.
print "Content-type: text/html\n\n"
success=""  # placeholder response body; currently always empty
print success
| [
"noreply@github.com"
] | edwardhsu.noreply@github.com |
5a1ef2f0e1986600b235ae08b6cbc9d74b037723 | 8ffe69fc54f874c3767822f1703e1d3d9bab4b5d | /agendamiento1.py | a3488fc5e71d459f4f3b48228b6e981ffcae5fd4 | [] | no_license | D49franco/MateDerecho | 73c68c19644aa510a909676d4ca306ff0cdb378e | 596cfe7a3ffd8f6c3e9cba0310f7656ff7976bf9 | refs/heads/master | 2023-03-22T07:53:50.591136 | 2021-03-16T00:27:53 | 2021-03-16T00:27:53 | 283,233,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,748 | py | from PIL import Image
import ipywidgets as widgets
from ipywidgets import interact, interactive, Layout
from IPython.core.display import HTML, display
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
display(HTML(' <div style="background-color: #023324; "><p>'+
'<br></p> <img src="https://www.uexternado.edu.co/wp-content/themes/externado_theme/images/logo-uec.svg" alt="Universidad Externado" width="150" align="left"> '+
'<h1 style="color: white;font-family:Lucida Sans Unicode, Lucida Grande, sans-serif" align="center">Horarios Pitágoras - Matemáticas para Derecho</h1> '+
'<p><br></p></div>'))
display(HTML("A continuación se encuentran los horarios de asesoría de Pitágoras:"))
im = Image.open('horario_fermat.png','r')
display(im)
print()
display(HTML("Aquí puedes elegir a un profesor, y obtendrás información sobre su asesoría:"))
asesoria={"CRISTHIAN PINTO":"Correo: cristhian.pinto@uexternado.edu.co ID: 825 4410 0117 Contraseña:417192",
"ANTONIO PAZ":"Correo: antonio.paz@uexternado.edu.co ID: 873 2363 0577 Contraseña: 679613",
"EMMA CAMARGO":"Correo: emma.camargo@uexternado.edu.co ID: 205 513 673 Contraseña: Asesoria1",
"SEBASTIÁN BALLÉN":"Correo: juan.ballen@uexternado.edu.co ID: 871 8401 6806 Contraseña: Mate_Dere.",
"LILIANA TORRES":"Correo: lilianac.torres@uexternado.edu.co ID: 994 9243 1994 Contraseña: 132082 ",
"JULIÁN ROBLEDO":"Correo: julian.robledo@uexternado.edu.co Enlace: https://uexternado.zoom.us/j/87673013308",
"CAMILA MERCHÁN":"Correo: camila.merchan@uexternado.edu.co Enlace: https://uexternado.zoom.us/j/95788120214 Contraseña: TortugaG34",
"DAVID DÍAZ":"Correo: david.diaz@uexternado.edu.co ID: 835 5097 2322 Contraseña: 1123581321",
"DAVID FRANCO":"Correo: david.franco@uexternado.edu.co ID: 991 8821 5909 Contraseña: Sherlock1!",
"CAMILO DE LA CRUZ":"Correo: camilo.delacruz@uexternado.edu.co"}
claves=list(asesoria.keys())
def funcion(opcion):
    """Render the selected tutor's contact details plus booking instructions."""
    # Look up and display the tutor's Zoom/email details.
    display(HTML(asesoria[opcion]))
    if opcion == "CAMILO DE LA CRUZ":
        nota = "Para tener una asesoría con el profesor Camilo, escríbele un correo manifestando tu intención. Él agendará contigo una asesoría."
    else:
        nota = "Envíale un correo al profesor informándole en que fecha y hora irás, y sobre cuál tema tienes dudas."
    display(HTML(nota))
style = {'description_width': 'initial',}
l = Layout( height='auto', width='400px')
wid=widgets.Select(options=claves,description="Elige un profesor",style=style,layout=l)
interact(funcion,opcion=wid)
def llamafuncion():
    """Re-render the interactive tutor picker using the module-level widget."""
    interact(funcion,opcion=wid)
    return
"noreply@github.com"
] | D49franco.noreply@github.com |
52467a8b8d9635a7c3453c8a20bd4fc461aa8926 | 82fce9aae9e855a73f4e92d750e6a8df2ef877a5 | /Lab/venv/lib/python3.8/site-packages/OpenGL/WGL/NV/render_texture_rectangle.py | ae9fe0f7855b1f67327cb2ce2c93ff5b8346eeb8 | [] | no_license | BartoszRudnik/GK | 1294f7708902e867dacd7da591b9f2e741bfe9e5 | 6dc09184a3af07143b9729e42a6f62f13da50128 | refs/heads/main | 2023-02-20T19:02:12.408974 | 2021-01-22T10:51:14 | 2021-01-22T10:51:14 | 307,847,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | '''OpenGL extension NV.render_texture_rectangle
This module customises the behaviour of the
OpenGL.raw.WGL.NV.render_texture_rectangle to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/render_texture_rectangle.txt
'''
from OpenGL.raw.WGL.NV.render_texture_rectangle import _EXTENSION_NAME
def glInitRenderTextureRectangleNV():
    """Return True when the WGL NV_render_texture_rectangle extension is available."""
    from OpenGL import extensions
    available = extensions.hasGLExtension(_EXTENSION_NAME)
    return available
### END AUTOGENERATED SECTION | [
"rudnik49@gmail.com"
] | rudnik49@gmail.com |
f203b9afb3fbfa8052ab67b8d87613d76d93072d | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/578101_Colours_Inside_Text_Mode_Python/recipe-578101.py | 55b3619897ec204eb351271aa9008f37f361dfe3 | [
"MIT"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 4,500 | py | # Colours.py
#
# This DEMO script prints colours and codes for Linux, Classic AMIGA and Windows Python.
#
# This is Public Domain and you may do with it as you please.
#
# Tested on standard classic AMIGA A1200(HD), E-UAE, Debian Linux, Windows XP and Vista,
# and WinUAE from Python 1.4.0 to 3.3A2.
#
# This shows how to enhance text printouts to the screen for better presentation.
# Windows is limited to a complete switch of the whole window to only foreground
# and background. The AMIGA and derivatives are limited to ONLY the first eight
# WorkBench colours. Linux is unable to do ITALICS reliably on various terminal
# programs using the escape mode method...
#
# Because of a fun program I uploaded that was voted down I decided to upload this
# because although some may know about it, MANY won't! I will say no more about the fun
# program. This does NOT do anything to your personal Terminal setups except display
# various modes and colours at the flick of a simple escape sequence.
#
# Copy/drag this file to the Lib(rary) directory/folder/drawer, rename to Colours.py
# and run from the Python Prompt using:-
#
# >>> import Colours<RETURN/ENTER>
#
# And away you go...
#
# $VER: Colours.py_Version_0.00.10_(C)2012_B.Walker_G0LCU.
#
# Enjoy finding simple solutions to often very difficult problems...
# The only, (standard), imports required for this DEMO...
import sys
import os
import time
print("\nColours inside a Linux Terminal, Classic AMIGA CLI")
print("or Windows Command Prompt using Python.")
# The code is self explanatory...
# BUG FIX: Python 3.3+ reports sys.platform == "linux" (not "linux2"),
# so the original equality test silently skipped this branch on modern
# Linux Pythons; match by prefix instead.
if sys.platform.startswith("linux") or sys.platform == "darwin":
    print("\n\033[0mThis line is your startup defaults...")
    print("\n \033[0;37;40mNormal Colors.\033[0m \033[1;37;40mBright, Bold, Foregrond Colors.\033[0m\n")
    # ANSI SGR demo: "0;FG;BG" = normal, "1;FG;BG" = bold/bright.
    print(" \033[0;30;47m Black \033[0m 0;30;47m \033[1;30;40m Dark Gray \033[0m 1;30;40m")
    print(" \033[0;31;47m Red \033[0m 0;31;47m \033[1;31;40m Bright Red \033[0m 1;31;40m")
    print(" \033[0;32;47m Green \033[0m 0;32;47m \033[1;32;40m Bright Green \033[0m 1;32;40m")
    print(" \033[0;33;47m Brown \033[0m 0;33;47m \033[1;33;40m Yellow \033[0m 1;33;40m")
    print(" \033[0;34;47m Blue \033[0m 0;34;47m \033[1;34;40m Bright Blue \033[0m 1;34;40m")
    print(" \033[0;35;47m Magenta \033[0m 0;35;47m \033[1;35;40m Bright Magenta \033[0m 1;35;40m")
    print(" \033[0;36;47m Cyan \033[0m 0;36;47m \033[1;36;40m Bright Cyan \033[0m 1;36;40m")
    print(" \033[0;37;40m Light Grey \033[0m 0;37;40m \033[1;37;40m White \033[0m 1;37;40m")
    print("\n\033[0;4;37;40mUnderlined text...\033[0m")
    print("\n\033[1;4;37;40mBright, bold, underlined text...\033[0m")
    print("\n\033[0mFinally reset the colours back to your startup defaults...\nPress Ctrl-C to Quit:- ")
if sys.platform=="amiga":
print("\n\033[0mThis line is your startup defaults...")
print("\n\033[0mThe first eight WorkBench colours only! (Assume default bootup colours.)\n")
print(" \033[0;30;41m White on black 0;30;41m, \033[0;32;41mbright white on black, 0;32;41m... \033[0m")
print(" \033[1;30;43m Bold white on user background, 1;30;43m... \033[0m")
print(" \033[0;3;32;44m Normal, italic, bright, white on user background, 0;3;32;44m... \033[0m")
print(" \033[1;3;32;45m Bold, italic, bright, white on user background, 1;3;32;45m... \033[0m")
print(" \033[0;4;31;46m Normal, underlined, black on user background, 0;4;31;46m... \033[0m")
print(" \033[1;3;4;31;47m Bold, italics, underlined, black on user background, 1;3;4;31;47m... \033[0m")
print("\n\033[0mFinally reset the colours back to your startup defaults...\nPress Ctrl-C to Quit:- ")
if sys.platform=="win32":
# Normal colours for a Command Prompt from CMD.EXE is white on black.
os.system("COLOR 07")
print("\nNormal Command Prompt default colours, white on black...\n")
# Hold for about 2 seconds...
time.sleep(2)
# This sets the whole page to green on black.
os.system("COLOR 0A")
print("Refer to the COLOR command for choice of colours.\nThis is green on black for about four seconds...\n")
# Hold for about 4 seconds before bringing back to standard colours...
time.sleep(4)
# These are the default foreground and background colours.
os.system("COLOR 07")
print("Back to the default foreground and background colours...\nPress Ctrl-C to Quit:- ")
# Idle until the user presses Ctrl-C; sleep instead of a busy `pass`
# loop so the script does not pin a CPU core at 100%.
while True:
    time.sleep(1)
# End of Colours.py code.
# Enjoy finding simple solutions to often very difficult problems...
| [
"betty@qburst.com"
] | betty@qburst.com |
48addabcb59a311514e90f647f9ce60153893ed7 | 2c00381d252d7b99643e30a4127a94875815cf6c | /mysite/settings.py | d9723666a7a95126bdc4d4ad0651d739b0a564a4 | [] | no_license | ivenkat/Dildiya | 50e709b9925ef38cd892aa0a719e6e280c74507b | 9dce83a4457e71aee200ce44a09f6c43e8bc4805 | refs/heads/master | 2022-12-12T14:17:28.491676 | 2018-12-19T04:21:09 | 2018-12-19T04:21:09 | 161,574,189 | 0 | 0 | null | 2022-12-08T01:24:16 | 2018-12-13T02:42:13 | Python | UTF-8 | Python | false | false | 3,957 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4w5shtm)72u4$(y+zy1_#qr2gs*=yqfkzekk35343cz^lcgz2q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'3d8ae327.ngrok.io',
'127.0.0.1',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites', # new
'allauth', # new
'allauth.account', # new
'allauth.socialaccount', # new
'allauth.socialaccount.providers.google', # new
'allauth.socialaccount.providers.facebook',
'users',
'pages',
#bootstrap
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        # BUG FIX: the path was hard-coded to one developer's machine
        # ('/Users/ishwarya/...'); derive it from BASE_DIR (defined at
        # the top of this settings module) so the project is portable.
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
'/static/',
]
# Login/user settings
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
AUTH_USER_MODEL = "users.CustomUser"
ACCOUNT_ACTIVATION_DAYS = 2
SITE_ID = 3
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = False
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
#Bootstrap
CRISPY_TEMPLATE_PACK = 'bootstrap4'
| [
"ishwarya@tests-MacBook-Air.local"
] | ishwarya@tests-MacBook-Air.local |
6eedf3d893f18e159a6f3b13443f205f44104b4a | 84263fd1391de079c5447359f1a7cd1abfb47126 | /pythonprog/table_output_comp.py | 8f515455e236736bff6a93c70c3f892ee10c3d21 | [] | no_license | Shilpa-T/Python | b19259b1be17182b1a9f86a42c0dd8134e749304 | 280fc16e9c7c0f38b33c59381457fcbbd42b8ae3 | refs/heads/master | 2020-04-19T00:13:38.706605 | 2019-01-27T18:57:52 | 2019-01-27T18:57:52 | 167,841,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | import pandas as pd
# Load the tab-separated comparison table (Python 2 script; header row 0).
df = pd.read_csv('tableout.txt', sep="\t", header=0)
df_data = pd.DataFrame(df)  # NOTE(review): unused copy of df; kept as-is
print df.head()
print df.shape
#print df.loc[:3,"Delta"]
#print df["Delta"].items()
#print type(df['Delta'].items())
"""
if df.loc[df['Delta'] == str(0)]:
    print 'PASS'
else:
    print 'FAIL'
"""
| [
"shilpindu@gmail.com"
] | shilpindu@gmail.com |
ce824e4b2589a94215280782b40f74f3a608a99c | 53b1af96dad3e639fe41b6f1c277ca1e9319430f | /crawler/crawler/settings.py | f93b7fe618c7abf214ea7b39d5c8cf207095f260 | [] | no_license | btseytlin/crawler_test_task | 5b9fc1393e99789ecf03da3f38612082d4f605f4 | 40b75c0304dc8438c9654a41121c667f06b54af5 | refs/heads/master | 2021-09-04T12:17:09.410624 | 2018-01-18T15:53:54 | 2018-01-18T15:53:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,091 | py | # -*- coding: utf-8 -*-
import os
# Scrapy settings for crawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'crawler'
SPIDER_MODULES = ['crawler.spiders']
NEWSPIDER_MODULE = 'crawler.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'crawler (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'crawler.middlewares.CrawlerSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'crawler.middlewares.CrawlerDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'crawler.pipelines.CrawlerPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"bt@wroom.online"
] | bt@wroom.online |
f260cf4cdc928b6e6d3ef42bb6b3302b8db6a53c | df4d9aba687afa8da5ee84e1f6d0021554730752 | /f4.py | d61cbd394c8fde75035c6d36153a41fa9fa2ce02 | [] | no_license | tom021982/python3 | cd214dd3302991879901de479dafd9cc69c89650 | b42ee1988b4bcc3e177e0114ed3d51b181d45c36 | refs/heads/master | 2020-11-27T08:28:07.890185 | 2019-12-21T03:32:29 | 2019-12-21T03:32:29 | 229,370,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | # To pass keyword variable length arguments
def bikes_make(**kwargs):
    """Print the keyword arguments passed to the call, if any were given.

    BUG FIX: `kwargs` is always a dict (never None), so the original
    `if kwargs is not None:` was always true and printed `{}` for an
    argument-less call. Truthiness correctly skips the empty case.
    """
    if kwargs:
        print(kwargs)

bikes_make(GTX='Suzuki', FTZ='Yamaha')
| [
"tom.sana@gmail.com"
] | tom.sana@gmail.com |
4ab70b08fc5063952eccb2ac48c112c4fc17c0f1 | 1eefc1fc19dd4b0ded6eaad75d232450d00e0eba | /bench/py/keyedpq_a.py | a6d2ef8d529725c6a88574d63053436b63bbfda8 | [
"MIT"
] | permissive | pskopnik/apq | 2dbfa3e56c6e5c836b9d38a9e4b6bdf2f83eb44a | 827e722ec604d2f7c050f43748136613c3cd3d70 | refs/heads/master | 2021-07-07T08:32:40.204240 | 2020-02-12T13:33:41 | 2020-02-12T13:33:41 | 237,626,912 | 4 | 1 | MIT | 2021-04-20T19:28:42 | 2020-02-01T14:31:05 | Python | UTF-8 | Python | false | false | 3,634 | py | from dataclasses import dataclass, field
from typing import Any, Dict, Generic, List, Tuple, TypeVar, Union
import heapq
import math
_KT = TypeVar('_KT') # key type
_DT = TypeVar('_DT') # data type
_KT_inner = TypeVar('_KT_inner') # alternative key type for inner definitions
_DT_inner = TypeVar('_DT_inner') # alternative data type for inner definitions
class PyKeyedPQA(Generic[_KT, _DT]):
@dataclass(order=True)
class _Entry(Generic[_KT_inner, _DT_inner]):
value: float = field(init=True, compare=True)
change_index: int = field(init=True, compare=True)
key: _KT_inner = field(init=True, compare=False)
data: _DT_inner = field(init=True, compare=False)
class Item(Generic[_KT_inner, _DT_inner]):
def __init__(self, entry: 'PyKeyedPQA._Entry[_KT_inner, _DT_inner]') -> None:
self._entry: PyKeyedPQA._Entry[_KT_inner, _DT_inner] = entry
@property
def key(self) -> _KT_inner:
return self._entry.key
@property
def value(self) -> float:
return self._entry.value
@property
def data(self) -> _DT_inner:
return self._entry.data
def __init__(self) -> None:
self._heap: List[PyKeyedPQA._Entry[_KT, _DT]] = []
self._change_index = 1
self._lookup_dict: Dict[_KT, PyKeyedPQA._Entry[_KT, _DT]] = {}
def _entry_from_identifier(self, identifier: Union[_KT, 'PyKeyedPQA.Item[_KT, _DT]']) -> 'PyKeyedPQA._Entry[_KT, _DT]':
if isinstance(identifier, PyKeyedPQA.Item):
return identifier._entry
else:
return self._lookup_dict[identifier]
def __len__(self) -> int:
return len(self._heap)
def __contains__(self, key: _KT) -> bool:
return key in self._lookup_dict
def __getitem__(self, key: _KT) -> 'PyKeyedPQA.Item[_KT, _DT]':
entry = self._lookup_dict[key]
return PyKeyedPQA.Item(entry)
def __delitem__(self, identifier: Union[_KT, 'PyKeyedPQA.Item[_KT, _DT]']) -> None:
entry = self._entry_from_identifier(identifier)
entry.value, entry.change_index = -math.inf, 0
# impl A
heapq.heapify(self._heap)
heapq.heappop(self._heap)
del self._lookup_dict[entry.key]
def add(self, key: _KT, value: float, data: _DT) -> 'PyKeyedPQA.Item[_KT, _DT]':
entry = PyKeyedPQA._Entry(value, self._change_index, key, data)
self._change_index += 1
heapq.heappush(self._heap, entry)
self._lookup_dict[key] = entry
return PyKeyedPQA.Item(entry)
def change_value(self, identifier: Union[_KT, 'PyKeyedPQA.Item[_KT, _DT]'], value: float) -> None:
entry = self._entry_from_identifier(identifier)
self._change_value(entry, value)
def _change_value(self, entry: 'PyKeyedPQA._Entry[_KT, _DT]', value: float) -> None:
entry.value, entry.change_index = value, self._change_index
self._change_index += 1
# impl A
heapq.heapify(self._heap)
def add_or_change(self, key: _KT, value: float, data: _DT) -> 'PyKeyedPQA.Item[_KT, _DT]':
try:
entry = self._lookup_dict[key]
self._change_value(entry, value)
return PyKeyedPQA.Item(entry)
except KeyError:
return self.add(key, value, data)
def peek(self) -> 'PyKeyedPQA.Item[_KT, _DT]':
entry = self._heap[0]
return PyKeyedPQA.Item(entry)
def pop(self) -> Tuple[_KT, float, _DT]:
entry = heapq.heappop(self._heap)
del self._lookup_dict[entry.key]
return entry.key, entry.value, entry.data
| [
"paul@skopnik.me"
] | paul@skopnik.me |
b96417229799dc4f96e334adb2943e516bd8ae24 | 23e6ce3153b94f2216a26aff954a3142b6b99cc3 | /panchayat_extract.py | 88042a1343582b99032d97389cc2dd30c2ca0cf7 | [] | no_license | ravibalgi/nrega | 199a26238542ab7f4bdca032a508239a2bc7296e | b137dab3be02d10c22bc9dc4a8ff56f8861ee98f | refs/heads/master | 2020-08-27T03:58:21.252391 | 2013-07-08T00:11:22 | 2013-07-08T00:11:22 | 2,186,773 | 0 | 1 | null | 2013-07-08T00:11:22 | 2011-08-10T18:27:32 | Python | UTF-8 | Python | false | false | 3,423 | py | from BeautifulSoup import BeautifulSoup
import urllib2
from urlparse import urlparse
import MySQLdb #MySQL library
import nregadbconfig
def panchayatExtract(url, year):
data = {}
urlparts = urlparse(url)
host = urlparts.hostname
page = urllib2.urlopen(url)
dir(BeautifulSoup)
soup = BeautifulSoup.BeautifulSoup(page)
table_block = soup('table', id = "Table2")[0]
# there are five unwanted rows
unwanted_row = table_block.next.nextSibling
row_count = 1
# traversing the table to remove unwanted rows
while row_count < 5:
unwanted_row = unwanted_row.nextSibling.nextSibling
row_count += 1
# first row of the required data for districts
data_row = unwanted_row.nextSibling.nextSibling
while data_row.td.nextSibling.nextSibling.next.string:
print "Panchayat %s " % (data_row.td.nextSibling.nextSibling.next.string)
# assigning the value of the data_row to the data_col
data_col = data_row
# Pointing to the first column
data_col = data_col.td.nextSibling.nextSibling
# extracting the url, Code, Name via the href tag
# url value extraction
# the url value is extracted as '../../citizen_html'
# hence a small manipulation
# appending the ip-address and the string block '
# panchayat name is scrapped from the screen value
name = data_col.next.string
# Scrapping total no.of works, labor expenditure, material Expenditure
# these are stored in 32nd column hence a manipulation
col_count = 1
while col_count < 32:
data_col = data_col.nextSibling.nextSibling
col_count += 1
# panchayat code index and value. panchayat code is 10 characters
index = temp_url.find("Panchayat_code=")
index=index+15
code = temp_url[index : index + 10]
# scrapping no. of Works noWorks col: 32
noWorks = data_col.next.string
# scrapping labor expenditure col :33
data_col = data_col.nextSibling.nextSibling
labExpn = data_col.next.string
# scrapping material Expenditure col:34
data_col = data_col.nextSibling.nextSibling
matExpn = data_col.next.string
data[name] = {
"works_no": noWorks,
"labour_exp": labExpn,
"matExpn": matExpn,
"year":year}
#DB start
# opening a database connection and inserting the fetched data
db = MySQLdb.connect(host,user,passcode,database)
# cursor for database operations
cursor= db.cursor()
#SQL for inserting data in table
sql = "INSERT INTO "+ panchayat_expense +"(GramPanchayatUniqueId, \
Year, NoOfWorks, LabourExpenditures, MaterialExpenditures) \
VALUES ('%s', '%s', '%s', '%s', '%s' )" % \
(code, year, noWorks, labExpn, matExpn)
#try except block for executing operation
try:
cursor.execute(sql)
# Commit
db.commit()
except:
# Rollback
db.rollback()
# dislodge
db.close()
#DB end
data_row = data_row.nextSibling
return data
if __name__ == "__main__":
print panchayatExtract("http://164.100.112.66/netnrega/writereaddata/citizen_out/phy_fin_reptemp_Out_1821002_local_1112.html")
| [
"ravibalgi@gmail.com"
] | ravibalgi@gmail.com |
b478df5eaf0802fbb67435587ab05a919a09ef64 | 05fc03082078e043142d2de60ee14aadd2014c9e | /heatsource900/Utils/Logger.py | 5fe98ea13bf59d8bf5dfddb4799e03e26ccbe7bb | [] | no_license | stfnhsl/heatsource_version_Boku | cf1e3551a547798a9ba3bb7874b2761e1cad7385 | a8f6b4b90b41e6b531cf5d9b5c020e61c88f5dd0 | refs/heads/master | 2021-01-25T08:42:58.425930 | 2015-01-17T09:33:59 | 2015-01-17T09:33:59 | 27,993,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,523 | py | # Heat Source, Copyright (C) 2000-2014, Oregon Department of Environmental Quality
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
class LoggerDiety(object):
def __init__(self):
self._last = None
self._file = None
def __del__(self):
try:
self._file.close()
except AttributeError:
pass
def SetFile(self, filename):
try:
self._file = open(filename,"w")
except IOError:
raise IOError("Opening output directory failed. Make sure directory exists before running.")
def __call__(self, message, n=None,t=None): self.write(message)
def write(self, message):
if message != self._last:
t = time.strftime("%H:%M:%S",time.gmtime(time.time()))
self._file.write("%s-> %s\n"% (t,message))
self._last = message
def progress(self): self._file.write(".")
Logger = LoggerDiety()
| [
"ic12b067@technikum-wien.at"
] | ic12b067@technikum-wien.at |
abf49d109342c6378ce64a7a7bad76909142507a | 098ee5aa42f93104be293c25e66699e647339d32 | /rl-handson-rlvs21/rl-handson-rlvs21-main/rlvs_hands_on_sb3.py | f4e11458673979b494d26d6e50a2bb1fb0c3abef | [
"MIT"
] | permissive | castorfou/handson_stablebaselines3 | ed07db4244ec541fd6d354ff7e9c875e3dccb90f | cff9224439caf9f27b3459fa22f662d0a692d218 | refs/heads/main | 2023-08-11T23:45:48.596260 | 2021-10-13T12:13:39 | 2021-10-13T12:13:39 | 351,028,607 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 31,430 | py | #!/usr/bin/env python
# coding: utf-8
# # Stable Baselines3 Hands-on Session - RLVS
#
# Github repo: https://github.com/araffin/rl-handson-rlvs21
#
# Stable-Baselines3: https://github.com/DLR-RM/stable-baselines3
#
# Documentation: https://stable-baselines3.readthedocs.io/en/master/
#
# SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib
#
# RL Baselines3 zoo: https://github.com/DLR-RM/rl-baselines3-zoo
#
# [RL Baselines3 Zoo](https://github.com/DLR-RM/rl-baselines3-zoo) is a collection of pre-trained Reinforcement Learning agents using Stable-Baselines3.
#
# It also provides basic scripts for training, evaluating agents, tuning hyperparameters and recording videos.
#
#
# ## Introduction
#
# In this notebook, you will learn the basics for using stable baselines3 library: how to create a RL model, train it and evaluate it. Because all algorithms share the same interface, we will see how simple it is to switch from one algorithm to another.
# You will also learn how to define a gym wrapper and callback to customise the training.
# We will finish this session by trying out multiprocessing and have a hyperparameter tuning challenge.
#
#
# ## Install Dependencies and Stable Baselines3 Using Pip
#
# List of full dependencies can be found in the [README](https://github.com/DLR-RM/stable-baselines3).
#
#
# ```
# pip install stable-baselines3[extra]
# ```
# In[ ]:
# System packages required for rendering and recording videos (ffmpeg, OpenGL, virtual display)
get_ipython().system('apt-get install ffmpeg freeglut3-dev xvfb # For visualization')
# In[ ]:
# Core dependency: Stable-Baselines3 with optional extras (Tensorboard, OpenCV, ...)
get_ipython().system('pip install stable-baselines3[extra]')
# In[2]:
# Optional: install SB3 contrib to have access to additional algorithms
# https://github.com/Stable-Baselines-Team/stable-baselines3-contrib
get_ipython().system('pip install sb3-contrib')
# ## First steps with the Gym interface
#
# An environment that follows the [gym interface](https://stable-baselines3.readthedocs.io/en/master/guide/custom_env.html) is quite simple to use.
# It provides to this user mainly three methods:
# - `reset()` called at the beginning of an episode, it returns an observation
# - `step(action)` called to take an action with the environment, it returns the next observation, the immediate reward, whether the episode is over and additional information
# - (Optional) `render(method='human')` which allow to visualize the agent in action. Note that graphical interface does not work on google colab, so we cannot use it directly (we have to rely on `method='rbg_array'` to retrieve an image of the scene
#
# Under the hood, it also contains two useful properties:
# - `observation_space` which one of the gym spaces (`Discrete`, `Box`, ...) and describe the type and shape of the observation
# - `action_space` which is also a gym space object that describes the action space, so the type of action that can be taken
#
# The best way to learn about gym spaces is to look at the [source code](https://github.com/openai/gym/tree/master/gym/spaces), but you need to know at least the main ones:
# - `gym.spaces.Box`: A (possibly unbounded) box in $R^n$. Specifically, a Box represents the Cartesian product of n closed intervals. Each interval has the form of one of [a, b], (-oo, b], [a, oo), or (-oo, oo). Example: A 1D-Vector or an image observation can be described with the Box space.
# ```python
# # Example for using image as input:
# observation_space = spaces.Box(low=0, high=255, shape=(HEIGHT, WIDTH, N_CHANNELS), dtype=np.uint8)
# ```
#
# - `gym.spaces.Discrete`: A discrete space in $\{ 0, 1, \dots, n-1 \}$
# Example: if you have two actions ("left" and "right") you can represent your action space using `Discrete(2)`, the first action will be 0 and the second 1.
#
#
#
# [Documentation on custom env](https://stable-baselines3.readthedocs.io/en/master/guide/custom_env.html)
#
# Below you can find an example of a custom environment:
# In[3]:
from typing import Any, Callable, Dict, List, NamedTuple, Tuple, Union
import gym
import numpy as np
from stable_baselines3.common.env_checker import check_env
GymObs = Union[Tuple, Dict, np.ndarray, int]
class CustomEnv(gym.Env):
    """Bare-bones environment illustrating the Gym API surface."""

    def __init__(self):
        super().__init__()
        # Unbounded 14-dim observation vector; 6-dim continuous action in [-1, 1]
        self.observation_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(14,))
        self.action_space = gym.spaces.Box(low=-1, high=1, shape=(6,))

    def reset(self) -> GymObs:
        """
        Start a new episode.

        :return: the first observation of the episode
        """
        return self.observation_space.sample()

    def step(self, action: Union[int, np.ndarray]) -> Tuple[GymObs, float, bool, Dict]:
        """
        Apply ``action`` to the environment.

        :return: A tuple containing the new observation, the reward signal,
            whether the episode is over and additional informations.
        """
        # Dummy dynamics: random observation, constant reward,
        # never-ending episode and no extra info.
        next_obs = self.observation_space.sample()
        return next_obs, 1.0, False, {}
env = CustomEnv()
# Check that the custom environment follows the Gym interface:
# this will print warnings and raise errors if something is wrong
# (spaces, dtypes, reset/step return values, ...).
check_env(env)
# ## Imports
# Stable-Baselines3 works on environments that follow the [gym interface](https://stable-baselines3.readthedocs.io/en/master/guide/custom_env.html).
# You can find a list of available environment [here](https://gym.openai.com/envs/#classic_control).
#
# It is also recommended to check the [source code](https://github.com/openai/gym) to learn more about the observation and action space of each env, as gym does not have a proper documentation.
# Not all algorithms can work with all action spaces, you can find more in this [recap table](https://stable-baselines3.readthedocs.io/en/master/guide/algos.html)
# In[4]:
import gym
import numpy as np
# The first thing you need to import is the RL model, check the documentation to know what you can use on which problem
# In[5]:
from stable_baselines3 import PPO, A2C, SAC, TD3, DQN
# In[6]:
# Algorithms from the contrib repo
# https://github.com/Stable-Baselines-Team/stable-baselines3-contrib
from sb3_contrib import QRDQN, TQC
# The next thing you need to import is the policy class that will be used to create the networks (for the policy/value functions).
# This step is optional as you can directly use strings in the constructor:
#
# ```PPO("MlpPolicy", env)``` instead of ```PPO(MlpPolicy, env)```
#
# Note that some algorithms like `SAC` have their own `MlpPolicy`, that's why using string for the policy is the recommended option.
# In[7]:
from stable_baselines3.ppo.policies import MlpPolicy
# ## Create the Gym env and instantiate the agent
#
# For this example, we will use CartPole environment, a classic control problem.
#
# "A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The system is controlled by applying a force of +1 or -1 to the cart. The pendulum starts upright, and the goal is to prevent it from falling over. A reward of +1 is provided for every timestep that the pole remains upright. "
#
# Cartpole environment: [https://gym.openai.com/envs/CartPole-v1/](https://gym.openai.com/envs/CartPole-v1/)
#
# 
#
#
# We chose the MlpPolicy because the observation of the CartPole task is a feature vector, not images.
#
# The type of action to use (discrete/continuous) will be automatically deduced from the environment action space
#
# Here we are using the [Proximal Policy Optimization](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html) algorithm, which is an Actor-Critic method: it uses a value function to improve the policy gradient descent (by reducing the variance).
#
# It combines ideas from [A2C](https://stable-baselines3.readthedocs.io/en/master/modules/a2c.html) (having multiple workers and using an entropy bonus for exploration) and [TRPO](https://stable-baselines.readthedocs.io/en/master/modules/trpo.html) (it uses a trust region to improve stability and avoid catastrophic drops in performance).
#
# PPO is an on-policy algorithm, which means that the trajectories used to update the networks must be collected using the latest policy.
# It is usually less sample efficient than off-policy alorithms like [DQN](https://stable-baselines.readthedocs.io/en/master/modules/dqn.html), [SAC](https://stable-baselines3.readthedocs.io/en/master/modules/sac.html) or [TD3](https://stable-baselines3.readthedocs.io/en/master/modules/td3.html), but is much faster regarding wall-clock time.
#
# In[8]:
# Create the gym Env
env = gym.make('CartPole-v1')
# Create the RL agent (PPO with a multi-layer perceptron policy)
model = PPO('MlpPolicy', env, verbose = 1)
# ### Using the model to predict actions
# In[9]:
# Inspect the observation and action spaces of the env
print(env.observation_space)
print(env.action_space)
# In[10]:
# Retrieve first observation
obs = env.reset()
# In[11]:
# Predict the action to take given the observation
action, _ = model.predict(obs, deterministic=True)
# In[12]:
# We are using discrete actions, therefore `action` is an int
assert env.action_space.contains(action)
print(action)
# Step in the environment
# In[13]:
obs, reward, done, infos = env.step(action)
# In[14]:
print(f"obs_shape={obs.shape}, reward={reward}, done? {done}")
# In[16]:
# Reset the env at the end of an episode
if done:
    obs = env.reset()
# ### Exercise (10 minutes): write the function to evaluate the agent
#
# This function will be used to evaluate the performance of an RL agent.
# Thanks to Stable Baselines3 interface, it will work with any SB3 algorithms and any Gym environment.
#
# See docstring of the function for what is expected as input/output.
# In[22]:
from stable_baselines3.common.base_class import BaseAlgorithm
def evaluate(
    model: BaseAlgorithm,
    env: gym.Env,
    n_eval_episodes: int = 100,
    deterministic: bool = False,
) -> float:
    """
    Evaluate an RL agent for `n_eval_episodes`.

    :param model: the RL Agent
    :param env: the gym Environment
    :param n_eval_episodes: number of episodes to evaluate it
    :param deterministic: Whether to use deterministic or stochastic actions
    :return: Mean reward for the last `n_eval_episodes`
    """
    # Run complete episodes and record the undiscounted return of each one
    episode_returns = []
    for _ in range(n_eval_episodes):
        observation = env.reset()
        episode_over = False
        episode_return = 0
        while not episode_over:
            chosen_action, _ = model.predict(observation, deterministic=deterministic)
            observation, step_reward, episode_over, _ = env.step(chosen_action)
            episode_return += step_reward
        episode_returns.append(episode_return)
    mean_episode_reward = np.sum(episode_returns) / n_eval_episodes
    print(f"mean_reward={mean_episode_reward}, number_episodes={n_eval_episodes}")
    return mean_episode_reward
# Let's evaluate the un-trained agent, this should be a random agent.
# In[23]:
env = gym.make('CartPole-v1')
# Fixed seed so results are reproducible across runs
model = PPO('MlpPolicy', env, seed=1,verbose=1)
# In[24]:
# Random Agent, before training (stochastic actions, no learning yet)
mean_reward_before_train = evaluate(model, env, n_eval_episodes=100, deterministic=False)
# Stable-Baselines already provides you with that helper (the actual implementation is a little more advanced):
# In[25]:
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.monitor import Monitor
# In[26]:
# The Monitor wrapper allows to keep track of the training reward and other infos (useful for plotting)
env = Monitor(env)
# In[27]:
# Seed to compare to previous implementation
env.seed(42)
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=100, deterministic=True)
print(f"mean_reward:{mean_reward:.2f} +/- {std_reward:.2f}")
# ## Train the agent and evaluate it
# In[28]:
# Train the agent for 10000 steps
model.learn(total_timesteps=10000)
# In[29]:
# Evaluate the trained agent: the mean reward should be much higher now
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=100)
print(f"mean_reward:{mean_reward:.2f} +/- {std_reward:.2f}")
# Apparently the training went well, the mean reward increased a lot!
# ### Prepare video recording
# In[30]:
# Set up fake display; otherwise rendering will fail
import os
# Start a virtual framebuffer (Xvfb) and point rendering to it,
# so env.render() works on a headless machine (e.g. Colab).
os.system("Xvfb :1 -screen 0 1024x768x24 &")
os.environ['DISPLAY'] = ':1'
# In[31]:
import base64
from pathlib import Path
from IPython import display as ipythondisplay
def show_videos(video_path='', prefix=''):
    """
    Embed every recorded mp4 matching ``prefix`` inline in the notebook.

    Taken from https://github.com/eleurent/highway-env

    :param video_path: (str) Path to the folder containing videos
    :param prefix: (str) Filter the video, showing only the only starting with this prefix
    """
    video_tags = []
    for mp4 in Path(video_path).glob(f"{prefix}*.mp4"):
        # Inline the video bytes as a base64 data URI so no file server is needed
        encoded = base64.b64encode(mp4.read_bytes()).decode('ascii')
        video_tags.append('''<video alt="{}" autoplay
              loop controls style="height: 400px;">
              <source src="data:video/mp4;base64,{}" type="video/mp4" />
            </video>'''.format(mp4, encoded))
    ipythondisplay.display(ipythondisplay.HTML(data="<br>".join(video_tags)))
# We will record a video using the [VecVideoRecorder](https://stable-baselines3.readthedocs.io/en/master/guide/vec_envs.html#vecvideorecorder) wrapper, you can learn more about those wrappers in our Documentation.
# In[32]:
from stable_baselines3.common.vec_env import VecVideoRecorder, DummyVecEnv
def record_video(env_id, model, video_length=500, prefix='', video_folder='videos/'):
    """
    Record a video of ``model`` acting in ``env_id`` for ``video_length`` steps.

    :param env_id: (str) id of the gym environment to record
    :param model: (RL model) trained agent used to select actions
    :param video_length: (int) number of steps to record
    :param prefix: (str) prefix of the generated video file
    :param video_folder: (str) folder where the video is written
    """
    # Wrap a single env in a video recorder that starts
    # recording at the very first step.
    recording_env = VecVideoRecorder(
        DummyVecEnv([lambda: gym.make(env_id)]),
        video_folder=video_folder,
        record_video_trigger=lambda step: step == 0,
        video_length=video_length,
        name_prefix=prefix,
    )
    current_obs = recording_env.reset()
    for _ in range(video_length):
        chosen_action, _ = model.predict(current_obs, deterministic=True)
        current_obs, _, _, _ = recording_env.step(chosen_action)
    # Close the video recorder so the file is flushed to disk
    recording_env.close()
# ### Visualize trained agent
#
#
# In[33]:
# Record 500 steps of the trained PPO agent, then display the video inline.
record_video('CartPole-v1', model, video_length=500, prefix='ppo-cartpole')
# In[34]:
show_videos('videos', prefix='ppo')
# ### Exercise (5 minutes): Save, Load The Model and that the loading was correct
#
# Save the model and then load it.
#
# Don't forget to check that loading went well: the model must predict the same actions given the same observations.
# In[35]:
# Sample observations using the environment observation space
observations = np.array([env.observation_space.sample() for _ in range(10)])
# Predict actions on those observations using trained model
action_before_saving, _ = model.predict(observations, deterministic=True)
# In[36]:
# Save the model to disk (creates ppo_cartpole.zip)
model.save("ppo_cartpole")
# In[37]:
# Delete the model (to demonstrate loading)
del model
# In[38]:
get_ipython().system('ls *.zip')
# In[39]:
# Load the model
model = PPO.load('ppo_cartpole')
# In[40]:
# Predict actions on the observations with the loaded model
action_after_loading, _ = model.predict(observations, deterministic=True)
# In[41]:
# Check that the predictions are the same
# (fixed typo in the original assertion message: "Somethng")
assert np.allclose(action_before_saving, action_after_loading), "Something went wrong in the loading"
# ## Bonus: Train a RL Model in One Line
#
# The policy class to use will be inferred and the environment will be automatically created. This works because both are [registered](https://stable-baselines3.readthedocs.io/en/master/guide/quickstart.html).
# In[ ]:
model = PPO('MlpPolicy', "CartPole-v1", verbose=1).learn(1000)
# # Part II: Gym Wrappers
#
#
# In this part, you will learn how to use *Gym Wrappers* which allow to do monitoring, normalization, limit the number of steps, feature augmentation, ...
#
# ## Anatomy of a gym wrapper
# A gym wrapper follows the [gym](https://stable-baselines.readthedocs.io/en/master/guide/custom_env.html) interface: it has a `reset()` and `step()` method.
#
# Because a wrapper is *around* an environment, we can access it with `self.env`, this allow to easily interact with it without modifying the original env.
# There are many wrappers that have been predefined, for a complete list refer to [gym documentation](https://github.com/openai/gym/tree/master/gym/wrappers)
# In[42]:
class CustomWrapper(gym.Wrapper):
    """
    Template wrapper: forwards every call untouched to the wrapped env.

    :param env: Gym environment that will be wrapped
    """

    def __init__(self, env: gym.Env):
        # Call the parent constructor, so we can access self.env later
        super().__init__(env)

    def reset(self):
        """
        Reset the wrapped environment and return its first observation.
        """
        return self.env.reset()

    def step(self, action):
        """
        :param action: ([float] or int) Action taken by the agent
        :return: (np.ndarray, float, bool, dict) observation, reward, is the episode over?, additional informations
        """
        return self.env.step(action)
# ### Exercise (7 minutes): limit the episode length
#
# In this exercise, the goal is to create a Gym wrapper that will limit the maximum number of steps per episode (timeout).
#
#
# It will also pass a `timeout` signal in the info dict to tell the agent that the termination was due to reaching the limits.
# In[58]:
class TimeLimitWrapper(gym.Wrapper):
    """
    Limit the maximum number of steps per episode.

    Fixes vs the original: the duplicated (shadowed) class definition was
    removed, and ``reset()`` now returns the observation produced by the
    wrapped env (it previously called ``self.env.reset()`` without storing
    the result and returned an undefined/stale ``obs``).

    :param env: Gym environment that will be wrapped
    :param max_steps: Max number of steps per episode
    """

    def __init__(self, env: gym.Env, max_steps: int = 100):
        # Call the parent constructor, so we can access self.env later
        super().__init__(env)
        self.max_steps = max_steps
        # Counter of steps taken in the current episode
        self.counter = 0

    def reset(self) -> GymObs:
        """
        Reset the step counter and the wrapped environment.

        :return: the first observation of the new episode
        """
        self.counter = 0
        # Capture and return the observation (bug fix)
        obs = self.env.reset()
        return obs

    def step(self, action: Union[int, np.ndarray]) -> Tuple[GymObs, float, bool, Dict]:
        """
        Step into the wrapped env, forcing episode termination once
        ``max_steps`` steps have been taken.

        :return: observation, reward, done flag, info dict
            (info contains "episode_timeout" when stopped by the time limit)
        """
        obs, reward, done, infos = self.env.step(action)
        self.counter += 1
        # Overwrite the done signal when the time limit is reached
        # and tell the agent the termination was due to a timeout.
        if self.counter >= self.max_steps:
            done = True
            infos['episode_timeout'] = True
        return obs, reward, done, infos
# #### Test the wrapper
# In[59]:
from gym.envs.classic_control.pendulum import PendulumEnv
# Here we create the environment directly because gym.make() already wraps
# the environment in a TimeLimit wrapper otherwise.
env = PendulumEnv()
# Wrap the environment
env = TimeLimitWrapper(env, max_steps=100)
# In[60]:
obs = env.reset()
done = False
n_steps = 0
while not done:
    # Take random actions
    random_action = env.action_space.sample()
    obs, reward, done, infos = env.step(random_action)
    n_steps += 1
# The wrapper should have stopped the episode at exactly max_steps (100)
print(f"Episode length: {n_steps} steps, info dict: {infos}")
# In practice, `gym` already have a wrapper for that named `TimeLimit` (`gym.wrappers.TimeLimit`) that is used by most environments.
# # Part III: Callbacks
#
# In this part, you will learn how to use [Callbacks](https://stable-baselines3.readthedocs.io/en/master/guide/callbacks.html) which allow to do monitoring, auto saving, model manipulation, progress bars, ...
# Please read the [documentation](https://stable-baselines3.readthedocs.io/en/master/guide/callbacks.html). Although Stable-Baselines3 provides you with a callback collection (e.g. for creating checkpoints or for evaluation), we are going to re-implement some so you can get a good understanding of how they work.
#
# To build a custom callback, you need to create a class that derives from `BaseCallback`. This will give you access to events (`_on_training_start`, `_on_step()`) and useful variables (like `self.model` for the RL model).
#
# `_on_step` returns a boolean value for whether or not the training should continue.
#
# Thanks to the access to the models variables, in particular `self.model`, we are able to even change the parameters of the model without halting the training, or changing the model's code.
# In[61]:
from stable_baselines3.common.callbacks import BaseCallback
# In[ ]:
class CustomCallback(BaseCallback):
    """
    Skeleton of a custom callback deriving from ``BaseCallback``.

    :param verbose: (int) Verbosity level 0: not output 1: info 2: debug
    """

    def __init__(self, verbose=0):
        super().__init__(verbose)
        # Useful attributes inherited from the base class:
        #   self.model          - the RL model being trained
        #   self.training_env   - alias for self.model.get_env()
        #   self.n_calls        - number of times the callback was called
        #   self.num_timesteps  - total environment steps so far
        #   self.locals         - local variables of the caller
        #   self.globals        - global variables of the caller
        #   self.logger         - logger used to report values to the terminal
        #   self.parent         - parent object (for event callbacks)

    def _on_training_start(self) -> None:
        """
        Triggered once, before the first rollout starts.
        """
        pass

    def _on_rollout_start(self) -> None:
        """
        A rollout is the collection of environment interaction
        using the current policy.
        Triggered before collecting new samples.
        """
        pass

    def _on_step(self) -> bool:
        """
        Called by the model after each call to `env.step()`.
        For a child callback (of an `EventCallback`), this is called
        when the event is triggered.

        :return: If the callback returns False, training is aborted early.
        """
        return True

    def _on_rollout_end(self) -> None:
        """
        Triggered before updating the policy.
        """
        pass

    def _on_training_end(self) -> None:
        """
        Triggered just before exiting the `learn()` method.
        """
        pass
# Here we have a simple callback that can only be called twice:
# In[62]:
class SimpleCallback(BaseCallback):
    """
    A simple callback that stops training on its second invocation.

    :param verbose: (int) Verbosity level 0: not output 1: info 2: debug
    """

    def __init__(self, verbose=0):
        super().__init__(verbose)
        # Tracks whether _on_step() has already run once
        self._called = False

    def _on_step(self):
        if self._called:
            print("callback - second call")
            # Returning False aborts training
            return False
        print("callback - first call")
        self._called = True
        # Returning True lets training continue
        return True
# In[63]:
# Train SAC on Pendulum: the callback aborts training on its second call.
model = SAC('MlpPolicy', 'Pendulum-v0', verbose=1)
model.learn(8000, callback=SimpleCallback())
# ## Exercise (8 minutes): Checkpoint Callback
#
# In RL, it is quite useful to save checkpoints during training, as we can end up with burn-in of a bad policy. It also useful if you want to see the progression over time.
#
# This is a typical use case for callback, as they can call the save function of the model, and observe the training over time.
# In[ ]:
import os
import numpy as np
# In[64]:
class CheckpointCallback(BaseCallback):
    """
    Callback for saving a model every ``save_freq`` steps.

    :param save_freq: Save a checkpoint every `save_freq` environment steps
    :param save_path: Path to the folder where the model will be saved.
    :param name_prefix: Common prefix to the saved models
    :param verbose: Whether to print additional infos or not
    """

    def __init__(self, save_freq: int, save_path: str, name_prefix: str = "rl_model", verbose: int = 0):
        super().__init__(verbose)
        self.save_freq = save_freq
        self.save_path = save_path
        self.name_prefix = name_prefix
        # NOTE: because it derives from `BaseCallback`,
        # this callback already has access to variables such as
        # `self.model` and `self.num_timesteps`.

    def _init_callback(self) -> None:
        # Create the save folder if needed
        os.makedirs(self.save_path, exist_ok=True)

    def _on_step(self) -> bool:
        if self.num_timesteps % self.save_freq == 0:
            # Bug fix: use os.path.join() so the path is correct even when
            # `save_path` has no trailing separator (plain string
            # concatenation silently produced a wrong path in that case).
            checkpoint_path = os.path.join(self.save_path, f"{self.name_prefix}_{self.num_timesteps}")
            self.model.save(checkpoint_path)
            if self.verbose > 0:
                # Respect the `verbose` flag instead of printing unconditionally
                print(f"Saving model checkpoint to {checkpoint_path}")
        return True
# Test your callback:
# In[65]:
log_dir = "./tmp/gym/"
# Create Callback
callback = CheckpointCallback(save_freq=1000, save_path="./tmp/gym/", verbose=1)
model = A2C("MlpPolicy", "CartPole-v1", verbose=1)
model.learn(total_timesteps=5000, callback=callback)
# In[66]:
get_ipython().system('ls "./tmp/gym/"')
# Note: The `CheckpointCallback` as well as other [common callbacks](https://stable-baselines3.readthedocs.io/en/master/guide/callbacks.html), like the `EvalCallback` are already included in Stable-Baselines3.
# ## Multiprocessing Demo
#
#
# [Vectorized Environments](https://stable-baselines3.readthedocs.io/en/master/guide/vec_envs.html) are a method for stacking multiple independent environments into a single environment. Instead of training an RL agent on 1 environment per step, it allows us to train it on n environments per step. This provides two benefits:
# * Agent experience can be collected more quickly
# * The experience will contain a more diverse range of states, it usually improves exploration
#
# Stable-Baselines provides two types of Vectorized Environment:
# - SubprocVecEnv which runs each environment in a separate process
# - DummyVecEnv which runs all environments on the same process
#
# In practice, DummyVecEnv is usually faster than SubprocVecEnv because of communication delays that subprocesses have.
# In[67]:
import time
from stable_baselines3.common.env_util import make_vec_env
# In[68]:
env = gym.make("Pendulum-v0")
n_steps = 1024
# In[69]:
start_time_one_env = time.time()
model = PPO("MlpPolicy", env, n_epochs=1, n_steps=n_steps, verbose=1).learn(int(2e4))
time_one_env = time.time() - start_time_one_env
# In[70]:
print(f"Took {time_one_env:.2f}s")
# In[71]:
start_time_vec_env = time.time()
# Create 16 environments
vec_env = make_vec_env("Pendulum-v0", n_envs=16)
# At each call to `env.step()`, 16 transitions will be collected, so we account for that for fair comparison
model = PPO("MlpPolicy", vec_env, n_epochs=1, n_steps=n_steps // 16, verbose=1).learn(int(2e4))
time_vec_env = time.time() - start_time_vec_env
# In[72]:
print(f"Took {time_vec_env:.2f}s")
# Note: the speedup is not linear but it is already significant.
# # Part IV: The importance of hyperparameter tuning
#
#
# When compared with Supervised Learning, Deep Reinforcement Learning is far more sensitive to the choice of hyper-parameters such as learning rate, number of neurons, number of layers, optimizer ... etc.
#
# Poor choice of hyper-parameters can lead to poor/unstable convergence. This challenge is compounded by the variability in performance across random seeds (used to initialize the network weights and the environment).
#
# ### Challenge (15 minutes): "Grad Student Descent" - Can you beat automatic hyperparameter tuning?
#
# The challenge is to find the best hyperparameters (max performance) for A2C on `CartPole-v1` with a limited budget of 20 000 training steps.
#
# You will compete against automatic hyperparameter tuning, good luck ;)
#
#
# Maximum reward: 500 on `CartPole-v1`
#
# The hyperparameters should work for different random seeds.
# In[73]:
budget = int(2e4)
# #### The baseline: default hyperparameters
# In[74]:
model = A2C("MlpPolicy", "CartPole-v1", seed=8, verbose=1).learn(budget)
# In[75]:
mean_reward, std_reward = evaluate_policy(model, model.get_env(), n_eval_episodes=50, deterministic=True)
print(f"mean_reward:{mean_reward:.2f} +/- {std_reward:.2f}")
# **Your goal is to beat that baseline and get closer to the optimal score of 500**
# Time to tune!
# In[77]:
import torch.nn as nn
# In[78]:
policy_kwargs = dict(
net_arch=[
dict(vf=[64, 64], pi=[64, 64]), # network architectures for actor/critic
],
ortho_init=True, # Orthogonal initialization,
activation_fn=nn.Tanh,
)
hyperparams = dict(
n_steps=5,
learning_rate=7e-4,
gamma=0.9999, # discount factor
gae_lambda=1.0, # Factor for trade-off of bias vs variance for Generalized Advantage Estimator
# Equivalent to classic advantage when set to 1.
max_grad_norm=0.5, # The maximum value for the gradient clipping
ent_coef=0.0, # Entropy coefficient for the loss calculation
)
model = A2C("MlpPolicy", "CartPole-v1", seed=8, verbose=1, **hyperparams).learn(budget)
# In[79]:
mean_reward, std_reward = evaluate_policy(model, model.get_env(), n_eval_episodes=50, deterministic=True)
print(f"mean_reward:{mean_reward:.2f} +/- {std_reward:.2f}")
# Hint - Recommended Hyperparameter Range
#
# ```python
# gamma = trial.suggest_float("gamma", 0.9, 0.99999, log=True)
# max_grad_norm = trial.suggest_float("max_grad_norm", 0.3, 5.0, log=True)
# gae_lambda = trial.suggest_float("gae_lambda", 0.8, 0.999, log=True)
# # from 2**3 = 8 to 2**10 = 1024
# n_steps = 2 ** trial.suggest_int("exponent_n_steps", 3, 10)
# learning_rate = trial.suggest_float("lr", 1e-5, 1, log=True)
# ent_coef = trial.suggest_float("ent_coef", 0.00000001, 0.1, log=True)
# ortho_init = trial.suggest_categorical("ortho_init", [False, True])
# # tiny: {"pi": [64], "vf": [64]}
# # default: {"pi": [64, 64], "vf": [64, 64]}
# net_arch = trial.suggest_categorical("net_arch", ["tiny", "default"])
# activation_fn = trial.suggest_categorical("activation_fn", [nn.Tanh, nn.ReLU])
# ```
# Simple example of hyperparameter tuning: https://github.com/optuna/optuna/blob/master/examples/rl/sb3_simple.py
#
# Complete example: https://github.com/DLR-RM/rl-baselines3-zoo
# # Conclusion
#
# What we have seen in this notebook:
# - SB3 101
# - Gym wrappers to modify the env
# - SB3 callbacks to access the RL agent
# - multiprocessing to speedup training
# - the importance of good hyperparameters
# - more complete tutorial: https://github.com/araffin/rl-tutorial-jnrr19
#
#
# In[ ]:
| [
"guillaume.ramelet@gmail.com"
] | guillaume.ramelet@gmail.com |
f44e841589a4b3b42e1a661a957d6c0af22c4b82 | acd9895390582b1cf709644deb9260e3effb7194 | /microblog/app/views.py | 94b2ce377a833ecf5500cc8e973c4205db7119bf | [] | no_license | jmccormack200/Flask | 07b09d5534a37d46be65c25c9876ba4cfbd89eba | 34f8bfa7a6e06987971123b5b09bc6fc02b36231 | refs/heads/master | 2021-01-01T05:47:05.790857 | 2015-03-16T02:33:36 | 2015-03-16T02:33:36 | 32,293,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,470 | py | from flask import render_template, flash, redirect, session, url_for, request, g
from flask.ext.login import login_user, logout_user, current_user, login_required
from app import app, db, lm, oid, models
from forms import LoginForm
from models import User, Message, ROLE_USER, ROLE_ADMIN
@lm.user_loader
def load_user(id):
    # Flask-Login user loader: resolve the id stored in the session cookie
    # back to a User row. NOTE: `id` shadows the builtin; kept to preserve
    # the callback's public signature.
    return User.query.get(int(id))
@app.before_request
def before_request():
    # Runs before every request: expose Flask-Login's current_user on the
    # request globals so views and templates can access it uniformly.
    g.user = current_user
@app.route('/')
@app.route('/index')
@login_required
def index():
    """Render the home page with the logged-in user and placeholder posts."""
    # Hard-coded posts until real ones are stored in the database.
    fake_posts = [
        {'author': {'nickname': 'John'},
         'body': 'Beautiful day in Portland!'},
        {'author': {'nickname': 'Susan'},
         'body': 'The Avengers movie was so cool!'},
    ]
    return render_template('index.html',
                           title='Home',
                           user=g.user,
                           posts=fake_posts)
@app.route('/login', methods = ['GET', 'POST'])
@oid.loginhandler
def login():
    # Already-authenticated users are sent straight to the index page.
    if g.user is not None and g.user.is_authenticated():
        return redirect(url_for('index'))
    form = LoginForm()
    if form.validate_on_submit():
        # Stash the "remember me" choice; it is consumed in after_login().
        session['remember_me'] = form.remember_me.data
        # Hand control to Flask-OpenID; on success it invokes after_login().
        return oid.try_login(form.openid.data, ask_for = ['nickname', 'email'])
    # GET request (or failed validation): render the sign-in form.
    return render_template('login.html',
        title = 'Sign In',
        form = form,
        providers = app.config['OPENID_PROVIDERS'])
@oid.after_login
def after_login(resp):
    # The OpenID provider must supply an email; otherwise retry the login.
    if resp.email is None or resp.email == "":
        flash('Invalid login. Please try again.')
        return redirect(url_for('login'))
    user = User.query.filter_by(email = resp.email).first()
    if user is None:
        # First time we see this email: create the account on the fly.
        nickname = resp.nickname
        if nickname is None or nickname == "":
            # Fall back to the local part of the email address.
            nickname = resp.email.split('@')[0]
        user = User(nickname = nickname, email = resp.email, role = ROLE_USER)
        db.session.add(user)
        db.session.commit()
    remember_me = False
    if 'remember_me' in session:
        # Stored by login(); pop it so it is only honored once.
        remember_me = session['remember_me']
        session.pop('remember_me', None)
    login_user(user, remember = remember_me)
    # Honor the ?next= redirect target when present, else go home.
    return redirect(request.args.get('next') or url_for('index'))
@app.route('/logout')
def logout():
    # End the Flask-Login session and return to the (login-protected) index,
    # which will redirect to the login page.
    logout_user()
    return redirect(url_for('index'))
@app.route('/user/<nickname>')
@login_required
def user(nickname):
    """Render a user's profile page with placeholder posts.

    Redirects to the index (with a flash message) when no user with the
    given nickname exists.
    """
    # Renamed local: it previously shadowed the view function's own name.
    profile = User.query.filter_by(nickname = nickname).first()
    # `is None` instead of `== None` (PEP 8 / identity check).
    if profile is None:
        flash('User ' + nickname + ' not found.')
        return redirect(url_for('index'))
    # Placeholder posts until real ones are stored in the database.
    posts = [
        {'author': profile, 'body': 'Test post #1'},
        {'author': profile, 'body': 'Test post #2'}
    ]
    return render_template('user.html',
        user = profile,
        posts = posts)
@app.route('/todo/api/v1.0/tasks', methods=['POST'])
def create_task():
    """Create a task from a JSON payload and return it with HTTP 201."""
    # BUGFIX: `abort` and `jsonify` are not in the module-level flask import,
    # so this endpoint raised NameError when hit; import them locally.
    from flask import abort, jsonify
    # A JSON body with at least a 'title' field is required.
    if not request.json or not 'title' in request.json:
        abort(400)
    # NOTE(review): `tasks` is not defined anywhere in this module -- it is
    # presumably meant to be a module-level list of existing tasks; confirm.
    task = {
        'id': tasks[-1]['id'] + 1,  # next id after the current last task
        'title': request.json['title'],
        'description': request.json.get('description', ""),
        'done': False
    }
    tasks.append(task)
    return jsonify({'task': task}), 201
"""
@app.route('/storeHere/', methods = ['POST'])
def storeHere():
if not request.json or not 'title' in request.json:
abort(400)
phrase = {'phrase': request.json.get('phrase', "")}
message = Message(body = phrase)
db.session.add(message)
db.session.commit()
return jsonify(phrase), 201
"""
@app.route('/showHere')
def route():
    """Dump every stored Message as the repr of the query result list."""
    all_messages = models.Message.query.all()
    return str(all_messages)
| [
"mccormack.wgsi@gmail.com"
] | mccormack.wgsi@gmail.com |
0ba9a6ae387be60fb7bfc64cca1af95592c8afdc | 1d682cab3c9444e29da8402d64fe8c15344d6521 | /ml_engine/trainer/task.py | bcfed2ab652db9cbf62dcbe672918ed003aba67f | [] | no_license | carlespoles/DSCI6051-student | 9d63e05514dd62d0c176f83fec161fb169b0fe05 | 146cab522a6a335f94b8e65a5e1acf5ef3eea60f | refs/heads/master | 2021-01-19T06:48:10.914761 | 2018-01-16T23:45:43 | 2018-01-16T23:45:43 | 87,500,722 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,622 | py | from __future__ import print_function
import numpy as np
import pandas as pd
import os
import glob
import pickle
import gzip
import h5py
#import dl_functions
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, model_from_json
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.preprocessing.image import array_to_img, img_to_array, load_img
from keras.callbacks import LearningRateScheduler, ModelCheckpoint
from sklearn.metrics import confusion_matrix, roc_curve, roc_auc_score
from sklearn.cross_validation import train_test_split
#from matplotlib import pyplot as plt
#%matplotlib inline
from tensorflow.python.lib.io import file_io
# Defining an architecture.
def cnn_model_v_0(IMG_SIZE, num_classes=2):
    """Build the v0 CNN: three conv/pool/dropout stages plus a dense head.

    :param IMG_SIZE: side length of the square RGB input images.
    :param num_classes: number of output classes (default 2: good/bad image).
    :return: an uncompiled Keras ``Sequential`` model.
    """
    # Keep the module-level constant in sync for backward compatibility with
    # code that reads NUM_CLASSES after calling this function.
    global NUM_CLASSES
    NUM_CLASSES = num_classes
    model = Sequential()
    model.add(Convolution2D(32, (3, 3), input_shape=(IMG_SIZE, IMG_SIZE, 3),
                            activation='relu'))
    model.add(Convolution2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Convolution2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    # Softmax head over the (parameterized) class count.
    model.add(Dense(num_classes, activation='softmax'))
    return model
def cnn_model_v_1(IMG_SIZE, num_classes=2):
    """Build the v1 CNN: three conv/pool stages plus a small dense head.

    :param IMG_SIZE: side length of the square RGB input images.
    :param num_classes: number of output classes (default 2: good/bad image).
    :return: an uncompiled Keras ``Sequential`` model.
    """
    # Keep the module-level constant in sync for backward compatibility with
    # code that reads NUM_CLASSES after calling this function.
    global NUM_CLASSES
    NUM_CLASSES = num_classes
    model = Sequential()
    model.add(Convolution2D(32, (3, 3), input_shape=(IMG_SIZE, IMG_SIZE, 3),
                            activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))
    # Softmax head over the (parameterized) class count.
    model.add(Dense(num_classes, activation='softmax'))
    return model
# Load data from pickle file located on a bucket.
# with file_io.FileIO("gs://wellio-kadaif-tasty-images-project-pre-processed-images/pre_processed_images/image_data_20000_100.txt", 'r') as f:
# X, y = pickle.load(f)
with file_io.FileIO("gs://wellio-kadaif-tasty-images-project-pre-processed-images/pre_processed_images/image_data_20000_25.txt", 'r') as f:
X, y = pickle.load(f)
# datagen = ImageDataGenerator(rotation_range=40,
# width_shift_range=0.1,
# height_shift_range=0.1,
# shear_range=0.4,
# zoom_range=0.1,
# horizontal_flip=False,
# fill_mode='nearest')
datagen = ImageDataGenerator(
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# Creating a train, test split.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
random_state=42)
# Creating a validation split out of the training set.
X_train_fit, X_val, y_train_fit, y_val = train_test_split(X_train, y_train,
test_size=0.1,
random_state=42)
# The labels need to be converted to categorical.
# Note that we have 2 categories: good (1) and bad (0) images.
y_train_fit_sparse = np_utils.to_categorical(y_train_fit, 2)
y_val_sparse = np_utils.to_categorical(y_val, 2)
y_test_sparse = np_utils.to_categorical(y_test, 2)
datagen.fit(X_train)
# Creating an instance of a CNN model.
# The image size is 100.
# IMG_SIZE = 100
IMG_SIZE = 25
model_1 = model_1 = cnn_model_v_1(IMG_SIZE)
model_1.compile(loss='binary_crossentropy', optimizer='rmsprop',
metrics=['accuracy'])
model_1.summary()
model_1.fit_generator(datagen.flow(X_train_fit, y_train_fit_sparse,
batch_size=128), steps_per_epoch=len(X_train_fit),
epochs=5, validation_data=(X_val, y_val_sparse))
score = model_1.evaluate(X_test, y_test_sparse, verbose=1)
print('Test loss: {:0,.4f}'.format(score[0]))
print('Test accuracy: {:.2%}'.format(score[1]))
# List of predictions.
predicted_images = []
for i in model_1.predict(X_test):
predicted_images.append(np.where(np.max(i) == i)[0])
print("AUC: {:.2%}\n".format(roc_auc_score(y_test, predicted_images)))
# Creating a confusion matrix.
# plt.figure(figsize=(8, 8))
# cf = dl_functions.show_confusion_matrix(confusion_matrix(y_test, predicted_images), ['Class 0', 'Class 1'])
# plt.savefig('confusion_matrix.png')
# List of probabilities
predictions_probability = model_1.predict_proba(X_test)
# Creating ROC curve.
# plt.figure(figsize=(7, 7))
# rc = dl_functions.plot_roc(y_test, predictions_probability[:,1], "CNN - " + str(len(model_1.layers)) + " layers | # images: " + str(len(X)) + " | image size: " + str(IMG_SIZE), "Tasty Food Images")
# plt.savefig('roc_curve.png')
# model.save('model.h5')
# job_dir='gs://kadaif.getwellio.com'
#
# # Save model.h5 on to google storage
# with file_io.FileIO('model.h5', mode='r') as input_f:
# with file_io.FileIO(job_dir + '/model.h5', mode='w+') as output_f:
# output_f.write(input_f.read())
| [
"carles.poles@gmail.com"
] | carles.poles@gmail.com |
0de1f10b578a38d3355ba5dbb706cda6b0333422 | 38f2117da647d98404bffe0e74df73a70cfb0417 | /imports2.py | c548af73bf0eab0ca6dd15156352cd9f61d3b817 | [] | no_license | laandreagates/Photobomb-Malware-Analysis-Gateway- | ae1cfef99b334e96dcc292a4b178e5810db671e3 | 83ae089f335213c2e527dfc7d7413dea51842de6 | refs/heads/master | 2020-09-14T04:29:12.183834 | 2019-11-21T00:20:47 | 2019-11-21T00:20:47 | 223,017,619 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,180 | py | import pefile
import os
import json
# Defining imports to be used in the main function of the program when called
def Ins(file_name):
    """Print the names of the DLLs imported by a PE (e.g. .exe/.dll) file.

    :param file_name: path to the PE file to inspect.
    """
    # BUGFIX: the body previously referenced an undefined `file_path`
    # (left over from a commented-out folder-walking loop) instead of the
    # `file_name` parameter, raising NameError on every call.
    pe = pefile.PE(file_name)
    for entry in pe.DIRECTORY_ENTRY_IMPORT:
        # Each entry describes one imported DLL; print its name.
        print([(str(entry.dll))])
| [
"photobombers@photobombers-2a86-wd-login.c.cloudycluster19-den-2254.internal"
] | photobombers@photobombers-2a86-wd-login.c.cloudycluster19-den-2254.internal |
fa8ada0cae4bb97eb172ccff53f4220c9d078288 | 9b87ff811680f2b0c6eb725fcabff3652f81a438 | /app.py | 070407668cd807299217a142a10171ba8ceb9dd6 | [] | no_license | vichus1995/Twitter-Feed | 679db26891209deae08001f660bc632b503e26dd | 1c116c40c96aae0dbbc3a310dd78b6d07b588824 | refs/heads/master | 2022-12-25T04:28:52.509743 | 2020-10-02T14:32:36 | 2020-10-02T14:32:36 | 300,643,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,983 | py | import tweepy
import json
import pyodbc
from decouple import config
api_key = config('api_key')
api_secret =config('api_secret')
access_key = config('access_key')
access_secret = config('access_secret')
server = config('server')
db_name = config('db_name')
user = config('user')
password =config('password')
sql = '''EXEC dbo.Insert_Twitter_Data @tweetinfo =?'''
def insert_data(tweet_json):
    """Persist one tweet (JSON string) via the Insert_Twitter_Data proc.

    Connection credentials come from the module-level config values.
    Database errors are printed (best-effort insert, stream keeps running).
    """
    db_conn = None
    cursor = None
    try:
        db_conn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER='+server+';DATABASE='+db_name+';UID='+user+';PWD='+ password)
        cursor = db_conn.cursor()
        cursor.execute(sql,tweet_json)
        cursor.commit()
    except pyodbc.Error as e:
        print(e)
    finally:
        # BUGFIX: cursor/db_conn were previously closed outside the try
        # block, raising NameError whenever connect() itself failed.
        if cursor is not None:
            cursor.close()
        if db_conn is not None:
            db_conn.close()
class myStreamListener(tweepy.StreamListener):
    """Tweepy stream listener that stores selected tweet fields via insert_data."""

    def on_connect(self):
        # Called once the stream connection is established.
        print('Connected to Twitter')

    def on_error(self, status_code):
        # BUGFIX: tweepy passes the HTTP status code to on_error(); the
        # previous signature took no argument (TypeError) and then read
        # an undefined `status_code` name.
        if status_code != 200:
            print('Could not connect to Twitter')
            return False  # returning False disconnects the stream

    def on_data(self, data):
        try :
            raw_data = json.loads(data)
            # Only status payloads carry a 'text' field (skips deletes etc.).
            if 'text' in raw_data:
                tweet = {}
                tweet['username'] = raw_data['user']['name']
                tweet['text'] = raw_data['text']
                tweet['created_time'] = raw_data['created_at']
                tweet['retweets_count'] = raw_data['retweet_count']
                tweet['location'] = raw_data['user']['location']
                tweet['place'] = raw_data['place']
                insert_data(json.dumps(tweet))
        except pyodbc.Error as e:
            print(e)
if __name__ =='__main__':
auth = tweepy.OAuthHandler(api_key,api_secret)
auth.set_access_token(access_key,access_secret)
api = tweepy.API(auth,wait_on_rate_limit=True)
listener = myStreamListener(api)
stream = tweepy.Stream(auth,listener =listener)
stream.filter(track = ['Football'], languages = ['en'])
| [
"Vishnu.Suresh@gds.ey.com"
] | Vishnu.Suresh@gds.ey.com |
26c6f36e7575a799ab2217c169fad85d7c9c8f89 | fe7fb3e93b88f467034a32cec42c1a706045df69 | /install.py | d93078c1e4fe4330ad861b5068bd3b248a10d71e | [
"MIT"
] | permissive | marek1914/PyTrinamicMicro | dddd007ce108d7b21bc9ceb2f1f8e0aa5f32067d | f6495013e47f9b465410b2f6d06e7f5abe59c426 | refs/heads/master | 2023-04-11T17:18:48.815664 | 2021-04-20T01:53:30 | 2021-04-20T01:53:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,166 | py | '''
Install script to copy the required files in correct structure on the SD card.
Created on 13.10.2020
@author: LK
'''
import argparse
import os
import shutil
import logging
MPY_CROSS = "mpy-cross"
# Initialize install logger
logger = logging.getLogger(__name__)
formatter = logging.Formatter("[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s")
logger.setLevel(logging.INFO)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.INFO)
consoleHandler.setFormatter(formatter)
logger.addHandler(consoleHandler)
def clean_pytrinamic(path):
    # Remove a previously installed PyTrinamic tree from the target (SD card).
    # rmtree is tolerant of the tree not existing (ignore_errors).
    logger.info("Cleaning PyTrinamic ...")
    shutil.rmtree(os.path.join(path, "PyTrinamic"), ignore_errors=True)
    logger.info("PyTrinamic cleaned.")
def clean_motionpy(path):
    # Remove the installed MotionPy platform tree from the target (SD card).
    logger.info("Cleaning MotionPy ...")
    shutil.rmtree(os.path.join(path, "PyTrinamicMicro", "platforms", "motionpy"), ignore_errors=True)
    logger.info("MotionPy cleaned.")
def clean_pytrinamicmicro_api(path):
    """Remove the installed PyTrinamicMicro API files from the target."""
    logger.info("Cleaning PyTrinamicMicro API ...")
    # Whole API sub-packages first.
    shutil.rmtree(os.path.join(path, "PyTrinamicMicro", "connections"), ignore_errors=True)
    shutil.rmtree(os.path.join(path, "PyTrinamicMicro", "examples"), ignore_errors=True)
    # Then the individual top-level API modules, if present.
    for api_file in ("__init__.py", "PyTrinamicMicro.py", "tmcl_bootloader.py",
                     "TMCL_Bridge.py", "TMCL_Slave.py"):
        target = os.path.join(path, "PyTrinamicMicro", api_file)
        if(os.path.exists(target)):
            os.remove(target)
    logger.info("PyTrinamicMicro API cleaned.")
def clean_pytrinamicmicro(path):
    # Remove the whole installed PyTrinamicMicro tree from the target.
    logger.info("Cleaning PyTrinamicMicro ...")
    shutil.rmtree(os.path.join(path, "PyTrinamicMicro"), ignore_errors=True)
    logger.info("PyTrinamicMicro cleaned.")
def clean_lib(path):
    # Remove the installed third-party helper libraries (logging, argparse)
    # from the target.
    logger.info("Cleaning libraries ...")
    logger.info("Cleaning logging ...")
    shutil.rmtree(os.path.join(path, "logging"), ignore_errors=True)
    logger.info("logging cleaned.")
    logger.info("Cleaning argparse ...")
    shutil.rmtree(os.path.join(path, "argparse"), ignore_errors=True)
    logger.info("argparse cleaned.")
    logger.info("Libraries cleaned.")
def clean_full(path):
    # Full clean: remove every installed component from the target.
    logger.info("Cleaning ...")
    clean_pytrinamic(path)
    clean_pytrinamicmicro(path)
    clean_lib(path)
    logger.info("Cleaned.")
def compile_recursive(path):
    """Byte-compile every .py file under *path* in place with mpy-cross.

    Relies on the module-level MPY_CROSS binary being on the PATH; output
    .mpy files are written next to their sources.
    """
    for dirpath, dirnames, filenames in os.walk(path):
        for filename in [f for f in filenames if f.endswith(".py")]:
            current = os.path.join(dirpath, filename)
            logger.info("Compiling {}".format(current))
            # BUGFIX: quote the path so files or directories containing
            # spaces do not break the shell command line.
            os.system('{} "{}"'.format(MPY_CROSS, current))
def install_pytrinamic(path, compile, clean):
    # Install the PyTrinamic package onto the target, optionally
    # pre-compiling it to .mpy and/or cleaning the old install first.
    if(clean):
        clean_pytrinamic(path)
    base = os.path.join("PyTrinamic", "PyTrinamic")
    logger.info("Installing PyTrinamic ...")
    if(compile):
        logger.info("Compiling PyTrinamic ...")
        compile_recursive(base)
        logger.info("PyTrinamic compiled.")
    logger.info("Copying PyTrinamic ...")
    # When compiled, copy only .mpy files (ignore .py); otherwise the reverse.
    shutil.copytree(base, os.path.join(path, "PyTrinamic"), ignore=shutil.ignore_patterns("*.py" if compile else "*.mpy"))
    logger.info("PyTrinamic copied.")
    logger.info("PyTrinamic installed.")
def install_motionpy_boot(path, compile, clean):
    # Copy only the MotionPy boot.py to the target root.
    # `clean` is unused for this single-file install; `del` documents that.
    del clean
    logger.info("Installing MotionPy boot ...")
    shutil.copy(os.path.join("PyTrinamicMicro", "platforms", "motionpy", "boot.py"), path)
    logger.info("MotionPy boot installed.")
def install_motionpy_main(path, compile, clean):
    # Copy only the MotionPy main.py to the target root.
    # `clean` is unused for this single-file install; `del` documents that.
    del clean
    logger.info("Installing MotionPy main ...")
    shutil.copy(os.path.join("PyTrinamicMicro", "platforms", "motionpy", "main.py"), path)
    logger.info("MotionPy main installed.")
def install_motionpy(path, compile, clean):
    # Install the MotionPy platform package onto the target, optionally
    # pre-compiling it to .mpy and/or cleaning the old install first.
    if(clean):
        clean_motionpy(path)
    base = os.path.join("PyTrinamicMicro", "platforms", "motionpy")
    logger.info("Installing platform MotionPy ...")
    # Ensure the parent package directory exists on the target.
    os.makedirs(os.path.join(path, "PyTrinamicMicro", "platforms"), exist_ok=True)
    if(compile):
        logger.info("Compiling MotionPy ...")
        compile_recursive(base)
        logger.info("MotionPy compiled.")
    logger.info("Copying MotionPy ...")
    # When compiled, copy only .mpy files (ignore .py); otherwise the reverse.
    shutil.copytree(base, os.path.join(path, "PyTrinamicMicro", "platforms", "motionpy"), ignore=shutil.ignore_patterns("*.py" if compile else "*.mpy"))
    logger.info("MotionPy copied.")
    logger.info("MotionPy installed.")
def install_pytrinamicmicro_api(path, compile, clean):
    """Install only the PyTrinamicMicro API (connections + core modules)."""
    if(clean):
        clean_pytrinamicmicro_api(path)
    logger.info("Installing PyTrinamicMicro API ...")
    shutil.copytree(os.path.join("PyTrinamicMicro", "connections"), os.path.join(path, "PyTrinamicMicro", "connections"))
    # Copy the individual top-level API modules.
    dest = os.path.join(path, "PyTrinamicMicro")
    for api_file in ("__init__.py", "PyTrinamicMicro.py", "tmcl_bootloader.py",
                     "TMCL_Bridge.py", "TMCL_Slave.py"):
        shutil.copy(os.path.join("PyTrinamicMicro", api_file), dest)
    logger.info("PyTrinamicMicro API installed.")
def install_pytrinamicmicro(path, compile, clean):
    # Install the full PyTrinamicMicro package onto the target, optionally
    # pre-compiling it to .mpy and/or cleaning the old install first.
    if(clean):
        clean_pytrinamicmicro(path)
    base = "PyTrinamicMicro"
    logger.info("Installing PyTrinamicMicro ...")
    if(compile):
        logger.info("Compiling PyTrinamicMicro ...")
        compile_recursive(base)
        logger.info("PyTrinamicMicro compiled.")
    logger.info("Copying PyTrinamicMicro ...")
    # When compiled, copy only .mpy files (ignore .py); otherwise the reverse.
    shutil.copytree(base, os.path.join(path, "PyTrinamicMicro"), ignore=shutil.ignore_patterns("*.py" if compile else "*.mpy"))
    logger.info("PyTrinamicMicro copied.")
    logger.info("PyTrinamicMicro installed.")
def install_lib(path, compile, clean):
    """Install the bundled pycopy-lib libraries (logging, argparse)."""
    if(clean):
        clean_lib(path)
    logger.info("Installing libraries ...")
    # Both libraries follow the same layout: pycopy-lib/<name>/<name>.
    for lib in ("logging", "argparse"):
        logger.info("Installing {} ...".format(lib))
        base = os.path.join("pycopy-lib", lib, lib)
        if(compile):
            logger.info("Compiling {} ...".format(lib))
            compile_recursive(base)
            logger.info("{} compiled.".format(lib))
        logger.info("Copying {} ...".format(lib))
        # When compiled, copy only .mpy files (ignore .py); otherwise the reverse.
        shutil.copytree(base, os.path.join(path, lib), ignore=shutil.ignore_patterns("*.py" if compile else "*.mpy"))
        logger.info("{} copied.".format(lib))
        logger.info("{} installed.".format(lib))
    logger.info("Libraries installed.")
def install_full(path, compile, clean):
    # Full install: every component (PyTrinamic, PyTrinamicMicro, libraries).
    logger.info("Installing full ...")
    install_pytrinamic(path, compile, clean)
    install_pytrinamicmicro(path, compile, clean)
    install_lib(path, compile, clean)
    logger.info("Fully installed.")
SELECTION_MAP = {
"full": install_full,
"pytrinamic": install_pytrinamic,
"pytrinamicmicro": install_pytrinamicmicro,
"pytrinamicmicro-full": install_pytrinamicmicro,
"pytrinamicmicro-api": install_pytrinamicmicro_api,
"motionpy": install_motionpy,
"motionpy-boot": install_motionpy_boot,
"motionpy-main": install_motionpy_main,
"lib": install_lib
}
# Argument parsing and mode execution
parser = argparse.ArgumentParser(description='Install the required files in correct structure on the SD card.')
parser.add_argument('path', metavar="path", type=str, nargs=1, default=".",
help='Path to the root of the SD card (default: %(default)s).')
parser.add_argument('-s', "--selection", dest='selection', action='store', nargs="*", type=str.lower,
choices=SELECTION_MAP.keys(),
default=['full'], help='Install selection (default: %(default)s).')
parser.add_argument('-c', "--clean", dest='clean', action='store_true', help='Clean module target directory before installing it there (default: %(default)s).')
parser.add_argument("--compile", dest='compile', action='store_true', help='Compile every module (default: %(default)s).')
args = parser.parse_args()
os.makedirs(args.path[0], exist_ok=True)
for s in args.selection:
SELECTION_MAP.get(s)(args.path[0], args.compile, args.clean)
logger.info("Done.")
| [
"leonard@kug.is"
] | leonard@kug.is |
3209f33aab748b6e290677e9e4cf1db58c6ce157 | 9067cb717725edab5fd1d78a307a8088ccd91373 | /main.py | 9ecb7cc0e30bdd17f7c84b6b9572c87df9045ead | [] | no_license | ngocson98/Dice-Rolling-Python | 55c8f00d5eb0ed46f7586c7a34fb62c1dc0932fc | 62b3e48c3dc1191129124dd385734a901f1b369f | refs/heads/main | 2023-08-18T03:34:21.537897 | 2021-09-18T13:30:56 | 2021-09-18T13:30:56 | 407,859,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,256 | py | # import libraries
import tkinter
from PIL import Image, ImageTk
import random
# top-level widget which represents the main window of an application
root = tkinter.Tk()
root.geometry('400x400')
root.title('Roll Dice By SON BK')
# Adding label into the frame
BlankLine = tkinter.Label(root, text="")
BlankLine.pack()
# adding label with different font and formatting
HeadingLabel = tkinter.Label(root, text="Hello from SON BK!", fg="light green", bg= "dark green", font="Helvetica 16 bold italic")
HeadingLabel.pack()
# images
dice = ['dice1.png', 'dice2.png', 'dice3.png', 'dice4.png', 'dice5.png', 'dice6.png']
# simulating the dice with random numbers between 0 to 6 and generating image
DiceImage = ImageTk.PhotoImage(Image.open(random.choice(dice)))
# construct a label widget for image
ImageLabel = tkinter.Label(root, image=DiceImage)
ImageLabel.image = DiceImage
# packing a widget in the parent widget
ImageLabel.pack(expand=True)
# Design Button
def roll_dice():
DiceImage = ImageTk.PhotoImage(Image.open(random.choice(dice)))
ImageLabel.configure(image=DiceImage) #update image
ImageLabel.image = DiceImage
button = tkinter.Button(root, text="Roll Dice", fg="blue", command=roll_dice)
button.pack(expand=True)
root.mainloop() | [
"noreply@github.com"
] | ngocson98.noreply@github.com |
0a4ca2a176bc9ea68d88d726b4181f2b11b1ca02 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_208/ch6_2020_03_09_12_38_14_729372.py | dc720271c51080ed003778f9f1ed2ba1e4330096 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | def c_em_f (temp_c):
temp_f=(9*temp_c)/5 + 32
return temp_f
celsius=30
fahrenheit=c_em_f(celsius)
print(fahrenheit) | [
"you@example.com"
] | you@example.com |
23bf1bbacfdbf74877a293f253aeb9eeef25e017 | c469cf283694f4f3506f3aa6b099da429dfdab8b | /Clase9/Ejercicio6.py | d3298feb8c34562744f3ab3e11114405307dd03b | [] | no_license | JuanDa15/Computacion-Grafica | efaa0afe7b2d40fff869d52596001006b2fad82f | b44064b25444f212232bce4ec9114c76b704ffc7 | refs/heads/master | 2021-01-14T05:36:30.751964 | 2020-06-19T06:17:21 | 2020-06-19T06:17:21 | 242,614,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,888 | py | import pygame
# Exercise 6: draw the visible faces of a 3-D solid by chaining polar
# conversions and translations from the course helper library.
from LibreriaGeneral import *
pygame.init()
#----------------------------------------------------
# Window setup; `middle` is used as the Cartesian origin on screen.
width = 1280
high =920
window = pygame.display.set_mode([width,high])
middle = [width/2,high/2]
end = False
# First face: points built as (radius, angle) pairs — presumably degrees,
# defined in LibreriaGeneral; verify there — then mapped to screen coords
# relative to an anchor point.
a1 = [0,0]
a2 = PolarToCartesian(150,150)
a3 = Traslacion(a2,0,75)
a4 = PolarToCartesian(75,330)
a1s = CartToScreen(middle,a1)
a2s = CartToScreen(middle,a2)
a3s = CartToScreen(middle,a3)
a4s = CartToScreen(a3s,a4)
PrimerCara = [a1s,a2s,a3s,a4s]
# Second face — reuses corners of the first so shared edges stay joined.
b1 = PolarToCartesian(75,30)
b2 = PolarToCartesian(75,30)
b1s = CartToScreen(a1s,b1)
b2s = CartToScreen(a4s,b2)
SegundaCara = [a4s,a1s,b1s,b2s]
# Third polygon.
c1 = PolarToCartesian(75,30)
c1s = CartToScreen(a3s,c1)
TercerCara = [a3s,a4s,b2s,c1s]
# Fourth polygon.
d1 = Traslacion(b1,0,150)
d2 = PolarToCartesian(150,150)
d1s = CartToScreen(middle,d1)
d2s = CartToScreen(d1s,d2)
CuartaCara = [c1s,b2s,b1s,d1s,d2s]
# Fifth face.
e1 = PolarToCartesian(75,30)
e2 = Traslacion(e1,0,150)
e1s = CartToScreen(b1s,e1)
e2s = CartToScreen(b1s,e2)
QuintaCara = [b1s,e1s,e2s,d1s]
# Sixth face.
f1 = PolarToCartesian(150,150)
f1s = CartToScreen(e2s,f1)
SextaCara = [e2s,d1s,d2s,f1s]
# Seventh face — left disabled by the author (it references d3s, which is
# never defined above).
#g1 = PolarToCartesian(50,150)
#g1s = CartToScreen(d3s,g1)
#SeptimaCara = [d3s,d2s,f1s,g1s]
if __name__ == "__main__":
    #drawPlane(window,middle)
    # Event loop: exit on window close; redraw every face each frame.
    while not end:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                end = True
        drawPolygon(window,SelectColor('Red'),PrimerCara)
        drawPolygon(window,SelectColor('Red'),SegundaCara)
        drawPolygon(window,SelectColor('Red'),TercerCara)
        drawPolygon(window,SelectColor('Red'),CuartaCara)
        drawPolygon(window,SelectColor('Red'),QuintaCara)
        drawPolygon(window,SelectColor('Red'),SextaCara)
        #drawPolygon(window,SelectColor('Red'),SeptimaCara)
"jdoo1115@gmail.com"
] | jdoo1115@gmail.com |
61f13c7b53128c5b7fb4c0e3f55e360007e6508f | 888df2b6fcdd50442f7f174268617b3f161a3581 | /test_unittest.py | eb11c7b724b73a2283e482ab092d3ecec68f45dd | [] | no_license | onethousandth/MyTest | a85764353c6bf8bd8d39e727245ec55ed7bfc108 | 4397455ac200a7cb0289301e3baa12911ba77729 | refs/heads/master | 2020-12-27T01:16:30.005275 | 2020-04-30T06:28:50 | 2020-04-30T06:28:50 | 237,715,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | # -*- coding:utf-8 -*-
# Copyright (C) AddyXiao <addyxiao@msn.cn>
#
#
#
"A Unit Test Case"
__author__="AddyXiao"
__author_email__="addyxiao@msn.cn"
import test
import unittest
| [
"addyxiao@msn.cn"
] | addyxiao@msn.cn |
1a7c1a00cd1b6f7cbe1d42ea7cd82be6881386ec | ded1371ac9b046096668bc08246154b3cdecc1ee | /ipconfig_all.py | bfb9b5ede983186ed28bae385052e6b3291bb31c | [] | no_license | tkl154t/PythonScript | 80c13d1d55ce01bf402c6cc7787259b11340a5d9 | fcabe4d92619a8e8008f0851e85b6be5472cb15a | refs/heads/master | 2023-07-18T09:12:39.450152 | 2021-09-02T12:27:33 | 2021-09-02T12:27:33 | 402,411,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,881 | py | from tkinter import *
from tkinter import ttk
import subprocess
def ipconfig_dump():
    """Run ``ipconfig /all`` (Windows) and return the parsed interface records.

    Returns:
        list: one entry per interface, each of the form
        ``[interface_name, [field_name, field_value], ...]``.
    """
    output = subprocess.check_output('ipconfig /all').decode()
    return _parse_ipconfig_output(output)


def _parse_ipconfig_output(text):
    """Parse raw ``ipconfig /all`` text into per-interface records.

    Sections are separated by blank lines (CRLF pairs) and come in
    header/body pairs.  The original indexed ``parse[i + 1]``
    unconditionally, which raised IndexError when the output held an odd
    number of sections; the stride now stops one short of the end.  It
    also shadowed the builtin ``list``.
    """
    sections = text.split('\r\n\r\n')
    records = []
    # Pair each section header with the section that follows it.
    for i in range(0, len(sections) - 1, 2):
        block = (sections[i] + '\n' + sections[i + 1]).strip()
        lines = block.split('\n')
        record = [lines[0]]
        for line in lines[1:]:
            # ipconfig aligns values at a fixed column: the field name
            # occupies columns 0-37, the value starts at column 39.
            record.append([line[:38].strip(), line[39:].strip()])
        records.append(record)
    return records
class App(Frame):
    """Tk frame that renders the parsed `ipconfig /all` output in a tree view."""

    def __init__(self, master):
        Frame.__init__(self, master)
        self.master = master
        self.initUI()

    def initUI(self):
        """Configure the frame, then fill a two-column tree with the data."""
        self.app_config()
        # =============================
        view = ttk.Treeview(self)
        view.pack(fill=BOTH, expand=TRUE)
        view["columns"] = ('one')
        view.column("#0", width=270, minwidth=350, stretch=YES)
        view.column("one", width=150, minwidth=200, stretch=YES)
        view.heading("#0", text="Name", anchor=W)
        view.heading("one", text="Values", anchor=W)
        # One top-level node per network interface, one child row per field.
        for record in ipconfig_dump():
            node = view.insert('', END, text=record[0])
            for field in record[1:]:
                view.insert(node, END, text=field[0], values=(field[1],))

    def app_config(self):
        """Set the window title, frame colour, and packing."""
        self.master.title('ipconfig /all')
        self.configure(background='blue')
        self.pack(fill=BOTH, expand=True)
# Script entry: build the Tk root window and run the application loop.
root = Tk()
app = App(root)
root.mainloop()
| [
"52850191+tkl154t@users.noreply.github.com"
] | 52850191+tkl154t@users.noreply.github.com |
71a9b78dfb41a017f510e227810fec032450109d | c0a8241b569f089c4fcf35fb6bb3069f125fc6b3 | /newFromTemplate.py | b0602ed504e218587141479a6ea0496543622fdb | [] | no_license | muscleorange/NewFromTemplate | bbb0d18041b7231ac9109623573866e1febba6dc | 143adcc32014c21c5048f2e6827add6f1e6bc7b9 | refs/heads/master | 2020-05-17T10:40:09.643522 | 2015-01-25T06:48:08 | 2015-01-25T06:48:08 | 29,805,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,716 | py | import os, codecs,time, sublime, sublime_plugin
PLUGIN_NAME = 'NewFromTemplate'  # package/settings-file base name
SYNTAX_KEY = 'syntax'  # settings key mapping file extension -> syntax file
DEFAULT_SYNTAX_FILE = 'Packages/HTML/HTML.tmLanguage'  # fallback syntax
class NewFromTemplateCommand(sublime_plugin.TextCommand):
    """Open a new buffer pre-filled with the rendered template for *ext*."""

    def run(self, edit, ext):
        new_view = sublime.active_window().new_file()
        new_view.insert(edit, 0, rend_template(get_template_file(ext)))
        new_view.set_syntax_file(get_syntax_file(ext))
        if ext == 'java':
            new_view.set_name("example.java")
def get_settings():
    """Load this plugin's Sublime settings file."""
    return sublime.load_settings(PLUGIN_NAME + '.sublime-settings')
#get syntax file
def get_syntax_file(ext):
    """Return the syntax file configured for *ext*, or the default one."""
    syntax_map = get_settings().get(SYNTAX_KEY)
    if syntax_map is not None and ext in syntax_map:
        return syntax_map.get(ext)
    return DEFAULT_SYNTAX_FILE
#get template directory path
def get_templates_path():
    """Absolute path of this plugin's bundled ``templates`` directory."""
    return os.path.join(sublime.packages_path(), PLUGIN_NAME, 'templates')
#read template file
def get_template_file(ext):
    """Return the template body for *ext*, or '' when no template exists."""
    tmpl_path = os.path.join(get_templates_path(), ext + '.tmpl')
    if not os.path.exists(tmpl_path):
        return ''
    return read_file(tmpl_path)
#read a file
def read_file(path):
return codecs.open(path,"r","utf-8").read()
#rend a template file, replace variables with values
def rend_template(tmpl_str):
settings = get_settings()
author = settings.get('author')
for atrr in author:
tmpl_str = tmpl_str.replace('$%s$' % atrr, author.get(atrr))
date_format = settings.get('date_format', '%d/%m/%Y')
tmpl_str = tmpl_str.replace('$date$', time.strftime(date_format))
encoding = settings.get('encoding', 'UTF-8')
return tmpl_str.replace('$encoding$', encoding) | [
"muscleorangee@gmail.com"
] | muscleorangee@gmail.com |
8b1d364b81e74cdb7906ce10f1dbc03a04f5eaae | edae0a1bd47af429019404520410b231b19b0e58 | /iSkiRussiaPython.py | 3273c11ac641e3f94f1eaddfaa59c4da786bfa98 | [] | no_license | akseryanin/iSkiRussia | ff7cdd1552132a6b2dc8637053cf79ea9dfb4711 | cc3b3dd1d9135b8182d086c25e333ea6446f9ad0 | refs/heads/master | 2021-02-08T23:54:58.119683 | 2020-03-01T19:52:58 | 2020-03-01T19:52:58 | 244,213,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,543 | py | from tkinter import *
from random import *
import pyowm
import time
def main():
    """Load slope data (sorted by distance from the user) and build the UI."""
    user_x, user_y = 55.553236, 37.552202
    slope_data = LoadingAndReturnInformation(user_x, user_y)
    return GetInformationsOfSlopes(slope_data)
def LoadingAndReturnInformation(UserCoordinatX, UserCoordinatY):
    """Show a loading screen while reading DataOfSlopes.txt.

    The file's first line holds the slope count; each following line is
    "name x y".  Returns a list of (distance, name, x, y) tuples sorted by
    distance from the user's coordinates.  Sets the global CounterOfSlopes.
    """
    global CounterOfSlopes
    LI = PhotoImage(file="LoadingImage.gif")
    LoadingImage = canvas.create_image(_width // 2, _height // 2, image=LI)
    LoadingText = canvas.create_text(_width // 2,_height // 2,text="iSki Russia",font="Verdana 30", fill="red")
    LoadingPanelContur = canvas.create_rectangle(0, _height - 25, _width + 1, _height + 1, fill="white", outline="white")
    LoadingPanelFill = canvas.create_rectangle(0, _height - 25, 0, _height + 1, fill="green", outline="green")
    canvas.pack()
    DataOfSlopeInformation = []
    DataOfSlopesTxt = open("DataOfSlopes.txt")
    CounterOfSlopes = int(DataOfSlopesTxt.readline())
    # One loop iteration per pixel of the progress bar; only the first
    # CounterOfSlopes iterations actually read a data line.
    for i in range(1, _width + 2):
        canvas.delete(LoadingPanelFill)
        if i <= CounterOfSlopes:
            help = list(DataOfSlopesTxt.readline().split())
            SlopeX, SlopeY = float(help[1]), float(help[2])
            DataOfSlopeInformation.append((distance(UserCoordinatX, UserCoordinatY, SlopeX, SlopeY), help[0], SlopeX, SlopeY))
        # Redraw the bar one pixel wider and refresh the canvas.
        LoadingPanelFill = canvas.create_rectangle(0, _height - 25, i, _height + 1, fill="green", outline="green")
        canvas.update()
    DataOfSlopesTxt.close()
    # Tuples sort by their first element, i.e. by distance.
    DataOfSlopeInformation.sort()
    canvas.delete(LoadingPanelFill, LoadingPanelContur, LoadingText)
    return DataOfSlopeInformation
def GetInformationsOfSlopes(DataOfSlopeInformation):
    """Draw one clickable rectangle per slope plus the info panel.

    Returns a list of [x1, y1, x2, y2, name, slope_x, slope_y] entries that
    GetWeatherInformation later uses for hit-testing mouse clicks.
    """
    DataOfRectangle = [None for i in range(CounterOfSlopes)]
    DataOfText = [None for i in range(CounterOfSlopes)]
    DataOfRectangleCoordinatsAndText = [None for i in range(CounterOfSlopes)]
    DataOfRectangleCoordinatsAndText[0] = [10, 10, _width // 2 - 20, 60, DataOfSlopeInformation[0][1], DataOfSlopeInformation[0][2], DataOfSlopeInformation[0][3]]
    # Each following rectangle sits 60 px below the previous one.
    for i in range(1, CounterOfSlopes):
        DataOfRectangleCoordinatsAndText[i] = [10, DataOfRectangleCoordinatsAndText[i - 1][1] + 60, _width // 2 - 20, DataOfRectangleCoordinatsAndText[i - 1][3] + 60, DataOfSlopeInformation[i][1], DataOfSlopeInformation[i][2], DataOfSlopeInformation[i][3]]
    for i in range(CounterOfSlopes):
        DataOfRectangle[i] = canvas.create_rectangle(DataOfRectangleCoordinatsAndText[i][0], DataOfRectangleCoordinatsAndText[i][1], DataOfRectangleCoordinatsAndText[i][2], DataOfRectangleCoordinatsAndText[i][3], fill="white")
        DataOfText[i] = canvas.create_text((DataOfRectangleCoordinatsAndText[i][0] + DataOfRectangleCoordinatsAndText[i][2]) // 2, DataOfRectangleCoordinatsAndText[i][1] + 10, text=DataOfSlopeInformation[i][1], font="Verdana 11")
    # Right-hand white panel where the weather details are rendered later.
    InformationScreen = canvas.create_rectangle(_width // 2, 10, _width - 10, 125, fill="white")
    return DataOfRectangleCoordinatsAndText
def distance(x, y, x1, y1):
    """Euclidean distance between the points (x, y) and (x1, y1)."""
    dx, dy = x - x1, y - y1
    return (dx ** 2 + dy ** 2) ** 0.5
def GetWeatherInformation(event):
    """Mouse-click handler: show weather for the slope under the cursor.

    Hit-tests the click against the slope rectangles, fetches current
    weather via pyowm, and redraws the info panel texts.  State is kept in
    module-level globals.
    """
    global FirstGet, DegreeInformationText, WindInformationText, AirMoistureInformationText, PressureInformationText, SkiSlopeInformationText, MouseX, MouseY, DuringIndex, Map
    MouseX, MouseY = event.x, event.y
    # Clear the previous panel texts (only after the first successful draw).
    if FirstGet:
        canvas.delete(DegreeInformationText, WindInformationText, AirMoistureInformationText, PressureInformationText, SkiSlopeInformationText)
    for i in range(CounterOfSlopes):
        if DataOfRectangleCoordinatsAndText[i][0] <= MouseX <= DataOfRectangleCoordinatsAndText[i][2] and DataOfRectangleCoordinatsAndText[i][1] <= MouseY <= DataOfRectangleCoordinatsAndText[i][3]:
            DuringIndex = i
            break
    observation = owm.weather_at_coords(DataOfRectangleCoordinatsAndText[DuringIndex][5], DataOfRectangleCoordinatsAndText[DuringIndex][6])
    DataOfWeather = observation.get_weather()
    DegreeInformationText = canvas.create_text(_width // 4 * 3, 20, text="{} Degree by Celsius".format(DataOfWeather.get_temperature('celsius')["temp"]), font="Verdana 12")
    WindInformationText = canvas.create_text(_width // 4 * 3, 40, text="Speed of wind: {} m/s".format(DataOfWeather.get_wind()["speed"]), font="Verdana 12")
    AirMoistureInformationText = canvas.create_text(_width // 4 * 3, 60, text="Air moisture: {}%".format(DataOfWeather.get_humidity()), font="Verdana 12")
    # NOTE(review): pressure and open/closed status are randomly generated,
    # not real data — presumably demo placeholders.
    PressureInformationText = canvas.create_text(_width // 4 * 3, 80, text="Pressure: {} mm Hg. article".format(randint(700, 800)), font="Verdana 11")
    AreOpenSkiSlope = randint(0, 1)
    if AreOpenSkiSlope == 1:
        SkiSlopeInformationText = canvas.create_text(_width // 4 * 3, 100, text="Ski slope are open", font="Verdana 12")
    else:
        SkiSlopeInformationText = canvas.create_text(_width // 4 * 3, 100, text="Ski slope are close", font="Verdana 12")
    # Image name for the selected slope; currently unused beyond this line.
    _image = '{}.gif'.format(DataOfRectangleCoordinatsAndText[DuringIndex][4])
    FirstGet = True
# Application setup: window geometry, weather client, Tk widgets and the
# shared mutable state used by the event handler above.
_width, _height = 500, 600
# NOTE(review): API key is hard-coded in source — should live in config/env.
owm = pyowm.OWM('15ea7bd687a6016f005a3668bc437e09')
root = Tk()
root.title("iSkiRussia")
canvas = Canvas(root, width=_width, height=_height, bg="lightblue")
CounterOfSlopes = -1
DuringIndex = 0
DataOfRectangleCoordinatsAndText = main()
DegreeInformationText, WindInformationText, AirMoistureInformationText, PressureInformationText, SkiSlopeInformationText, Map = None, None, None, None, None, None
FirstGet = False
MouseX, MouseY = -1, -1
# Left mouse button shows the weather for the clicked slope.
canvas.bind('<1>', GetWeatherInformation)
root.mainloop()
"sasha.seryanin@yandex.ru"
] | sasha.seryanin@yandex.ru |
baab7992de967b7c9b489df34e75c10e9ab1542b | fddba2b7503392c6021f095341789157f6c780a0 | /Python/Server.py | 8a7feb0e670cc4c94659030bff745f87dcd8d8e1 | [
"MIT"
] | permissive | NameOfTheDragon/AlpacaDiscoveryTests | 75b8c6f5db243b97c051cdc884f7bcb76a8b0863 | a2a88904a4d39d2caa1eaa8f62958646d402e3c8 | refs/heads/master | 2020-06-25T05:38:33.341205 | 2019-07-27T16:37:47 | 2019-07-27T16:37:47 | 199,218,557 | 0 | 0 | null | 2019-07-27T22:23:11 | 2019-07-27T22:23:10 | null | UTF-8 | Python | false | false | 843 | py | # (c) 2019 Daniel Van Noord
# This code is licensed under MIT license (see License.txt for details)
import socket

# UDP port the Alpaca discovery protocol listens on.
port = 32227
server_address = ('0.0.0.0', port) #listen for any IP

# Create listening port
# ---------------------
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) #share address
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) #needed on Linux and OSX to share port with net core. Remove on windows.

try:
    sock.bind(server_address)
except OSError:
    # Narrowed from a bare `except:`; bind failures surface as OSError.
    print('failure to bind')
    sock.close()
    raise

AlpacaDiscovery = "alpaca discovery"
AlpacaResponse = "alpaca here:4567"

# Answer every discovery broadcast with the API-port advertisement.
while True:
    data, addr = sock.recvfrom(1024)
    # Ignore undecodable bytes instead of letting a malformed datagram
    # crash the server with UnicodeDecodeError.
    if AlpacaDiscovery in str(data, "ascii", "ignore"):
        sock.sendto(AlpacaResponse.encode(), addr)
| [
"sg1mash98@gmail.com"
] | sg1mash98@gmail.com |
b9a93b816b8b8e2016cb8fdab5075530c2b480da | a0d55dd96a75c1a25d5bd9186d3dd2347d894559 | /Codigo/CodigoPython/strings.py | cc50274fc344b57fd6581db7d6fb35a502ddfabe | [] | no_license | mayralina/codigo-py | 8aae34fed4d0b22019fb01401814799b19c11c83 | 50873e5cbc1499da49e124a714a651303db8c272 | refs/heads/master | 2020-06-17T19:28:33.747659 | 2019-07-09T18:39:32 | 2019-07-09T18:39:32 | 196,024,949 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | #Strings in python are sorrounded by either single pr double quotation mark
# Let's look at string formatting and some string methods.
name = "mayra"
age = 21
# Concatenate: non-string values must be converted with str() before "+".
# (Spaces added around "and i am" — the original printed "mayraand i am21".)
print("hello, my name is " + name + " and i am " + str(age))
| [
"labsol@nn2.innovalabsnet.net"
] | labsol@nn2.innovalabsnet.net |
2385ade9d5aff56bcda4a9d86529284527f04d7c | ab574f7511fa15e5ea50a26f26e3e38f7e33505a | /win_2020/scipy/special/_comb.py | fb308e79f6f70645350cc38ce4c8bda455fa2b22 | [] | no_license | zclongpop123/maya_python_packages | 49d6b340512a2580bc8c14ae6281ca3f57017acd | 4dd4a48c41749443ac16053d20aec04e9d2db202 | refs/heads/master | 2021-11-30T01:49:41.846727 | 2021-11-17T01:47:08 | 2021-11-17T01:47:08 | 49,186,909 | 16 | 9 | null | 2017-03-07T00:13:41 | 2016-01-07T06:48:35 | Python | UTF-8 | Python | false | false | 282 | py | def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__, '_comb.pyd')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
| [
"aton.lerin@gmail.com"
] | aton.lerin@gmail.com |
de6799d97f3c6035e63beaff2a43d8298e3b022f | 448f020afcaf0bb8702860bef37162167474c145 | /Algorithm/Lesson5/Task5.1.1.py | 77ddcd4ec9a90f67452ff3a16386a5c887d6a464 | [] | no_license | Crasti/Homework | ce36c0a5754da3dc03a882798f02c09a1d72337c | 7cbd1c0d9fa25eb8b4ea28d493c54aee2b0ee747 | refs/heads/master | 2020-08-12T17:09:22.896324 | 2020-02-09T12:27:58 | 2020-02-09T12:27:58 | 214,806,346 | 0 | 0 | null | 2020-02-19T12:41:45 | 2019-10-13T11:15:04 | TSQL | UTF-8 | Python | false | false | 2,331 | py | """Пользователь вводит данные о количестве предприятий, их наименования и прибыль за 4 квартала (т.е. 4 отдельных
числа) для каждого предприятия.. Программа должна определить среднюю прибыль (за год для всех предприятий) и вывести
наименования предприятий, чья прибыль выше среднего и отдельно вывести наименования предприятий, чья прибыль ниже
среднего. """
from collections import namedtuple
class Company(object):
    """Accumulates per-company yearly profit and reports above/below average.

    Each instance describes one company's four quarterly profits.  Calling
    ``company()`` registers the instance; once ``num_of_comp`` companies
    have been registered, the statistics are computed and printed.

    The original implementation crashed at runtime: it built a namedtuple
    and then called ``.values()`` / ``.items()`` on it (AttributeError),
    and invoked ``Company.average_cost`` unbound with the wrong arguments.
    """

    # Class-level accumulator: company name -> yearly profit seen so far.
    _totals = {}

    def __init__(self, num_of_comp, name, q1, q2, q3, q4):
        self.num_of_comp = num_of_comp  # total companies expected
        self.name = name
        self.q1 = q1
        self.q2 = q2
        self.q3 = q3
        self.q4 = q4

    def company(self):
        """Register this company; produce the report once all are in.

        Returns None while more companies are still expected, otherwise the
        ``(average, below, above)`` tuple produced by ``print_rez``.
        """
        Company._totals[self.name] = self.q1 + self.q2 + self.q3 + self.q4
        if len(Company._totals) < self.num_of_comp:
            return None
        totals = dict(Company._totals)
        Company._totals = {}  # reset so a fresh run can start over
        return self.average_cost(totals, self.num_of_comp)

    def average_cost(self, company, num_of_comp):
        """Compute the (truncated) average yearly profit and report it."""
        average = int(sum(company.values()) / num_of_comp)
        return self.print_rez(average, company)

    def print_rez(self, average, company):
        """Print the report; also return (average, below, above) for callers."""
        below = [key for key, val in company.items() if val < average]
        above = [key for key, val in company.items() if val > average]
        print(f'Средняя прибыль компаний: {average}')
        print(f"Прибыль ниже среднего: {below}")
        print(f"Прибыль выше среднего: {above}")
        return average, below, above
# Interactive entry point: ask how many companies, then read each one's
# name and four quarterly profits and register it.
num_of_comp = int(input('Введите количество компаний: '))
for j in range(num_of_comp):
    company = Company(
        num_of_comp,
        name=input('Введите название компании: '),
        q1=int(input('Прибыль за 1 квартал: ')),
        q2=int(input('Прибыль за 2 квартал: ')),
        q3=int(input('Прибыль за 3 квартал: ')),
        q4=int(input('Прибыль за 4 квартал: '))
    )
    company.company()
| [
"c_crasti@rambler.ru"
] | c_crasti@rambler.ru |
6ac42ec8dce5222f5bcadf4c3cbc4d0b828b4558 | 6f1df1ea68b6dfa02590b1b3baab4ecbabd33326 | /Lambdas/analyzeFace.py | 522f10e1f4b8e86601f94c4abde3036e4acd200c | [] | no_license | pelincetin/Facial-Recognition-SmartDoor | 5691611bebc684a7a335d3400949b46185236552 | bfaf8c991b848ab78af2efd5798b3e691ae85411 | refs/heads/main | 2023-03-03T17:50:36.629523 | 2021-02-14T21:43:41 | 2021-02-14T21:43:41 | 322,814,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,765 | py | import os
### AWS:
import boto3
from boto3.dynamodb.conditions import Key
import json
import base64
import random
import datetime
import time
import cv2
import numpy as np
AUTHORIZED_IMAGE_BUCKET = "b1-photos-visitors"  # S3: frames of known visitors
UNAUTHORIZED_IMAGE_BUCKET = "unauthorized-visitors"  # S3: unknown-visitor shots
FRONTEND_BUCKET = "coms6998-hw2-frontend"  # S3: static site + its images
ADMIN_PHONE_NUMBER = "+1_PutNumberHere"  # placeholder — set before deploying
###################################################################################################
### Stream/image processing.
###################################################################################################
def alreadyProcessedCurrentUnauthorizedPerson():
    '''
    Check if the current person, who is unauthorized, has already been processed.
    If so, their image is in the S3 bucket and their face is in Rekognition.

    Returns True when Rekognition finds a face in the 'newVisitors'
    collection matching s3://unauthorized-visitors/unauthorized.jpg with
    at least 90% similarity.
    '''
    collectionId='newVisitors'
    fileName='unauthorized.jpg'
    threshold = 90
    maxFaces=2
    client=boto3.client('rekognition')
    response=client.search_faces_by_image(CollectionId=collectionId,
                                Image={'S3Object':{'Bucket':UNAUTHORIZED_IMAGE_BUCKET,'Name':fileName}},
                                FaceMatchThreshold=threshold,
                                MaxFaces=maxFaces)
    faceMatches=response['FaceMatches']
    ### If there's anything in faceMatches, then it's pretty confident that it's a match.
    ### If it's empty, then we haven't processed this new visitor before.
    return len(faceMatches) > 0
def getImageFromStream():
    """Pull the live Kinesis Video payload and decode one frame from it.

    Returns the frame as an OpenCV image, or None when no frame could be
    read (e.g. Kinesis is not streaming).
    """
    ### Reshma's code: extract picture.
    payload = get_byte_stream_from_kinesis()
    fileName = writePayloadToFile(payload)
    image = getImageFromFile(fileName)
    print("image:", image)
    if image is None:
        print ("No image found. Are you sure Kinesis is running?")
        return
    return image
def addUnauthorizedImageToCollection():
    """Index the current unauthorized snapshot into Rekognition.

    Adds s3://unauthorized-visitors/unauthorized.jpg to the 'newVisitors'
    collection and returns the new face's Rekognition FaceId.
    """
    client=boto3.client('rekognition')
    response=client.index_faces(CollectionId="newVisitors",
                                Image={'S3Object':{'Bucket':"unauthorized-visitors",'Name':"unauthorized.jpg"}},
                                ExternalImageId="unauthorized",
                                MaxFaces=1,
                                QualityFilter="AUTO",
                                DetectionAttributes=['ALL'])
    print ("rekognition response:")
    print(response)
    faceID = response["FaceRecords"][0]["Face"]["FaceId"] #response["FaceSearchResponse"][0]["MatchedFaces"][0]["Face"]["FaceId"]
    print("faceID:", faceID)
    return faceID
def writeImageToS3(bucket, image, fileName):
    """JPEG-encode *image* (an OpenCV ndarray) and upload it to S3.

    `ndarray.tostring()` was deprecated in NumPy 1.19 and removed in 2.0;
    `tobytes()` is the drop-in replacement.
    """
    ### Save picture to S3
    # https://stackoverflow.com/a/56593242
    print("encoding image")
    image_bytes = cv2.imencode('.jpg', image)[1].tobytes()
    print("writing to S3")
    s3 = boto3.client('s3', region_name='us-east-1')
    s3.put_object(Bucket=bucket, Key=fileName, Body=image_bytes)
    print("done")
###################################################################################################
### New users.
###################################################################################################
## For testing; get "unauthorized.jpg"
def getTestImageFromS3():
    """Fetch 'unauthorized.jpg' from S3 and decode it to a BGR image.

    Used as a stand-in for getImageFromStream() when Kinesis isn't running.
    """
    s3 = boto3.resource('s3', region_name='us-east-1')
    bucket = s3.Bucket(UNAUTHORIZED_IMAGE_BUCKET)
    img = bucket.Object("unauthorized.jpg").get().get('Body').read()
    # Decode the raw JPEG bytes into an OpenCV image.
    nparray = cv2.imdecode(np.asarray(bytearray(img)), cv2.IMREAD_COLOR)
    photo = nparray
    return photo
def processNewVisitor():
    '''
    When we get an unknown visitor, take a screenshot of them. Store the photo in the Rekognition collection.
    Contact administrator: send them photo of the person. If approved, collect their name and number.
    Then, using their faceID, send them through the normal route of OTP -> access.
    '''
    ### Uncomment this part when done testing:
    print("getting image")
    image = getImageFromStream()
    print ("writing dummy image")
    # Write a fixed-name snapshot first so the dedup check below can search
    # Rekognition against a known S3 key.
    writeImageToS3(UNAUTHORIZED_IMAGE_BUCKET, image, fileName = "unauthorized.jpg") # garbo write
    # For testing purposes, if kinesis isn't running:
    # image = getTestImageFromS3()
    ### Check if already seen this unauthorized face, so that this isn't triggered
    ### multiple times a second.
    if alreadyProcessedCurrentUnauthorizedPerson():
        return
    else:
        print ("adding unknown image to Rekognition")
        faceID = addUnauthorizedImageToCollection()
        currentTime = datetime.datetime.now().strftime("%Y-%m-%d:%H-%M-%S")
        fileName = "{}_{}.jpg".format(faceID, currentTime)
        print("writing final image to s3")
        writeImageToS3(UNAUTHORIZED_IMAGE_BUCKET, image, fileName) # real write
        # Copy for the web frontend so the owner can review the visitor.
        frontendFileName = "images/unauthorized/{}.jpg".format(faceID)
        writeImageToS3(FRONTEND_BUCKET, image, frontendFileName)
        print ("asking owner for authorization")
        askOwnerToAuthorizeUser(faceID)
### Reshma code:
def get_byte_stream_from_kinesis():
    """Open a GET_MEDIA session on the Kinesis Video stream.

    Returns the streaming 'Payload' object (read with .read()) starting
    from 'NOW'.  NOTE(review): the stream ARN is hard-coded — consider an
    environment variable.
    """
    print("Running Reshma's code:")
    STREAM_ARN = "arn:aws:kinesisvideo:us-east-1:875021110712:stream/KVS1/1604792887592"
    kinesis_client = boto3.client('kinesisvideo', region_name='us-east-1')
    # The media endpoint differs per stream and must be looked up first.
    response = kinesis_client.get_data_endpoint(
        StreamARN=STREAM_ARN,
        APIName="GET_MEDIA"
    )
    print("getting kinesis")
    video_client = boto3.client(
        "kinesis-video-media",
        endpoint_url=response['DataEndpoint'],
        region_name="us-east-1")
    print("getting video")
    response = video_client.get_media(
        StreamARN=STREAM_ARN,
        StartSelector={'StartSelectorType': 'NOW'}
    )
    print("getting response")
    print("response:", response.keys())
    payload=response["Payload"]
    print("payload:", payload)
    return payload
def writePayloadToFile(payload):
    """Drain *payload* (any object with .read(n)) into a timestamped .mkv.

    Reads at most 1024 chunks of 8 KiB (~8 MiB cap) so a live Kinesis
    stream cannot be read forever, but stops early once the stream is
    exhausted instead of continuing to write empty chunks as the original
    loop did.

    Returns the path of the file written under /tmp.
    """
    # https://stackoverflow.com/a/60984632
    print("starting to write")
    currentTime = datetime.datetime.now().strftime("%Y-%m-%d:%H-%M-%S")
    fileName = '/tmp/payload_{}.mkv'.format(currentTime)
    maxChunks = 1024
    with open(fileName, 'wb') as f:
        for _ in range(maxChunks):
            chunk = payload.read(1024 * 8)
            if not chunk:
                break  # stream exhausted
            f.write(chunk)
    print ("done writing")
    return fileName
def getImageFromFile(fileName):
    """Grab the first frame of the video at *fileName* (or None on failure)."""
    vidcap = cv2.VideoCapture(fileName)
    success, image = vidcap.read()
    # NOTE(review): vidcap is never released explicitly — confirm relying on
    # garbage collection is acceptable here.
    return image
def askOwnerToAuthorizeUser(faceID):
    """Text the administrator a link to the authorization page for *faceID*."""
    link = "http://coms6998-hw2-frontend.s3-website-us-east-1.amazonaws.com/WP1.html?faceID={}".format(faceID)
    message = "You have a visitor! Please go to " + link + " to authorize them."
    sendSMS(phoneNumber=ADMIN_PHONE_NUMBER, text_message=message)
###################################################################################################
### Standard process for all users:
###################################################################################################
def checkIfAlreadyTexted(faceID):
    """Return True if an OTP row already exists for *faceID* in passcodesDB1.

    The original handler caught ``botocore.exceptions.ClientError`` but
    ``botocore`` is never imported, so the except clause itself raised
    NameError.  The same exception class is reachable through the boto3
    resource's underlying client instead.
    """
    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('passcodesDB1')
    try:
        response = table.get_item(Key={'faceID': faceID})
        print("OTP response:", response)
        if "Item" in response:
            print ("Already texted")
            return True
        else:
            print ("Not already texted")
            return False
    except dynamodb.meta.client.exceptions.ClientError:
        print("Not already texted")
        return False
def getPhoneNumberFromFaceID(faceID):
    '''
    Query DynamoDB (visitorsDB2) for the visitor's phone number.

    args:
        faceID (str): User's unique face ID.
    returns:
        str | None: the phone number, or None when no record matches.
        (The original printed "No numbers found." but then indexed
        Items[0] anyway, raising IndexError.)
    '''
    dynamodb = boto3.resource('dynamodb',region_name='us-east-1')
    table = dynamodb.Table('visitorsDB2')
    response = table.scan(
        FilterExpression=Key('faceID').eq(faceID)
    )
    items = response["Items"]
    ### If not found:
    if not items:
        print("No numbers found.")
        return None
    return items[0]["phoneNum"]
ALPHABET = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]
PASSWORD_LENGTH = 5
PASSCODE_EXPIRY_TIME = 5 ## Only last for 5 minutes.
def makeOneTimePassword():
    '''
    Generate a random passcode of PASSWORD_LENGTH uppercase letters.

    Uses the `secrets` module instead of `random`: one-time passcodes are
    security tokens, and `random` is not cryptographically secure.

    returns:
        Random passcode string.
    '''
    import secrets  # local import keeps this fix self-contained
    return "".join(secrets.choice(ALPHABET) for _ in range(PASSWORD_LENGTH))
def makeAndSaveOneTimePasscode(faceID):
    '''
    Create the OTP, save it to the DB, return it.

    Writes a row to passcodesDB1 with a `ttl` attribute (epoch seconds,
    5 minutes from now) so DynamoDB can expire stale codes.
    '''
    passcode = makeOneTimePassword()
    ### 5 minutes from now:
    expireTime = datetime.datetime.today() + datetime.timedelta(minutes=PASSCODE_EXPIRY_TIME)
    ### Format: epoch seconds as a string for DynamoDB's Number type.
    formattedExpireTime = str(time.mktime(expireTime.timetuple()))
    dynamodb = boto3.client('dynamodb')
    dynamodb.put_item(
        TableName = 'passcodesDB1',
        Item = {
            'AccessCode': { 'S': passcode },
            'ttl': { 'N': str(formattedExpireTime) },
            'faceID': { 'S': faceID },
            'used': { 'BOOL': False}
        })
    return passcode
def sendSMS(phoneNumber, text_message):
    '''
    Generic text message function.

    Publishes *text_message* to *phoneNumber* via Amazon SNS.
    '''
    print ("texting {} this message: {}".format(phoneNumber, text_message))
    sns_client = boto3.client('sns')
    response = sns_client.publish(
        PhoneNumber=phoneNumber,
        Message=text_message,
    )
    print("text message response:", response)
    return
def sendOneTimePassword(faceID):
    """Create an OTP for *faceID*, store it, and text it to the visitor."""
    ### Get phone number.
    print("getting number")
    phoneNumber = getPhoneNumberFromFaceID(faceID)
    print ("phone number is:", phoneNumber)
    passcode = makeAndSaveOneTimePasscode(faceID)
    ### Send them the password
    otpPageURL = "http://coms6998-hw2-frontend.s3-website-us-east-1.amazonaws.com/WP2.html?faceID={f}".format(f = faceID)
    textMessage = "Your one time password is {}. Log in from {}.".format(passcode, otpPageURL)
    print("textMessage:", textMessage)
    sendSMS(phoneNumber, textMessage)
    return
def appendImageInfoToDB(faceID, fileName, currentTime):
    """Append a photo record to the visitor's `photos` list in visitorsDB2.

    Assumes the row for *faceID* already has a `photos` list attribute
    (list_append fails otherwise) — TODO confirm against table schema.
    """
    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table("visitorsDB2")
    result = table.update_item(
        Key={
            'faceID': faceID,
        },
        UpdateExpression="SET photos = list_append(photos, :i)",
        ExpressionAttributeValues={
            ':i': [{
                "objectKey": fileName,
                "bucket": AUTHORIZED_IMAGE_BUCKET,
                "createdTimestamp": currentTime
            }],
        },
        ReturnValues="UPDATED_NEW"
    )
    return
def lambda_handler(event, context):
    """Entry point for the Kinesis-triggered face-analysis Lambda.

    Decodes the Rekognition stream-processor record from the Kinesis event,
    then either:
      * unknown face -> processNewVisitor() (store photo, ask the owner), or
      * known face   -> text a one-time passcode (at most once per pending
        OTP) and archive the matching frame.

    Changes vs. the original:
      * removed a large inline sample event kept as a bare triple-quoted
        string (dead code; fixtures belong in test code);
      * removed redundant makeAndSaveOneTimePasscode / getPhoneNumberFromFaceID
        calls before sendOneTimePassword — sendOneTimePassword performs both
        itself, so the old code created two passcode rows per visitor.
    """
    # Kinesis delivers the Rekognition face-search result base64-encoded.
    data_raw = event['Records'][0]['kinesis']['data']
    data_str = base64.b64decode(data_raw).decode('ASCII')
    data = json.loads(data_str)
    print("data: ", str(data))
    matchedFaces = data["FaceSearchResponse"][0]["MatchedFaces"]
    ### if not in collection (unknown user):
    if len(matchedFaces) == 0:
        print("unknown user")
        processNewVisitor()
    ### If known/authorized user:
    else:
        ### Take most similar face:
        print("known user")
        faceID = matchedFaces[0]["Face"]["FaceId"]
        # Check if the faceID is in the OTP DynamoDB table.
        # If so, do nothing - we've already texted the user.
        alreadyTexted = checkIfAlreadyTexted(faceID)
        if alreadyTexted:
            pass
        # Otherwise create the OTP, text the user, and archive the frame.
        else:
            print("Making and saving OTP")
            sendOneTimePassword(faceID)
            image = getImageFromStream()
            print("start image:")
            print("done image")
            currentTime = datetime.datetime.now().strftime("%Y-%m-%d:%H-%M-%S")
            fileName = "{}_{}.jpg".format(faceID, currentTime)
            writeImageToS3(AUTHORIZED_IMAGE_BUCKET, image, fileName)
            # Record the new photo in the visitor's history.
            appendImageInfoToDB(faceID, fileName, currentTime)
    return {
        'statusCode': 200,
        'body': json.dumps('Hello from Lambda!')
    }
| [
"pc2807@barnard.edu"
] | pc2807@barnard.edu |
09a2e5bbd38e0b2fedf793682e84c829721999b1 | a272ce73325eb141c8956de5a11bb96850b3145b | /day07/day07.py | 034fedba86f3fa944d77b616c7e250dc43e6f6b2 | [] | no_license | jancaaa/advent-of-code2020 | 9807e3d871c63eb501c93247a2d8c5c9a915821f | 13d4517961bfd59613a15e91f59b39c702bcbe78 | refs/heads/master | 2023-02-05T09:00:24.123442 | 2020-12-24T13:45:31 | 2020-12-24T13:45:31 | 318,792,566 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,038 | py | def read_file(file) -> dict:
with open(file) as fp:
line = fp.readline().rstrip()
rules = {}
while line:
bag, content = process_line(line)
rules[bag] = content
line = fp.readline().rstrip()
return rules
def process_line(line: str) -> tuple:
    """Parse one luggage rule into structured form.

    Example:
        "light red bags contain 1 bright white bag, 2 muted yellow bags."
        -> ("light red", [{"count": 1, "color": "bright white"},
                          {"count": 2, "color": "muted yellow"}])

    "no other" contents yield an empty list.  The original annotated the
    return type as ``dict`` although it returns a tuple.
    """
    # Strip the trailing dot and the "bag"/"bags" noise words.
    cleaned = line[:-1].replace("bags", "").replace("bag", "")
    bag, _, contents = cleaned.partition("contain")
    parsed = []
    for part in contents.split(","):
        part = part.strip()
        if part == 'no other':
            continue
        count, color = part.split(None, 1)
        parsed.append({"count": int(count), "color": color})
    return bag.strip(), parsed
def contains_shiny_gold(bag: str, rules: dict) -> bool:
    """True if *bag* is 'shiny gold' or can (transitively) contain one."""
    if bag == 'shiny gold':
        return True
    if not bag:  # empty color string: nothing to recurse into
        return False
    return any(contains_shiny_gold(inner["color"], rules) for inner in rules[bag])
def content_count(color: str, rules: dict) -> int:
    """Total number of bags contained (recursively) inside one *color* bag."""
    # Each entry contributes its own bags plus that many copies of its contents.
    return sum(
        entry["count"] * (1 + content_count(entry["color"], rules))
        for entry in rules[color]
    )
def tests():
    # Sanity checks against the worked example from the puzzle description;
    # requires test_input.txt alongside this script.
    rules = read_file("test_input.txt")
    assert content_count("faded blue", rules) == 0
    assert content_count("dotted black", rules) == 0
    assert content_count("vibrant plum", rules) == 11
    assert content_count("dark olive", rules) == 7
    assert content_count("shiny gold", rules) == 32
def part1(rules: dict) -> int:
    """Count bag colors (excluding shiny gold itself) that can hold one."""
    return sum(
        1
        for color in rules
        if color != 'shiny gold' and contains_shiny_gold(color, rules)
    )
def part2(rules: dict) -> int:
    """Total number of individual bags required inside one shiny gold bag."""
    return content_count('shiny gold', rules)
if __name__ == "__main__":
    tests()  # run the example-based sanity checks before the real input
    rules = read_file("input.txt")
    print(f"Part 1: {part1(rules)}")
    print(f"Part 2: {part2(rules)}")
| [
"janca.zahradnickova@gmail.com"
] | janca.zahradnickova@gmail.com |
de224c21cd7809746c3c0bc0b4c7f5270e92575a | c1189ae95dc254b16a1adc6ea41bfc7b91ba6e49 | /Plain/datasets/cifar100.py | 31168f0855b356a5f8cea547b09ee354d0db7f24 | [] | no_license | tualgfhite/adversarial-contrastive-learning | ee892f6bff6ad28e60eed2780b91d8d8e90065f5 | 7c977570d3cb9cc9d4bf3ae7311c8a2a3609a8e2 | refs/heads/main | 2023-06-10T00:44:42.573158 | 2021-07-01T06:07:23 | 2021-07-01T06:07:23 | 381,352,128 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 942 | py | from __future__ import print_function
from PIL import Image
import torchvision.datasets as datasets
import torch.utils.data as data
class CIFAR100Instance(datasets.CIFAR100):
    """CIFAR-100 dataset that also returns the sample index.

    In training mode ``__getitem__`` returns two independently augmented
    views of the same image (useful for contrastive learning), the label,
    and the sample index; in eval mode it returns a single view, the
    label, and the index.

    Fixes over the original: the train/eval branches that fetched the
    sample were byte-identical and have been merged, and a missing
    ``self.transform`` no longer raises NameError (the raw PIL image is
    returned instead). The docstring also wrongly said "CIFAR10Instance".
    """

    def __getitem__(self, index):
        """Return (img1, img2, target, index) in train mode, else (img1, target, index)."""
        img, target = self.data[index], self.targets[index]

        # Convert to PIL so this behaves consistently with the other
        # torchvision datasets and their transforms.
        img = Image.fromarray(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        # Fall back to the untransformed image when no transform is set
        # (the original code left img1/img2 unbound in that case).
        img1 = self.transform(img) if self.transform is not None else img
        if self.train:
            img2 = self.transform(img) if self.transform is not None else img
            return img1, img2, target, index
        return img1, target, index
| [
"420367843@qq.com"
] | 420367843@qq.com |
2052d499d056d97f3c9520c6b4cfc144c740a151 | 48475d7a3599e542adcab313f275a77aae211985 | /ex16.py | 4b4d004128e6da94d1a63887e1a19194098133f6 | [] | no_license | katylouise/python_the_hard_way | 9eb8c4ca5788c49902e95a6ec711f7968ebd24e5 | 2e5a52b21f0e652ba937d844f702b2cca6676dda | refs/heads/master | 2021-01-21T06:55:21.418864 | 2015-01-18T17:04:10 | 2015-01-18T17:04:10 | 27,346,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | #import argv from sys module
from sys import argv

# Python 2 script ("Learn Python the Hard Way", exercise 16).
# argv[0] is the script name; argv[1] must be the path of the file to rewrite.
script, filename = argv

# Warn the user before destroying the file's contents.
print "We're going to erase %r." % filename
print "If you don't want that, hit CTRL-C (^C)."
print "If you do want that, hit RETURN."

# Pause so the user can abort with CTRL-C.
raw_input("?")

print "Opening the file..."
# Mode 'w' already truncates on open; the explicit truncate() below is
# kept from the original exercise for demonstration purposes.
target = open(filename, 'w')

print "Truncating the file. Goodbye!"
target.truncate()

print "Now I'm going to ask you for three lines."

# Collect three lines of user input.
line1 = raw_input("line 1: ")
line2 = raw_input("line 2: ")
line3 = raw_input("line 3: ")

print "I'm going to write these to the file."

# Join the three lines with newlines (note: the template intentionally
# surrounds each newline with spaces, matching the original exercise).
content = "%s \n %s \n %s" % (line1, line2, line3)
target.write(content)

print "And finally, we close it."
target.close()
"rklappleyard@gmail.com"
] | rklappleyard@gmail.com |
671a0ab812fb1c9980409afac2d6be2ca1345a4a | d2edafd56e3f407d0a04a7081ba48c1b0f0b8be4 | /lesson4/sensing/serializers.py | 965c461a663f71bf197fc1cf15ee400ea0c2dc9a | [] | no_license | kevinwlu/iot | 65ef29a0e14bb7314100c2f0262dba5ee37b278d | 10e705a46a5ee37106c798d452a5838f09f81c98 | refs/heads/master | 2023-08-18T13:56:24.427109 | 2023-08-17T01:10:10 | 2023-08-17T01:10:10 | 49,343,710 | 164 | 121 | null | 2022-03-26T00:24:46 | 2016-01-09T22:21:30 | Python | UTF-8 | Python | false | false | 341 | py | from myapp.models import Room, Door
from rest_framework import serializers
class RoomSerializer(serializers.HyperlinkedModelSerializer):
    """Serialize Room instances as {url, name}, with a hyperlinked detail URL."""
    class Meta:
        model = Room
        fields = ('url', 'name')
class DoorSerializer(serializers.HyperlinkedModelSerializer):
    """Serialize Door instances as {url, name}, with a hyperlinked detail URL."""
    class Meta:
        model = Door
        fields = ('url', 'name')
| [
"kevin.wen.lu@gmail.com"
] | kevin.wen.lu@gmail.com |
7b8faf926277ef68c8b530b2733c0d1e1c1305af | 09e2744e5efeac07eaa9f42b91410f95299cff58 | /model/operation.py | d9b518f73b431ab886cc15ad19fa00b13126d36b | [] | no_license | adajw/ConcurrenTree | 41cbc677ed8015eb2ea2130125c25278d3aa9594 | cfc24a62c33311b282f557d31e556d215a0b4ccd | refs/heads/master | 2020-04-08T19:38:22.317203 | 2011-10-20T18:46:19 | 2011-10-20T18:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,046 | py | from ConcurrenTree.model import ModelBase
import ConcurrenTree.util.hasher as hasher
import instruction
from address import Address
from copy import deepcopy
import traceback
import json
class Operation(ModelBase):
	''' An ordered collection of instructions applied to a tree as a unit. '''

	def __init__(self, instructions=None, protostring=None):
		''' Build from a list of instruction protos, or, if protostring is
		present, from that JSON-serialized instruction list instead.

		Note: the original signature used a shared mutable default ([])
		for instructions; None is used here to avoid that pitfall while
		remaining call-compatible. '''
		if instructions is None:
			instructions = []
		if protostring:
			instructions = json.loads(protostring)
		try:
			self.instructions = instruction.set(instructions)
		except Exception:
			# Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
			# are no longer swallowed and misreported as parse failures.
			raise ParseError()

	def apply(self, tree):
		''' Apply every instruction to the tree, then record this op's hash
		in tree.applied. Raises OpApplyError if any instruction fails.

		NOTE(review): "tree = backup" only rebinds the local name -- the
		caller's tree is NOT actually rolled back on failure. Preserved
		as-is because a real rollback needs the tree's mutation API. '''
		backup = deepcopy(tree)
		for i in self.instructions:
			try:
				i.apply(tree)
			except Exception:
				tree = backup
				traceback.print_exc()
				raise OpApplyError()
		tree.applied.add(self.hash)

	@property
	def inserts(self):
		''' The subset of this op's instructions that are Insertions. '''
		return [i for i in self.instructions
			if isinstance(i, instruction.Insertion)]

	@property
	def dep_provides(self):
		''' The dependencies that this operation provides to the tree '''
		return set([str(i.address_object.proto()+[i.position, i.value]) for i in self.inserts])

	@property
	def dep_requires(self):
		''' The dependencies that must already exist before this op can be applied '''
		return set([str(i.address_object) for i in self.instructions]) - self.dep_provides

	def ready(self, tree):
		''' Checks a tree for existence of all required dependencies. '''
		for i in self.dep_requires:
			try:
				Address(i).resolve(tree)
			except Exception:
				traceback.print_exc()
				return False
		return True

	def applied(self, tree):
		''' Returns whether or not this op has been applied to the tree '''
		return self.hash in tree.applied

	def compress(self):
		''' Todo - op compression (combining deletion instructions together) '''
		pass

	def proto(self):
		''' Returns a protocol operation object '''
		return {"type":"op","instructions":[i.proto() for i in self.instructions]}
# Raised by Operation.__init__ when the instruction list cannot be parsed.
class ParseError(SyntaxError): pass
# Raised by Operation.apply when an instruction fails to apply cleanly.
class OpApplyError(SyntaxError): pass
| [
"campadrenalin@gmail.com"
] | campadrenalin@gmail.com |
f238135fc35efb42936bcb5fb5837ba83494fe7b | 0d87d119aa8aa2cc4d486f49b553116b9650da50 | /autom4te.cache/test/functional/p2p_timeouts.py | b87f48266e53f8ad1a0ba5ea15fe0d11d5b0ef5d | [
"MIT"
] | permissive | KingricharVD/Nests | af330cad884745cc68feb460d8b5547d3182e169 | 7b2ff6d27ccf19c94028973b7da20bdadef134a7 | refs/heads/main | 2023-07-07T12:21:09.232244 | 2021-08-05T01:25:23 | 2021-08-05T01:25:23 | 386,196,221 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,325 | py | #!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various net timeouts.
- Create three nesteggd nodes:
no_verack_node - we never send a verack in response to their version
no_version_node - we never send a version (only a ping)
no_send_node - we never send any P2P message.
- Start all three nodes
- Wait 1 second
- Assert that we're connected
- Send a ping to no_verack_node and no_version_node
- Wait 30 seconds
- Assert that we're still connected
- Send a ping to no_verack_node and no_version_node
- Wait 31 seconds
- Assert that we're no longer connected (timeout to receive version/verack is 60 seconds)
"""
from time import sleep
from test_framework.mininode import *
from test_framework.test_framework import PivxTestFramework
from test_framework.util import *
class TestNode(P2PInterface):
    """Peer that deliberately never completes the version/verack handshake."""
    def on_version(self, message):
        # Don't send a verack in response
        pass
class TimeoutsTest(PivxTestFramework):
    """Check that half-connected peers survive until, but not past, the
    60-second version/verack handshake timeout."""
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        # Three peers, each stuck at a different stage of the handshake.
        no_verack_node = self.nodes[0].add_p2p_connection(TestNode())
        no_version_node = self.nodes[0].add_p2p_connection(TestNode(), send_version=False)
        no_send_node = self.nodes[0].add_p2p_connection(TestNode(), send_version=False)
        network_thread_start()
        sleep(1)
        # Shortly after connecting, all three peers must still be up.
        assert no_verack_node.connected
        assert no_version_node.connected
        assert no_send_node.connected
        # Pings do not count as handshake progress.
        no_verack_node.send_message(msg_ping())
        no_version_node.send_message(msg_ping())
        sleep(30)
        # ~31s in: still under the 60s limit, everyone remains connected.
        assert "version" in no_verack_node.last_message
        assert no_verack_node.connected
        assert no_version_node.connected
        assert no_send_node.connected
        no_verack_node.send_message(msg_ping())
        no_version_node.send_message(msg_ping())
        sleep(31)
        # ~62s in: past the handshake timeout -- all three must be dropped.
        assert not no_verack_node.connected
        assert not no_version_node.connected
        assert not no_send_node.connected
if __name__ == '__main__':
    TimeoutsTest().main()  # standard test-framework entry point
| [
"northerncommunity1@gmail.com"
] | northerncommunity1@gmail.com |
8dda537fcc70bd1cde6b546857d802fec8b22277 | 4000abb325ae6010ffe337cc3e549f5942522503 | /venv/lib/python3.6/codecs.py | ad6d31748cc6adaa0893204a0f91c28a60c62bcd | [] | no_license | ehowing/sc_project | a658a5bc619c39e401fd4956d931f39cd23c78b8 | 28ad709dc450930dd73af3fdeda62a064371fd5f | refs/heads/master | 2021-09-10T03:05:02.080233 | 2018-03-20T20:50:25 | 2018-03-20T20:50:25 | 126,080,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | /Users/emilyhowing/anaconda3/lib/python3.6/codecs.py | [
"emilyhowing@Emilys-MacBook-Pro-4.local"
] | emilyhowing@Emilys-MacBook-Pro-4.local |
91def3dcff22a5ae4e404dc17a35acd1e8d93b79 | 95fd32efd9d1795beb7d4294a98858107fbd43d1 | /products/migrations/0001_initial.py | e20f9bd2fbd62797d4a24f9098d0d1fcd92c9d3b | [] | no_license | Atilaus/healthy | 4203e13e65b24a904866cbf072b3362747ea4e06 | b9058431e6f7038e233970fa6b1b86a33c727c6d | refs/heads/master | 2020-07-04T19:27:57.497267 | 2019-08-23T18:47:43 | 2019-08-23T18:47:43 | 202,388,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,721 | py | # Generated by Django 2.2.2 on 2019-06-22 17:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the products app: creates the Product table and
    the ProductImage table (one Product -> many images, DO_NOTHING on delete)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, default=None, max_length=64, null=True)),
                ('description', models.TextField(blank=True, default=None, null=True)),
                ('is_active', models.BooleanField(default=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='ProductImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='products_images')),
                ('description', models.TextField(blank=True, default=None, null=True)),
                ('is_active', models.BooleanField(default=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('product', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='products.Product')),
            ],
            options={
                'verbose_name': 'Image',
                'verbose_name_plural': 'Images',
            },
        ),
    ]
| [
"atilaus@mail.ru"
] | atilaus@mail.ru |
8507be53f6de42ca6bd8aa10d87e80d87a4720e3 | 004592a9136cc33abdbfa6351ae5ba0315c31183 | /Using_in_as_a_logical_operator.py | d42edabe682e7a7ca63b47dc5f1aa7b2ce880c88 | [] | no_license | timlefkowitz/python | c9e9ae6c476a9d3a3ef84f9571662ce21d173870 | ad7273af1905b4bb3c96d809c16991d50398b33e | refs/heads/master | 2023-07-26T18:44:45.543732 | 2021-09-09T23:39:47 | 2021-09-09T23:39:47 | 293,598,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | #Using_in_as_a_logical_operator
fruit = 'banana'
# "in" is a boolean membership test: True when the left operand occurs
# as a substring of the right operand.
'n' in fruit
'n' in fruit
# Multi-character substrings work the same way.
'nan' in fruit
# Because "in" evaluates to a bool, it can drive an if statement directly.
if 'a' in fruit :
    print('Found it!')
| [
"timsphotography@gmail.com"
] | timsphotography@gmail.com |
a463f071d665f03508ace4e365e5f587e5e22f39 | 3e70eda6819fec5bf5ba2299573b333a3a610131 | /mole/tools/protocol_test/send_package.py | 6839a91ecc4707f8f38f98bb00e91c7abfe1a7b3 | [] | no_license | dawnbreaks/taomee | cdd4f9cecaf659d134d207ae8c9dd2247bef97a1 | f21b3633680456b09a40036d919bf9f58c9cd6d7 | refs/heads/master | 2021-01-17T10:45:31.240038 | 2013-03-14T08:10:27 | 2013-03-14T08:10:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,381 | py | #! /usr/bin/env python
from xml.dom import minidom
import protocol
import string
import struct
import time
import thread
import mypub
#global len_param #store
global cmd_idss # store cmds' ids
global intervals # store intervals
global login_userid # user id for logining to server
global login_passwd # passwd
class XmlParser:
    """Thin wrapper that parses a local XML file with xml.dom.minidom and
    exposes its document element as ``self.source``. (Python 2 code: note
    the old-style ``except IOError, (errno, strerror)`` syntax below.)"""
    def __init__(self, file):
        self.loadSource(file)
    def load(self, source):
        """load XML input source, return parsed XML document
        - a filename of a local XML file ("~/diveintopython/common/py/kant.xml")
        """
        try:
            sock = open(source)
        except IOError, (errno, strerror):
            # NOTE(review): if open() fails, ``sock`` stays unbound and the
            # minidom.parse(sock) call below raises NameError -- confirm
            # whether this error path was ever meant to continue.
            print "I/O error(%s): %s" % (errno, strerror)
        xmldoc = minidom.parse(sock).documentElement
        sock.close()
        return xmldoc
    def loadSource(self, source):
        """Parse *source* and cache its document element on self.source."""
        self.source = self.load(source)
def parse_xml(source):
    """Read the test-plan XML at *source* (Python 2 code).

    Side effects: fills the module globals login_userid, login_passwd,
    cmd_ids and intervals from the <login_user> and <cmd> elements.
    Returns a list (one entry per <cmd>) of {index: [struct_format, value]}
    dicts describing that command's parameters.

    NOTE(review): several locals shadow builtins (dict, type, len, format);
    preserved as-is since this is a doc-only pass.
    """
    p=XmlParser(source)
    xmldoc =p.source
    # Read the login credentials from the <login_user> element.
    login_user_list = xmldoc.getElementsByTagName('login_user')
    global login_userid
    login_userid = login_user_list[0].attributes["userid"].value
    global login_passwd
    login_passwd = login_user_list[0].attributes["passwd"].value
    cmdlist = xmldoc.getElementsByTagName('cmd')
    num_cmd = cmdlist.length # the number of packages waiting for sending
    global cmd_ids
    cmd_ids=[] # reset the module-level command-id list
    global intervals # per-command send intervals (seconds, as strings)
    intervals=[]
    dict=[]
    if num_cmd>0:
        for m in range(0, num_cmd):
            cmd1 = cmdlist[m] # one <cmd> node
            cmd_ids.append(cmd1.attributes["id"].value)
            intervals.append(cmd1.attributes["interval"].value)
            nodeList = cmd1.childNodes
            paramsNode = nodeList[1]
            paramList = paramsNode.childNodes
            paramListLen = paramList.length
            # Serialize the params: map each <param> to a struct format.
            pair={}
            i=1 # jump over the first text node
            j=0
            format=""
            while i<paramListLen:
                param = paramList[i]
                name = param.attributes["Name"].value
                type = param.attributes["Type"].value
                len = param.attributes["Len"].value
                value = param.attributes["Value"].value
                # Map the declared Type to a struct module format code;
                # extend this chain to support more parameter types.
                if (type=="string"):
                    arr1 = [len, 's']
                    format = string.join(arr1, '')
                elif(type=="int"):
                    format = 'L'
                elif (type=="uchar"):
                    format = 'B'
                pair[j]=[format, value]
                i = i+2 # jump over the text node between tags
                j = j+1
            # end while: all params of this command collected
            dict.append(pair)
        # end for: all <cmd> nodes processed
    else:
        print "No command found"
        exit(1)
    # end if
    return dict
def capture_message(protocol_instance, *args):
    """Worker-thread loop: drain received protocol messages forever and
    print each one's command id, result, user id and hex payload.
    Never returns; intended for thread.start_new_thread(). (Python 2.)"""
    while 1:
        out_info=protocol_instance.getrecvmsg()
        if(out_info):
            proto_len,version,cmdid,userid,result, pri_msg=out_info;
            print cmdid, result, userid , mypub.print_hex_16(pri_msg,0 );
def do_send_package(source):
    """Replay the command packets described by the XML file *source*.

    Logs in to the login server (hard-coded 10.1.1.5:7777), sends each
    command's packed parameters in order, waits for the matching echo,
    and accumulates a small HTML report which is returned as a string.
    If the socket drops mid-run it re-logs-in and restarts the whole
    sequence (unless the failure was on the final command). Python 2 code.
    """
    dict=parse_xml(source)
    num_cmds = len(dict)
    return_msg = ""
    return_msg = return_msg + "<p>begin logining...</p>"
    p=protocol.mole_online("10.1.1.5",7777 ,4, int(login_userid), str(login_passwd)) #login on login server
    time.sleep(1)
    print "exec"
    # Drain and report whatever the server sent back for the login.
    while 1:
        out_info=p.getrecvmsg()
        if(out_info):
            proto_len,version,cmdid,userid,result, pri_msg=out_info;
            return_msg = return_msg + "<p>Cmdid: " + str(cmdid) + " Result: " + str(result)+"</p>"
        else:
            break
    # Now it's time to execute the commands, one packet per <cmd>.
    return_msg = return_msg + "<HR style=\"FILTER: alpha(opacity=100,finishopacity=0,style=1)\" width=\"80%\" color=\#987cb9 SIZE=3 align=LEFT>" + "<p>Begin sending packages...</p>"
    for m in range(0, num_cmds):
        pair=dict[m].values()
        num_param = len(pair)
        primsg=""
        # Pack every parameter big-endian according to its recorded format.
        for i in range(0, num_param):
            format='>'+pair[i][0]
            value = str(pair[i][1])
            # Integer format codes need an int value, not a string.
            if (pair[i][0]=='L' or pair[i][0]=='B'):
                value = int(pair[i][1])
            primsg= primsg + struct.pack(format, value)
        sendbuf = p.pack(int(cmd_ids[m]), int(login_userid), primsg)
        p.get_socket().send(sendbuf)
        time.sleep(float(intervals[m]))
        while 1: #wait for the corresponding msg
            out_info=p.getrecvmsg()
            if(out_info):
                proto_len,version,cmdid,userid,result, pri_msg=out_info
                if pri_msg==primsg and cmdid==cmd_ids[m]: #make sure the received package is the same with the sended one
                    return_msg = return_msg + "<p>recving package %s ...</p>" %m
                    return_msg = return_msg + "<p>Cmdid: " + str(cmdid) + " Result: " + str(result)+"</p>"
                    break
                else: # unrelated message -- keep waiting
                    continue
            else:
                # Socket dropped: report, then either give up (last command)
                # or log in again and restart the whole sequence.
                return_msg = return_msg+"<HR style=\"FILTER: alpha(opacity=100,finishopacity=0,style=1)\" width=\"80%\" color=\#987cb9 SIZE=3 align=LEFT> "+"<p>Send pacakge %s error, Cmdid: %s</p>" %(m, cmd_ids[m])
                return_msg = return_msg+"<p>sockect disconnected</p>"
                if m==(num_cmds-1):
                    break
                return_msg = return_msg+"<p>login again...</p>"
                p=protocol.mole_online("10.1.1.5",7777 ,4, int(login_userid), str(login_passwd)) #login on login server
                time.sleep(1)
                while 1:
                    out_info=p.getrecvmsg()
                    if(out_info):
                        proto_len,version,cmdid,userid,result, pri_msg=out_info;
                        return_msg = return_msg + "<p>Cmdid: " + str(cmdid) + " Result: " + str(result)+"</p>"
                    else:
                        break
                return_msg = return_msg + "<HR style=\"FILTER: alpha(opacity=100,finishopacity=0,style=1)\" width=\"80%\" color=\#987cb9 SIZE=3 align=LEFT> " +"<p>Send packages again...</p>"
                break
    return """\
<html><body>
%s
</body></html>
""" % return_msg
| [
"smyang.ustc@gmail.com"
] | smyang.ustc@gmail.com |
a5626232faa079360174e66800da7008e072877e | 3d7fe86a8c2d3cb5212e9ab8f2fa2927e5974bdd | /build/update_src.py | 48a9fa9fbd3ecfc271bba10885cedae8bb130cfb | [] | no_license | telliott99/covid | 3b829e6a767fc16d2741de01ac02fea766895e3c | f79f55fc815af3237df8856c2eb7c2e5ee733d4a | refs/heads/master | 2022-11-29T23:16:02.054703 | 2020-08-06T22:52:18 | 2020-08-06T22:52:18 | 259,958,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,365 | py | import sys, os, subprocess
base = os.environ.get('covid_base')
if not base in sys.path:
sys.path.insert(0,base)
sys.path.insert(1,base + '/myutil')
import uinit, udates
import udb, ufile, ukeys, ufmt
def run(src):
    """Fetch any missing daily data files into directory *src*.

    Files in *src* are named by date; for every date between the earliest
    file present and today (per udates.generate_dates) that has no file,
    fetch.py is invoked as a subprocess to download it. Exits the process
    if a fetch fails.

    NOTE(review): first_all and last are assigned but never used here --
    presumably leftovers; confirm before removing.
    """
    #-------------------------------
    # older source files are in subdirectories
    # /Apr, /May etc.
    # those directories are assumed to be complete!
    # filter out directories
    # recent files only
    fL = ufile.list_directory(src)
    # here, we need full paths
    src_recent = [src + '/' + fn for fn in fL]
    #-------------------------------
    # files are named as dates
    date_list = [udates.date_from_path(p) for p in src_recent]
    first_all = '2020-03-22'
    first_recent = udates.date_from_path(src_recent[0])
    all_dates = udates.generate_dates(first=first_recent)
    last = all_dates[-1]
    updated = False
    for date in all_dates:
        if not date in date_list:
            print('fetch missing data', date)
            ret_code = subprocess.call(['python', "fetch.py", src, date])
            if ret_code != 0:
                print('error in fetch.py')
                sys.exit()
            updated = True
    if not updated:
        print('src files up-to-date')
#-------------------------------------
def list_src_all(src):
    """Recursively list all files under *src*, skipping dot-files.

    Files directly in *src* are returned as bare names; files in
    subdirectories as paths relative to *src* (e.g. 'Apr/2020-04-01').
    The result is sorted by basename so dated files interleave correctly
    across subdirectories.

    Bug fixed from the original: the directory variable was rebound to a
    *relative* path while still iterating its entries, so after the first
    file in a subdirectory, os.path.isdir was tested against a wrong path
    (dependent on the current working directory). Builtin-shadowing names
    (``next``, ``filter``) were also renamed.
    """
    collected = []
    pending = [src]
    while pending:
        current = pending.pop()
        # Path of *current* relative to *src* ('' for src itself).
        rel = '' if current == src else current.replace(src + '/', '')
        for fn in os.listdir(current):
            if fn.startswith('.'):
                continue  # skip hidden files such as .DS_Store
            full = current + '/' + fn
            if os.path.isdir(full):
                pending.append(full)
            elif rel:
                collected.append(rel + '/' + fn)
            else:
                collected.append(fn)

    def basename_key(name):
        # Sort key: the final path component (the date-named file itself).
        return name.split('/')[-1] if '/' in name else name

    return sorted(collected, key=basename_key)
| [
"telliott999@gmail.com"
] | telliott999@gmail.com |
2fa3f9a40f5137022ef36d140602f610415aee95 | 4b7cd627abf72de828ed9a8ce1dc0f27443cc266 | /migrations/versions/4f1f829191d2_posts_table.py | 433a963cd5d12a37832ee49eac0618cdca40afc7 | [] | no_license | PlutoniumProphet/microblog | 8c23df57f75def21020ad0b09495fdd47b95001c | 1f43996c6a6da59277c33aa37ea92966adf1e84d | refs/heads/master | 2023-08-04T13:37:51.056247 | 2023-05-22T15:13:55 | 2023-05-22T15:13:55 | 202,483,280 | 1 | 1 | null | 2023-07-25T20:47:25 | 2019-08-15T06:04:11 | Python | UTF-8 | Python | false | false | 1,057 | py | """posts table
Revision ID: 4f1f829191d2
Revises: 4592a152f0ec
Create Date: 2019-10-18 17:54:28.565831
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4f1f829191d2'
down_revision = '4592a152f0ec'
branch_labels = None
depends_on = None
def upgrade():
    """Create the post table and its timestamp index."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('post',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('body', sa.String(length=140), nullable=True),
    sa.Column('timestamp', sa.DateTime(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_post_timestamp'), 'post', ['timestamp'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Reverse of upgrade(): drop the index, then the post table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_post_timestamp'), table_name='post')
    op.drop_table('post')
    # ### end Alembic commands ###
| [
"grahamkesley@gmail.com"
] | grahamkesley@gmail.com |
8a92cbb2d45735320e977801ecbb10f820360c3a | 091cc684740bc76932352d230db4a08bf011b7ec | /leetcode/two_sum.py | 9d4a1928369cce2e8433530f1acbd921897f887e | [] | no_license | dm36/interview-practice | 5232601d8de23e80557b3e2a96ff9d3589017052 | 612966ea0a813faaabd5dca98ce6dd524b8b4cef | refs/heads/master | 2022-02-14T00:46:20.395743 | 2019-07-19T19:47:34 | 2019-07-19T19:47:34 | 197,820,260 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | # iterate through each element in the array
# check if target - the present element exists in the hash map
# if it does- you've found your two sum! return the index of the present element and the previously hashed index
# if it doesn't- hash the element to its index
def two_sum(arr, target):
    """Return indices (i, j), i < j, of the two entries summing to *target*.

    Single pass with a value->index map: for each element, check whether
    its complement was already seen. Returns None when no pair exists.
    """
    seen = {}
    for idx, value in enumerate(arr):
        complement = target - value
        if complement in seen:
            return seen[complement], idx
        seen[value] = idx
print two_sum([1, 2, 3, 4, 5], 6)
| [
"dhruv.madhawk@gmail.com"
] | dhruv.madhawk@gmail.com |
54d15a5b1b43f64e685001c5cfda847f37ff16f1 | 85eec28ac8c021d9af9a92d42f5edbcd3301a568 | /src/demo.py | f6395d110038e40d3f98a716af2118f6bbc1684a | [] | no_license | oscar86hsu/docker_multiarch_demo | 1182ee15b6d5414ffa6f610e2cf04f9b2930b2ca | c6544d2b223c8899e54c1a2b4a1a33cebc674a48 | refs/heads/master | 2020-12-15T20:11:59.295746 | 2020-01-21T07:47:42 | 2020-01-21T07:47:42 | 235,240,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | import os
print("This is a " + os.uname().machine + " Machine!\n") | [
"oscar86hsu@gmail.com"
] | oscar86hsu@gmail.com |
e9d98b45c3fb60b3811bb26bf569e6581f048af5 | 0b48df8282c134296c4131396340cb7f03e46550 | /manage.py | e64f448a2fec2ba7f0c46e6f81ff29c9f64f0ace | [] | no_license | acaciawater/gwt_auth | 368dc21fd1e9358d6a38061305fb161306e4b1c0 | 2cea9d77ef195f159fe23192cef36144923b6b95 | refs/heads/master | 2020-04-05T05:33:25.907621 | 2019-01-24T11:47:00 | 2019-01-24T11:47:00 | 156,600,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Point Django at this project's settings before management imports run.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gwtauth.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch the command-line arguments (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| [
"theo@acacia.local"
] | theo@acacia.local |
121e638cba278814f30a3ea98f2ac5de450572a9 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-dris/huaweicloudsdkdris/v1/model/rsu_dto.py | 1ee02d2ae08d164d310b61729037617197699be9 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 20,723 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class RsuDTO:
    """Data-transfer object describing a roadside unit (RSU) in the
    Huawei Cloud DRIS v1 API.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values should be treated as sensitive.
    sensitive_list = []

    # Declared type of every attribute (consumed by the SDK's
    # serialization helpers).
    openapi_types = {
        'rsu_id': 'str',
        'name': 'str',
        'description': 'str',
        'esn': 'str',
        'last_modified_time': 'datetime',
        'created_time': 'datetime',
        'last_online_time': 'datetime',
        'ip': 'str',
        'position_description': 'str',
        'location': 'RsuLocation',
        'status': 'str',
        'rsu_model_id': 'str',
        'intersection_id': 'str',
        'related_edge_num': 'int',
        'software_version': 'str'
    }

    # Python attribute name -> JSON field name on the wire.
    attribute_map = {
        'rsu_id': 'rsu_id',
        'name': 'name',
        'description': 'description',
        'esn': 'esn',
        'last_modified_time': 'last_modified_time',
        'created_time': 'created_time',
        'last_online_time': 'last_online_time',
        'ip': 'ip',
        'position_description': 'position_description',
        'location': 'location',
        'status': 'status',
        'rsu_model_id': 'rsu_model_id',
        'intersection_id': 'intersection_id',
        'related_edge_num': 'related_edge_num',
        'software_version': 'software_version'
    }
    def __init__(self, rsu_id=None, name=None, description=None, esn=None, last_modified_time=None, created_time=None, last_online_time=None, ip=None, position_description=None, location=None, status=None, rsu_model_id=None, intersection_id=None, related_edge_num=None, software_version=None):
        """RsuDTO - the model defined in the Huawei Cloud SDK.

        (Docstring translated from the original Chinese; see the service
        API reference for the full constraints.)

        :param rsu_id: Unique RSU identifier, generated by the platform when the RSU is created.
        :type rsu_id: str
        :param name: RSU name; 1-128 chars of Chinese characters, letters, digits, '_' or '-'.
        :type name: str
        :param description: Free-form description (restricted punctuation set).
        :type description: str
        :param esn: Device serial number (letters, digits, '_').
        :type esn: str
        :param last_modified_time: Last modification time, format yyyy-MM-dd'T'HH:mm:ss'Z', e.g. 2020-09-01T01:37:01Z.
        :type last_modified_time: datetime
        :param created_time: Creation time, same timestamp format.
        :type created_time: datetime
        :param last_online_time: Last time the RSU was online, same timestamp format.
        :type last_online_time: datetime
        :param ip: RSU IP address, e.g. 127.0.0.1.
        :type ip: str
        :param position_description: User-defined installation position code (1-128 chars of letters, digits, '_').
        :type position_description: str
        :param location: Geographic location of the RSU.
        :type location: :class:`huaweicloudsdkdris.v1.RsuLocation`
        :param status: Device status: ONLINE, OFFLINE, INITIAL or UNKNOWN.
        :type status: str
        :param rsu_model_id: RSU model id assigned when the model is created (MQTT-protocol devices only; 1-36 chars of letters, digits, '-').
        :type rsu_model_id: str
        :param intersection_id: Map intersection id of the RSU's area, formatted region-node_id.
        :type intersection_id: str
        :param related_edge_num: Number of Edges this RSU can be associated with.
        :type related_edge_num: int
        :param software_version: Software version reported by the RSU.
        :type software_version: str
        """
        # Backing fields; the public properties below proxy to these.
        self._rsu_id = None
        self._name = None
        self._description = None
        self._esn = None
        self._last_modified_time = None
        self._created_time = None
        self._last_online_time = None
        self._ip = None
        self._position_description = None
        self._location = None
        self._status = None
        self._rsu_model_id = None
        self._intersection_id = None
        self._related_edge_num = None
        self._software_version = None
        self.discriminator = None

        # Only assign fields that were actually provided, so omitted
        # values stay None and are skipped during serialization.
        if rsu_id is not None:
            self.rsu_id = rsu_id
        if name is not None:
            self.name = name
        if description is not None:
            self.description = description
        if esn is not None:
            self.esn = esn
        if last_modified_time is not None:
            self.last_modified_time = last_modified_time
        if created_time is not None:
            self.created_time = created_time
        if last_online_time is not None:
            self.last_online_time = last_online_time
        if ip is not None:
            self.ip = ip
        if position_description is not None:
            self.position_description = position_description
        if location is not None:
            self.location = location
        if status is not None:
            self.status = status
        if rsu_model_id is not None:
            self.rsu_model_id = rsu_model_id
        if intersection_id is not None:
            self.intersection_id = intersection_id
        if related_edge_num is not None:
            self.related_edge_num = related_edge_num
        if software_version is not None:
            self.software_version = software_version
    @property
    def rsu_id(self):
        """str: unique RSU identifier, generated by the platform at creation."""
        return self._rsu_id

    @rsu_id.setter
    def rsu_id(self, rsu_id):
        """Set the unique RSU identifier."""
        self._rsu_id = rsu_id

    @property
    def name(self):
        """str: RSU name (1-128 chars: Chinese, letters, digits, '_', '-')."""
        return self._name

    @name.setter
    def name(self, name):
        """Set the RSU name."""
        self._name = name

    @property
    def description(self):
        """str: free-form description of the RSU."""
        return self._description

    @description.setter
    def description(self, description):
        """Set the RSU description."""
        self._description = description

    @property
    def esn(self):
        """str: device serial number (letters, digits, '_')."""
        return self._esn

    @esn.setter
    def esn(self, esn):
        """Set the device serial number."""
        self._esn = esn

    @property
    def last_modified_time(self):
        """datetime: last modification time, format yyyy-MM-dd'T'HH:mm:ss'Z'."""
        return self._last_modified_time

    @last_modified_time.setter
    def last_modified_time(self, last_modified_time):
        """Set the last modification time."""
        self._last_modified_time = last_modified_time

    @property
    def created_time(self):
        """datetime: creation time, format yyyy-MM-dd'T'HH:mm:ss'Z'."""
        return self._created_time

    @created_time.setter
    def created_time(self, created_time):
        """Set the creation time."""
        self._created_time = created_time

    @property
    def last_online_time(self):
        """datetime: last time the RSU was online, format yyyy-MM-dd'T'HH:mm:ss'Z'."""
        return self._last_online_time

    @last_online_time.setter
    def last_online_time(self, last_online_time):
        """Set the last-online time."""
        self._last_online_time = last_online_time

    @property
    def ip(self):
        """str: RSU IP address, e.g. 127.0.0.1."""
        return self._ip

    @ip.setter
    def ip(self, ip):
        """Set the RSU IP address."""
        self._ip = ip

    @property
    def position_description(self):
        """str: user-defined installation position code (1-128 chars)."""
        return self._position_description

    @position_description.setter
    def position_description(self, position_description):
        """Set the installation position code."""
        self._position_description = position_description
@property
def location(self):
"""Gets the location of this RsuDTO.
:return: The location of this RsuDTO.
:rtype: :class:`huaweicloudsdkdris.v1.RsuLocation`
"""
return self._location
@location.setter
def location(self, location):
"""Sets the location of this RsuDTO.
:param location: The location of this RsuDTO.
:type location: :class:`huaweicloudsdkdris.v1.RsuLocation`
"""
self._location = location
@property
def status(self):
"""Gets the status of this RsuDTO.
**参数说明**:RSU设备状态。 **取值范围**: - ONLINE:在线 - OFFLINE:离线 - INITIAL:初始化 - UNKNOWN:未知
:return: The status of this RsuDTO.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this RsuDTO.
**参数说明**:RSU设备状态。 **取值范围**: - ONLINE:在线 - OFFLINE:离线 - INITIAL:初始化 - UNKNOWN:未知
:param status: The status of this RsuDTO.
:type status: str
"""
self._status = status
@property
def rsu_model_id(self):
"""Gets the rsu_model_id of this RsuDTO.
**参数说明**:RSU型号ID,用于唯一标识一个RSU型号,在平台创建RSU型号后由平台分配获得,获取方法可参见 [创建RSU型号](https://support.huaweicloud.com/api-v2x/v2x_04_0020.html)。 **取值范围**:长度不低于1不超过36,只允许字母、数字、连接符(-)的组合。 **该字段仅供使用MQTT协议RSU设备的用户输入。使用websocket协议RSU设备的用户需忽略此字段。**
:return: The rsu_model_id of this RsuDTO.
:rtype: str
"""
return self._rsu_model_id
@rsu_model_id.setter
def rsu_model_id(self, rsu_model_id):
"""Sets the rsu_model_id of this RsuDTO.
**参数说明**:RSU型号ID,用于唯一标识一个RSU型号,在平台创建RSU型号后由平台分配获得,获取方法可参见 [创建RSU型号](https://support.huaweicloud.com/api-v2x/v2x_04_0020.html)。 **取值范围**:长度不低于1不超过36,只允许字母、数字、连接符(-)的组合。 **该字段仅供使用MQTT协议RSU设备的用户输入。使用websocket协议RSU设备的用户需忽略此字段。**
:param rsu_model_id: The rsu_model_id of this RsuDTO.
:type rsu_model_id: str
"""
self._rsu_model_id = rsu_model_id
@property
def intersection_id(self):
"""Gets the intersection_id of this RsuDTO.
**参数说明**:在地图中,rsu所在区域对应的路口ID,也即区域ID拼接路口ID,格式为:region-node_id。其中路网最基本的构成即节点和节点之间连接的路段。节点可以是路口,也可以是一条 路的端点。一个节点的ID在同一个区域内是唯一的。
:return: The intersection_id of this RsuDTO.
:rtype: str
"""
return self._intersection_id
@intersection_id.setter
def intersection_id(self, intersection_id):
"""Sets the intersection_id of this RsuDTO.
**参数说明**:在地图中,rsu所在区域对应的路口ID,也即区域ID拼接路口ID,格式为:region-node_id。其中路网最基本的构成即节点和节点之间连接的路段。节点可以是路口,也可以是一条 路的端点。一个节点的ID在同一个区域内是唯一的。
:param intersection_id: The intersection_id of this RsuDTO.
:type intersection_id: str
"""
self._intersection_id = intersection_id
@property
def related_edge_num(self):
"""Gets the related_edge_num of this RsuDTO.
**参数说明**:RSU可关联的Edge的数量。
:return: The related_edge_num of this RsuDTO.
:rtype: int
"""
return self._related_edge_num
@related_edge_num.setter
def related_edge_num(self, related_edge_num):
"""Sets the related_edge_num of this RsuDTO.
**参数说明**:RSU可关联的Edge的数量。
:param related_edge_num: The related_edge_num of this RsuDTO.
:type related_edge_num: int
"""
self._related_edge_num = related_edge_num
@property
def software_version(self):
"""Gets the software_version of this RsuDTO.
**参数说明**:RSU的软件版本,由RSU上报其软件版本。
:return: The software_version of this RsuDTO.
:rtype: str
"""
return self._software_version
@software_version.setter
def software_version(self, software_version):
"""Sets the software_version of this RsuDTO.
**参数说明**:RSU的软件版本,由RSU上报其软件版本。
:param software_version: The software_version of this RsuDTO.
:type software_version: str
"""
self._software_version = software_version
    def to_dict(self):
        """Returns the model properties as a dict.

        Walks the attributes declared in ``self.openapi_types`` and
        recursively serializes nested model objects (anything exposing a
        ``to_dict`` method), whether held directly, in a list, or as dict
        values. Attributes listed in ``self.sensitive_list`` are masked.
        """
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize each list element that is itself a model object.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize dict values that are themselves model objects;
                # plain values pass through unchanged.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    # Mask sensitive attributes instead of exposing them.
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model as JSON.

        NOTE: depends on the third-party ``simplejson`` package. The PY2
        branch forces the process default encoding to UTF-8 (a legacy hack
        so non-ASCII field values serialize without UnicodeDecodeError).
        """
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print` -- delegates to the JSON form produced by to_str()."""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Compares the full instance __dict__, so every RSU field must match.
        if not isinstance(other, RsuDTO):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        # Explicit __ne__ needed for Python 2, which does not derive it
        # from __eq__ (this module still carries six.PY2 branches).
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
cd635e7fde615822a500a7628367f65fe55fcd13 | e673f2fcaa0c9d9dcff45cf0abd1660154cbf9fe | /이것이코딩테스트다/chapter06_정렬/04_두_배열의_원소교체/exam/6-12.py | 62a2fed0e3b758676e96f078b2a843a9ff48ee58 | [] | no_license | seohae2/python_algorithm_day | 87e513426c9d89a4f250aa536fc45dbadd88eb5d | 9f7995c6ca3781859e0ff05d45ced7b0ccdae4c2 | refs/heads/master | 2023-03-14T02:43:47.013763 | 2021-03-02T08:28:01 | 2021-03-02T08:28:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | n, k = map(int, input().split())
# Read both arrays (n and k were read on the preceding line).
arr_a = list(map(int, input().split()))
arr_b = list(map(int, input().split()))

# Greedy: pair a's smallest elements with b's largest, and trade while
# it still increases the sum of a (at most k trades).
arr_a.sort()
arr_b.sort(reverse=True)

for idx in range(k):
    if arr_a[idx] >= arr_b[idx]:
        # No further profitable swap exists.
        break
    arr_a[idx], arr_b[idx] = arr_b[idx], arr_a[idx]

print(sum(arr_a))
| [
"seohae0406@gmail.com"
] | seohae0406@gmail.com |
73810706602a48349c930693d1738574e9c9f5ab | 607e788bdc45684de30a04ab0025c0a2155cc192 | /ipmi.py | d02b198d2ef23dac635fc53652fd2aaf52616d9e | [] | no_license | jenovasephiroth/python | 2a04e8103ddf131a986cc3512463c891b6d09af4 | 8920db29baebabca4301af5af11ef18a35535275 | refs/heads/master | 2020-12-07T03:56:50.479141 | 2018-01-02T01:47:22 | 2018-01-02T01:47:22 | 68,796,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,824 | py | #!/bin/env python
#coding=utf-8
import json
import commands,time
from flask import abort
from flask import Flask
from flask import request
app = Flask(__name__)
def authinfo(post_data):
    """Validate that the JSON body carries the required IPMI credentials.

    Checks 'host', 'user' and 'password' in that order and returns a
    JSON-encoded error message naming the first missing field, or the
    literal string 'pass' when all three are present.

    Original used the Python-2-only dict.has_key() and repeated the same
    branch three times; a single loop with the `in` operator is equivalent
    and also works on Python 3.
    """
    for field in ('host', 'user', 'password'):
        if field not in post_data:
            return json.dumps("You need to provide %s." % field)
    return 'pass'
# Query the BMC SEL (System Event Log) clock.
@app.route('/ipmi/api001/seltime/select', methods=['POST'])
def index_seltime_get():
    """Return `ipmitool sel time get` output for the BMC given in the body.

    Expects JSON {"host", "user", "password"}; responds with the raw
    command output, JSON-encoded.
    """
    post_data = request.json
    return_info = authinfo(post_data)
    if return_info != "pass":
        return return_info
    # NOTE(review): host/user/password are interpolated unescaped into a
    # shell command (commands.getoutput runs via the shell) -- command
    # injection risk for untrusted callers.
    result = commands.getoutput("/usr/bin/ipmitool -I lanplus -H %s -U %s -P %s sel time get" % (post_data["host"], post_data["user"], post_data["password"]))
    return json.dumps(result)
# Set the BMC SEL (System Event Log) clock.
@app.route('/ipmi/api001/seltime/set/', methods=['POST'])
def index_seltime_set():
    """Set the SEL time from a Unix timestamp in the JSON body.

    Expects {"host", "user", "password", "timestamp"}; the timestamp must
    be a decimal string and is rendered as local time in
    MM/DD/YYYY HH:MM:SS form for ipmitool.
    """
    post_data = request.json
    return_info = authinfo(post_data)
    if return_info != "pass":
        return return_info
    if not post_data.has_key('timestamp'):
        return json.dumps("You need to provide timestamp.")
    # isdigit() rejects non-numeric strings (and any non-string would crash
    # earlier on the attribute lookup).
    if not post_data["timestamp"].isdigit():
        return json.dumps("You need to provide timestamp.")
    x = time.localtime(int(post_data["timestamp"]))
    x = time.strftime('%m/%d/%Y %H:%M:%S',x)
    # NOTE(review): credentials interpolated unescaped into a shell
    # command -- injection risk.
    result = commands.getoutput("/usr/bin/ipmitool -I lanplus -H %s -U %s -P %s sel time set '%s'" % (post_data["host"], post_data["user"], post_data["password"], x))
    return json.dumps(result)
# Chassis power control: status/off/soft/on/reset.
@app.route('/ipmi/api001/power/<action>', methods=['POST'])
def index_power(action):
    """Run `ipmitool power <action>` against the BMC given in the body.

    Expects JSON {"host", "user", "password"}. Unknown actions get the
    message "This method is not supported." instead of a command run.

    The original used a run-once `while ...: break / else:` construct as a
    conditional; a plain membership test is equivalent and readable.
    """
    method_list = ["status", "off", "soft", "on", "reset"]
    post_data = request.json
    return_info = authinfo(post_data)
    if return_info != "pass":
        return return_info
    if action in method_list:
        # NOTE(review): credentials interpolated unescaped into a shell
        # command (commands.getoutput runs via the shell) -- injection risk.
        commands_str = "/usr/bin/ipmitool -I lanplus -H %s -U %s -P %s power %s" % (post_data["host"], post_data["user"], post_data["password"], action)
        result = commands.getoutput(commands_str)
    else:
        result = "This method is not supported."
    return json.dumps(result)
# Boot-device selection: pxe/disk/cdrom.
@app.route('/ipmi/api001/bootdev/<option>', methods=['POST'])
def index_bootdev(option):
    """Run `ipmitool chassis bootdev <option>` for the BMC in the body.

    Expects JSON {"host", "user", "password"}. Unsupported options get the
    message "This option is not supported." instead of a command run.

    The original used a run-once `while ...: break / else:` construct as a
    conditional; a plain membership test is equivalent and readable.
    """
    start_list = ["pxe", "disk", "cdrom"]
    post_data = request.json
    return_info = authinfo(post_data)
    if return_info != "pass":
        return return_info
    if option in start_list:
        # NOTE(review): credentials interpolated unescaped into a shell
        # command (commands.getoutput runs via the shell) -- injection risk.
        commands_str = "/usr/bin/ipmitool -I lanplus -H %s -U %s -P %s chassis bootdev %s" % (post_data["host"], post_data["user"], post_data["password"], option)
        result = commands.getoutput(commands_str)
    else:
        result = "This option is not supported."
    return json.dumps(result)
# Show the BMC's LAN configuration for a channel.
@app.route('/ipmi/api001/lanprint/<int:id>', methods=['POST'])
def index_lanprint(id):
    """Return `ipmitool lan print <id>` output for the BMC in the body.

    Expects JSON {"host", "user", "password"}; `id` is the LAN channel.
    NOTE: the parameter name `id` shadows the builtin.
    """
    post_data = request.json
    return_info = authinfo(post_data)
    if return_info != "pass":
        return return_info
    # NOTE(review): credentials interpolated unescaped into a shell
    # command (commands.getoutput runs via the shell) -- injection risk.
    commands_str = "/usr/bin/ipmitool -I lanplus -H %s -U %s -P %s lan print %d" % (post_data["host"], post_data["user"], post_data["password"], id)
    result = commands.getoutput(commands_str)
    return json.dumps(result)
# Show the currently active IPMI sessions on the server side.
@app.route('/ipmi/api001/ActiveSession', methods=['POST'])
def index_ActiveSession():
    """Return `ipmitool session info active` output for the BMC in the body.

    Expects JSON {"host", "user", "password"}.
    """
    post_data = request.json
    return_info = authinfo(post_data)
    if return_info != "pass":
        return return_info
    # NOTE(review): credentials interpolated unescaped into a shell
    # command (commands.getoutput runs via the shell) -- injection risk.
    commands_str = "/usr/bin/ipmitool -I lanplus -H %s -U %s -P %s session info active" % (post_data["host"], post_data["user"], post_data["password"])
    result = commands.getoutput(commands_str)
    return json.dumps(result)
# Show management-controller (BMC) information.
@app.route('/ipmi/api001/BMCInfo', methods=['POST'])
def index_BMCInfo():
    """Return `ipmitool mc info` output for the BMC in the body.

    Expects JSON {"host", "user", "password"}.
    """
    post_data = request.json
    return_info = authinfo(post_data)
    if return_info != "pass":
        return return_info
    # NOTE(review): credentials interpolated unescaped into a shell
    # command (commands.getoutput runs via the shell) -- injection risk.
    commands_str = "/usr/bin/ipmitool -I lanplus -H %s -U %s -P %s mc info" % (post_data["host"], post_data["user"], post_data["password"])
    result = commands.getoutput(commands_str)
    return json.dumps(result)
# Sensor Data Repository (SDR) listing.
@app.route('/ipmi/api001/SDRList', methods=['POST'])
def index_SDRList():
    """Return `ipmitool sdr elist full` output for the BMC in the body.

    Expects JSON {"host", "user", "password"}.
    """
    post_data = request.json
    return_info = authinfo(post_data)
    if return_info != "pass":
        return return_info
    # NOTE(review): credentials interpolated unescaped into a shell
    # command (commands.getoutput runs via the shell) -- injection risk.
    commands_str = "/usr/bin/ipmitool -I lanplus -H %s -U %s -P %s sdr elist full" % (post_data["host"], post_data["user"], post_data["password"])
    result = commands.getoutput(commands_str)
    return json.dumps(result)
if __name__ == "__main__":
    # Defect fixed: stray HTML markup ("<p> </p>") was fused onto this line,
    # making the file a syntax error.
    # NOTE(review): debug=True on a non-loopback bind exposes the Werkzeug
    # debugger (remote code execution) -- disable debug in production.
    app.run(debug=True, host='10.0.0.60', port=80)
| [
"noreply@github.com"
] | jenovasephiroth.noreply@github.com |
0e76f5b7e1bce205064a97e4dfe6ac23081a9c5e | f3b233e5053e28fa95c549017bd75a30456eb50c | /ptp1b_input/L76/76-66_wat_20Abox/set_5.py | 9a7fa059848ab96d10053cfa581c6019abfcee52 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | import os
# Per-lambda-window setup for a thermodynamic-integration (TI) run:
# copies templated input/PBS files into each window directory and
# substitutes the lambda value for the XXX placeholder.
# NOTE: `dir` shadows the builtin of the same name.
dir = '/mnt/scratch/songlin3/run/ptp1b/L76/wat_20Abox/ti_one-step/76_66/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_5.in'
temp_pbs = filesdir + 'temp_5.pbs'
# Lambda values -- presumably the TI integration windows; TODO confirm.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    # Each window directory is named by the lambda value, e.g. "0.00922".
    os.chdir("%6.5f" %(j))
    workdir = dir + "%6.5f" %(j) + '/'
    # prodin: copy the production-input template and substitute lambda.
    prodin = workdir + "%6.5f_prod_5.in" %(j)
    os.system("cp %s %s" %(temp_prodin, prodin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
    # PBS: copy the batch-script template and substitute lambda.
    pbs = workdir + "%6.5f_5.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    # submit pbs (submission step left disabled)
    #os.system("qsub %s" %(pbs))
    os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
217349a2689d210260ece6f542b88352d0771f78 | 4fb1132d9472e663a32095aeac480987e3688aff | /lesson_4/task_4.2.py | c297d5c472e494e00660455695644feb89a86bd9 | [] | no_license | alrexo/itea_python_adv | 70bb3f59bba434da9d06aa5f11b36faa2c71009b | 10de8246f8f01bee8be84201e2148ecf23587e65 | refs/heads/master | 2022-12-14T05:31:34.204751 | 2018-12-05T21:37:16 | 2018-12-05T21:37:16 | 152,499,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,801 | py | from abc import ABC, abstractmethod
class ValidationError(AssertionError):
    """Raised when a context value fails validation.

    NOTE(review): only NumberContext.__init__ raises this; the other
    context classes raise TypeError for the same kind of failure.
    """
    pass
class NumberBaseContext(ABC):
    """Abstract interface for a variable context: validated storage with
    attribute access, length, string form and iteration."""

    @abstractmethod
    def __init__(self, **kwargs):
        # Accepts arbitrary keyword variables to store.
        pass

    @abstractmethod
    def validate(self, value):
        # Should return True if `value` may be stored in this context.
        pass

    @abstractmethod
    def __getattr__(self, item):
        # Attribute-style read access to stored variables.
        pass

    @abstractmethod
    def __setattr__(self, key, value):
        # Attribute-style write access with validation.
        pass

    @abstractmethod
    def __len__(self):
        # Number of stored variables.
        pass

    @abstractmethod
    def __str__(self):
        # Human-readable representation.
        pass

    @abstractmethod
    def __iter__(self):
        # Iterate over stored variables.
        pass
class Context(dict, NumberBaseContext):
    """Generic variable container: dict-backed storage with attribute access."""

    def __init__(self, **kwargs):
        """Constructor of the Context class.

        Accepts any number of keyword variables; each value is validated
        and stored via __setattr__.
        """
        super().__init__()
        if any(not self.validate(val) for val in kwargs.values()):
            raise TypeError("Validation error")
        for name, val in kwargs.items():
            self.__setattr__(name, val)

    def validate(self, value):
        # The base context accepts any object.
        return isinstance(value, object)

    def __getattr__(self, item):
        """Returns variable's value."""
        return self[item]

    def __setattr__(self, key, value):
        """Sets a value for a variable.

        Raises TypeError on an invalid value and NameError when the name
        is not a valid Python identifier.
        """
        if not self.validate(value):
            raise TypeError("Validation error")
        if not key.isidentifier():
            raise NameError
        self[key] = value

    def __len__(self):
        """Returns number of variables."""
        return len(dict.keys(self))

    def __str__(self):
        """Represents class as a string."""
        pairs = ("{}={}".format(key, val) for key, val in self.items())
        return "Class({})".format(", ".join(pairs))

    def __iter__(self):
        """Yields "name=value" strings, one per stored variable."""
        return ("{}={}".format(key, val) for key, val in self.items())
class RealContext(Context):
    """Context restricted to real numbers: int, float, or a complex value
    whose imaginary part is zero (bool passes too, being an int subclass)."""

    def __init__(self, **kwargs):
        # Pre-check gives the specific message; Context.__init__ then
        # validates again via self.validate (redundant but harmless).
        if not all(self.validate(v) for k, v in kwargs.items()):
            raise TypeError("Given value is not a real number")
        super().__init__(**kwargs)

    def validate(self, value):
        return isinstance(value, (int, float)) or (isinstance(value, complex) and value.imag == 0)

    def __setattr__(self, key, value):
        if not self.validate(value):
            raise TypeError("Given value is not a real number")
        super().__setattr__(key, value)
class ComplexContext(Context):
    """Context restricted to genuinely complex numbers (non-zero
    imaginary part); mirror image of RealContext."""

    def __init__(self, **kwargs):
        # Pre-check gives the specific message; Context.__init__ then
        # validates again via self.validate (redundant but harmless).
        if not all(self.validate(v) for k, v in kwargs.items()):
            raise TypeError("Given value is not a complex number")
        super().__init__(**kwargs)

    def validate(self, value):
        return isinstance(value, complex) and value.imag != 0

    def __setattr__(self, key, value):
        if not self.validate(value):
            raise TypeError("Given value is not a complex number")
        super().__setattr__(key, value)
class NumberContext(RealContext, ComplexContext):
    """Context accepting any number, real or complex.

    MRO is NumberContext -> RealContext -> ComplexContext -> Context; the
    inherited checks all call self.validate, so this class's validate
    governs every path.
    """

    def __init__(self, **kwargs):
        # NOTE(review): raises ValidationError here but TypeError in
        # __setattr__ -- inconsistent exception types for the same failure.
        if not all(self.validate(v) for k, v in kwargs.items()):
            raise ValidationError("Given value is not a number")
        super().__init__(**kwargs)

    def validate(self, value):
        # Delegates by constructing throwaway sibling instances on each call.
        return RealContext().validate(value) or ComplexContext().validate(value)

    def __setattr__(self, key, value):
        if not self.validate(value):
            raise TypeError("Given value is not a number")
        super().__setattr__(key, value)
# Demo: exercise the three contexts with an int, a float and a complex.
inr = 5
flt = 8.5
com = 20j
# RealContext accepts ints/floats but rejects a true complex number.
real = RealContext()
real.inr = inr
print(real.inr)
real.flt = flt
print(real.flt)
try:
    real.com = com
except TypeError as e:
    print(com, repr(e))
# ComplexContext is the mirror image: only non-real complex values.
comp = ComplexContext()
comp.com = com
print(comp.com)
try:
    comp.inr = 20
except TypeError as e:
    print(inr, repr(e))
# NumberContext accepts all of the above.
num = NumberContext()
num.inr = inr
num.flt = flt
num.com = com
print(num.inr, num.flt, num.com)
| [
"lekskeks@yahoo.de"
] | lekskeks@yahoo.de |
eca1497d1126c404237da5f7a818203d6edaf519 | 9bf4d08c786de8c1bd2a737aa4d8cdcd7ff02c42 | /minecraftcodex/database/migrations/0004_auto__add_jarfile.py | f2f1e16e4c708a6ee8c3a5d371e06807a1aa4216 | [] | no_license | fmartingr/minecraftcodex | 3e7d626b4c70bb80796de7b5714b33311c3c5949 | 94d51dac790a30d16d9cd4198bfd514c2242db2e | refs/heads/master | 2020-04-06T07:13:18.043455 | 2013-09-17T14:13:49 | 2013-09-17T14:13:49 | 12,439,100 | 0 | 0 | null | 2019-03-06T09:04:58 | 2013-08-28T16:41:38 | CSS | UTF-8 | Python | false | false | 2,437 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: introduces the JarFile model (a download
    URL tied to a Version)."""

    def forwards(self, orm):
        # Adding model 'JarFile'
        db.create_table(u'database_jarfile', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('version', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['database.Version'])),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
        ))
        db.send_create_signal('database', ['JarFile'])

    def backwards(self, orm):
        # Deleting model 'JarFile'
        db.delete_table(u'database_jarfile')

    # Frozen ORM state: South uses this snapshot to build the `orm`
    # object passed to forwards()/backwards().
    models = {
        'database.jarfile': {
            'Meta': {'object_name': 'JarFile'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['database.Version']"})
        },
        'database.mod': {
            'Meta': {'ordering': "['name']", 'object_name': 'Mod'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'database.version': {
            'Meta': {'ordering': "['date']", 'object_name': 'Version'},
            'changelog': ('django.db.models.fields.TextField', [], {}),
            'date': ('django.db.models.fields.DateField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mod': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['database.Mod']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'release'", 'max_length': '10', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'version_number': ('django.db.models.fields.CharField', [], {'default': "'0.1.0'", 'max_length': '256'})
        }
    }
complete_apps = ['database'] | [
"fmartin@bytepix.com"
] | fmartin@bytepix.com |
43df39f468035492466b6495a11e168aee5b6eac | eaf921d22d1d42d70b5f49d8f97f42e27ad5c16f | /Chap01/Liste/dico_param.py | e4005fcda7d0121daa7eab24dfb31e5bd1debe77 | [] | no_license | atastet/Python_openclassroom | 0f3ff87c44527edde4722a311bf3bbece123edfd | 37992f3eb8937b7480aedbbd83152451eaa8c6b8 | refs/heads/master | 2020-03-19T13:48:26.439159 | 2019-01-30T20:25:30 | 2019-01-30T20:25:30 | 136,595,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | #!/usr/bin/python3.4
# -*-coding:Utf-8
def fonction_inconnue(*en_liste, **en_dico):
    """Demo of variadic parameters: *en_liste collects the positional
    arguments as a tuple, **en_dico collects keyword arguments as a dict;
    prints both (message text intentionally left in French)."""
    print("J'ai reçu : {} en param et {} en param nommés".format(en_liste, en_dico))
fonction_inconnue(1, 2, 3, Zizou = "Goal", Barthez = "Arret")
| [
"anthonytastet@macbook-pro-de-myriam-2.home"
] | anthonytastet@macbook-pro-de-myriam-2.home |
a6c6cd826787dbeadae49c7e749dae1d3b6a816e | a0c838b8ed5e5549edb6cebc46b1160d2bc6c6f7 | /jupyter_execute/stubs/qiskit.circuit.library.PolynomialPauliRotations.draw.py | 23a9d1eb1877428e3812792a69dd62d10a1dd459 | [] | no_license | NatchapolColab/qiskit-translations | 2afd7d2a2b70ded45675a837935c1126eb7e701d | 63c7b13ee4ed2d31bded0b6dcba875371b18f6e8 | refs/heads/main | 2023-08-27T14:58:20.017314 | 2021-11-09T19:35:41 | 2021-11-09T19:35:41 | 427,694,754 | 0 | 0 | null | 2021-11-13T14:58:46 | 2021-11-13T14:58:46 | null | UTF-8 | Python | false | false | 334 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# Build a one-qubit circuit (Hadamard then measurement) and render it
# with matplotlib on a light-grey background.
# NOTE: circuit_drawer is imported but unused here.
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.tools.visualization import circuit_drawer

q = QuantumRegister(1)
c = ClassicalRegister(1)
qc = QuantumCircuit(q, c)
qc.h(q)            # put the qubit into superposition
qc.measure(q, c)   # measure into the classical register
qc.draw(output='mpl', style={'backgroundcolor': '#EEEEEE'})
| [
"soolu.elto@gmail.com"
] | soolu.elto@gmail.com |
041a250546f744135c6f4d088881c3db2b965d8d | 255a2a56f8e14a23b33827147080e758184509dc | /using_csv_module.py | 8934755746ed749d55f09098bb620f94f8add8e1 | [] | no_license | catlaughing/Data-Wrangling-with-Python | d8a2455557a9b8821570c2348890111da7ffd5d5 | 2e5e1f8fb8f42fa76af2269e9c204948f4dd2904 | refs/heads/master | 2020-04-14T10:02:51.472897 | 2019-01-02T00:25:23 | 2019-01-02T00:25:23 | 163,776,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,443 | py | #!/usr/bin/env python
"""
Your task is to process the supplied file and use the csv module to extract data from it.
The data comes from NREL (National Renewable Energy Laboratory) website. Each file
contains information from one meteorological station, in particular - about amount of
solar and wind energy for each hour of day.
Note that the first line of the datafile is neither data entry, nor header. It is a line
describing the data source. You should extract the name of the station from it.
The data should be returned as a list of lists (not dictionaries).
You can use the csv modules "reader" method to get data in such format.
Another useful method is next() - to get the next line from the iterator.
You should only change the parse_file function.
"""
import csv
import os
DATADIR = ""
DATAFILE = "745090.csv"
def parse_file(datafile):
    """Parse an NREL meteorological-station CSV file.

    The first line describes the data source; the station name is its
    first double-quoted field. The second line (column headers) is
    skipped. All remaining rows are returned as lists of strings.

    Fixes: the original opened the file in binary mode ('rb') and used
    the Python-2-only f.next(), both of which break csv parsing on
    Python 3; text mode and the next() builtin work on both.

    :param datafile: path to the CSV file
    :return: (station_name, rows) where rows is a list of lists
    """
    with open(datafile, 'r') as f:
        first_line = next(f)   # data-source description line
        next(f)                # skip the column-header line
        data = list(csv.reader(f))
    # Station name is the first quoted field of the description line.
    name = first_line.split('"')[1]
    return (name, data)
def test():
    """Smoke test for parse_file; the course's expected-value assertions
    were left disabled below."""
    datafile = os.path.join(DATADIR, DATAFILE)
    name, data = parse_file(datafile)
    # assert name == "MOUNTAIN VIEW MOFFETT FLD NAS"
    # assert data[0][1] == "01:00"
    # assert data[2][0] == "01/01/2005"
    # assert data[2][5] == "2"
if __name__ == "__main__":
test()
| [
"shadieqcool@gmail.com"
] | shadieqcool@gmail.com |
32ce329cb064312e79c1a68a67a632992fb41fd8 | 5d30adb04a89e227412af50ed3a8bdb614bf33b5 | /project_app/migrations/0020_auto_20181108_1133.py | ee8a8079a16f29c3b795e8ffa50bc7fabb65fb9c | [] | no_license | bladejun/IHAQ | 13c8cffc6fd9c6e1eb8ba8412f055df22f477567 | f04707d30ea88abb4f8acab89046298f8d848b76 | refs/heads/master | 2020-04-10T01:25:19.355786 | 2018-12-06T18:36:46 | 2018-12-06T18:36:46 | 160,715,410 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | # Generated by Django 2.0.1 on 2018-11-08 02:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Renames ClassNode.class_code to class_id and refreshes the
    auto-generated default on ChatNode.created_date."""

    dependencies = [
        ('project_app', '0019_auto_20181108_1131'),
    ]

    operations = [
        migrations.RenameField(
            model_name='classnode',
            old_name='class_code',
            new_name='class_id',
        ),
        migrations.AlterField(
            model_name='chatnode',
            name='created_date',
            # NOTE(review): this default is the wall-clock time captured
            # when `makemigrations` ran, not "now" at insert time.
            field=models.CharField(default='2018-11-08 11:33:05', max_length=50),
        ),
    ]
"yyj940@naver.com"
] | yyj940@naver.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.