blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4ee5adc760fb98dc1ad8b0d45a0f91d8a109654b | 4dc4659d77107a58cc57061c455728c39c1c0b99 | /python/leap/leap.py | b9394c7549afa1240f1c3beeec3ea9e36d4a22bd | [] | no_license | dbjennings/exercism | 1f9905b6c8e0f8889c1448e4c178c9066b53ece3 | f9e86cb7f742d13256a6bea1b51d6f9d9f4b19a3 | refs/heads/main | 2023-03-11T13:54:15.725797 | 2021-02-27T16:38:32 | 2021-02-27T16:38:32 | 333,178,736 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | def leap_year(year: int) -> bool:
return True if (year%4==0 and not year%100==0) or year%400==0 else False
| [
"dbjennings@gmail.com"
] | dbjennings@gmail.com |
6054154ce67ea761c4681748339567f9bb5858b2 | d71e4a1903c797365278aba561b266c897a09bf5 | /chat/migrations/0001_initial.py | 2b8ac9a170fe2d3c5606d27a61c3e3f8821271ad | [] | no_license | sprajosh/ping-me | 68127fbd6354753ab08297f331715789c3a72376 | ea5c3e7ded8e25d3aa8ff05594a8bf1c78ecf9c6 | refs/heads/master | 2022-09-07T08:33:44.239854 | 2020-05-18T11:26:42 | 2020-05-18T11:26:42 | 262,297,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,821 | py | # Generated by Django 3.0.6 on 2020-05-16 10:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the chat app: Contact, Message and ChatRoom.

    Auto-generated by Django's makemigrations; applied migrations must stay
    byte-stable across deployments, so only comments are added here.
    """
    # First migration for this app.
    initial = True
    # The user model is swappable, so depend on whatever AUTH_USER_MODEL resolves to.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Contact: wraps a user and holds a self-referential "friends" M2M.
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('friends', models.ManyToManyField(blank=True, related_name='_contact_friends_+', to='chat.Contact')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='friends', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Message: text content stamped at creation, owned by a Contact.
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('contact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='messages', to='chat.Contact')),
            ],
        ),
        # ChatRoom: named room with participants; NOTE(review) the FK here means a
        # room points at a single Message row despite the plural name — confirm intent.
        migrations.CreateModel(
            name='ChatRoom',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('room', models.CharField(max_length=100)),
                ('messages', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='chat.Message')),
                ('participants', models.ManyToManyField(blank=True, related_name='chats', to='chat.Contact')),
            ],
        ),
    ]
| [
"siddharth.prajosh@belong.co"
] | siddharth.prajosh@belong.co |
0f124d52fda3708bea9657d229b2ee1e1952a80a | 492693e586c0beb1a38344f10c7366c4739bf062 | /syldb/parser/__init__.py | 2887bc0b275e461ed8ad4c449ff49b3a80e9e3c1 | [] | no_license | qihao123/my_database | 6794bd95bf3865616e1611117dcda702ffdc3019 | 8ae0b4e1a1efaf2a4f9442ec193203580bbd9f6c | refs/heads/master | 2022-12-14T00:57:29.043673 | 2020-09-23T00:45:12 | 2020-09-23T00:45:12 | 297,696,222 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,183 | py | import re
from syldb.case import *
class SQLParser:
    """Parse textual SQL-like statements into action dicts.

    ``parse`` returns a dict with at least a ``type`` key, plus
    statement-specific keys (``table``, ``fields``, ``data``, ...) and a
    ``conditions`` dict mapping field names to condition-case objects
    (imported from ``syldb.case``) built from the WHERE clause.
    """
    def __init__(self):
        # Regex per SQL verb; each group alternation accepts the keyword in
        # upper- or lower-case (mixed case like "Select" is NOT matched).
        self.__pattern_map = {
            'SELECT': r'(SELECT|select) (.*) (FROM|from) (.*)',
            'UPDATE': r'(UPDATE|update) (.*) (SET|set) (.*)',
            'INSERT': r'(INSERT|insert) (INTO|into) (.*)\((.*)\) (VALUES|values)\((.*)\)'
        }
        # Dispatch table: upper-cased first token -> handler building the action dict.
        self.__action_map = {
            'SELECT': self.__select,
            'UPDATE': self.__update,
            'DELETE': self.__delete,
            'INSERT': self.__insert,
            'USE': self.__use,
            'EXIT': self.__exit,
            'QUIT': self.__exit,
            'SHOW': self.__show,
            'DROP': self.__drop
        }
        # WHERE-clause operator token -> condition case class.
        self.SYMBOL_MAP = {
            'IN': InCase,
            'NOT_IN': NotInCase,
            '>': GreaterCase,
            '<': LessCase,
            '=': IsCase,
            '!=': IsNotCase,
            '>=': GAECase,
            '<=': LAECase,
            'LIKE': LikeCase,
            'RANGE': RangeCase
        }
    # Drop empty tokens and the AND connector from a token list.
    def __filter_space(self, obj):
        ret = []
        for x in obj:
            if x.strip() == '' or x.strip() == 'AND':
                continue
            ret.append(x)
        return ret
    # Parse one raw statement string into an action dict (raises on bad syntax).
    def parse(self, statement):
        tmp_s = statement
        # Split off the WHERE clause; lower-case "where" is tried first,
        # otherwise upper-case "WHERE" (mixed case is not handled).
        if 'where' in statement:
            statement = statement.split("where")
        else:
            statement = statement.split("WHERE")
        # Basic SQL keywords are space-separated; tokenize the head on spaces.
        base_statement = self.__filter_space(statement[0].split(" "))
        # A statement needs at least two tokens unless it is exit/quit.
        # NOTE(review): the membership test compares the raw (possibly upper-case)
        # token against lower-case 'exit'/'quit' — confirm 'EXIT' alone should error.
        if len(base_statement) < 2 and base_statement[0] not in ['exit', 'quit']:
            raise Exception('Syntax Error for: %s' % tmp_s)
        # __action_map keys are upper-case, so normalize the verb.
        action_type = base_statement[0].upper()
        if action_type not in self.__action_map:
            raise Exception('Syntax Error for: %s' % tmp_s)
        # Delegate to the verb-specific handler.
        action = self.__action_map[action_type](base_statement)
        if action is None or 'type' not in action:
            raise Exception('Syntax Error for: %s' % tmp_s)
        action['conditions'] = {}
        conditions = None
        if len(statement) == 2:
            conditions = self.__filter_space(statement[1].split(" "))
        if conditions:
            # Conditions come in (field, operator, operand) triples.
            for index in range(0, len(conditions), 3):
                field = conditions[index]
                symbol = conditions[index + 1].upper()
                condition = conditions[index + 2]
                if symbol == 'RANGE':
                    # RANGE takes "(start,end)" -> two constructor arguments.
                    condition_tmp = condition.replace("(", '').replace(")", '').split(",")
                    start = condition_tmp[0]
                    end = condition_tmp[1]
                    case = self.SYMBOL_MAP[symbol](start, end)
                elif symbol == 'IN' or symbol == 'NOT_IN':
                    # IN/NOT_IN take "(a,b,c)" -> a list of values.
                    condition_tmp = condition.replace("(", '').replace(")", '').replace(" ", '').split(",")
                    condition = condition_tmp
                    case = self.SYMBOL_MAP[symbol](condition)
                else:
                    case = self.SYMBOL_MAP[symbol](condition)
                action['conditions'][field] = case
        return action
    # Compile (via the re module's internal cache) the regex for a verb.
    def __get_comp(self, action):
        return re.compile(self.__pattern_map[action])
    # SELECT <fields> FROM <table> -> {'type': 'search', ...}
    def __select(self, statement):
        comp = self.__get_comp('SELECT')
        ret = comp.findall(" ".join(statement))
        if ret and len(ret[0]) == 4:
            fields = ret[0][1]
            table = ret[0][3]
            if fields != '*':
                fields = [field.strip() for field in fields.split(",")]
            return {
                'type': 'search',
                'fields': fields,
                'table': table
            }
        return None
    # UPDATE <table> SET f=v[, ...] -> {'type': 'update', 'data': {...}}
    def __update(self, statement):
        statement = ' '.join(statement)
        comp = self.__get_comp('UPDATE')
        ret = comp.findall(statement)
        if ret and len(ret[0]) == 4:
            data = {
                'type': 'update',
                'table': ret[0][1],
                'data': {}
            }
            set_statement = ret[0][3].split(",")
            for s in set_statement:
                s = s.split("=")
                field = s[0].strip()
                value = s[1].strip()
                # Quoted operand -> string value; otherwise it must parse as int.
                if "'" in value or '"' in value:
                    value = value.replace('"', '').replace("'", '').strip()
                else:
                    try:
                        value = int(value.strip())
                    except:
                        # NOTE(review): bare except silently maps any parse failure
                        # (including float values) to "invalid statement".
                        return None
                data['data'][field] = value
            return data
        return None
    # DELETE FROM <table>; the table name is taken positionally from token 3.
    def __delete(self, statement):
        return {
            'type': 'delete',
            'table': statement[2]
        }
    # INSERT INTO <table>(<fields>) VALUES(<values>) -> {'type': 'insert', ...}
    def __insert(self, statement):
        comp = self.__get_comp('INSERT')
        ret = comp.findall(" ".join(statement))
        if ret and len(ret[0]) == 6:
            ret_tmp = ret[0]
            data = {
                'type': 'insert',
                'table': ret_tmp[2],
                'data': {}
            }
            fields = ret_tmp[3].split(",")
            values = ret_tmp[5].split(",")
            # NOTE(review): field names are not .strip()ped here, so
            # "a, b" yields a key " b" — confirm callers tolerate that.
            for i in range(0, len(fields)):
                field = fields[i]
                value = values[i]
                if "'" in value or '"' in value:
                    value = value.replace('"', '').replace("'", '').strip()
                else:
                    try:
                        value = int(value.strip())
                    except:
                        return None
                data['data'][field] = value
            return data
        return None
    # USE <database>: select the working database.
    def __use(self, statement):
        return {
            'type': 'use',
            'database': statement[1]
        }
    # EXIT / QUIT: terminate the session.
    def __exit(self, _):
        return {
            'type': 'exit'
        }
    # SHOW DATABASES | SHOW TABLES (returns None for anything else).
    def __show(self, statement):
        kind = statement[1]
        if kind.upper() == 'DATABASES':
            return {
                'type': 'show',
                'kind': 'databases'
            }
        if kind.upper() == 'TABLES':
            return {
                'type': 'show',
                'kind': 'tables'
            }
    # DROP DATABASE <name> | DROP TABLE <name> (returns None for anything else).
    def __drop(self, statement):
        kind = statement[1]
        if kind.upper() == 'DATABASE':
            return {
                'type': 'drop',
                'kind': 'database',
                'name': statement[2]
            }
        if kind.upper() == 'TABLE':
            return {
                'type': 'drop',
                'kind': 'table',
                'name': statement[2]
            }
"1047355811@qq.com"
] | 1047355811@qq.com |
634608758f1e4decc94b0dc35144bfd9eade1a4d | 58830432068a820ccf391c9cba715e2f10ae1bd0 | /status_codes.py | 90d71d5538c43af4cb5e77a70006a694a349c906 | [
"Apache-2.0"
] | permissive | Imafikus/petnica-api-workshop | 1e1d5d6a67778482a679c2f9731a0691e9cba368 | 34c14b8e444007fcfbece8225a22916cf0a32940 | refs/heads/master | 2023-08-20T17:42:23.532738 | 2021-10-23T17:08:54 | 2021-10-23T17:08:54 | 419,417,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | OK = 200
# Numeric HTTP status codes used by the API handlers.
NO_CONTENT = 204  # success, no response body
BAD_REQUEST = 400  # malformed client request
NOT_FOUND = 404  # resource does not exist
SERVICE_UNAVAILABLE = 503  # server temporarily cannot handle the request
INTERNAL_SERVER_ERROR = 500  # unhandled server-side failure
"aleksatesicteske@gmail.com"
] | aleksatesicteske@gmail.com |
0bcd207f5832badf32d6c99d3f945855f3adf92c | 2913a762605296fd4b51b9b1c01516e2d7cc73ce | /tasks/cephfs/test_readahead.py | 39f4fb88ce14d9da50d7fe8de9e39ee015046829 | [] | no_license | vishalkanaujia/ceph-qa-suite | 7ac70f9e9d494f1901716af1cc98f8e86ac76a58 | 02e7e8395b043355900067c375f982c0fd14630c | refs/heads/master | 2020-12-25T21:01:33.061979 | 2016-12-07T17:18:42 | 2016-12-07T17:18:42 | 62,205,524 | 0 | 0 | null | 2016-06-29T07:32:03 | 2016-06-29T07:32:03 | null | UTF-8 | Python | false | false | 1,113 | py | import logging
from tasks.cephfs.cephfs_test_case import CephFSTestCase
log = logging.getLogger(__name__)
class TestReadahead(CephFSTestCase):
    """Check that client readahead batches remote reads instead of issuing
    one objecter read per local read."""
    def test_flush(self):
        """Write 32MB, drop the client cache by remounting, read the file
        back in 128k chunks, and require fewer than 32 remote reads."""
        # Create 32MB file
        self.mount_a.run_shell(["dd", "if=/dev/urandom", "of=foo", "bs=1M", "count=32"])
        # Unmount and remount the client to flush cache
        self.mount_a.umount_wait()
        self.mount_a.mount()
        self.mount_a.wait_until_mounted()
        # Baseline objecter remote-read counter before the test reads.
        initial_op_r = self.mount_a.admin_socket(['perf', 'dump', 'objecter'])['objecter']['op_r']
        self.mount_a.run_shell(["dd", "if=foo", "of=/dev/null", "bs=128k", "count=32"])
        op_r = self.mount_a.admin_socket(['perf', 'dump', 'objecter'])['objecter']['op_r']
        assert op_r >= initial_op_r
        # Number of remote reads issued by our 32 local reads.
        op_r -= initial_op_r
        log.info("read operations: {0}".format(op_r))
        # with exponentially increasing readahead, we should see fewer than 10 operations
        # but this test simply checks if the client is doing a remote read for each local read
        if op_r >= 32:
            raise RuntimeError("readahead not working")
| [
"batrick@batbytes.com"
] | batrick@batbytes.com |
5c6a2a1f780be7309e047a5135b710121c1f09a1 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2008/programming/languages/python/pysvn/actions.py | 1c2d0e6973941091eee66958f57fdd2fde4e9402 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005-2008 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import shelltools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
WorkDir = "pysvn-%s/Source" % get.srcVERSION()
def setup():
    """Configure the pysvn extension build by running its setup script."""
    shelltools.system("python setup.py configure")
def build():
    """Compile the package with make."""
    autotools.make()
def install():
    """Install the pysvn Python module (init file and compiled extension)
    into the current Python's site-packages."""
    pisitools.insinto("/usr/lib/%s/site-packages/pysvn" % get.curPYTHON(), "pysvn/__init__.py")
    pisitools.insinto("/usr/lib/%s/site-packages/pysvn" % get.curPYTHON(), "pysvn/_pysvn_*.so")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
661db321fd4b4f16b3d1e17e47034e859493c1e6 | a16287fe627227f1e9e26bce5e4cf75b3b55c9ce | /profiles_project/profiles_api/urls.py | ec08b223a35a31ce422889f2b23547e5db1f21f9 | [] | no_license | saif43/profiles-rest-api | c825c4547fe18f3a4d90ae6fbc770aa9fcd07b1d | 79c91a08bfba9d5c4334b87d4b0aebe5d3508b5f | refs/heads/master | 2023-08-06T05:55:39.225674 | 2020-08-26T06:26:49 | 2020-08-26T06:26:49 | 260,665,029 | 0 | 0 | null | 2021-09-22T19:15:46 | 2020-05-02T10:39:55 | Python | UTF-8 | Python | false | false | 507 | py | from django.urls import path, include
from profiles_api import views
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register("hello-viewset", views.HelloViewSet, basename="hello-viewset")
router.register("profile", views.UserProfileViewSet)
router.register("feed", views.ProfileFeedItemView)
urlpatterns = [
path("hello-view/", views.HelloApiView.as_view()),
path("", include(router.urls)),
path("login/", views.UserLoginApiView.as_view()),
]
| [
"saif.ahmed.anik.0@gmail.com"
] | saif.ahmed.anik.0@gmail.com |
5f7af2b6451845d02a33e1908e91626ca9eb3e6d | f6de15dd01a3e514afb66839856126026b423fd0 | /UTS 2/Letter.py | 2122302310fd4d6651fa64402bc5b3087e9251ce | [] | no_license | NizanHulq/Kuliah-Python | 918e3ab9a72cbabaae6f38c5fea004641926c8b6 | f0cc2abeecc497da2a03cf18408252cb636e89fc | refs/heads/main | 2023-08-21T17:48:51.661188 | 2021-10-08T16:03:37 | 2021-10-08T16:03:37 | 415,047,439 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | n = int(input())
push = input().split()
hasil = []
index = 0
for i in range(n):
if hasil == []:
hasil.append(push[i])
elif len(hasil)%2 != 0:
hasil.insert(index,push[i])
index += 1
elif len(hasil)%2 == 0:
hasil.insert(i//2,push[i])
print(" ".join(hasil))
| [
"nizandiaulhaq@gmail.com"
] | nizandiaulhaq@gmail.com |
d9eb397cfc92c3f8d261e879e730c250218f9b2b | d3908fc3baeb65ad65b422e99416ce4114ad4357 | /Prediccion/main.py | f100cb87a407891f2d04afecc798b290f253fa17 | [] | no_license | Omar97perez/TrabajoFinalAnalisisDeDatos | 4adfbfc8b8975462bed67a726005a405bd775a19 | 816e170adf995db9d88574c83c99f3f8e31d453e | refs/heads/master | 2022-10-11T02:09:10.254520 | 2020-06-11T17:30:14 | 2020-06-11T17:30:14 | 271,036,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,800 | py | import matplotlib.pyplot as plt
import pandas as pd
import StrategyFile as sf
import sys
import string
import os
import geopandas as gpd
import numpy as np
from sklearn import datasets, linear_model
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neural_network import MLPRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_predict, train_test_split
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from sklearn import model_selection
from pandas.plotting import scatter_matrix
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets import make_blobs
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from scipy.cluster.hierarchy import dendrogram
from sklearn.datasets import load_iris
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import KMeans
from time import time
# Mode flag: 1 = prompt the user interactively, anything else = read every
# parameter from the command line (argv[3..7], see below).
pedirParametros = int(sys.argv[2])
# Load the dataset from the input file, dispatching on its extension to the
# matching strategy class (Csv / Json / Xlsx) from StrategyFile.
file = sys.argv[1]
fichero = os.path.splitext(file)
nombreFichero = fichero[0]
fichero = fichero[0] + ".csv"
if file.endswith('.csv'):
    fileSelected = sf.Csv(file, fichero)
    df = fileSelected.collect()
elif file.endswith('.json'):
    fileSelected= sf.Json(file, fichero)
    df = fileSelected.collect()
elif file.endswith('.xlsx'):
    fileSelected= sf.Xlsx(file, fichero)
    df = fileSelected.collect()
else:
    # Unsupported input format -> abort.
    print("Formato no soportado")
    sys.exit()
# Collect run parameters: algorithm id, feature-column range, target column
# and the comma-separated values to predict — interactively or from argv.
if(pedirParametros == 1):
    algoritmoSeleccionado = int(input('¿Qué algoritmo quiere ejecutar?: \n\t 1. Regresión Lineal. \n\t 2. Árbol de Regresión. \n\t 3. Regresión árbol Aleatorio. \n\t 4. Red Neuronal.\n > '))
    columnaSeleccionadaInicial = int(input('¿Qué columna inicial quiere analizar?\n > '))
    columnaSeleccionada = int(input('¿Qué columna final quiere analizar?\n > '))
    valoresPredecir = input('¿Qué valores tiene para predecir?\n > ')
else:
    # Batch mode: same parameters taken from the command line.
    algoritmoSeleccionado = int(sys.argv[3])
    columnaSeleccionadaInicial = int(sys.argv[4])
    columnaSeleccionada = int(sys.argv[5])
    valoresPredecir = sys.argv[6]
    # NOTE(review): rutaEscribirJson is read but never used below.
    rutaEscribirJson = sys.argv[7]
array = df.values
# Features: columns [inicial, final); target: the final column itself.
X = (array[:,columnaSeleccionadaInicial:columnaSeleccionada])
Y = (array[:,columnaSeleccionada])
# Pick the regression model: 1=linear, 2=decision tree, 3=random forest,
# 4=multi-layer perceptron; anything else aborts.
if algoritmoSeleccionado == 1:
    model = LinearRegression()
elif algoritmoSeleccionado == 2:
    model = DecisionTreeRegressor()
elif algoritmoSeleccionado == 3:
    model = RandomForestRegressor()
elif algoritmoSeleccionado == 4:
    model = MLPRegressor()
else:
    print("El algoritmo introducido no existe")
    sys.exit()
# Parse the comma-separated prediction input into a single-row float array.
valorSplit = valoresPredecir.split(",")
valorMap = list(map(float, valorSplit))
valoresPredecir = np.array([valorMap])
# Fit on the full dataset and predict the requested point.
reg = model.fit(X, Y)
result = reg.predict(valoresPredecir)
# Hold out 22% of the data and score the model with 10-fold cross-validation
# on the training split; the prediction and mean CV score become the title.
validation_size = 0.22
seed = 123
X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X, Y, test_size=validation_size, random_state=seed)
kfold = model_selection.KFold(n_splits=10, random_state=seed, shuffle=True)
cv_results = model_selection.cross_val_score(model, X_train, Y_train, cv=kfold)
msg = "%s (%f) \n %s (%f)" % ('Predicción:', result, 'Porcentaje de acierto:', cv_results.mean())
model.fit(X_train, Y_train)
# NOTE(review): `predictions` is computed but never used afterwards.
predictions = model.predict(X_validation)
# Boxplot of the per-fold CV scores; shown interactively in mode 1,
# otherwise saved to an image named after the input file.
fig = plt.figure()
fig.suptitle(msg)
ax = fig.add_subplot(111)
plt.boxplot(cv_results)
ax.set_xticklabels('BR')
if(pedirParametros == 1):
    plt.show()
else:
    print(nombreFichero)
    plt.savefig(nombreFichero)
"omarperezznakar@gmail.com"
] | omarperezznakar@gmail.com |
7441a3d4569d7d34932af80949a4ac69d7019d91 | d9eda3d6f14bd35229d25118493a1d8157bdcb8b | /Interview/Code_Snippets/python/ll_add.py | 5c16659ed783acecd1e51e9448723f71f7bd0f00 | [] | no_license | marannan/repo_1 | 070618cafd3762733f9fca11ae866988b2fac5a9 | 4667a2d761423368675bd463c888918d2cdaf828 | refs/heads/master | 2021-01-17T17:45:06.869090 | 2016-07-06T19:52:58 | 2016-07-06T19:52:58 | 47,296,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,366 | py | from ll import *
from ll_reverse import *
def linked_lists_add(ll_1, ll_2):
    """Iteratively add two numbers stored as linked lists of digits
    (least-significant digit first) and return a new linked_list sum.

    Negative numbers are handled by tracking a sign flag once a negative
    partial digit is seen.  NOTE(review): the `/` divisions below are
    Python-2 integer division; under Python 3 they yield floats — confirm
    which interpreter this module targets.
    """
    # Degenerate cases: one or both lists empty.
    if ll_1.head == None and ll_2.head == None:
        return None
    elif ll_1.head != None and ll_2.head == None:
        return ll_1
    elif ll_1.head == None and ll_2.head != None:
        return ll_2
    else:
        cur_1 = ll_1.head
        cur_2 = ll_2.head
        ll_added = linked_list()
        cur_3 = ll_added.head  # tail pointer of the result list
        carry = 0
        sign = 1  # flips to -1 once a negative digit sum is encountered
        # Phase 1: both lists still have digits.
        while cur_1 and cur_2:
            new_data = cur_1.data + cur_2.data + carry
            if new_data < 0:
                sign = -1
            # Work on the magnitude, then restore the sign.
            new_data_add = ((new_data * sign) % 10) * sign
            carry = ((new_data * sign) / 10) * sign
            new_node = node(new_data_add)
            if ll_added.head == None:
                ll_added.head = new_node
            else:
                cur_3.next = new_node
            cur_3 = new_node
            cur_1 = cur_1.next
            cur_2 = cur_2.next
        # Phase 2: drain whichever list is longer, still propagating carry.
        while cur_1:
            new_data = cur_1.data + carry
            if new_data < 0:
                sign = -1
            new_data_add = ((new_data * sign ) % 10) * sign
            carry = ((new_data * sign) / 10) * sign
            new_node = node(new_data_add)
            cur_3.next = new_node
            cur_3 = new_node
            cur_1 = cur_1.next
        while cur_2:
            new_data = cur_2.data + carry
            if new_data < 0:
                sign = -1
            new_data_add = ((new_data * sign) % 10) * sign
            carry = ((new_data * sign) / 10) * sign
            new_node = node(new_data_add)
            cur_3.next = new_node
            cur_3 = new_node
            cur_2 = cur_2.next
        # Final carry becomes the most significant digit.
        if carry != 0:
            new_node = node(carry)
            cur_3.next = new_node
        return ll_added
# recursive digit-by-digit adder; handles negative numbers
def linked_lists_add_2(node_1, node_2, carry=0):
    """Recursively add two numbers stored as linked lists of digits
    (least-significant digit first) and return the head node of the sum.

    carry: carry propagated from the previous (less significant) digit.
    Returns None once both inputs are exhausted and no carry remains.

    Fixes over the original:
      * the second advance check tested ``node_1`` instead of ``node_2``,
        which raised AttributeError (``None.next``) whenever the second
        list was shorter than the first;
      * carries use floor division ``//`` so they stay ints on Python 3.
    """
    if node_1 is None and node_2 is None and carry == 0:
        return None
    value = carry
    if node_1:
        value = value + node_1.data
    if node_2:
        value = value + node_2.data
    new_node = node()
    if value >= 0:
        new_node.data = value % 10
        carry = value // 10
    else:
        # Handle the negative case on the magnitude, then restore the sign.
        new_node.data = (-1) * ((value * -1) % 10)
        carry = (-1) * ((value * -1) // 10)
    next_1 = None if node_1 is None else node_1.next
    next_2 = None if node_2 is None else node_2.next  # was node_1: crashed on shorter 2nd list
    new_node.next = linked_lists_add_2(next_1, next_2, carry)
    return new_node
if __name__ == "__main__":
    # Demo: add 9 and -99.  The lists are reversed so digits are
    # least-significant first, added, then reversed back for display.
    ll_1 = linked_list()
    ll_2 = linked_list()
    ll_1.add_nodes([9])
    ll_2.add_nodes([-9, -9])
    ll_1_rev = linked_list_reverse(ll_1)
    ll_2_rev = linked_list_reverse(ll_2)
    ll_add = linked_list()
    ll_add.head = linked_lists_add_2(ll_1_rev.head, ll_2_rev.head, 0)
    # Print the sum most-significant digit first.
    linked_list_reverse(ll_add).display_nodes()
    # Fix: the original ended this block with a bare `return`, which is a
    # SyntaxError at module level and prevented the script from running.
| [
"marannan@wisc.edu"
] | marannan@wisc.edu |
3117e2434b2fceef3242be8a154e06d0d429604c | abf79ee08c9bfefb451806bde829082f0c4b16da | /DatabaseFiller/filler.py | d1c3fe75ac912da770add08dc9dcdd544ce5d900 | [] | no_license | gbarboteau/OFFSubstitutes | 290ce5c5be11ea62ac389e61bb3b5a41accd80b5 | f1089487dcae99a2fd6e5edb79a9f7f8404404c3 | refs/heads/master | 2021-02-03T21:41:29.676403 | 2020-03-28T13:18:43 | 2020-03-28T13:18:43 | 243,547,924 | 0 | 0 | null | 2020-03-05T12:26:53 | 2020-02-27T15:20:38 | Python | UTF-8 | Python | false | false | 1,523 | py | """Fill the database with the data collected by
datacollecter.py
"""
import mysql.connector
class Filler:
    """Adds a set of data in the openfoodfacts
    database. Needs to be authentificated.

    my_data: iterable of dicts with keys product_name, product_description,
    barcode, nutritional_score, stores and product_category.
    my_auth: object exposing ``user`` and ``password`` attributes.
    """
    def __init__(self, my_data, my_auth):
        """Create an instance of Filler, keeping credentials and the rows to insert."""
        self.user = my_auth.user
        self.password = my_auth.password
        self.my_data = my_data
    def put_it_in_tables(self):
        """Put all the data given at the instance creation
        into the database.

        Uses a parameterized INSERT; rows violating a uniqueness constraint
        are skipped silently (IntegrityError ignored).  All inserts are
        committed in a single transaction at the end.
        """
        my_connection = mysql.connector.connect(user=self.user, password=self.password, database='openfoodfacts')
        cursor = my_connection.cursor(buffered=True)
        for i in self.my_data:
            # NOTE(review): prod_name is assigned but never used.
            prod_name = i['product_name']
            try:
                add_aliment = ("INSERT INTO aliment "
                "(product_name, product_description, barcode, nutritional_score, stores, product_category) "
                "VALUES (%s, %s, %s, %s, %s, %s)")
                # NOTE(review): the '' doubling is redundant with the
                # parameterized query — the connector already escapes values.
                data_aliment = (i['product_name'].replace("'", "''"), i['product_description'].replace("'", "''"), i['barcode'].replace("'", "''"), i['nutritional_score'].replace("'", "''"), i['stores'].replace("'", "''"), i['product_category'].replace("'", "''"))
                cursor.execute(add_aliment, data_aliment)
            except mysql.connector.IntegrityError:
                # Duplicate row (e.g. same barcode): skip and continue.
                pass
        my_connection.commit()
        cursor.close()
        my_connection.close()
        print("ok c'est fait")
| [
"g.barboteau@gmail.com"
] | g.barboteau@gmail.com |
5368330febedbc4e6056fc6cc5d8026417fa248e | e4677de1b20f989cc26a564c770672eb029bf2d1 | /PythonProject/MultichannelBiosignalEmotionRecognition/model_1th/new_RNN/data.py | eb310350d68d8467e8a24ef9543d473104f49970 | [] | no_license | muyi110/CodeRepository | 7def0caf1a77fbe6d9eb37ab7b63401d9234bbd5 | 95dd4889734174c1895928f9a1da40bae7b2f046 | refs/heads/master | 2018-12-08T17:10:06.923526 | 2018-12-06T07:45:40 | 2018-12-06T07:45:40 | 117,515,704 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,686 | py | # -*- coding:UTF-8 -*-
import os
import math
import numpy as np
import features
from features import data_filter, differential_entropy
SAMPLES_PATH = '../../data_analysis/samples/'
params = (features.b_theta, features.a_theta, features.b_alpha, features.a_alpha,
features.b_beta, features.a_beta, features.b_gamma, features.a_gamma)
def get_samples_data(path, people_list, trial_list, windows=4, overlapping=3):
    '''Slice DEAP-style EEG trials into overlapping windows with quadrant labels.

    windows: length (in seconds) of each sliding time window
    overlapping: overlap (in seconds) between consecutive windows

    Returns (datas, labels): datas is a list of (32, 128*windows) float32
    arrays (32 EEG channels at 128 Hz), labels the valence/arousal quadrant
    (1-4) of the trial each window came from.
    '''
    samples_dirs = os.listdir(path) # directory order is arbitrary, so sort below
    samples_dirs = sorted(samples_dirs)
    file_path = [os.path.join(path, samples_dirs[i]) for i in range(len(samples_dirs))]
    datas = []
    labels = []
    for people in people_list:
        for trial in trial_list:
            data = np.loadtxt(file_path[people]+'/trial_'+str(trial+1)+".csv", delimiter=',',
                              skiprows=0, dtype=np.float32)
            data = data[:32,128*3:] # keep only the last 60 s of the 32 EEG channels
            # # per-channel min-max normalization (disabled)
            # for i in range(data.shape[0]):
            #     _min = data[i].min()
            #     _max = data[i].max()
            #     data[i] = (data[i] - _min) / (_max - _min)
            #     # data[i] = (data[i] - data[i].mean()) / data[i].std()
            # Label: valence/arousal quadrant from the first two rating columns.
            labels_value = np.loadtxt(file_path[people]+'/label.csv', delimiter=',',
                                      skiprows=0, dtype=np.float32)[trial,:2]
            if labels_value[0] > 5. and labels_value[1] > 5.:
                label = 1 # quadrant I (high valence, high arousal)
            elif labels_value[0] >= 5. and labels_value[1] <= 5.:
                label = 2 # quadrant II
            elif labels_value[0] < 5. and labels_value[1] <= 5.:
                label = 3 # quadrant III
            elif labels_value[0] <= 5. and labels_value[1] > 5.:
                label = 4 # quadrant IV
            # Slide a `windows`-second window over the 60 s trial (data.shape=(32, 7680)).
            step = windows - overlapping # stride between window starts, in seconds
            iterator_num = int((60 - windows) / step + 1) # total number of windows
            for iterator in range(iterator_num):
                data_slice = data[:,128*(iterator*step):128*(iterator*step+windows)]
                datas.append(data_slice)
                labels.append(label)
    print("Get sample data success!")
    print("Total sample number is: ", len(labels))
    print("label 1: {} label 2: {} label 3: {} label 4: {}.".format(np.sum(np.array(labels)==1),
                                                                    np.sum(np.array(labels)==2),
                                                                    np.sum(np.array(labels)==3),
                                                                    np.sum(np.array(labels)==4)))
    return (datas, labels)
def index_generator(num_examples, batch_size, seed=0):
    """Yield (X_indices, y_indices) pairs that partition a shuffled index range.

    Indices 0..num_examples-1 are permuted with the given numpy seed and
    emitted in consecutive chunks of ``batch_size``; the final chunk covers
    any remainder.  Both elements of each pair hold the same indices.
    """
    np.random.seed(seed)
    shuffled = list(np.random.permutation(num_examples))
    for start in range(0, num_examples, batch_size):
        stop = start + batch_size
        yield (shuffled[start:stop], shuffled[start:stop])
def read_data(path=SAMPLES_PATH, people_list = list(range(0,32)), trial_list=list(range(0,40)), windows=4, overlapping=3, raw_data=False, sample_flag=None):
    """Load windowed EEG samples and (optionally) extract per-second
    differential-entropy features.

    raw_data: if True, return the raw (32, 128*windows) windows unchanged;
    otherwise each window becomes a (features, seq_length) array of 0-1
    normalized differential entropies (4 bands x 32 channels per second).
    sample_flag: True saves feature arrays as the train set, False as the
    test set, None skips saving.
    """
    # datas and labels are lists; each datas item is a (32, 128*windows) array.
    datas, labels = get_samples_data(path, people_list, trial_list, windows, overlapping)
    datas_result = []
    for data in datas:
        data_list = []
        if not raw_data:
            # Feature extraction: each sample becomes shape (features, seq_length).
            for window in range(windows): # extract features one second at a time
                features_list = []
                for i in range(32): # process the 32 EEG channels in turn
                    X = data[i, 128*(window):128*((window+1))]
                    theta, alpha, beta, gamma = data_filter(X, params) # split into frequency bands
                    features_list.append(differential_entropy(theta))
                    features_list.append(differential_entropy(alpha))
                    features_list.append(differential_entropy(beta))
                    features_list.append(differential_entropy(gamma))
                _max = max(features_list)
                _min = min(features_list)
                data_list.append((np.array(features_list).reshape(-1, 1) - _min)/(_max - _min)) # 0-1 normalization
            datas_result.append(np.c_[tuple(data_list)]) # shape=(features, seq_length)
    if(raw_data):
        datas_result = datas
    del datas # free memory
    assert len(datas_result) == len(labels)
    if not raw_data and (sample_flag==True):
        np.save("./data_set/train_datas_features", datas_result)
        np.save("./data_set/train_label_features", labels)
    if not raw_data and (sample_flag==False):
        np.save("./data_set/test_datas_features", datas_result)
        np.save("./data_set/test_label_features", labels)
    return (datas_result, labels)
| [
"ylqing5470@126.com"
] | ylqing5470@126.com |
0bdb8f6c369ce6db519789ab50cda46d69876d3f | 6e158a54409937515b14676730adfadfd457d4ae | /shared/cp_utils.py | b5c5cd38f32869dc9aab5b9304d2b2472c563152 | [] | no_license | Tjstretchalot/machinelearning | e2b277efd99f6e45005cb92a0cc17e90bf7d37e4 | 5a3b17c49211a63f71cdf40ca35e00a3af4b198a | refs/heads/master | 2020-05-02T09:25:25.032430 | 2019-07-25T14:37:43 | 2019-07-25T14:37:43 | 177,871,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | """Utils regarding cloning / copying things"""
import torch
def copy_linear(lyr: torch.nn.Linear) -> torch.nn.Linear:
    """Return a new Linear layer with the same shape and copied parameters.

    The clone shares no storage with *lyr*: weights (and bias, when present)
    are deep-copied, so later mutation of either layer leaves the other intact.
    """
    has_bias = lyr.bias is not None
    clone = torch.nn.Linear(lyr.in_features, lyr.out_features, has_bias)
    with torch.no_grad():
        clone.weight.copy_(lyr.weight)
        if has_bias:
            clone.bias.copy_(lyr.bias)
    return clone
| [
"mtimothy984@gmail.com"
] | mtimothy984@gmail.com |
240cc765f95d16aa2871d03c16aa123fd07e1e3b | a24375133a6e043610ea33e7b7b80422a3a74361 | /djangoPolymorphicTestcase/wsgi.py | d97c1aa6ed4064e98645395b0cc6bf0cf22c0805 | [] | no_license | nmoskopp/djangoPolymorphicTestcase | f6421f10aa9b7dfb155fa74eef55d71a2d875bd6 | 9daca92ab4d956be50f9cbdddbe222fc01c5d2ec | refs/heads/master | 2021-01-20T02:01:56.008656 | 2017-04-25T13:09:08 | 2017-04-25T13:10:05 | 89,362,363 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | """
WSGI config for djangoPolymorphicTestcase project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before building the app
# (setdefault lets an externally exported value win).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoPolymorphicTestcase.settings")
# The WSGI callable that servers (gunicorn, mod_wsgi, ...) import and invoke.
application = get_wsgi_application()
| [
"nils.moskopp@grandcentrix.net"
] | nils.moskopp@grandcentrix.net |
b266cbeac957a139e15179334510fb6c365e7313 | 78a8321f69fbf590880de9e1570c88f87a4ca83f | /terVer5/main.py | e04990aed60695f5359e4f0a2a8a556c75192c81 | [] | no_license | Sapfir0/terVer | 0129a966203e0a89c4d0422673edcce58642b5bb | c074c00d853e8a87d5a4fc97341ba80aecd608a7 | refs/heads/master | 2020-06-21T13:28:14.341909 | 2019-07-17T21:34:32 | 2019-07-17T21:34:32 | 197,466,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,321 | py |
import json
import math as mt
import matplotlib.pyplot as plt
import numpy as np
# возвращает массив данных из файла
def readDataFromFile(filename):
    """Read *filename* and return its JSON-decoded contents (the sample list).

    Uses a context manager so the file handle is closed even if decoding
    raises — the original open/read/close sequence leaked the handle on a
    parse error.
    """
    with open(filename, 'r') as file:
        return json.load(file)
def showVariationRow(xi, ni, wi):
    """Print the variation series as a tab/pipe separated table: values (Xi),
    absolute frequencies (ni) and relative frequencies (wi, 4 decimals)."""
    print('Вариационный ряд:')
    print('\t|\tXi\t|\tni\t|\twi\t|')
    for row in range(len(xi)):
        cells = ('', xi[row], ni[row], '%.4f' % wi[row], '')
        print(*cells, sep='\t|\t')
# расчет повторов
def counter(array):
    """Return a dict mapping each distinct element of *array* to its number
    of occurrences, in first-appearance order.

    Uses collections.Counter (a single O(n) pass) instead of calling
    list.count inside a comprehension, which was O(n**2).
    """
    from collections import Counter  # local import keeps module deps unchanged
    return dict(Counter(array))
#
def probability(kortez):
    """Convert a frequency mapping (element -> count) into relative
    frequencies: each count divided by the total, returned as a plain
    Python list in the mapping's iteration order."""
    counts = np.fromiter(kortez.values(), dtype=float)
    normalized = counts / counts.sum()
    return normalized.tolist()
def createrRow(variant):
    """Build the variation series of *variant*: distinct values (xi), their
    absolute frequencies (ni) and relative frequencies (wi)."""
    freq = counter(variant)
    xi = list(freq.keys())
    ni = list(freq.values())
    wi = probability(freq)
    return xi, ni, wi
# Poligon
def showPoligon(x, y, title = 'Полигон'):
plt.ylim([-0.01, max(y)+(max(y)/10)])
plt.xlim([min(x)-1, max(x)+2])
plt.plot(x, y)
plt.title(title)
# plt.title(r'$p(x)=\frac{1}{\sqrt{2\pi\sigma^{2}}}e^{-\frac{(x-\mu)^{2}}{2\sigma^{2}}}$',
# fontsize = 20, # увеличиваем размер
# pad = 20) # приподнимаем над "Axes"
plt.grid()
plt.show()
def showRaspGraph(x, y, title = 'Эмпирическая функция распределения'):
    """Plot a step-style empirical CDF: leftward arrows at each cumulative
    probability level, a zero line before min(x) and a level-1 line after
    max(x).  x: sorted variants; y: their relative frequencies."""
    # NOTE(review): this dict is never used — presumably leftover from an
    # annotate() attempt.
    arrowstyle = {
        'arrowstyle': "->"
    }
    plt.ylim([-0.01, 1.1])
    plt.xlim([min(x)-1, max(x)+2])
    head_w = 0.01
    # F(x) = 0 for x below the smallest variant (large xmin fakes -infinity).
    plt.hlines(y=0, xmin=-56465, xmax=min(x), linewidth=1.5, color='000000')
    buff = 0.0
    # One leftward arrow per step of the cumulative distribution.
    for i in range(0, len(y)-1):
        plt.arrow(x[i+1], y[i]+buff, -(x[i+1]-x[i]), 0, head_width=head_w)
        buff += y[i]
    # Ideally this would be a line extending to infinity: F(x) = 1 beyond max(x).
    dxinf = 10000
    plt.arrow(max(x)+dxinf, 1, -dxinf, 0, head_width=head_w)
    plt.title(title)
    plt.grid()
    plt.show()
def showFunctionRaspr(x, y):
    """Print the empirical distribution function F*(x) piecewise (cumulative
    sums of the relative frequencies y over sorted variants x), then plot it
    via showRaspGraph."""
    print("Эмпирическая функция распределения: ")
    buff=y[0]  # running cumulative probability
    print("F*(x) = ", "%.4f" % 0.0, " При", "x <", x[0])
    for i in range(1, len(y)):
        print("F*(x) = ", "%.4f" % buff, " При", x[i - 1], "< x <=", x[i])
        buff += y[i]
    print("F*(x) = ", "%.4f" % buff, " При", "x >", x[len(y) - 1])
    showRaspGraph(x, y)
def firstTask():
    """Tasks 1-2: load a sample from a JSON file, display its variation
    series, polygons and empirical CDF, then print the sample mean, mode,
    variance and corrected standard deviation."""
    filename = str(input('Введите название файла: '))
    variant = readDataFromFile(filename)
    variant = sorted(variant)
    xi, ni, wi = createrRow(variant)
    showVariationRow(xi, ni, wi)
    # Leftover experiment with the termcolor package:
    # text = colored('Hello, World!', 'red', attrs=['reverse', 'blink'])
    # print(text)
    # show graphs
    showPoligon(xi, ni)
    showPoligon(xi, wi)
    showFunctionRaspr(xi, wi)
    xi = np.array(xi)
    ni = np.array(ni)
    wi = np.array(wi)
    # Weighted sample mean: sum(xi*ni) / n.
    selectiveAverage = (xi * ni).sum() / ni.sum()
    print("Выборочное среднее, найденное по формуле xв = (x1*n1 + x2*n2 + ... + xk*nk)/n :", selectiveAverage)
    # Mode: the value with the largest relative frequency.
    moda = xi[wi.tolist().index(max(wi))]
    print("Мода выборки (варианта с наибольшей частотой появления) : ", moda)
    # Sample variance: weighted mean of squared deviations from the mean.
    dispersion = (np.power(xi - selectiveAverage, 2) * ni).sum()/ni.sum()
    print("Выборочная дисперсия Dв - это среднее арифметическое квадратов отклонений всех вариант выборки от её средней")
    print("Найдена по формуле ((x1 - xв)^2 *n1 + (x2 - xв)^2 *n2 + ... + (xk - xв)^2 *nk)/n : ", dispersion)
    # Corrected (unbiased) standard deviation with Bessel's n/(n-1) factor.
    S = mt.sqrt(dispersion * ni.sum() / (ni.sum() - 1))
    print("Исправленное выборочное среднеквадратичное отклонение S по формуле sqrt(n*Dв/(n-1)) : ", S)
def thirdTask():
    """Task 3: read an interval (grouped) series from stdin, display its
    variation series, polygon and empirical CDF, then print the range,
    sample mean and sample variance.

    Each interval is represented by its midpoint in *variant*.
    """
    intervals = [[], []]
    variant = []
    # ---
    count = int(input('Число разбиений интервального ряда:'))
    # Read the interval boundaries and store the midpoint of each interval.
    for i in range(count):
        buff = [float(input('вводи начало интервала: ')),
                float(input('вводи конец интервала: '))]
        intervals[0].append(buff[0])
        intervals[1].append(buff[1])
        variant.append((intervals[0][i]+intervals[1][i])/2)
    ni = []
    for i in range(count):
        ni.append(int(input('Введите число появлений значений из ' +
                            str(i+1) + ' интервального ряда: ')))
    xi = np.array(variant)
    ni = np.array(ni)
    wi = ni / ni.sum()
    showVariationRow(xi, ni, wi)
    showPoligon(xi, wi)
    showFunctionRaspr(xi, wi)
    # Build the step plot of the cumulative distribution over the interval
    # endpoints. NOTE(review): F[i+1] accumulates wi[j] for j < i -- check
    # the intended off-by-one against the definition of the interval CDF.
    F = np.full(count+3, 0, dtype=float)
    variantsForFunc = np.full(count+3, 0, dtype=float)
    variantsForFunc[1] = intervals[0][0]
    for i in range(0, count):
        variantsForFunc[i + 2] = intervals[1][i]
        for j in range(i):
            F[i + 1] += wi[j]
    F[count + 1] = 1
    F[count + 2] = 1
    variantsForFunc[count + 2]=intervals[1][count - 1] + 5
    # for i in range(0, count+3):
    #     print(variantsForFunc[i], F[i])
    showPoligon(variantsForFunc, F)
    # Range of the interval series: last right endpoint minus first left one.
    print("Размах интервального ряда:",
          intervals[1][count - 1] - intervals[0][0])
    # Weighted sample mean over interval midpoints.
    selectiveAverage = (xi * ni).sum()/ni.sum()
    print("Выборочное среднее, найденное по формуле xв = (x1*n1 + x2*n2 + ... + xk*nk)/n :", selectiveAverage)
    # Sample variance over interval midpoints.
    dispersion = (np.power(xi - selectiveAverage, 2) * ni).sum() / ni.sum()
    print("Выборочная дисперсия, найденное по формуле xв = ((x1 - xср)*n1 + (x2 - xср)*n2 + ... + (xk - xср)*nk)/n :", dispersion)
def main():
    """Read an assignment number from stdin and dispatch to its handler."""
    request = int(input('Задание: '))
    if request in (1, 2):
        firstTask()
    elif request == 3:
        thirdTask()
    else:
        print('Задание not found')
# Entry point: run the chosen task, then block on input so the console
# window stays open until the user presses Enter.
main()
input(' жмяк ')
"sapfir999999@yandex.ru"
] | sapfir999999@yandex.ru |
3bfc997a57ff5e17026057e870029689210fdfea | 9cef1dc0a6a7b95ceb8a2d892bc39e9a0d15b681 | /tempest/tempest/api/compute/volumes/test_attach_volume.py | 75f9795619a150816bcb9b06e15afc4ca1cd0367 | [
"Apache-2.0"
] | permissive | bopopescu/OpenStack-CVRM-1 | d2a4353cfe6d53634456e43a726698bd705db1db | fc0128258bf7417c6b9e1181d032529efbb08c42 | refs/heads/master | 2022-11-22T01:50:05.586113 | 2015-12-15T07:56:22 | 2015-12-15T07:57:01 | 282,140,514 | 0 | 0 | null | 2020-07-24T06:24:49 | 2020-07-24T06:24:48 | null | UTF-8 | Python | false | false | 5,529 | py | # Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.compute import base
from tempest.common.utils.linux import remote_client
from tempest import config
from tempest import test
CONF = config.CONF
class AttachVolumeTestJSON(base.BaseV2ComputeTest):
    """Attach/detach scenarios for Cinder volumes on Nova servers (JSON API)."""

    def __init__(self, *args, **kwargs):
        super(AttachVolumeTestJSON, self).__init__(*args, **kwargs)
        # Set by _create_and_attach(); _detach() uses it as a guard so
        # cleanup is a no-op when nothing was ever attached.
        self.attachment = None

    @classmethod
    def resource_setup(cls):
        """Prepare instance networking; skip the class if Cinder is absent."""
        cls.prepare_instance_network()
        super(AttachVolumeTestJSON, cls).resource_setup()
        cls.device = CONF.compute.volume_device_name
        if not CONF.service_available.cinder:
            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
            raise cls.skipException(skip_msg)

    def _detach(self, server_id, volume_id):
        """Detach the volume and wait until it is 'available' again."""
        if self.attachment:
            self.servers_client.detach_volume(server_id, volume_id)
            self.volumes_client.wait_for_volume_status(volume_id, 'available')

    def _delete_volume(self):
        """Delete the test volume (if any) and wait for it to disappear."""
        # Delete the created Volumes
        if self.volume:
            self.volumes_client.delete_volume(self.volume['id'])
            self.volumes_client.wait_for_resource_deletion(self.volume['id'])
            self.volume = None

    def _create_and_attach(self):
        """Boot an ACTIVE server, create a 1 GB volume and attach it.

        Registers cleanups so the volume is detached and deleted even if
        the calling test fails part-way through.
        """
        # Start a server and wait for it to become ready
        admin_pass = self.image_ssh_password
        _, self.server = self.create_test_server(wait_until='ACTIVE',
                                                 adminPass=admin_pass)
        # Record addresses so that we can ssh later
        _, self.server['addresses'] = (
            self.servers_client.list_addresses(self.server['id']))
        # Create a volume and wait for it to become ready
        _, self.volume = self.volumes_client.create_volume(
            1, display_name='test')
        self.addCleanup(self._delete_volume)
        self.volumes_client.wait_for_volume_status(self.volume['id'],
                                                   'available')
        # Attach the volume to the server
        _, self.attachment = self.servers_client.attach_volume(
            self.server['id'],
            self.volume['id'],
            device='/dev/%s' % self.device)
        self.volumes_client.wait_for_volume_status(self.volume['id'], 'in-use')
        self.addCleanup(self._detach, self.server['id'], self.volume['id'])

    @testtools.skipUnless(CONF.compute.run_ssh, 'SSH required for this test')
    @test.attr(type='gate')
    def test_attach_detach_volume(self):
        """The device survives a stop/start while attached and is gone
        from the guest's partition table after detaching."""
        # Stop and Start a server with an attached volume, ensuring that
        # the volume remains attached.
        self._create_and_attach()

        self.servers_client.stop(self.server['id'])
        self.servers_client.wait_for_server_status(self.server['id'],
                                                   'SHUTOFF')
        self.servers_client.start(self.server['id'])
        self.servers_client.wait_for_server_status(self.server['id'], 'ACTIVE')

        linux_client = remote_client.RemoteClient(self.server,
                                                  self.image_ssh_user,
                                                  self.server['adminPass'])
        partitions = linux_client.get_partitions()
        self.assertIn(self.device, partitions)

        self._detach(self.server['id'], self.volume['id'])
        # Prevent the registered _detach cleanup from detaching twice.
        self.attachment = None
        self.servers_client.stop(self.server['id'])
        self.servers_client.wait_for_server_status(self.server['id'],
                                                   'SHUTOFF')
        self.servers_client.start(self.server['id'])
        self.servers_client.wait_for_server_status(self.server['id'], 'ACTIVE')

        linux_client = remote_client.RemoteClient(self.server,
                                                  self.image_ssh_user,
                                                  self.server['adminPass'])
        partitions = linux_client.get_partitions()
        self.assertNotIn(self.device, partitions)

    @test.skip_because(bug="1323591", interface="xml")
    @test.attr(type='gate')
    def test_list_get_volume_attachments(self):
        """Listing and getting attachments reflects the attached volume."""
        # Create Server, Volume and attach that Volume to Server
        self._create_and_attach()
        # List Volume attachment of the server
        _, body = self.servers_client.list_volume_attachments(
            self.server['id'])
        self.assertEqual(1, len(body))
        self.assertIn(self.attachment, body)
        # Get Volume attachment of the server
        _, body = self.servers_client.get_volume_attachment(
            self.server['id'],
            self.attachment['id'])
        self.assertEqual(self.server['id'], body['serverId'])
        self.assertEqual(self.volume['id'], body['volumeId'])
        self.assertEqual(self.attachment['id'], body['id'])
class AttachVolumeTestXML(AttachVolumeTestJSON):
    """Same scenarios as the JSON variant, run over the XML interface."""
    _interface = 'xml'
| [
"zaman.khalid@gmail.com"
] | zaman.khalid@gmail.com |
dae4b6f92d98a0f3170f3be3734c693c2b205b45 | 7b445d3cfd2ce94859a9438ccf0dc6e7867ed75a | /telescope.py | 2555836ef4114db18f0d542f21fa04b797bbd242 | [] | no_license | slegeza/telescope | a420eb893cc76ad43fd3311cc1e5272f373f2382 | b7fd176f44ec05e3b87f1499fb926d9b9767d539 | refs/heads/master | 2020-03-25T19:13:23.251244 | 2018-08-08T22:00:57 | 2018-08-08T22:00:57 | 144,071,430 | 0 | 0 | null | 2019-09-17T16:32:40 | 2018-08-08T21:53:37 | Python | UTF-8 | Python | false | false | 14,556 | py | import time
import math
import jdcal
from math import cos,sin,tan,acos,asin,atan
from datetime import datetime
from IPython.display import clear_output
#Initial conditions
im_still_presenting=True  # main display loop runs while this is True
longitude= 83.6123 #W  site longitude, decimal degrees
latitude= 41.6624 #N  site latitude, decimal degrees
tstep=1/5 #timestep between time calculations (seconds between redraws)
utc_time=datetime.utcnow()
New_T=False #New Target flag (not used below -- NOTE(review))
alpha=0 #angle off meridian used as the initial hour-angle offset for RA()
class converted_DMS:
    '''Holds the result of the most recent Degrees_DMS() call.'''
    # Numeric degree / minute / second parts.
    newd=0
    newm=0
    news=0
    # String renderings of the same three values.
    snewd=''
    snewm=''
    snews=''

class converted_HMS:
    '''Holds the result of the most recent Degrees_HMS() call.'''
    # Numeric hour / minute / second parts.
    newh=0
    newm=0
    news=0
    # String renderings of the same three values.
    snewh=''
    snewm=''
    snews=''
class Coordinates:
    # Observer site longitude/latitude in decimal degrees plus hemisphere
    # flags (1 = active). Duplicates the module-level longitude/latitude.
    lon=83.6123
    E=1
    W=0
    lat=41.6624
    N=1
    S=0

class current_DEC:
    # Telescope declination: numeric DMS fields and string renderings.
    degree=0
    minute=0
    second=0
    sdegree=''
    sminute=''
    ssecond=''

class current_GST:
    '''Current Greenwich Sidereal Time (filled in by LST()).'''
    hour=0
    minute=0
    second=0
    shour=''
    sminute=''
    ssecond=''

class current_HA:
    # Current hour angle (filled in by HA()); string fields carry the sign.
    hour=0
    minute=0
    second=0
    shour='0'
    sminute='0'
    ssecond='0'

class current_LST:
    # Local sidereal time for the site (filled in by LST()).
    hour=0
    minute=0
    second=0
    shour=''
    sminute=''
    ssecond=''

class current_RA:
    # Telescope right ascension. The string fields below are copied from
    # current_LST once at class-definition time (so they start empty);
    # zero_RA()/RA() keep them in sync afterwards.
    hour=0
    minute=0
    second=0
    shour=current_LST.shour
    sminute=current_LST.sminute
    ssecond=current_LST.ssecond
class equinox:
    '''Defining point where RA is zero (vernal equinox at Greenwich at noon)'''
    # NOTE(review): fixed at March 21 12:00 of the current year; the true
    # equinox instant varies by up to about a day -- confirm the precision
    # is acceptable. This class is not referenced elsewhere in this file.
    year=utc_time.year
    month=3
    day=21
    hour=12
    minute=0
    second=0

class sday:
    '''Length of one sidereal day in solar time units (23h 56m 4.091s).'''
    # Not referenced elsewhere in this file -- NOTE(review).
    year=1.
    month=1.
    day=1.
    hour=23.
    minute=56.
    second=4.091
class target_DEC:
    # Declination of the slew target (set interactively by NewTarget()).
    degree=0
    minute=0
    second=0
    sdegree='0'
    sminute='0'
    ssecond='0'

class target_RA:
    # Right ascension of the slew target (set interactively by NewTarget()).
    hour=0
    minute=0
    second=0
    shour='0'
    sminute='0'
    ssecond='0'

class target_temp:
    # Scratch holder combining RA-style (hour/minute/second) and DEC-style
    # (degree/dminute/dsecond) fields. Not referenced anywhere in this
    # file -- NOTE(review): candidate for removal.
    hour=0
    minute=0
    second=0
    shour='0'
    sminute='0'
    ssecond='0'
    degree=0
    dminute=0
    dsecond=0
    dsdegree='0'
    dsminute='0'
    dssecond='0'
def airmass():
    '''Airmass stub: the sec(z) computation is commented out pending a
    working altitude(); currently always returns None.'''
    # z=90-altitude()
    return
def altitude():
    '''Stub for the telescope altitude above the horizon; the spherical
    trigonometry below is commented out, so this always returns None.'''
    #theta=abs(HMS_Degrees(current_HA)) #theta=decimal degree value for hour angle.
    #delta=abs(DMS_Degrees(current_DEC))
    #90-theta=the angle between the telescope and the ground
    #x=90-abs(asin((sin(latitude)*sin(delta))+(cos(latitude)*cos(delta)*cos(theta))))
    #alt=abs(round(x,2))
    return
def DecimalYear(x):
    """Convert a datetime *x* into a decimal (fractional) year."""
    year_start = datetime(year=x.year, month=1, day=1, hour=0, minute=0, second=0)
    next_year_start = datetime(year=x.year + 1, month=1, day=1, hour=0, minute=0, second=0)
    elapsed = x - year_start
    year_length = next_year_start - year_start
    return x.year + elapsed / year_length
def DEC(x,theta):
    '''Unused placeholder that always returns 0 (original author note:
    "this is useless why did I make this function").'''
    return 0
def Degrees_DMS(x):
    """Convert decimal degrees into degrees/minutes/seconds.

    The result (numeric and string forms) is stored into the module-level
    converted_DMS class; nothing is returned. Values >= 360 are wrapped.
    """
    while x >= 360.:
        x -= 360
    degrees = int(x)
    frac_minutes = (x - degrees) * 60
    minutes = int(frac_minutes)
    # Seconds carry the leftover fraction, rounded to 3 decimal places.
    seconds = round((frac_minutes - minutes) * 60, 3)
    converted_DMS.newd = degrees
    converted_DMS.newm = minutes
    converted_DMS.news = seconds
    converted_DMS.snewd = str(degrees)
    converted_DMS.snewm = str(minutes)
    converted_DMS.snews = str(seconds)
    return
def Degrees_HMS(x):
    """Convert a decimal-hours value into hours/minutes/seconds.

    The result (numeric and string forms) is stored into the module-level
    converted_HMS class; nothing is returned.
    NOTE(review): the wrap threshold is 360, not 24 -- confirm intent.
    """
    while x >= 360.:
        x -= 360
    hours = int(x)
    frac_minutes = (x - hours) * 60
    minutes = int(frac_minutes)
    # Seconds carry the leftover fraction, rounded to 3 decimal places.
    seconds = round((frac_minutes - minutes) * 60, 3)
    converted_HMS.newh = hours
    converted_HMS.newm = minutes
    converted_HMS.news = seconds
    converted_HMS.snewh = str(hours)
    converted_HMS.snewm = str(minutes)
    converted_HMS.snews = str(seconds)
    return
def DMS_Degrees(x):
    """Convert an object carrying .degree/.minute/.second fields into
    decimal degrees."""
    return x.degree + x.minute / 60. + x.second / 3600
def HA(x):
    '''Update current_HA with the instantaneous hour angle of *x*.

    x -- an object with hour/minute/second fields (RA in HMS).
    The hour angle is RA minus local sidereal time; the result is written
    into the module-level current_HA (numeric fields plus signed strings).
    Despite the original comment, the units are hours (HMS), not DMS.
    '''
    phi=HMS_Degrees(x)
    l=HMS_Degrees(current_LST)
    phi-=l
    Degrees_HMS(phi)
    # Sign convention: the string field carries an explicit '-'/'+' prefix
    # and the numeric hour is negated relative to (RA - LST).
    if phi>0:
        current_HA.hour=converted_HMS.newh*-1
        current_HA.minute=abs(converted_HMS.newm)
        current_HA.second=abs(converted_HMS.news)
        current_HA.shour='-'+str(converted_HMS.snewh)
        current_HA.sminute=str(current_HA.minute)
        current_HA.ssecond=str(current_HA.second)
    else:
        current_HA.hour=converted_HMS.newh*-1
        current_HA.minute=converted_HMS.newm*-1
        current_HA.second=converted_HMS.news*-1
        current_HA.shour='+'+str(current_HA.hour)
        current_HA.sminute=str(current_HA.minute)
        current_HA.ssecond=str(current_HA.second)
    return 0
def HMS_Degrees(x):
    """Convert an object carrying .hour/.minute/.second fields into a
    decimal value.

    Note: no x15 scaling is applied, so the result is in decimal hours
    rather than degrees, despite the function name.
    """
    return x.hour + x.minute / 60. + x.second / 3600
def JD(x):
    ''' Converts UTC time into Julian date
    http://129.79.46.40/~foxd/cdrom/musings/formulas/formulas.htm
    The above website contains all kinds of useful formulae for
    this type of stuff :)'''
    # jdcal.gcal2jd returns a pair whose sum is the Julian date at 0h of
    # the given calendar day -- presumably (epoch, MJD); the time-of-day
    # fields of *x* are ignored. TODO(review): confirm against jdcal docs.
    return sum(jdcal.gcal2jd(x.year,x.month,x.day))
def LST(x):
    '''Compute Greenwich and local sidereal time for UTC datetime *x*.

    Results are written into the module-level current_GST and current_LST
    classes (numeric fields plus string renderings); nothing is returned.
    The local value subtracts longitude/15 hours from Greenwich sidereal
    time (site longitude is treated as positive-west here).

    Fix: the second-overflow branch for local time referenced the
    misspelled name ``currenet_LST`` and would have raised NameError
    whenever seconds reached 60.
    '''
    #Greenwich Sidereal Time
    #JD @ 0 hours GMT
    julian=sum(jdcal.gcal2jd(x.year,x.month,x.day))
    h,m,s=x.hour,x.minute,x.second
    ctime=h+m/60.+s/3600. #UTC time in decimal hours
    # Julian centuries since J2000.0, then the standard GST polynomial.
    T=(julian-2451545.0)/36525.0
    #Current Sidereal Time in Greenwich:
    T0=6.697374558+(2400.051336*T)+(0.000025862*(T**2))+(ctime*1.0027379093)
    while T0>=24:
        T0-=24 #GST in decimal hours
    # NOTE(review): lon/E/W computed here are never used below; the module
    # global `longitude` is used directly for the local correction.
    if Coordinates.W==1:
        lon=longitude*-1
        E=1
        W=0
    else:
        lon=longitude
    # --- split Greenwich decimal hours into H/M/S ---
    dec1,int_=math.modf(T0) #int=hours, dec1=frac. of hours
    hour=int_ #h=value for hour
    dec2,int_=math.modf(T0)
    dec2*=60. #dec2=value for minutes
    dec3,int_=math.modf(dec2)
    dec3*=60 #dec3=value for seconds. NOT an integer.
    dec3=round(dec3,3) #rounding off the seconds value
    dec2=int(dec2) #rounding off the minutes value. The extra bit was passed to the seconds.
    temp=hour
    hour=int(temp) #rounding off for hours
    #Hour, Dec2, and Dec3 at this point are LST at Greenwich in HMS
    current_GST.hour=hour
    while current_GST.hour>=24:
        current_GST.hour-=24
    current_GST.minute=dec2
    while current_GST.minute>=60.:
        current_GST.hour+=1
        current_GST.minute-=60
    current_GST.second=dec3
    while current_GST.second>=60.:
        current_GST.minute+=1
        current_GST.second-=60
    s_hour,s_dec2,s_dec3=str(hour),str(dec2),str(dec3)
    current_GST.shour=s_hour+'h '
    current_GST.sminute=s_dec2+'m '
    current_GST.ssecond=s_dec3+'s '
    #Now the class current_GST contains up to date values for the Greenwich LST :)
    #Now, for the local LST depending on longitude:
    lst=T0-(longitude/15.)
    while lst<0:
        lst+=24.
    # --- same H/M/S split for the local value ---
    dec1,int_=math.modf(lst) #int=hours, dec1=frac. of hours
    hour=int_ #h=value for hour
    dec2,int_=math.modf(lst)
    dec2*=60. #dec2=value for minutes
    dec3,int_=math.modf(dec2)
    dec3*=60 #dec3=value for seconds. NOT an integer.
    dec3=round(dec3,3) #rounding off the seconds value
    dec2=int(dec2) #rounding off the minutes value. The extra bit was passed to the seconds.
    temp=hour
    hour=int(temp) #rounding off for hours
    current_LST.hour=hour
    while current_LST.hour>=24.:
        current_LST.hour-=24
    current_LST.minute=dec2
    while current_LST.minute>=60.:
        current_LST.hour+=1
        current_LST.minute-=60.
    current_LST.second=dec3
    while current_LST.second>=60.:
        current_LST.minute+=1
        current_LST.second-=60.  # fixed: was `currenet_LST` (NameError)
    s_hour2,s_dec2,s_dec3=str(hour),str(dec2),str(dec3)
    current_LST.shour=s_hour2
    current_LST.sminute=s_dec2
    current_LST.ssecond=s_dec3
    return
def NewTarget():
    '''Interactively read target RA/DEC from stdin into target_RA/target_DEC.

    Out-of-range minutes/seconds carry into the next field; RA hours wrap
    at 24. On an invalid declination the target is reset and tracking
    stopped.

    Fixes: the invalid-DEC branch called the non-existent ``zeroTarget()``
    (NameError) and assigned a *local* ``IsTracking`` that had no effect;
    it now uses the real helpers zero_Target() and StopTrack().
    '''
    target_RA.hour= int(input('Enter RA hour  : '))
    while target_RA.hour>=24:
        target_RA.hour-=24
    target_RA.minute= int(input('Enter RA minute  : '))
    while target_RA.minute>=60.:
        target_RA.hour+=1
        target_RA.minute-=60
    target_RA.second= float(input('Enter RA second  : '))
    while target_RA.second>=60:
        target_RA.minute+=1
        target_RA.second-=60
    target_DEC.degree=int(input('Enter DEC degree: '))
    while target_DEC.degree>=90:
        target_DEC.degree-=90
    target_DEC.minute= int(input('Enter DEC minute: '))
    while target_DEC.minute>=60.:
        target_DEC.degree+=1
        target_DEC.minute-=60
    target_DEC.second=float(input('Enter DEC second: '))
    while target_DEC.second>=60:
        target_DEC.minute+=1
        target_DEC.second-=60
    target_RA.shour,target_RA.sminute,target_RA.ssecond=str(target_RA.hour), \
    str(target_RA.minute),str(target_RA.second)
    target_DEC.sdegree,target_DEC.sminute,target_DEC.ssecond=str(target_DEC.degree), \
    str(target_DEC.minute),str(target_DEC.second)
    # Only a negative out-of-range declination can reach this check (the
    # wrap loop above already forces degree < 90).
    if target_DEC.degree<-90 or target_DEC.degree>90:
        zero_Target()
        StopTrack()
    return
def RA(x,ha):
    '''Update current_RA from sidereal time plus an hour-angle offset.

    x -- an object with hour/minute/second fields (normally current_LST).
    ha -- offset in decimal hours added to the sidereal time.
    Writes into the module-level current_RA via converted_HMS; returns
    None. No-op while tracking, so a slew target is not overwritten.
    '''
    if IsTracking==False:
        d=HMS_Degrees(x)+ha
        Degrees_HMS(d)
        current_RA.hour=converted_HMS.newh
        current_RA.minute=converted_HMS.newm
        current_RA.second=converted_HMS.news
        current_RA.shour=converted_HMS.snewh
        current_RA.sminute=converted_HMS.snewm
        current_RA.ssecond=converted_HMS.snews
    return
def RA_Change(x):
    '''Compute target RA minus current RA and store it via Degrees_HMS.

    Fix: current_RA/target_RA carry hour/minute/second fields, so they
    must go through HMS_Degrees -- the original called DMS_Degrees, which
    reads .degree and would raise AttributeError on first use.

    The parameter *x* is unused (kept for interface compatibility).
    Returns None; the H/M/S result lands in the converted_HMS class.
    '''
    current_pos = HMS_Degrees(current_RA)
    target_pos = HMS_Degrees(target_RA)
    diff = target_pos - current_pos
    return Degrees_HMS(diff)
def Track():
    '''Slew to the target: copy target RA/DEC into the current position
    (numeric fields plus regenerated string fields) and raise the
    module-level tracking flag.'''
    global IsTracking
    for field in ('hour', 'minute', 'second'):
        value = getattr(target_RA, field)
        setattr(current_RA, field, value)
        setattr(current_RA, 's' + field, str(value))
    for field in ('degree', 'minute', 'second'):
        value = getattr(target_DEC, field)
        setattr(current_DEC, field, value)
        setattr(current_DEC, 's' + field, str(value))
    IsTracking = True
    return
def Status():
    '''Return the human-readable mount state for the status display.'''
    return 'Locked' if IsTracking == False else 'Tracking!'
def StopTrack():
    '''Clear the module-level tracking flag.'''
    global IsTracking
    IsTracking = False
def zero_All():
    '''Home both axes: DEC to the site latitude, RA to the local meridian.'''
    zero_DEC()
    zero_RA()
    return
def zero_DEC():
    '''Home declination: convert the site latitude to DMS and copy the
    result (numeric and string fields) into current_DEC.'''
    Degrees_DMS(latitude)
    field_map = (('newd', 'degree'), ('newm', 'minute'), ('news', 'second'),
                 ('snewd', 'sdegree'), ('snewm', 'sminute'), ('snews', 'ssecond'))
    for src, dst in field_map:
        setattr(current_DEC, dst, getattr(converted_DMS, src))
    return
def zero_RA():
    '''Home right ascension to the meridian by copying the current local
    sidereal time into current_RA. No-op while tracking, so an active
    slew target is never overwritten.'''
    if IsTracking == False:
        for field in ('hour', 'minute', 'second', 'shour', 'sminute', 'ssecond'):
            setattr(current_RA, field, getattr(current_LST, field))
    return
def zero_Target():
    '''Reset every target RA/DEC field: numeric fields to 0, string
    fields to '0'.'''
    targets = ((target_RA, ('hour', 'minute', 'second')),
               (target_DEC, ('degree', 'minute', 'second')))
    for obj, fields in targets:
        for field in fields:
            setattr(obj, field, 0)
            setattr(obj, 's' + field, '0')
    return
# --- start-up: home both axes and clear any target ---
IsTracking=False
zero_RA() #sets RA to meridian
zero_DEC()
zero_Target()
go=0 #condition for program to prompt for target position
if go==1:
    NewTarget()
else:
    StopTrack()
# Main display loop: every tstep seconds recompute sidereal time, RA and
# hour angle, then redraw the status console (clear_output is the IPython
# screen refresh).
while im_still_presenting==True:
    utc_time,loc_time=datetime.utcnow(),time.ctime()
    LST(utc_time)
    RA(current_LST,alpha)
    # The hour angle follows the target while tracking, otherwise the
    # current pointing.
    if IsTracking==False:
        HA(current_RA)
    else:
        HA(target_RA)
    if go==1: Track()
    print('Coordinates: ', latitude, ' N',longitude, ' W',flush=True)
    print('',flush=True)
    print('UTC        :',utc_time,'        Julian Day:',JD(utc_time),flush=True)
    print('Local time :',loc_time,'            Air Mass  :','....',flush=True)
    #print(DateToDecimal(utc_time))
    print('LST        :',current_LST.shour+'h',current_LST.sminute+'m',current_LST.ssecond+'s','     Altitude  :','....',\
          'degrees',flush=True)
    print('Hour Angle :',current_HA.shour+'h',current_HA.sminute+'m',current_HA.ssecond+'s',flush=True)
    print('--------------------------------------------------------------------',flush=True)
    print('Target Data:','                        Current Status: ',Status(),flush=True)
    print('',flush=True)
    print('Target RA  :',target_RA.shour+'h',target_RA.sminute+'m',target_RA.ssecond+'s',
          '       Current RA:',current_RA.shour+'h',current_RA.sminute+'m',current_RA.ssecond+'s',flush=True)
    print('Target DEC :',target_DEC.sdegree+'\xb0',target_DEC.sminute+'m',target_DEC.ssecond+'s',
          '     Current DEC:',current_DEC.sdegree+'\xb0',current_DEC.sminute+'m',current_DEC.ssecond+'s',flush=True)
    time.sleep(tstep)
    clear_output(wait=True)
| [
"noreply@github.com"
] | slegeza.noreply@github.com |
144e62e292c034341f4875ad35563ad05f89afb0 | 0486a473653b4f74581d063de54ff9c9f66a05a5 | /scripts/fmri/makeVec.py | 83732649a223a53aa46e32336adeefd994cc601a | [] | no_license | mlko53/cultsid | 845b710fe847a520d3e886e6f5d9b803d8a8d4db | 7819d3f8acbe2d0353a36ac3a54d644916371d9d | refs/heads/master | 2021-11-30T02:35:45.744875 | 2021-11-14T07:08:02 | 2021-11-14T07:08:02 | 150,183,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,006 | py | #!/usr/bin/env python
# Stephanie Greer 07-26-07
# Last modified 02-01-11 by Kiefer Katovich
Usage = """This script is for creating vectors from master matrix files and vector description files."""
import string
import sys
import os
import re
def generateVector(header, masterMat, conditions, codes):
    """Build the output vector: one entry per row of *masterMat*.

    Each row starts as "0"; for every condition set whose =/>/< clauses
    all match the row, the corresponding code is written in (a "$column"
    code substitutes that column's value from the row). Returns the list
    of marks, or [] on a bad "$" code.
    """
    toReturn = []
    print "This vector will be ", len(masterMat), " entrys long"
    for i in range(len(masterMat)):
        toReturn.insert(i, "0")
        for c in range(len(conditions)):
            cur = conditions[c]
            #used to check each comparator. Defalt is one incase there are no entries for a comparator.
            checks = [1, 1, 1];
            mark = "0"
            # Equality clauses: the cell must be one of the listed values.
            for e in cur["="]:
                ind = header.index(e[0])
                if(not masterMat[i][ind].strip(" \t\n:\"") in e[1]):
                    checks[0] = 0
                    break
            for e in cur[">"]:
                ind = header.index(e[0])
                #NOTICE: this converts to INTEGERS--if the csv contains decimals and you are checking that this must
                #be changed to support floats
                if(float(masterMat[i][ind].strip(" \t\n:\"")) <= float(e[1][0])):
                    checks[1] = 0
                    #print masterMat[i][ind].strip(" \t\n:\"")
                    break
            for e in cur["<"]:
                ind = header.index(e[0])
                #NOTICE: this converts to INTEGERS--if the csv contains decimals and you are checking that this must
                #be changed to support floats
                if(float(masterMat[i][ind].strip(" \t\n:\"")) >= float(e[1][0])):
                    checks[2] = 0
                    break
            if(not 0 in checks): #this means that all of the checks are still 1 and all conditions succeded
                mark = codes[c].strip(" \t\n:\"")
            if("$" in mark): #use a field instead of a number
                col = mark.strip("$")
                if(col in header):
                    ind = header.index(col)
                    mark = masterMat[i][ind]
                else:
                    print "ERROR: The code \"", col, "\" is not in the set of columns."
                    return []
            if(mark != "0"):
                if(toReturn[i] != "0"):
                    print "Warning: row ", i, "matches code: ", toReturn[i], " and code: ", mark
                toReturn[i] = mark
    #sanity check on the codings:
    for elm in codes:
        if((not "$" in elm) & (not elm.strip(" \t\n:\"") in toReturn)):
            print "Warning: code \"", elm.strip(" \t\n:\""), "\" never appears in the output"
    #return the new vector
    return toReturn
""" checkClean is called form ParesVector used to make sure that the contitions and fields to match are acceptable values. It also strips all the leading and trailing whitespace from each field."""
def checkClean(condition, header, masterMat, splitter):
toReturn = condition
toReturn[0] = condition[0].strip(" \t\n:\"")
if(not toReturn[0] in header):
print "ERROR: The label \"", toReturn[0], "\" is not in the set of columns."
return []
matches = condition[1].split(",")
#print matches
for n in range(len(matches)):
matches[n] = matches[n].strip(" \t\n:\"")
if splitter == '=':
if(not matches[n] in masterMat):
print "Warning: The value \"", matches[n], "\" does not appear anywhere in the origional matrix. (check for misspelling or missing commas.)"
matches.remove(matches[n])
toReturn[1] = tuple(matches)
return toReturn
""" takes a vector discription and returns the conditions and codes needed for generate Vector"""
def getConditions(vec, header, matFile):
    """Parse a vector description into condition dicts and output codes.

    The description is split on MARK; each piece must contain exactly one
    WITH. Conditions are stored per-comparator in a {"=", ">", "<"} dict
    via checkClean. Returns [conditions, codes], or None on a malformed
    comparator or a failed checkClean.
    """
    conditions = []
    codes = []
    vecDef = vec.split("MARK")
    for i in range(len(vecDef))[1:]: #skip the first entry because it will be the header info
        cur = vecDef[i].split("WITH")
        if(len(cur) < 2):
            print "ERROR: \"MARK\" key followed by no \"WITH\" key"
        elif(len(cur) > 2):
            print "ERROR: more than one \"WITH\" key for one \"MARK\" key"
        else:
            codes.insert(i, cur[1])
            withinCond = cur[0].split("AND")
            #conditions will be a dictionary with =, < amd < as keys for easy matching in "generateVector"
            conditions.insert(i - 1, {"=":[], ">":[], "<":[]})
            for j in range(len(withinCond)):
                if(withinCond[j].find("=") != -1):
                    spliter = "="
                elif(withinCond[j].find(">") != -1):
                    spliter = ">"
                elif(withinCond[j].find("<") != -1):
                    spliter = "<"
                else:
                    print "comparator must be either \"=\", \">\" or \"<\""
                    return
                #prepares the condition for "generateVector" using "checkClean"
                cleanCond = checkClean(withinCond[j].split(spliter), header, ("").join(matFile), spliter)
                if(cleanCond != []):
                    conditions[i - 1][spliter].append(cleanCond)
                else:
                    return
    return [conditions, codes]
def striplist(listIn):
    """Return a copy of *listIn* with surrounding whitespace stripped
    from every element."""
    return [item.strip() for item in listIn]
def getBuildConditions(vec, buildFile, header, matFile):
buildHeader = striplist(buildFile[0].split(","))
buildFile = buildFile[1:]
#this will parse the input matrix and turn it into a list of lists
buildMat = makeMat(buildFile)
conditions = []
codes = []
title = ''
vecDef = vec.split("MARK")
if(len(vecDef) > 2):
print "WARNING: You can only have one MARK statement in your BUILD_FROM vector. Only the first one will be used."
splitWith = vecDef[1].split("WITH")
if(len(splitWith) < 2):
print "ERROR: \"MARK\" key followed by no \"WITH\" key"
elif(len(splitWith) > 2):
print "ERROR: more than one \"WITH\" key for one \"MARK\" key"
else:
insertCol = splitWith[1].strip()
if(insertCol in buildHeader):
insertInd = buildHeader.index(insertCol)
else:
print "ERROR: The name, ", insertCol, ", never appears in the BUILD_FROM file."
withinCond = splitWith[0].split("AND")
matchCols = []
matchInds = []
for w in range(len(withinCond)):
cur = withinCond[w].strip()
if(cur in buildHeader):
matchCols.insert(w, cur)
matchInds.insert(w, buildHeader.index(cur))
else:
print "ERROR: The name, ", matchCol, ", never appears in the BUILD_FROM file."
for i in range(len(buildMat)):
conditions.insert(i, {"=":[], ">":[], "<":[]})
for m in range(len(matchInds)):
cleanCond = checkClean([matchCols[m], buildMat[i][matchInds[m]]], header, ("").join(matFile))
if(cleanCond != []):
conditions[i]["="].append(cleanCond)
codes.insert(i, buildMat[i][insertInd])
title = insertCol
return [conditions, codes, title]
def makeMat(matFile):
    """Split each CSV line of *matFile* into a list of fields, dropping
    completely blank lines (a single empty field)."""
    mat = []
    for line in matFile:
        fields = line.split(",")
        # Keep the row unless it is just one empty string (a blank line,
        # e.g. trailing newlines at the end of the file).
        if fields[0] != "" or len(fields) != 1:
            mat.append(fields)
    return mat
"""ParseVector takes a string that includes all the information for one vector and parses the contents of that string. It is called from the main loop on each vector individually."""
def ParseVector(vec, subject):
    """Process one vector description: load the INPUT csv, build the
    conditions (inline or BUILD_FROM), generate the vector and save it
    (new file, appended column, or rebuilt column)."""
    vecIO = vec.split("\"")
    inKey = vecIO[0].strip(" \t\n:")
    if (inKey != "INPUT"):
        print "Skipping vector because there is no \"INPUT\" file. (check for missing quotes around filename)"
        return
    infile = vecIO[1].strip(" \t\n")
    print os.getcwd()
    # MK 9.23 - modifly infile csv path to reflect new retructured directory
    infile = "../../../bhvr/fmri/" + subject + "_" + infile[:3].upper() + '.csv'
    outKey = vecIO[2].strip(" \t\n:")
    outfile = ""
    append = False
    build = False
    title = ""
    # Three output modes: a fresh file, a column rebuilt in place from a
    # BUILD_FROM csv, or a column appended to an existing file.
    if (outKey == "OUTPUT"):
        outfile = vecIO[3].strip(" \t\n")
        print "Vector will be savd as ", outfile
    elif(outKey == "BUILD_FROM"):
        buildfile = vecIO[3].strip(" \t\n")
        outfile = infile
        build = True
        print "Vector will added as a column in the file:", infile
    elif(outKey == "APPEND_TO"):
        outfile = vecIO[3].strip(" \t\n")
        append = True
        if(vecIO[4].strip(" \t\n:") == "TITLE"):
            title = vecIO[5].strip(" \t\n")
        print "Vector will added as a column in the file:", outfile
    else:
        print "No output given (check for missing quotes around filename). \nVector will be saved as \"vector.1D\""
        outfile = "vector.1D"
    matFile = open(infile).read().split("\n") #matFile is a list containing each line of the input matrix
    # Fall back to classic-Mac line endings when no \n was found.
    if (len(matFile) == 1):
        matFile = open(infile).read().split('\r')
    header = matFile[0].split(",") #header now contains a list of the column headers (first line) in the input matrix
    for n in range(len(header)):
        header[n] = header[n].strip(" \t\n:\"")
    #this will parse the input matrix and turn it into a list of lists
    matFile = matFile[1:]
    mat = makeMat(matFile)
    if(build):
        buildFile = open(buildfile).read().split("\n")
        cond_output = getBuildConditions(vec, buildFile, header, matFile)
        title = cond_output[2]
    else:
        cond_output = getConditions(vec, header, matFile)
    conditions = cond_output[0]
    codes = cond_output[1]
    #print conditions
    #print codes
    finalvec = generateVector(header, mat, conditions, codes)
    #save the new vector returned from generateVector
    if(append | build):
        out = open(outfile, "r")
        outText = out.read().split("\n")
        out.close()
        if(len(outText) < len(finalvec)):
            print "Warning: output vector and file to append to do not match in length. Output will be saved as \"vector.1D\""
            outfile = "vector.1D"
        elif(title in outText[0]):
            # Column already present: overwrite it in place row by row.
            print "WARNING: the column, ", title.strip(), ", already exists.  It will be overwriten."
            header = striplist(outText[0].split(','))
            ind = header.index(title)
            outMat = makeMat(outText[1:])
            for i in range(len(finalvec)):
                outMat[i][ind] = finalvec[i]
                finalvec[i] = ",".join(outMat[i])
            finalvec.insert(0, outText[0])
        elif(title != ""):
            # New titled column: prepend the title and join row-wise.
            finalvec.insert(0, title)
            for i in range(len(finalvec)):
                finalvec[i] = outText[i] + "," + finalvec[i]
    out = open(outfile, "w")
    out.write("\n".join(finalvec))
    out.close()
##### Flow of control starts here ######
#start by getting the input
# NOTE(review): len(sys.argv) < 1 can never be true (argv always holds
# the script name), and even when Usage printed, execution would fall
# through -- presumably this was meant to be `< 3` with an exit.
if (len(sys.argv) < 1 ):
    print Usage
File = sys.argv[1]
fid = open(File)
mat2v = fid.read() #mat2v now stores the file contents for parsing
fid.close()
#split on "BEGIN_VEC"
beginVec = mat2v.split("BEGIN_VEC")
#loops through each individual vector
count = 1
for cur in beginVec[1:]:
    oneVec = cur.split("END_VEC")[0] #oneVec now contains all info between one "BEGIN_VEC" and "END_VEC" pair.
    print "Vector", count, ":"
    ParseVector(oneVec, sys.argv[2])
    count = count + 1
    print "\n"
| [
"mlko53@stanford.edu"
] | mlko53@stanford.edu |
8f56803ee54c6e67cd01d7a3d54d50fc47302006 | d54c67daecf5bd1d2559569c7f228b43cee42055 | /problem025.py | 4a98aecabe606b2bc339dae65b17e2151d7fca97 | [
"MIT"
] | permissive | mazayus/ProjectEuler | 0ba39169eec550ca798dcbb480ce0d2a63065211 | 64aebd5d80031fab2f0ef3c44c3a1118212ab613 | refs/heads/master | 2016-09-06T13:13:47.804437 | 2015-11-01T15:23:25 | 2015-11-01T15:23:25 | 34,908,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | #!/usr/bin/env python3
def fibs():
    """Yield the Fibonacci numbers 1, 1, 2, 3, 5, ... without end."""
    current, following = 1, 1
    while True:
        yield current
        current, following = following, current + following
# Project Euler 25: print the index of the first Fibonacci term with 1000 digits.
print(next(i for (i, f) in enumerate(fibs(), 1) if len(str(f)) == 1000))
| [
"milan.izai@gmail.com"
] | milan.izai@gmail.com |
76c304371760591a370e3a7016b8b3f37697c8a3 | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/surface/compute/instances/network_interfaces/__init__.py | 0e14c7451d0cb7bb48f26fdf7d6ff9b8afe2549f | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 1,437 | py | # -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for reading and manipulating instances."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class InstanceNetworkInterfaces(base.Group):
  """Read and manipulate Compute Engine VM instance network interfaces."""
# detailed_help is attached after the class definition; presumably the
# calliope framework reads this mapping when rendering the group's long help
# (DESCRIPTION replacing the docstring body) -- confirm against base.Group.
InstanceNetworkInterfaces.detailed_help = {
    'DESCRIPTION': """
        Read and manipulate Compute Engine VM instance network interfaces.
        For more information about VM instance network interfaces, see the
        [network interfaces documentation](https://cloud.google.com/vpc/docs/multiple-interfaces-concepts).
        See also: [VM instance network interfaces API](https://cloud.google.com/compute/docs/reference/rest/v1/instances/updateNetworkInterface).
    """,
}
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
3b58daa99e2271a5c35764b8e85df4f098e39220 | f8e7836f85c4256ecf557607d95f4411534a4258 | /backend/menu/api/v1/urls.py | 111633e9de336436f653d7b7b1c7a5eb1c2eee5a | [] | no_license | crowdbotics-apps/hottest-in-da-city-23083 | 99d8120a9fdab2b48f83bc27ae1c21fe91057e39 | 73f62cdd5cd23510f4d02f9724e60a6998b0a4ac | refs/heads/master | 2023-01-20T21:43:14.387140 | 2020-11-30T19:10:30 | 2020-11-30T19:10:30 | 317,314,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .viewsets import (
ItemVariantViewSet,
CountryViewSet,
ItemViewSet,
CategoryViewSet,
ReviewViewSet,
)
# DRF router: generates the list/detail routes (plus the browsable API root)
# for each registered viewset.
router = DefaultRouter()
router.register("item", ItemViewSet)
router.register("country", CountryViewSet)
router.register("review", ReviewViewSet)
router.register("itemvariant", ItemVariantViewSet)
router.register("category", CategoryViewSet)
# Mount all generated routes at this module's URL prefix.
urlpatterns = [
    path("", include(router.urls)),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
eca24ff7bc580f259dd18ebec0de9b8ecd45dd92 | 2aa6e275fff292d9dca954b909c5a2ed7edc7975 | /Chatbot_KG_rest/Api/bot/kbqa_server.py | e5b5ffddc540a016c0bd387cff09e46d09cbf078 | [] | no_license | charlesXu86/Chatbot_KG | f2733a6b830c521ead9273de0e534941e58ee96b | 4ca161e28e4544fb7b66688206d2b3a67c68ef25 | refs/heads/master | 2022-04-14T12:41:27.251751 | 2020-03-27T14:02:39 | 2020-03-27T14:02:39 | 198,364,335 | 12 | 13 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | # -*- coding: utf-8 -*-
'''
@Author : Xu
@Software: PyCharm
@File : kbqa_server.py
@Time : 2020/3/9 9:57 上午
@Desc :
'''
from django.http import JsonResponse
import json
import logging
import datetime
from Chatbot_KG_rest.Api.bot.kbqa_predict import get_answer
logger = logging.getLogger(__name__)
def kbqa_server(request):
    """Django view: answer a knowledge-base question.

    Expects a POST whose body is JSON of the form {"msg": "<question>"} and
    returns a JSON payload containing the question, the answer produced by
    get_answer(), and a timestamp.  Non-POST requests fall through to the
    400 response below.
    """
    if request.method == 'POST':
        try:
            jsonData = json.loads(request.body.decode('utf-8'))
            msg = jsonData["msg"]
            localtime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            result = get_answer(msg)
            dic = {
                "desc": "Success",
                "ques": msg,
                "result": result,
                "time": localtime
            }
            log_res = json.dumps(dic, ensure_ascii=False)
            logger.info(log_res)
            return JsonResponse(dic)
        except Exception as e:
            # Bug fix: the original logged at INFO and fell off the end of the
            # view (returning None, which Django rejects).  Log the full
            # traceback and return an explicit error response instead.
            logger.exception(e)
            return JsonResponse({"desc": "Internal error"}, status=500)
else:
return JsonResponse({"desc": "Bad request"}, status=400) | [
"charlesxu86@163.com"
] | charlesxu86@163.com |
31e2d70f5d872dd9f0e4d55886d5d02c3e8cad8c | ca550fd82630630abcf819025fcef4f88ae17052 | /demos/view_file.py | 12b83a72d82ae9c5f2dd480cefb792e57f8066a3 | [] | no_license | MatthewDaws/TileWindow | 63647ca737bea0c6631f46870a66c98fc4cca85c | dd0ebba5e50e81deb9f952dac1fbd78223c80954 | refs/heads/master | 2021-01-02T09:29:59.740071 | 2017-08-15T20:27:49 | 2017-08-15T20:27:49 | 99,228,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | # Display a image and allow zooming in and out and scrolling
import os, sys
sys.path.insert(0, os.path.abspath(os.path.join("..")))
import tilewindow
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.filedialog
import PIL.Image
root = tk.Tk()
# Let row 1 / column 0 (the image area) absorb all resize space.
tilewindow.util.stretch(root, rows=[1], columns=[0])
frame = ttk.Frame(root)
tilewindow.util.stretch(frame, [0], [0])
frame.grid(sticky=tk.NSEW, row=1)
# Scrollable/zoomable image widget plus its two scroll bars.
image_widget = tilewindow.Image(frame)
image_widget.grid(row=0, column=0, sticky=tk.NSEW)
xscroll, yscroll = image_widget.make_scroll_bars(frame)
yscroll.grid(row=0, column=1, sticky=tk.NS)
xscroll.grid(row=1, column=0, sticky=tk.EW)
# Ask the user for an image file and load it with PIL.
filename = tkinter.filedialog.askopenfilename(parent=root, filetypes=[("PNG file", "*.png"),
    ("JPEG file", "*.jpg"), ("Other PIL supported file", "*.*")])
image = PIL.Image.open(filename)
image_widget.set_image(image, allow_zoom=True)
# Presumably lets the scroll bars auto-hide when not needed -- see tilewindow.
xscroll.set_to_hide()
yscroll.set_to_hide()
# Button bar in row 0, above the image.
frame = ttk.Frame(root)
frame.grid(sticky=tk.NSEW, row=0)
def no_zoom():
    # Reset to 1:1 pixel scale.
    image_widget.zoom = 1.0
ttk.Button(frame, text="Restore zoom", command=no_zoom).grid(row=0, column=0)
def zoom():
    # Choose the zoom factor that makes the whole image fit the widget.
    w, h = image_widget.size
    zw = w / image.width
    zh = h / image.height
    image_widget.zoom = min(zw, zh)
ttk.Button(frame, text="Zoom to window", command=zoom).grid(row=0, column=1)
root.mainloop()
| [
"matt.daws@cantab.net"
] | matt.daws@cantab.net |
9ff226798f03d5f8dd6b0c6bb08ff4f2d79d00a0 | 1f19b252174ef68969e5df10752950c2b296e6ce | /group-nlpppppp/main.py | 59fbee5c510f161c873aa0b37d7dbb060de95583 | [] | no_license | hdanch/git | 015fae9a283c914d78bc61c686e60251dceb9fdb | c8d98fef82658760c9e8929afc8804515ae6fd9c | refs/heads/master | 2020-03-13T12:19:51.050832 | 2018-04-26T08:05:16 | 2018-04-26T08:05:16 | 131,117,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,077 | py | #!/usr/bin/env python3
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.datasets import dump_svmlight_file
from sklearn import metrics
import numpy as np
import sys
import logging
import settings
import utils
import features
LOGGER = logging.getLogger("task3")
def main(task='A'):
    """Cross-validate the SemEval shared-task classifier and dump predictions.

    task: 'A' (binary, positive-class F1) or 'B' (multiclass, macro F1).
    Reads ./data/SemEval2018-T4-train-task<task>.txt, writes one prediction
    per line to ./result/predictions-task<task>.txt.
    """
    TASK = task
    DATASET_FP = './data/SemEval2018-T4-train-task' + TASK + '.txt'
    FNAME = './result/predictions-task' + TASK + '.txt'
    LOGGER.debug('Task file ' + DATASET_FP)
    LOGGER.debug('Result file ' + FNAME)
    PREDICTIONSFILE = open(FNAME, "w")
    K_FOLDS = 10 # 10-fold crossvalidation
    CLF = LinearSVC(tol=1e-8) # the default, non-parameter optimized linear-kernel SVM
    # Loading dataset and featurised simple Tfidf-BoW model
    corpus, y = utils.parse_dataset(DATASET_FP)
    # Loading tokenized tweet, pos and entity from ark and tw
    syn_info = utils.parse_tweet()
    # Get features
    X = features.featurize(corpus, syn_info)
    class_counts = np.asarray(np.unique(y, return_counts=True)).T.tolist()
    LOGGER.debug('class counts ' + str(class_counts))
    # Returns an array of the same size as 'y' where each entry is a prediction obtained by cross validated
    predicted = cross_val_predict(CLF, X, y, cv=K_FOLDS)
    # confusion matrix
    confu = metrics.confusion_matrix(y, predicted)
    print(confu)
    # Modify F1-score calculation depending on the task
    # NOTE(review): `score` is only bound for task 'a'/'b'; any other value
    # raises NameError at the logging line below.
    if TASK.lower() == 'a':
        score = metrics.f1_score(y, predicted, pos_label=1)
    elif TASK.lower() == 'b':
        score = metrics.f1_score(y, predicted, average="macro")
    LOGGER.debug("F1-score Task" + TASK + ': ' + str(score*100))
    print('**********************************')
    print ("F1-score Task", TASK, score*100)
    print('**********************************')
    for p in predicted:
        PREDICTIONSFILE.write("{}\n".format(p))
    PREDICTIONSFILE.close()
if __name__ == '__main__':
    # Command line: python main.py <task letter>
    TASK = sys.argv[1]
    main(task=TASK)
| [
"hdan@hdandeMBP.lan"
] | hdan@hdandeMBP.lan |
95dd71c6f5d7836cd8ed789a1e9c4f0e4def8cb5 | ac45b55915e634815922329195c203b1e810458c | /baseline1322.py | 507bfec79efbbce081a8f193e9552d7f0ec41198 | [] | no_license | mj1e16lsst/iridisPeriodicNew | 96a8bfef0d09f13e18adb81b89e25ae885e30bd9 | dc0214b1e702b454e0cca67d4208b2113e1fbcea | refs/heads/master | 2020-03-23T15:01:23.583944 | 2018-07-23T18:58:59 | 2018-07-23T18:58:59 | 141,715,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,059 | py | from operator import add
#from astropy import units as u
#from astropy.coordinates import SkyCoord
#from astropy.stats import LombScargle
#from gatspy.periodic import LombScargleFast
from functools import partial
#from gatspy import periodic
#import matplotlib.pyplot as plt
#from matplotlib.font_manager import FontProperties
import lomb_scargle_multiband as periodic
from multiprocessing import Pool
import numpy as np
import os
#from sqlite3 import *
import random
from random import shuffle
from random import randint
import Observations
import Magnitudes
# In[13]:
#conn = connect('minion_1016_sqlite.db')
#conn = connect('astro_lsst_01_1004_sqlite.db')
#conn = connect('minion_1020_sqlite.db')
# In[14]:
# LSST zero points u,g,r,i,z,y
# (index 0 of these per-filter lists is an unused placeholder so that index 1
# corresponds to u, ..., index 6 to y)
zeroPoints = [0,26.5,28.3,28.13,27.79,27.4,26.58]
FWHMeff = [0.8,0.92,0.87,0.83,0.80,0.78,0.76] # arcmins?
pixelScale = 0.2
readOut = 12.7
sigSys = 0.005
flareperiod = 4096
flarecycles = 10
dayinsec=86400
background = 40
# sat mag u,g,r,i,z,y=14.7,15.7,15.8,15.8,15.3 and 13.9
# start date 59580.033829 end date + 10 years
#maglist=[20]*7
lim = [0, 23.5, 24.8, 24.4, 23.9, 23.3, 22.1] # limiting magnitude ugry
sat = [0, 14.7, 15.7, 15.8, 15.8, 15.3, 13.9] # sat mag as above
# In[15]:
# Run-size knobs: bootstrap iterations, grid sizes, worker count, and which
# magnitude rows of Magnitudes.mag1322 to process.
looooops = 10000
maglength = 20
freqlength = 20
processors = 20
startnumber = 0
endnumber = startnumber + 1
#observingStrategy = 'minion'
observingStrategy = 'astroD'
#observingStrategy = 'panstars'
inFile = '/home/mj1e16/periodic/in'+str(startnumber)+'.txt'
outFile = '/home/mj1e16/periodic/outbaseline1322'+str(startnumber)+'.txt'
#inFile = '/home/ubuntu/vagrant/'+observingStrategy+'/in'+observingStrategy+'KtypefullresultsFile'+str(startnumber)+'.txt'
#outFile = '/home/ubuntu/vagrant/'+observingStrategy+'/out'+observingStrategy+'KtypefullresultsFile'+str(startnumber)+'.txt'
# Observation epochs (days) per filter for this cadence baseline.
obs = Observations.obsbaseline1322
# In[19]:
def magUncertainy(Filter, objectmag, exposuretime,background, FWHM): # b is background counts per pixel
    """Photometric magnitude uncertainty as 1/SNR.

    Filter       : zero point of the band (mag giving 1 count/s).
    objectmag    : source magnitude.
    exposuretime : exposure length in seconds.
    background   : background counts per pixel.
    FWHM         : effective PSF FWHM; 0.2 is the pixel scale.  Gain assumed 1.
    """
    source_counts = (10**((Filter-objectmag)/2.5)) * exposuretime
    effective_pixels = 2.266*((FWHM/0.2)**2)
    noise_total = (source_counts/2.3) + (((background/2.3)+(12.7**2))*effective_pixels)
    snr = source_counts / (noise_total**0.5)
    return 1/snr
#from lsst should have got the website! https://smtn-002.lsst.io/
# In[20]:
def averageFlux(observations, Frequency, exptime):
    """Sinusoidal modulation averaged over the exposure.

    For each epoch in `observations` (days), sin(2*pi*Frequency*t) is sampled
    once per second across the `exptime`-second exposure and the samples are
    averaged, smearing the modulation the way a real integration would.
    """
    totals = [0]*len(observations)
    for tick in range(0, exptime):
        offset = tick/(3600*24)  # seconds -> days
        snapshot = [np.sin((2*np.pi*(Frequency))*(epoch+offset)) for epoch in observations]
        totals = [fresh + running for fresh, running in zip(snapshot, totals)]
    return [accumulated/exptime for accumulated in totals]
def Flux(observations,Frequency,exptime):
    """Instantaneous sinusoidal flux sin(2*pi*Frequency*t) at each epoch.

    `exptime` is accepted for signature parity with averageFlux but unused.
    """
    return [np.sin((2*np.pi*(Frequency)*epoch)) for epoch in observations]
# In[21]:
def ellipsoidalFlux(observations, Frequency,exptime):
    """Exposure-averaged sinusoid with a crude ellipsoidal asymmetry.

    Same averaging as averageFlux, then every epoch in the first 1.5 periods
    of the 2-period phase window is scaled by 1/3 and the remainder by 2/3,
    giving alternating minima of unequal depth.
    """
    period = 1/(Frequency)
    phase = [(epoch % (2*period)) for epoch in observations]
    totals = [0]*len(observations)
    for tick in range(0, exptime):
        snapshot = [np.sin((2*np.pi*(Frequency))*(epoch+(tick/(3600*24)))) for epoch in observations]
        totals = [fresh + running for fresh, running in zip(snapshot, totals)]
    averaged = [accumulated/exptime for accumulated in totals]
    for idx, ph in enumerate(phase):
        if (ph+(1.5*period)) < (3*period):
            averaged[idx] = averaged[idx]*(1./3.)
        else:
            averaged[idx] = averaged[idx]*(2./3.)
    return averaged
## this is doing something but not the right something, come back to it
# In[22]:
def flaring(B, length, dayinsec=86400,amplitude=1):
    """Synthesise one day of red-noise 'flaring', sampled every 30 s.

    B         : power-law index; Fourier amplitudes scale as (1/f)**(B/2).
    length    : unused (kept for backward compatibility with existing callers).
    dayinsec  : span to synthesise, in seconds.
    amplitude : scale applied to the normalised curve.

    Returns the list of (complex) flare magnitudes and, as before, publishes
    the module-level globals `flareMag` and `minutes` as a side effect.
    """
    global flareMag, minutes
    # Bug fix: np.linspace requires an integer sample count; on Python 3
    # dayinsec/30 is a float (the original relied on Python 2 integer
    # division), so cast explicitly.
    nsamples = int(dayinsec / 30)
    fouriers = np.linspace(0.00001,0.05,nsamples)
    logF = [np.log(x) for x in fouriers] # start at 30 go to a day in 30 sec increments
    # Gaussian deviates coloured by the (1/f)**(B/2) power law.
    real = [random.gauss(0,1)*((1/x)**(B/2)) for x in fouriers] #random.gauss(mu,sigma) to change for values from zurita
    # imaginary = [random.gauss(0,1)*((1/x)**(B/2)) for x in fouriers]
    IFT = np.fft.ifft(real)
    seconds = np.linspace(0,dayinsec, nsamples) # the day in 30 sec increments
    minutes = [x for x in seconds]
    # Shift to non-negative, normalise to unit mean, then re-zero and scale.
    minimum = (np.max(-IFT))
    positive = [x + minimum for x in IFT] # what did this even achieve? it helped with normalisation!
    normalised = [x/(np.mean(positive)) for x in positive] # find normalisation
    normalisedmin = minimum/(np.mean(positive))
    normalised = [x - normalisedmin for x in normalised]
    flareMag = [amplitude * x for x in normalised] # normalise to amplitude
    logmins = [np.log(d) for d in minutes] # for plotting?
#     plt.plot(minutes,flareMag)
#     plt.title('lightcurve')
#     plt.show()
    return flareMag
# In[55]:
def lombScargle(frequencyRange,objectmag=20,loopNo=looooops,df=0.001,fmin=0.001,numsteps=100000,modulationAmplitude=0.1,Nquist=200): # frequency range and object mag in list
    """Inject a modulated, flaring source into the cadence and recover its period.

    Builds per-filter light curves at the epochs in the module-level `obs`,
    fits a multiband Lomb-Scargle model, and bootstraps a significance level
    by re-fitting `loopNo` date-shuffled copies of the data.

    frequencyRange : the single injected frequency (cycles/day).
    objectmag      : per-filter magnitudes (index 0 unused, then u,g,r,i,z,y).
    Nquist         : unused in this variant.

    Returns a dict keyed 0..5: magnitude (r-band entry), true period,
    measured period, significance, peak power, list index (always 0 here).
    Depends on module globals: obs, sat, lim, zeroPoints, FWHMeff, background,
    sigSys, dayinsec, flarecycles, longflare and the `periodic` module.
    """
    #global totperiod, totmperiod, totpower, date, amplitude, frequency, periods, LSperiod, power, mag, error, SigLevel
    results = {}
    totperiod = []
    totmperiod = []
    totpower = [] # reset
    SigLevel = []
    filterletter = ['o','u','g','r','i','z','y']
    period = 1/(frequencyRange)
    # Size the frequency grid from the period to keep run time bounded.
    if period > 0.5:
        numsteps = 10000
    elif period > 0.01:
        numsteps = 100000
    else:
        numsteps = 200000
    freqs = fmin + df * np.arange(numsteps) # for manuel
    allobsy, uobsy, gobsy, robsy, iobsy, zobsy, yobsy = [], [], [], [], [], [], [] #reset
    measuredpower = [] # reset
    y = [allobsy, uobsy, gobsy, robsy, iobsy, zobsy, yobsy] # for looping only
    # Noiseless light curve per filter: ellipsoidal modulation scaled to the
    # requested amplitude, plus the precomputed flare curve `longflare`.
    for z in range(1, len(y)):
        #y[z] = averageFlux(obs[z], frequencyRange[frange], 30) # amplitde calculation for observations, anf frequency range
        y[z] = ellipsoidalFlux(obs[z], frequencyRange,30)
        y[z] = [modulationAmplitude * t for t in y[z]] # scaling
        for G in range(0, len(y[z])):
            flareMinute = int(round((obs[z][G]*24*60*2)%((dayinsec/(30*2))*flarecycles)))
            y[z][G] = y[z][G] + longflare[flareMinute] # add flares swapped to second but not changing the name intrtoduces fewer bugs
    date = []
    amplitude = []
    mag = []
    error = []
    filts = []
    # Keep only filters where the source sits between saturation and the
    # limiting magnitude; add gaussian photometric noise per epoch.
    for z in range(1, len(y)):
        if objectmag[z] > sat[z] and objectmag[z] < lim[z]:
            #date.extend([x for x in obs[z]])
            date.extend(obs[z])
            amplitude = [t + random.gauss(0,magUncertainy(zeroPoints[z],objectmag[z],30,background,FWHMeff[z])) for t in y[z]] # scale amplitude and add poisson noise
            mag.extend([objectmag[z] - t for t in amplitude]) # add actual mag
            error.extend([sigSys + magUncertainy(zeroPoints[z],objectmag[z],30,background,FWHMeff[z])+0.2]*len(amplitude))
            filts.extend([filterletter[z]]*len(amplitude))
            phase = [(day % (period*2))/(period*2) for day in obs[z]]
            pmag = [objectmag[z] - t for t in amplitude]
#             plt.plot(phase, pmag, 'o', markersize=4)
#             plt.xlabel('Phase')
#             plt.ylabel('Magnitude')
#             plt.gca().invert_yaxis()
#             plt.title('filter'+str(z)+', Period = '+str(period))#+', MeasuredPeriod = '+str(LSperiod)+', Periodx20 = '+(str(period*20)))
#             plt.show()
#     plt.plot(date, mag, 'o')
#     plt.xlim(lower,higher)
#     plt.xlabel('time (days)')
#     plt.ylabel('mag')
#     plt.gca().invert_yaxis()
#     plt.show()
    # Multiband Lomb-Scargle fit over the fixed frequency grid.
    model = periodic.LombScargleMultibandFast(fit_period=False)
    model.fit(date, mag, error, filts)
    power = model.score_frequency_grid(fmin, df, numsteps)
    # Constrain the optimizer's search window around the true period.
    if period > 10.:
        model.optimizer.period_range=(10, 110)
    elif period > 0.51:
        model.optimizer.period_range=(0.5, 10)
    elif period > 0.011:
        model.optimizer.period_range=(0.01, 0.52)
    else:
        model.optimizer.period_range=(0.0029, 0.012)
    LSperiod = model.best_period
    if period < 10:
        higher = 10
    else:
        higher = 100
#     fig, ax = plt.subplots()
#     ax.plot(1./freqs, power)
#     ax.set(xlim=(0, higher), ylim=(0, 1.2),
#            xlabel='period (days)',
#            ylabel='Lomb-Scargle Power',
#            title='Period = '+str(period)+', MeasuredPeriod = '+str(LSperiod)+', Periodx20 = '+(str(period*20)));
#     plt.show()
    phase = [(day % (period*2))/(period*2) for day in date]
    #idealphase = [(day % (period*2))/(period*2) for day in dayZ]
    #print(len(phase),len(idealphase))
    #plt.plot(idealphase,Zmag,'ko',)
#     plt.plot(phase, mag, 'o', markersize=4)
#     plt.xlabel('Phase')
#     plt.ylabel('Magnitude')
#     plt.gca().invert_yaxis()
#     plt.title('Period = '+str(period)+', MeasuredPeriod = '+str(LSperiod)+', Periodx20 = '+(str(period*20)))
#     plt.show()
    #print(period, LSperiod, period*20)
#     print('actualperiod', period, 'measured period', np.mean(LSperiod),power.max())# 'power',np.mean(power[maxpos]))
#     print(frequencyRange[frange], 'z', z)
#     totperiod.append(period)
#     totmperiod.append(np.mean(LSperiod))
#     totpower.append(power.max())
    mpower = power.max()
    measuredpower.append(power.max()) # should this correspond to period power and not max power?
    # Bootstrap significance: fraction of date-shuffled refits whose peak
    # power stays below the measured peak.
    maxpower = []
    counter = 0.
    for loop in range(0,loopNo):
        random.shuffle(date)
        model = periodic.LombScargleMultibandFast(fit_period=False)
        model.fit(date, mag, error, filts)
        power = model.score_frequency_grid(fmin, df, numsteps)
        maxpower.append(power.max())
    for X in range(0, len(maxpower)):
        if maxpower[X] > measuredpower[-1]:
            counter = counter + 1.
    Significance = (1.-(counter/len(maxpower)))
    #print('sig', Significance, 'counter', counter)
    SigLevel.append(Significance)
    #freqnumber = FrangeLoop.index(frequencyRange)
    #magnumber = MagRange.index(objectmag)
    #print(fullmaglist)
    #listnumber = (magnumber*maglength)+freqnumber
#     print(listnumber)
#     measuredperiodlist[listnumber] = LSperiod
#     periodlist[listnumber] = period
#     powerlist[listnumber] = mpower
#     siglist[listnumber] = Significance
#     fullmaglist[listnumber] = objectmag
    # results order, 0=mag,1=period,2=measuredperiod,3=siglevel,4=power,5=listnumber
    results[0] = objectmag[3]
    results[1] = period
    results[2] = LSperiod
    results[3] = Significance
    results[4] = mpower
    results[5] = 0#listnumber
    return results
# In[24]:
#findObservations([(630,)])
#remove25(obs)
#averageFlux(obs[0], 1, 30)
# Pre-build the flare light curve used by lombScargle: `flarecycles` day-long
# red-noise realisations, keeping the first 1440 samples (half a day at 30 s)
# of each.
longflare = []
for floop in range(0,flarecycles):
    flareone = flaring(-1, flareperiod, amplitude=0.3)
    flareone = flareone[0:1440]
    positiveflare = [abs(x) for x in flareone]
    longflare.extend(positiveflare)
# In[25]:
# Log-spaced trial periods (days) and the matching injected frequencies.
PrangeLoop = np.logspace(-2.5,2,freqlength)
FrangeLoop = [(1/x) for x in PrangeLoop]
# In[26]:
# reset results file
with open(inFile,'w') as f:
    f.write('fullmaglist \n\n periodlist \n\n measuredperiodlist \n\n siglist \n\n powerlist \n\n listnumberlist \n\n end of file')
# In[57]:
results = []
fullmeasuredPeriod = []
fullPeriod = []
fullPower = []
fullSigLevel = []
fullMag = []
MagRangearray = np.linspace(17,24,maglength)
MagRange = [x for x in MagRangearray]
# NOTE(review): maglist is built but never used below; newlist supplies the
# per-filter magnitudes instead.
maglist = []
for x in range(len(MagRange)):
    maglist.append([MagRange[x]]*7)
newlist = Magnitudes.mag1322
# Fan lombScargle out over the frequency grid, one magnitude row at a time.
pool = Pool(processors)
for h in range(startnumber,endnumber):
    print(newlist[h])
    results.append(pool.map(partial(lombScargle, objectmag=newlist[h]),FrangeLoop))
# Transpose the per-run result dicts into six parallel columns
# (mag, period, measured period, significance, power, list index).
twoDlist = [[],[],[],[],[],[]]
for X in range(len(results)):
    for Y in range(len(results[X])):
        twoDlist[0].append(results[X][Y][0])
        twoDlist[1].append(results[X][Y][1])
        twoDlist[2].append(results[X][Y][2])
        twoDlist[3].append(results[X][Y][3])
        twoDlist[4].append(results[X][Y][4])
        twoDlist[5].append(results[X][Y][5])
# Merge the columns into the template file: every odd line (the blank slot
# under each header) receives the corresponding column.
with open(inFile, 'r') as istr:
    with open(outFile,'w') as ostr:
        for i, line in enumerate(istr):
            # Get rid of the trailing newline (if any).
            line = line.rstrip('\n')
            if i % 2 != 0:
                line += str(twoDlist[int((i-1)/2)])+','
            ostr.write(line+'\n')
| [
"mj1e16@soton.ac.uk"
] | mj1e16@soton.ac.uk |
b2aa1e0a8a9d0eb05e16f353c4dec15e19aaaf08 | 5f2608d4a06e96c3a032ddb66a6d7e160080b5b0 | /week8/homework_w8_q_b2.py | 316c9a3ef7340c56b95ea8fea6fe5e427daf4ec1 | [] | no_license | sheikhusmanshakeel/statistical-mechanics-ens | f3e150030073f3ca106a072b4774502b02b8f1d0 | ba483dc9ba291cbd6cd757edf5fc2ae362ff3df7 | refs/heads/master | 2020-04-08T21:40:33.580142 | 2014-04-28T21:10:19 | 2014-04-28T21:10:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,507 | py | import random, math, os
def energy(S, N, nbr):
    """Total Ising energy of spin configuration S on an N-site lattice.

    nbr maps each site to its neighbour indices; the per-site sum counts
    every bond twice, so the 0.5 factor corrects the double count.
    """
    total = 0.0
    for site in range(N):
        total -= S[site] * sum(S[neighbor] for neighbor in nbr[site])
    return 0.5 * total
# Wolff cluster Monte Carlo for the 2D Ising model on an L x L periodic
# lattice (Python 2 script).  Estimates the specific heat cv at temperature T
# and checkpoints the spin configuration to a file between runs.
L = 32 # 2, 4, 8, 16, 32
N = L * L
# Periodic-boundary neighbour table: right, down, left, up for each site.
nbr = {i : ((i // L) * L + (i + 1) % L, (i + L) % N,
            (i // L) * L + (i - 1) % L, (i - L) % N)
                                    for i in range(N)}
T = 2.27
p = 1.0 - math.exp(-2.0 / T)
nsteps = 100000
S = [random.choice([1, -1]) for k in range(N)]
E = [energy(S, N, nbr)]
filename = 'local_'+ str(L) + '_' + str(T) + '.txt'
# Resume from a previous checkpoint when one exists for this (L, T).
if os.path.isfile(filename):
    f = open(filename, 'r')
    S = []
    for line in f:
        S.append(int(line))
    f.close()
    print 'starting from file', filename
else:
    S = [random.choice([1, -1]) for k in range(N)]
    print 'starting from scratch'
# Wolff algorithm: grow a same-spin cluster with bond probability p, then
# flip the whole cluster at once.
for step in range(nsteps):
    k = random.randint(0, N - 1)
    Pocket, Cluster = [k], [k]
    while Pocket != []:
        j = random.choice(Pocket)
        for l in nbr[j]:
            if S[l] == S[j] and l not in Cluster \
                   and random.uniform(0.0, 1.0) < p:
                Pocket.append(l)
                Cluster.append(l)
        Pocket.remove(j)
    for j in Cluster:
        S[j] *= -1
    E.append(energy(S, N, nbr))
# print sum(E)/ len(E) / N
# Specific heat per site from the energy fluctuations.
E_mean = sum(E)/ len(E)
E2_mean = sum(a ** 2 for a in E) / len(E)
cv = (E2_mean - E_mean ** 2 ) / N / T ** 2
# Checkpoint the final configuration for the next run.
f = open(filename, 'w')
for a in S:
    f.write(str(a) + '\n')
f.close()
print 'cv =', cv
| [
"noelevans@gmail.com"
] | noelevans@gmail.com |
5938e5d03d2962f5aff7d1e814157938180f4be7 | e8ebcbe979a4eef5289ac0f6f7ad36eb893fed39 | /choiceNet/migrations/0006_auto__add_session.py | d9804f76065d9b827f8d6cc6adefc57dd8088e91 | [] | no_license | qysnolan/ChoiceNet | a752b59f7184246d3abdb513cb1b794c91f007ef | e1c9765ac0e24c640f271f84c87cc393d1850e16 | refs/heads/master | 2021-01-18T14:28:53.389507 | 2014-09-14T01:45:27 | 2014-09-14T01:45:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,659 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply this migration: create the 'choiceNet_session' table."""
        # Adding model 'Session'
        db.create_table(u'choiceNet_session', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('session', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='session_user', to=orm['accounts.User'])),
            ('start_time', self.gf('django.db.models.fields.DateTimeField')()),
            ('end_time', self.gf('django.db.models.fields.DateTimeField')()),
            ('is_login', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('a', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('q', self.gf('django.db.models.fields.IntegerField')(default=0)),
        ))
        db.send_create_signal(u'choiceNet', ['Session'])
    def backwards(self, orm):
        """Reverse this migration: drop the 'choiceNet_session' table."""
        # Deleting model 'Session'
        db.delete_table(u'choiceNet_session')
models = {
u'accounts.user': {
'Meta': {'ordering': "['last_name', 'first_name']", 'object_name': 'User'},
'accountType': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isSuper': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '70'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'choiceNet.balance': {
'Meta': {'ordering': "['user']", 'object_name': 'Balance'},
'balance': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'balance_user'", 'to': u"orm['accounts.User']"})
},
u'choiceNet.invoice': {
'Meta': {'ordering': "['date_created', 'number']", 'object_name': 'Invoice'},
'amount': ('django.db.models.fields.IntegerField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'buyer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invoice_buyer'", 'to': u"orm['accounts.User']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_paid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invoice_service'", 'to': u"orm['service.Service']"})
},
u'choiceNet.session': {
'Meta': {'ordering': "['start_time', 'user']", 'object_name': 'Session'},
'a': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'end_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_login': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'q': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'session': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'session_user'", 'to': u"orm['accounts.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'service.service': {
'Meta': {'ordering': "['name', 'process_id']", 'object_name': 'Service'},
'cost': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_used': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'delay': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '50', 'decimal_places': '3', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '3000', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_bandwidth': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '50', 'decimal_places': '9', 'blank': 'True'}),
'min_bandwidth': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '50', 'decimal_places': '9', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'service_owner'", 'null': 'True', 'to': u"orm['accounts.User']"}),
'picture': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pre_requirements': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'process_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'service_input': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'service_output': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'service_type': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['service.ServiceType']"})
},
u'service.servicetype': {
'Meta': {'ordering': "['name']", 'object_name': 'ServiceType'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
}
}
complete_apps = ['choiceNet'] | [
"qysnolan@gmail.com"
] | qysnolan@gmail.com |
be22cf91a9a1ead05cbc8a1b15e1030eed65efa2 | e3219f8a7e8cb0376b7d46823854b1335462eb64 | /lependu.py | fd7d5a1197c410d1b9860e9588b6d39b108fa48d | [] | no_license | aconrad/lependu | e3c761b6fb5c19af3e6a52241a6f3bae120be698 | a7f2d1b10e9fd8151283e15c885e82beb692b91d | refs/heads/master | 2020-06-13T05:22:39.876419 | 2016-12-02T23:57:20 | 2016-12-02T23:57:20 | 75,440,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,002 | py | from collections import defaultdict
# Path to the word list consulted when building a resolver.
DICT = '/usr/share/dict/words'
# NOTE(review): this module-level mapping is never populated; __init__ uses a
# local variable of the same name instead.
words_by_length = {}
def read_words(dict_file):
    """Return the set of words in *dict_file*, lower-cased, one per line."""
    # Bug fix: the original left the file handle open; 'with' guarantees it
    # is closed even if reading raises.
    with open(dict_file) as handle:
        return {line.rstrip().lower() for line in handle}
class WordResolver(object):
    """Hangman helper: tracks guessed letters and narrows the set of
    dictionary words compatible with the revealed positions."""

    def __init__(self, word_length, max_attempts=6, word_dict=DICT):
        self.word_length = word_length
        self.max_attempts = max_attempts
        # Bug fix: honour the word_dict argument (the original always read DICT).
        all_words = read_words(word_dict)
        words_by_length = self._word_dict_by_length(all_words)
        self._possible_words = words_by_length[word_length]
        self._attempted_letters = set()
        self._unmatched_letters = set()
        self._resolved_word = [None] * word_length

    def _word_dict_by_length(self, words):
        # Group words by length; defaultdict so unseen lengths yield empty sets.
        d = defaultdict(set)
        for word in words:
            d[len(word)].add(word)
        return d

    def is_resolved(self):
        """True once every position has a confirmed letter."""
        return None not in self._resolved_word

    def was_attempted(self, letter):
        return letter in self._attempted_letters

    @property
    def attempted_letters(self):
        """Guessed letters, sorted for stable display."""
        return sorted(self._attempted_letters)

    def attempt(self, letter, position):
        """Record a guess.

        position is 1-based; pass None when the letter matched nowhere, in
        which case it is remembered as absent from the answer.
        """
        if position is None:
            self._attempted_letters.add(letter)
            self._unmatched_letters.add(letter)
            self._update_possible_words()
            return
        letter_index = position - 1
        # if self._resolved_word[letter_index] is not None and letter in self._attempted_letters:
        #     raise Exception("Letter already attempted!")
        self._resolved_word[letter_index] = letter
        self._attempted_letters.add(letter)
        self._update_possible_words()

    def _update_possible_words(self):
        # Keep only words that agree with every confirmed letter.
        # (Bug fix: the original ended with a dangling, incomplete 'for'
        # statement here - a SyntaxError; completed below as the intended
        # filter on letters known to be absent.)
        new_possible_words = set()
        for word in self._possible_words:
            for i, letter in enumerate(word):
                if self._resolved_word[i] not in (None, letter):
                    break
            else:
                new_possible_words.add(word)
        new_possible_words = {
            word for word in new_possible_words
            if not self._unmatched_letters.intersection(word)
        }
        self._possible_words = new_possible_words

    @property
    def possible_words(self):
        """Remaining candidate words, sorted."""
        return sorted(self._possible_words)
# Interactive driver: repeatedly ask the player which letter was tried and
# where it matched, narrowing the candidate words until the word is resolved.
if __name__ == '__main__':
    # NOTE(review): raw_input is Python 2 only; under Python 3 this whole
    # driver would need input() instead. (Variable name typo `word_lenght`
    # is pre-existing and harmless.)
    word_lenght = int(raw_input("How many letters? "))
    wr = WordResolver(word_lenght)
    while not wr.is_resolved():
        next_letter = raw_input("What did you attempt last? (already attempted: %s) " % ", ".join(wr.attempted_letters))
        next_letter = next_letter.lower()
        positions = raw_input("Did this letter match any positions in the word? If yes, which (space-separated position numbers)? [Enter for no match] ")
        if not positions:
            positions = []
            wr.attempt(next_letter, None)
        else:
            # Positions are typed 1-based; attempt() converts internally.
            positions = [int(pos) for pos in positions.split(" ")]
            for position in positions:
                wr.attempt(next_letter, position)
        print("possible words: %s" % ", ".join(wr.possible_words))
| [
"alexandre.conrad@gmail.com"
] | alexandre.conrad@gmail.com |
fd4d5627e93cdd9ab5b2a749e2b0fcf333c5b7f8 | 0a44e2fc6214a95036d725d1ed28196aaf83e615 | /keras_transformer/demo/translation/TranslationDataGenerator.py | 5fdf3004dae94294d1c96741397a4397199b48b9 | [
"Apache-2.0"
] | permissive | erelcan/keras-transformer | 6f4e9e0d9e37ddcd12f7bedfa9e2bcf433491c0a | ae88985dd4f1b5f91737e80c7e9c3157b60b4c4f | refs/heads/master | 2023-03-05T16:42:37.910220 | 2021-02-16T16:11:41 | 2021-02-16T16:11:41 | 338,855,231 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,075 | py | import os
import io
import zipfile
from random import shuffle
from keras.utils import get_file
from keras_transformer.generators.outer.OuterGeneratorABC import OuterGeneratorABC
from keras_transformer.utils.common_utils import select_items
class TranslationDataGenerator(OuterGeneratorABC):
    """Batch generator over a downloadable tab-separated translation corpus.

    Downloads/extracts the corpus once in __init__, keeps all sentence pairs
    in memory, and yields (source_batch, target_batch) tuples on iteration.
    """
    def __init__(self, batch_size, zip_file_path, file_url, extraction_path, num_of_samples=None, shuffle_on=True, id_list=None):
        super().__init__()
        # Assuming that data fits in memory. Otherwise, change data format; and use dask etc.
        self._batch_size = batch_size
        self._shuffle_on = shuffle_on
        self._zip_file_path = zip_file_path
        self._file_url = file_url
        self._extraction_path = extraction_path
        self._num_of_samples = num_of_samples
        # _create_dataset also fixes up _num_of_samples when it was None.
        self._source_sentences, self._target_sentences = self._create_dataset()
        self._num_of_batches = self._num_of_samples // self._batch_size
        self._remaining_size = self._num_of_samples % self._batch_size
        if id_list is None:
            self._id_list = list(range(self._num_of_samples))
        else:
            # In case, we would like to train with a given list of samples
            # (May be handy for handling evaluation split~).
            self._id_list = id_list
        if self._shuffle_on:
            shuffle(self._id_list)
        self._cur_pointer = 0
    def __next__(self):
        # May throw exception, if end of data..
        # For now, returning empty data..
        # NOTE(review): when exhausted this returns empty batches instead of
        # raising StopIteration, so a bare `for` over this object never ends;
        # callers are expected to use len()/get_remaining_size().
        batch_indices = []
        while len(batch_indices) < self._batch_size and self._cur_pointer < self._num_of_samples:
            batch_indices.append(self._id_list[self._cur_pointer])
            self._cur_pointer += 1
        return select_items(self._source_sentences, batch_indices), select_items(self._target_sentences, batch_indices)
    def __iter__(self):
        return self
    def __len__(self):
        # Returns number of batches, excluding the remaining.
        return self._num_of_batches
    def refresh(self):
        # Rewind to the first sample (and reshuffle) for a new epoch.
        self._cur_pointer = 0
        if self._shuffle_on:
            shuffle(self._id_list)
    def get_remaining_size(self):
        # Number of samples in the final, partial batch.
        return self._remaining_size
    def _download_and_extract_data(self):
        # Download the archive via Keras' cache, then extract it manually.
        path_to_file = get_file(fname=self._zip_file_path, origin=self._file_url, extract=True)
        file_path = os.path.dirname(path_to_file) + self._extraction_path
        # For get_file, extract option is not working when absolute path is provided. Hence manually extracting the zip.
        with zipfile.ZipFile(self._zip_file_path, 'r') as zip_ref:
            zip_ref.extractall(os.path.dirname(path_to_file))
        return file_path
    def _create_dataset(self):
        """Parse the extracted file into (sources, targets).

        Each line is expected to be tab-separated source/target text; the
        returned zip object unpacks into two tuples of sentences.
        """
        file_path = self._download_and_extract_data()
        # NOTE(review): the file handle from io.open is never closed.
        lines = io.open(file_path, encoding='UTF-8').read().strip().split('\n')
        if self._num_of_samples is None:
            self._num_of_samples = len(lines)
        word_pairs = [[w for w in l.split('\t')] for l in lines[:self._num_of_samples]]
        return zip(*word_pairs)
| [
"erelcan89@gmail.com"
] | erelcan89@gmail.com |
c871447e39e6eb8de18b2fb0356d8bd39cff3949 | 86d6c98b22392a7a54fe04db4505c625d541a984 | /logs.py.save.4 | 42385b90b5febe29a9dee25c0bc424890d2e3eae | [] | no_license | TCMG476Py/TCMG476 | c18ce1acd194222a00b93305c27db45b3bf0a513 | a81323b1484abe3063fe773ef09a7e30e83aec7e | refs/heads/master | 2021-07-17T16:01:11.141483 | 2017-10-18T21:53:26 | 2017-10-18T21:53:26 | 107,355,014 | 0 | 2 | null | 2017-10-18T21:53:27 | 2017-10-18T03:34:34 | Python | UTF-8 | Python | false | false | 791 | 4 | #! /usr/bin/python
# NOTE(review): this script is Python 2 only (urllib2; input() here also
# relies on py2 semantics, where it eval()s the typed text).
import urllib2
# Fetch the whole access log into memory as one string.
response = urllib2.urlopen('https://s3.amazonaws.com/tcmg412-fall2016/http_access_log')
html = response.read()
# Menu of the questions this tool is meant to answer.
print('(1) How many total requests were made in the time period represented in the log?')
print('(2) How many requests were made on each day? per week? per month?')
print('(3)What percentage of the requests were not successful (any 4xx status code)?')
print('(4)What percentage of the requests were redirected elsewhere (any 3xx codes)?')
print('(5)What was the most-requested file?')
print('(6)What was the least-requested file?')
print('(7) quit')
answer = int(input('Choose a question to answer:'))
if answer == 1:
    # One log entry per line, so the line count is the request count.
    lines = len(html.splitlines())
    print('Total request made:',lines)
if answer == 3:
    # NOTE(review): str.split() takes a *literal* separator, not a regex, so
    # this does not count 4xx responses — it splits on the literal pattern
    # text. re.findall on the status-code field was probably intended; the
    # branch is also unfinished (nothing is printed).
    sta4xx = len(html.split('.*\"(.*) .*'))
"ubuntu@ip-172-31-24-41.us-east-2.compute.internal"
] | ubuntu@ip-172-31-24-41.us-east-2.compute.internal |
9872650a638a9719df5daac27ecaf389caa85e68 | e7656ac263c5034deccaf9ed6a72fb94caffc76f | /apptodo/models.py | 676c18deec8161486d3f93b4b6c6d0281579951f | [] | no_license | alayo24/first_try | feb4894ead0b49d5d4a0c4f0507d0a1594adf409 | 86c4e2de8f2c6836cac84ad48f181bef5259d01c | refs/heads/master | 2020-03-31T13:15:22.482815 | 2018-10-09T12:34:06 | 2018-10-09T12:34:06 | 152,247,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,474 | py | from time import strftime
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from datetime import date
from timezone_field import TimeZoneFormField
# Create your models here.
class UserProfile(models.Model):
    """Per-user profile data, created automatically for each new auth.User."""

    # Bug fix: on_delete must be a deletion handler such as models.CASCADE;
    # the original passed an empty tuple, which is not callable and breaks
    # deletion of the related User.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    description = models.CharField(max_length=100, default='')
    city = models.CharField(max_length=100, default='')
    website = models.URLField(default='')
    phone = models.IntegerField(default=0)
    # NOTE(review): capitalized field name is unconventional but kept for
    # backward compatibility with existing migrations/queries.
    Location = models.CharField(max_length=100, default='my Location default')
def create_profile(sender, **kwargs):
    """post_save hook: create an empty UserProfile for every new User."""
    if kwargs['created']:
        # The created profile does not need to be kept in a local variable
        # (the original bound it to an unused name).
        UserProfile.objects.create(user=kwargs['instance'])

# Fire create_profile after every User save; only `created` saves act.
post_save.connect(create_profile, sender=User)
class TodoApp(models.Model):
    """A single todo item, newest first."""

    name = models.CharField(max_length=30)
    content = models.TextField(blank=False, default='')
    description = models.TextField(blank=False)
    # Bug fix: the original used default=timezone.now().strftime("%Y-%m-%d"),
    # which (a) is evaluated once at import time, so every row shared the
    # server start date, and (b) stores a str in a DateTimeField. Passing the
    # callable makes Django evaluate it per-save. (Requires a migration.)
    time_added = models.DateTimeField(default=timezone.now)
    due_date = models.DateTimeField()
    # NOTE(review): `created` has no default and is not auto_now_add — every
    # save must supply it explicitly; confirm this is intentional.
    created = models.DateTimeField()

    class Meta:
        ordering = ["-time_added"]

    def __str__(self):
        # Bug fix: the model has no `title` field; the original raised
        # AttributeError whenever an instance was rendered.
        return self.name
#then write python manage.py migrate
#to create admin python manage.py createsuperuser
| [
"abasstiti1@gmail.com"
] | abasstiti1@gmail.com |
5be9a8f527a4091484b255604172833b789880b5 | 7564184b9d079d8ad777677622483dc0a737d901 | /Brain/brain.py | 6c2b7f46324aec1fe15d0f28e63dcdf09668e012 | [] | no_license | GPrendi30/discord_bot | 7306823ce3a69d4606b15b2bd884f84f32ff5522 | 1e1f84a386be38ee9263ea73a7b1fe717ef4d3c5 | refs/heads/master | 2023-06-26T22:20:02.065089 | 2021-08-03T18:19:51 | 2021-08-03T18:19:51 | 356,424,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,533 | py | from transformers import pipeline, Conversation
from os import path
from Brain.memory.memory import Memory
from Brain.voice.voice import Voice
class my_convo(Conversation):
    """Conversation whose repr is the most recent generated response."""

    def __repr__(self):
        # Bug fix: a fresh conversation has no generated responses yet, so
        # the original unconditional [-1] raised IndexError.
        if not self.generated_responses:
            return ''
        return self.generated_responses[-1]
class Brain:
    """Conversational "brain" of the bot.

    Wraps a HuggingFace conversational pipeline plus a persistent Memory;
    optionally synthesizes replies through Voice when voice mode is on.
    """

    def __init__(self):
        self.pipeline = pipeline("conversational")
        self.voice = Voice()
        self.memory = Memory(path.abspath('Brain/memory/cell'))
        # One conversation per channel id.
        self.convo = dict({})
        self.hasVoice = False

    def feed(self, channel, message):
        """Start/replace the conversation for `channel` with `message`.

        Mirrors the original behavior (minus its bare except): the user
        input is only logged to memory for channels that already existed.
        """
        known_channel = channel in self.convo
        self.convo[channel] = my_convo(message)
        if known_channel:
            self.memory.add_user_input(channel, str(message))

    def answer(self, channel):
        """Run the pipeline on the channel's conversation and return the
        reply (text, or synthesized audio when voice mode is on)."""
        # NOTE(review): the pipeline is fed a list, so `ans` is presumably
        # the updated conversation(s); str() relies on my_convo.__repr__.
        ans = self.pipeline([self.convo[channel]])
        self.memory.add_gen_response(channel, str(ans))
        print(self.convo)
        if self.hasVoice:
            return self.voice.speak(str(ans))
        else:
            return str(ans)

    def reset_memory(self, channel):
        # Bug fix: the original rebound a local variable, which left
        # self.convo[channel] untouched; actually replace the conversation.
        self.convo[channel] = my_convo('')

    def speak(self, sen):
        # Bug fix: the original called the Voice *instance* (self.voice(sen));
        # answer() shows the intended API is voice.speak().
        return self.voice.speak(sen)

    def listen(self, audio):
        # NOTE(review): kept as in the original — calls the Voice instance
        # directly; confirm whether Voice is callable or this should be
        # self.voice.listen(audio).
        return self.voice(audio)

    def voiceModeOn(self):
        self.hasVoice = True

    def voiceModeOff(self):
        self.hasVoice = False
| [
"gerald.prendi@usi.ch"
] | gerald.prendi@usi.ch |
cbc59252cc7ba2d77e8ec6e0bc0cf2f32eae6eca | 6ccda268ca48ec1f6087caf5a2576a69379f95d6 | /tagger_new.py | 465892c12880acc7d04eca3257fe8c713c60520c | [] | no_license | dxcv/Individual-Project | 406ce75ad70fe85db5e9e6586ed7648ede3f7027 | 25955c2deafb599da7c3b3e159b846f09cd9839a | refs/heads/master | 2020-07-01T23:59:33.009265 | 2019-08-08T20:09:22 | 2019-08-08T20:09:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,756 | py | import tensorflow as tf
from tensorflow.contrib import rnn
from tensorflow.contrib import slim
import matplotlib.pyplot as plt
import numpy as np
import csv
import os
from sklearn.metrics import f1_score
#os.environ['CUDA_VISIBLE_DEVICES']='0,1' # Number of GPUs to run on
# '-1' hides every GPU from TensorFlow, forcing CPU-only execution.
os.environ['CUDA_VISIBLE_DEVICES']='-1'
class Tagger(object):
    """Four-feature LSTM sequence tagger (TensorFlow 1.x graph mode).

    One LSTM branch per feature stream (inputs of width 257/70/27/24 over
    `n_steps` time steps); each branch's softmax marginal is concatenated
    and passed through a fully-connected layer to produce the final
    3-class prediction. Checkpoints live under EXP_{expnum}/new_checkpoint
    (training) and .../new_checkpoint_test (the `_B` test-time variants,
    which retrain/evaluate in a separate directory keyed by `cvit`).
    """
    # take as input all the four features
    # [[257,10],[70,10],[27,10],[24,10]]
    # the way we compute the Q-value and updating agent keeps unchanged
    # four taggers concatenated in the one tagger, output (predictive marginals) from four taggers input to an fcl
    # fcl output the true predictive marginal
    def __init__(self, model_file, n_steps, n_input, feature_number, training = True, epochs = 10, expnum = 0, cvit = ''):
        self.expnum = expnum
        self.header = 'EXP_{0}/new_checkpoint'.format(self.expnum)
        self.header_test = 'EXP_{0}/new_checkpoint_test'.format(self.expnum)
        self.model_file = model_file
        if not os.path.exists(self.header + os.sep + self.model_file):
            os.makedirs(self.header + os.sep + self.model_file)
        self.training = training
        #self.feature_shapes = [[257,10],[70,10],[27,10],[24,10]]
        self.learning_rate = 1e-3
        self.n_batches = 10
        self.batch_size = 5
        self.display_step = 33
        self.save_step = 5
        self.epochs = epochs
        self.feature_number = feature_number
        # Running logs of minibatch loss / accuracy collected during training.
        self.acc_f = []
        self.loss_f = []
        self.cvit = cvit
        # Network Parameters
        self.n_input1 = 257
        self.n_input2 = 70
        self.n_input3 = 27
        self.n_input4 = 24
        self.n_steps = n_steps
        self.n_hidden = 64
        self.n_classes = 3
        # classifier
        # Builds one LSTM branch: unstack time steps, run a static RNN,
        # average hidden states over time, then apply the branch's FC layer.
        def lstm(x, weights, biases, i):
            x = tf.unstack(x, self.n_steps, 1)
            with tf.variable_scope('tagger1_feature_{0}'.format(i)):
                lstm_cell = rnn.BasicLSTMCell(self.n_hidden, forget_bias=1.0)
            with tf.variable_scope('tagger2_feature_{0}'.format(i)):
                self.outputs, _ = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
            # take the self.outputs to the Q-network
            logitx = tf.stack(self.outputs)
            ##### average before fcl
            self.avg_outputs = tf.reduce_mean(tf.stack(self.outputs), 0)
            pred = tf.matmul(self.avg_outputs, weights['out']) + biases['out']
            #### average after fcl
            #self.matmul = []
            #for i in range(logitx.get_shape()[0]):
            #    self.matmul.append(tf.matmul(logitx[i], weights['out']) + biases['out'])
            # it is first averaged then input to the fcl
            #pred = tf.reduce_mean(tf.stack(self.matmul), 0)
            # NOTE(review): `matmul` is a placeholder constant returned for API
            # compatibility; the "average after fcl" path above is disabled.
            matmul = tf.zeros([10,3], tf.int32)
            #pred = tf.matmul(self.avg_outputs, weights['out']) + biases['out']
            return pred, logitx, matmul
        # individual lstm models
        self.x1 = tf.placeholder("float", [None, self.n_steps, self.n_input1])
        self.x2 = tf.placeholder("float", [None, self.n_steps, self.n_input2])
        self.x3 = tf.placeholder("float", [None, self.n_steps, self.n_input3])
        self.x4 = tf.placeholder("float", [None, self.n_steps, self.n_input4])
        # final output
        self.y = tf.placeholder("int32", [None])
        # Define weights
        with tf.variable_scope('weight_feature_1'):
            self.weights1 = {'out': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes]))}
        with tf.variable_scope('bias_feature_1'):
            self.biases1 = {'out': tf.Variable(tf.random_normal([self.n_classes]))}
        with tf.variable_scope('weight_feature_2'):
            self.weights2 = {'out': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes]))}
        with tf.variable_scope('bias_feature_2'):
            self.biases2 = {'out': tf.Variable(tf.random_normal([self.n_classes]))}
        with tf.variable_scope('weight_feature_3'):
            self.weights3 = {'out': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes]))}
        with tf.variable_scope('bias_feature_3'):
            self.biases3 = {'out': tf.Variable(tf.random_normal([self.n_classes]))}
        with tf.variable_scope('weight_feature_4'):
            self.weights4 = {'out': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes]))}
        with tf.variable_scope('bias_feature_4'):
            self.biases4 = {'out': tf.Variable(tf.random_normal([self.n_classes]))}
        # LSTM
        with tf.name_scope('lstm1'):
            self.pred1, self.xlogits1, self.xfcls1 = lstm(self.x1, self.weights1, self.biases1, 1)
        with tf.name_scope('lstm2'):
            self.pred2, self.xlogits2, self.xfcls2 = lstm(self.x2, self.weights2, self.biases2, 2)
        with tf.name_scope('lstm3'):
            self.pred3, self.xlogits3, self.xfcls3 = lstm(self.x3, self.weights3, self.biases3, 3)
        with tf.name_scope('lstm4'):
            self.pred4, self.xlogits4, self.xfcls4 = lstm(self.x4, self.weights4, self.biases4, 4)
        with tf.name_scope('sm1'):
            self.sm1 = tf.nn.softmax(self.pred1)
        with tf.name_scope('sm2'):
            self.sm2 = tf.nn.softmax(self.pred2)
        with tf.name_scope('sm3'):
            self.sm3 = tf.nn.softmax(self.pred3)
        with tf.name_scope('sm4'):
            self.sm4 = tf.nn.softmax(self.pred4)
        # Raw (pre-softmax) logits of all four branches, concatenated.
        self.predss = tf.concat([tf.concat([self.pred1, self.pred2], axis=1),tf.concat([self.pred3, self.pred4], axis=1)],axis=1)
        # the sm1, sm2, sm3 and sm4 are prefictive marginals of the four lstm models
        # these are then inputed to a fcl
        self.confs = [self.sm1, self.sm2, self.sm3, self.sm4]
        a = tf.concat([self.sm1, self.sm2], axis=1)
        b = tf.concat([self.sm3, self.sm4], axis=1)
        self.net = tf.concat([a, b], axis=1)
        # TODO: only input in the self.net to the Q_network
        with tf.variable_scope('weight_feature_fcl'):
            # 12 = 4 branches x 3 classes of concatenated marginals.
            self.weights = {'out': tf.Variable(tf.random_normal([12, self.n_classes]))}
        with tf.variable_scope('bias_feature_fcl'):
            self.biases = {'out': tf.Variable(tf.random_normal([self.n_classes]))}
        self.pred = tf.matmul(self.net, self.weights['out']) + self.biases['out']
        self.sm = tf.nn.softmax(self.pred)
        # Define loss and optimizer
        self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.pred, labels=self.y))
        with tf.variable_scope('adam1_feature_{0}'.format(self.feature_number)):
            self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
        # Evaluate model
        self.correct_pred = tf.equal(tf.argmax(self.pred, 1), tf.cast(self.y, tf.int64))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
        self.sess = tf.Session(graph=tf.get_default_graph())
        self.saver = tf.train.Saver()
    def train(self, data_x, data_y, feature_number):
        """Train for self.epochs epochs on (data_x, data_y).

        data_x is a list of samples, each a 4-tuple of per-feature
        sequences; restores from the latest checkpoint when one exists,
        otherwise initializes all variables. Checkpoints every save_step
        epochs under self.header.
        """
        ckpt = tf.train.get_checkpoint_state(self.header + os.sep + self.model_file)
        if ckpt and ckpt.model_checkpoint_path and (data_x != []):
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
        else:
            self.sess.run(tf.initialize_variables(tf.global_variables()))
        if len(data_y) >= 150:
            self.batch_size = 32
        if data_x != []:
            # Split each sample's 4 feature streams into parallel lists.
            data_x1 = []
            data_x2 = []
            data_x3 = []
            data_x4= []
            for i in range(len(data_x)):
                data_x1.append(data_x[i][0])
                data_x2.append(data_x[i][1])
                data_x3.append(data_x[i][2])
                data_x4.append(data_x[i][3])
        for i in range(self.epochs):
            step = 1
            while step * self.batch_size <= len(data_y):
                if data_x != []:
                    batch_x1 = data_x1[(step - 1) * self.batch_size:step * self.batch_size]
                    batch_x2 = data_x2[(step - 1) * self.batch_size:step * self.batch_size]
                    batch_x3 = data_x3[(step - 1) * self.batch_size:step * self.batch_size]
                    batch_x4 = data_x4[(step - 1) * self.batch_size:step * self.batch_size]
                else:
                    batch_x1 = data_x[(step - 1) * self.batch_size:step * self.batch_size]
                    batch_x2 = data_x[(step - 1) * self.batch_size:step * self.batch_size]
                    batch_x3 = data_x[(step - 1) * self.batch_size:step * self.batch_size]
                    batch_x4 = data_x[(step - 1) * self.batch_size:step * self.batch_size]
                batch_y = data_y[(step - 1) * self.batch_size:step * self.batch_size]
                self.sess.run(self.optimizer, feed_dict={self.x1: batch_x1, self.x2: batch_x2, self.x3: batch_x3, self.x4: batch_x4, self.y: batch_y})
                if step % self.display_step == 0:
                    acc = self.sess.run(self.accuracy, feed_dict={self.x1: batch_x1, self.x2: batch_x2, self.x3: batch_x3, self.x4: batch_x4, self.y: batch_y})
                    loss = self.sess.run(self.loss, feed_dict={self.x1: batch_x1, self.x2: batch_x2, self.x3: batch_x3, self.x4: batch_x4, self.y: batch_y})
                    #print("Epoch: " + str(i + 1) + ", iter: " + str(
                    #    step * self.batch_size) + ", Minibatch Loss= " + "{:.6f}".format(
                    #    loss) + ", Training Accuracy= " + "{:.5f}".format(acc))
                    self.loss_f.append(loss)
                    self.acc_f.append(100 * acc)
                step += 1
            if (i+1) % self.save_step == 0:
                self.saver.save(self.sess, self.header + os.sep + self.model_file + os.sep + 'model.ckpt', i+1)
    ### used during testing
    def train_mode_B(self, data_x, data_y, feature_number):
        """Test-time fine-tuning: restore the training checkpoint, train as
        in train(), but save into the separate test_B_{cvit} directory so
        the original checkpoint is left untouched."""
        if not os.path.exists(self.header_test + os.sep + 'test_B_{0}_/'.format(self.cvit) +self.model_file):
            os.makedirs(self.header_test + os.sep + 'test_B_{0}_/'.format(self.cvit) +self.model_file)
        ckpt = tf.train.get_checkpoint_state(self.header + os.sep + self.model_file)
        if ckpt and ckpt.model_checkpoint_path:
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
        else:
            # NOTE(review): a missing checkpoint only prints; the bare string
            # below is a no-op expression statement, not an error.
            print('error')
            '###############################ERROR#############################'
        if len(data_y) >= 150:
            self.batch_size = 32
        if data_x != []:
            data_x1 = []
            data_x2 = []
            data_x3 = []
            data_x4= []
            for i in range(len(data_x)):
                data_x1.append(data_x[i][0])
                data_x2.append(data_x[i][1])
                data_x3.append(data_x[i][2])
                data_x4.append(data_x[i][3])
        for i in range(self.epochs):
            step = 1
            while step * self.batch_size <= len(data_y):
                if data_x != []:
                    batch_x1 = data_x1[(step - 1) * self.batch_size:step * self.batch_size]
                    batch_x2 = data_x2[(step - 1) * self.batch_size:step * self.batch_size]
                    batch_x3 = data_x3[(step - 1) * self.batch_size:step * self.batch_size]
                    batch_x4 = data_x4[(step - 1) * self.batch_size:step * self.batch_size]
                else:
                    batch_x1 = data_x[(step - 1) * self.batch_size:step * self.batch_size]
                    batch_x2 = data_x[(step - 1) * self.batch_size:step * self.batch_size]
                    batch_x3 = data_x[(step - 1) * self.batch_size:step * self.batch_size]
                    batch_x4 = data_x[(step - 1) * self.batch_size:step * self.batch_size]
                batch_y = data_y[(step - 1) * self.batch_size:step * self.batch_size]
                self.sess.run(self.optimizer, feed_dict={self.x1: batch_x1, self.x2: batch_x2, self.x3: batch_x3, self.x4: batch_x4, self.y: batch_y})
                if step % self.display_step == 0:
                    acc = self.sess.run(self.accuracy, feed_dict={self.x1: batch_x1, self.x2: batch_x2, self.x3: batch_x3, self.x4: batch_x4, self.y: batch_y})
                    loss = self.sess.run(self.loss, feed_dict={self.x1: batch_x1, self.x2: batch_x2, self.x3: batch_x3, self.x4: batch_x4, self.y: batch_y})
                    #print("Epoch: " + str(i + 1) + ", iter: " + str(
                    #    step * self.batch_size) + ", Minibatch Loss= " + "{:.6f}".format(
                    #    loss) + ", Training Accuracy= " + "{:.5f}".format(acc))
                    self.loss_f.append(loss)
                    self.acc_f.append(100 * acc)
                step += 1
            if (i+1) % self.save_step == 0:
                self.saver.save(self.sess, self.header_test + os.sep + 'test_B_{0}_/'.format(self.cvit) + self.model_file + os.sep + 'model.ckpt', i+1)
    # --- Inference helpers on the training checkpoint. In each, `x` is a
    # 4-tuple of feature arrays; a 2-D first element means a single sample
    # (it is wrapped in a batch of 1), otherwise a batch is assumed.
    def get_predictions(self, x):
        # Argmax class ids from the final fused prediction.
        ckpt = tf.train.get_checkpoint_state(self.header + os.sep + self.model_file)
        self.saver.restore(self.sess, ckpt.model_checkpoint_path) # no need to restore since we use the last one
        if len(np.array(x[0]).shape)==2:
            pred = np.argmax(self.sess.run(self.pred, feed_dict={self.x1: [x[0]], self.x2: [x[1]],self.x3: [x[2]],self.x4: [x[3]]}), 1)
        else:
            pred = np.argmax(self.sess.run(self.pred, feed_dict={self.x1: x[0], self.x2: x[1],self.x3: x[2],self.x4: x[3]}), 1)
        return pred
    def get_marginal(self, x):
        # Concatenated per-branch softmax marginals (shape [batch, 12]).
        ckpt = tf.train.get_checkpoint_state(self.header+ os.sep + self.model_file)
        self.saver.restore(self.sess, ckpt.model_checkpoint_path)
        if len(np.array(x[0]).shape)==2:
            # should we use net or marginals?
            marginal = self.sess.run(self.net, feed_dict={self.x1: [x[0]], self.x2: [x[1]],self.x3: [x[2]],self.x4: [x[3]]})
        else:
            #x = np.array(x)
            marginal = self.sess.run(self.net, feed_dict={self.x1: x[0], self.x2: x[1],self.x3: x[2],self.x4: x[3]})
        return marginal
    def get_confidence(self, x):
        # Per-branch confidence = 1 - entropy of the branch marginal,
        # clipped to [0, 1].
        ckpt = tf.train.get_checkpoint_state(self.header + os.sep + self.model_file)
        self.saver.restore(self.sess, ckpt.model_checkpoint_path)
        if len(np.array(x[0]).shape)==3:
            margs_all = self.sess.run(self.confs, feed_dict={self.x1: x[0], self.x2: x[1], self.x3: x[2], self.x4: x[3]})
            confs = []
            for margs in margs_all:
                margs = margs+np.finfo(float).eps
                margs = -np.sum(np.multiply(margs,np.log(margs)),axis=1)
                margs = np.minimum(1, margs)
                margs = np.maximum(0, margs)
                #conf = np.mean(1-margs)
                conf = 1-margs
                confs.append(conf.reshape(-1,1))
            confs = np.concatenate(confs, axis = 1)
        else:
            # NOTE(review): in this branch the per-branch confidences are
            # prepended (reversed order) relative to the batched branch.
            margs_all = self.sess.run(self.confs, feed_dict={self.x1: [x[0]], self.x2: [x[1]],self.x3: [x[2]],self.x4: [x[3]]})
            confs = []
            for margs in margs_all:
                conf = [1-np.maximum(0, np.minimum(1, - np.sum(margs * np.log(margs+np.finfo(float).eps))))]
                confs = conf + confs
        return confs
    def get_uncertainty(self, x, y):
        # Cross-entropy loss for a single labeled sample.
        ckpt = tf.train.get_checkpoint_state(self.header + os.sep + self.model_file)
        self.saver.restore(self.sess, ckpt.model_checkpoint_path)
        loss = self.sess.run(self.loss, feed_dict={self.x1: [x[0]], self.x2: [x[1]],self.x3: [x[2]],self.x4: [x[3]], self.y: y})
        return loss
    def get_xlogits(self, x, y):
        # Per-timestep hidden states of branch 1 (feature stream 1 only).
        ckpt = tf.train.get_checkpoint_state(self.header + os.sep + self.model_file)
        self.saver.restore(self.sess, ckpt.model_checkpoint_path)
        if len(np.array(x[0]).shape)==2:
            logits = self.sess.run(self.xlogits1, feed_dict={self.x1: [x[0]], self.y: [y]})
        else:
            logits = self.sess.run(self.xlogits1, feed_dict={self.x1: x[0], self.y: y})
        return logits
    def get_xfcls(self, x):
        # Placeholder tensor of branch 1 (see `matmul` in lstm()).
        ckpt = tf.train.get_checkpoint_state(self.header + os.sep + self.model_file)
        self.saver.restore(self.sess, ckpt.model_checkpoint_path) # no need to restore since we use the last one
        if len(np.array(x[0]).shape)==2:
            xfcls = self.sess.run(self.xfcls1, feed_dict={self.x1: [x[0]]})
        else:
            xfcls = self.sess.run(self.xfcls1, feed_dict={self.x1: x[0]})
        return xfcls
    def test(self, X_test, Y_true):
        # Accuracy on a labeled sample/batch.
        ckpt = tf.train.get_checkpoint_state(self.header + os.sep + self.model_file)
        self.saver.restore(self.sess, ckpt.model_checkpoint_path)
        if len(np.array(X_test[0]).shape) == 2:
            acc = self.sess.run(self.accuracy, feed_dict={self.x1: [X_test[0]], self.x2: [X_test[1]], self.x3: [X_test[2]], self.x4: [X_test[3]], self.y: [Y_true]})
        else:
            acc = self.sess.run(self.accuracy, feed_dict={self.x1: X_test[0], self.x2: X_test[1], self.x3: X_test[2], self.x4: X_test[3], self.y: Y_true})
        # f_1 score and conf matrix
        return acc
    def get_f1_score(self, X_test, Y_true):
        # Macro-averaged F1 of the fused prediction.
        ckpt = tf.train.get_checkpoint_state(self.header+ os.sep + self.model_file)
        self.saver.restore(self.sess, ckpt.model_checkpoint_path) # no need to restore since we use the last one
        if len(np.array(X_test[0]).shape) == 2:
            pred = np.argmax(self.sess.run(self.pred, feed_dict={self.x1: [X_test[0]], self.x2: [X_test[1]], self.x3: [X_test[2]], self.x4: [X_test[3]]}), 1)
        else:
            pred = np.argmax(self.sess.run(self.pred, feed_dict={self.x1: X_test[0], self.x2: X_test[1], self.x3: X_test[2], self.x4: X_test[3]}), 1)
        f1 = f1_score(Y_true, pred, average='macro')
        return f1
    # --- `_B` variants: identical to the methods above, but restore from the
    # test_B_{cvit} checkpoint written by train_mode_B.
    def get_predictions_B(self, x):
        ckpt = tf.train.get_checkpoint_state(self.header_test + os.sep + 'test_B_{0}_/'.format(self.cvit) + self.model_file)
        self.saver.restore(self.sess, ckpt.model_checkpoint_path) # no need to restore since we use the last one
        if len(np.array(x[0]).shape)==2:
            pred = np.argmax(self.sess.run(self.pred, feed_dict={self.x1: [x[0]], self.x2: [x[1]],self.x3: [x[2]],self.x4: [x[3]]}), 1)
        else:
            pred = np.argmax(self.sess.run(self.pred, feed_dict={self.x1: x[0], self.x2: x[1],self.x3: x[2],self.x4: x[3]}), 1)
        return pred
    def test_B(self, X_test, Y_true):
        ckpt = tf.train.get_checkpoint_state(self.header_test + os.sep + 'test_B_{0}_/'.format(self.cvit) + self.model_file)
        self.saver.restore(self.sess, ckpt.model_checkpoint_path)
        if len(np.array(X_test[0]).shape) == 2:
            acc = self.sess.run(self.accuracy, feed_dict={self.x1: [X_test[0]], self.x2: [X_test[1]], self.x3: [X_test[2]], self.x4: [X_test[3]], self.y: [Y_true]})
        else:
            acc = self.sess.run(self.accuracy, feed_dict={self.x1: X_test[0], self.x2: X_test[1], self.x3: X_test[2], self.x4: X_test[3], self.y: Y_true})
        # f_1 score and conf matrix
        return acc
    def get_f1_score_B(self, X_test, Y_true):
        ckpt = tf.train.get_checkpoint_state(self.header_test + os.sep + 'test_B_{0}_/'.format(self.cvit) + self.model_file)
        self.saver.restore(self.sess, ckpt.model_checkpoint_path) # no need to restore since we use the last one
        if len(np.array(X_test[0]).shape) == 2:
            pred = np.argmax(self.sess.run(self.pred, feed_dict={self.x1: [X_test[0]], self.x2: [X_test[1]], self.x3: [X_test[2]], self.x4: [X_test[3]]}), 1)
        else:
            pred = np.argmax(self.sess.run(self.pred, feed_dict={self.x1: X_test[0], self.x2: X_test[1], self.x3: X_test[2], self.x4: X_test[3]}), 1)
        f1 = f1_score(Y_true, pred, average='macro')
        return f1
    def get_marginal_B(self, x):
        ckpt = tf.train.get_checkpoint_state(self.header_test + os.sep + 'test_B_{0}_/'.format(self.cvit) + self.model_file)
        self.saver.restore(self.sess, ckpt.model_checkpoint_path)
        if len(np.array(x[0]).shape)==2:
            # should we use net or marginals?
            marginal = self.sess.run(self.net, feed_dict={self.x1: [x[0]], self.x2: [x[1]],self.x3: [x[2]],self.x4: [x[3]]})
        else:
            #x = np.array(x)
            marginal = self.sess.run(self.net, feed_dict={self.x1: x[0], self.x2: x[1],self.x3: x[2],self.x4: x[3]})
        return marginal
    def get_confidence_B(self, x):
        ckpt = tf.train.get_checkpoint_state(self.header_test + os.sep + 'test_B_{0}_/'.format(self.cvit) + self.model_file)
        self.saver.restore(self.sess, ckpt.model_checkpoint_path)
        if len(np.array(x[0]).shape)==3:
            margs_all = self.sess.run(self.confs, feed_dict={self.x1: x[0], self.x2: x[1], self.x3: x[2], self.x4: x[3]})
            confs = []
            for margs in margs_all:
                margs = margs+np.finfo(float).eps
                margs = -np.sum(np.multiply(margs,np.log(margs)),axis=1)
                margs = np.minimum(1, margs)
                margs = np.maximum(0, margs)
                #conf = np.mean(1-margs)
                conf = 1-margs
                confs.append(conf.reshape(-1,1))
            confs = np.concatenate(confs, axis = 1)
        else:
            margs_all = self.sess.run(self.confs, feed_dict={self.x1: [x[0]], self.x2: [x[1]],self.x3: [x[2]],self.x4: [x[3]]})
            confs = []
            for margs in margs_all:
                conf = [1-np.maximum(0, np.minimum(1, - np.sum(margs * np.log(margs+np.finfo(float).eps))))]
                confs = conf + confs
        return confs
    def get_uncertainty_B(self, x, y):
        ckpt = tf.train.get_checkpoint_state(self.header_test + os.sep + 'test_B_{0}_/'.format(self.cvit) + self.model_file)
        self.saver.restore(self.sess, ckpt.model_checkpoint_path)
        loss = self.sess.run(self.loss, feed_dict={self.x1: [x[0]], self.x2: [x[1]],self.x3: [x[2]],self.x4: [x[3]], self.y: y})
        return loss
| [
"noreply@github.com"
] | dxcv.noreply@github.com |
394ab50650182a6f6a0585c4bd30140e95a728c0 | acbf7de25d7fa45c2ec951cbc1b413472273cc59 | /blog/migrations/0001_initial.py | 6da7d17f63d5952a989afb1ab6730a9dcf58fb73 | [] | no_license | samuraidan1/my-first-blog | cb811f6812a8286584fcd171a154f30ed9ee5983 | 38e148cfc278384d1272abe6caf46c14b7ff893f | refs/heads/master | 2021-05-26T11:45:34.202199 | 2020-04-09T18:21:53 | 2020-04-09T18:21:53 | 254,118,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | # Generated by Django 2.2.12 on 2020-04-08 12:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the blog Post model.

    Generated by Django 2.2.12; do not edit by hand — create a new migration
    for any schema change instead.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"zhaksylykaidana@gmail.com"
] | zhaksylykaidana@gmail.com |
28ba3d851ee03948793dd137fff26cc26c6cb886 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/labels_20200908184214.py | 1ad507189c6df70f8faa20b9f588834565e44cb9 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | def labels(S):
if len(S) == 0:
return 0
output_arr = []
last_indices = {}
for i in range(len(S)):
last_indices[S[i]- 'a'] = i
print('last',last_indices)
labels("ababcbacadefegdehijhklij") | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
88d5414346251640121e479c2da1d66bec23c2e6 | 5beb49e3d9a3c9c8d611618e9f4e0bb53d36035c | /app/pyfile.py | 9558cd7760346de06eb5ddcd6ed01992e503ccb5 | [] | no_license | catufunwa/Hello-World | 8ccc6bf7446d97ac49ba840cec01a943a696a42e | 7299f5f81731e913e4c0371ffad3579bb4319680 | refs/heads/master | 2016-09-11T21:37:03.763523 | 2016-04-21T18:23:05 | 2016-04-21T18:23:05 | 1,515,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23 | py | #!python
print 'hello' | [
"catufunwa@cbinsights.com"
] | catufunwa@cbinsights.com |
8db9fd02b77cc8817b36c7a2f1c3d49b7dd9eecc | b601ea8b7c54fa8d8bb185e7b0e7c078f6431f04 | /chapter7/example23/deco_fabric.py | d2d2a6128ced6ff9ef2b237f6b217dd1efac66f5 | [] | no_license | pavoli/fluentpython_LucianoRamalho | 08396df4065fa7584c45853a506988a67814c024 | b44d4c6481fb135ab9a022120a5abc7505af3105 | refs/heads/master | 2020-09-07T15:51:45.299675 | 2020-02-10T02:27:25 | 2020-02-10T02:27:25 | 220,833,381 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | # -*- coding: utf-8 -*-
__author__ = 'p.olifer'
__version__ = '1.0'
registry = set()


def register(active=True):
    """Decorator factory: register (or unregister) the decorated function.

    With active=True (the default) the decorated function is added to the
    module-level ``registry``; with active=False it is removed if present.
    The function itself is always returned unchanged.
    """
    def decorate(func):
        print('running register (active=%s)->decorate(%s)' % (active, func))
        # Dispatch on the set method instead of branching with if/else.
        action = registry.add if active else registry.discard
        action(func)
        return func
    return decorate
@register(active=False)
def f1():
print('running f1()')
@register()
def f2():
print('running f2()')
def f3():
print('running f3()')
def main():
print('running main()')
print('registry ->', registry)
f1()
f2()
f3()
if __name__ == '__main__':
main()
print('set -> ', registry) | [
"pavel.olifer@gmail.com"
] | pavel.olifer@gmail.com |
7cc80bce7b6d04b575d179aae41d5f848acfa2d3 | f719e10667407144e0e18258f023e2187fe4e53c | /medusa/constants.py | f70493002351a4852da7da55507345be406e7245 | [] | no_license | loudbirds/mamba | 7e3c38442a5911ba7e6585ef5999d9efa262ed6f | 10a5b73db166fc2f2379be47cb4d2e17a0a548ec | refs/heads/master | 2020-06-20T07:53:42.786472 | 2017-06-13T10:00:39 | 2017-06-13T10:00:39 | 92,957,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | WORKER_THREAD = 'thread'
WORKER_GREENLET = 'greenlet'
WORKER_PROCESS = 'process'
WORKER_TYPES = (WORKER_THREAD, WORKER_GREENLET, WORKER_PROCESS)
class EmptyData(object):
pass | [
"ervin.bosenbacher@loudbirds.com"
] | ervin.bosenbacher@loudbirds.com |
8bc4bcb74f783e3983e2c1cc896d33c02b527584 | f35535e1fd6e860766a7ddde89794690a3d114a3 | /src/generate_data.py | 123a076b06b29476cf5e58f35998ed51b310f634 | [] | no_license | VestaAfzali/StopPermutingFeatures | 3f48c1340bad227b9a0183f7547e9c52eb30e6ca | 14d0d5429cac492c269fc69f2df4c20251c05467 | refs/heads/master | 2022-11-20T15:17:14.077396 | 2020-07-27T16:03:18 | 2020-07-27T16:03:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,809 | py | from typing import Tuple, Dict
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from scipy.special import expit
def generate_weights_gamma(
gamma: float = 1,
scale: float = 1,
n_features: int = 20,
seed: int = 42
) -> np.array:
"""
Generate gamma-distributed weights. Sum of weights = 1.
:param gamma: gamma parameter of gamma distribution
:param scale: scale parameter of gamma distribution
:param n_features: number of features (i.e. lengths of weights)
:param seed: random state
:return:
"""
np.random.seed(seed)
weights = np.random.gamma(gamma, scale, size=n_features)
weights = weights / np.sum(weights)
return weights
def get_correlated_data_stats(
data: np.array
) -> Dict[str, float]:
"""
Calculated correlation statistics of the given dataset
:param data: input data
:return:
"""
n_features = data.shape[1]
corr = pd.DataFrame(data).corr()
corr = np.array(corr)
assert corr.shape[0] == corr.shape[1] == n_features
pair_correlations = []
for i in range(n_features):
for j in range(n_features):
if i > j:
pair_correlations.append(corr[i, j])
abs_pair_correlations = [abs(c) for c in pair_correlations]
assert len(pair_correlations) == (n_features * n_features - n_features) / 2
data_corr_stats = {
"correlation_min": np.min(pair_correlations),
"correlation_max": np.max(pair_correlations),
"correlation_median": np.median(pair_correlations),
"correlation_mean": np.mean(pair_correlations),
"correlation_std": np.std(pair_correlations),
"abs_correlation_min": np.min(abs_pair_correlations),
"abs_correlation_max": np.max(abs_pair_correlations),
"abs_correlation_median": np.median(abs_pair_correlations),
"abs_correlation_mean": np.mean(abs_pair_correlations),
"abs_correlation_std": np.std(abs_pair_correlations)
}
return data_corr_stats
def generate_normal_correlated_data(
mu: float = 0,
var: float = 1,
n_features: int = 20,
n_samples: int = 2000,
max_correlation: float = 0.99,
noise_magnitude_max: float = 3,
seed: int = 42
) -> np.array:
"""
Generate normally distributed uncorrelated data and add noise to it.
:param mu: mean
:param var: variance
:param n_features: number of features in generated data
:param n_samples: number of samples in generated data
:param max_correlation: max pair correlation between features
:param noise_magnitude_max: magnitude of noise to add to data.
Noise will be generated uniformly from [-0.5, 0.5] * noise_magnitude_max range
:param seed: random state
:return:
"""
r = np.ones((n_features, n_features)) * max_correlation * var ** 2
for i in range(n_features):
r[i, i] = var
np.random.seed(seed)
x = np.random.multivariate_normal([mu] * n_features, r, size=n_samples)
np.random.seed(seed + 1)
noise_magnitudes = np.random.random(n_features) * noise_magnitude_max
for ind, noise_magniture in enumerate(noise_magnitudes):
np.random.seed(seed + 1 + ind)
noise = (np.random.random(n_samples) - 0.5) * noise_magniture
x[:, ind] = x[:, ind] + noise
x = StandardScaler().fit_transform(x)
return x
def generate_normal_data(
mu: float = 0,
var: float = 1,
n_features: int = 20,
n_samples: int = 2000,
seed: int = 42
) -> np.array:
"""
Generate normally distributed uncorrelated data
:param mu: mean
:param var: variance
:param n_features: number of features in generated data
:param n_samples: number of samples in generated data
:param seed: random state
:return:
"""
x = []
for i in range(n_features):
np.random.seed(seed + i)
x_ = np.random.normal(mu, var, n_samples).reshape(-1, 1)
x.append(x_)
x = np.hstack(x)
x = StandardScaler().fit_transform(x)
return x
def generate_normal_target(
data: np.array,
weights: np.array,
task: str = "classification"
) -> np.array:
"""
Generate a target for regression or classification task.
Target is linear combination of data features and corresponding weights (sign selected at random).
:param data: input features
:param weights: weight of each feature
:param task: "classification" (output - binary labels) or "regression" (output - target within (-3,3) range)
:return:
"""
n_samples, n_features = data.shape
assert n_features == len(weights)
y = np.zeros(n_samples)
for ind in range(n_features):
x = data[:, ind]
weight = weights[ind]
# randomly select sign of influence - +/-
np.random.seed(ind)
if np.random.rand() >= 0.5:
y = y + x * weight
else:
y = y - x * weight
# min max scale into pre-defined range to avoid sigmoid+round problems
y = StandardScaler().fit_transform(y.reshape(-1, 1))[:, 0]
if task == "classification":
y = expit(y) # sigmoid
y = np.round(y) # get labels
return y
def generate_normal_target_functions(
data: np.array,
task: str = "classification"
) -> np.array:
n_samples, n_features = data.shape
functions_to_select_from = {
"linear": lambda x: x,
"**2": lambda x: x**2,
"**3": lambda x: x**3,
"exp": lambda x: np.exp(x),
">0": lambda x: float(x > 0.5),
"sigmoid": lambda x: expit(x)
}
functions_to_select_from = list(functions_to_select_from.items())
# TODO: check how correlations affect ICI plots
pass
| [
"dvr.energy@gmail.com"
] | dvr.energy@gmail.com |
23dd015c16f9b5083c8ad049f724ebc4ef3940be | a48f6d46394a1ec631c00da0ac87d688b2a86c95 | /comfyapi/migrations/0005_product_featured.py | 0b9c8b4825a77e110894054f6a620ba4d6dc02ca | [] | no_license | Dancan1998/comfy-api | b84e15404383e6549ad8e988d33c37946438e45a | 9f71686eb6a205d59322dacb1344c19c9e5bd2f5 | refs/heads/master | 2023-03-22T05:53:29.955044 | 2021-03-06T03:37:04 | 2021-03-06T03:37:04 | 340,947,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | # Generated by Django 3.1.7 on 2021-02-22 00:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('comfyapi', '0004_product_shipping'),
]
operations = [
migrations.AddField(
model_name='product',
name='featured',
field=models.BooleanField(default=False),
),
]
| [
"dancankingstar@gmail.com"
] | dancankingstar@gmail.com |
66ab2bf274b421f8dc06c7b13e470f5665b9deca | bd5d30fb157aea3eef8fe0a434645a593635e1e1 | /recursivemenu/settings.py | 51ce43e37e5ae5ef2660adaf795215d102b27590 | [] | no_license | oneflower/recursivemenu | 10ed48b607dfa1272ba4515856b1f929775719fd | 35ad65952c417d9932b98da841a405d289595d48 | refs/heads/master | 2022-12-04T08:28:17.587539 | 2020-08-24T16:01:31 | 2020-08-24T16:01:31 | 289,972,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,738 | py | """
Django settings for recursivemenu project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*ujn#d@s0@q@&k!3#nt2=mftsg8=$54*wf$+q7y4!$^+vg(ivj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# ----------------------------------
'rest_framework',
'rest_framework_recursive',
'corsheaders',
'menu',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True # If this is used then `CORS_ORIGIN_WHITELIST` will not have any effect
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_WHITELIST = [
'http://localhost:3000',
] # If this is used, then not need to use `CORS_ORIGIN_ALLOW_ALL = True`
CORS_ORIGIN_REGEX_WHITELIST = [
'http://localhost:3000',
]
ROOT_URLCONF = 'recursivemenu.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'recursivemenu.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"oneflower3@gmail.com"
] | oneflower3@gmail.com |
98d37f57fa3ecacf2b6bb7f589f958867c87d85d | 49534a317930f120f2595cdbce351e57911a7978 | /interviews/south_migrations/0004_auto__add_field_person_about.py | 3593f4d4886e634b1fdf579333dcb0069a36a7b5 | [] | no_license | jibaku/interviews | d4029a62a60e3abdc0bf9ec20b3891ac8de7c22b | 369661b0e62c1d1571142fe3938898445551702b | refs/heads/master | 2022-09-12T00:46:37.538831 | 2022-08-26T16:16:24 | 2022-08-26T16:16:24 | 8,299,954 | 0 | 0 | null | 2016-11-02T13:36:43 | 2013-02-19T21:31:23 | Python | UTF-8 | Python | false | false | 6,979 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Person.about'
db.add_column(u'interviews_person', 'about',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Person.about'
db.delete_column(u'interviews_person', 'about')
models = {
u'interviews.answer': {
'Meta': {'ordering': "['order']", 'unique_together': "(('interview', 'order'),)", 'object_name': 'Answer'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interview': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['interviews.Interview']"}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'question': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'related_pictures': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['interviews.Picture']", 'symmetrical': 'False', 'blank': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'interviews.brand': {
'Meta': {'ordering': "['title']", 'object_name': 'Brand'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'interviews.interview': {
'Meta': {'ordering': "['-published_on']", 'object_name': 'Interview'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'footnotes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'introduction': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['interviews.Person']"}),
'published_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'interviews.interviewpicture': {
'Meta': {'object_name': 'InterviewPicture'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interview': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['interviews.Interview']"}),
'is_selected': ('django.db.models.fields.BooleanField', [], {}),
'picture': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['interviews.Picture']"})
},
u'interviews.interviewproduct': {
'Meta': {'object_name': 'InterviewProduct'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interview': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'products'", 'to': u"orm['interviews.Interview']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['interviews.Product']"})
},
u'interviews.person': {
'Meta': {'object_name': 'Person'},
'about': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'birthdate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sex': ('django.db.models.fields.IntegerField', [], {})
},
u'interviews.picture': {
'Meta': {'object_name': 'Picture'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'interview': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['interviews.Interview']"}),
'legend': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'interviews.product': {
'Meta': {'ordering': "['title']", 'object_name': 'Product'},
'alternate_titles': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'amazon_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'brand': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['interviews.Brand']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'published_interviews_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'interviews.quote': {
'Meta': {'object_name': 'Quote'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'quote': ('django.db.models.fields.TextField', [], {}),
'related_to': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['interviews.Answer']"})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['interviews'] | [
"github@x-phuture.com"
] | github@x-phuture.com |
27c931355f080a47b29ee27121e203671226c44f | 8e1c592d14583f296263ad0144f59e44843fb274 | /pj_hsintian/test_app/migrations/0024_auto_20200131_2222.py | cd705836c84b59e74a761941582811c09108388a | [] | no_license | boompieman/hsintian | 21ac6846c568f87bd6ddd4c6d0f16892ea008ada | 5e636ee276b508f43346b347b51309339af82a76 | refs/heads/master | 2023-02-07T13:01:24.867777 | 2020-12-28T22:38:03 | 2020-12-28T22:38:03 | 296,289,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | # Generated by Django 2.2.4 on 2020-01-31 14:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('test_app', '0023_auto_20200131_2221'),
]
operations = [
migrations.AlterField(
model_name='customer',
name='introducer',
field=models.CharField(blank=True, max_length=32, null=True),
),
]
| [
"t0915290092@gmail.com"
] | t0915290092@gmail.com |
7454c090e380ffdfd34d3bea0e9afa8c919c7598 | 0469f9c57df4081527c7c1447881b23543fcd4d7 | /app/test/election/test_routes.py | 005b7d08d7c63f77f99df5e1c9afd2b08419cdee | [] | no_license | Fajaragst/open-vote-api | 6585934977e5d0bc1c7d399b4212142c670a8380 | 011acf09ebd6493792d32bcb7410840ad97ca092 | refs/heads/master | 2023-03-24T20:55:45.751666 | 2019-07-21T14:11:12 | 2019-07-21T14:11:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,571 | py | """
Election Integration Testing Between Routes & Services
"""
import json
from app.test.base import BaseTestCase
class TestElectionRoutes(BaseTestCase):
def test_create_election(self):
""" test api call to create election """
result = self.create_election({
"name" : "some leection name",
"description": "some election description"
})
self.assertEqual(result.status_code, 201)
def test_update_election(self):
""" test api call to update election """
result = self.create_election({
"name" : "some leection name",
"description": "some election description"
})
response = result.get_json()
election_id = response["data"]["election_id"]
result = self.update_election({
"name" : "some leection name",
"description": "some election description"
}, election_id)
self.assertEqual(result.status_code, 204)
def test_remove_election(self):
""" test api call to remove election """
result = self.create_election({
"name" : "some leection name",
"description": "some election description"
})
response = result.get_json()
election_id = response["data"]["election_id"]
result = self.remove_election(election_id)
self.assertEqual(result.status_code, 204)
def test_get_election(self):
""" test api call to get election """
result = self.create_election({
"name" : "some leection name",
"description": "some election description"
})
response = result.get_json()
election_id = response["data"]["election_id"]
result = self.get_election(election_id)
self.assertEqual(result.status_code, 200)
def test_get_elections(self):
""" test api call to get election """
result = self.create_election({
"name" : "some leection name",
"description": "some election description"
})
response = result.get_json()
election_id = response["data"]["election_id"]
result = self.get_elections()
self.assertEqual(result.status_code, 200)
def test_create_candidates(self):
""" test api call to create candidates for specific election """
result = self.create_election({
"name" : "some leection name",
"description": "some election description"
})
self.assertEqual(result.status_code, 201)
response = result.get_json()
election_id = response["data"]["election_id"]
result = self.create_candidate({
"name" : "some candidate name",
"description": "some canddiate description"
}, election_id)
self.assertEqual(result.status_code, 201)
def test_update_candidate(self):
""" test api call to create candidates for specific election and update
the information"""
result = self.create_election({
"name" : "some leection name",
"description": "some election description"
})
self.assertEqual(result.status_code, 201)
response = result.get_json()
election_id = response["data"]["election_id"]
result = self.create_candidate({
"name" : "some candidate name",
"description": "some canddiate description"
}, election_id)
self.assertEqual(result.status_code, 201)
response = result.get_json()
candidate_id = response["data"]["candidate_id"]
result = self.update_candidate({
"name" : "some candidate name",
"description": "some canddiate description"
}, election_id, candidate_id)
self.assertEqual(result.status_code, 204)
def test_get_candidate(self):
""" test api call to create candidates for specific election and get
the information"""
result = self.create_election({
"name" : "some leection name",
"description": "some election description"
})
self.assertEqual(result.status_code, 201)
response = result.get_json()
election_id = response["data"]["election_id"]
result = self.create_candidate({
"name" : "some candidate name",
"description": "some canddiate description"
}, election_id)
self.assertEqual(result.status_code, 201)
response = result.get_json()
candidate_id = response["data"]["candidate_id"]
result = self.get_candidate(election_id, candidate_id)
self.assertEqual(result.status_code, 200)
def test_get_candidates(self):
"""" get all candidates for specific election """
result = self.create_election({
"name" : "some leection name",
"description": "some election description"
})
self.assertEqual(result.status_code, 201)
response = result.get_json()
election_id = response["data"]["election_id"]
result = self.create_candidate({
"name" : "some candidate name",
"description": "some canddiate description"
}, election_id)
self.assertEqual(result.status_code, 201)
response = result.get_json()
candidate_id = response["data"]["candidate_id"]
result = self.get_candidates(election_id)
self.assertEqual(result.status_code, 200)
| [
"kelvindsmn@gmail.com"
] | kelvindsmn@gmail.com |
959f6d92f5f9d4c4d93f612c2c05398fa13de09e | b2e4a597b2ebce244a62995e87c1d9ffad4426ae | /SIF_mini_demo/data/get_just_needed_vectors_py.py | 15bfb065248f9658a30def72d724e5514c56dacf | [
"MIT"
] | permissive | yangyuxue2333/NAMEABILITY | ee705f2aa972b33f03f0f5eafaec1cab1187ffda | 345df262ec5bc68ecfdcd6b129a68271c690bf90 | refs/heads/master | 2022-09-19T01:42:03.675502 | 2017-12-11T02:14:42 | 2017-12-11T02:14:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | import pandas as pd
import string
responses = pd.read_csv('../../gvi_-_nameability_-_different_-_uw.csv')
allResponses = responses['response'].str.cat(sep=' ').lower() #concatenate all responses into one string and lowercase
allResponses = allResponses.translate(None, string.punctuation) #remove punctuation
uniqueWords = set(allResponses.split(' ')) #note that there are some spelling mistakes.. e.g., isoscles, parallelagram
output = open('newVectors.txt','w')
with open("GoogleNews-vectors-negative300.txt",'r') as f:
for line in f:
if line.split(" ")[0] in uniqueWords:
output.write(line)
output.close()
| [
"yangyuxue1994@gmail.com"
] | yangyuxue1994@gmail.com |
96665afe923a112f425c26145d7e2c2a3425b79a | 23c12f60a00adf406e39ab51f1f8af0304834b9f | /test/functional/test_framework/siphash.py | 92dd6c46244324d8d5e2af80f5515abc714c77c6 | [
"MIT"
] | permissive | DemoCoin-Dev/democoin | 473aed9f10aa8af37c873fa7b6c43801fd0e8b55 | 4f3ee2a4484a05140cc1066a299afae7c120b0d2 | refs/heads/master | 2020-04-01T12:33:06.601763 | 2018-10-16T03:04:45 | 2018-10-16T03:04:45 | 153,019,823 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,015 | py | #!/usr/bin/env python3
# Copyright (c) 2016-2018 The Democoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Specialized SipHash-2-4 implementations.
This implements SipHash-2-4 for 256-bit integers.
"""
def rotl64(n, b):
return n >> (64 - b) | (n & ((1 << (64 - b)) - 1)) << b
def siphash_round(v0, v1, v2, v3):
v0 = (v0 + v1) & ((1 << 64) - 1)
v1 = rotl64(v1, 13)
v1 ^= v0
v0 = rotl64(v0, 32)
v2 = (v2 + v3) & ((1 << 64) - 1)
v3 = rotl64(v3, 16)
v3 ^= v2
v0 = (v0 + v3) & ((1 << 64) - 1)
v3 = rotl64(v3, 21)
v3 ^= v0
v2 = (v2 + v1) & ((1 << 64) - 1)
v1 = rotl64(v1, 17)
v1 ^= v2
v2 = rotl64(v2, 32)
return (v0, v1, v2, v3)
def siphash256(k0, k1, h):
n0 = h & ((1 << 64) - 1)
n1 = (h >> 64) & ((1 << 64) - 1)
n2 = (h >> 128) & ((1 << 64) - 1)
n3 = (h >> 192) & ((1 << 64) - 1)
v0 = 0x736f6d6570736575 ^ k0
v1 = 0x646f72616e646f6d ^ k1
v2 = 0x6c7967656e657261 ^ k0
v3 = 0x7465646279746573 ^ k1 ^ n0
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0 ^= n0
v3 ^= n1
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0 ^= n1
v3 ^= n2
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0 ^= n2
v3 ^= n3
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0 ^= n3
v3 ^= 0x2000000000000000
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0 ^= 0x2000000000000000
v2 ^= 0xFF
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
return v0 ^ v1 ^ v2 ^ v3
| [
"MerlinMagic2018@github.com"
] | MerlinMagic2018@github.com |
dd1ab22da7abbda6a667e2e7271f7f43e63b7fa4 | 76f11e5615bae1effb8ac00ff0255d2944e9a1c6 | /src/first_api/core/models.py | 8152bfaed3697d0a4139361b8ff5849b352b2402 | [] | no_license | ishworpanta10/django_restapi | 36f3ddad845888600c1b5b97a67121df8ee091af | 09de7d7271282483d5a6489c9b9ca15860069e8c | refs/heads/master | 2023-07-19T06:54:17.782296 | 2020-04-12T08:21:22 | 2020-04-12T08:21:22 | 255,002,377 | 1 | 0 | null | 2021-09-22T18:52:33 | 2020-04-12T03:38:44 | Python | UTF-8 | Python | false | false | 407 | py | from django.contrib.auth import get_user_model
from django.db import models
# Create your models here.
User = get_user_model()
class Post(models.Model):
title = models.CharField(max_length=100)
description = models.TextField()
timestamp = models.DateTimeField(auto_now_add=True)
owner = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return self.title
| [
"ishworpanta10@gmail.com"
] | ishworpanta10@gmail.com |
37915dcccb67b3832f18cea7435c17a1d62a1f4c | 9c0f691393abbeb5754e1624e0c48dfcdf857352 | /2018/Helpers/day_20.py | 421b07af4553a3e34735608fd171a866d88b040e | [] | no_license | seligman/aoc | d0aac62eda3e6adc3c96229ca859bd2274398187 | 9de27ff2e13100770a3afa4595b15565d45bb6bc | refs/heads/master | 2023-04-02T16:45:19.032567 | 2023-03-22T15:05:33 | 2023-03-22T15:05:33 | 230,493,583 | 17 | 10 | null | null | null | null | UTF-8 | Python | false | false | 10,975 | py | #!/usr/bin/env python3
from collections import deque
import os
DAY_NUM = 20
DAY_DESC = 'Day 20: A Regular Map'
class Infinity:
def __init__(self, default="#"):
self.default = default
self.grid = [[default]]
self.x = 0
self.y = 0
def get(self, x, y):
x += self.x
y += self.y
if x < 0 or y < 0 or x >= len(self.grid[0]) or y >= len(self.grid):
return self.default
else:
return self.grid[y][x]
def set(self, x, y, value):
x += self.x
y += self.y
if x < 0 or y < 0 or x >= len(self.grid[0]) or y >= len(self.grid):
while x < 0:
for i in range(len(self.grid)):
self.grid[i] = [self.default] + self.grid[i]
x += 1
self.x += 1
while y < 0:
self.grid.insert(0, [self.default] * len(self.grid[0]))
y += 1
self.y += 1
while x >= len(self.grid[0]):
for i in range(len(self.grid)):
self.grid[i].append(self.default)
while y >= len(self.grid):
self.grid.append([self.default] * len(self.grid[0]))
self.grid[y][x] = value
def show(self, log):
for row in self.get_rows():
log(row)
def get_rows(self):
ret = []
ret.append(self.default * (len(self.grid[0]) + 2))
for row in self.grid:
ret.append(self.default + "".join(row) + self.default)
ret.append(self.default * (len(self.grid[0]) + 2))
return ret
def decode(value, i, x, y, level, grid):
    """Walk one group of the route regex, drawing doors and rooms into grid.

    value is the full "^...$" regex, i is a single-element list holding the
    shared read cursor, (x, y) is the current room, level is the group
    nesting depth, and grid receives "-"/"|" doors and "." rooms.  Rooms
    end up two cells apart with the connecting door on the cell between.
    """
    # Direction -> (dx, dy, door glyph): N/S doors are "-", E/W doors are "|".
    steps = {
        "N": (0, -1, "-"),
        "S": (0, 1, "-"),
        "W": (-1, 0, "|"),
        "E": (1, 0, "|"),
    }
    i[0] += 1  # consume the "^" or "(" that opened this group
    branch_x, branch_y = x, y  # where "|" alternatives restart from
    while True:
        token = value[i[0]]
        if token in steps:
            dx, dy, door = steps[token]
            grid.set(x + dx, y + dy, door)
            x += 2 * dx
            y += 2 * dy
            grid.set(x, y, ".")
            i[0] += 1
        elif token == "|":
            # Next alternative: rewind to where this group started.
            x, y = branch_x, branch_y
            i[0] += 1
        elif token in {")", "$"}:
            i[0] += 1  # consume the terminator on the caller's behalf
            return
        else:
            # "(" starts a nested group; recurse with the shared cursor.
            decode(value, i, x, y, level + 1, grid)
def calc(log, values, show, frame_rate, track_long=None, highlight=None):
    """Build the map described by the route regex and flood-fill it from the start.

    Parameters:
        log: callable invoked with each line of output.
        values: puzzle input lines; values[0] is the "^...$" route regex.
        show: if True, dump the grid via log before and after the fill.
        frame_rate: if > 0, write every frame_rate-th BFS generation to
            "floods/flood_NNNNN.txt" so the fill can be animated later.
        track_long: optional one-element list used as an out-parameter; on
            return, slot 0 holds the longest path recorded by the fill (a
            list of (x, y) cells).  Supplying it also makes each queue entry
            carry its full path, which costs extra time and memory.
        highlight: optional collection of (x, y) cells to mark with "X"
            instead of the usual "x" when flooded.

    Returns the number of doors on the shortest route to the farthest room.
    """
    # decode() draws rooms two cells apart with the door glyph in between.
    grid = Infinity()
    decode(values[0], [0], 0, 0, 0, grid)

    floods = deque()
    dirs = [(-1, 0), (1, 0), (0, -1), (0, 1)]
    # locs maps room (x, y) -> (doors from start, path taken to reach it).
    locs = {}
    # None is a sentinel that separates BFS generations in the queue.
    floods.append(None)
    # Queue entries are [x, y, doors crossed so far, path of cells].
    floods.append([0, 0, 0, [(0, 0)]])
    grid.set(0, 0, "s")  # mark the starting room

    if show:
        log(" Before:")
        grid.show(log)

    total_frames = 0
    file_number = 0
    if frame_rate > 0:
        if not os.path.isdir("floods"):
            os.mkdir("floods")

    while len(floods) > 0:
        cur = floods.popleft()
        if cur is None:
            # A full generation has drained; re-arm the sentinel and
            # optionally dump the current grid state as an animation frame.
            if len(floods) > 0:
                floods.append(None)
            if frame_rate > 0:
                if total_frames % frame_rate == 0:
                    # Pick the next unused file number so frames from a
                    # previous run are never clobbered.
                    while os.path.isfile(os.path.join("floods", "flood_%05d.txt" % (file_number,))):
                        file_number += 1
                    print("Writing 'flood_%05d.txt'..." % (file_number,))
                    with open(os.path.join("floods", "flood_%05d.txt" % (file_number,)), "w") as f:
                        for row in grid.get_rows():
                            f.write(row + "\n")
            total_frames += 1
        else:
            old_char = grid.get(cur[0], cur[1])
            # Only spread from walkable cells that are not yet flooded.
            if old_char in {".", "|", "-", "s"}:
                if highlight is not None and (cur[0], cur[1]) in highlight:
                    grid.set(cur[0], cur[1], "X")
                else:
                    grid.set(cur[0], cur[1], "x")
                if old_char in {"|", "-"}:
                    # Leaving a door costs one crossing: reuse old_char as
                    # the distance increment applied to the neighbors below.
                    old_char = 1
                else:
                    # A room: record its distance (and path) exactly once.
                    if (cur[0], cur[1]) in locs:
                        raise Exception()
                    locs[(cur[0], cur[1])] = (cur[2], cur[3])
                    old_char = 0
                for x, y in dirs:
                    if track_long is None:
                        floods.append([x + cur[0], y + cur[1], cur[2] + old_char, []])
                    else:
                        floods.append([x + cur[0], y + cur[1], cur[2] + old_char, cur[3] + [(x + cur[0], y + cur[1])]])

    if show:
        log(" After:")
        grid.show(log)

    if track_long is not None:
        # Surface the longest recorded path through the out-parameter.
        for value in locs.values():
            if track_long[0] is None or len(value[1]) > len(track_long[0]):
                track_long[0] = value[1]

    # Part 1: doors to the farthest room; part 2: rooms >= 1000 doors away.
    log("Shortest long distance: " + str(max([x[0] for x in locs.values()])))
    log("1000 distance: " + str(sum([1 if x[0] >= 1000 else 0 for x in locs.values()])))
    log("Total Frames: " + str(total_frames))

    return max([x[0] for x in locs.values()])
def other_ffmpeg(describe, values):
    """Stitch the rendered PNG frames in 'floods' into a final animated GIF."""
    if describe:
        return "Generate final GIF"
    import subprocess
    frame_pattern = os.path.join("floods", "flood_%05d.png")
    output_gif = os.path.join("floods", "final.gif")
    ffmpeg_args = ["ffmpeg", "-y", "-framerate", "30", "-i", frame_pattern, output_gif]
    # Echo the command before running it so the user can reproduce it by hand.
    print("$ " + " ".join(ffmpeg_args))
    subprocess.check_call(ffmpeg_args)
def other_animate_frames(describe, values):
    """Render every 5th flood_*.txt dump into a PNG frame for animation.

    The first frame draws the full maze as a dark background; later frames
    are layered on top of it, drawing recently-changed cells in a fading
    grey trail (one shade per entry in `colors`) plus the highlighted path.
    """
    if describe:
        return "Animate all frames from dump_frames"
    from PIL import Image, ImageDraw, ImageFont
    from collections import deque
    import os
    file_number = -1
    out_file = -1
    last = None
    # Sliding window of the last len(colors) frame diffs, oldest first.
    older = deque()
    colors = [
        (128, 128, 128, 255),
        (147, 147, 147, 255),
        (166, 166, 166, 255),
        (185, 185, 185, 255),
        (204, 204, 204, 255),
    ]
    first_file = None
    for _ in range(10000):
        # Consume every 5th dumped text frame: 0, 5, 10, ...
        if file_number == -1:
            file_number = 0
        else:
            file_number += 5
        filename = os.path.join("floods", "flood_%05d.txt" % (file_number,))
        if not os.path.isfile(filename):
            break
        else:
            with open(filename) as f:
                rows = [x.strip().replace(".", " ") for x in f]
            rows = "\n".join(rows)
            fnt = ImageFont.truetype(os.path.join("Puzzles", "SourceCodePro.ttf"), 5)
            if first_file is None:
                # Very first frame: start from a blank black canvas.
                txt = Image.new('RGBA', (620, 825), (0,0,0,255))
                d = ImageDraw.Draw(txt)
            else:
                # Later frames are drawn on top of the saved first frame.
                txt = Image.open(first_file)
                d = ImageDraw.Draw(txt)
            if first_file is None:
                # Draw the static maze: dim grey text with blue walls ("#").
                y = 10
                for row in rows.split("\n"):
                    d.text((10, y), row, fill=(64, 64, 64, 255), font=fnt)
                    bg = "".join([x if x in {"#"} else " " for x in row])
                    d.text((10, y), bg, fill=(32, 32, 128, 255), font=fnt)
                    y += 4
            else:
                # Overlay the highlighted path ("X") and visited cells ("x").
                y = 10
                empty = None
                for row in rows.split("\n"):
                    if empty is None:
                        empty = " " * len(row)
                    fg = "".join(["*" if x in {"X"} else " " for x in row])
                    if fg != empty:
                        d.text((10, y), fg, fill=(255, 224, 224, 255), font=fnt)
                    fg = "".join(["x" if x in {"x"} else " " for x in row])
                    if fg != empty:
                        d.text((10, y), fg, fill=(255, 128, 128, 128), font=fnt)
                    y += 4
            if last is None:
                # Seed the diff window with all-blank frames of the same shape.
                last = rows
                temp = ""
                for cur in last:
                    if cur == "\n":
                        temp += cur
                    else:
                        temp += " "
                for _ in range(len(colors)):
                    older.append(temp)
            else:
                # Diff against the previous frame: keep only changed chars.
                temp = ""
                for i in range(len(rows)):
                    if rows[i] == "\n":
                        temp += "\n"
                    else:
                        if rows[i] == last[i]: # pylint: disable=e1136
                            temp += " "
                        else:
                            temp += rows[i]
                older.append(temp)
                older.popleft()
            # Draw the fading trail: older diffs in darker greys.
            i = 0
            empty = None
            for cur in older:
                y = 10
                for row in cur.split("\n"):
                    if empty is None:
                        empty = " " * len(row)
                    if row != empty:
                        d.text((10, y), row, fill=colors[i], font=fnt)
                        fg = "".join(["*" if x in {"X"} else " " for x in row])
                        if fg != empty:
                            d.text((10, y), fg, fill=(255, 224, 224, 255), font=fnt)
                    y += 4
                i += 1
            last = rows
            out_file += 1
            txt.save(os.path.join("floods", "flood_%05d.png" % (out_file,)))
            if first_file is None:
                first_file = os.path.join("floods", "flood_%05d.png" % (out_file,))
            print("Done with 'flood_%05d.png'" % (out_file,))
def test(log):
    """Self-test: the sample map's furthest room must be 18 doors away.

    Args:
      log: callable used for text output.

    Returns:
      True iff calc() produces the expected answer for the sample input.
    """
    values = [
        "^ENNWSWW(NEWS|)SSSEEN(WNSE|)EE(SWEN|)NNN$",
    ]
    # Return the comparison directly instead of the redundant
    # `return True if ... else False` branching.
    return calc(log, values, True, 0) == 18
def run(log, values):
    """Solve the real puzzle input and report the answer through *log*."""
    answer = calc(log, values, False, 0)
    log(answer)
class DummyLog:
    """Minimal logger whose show() simply echoes each message to stdout."""

    def __init__(self):
        """No state to initialize."""
        pass

    def show(self, value):
        """Print *value* unchanged."""
        print(value)
def other_dump_frames(describe, values):
    """Dump animation frames to disk, highlighting the longest path found."""
    if describe:
        return "Dump out frame information"
    print("Getting longest run")
    # First pass just discovers the longest path (via the out-parameter).
    longest = [None]
    calc(DummyLog(), values, False, 0, track_long=longest)
    highlight = set(longest[0])  # pylint: disable=e1133
    # Second pass re-runs the fill, writing a frame every 4 generations.
    print(calc(DummyLog(), values, False, 4, highlight=highlight))
def other_show(describe, values):
    """Decode the regex map in *values* and print the resulting grid."""
    if describe:
        return "Show the final map"
    final_map = Infinity()
    decode(values[0], [0], 0, 0, 0, final_map)
    # Mark the starting room before rendering.
    final_map.set(0, 0, "s")
    final_map.show(DummyLog())
if __name__ == "__main__":
    import sys, os
    # Search the command line and a few conventional locations for the
    # puzzle input file.  DAY_NUM / DAY_DESC are module constants defined
    # earlier in this file (outside this excerpt).
    def find_input_file():
        # Returns the first existing candidate path, or None implicitly.
        for fn in sys.argv[1:] + ["input.txt", f"day_{DAY_NUM:0d}_input.txt", f"day_{DAY_NUM:02d}_input.txt"]:
            for dn in [[], ["Puzzles"], ["..", "Puzzles"]]:
                cur = os.path.join(*(dn + [fn]))
                if os.path.isfile(cur): return cur
    fn = find_input_file()
    if fn is None: print("Unable to find input file!\nSpecify filename on command line"); exit(1)
    print(f"Using '{fn}' as input file:")
    # Strip line endings but preserve other whitespace in each input line.
    with open(fn) as f: values = [x.strip("\r\n") for x in f.readlines()]
    print(f"Running day {DAY_DESC}:")
    run(print, values)
| [
"scott.seligman@gmail.com"
] | scott.seligman@gmail.com |
823a6e4a7ac767f6f79ad0b9dc76f6462561093b | a372a816373d63ad626a9947077e137eac2e6daf | /test/leetcode/test_RomanToInteger.py | e1779578e1fb78743d60ab9239d857b1810cc42a | [] | no_license | DmitryPukhov/pyquiz | 07d33854a0e04cf750b925d2c399dac8a1b35363 | 8ae84f276cd07ffdb9b742569a5e32809ecc6b29 | refs/heads/master | 2021-06-13T14:28:51.255385 | 2021-06-13T08:19:36 | 2021-06-13T08:19:36 | 199,842,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | from unittest import TestCase
from pyquiz.leetcode.RomanToInteger import RomanToInteger
class TestRomanToInteger(TestCase):
    """Unit tests for the RomanToInteger conversion algorithm."""

    alg = RomanToInteger()

    def test_one_to_10(self):
        """Numerals I through X convert to 1 through 10."""
        expected = {'I': 1, 'II': 2, 'III': 3, 'IV': 4, 'V': 5,
                    'VI': 6, 'VII': 7, 'VIII': 8, 'IX': 9, 'X': 10}
        for numeral, value in expected.items():
            self.assertEqual(value, self.alg.roman_to_int(numeral))

    def test_complex(self):
        """Mixed subtractive sequences are summed correctly."""
        convert = self.alg.roman_to_int
        self.assertEqual(409, convert('XICD'))  # 10 - 1 - 100 + 500
        self.assertEqual(390, convert('XCD'))
| [
"dmitry.pukhov@gmail.com"
] | dmitry.pukhov@gmail.com |
d7e5f64c16ef151c07dd2c1aa9a010ecfdcbbdcb | d72e039484da19fab5716681c7d252f2c829f6a2 | /utils/function.py | 009d57b889b54c4ff0cd949ef07119885fd20019 | [] | no_license | GanYouHeng/blog | 1d6949f8927fadf9fd72513052bfbc048cd5abf2 | 85a12f04a0819159e0314ee625986bcfb39f755c | refs/heads/master | 2020-04-29T07:27:57.455631 | 2019-03-16T09:53:02 | 2019-03-16T09:53:02 | 175,953,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | """__author__ = 干友恒"""
from functools import wraps
from flask import session, url_for, redirect
def is_login(func):
    """Decorator that requires a logged-in user for the wrapped view.

    Reads ``session['user.id']``; when present the wrapped view runs as
    normal, otherwise the client is redirected to the ``back.login`` view.

    Bug fix: the inner wrapper previously accepted no arguments and called
    ``func()`` bare, which broke any decorated view taking URL parameters.
    It now forwards ``*args`` / ``**kwargs`` transparently (backward
    compatible with argument-less views).
    """
    @wraps(func)
    def check(*args, **kwargs):
        user_id = session.get('user.id')
        if user_id:
            return func(*args, **kwargs)
        else:
            # Not authenticated: send the client to the login page.
            return redirect(url_for('back.login'))
    return check
"18108159775@163.com"
] | 18108159775@163.com |
fe6c0906b6711bc4dfaf0c42acab037fa002d71a | 10d98fecb882d4c84595364f715f4e8b8309a66f | /sgk/mbv1/main.py | a5714394ad7f15319cef2783b649bead6e085217 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | afcarl/google-research | 51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42 | 320a49f768cea27200044c0d12f394aa6c795feb | refs/heads/master | 2021-12-02T18:36:03.760434 | 2021-09-30T20:59:01 | 2021-09-30T21:07:02 | 156,725,548 | 1 | 0 | Apache-2.0 | 2018-11-08T15:13:53 | 2018-11-08T15:13:52 | null | UTF-8 | Python | false | false | 8,432 | py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main script for dense/sparse inference."""
import sys
import time
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from sgk import driver
from sgk.mbv1 import config
from sgk.mbv1 import mobilenet_builder
# Crop padding for ImageNet preprocessing.
CROP_PADDING = 32
# Mean & stddev for ImageNet preprocessing.
# (The usual ImageNet channel statistics, scaled from [0, 1] to [0, 255].)
MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]
# Command-line flags: run mode, checkpoint location, model shape
# (width multiplier / sparsity) and the benchmark loop sizes.
FLAGS = flags.FLAGS
flags.DEFINE_string("runmode", "examples",
                    "Running mode: examples or imagenet.")
flags.DEFINE_string("ckpt_dir", "/tmp/ckpt/", "Checkpoint folders")
flags.DEFINE_integer("num_images", 5000, "Number of images to eval.")
flags.DEFINE_string("imagenet_glob", None, "ImageNet eval image glob.")
flags.DEFINE_string("imagenet_label", None, "ImageNet eval label file path.")
flags.DEFINE_float("width", 1.0, "Width for MobileNetV1 model.")
flags.DEFINE_float("sparsity", 0.0, "Sparsity for MobileNetV1 model.")
flags.DEFINE_bool("fuse_bnbr", False, "Whether to fuse batch norm, bias, relu.")
flags.DEFINE_integer("inner_steps", 1000, "Benchmark steps for inner loop.")
flags.DEFINE_integer("outer_steps", 100, "Benchmark steps for outer loop.")
# Disable TF2.
# The whole script is written against the TF1 graph/Session API.
tf.disable_v2_behavior()
class InferenceDriver(driver.Driver):
  """Custom inference driver for MBV1."""
  def __init__(self, cfg):
    """Creates a single-image (batch 1, 224x224) inference driver.

    Args:
      cfg: MobileNetV1 model configuration (see config.get_config).
    """
    super(InferenceDriver, self).__init__(batch_size=1, image_size=224)
    # ImageNet classification: 1000 output classes.
    self.num_classes = 1000
    self.cfg = cfg
  def build_model(self, features):
    """Builds the model graph on GPU; returns squeezed softmax probs."""
    with tf.device("gpu"):
      # Transpose the input features from NHWC to NCHW.
      features = tf.transpose(features, [0, 3, 1, 2])
      # Apply image preprocessing.
      features -= tf.constant(MEAN_RGB, shape=[3, 1, 1], dtype=features.dtype)
      features /= tf.constant(STDDEV_RGB, shape=[3, 1, 1], dtype=features.dtype)
      logits = mobilenet_builder.build_model(features, cfg=self.cfg)
      probs = tf.nn.softmax(logits)
      return tf.squeeze(probs)
  def preprocess_fn(self, image_bytes, image_size):
    """Preprocesses the given image for evaluation.

    Center-crops with CROP_PADDING, bicubic-resizes to image_size and
    converts to float32 in NHWC layout.

    Args:
      image_bytes: `Tensor` representing an image binary of arbitrary size.
      image_size: image size.
    Returns:
      A preprocessed image `Tensor`.
    """
    shape = tf.image.extract_jpeg_shape(image_bytes)
    image_height = shape[0]
    image_width = shape[1]
    padded_center_crop_size = tf.cast(
        ((image_size / (image_size + CROP_PADDING)) *
         tf.cast(tf.minimum(image_height, image_width), tf.float32)), tf.int32)
    offset_height = ((image_height - padded_center_crop_size) + 1) // 2
    offset_width = ((image_width - padded_center_crop_size) + 1) // 2
    crop_window = tf.stack([
        offset_height, offset_width, padded_center_crop_size,
        padded_center_crop_size
    ])
    image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
    image = tf.image.resize_bicubic([image], [image_size, image_size])[0]
    image = tf.reshape(image, [image_size, image_size, 3])
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    return image
  def run_inference(self, ckpt_dir, image_files, labels):
    """Runs the model over image_files one image per sess.run call.

    Returns two parallel lists: top-5 class indices and their probs.
    """
    with tf.Graph().as_default(), tf.Session() as sess:
      images, labels = self.build_dataset(image_files, labels)
      probs = self.build_model(images)
      if isinstance(probs, tuple):
        probs = probs[0]
      self.restore_model(sess, ckpt_dir)
      prediction_idx = []
      prediction_prob = []
      for i in range(len(image_files)):
        # Run inference.
        out_probs = sess.run(probs)
        # Sort class ids by descending probability and keep the top 5.
        idx = np.argsort(out_probs)[::-1]
        prediction_idx.append(idx[:5])
        prediction_prob.append([out_probs[pid] for pid in idx[:5]])
        if i % 1000 == 0:
          logging.error("Processed %d images.", i)
      # Return the top 5 predictions (idx and prob) for each image.
      return prediction_idx, prediction_prob
  def imagenet(self, ckpt_dir, imagenet_eval_glob, imagenet_eval_label,
               num_images):
    """Eval ImageNet images and report top1/top5 accuracy.

    Args:
      ckpt_dir: str. Checkpoint directory path.
      imagenet_eval_glob: str. File path glob for all eval images.
      imagenet_eval_label: str. File path for eval label.
      num_images: int. Number of images to eval: -1 means eval the whole
        dataset.
    Returns:
      A tuple (top1, top5) for top1 and top5 accuracy.
    """
    imagenet_val_labels = [int(i) for i in tf.gfile.GFile(imagenet_eval_label)]
    imagenet_filenames = sorted(tf.gfile.Glob(imagenet_eval_glob))
    if num_images < 0:
      num_images = len(imagenet_filenames)
    image_files = imagenet_filenames[:num_images]
    labels = imagenet_val_labels[:num_images]
    pred_idx, _ = self.run_inference(ckpt_dir, image_files, labels)
    top1_cnt, top5_cnt = 0.0, 0.0
    for i, label in enumerate(labels):
      # Booleans count as 0/1 when added to the float accumulators.
      top1_cnt += label in pred_idx[i][:1]
      top5_cnt += label in pred_idx[i][:5]
      if i % 100 == 0:
        print("Step {}: top1_acc = {:4.2f}% top5_acc = {:4.2f}%".format(
            i, 100 * top1_cnt / (i + 1), 100 * top5_cnt / (i + 1)))
        sys.stdout.flush()
    top1, top5 = 100 * top1_cnt / num_images, 100 * top5_cnt / num_images
    print("Final: top1_acc = {:4.2f}% top5_acc = {:4.2f}%".format(top1, top5))
    return top1, top5
  def benchmark(self, ckpt_dir, outer_steps=100, inner_steps=1000):
    """Run repeatedly on dummy data to benchmark inference."""
    # Turn off Grappler optimizations.
    options = {"disable_meta_optimizer": True}
    tf.config.optimizer.set_experimental_options(options)
    # Run only the model body (no data pipeline) on device.
    features = tf.zeros([1, 3, self.image_size, self.image_size],
                        dtype=tf.float32)
    # Create the model outside the loop body.
    model = mobilenet_builder.mobilenet_generator(self.cfg)
    # Call the model once to initialize the variables. Note that
    # this should never execute.
    # NOTE(review): dummy_iteration is never fetched by sess.run below; it
    # only builds graph state so the while_loop can reuse the variables.
    dummy_iteration = model(features)
    # Run the function body in a loop to amortize session overhead.
    loop_index = tf.zeros([], dtype=tf.int32)
    initial_probs = tf.zeros([self.num_classes])
    def loop_cond(idx, _):
      # Continue until inner_steps iterations have run.
      return tf.less(idx, tf.constant(inner_steps, dtype=tf.int32))
    def loop_body(idx, _):
      # One forward pass; only the counter and probs thread the loop.
      logits = model(features)
      probs = tf.squeeze(tf.nn.softmax(logits))
      return idx + 1, probs
    benchmark_op = tf.while_loop(
        loop_cond,
        loop_body, [loop_index, initial_probs],
        parallel_iterations=1,
        back_prop=False)
    with tf.Session() as sess:
      self.restore_model(sess, ckpt_dir)
      fps = []
      for idx in range(outer_steps):
        start_time = time.time()
        sess.run(benchmark_op)
        elapsed_time = time.time() - start_time
        fps.append(inner_steps / elapsed_time)
        logging.error("Iterations %d processed %f FPS.", idx, fps[-1])
      # Skip the first iteration where all the setup and allocation happens.
      fps = np.asarray(fps[1:])
      logging.error("Mean, Std, Max, Min throughput = %f, %f, %f, %f",
                    np.mean(fps), np.std(fps), fps.max(), fps.min())
def main(_):
  """Entry point: build the configured driver and dispatch on runmode."""
  logging.set_verbosity(logging.ERROR)
  config_class = config.get_config(FLAGS.width, FLAGS.sparsity)
  model_config = config_class(FLAGS.fuse_bnbr)
  inference_driver = InferenceDriver(model_config)
  runmode = FLAGS.runmode
  if runmode == "benchmark":
    inference_driver.benchmark(FLAGS.ckpt_dir, FLAGS.outer_steps,
                               FLAGS.inner_steps)
  elif runmode == "imagenet":
    inference_driver.imagenet(FLAGS.ckpt_dir, FLAGS.imagenet_glob,
                              FLAGS.imagenet_label, FLAGS.num_images)
  else:
    logging.error("Must specify runmode: 'benchmark' or 'imagenet'")
if __name__ == "__main__":
  # Hand control to absl, which parses the flags and then invokes main().
  app.run(main)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
d6c6ce5979c322c5f4d1974e5f166fbba3a71c96 | d5214b1331c9dae59d95ba5b3aa3e9f449ad6695 | /quintagroup.captcha.core/tags/0.4/setup.py | b66e082036a30897edf7142cd18f777739802d79 | [] | no_license | kroman0/products | 1661ee25a224c4b5f172f98110944f56136c77cf | f359bb64db22f468db5d1e411638790e94d535a2 | refs/heads/master | 2021-01-10T07:58:04.579234 | 2014-06-11T12:05:56 | 2014-06-11T12:05:56 | 52,677,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,642 | py | from setuptools import setup, find_packages
import os
version = '0.4'
setup(name='quintagroup.captcha.core',
version=version,
description="A core package of simple captcha implementation",
long_description=open("README.txt").read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Plone",
"Framework :: Plone :: 3.2",
"Framework :: Plone :: 3.3",
"Framework :: Plone :: 4.0",
"Framework :: Plone :: 4.1",
"Framework :: Plone :: 4.2",
"Framework :: Plone :: 4.3",
"Framework :: Zope2",
"Framework :: Zope3",
"License :: OSI Approved :: GNU General Public License (GPL)",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Security",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='plone captcha',
author='Quintagroup',
author_email='support@quintagroup.com',
url='http://svn.quintagroup.com/products/quintagroup.captcha.core',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['quintagroup', 'quintagroup.captcha'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""",
)
| [
"krzroman@gmail.com"
] | krzroman@gmail.com |
e676a3151cb2474fa112a5f701513e70e2333e0c | 993ac768660b3464c88df747a36e17ea1819f6f1 | /hbase/docker-hbase/happybaseDocker/python/tpArbres/2_manipulateArbre_ensemble.py | a125e1eaf3484e043b31ad4d8493353c67120ee5 | [
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | radioactivit/big-data | 8585d26100b2d74c6b81bba788dd434fe322f497 | 2431a1d8ca1a31480b13dfbfbc2321b9470c0281 | refs/heads/master | 2021-06-02T00:37:15.701976 | 2020-01-07T15:03:13 | 2020-01-07T15:03:13 | 114,544,896 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 6,298 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Tutorial script (Python 2): exercises the happybase HBase client against
# an "arbre_paris" (Paris trees) table — single-row gets, scans with
# filters, versioned cells, timestamped deletes and batched mutations.
# Column families and (apparent) max value lengths for the table.
familles = {
    'genre': {'genre':13, 'espece':11, 'nom_commun':16, 'variete':12},
    'infos': {'date_plantation':14, 'hauteur':8, 'circonference':7},
    'adresse': {'geopoint':18, 'arrondissement':3, 'adresse':5}}
import happybase
import sys
table_name = "arbre_paris"
connection = happybase.Connection('hbasethrift')
table = connection.table(table_name)
# Single-row SELECT by row key.
print('Recupérer l\'arbre arbre-5388')
key = 'arbre-5388'
row = table.row(key)
print row
print('Recupérer l\'arbre arbre-19251')
key = 'arbre-19251'
row = table.row(key)
print row
print('Recupérer le genre et le nom_commun de l\'arbre arbre-19251')
row = table.row(key,columns=["genre:genre","genre:nom_commun"])
print row
column_family_name = 'genre'
column_name = '{fam}:genre'.format(fam=column_family_name)
# Row dict keys are bytes, hence the encode() before lookup.
print('\t{}: {}'.format(key, row[column_name.encode('utf-8')]))
column_name = '{fam}:nom_commun'.format(fam=column_family_name)
print('\t{}: {}'.format(key, row[column_name.encode('utf-8')]))
# Limited full-table scan.
print('Les 50 premiers arbres')
for key, data in table.scan(limit=50):
    print(key, data)
print('Les 50 premiers noms et arondissements des arbres')
for key, data in table.scan(columns=["genre:nom_commun","adresse:arrondissement"],limit=50):
    print(key, data)
# Limited scan with a server-side filter.
print('Les 50 premiers arbres du 11eme arrondissement')
for key, data in table.scan(limit=50,
                            columns=["genre:nom_commun","adresse:arrondissement"],
                            filter="SingleColumnValueFilter('adresse','arrondissement',=, 'binary:PARIS 11E ARRDT',true,true)"):
    print(key, data)
# Limited scan starting from a given row key.
print('Les 50 arbres à partir de arbre-101334 du 11eme arrondissement')
for key, data in table.scan(row_start="arbre-101334", limit=50, columns=["genre:nom_commun","adresse:arrondissement"], filter="SingleColumnValueFilter('adresse','arrondissement',=, 'binary:PARIS 11E ARRDT',true,true)"):
    print(key, data)
print('Les arbres à partir de arbre-100368 et jusqu\'à arbre-101334')
for key, data in table.scan(row_start="arbre-100368", row_stop="arbre-101334", columns=["genre:nom_commun","adresse:arrondissement"]):
    print(key, data)
#row_start=None, row_stop=None
# Disabled example: 50 trees taller than 20 metres in the 11th district.
#print('Les 50 premiers arbres de plus de 20 mètres du 11eme')
#for key, data in table.scan(limit=50, columns=["genre:nom_commun","adresse:arrondissement", "infos:hauteur"],
#filter="SingleColumnValueFilter('adresse','arrondissement',=, 'substring:11E',true,true) AND SingleColumnValueFilter('infos','hauteur',>, 'binary:20',true,true) AND SingleColumnValueFilter('infos','hauteur',=, 'regexstring:^[0-9][0-9]+',true,true)"):
#    print(key, data)
#https://regex101.com/
#Example1: >, 'binary:abc' will match everything that is lexicographically greater than "abc"
#Example2: =, 'binaryprefix:abc' will match everything whose first 3 characters are lexicographically equal to "abc"
#Example3: !=, 'regexstring:ab*yz' will match everything that doesn't begin with "ab" and ends with "yz"
#Example4: =, 'substring:abc123' will match everything that begins with the substring "abc123"
# Edit the oak, turning it into a cork oak ("Chêne-liège").
print('L\'arbre arbre-19251 devient un Chêne-liège')
key = 'arbre-19251'
table.put(key, {"genre:nom_commun": "Chêne-liège"})
print('On le vérifie')
row = table.row(key)
print row
# Timestamp
# Version management via cell timestamps.
print('On récupère tous les Chêne-liège avec les timestamps de modif colonne')
for key, data in table.scan(limit=10, include_timestamp=True, filter="SingleColumnValueFilter('genre','nom_commun',=, 'substring:liège',true,true)"):
    print(key, data)
print('On récupère le dernier Chêne-liège avec les timestamps de modif colonne')
row = table.row(key, include_timestamp=True)
print row
# NOTE: the hard-coded timestamp below must match a version that actually
# exists in YOUR table for this read to return data.
print('On récupère le dernier Chêne-liège avec les timestamps de modif colonne')
row = table.row(key, include_timestamp=True, timestamp=1515506574000)
print row
print('On récupère la cellule nom_commun du chêne avec ses versions')
cells = table.cells(key,column='genre:nom_commun')
print cells
print('On récupère la cellule nom_commun du chêne avec ses versions et ses timestamps')
cells = table.cells(key,column='genre:nom_commun',include_timestamp=True)
print cells
#print('Et si on delete?')
#table.delete(key,columns=['genre:nom_commun'])
#print('Ben on delete toutes les versions')
#cells = table.cells(key,column='genre:nom_commun',include_timestamp=True)
#print cells
print('Et si on delete avec un timestamp?')
#table.put(key, {"genre:nom_commun": "Chêne-liège"})
#table.put(key, {"genre:nom_commun": "Chêne"})
#table.put(key, {"genre:nom_commun": "Chêne-liège"})
#table.put(key, {"genre:nom_commun": "Chêne"})
cells = table.cells(key,column='genre:nom_commun',include_timestamp=True)
print cells
# TODO: adjust the hard-coded timestamp used by this delete.
table.delete(key,columns=['genre:nom_commun'],timestamp=1514992471431)
print('Ben on delete que les versions plus anciennes')
cells = table.cells(key,column='genre:nom_commun',include_timestamp=True)
print cells
# Batched mutations (the second batch() call replaces the first).
batch = table.batch()
batch = table.batch(batch_size = 1000)
#batch.delete()
# Remove every 1700-01-01 planting date (only the first 1000 rows, in fact).
print('On retire les dates de plantations')
for key, data in table.scan(limit=1000, columns=["infos:date_plantation"], filter="SingleColumnValueFilter('infos','date_plantation',=, 'substring:1700',true,true)"):
    print(key, data)
    batch.delete(key,columns=["infos:date_plantation"])
batch.send()
print('On constate que ça c\'est bien passé')
row = table.row(key)
print row
sys.exit()
# Unreachable past sys.exit(): update tree info — rename every chestnut
# ("marronnier") to "Chataigner".
print('Les marronniers sont maintenant des Châtaigniers')
for key, data in table.scan(limit=10000, columns=["genre:nom_commun"], filter="SingleColumnValueFilter('genre','nom_commun',=, 'substring:marron',true,true)"):
    print(key, data)
    batch.put(key, {"genre:nom_commun": "Chataigner"})
batch.send()
print('On constate que ça c\'est bien passé')
row = table.row(key)
print row
sys.exit()
#timestamp
connection.close()
| [
"marin.franck@gmail.com"
] | marin.franck@gmail.com |
07262dbe6905bb6823f24993e371e4527c3ad7b4 | 7837bf43c268886e841952447965b71c6b5d45e8 | /PeopleCounter.py | dab60e82da1671208d29430a2a2f98886cbb5204 | [
"MIT"
] | permissive | SiddiqMohammed/Rain-Room-OpenCV | 1217ecc19d68523e9dee11aaf4ffd610da40d446 | a77062c1a1450ada65d1fc126125b2f7d5bf97a6 | refs/heads/master | 2022-12-11T13:09:05.785091 | 2020-09-08T19:36:11 | 2020-09-08T19:36:11 | 292,779,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,380 | py | import numpy as np
import cv2 as cv
# People-counter script: reads a video, draws a 3x3 grid over each frame,
# background-subtracts moving blobs and highlights the grid cell a blob's
# centroid falls in.  Press ESC (27) to quit.
try:
    log = open('log.txt',"w")
except:
    print( "No se puede abrir el archivo log")
# Entry / exit counters.
cnt_up = 0
cnt_down = 0
# Video source (0 would be the default camera).
#cap = cv.VideoCapture(0)
cap = cv.VideoCapture('Test Files/videos/TestVideo.avi')
# Dump the capture properties to the console.
for i in range(19):
    print( i, cap.get(i))
if cap.isOpened():
    h = cap.get(cv.CAP_PROP_FRAME_HEIGHT) # float
    w = cap.get(cv.CAP_PROP_FRAME_WIDTH) # float
    #Calculate Gx and Gy for grid lines
    gX = int(w/3)
    gY = int(h/3)
    gx1 = gX
    gy1 = gY
    gx2 = gX*2
    gy2 = gY*2
    gx3 = int(w)
    gy3 = int(h)
# Minimum contour area considered a person (1/250th of the frame).
frameArea = h*w
areaTH = frameArea/250
print( 'Area Threshold', areaTH)
# Entry / exit trigger lines (fractions of the frame height).
line_up = int(2*(h/5))
line_down = int(3*(h/5))
up_limit = int(1*(h/5))
down_limit = int(4*(h/5))
# Background subtractor.
fgbg = cv.createBackgroundSubtractorMOG2(detectShadows = True)
# Structuring elements for the morphological filters.
kernelOp = np.ones((3,3),np.uint8)
kernelOp2 = np.ones((5,5),np.uint8)
kernelCl = np.ones((11,11),np.uint8)
# State: tracked persons and the per-cell grid colors (white = idle,
# red = occupied).
font = cv.FONT_HERSHEY_SIMPLEX
persons = []
max_p_age = 5
pid = 1
color1 = (255, 255, 255)
color2 = (0, 0, 255)
cg1 = color1
cg2 = color1
cg3 = color1
cg4 = color1
cg5 = color1
cg6 = color1
cg7 = color1
cg8 = color1
cg9 = color1
while(cap.isOpened()):
    # Read one frame from the video source.
    ret, frame = cap.read()
    #Drawing the grid
    # cv.line(frame, (0, gy1), (gx3, gy1), (150, 0, 200), 2)
    # cv.line(frame, (0, gy2), (gx3, gy2), (150, 0, 200), 2)
    # cv.line(frame, (gx1, 0), (gx1, gy3), (150, 0, 200), 2)
    # cv.line(frame, (gx2, 0), (gx2, gy3), (150, 0, 200), 2)
    # Row 1
    cv.rectangle(frame, (0, 0), (gx1, gy1), cg1, 2)
    cv.rectangle(frame, (gx1, 0), (gx2, gy1), cg2, 2)
    cv.rectangle(frame, (gx2, 0), (gx3, gy1), cg3, 2)
    # Row 2
    cv.rectangle(frame, (0, gy1), (gx1, gy2), cg4, 2)
    cv.rectangle(frame, (gx1, gy1), (gx2, gy2), cg5, 2)
    cv.rectangle(frame, (gx2, gy1), (gx3, gy2), cg6, 2)
    # Row 3
    cv.rectangle(frame, (0, gy2), (gx1, gy3), cg7, 2)
    cv.rectangle(frame, (gx1, gy2), (gx2, gy3), cg8, 2)
    cv.rectangle(frame, (gx2, gy2), (gx3, gy3), cg9, 2)
    for i in persons:
        i.age_one() #age every person one frame
    # Apply background subtraction.
    fgmask = fgbg.apply(frame)
    fgmask2 = fgbg.apply(frame)
    # Binarize to drop shadows (rendered grey by MOG2).
    try:
        ret,imBin= cv.threshold(fgmask,200,255,cv.THRESH_BINARY)
        ret,imBin2 = cv.threshold(fgmask2,200,255,cv.THRESH_BINARY)
        # Opening (erode -> dilate) to remove noise.
        mask = cv.morphologyEx(imBin, cv.MORPH_OPEN, kernelOp)
        mask2 = cv.morphologyEx(imBin2, cv.MORPH_OPEN, kernelOp)
        # Closing (dilate -> erode) to join white regions.
        mask = cv.morphologyEx(mask , cv.MORPH_CLOSE, kernelCl)
        mask2 = cv.morphologyEx(mask2, cv.MORPH_CLOSE, kernelCl)
    except:
        # threshold() raises on an empty frame: treat it as end of video.
        print('EOF')
        print( 'UP:',cnt_up)
        print ('DOWN:',cnt_down)
        break
    # RETR_EXTERNAL returns only extreme outer flags. All child contours are left behind.
    contours0, hierarchy = cv.findContours(mask2,cv.RETR_EXTERNAL,cv.CHAIN_APPROX_SIMPLE)
    for cnt in contours0:
        area = cv.contourArea(cnt)
        if area > areaTH:
            # TODO: handle multiple persons and screen entries/exits.
            # Centroid of the contour via image moments.
            M = cv.moments(cnt)
            cx = int(M['m10']/M['m00'])
            cy = int(M['m01']/M['m00'])
            cv.circle(frame,(cx,cy), 5, (0,0,255), -1)
            rectSize = 50
            cv.rectangle(frame,(cx+rectSize,cy+rectSize), (cx-rectSize,cy-rectSize), (0,0,255), 2)
            text = cx, cy
            cv.putText(frame, str(text), (cx,cy), font, 0.5, (255,0,0), 1, cv.LINE_AA)
            # for ccx in range(1, 4):
            # for ccy in range(1, 4):
            # Only the top-left grid cell is wired up so far.
            if cx > 0 and cx < gx1 and cy > 0 and cy < gy1:
                cg1 = color2
            else:
                cg1 = color1
    str_up = 'UP: '+ str(cnt_up)
    str_down = 'DOWN: '+ str(cnt_down)
    cv.imshow('Frame',frame)
    cv.imshow('Mask',mask)
    # ~33 ms per frame; ESC (27) exits the loop.
    k = cv.waitKey(30) & 0xff
    if k == 27:
        break
#END while(cap.isOpened())
log.flush()
log.close()
cap.release()
cv.destroyAllWindows()
| [
"noreply@github.com"
] | SiddiqMohammed.noreply@github.com |
ff1d23ecb15e23777cc2a8d28f1806e3988ce4cc | 4b25d88dec4b3239684cb2c7d837be75e6efa511 | /__init__.py | d58b296601b8fcaf4717c1182c907b42c81cb5c5 | [] | no_license | k155ir1/ml2 | 5758d4f7422c6cf1b230c25c3ae990e2e491c741 | cab775a0bd8268b643cb3ffda59158e292437ad9 | refs/heads/master | 2020-04-13T02:21:17.225729 | 2019-01-04T15:21:47 | 2019-01-04T15:21:47 | 162,900,286 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | # -*- coding: utf-8 -*-
"""
    ML-2
    ~~~~~
    ML-framework for simple and easy implementation of classifiers.
    :copyright: (c) 2018 by Aleksej Kusnezov
    :license: BSD, see LICENSE for more details.
"""
# Package version string; bump on each release.
__version__ = '0.1'
| [
"noreply@github.com"
] | k155ir1.noreply@github.com |
a900c62f14aada3437cc2084d664da44f7b1628c | 0311aaa76aad3f0f92cadca2804bc3fb94f73dec | /tests/conftest.py | 917f120a3e837c7703e3acf7c79800c9e249e6ff | [] | no_license | atarancon/flask-mega-tutorial | d446199ad082a83d57051803be2b1b6dd7ac51cb | 071436a70ad663e24e0e10159dcc221f58b770e7 | refs/heads/master | 2022-12-13T03:02:10.857182 | 2020-02-10T18:02:03 | 2020-02-10T18:02:03 | 227,940,690 | 1 | 0 | null | 2022-12-08T03:31:22 | 2019-12-13T23:49:47 | Python | UTF-8 | Python | false | false | 2,501 | py | import pytest
from config import setting
from app.app import create_app
from app.extensions import db as _db
from app.models import User
from config.setting import TestConfig
@pytest.yield_fixture(scope ='session')
def app():
    """
    Set up the Flask test app; this gets executed only once per session.
    The configured database URI is suffixed with "_test" so tests never
    touch the real database.
    :return: Flask app
    """
    b_uri = '{0}_test'.format(setting.TestConfig.SQLALCHEMY_DATABASE_URI)
    setting.TestConfig.SQLALCHEMY_DATABASE_URI = b_uri
    _app = create_app(config_filename = 'config.setting.TestConfig')
    #Establish an application context before running the tests.
    ctx = _app.app_context()
    ctx.push()
    yield _app
    # Teardown: pop the context pushed above.
    ctx.pop()
@pytest.yield_fixture(scope='function')
def client(app):
    """
    Provide a fresh Flask test client; executed for each test function.
    :param app: Pytest fixture
    :return: Flask app client
    """
    test_client = app.test_client()
    yield test_client
@pytest.yield_fixture(scope="session")
def db(app):
    """
    Set up a clean database; this only gets executed once per session.
    :param app: Pytest fixture
    :return: SQLAlchemy database session
    """
    # Start from a blank schema each session.
    _db.drop_all()
    _db.create_all()
    #create a single user because a lot of tests do not mutate this user.
    #It will result in faster tests.
    # Create new entries in the database
    admin = User(app.config['SEED_ADMIN_EMAIL'],"admin",app.config['SEED_ADMIN_PASSWORD'],True)
    _db.session.add(admin)
    _db.session.commit()
    return _db
@pytest.yield_fixture(scope='function')
def session(db):
    """
    Allow very fast tests by using rollbacks and nested sessions. This does
    require that your database supports SQL savepoints, and Postgres does.
    :param db: Pytest fixture
    :return: SQLAlchemy session
    """
    # Open a savepoint, hand the session to the test, then roll back so
    # every test starts from the same database state.
    db.session.begin_nested()
    yield db.session
    db.session.rollback()
@pytest.fixture(scope='function')
def users(db):
    """
    Create the user fixtures. They are reset for every test.
    :param db: Pytest fixture
    :return: SQLAlchemy database session
    """
    #delete all users
    users = db.session.query(User).all()
    # Delete one-by-one so ORM-level cascades/events still fire.
    for user in users:
        db.session.delete(user)
    db.session.commit()
    #create new ones
    # Create new entries in the database: one admin and one regular user.
    admin = User(TestConfig.SEED_ADMIN_EMAIL,"admin",TestConfig.SEED_ADMIN_PASSWORD,True)
    user = User ( "one@one.com" , "one" , "password" )
    db.session.add(admin)
    db.session.add(user)
    db.session.commit()
    return db
| [
"ataranco@umail.iu.edu"
] | ataranco@umail.iu.edu |
17d518711e1de807039c58db780f7a15ce57d99f | b40445f5070b9c0c1358034fe74b2492612fc9cc | /new/models.py | a107d410749ee8c0477affb844401fedb57b2ba6 | [] | no_license | rahilfaizan/maindj | 9901e37f8b6e45d5169605ff183473dc9eb5ee7b | d5cd21cf0da82c5af9b67d8a0c2a02b8c1fccd5d | refs/heads/main | 2023-06-17T14:04:30.313221 | 2021-07-12T06:36:43 | 2021-07-12T06:36:43 | 386,177,513 | 0 | 0 | null | 2021-07-15T05:41:44 | 2021-07-15T05:41:43 | null | UTF-8 | Python | false | false | 564 | py | from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from django.urls import reverse
#u
class Usermodel(models.Model):
    """Application account record (separate from django.contrib.auth)."""
    # Login name; must be unique across all accounts.
    user_name = models.CharField(unique=True,max_length=50)
    # NOTE(review): stored as a plain CharField — presumably cleartext;
    # consider hashing before persisting.
    password = models.CharField(max_length=50)
    user_type = models.CharField(max_length=50)
    # Privilege descriptor; optional (blank allowed, default None).
    pri = models.CharField(max_length=100, default = None,blank= True)
class Privillages(models.Model):
    """Named privilege ("Privillages" spelling kept — migrations refer to it)."""
    privillages_name = models.CharField(max_length = 50)
    def get_absolute_url(self):
        # After create/update, redirect back to the "addpri" view.
        return reverse("addpri")
| [
"sandeepbk01@gmail.com"
] | sandeepbk01@gmail.com |
02188a1abc50946d4badece32429520bca6491a3 | 97754328eefb53cd8dc2aee0dbc452fff9ebfea5 | /alistirma3.py | eafdb91aced34323af64e5d73bb1a4e3ea415bfa | [] | no_license | OzgeSevgiMert/Alistirmalar | 3766cf985bec6d8c647140e0ad1a4cf3e9a2f158 | a91be55779aa028bf204278f768a3eea216c0021 | refs/heads/main | 2022-12-30T11:47:39.400405 | 2020-10-20T11:59:57 | 2020-10-20T11:59:57 | 305,692,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | fact=1
# Approximate Euler's number via the series  e = 1 + sum_{i=1..N} 1/i!
# ("sayac" = running sum / counter in Turkish).
fact = 1      # running factorial i!
sayac = 0.0   # running sum of 1/i!
for i in range(1, 10000):
    fact *= i
    sayac += 1 / fact
    # BUG FIX: the original `if i == 10000: break` was dead code —
    # range(1, 10000) never yields 10000 — so it has been removed.
e = 1 + sayac
print("e:", e)
| [
"noreply@github.com"
] | OzgeSevgiMert.noreply@github.com |
c391f8f9748fe696e8d837e10fe114a3f6e24301 | dc5db257646214f1140931f2d27bf1755f99a934 | /src/libs/enhancing.py | 0812e391c87e1a40287ca2d30ffc07f883ff8b8f | [] | no_license | mollihua/FingerSecure | ed54a12af6fcbccfe3c71e6c9961c7e4cae057ea | 300a859864456d24987496efb516d45e632b91e2 | refs/heads/master | 2020-12-28T12:07:36.478942 | 2020-09-21T06:52:01 | 2020-09-21T06:52:01 | 238,326,505 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,644 | py | import math
import numpy as np
import scipy
from scipy import ndimage, signal
import warnings
warnings.simplefilter("ignore")
import cv2
cv2.ocl.setUseOpenCL(False)
from libs.processing import thin_image, clean_points
def enhance_image(image: np.array, block_orientation: int = 16, threshold: float = 0.1,
                  sigma_gradient: int = 1, sigma_block: int = 7, sigma_orientation: int = 7,
                  block_frequency: int = 38, window_size: int = 5, min_wave_length: int = 5,
                  max_wave_length: int = 15, padding: int = None, skeletonise: bool = True):
    """
    Fingerprint image enhancement using Gabor filters steered by ridge orientation.

    Pipeline: CLAHE equalisation -> optional constant-border padding ->
    ridge segmentation/normalisation -> per-pixel orientation -> per-block
    ridge frequency -> oriented Gabor filtering -> binarisation (< -3) ->
    optional thinning/cleanup. Returns a uint8 image (0/1 values when
    skeletonise is False; thinned output otherwise).

    Adjusted from https://github.com/Utkarsh-Deshmukh/Fingerprint-Enhancement-Python
    Based on: Hong, Wan, Jain, 'Fingerprint image enhancement: Algorithm and
    performance evaluation', IEEE TPAMI 20(8), 1998, pp 777-789. License: BSD 2.
    """
    # CLAHE adjusted image - histogram equalisation.
    img_clahe = apply_clahe(image)
    # Pad the image (white border) so the windowed frequency mask fits.
    if padding is not None:
        top, bottom, left, right = [padding] * 4
        img_clahe = cv2.copyMakeBorder(img_clahe, top, bottom, left, right, cv2.BORDER_CONSTANT, value=255)
    # Normalise the image and segment the fingerprint area from the background.
    img_orientation = None  # (placeholder removed) -- see below
    img_normalised, mask = ridge_segment(img_clahe, block_orientation, threshold)
    # Per-pixel ridge orientation field.
    img_orientation = ridge_orient(img_normalised, sigma_gradient, sigma_block, sigma_orientation)
    # Per-block ridge frequency; `med` is the median valid frequency.
    img_frequency, med = ridge_frequency(img_normalised, mask, img_orientation, block_frequency, window_size,
                                         min_wave_length, max_wave_length)
    # Oriented Gabor filtering, then threshold strong ridge responses.
    image_filtered = ridge_filter(img_normalised, img_orientation, med * mask, .65, .65)
    image_enhanced = (image_filtered < -3)
    if skeletonise:
        # Applies image thinning and sets background to white.
        image_enhanced = thin_image(image_enhanced)
        image_enhanced = clean_points(image_enhanced)
    # Normalising image and processing background - and ridges.
    # image_enhanced = image_enhanced // image_enhanced.max() # [0, 1] values
    # Invert colours if the background is dark.
    # image_enhanced = swap(image_enhanced) if image_enhanced.mean() < .5 else image_enhanced
    # image_enhanced = image_enhanced.astype('uint8')
    return image_enhanced.astype('uint8')
def ridge_orient(image: np.array, sigma_gradient: int, sigma_block: int, sigma_orientation: int):
    """
    Estimate the local ridge orientation at every pixel.

    Gradients are taken with a Gaussian-derivative kernel, their covariance
    terms are Gaussian-smoothed, and the principal direction is recovered
    analytically from the doubled-angle sine/cosine (avoiding wraparound).

    Keyword Arguments:
        image - normalised fingerprint image
        sigma_gradient - std-dev of the Gaussian used for gradient kernels
        sigma_block - std-dev for smoothing the gradient covariance
        sigma_orientation - std-dev for smoothing the orientation field (0 to skip)
    Returns a per-pixel orientation image in radians.
    """
    # Image gradients. Kernel size ~6 sigma, forced odd.
    size = np.fix(6 * sigma_gradient)
    if np.remainder(size, 2) == 0:
        size = size + 1
    # BUG FIX: np.int was removed in NumPy 1.24; the builtin int is correct here.
    gauss = cv2.getGaussianKernel(int(size), sigma_gradient)
    # Gradient of Gaussian
    f = gauss * gauss.T
    fy, fx = np.gradient(f)
    Gx = signal.convolve2d(image, fx, mode='same')
    Gy = signal.convolve2d(image, fy, mode='same')
    # Covariance terms of the gradient field.
    Gxx = np.power(Gx, 2)
    Gyy = np.power(Gy, 2)
    Gxy = Gx * Gy
    # Smooth the covariance data to perform a weighted summation of the data.
    size = np.fix(6 * sigma_block)
    gauss = cv2.getGaussianKernel(int(size), sigma_block)
    f = gauss * gauss.T
    Gxx = ndimage.convolve(Gxx, f)
    Gyy = ndimage.convolve(Gyy, f)
    Gxy = 2 * ndimage.convolve(Gxy, f)
    # Analytic solution of principal direction (eps avoids division by zero).
    denom = np.sqrt(np.power(Gxy, 2) + np.power((Gxx - Gyy), 2)) + np.finfo(float).eps
    # Sine and cosine of doubled angles
    sin2theta = Gxy / denom
    cos2theta = (Gxx - Gyy) / denom
    if sigma_orientation:
        size = np.fix(6 * sigma_orientation)
        if np.remainder(size, 2) == 0:
            size = size + 1
        gauss = cv2.getGaussianKernel(int(size), sigma_orientation)
        f = gauss * gauss.T
        cos2theta = ndimage.convolve(cos2theta, f)  # Smoothed sine and cosine of
        sin2theta = ndimage.convolve(sin2theta, f)  # doubled angles
    # Halve the doubled angle; +pi/2 so the angle runs along the ridges.
    img_orientation = np.pi / 2 + np.arctan2(sin2theta, cos2theta) / 2
    return img_orientation
def ridge_frequency(image: np.array, mask, orient: int, block_size: int, window_size: int, min_wave_length: int,
                    max_wave_length: int) -> tuple:
    """
    Ridge frequency computation.

    Estimates a ridge frequency per block_size x block_size block (via
    frequest) and assembles the per-block values into a full-size frequency
    image, zeroed outside the fingerprint mask.

    Returns (frequency image, median of the non-zero block frequencies).
    """
    rows, cols = image.shape
    freq = np.zeros((rows, cols))
    # Fill the frequency image block by block.
    for r in range(0, rows - block_size, block_size):
        for c in range(0, cols - block_size, block_size):
            block_image = image[r: r + block_size][:, c: c + block_size]
            block_orientation = orient[r: r + block_size][:, c: c + block_size]
            freq[r: r + block_size][:, c: c + block_size] = frequest(block_image, block_orientation, window_size,
                                                                     min_wave_length, max_wave_length)
    # Zero out estimates that fall outside the fingerprint area.
    freq = freq * mask
    # Collect the indices of the non-zero (valid) frequency entries.
    freq_1d = np.reshape(freq, (1, rows * cols))
    ind = np.where(freq_1d > 0)
    ind = np.array(ind)
    ind = ind[1, :]
    non_zero_elems_in_freq = freq_1d[0][ind]
    # median_freq = np.mean(non_zero_elems_in_freq)
    # TODO: (Dragos) Review
    # The median (rather than the mean) of the valid block frequencies is
    # used as the robust global estimate.
    median_freq = np.median(non_zero_elems_in_freq)
    return freq, median_freq
def ridge_filter(im, orient, freq, kx, ky):
    """Apply orientation-steered Gabor filters to enhance the ridges.

    A reference Gabor filter is built from the (rounded) dominant ridge
    frequency, pre-rotated in `angleInc`-degree steps, and the rotation
    matching each pixel's orientation is applied at every valid pixel.

    Keyword Arguments:
        im - normalised image
        orient - per-pixel orientation image (radians)
        freq - per-pixel ridge frequency image (0 outside the mask)
        kx, ky - bandwidth scale factors along x / y
    Returns the filtered image (background pixels stay 0).
    """
    # Angular resolution of the pre-computed filter bank (degrees).
    angleInc = 3
    im = np.double(im)
    rows, cols = im.shape
    newim = np.zeros((rows, cols))
    freq_1d = np.reshape(freq, (1, rows * cols))
    ind = np.where(freq_1d > 0)
    ind = np.array(ind)
    ind = ind[1, :]
    # Round the array of frequencies to the nearest 0.01 to reduce the
    # number of distinct frequencies we have to deal with.
    non_zero_elems_in_freq = freq_1d[0][ind]
    non_zero_elems_in_freq = np.double(np.round((non_zero_elems_in_freq * 100))) / 100
    unfreq = np.unique(non_zero_elems_in_freq)
    # Generate filters corresponding to these distinct frequencies and
    # orientations in 'angleInc' increments.
    # NOTE(review): only unfreq[0] (the smallest frequency) is used to build
    # the filter bank — confirm a single shared frequency is intended.
    sigmax = 1 / unfreq[0] * kx
    sigmay = 1 / unfreq[0] * ky
    sze = np.round(3 * np.max([sigmax, sigmay]))
    x, y = np.meshgrid(np.linspace(-sze, sze, (2 * sze + 1)), np.linspace(-sze, sze, (2 * sze + 1)))
    reffilter = np.exp(-((np.power(x, 2)) / (sigmax * sigmax) + (np.power(y, 2)) / (sigmay * sigmay))) * np.cos(
        2 * np.pi * unfreq[0] * x)  # this is the original gabor filter
    filt_rows, filt_cols = reffilter.shape
    gabor_filter = np.array(np.zeros((180 // angleInc, filt_rows, filt_cols)))
    for o in range(0, 180 // angleInc):
        # Generate rotated versions of the filter. Note orientation
        # image provides orientation *along* the ridges, hence +90
        # degrees, and imrotate requires angles +ve anticlockwise, hence
        # the minus sign.
        rot_filt = scipy.ndimage.rotate(reffilter, - (o * angleInc + 90), reshape=False)
        gabor_filter[o] = rot_filt
    # Find indices of matrix points greater than maxsze from the image
    # boundary (so the filter window never leaves the image).
    maxsze = int(sze)
    temp = freq > 0
    validr, validc = np.where(temp)
    temp1 = validr > maxsze
    temp2 = validr < rows - maxsze
    temp3 = validc > maxsze
    temp4 = validc < cols - maxsze
    final_temp = temp1 & temp2 & temp3 & temp4
    finalind = np.where(final_temp)
    # Convert orientation matrix values from radians to an index value
    # that corresponds to round(degrees / angleInc).
    maxorientindex = np.round(180 / angleInc)
    orientindex = np.round(orient / np.pi * 180 / angleInc)
    # Wrap orientation indices into the valid [1, maxorientindex] range.
    for i in range(0, rows):
        for j in range(0, cols):
            if orientindex[i][j] < 1:
                orientindex[i][j] = orientindex[i][j] + maxorientindex
            if orientindex[i][j] > maxorientindex:
                orientindex[i][j] = orientindex[i][j] - maxorientindex
    # Convolve each valid pixel's neighbourhood with its matching filter.
    finalind_rows, finalind_cols = np.shape(finalind)
    sze = int(sze)
    for k in range(0, finalind_cols):
        r = validr[finalind[0][k]]
        c = validc[finalind[0][k]]
        img_block = im[r - sze:r + sze + 1][:, c - sze:c + sze + 1]
        newim[r][c] = np.sum(img_block * gabor_filter[int(orientindex[r][c]) - 1])
    return newim
def normalise(image: np.array):
    """Return *image* shifted and scaled to zero mean and unit variance."""
    mu = np.mean(image)
    sigma = np.std(image)
    return (image - mu) / sigma
def ridge_segment(im, blksze, thresh):
    """
    Normalise the image and build a mask of ridge-bearing regions.

    The image is split into blksze x blksze blocks; a block belongs to the
    fingerprint (mask True) when its grey-level standard deviation exceeds
    thresh. The image is then re-normalised using only the masked pixels.

    Keyword Arguments:
        im - input greyscale image
        blksze - side length of the square blocks
        thresh - std-dev threshold separating ridges from flat background
    Returns (normalised image, boolean ridge mask), both at the input size.
    """
    rows, cols = im.shape
    im = normalise(im)  # normalise to get zero mean and unit standard deviation
    # BUG FIX: np.int / np.float were removed in NumPy 1.24; use the builtins.
    # Pad up to a whole number of blocks so every block has the same size.
    new_rows = int(blksze * np.ceil(float(rows) / float(blksze)))
    new_cols = int(blksze * np.ceil(float(cols) / float(blksze)))
    padded_img = np.zeros((new_rows, new_cols))
    stddevim = np.zeros((new_rows, new_cols))
    padded_img[0:rows][:, 0:cols] = im
    for i in range(0, new_rows, blksze):
        for j in range(0, new_cols, blksze):
            block = padded_img[i:i + blksze][:, j:j + blksze]
            # Broadcast the block's std-dev across the whole block.
            stddevim[i:i + blksze][:, j:j + blksze] = np.std(block) * np.ones(block.shape)
    # Crop back to the original image size.
    stddevim = stddevim[0:rows][:, 0:cols]
    mask = stddevim > thresh
    # Re-normalise using only the pixels inside the fingerprint area.
    mean_val = np.mean(im[mask])
    std_val = np.std(im[mask])
    normim = (im - mean_val) / (std_val)
    return normim, mask
def frequest(im, orientim, windsze, min_wave_length, max_wave_length):
    """Estimate the ridge frequency within a single image block.

    The block is rotated so the ridges run vertically, the grey values are
    projected down the columns, and the spacing between projection peaks
    gives the ridge wavelength. Returns a block-shaped array filled with
    1/wavelength, or zeros when no reliable estimate is found.

    Keyword Arguments:
        im - image block
        orientim - orientation values (radians) for the block
        windsze - window size for the grey dilation used in peak finding
        min_wave_length, max_wave_length - accepted wavelength range (pixels)
    """
    rows, cols = np.shape(im)
    # Find mean orientation within the block. This is done by averaging the
    # sines and cosines of the doubled angles before reconstructing the
    # angle again. This avoids wraparound problems at the origin.
    cosorient = np.mean(np.cos(2 * orientim))
    sinorient = np.mean(np.sin(2 * orientim))
    orient = math.atan2(sinorient, cosorient) / 2
    # Rotate the image block so that the ridges are vertical
    # ROT_mat = cv2.getRotationMatrix2D((cols / 2, rows / 2), orient / np.pi * 180 + 90, 1)
    # rotim = cv2.warpAffine(im, ROT_mat, (cols, rows))
    rotim = scipy.ndimage.rotate(im, orient / np.pi * 180 + 90, axes=(1, 0), reshape=False, order=3, mode='nearest')
    # Now crop the image so that the rotated image does not contain any
    # invalid regions. This prevents the projection down the columns
    # from being mucked up.
    cropsze = int(np.fix(rows / np.sqrt(2)))
    offset = int(np.fix((rows - cropsze) / 2))
    rotim = rotim[offset:offset + cropsze][:, offset:offset + cropsze]
    # Sum down the columns to get a projection of the grey values down
    # the ridges.
    proj = np.sum(rotim, axis=0)
    # Peaks are points where the grey dilation equals the projection
    # (within peak_thresh) and the projection is above its mean.
    dilation = scipy.ndimage.grey_dilation(proj, windsze, structure=np.ones(windsze))
    temp = np.abs(dilation - proj)
    peak_thresh = 2
    maxpts = (temp < peak_thresh) & (proj > np.mean(proj))
    maxind = np.where(maxpts)
    rows_maxind, cols_maxind = np.shape(maxind)
    # Determine the spatial frequency of the ridges by divinding the
    # distance between the 1st and last peaks by the (No of peaks-1). If no
    # peaks are detected, or the wavelength is outside the allowed bounds,
    # the frequency image is set to 0
    if cols_maxind < 2:
        freqim = np.zeros(im.shape)
    else:
        peaks = cols_maxind
        wave_length = (maxind[0][cols_maxind - 1] - maxind[0][0]) / (peaks - 1)
        if min_wave_length <= wave_length <= max_wave_length:
            freqim = 1 / np.double(wave_length) * np.ones(im.shape)
        else:
            freqim = np.zeros(im.shape)
    return freqim
def binarise_image(image: np.array, normalise: bool = True) -> np.array:
    """Binarise *image* using Otsu's automatically chosen threshold.

    Returns a {0, 255} image, or a {0, 1} image when *normalise* is True.
    """
    _, binary = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    if normalise:
        binary[binary == 255] = 1  # map foreground 255 -> 1
    return binary
def apply_clahe(image: np.array, clip_limit: float = 2.0, tile_grid_size: tuple = (8, 8)):
    """Contrast Limited Adaptive Histogram Equalization (CLAHE) of *image*."""
    equalizer = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile_grid_size)
    return equalizer.apply(image)
def fourier_transform(image: np.array) -> np.array:
    """Centred 2D DFT of *image* (zero-frequency term shifted to the middle).

    Computed with NumPy's FFT; processes the whole image at once.
    """
    return np.fft.fftshift(np.fft.fft2(image))
def high_pass_filter(image: np.array) -> np.array:
    """High-pass filter: zero a 60x60 window around the spectrum centre.

    The centred spectrum is computed inline (same as fourier_transform),
    the low-frequency block is suppressed, and the magnitude of the
    inverse transform is returned.
    """
    rows, cols = image.shape
    centre_r, centre_c = rows // 2, cols // 2
    spectrum = np.fft.fftshift(np.fft.fft2(image))
    spectrum[centre_r - 30: centre_r + 30, centre_c - 30: centre_c + 30] = 0
    restored = np.fft.ifft2(np.fft.ifftshift(spectrum))
    return np.abs(restored)
| [
"mochenserey@gmail.com"
] | mochenserey@gmail.com |
730edb27719a3962eac1676de8595e04b42efd24 | 1e1435cf6b89c9d60ad31c43761be8a718a1020f | /flask/Scripts/rst2html.py | 0ce75a5f4508ebe42bc61f063214cbc22e5b5b24 | [] | no_license | voronin601/vk_proof | 287ff7eae03b4d26c174c3f0851ffb33e13a8bef | 80ddd28fed5c3cb8d7875aca5a19ed45e8cc51ca | refs/heads/master | 2023-03-20T21:54:46.650064 | 2020-04-29T20:00:50 | 2020-04-29T20:00:50 | 260,029,871 | 0 | 0 | null | 2021-03-20T03:47:05 | 2020-04-29T19:59:28 | Python | UTF-8 | Python | false | false | 626 | py | #!e:\work\vk_proof\podpis\flask\scripts\python.exe
# $Id: rst2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML.
"""
# Best-effort locale initialisation so Docutils handles non-ASCII input;
# failures are deliberately ignored (upstream Docutils idiom — the bare
# except is intentional here).
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass

from docutils.core import publish_cmdline, default_description

# CLI description shown by --help, combined with the standard Docutils blurb.
description = ('Generates (X)HTML documents from standalone reStructuredText '
               'sources. ' + default_description)

# Run the Docutils publisher front end with the HTML writer.
publish_cmdline(writer_name='html', description=description)
| [
"vovavoronin1998@gmail.com"
] | vovavoronin1998@gmail.com |
e7573b39be3b536a953e2ef64a93f715acb75c24 | a41a0514992cb24ad21caefbd536adda019b6e0e | /single_cell2marker.py | ad2a57501bcaf8f2b62a2e7ef03e006cc34b218b | [] | no_license | stephengao0121/SI231-project | 89c9aa0e0cfc6ee1f0bd42f30e908b98f441bb3c | 5aace0baeae3fb4b7db845c7ad2d3ce7d301126b | refs/heads/main | 2023-02-04T18:41:52.825083 | 2020-12-22T11:34:08 | 2020-12-22T11:34:08 | 322,760,390 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,803 | py | '''
Prepare the marker matrix from single cell data
author: panxq
email: panxq@shanghaitech.edu.cn
'''
import argparse
import numpy as np
import pandas as pd
import scanpy
def set_parser(parser):
    '''
    Register the command line arguments on the given parser.

    Keyword Arguments:
        parser - argparse.ArgumentParser instance to configure
    '''
    # (flags, destination attribute, help text); every option is required.
    options = [
        (('-E', '--expression_matrix'), 'expression_matrix',
         'expression matrix to be extracted'),
        (('-B', '--barcode'), 'barcodes',
         'barcode for the expression matrix in tsv format'),
        (('-F', '--feature'), 'features',
         'Features for the expression matrix in tsv format'),
        (('-C', '--clustering_result'), 'clusters',
         'clustering result in csv format'),
        (('-O', '-opath'), 'opath',
         'output path'),
    ]
    for flags, dest, help_text in options:
        parser.add_argument(*flags, dest=dest, required=True, help=help_text)
def main(args):
    '''
    The main function: build a cluster-marker matrix (mean expression per
    cluster for every feature) and write it to args.opath as CSV.

    Keyword Arguments:
        args - parsed command line arguments (see set_parser)
    '''
    # Read in annotation data: barcodes and features come from tsv files.
    barcodes = pd.read_csv(args.barcodes, sep='\t',
                           names=['barcode'])
    features = pd.read_csv(args.features, sep='\t',
                           names=['ID', 'name', 'data_type'])
    # Clustering result: one (Barcode, Cluster) row per cell.
    clusters = pd.read_csv(args.clusters)
    cluster_names = clusters['Cluster'].unique()
    # Expression matrix read from mtx files; densified for DataFrame use.
    expression_matrix = scanpy.read(args.expression_matrix)
    expression_matrix = expression_matrix.X.todense()
    # Annotate the matrix: rows = feature IDs, columns = cell barcodes.
    # NOTE(review): this assumes the stored matrix is features x barcodes;
    # scanpy/10x matrices are often cells x genes — confirm the orientation.
    expression_matrix = pd.DataFrame(data=expression_matrix,
                                     index=features['ID'].values,
                                     columns=barcodes.values[:, 0])
    marker_matrix = pd.DataFrame(index=features['ID'].values)
    # One marker column per cluster: mean expression over that cluster's cells.
    for name in cluster_names:
        cluster_barcodes = clusters.loc[clusters['Cluster'] == name, 'Barcode'].values
        marker_matrix.loc[:, name] = expression_matrix.loc[:, cluster_barcodes].agg(np.mean, axis=1).values
    marker_matrix.to_csv(args.opath)
if __name__ == "__main__":
    # Build the CLI, parse the arguments, and run the extraction.
    arg_parser = argparse.ArgumentParser(
        description='Prepare the marker matrix from single cell data')
    set_parser(arg_parser)
    main(arg_parser.parse_args())
| [
"827019851@qq.com"
] | 827019851@qq.com |
2dab798c00432625c88bf3e1a0ba98adf19055aa | 66f66155a7f9dd11b0f1bff092411523ac9366fc | /leds/led_service.py | eb8c0e366d071087962ac0f832286847c68f0c9a | [] | no_license | GarnetSquadron4901/rpi-vision-processing | c1807239a530f35a4dd513db89771a371ab6df56 | 92ccee0d1ac8fa6f9f3ba2017a8e1aa1ae4f4897 | refs/heads/master | 2021-05-31T14:49:27.649798 | 2016-03-27T18:52:04 | 2016-03-27T18:52:04 | 52,922,315 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,639 | py | # NeoPixel library strandtest example
# Author: Tony DiCola (tony@tonydicola.com)
#
# Direct port of the Arduino NeoPixel library strandtest example. Showcases
# various animations on a strip of NeoPixels.
import time
import sys
import math
import Pyro4
import threading
from neopixel import *
# LED strip configuration:
LED_COUNT = 24 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5 # DMA channel to use for generating signal (try 5)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
class LED_Server(object):
    """Drives a NeoPixel ring from a background thread.

    The ring shows one of three states, refreshed by service():
      * error: whole ring in the error colour (updated every 0.5 s)
      * progress < 100: completed LEDs solid, remaining LEDs "breathing"
      * progress >= 100: whole ring in the running colour
    Colours are stored as separate R/G/B components via the setters below.

    NOTE: Color() is fed components in (G, R, B) order throughout — the
    strip is evidently wired GRB; keep that ordering consistent.
    """
    def __init__(self):
        # Create NeoPixel object with appropriate configuration.
        self.strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS)
        # Initialize the library (must be called once before other functions).
        self.strip.begin()
        # Percentage [0-100] currently shown on the ring.
        self.progress = 0
        # Colour of the "completed" portion of the progress ring.
        self.completedColorR = 0
        self.completedColorG = 0
        self.completedColorB = 255
        # Colour shown once progress reaches 100.
        self.runningColorR = 255
        self.runningColorG = 0
        self.runningColorB = 255
        # Colour of the pulsing "still loading" LEDs.
        self.loadingColorR = 0
        self.loadingColorG = 0
        self.loadingColorB = 255
        # Colour used while the error flag is set.
        self.errorColorR = 255
        self.errorColorG = 0
        self.errorColorB = 0
        # Background thread that continuously refreshes the strip.
        self.thread = threading.Thread(target=self.service)
        self.leds_off()
    def run(self):
        """Clear the error flag and start the background refresh thread."""
        self.error = False
        # NOTE(review): this assignment shadows the run() method on the
        # instance (self.run becomes a bool read by the service loop), so
        # run() can only be called once per object — confirm intended.
        self.run = True
        self.thread.start()
    def stop(self):
        """Ask the service loop to exit and blank the strip.

        NOTE(review): the thread is not joined, so one more strip.show()
        may land after leds_off() — confirm this is acceptable.
        """
        self.run = False
        self.leds_off()
    def setProgress(self, _progress):
        """Set the displayed progress percentage (0-100)."""
        self.progress = _progress
    def setRunningColor(self, r, g, b):
        """Set the colour shown when progress is complete."""
        self.runningColorR = r
        self.runningColorG = g
        self.runningColorB = b
    def setLoadingColor(self, r, g, b):
        """Set the colour of the pulsing, not-yet-completed LEDs."""
        self.loadingColorR = r
        self.loadingColorG = g
        self.loadingColorB = b
    def setCompletedColor(self, r, g, b):
        """Set the colour of the completed portion of the progress ring."""
        self.completedColorR = r
        self.completedColorG = g
        self.completedColorB = b
    def setErrorColor(self, r, g, b):
        """Set the colour used while the error flag is set."""
        self.errorColorR = r
        self.errorColorG = g
        self.errorColorB = b
    def setError(self):
        """Switch the ring into the error state."""
        self.error = True
    def clearError(self):
        """Return the ring to the normal (progress/running) state."""
        self.error = False
    def leds_off(self):
        """Blank every LED on the strip."""
        for led in range(LED_COUNT):
            self.strip.setPixelColor(led, Color(0, 0, 0))
        self.strip.show()
    def service(self):
        """Refresh loop run by the background thread until self.run is False."""
        i = 0
        while self.run == True:
            if self.error:
                # Error state: paint the whole ring in the error colour.
                for led in range(LED_COUNT):
                    self.strip.setPixelColor(led, Color(self.errorColorG, self.errorColorR, self.errorColorB))
                self.strip.show()
                time.sleep(0.5)
            else:
                if self.progress < 100:
                    # Number of LEDs representing completed progress.
                    completed = int(LED_COUNT * (float(self.progress) / 100.0))
                    # Phase counter for the sine "breathing" effect (0-100).
                    if i >= 100:
                        i = 0
                    else:
                        i += 1
                    for led in range(completed):
                        self.strip.setPixelColor(led, Color(self.completedColorG, self.completedColorR, self.completedColorB))
                    for led in range(completed, LED_COUNT):
                        # Sinusoidal brightness gain in [0, 1] over ~one period.
                        gain = (math.sin((i / 100.0) * 6.28) + 1.0) / 2.0
                        self.strip.setPixelColor(led, Color(int(self.loadingColorG * gain), int(self.loadingColorR * gain), int(self.loadingColorB * gain)))
                    self.strip.show()
                    time.sleep(0.01)
                else:
                    # Progress complete: solid "running" colour.
                    for led in range(LED_COUNT):
                        self.strip.setPixelColor(led, Color(self.runningColorG, self.runningColorR, self.runningColorB))
                    self.strip.show()
                    time.sleep(0.5)
if __name__ == "__main__":
    # Expose the LED server over Pyro4 RPC under the name 'ledService'.
    # (Python 2 script: note the statement-form print below.)
    ledService = LED_Server()
    daemon = Pyro4.Daemon()
    ns = Pyro4.locateNS()
    uri = daemon.register(ledService)
    ns.register('ledService', uri)
    print 'Ready'
    # Block forever serving remote calls.
    daemon.requestLoop()
| [
"ryannazaretian@gmail.com"
] | ryannazaretian@gmail.com |
4c7dff0f3048c5057e8104ea43beb06aba713674 | cae86200ff25ff941f7bacbe9b363e6604b83122 | /zw/rainUtil.py | de1929c5648b548e6e3b4cdb974cd6c8af437e35 | [] | no_license | enniefu/CIS-ML | 835ddedc3f7d052027163dd646081988c49d9d62 | 0617742dfe111e2eb2f3e49fea982bc5820c75e9 | refs/heads/master | 2020-06-22T15:32:07.126125 | 2019-08-01T08:37:50 | 2019-08-01T08:37:50 | 197,738,019 | 0 | 0 | null | 2019-07-21T08:22:09 | 2019-07-19T08:50:22 | Python | UTF-8 | Python | false | false | 7,089 | py | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from keras.utils import normalize
from scipy import stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn import preprocessing
import seaborn as sns
import matplotlib.pyplot as plt
pd.set_option('display.max_columns', 1000)
pd.set_option('display.width', 1000)
pd.set_option('display.max_colwidth', 1000)
def offerData(url, label = "RainTomorrow", removeCol = False, location = "", removeOutliers = False, intervals = 1, selectK = 1):
    """Load the Australian weather CSV and prepare features and labels.

    Steps: read the CSV, derive a numeric Month column from the date,
    drop rows with missing values, drop the raw Date column (and optionally
    the sparse columns), encode the rain flags as 0/1, label-encode the
    wind/location categories, optionally remove 3-sigma outliers, and return
    (X, y).

    Keyword Arguments:
        url - path to the weatherAUS-style CSV file
        label - name of the target column (default 'RainTomorrow')
        removeCol - also drop the sparse columns (Sunshine, Evaporation, ...)
        location - unused; kept for interface compatibility
        removeOutliers - drop rows > 3 std-devs out on any numeric column
        intervals, selectK - unused; kept for interface compatibility
    Returns (X, y) where X is the 'Rainfall' Series and y is df[label].
    """
    df = pd.read_csv(url)
    print('Size of weather data frame is :', df.shape)
    # Derive a numeric month from the ISO date string (characters 5:7 = "MM").
    df['Month'] = [int(d[5:7]) for d in df['Date']]
    df = df.dropna(how='any')
    print('Size of weather data frame is :', df.shape)
    # The raw date string is no longer needed once Month has been extracted.
    df = df.drop(columns=['Date'])
    if removeCol:
        # BUG FIX: the original list also contained 'Date', which was already
        # dropped above, so this branch always raised KeyError. errors='ignore'
        # also tolerates datasets without RISK_MM.
        df = df.drop(columns=['Sunshine', 'Evaporation', 'Cloud3pm', 'Cloud9am',
                              'Location', 'RISK_MM'], errors='ignore')
        categorical_columns = ['WindGustDir', 'WindDir3pm', 'WindDir9am']
    else:
        categorical_columns = ['WindGustDir', 'WindDir3pm', 'WindDir9am', 'Location']
    # `categorical_columns` is retained for a (currently disabled) one-hot
    # encoding path; the label encoding below is used instead.
    df = df.dropna(how='any')
    # Encode the binary rain flags as 0/1 integers.
    # (Assignment form instead of chained inplace replace, which is
    # deprecated in recent pandas.)
    df['RainToday'] = df['RainToday'].replace({'No': 0, 'Yes': 1})
    df['RainTomorrow'] = df['RainTomorrow'].replace({'No': 0, 'Yes': 1})
    # Label-encode the categorical columns (non-one-hot encoding).
    # BUG FIX: guard on presence — with removeCol=True, 'Location' has been
    # dropped and the original unconditional access raised KeyError.
    for col in ['Location', 'WindGustDir', 'WindDir9am', 'WindDir3pm']:
        if col in df.columns:
            df[col] = df[col].astype('category').cat.codes
    if removeOutliers:
        # Drop rows more than 3 standard deviations out on any numeric column.
        # NOTE(review): _get_numeric_data is a private pandas API — confirm
        # before upgrading pandas.
        z = np.abs(stats.zscore(df._get_numeric_data()))
        df = df[(z < 3).all(axis=1)]
        df.reset_index(drop=True, inplace=True)
    # NOTE(review): only the 'Rainfall' column is returned as X, matching the
    # original behaviour (the full feature matrix was built and then
    # discarded) — confirm this is intended.
    X = df["Rainfall"]
    y = df[label]
    return X, y
if __name__ == '__main__':
    # Smoke-run: load and preprocess the default dataset path.
    offerData('./weatherAUS.csv')
| [
"934406854@qq.com"
] | 934406854@qq.com |
02f18ec302f421dfa21b570e9bae1c13fb43e29f | 207a7ad4f5817b1c694b9af889b5aa4a5ca43b56 | /LanBroker/landbroker/apps.py | 2b8623f763ca66ea1217e00a63099e4a058eac1d | [] | no_license | wilsenmuts/LandBroker | 81b37c3ab7a18a1cab97092f6708d768e8cbfba1 | fbd8737069c6dbcb0b020b822da38a6eed5f8347 | refs/heads/master | 2020-12-26T06:18:26.294451 | 2020-10-30T11:10:13 | 2020-10-30T11:10:13 | 237,413,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | from django.apps import AppConfig
class LandbrokerConfig(AppConfig):
    """Django application configuration for the ``landbroker`` app."""
    # Dotted app label used by Django's app registry.
    name = 'landbroker'
| [
"wilsonmutebi41@gmail.com"
] | wilsonmutebi41@gmail.com |
7635478e2b2018690ee0956d2047ed7f2ae3e0b5 | fa2ede8940d284fc80e8e74b42bfb4b9b8791967 | /plots/fig3/Ropt_vs_T_comparison.py | 6dcbc95074d61582954ed06c35d18806fffe6d9f | [
"BSD-3-Clause"
] | permissive | lrudelt/hdestimator | 73c9c1a45618cdc8c9743b7038d6f9f3f9ffedfd | 415f8d7482d6538fd761fba8d1be758be2819aca | refs/heads/master | 2023-01-09T12:43:42.985878 | 2020-11-10T13:28:18 | 2020-11-10T13:28:18 | 278,210,799 | 0 | 0 | null | 2020-07-08T22:55:40 | 2020-07-08T22:55:39 | null | UTF-8 | Python | false | false | 6,196 | py |
"""Functions"""
import os
import sys
from sys import exit, stderr, argv, path, modules
from os.path import isfile, isdir, realpath, dirname, exists
from scipy.optimize import bisect
import csv
import yaml
import numpy as np
import pandas as pd
# plotting
import seaborn.apionly as sns
from scipy.optimize import bisect
from matplotlib import rc
import matplotlib.lines as mlines
import pylab as plt
import matplotlib
# ESTIMATOR_DIR = '{}/../..'.format(dirname(realpath(__file__)))
ESTIMATOR_DIR = '/home/lucas/research/projects/history_dependence/hdestimator'
path.insert(1, '{}/src'.format(ESTIMATOR_DIR))
# fig = dirname(realpath(__file__)).split("/")[-1]
fig = 'fig3'
PLOTTING_DIR = '/home/lucas/research/papers/history_dependence/arXiv/figs/{}'.format(
fig)
if 'hde_glm' not in modules:
import hde_glm as glm
import hde_utils as utl
import hde_plotutils as plots
recorded_system = 'Simulation'
rec_length = '90min'
sample_index = 0
"""Load data """
# load estimate of ground truth
R_tot_true = np.load('{}/analysis_data/R_tot_simulation.npy'.format(ESTIMATOR_DIR))
T_true, R_true = plots.load_analysis_results_glm_Simulation(ESTIMATOR_DIR)
# Load settings from yaml file
setup = 'full_bbc'
ANALYSIS_DIR, analysis_num_str, R_tot_bbc, T_D_bbc, T, R_bbc, R_bbc_CI_lo, R_bbc_CI_hi = plots.load_analysis_results(
recorded_system, rec_length, sample_index, setup, ESTIMATOR_DIR, regularization_method='bbc')
R_tot_bbc, T_D_index_bbc, max_valid_index_bbc = plots.get_R_tot(T, R_bbc, R_bbc_CI_lo)
glm_bbc_csv_file_name = '{}/ANALYSIS{}/glm_benchmark_bbc.csv'.format(
ANALYSIS_DIR, analysis_num_str)
glm_bbc_pd = pd.read_csv(glm_bbc_csv_file_name)
R_glm_bbc = np.array(glm_bbc_pd['R_GLM'])
setup = 'full_shuffling'
ANALYSIS_DIR, analysis_num_str, R_tot_shuffling, T_D_shuffling, T, R_shuffling, R_shuffling_CI_lo, R_shuffling_CI_hi = plots.load_analysis_results(
recorded_system, rec_length, sample_index, setup, ESTIMATOR_DIR, regularization_method='shuffling')
R_tot_shuffling, T_D_index_shuffling, max_valid_index_shuffling = plots.get_R_tot(T, R_shuffling, R_shuffling_CI_lo)
glm_shuffling_csv_file_name = '{}/ANALYSIS{}/glm_benchmark_shuffling.csv'.format(
ANALYSIS_DIR, analysis_num_str)
glm_shuffling_pd = pd.read_csv(glm_shuffling_csv_file_name)
R_glm_shuffling = np.array(glm_shuffling_pd['R_GLM'])
"""Plotting"""
rc('text', usetex=True)
matplotlib.rcParams['font.size'] = '15.0'
matplotlib.rcParams['xtick.labelsize'] = '15'
matplotlib.rcParams['ytick.labelsize'] = '15'
matplotlib.rcParams['legend.fontsize'] = '15'
matplotlib.rcParams['axes.linewidth'] = 0.6
fig, ((ax)) = plt.subplots(1, 1, figsize=(3.5, 3))
# fig.set_size_inches(4, 3)
# Colors
main_red = sns.color_palette("RdBu_r", 15)[12]
soft_red = sns.color_palette("RdBu_r", 15)[12]
main_blue = sns.color_palette("RdBu_r", 15)[1]
# sns.palplot(sns.color_palette("RdBu_r", 15)) #visualize the color palette
# ax.set_color_cycle(sns.color_palette("coolwarm_r",num_lines)) setting it
# as color cycle to do automatised color assignment
##########################################
########## Simulated Conventional ########
##########################################
ax.set_xscale('log')
ax.set_xlim((0.1, 3.5))
# ax.set_xticks(np.array([1, 10, 50]))
ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax.spines['bottom'].set_bounds(0.1, 3)
# ax.set_xlabel(r'memory depth $T_m$ (sec)')
##### y-axis ####
# ax.set_ylabel(r'$M$')
ax.set_ylim((0.1, .14))
ax.set_yticks([0.1, 0.12, 0.14])
ax.spines['left'].set_bounds(.1, 0.14)
##### Unset Borders #####
ax.spines['top'].set_bounds(0, 0)
ax.spines['right'].set_bounds(0, 0)
# GLM true max and R vs T
ax.plot([T[0], T[-1]], [R_tot_true, R_tot_true], '--', color='0.5', zorder=1)
ax.plot(T_true, R_true, color='.5', zorder=4)
# GLM for same embeddings as comparison
ax.plot(T, R_glm_bbc, '-.', color='.4', alpha=0.8,
zorder=3, label='true $R(T)$ (BBC)') # , label='Model'
ax.plot(T, R_glm_shuffling, ':', color='.4',
lw=1.8, alpha=0.8, zorder=2, label=r'true $R(T)$ (Shuffling)')
# Embedding optimized estimates and confidence intervals
ax.plot(T, R_bbc, linewidth=1.2, color=main_red, zorder=4)
ax.fill_between(T, R_bbc_CI_lo, R_bbc_CI_hi, facecolor=main_red, alpha=0.3)
ax.plot(T, R_shuffling, linewidth=1.2, color=main_blue, zorder=3)
ax.fill_between(T, R_shuffling_CI_lo, R_shuffling_CI_hi,
facecolor=main_blue, alpha=0.3)
ax.plot(T[T_D_index_bbc:max_valid_index_bbc], np.zeros(max_valid_index_bbc-T_D_index_bbc)+R_tot_bbc, color=main_red, linestyle='--')
ax.plot(T[T_D_index_shuffling:max_valid_index_shuffling], np.zeros(max_valid_index_shuffling-T_D_index_shuffling)+R_tot_shuffling, color=main_blue, linestyle='--')
# Rtot and Tdepth bbc
ax.axvline(x=T_D_bbc, ymax=0.7, color=main_red,
linewidth=0.5, linestyle='--')
ax.axhline(y=R_tot_bbc, xmax=.5, color=main_red,
linewidth=0.5, linestyle='--')
ax.plot([0.1], [R_tot_bbc], marker='d', markersize=3, color=main_red,
zorder=8)
ax.plot([T_D_bbc], [0.1], marker='d', markersize=3, color=main_red,
zorder=8)
ax.plot([T_D_bbc], [R_tot_bbc], marker='x', markersize=6, color=main_red,
zorder=8)
# ax.text(0.012, M_max + 0.02 * M_max, r'$\hat{R}_{tot}$')
ax.text(T_D_bbc + 0.15 * T_D_bbc, .101, r'$\hat{T}_D$')
# Rtot and Tdepth Shuffling
ax.axvline(x=T_D_shuffling, ymax=0.6, color=main_blue,
linewidth=0.5, linestyle='--')
ax.axhline(y=R_tot_shuffling, xmax=.45, color=main_blue,
linewidth=0.5, linestyle='--')
ax.plot([0.1], [R_tot_shuffling], marker='d', markersize=3, color=main_blue,
zorder=8)
ax.plot([T_D_shuffling], [0.1], marker='d', markersize=3, color=main_blue,
zorder=8)
ax.plot([T_D_shuffling], [R_tot_shuffling], marker='x', markersize=6, color=main_blue,
zorder=8)
# ax.text(0.012, M_max + 0.02 * M_max, r'$\hat{R}_{tot}$')
# ax.text(T_D_shuffling + 0.15 * Tm_eff, .101, r'$\hat{T}_D$')
ax.legend(loc=(.05, .83), frameon=False)
fig.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
plt.savefig('{}/Ropt_vs_T_comparison.pdf'.format(PLOTTING_DIR),
format="pdf", bbox_inches='tight')
plt.show()
plt.close()
| [
"lucas@viboras.lmp.ds.mpg.de"
] | lucas@viboras.lmp.ds.mpg.de |
69aa981fa4ca59e2b4d194f6c7f3461ca39a987d | 87410d147f3b708f3c047dd605e571ac0f564a3e | /HousingData/HousingData/run.py | c9eae93cd9aca336a13cc0128222ed02f0b9bb18 | [] | no_license | zoudaokou0804/SH_Life | 88617bc0bf8f14fd2691db463effcbfac1f5062b | 4d273b8da098a2133ded6b4aa8286fcc6f6c4519 | refs/heads/master | 2020-12-31T10:36:07.802585 | 2020-02-08T06:29:26 | 2020-02-08T06:29:26 | 239,003,990 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | #!/user/bin/env python
# -*- encoding:utf-8 -*-
'''
@File:run.py
@Time:2020/02/08 02:11:50
@Author:zoudaokou
@Version:1.0
@Contact:wangchao0804@163.com

Entry point that launches the LianJia spider from inside the scrapy project
directory, so the crawl can be started/debugged directly from an IDE.
'''
from scrapy.cmdline import execute
import os
from Handle import handle_data
# Resolve the directory containing this file.
dirpath = os.path.dirname(os.path.abspath(__file__))
# Change into the scrapy project root (the parent directory).
# NOTE(review): splitting on "\\" makes this Windows-only — confirm.
os.chdir(dirpath[:dirpath.rindex("\\")])
# Launch the crawler; the spider's name is 'LianJia'.
# NOTE(review): scrapy.cmdline.execute() does not document a `func`
# keyword — confirm this argument is actually honoured.
execute(['scrapy','crawl','LianJia'],func=handle_data)
"39996528+zoudaokou0804@users.noreply.github.com"
] | 39996528+zoudaokou0804@users.noreply.github.com |
09f6d1fb57112c1f50258c21e64e2daf37b9d68a | 078b4d2495d2b915b53771dda298095551807202 | /django/filter/urls.py | 8284862edb76b97dc6d23e5450f47b998212d239 | [] | no_license | sashpro/you_posting | c9dd9c4ade23ac284647604054c66ac5c18db6fb | 8a9972735c6d4f70dfd2bb8cccbde5d4e401cacb | refs/heads/master | 2021-07-03T18:30:29.287727 | 2017-04-07T08:12:20 | 2017-04-07T08:12:20 | 87,197,892 | 0 | 0 | null | 2021-06-10T20:06:11 | 2017-04-04T14:40:03 | Python | UTF-8 | Python | false | false | 165 | py | from django.conf.urls import url
from filter import views
urlpatterns = [
#url(r'^$', views.index_filter),
url(r'^get_channels/$', views.filter_channels),
] | [
"sashpro@mail.ru"
] | sashpro@mail.ru |
d57ffa4da9a341d5cc7564a22c3065a2bb5f8ab7 | cbe264842df4eae3569b28ed4aae9489014ed23c | /books/PythonCleanCode/ch2_pythonic_code/caveats.py | 1f330412a4fd8ef9851949c867192bfbd60a2f7d | [
"MIT"
] | permissive | zeroam/TIL | 31e176c2f4c3e1ef72b1155353690cc2f7160f96 | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | refs/heads/master | 2021-07-23T01:43:34.135033 | 2021-07-10T06:47:17 | 2021-07-10T06:47:17 | 167,952,375 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | """Clean Code in Python - Chapter 2: Pythonic Code
> Caveats in Python
"""
from collections import UserList
def wrong_user_display(user_metadata: dict = {'name': 'John', 'age': 30}):
name = user_metadata.pop('name')
age = user_metadata.pop('age')
return f'{name} ({age})'
def user_display(user_metadata: dict = None):
user_metadata = user_metadata or {'name': 'John', 'age': 30}
name = user_metadata.pop('name')
age = user_metadata.pop('age')
return f'{name} ({age})'
class BadList(list):
def __getitem__(self, index):
value = super().__getitem__(index)
if index % 2 == 0:
prefix = 'even'
else:
prefix = 'odd'
return f'[{prefix}] {value}'
class GoodList(UserList):
def __getitem__(self, index):
value = super().__getitem__(index)
if index % 2 == 0:
prefix = 'even'
else:
prefix = 'odd'
return f'[{prefix}] {value}'
| [
"imdff0803@gmail.com"
] | imdff0803@gmail.com |
e98351b25584ebae053f2d95eeee850cea227ab6 | 671878e08ad7c1f273df9b77f8dc47d85997243b | /edinburgh_materials/hawaii-plot.py | 22555a79aed4dadd132b87ac48fd1b38257db994 | [] | no_license | BritishGeologicalSurvey/python-clinic | 3e1c0458d8dc9849c17f8556bc6dcb06b9d930dd | 7a2836d6a3a23c74c6e2a475a0c5d9ab923019f6 | refs/heads/master | 2023-08-08T04:53:02.765057 | 2023-03-21T16:28:13 | 2023-03-21T16:28:13 | 201,021,399 | 1 | 1 | null | 2023-07-25T21:21:54 | 2019-08-07T09:48:26 | Jupyter Notebook | UTF-8 | Python | false | false | 1,042 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 30 13:03:36 2019
@author: ahall
"""
import pandas as pd
from matplotlib import pyplot as plt
import datetime
names=['year',
'month',
'decimal date',
'average',
'interpolated',
'trend',
'num days']
df = pd.read_csv('co2_mm_mlo.txt',
comment='#',
sep="\s+",
header=None,
index_col=1,
names=names,
na_values=[-99.99, -1],
parse_dates={'date':[0, 1]}
)
fig=plt.figure()
plt.plot(df['date'],df['average'],color='r')
plt.plot(df['date'],df['trend'],color='k')
plt.xlabel('YEAR')
plt.ylabel('PARTS PER MILLION')
plt.grid(b=True, which='major', axis='both')
plt.text(datetime.date(1958,1,1), 400, 'Scripps Institution of Oceanography\nNOAA Earth System Research Laboratory')
fig.suptitle(r'Atmospheric $CO_2$ at Mauna Loa Observatory')
fig.savefig('hawaii_plot.png') | [
"jostev@bgs.ac.uk"
] | jostev@bgs.ac.uk |
0deac75a47407a99954d3757cf9236fad478eefb | ba71d99e2b21fa006e34d2e5057ede6691231ea5 | /venv/bin/easy_install-3.6 | 8610ad0c28dd03c08b33719d92f1560b631bd64f | [] | no_license | imvivek71/DjangoRest | c541097439c88fa7ff2a7af75382d55eba4ae1f5 | fe5d5c92d0ae7deb35519dabece17840f9946d9a | refs/heads/master | 2020-05-01T23:45:17.319370 | 2019-03-27T09:15:38 | 2019-03-27T09:15:38 | 177,667,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | 6 | #!/home/vivek/PycharmProjects/DjangoRest/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.6')()
)
| [
"vivekgoswami71@gmail.com"
] | vivekgoswami71@gmail.com |
e30aca7014068a8caa8a74971f7570e9ea7895d6 | caab7613d987576c81b5823711c669aa922a2569 | /randompaths/drunk.py | 235aab9a0b109e2b153ce0a9bb68b26b7f7eb57d | [] | no_license | pabloantipan/statistic-computing | a4a3a1b0ff077922eb4ba81021a072d114dee420 | 893bf5497ba33d931e1298af361a7fbff7af1d29 | refs/heads/master | 2023-04-13T22:23:28.179570 | 2021-04-22T17:56:31 | 2021-04-22T17:56:31 | 360,635,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | from random import choice
class Drunk:
def __init__(self, name):
self.name = name
class TypicalDrunk(Drunk):
def __init__(self, name):
super().__init__(name)
def walk(self):
result = choice([(1, 0), (-1, 0), (0, 1), (0, -1)])
# print(result)
return result
| [
"pablo@"
] | pablo@ |
3747bf5aa3872e8330f62268b8a1445cfa36170f | bc6fe053b1b36ccfa03f7e2a4f1d514657b43e84 | /tatk/evaluator/multiwoz_eval.py | ab56800bf25c670ab3261e1907ffa332dc461b9f | [
"Apache-2.0"
] | permissive | sunweibo/tatk | 9bdf785140ad222ac377cd90186c2365fba4a349 | 81594e30a5410aba98b90d7cac47d31e25bb666c | refs/heads/master | 2020-06-13T20:43:33.019642 | 2019-07-03T03:14:20 | 2019-07-03T03:14:20 | 194,605,366 | 0 | 0 | null | 2019-07-01T05:27:49 | 2019-07-01T05:27:49 | null | UTF-8 | Python | false | false | 11,531 | py | # -*- coding: utf-8 -*-
import re
import numpy as np
from copy import deepcopy
from tatk.evaluator.evaluator import Evaluator
from tatk.util.multiwoz.dbquery import dbs
requestable = \
{'attraction': ['post', 'phone', 'addr', 'fee', 'area', 'type'],
'restaurant': ['addr', 'phone', 'post', 'ref', 'price', 'area', 'food'],
'train': ['ticket', 'time', 'ref', 'id', 'arrive', 'leave'],
'hotel': ['addr', 'post', 'phone', 'ref', 'price', 'internet', 'parking', 'area', 'type', 'stars'],
'taxi': ['car', 'phone'],
'hospital': ['post', 'phone', 'addr'],
'police': ['addr', 'post', 'phone']}
belief_domains = requestable.keys()
mapping = {'restaurant': {'addr': 'address', 'area': 'area', 'food': 'food', 'name': 'name', 'phone': 'phone', 'post': 'postcode', 'price': 'pricerange'},
'hotel': {'addr': 'address', 'area': 'area', 'internet': 'internet', 'parking': 'parking', 'name': 'name', 'phone': 'phone', 'post': 'postcode', 'price': 'pricerange', 'stars': 'stars', 'type': 'type'},
'attraction': {'addr': 'address', 'area': 'area', 'fee': 'entrance fee', 'name': 'name', 'phone': 'phone', 'post': 'postcode', 'type': 'type'},
'train': {'id': 'trainID', 'arrive': 'arriveBy', 'day': 'day', 'depart': 'departure', 'dest': 'destination', 'time': 'duration', 'leave': 'leaveAt', 'ticket': 'price'},
'taxi': {'car': 'car type', 'phone': 'phone'},
'hospital': {'post': 'postcode', 'phone': 'phone', 'addr': 'address', 'department': 'department'},
'police': {'post': 'postcode', 'phone': 'phone', 'addr': 'address'}}
class MultiWozEvaluator(Evaluator):
def __init__(self):
self.sys_da_array = []
self.usr_da_array = []
self.goal = {}
self.cur_domain = ''
self.booked = {}
def _init_dict(self):
dic = {}
for domain in belief_domains:
dic[domain] = {'info':{}, 'book':{}, 'reqt':[]}
return dic
def _init_dict_booked(self):
dic = {}
for domain in belief_domains:
dic[domain] = None
return dic
def _expand(self, _goal):
goal = deepcopy(_goal)
for domain in belief_domains:
if domain not in goal:
goal[domain] = {'info':{}, 'book':{}, 'reqt':[]}
continue
if 'info' not in goal[domain]:
goal[domain]['info'] = {}
if 'book' not in goal[domain]:
goal[domain]['book'] = {}
if 'reqt' not in goal[domain]:
goal[domain]['reqt'] = []
return goal
def add_goal(self, goal):
"""
init goal and array
args:
goal: dict[domain] dict['info'/'book'/'reqt'] dict/dict/list[slot]
"""
self.sys_da_array = []
self.usr_da_array = []
self.goal = goal
self.cur_domain = ''
self.booked = self._init_dict_booked()
def add_sys_da(self, da_turn):
"""
add sys_da into array
args:
da_turn: dict[domain-intent] list[slot, value]
"""
for dom_int in da_turn:
domain = dom_int.split('-')[0].lower()
if domain in belief_domains and domain != self.cur_domain:
self.cur_domain = domain
slot_pair = da_turn[dom_int]
for slot, value in slot_pair:
da = (dom_int +'-'+slot).lower()
value = str(value)
self.sys_da_array.append(da+'-'+value)
if da == 'booking-book-ref' and self.cur_domain in ['hotel', 'restaurant', 'train']:
if not self.booked[self.cur_domain] and re.match(r'^\d{8}$', value):
self.booked[self.cur_domain] = dbs[self.cur_domain][int(value)]
elif da == 'train-offerbook-ref' or da == 'train-inform-ref':
if not self.booked['train'] and re.match(r'^\d{8}$', value):
self.booked['train'] = dbs['train'][int(value)]
elif da == 'taxi-inform-car':
if not self.booked['taxi']:
self.booked['taxi'] = 'booked'
def add_usr_da(self, da_turn):
"""
add usr_da into array
args:
da_turn: dict[domain-intent] list[slot, value]
"""
for dom_int in da_turn:
domain = dom_int.split('-')[0].lower()
if domain in belief_domains and domain != self.cur_domain:
self.cur_domain = domain
slot_pair = da_turn[dom_int]
for slot, value in slot_pair:
da = (dom_int +'-'+slot).lower()
value = str(value)
self.usr_da_array.append(da+'-'+value)
def _book_rate_goal(self, goal, booked_entity, domains=None):
"""
judge if the selected entity meets the constraint
"""
if domains is None:
domains = belief_domains
score = []
for domain in domains:
if goal[domain]['book']:
tot = len(goal[domain]['info'].keys())
if tot == 0:
continue
entity = booked_entity[domain]
if entity is None:
score.append(0)
continue
if domain == 'taxi':
score.append(1)
continue
match = 0
for k, v in goal[domain]['info'].items():
if k in ['destination', 'departure', 'name']:
tot -= 1
elif k == 'leaveAt':
try:
v_constraint = int(v.split(':')[0]) * 100 + int(v.split(':')[1])
v_select = int(entity['leaveAt'].split(':')[0]) * 100 + int(entity['leaveAt'].split(':')[1])
if v_constraint <= v_select:
match += 1
except (ValueError, IndexError):
match += 1
elif k == 'arriveBy':
try:
v_constraint = int(v.split(':')[0]) * 100 + int(v.split(':')[1])
v_select = int(entity['arriveBy'].split(':')[0]) * 100 + int(entity['arriveBy'].split(':')[1])
if v_constraint >= v_select:
match += 1
except (ValueError, IndexError):
match += 1
else:
if v.strip() == entity[k].strip():
match += 1
if tot != 0:
score.append(match / tot)
return score
def _inform_F1_goal(self, goal, sys_history, domains=None):
"""
judge if all the requested information is answered
"""
if domains is None:
domains = belief_domains
inform_slot = {}
for domain in domains:
inform_slot[domain] = set()
for da in sys_history:
domain, intent, slot, value = da.split('-', 3)
if intent in ['inform', 'recommend', 'offerbook', 'offerbooked'] and domain in domains and slot in mapping[domain]:
inform_slot[domain].add(mapping[domain][slot])
TP, FP, FN = 0, 0, 0
for domain in domains:
for k in goal[domain]['reqt']:
if k in inform_slot[domain]:
TP += 1
else:
FN += 1
for k in inform_slot[domain]:
# exclude slots that are informed by users
if k not in goal[domain]['reqt'] \
and k not in goal[domain]['info'] \
and k in requestable[domain]:
FP += 1
return TP, FP, FN
def book_rate(self, ref2goal=True, aggregate=True):
if ref2goal:
goal = self._expand(self.goal)
else:
goal = self._init_dict()
for domain in belief_domains:
if domain in self.goal and 'book' in self.goal[domain]:
goal[domain]['book'] = self.goal[domain]['book']
for da in self.usr_da_array:
d, i, s, v = da.split('-', 3)
if i == 'inform' and s in mapping[d]:
goal[d]['info'][mapping[d][s]] = v
score = self._book_rate_goal(goal, self.booked)
if aggregate:
return np.mean(score) if score else None
else:
return score
def inform_F1(self, ref2goal=True, aggregate=True):
if ref2goal:
goal = self._expand(self.goal)
else:
goal = self._init_dict()
for da in self.usr_da_array:
d, i, s, v = da.split('-', 3)
if i == 'inform' and s in mapping[d]:
goal[d]['info'][mapping[d][s]] = v
elif i == 'request':
goal[d]['reqt'].append(s)
TP, FP, FN = self._inform_F1_goal(goal, self.sys_da_array)
if aggregate:
try:
rec = TP / (TP + FN)
except ZeroDivisionError:
return None, None, None
try:
prec = TP / (TP + FP)
F1 = 2 * prec * rec / (prec + rec)
except ZeroDivisionError:
return 0, rec, 0
return prec, rec, F1
else:
return [TP, FP, FN]
def task_success(self, ref2goal=True):
"""
judge if all the domains are successfully completed
"""
book_sess = self.book_rate(ref2goal)
inform_sess = self.inform_F1(ref2goal)
# book rate == 1 & inform recall == 1
if (book_sess == 1 and inform_sess[1] == 1) \
or (book_sess == 1 and inform_sess[1] is None) \
or (book_sess is None and inform_sess[1] == 1):
return 1
else:
return 0
def domain_success(self, domain, ref2goal=True):
"""
judge if the domain (subtask) is successfully completed
"""
if domain not in self.goal:
return None
if ref2goal:
goal = {}
goal[domain] = deepcopy(self.goal[domain])
else:
goal = {}
goal[domain] = {'info':{}, 'book':{}, 'reqt':[]}
if 'book' in self.goal[domain]:
goal[domain]['book'] = self.goal[domain]['book']
for da in self.usr_da_array:
d, i, s, v = da.split('-', 3)
if d != domain:
continue
if i == 'inform' and s in mapping[d]:
goal[d]['info'][mapping[d][s]] = v
elif i == 'request':
goal[d]['reqt'].append(s)
book_rate = self._book_rate_goal(goal, self.booked, [domain])
book_rate = np.mean(book_rate) if book_rate else None
inform = self._inform_F1_goal(goal, self.sys_da_array, [domain])
try:
inform_rec = inform[0] / (inform[0] + inform[2])
except ZeroDivisionError:
inform_rec = None
if (book_rate == 1 and inform_rec == 1) \
or (book_rate == 1 and inform_rec is None) \
or (book_rate is None and inform_rec == 1):
return 1
else:
return 0 | [
"truthless11@gmail.com"
] | truthless11@gmail.com |
f577a3b03a97fbc52623dc8f9709605b6a1264ce | 09ed2cc42182379e25050f7977c6d5dbb4625b9b | /help/urls.py | a4199889cf8ed205eccf4ac80f18c02aac8e31bb | [] | no_license | pravu02280/NewEmployeeManagement | 21b18655a72415b5d25600649f6ce7ebb013b74e | 10b65c58356680fb52add5faa5cfb52c25524593 | refs/heads/master | 2020-03-20T20:33:40.457708 | 2018-06-16T07:20:25 | 2018-06-16T07:20:25 | 137,693,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | from django.urls import path
from . import views
app_name = 'help'
urlpatterns = [
path('help/', views.HelpView.as_view(), name='help'),
]
| [
"ajaykarki333@gmail.com"
] | ajaykarki333@gmail.com |
2493401a3937714c9f738d96ff417df94d1d831b | d4068f40b36613e2899d40c80d776b98c6986e9d | /test/test_findx.py | 954bf196e0550a1b5508854070c9b0d8cc088146 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jayvdb/findx | 4952a10d8248b9d0cc2549c29d2d184056ec8e36 | 14fab1cd4140432e9482b55410ac651c31b1d59b | refs/heads/master | 2021-01-16T20:44:10.925139 | 2016-05-19T01:02:19 | 2016-05-19T01:02:19 | 61,614,151 | 0 | 0 | null | 2016-06-21T07:56:55 | 2016-06-21T07:56:55 | null | UTF-8 | Python | false | false | 6,351 | py | #!/usr/bin/env python
import unittest
import findx
class TestFindx(unittest.TestCase):
def test_io_error(self):
with self.assertRaises(IOError):
raise IOError()
def test_has_meta(self):
f = findx.Findx()
self.assertTrue(f.has_meta('one*'))
self.assertFalse(f.has_meta('/two/three'))
def test_get_option_list(self):
f = findx.Findx()
f.args = ['-type', 'f', '-print0', '-fprintf', 'myfile', '%f']
option_list = f.get_option_list()
self.assertEqual(option_list, ['-type', 'f'])
option_list = f.get_option_list()
self.assertEqual(option_list, ['-print0'])
option_list = f.get_option_list()
self.assertEqual(option_list, ['-fprintf', 'myfile', '%f'])
self.assertEqual(f.args, [])
def test_get_option_list_var(self):
f = findx.Findx()
f.args = ['-exec', 'grep', '-i', ';', 'word']
option_list = f.get_option_list()
self.assertEqual(option_list, ['-exec', 'grep', '-i', ';'])
self.assertEqual(f.args, ['word'])
def test_get_option_list_underflow(self):
f = findx.Findx()
f.args = ['-printf']
with self.assertRaises(findx.MissingArgumentError):
f.get_option_list()
def test_no_dirs(self):
f = findx.Findx()
f.parse_command_line(''.split())
self.assertEqual(f.expression, ''.split())
self.assertEqual(f.dirs, '.'.split())
def test_one_dir(self):
f = findx.Findx()
f.parse_command_line('someDir'.split())
self.assertEqual(f.expression, ''.split())
self.assertEqual(f.dirs, 'someDir'.split())
def test_root_dir(self):
f = findx.Findx()
f.parse_command_line('-root someRoot'.split())
self.assertEqual(f.expression, ''.split())
self.assertEqual(f.dirs, 'someRoot'.split())
def test_late_path(self):
f = findx.Findx()
f.parse_command_line('-print somePath anotherPath'.split())
self.assertEqual(f.dirs, 'somePath anotherPath'.split())
self.assertEqual(f.expression, '( -print )'.split())
def test_pre_post_path_options(self):
f = findx.Findx()
f.parse_command_line('-print somePath -L anotherPath -depth'.split())
self.assertEqual(f.pre_path_options, '-L'.split())
self.assertEqual(f.dirs, 'somePath anotherPath'.split())
self.assertEqual(f.post_path_options, '-depth'.split())
self.assertEqual(f.expression, '( -print )'.split())
def test_simple_cmd(self):
f = findx.Findx()
f.parse_command_line('-type f -a -print0'.split())
self.assertEqual(f.expression, '( -type f -a -print0 )'.split())
def test_glob(self):
f = findx.Findx()
f.parse_command_line('*.c'.split())
self.assertEqual(f.expression, '( -name *.c )'.split())
def test_exclude(self):
f = findx.Findx()
f.parse_command_line('-e -type f -name *.exe'.split())
self.assertEqual(f.expression, '( -name *.exe )'.split())
self.assertEqual(f.excludes, '-type f'.split())
def test_exclude2(self):
f = findx.Findx()
f.parse_command_line(
'-print -e ( -type f -name *.exe ) -print'.split())
self.assertEqual(f.expression, '( -print -print )'.split())
self.assertEqual(f.excludes, '( -type f -name *.exe )'.split())
def test_distribute_option(self):
f = findx.Findx()
a = f.distribute_option('-type', ['f'])
self.assertEqual(a, '-type f'.split())
a = f.distribute_option('-type', ['f', 'd'])
self.assertEqual(a, '( -type f -o -type d )'.split())
def test_find_braced_range(self):
f = findx.Findx()
self.assertEqual(f.find_braced_range('hello'), (-1, -1))
self.assertEqual(f.find_braced_range('{hello}'), (1, 6))
self.assertEqual(f.find_braced_range('{hello}', 1), (-1, -1))
self.assertEqual(f.find_braced_range('{hel{mom}lo}', 1), (5, 8))
self.assertEqual(f.find_braced_range('[{]hel{mom}lo}'), (7, 10))
def test_find_multi(self):
f = findx.Findx()
self.assertEqual(f.find_multi('abcd', ['a']), (0, 'a'))
self.assertEqual(f.find_multi('abcd', ['d', 'c']), (2, 'c'))
self.assertEqual(f.find_multi('abcd', ['b']), (1, 'b'))
self.assertEqual(f.find_multi('abcd', ['z']), (-1, ''))
def test_find_cut_points(self):
f = findx.Findx()
self.assertEqual(f.find_cut_points('a|b|c'), [1, 3])
self.assertEqual(f.find_cut_points(',,a|b|c'), [0, 1, 3, 5])
self.assertEqual(f.find_cut_points('hello'), [])
self.assertEqual(f.find_cut_points('one[,]two'), [])
self.assertEqual(f.find_cut_points('one{a,b}two'), [])
self.assertEqual(f.find_cut_points('one{a,b{two'), [5])
def test_split_glob_outside_braces(self):
f = findx.Findx()
self.assertEqual(f.split_glob_outside_braces(''), [''])
self.assertEqual(f.split_glob_outside_braces('one'), ['one'])
self.assertEqual(f.split_glob_outside_braces('one|two'),
['one', 'two'])
self.assertEqual(f.split_glob_outside_braces('on{e|t}wo'),
['on{e|t}wo'])
def test_split_glob(self):
f = findx.Findx()
self.assertEqual(f.split_glob(''), [''])
self.assertEqual(f.split_glob('a'), ['a'])
self.assertEqual(f.split_glob('a|b'), ['a', 'b'])
self.assertEqual(f.split_glob('a,b'), ['a', 'b'])
self.assertEqual(f.split_glob('*.c,?.[ch]'), ['*.c', '?.[ch]'])
self.assertEqual(f.split_glob('a[,]b'), ['a[,]b'])
self.assertEqual(f.split_glob('{a,b}'), ['a', 'b'])
self.assertEqual(f.split_glob('{a|b}'), ['a', 'b'])
self.assertEqual(f.split_glob('a{b,c}d'), ['abd', 'acd'])
self.assertEqual(f.split_glob('a{b|c}d'), ['abd', 'acd'])
self.assertEqual(f.split_glob('{a,b}{c,d}'), ['ac', 'ad', 'bc', 'bd'])
self.assertEqual(f.split_glob('a{b,c,d}e'), ['abe', 'ace', 'ade'])
self.assertEqual(f.split_glob('a{b,c[}]d'), ['a{b', 'c[}]d'])
self.assertEqual(f.split_glob('a{b,c{d,e}f}g'),
['abg', 'acdfg', 'acefg'])
self.assertEqual(f.split_glob('a{b{c|d}e}f'), ['a{bce}f', 'a{bde}f'])
| [
"drmikehenry@drmikehenry.com"
] | drmikehenry@drmikehenry.com |
59c5ad5afc59d98e891abcb08c9188c3899965a2 | f3db01c17883059b71f82feb17ebc9a5825321a9 | /LinkedList/linked list.py | 87cc9cbd15d88c64a06697ea06028d81c11af181 | [] | no_license | RoshanXingh/DataStructure | 14cebe54b52189e77bee58ec4461187e03ed898d | 716249f9ce91f97d7e9df2b2a9e1048fd7f96980 | refs/heads/master | 2023-03-20T22:49:04.362335 | 2021-03-03T11:33:38 | 2021-03-03T11:33:38 | 333,363,161 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,224 | py | class node:
def __init__(self, val):
self.val = val
self.next = None
class llist:
def __init__(self):
self.head = None
def disp(self):
temp = self.head
print("linked list is:", end = "")
while(temp):
print(temp.val, end = " ")
temp = temp.next
print("\n")
class operation:
def __init__(self, ll):
self.ll = ll
def insert(self, data, pos):
temp = self.ll.head
i = 0
tn = temp.next
while(i != int(pos)-1):
temp = temp.next
tn = temp.next
i += 1
temp.next = node(data)
t = temp.next
t.next = tn
def delete(self, ll, value):
headval = ll.head
if(headval is not None):
if(headval.val == value):
ll.head = headval.next
headval = None
return
while(headval is not None):
if (int(headval.val) == int(value)):
break
prev = headval
headval = headval.next
if(headval == None):
return
prev.next = headval.next
headval = None
def length(self):
temp = self.ll.head
flag = 0
while(temp):
temp = temp.next
flag += 1
return flag
def search(self, sr, l):
temp = self.ll.head
if(temp.val == sr):
print("value is found at head")
else:
flag = 1
while(flag <=l and temp.val != sr):
temp = temp.next
flag += 1
if(flag == l+1):
print("element not found")
else:
print("element is found at {} position".format(flag))
#def update():
if(__name__ == "__main__"):
ll = llist()
h = int(input("enter head of linked list:"))
ll.head = node(h)
val = []
v = list(map(int, input("enter data of linked list saparated by space:").split()))
for i in v:
val.append(node(i))
ll.head.next = val[0]
for i in range(len(val)-1):
val[i].next = val[i+1]
ll.disp()
i = 1
while(int(i) == 1):
op = operation(ll)
choise = int(input(""" choose from the following operation to perfore
1. Insert in linked list
2. Delete element form linked list
3. find length of linked list
4. Search element from linked list
5. exit
:"""))
if (choise == 1):
data, pos = input("\nenter value and position of element to add:").split()
op.insert(int(data), int(pos))
ll.disp()
elif(choise == 2):
value = int(input("\nenter value to delete:"))
op.delete(ll, value)
ll.disp()
elif(choise == 3):
print("length of linked list is:", op.length())
elif(choise ==4):
sr = int(input("enter value to search:"))
l = op.length()
op.search(sr, l)
elif (choise == 5):
break
else:
print("Please enter valid option")
i = input("enter 1 to repeat, or any other key to exit:")
| [
"roshanrajsingh38@outlook.com"
] | roshanrajsingh38@outlook.com |
068d8dcf221bf47491b057fbc7f3fdc5dde47b3d | bf8f258b02611260c406b1f320c7b4e6bc6820bb | /srm/migrations/0015_auto_20180312_1716.py | fe71aaf55689d86baed36fc555c0cf277d14b398 | [] | no_license | Tagolfirg/kmvit | ccc248e516a10c677dcad0e4706bc754222f732c | 4ac1b1604dbe634bd74de0970512619158874858 | refs/heads/master | 2020-07-06T12:18:42.473774 | 2019-06-07T09:54:00 | 2019-06-07T09:54:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,198 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-03-12 14:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('srm', '0014_deal_description'),
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('phone', models.CharField(max_length=12)),
('email', models.EmailField(max_length=254)),
('company', models.CharField(max_length=200)),
('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='srm.City')),
],
),
migrations.RemoveField(
model_name='deal',
name='city',
),
migrations.RemoveField(
model_name='deal',
name='company',
),
migrations.RemoveField(
model_name='deal',
name='phone',
),
]
| [
"root@justscoundrel.fvds.ru"
] | root@justscoundrel.fvds.ru |
dcb1463527e4ee20e0e214ae36dd87a6f99ce861 | 2a7fe6bba27b7fa29d1ceb274cba28469e1db21e | /Assignment 3/Source/svm.py | f63dcea7e8b78f4350d6bf3c9d1c851729c2369f | [] | no_license | iamgroot42/ML_assignments | 51625b86589fb15bb8b1c288c198660f1b77eb21 | c37b3dfb6f25217f24fe639cf33fc2a8a643127e | refs/heads/master | 2021-04-30T22:48:32.439363 | 2017-08-21T06:24:48 | 2017-08-21T06:24:48 | 66,487,399 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,563 | py | import misc
from sklearn.svm import SVC
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
import matplotlib.pyplot as plt
import numpy as np
from sklearn.externals import joblib
import warnings
# Ignore warnings based on convergence because of number of iterations
warnings.filterwarnings("ignore")
def SVM_fit_and_predict(X_train, Y_train, X_test, Y_test, c, kern, gam, plotROC = False):
# -1 : use all CPUs
digits = np.unique(Y_train)
multi_Y_train = label_binarize(Y_train, classes = digits)
multi_Y_test = label_binarize(Y_test, classes = digits)
SVM_K = OneVsRestClassifier(SVC(C = c, max_iter = 1000, kernel = kern, gamma = gam, verbose = True), -1)
SVM_K.fit(X_train, multi_Y_train)
if plotROC:
misc.plot_roc_curve(multi_Y_test, SVM_K.predict(X_test))
return SVM_K.score(X_test, multi_Y_test), SVM_K
def k_fold_cross_validation(X, Y, k, model, C, kernel, gamma):
start = 0
eff_k = int((k * len(Y))/100.0)
accuracies = []
for i in range(k):
# Split data
left_data = X[:start]
right_data = X[start + eff_k:]
modified_data = np.append(left_data, right_data, axis=0)
# Split labels
left_labels = Y[:start]
right_labels = Y[start + eff_k:]
modified_labels = left_labels
modified_labels = np.append(left_labels, right_labels, axis=0)
# Validation data
validation_data = X[start:start + eff_k]
validation_labels = Y[start:start + eff_k]
accuracies.append(model(modified_data, modified_labels,validation_data,
validation_labels, C, kernel, gamma)[0])
start += eff_k
mean_accuracy = np.mean(accuracies)
print "Cross validation accuracy for (",str(C),",",str(gamma),") :",mean_accuracy
return mean_accuracy
def grid_search(X, Y, k, model, kernel, grid1, grid2 = ['auto'], plot = False):
opt_val = (grid1[0], grid2[0])
opt_acc = 0.0
for gamma in grid2:
accuracies = []
for C in grid1:
accuracy = k_fold_cross_validation(X, Y, k, model, C, kernel, gamma)
accuracies.append(accuracy)
if accuracy > opt_acc:
opt_acc = accuracy
opt_val = (C, gamma)
if plot:
plt.figure()
plt.xlabel('Value of C')
plt.ylabel(str(k) + '-fold Cross validation accuracy')
plt.title('Accuracy v/s C, for gamma = ' + str(gamma))
plt.legend(loc="lower right")
plt.plot(grid1, accuracies, color = 'darkorange', lw = 2)
plt.show()
return opt_val
def training_phase(X_train, Y_train, X_test, Y_test):
C_grid = [1e-7, 1e-3, 1e1, 1e5]
gamma_grid = [1e-9, 1e-6, 1e-3]
# Part(a) : 3/8 binary classification
train_sampled_X, train_sampled_Y = misc.sample_data(X_train, Y_train, 2000)
test_sampled_X, test_sampled_Y = misc.sample_data(X_test, Y_test, 500)
EX, EY = misc.data_for_binary_classification(train_sampled_X, train_sampled_Y, 3, 8)
EX_, EY_ = misc.data_for_binary_classification(test_sampled_X, test_sampled_Y, 3, 8)
opt_C, opt_gamma = grid_search(EX, EY, 5, SVM_fit_and_predict, 'linear', C_grid)
test_accuracy, SVM_OBJ = SVM_fit_and_predict(EX, EY, EX_, EY_, opt_C, 'linear', opt_gamma, True)
print "Test accuracy for (",str(opt_C),",",str(opt_gamma),") :",test_accuracy
joblib.dump(SVM_OBJ, "../Models/model_linear.model")
# Part(b) : multi-class classification
opt_C, opt_gamma = grid_search(train_sampled_X, train_sampled_Y, 5, SVM_fit_and_predict, 'linear', C_grid)
test_accuracy, SVM_OBJ_2 = SVM_fit_and_predict(train_sampled_X, train_sampled_Y, test_sampled_X, test_sampled_Y, opt_C, 'linear', opt_gamma, True)
print "Test accuracy for (",str(opt_C),",",str(opt_gamma),") :",test_accuracy
joblib.dump(SVM_OBJ_2, "../Models/multi.model")
# misc.save_onevsall("../Models/multi", SVM_OBJ_2)
# Part(c) : RBF multi-class classification
opt_C, opt_gamma = grid_search(train_sampled_X, train_sampled_Y, 5, SVM_fit_and_predict, 'rbf', C_grid, gamma_grid)
test_accuracy, SVM_OBJ_3 = SVM_fit_and_predict(train_sampled_X, train_sampled_Y, test_sampled_X, test_sampled_Y, opt_C, 'rbf', opt_gamma, True)
print "Test accuracy for (",str(opt_C),",",str(opt_gamma),") :",test_accuracy
misc.save_onevsall("../Models/rbf", SVM_OBJ_3)
# joblib.dump(SVM_OBJ_3, "../Models/rbf.model")
def testing_phase(X_test, Y_test):
binary_digits = [3,8]
digits = [0,1,2,3,4,5,6,7,8,9]
EX_, EY_ = misc.data_for_binary_classification(X_test, Y_test, 3, 8)
binarized_EY_ = label_binarize(EY_, classes = binary_digits)
binarized_Y_test = label_binarize(Y_test, classes = digits)
# Part(a) : 3/8 binary classification
acc1,fpr1,tpr1 = misc.load_and_test_model("../Models/model_linear.model", EX_, binarized_EY_, True)
print "Test accuracy for [3,8] linear:", str(acc1)
misc.plot_roc_curve(fpr1,tpr1)
# Part(b) : multi-class classification
acc2,fpr2,tpr2 = misc.load_and_test_model("../Models/multi.model", X_test, binarized_Y_test, True)
print "Test accuracy for multi-linear:", str(acc2)
misc.plot_roc_curve(fpr2,tpr2)
# Part(c) : RBF multi-class classification
acc3,fpr3,tpr3 = misc.load_and_test_model("../Models/rbf.model", X_test, binarized_Y_test, True)
print "Test accuracy for multi-rbf:", str(acc3)
misc.plot_roc_curve(fpr3,tpr3)
misc.plot_roc_curve_together(fpr2,tpr2,fpr3,tpr3)
if __name__ == "__main__":
# Process data
X_train, Y_train = misc.process_data("MNIST/train-images.idx3-ubyte", "MNIST/train-labels.idx1-ubyte")
X_test, Y_test = misc.process_data("MNIST/t10k-images.idx3-ubyte", "MNIST/t10k-labels.idx1-ubyte")
test_sampled_X, test_sampled_Y = misc.sample_data(X_test, Y_test, 500)
# Train models
# training_phase(X_train, Y_train, X_test, Y_test)
# Test models
testing_phase(X_test, Y_test)
| [
"anshuman14021@iiitd.ac.in"
] | anshuman14021@iiitd.ac.in |
59d852a4343291babb24cabd51079b6380b8f50e | c7eefcabbbfa6efa94962494273dbb8f5d7fa29f | /tienda_mascotas/Tienda.py | fa18e5df3bc54de9f7a6fa8df7fdae2e00a8ccc3 | [] | no_license | parra06/taller1_lab_Python | 3f330623f2028c97090cc97cb14d1fe1e02284a9 | d575f59ba12a8c2f0e64204fe348406715261286 | refs/heads/main | 2023-08-11T06:33:07.863089 | 2021-10-05T00:52:54 | 2021-10-05T00:52:54 | 413,626,836 | 0 | 1 | null | 2021-10-05T13:29:59 | 2021-10-05T00:42:25 | Python | UTF-8 | Python | false | false | 7,890 | py | import os
import uuid
from tienda_mascotas.Dominio.Cuidador import Cuidador
from tienda_mascotas.Dominio.Especificacion import Especificacion
from tienda_mascotas.Dominio.Inventario import Inventario
from tienda_mascotas.Infraestructura.Operacion import Operacion
from tienda_mascotas.Dominio.Perro import Perro
from tienda_mascotas.Dominio.Configuracion import Configuracion
from tienda_mascotas.Dominio.Gato import Gato
from tienda_mascotas.Dominio.Hamster import Hamster
from tienda_mascotas.Dominio.Accesorio import Accesorio
from tienda_mascotas.Infraestructura.Persistencia import Persistencia
config = ""
def cargar_file():
global config
inventario.eliminar_listas()
for file in os.listdir("./Files"):
if '.json' in file:
inventario.agregar_objeto(saver.load_json(file))
for file in os.listdir("./config"):
if '.json' in file:
config=saver.load_json_config(file)
if __name__ == "__main__":
saver = Persistencia()
saver.connect()
inventario = Inventario()
cargar_file()
operacion = Operacion()
continuar=True
print(config.valor)
while continuar:
if config.valor == 'bd':
valor = int(input("\nEsta configurado para guardar en base de datos\n\n"
"Ingrese el numero 1 para continuar con esta configuracion \n"
"Ingrese el numero 2 para guardar con serializacion\n-->"))
if valor == 2:
os.remove('config/' + config.valor + '.json')
config.cambiar_valor('sr')
print("\nConfigurado con serializacion\n")
else:
print("\nConfigurado con base de datos\n")
else:
valor = int(input("\nEsta configurado para guardar con serializacion\n"
"Ingrese el numero 1 para continuar con esta configuracion \n"
"Ingrese el numero 2 para guardar con base de datos\n-->"))
if valor == 2:
os.remove('config/' + config.valor + '.json')
config.cambiar_valor('bd')
print("\nConfigurado con base de datos\n")
else:
print("\nConfigurado con serializacion\n")
saver.save_json(config)
opcion = int(input("\nPara ver inventarios ingrese 1\n"
"Para guardar una mascota ingrese 2\n"
"Para guardar un Accesorio ingrese 3\n"
"Para guardar un Cuidador ingrese 4\n"
"Para buscar Ingrese 5\n"
"Para vender un Accesorio o una mascota ingrese 6\n"
"Para salir ingrese 7\n-->"))
if opcion==1:
cargar_file()
opcion1=int(input("\nPara ver inventario Mascotas ingrese 1\n"
"Para ver inventario Accesorios ingrese 2\n"
"Para ver inventario Cuidadores ingrese 3\n"
"Para ver todos los inventarios ingrese 4\n-->"))
if opcion1 == 1:
if(len(list(inventario.mascotas)))==0:
print("\nNo hay Macotas en el Inventario\n")
else:
print("\nInventario Mascotas\n")
print(inventario.mascotas)
if opcion1 == 2:
if (len(list(inventario.accesorios))) == 0:
print("\nNo hay Accesorios en el Inventario\n")
else:
print("\nInventario Accesorios\n")
print(inventario.accesorios)
if opcion1 == 3:
if (len(list(inventario.cuidadores))) == 0:
print("\nNo hay Cuidadores en el Inventario\n")
else:
print("\nInventario Cuidadores\n")
print(inventario.cuidadores)
if opcion1 == 4:
if (len(list(inventario.mascotas))) == 0:
print("\nNo hay Macotas en el Inventario\n")
else:
print("\nInventario Mascotas\n")
print(inventario.mascotas)
if (len(list(inventario.accesorios))) == 0:
print("\nNo hay Accesorios en el Inventario\n")
else:
print("\nInventario Accesorios\n")
print(inventario.accesorios)
if (len(list(inventario.cuidadores))) == 0:
print("\nNo hay Cuidadores en el Inventario\n")
else:
print("\nInventario Cuidadores\n")
print(inventario.cuidadores)
##
if opcion == 2:
opcion2=int(input("\nPara guardar un Perro ingrese 1\n"
"Para guardar un Gato ingrese 2\n"
"Para guardar un Hamster ingrese 3\n-->"))
if opcion2 == 1:
print("\n")
nombre = input("Nombre: ")
raza = input("Raza: ")
edad = int(input("Edad: "))
color = input("Color: ")
peso = float(input("Peso: "))
precio = float(input("Precio: "))
perro = Perro(nombre,raza,edad,color,peso,precio)
if config.valor == 'bd':
saver.guardar_bd(perro)
else:
saver.save_json(perro)
if opcion2 == 2:
print("\n")
nombre = input("Nombre: ")
raza = input("Raza: ")
edad = int(input("Edad: "))
color = input("Color: ")
peso = float(input("Peso: "))
precio = float(input("Precio: "))
gato = Gato(nombre,raza,edad,color,peso,precio)
if config.valor == 'bd':
saver.guardar_bd(gato)
else:
saver.save_json(gato)
if opcion2 == 3:
print("\n")
nombre = input("Nombre: ")
edad = int(input("Edad: "))
color = input("Color: ")
peso = float(input("Peso: "))
longitud = float(input("Longitud: "))
precio = float(input("Precio: "))
hamster = Hamster(nombre,edad,color,peso,longitud,precio)
if config.valor == 'bd':
saver.guardar_bd(hamster)
else:
saver.save_json(hamster)
cargar_file()
if opcion == 3:
print("\n")
nombre = input("Nombre: ")
descripcion = input("Descripcion: ")
precio = float(input("Precio: "))
tipo_mascota = input("Tipo Mascota: ")
accesorio = Accesorio(nombre,descripcion,precio,tipo_mascota)
if config.valor == 'bd':
saver.guardar_bd(accesorio)
else:
saver.save_json(accesorio)
cargar_file()
if opcion == 4:
print("\n")
cedula = input("Cedula: ")
nombre = input("Nombre: ")
apellido = input("Apellido: ")
edad = int(input("Edad: "))
telefono = input("Telefono: ")
direccion = input("Direccion: ")
cuidador = Cuidador(cedula,nombre,apellido,edad,telefono,direccion)
if config.valor == 'bd':
saver.guardar_bd(cuidador)
else:
saver.save_json(cuidador)
cargar_file()
if opcion == 5:
print("\n")
operacion.buscar_objeto()
if opcion == 6:
print("\n")
operacion.vender()
if opcion == 7:
continuar = False
| [
"santiago.parra.0276@eam.edu.co"
] | santiago.parra.0276@eam.edu.co |
2418b92d287ca09345d43822beb2153d2bca0a13 | 3facea714ad064ed6170b1a49cc5dec7c5aa2b6e | /app/logging.py | f5f1eb89be77ed2eae009aed1496560ef62f2edd | [] | no_license | harindu95/CommentsMicroservice | b7da125eb85e9adc172584a972280fd5bc8c87be | 22d9bd058de5ae4347d3ed2bdf61708cfd807517 | refs/heads/master | 2022-10-20T08:01:30.413533 | 2020-04-03T01:45:04 | 2020-04-03T01:45:04 | 241,959,341 | 0 | 0 | null | 2022-09-16T18:21:43 | 2020-02-20T18:27:37 | Python | UTF-8 | Python | false | false | 248 | py | '''Global Logging configuration for the project'''
import logging
from datetime import datetime
filename = 'log/' + str(datetime.now()) +".log"
logging.basicConfig(filename=filename, filemode='a',format='%(asctime)s - %(message)s')
log = logging
| [
"harindudilshan95@gmail.com"
] | harindudilshan95@gmail.com |
75546a1cf320ecaf0eff09383bfb5530a403722d | c32abaf581b88e01969a8476e13eafcd382705df | /doc/sphinxext/compiler_unparse.py | 9d86f6e19ddec89bfe74041f493b0223a2c7d58d | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | juhasch/scikit-rf | c0e06573c545e0c115d34331f9e9d70243736412 | 3d51ba6c21b9a7a40cb7c3a6e7de4aae302c8a13 | refs/heads/master | 2021-01-17T18:15:07.976393 | 2016-08-24T15:26:06 | 2016-08-24T15:26:06 | 25,217,149 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,457 | py | """ Turn compiler.ast structures back into executable python code.
The unparse method takes a compiler.ast tree and transforms it back into
valid python code. It is incomplete and currently only works for
import statements, function calls, function definitions, assignments, and
basic expressions.
Inspired by python-2.5-svn/Demo/parser/unparse.py
fixme: We may want to move to using _ast trees because the compiler for
them is about 6 times faster than compiler.compile.
"""
import sys
import cStringIO
from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
def unparse(ast, single_line_functions=False):
s = cStringIO.StringIO()
UnparseCompilerAst(ast, s, single_line_functions)
return s.getvalue().lstrip()
op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2,
'compiler.ast.Add':1, 'compiler.ast.Sub':1 }
class UnparseCompilerAst:
""" Methods in this class recursively traverse an AST and
output source code for the abstract syntax; original formatting
is disregarged.
"""
#########################################################################
# object interface.
#########################################################################
def __init__(self, tree, file = sys.stdout, single_line_functions=False):
""" Unparser(tree, file=sys.stdout) -> None.
Print the source for tree to file.
"""
self.f = file
self._single_func = single_line_functions
self._do_indent = True
self._indent = 0
self._dispatch(tree)
self._write("\n")
self.f.flush()
#########################################################################
# Unparser private interface.
#########################################################################
### format, output, and dispatch methods ################################
def _fill(self, text = ""):
"Indent a piece of text, according to the current indentation level"
if self._do_indent:
self._write("\n"+" "*self._indent + text)
else:
self._write(text)
def _write(self, text):
"Append a piece of text to the current line."
self.f.write(text)
def _enter(self):
"Print ':', and increase the indentation."
self._write(": ")
self._indent += 1
def _leave(self):
"Decrease the indentation level."
self._indent -= 1
def _dispatch(self, tree):
"_dispatcher function, _dispatching tree type T to method _T."
if isinstance(tree, list):
for t in tree:
self._dispatch(t)
return
meth = getattr(self, "_"+tree.__class__.__name__)
if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
return
meth(tree)
#########################################################################
# compiler.ast unparsing methods.
#
# There should be one method per concrete grammar type. They are
# organized in alphabetical order.
#########################################################################
def _Add(self, t):
self.__binary_op(t, '+')
def _And(self, t):
self._write(" (")
for i, node in enumerate(t.nodes):
self._dispatch(node)
if i != len(t.nodes)-1:
self._write(") and (")
self._write(")")
def _AssAttr(self, t):
""" Handle assigning an attribute of an object
"""
self._dispatch(t.expr)
self._write('.'+t.attrname)
def _Assign(self, t):
""" Expression Assignment such as "a = 1".
This only handles assignment in expressions. Keyword assignment
is handled separately.
"""
self._fill()
for target in t.nodes:
self._dispatch(target)
self._write(" = ")
self._dispatch(t.expr)
if not self._do_indent:
self._write('; ')
def _AssName(self, t):
""" Name on left hand side of expression.
Treat just like a name on the right side of an expression.
"""
self._Name(t)
def _AssTuple(self, t):
""" Tuple on left hand side of an expression.
"""
# _write each elements, separated by a comma.
for element in t.nodes[:-1]:
self._dispatch(element)
self._write(", ")
# Handle the last one without writing comma
last_element = t.nodes[-1]
self._dispatch(last_element)
def _AugAssign(self, t):
""" +=,-=,*=,/=,**=, etc. operations
"""
self._fill()
self._dispatch(t.node)
self._write(' '+t.op+' ')
self._dispatch(t.expr)
if not self._do_indent:
self._write(';')
def _Bitand(self, t):
""" Bit and operation.
"""
for i, node in enumerate(t.nodes):
self._write("(")
self._dispatch(node)
self._write(")")
if i != len(t.nodes)-1:
self._write(" & ")
def _Bitor(self, t):
""" Bit or operation
"""
for i, node in enumerate(t.nodes):
self._write("(")
self._dispatch(node)
self._write(")")
if i != len(t.nodes)-1:
self._write(" | ")
def _CallFunc(self, t):
""" Function call.
"""
self._dispatch(t.node)
self._write("(")
comma = False
for e in t.args:
if comma: self._write(", ")
else: comma = True
self._dispatch(e)
if t.star_args:
if comma: self._write(", ")
else: comma = True
self._write("*")
self._dispatch(t.star_args)
if t.dstar_args:
if comma: self._write(", ")
else: comma = True
self._write("**")
self._dispatch(t.dstar_args)
self._write(")")
def _Compare(self, t):
self._dispatch(t.expr)
for op, expr in t.ops:
self._write(" " + op + " ")
self._dispatch(expr)
def _Const(self, t):
""" A constant value such as an integer value, 3, or a string, "hello".
"""
self._dispatch(t.value)
def _Decorators(self, t):
""" Handle function decorators (eg. @has_units)
"""
for node in t.nodes:
self._dispatch(node)
def _Dict(self, t):
self._write("{")
for i, (k, v) in enumerate(t.items):
self._dispatch(k)
self._write(": ")
self._dispatch(v)
if i < len(t.items)-1:
self._write(", ")
self._write("}")
def _Discard(self, t):
""" Node for when return value is ignored such as in "foo(a)".
"""
self._fill()
self._dispatch(t.expr)
def _Div(self, t):
self.__binary_op(t, '/')
def _Ellipsis(self, t):
self._write("...")
def _From(self, t):
""" Handle "from xyz import foo, bar as baz".
"""
# fixme: Are From and ImportFrom handled differently?
self._fill("from ")
self._write(t.modname)
self._write(" import ")
for i, (name,asname) in enumerate(t.names):
if i != 0:
self._write(", ")
self._write(name)
if asname is not None:
self._write(" as "+asname)
def _Function(self, t):
""" Handle function definitions
"""
if t.decorators is not None:
self._fill("@")
self._dispatch(t.decorators)
self._fill("def "+t.name + "(")
defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
for i, arg in enumerate(zip(t.argnames, defaults)):
self._write(arg[0])
if arg[1] is not None:
self._write('=')
self._dispatch(arg[1])
if i < len(t.argnames)-1:
self._write(', ')
self._write(")")
if self._single_func:
self._do_indent = False
self._enter()
self._dispatch(t.code)
self._leave()
self._do_indent = True
def _Getattr(self, t):
""" Handle getting an attribute of an object
"""
if isinstance(t.expr, (Div, Mul, Sub, Add)):
self._write('(')
self._dispatch(t.expr)
self._write(')')
else:
self._dispatch(t.expr)
self._write('.'+t.attrname)
def _If(self, t):
self._fill()
for i, (compare,code) in enumerate(t.tests):
if i == 0:
self._write("if ")
else:
self._write("elif ")
self._dispatch(compare)
self._enter()
self._fill()
self._dispatch(code)
self._leave()
self._write("\n")
if t.else_ is not None:
self._write("else")
self._enter()
self._fill()
self._dispatch(t.else_)
self._leave()
self._write("\n")
def _IfExp(self, t):
self._dispatch(t.then)
self._write(" if ")
self._dispatch(t.test)
if t.else_ is not None:
self._write(" else (")
self._dispatch(t.else_)
self._write(")")
def _Import(self, t):
""" Handle "import xyz.foo".
"""
self._fill("import ")
for i, (name,asname) in enumerate(t.names):
if i != 0:
self._write(", ")
self._write(name)
if asname is not None:
self._write(" as "+asname)
def _Keyword(self, t):
""" Keyword value assignment within function calls and definitions.
"""
self._write(t.name)
self._write("=")
self._dispatch(t.expr)
def _List(self, t):
self._write("[")
for i,node in enumerate(t.nodes):
self._dispatch(node)
if i < len(t.nodes)-1:
self._write(", ")
self._write("]")
def _Module(self, t):
if t.doc is not None:
self._dispatch(t.doc)
self._dispatch(t.node)
def _Mul(self, t):
self.__binary_op(t, '*')
def _Name(self, t):
self._write(t.name)
def _NoneType(self, t):
self._write("None")
def _Not(self, t):
self._write('not (')
self._dispatch(t.expr)
self._write(')')
def _Or(self, t):
self._write(" (")
for i, node in enumerate(t.nodes):
self._dispatch(node)
if i != len(t.nodes)-1:
self._write(") or (")
self._write(")")
def _Pass(self, t):
self._write("pass\n")
def _Printnl(self, t):
self._fill("print ")
if t.dest:
self._write(">> ")
self._dispatch(t.dest)
self._write(", ")
comma = False
for node in t.nodes:
if comma: self._write(', ')
else: comma = True
self._dispatch(node)
def _Power(self, t):
self.__binary_op(t, '**')
def _Return(self, t):
self._fill("return ")
if t.value:
if isinstance(t.value, Tuple):
text = ', '.join([ name.name for name in t.value.asList() ])
self._write(text)
else:
self._dispatch(t.value)
if not self._do_indent:
self._write('; ')
def _Slice(self, t):
self._dispatch(t.expr)
self._write("[")
if t.lower:
self._dispatch(t.lower)
self._write(":")
if t.upper:
self._dispatch(t.upper)
#if t.step:
# self._write(":")
# self._dispatch(t.step)
self._write("]")
def _Sliceobj(self, t):
for i, node in enumerate(t.nodes):
if i != 0:
self._write(":")
if not (isinstance(node, Const) and node.value is None):
self._dispatch(node)
def _Stmt(self, tree):
for node in tree.nodes:
self._dispatch(node)
def _Sub(self, t):
self.__binary_op(t, '-')
def _Subscript(self, t):
self._dispatch(t.expr)
self._write("[")
for i, value in enumerate(t.subs):
if i != 0:
self._write(",")
self._dispatch(value)
self._write("]")
def _TryExcept(self, t):
self._fill("try")
self._enter()
self._dispatch(t.body)
self._leave()
for handler in t.handlers:
self._fill('except ')
self._dispatch(handler[0])
if handler[1] is not None:
self._write(', ')
self._dispatch(handler[1])
self._enter()
self._dispatch(handler[2])
self._leave()
if t.else_:
self._fill("else")
self._enter()
self._dispatch(t.else_)
self._leave()
def _Tuple(self, t):
if not t.nodes:
# Empty tuple.
self._write("()")
else:
self._write("(")
# _write each elements, separated by a comma.
for element in t.nodes[:-1]:
self._dispatch(element)
self._write(", ")
# Handle the last one without writing comma
last_element = t.nodes[-1]
self._dispatch(last_element)
self._write(")")
def _UnaryAdd(self, t):
self._write("+")
self._dispatch(t.expr)
def _UnarySub(self, t):
self._write("-")
self._dispatch(t.expr)
def _With(self, t):
self._fill('with ')
self._dispatch(t.expr)
if t.vars:
self._write(' as ')
self._dispatch(t.vars.name)
self._enter()
self._dispatch(t.body)
self._leave()
self._write('\n')
def _int(self, t):
self._write(repr(t))
def __binary_op(self, t, symbol):
# Check if parenthesis are needed on left side and then dispatch
has_paren = False
left_class = str(t.left.__class__)
if (left_class in op_precedence.keys() and
op_precedence[left_class] < op_precedence[str(t.__class__)]):
has_paren = True
if has_paren:
self._write('(')
self._dispatch(t.left)
if has_paren:
self._write(')')
# Write the appropriate symbol for operator
self._write(symbol)
# Check if parenthesis are needed on the right side and then dispatch
has_paren = False
right_class = str(t.right.__class__)
if (right_class in op_precedence.keys() and
op_precedence[right_class] < op_precedence[str(t.__class__)]):
has_paren = True
if has_paren:
self._write('(')
self._dispatch(t.right)
if has_paren:
self._write(')')
def _float(self, t):
# if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001'
# We prefer str here.
self._write(str(t))
def _str(self, t):
self._write(repr(t))
def _tuple(self, t):
self._write(str(t))
#########################################################################
# These are the methods from the _ast modules unparse.
#
# As our needs to handle more advanced code increase, we may want to
# modify some of the methods below so that they work for compiler.ast.
#########################################################################
# # stmt
# def _Expr(self, tree):
# self._fill()
# self._dispatch(tree.value)
#
# def _Import(self, t):
# self._fill("import ")
# first = True
# for a in t.names:
# if first:
# first = False
# else:
# self._write(", ")
# self._write(a.name)
# if a.asname:
# self._write(" as "+a.asname)
#
## def _ImportFrom(self, t):
## self._fill("from ")
## self._write(t.module)
## self._write(" import ")
## for i, a in enumerate(t.names):
## if i == 0:
## self._write(", ")
## self._write(a.name)
## if a.asname:
## self._write(" as "+a.asname)
## # XXX(jpe) what is level for?
##
#
# def _Break(self, t):
# self._fill("break")
#
# def _Continue(self, t):
# self._fill("continue")
#
# def _Delete(self, t):
# self._fill("del ")
# self._dispatch(t.targets)
#
# def _Assert(self, t):
# self._fill("assert ")
# self._dispatch(t.test)
# if t.msg:
# self._write(", ")
# self._dispatch(t.msg)
#
# def _Exec(self, t):
# self._fill("exec ")
# self._dispatch(t.body)
# if t.globals:
# self._write(" in ")
# self._dispatch(t.globals)
# if t.locals:
# self._write(", ")
# self._dispatch(t.locals)
#
# def _Print(self, t):
# self._fill("print ")
# do_comma = False
# if t.dest:
# self._write(">>")
# self._dispatch(t.dest)
# do_comma = True
# for e in t.values:
# if do_comma:self._write(", ")
# else:do_comma=True
# self._dispatch(e)
# if not t.nl:
# self._write(",")
#
# def _Global(self, t):
# self._fill("global")
# for i, n in enumerate(t.names):
# if i != 0:
# self._write(",")
# self._write(" " + n)
#
# def _Yield(self, t):
# self._fill("yield")
# if t.value:
# self._write(" (")
# self._dispatch(t.value)
# self._write(")")
#
# def _Raise(self, t):
# self._fill('raise ')
# if t.type:
# self._dispatch(t.type)
# if t.inst:
# self._write(", ")
# self._dispatch(t.inst)
# if t.tback:
# self._write(", ")
# self._dispatch(t.tback)
#
#
# def _TryFinally(self, t):
# self._fill("try")
# self._enter()
# self._dispatch(t.body)
# self._leave()
#
# self._fill("finally")
# self._enter()
# self._dispatch(t.finalbody)
# self._leave()
#
# def _excepthandler(self, t):
# self._fill("except ")
# if t.type:
# self._dispatch(t.type)
# if t.name:
# self._write(", ")
# self._dispatch(t.name)
# self._enter()
# self._dispatch(t.body)
# self._leave()
#
# def _ClassDef(self, t):
# self._write("\n")
# self._fill("class "+t.name)
# if t.bases:
# self._write("(")
# for a in t.bases:
# self._dispatch(a)
# self._write(", ")
# self._write(")")
# self._enter()
# self._dispatch(t.body)
# self._leave()
#
# def _FunctionDef(self, t):
# self._write("\n")
# for deco in t.decorators:
# self._fill("@")
# self._dispatch(deco)
# self._fill("def "+t.name + "(")
# self._dispatch(t.args)
# self._write(")")
# self._enter()
# self._dispatch(t.body)
# self._leave()
#
# def _For(self, t):
# self._fill("for ")
# self._dispatch(t.target)
# self._write(" in ")
# self._dispatch(t.iter)
# self._enter()
# self._dispatch(t.body)
# self._leave()
# if t.orelse:
# self._fill("else")
# self._enter()
# self._dispatch(t.orelse)
# self._leave
#
# def _While(self, t):
# self._fill("while ")
# self._dispatch(t.test)
# self._enter()
# self._dispatch(t.body)
# self._leave()
# if t.orelse:
# self._fill("else")
# self._enter()
# self._dispatch(t.orelse)
# self._leave
#
# # expr
# def _Str(self, tree):
# self._write(repr(tree.s))
##
# def _Repr(self, t):
# self._write("`")
# self._dispatch(t.value)
# self._write("`")
#
# def _Num(self, t):
# self._write(repr(t.n))
#
# def _ListComp(self, t):
# self._write("[")
# self._dispatch(t.elt)
# for gen in t.generators:
# self._dispatch(gen)
# self._write("]")
#
# def _GeneratorExp(self, t):
# self._write("(")
# self._dispatch(t.elt)
# for gen in t.generators:
# self._dispatch(gen)
# self._write(")")
#
# def _comprehension(self, t):
# self._write(" for ")
# self._dispatch(t.target)
# self._write(" in ")
# self._dispatch(t.iter)
# for if_clause in t.ifs:
# self._write(" if ")
# self._dispatch(if_clause)
#
# def _IfExp(self, t):
# self._dispatch(t.body)
# self._write(" if ")
# self._dispatch(t.test)
# if t.orelse:
# self._write(" else ")
# self._dispatch(t.orelse)
#
# unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
# def _UnaryOp(self, t):
# self._write(self.unop[t.op.__class__.__name__])
# self._write("(")
# self._dispatch(t.operand)
# self._write(")")
#
# binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%",
# "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
# "FloorDiv":"//", "Pow": "**"}
# def _BinOp(self, t):
# self._write("(")
# self._dispatch(t.left)
# self._write(")" + self.binop[t.op.__class__.__name__] + "(")
# self._dispatch(t.right)
# self._write(")")
#
# boolops = {_ast.And: 'and', _ast.Or: 'or'}
# def _BoolOp(self, t):
# self._write("(")
# self._dispatch(t.values[0])
# for v in t.values[1:]:
# self._write(" %s " % self.boolops[t.op.__class__])
# self._dispatch(v)
# self._write(")")
#
# def _Attribute(self,t):
# self._dispatch(t.value)
# self._write(".")
# self._write(t.attr)
#
## def _Call(self, t):
## self._dispatch(t.func)
## self._write("(")
## comma = False
## for e in t.args:
## if comma: self._write(", ")
## else: comma = True
## self._dispatch(e)
## for e in t.keywords:
## if comma: self._write(", ")
## else: comma = True
## self._dispatch(e)
## if t.starargs:
## if comma: self._write(", ")
## else: comma = True
## self._write("*")
## self._dispatch(t.starargs)
## if t.kwargs:
## if comma: self._write(", ")
## else: comma = True
## self._write("**")
## self._dispatch(t.kwargs)
## self._write(")")
#
# # slice
# def _Index(self, t):
# self._dispatch(t.value)
#
# def _ExtSlice(self, t):
# for i, d in enumerate(t.dims):
# if i != 0:
# self._write(': ')
# self._dispatch(d)
#
# # others
# def _arguments(self, t):
# first = True
# nonDef = len(t.args)-len(t.defaults)
# for a in t.args[0:nonDef]:
# if first:first = False
# else: self._write(", ")
# self._dispatch(a)
# for a,d in zip(t.args[nonDef:], t.defaults):
# if first:first = False
# else: self._write(", ")
# self._dispatch(a),
# self._write("=")
# self._dispatch(d)
# if t.vararg:
# if first:first = False
# else: self._write(", ")
# self._write("*"+t.vararg)
# if t.kwarg:
# if first:first = False
# else: self._write(", ")
# self._write("**"+t.kwarg)
#
## def _keyword(self, t):
## self._write(t.arg)
## self._write("=")
## self._dispatch(t.value)
#
# def _Lambda(self, t):
# self._write("lambda ")
# self._dispatch(t.args)
# self._write(": ")
# self._dispatch(t.body)
| [
"arsenovic@virginia.edu"
] | arsenovic@virginia.edu |
4de84c65144d32f737bd1494faf70002d7bcf4cc | acee5b8a6ce1a3652ee56779b816b3c5b6860473 | /core_currucular/1_computer_vision_deep_learning_and_sensor_fusion/6_Camera_Calibration/1-6-12_correction_for_distorion/correction_for_distrorion.py | adff0f6c8abd323939a82c678f45a9e443c98357 | [] | no_license | willembressers/Self-Driving-Car-Engineer | 72404d836c25c707c4881b5a6f07e1fac7a61d49 | b7fe4239322a0e15ae94700356ce20bcfbbead55 | refs/heads/main | 2023-06-25T14:32:56.043728 | 2021-07-21T09:55:33 | 2021-07-21T09:55:33 | 333,805,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,137 | py | import pickle
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Read in the saved objpoints and imgpoints
dist_pickle = pickle.load( open( "wide_dist_pickle.p", "rb" ) )
objpoints = dist_pickle["objpoints"]
imgpoints = dist_pickle["imgpoints"]
# Read in an image
img = cv2.imread('test_image.png')
# TODO: Write a function that takes an image, object points, and image points
# performs the camera calibration, image distortion correction and
# returns the undistorted image
def cal_undistort(img, objpoints, imgpoints):
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (img.shape[1], img.shape[0]), None, None)
undist = cv2.undistort(img, mtx, dist, None, None)
return undist
undistorted = cal_undistort(img, objpoints, imgpoints)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=50)
ax2.imshow(undistorted)
ax2.set_title('Undistorted Image', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
plt.savefig("output.png") | [
"dhr.bressers@gmail.com"
] | dhr.bressers@gmail.com |
2a1ccbdec65259db16ecf3ce096abf6a859872e8 | 463b0163a19b92250f2afb4164834a3f49a7a022 | /predict_service/__init__.py | 8469f5e29b3fdf4fccc486ef821f5885214afdb5 | [] | no_license | jaeoheeail/carouhack_car_price | d0e0a46ff41b507811e536a2f56a91dd03b00fe7 | 8bf46e394ada8a305ce1ba3e58a7593c987884b1 | refs/heads/master | 2022-12-12T03:04:24.208799 | 2018-11-29T06:09:04 | 2018-11-29T06:09:04 | 159,538,672 | 0 | 0 | null | 2022-12-08T01:19:31 | 2018-11-28T17:18:56 | Jupyter Notebook | UTF-8 | Python | false | false | 509 | py | from flask import Flask
from flask.ext import restful
from flask import make_response
from bson.json_util import dumps
import generate_model as gm
app = Flask(__name__)
# generate model
gm.generate()
def output_json(obj, code, headers=None):
resp = make_response(dumps(obj), code)
resp.headers.extend(headers or {})
return resp
DEFAULT_REPRESENTATIONS = {'application/json': output_json}
api = restful.Api(app)
api.representations = DEFAULT_REPRESENTATIONS
import predict_service.resources | [
"joel.foo@thecarousell.com"
] | joel.foo@thecarousell.com |
d4680cbab2555a1a950320a97cbd4c41a6a47632 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03274/s114027037.py | 2b2da66514e2ca781e41c1741d1a6fb5f0ce210e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | n, k = map(int, input().split())
x = list(map(int, input().split()))
x_neg = []
x_pos = []
for xx in x:
if xx < 0:
x_neg.append(-xx)
else:
x_pos.append(xx)
x_neg = x_neg[::-1]
ans = float('inf')
if k <= len(x_neg):
ans = min(ans, x_neg[k-1])
if k <= len(x_pos):
ans = min(ans, x_pos[k-1])
for i in range(1, k):
j = k - i
if i <= len(x_pos) and 1 <= j <= len(x_neg):
d1 = x_pos[i - 1]
d2 = x_neg[j - 1]
d = min(d1, d2) * 2 + max(d1, d2)
ans = min(ans, d)
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
867512a6e0488410abd923d4f58989172dc54f99 | 05f9b3014a16c66d11315fb6f85b801bf49b7273 | /test-repo/fabscript/openstack/glance.py | 31524f4c3c2a7e08ebd666923b04f5bd54a29670 | [
"MIT"
] | permissive | syunkitada-archive/fabkit-fablib_openstack | b645a6a8646ab5c8a9d75a44b62d10db8e04a436 | 5edea4f8eab26adb7e8b031acc6b8ce8f9060eb6 | refs/heads/master | 2023-05-12T23:47:24.617656 | 2018-05-13T13:55:55 | 2018-05-13T13:55:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | # coding: utf-8
from fabkit import task, parallel
from fablib.openstack import Glance
@task
@parallel
def setup():
glance = Glance()
glance.setup()
return {'status': 1}
| [
"syun.kitada@gmail.com"
] | syun.kitada@gmail.com |
7491127d0c31d6e8396cb3a68a1ab0110dc69489 | a9113018b40043c1785b83e9cd1ef34d12586f22 | /Idawof_odj.py | 8c78566e071d4aa89313023454c4b8ba89ad58a4 | [] | no_license | mareku/Idawof_odj | 1880188b43e40c0d73021c095ef0bf9b30feb45b | a64541f84a0d593cc796f2428d00acf2230128df | refs/heads/master | 2020-06-07T10:30:17.299626 | 2019-06-20T23:51:59 | 2019-06-20T23:51:59 | 193,000,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,498 | py | # -*- coding: utf-8 -*-
import sys
import glob
import csv
import os
import shutil
import codecs
import re
import dircache
import lib.time
import lib.fold
import lib.name
import lib.filemove
# fuwファイルを読み込む
def chkcsv(s_file):
fopen=open(s_file, 'rb')
try:
#fuwファイルの設定を読み込む
for row in csv.reader(fopen):
key=row[0].decode('utf-8') #移動キーワード
ren=row[1].decode('utf-8') #リネームキーワード
pa1=row[2].decode('utf-8') #移動元
pa2=row[3].decode('utf-8') #移動先
#print key
#print ren
#print pa1
#print pa2
# ファイルの検索
chkfilesort(key,ren,pa1,pa2)
except:
print('Error:chkcsv')
finally:
fopen.close()
# ファイルを正規表現で取得
def chkfilesort(key,ren,pa1,pa2):
try:
cm = re.compile(key)
for file in dircache.listdir(pa1):
#正規表現でファイルをフィルタ
if cm.search(file):
#print '=== FILE: %s ===' %(file)
# 移動元フォルダ
#print 'MovingSource: %s' %(pa1)
# 移動先フォルダ
moveDes = chktimeformatif(file, pa1, pa2)
#print 'moveDes: %s' %(moveDes)
# フォルダチェック
chkfold = Idawof.fold.fold(pa1, moveDes)
if (chkfold.chkfold() == False): chkfold.chknewfold()
#リネーム
reName = Idawof.name.name(file, ren)
#print 'reName: %s' %(reName.chkrename())
#ファイル移動
MovingSource = os.path.join(pa1, file)
Destination = os.path.join(moveDes, reName.chkrename())
#fiMove = Idawof.filemove.filemove(MovingSource, \
# Destination)
#fiMove.chkfilremove()
chkfilemove(MovingSource, Destination)
except:
print('Error:chkfilesort')
return 0
# 取得日付判別
def chktimeformatif(file,pa1,pa2):
# パスに指定の文字列が含まれているか判断するために大文字に変換
ItisConvertedToUppercase=pa2.upper()
time = Conversion.time.time()
time.setSelf(file, pa1, pa2)
if 'NOW' in ItisConvertedToUppercase: #今日の日付
return time.chktimenow()
elif 'STAMP' in ItisConvertedToUppercase: #ファイルのタイムスタンプ
return time.chktimestamp()
else: # そのまま出力
return pa2
# ファイル移動
def chkfilemove(file,moveFilePath):
try:
if os.path.exists(file) == 1:
#移動先に同じファイルがあると移動しない
if os.path.exists(moveFilePath)==0:
#リネームで移動できるかテスト
os.rename(file,moveFilePath)
except:
print('Error:chkfilremove')
return 0
if __name__ == '__main__':
# 作業フォルダを変更
os.chdir(os.path.dirname(sys.argv[0]) or '.')
# 起動パラメーターチェック
argvs = sys.argv
if len(argvs) == 1: #パラメーターがない場合
chkcsv('sorting.fuw')
else: #パラメーターがある場合
if '.fuw' in argvs[1]:
chkcsv(argvs[1])
else:
print 'Not fuw File'
| [
"0day.kiddie+Git1@gmail.com"
] | 0day.kiddie+Git1@gmail.com |
d811341a8c4ae0850f3214d241293fa71c7afea8 | 6b5c466d250a53d63de039fbf4ee7da9f176b2dd | /Codeforces Problems/Drinks/Drinks.py | 1d139fb9f7a2d8bfaf7f8e3b06ab11693086a6c8 | [
"MIT"
] | permissive | Social-CodePlat/Comptt-Coding-Solutions | d57e59e71a176612905f3d40adf86d8a2221f9c4 | 240732e6c1a69e1124064bff4a27a5785a14b021 | refs/heads/master | 2023-04-21T09:23:52.073090 | 2021-05-27T18:20:55 | 2021-05-27T18:20:55 | 303,801,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | n=int(input())
arr=[float(x) for x in input().split()]
perc=((sum(arr)/(100*len(arr)))*100)
print("{:.11f}".format(perc))
| [
"rajattheonlyhero@gmail.com"
] | rajattheonlyhero@gmail.com |
68c058d787413bd34d8500a3f70478e263c2b758 | 7312266874e50682cf909f4b77260c9a69f13999 | /python/packages/scipy-0.6.0/scipy/sandbox/timeseries/tests/test_dates.py | 2bdd73247012993079b6d4fa2caf808d0c77d668 | [
"LicenseRef-scancode-egenix-1.0.0"
] | permissive | mbentz80/jzigbeercp | e354695b90a72c7fe3c5c7ec7d197d9cbc18d7d9 | 1a49320df3db13d0a06fddb30cf748b07e5ba5f0 | refs/heads/master | 2021-01-02T22:44:16.088783 | 2008-08-27T23:05:47 | 2008-08-27T23:05:47 | 40,231 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 47,237 | py | # pylint: disable-msg=W0611, W0612, W0511,R0201
"""Tests suite for Date handling.
:author: Pierre Gerard-Marchant & Matt Knox
:contact: pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
:version: $Id: test_dates.py 3040 2007-05-24 17:11:57Z mattknox_ca $
"""
__author__ = "Pierre GF Gerard-Marchant ($Author: mattknox_ca $)"
__version__ = '1.0'
__revision__ = "$Revision: 3040 $"
__date__ = '$Date: 2007-05-24 10:11:57 -0700 (Thu, 24 May 2007) $'
import types
import datetime
import numpy
import numpy.core.fromnumeric as fromnumeric
import numpy.core.numeric as numeric
from numpy.testing import NumpyTest, NumpyTestCase
from numpy.testing.utils import build_err_msg
import maskedarray
from maskedarray import masked_array
import maskedarray.testutils
from maskedarray.testutils import assert_equal, assert_array_equal
import timeseries as ts
from timeseries import const as C
from timeseries.parser import DateFromString, DateTimeFromString
from timeseries import *
from timeseries.cseries import freq_dict
class test_creation(NumpyTestCase):
"Base test class for MaskedArrays."
def __init__(self, *args, **kwds):
NumpyTestCase.__init__(self, *args, **kwds)
def test_fromstrings(self):
"Tests creation from list of strings"
print "starting test_fromstrings..."
dlist = ['2007-01-%02i' % i for i in range(1,15)]
# A simple case: daily data
dates = date_array_fromlist(dlist, 'D')
assert_equal(dates.freqstr,'D')
assert(dates.isfull())
assert(not dates.has_duplicated_dates())
assert_equal(dates, 732677+numpy.arange(len(dlist)))
# as simple, but we need to guess the frequency this time
dates = date_array_fromlist(dlist, 'D')
assert_equal(dates.freqstr,'D')
assert(dates.isfull())
assert(not dates.has_duplicated_dates())
assert_equal(dates, 732677+numpy.arange(len(dlist)))
# Still daily data, that we force to month
dates = date_array_fromlist(dlist, 'M')
assert_equal(dates.freqstr,'M')
assert(not dates.isfull())
assert(dates.has_duplicated_dates())
assert_equal(dates, [24073]*len(dlist))
# Now, for monthly data
dlist = ['2007-%02i' % i for i in range(1,13)]
dates = date_array_fromlist(dlist, 'M')
assert_equal(dates.freqstr,'M')
assert(dates.isfull())
assert(not dates.has_duplicated_dates())
assert_equal(dates, 24073 + numpy.arange(12))
# Monthly data w/ guessing
dlist = ['2007-%02i' % i for i in range(1,13)]
dates = date_array_fromlist(dlist, )
assert_equal(dates.freqstr,'M')
assert(dates.isfull())
assert(not dates.has_duplicated_dates())
assert_equal(dates, 24073 + numpy.arange(12))
print "finished test_fromstrings"
def test_fromstrings_wmissing(self):
"Tests creation from list of strings w/ missing dates"
print "starting test_fromstrings_wmissing..."
dlist = ['2007-01-%02i' % i for i in (1,2,4,5,7,8,10,11,13)]
dates = date_array_fromlist(dlist)
assert_equal(dates.freqstr,'U')
assert(not dates.isfull())
assert(not dates.has_duplicated_dates())
assert_equal(dates.tovalue(),732676+numpy.array([1,2,4,5,7,8,10,11,13]))
#
ddates = date_array_fromlist(dlist, 'D')
assert_equal(ddates.freqstr,'D')
assert(not ddates.isfull())
assert(not ddates.has_duplicated_dates())
#
mdates = date_array_fromlist(dlist, 'M')
assert_equal(mdates.freqstr,'M')
assert(not dates.isfull())
assert(mdates.has_duplicated_dates())
print "finished test_fromstrings_wmissing"
#
def test_fromsobjects(self):
"Tests creation from list of objects."
print "starting test_fromsobjects..."
dlist = ['2007-01-%02i' % i for i in (1,2,4,5,7,8,10,11,13)]
dates = date_array_fromlist(dlist)
dobj = [datetime.datetime.fromordinal(d) for d in dates.toordinal()]
odates = date_array_fromlist(dobj)
assert_equal(dates,odates)
dobj = [DateFromString(d) for d in dlist]
odates = date_array_fromlist(dobj)
assert_equal(dates,odates)
#
D = date_array_fromlist(dlist=['2006-01'])
assert_equal(D.tovalue(), [732312, ])
assert_equal(D.freq, C.FR_UND)
print "finished test_fromsobjects"
def test_consistent_value(self):
"Tests that values don't get mutated when constructing dates from a value"
print "starting test_consistent_value..."
freqs = [x[0] for x in freq_dict.values() if x[0] != 'U']
for f in freqs:
today = thisday(f)
assert_equal(Date(freq=f, value=today.value), today)
print "finished test_consistent_value"
def test_shortcuts(self):
"Tests some creation shortcuts. Because I'm lazy like that."
print "starting test_shortcuts..."
# Dates shortcuts
assert_equal(Date('D','2007-01'), Date('D',string='2007-01'))
assert_equal(Date('D','2007-01'), Date('D', value=732677))
assert_equal(Date('D',732677), Date('D', value=732677))
# DateArray shortcuts
n = today('M')
d = date_array(start_date=n, length=3)
assert_equal(date_array(n,length=3), d)
assert_equal(date_array(n, n+2), d)
print "finished test_shortcuts"
class test_date_properties(NumpyTestCase):
"Test properties such as year, month, day_of_week, etc...."
def __init__(self, *args, **kwds):
NumpyTestCase.__init__(self, *args, **kwds)
def test_properties(self):
a_date = Date(freq='A', year=2007)
q_date = Date(freq=C.FR_QTREDEC, year=2007, quarter=1)
qedec_date = Date(freq=C.FR_QTREDEC, year=2007, quarter=1)
qejan_date = Date(freq=C.FR_QTREJAN, year=2007, quarter=1)
qejun_date = Date(freq=C.FR_QTREJUN, year=2007, quarter=1)
qsdec_date = Date(freq=C.FR_QTREDEC, year=2007, quarter=1)
qsjan_date = Date(freq=C.FR_QTREJAN, year=2007, quarter=1)
qsjun_date = Date(freq=C.FR_QTREJUN, year=2007, quarter=1)
m_date = Date(freq='M', year=2007, month=1)
w_date = Date(freq='W', year=2007, month=1, day=7)
b_date = Date(freq='B', year=2007, month=1, day=1)
d_date = Date(freq='D', year=2007, month=1, day=1)
h_date = Date(freq='H', year=2007, month=1, day=1,
hour=0)
t_date = Date(freq='T', year=2007, month=1, day=1,
hour=0, minute=0)
s_date = Date(freq='T', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
assert_equal(a_date.year, 2007)
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date,
qsdec_date, qsjan_date, qsjun_date):
assert_equal((qd+x).qyear, 2007)
assert_equal((qd+x).quarter, x+1)
for x in range(11):
m_date_x = m_date+x
assert_equal(m_date_x.year, 2007)
if 1 <= x + 1 <= 3: assert_equal(m_date_x.quarter, 1)
elif 4 <= x + 1 <= 6: assert_equal(m_date_x.quarter, 2)
elif 7 <= x + 1 <= 9: assert_equal(m_date_x.quarter, 3)
elif 10 <= x + 1 <= 12: assert_equal(m_date_x.quarter, 4)
assert_equal(m_date_x.month, x+1)
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date-1).week, 52)
assert_equal(b_date.year, 2007)
assert_equal(b_date.quarter, 1)
assert_equal(b_date.month, 1)
assert_equal(b_date.day, 1)
assert_equal(b_date.day_of_week, 0)
assert_equal(b_date.day_of_year, 1)
assert_equal(d_date.year, 2007)
assert_equal(d_date.quarter, 1)
assert_equal(d_date.month, 1)
assert_equal(d_date.day, 1)
assert_equal(d_date.day_of_week, 0)
assert_equal(d_date.day_of_year, 1)
assert_equal(h_date.year, 2007)
assert_equal(h_date.quarter, 1)
assert_equal(h_date.month, 1)
assert_equal(h_date.day, 1)
assert_equal(h_date.day_of_week, 0)
assert_equal(h_date.day_of_year, 1)
assert_equal(h_date.hour, 0)
assert_equal(t_date.year, 2007)
assert_equal(t_date.quarter, 1)
assert_equal(t_date.month, 1)
assert_equal(t_date.day, 1)
assert_equal(t_date.day_of_week, 0)
assert_equal(t_date.day_of_year, 1)
assert_equal(t_date.hour, 0)
assert_equal(t_date.minute, 0)
assert_equal(s_date.year, 2007)
assert_equal(s_date.quarter, 1)
assert_equal(s_date.month, 1)
assert_equal(s_date.day, 1)
assert_equal(s_date.day_of_week, 0)
assert_equal(s_date.day_of_year, 1)
assert_equal(s_date.hour, 0)
assert_equal(s_date.minute, 0)
assert_equal(s_date.second, 0)
def dArrayWrap(date):
"wrap a date into a DateArray of length 1"
return date_array(start_date=date,length=1)
def noWrap(item): return item
class test_freq_conversion(NumpyTestCase):
"Test frequency conversion of date objects"
def __init__(self, *args, **kwds):
NumpyTestCase.__init__(self, *args, **kwds)
self.dateWrap = [(dArrayWrap, assert_array_equal),
(noWrap, assert_equal)]
def test_conv_annual(self):
"frequency conversion tests: from Annual Frequency"
for dWrap, assert_func in self.dateWrap:
date_A = dWrap(Date(freq='A', year=2007))
date_AJAN = dWrap(Date(freq=C.FR_ANNJAN, year=2007))
date_AJUN = dWrap(Date(freq=C.FR_ANNJUN, year=2007))
date_ANOV = dWrap(Date(freq=C.FR_ANNNOV, year=2007))
date_A_to_Q_before = dWrap(Date(freq='Q', year=2007, quarter=1))
date_A_to_Q_after = dWrap(Date(freq='Q', year=2007, quarter=4))
date_A_to_M_before = dWrap(Date(freq='M', year=2007, month=1))
date_A_to_M_after = dWrap(Date(freq='M', year=2007, month=12))
date_A_to_W_before = dWrap(Date(freq='W', year=2007, month=1, day=1))
date_A_to_W_after = dWrap(Date(freq='W', year=2007, month=12, day=31))
date_A_to_B_before = dWrap(Date(freq='B', year=2007, month=1, day=1))
date_A_to_B_after = dWrap(Date(freq='B', year=2007, month=12, day=31))
date_A_to_D_before = dWrap(Date(freq='D', year=2007, month=1, day=1))
date_A_to_D_after = dWrap(Date(freq='D', year=2007, month=12, day=31))
date_A_to_H_before = dWrap(Date(freq='H', year=2007, month=1, day=1,
hour=0))
date_A_to_H_after = dWrap(Date(freq='H', year=2007, month=12, day=31,
hour=23))
date_A_to_T_before = dWrap(Date(freq='T', year=2007, month=1, day=1,
hour=0, minute=0))
date_A_to_T_after = dWrap(Date(freq='T', year=2007, month=12, day=31,
hour=23, minute=59))
date_A_to_S_before = dWrap(Date(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0))
date_A_to_S_after = dWrap(Date(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59))
date_AJAN_to_D_after = dWrap(Date(freq='D', year=2007, month=1, day=31))
date_AJAN_to_D_before = dWrap(Date(freq='D', year=2006, month=2, day=1))
date_AJUN_to_D_after = dWrap(Date(freq='D', year=2007, month=6, day=30))
date_AJUN_to_D_before = dWrap(Date(freq='D', year=2006, month=7, day=1))
date_ANOV_to_D_after = dWrap(Date(freq='D', year=2007, month=11, day=30))
date_ANOV_to_D_before = dWrap(Date(freq='D', year=2006, month=12, day=1))
assert_func(date_A.asfreq('Q', "BEFORE"), date_A_to_Q_before)
assert_func(date_A.asfreq('Q', "AFTER"), date_A_to_Q_after)
assert_func(date_A.asfreq('M', "BEFORE"), date_A_to_M_before)
assert_func(date_A.asfreq('M', "AFTER"), date_A_to_M_after)
assert_func(date_A.asfreq('W', "BEFORE"), date_A_to_W_before)
assert_func(date_A.asfreq('W', "AFTER"), date_A_to_W_after)
assert_func(date_A.asfreq('B', "BEFORE"), date_A_to_B_before)
assert_func(date_A.asfreq('B', "AFTER"), date_A_to_B_after)
assert_func(date_A.asfreq('D', "BEFORE"), date_A_to_D_before)
assert_func(date_A.asfreq('D', "AFTER"), date_A_to_D_after)
assert_func(date_A.asfreq('H', "BEFORE"), date_A_to_H_before)
assert_func(date_A.asfreq('H', "AFTER"), date_A_to_H_after)
assert_func(date_A.asfreq('T', "BEFORE"), date_A_to_T_before)
assert_func(date_A.asfreq('T', "AFTER"), date_A_to_T_after)
assert_func(date_A.asfreq('S', "BEFORE"), date_A_to_S_before)
assert_func(date_A.asfreq('S', "AFTER"), date_A_to_S_after)
assert_func(date_AJAN.asfreq('D', "BEFORE"), date_AJAN_to_D_before)
assert_func(date_AJAN.asfreq('D', "AFTER"), date_AJAN_to_D_after)
assert_func(date_AJUN.asfreq('D', "BEFORE"), date_AJUN_to_D_before)
assert_func(date_AJUN.asfreq('D', "AFTER"), date_AJUN_to_D_after)
assert_func(date_ANOV.asfreq('D', "BEFORE"), date_ANOV_to_D_before)
assert_func(date_ANOV.asfreq('D', "AFTER"), date_ANOV_to_D_after)
def test_conv_quarterly(self):
"frequency conversion tests: from Quarterly Frequency"
for dWrap, assert_func in self.dateWrap:
date_Q = dWrap(Date(freq='Q', year=2007, quarter=1))
date_Q_end_of_year = dWrap(Date(freq='Q', year=2007, quarter=4))
date_QEJAN = dWrap(Date(freq=C.FR_QTREJAN, year=2007, quarter=1))
date_QEJUN = dWrap(Date(freq=C.FR_QTREJUN, year=2007, quarter=1))
date_QSJAN = dWrap(Date(freq=C.FR_QTRSJAN, year=2007, quarter=1))
date_QSJUN = dWrap(Date(freq=C.FR_QTRSJUN, year=2007, quarter=1))
date_QSDEC = dWrap(Date(freq=C.FR_QTRSDEC, year=2007, quarter=1))
date_Q_to_A = dWrap(Date(freq='A', year=2007))
date_Q_to_M_before = dWrap(Date(freq='M', year=2007, month=1))
date_Q_to_M_after = dWrap(Date(freq='M', year=2007, month=3))
date_Q_to_W_before = dWrap(Date(freq='W', year=2007, month=1, day=1))
date_Q_to_W_after = dWrap(Date(freq='W', year=2007, month=3, day=31))
date_Q_to_B_before = dWrap(Date(freq='B', year=2007, month=1, day=1))
date_Q_to_B_after = dWrap(Date(freq='B', year=2007, month=3, day=30))
date_Q_to_D_before = dWrap(Date(freq='D', year=2007, month=1, day=1))
date_Q_to_D_after = dWrap(Date(freq='D', year=2007, month=3, day=31))
date_Q_to_H_before = dWrap(Date(freq='H', year=2007, month=1, day=1,
hour=0))
date_Q_to_H_after = dWrap(Date(freq='H', year=2007, month=3, day=31,
hour=23))
date_Q_to_T_before = dWrap(Date(freq='T', year=2007, month=1, day=1,
hour=0, minute=0))
date_Q_to_T_after = dWrap(Date(freq='T', year=2007, month=3, day=31,
hour=23, minute=59))
date_Q_to_S_before = dWrap(Date(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0))
date_Q_to_S_after = dWrap(Date(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59))
date_QEJAN_to_D_before = dWrap(Date(freq='D', year=2006, month=2, day=1))
date_QEJAN_to_D_after = dWrap(Date(freq='D', year=2006, month=4, day=30))
date_QEJUN_to_D_before = dWrap(Date(freq='D', year=2006, month=7, day=1))
date_QEJUN_to_D_after = dWrap(Date(freq='D', year=2006, month=9, day=30))
date_QSJAN_to_D_before = dWrap(Date(freq='D', year=2007, month=2, day=1))
date_QSJAN_to_D_after = dWrap(Date(freq='D', year=2007, month=4, day=30))
date_QSJUN_to_D_before = dWrap(Date(freq='D', year=2007, month=7, day=1))
date_QSJUN_to_D_after = dWrap(Date(freq='D', year=2007, month=9, day=30))
date_QSDEC_to_D_before = dWrap(Date(freq='D', year=2007, month=1, day=1))
date_QSDEC_to_D_after = dWrap(Date(freq='D', year=2007, month=3, day=31))
assert_func(date_Q.asfreq('A'), date_Q_to_A)
assert_func(date_Q_end_of_year.asfreq('A'), date_Q_to_A)
assert_func(date_Q.asfreq('M', "BEFORE"), date_Q_to_M_before)
assert_func(date_Q.asfreq('M', "AFTER"), date_Q_to_M_after)
assert_func(date_Q.asfreq('W', "BEFORE"), date_Q_to_W_before)
assert_func(date_Q.asfreq('W', "AFTER"), date_Q_to_W_after)
assert_func(date_Q.asfreq('B', "BEFORE"), date_Q_to_B_before)
assert_func(date_Q.asfreq('B', "AFTER"), date_Q_to_B_after)
assert_func(date_Q.asfreq('D', "BEFORE"), date_Q_to_D_before)
assert_func(date_Q.asfreq('D', "AFTER"), date_Q_to_D_after)
assert_func(date_Q.asfreq('H', "BEFORE"), date_Q_to_H_before)
assert_func(date_Q.asfreq('H', "AFTER"), date_Q_to_H_after)
assert_func(date_Q.asfreq('T', "BEFORE"), date_Q_to_T_before)
assert_func(date_Q.asfreq('T', "AFTER"), date_Q_to_T_after)
assert_func(date_Q.asfreq('S', "BEFORE"), date_Q_to_S_before)
assert_func(date_Q.asfreq('S', "AFTER"), date_Q_to_S_after)
assert_func(date_QEJAN.asfreq('D', "BEFORE"), date_QEJAN_to_D_before)
assert_func(date_QEJAN.asfreq('D', "AFTER"), date_QEJAN_to_D_after)
assert_func(date_QEJUN.asfreq('D', "BEFORE"), date_QEJUN_to_D_before)
assert_func(date_QEJUN.asfreq('D', "AFTER"), date_QEJUN_to_D_after)
assert_func(date_QSJAN.asfreq('D', "BEFORE"), date_QSJAN_to_D_before)
assert_func(date_QSJAN.asfreq('D', "AFTER"), date_QSJAN_to_D_after)
assert_func(date_QSJUN.asfreq('D', "BEFORE"), date_QSJUN_to_D_before)
assert_func(date_QSJUN.asfreq('D', "AFTER"), date_QSJUN_to_D_after)
assert_func(date_QSDEC.asfreq('D', "BEFORE"), date_QSDEC_to_D_before)
assert_func(date_QSDEC.asfreq('D', "AFTER"), date_QSDEC_to_D_after)
def test_conv_monthly(self):
"frequency conversion tests: from Monthly Frequency"
for dWrap, assert_func in self.dateWrap:
date_M = dWrap(Date(freq='M', year=2007, month=1))
date_M_end_of_year = dWrap(Date(freq='M', year=2007, month=12))
date_M_end_of_quarter = dWrap(Date(freq='M', year=2007, month=3))
date_M_to_A = dWrap(Date(freq='A', year=2007))
date_M_to_Q = dWrap(Date(freq='Q', year=2007, quarter=1))
date_M_to_W_before = dWrap(Date(freq='W', year=2007, month=1, day=1))
date_M_to_W_after = dWrap(Date(freq='W', year=2007, month=1, day=31))
date_M_to_B_before = dWrap(Date(freq='B', year=2007, month=1, day=1))
date_M_to_B_after = dWrap(Date(freq='B', year=2007, month=1, day=31))
date_M_to_D_before = dWrap(Date(freq='D', year=2007, month=1, day=1))
date_M_to_D_after = dWrap(Date(freq='D', year=2007, month=1, day=31))
date_M_to_H_before = dWrap(Date(freq='H', year=2007, month=1, day=1,
hour=0))
date_M_to_H_after = dWrap(Date(freq='H', year=2007, month=1, day=31,
hour=23))
date_M_to_T_before = dWrap(Date(freq='T', year=2007, month=1, day=1,
hour=0, minute=0))
date_M_to_T_after = dWrap(Date(freq='T', year=2007, month=1, day=31,
hour=23, minute=59))
date_M_to_S_before = dWrap(Date(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0))
date_M_to_S_after = dWrap(Date(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59))
assert_func(date_M.asfreq('A'), date_M_to_A)
assert_func(date_M_end_of_year.asfreq('A'), date_M_to_A)
assert_func(date_M.asfreq('Q'), date_M_to_Q)
assert_func(date_M_end_of_quarter.asfreq('Q'), date_M_to_Q)
assert_func(date_M.asfreq('W', "BEFORE"), date_M_to_W_before)
assert_func(date_M.asfreq('W', "AFTER"), date_M_to_W_after)
assert_func(date_M.asfreq('B', "BEFORE"), date_M_to_B_before)
assert_func(date_M.asfreq('B', "AFTER"), date_M_to_B_after)
assert_func(date_M.asfreq('D', "BEFORE"), date_M_to_D_before)
assert_func(date_M.asfreq('D', "AFTER"), date_M_to_D_after)
assert_func(date_M.asfreq('H', "BEFORE"), date_M_to_H_before)
assert_func(date_M.asfreq('H', "AFTER"), date_M_to_H_after)
assert_func(date_M.asfreq('T', "BEFORE"), date_M_to_T_before)
assert_func(date_M.asfreq('T', "AFTER"), date_M_to_T_after)
assert_func(date_M.asfreq('S', "BEFORE"), date_M_to_S_before)
assert_func(date_M.asfreq('S', "AFTER"), date_M_to_S_after)
def test_conv_weekly(self):
"frequency conversion tests: from Weekly Frequency"
for dWrap, assert_func in self.dateWrap:
date_W = dWrap(Date(freq='W', year=2007, month=1, day=1))
date_WSUN = dWrap(Date(freq='W-SUN', year=2007, month=1, day=7))
date_WSAT = dWrap(Date(freq='W-SAT', year=2007, month=1, day=6))
date_WFRI = dWrap(Date(freq='W-FRI', year=2007, month=1, day=5))
date_WTHU = dWrap(Date(freq='W-THU', year=2007, month=1, day=4))
date_WWED = dWrap(Date(freq='W-WED', year=2007, month=1, day=3))
date_WTUE = dWrap(Date(freq='W-TUE', year=2007, month=1, day=2))
date_WMON = dWrap(Date(freq='W-MON', year=2007, month=1, day=1))
date_WSUN_to_D_before = dWrap(Date(freq='D', year=2007, month=1, day=1))
date_WSUN_to_D_after = dWrap(Date(freq='D', year=2007, month=1, day=7))
date_WSAT_to_D_before = dWrap(Date(freq='D', year=2006, month=12, day=31))
date_WSAT_to_D_after = dWrap(Date(freq='D', year=2007, month=1, day=6))
date_WFRI_to_D_before = dWrap(Date(freq='D', year=2006, month=12, day=30))
date_WFRI_to_D_after = dWrap(Date(freq='D', year=2007, month=1, day=5))
date_WTHU_to_D_before = dWrap(Date(freq='D', year=2006, month=12, day=29))
date_WTHU_to_D_after = dWrap(Date(freq='D', year=2007, month=1, day=4))
date_WWED_to_D_before = dWrap(Date(freq='D', year=2006, month=12, day=28))
date_WWED_to_D_after = dWrap(Date(freq='D', year=2007, month=1, day=3))
date_WTUE_to_D_before = dWrap(Date(freq='D', year=2006, month=12, day=27))
date_WTUE_to_D_after = dWrap(Date(freq='D', year=2007, month=1, day=2))
date_WMON_to_D_before = dWrap(Date(freq='D', year=2006, month=12, day=26))
date_WMON_to_D_after = dWrap(Date(freq='D', year=2007, month=1, day=1))
date_W_end_of_year = dWrap(Date(freq='W', year=2007, month=12, day=31))
date_W_end_of_quarter = dWrap(Date(freq='W', year=2007, month=3, day=31))
date_W_end_of_month = dWrap(Date(freq='W', year=2007, month=1, day=31))
date_W_to_A = dWrap(Date(freq='A', year=2007))
date_W_to_Q = dWrap(Date(freq='Q', year=2007, quarter=1))
date_W_to_M = dWrap(Date(freq='M', year=2007, month=1))
if Date(freq='D', year=2007, month=12, day=31).day_of_week == 6:
date_W_to_A_end_of_year = dWrap(Date(freq='A', year=2007))
else:
date_W_to_A_end_of_year = dWrap(Date(freq='A', year=2008))
if Date(freq='D', year=2007, month=3, day=31).day_of_week == 6:
date_W_to_Q_end_of_quarter = dWrap(Date(freq='Q', year=2007, quarter=1))
else:
date_W_to_Q_end_of_quarter = dWrap(Date(freq='Q', year=2007, quarter=2))
if Date(freq='D', year=2007, month=1, day=31).day_of_week == 6:
date_W_to_M_end_of_month = dWrap(Date(freq='M', year=2007, month=1))
else:
date_W_to_M_end_of_month = dWrap(Date(freq='M', year=2007, month=2))
date_W_to_B_before = dWrap(Date(freq='B', year=2007, month=1, day=1))
date_W_to_B_after = dWrap(Date(freq='B', year=2007, month=1, day=5))
date_W_to_D_before = dWrap(Date(freq='D', year=2007, month=1, day=1))
date_W_to_D_after = dWrap(Date(freq='D', year=2007, month=1, day=7))
date_W_to_H_before = dWrap(Date(freq='H', year=2007, month=1, day=1,
hour=0))
date_W_to_H_after = dWrap(Date(freq='H', year=2007, month=1, day=7,
hour=23))
date_W_to_T_before = dWrap(Date(freq='T', year=2007, month=1, day=1,
hour=0, minute=0))
date_W_to_T_after = dWrap(Date(freq='T', year=2007, month=1, day=7,
hour=23, minute=59))
date_W_to_S_before = dWrap(Date(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0))
date_W_to_S_after = dWrap(Date(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59))
assert_func(date_W.asfreq('A'), date_W_to_A)
assert_func(date_W_end_of_year.asfreq('A'), date_W_to_A_end_of_year)
assert_func(date_W.asfreq('Q'), date_W_to_Q)
assert_func(date_W_end_of_quarter.asfreq('Q'), date_W_to_Q_end_of_quarter)
assert_func(date_W.asfreq('M'), date_W_to_M)
assert_func(date_W_end_of_month.asfreq('M'), date_W_to_M_end_of_month)
assert_func(date_W.asfreq('B', "BEFORE"), date_W_to_B_before)
assert_func(date_W.asfreq('B', "AFTER"), date_W_to_B_after)
assert_func(date_W.asfreq('D', "BEFORE"), date_W_to_D_before)
assert_func(date_W.asfreq('D', "AFTER"), date_W_to_D_after)
assert_func(date_WSUN.asfreq('D', "BEFORE"), date_WSUN_to_D_before)
assert_func(date_WSUN.asfreq('D', "AFTER"), date_WSUN_to_D_after)
assert_func(date_WSAT.asfreq('D', "BEFORE"), date_WSAT_to_D_before)
assert_func(date_WSAT.asfreq('D', "AFTER"), date_WSAT_to_D_after)
assert_func(date_WFRI.asfreq('D', "BEFORE"), date_WFRI_to_D_before)
assert_func(date_WFRI.asfreq('D', "AFTER"), date_WFRI_to_D_after)
assert_func(date_WTHU.asfreq('D', "BEFORE"), date_WTHU_to_D_before)
assert_func(date_WTHU.asfreq('D', "AFTER"), date_WTHU_to_D_after)
assert_func(date_WWED.asfreq('D', "BEFORE"), date_WWED_to_D_before)
assert_func(date_WWED.asfreq('D', "AFTER"), date_WWED_to_D_after)
assert_func(date_WTUE.asfreq('D', "BEFORE"), date_WTUE_to_D_before)
assert_func(date_WTUE.asfreq('D', "AFTER"), date_WTUE_to_D_after)
assert_func(date_WMON.asfreq('D', "BEFORE"), date_WMON_to_D_before)
assert_func(date_WMON.asfreq('D', "AFTER"), date_WMON_to_D_after)
assert_func(date_W.asfreq('H', "BEFORE"), date_W_to_H_before)
assert_func(date_W.asfreq('H', "AFTER"), date_W_to_H_after)
assert_func(date_W.asfreq('T', "BEFORE"), date_W_to_T_before)
assert_func(date_W.asfreq('T', "AFTER"), date_W_to_T_after)
assert_func(date_W.asfreq('S', "BEFORE"), date_W_to_S_before)
assert_func(date_W.asfreq('S', "AFTER"), date_W_to_S_after)
def test_conv_business(self):
"frequency conversion tests: from Business Frequency"
for dWrap, assert_func in self.dateWrap:
date_B = dWrap(Date(freq='B', year=2007, month=1, day=1))
date_B_end_of_year = dWrap(Date(freq='B', year=2007, month=12, day=31))
date_B_end_of_quarter = dWrap(Date(freq='B', year=2007, month=3, day=30))
date_B_end_of_month = dWrap(Date(freq='B', year=2007, month=1, day=31))
date_B_end_of_week = dWrap(Date(freq='B', year=2007, month=1, day=5))
date_B_to_A = dWrap(Date(freq='A', year=2007))
date_B_to_Q = dWrap(Date(freq='Q', year=2007, quarter=1))
date_B_to_M = dWrap(Date(freq='M', year=2007, month=1))
date_B_to_W = dWrap(Date(freq='W', year=2007, month=1, day=7))
date_B_to_D = dWrap(Date(freq='D', year=2007, month=1, day=1))
date_B_to_H_before = dWrap(Date(freq='H', year=2007, month=1, day=1,
hour=0))
date_B_to_H_after = dWrap(Date(freq='H', year=2007, month=1, day=1,
hour=23))
date_B_to_T_before = dWrap(Date(freq='T', year=2007, month=1, day=1,
hour=0, minute=0))
date_B_to_T_after = dWrap(Date(freq='T', year=2007, month=1, day=1,
hour=23, minute=59))
date_B_to_S_before = dWrap(Date(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0))
date_B_to_S_after = dWrap(Date(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59))
assert_func(date_B.asfreq('A'), date_B_to_A)
assert_func(date_B_end_of_year.asfreq('A'), date_B_to_A)
assert_func(date_B.asfreq('Q'), date_B_to_Q)
assert_func(date_B_end_of_quarter.asfreq('Q'), date_B_to_Q)
assert_func(date_B.asfreq('M'), date_B_to_M)
assert_func(date_B_end_of_month.asfreq('M'), date_B_to_M)
assert_func(date_B.asfreq('W'), date_B_to_W)
assert_func(date_B_end_of_week.asfreq('W'), date_B_to_W)
assert_func(date_B.asfreq('D'), date_B_to_D)
assert_func(date_B.asfreq('H', "BEFORE"), date_B_to_H_before)
assert_func(date_B.asfreq('H', "AFTER"), date_B_to_H_after)
assert_func(date_B.asfreq('T', "BEFORE"), date_B_to_T_before)
assert_func(date_B.asfreq('T', "AFTER"), date_B_to_T_after)
assert_func(date_B.asfreq('S', "BEFORE"), date_B_to_S_before)
assert_func(date_B.asfreq('S', "AFTER"), date_B_to_S_after)
def test_conv_daily(self):
"frequency conversion tests: from Business Frequency"
for dWrap, assert_func in self.dateWrap:
date_D = dWrap(Date(freq='D', year=2007, month=1, day=1))
date_D_end_of_year = dWrap(Date(freq='D', year=2007, month=12, day=31))
date_D_end_of_quarter = dWrap(Date(freq='D', year=2007, month=3, day=31))
date_D_end_of_month = dWrap(Date(freq='D', year=2007, month=1, day=31))
date_D_end_of_week = dWrap(Date(freq='D', year=2007, month=1, day=7))
date_D_friday = dWrap(Date(freq='D', year=2007, month=1, day=5))
date_D_saturday = dWrap(Date(freq='D', year=2007, month=1, day=6))
date_D_sunday = dWrap(Date(freq='D', year=2007, month=1, day=7))
date_D_monday = dWrap(Date(freq='D', year=2007, month=1, day=8))
date_B_friday = dWrap(Date(freq='B', year=2007, month=1, day=5))
date_B_monday = dWrap(Date(freq='B', year=2007, month=1, day=8))
date_D_to_A = dWrap(Date(freq='A', year=2007))
date_Deoq_to_AJAN = dWrap(Date(freq='A-JAN', year=2008))
date_Deoq_to_AJUN = dWrap(Date(freq='A-JUN', year=2007))
date_Deoq_to_ADEC = dWrap(Date(freq='A-DEC', year=2007))
date_D_to_QEJAN = dWrap(Date(freq=C.FR_QTREJAN, year=2007, quarter=4))
date_D_to_QEJUN = dWrap(Date(freq=C.FR_QTREJUN, year=2007, quarter=3))
date_D_to_QEDEC = dWrap(Date(freq=C.FR_QTREDEC, year=2007, quarter=1))
date_D_to_QSJAN = dWrap(Date(freq=C.FR_QTRSJAN, year=2006, quarter=4))
date_D_to_QSJUN = dWrap(Date(freq=C.FR_QTRSJUN, year=2006, quarter=3))
date_D_to_QSDEC = dWrap(Date(freq=C.FR_QTRSDEC, year=2007, quarter=1))
date_D_to_M = dWrap(Date(freq='M', year=2007, month=1))
date_D_to_W = dWrap(Date(freq='W', year=2007, month=1, day=7))
date_D_to_H_before = dWrap(Date(freq='H', year=2007, month=1, day=1,
hour=0))
date_D_to_H_after = dWrap(Date(freq='H', year=2007, month=1, day=1,
hour=23))
date_D_to_T_before = dWrap(Date(freq='T', year=2007, month=1, day=1,
hour=0, minute=0))
date_D_to_T_after = dWrap(Date(freq='T', year=2007, month=1, day=1,
hour=23, minute=59))
date_D_to_S_before = dWrap(Date(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0))
date_D_to_S_after = dWrap(Date(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59))
assert_func(date_D.asfreq('A'), date_D_to_A)
assert_func(date_D_end_of_quarter.asfreq('A-JAN'), date_Deoq_to_AJAN)
assert_func(date_D_end_of_quarter.asfreq('A-JUN'), date_Deoq_to_AJUN)
assert_func(date_D_end_of_quarter.asfreq('A-DEC'), date_Deoq_to_ADEC)
assert_func(date_D_end_of_year.asfreq('A'), date_D_to_A)
assert_func(date_D_end_of_quarter.asfreq('Q'), date_D_to_QEDEC)
assert_func(date_D.asfreq(C.FR_QTREJAN), date_D_to_QEJAN)
assert_func(date_D.asfreq(C.FR_QTREJUN), date_D_to_QEJUN)
assert_func(date_D.asfreq(C.FR_QTREDEC), date_D_to_QEDEC)
assert_func(date_D.asfreq(C.FR_QTRSJAN), date_D_to_QSJAN)
assert_func(date_D.asfreq(C.FR_QTRSJUN), date_D_to_QSJUN)
assert_func(date_D.asfreq(C.FR_QTRSDEC), date_D_to_QSDEC)
assert_func(date_D.asfreq('M'), date_D_to_M)
assert_func(date_D_end_of_month.asfreq('M'), date_D_to_M)
assert_func(date_D.asfreq('W'), date_D_to_W)
assert_func(date_D_end_of_week.asfreq('W'), date_D_to_W)
assert_func(date_D_friday.asfreq('B'), date_B_friday)
assert_func(date_D_saturday.asfreq('B', "BEFORE"), date_B_friday)
assert_func(date_D_saturday.asfreq('B', "AFTER"), date_B_monday)
assert_func(date_D_sunday.asfreq('B', "BEFORE"), date_B_friday)
assert_func(date_D_sunday.asfreq('B', "AFTER"), date_B_monday)
assert_func(date_D.asfreq('H', "BEFORE"), date_D_to_H_before)
assert_func(date_D.asfreq('H', "AFTER"), date_D_to_H_after)
assert_func(date_D.asfreq('T', "BEFORE"), date_D_to_T_before)
assert_func(date_D.asfreq('T', "AFTER"), date_D_to_T_after)
assert_func(date_D.asfreq('S', "BEFORE"), date_D_to_S_before)
assert_func(date_D.asfreq('S', "AFTER"), date_D_to_S_after)
def test_conv_hourly(self):
"frequency conversion tests: from Hourly Frequency"
for dWrap, assert_func in self.dateWrap:
date_H = dWrap(Date(freq='H', year=2007, month=1, day=1, hour=0))
date_H_end_of_year = dWrap(Date(freq='H', year=2007, month=12, day=31,
hour=23))
date_H_end_of_quarter = dWrap(Date(freq='H', year=2007, month=3, day=31,
hour=23))
date_H_end_of_month = dWrap(Date(freq='H', year=2007, month=1, day=31,
hour=23))
date_H_end_of_week = dWrap(Date(freq='H', year=2007, month=1, day=7,
hour=23))
date_H_end_of_day = dWrap(Date(freq='H', year=2007, month=1, day=1,
hour=23))
date_H_end_of_bus = dWrap(Date(freq='H', year=2007, month=1, day=1,
hour=23))
date_H_to_A = dWrap(Date(freq='A', year=2007))
date_H_to_Q = dWrap(Date(freq='Q', year=2007, quarter=1))
date_H_to_M = dWrap(Date(freq='M', year=2007, month=1))
date_H_to_W = dWrap(Date(freq='W', year=2007, month=1, day=7))
date_H_to_D = dWrap(Date(freq='D', year=2007, month=1, day=1))
date_H_to_B = dWrap(Date(freq='B', year=2007, month=1, day=1))
date_H_to_T_before = dWrap(Date(freq='T', year=2007, month=1, day=1,
hour=0, minute=0))
date_H_to_T_after = dWrap(Date(freq='T', year=2007, month=1, day=1,
hour=0, minute=59))
date_H_to_S_before = dWrap(Date(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0))
date_H_to_S_after = dWrap(Date(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59))
assert_func(date_H.asfreq('A'), date_H_to_A)
assert_func(date_H_end_of_year.asfreq('A'), date_H_to_A)
assert_func(date_H.asfreq('Q'), date_H_to_Q)
assert_func(date_H_end_of_quarter.asfreq('Q'), date_H_to_Q)
assert_func(date_H.asfreq('M'), date_H_to_M)
assert_func(date_H_end_of_month.asfreq('M'), date_H_to_M)
assert_func(date_H.asfreq('W'), date_H_to_W)
assert_func(date_H_end_of_week.asfreq('W'), date_H_to_W)
assert_func(date_H.asfreq('D'), date_H_to_D)
assert_func(date_H_end_of_day.asfreq('D'), date_H_to_D)
assert_func(date_H.asfreq('B'), date_H_to_B)
assert_func(date_H_end_of_bus.asfreq('B'), date_H_to_B)
assert_func(date_H.asfreq('T', "BEFORE"), date_H_to_T_before)
assert_func(date_H.asfreq('T', "AFTER"), date_H_to_T_after)
assert_func(date_H.asfreq('S', "BEFORE"), date_H_to_S_before)
assert_func(date_H.asfreq('S', "AFTER"), date_H_to_S_after)
    def test_conv_minutely(self):
        "frequency conversion tests: from Minutely Frequency"
        # Each case is run both on plain Dates and on DateArray-wrapped
        # dates (the two entries of self.dateWrap).
        for dWrap, assert_func in self.dateWrap:
            # 2007-01-01 00:00 plus the last minute of each coarser period;
            # converting either endpoint must yield the same coarse date.
            date_T = dWrap(Date(freq='T', year=2007, month=1, day=1, hour=0, minute=0))
            date_T_end_of_year = dWrap(Date(freq='T', year=2007, month=12, day=31, hour=23, minute=59))
            date_T_end_of_quarter = dWrap(Date(freq='T', year=2007, month=3, day=31, hour=23, minute=59))
            date_T_end_of_month = dWrap(Date(freq='T', year=2007, month=1, day=31, hour=23, minute=59))
            date_T_end_of_week = dWrap(Date(freq='T', year=2007, month=1, day=7, hour=23, minute=59))
            date_T_end_of_day = dWrap(Date(freq='T', year=2007, month=1, day=1, hour=23, minute=59))
            date_T_end_of_bus = dWrap(Date(freq='T', year=2007, month=1, day=1, hour=23, minute=59))
            date_T_end_of_hour = dWrap(Date(freq='T', year=2007, month=1, day=1, hour=0, minute=59))
            # Expected results at every coarser frequency.
            date_T_to_A = dWrap(Date(freq='A', year=2007))
            date_T_to_Q = dWrap(Date(freq='Q', year=2007, quarter=1))
            date_T_to_M = dWrap(Date(freq='M', year=2007, month=1))
            date_T_to_W = dWrap(Date(freq='W', year=2007, month=1, day=7))
            date_T_to_D = dWrap(Date(freq='D', year=2007, month=1, day=1))
            date_T_to_B = dWrap(Date(freq='B', year=2007, month=1, day=1))
            date_T_to_H = dWrap(Date(freq='H', year=2007, month=1, day=1, hour=0))
            # Converting down to seconds: "BEFORE" snaps to the first second
            # of the minute, "AFTER" to the last.
            date_T_to_S_before = dWrap(Date(freq='S', year=2007, month=1, day=1, hour=0, minute=0, second=0))
            date_T_to_S_after = dWrap(Date(freq='S', year=2007, month=1, day=1, hour=0, minute=0, second=59))
            assert_func(date_T.asfreq('A'), date_T_to_A)
            assert_func(date_T_end_of_year.asfreq('A'), date_T_to_A)
            assert_func(date_T.asfreq('Q'), date_T_to_Q)
            assert_func(date_T_end_of_quarter.asfreq('Q'), date_T_to_Q)
            assert_func(date_T.asfreq('M'), date_T_to_M)
            assert_func(date_T_end_of_month.asfreq('M'), date_T_to_M)
            assert_func(date_T.asfreq('W'), date_T_to_W)
            assert_func(date_T_end_of_week.asfreq('W'), date_T_to_W)
            assert_func(date_T.asfreq('D'), date_T_to_D)
            assert_func(date_T_end_of_day.asfreq('D'), date_T_to_D)
            assert_func(date_T.asfreq('B'), date_T_to_B)
            assert_func(date_T_end_of_bus.asfreq('B'), date_T_to_B)
            assert_func(date_T.asfreq('H'), date_T_to_H)
            assert_func(date_T_end_of_hour.asfreq('H'), date_T_to_H)
            assert_func(date_T.asfreq('S', "BEFORE"), date_T_to_S_before)
            assert_func(date_T.asfreq('S', "AFTER"), date_T_to_S_after)
    def test_conv_secondly(self):
        "frequency conversion tests: from Secondly Frequency"
        # Each case is run both on plain Dates and on DateArray-wrapped
        # dates (the two entries of self.dateWrap).
        for dWrap, assert_func in self.dateWrap:
            # 2007-01-01 00:00:00 plus the last second of each coarser
            # period; both endpoints must convert to the same coarse date.
            date_S = dWrap(Date(freq='S', year=2007, month=1, day=1, hour=0, minute=0, second=0))
            date_S_end_of_year = dWrap(Date(freq='S', year=2007, month=12, day=31, hour=23, minute=59, second=59))
            date_S_end_of_quarter = dWrap(Date(freq='S', year=2007, month=3, day=31, hour=23, minute=59, second=59))
            date_S_end_of_month = dWrap(Date(freq='S', year=2007, month=1, day=31, hour=23, minute=59, second=59))
            date_S_end_of_week = dWrap(Date(freq='S', year=2007, month=1, day=7, hour=23, minute=59, second=59))
            date_S_end_of_day = dWrap(Date(freq='S', year=2007, month=1, day=1, hour=23, minute=59, second=59))
            date_S_end_of_bus = dWrap(Date(freq='S', year=2007, month=1, day=1, hour=23, minute=59, second=59))
            date_S_end_of_hour = dWrap(Date(freq='S', year=2007, month=1, day=1, hour=0, minute=59, second=59))
            date_S_end_of_minute = dWrap(Date(freq='S', year=2007, month=1, day=1, hour=0, minute=0, second=59))
            # Expected results at every coarser frequency.
            date_S_to_A = dWrap(Date(freq='A', year=2007))
            date_S_to_Q = dWrap(Date(freq='Q', year=2007, quarter=1))
            date_S_to_M = dWrap(Date(freq='M', year=2007, month=1))
            date_S_to_W = dWrap(Date(freq='W', year=2007, month=1, day=7))
            date_S_to_D = dWrap(Date(freq='D', year=2007, month=1, day=1))
            date_S_to_B = dWrap(Date(freq='B', year=2007, month=1, day=1))
            date_S_to_H = dWrap(Date(freq='H', year=2007, month=1, day=1, hour=0))
            date_S_to_T = dWrap(Date(freq='T', year=2007, month=1, day=1, hour=0, minute=0))
            assert_func(date_S.asfreq('A'), date_S_to_A)
            assert_func(date_S_end_of_year.asfreq('A'), date_S_to_A)
            assert_func(date_S.asfreq('Q'), date_S_to_Q)
            assert_func(date_S_end_of_quarter.asfreq('Q'), date_S_to_Q)
            assert_func(date_S.asfreq('M'), date_S_to_M)
            assert_func(date_S_end_of_month.asfreq('M'), date_S_to_M)
            assert_func(date_S.asfreq('W'), date_S_to_W)
            assert_func(date_S_end_of_week.asfreq('W'), date_S_to_W)
            assert_func(date_S.asfreq('D'), date_S_to_D)
            assert_func(date_S_end_of_day.asfreq('D'), date_S_to_D)
            assert_func(date_S.asfreq('B'), date_S_to_B)
            assert_func(date_S_end_of_bus.asfreq('B'), date_S_to_B)
            assert_func(date_S.asfreq('H'), date_S_to_H)
            assert_func(date_S_end_of_hour.asfreq('H'), date_S_to_H)
            assert_func(date_S.asfreq('T'), date_S_to_T)
            assert_func(date_S_end_of_minute.asfreq('T'), date_S_to_T)
class test_methods(NumpyTestCase):
    "Tests of DateArray indexing and inspection methods."
    def __init__(self, *args, **kwds):
        NumpyTestCase.__init__(self, *args, **kwds)
    def test_getitem(self):
        "Tests getitem"
        # Monthly dates for 2007 with May and June missing (a gap).
        dlist = ['2007-%02i' % i for i in range(1,5)+range(7,13)]
        mdates = date_array_fromlist(dlist, 'M')
        # Using an integer
        assert_equal(mdates[0].value, 24073)
        assert_equal(mdates[-1].value, 24084)
        # Using a date
        lag = mdates.find_dates(mdates[0])
        assert_equal(mdates[lag], mdates[0])
        lag = mdates.find_dates(Date('M',value=24080))
        assert_equal(mdates[lag], mdates[5])
        # Using several dates
        lag = mdates.find_dates(Date('M',value=24073), Date('M',value=24084))
        assert_equal(mdates[lag], DateArray([mdates[0], mdates[-1]], freq='M'))
        assert_equal(mdates[[mdates[0],mdates[-1]]], mdates[lag])
        #
        assert_equal(mdates>=mdates[-4], [0,0,0,0,0,0,1,1,1,1])
        dlist = ['2006-%02i' % i for i in range(1,5)+range(7,13)]
        mdates = date_array_fromlist(dlist).asfreq('M')
        #CHECK : Oops, what were we supposed to do here ?
    def test_getsteps(self):
        "Tests the getsteps method"
        # get_steps returns the gaps (in periods) between consecutive dates.
        dlist = ['2007-01-%02i' %i for i in (1,2,3,4,8,9,10,11,12,15)]
        ddates = date_array_fromlist(dlist)
        assert_equal(ddates.get_steps(), [1,1,1,4,1,1,1,1,3])
    def test_empty_datearray(self):
        # An empty DateArray is trivially full and valid, with no steps.
        empty_darray = DateArray([], freq='b')
        assert_equal(empty_darray.isfull(), True)
        assert_equal(empty_darray.isvalid(), True)
        assert_equal(empty_darray.get_steps(), None)
    def test_cachedinfo(self):
        # The cached string representation must survive fancy indexing.
        D = date_array(start_date=thisday('D'), length=5)
        Dstr = D.tostring()
        assert_equal(D.tostring(), Dstr)
        DL = D[[0,-1]]
        assert_equal(DL.tostring(), Dstr[[0,-1]])
###############################################################################
#------------------------------------------------------------------------------
if __name__ == "__main__":
NumpyTest().run() | [
"mb434@cornell.edu"
] | mb434@cornell.edu |
7cf1e70487f372ffe71d4eef590134c767624115 | 1d080e3ebf1825ac74e7ea3be4b9a9e4cdb3bb0c | /Class2_Python3/Homework Aufgabe 2.3.py | c06a9ceaa12288e1382b1ed6c68ed9679c59d769 | [] | no_license | SariFink/wd1-20200323 | 61168f1a446b9eb4909fa914ef8f5e95da07be28 | 950d061abdad0c49fa5cd98a0b1db8b5fdf224b5 | refs/heads/master | 2021-05-24T12:22:00.394007 | 2020-05-03T11:09:29 | 2020-05-03T11:09:29 | 253,559,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | # Sometimes you'd like to make some string lowercase. For example, you have a string like this:
#
# "Today Is A BeautiFul DAY"
# And you'd like to make it like this:
#
# "today is a beautiful day"
# There is a very nice solution in Python to do this. Use Google search and find out how to do it.
#
# Where would this come handy? For example if you ask user "Would you like to continue (yes/no)?", the user might respond: "yes", "Yes", "YES" or even "YeS". In this case, changing your user's response into lowercase letters would be very helpful in your if-else statement.
| [
"fink.sari@gmail.com"
] | fink.sari@gmail.com |
019faa8e9013ea673b664a28a30f07cdc41adfd6 | 8cef9162a14d6d5c2e2253fc6d912b5aef52f687 | /set-morn.py | bad3f47bd1307e9cfb9ac0f4e42fe82392526ed6 | [] | no_license | 7ooL/home_auto_scripts | 9b75755ac4e464e49aba870ac3cbb2997ce7b022 | 2ad224b9b2bd9c3d7b53cb774db17dc696a1f869 | refs/heads/master | 2022-12-15T18:31:06.117914 | 2020-09-20T20:25:31 | 2020-09-20T20:25:31 | 98,122,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | import myhouse
import pyInfinitude.pyInfinitude
import os, sys, datetime, time
import logging
import os
def main(argv):
    """Flag the 'mornings' section as updating and persist the settings.

    ``argv`` is accepted for parity with the command-line entry point but
    is not used.
    """
    started = datetime.datetime.now()
    logging.info('Running set-morn script')
    house = myhouse.Home()
    house.public.set("mornings", "updating", "yes")
    house.saveSettings()
    logging.debug('finished ' + str(datetime.datetime.now() - started))

if __name__ == "__main__":
    main(sys.argv[1:])
| [
"mr.matthew.f.martin@gmail.com"
] | mr.matthew.f.martin@gmail.com |
eeb5b87a30485c9e62c25f1a79691e9e6f9c4142 | 0e8836c5202e5bb389870df6a2727cd49b192daa | /peak-index-in-a-mountain-array/peak-index-in-a-mountain-array.py | 8eac0974dd570242db4a70bf7fb68c6a97fc935a | [] | no_license | OnlyLeetCoder/LeetCode | a0e3d1b1360a7e04533649ab62274f2e32dec2bf | 190fe578eab02548f7e2fd5471cef14469e63501 | refs/heads/main | 2023-07-05T19:44:06.396815 | 2021-08-22T17:08:17 | 2021-08-22T17:08:17 | 387,091,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | class Solution:
def peakIndexInMountainArray(self, arr: List[int]) -> int:
for i in range(1, len(arr)-1):
if arr[i-1] < arr[i] > arr[i+1]:
return i | [
"87592768+OnlyLeetCoder@users.noreply.github.com"
] | 87592768+OnlyLeetCoder@users.noreply.github.com |
77029a6329b00c293d80af7760c89f8db105eb59 | 78bc1615df60a593ea1b19febbff91ca1990f98f | /portfolio/manage.py | 2126344d46b079788a2de26abc0106c37014ab6c | [] | no_license | mgeraci/portfolio | 0010ee36584c5822d8e641521e343a7491ce2262 | e7c6ed40227f6ac614663bcb98bb639fe0d2b267 | refs/heads/master | 2023-07-20T00:17:49.516598 | 2023-07-13T00:18:07 | 2023-07-13T00:18:07 | 51,189,359 | 0 | 1 | null | 2023-07-18T20:31:02 | 2016-02-06T04:28:11 | JavaScript | UTF-8 | Python | false | false | 258 | py | #!/usr/bin/env python
import os
import sys
# Standard Django management entry point: point DJANGO_SETTINGS_MODULE at
# this project's settings, then hand the command line over to Django.
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "michael_dot_com.settings")
    # Imported lazily so the settings module variable is set before Django loads.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"me@mgeraci.com"
] | me@mgeraci.com |
bf76f20dd5bbe81aaa16548f337c6de6d6e46d7d | a33f4e8eff21965e234531d1375b113ad1bb2064 | /qa/rpc-tests/maxblocksinflight.py | 76e553ee707ff7584f6c10614ca992bc7eb50e66 | [
"MIT"
] | permissive | zero24x/innova | c80d5abf71b515e395c99c9544ea4673bd450b4e | cde2cb27dd359d54b693d13a246583010101413a | refs/heads/master | 2020-03-21T06:35:07.634999 | 2019-06-07T15:17:27 | 2019-06-07T15:17:27 | 138,229,700 | 0 | 0 | MIT | 2019-06-07T14:38:17 | 2018-06-21T22:53:20 | C++ | UTF-8 | Python | false | false | 3,733 | py | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import logging
'''
In this test we connect to one node over p2p, send it numerous inv's, and
compare the resulting number of getdata requests to a max allowed value. We
test for exceeding 128 blocks in flight, which was the limit an 0.9 client will
reach. [0.10 clients shouldn't request more than 16 from a single peer.]
'''
MAX_REQUESTS = 128
class TestManager(NodeConnCB):
    """p2p test peer: floods the node with block invs and records the
    getdata requests it sends back, so the test can bound how many blocks
    the node keeps in flight from a single peer."""
    # set up NodeConnCB callbacks, overriding base class
    def on_getdata(self, conn, message):
        self.log.debug("got getdata %s" % repr(message))
        # Log the requests
        for inv in message.inv:
            if inv.hash not in self.blockReqCounts:
                self.blockReqCounts[inv.hash] = 0
            self.blockReqCounts[inv.hash] += 1
    def on_close(self, conn):
        # The node must not disconnect us before the test is finished.
        if not self.disconnectOkay:
            raise EarlyDisconnectError(0)
    def __init__(self):
        NodeConnCB.__init__(self)
        self.log = logging.getLogger("BlockRelayTest")
    def add_new_connection(self, connection):
        self.connection = connection
        # map: block hash -> number of times the node requested it
        self.blockReqCounts = {}
        self.disconnectOkay = False
    def run(self):
        self.connection.rpc.generate(1) # Leave IBD
        numBlocksToGenerate = [8, 16, 128, 1024]
        for count in range(len(numBlocksToGenerate)):
            current_invs = []
            for i in range(numBlocksToGenerate[count]):
                current_invs.append(CInv(2, random.randrange(0, 1 << 256)))
                # Keep each inv message under the 50000-entry protocol limit.
                if len(current_invs) >= 50000:
                    self.connection.send_message(msg_inv(current_invs))
                    current_invs = []
            if len(current_invs) > 0:
                self.connection.send_message(msg_inv(current_invs))
            # Wait and see how many blocks were requested
            time.sleep(2)
            total_requests = 0
            with mininode_lock:
                for key in self.blockReqCounts:
                    total_requests += self.blockReqCounts[key]
                    if self.blockReqCounts[key] > 1:
                        raise AssertionError("Error, test failed: block %064x requested more than once" % key)
            if total_requests > MAX_REQUESTS:
                raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
            print "Round %d: success (total requests: %d)" % (count, total_requests)
        self.disconnectOkay = True
        self.connection.disconnect_node()
class MaxBlocksInFlightTest(BitcoinTestFramework):
    """Framework wrapper: spins up one whitelisted node and runs TestManager."""
    def add_options(self, parser):
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("INNOVAD", "innovad"),
                          help="Binary to test max block requests behavior")
    def setup_chain(self):
        print "Initializing test directory "+self.options.tmpdir
        initialize_chain_clean(self.options.tmpdir, 1)
    def setup_network(self):
        # -whitelist keeps the node from banning our flood of invs.
        self.nodes = start_nodes(1, self.options.tmpdir,
                                 extra_args=[['-debug', '-whitelist=127.0.0.1']],
                                 binary=[self.options.testbinary])
    def run_test(self):
        test = TestManager()
        test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
        NetworkThread().start() # Start up network handling in another thread
        test.run()

if __name__ == '__main__':
    MaxBlocksInFlightTest().main()
| [
"root@DESKTOP-N2BRCQD.localdomain"
] | root@DESKTOP-N2BRCQD.localdomain |
7edffcac268c2ecc9f54017338ffdc0fd54d47bd | 4e3fcb8e40752d7df86a069d771c398a0801236a | /exchange_rate.py | db5cce2e3904be81a4eae567ac2ddfb402306f31 | [] | no_license | Gchesta/assignment_day_3 | fb8040b696037b3e2fed6627addc40b14bfe3084 | a8e6bce2e2e2bb90f82b78ff7c681ae6743663c1 | refs/heads/master | 2021-01-12T00:40:23.686367 | 2017-02-08T14:01:51 | 2017-02-08T14:01:51 | 81,293,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py |
def find_exchange_rate():
    """Interactively convert an amount between two currencies.

    Prompts for a source currency, a target currency and an amount,
    queries the fixer.io API for the current rate, and returns a
    human-readable conversion string (or an error message when the
    target currency is not in the response).
    """
    from urllib2 import Request, urlopen
    import json
    # taking user inputs
    # Bug fix: use raw_input so the answer is taken as a literal string;
    # Python 2's input() would eval() whatever the user types.
    curr1 = raw_input("Convert FROM (e.g USD, GBP):").strip().upper()
    curr2 = raw_input("Convert To (e.g GBP, USD)").strip().upper()
    amount = float(raw_input("Please Enter the AMOUNT to be Converted:"))
    # creates a URL for the GET method
    api_url = ("http://api.fixer.io/latest?symbols=%s,%s") % (curr1, curr2)
    r = Request(api_url)
    # to access a JSON file
    exchange_rate_json = json.loads(urlopen(r).read())
    rates = exchange_rate_json.get("rates", {})
    # Robustness: avoid a KeyError when the API has no rate for curr2
    # (unknown code, or curr2 equals the API's base currency).
    if curr2 not in rates:
        return "Sorry, no exchange rate available for %s to %s" % (curr1, curr2)
    exchange_rate = rates[curr2]
    converted_amount = amount * exchange_rate
    return curr1 + str(amount) + " converts to " + curr2 + str(converted_amount)
| [
"elimushwari@gmail.com"
] | elimushwari@gmail.com |
11cced50a072f7285e48fe1f90871ba4d26499d4 | 00d5c1760d4ec54238e02400683670832f5e0722 | /main.py | 4ddbe04cec6ab7f250dea8dba41609866c60098d | [] | no_license | rafaelandrade/twitter-dataanalysis | c8ddc5a0cd09e74410c074b1de397db4376e7ed5 | 92fcd710266c89b12fad785c59f3679c6c4859ad | refs/heads/master | 2020-09-07T18:00:24.464667 | 2019-11-11T01:06:02 | 2019-11-11T01:06:02 | 220,869,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | #importing library calls tweepy
import tweepy as tw
# credentials
import credentials.credentials as cr

# API - with twitter keys
# Bug fix: the first argument of OAuthHandler is the consumer *key*; the
# original passed cr.consumer_secret() for both arguments, so
# authentication could never succeed.
# NOTE(review): assumes the credentials module exposes consumer_key() —
# confirm against credentials/credentials.py.
auth = tw.OAuthHandler(cr.consumer_key(), cr.consumer_secret())
auth.set_access_token(cr.acess_token_key(), cr.acess_token_secret())
api = tw.API(auth)
"rafasouza@protonmail.com"
] | rafasouza@protonmail.com |
dc62c2a6d968f42f6bacae2d04cf425b49f9a6b8 | 383e72352efce1631107ae87138930d92beb4b74 | /web_server/app.py | 1aff8ea31409b4fa28b1f53657f25b5b6029942f | [] | no_license | xeonselina/terminal_tools | bf032a9a886759f5ba7350b24b2d3ea65db99f06 | 72161cec0823b0d6c8262695e0e927b17eca4ce6 | refs/heads/master | 2021-01-11T23:33:48.312841 | 2017-03-31T06:02:51 | 2017-03-31T06:02:51 | 78,601,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,620 | py | # coding=utf-8
import tornado.escape
from tornado import ioloop
import tornado.options
import tornado.web
import tornado.websocket
import tornado.gen
import time
import json
import tornado.httpclient
from tornado.concurrent import Future
import zlib
import os
import requests
import tornado.httpserver
from Connector.DirListHandler import dir_list_handler
import uuid
import name_server
import base64
import t_server
import b64
import urllib
import logging
import subprocess
import mimetypes
import magic
from tornado import gen
from urllib import quote
import sys
import pty_module
from pty_module import PTYWSHandler
MAX_REQUEST = 50
config = {}
execfile('app.conf', config)
connected_web_client = {}
settings = {
'debug': True,
'static_path': os.path.join(os.path.dirname(__file__), "static"),
'template_path': os.path.join(os.path.dirname(__file__), "templates"),
"cookie_secret": "bZJc2sWbQLKos6GkHn/VB9oXwQt8S0R0kRvJ5/xJ89E=",
"login_url": "/login"
}
def parse_json_resp(json_message):
    """Extract (type, result, respID) from a decoded response message.

    Missing keys simply yield None, mirroring dict.get semantics.
    """
    return (json_message.get('type'),
            json_message.get('result'),
            json_message.get('respID'))
class BaseHandler(tornado.web.RequestHandler):
    """Common HTTP handler base with cookie-based authentication.

    The secure "username" cookie is set by AuthHandler after SSO login;
    returning None here makes @tornado.web.authenticated redirect to the
    configured login_url.
    """
    def get_current_user(self):
        return self.get_secure_cookie("username")
class BaseWsHandler(tornado.websocket.WebSocketHandler):
    """WebSocket handler base that logs unhandled request exceptions
    instead of letting them propagate."""
    def _handle_request_exception(self, e):
        # NOTE(review): only the literal word 'error' is logged; consider
        # logging the exception itself for diagnosability.
        logging.error('error')
# Request Routing
class Application(tornado.web.Application):
    """URL routing table for the web server."""
    def __init__(self):
        # /term is used for handling terminal req/resp, while /manager is used for request from browser
        handlers = [(r"/resp", TerminalRespController),
                    (r"/", HomeController),
                    (r"/list_log", LogHandler),
                    (r"/list_dir", dir_list_handler),
                    (r"/show_log", Show_logHandler),
                    (r"/oper", Oper_Handler),
                    (r"/cmd", CMDHandler),
                    (r"/restart_cmd", Restart_CMDHandler),
                    (r"/sqlite", SqliteHandler),
                    (r"/web_upload", FineUploadHandler),
                    (r"/file_download", DownloadHandler),
                    (r"/file_view", FileViewHandler),
                    (r"/file_delete", FileDeleteHandler),
                    (r"/dir_tree", DirTreeHandler),
                    (r"/uploads/(.*)", tornado.web.StaticFileHandler, {"path": "uploads/"}),
                    (r"/ws", WSHandler),
                    (r"/pty_ws", PTYWSHandler),
                    (r"/cli_upload", ClientUploadHandler),
                    (r"/rename", RenameHandler),
                    (r"/unzip", UnzipHandler),
                    (r"/login", LoginHandler),
                    (r"/logout", LogoutHandler),
                    (r"/auth", AuthHandler),
                    (r"/process", GetProcessListHandler),
                    (r"/kill_proc", KillProcess),
                    (r"/restart_agent", RestartAgentHandler),
                    (r"/upgrade_agent", UpgradeAgentHandler),]
        tornado.web.Application.__init__(self, handlers, **settings)
class WSHandler(tornado.websocket.WebSocketHandler):
    """WebSocket endpoint for browser pages.

    A page registers itself with a 'reg' message carrying its web id (wid)
    and, optionally, the terminal id (tid) it is attached to; afterwards
    'pty_input'/'pty_resize' messages are relayed to that terminal agent
    through t_server.  Messages are base64-encoded JSON objects.
    """
    def check_origin(self, origin):
        # Accept connections from any origin.
        return True
        pass
    def __init__(self, application, request, **kwargs):
        tornado.websocket.WebSocketHandler.__init__(self, application, request, **kwargs)
        # -1 until the client registers via a 'reg' message.
        self.wid = -1
        self.tid = -1
        pass
    def open(self):
        # Nothing to do untill we got the first heartbeat
        print "new client connected"
        pass
    @gen.coroutine
    def on_message(self, message):
        print 'received web client message: %s' % base64.b64decode(message)
        msg_obj = b64.b64_to_json(message)
        cmd = msg_obj['cmd']
        if cmd == 'reg':
            # Register this page so server-side pushes can find it.
            connected_web_client[msg_obj['wid']] = self
            print "new webclient connected, all connected_web_client.keys() are:"
            print connected_web_client.keys()
            self.wid = msg_obj['wid']
            self.tid = msg_obj.get('tid', '')
        elif cmd == 'pty_input':
            # Input from the page's xterm; forward it to the client agent.
            param = msg_obj['param']
            connected_clients = yield name_server.get_connected_client()
            if self.tid in connected_clients.keys():
                cid = 'cid' + str(uuid.uuid1())
                yield t_server.send_pty(self.tid, param, cid, self.wid)
        elif cmd == 'pty_resize':
            # Resize from the page's xterm; forward it to the client agent.
            param = msg_obj['param']
            connected_clients = yield name_server.get_connected_client()
            if self.tid in connected_clients.keys():
                cid = 'cid' + str(uuid.uuid1())
                yield t_server.send_pty_resize(self.tid, param, cid, self.wid)
            pass
        pass
    def on_close(self):
        # Drop the registration so pushes stop targeting this page.
        if self.wid in connected_web_client.keys():
            del connected_web_client[self.wid]
            print "close %s" % self.wid
            print "new webclient connected, all connected_web_client.keys() are:"
            print connected_web_client.keys()
            pass
        pass
class ClientUploadHandler(tornado.web.RequestHandler):
    """Receives a file uploaded by a terminal agent.

    The agent POSTs the requested file (form field 'zipfile'); the handler
    stores it under uploads/<uuid>/ and then either pushes a download URL
    to the registered browser page over its websocket, or — when view=1 —
    unzips the archive and pushes its text content for inline viewing.
    """
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self):
        tid = self.get_argument('tid')
        cid = self.get_argument('cid')
        wid = self.get_argument('wid')
        # whether to open the file for inline viewing
        is_view = self.get_argument('view', '')
        fileinfo = self.request.files['zipfile'][0]
        fname = fileinfo['filename']
        print 'received file, filename is %s' % fname
        fn_part = os.path.splitext(fname)
        extn = None
        if len(fn_part) > 1:
            extn = fn_part[1]
        # Store under a fresh uuid directory so names never collide.
        sub_path = str(uuid.uuid4())
        cname = "{0}{1}".format(fn_part[0], extn)
        path_dir = 'uploads/' + sub_path
        if not os.path.exists(path_dir):
            os.makedirs(path_dir)
        with open(path_dir + '/' + cname, 'w') as fh:
            fh.write(fileinfo['body'])
        print 'config is :'
        print config
        url = 'http://%s/uploads/%s/%s' % (config['web_server'], sub_path, cname)
        print 'download url is: ' + url
        print 'Is wid in connected_web_client:'
        print wid in connected_web_client.keys()
        print "all connected_web_client"
        print connected_web_client.keys()
        if wid in connected_web_client.keys():
            ws = connected_web_client[wid]
            if is_view:
                # unzip
                unzip_path = 'uploads/%s/%s' % (sub_path, 'unzip')
                subprocess.call(['7za', 'x', 'uploads/%s/%s' % (sub_path, cname), '-y', '-o%s' % unzip_path])
                # should be only one file in sub_path/unzip/
                items = os.listdir(unzip_path)
                items = [(os.path.join(unzip_path, t), t) for t in items]
                for long, short in items:
                    print 'try to open file:'
                    print long
                    if os.path.isdir(long):
                        self.write(json.dumps({'result': False, 'msg': '不能打开目录'}))
                        return
                    else:
                        # (mtype, _) = mimetypes.guess_type(long)
                        mtype = magic.from_file(long)
                        # only text files are opened for viewing
                        if 'text' in mtype:
                            with open(long) as f:
                                cl = f.readlines()
                                cl = [c + '<br/>' for c in cl]
                                content = ""
                                content = content.join(cl)
                                # Fall back silently when the file is not GBK-encoded.
                                try:
                                    content = content.decode("gbk")
                                except:
                                    pass
                                ws.write_message(
                                    b64.json_to_b64(
                                        {'cmd': 'view', 'param': {'result': True, 'title': short, 'content': content}}))
                                self.write(json.dumps({'result': True}))
                                return
                            pass
                        else:
                            ws.write_message(b64.json_to_b64({'cmd': 'view', 'param': {'result': False}}))
                            self.write(json.dumps({'result': True}))
                            return
                        pass
                    pass
                pass
            ws.write_message(b64.json_to_b64({'cmd': 'download', 'param': url}))
            self.write(json.dumps({'result': True}))
        else:
            # notify the page that the file is ready for download
            # NOTE(review): 'ws' is not defined on this branch (wid is not
            # in connected_web_client), so this raises NameError — confirm
            # intended behaviour.
            ws.write_message(b64.json_to_b64({'cmd': 'download', 'param': url}))
            self.write(json.dumps({'result': True}))
            pass
        # NOTE(review): falls through after a successful response and
        # writes a second body ({'result': False}) — looks unintended.
        self.write(json.dumps({'result': False}))
        pass
    pass
class LoginHandler(BaseHandler):
    """Redirects the browser to the corporate SSO login page, which will
    bounce back to /auth with a token."""
    def get(self):
        # self.render('index.html', connect_total=len(name_server.get_connected_client()))
        host = self.request.headers["host"]
        self.redirect("http://sytest.cimc.com/sso/user/login?ref=http%3a%2f%2f{0}%2fauth".format(urllib.quote(host)))
class LogoutHandler(BaseHandler):
    """Clears the local login cookie and redirects to the SSO logout page."""
    # http://sytest.cimc.com/sso/user/logout?ref=http%3A%2F%2Fticket.cimc.com%2Fuser%2Flogout
    def get(self):
        if (self.get_current_user()):
            self.clear_cookie("username")
        host = self.request.headers["host"]
        self.redirect("http://sytest.cimc.com/sso/user/logout?ref=http%3a%2f%2f{0}".format(urllib.quote(host)))
class AuthHandler(BaseHandler):
    """SSO callback: exchanges the token for user info, sets the secure
    "username" login cookie and sends the user back to the home page."""
    def get(self):
        token = self.get_argument('token', '')
        res = urllib.urlopen("http://sytest.cimc.com/sso/user/getUinfo?token=%s" % token)
        data = json.loads(res.read())
        if data.get("code") == 0:
            self.set_secure_cookie("username", data["data"].get("name"))
            self.redirect("/")
            # Bug fix: return here — the original fell through and called
            # redirect() a second time, which raises once the response has
            # already been finished.
            return
        self.redirect("/")
# http://sytest.cimc.com/sso/user/logout?ref=http%3A%2F%2Fticket.cimc.com%2Fuser%2Flogout
class FileViewHandler(BaseHandler):
    """Asks a terminal agent to upload one file for inline viewing.

    The agent POSTs the file back to /cli_upload?view=1..., which pushes
    the decoded content to the browser page over its websocket; this
    handler only reports whether the round trip succeeded.
    """
    @tornado.web.authenticated
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self):
        print 'want to download file from client,request body is:'
        print self.request.body
        body = json.loads(self.request.body)
        path = body['path']
        path = requests.utils.unquote(path)
        tid = body['tid']
        wid = body['wid']
        cid = 'cid' + str(uuid.uuid1())
        # Parked until the agent's answer arrives via the /resp endpoint.
        future = Future()
        _future_list[cid] = future
        # upload file through which url
        # url is the URL used for viewing (view=1 marks a view request)
        url = 'http://%s/cli_upload?view=1&tid=%s&cid=%s&wid=%s' % (config['web_server'], tid, cid, wid)
        print 'upload url is: ' + url
        paths = [path]
        t_server.request_upload(tid, paths, url, cid, wid)
        # 180s deadline for the agent to complete the upload.
        result = yield tornado.gen.with_timeout(time.time() + 180, future)
        del _future_list[cid]
        print 'downloadHandler get response from terminal'
        # handle response
        r = result['param']
        print "DownloadHandler r is:"
        print r
        if not r['result']:
            self.write({'result': False, 'msg': '下发失败,请稍后再试'})
        else:
            # get file
            self.write(json.dumps({'result': True}))
        pass
class DownloadHandler(BaseHandler):
    """Asks a terminal agent to upload one or more files so the browser
    can download them; the agent POSTs them back to /cli_upload."""
    @tornado.web.authenticated
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self):
        print 'want to download file from client,request body is:'
        print self.request.body
        body = json.loads(self.request.body)
        paths = body['paths']
        paths = [requests.utils.unquote(p) for p in paths]
        tid = body['tid']
        wid = body['wid']
        cid = 'cid' + str(uuid.uuid1())
        # Parked until the agent's answer arrives.
        future = Future()
        _future_list[cid] = future
        # upload file through which url
        url = 'http://%s/cli_upload?tid=%s&cid=%s&wid=%s' % (config['web_server'], tid, cid, wid)
        print 'upload url is: ' + url
        t_server.request_upload(tid, paths, url, cid, wid)
        # 180s deadline for the agent to complete the upload.
        result = yield tornado.gen.with_timeout(time.time() + 180, future)
        del _future_list[cid]
        print 'downloadHandler get response from terminal'
        # handle response
        r = result['param']
        print "DownloadHandler r is:"
        print r
        if not r['result']:
            self.write({'result': False, 'msg': '下发失败,请稍后再试'})
        else:
            self.write(json.dumps({'result': True}))
        pass
class RenameHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
@tornado.gen.coroutine
def post(self):
param = json.loads(self.request.body)
tid = param['tid']
newValue = param['newValue']
fullName = param['fullPath']
oldValue = param['oldValue']
connected_client = yield name_server.get_connected_client().keys()
if tid in connected_client:
future = Future()
cid = 'cid' + str(uuid.uuid1())
_future_list[cid] = future
excuteCmd = json.dumps({'fullName': fullName, 'newValue': newValue, 'oldValue': oldValue},
ensure_ascii=False)
t_server.request_rename(tid, excuteCmd, cid)
pass
print time.asctime(time.localtime(time.time()))
# todo: may raise a TimeoutError
try:
result = yield tornado.gen.with_timeout(time.time() + 180, future)
except Exception as e:
result = {'param': e, "result": False}
del _future_list[cid]
print 'response to rename:%s' % result
r = result['param']
if not r['result']:
self.write({'result': False, 'msg': r['msg']})
else:
self.write(json.dumps({'result': True, 'msg': '重命名成功!'}))
pass
pass
class GetProcessListHandler(tornado.web.RequestHandler):
    """Fetches the process list from a connected terminal agent."""
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self):
        param = json.loads(self.request.body)
        tid = param['tid']
        connected_client = yield name_server.get_connected_client()
        # NOTE(review): no response is written when tid is offline — the
        # request will simply time out on the browser side; confirm.
        if tid in connected_client.keys():
            future = Future()
            cid = 'cid' + str(uuid.uuid1())
            _future_list[cid] = future
            t_server.request_getprocesslist(tid, '', cid)
            try:
                result = yield tornado.gen.with_timeout(time.time() + 180, future)
            except Exception as e:
                # NOTE(review): 'param' holds the raw exception here, so
                # the r['result'] access below raises TypeError — confirm.
                result = {'param': e, "result": False}
            del _future_list[cid]
            print 'response to getprocesslist:%s' % result
            r = result['param']
            if not r['result']:
                self.write({'result': False, 'msg': r['msg']})
            else:
                self.write(json.dumps({'result': True, 'msg': '获取成功!', 'list': r['list']}))
            pass
class UpgradeAgentHandler(tornado.web.RequestHandler):
    """Pushes an upgrade-package URL to the selected terminal agents."""
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self):
        body = self.request.body
        param = json.loads(body)
        # 'tids' is a comma-separated list of terminal ids.
        tids = str.split(str(param['tids']), ',')
        #upgrade package download url
        url = param['url']
        connected_client = yield name_server.get_connected_client()
        # NOTE(review): this future is never used — looks like dead code.
        future = Future()
        for tid in tids:
            #only upgrade linux agent
            if tid in connected_client.keys() and connected_client[tid] == 'posix':
                cid = 'cid' + str(uuid.uuid1())
                yield t_server.request_upgrade(tid, url, cid)
                pass
            pass
        pass
class RestartAgentHandler(tornado.web.RequestHandler):
    """Fire-and-forget: asks a connected terminal agent to restart itself.

    No response body is written; the caller is not told whether the agent
    was connected.
    """
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self):
        tid = self.get_argument('tid')
        connected_client = yield name_server.get_connected_client()
        if tid in connected_client.keys():
            cid = 'cid' + str(uuid.uuid1())
            t_server.restart_agent(tid, cid)
            pass
        pass
    pass
class KillProcess(tornado.web.RequestHandler):
    """Asks a terminal agent to kill the given pid and reports the result."""
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self):
        param = json.loads(self.request.body)
        tid = param['tid']
        pid = param['pid']
        connected_client = yield name_server.get_connected_client()
        if tid in connected_client.keys():
            future = Future()
            cid = 'cid' + str(uuid.uuid1())
            _future_list[cid] = future
            t_server.kill_proce(tid, pid, cid)
            try:
                result = yield tornado.gen.with_timeout(time.time() + 180, future)
            except Exception as e:
                # NOTE(review): 'param' holds the raw exception here, so
                # the r['result'] access below raises TypeError — confirm.
                result = {'param': e, "result": False}
            del _future_list[cid]
            print 'response to getprocesslist:%s' % result
            r = result['param']
            if not r['result']:
                self.write({'result': False, 'msg': r['msg']})
            else:
                self.write(json.dumps({'result': True, 'msg': '操作成功!'}))
            pass
class FileDeleteHandler(tornado.web.RequestHandler):
    """Asks a terminal agent to delete the given paths."""
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self):
        param = json.loads(self.request.body)
        tid = param['tid']
        file_path = param['paths']
        connected_client = yield name_server.get_connected_client()
        if tid in connected_client.keys():
            future = Future()
            cid = 'cid' + str(uuid.uuid1())
            _future_list[cid] = future
            params = json.dumps({'filePath': file_path}, ensure_ascii=False)
            t_server.request_delete_file(tid, params, cid)
            pass
        print time.asctime(time.localtime(time.time()))
        # todo: may raise a TimeoutError
        # NOTE(review): when tid is offline, 'future'/'cid' below are
        # undefined and this raises NameError — confirm.
        try:
            result = yield tornado.gen.with_timeout(time.time() + 180, future)
        except Exception as e:
            # NOTE(review): 'param' holds the raw exception; r['result']
            # below will then raise TypeError — confirm.
            result = {'param': e, "result": False}
        del _future_list[cid]
        print 'response to delete:%s' % result
        r = result['param']
        if not r['result']:
            self.write({'result': False, 'msg': r['msg']})
        else:
            self.write(json.dumps({'result': True, 'msg': '删除成功!'}))
        pass
    pass
class UnzipHandler(tornado.web.RequestHandler):
    """Asks a terminal agent to unzip an archive in place."""
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self):
        param = json.loads(self.request.body)
        tid = param['tid']
        file_path = param['path']
        connected_client = yield name_server.get_connected_client()
        if tid in connected_client.keys():
            future = Future()
            cid = 'cid' + str(uuid.uuid1())
            _future_list[cid] = future
            t_server.request_unzip_file(tid, file_path, cid)
            pass
        print time.asctime(time.localtime(time.time()))
        # todo: may raise a TimeoutError
        # NOTE(review): when tid is offline, 'future'/'cid' below are
        # undefined and this raises NameError — confirm.
        try:
            result = yield tornado.gen.with_timeout(time.time() + 180, future)
        except Exception as e:
            # NOTE(review): 'param' holds the raw exception; r['result']
            # below will then raise TypeError — confirm.
            result = {'param': e, "result": False}
        del _future_list[cid]
        print 'response to unzip:%s' % result
        r = result['param']
        if not r['result']:
            self.write({'result': False, 'msg': r['msg']})
        else:
            self.write(json.dumps({'result': True, 'msg': '解压成功!'}))
        pass
    pass
class FineUploadHandler(tornado.web.RequestHandler):
    """Receives a browser (Fine Uploader) upload.

    The file is stored under uploads/<qquuid>/; when tid and path are
    supplied the file is additionally pushed down to that terminal agent.
    """
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self):
        fileuuid = self.get_argument('qquuid')
        filename = self.get_argument('qqfilename')
        tid = self.get_argument('tid', None)
        upload_file = self.request.files['qqfile'][0]
        path = self.get_argument('path', None)
        print 'received file, filename is %s' % filename
        path_dir = 'uploads/' + fileuuid
        if not os.path.exists(path_dir):
            os.makedirs(path_dir)
        path_file = path_dir + '/' + filename
        with open(path_file, 'w') as fh:
            fh.write(upload_file['body'])
        url = ('http://%s/uploads/%s/%s' % (config['web_server'], fileuuid, filename)).encode('utf-8')
        # No tid/path given: the file is not destined for a terminal, so
        # just answer with the download URL.
        if not tid or not path:
            self.write({'success':True, 'msg':'upload success', 'param': url})
            return
        cid = 'cid' + str(uuid.uuid1())
        future = Future()
        _future_list[cid] = future
        print 'config is :'
        print config
        print 'download url is: ' + url
        dest_path = (os.path.join(path, filename)).encode('utf-8')
        t_server.request_download(tid, dest_path, url, cid)
        # Longer (1 hour) deadline: the agent may be on a slow link.
        result = yield tornado.gen.with_timeout(time.time() + 3600, future)
        del _future_list[cid]
        print 'downloadHandler get response from terminal'
        # handle response
        r = result['param']
        if not r['result']:
            self.write({'success': False, 'msg': '下发失败,请稍后再试'})
        else:
            self.write(json.dumps({'success': True}))
        pass
class WebUploadHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
@tornado.gen.coroutine
def post(self):
path = self.get_argument('path')
path = requests.utils.unquote(path)
tid = self.get_argument('tid')
cli_down = self.get_argument('cli_down', 1)
fileinfo = self.request.files['upload_file'][0]
fname = fileinfo['filename']
print 'received file, filename is %s' % fname
fn_part = os.path.splitext(fname)
extn = None
if len(fn_part) > 1:
extn = fn_part[1]
sub_path = str(uuid.uuid4())
cname = "{0}{1}".format(fn_part[0], extn)
path_dir = 'uploads/' + sub_path
if not os.path.exists(path_dir):
os.makedirs(path_dir)
with open(path_dir + '/' + cname, 'w') as fh:
fh.write(fileinfo['body'])
cid = 'cid' + str(uuid.uuid4())
future = Future()
_future_list[cid] = future
print 'config is :'
print config
url = 'http://%s/uploads/%s/%s' % (config['web_server'], sub_path, cname)
print 'download url is: ' + url
t_server.request_download(tid, path + "\\" + cname, url, cid)
result = yield tornado.gen.with_timeout(time.time() + 180, future)
del _future_list[cid]
print 'downloadHandler get response from terminal'
# handle response
r = result['param']
if not r['result']:
self.write({'result': False, 'msg': '下发失败,请稍后再试'})
else:
self.write(json.dumps({'result': True}))
pass
pass
class DirTreeHandler(BaseHandler):
    """Lists a directory on a terminal, returning dir/file entries separately.

    Query args: id (URL-encoded path), tid (terminal), pattern (optional
    filename filter). The terminal's reply resolves the future registered
    under the correlation id (cid).
    """
    @tornado.web.authenticated
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def get(self):
        path = self.get_argument('id')
        pattern = self.get_argument('pattern', None)
        path = requests.utils.unquote(path)
        if pattern is not None:
            pattern = requests.utils.unquote(pattern)
        tid = self.get_argument('tid')
        cid = 'cid' + str(uuid.uuid1())
        # Bug fix: register the future *before* issuing the request, so a
        # fast reply cannot arrive before anyone is waiting for it (the
        # original called t_server.get_file_list first).
        future = Future()
        _future_list[cid] = future
        t_server.get_file_list(tid, path, pattern, cid)
        result = yield tornado.gen.with_timeout(time.time() + 180, future)
        del _future_list[cid]
        # handle response
        r = result['param']
        if not r['result']:
            self.write({'result': False, 'msg': r['msg'], 'data': {'dir_list': [], 'file_list': []}})
        elif not r['list']:
            # covers both a missing/None list and an empty one; the original
            # had an extra, unreachable `len(r['list']) == 0` branch for this
            self.write({'result': True, 'msg': '没有子目录了', 'data': {'dir_list': [], 'file_list': []}})
        else:
            dir_list = [t for t in r['list'] if t['type'] == 'dir']
            file_list = [t for t in r['list'] if t['type'] == 'file']
            self.write({'result': True, 'msg': 'success', 'data': {'dir_list': dir_list, 'file_list': file_list}})
class HomeController(BaseHandler):
    """Home page, plus a POST probe that checks whether a terminal is online."""
    @tornado.web.authenticated
    @tornado.gen.coroutine
    def get(self):
        """Render the dashboard with the count of currently connected terminals."""
        tid = self.get_argument('tid', '')
        conn_t = yield name_server.get_connected_client()
        self.render('index.html', connect_total=len(conn_t.keys()),
                    user=self.get_current_user(), tid=tid)
        pass
    @tornado.web.authenticated
    @tornado.gen.coroutine
    def post(self, *args, **kwargs):
        """If `tid` is connected, render the operation-select page; otherwise answer 'no'."""
        tid = self.get_argument('tid', '')
        if tid in (yield name_server.get_connected_client()).keys():
            msg = 'ok'
            self.render('oper_select.html', tid=tid, lastbeat=None)
        else:
            msg = 'no'
        # NOTE(review): on the connected branch this write() runs *after*
        # render() has already finished the response — presumably only the
        # 'no' reply matters to the caller; confirm.
        self.write(msg)
class LogHandler(BaseHandler):
    """Render the log-browser page for one terminal."""
    @tornado.web.authenticated
    def get(self):
        # 'Terminal_ID' query argument selects the terminal; defaults to ''.
        terminal_id = self.get_argument('Terminal_ID', '')
        self.render('terminal/look_log.html', tid=terminal_id)
class Show_logHandler(BaseHandler):
    """Render the page that displays a single log file of a terminal."""
    @tornado.web.authenticated
    def get(self):
        # terminal id and log filename both come from the query string
        render_args = {
            'tid': self.get_argument('tid', ''),
            'filename': self.get_argument('filename', ''),
        }
        self.render('terminal/show_log.html', **render_args)
# Maps the 'oper' query value of Oper_Handler to the template implementing it.
views_dict = {
    'cmd': 'exec_cmd.html',
    'mysql': 'exec_mysql.html',
    'list_dir': 'list_dir.html',
    'sqlite': 'exec_sqlite.html',
    'upd_agent': 'upgrade_agent.html'
}
# Pending request futures keyed by correlation id (cid). Handlers register a
# future here before contacting a terminal; TerminalRespController resolves
# it when the terminal's reply arrives.
_future_list = {}
class Oper_Handler(BaseHandler):
    """Renders the operation page selected by `oper` for terminal `tid`.

    For POSIX terminals with oper='cmd', opens a pty session on the terminal
    first and renders the xterm.js-based page; otherwise renders the static
    template looked up in `views_dict`.
    """
    @tornado.web.authenticated
    @tornado.gen.coroutine
    def get(self, *args, **kwargs):
        error = True  # passed to the template: True when the terminal is offline
        tid = self.get_argument('tid', '')
        oper = self.get_argument('oper', '')
        db_path = self.get_argument('db_path', '')
        if db_path:
            # escape backslashes so the path survives template/JS embedding
            db_path = db_path.replace("\\", "\\\\")
        connected_client = yield name_server.get_connected_client()
        if tid in connected_client.keys():
            error = False
            res = {'result': None, 'lastbeat': None}
            # For Linux terminals, open pty.html, which uses xterm.js as the
            # in-browser terminal emulator.
            if oper == 'cmd' and connected_client[tid] == 'posix':
                session_id = 's' + str(uuid.uuid4())
                cid = "c"+str(uuid.uuid1())
                open_pty_future = Future()
                # ask the terminal to open a pty connected back to this server
                yield t_server.open_pty(tid, session_id,config['web_server'],cid)
                _future_list[cid] = open_pty_future
                # wait (up to 180s) for the terminal to confirm the pty is open
                result = yield tornado.gen.with_timeout(time.time() + 180, open_pty_future)
                self.render('terminal/%s' % 'pty.html', tid=tid, error=error, res=res, ws_host=config['web_server'],
                            session_id=session_id)
            else:
                self.render('terminal/%s' % views_dict[oper], tid=tid, error=error, res=res,db_path = db_path,
                            ws_host=config['web_server'])
        else:
            # terminal offline: render the page in its error state
            self.render('terminal/%s' % views_dict[oper], tid=tid, error=error)
        pass
class CMDHandler(BaseHandler):
    """Forwards a shell command to a connected terminal.

    The command output is not awaited here: the terminal pushes it back via
    TerminalRespController, which relays it to the websocket client `wid`.
    """
    # Bug fix: @tornado.web.authenticated was applied twice; once suffices.
    @tornado.web.authenticated
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self, *args, **kwargs):
        param = self.get_argument('param')  # command line to run
        tid = self.get_argument('tid')      # target terminal id
        wid = self.get_argument('wid')      # websocket client that gets the output
        connected_client = yield name_server.get_connected_client()
        if tid in connected_client.keys():
            cid = 'cid' + str(uuid.uuid1())  # correlation id for the reply
            yield t_server.send_cmd(tid, param, cid, wid)
            self.write({'result': True})
        else:
            # Previously this fell through silently with an empty response;
            # report the failure so the client is not left waiting.
            self.write({'result': False, 'msg': 'terminal not connected'})
class Restart_CMDHandler(BaseHandler):
    """Asks a connected terminal to restart its command session/process.

    Like CMDHandler, the result is pushed back asynchronously through
    TerminalRespController to the websocket client `wid`.
    """
    # Bug fix: @tornado.web.authenticated was applied twice; once suffices.
    @tornado.web.authenticated
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self, *args, **kwargs):
        param = self.get_argument('param')
        tid = self.get_argument('tid')
        wid = self.get_argument('wid')
        connected_client = yield name_server.get_connected_client()
        if tid in connected_client.keys():
            cid = 'cid' + str(uuid.uuid1())  # correlation id for the reply
            t_server.restart_cmd(tid, param, cid, wid)
            self.write({'result':True})
        else:
            # Previously this fell through silently with an empty response;
            # report the failure so the client is not left waiting.
            self.write({'result': False, 'msg': 'terminal not connected'})
class SqliteHandler(BaseHandler):
@tornado.web.authenticated
@tornado.web.authenticated
@tornado.web.asynchronous
@tornado.gen.coroutine
def post(self, *args, **kwargs):
param = self.get_argument('param')
db_path = self.get_argument('db_path','')
tid = self.get_argument('tid')
res = {'result': None, 'lastbeat': None}
connected_client = yield name_server.get_connected_client()
if tid in connected_client.keys():
future = Future()
res['lastbeat'] = None
cid = 'cid' + str(uuid.uuid1())
_future_list[cid] = future
if db_path:
db_path = db_path.replace("\\","\\\\")
t_server.send_sqlite(tid, param, cid, db_path)
pass
try:
result = yield tornado.gen.with_timeout(time.time() + 180, future)
except Exception as e:
result = {'param': e, "result": False}
del _future_list[cid]
print 'response to sqlite_cmd:%s' % result
r = result['param']
res['result'] = r
self.write(r)
else:
pass
pass
'''
Receives pushes (request replies) from the T server (terminal server).
'''
class TerminalRespController(BaseHandler):
    """Endpoint the terminal server POSTs terminal replies to.

    The body is base64-wrapped JSON carrying the correlation id (cid) of the
    originating request and the websocket client id (wid). Each reply both
    resolves any HTTP request still waiting on its future in `_future_list`
    and is forwarded verbatim to the matching websocket client.
    """
    # @tornado.web.authenticated  # auth deliberately disabled here? TODO confirm
    def post(self, *args, **kwargs):
        print 'get response from terminal_server'
        body = b64.b64_to_json(self.request.body)
        #print 'response is : ' + base64.b64decode(self.request.body)
        tid = body['tid']    # terminal id
        wid = body['wid']    # websocket client id
        cid = body['cid']    # correlation id of the original request
        cmd = body['cmd']    # read but unused here
        param = body['param']  # read but unused here
        global _future_list
        # resolve the pending HTTP request, if one is still waiting
        if cid in _future_list.keys():
            _future_list[cid].set_result(body)
            pass
        # forward the raw reply to the websocket client, if connected
        if wid in connected_web_client.keys():
            connected_web_client[wid].write_message(b64.json_to_b64(body))
            pass
        pass
    pass
if __name__ == "__main__":
if not os.path.exists('downloads/'):
os.makedirs('downloads')
if not os.path.exists('uploads/'):
os.makedirs('uploads')
tornado.options.parse_command_line()
app = Application()
app.listen(9000)
# http_server=tornado.httpserver.HTTPServer(app)
# http_server.bind(8081,'0.0.0.0')
# http_server.start(num_processes=0)
print 'Server Start listening'
tornado.ioloop.IOLoop.instance().start()
| [
"junming.pan@cimc.com"
] | junming.pan@cimc.com |
315d72af834a6d2b7b8f27b7b1a8abb3749beb4c | ef61c5f177ee44ac08325335fc28a12f3fccbb58 | /multiple_interactors_sample/gyaan/interactors/presenters/dtos.py | 8f079a22414d8b3a6adca3c0fec653af4333e0b6 | [] | no_license | bammidichandini/resource_management-chandini | 3c11c7b2eb5e2f8d3df5b55e4d3ee86a27ed5c3a | aa4ec50f0b36a818bebc2033cb39ee928e5be13c | refs/heads/master | 2022-12-01T19:59:25.366843 | 2020-07-23T09:10:42 | 2020-07-23T09:10:42 | 269,610,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | """
Created on 11/06/20
@author: revanth
"""
from dataclasses import dataclass
from typing import List
from gyaan.interactors.storages.dtos import DomainDTO, DomainStatsDTO, \
UserDetailsDTO, DomainJoinRequestDTO, CompletePostDetails
@dataclass
class DomainDetailsDTO:
    """Aggregated payload for presenting a domain's detail view."""
    domain: DomainDTO  # core domain attributes
    domain_stats: DomainStatsDTO  # aggregate statistics for the domain
    domain_experts: List[UserDetailsDTO]  # users marked as experts of this domain
    join_requests: List[DomainJoinRequestDTO]  # join requests for the domain
    requested_users: List[UserDetailsDTO]  # details of the users behind join_requests
    user_id: int  # id of the viewing user
    is_user_domain_expert: bool  # whether the viewing user is an expert here
@dataclass
class DomainDetailsWithPosts:
    """Domain details bundled together with the domain's post details."""
    post_details: CompletePostDetails  # post data for the domain
    domain_details: DomainDetailsDTO  # aggregated domain view data
| [
"chandini.bammidi123@gmail.com"
] | chandini.bammidi123@gmail.com |
caec951e632d67746caf8f1d17a4617f90d38e62 | 9397cb0c035204652fc99554fe848427b5fcb8ab | /hn_hiring_analysis.py | 8e62bf201c5f188db5414a2df423e3c3bcbc2f1b | [] | no_license | iblaine/hn-whoshiring-analysis | bf15bcd6a1838b7bee231a64bd237460ff1f3d83 | 46352014956891819d9b14850227ecf77311554f | refs/heads/main | 2023-07-20T04:02:53.066088 | 2023-07-13T02:26:09 | 2023-07-13T02:26:09 | 384,028,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,320 | py | # %%
import html
import json
import logging as log
import os.path
import re
import sys
from datetime import datetime
from typing import Dict, List, Tuple
from urllib.request import urlopen
import pandas as pd
from dateutil.relativedelta import relativedelta
from pandasql import sqldf
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
# Path to the local chromedriver binary used by Selenium.
chromedriver_location = "/Users/belliott/Downloads/chromedriver"
# Month range to crawl (inclusive), as "YYYY-MM-01" strings.
start_date = "2013-01-01"
end_date = "2021-07-01"
# Keywords tallied (case-insensitively) in each month's posts.
search_keywords = [
    "data engineer",
    "software engineer",
    "full stack",
    "fullstack",
    "ruby",
    "python",
    "hadoop",
    "snowflake",
    "ipo",
    "laid off",
    "remote",
]
# %%
# logging settings: timestamped lines with level, lineno and function name
log.getLogger().setLevel(log.INFO)
log.basicConfig(
    level=log.INFO,
    format="%(asctime)s %(levelname)-6s | %(lineno)+4s:%(funcName)-20s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)
log.info("Starting...")
# %%
# skip / debug
# Avoid running this accidentally outside of Interactive Mode in Visual Studio:
# a plain `python hn_hiring_analysis.py` run aborts here on purpose.
log.error("Exiting...run this in Interactive Mode in Visual Studio...")
sys.exit(1)
# %%
def get_first_hn_link(month_to_search: str) -> str:
    """Google-search the 'Ask HN: Who is hiring?' thread for a month and
    return the URL of the first result.

    Args:
        month_to_search: "YYYY-MM-DD" (only month/year are used).

    Returns:
        URL string of the first Google result.

    Exits the process (sys.exit(1)) when the first result's title does not
    match the expected thread title.
    """
    log.info(f"month_to_search: {month_to_search}")
    options = Options()
    options.add_argument("--headless")
    options.add_argument("--disable-gpu")
    browser = webdriver.Chrome(chromedriver_location, options=options)
    try:
        search_string = f'"Ask HN: Who is hiring? ({datetime.strptime(month_to_search, "%Y-%m-%d").strftime("%B %Y")})"'
        url = f"https://www.google.com/search?q={search_string}"
        browser.get(url)
        weblinks = browser.find_elements_by_xpath("//div[@class='g']//a[not(@class)]")
        # Bug fix: read element properties *before* tearing the browser down.
        # Each .text / .get_attribute access is a live webdriver call; the
        # original accessed them after close(), i.e. against a dead session.
        first_text = weblinks[0].text.split(" | ")[0]
        first_link = weblinks[0].get_attribute("href")
    finally:
        # Bug fix: quit() (not close()) also shuts down the chromedriver
        # process, so failed searches no longer leak driver processes.
        browser.quit()
    # make sure the first link we pull matches the link we want
    if first_text.lower() != search_string.lower().replace('"', ""):
        log.error(
            f"Unable to find correct HN link, url: {url}, month_to_search: {month_to_search}, search_string: {search_string}, first_text: {first_text}"
        )
        sys.exit(1)
    return first_link
# %%
# skip / debug: fetch the newest month's thread and extract the HN item id
curr_date = end_date
curr_hn_link = get_first_hn_link(curr_date)
hn_item_id = curr_hn_link.split("=")[1]  # item id is the value of the ?id= query param
# %%
def get_string_stats(content: str, search_string: str) -> Tuple[int, int]:
    """Count case-insensitive occurrences of *search_string* in *content*.

    Returns:
        A ``(total, unique)`` pair: ``total`` is the occurrence count and
        ``unique`` is 1 when there was at least one match, else 0.
    """
    haystack = content.lower()
    needle = search_string.lower()
    total = haystack.count(needle)
    return total, int(total > 0)
# %%
def get_post_ids(hn_item_id: str) -> List[str]:
    """Return the ids of all top-level comments of an HN item, newest first.

    Args:
        hn_item_id: numeric HN item id of the "Who is hiring?" thread.

    Returns:
        List of comment ids, sorted descending (newest id first); empty when
        the thread has no comments.
    """
    url = "https://hacker-news.firebaseio.com/v0/item/" + hn_item_id + ".json"
    # Bug fix: close the HTTP response deterministically instead of leaking it.
    with urlopen(url) as hn_response:
        hn_json = json.load(hn_response)
    # Bug fix: 'kids' is absent when the item has no comments; default to [].
    post_ids = hn_json.get("kids", []) if hn_json else []
    log.info(f"Number of post_ids found: {len(post_ids)}")
    post_ids.sort(reverse=True)
    return post_ids
# %%
def get_post_data(
    post_id: str, search_keywords: List[str]
) -> Tuple[str, str, str, str, dict]:
    """Fetch one HN comment and parse it as a "Who is hiring?" posting.

    The header convention is "Company | Location | Type | Position" before
    the first paragraph break; the full post text is then scanned for each
    search keyword (case-insensitive).

    Args:
        post_id: HN item id of the comment.
        search_keywords: Keywords to tally in the post text.

    Returns:
        (company_name, location, position_type, position_name, search_results)
        where search_results maps keyword -> {"cnt_total", "cnt_unique"}.
        Header fields are "" when the post is deleted/empty or lacks them.
    """
    url = f"https://hacker-news.firebaseio.com/v0/item/{str(post_id)}.json"
    log.info(f"Extracting structured data from url: {url}")
    # Bug fix: close the HTTP response deterministically instead of leaking it.
    with urlopen(url) as hn_post_response:
        hn_post_json = json.load(hn_post_response)
    search_results = {}
    for search_keyword in search_keywords:
        search_results[search_keyword] = {"cnt_total": 0, "cnt_unique": 0}
    company_name = location = position_type = position_name = ""
    # skip deleted posts and posts without any text
    if (
        (hn_post_json is not None)
        and not ("deleted" in hn_post_json and hn_post_json["deleted"])
        and ("text" in hn_post_json.keys())
    ):
        post = hn_post_json["text"]
        post_unescape = html.unescape(post)
        post_fulltext = post_unescape.replace("\n", " ")
        # header = everything before the first <p>, split on " | "
        post_header = post_fulltext.split("<p>")[0].split(" | ")
        company_name = post_header[0] if len(post_header) > 0 else ""
        location = post_header[1] if len(post_header) > 1 else ""
        position_type = post_header[2] if len(post_header) > 2 else ""
        position_name = post_header[3] if len(post_header) > 3 else ""
        for search_keyword in search_keywords:
            if search_keyword > "":
                cnt_total = post_fulltext.lower().count(search_keyword.lower())
                search_results[search_keyword]["cnt_total"] += cnt_total
                search_results[search_keyword]["cnt_unique"] += (
                    1 if cnt_total > 0 else cnt_total
                )
    return company_name, location, position_type, position_name, search_results
# %%
# skip / debug: pull all comment ids of the thread fetched above
post_ids = get_post_ids(hn_item_id)
# %%
# skip / debug: parse a single post to eyeball the structured output
post_id = post_ids[0]
get_post_data(post_id, search_keywords)
# company_name, location, position_type, position_name, search_results = get_post_data(post_id, search_keywords)
# %%
# skip / debug: parse the first ten posts and collect their header fields
company_names = []
locations = []
position_types = []
position_names = []
search_results = {}
for post_id in post_ids[0:10]:
    # NOTE(review): search_results is overwritten each iteration, so only the
    # last post's keyword counts survive this debug loop — confirm intended.
    (
        company_name,
        location,
        position_type,
        position_name,
        search_results,
    ) = get_post_data(post_id, search_keywords)
    company_names.append(company_name)
    locations.append(location)
    position_types.append(position_type)
    position_names.append(position_name)
# %%
# skip / debug: build per-value frequency dicts and a one-month metrics dict
d_company_names = {i: company_names.count(i) for i in set(company_names)}
d_locations = {i: locations.count(i) for i in set(locations)}
d_position_types = {i: position_types.count(i) for i in set(position_types)}
d_position_names = {i: position_names.count(i) for i in set(position_names)}
hn_metrics = {}
hn_metrics[curr_date] = {}
hn_metrics[curr_date]["company_names"] = d_company_names
hn_metrics[curr_date]["locations"] = d_locations
hn_metrics[curr_date]["position_types"] = d_position_types
hn_metrics[curr_date]["position_names"] = d_position_names
hn_metrics[curr_date]["search_results"] = search_results
# %%
# load previously saved hn_metrics so interrupted runs can resume
hn_metrics_file = "hn_metrics.json"
saved_hn_metrics = {}
if os.path.isfile(hn_metrics_file):
    # Bug fix: use a context manager so the file handle is closed
    # (the original left `f` open for the rest of the session).
    with open(hn_metrics_file) as f:
        saved_hn_metrics = json.load(f)
# %%
# update hn_metrics with new data
def update_hn_metrics(
    start_date: str, end_date: str, curr_date: str, hn_metrics: dict
) -> Dict:
    """Crawl HN "Who is hiring?" threads and merge the results into hn_metrics.

    Walks backwards one month at a time from `curr_date` (or `end_date` when
    `curr_date` is None) down to `start_date`, skipping months already present
    in `hn_metrics`. After each month is collected, the whole dict is written
    to hn_metrics.json so an interrupted run can resume.

    Args:
        start_date: Oldest month to process, "YYYY-MM-DD".
        end_date: Newest month to process, "YYYY-MM-DD".
        curr_date: Optional override; begin looping on this month.
        hn_metrics: Existing data; updated in place and returned.

    Returns:
        dict: `hn_metrics` with any newly collected months added.
    """
    log.info("Starting update_hn_metrics()")
    curr_date = end_date if curr_date is None else curr_date
    # Bug fix: skip months already present in the dict we were *given*.
    # The original read the module-level `saved_hn_metrics`, which silently
    # breaks when a caller passes any other dict as `hn_metrics`.
    dates_to_skip = list(hn_metrics.keys())
    log.info(
        f"start_date: {start_date}, end_date: {end_date}, dates_to_skip: {len(dates_to_skip)}"
    )
    while curr_date >= start_date:
        if curr_date not in dates_to_skip:
            log.info(f"Processing date {curr_date}...")
            hn_link = get_first_hn_link(curr_date)
            try:
                hn_item_id = re.findall(r".*id=(\d+).*", hn_link)[0]
            except Exception as e:
                log.error(f"Unable to parse {hn_link}, exception: {e}")
                sys.exit(1)
            post_ids = get_post_ids(hn_item_id)
            # accumulators for this month's posts
            company_names = []
            locations = []
            position_types = []
            position_names = []
            saved_search_results = {}
            for search_keyword in search_keywords:
                saved_search_results[search_keyword] = {}
                saved_search_results[search_keyword]["cnt_total"] = 0
                saved_search_results[search_keyword]["cnt_unique"] = 0
            post_cnt = 0
            for post_id in post_ids:
                log.info(f"Loading {curr_date}: {post_cnt} / {len(post_ids)}")
                (
                    company_name,
                    location,
                    position_type,
                    position_name,
                    search_results,
                ) = get_post_data(post_id, search_keywords)
                # sanity check: posts without a parsed company name are mostly
                # garbage/replies, so they are ignored entirely
                if company_name > "":
                    company_names.append(company_name)
                    locations.append(location)
                    position_types.append(position_type)
                    position_names.append(position_name)
                    for search_keyword in search_keywords:
                        saved_search_results[search_keyword][
                            "cnt_total"
                        ] += search_results[search_keyword]["cnt_total"]
                        saved_search_results[search_keyword][
                            "cnt_unique"
                        ] += search_results[search_keyword]["cnt_unique"]
                post_cnt += 1
            # fold this month's tallies into the main dict
            hn_metrics[curr_date] = {}
            hn_metrics[curr_date]["company_names"] = {
                i: company_names.count(i) for i in set(company_names)
            }
            hn_metrics[curr_date]["locations"] = {
                i: locations.count(i) for i in set(locations)
            }
            hn_metrics[curr_date]["position_types"] = {
                i: position_types.count(i) for i in set(position_types)
            }
            hn_metrics[curr_date]["position_names"] = {
                i: position_names.count(i) for i in set(position_names)
            }
            hn_metrics[curr_date]["search_results"] = saved_search_results
            log.info(f"Finished date {curr_date}...")
            # checkpoint after every month so progress survives interruption
            with open("hn_metrics.json", "w") as outfile:
                json.dump(hn_metrics, outfile)
        else:
            log.info(f"Skipping date {curr_date}...")
        # step back one calendar month
        curr_date = (
            datetime.strptime(curr_date, "%Y-%m-%d") - relativedelta(months=1)
        ).strftime("%Y-%m-%d")
    log.info("No dates left process...")
    return hn_metrics
# %%
# update hn_metrics: crawl any months missing from the previously saved data
hn_metrics = update_hn_metrics(
    start_date=start_date,
    end_date=end_date,
    curr_date=None,
    hn_metrics=saved_hn_metrics,
)
# %%
# Save hn_metrics to disk (final write; the crawler also checkpoints per month)
with open("hn_metrics.json", "w") as outfile:
    json.dump(hn_metrics, outfile)
# %%
# Denormalize the nested hn_metrics dict into flat rows for a DataFrame.
denormalized_data = []
columns = [
    "start_date",
    "category",
    "value",
    "cnt",
    "cnt_total",
    "cnt_unique",
]


def _count_rows(start_date, category, counts):
    """Rows for a plain {value: count} mapping; total/unique columns unused (0)."""
    return [[start_date, category, value, cnt, 0, 0] for value, cnt in counts.items()]


for start_date in sorted(hn_metrics.keys()):
    month = hn_metrics[start_date]
    # the four simple per-value counters all share one row shape
    for category in ("company_names", "locations", "position_types", "position_names"):
        denormalized_data.extend(_count_rows(start_date, category, month[category]))
    # search_results instead carry total/unique keyword-hit counts
    for keyword in month["search_results"]:
        stats = month["search_results"][keyword]
        denormalized_data.append(
            [start_date, "search_results", keyword, 0, stats["cnt_total"], stats["cnt_unique"]]
        )

df = pd.DataFrame(data=denormalized_data, columns=columns)
# %%
title = "HN Who's Hiring posts over time"
sql = """
SELECT
start_date AS start_date,
SUM(cnt) AS sum_cnt
FROM
df
WHERE
category = 'company_names'
GROUP BY
start_date
ORDER BY
start_date ASC
"""
sqldf(sql).plot("start_date", "sum_cnt", title=title)
# %%
title = "Remote keyword / num posts"
sql = """
WITH post_cnt AS (
SELECT
start_date AS start_date,
SUM(cnt) AS sum_cnt
FROM
df
WHERE
category = 'company_names'
GROUP BY
start_date
ORDER BY
start_date ASC
),
remote_cnt AS (
SELECT
start_date, SUM(cnt_total) AS sum_cnt_total, SUM(cnt_unique) AS sum_cnt_unique
FROM
df
WHERE
category = 'search_results'
AND value = 'remote'
AND start_date >= '2013-01-01'
GROUP BY start_date
ORDER BY start_date
)
SELECT
post_cnt.start_date,
ROUND(remote_cnt.sum_cnt_total,2) / ROUND(post_cnt.sum_cnt,2) AS remote_frequency
FROM
post_cnt
INNER JOIN
remote_cnt
ON
remote_cnt.start_date = post_cnt.start_date
ORDER BY
post_cnt.start_date
"""
sqldf(sql).plot("start_date", "remote_frequency", title=title)
# %%
title = "Frequency of Data Engineer over time"
sql = """
SELECT
start_date, SUM(cnt_total) AS sum_cnt_total, SUM(cnt_unique) AS sum_cnt_unique
FROM
df
WHERE
category = 'search_results'
AND value = 'data engineer'
AND start_date >= '2013-01-01'
GROUP BY start_date
ORDER BY start_date
"""
sqldf(sql).plot("start_date", "sum_cnt_total", title=title)
# %%
# How has "remote" factored into job descriptions since covid?
title = "Frequency of Remote over time"
sql = """
SELECT
start_date, SUM(cnt_total) AS sum_cnt_total, SUM(cnt_unique) AS sum_cnt_unique
FROM
df
WHERE
category = 'search_results'
AND value = 'remote'
AND start_date >= '2013-01-01'
GROUP BY start_date
ORDER BY start_date
"""
sqldf(sql).plot("start_date", "sum_cnt_total", title=title)
# %%
# What companies have been posting the most to HN Who's Hiring threads over time?
title = "Frequency of popular companies over time"
sql = """
SELECT
    start_date AS start_date,
    SUM(cnt) AS sum_cnt
FROM
    df
WHERE
    category = 'company_names'
GROUP BY
    start_date
ORDER BY
    start_date
"""
# Bug fix: the query aliases the sum as `sum_cnt`; plotting the nonexistent
# `sum_cnt_total` column raised a KeyError. ORDER BY added so the x-axis is
# chronological, matching the other cells.
sqldf(sql).plot("start_date", "sum_cnt", title=title)
# %%
# sanity check: peek at the first rows of the denormalized frame
sqldf("SELECT * FROM df LIMIT 10")
# %%
# test graph w/SQL
sql = """
SELECT
value AS company_name,
SUM(cnt) AS sum_cnt,
MIN(start_date) AS min_start_date,
MAX(start_date) AS max_start_date
FROM
df
WHERE
category = 'company_names'
GROUP BY
value
ORDER BY
SUM(cnt) DESC
LIMIT 10
"""
sqldf(sql)
# %%
# test graph w/SQL
sql = """
SELECT
start_date, SUM(cnt_total) AS sum_cnt_total, SUM(cnt_unique) AS sum_cnt_unique
FROM
df
WHERE
category = 'search_results'
GROUP BY start_date
ORDER BY start_date
"""
sqldf(sql).plot("start_date", "sum_cnt_total")
# %%
| [
"belliott@onemedical.com"
] | belliott@onemedical.com |
31ad3cbce963b797afbb69ec236ca012a25b0668 | c807ab961d0dea5eb3936547e843673247639a07 | /Personel/Akshay/python/Assingment_1/2.squareroot.py | 0a55d90b9800dbd85b433f12985729bf1767b510 | [] | no_license | shankar7791/MI-11-DevOps | dbac94ca1fb4627ae44658701bcddcd22c65a3d4 | 63a0a65b05192439575ed2c47a6c3d33c5be87d2 | refs/heads/main | 2023-07-12T20:33:30.121801 | 2021-08-13T03:01:17 | 2021-08-13T03:01:17 | 355,145,424 | 0 | 4 | null | 2021-08-12T19:31:44 | 2021-04-06T10:19:10 | Python | UTF-8 | Python | false | false | 79 | py | import math
a = input("what do you wnat to square root : ")
print(math.sqrt(a)) | [
"akshay.patil5596@gmail.com"
] | akshay.patil5596@gmail.com |
06ef8f522a1f1f2eab2f5e32ce654eb89032fab2 | 486df73bb95a225ee405575aa6915a28f205dc56 | /weblogic_shutdown.py | 8cec55df686ceef6b325b55997dbab5ca904ad15 | [] | no_license | asdg-asdf/pyscripts | 8162ac690bdc472aedcbb4e33eceffe2f24caec0 | 269de7c5f7ec9d2da74790a35bc218d3f6634565 | refs/heads/master | 2021-01-24T04:20:21.898971 | 2019-03-29T08:58:08 | 2019-03-29T08:58:08 | 122,933,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | #!/usr/weblogic/bea/oracle/wlserver/common/bin/wlst.sh
import sys
print "shutdown testappsrv server....."
connect('weblogic','weblogic','t3://localhost:7001')
shutdown('testappsrv','Server','false',1000,'true', 'false')
print "shutdown testappsrv server Success........................"
| [
"noreply@github.com"
] | asdg-asdf.noreply@github.com |
1922f3173afe74ca57b506cdd6eed4c76a7f6572 | 8c917dc4810e2dddf7d3902146280a67412c65ea | /v_11/masa_project/branches/masa/hr_recruitment_budget/models/__init__.py | 8ee6f5f30b06f0e3515e4f5c44a97e3c72a8193c | [] | no_license | musabahmed/baba | d0906e03c1bbd222d3950f521533f3874434b993 | 0b997095c260d58b026440967fea3a202bef7efb | refs/heads/master | 2021-10-09T02:37:32.458269 | 2018-12-20T06:00:00 | 2018-12-20T06:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# NCTR, Nile Center for Technology Research
# Copyright (C) 2018-2019 NCTR (<http://www.nctr.sd>).
#
##############################################################################
from . import hr_recruitment_budget
| [
"bakry@exp-sa.com"
] | bakry@exp-sa.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.