| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3–281) | content_id (string, len 40) | detected_licenses (list, len 0–57) | license_type (string, 2 classes) | repo_name (string, len 6–116) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 313 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 18.2k–668M, nullable) | star_events_count (int64, 0–102k) | fork_events_count (int64, 0–38.2k) | gha_license_id (string, 17 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 107 classes) | src_encoding (string, 20 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 4–6.02M) | extension (string, 78 classes) | content (string, len 2–6.02M) | authors (list, len 1) | author (string, len 0–175) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7259ca3438f85859971f088f4b36164e0b9ba675
|
38d39a3c50b161e03599456ce8fb335bc3fb1b6e
|
/create-cpp-data.py
|
2e5f9fa2a9091c93aca63c7866e22368c1eb757d
|
[
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] |
permissive
|
rymis/tz.js
|
6e159e85c714286927eb55f52a7bb55d1014dcc4
|
a1136c58fec5fe9c389b073b62d2296fbb5fe6a6
|
refs/heads/master
| 2020-07-14T18:22:01.711073
| 2019-09-06T15:13:01
| 2019-09-06T15:13:01
| 205,372,623
| 0
| 0
|
NOASSERTION
| 2019-09-06T15:13:02
| 2019-08-30T11:53:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,612
|
py
|
#!/usr/bin/env python
import json
import os
import sys
import shutil
INPUT_DIR = os.path.abspath(os.path.dirname(sys.argv[0]))
OUTPUT_DIR = os.path.join(INPUT_DIR, "output")
DATA_JSON = os.path.join(OUTPUT_DIR, "data.json")
INPUT_CPP = os.path.join(INPUT_DIR, "tzcpp.cpp.in")
OUTPUT_CPP = os.path.join(OUTPUT_DIR, "tzcpp.cpp")
INPUT_H = os.path.join(INPUT_DIR, "tzcpp.h")
OUTPUT_H = os.path.join(OUTPUT_DIR, "tzcpp.h")
def build_cpp(version, tz_version, zones, links):
# Process zones
zones_list = []
rules_list = []
zones_last = 0
db_list = []
timezones_list = []
rules_map = { }
rule_names = { }
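    # rule_names collects, per abbreviation, the distinct rule strings seen; it is not used further in this script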
def rule_str(t):
res = ' { %d, "%s", %s },' % (t["o"], t["a"], str(t["d"]).lower())
if t["a"] not in rule_names:
rule_names[t["a"]] = set()
rule_names[t["a"]].add(res)
return res
def rule_idx(r):
s = rule_str(r)
if s not in rules_map:
rules_map[s] = len(rules_list)
rules_list.append(s)
return rules_map[s]
for nm, zone in zones.items():
begin = zones_last
zones_list.append("// %s" % nm)
for idx, t in zip(zone["ltidx"], zone["times"]):
zones_last += 1
zones_list.append(' { %d, %d },' % (rule_idx(zone["types"][idx]), t))
db_list.append(' { "%s", %d },' % (nm, len(timezones_list)))
timezones_list.append(' { %d, %d, "%s", "%s" },' % (begin, zones_last, zone["rule"], nm))
with open(INPUT_CPP, "rb") as cpp_in:
cpp_in_source = cpp_in.read()
cpp_in_source = cpp_in_source.replace("// @ZONE_RULES@", "\n".join(rules_list))
cpp_in_source = cpp_in_source.replace("// @ZONES@", "\n".join(zones_list))
cpp_in_source = cpp_in_source.replace("// @TIMEZONES@", "\n".join(timezones_list))
cpp_in_source = cpp_in_source.replace("// @DATABASE@", "\n".join(db_list))
cpp_in_source = cpp_in_source.replace("// @LINKS@", "\n".join([ ' { "%s", "%s" },' % (k, v) for k, v in links.items() ]))
cpp_in_source = cpp_in_source.replace("@VERSION@", version)
cpp_in_source = cpp_in_source.replace("@DATA_VERSION@", tz_version)
return cpp_in_source
if __name__ == '__main__':
data_json = json.load(sys.stdin)
links = data_json["links"]
zones = data_json["zones"]
version = data_json["version"]
tz_version = data_json["tzversion"]
cpp_source = build_cpp(version, tz_version, zones, links)
    with open(OUTPUT_CPP, "w") as cpp_out:
        cpp_out.write(cpp_source)
shutil.copy(INPUT_H, OUTPUT_H)
|
[
"mikhail.ryzhov@kiwi.com"
] |
mikhail.ryzhov@kiwi.com
|
af21a46323d7415dbd6a00afd00643952cdfb613
|
e4309509368571a85439daed31a5467ab75d68a2
|
/PyTrain-master/session1/json_work.py
|
e344f885d0846a0b3dfa535ab15b1942d9e1dac6
|
[] |
no_license
|
ragavendranps/Python
|
f3e66bebdf397deee9b3dff64974df2ac0760a92
|
c67d6f571939f5f2ec81852b6741b3e07b72aa56
|
refs/heads/master
| 2022-11-05T10:52:34.008508
| 2020-06-22T18:03:10
| 2020-06-22T18:03:10
| 273,499,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
import json
# some JSON:
x = '{ "name":"John", "age":30, "city":"New York"}'
# parse x:
y = json.loads(x)
print(type(x))
print(type(y))
# the result is a Python dictionary:
print(y["age"])
|
[
"67148832+ragavendranps@users.noreply.github.com"
] |
67148832+ragavendranps@users.noreply.github.com
|
dbabe6ca1208d9ea8425eff6d26c6d13353f0b27
|
9124f0bd5e9c20f7e4bc44cf5b588b5953df195e
|
/DoutuWeb.py
|
ac1bad8e8e238dc60328fcce77e3c0be86f29b31
|
[] |
no_license
|
zhengquantao/crawl
|
d34d5704b0a1b772b97f63cc3074cdaf3bfdc1cc
|
dce7bfcf10c0080c2bc4d20465c5aba7687057ca
|
refs/heads/master
| 2020-04-10T09:26:15.600629
| 2018-12-08T12:19:39
| 2018-12-08T12:19:39
| 160,936,237
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,253
|
py
|
import requests
import re
import pymysql
db=pymysql.connect(host='127.0.0.1',port=3306,db='pysql',user='root',passwd='12345678',charset='utf8')
cursor=db.cursor()
cursor.execute("select*from images")
# print(cursor.fetchall())
def getHtml():
try:
for n in range(125, 151):
url = 'http://www.doutula.com/photo/list/?page='+str(n)
r=requests.get(url)
html=r.text
# re=r'data-original="(.*?)".*?alt="(.*?)"'
# reg=re.compile(re,re.S)
# lists=re.findall(reg,html)
geturl=re.findall(r'data-original="(.*?)"',html)
getname=re.findall(r'alt="(.*?)"',html)
# print(len(getname))
for i in range(len(geturl)):
geturls=geturl[i]
getnames=getname[i]
# print(geturls)
# cursor.execute("insert into images(~name~,~imageUrl~) values('{}','{}'".format(getnames,geturls))
cursor.execute("insert into images(name,imageUrl) values(%s,%s)",[getnames,geturls])
# print("正在保存%s"%getnames)
print("{:.2f}%".format(i/68*100))
                # commit the updates
db.commit()
except:
return "a"
getHtml()
|
[
"1483906080@qq.com"
] |
1483906080@qq.com
|
ad8c5d77245df03ba8f4e8cf1b8eac1ca17bbbb9
|
871c67ed1bd1ef90d88f7c46afefeab0bb4dc965
|
/myProject/online/apps/courses/adminx.py
|
ec8d0265df9ef321db9f8ac447887c5146193838
|
[] |
no_license
|
Limenation/bishe
|
fb71f93a6c9d550db78bc6da1bf81f84bdf2169c
|
371180932e34efb96005700424ccc6905f68344a
|
refs/heads/master
| 2022-07-15T14:44:29.889204
| 2020-05-16T07:35:54
| 2020-05-16T07:35:54
| 262,919,067
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,204
|
py
|
import xadmin
from .models import Courseinfo, Lession, Video, CourseResource, CourseBanner
from organizations.models import Organizationinfo
class LessonInline:
"""添加课程的时候可以顺便添加章节"""
model = Lession
readonly_fields = ['add_time']
extra = 0
class CourseResourceInline:
"""添加课程的时候可以顺便添加课程资源"""
model = CourseResource
readonly_fields = ['add_time']
extra = 0
class CourseinfoAdmin:
"""课程信息管理"""
list_display = ['name', 'teacher', 'course_org', 'desc',
'category', 'degree', 'learn_time', 'students', 'fav_nums',
'click_nums', 'is_banner', 'add_time']
list_filter = ['course_org', 'teacher', 'name', 'image', 'desc',
'degree', 'students', 'learn_time', 'category', 'fav_nums',
'click_nums', 'detail', 'is_banner', 'before_know', 'teacher_tell']
search_fields = ['course_org', 'teacher', 'name', 'image', 'desc',
'degree', 'students', 'learn_time', 'category', 'fav_nums',
'click_nums', 'detail', 'is_banner', 'before_know', 'teacher_tell', 'add_time']
readonly_fields = ['fav_nums', 'click_nums', 'students', 'add_time']
    # refresh_times = [3, 5]  # configure page auto-refresh
def queryset(self):
"""筛选非轮播课程"""
qs = super(CourseinfoAdmin,self).queryset()
qs = qs.filter(is_banner=False)
return qs
def save_models(self):
"""在保存课程时,修改机构的课程总数"""
obj = self.new_obj
obj.save()
if obj.course_org is not None:
course_org = obj.course_org
course_org.course_nums = Courseinfo.objects.filter(course_org=course_org).count()
course_org.save()
class CourseBannerAdmin:
"""课程信息管理-轮播课程"""
list_display = ['name', 'teacher', 'course_org', 'desc',
'category', 'degree', 'learn_time', 'students', 'fav_nums',
'click_nums', 'is_banner', 'add_time']
list_filter = ['course_org', 'teacher', 'name', 'image', 'desc',
'degree', 'students', 'learn_time', 'category', 'fav_nums',
'click_nums', 'detail', 'is_banner', 'before_know', 'teacher_tell']
search_fields = ['course_org', 'teacher', 'name', 'image', 'desc',
'degree', 'students', 'learn_time', 'category', 'fav_nums',
'click_nums', 'detail', 'is_banner', 'before_know', 'teacher_tell', 'add_time']
readonly_fields = ['fav_nums', 'click_nums', 'students', 'add_time']
def queryset(self):
"""筛选轮播课程"""
qs = super(CourseBannerAdmin,self).queryset()
qs = qs.filter(is_banner=True)
return qs
def save_models(self):
"""在保存课程时,修改机构的课程总数"""
obj = self.new_obj
obj.save()
if obj.course_org is not None:
course_org = obj.course_org
course_org.course_nums = Courseinfo.objects.filter(course_org=course_org).count()
course_org.save()
class LessionAdmin:
"""章节管理"""
list_display = ['course', 'name', 'add_time']
list_filter = ['course', 'name']
search_fields = ['course', 'name', 'add_time']
readonly_fields = ['add_time']
class VideoAdmin:
"""课程小节/视频管理"""
list_display = ['lession', 'name', 'url', 'learn_time', 'add_time']
list_filter = ['lession', 'name', 'url', 'learn_time']
search_fields = ['lession', 'name', 'url', 'learn_time', 'add_time']
readonly_fields = ['add_time']
class CourseResourceAdmin:
"""课程资料管理"""
list_display = ['lession', 'name', 'download', 'add_time']
list_filter = ['lession', 'name', 'download']
search_fields = ['lession', 'name', 'download', 'add_time']
readonly_fields = ['add_time']
xadmin.site.register(Courseinfo, CourseinfoAdmin)
xadmin.site.register(CourseBanner, CourseBannerAdmin)
xadmin.site.register(Lession, LessionAdmin)
xadmin.site.register(Video, VideoAdmin)
xadmin.site.register(CourseResource, CourseResourceAdmin)
|
[
"1448918377@qq.com"
] |
1448918377@qq.com
|
c2d75f8bbadf428b1d890435ae40bd179a74edc5
|
1d1a21b37e1591c5b825299de338d18917715fec
|
/ML,DL, RL/Machine Learning/ml/m42_xgb_qpu.py
|
477dafeb04fe720c755d988a9fb2f21ae8325e6c
|
[] |
no_license
|
brunoleej/study_git
|
46279c3521f090ebf63ee0e1852aa0b6bed11b01
|
0c5c9e490140144caf1149e2e1d9fe5f68cf6294
|
refs/heads/main
| 2023-08-19T01:07:42.236110
| 2021-08-29T16:20:59
| 2021-08-29T16:20:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,122
|
py
|
# XGBoost
# tree_method = 'gpu_hist' : train on the GPU instead of the CPU; when running, overall GPU utilization stays low while only the CUDA cores are active
# predictor='gpu_predictor' : run prediction on the GPU
# predictor='cpu_predictor' : run prediction on the CPU
# gpu_id=0 : select which GPU to use
from xgboost import XGBClassifier, XGBRegressor
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.metrics import r2_score
datasets = load_boston()
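# NOTE: load_boston is deprecated and has been removed from newer scikit-learn releases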
x = datasets.data
y = datasets.target
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8, random_state=66)
model = XGBRegressor(n_estimators=100000, learning_rate=0.01,
tree_method = 'gpu_hist',
# predictor='gpu_predictor'
predictor='cpu_predictor',
gpu_id=0
)
model.fit(x_train, y_train, verbose=1, eval_metric=['rmse'],
eval_set =[(x_train, y_train), (x_test, y_test)],
early_stopping_rounds=10000
)
aaa = model.score(x_test, y_test)
print("model.score : ",aaa)
# model.score : 0.9254888275792001
|
[
"jk04059@naver.com"
] |
jk04059@naver.com
|
47b74b1775ebe7c948754a92b962e1cee4c592e8
|
4d327de5447519d3c00e6572f74362380783006f
|
/source/res/scripts/client/gui/Scaleform/daapi/view/lobby/rankedBattles/RankedBattlesCalendarPopover.py
|
1597d3315544c1d1c8513bc4b036cfea883af256
|
[] |
no_license
|
XFreyaX/WorldOfTanks-Decompiled
|
706ac55d919b766aa89f90c97a75672bf2142611
|
5025466edd0dd3e5e50a6c60feb02ae793f6adac
|
refs/heads/master
| 2021-09-21T15:10:32.655452
| 2018-08-28T07:34:00
| 2018-08-28T07:34:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,129
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/rankedBattles/RankedBattlesCalendarPopover.py
from datetime import datetime
import BigWorld
from gui.Scaleform.locale.COMMON import COMMON
from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
from gui.Scaleform.genConsts.TOOLTIPS_CONSTANTS import TOOLTIPS_CONSTANTS
from gui.Scaleform.managers.UtilsManager import UtilsManager
from gui.ranked_battles.ranked_models import CYCLE_STATUS
from helpers import i18n, dependency
from gui.Scaleform.daapi.view.meta.RankedBattlesCalendarPopoverMeta import RankedBattlesCalendarPopoverMeta
from gui.Scaleform.locale.RANKED_BATTLES import RANKED_BATTLES
from gui.shared.formatters import text_styles
from helpers import time_utils
from skeletons.gui.game_control import IRankedBattlesController
from skeletons.connection_mgr import IConnectionManager
ARROW_LEFT = 3
class RankedBattlesCalendarPopover(RankedBattlesCalendarPopoverMeta):
rankedController = dependency.descriptor(IRankedBattlesController)
connectionMgr = dependency.descriptor(IConnectionManager)
arrowDirection = ARROW_LEFT
def __init__(self, ctx=None):
super(RankedBattlesCalendarPopover, self).__init__()
self.__seasonInfo = self.rankedController.getCurrentSeason()
self.__currentCycle = self.__seasonInfo.getNumber()
self.__selectedDate = time_utils.getCurrentLocalServerTimestamp()
self.__weekDays = self._createUtilsManager().getWeekDayNames(full=True, isLower=False, isUpper=False, useRegionSettings=False)
data = ctx.get('data', None)
if data is not None:
self.arrowDirection = data.arrowDirection
return
def _createUtilsManager(self):
return UtilsManager()
def _populate(self):
super(RankedBattlesCalendarPopover, self)._populate()
self.as_setDataS({'rawDate': self.__selectedDate,
'arrowDirection': self.arrowDirection,
'statusText': self.__getCurrnetCycleString(),
'statusTooltip': TOOLTIPS_CONSTANTS.RANKED_CALENDAR_STEPS_INFO})
self.onDaySelect(time_utils.getCurrentTimestamp())
calendar = self.__getCalendar()
if calendar is not None:
calendar.as_setMinAvailableDateS(self.__seasonInfo.getStartDate())
calendar.as_setMaxAvailableDateS(self.__seasonInfo.getEndDate())
calendar.as_openMonthS(self.__selectedDate)
calendar.as_selectDateS(self.__selectedDate)
calendar.as_setHighlightedDaysS([self.__seasonInfo.getCycleStartDate(), self.__seasonInfo.getCycleEndDate()])
calendar.as_setDayTooltipTypeS(TOOLTIPS_CONSTANTS.RANKED_CALENDAR_DAY_INFO)
return
def onDaySelect(self, date):
formattedDate = datetime.fromtimestamp(date)
selectedDayOfWeek = self.__weekDays[formattedDate.weekday()]
self.as_setDayDataS({'primeTimeGroupData': self.__constructPrimeTimes(date),
'dayText': text_styles.superPromoTitle(formattedDate.day),
'dayNameText': text_styles.middleTitle(selectedDayOfWeek)})
def __getCycleListString(self):
key = RANKED_BATTLES.RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM
cycles = self.__seasonInfo.getAllCycles()
result = []
for cycle in sorted(cycles.values()):
formatter = text_styles.main if cycle.status == CYCLE_STATUS.CURRENT else text_styles.standard
startDate = time_utils.getTimeStructInLocal(cycle.startDate)
endDate = time_utils.getTimeStructInLocal(cycle.endDate)
            result.append(formatter(i18n.makeString(key, cycleNumber=self.__currentCycle, day0='{:02d}'.format(startDate.tm_mday), month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'.format(endDate.tm_mday), month1='{:02d}'.format(endDate.tm_mon))))
        return result
def __constructPrimeTimes(self, selectedTime):
items = []
serversPeriodsMapping = self.rankedController.getPrimeTimesForDay(selectedTime, groupIdentical=True)
frmt = BigWorld.wg_getShortTimeFormat
for serverName in sorted(serversPeriodsMapping.keys()):
periodsStr = []
dayPeriods = serversPeriodsMapping[serverName]
if dayPeriods:
for periodStart, periodEnd in dayPeriods:
periodsStr.append(i18n.makeString(RANKED_BATTLES.CALENDARDAY_TIME, start=frmt(periodStart), end=frmt(periodEnd)))
else:
periodsStr = i18n.makeString(COMMON.COMMON_DASH)
if dayPeriods:
items.append({'serverNameText': text_styles.highlightText(serverName),
'primeTimeText': '\n'.join(periodsStr)})
return items
def __getCurrnetCycleString(self):
key = RANKED_BATTLES.RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM
cycles = self.__seasonInfo.getAllCycles()
for cycle in sorted(cycles.values()):
if cycle.status == CYCLE_STATUS.CURRENT:
formatter = text_styles.main
startDate = time_utils.getTimeStructInLocal(cycle.startDate)
endDate = time_utils.getTimeStructInLocal(cycle.endDate)
return formatter(i18n.makeString(key, cycleNumber=self.__currentCycle, day0='{:02d}'.format(startDate.tm_mday), month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'.format(endDate.tm_mday), month1='{:02d}'.format(endDate.tm_mon)))
def __getAttentionText(self):
key = RANKED_BATTLES.RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_ATTENTIONTEXT
cycleNumber = self.__currentCycle
timeDelta = time_utils.getTimeDeltaFromNow(self.__seasonInfo.getCycleEndDate())
endTimeStr = time_utils.getTillTimeString(timeDelta, RANKED_BATTLES.STATUS_TIMELEFT)
if timeDelta <= time_utils.ONE_HOUR:
formatter = text_styles.alert
else:
formatter = text_styles.neutral
return formatter(i18n.makeString(key, cycleNumber=cycleNumber, timeLeft=endTimeStr))
def __getCalendar(self):
return self.components.get(VIEW_ALIAS.CALENDAR)
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
56fbf2a47fa9865416f2c8ff06113e4b3ebbf002
|
934f170481a5f3807b14823f9e704fd877044d30
|
/SAGAN.py
|
5af51b32adb641eff5c37319b7332b3957d40e21
|
[] |
no_license
|
JustinLion83/Anime-GAN-tensorflow
|
c234bd28e197a801460683d07aa35d2d80cb96f9
|
e3a5fd726aeaf08d01445d8176468d84cd3295f4
|
refs/heads/master
| 2020-07-03T06:36:25.765500
| 2019-08-17T19:37:04
| 2019-08-17T19:37:04
| 201,822,987
| 0
| 0
| null | 2019-08-11T22:45:42
| 2019-08-11T22:45:42
| null |
UTF-8
|
Python
| false
| false
| 12,068
|
py
|
from layers import *
import numpy as np
import time
from utils import util
import os
class SAGAN_model(object):
def __init__(self, args):
self.args = args
self.d_loss_log = []
self.g_loss_log = []
self.layer_num = int(np.log2(self.args.img_size[0])) - 3
# inputs
self.is_training = tf.placeholder_with_default(False, (), name='is_training')
self.inputs = tf.placeholder(tf.float32,
[None, self.args.img_size[0], self.args.img_size[1], self.args.img_size[2]],
name='inputs')
self.z = tf.placeholder(tf.float32, [None, 1, 1, self.args.z_dim], name='z') # noise
# output of D for real images
real_logits = self.discriminator(self.inputs)
# output of D for fake images
self.fake_images = self.generator(self.z)
fake_logits = self.discriminator(self.fake_images, reuse=True)
# get loss for discriminator
self.d_loss = self.discriminator_loss(d_logits_real=real_logits, d_logits_fake=fake_logits)
# get loss for generator
self.g_loss = self.generator_loss(d_logits_fake=fake_logits)
# divide trainable variables into a group for D and a group for G
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if 'discriminator' in var.name]
g_vars = [var for var in t_vars if 'generator' in var.name]
# global step
self.global_step = tf.get_variable('global_step', initializer=tf.constant(0), trainable=False)
self.add_step = self.global_step.assign(self.global_step + 1)
# optimizers
self.d_lr = tf.train.exponential_decay(self.args.d_lr,
tf.maximum(self.global_step - self.args.decay_start_steps, 0),
self.args.decay_steps,
self.args.decay_rate)
self.g_lr = tf.train.exponential_decay(self.args.g_lr,
tf.maximum(self.global_step - self.args.decay_start_steps, 0),
self.args.decay_steps,
self.args.decay_rate)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
d_grads = tf.gradients(self.d_loss, d_vars)
d_opt = tf.train.AdamOptimizer(self.d_lr, beta1=self.args.beta1, beta2=self.args.beta2)
self.train_d = d_opt.apply_gradients(zip(d_grads, d_vars))
g_grads = tf.gradients(self.g_loss, g_vars)
g_opt = tf.train.AdamOptimizer(self.g_lr, beta1=self.args.beta1, beta2=self.args.beta2)
self.train_g = g_opt.apply_gradients(zip(g_grads, g_vars))
# EMA for generator
with tf.variable_scope("EMA_Weights"):
if self.args.ema_decay is not None:
self.var_ema = tf.train.ExponentialMovingAverage(self.args.ema_decay, num_updates=self.global_step)
with tf.control_dependencies([self.train_g]):
self.ema_train_g = self.var_ema.apply(tf.trainable_variables(scope='generator'))
# assign ema weights
self.assign_vars = []
for var in tf.trainable_variables(scope='generator'):
v = self.var_ema.average(var)
if v is not None:
self.assign_vars.append(tf.assign(var, v))
def discriminator_loss(self, d_logits_real, d_logits_fake):
real_loss = tf.reduce_mean(tf.nn.relu(1.0 - d_logits_real))
fake_loss = tf.reduce_mean(tf.nn.relu(1.0 + d_logits_fake))
loss = real_loss + fake_loss
return loss
def generator_loss(self, d_logits_fake):
loss = -tf.reduce_mean(d_logits_fake)
return loss
def generator(self, z, reuse=False):
with tf.variable_scope("generator", reuse=reuse):
ch = self.args.g_filters
x = spectral_deconv2d(z, filters=ch, kernel_size=4, stride=1, is_training=self.is_training, padding='VALID',
use_bias=False, scope='deconv2d')
x = batch_norm(x, self.is_training, scope='batch_norm')
x = tf.nn.leaky_relu(x, alpha=0.2)
for i in range(self.layer_num // 2):
with tf.variable_scope('layer' + str(i)):
if self.args.up_sample:
x = up_sample(x, scale_factor=2)
x = spectral_conv2d(x, filters=ch // 2, kernel_size=3, stride=1, is_training=self.is_training,
padding='SAME', scope='up_conv2d_' + str(i))
else:
x = spectral_deconv2d(x, filters=ch // 2, kernel_size=4, stride=2, is_training=self.is_training,
use_bias=False, scope='deconv2d_' + str(i))
x = batch_norm(x, self.is_training, scope='batch_norm_' + str(i))
x = tf.nn.leaky_relu(x, alpha=0.2)
ch = ch // 2
# Self Attention
x = attention(x, ch, is_training=self.is_training, scope="attention", reuse=reuse)
for i in range(self.layer_num // 2, self.layer_num):
with tf.variable_scope('layer' + str(i)):
if self.args.up_sample:
x = up_sample(x, scale_factor=2)
x = spectral_conv2d(x, filters=ch // 2, kernel_size=3, stride=1, is_training=self.is_training,
padding='SAME', scope='up_conv2d_' + str(i))
else:
x = spectral_deconv2d(x, filters=ch // 2, kernel_size=4, stride=2, is_training=self.is_training,
use_bias=False, scope='deconv2d_' + str(i))
x = batch_norm(x, self.is_training, scope='batch_norm_' + str(i))
x = tf.nn.leaky_relu(x, alpha=0.2)
ch = ch // 2
if self.args.up_sample:
x = up_sample(x, scale_factor=2)
x = spectral_conv2d(x, filters=self.args.img_size[2], kernel_size=3, stride=1,
is_training=self.is_training,
padding='SAME', scope='G_conv_logit')
else:
x = spectral_deconv2d(x, filters=self.args.img_size[2], kernel_size=4, stride=2,
is_training=self.is_training,
use_bias=False, scope='G_deconv_logit')
x = tf.nn.tanh(x)
return x
def discriminator(self, x, reuse=False):
with tf.variable_scope("discriminator", reuse=reuse):
ch = self.args.d_filters
x = spectral_conv2d(x, filters=ch, kernel_size=4, stride=2, is_training=self.is_training, padding='SAME',
use_bias=False, scope='conv2d')
x = tf.nn.leaky_relu(x, alpha=0.2)
for i in range(self.layer_num // 2):
x = spectral_conv2d(x, filters=ch * 2, kernel_size=4, stride=2, is_training=self.is_training,
padding='SAME', use_bias=False,
scope='conv2d_' + str(i))
x = batch_norm(x, self.is_training, scope='batch_norm' + str(i))
x = tf.nn.leaky_relu(x, alpha=0.2)
ch = ch * 2
# Self Attention
x = attention(x, ch, is_training=self.is_training, scope="attention", reuse=reuse)
for i in range(self.layer_num // 2, self.layer_num):
x = spectral_conv2d(x, filters=ch * 2, kernel_size=4, stride=2, is_training=self.is_training,
padding='SAME', use_bias=False,
scope='conv2d_' + str(i))
x = batch_norm(x, self.is_training, scope='batch_norm' + str(i))
x = tf.nn.leaky_relu(x, alpha=0.2)
ch = ch * 2
x = spectral_conv2d(x, filters=1, kernel_size=4, padding='VALID', stride=1, is_training=self.is_training,
use_bias=False,
scope='D_logit')
x = tf.squeeze(x, axis=[1, 2])
return x
def preprocess(self, x):
x = x / 127.5 - 1
return x
def train_epoch(self, sess, saver, train_next_element, i_epoch, n_batch, truncated_norm, z_fix=None):
t_start = None
global_step = 0
for i_batch in range(n_batch):
if i_batch == 1:
t_start = time.time()
batch_imgs = sess.run(train_next_element)
batch_imgs = self.preprocess(batch_imgs)
batch_z = truncated_norm.rvs([self.args.batch_size, 1, 1, self.args.z_dim])
feed_dict_ = {self.inputs: batch_imgs,
self.z: batch_z,
self.is_training: True}
# update D network
_, d_loss, d_lr, g_lr = sess.run([self.train_d, self.d_loss, self.d_lr, self.g_lr], feed_dict=feed_dict_)
self.d_loss_log.append(d_loss)
# update G network
g_loss = None
if i_batch % self.args.n_critic == 0:
if self.args.ema_decay is not None:
_, g_loss, _, global_step = sess.run(
[self.ema_train_g, self.g_loss, self.add_step, self.global_step], feed_dict=feed_dict_)
else:
_, g_loss, _, global_step = sess.run([self.train_g, self.g_loss, self.add_step, self.global_step],
feed_dict=feed_dict_)
self.g_loss_log.append(g_loss)
last_train_str = "[epoch:%d/%d, global_step:%d] -d_loss:%.3f - g_loss:%.3f -d_lr:%.e -g_lr:%.e" % (
i_epoch + 1, int(self.args.epochs), global_step, d_loss, g_loss, d_lr, g_lr)
if i_batch > 0:
last_train_str += (' -ETA:%ds' % util.cal_ETA(t_start, i_batch, n_batch))
if (i_batch + 1) % 20 == 0 or i_batch == 0:
tf.logging.info(last_train_str)
# show fake_imgs
if global_step % self.args.show_steps == 0:
tf.logging.info('generating fake imgs in steps %d...' % global_step)
# do ema
if self.args.ema_decay is not None:
# save temp weights for generator
saver.save(sess, os.path.join(self.args.checkpoint_dir, 'temp_model.ckpt'))
sess.run(self.assign_vars, feed_dict={self.inputs: batch_imgs,
self.z:batch_z,
self.is_training: False})
tf.logging.info('After EMA...')
if z_fix is not None:
show_z = z_fix
else:
show_z = truncated_norm.rvs([self.args.batch_size, 1, 1, self.args.z_dim])
fake_imgs = sess.run(self.fake_images, feed_dict={self.z: show_z})
manifold_h = int(np.floor(np.sqrt(self.args.sample_num)))
util.save_images(fake_imgs, [manifold_h, manifold_h],
image_path=os.path.join(self.args.result_dir,
'fake_steps_' + str(global_step) + '.jpg'))
if self.args.ema_decay is not None:
# restore temp weights for generator
saver.restore(sess, os.path.join(self.args.checkpoint_dir, 'temp_model.ckpt'))
tf.logging.info('Recover weights over...')
return global_step, self.d_loss_log, self.g_loss_log
|
[
"ccjdurandal422@163.com"
] |
ccjdurandal422@163.com
|
81a07d5d37659d9d4980d83db5bc793b675b8033
|
6d0bb60824c99071dc974b04225dabb093b76e67
|
/Python Programs/pallindromic.py
|
ce3d2d5e4bc575bd8d2037aba48d7f1ab95407cc
|
[] |
no_license
|
chetan-mali/Python-Traning
|
be2274d6ed1e191194aaa12e65c35868eeaab33e
|
88028bd17b68f1ea1253477b8deda9ece921e4b0
|
refs/heads/master
| 2020-04-14T20:43:48.307381
| 2019-01-05T21:11:05
| 2019-01-05T21:11:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
def check(n):
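    # reverse the digits of n and compare with the original to test for a palindrome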
temp=n
rev=0
while(n>0):
dig=n%10
rev=rev*10+dig
n=n//10
    if temp==rev:
        return True
    return False
flag=0
list1 = [12,61,12,12,14]
for i in list1:
if i<0:
print("False")
exit()
for i in list1:
if check(i)== True:
flag=1
if flag==1:
print("True")
else:
print("False")
|
[
"chetanrox520@gmail.com"
] |
chetanrox520@gmail.com
|
b9d7f5b0c705d83212e83386061033bed131b997
|
ab8c7c35f89581045000028cac4fce3a0dc6ba07
|
/chainer_maskrcnn/model/head/fpn_roi_keypoint_head.py
|
1a163421ebc7c1510d8e9182239858413ef6e6a1
|
[
"Apache-2.0"
] |
permissive
|
katotetsuro/chainer-maskrcnn
|
965b25c7b8e0411df7ab21841cfdf41c1a4c4b1b
|
f95eb067b99cdf555c5e9e90b34f1186df4ee342
|
refs/heads/master
| 2021-09-15T22:44:18.046410
| 2018-06-11T08:08:47
| 2018-06-11T08:08:47
| 115,767,724
| 20
| 1
|
Apache-2.0
| 2018-04-22T13:59:07
| 2017-12-30T02:14:56
|
Python
|
UTF-8
|
Python
| false
| false
| 4,304
|
py
|
import chainer
import chainer.links as L
import chainer.functions as F
from chainer.links.model.vision.resnet import ResNet50Layers, BuildingBlock, _global_average_pooling_2d
import numpy as np
import copy
from chainer_maskrcnn.functions.roi_align_2d_yx import _roi_align_2d_yx
class FPNRoIKeypointHead(chainer.Chain):
mask_size = 56
def __init__(self,
n_class,
n_keypoints,
roi_size_box,
roi_size_mask,
n_mask_convs=8,
loc_initialW=None,
score_initialW=None,
mask_initialW=None):
# n_class includes the background
super().__init__()
with self.init_scope():
# layers for box prediction path
self.conv1 = L.Convolution2D(
in_channels=None, out_channels=256, ksize=3, pad=1)
self.fc1 = L.Linear(None, 1024)
self.fc2 = L.Linear(None, 1024)
self.cls_loc = L.Linear(1024, 4, initialW=loc_initialW)
self.score = L.Linear(1024, n_class, initialW=score_initialW)
# mask prediction path
self.mask_convs = chainer.ChainList()
for i in range(n_mask_convs):
self.mask_convs.add_link(
L.Convolution2D(None, 256, ksize=3, pad=1))
self.deconv1 = L.Deconvolution2D(
in_channels=None,
out_channels=256,
ksize=2,
stride=2,
pad=0,
initialW=mask_initialW)
self.conv2 = L.Convolution2D(
in_channels=None,
out_channels=n_keypoints,
ksize=1,
stride=1,
pad=0,
initialW=mask_initialW)
self.n_class = n_class
self.roi_size_box = roi_size_box
self.roi_size_mask = roi_size_mask
def __call__(self, x, indices_and_rois, levels, spatial_scales):
pool_box = list()
levels = chainer.cuda.to_cpu(levels).astype(np.int32)
if len(np.unique(levels)) == 1:
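            # single-level shortcut: all RoIs share one FPN level, so one batched roi-align call (level 0 assumed) suffices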
pool_box = _roi_align_2d_yx(x[0], indices_and_rois, self.roi_size_box,
self.roi_size_box, spatial_scales[0])
else:
for l, i in zip(levels, indices_and_rois):
v = _roi_align_2d_yx(x[l], i[None], self.roi_size_box,
self.roi_size_box, spatial_scales[l])
pool_box.append(v)
pool_box = F.concat(pool_box, axis=0)
h = self.conv1(pool_box)
h = F.relu(h)
h = F.relu(self.fc1(h))
h = F.relu(self.fc2(h))
roi_cls_locs = self.cls_loc(h)
roi_scores = self.score(h)
# at prediction time, we use two pass method.
# at first path, we predict box location and class
# at second path, we predict mask with accurate location from first path
if chainer.config.train:
pool_mask = list()
for l, i in zip(levels, indices_and_rois):
pool_mask.append(_roi_align_2d_yx(x[l], i[None], self.roi_size_mask,
self.roi_size_mask, spatial_scales[l]))
mask = F.concat(pool_mask, axis=0)
for l in self.mask_convs.children():
mask = F.relu(l(mask))
mask = self.conv2(self.deconv1(mask))
*_, h, w = mask.shape
mask = F.resize_images(mask, output_shape=(2 * h, 2 * w))
return roi_cls_locs, roi_scores, mask
else:
# cache
self.x = x
return roi_cls_locs, roi_scores
def predict_mask(self, levels, indices_and_rois, spatial_scales):
pool_mask = list()
for l, i in zip(levels, indices_and_rois):
pool_mask.append(_roi_align_2d_yx(self.x[l], i[None], self.roi_size_mask,
self.roi_size_mask, spatial_scales[l]))
mask = F.concat(pool_mask, axis=0)
for l in self.mask_convs:
mask = F.relu(l(mask))
mask = self.conv2(self.deconv1(mask))
*_, h, w = mask.shape
mask = F.resize_images(mask, output_shape=(2 * h, 2 * w))
return mask
|
[
"nistetsurooy@gmail.com"
] |
nistetsurooy@gmail.com
|
4e49caf60862a2ba3e950e0b6ddc11b9b60b362c
|
d82c5af93dca23ee5b94458886e904c0fec6d9be
|
/appStatus/apps.py
|
77b478ffcae53c40e159584871c57699c7a060c1
|
[] |
no_license
|
saiken86807/college-application-portal
|
ec6b5280f39c2da4063a60cf09a195cc73f9edee
|
c1f88809b90f70b8862c6d1b599c75aded0de1d7
|
refs/heads/master
| 2022-12-27T09:01:53.309254
| 2020-09-21T00:04:34
| 2020-09-21T00:04:34
| 290,922,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 93
|
py
|
from django.apps import AppConfig
class AppstatusConfig(AppConfig):
name = 'appStatus'
|
[
"stefanieaiken@Stefanies-MacBook-Air.local"
] |
stefanieaiken@Stefanies-MacBook-Air.local
|
481dbc7401b824b85be94529bba4c8899243e7a3
|
9f908092d78566d1c2f0d4b9d7b575c1b0a90266
|
/01/flightscheduler/flights/urls.py
|
a848a69768d58cee1ca09090cb7ee7bf6bf59f0f
|
[] |
no_license
|
FernandoDaflon/Django_Rest_Angular_8-
|
478326d1677d53d67e090f0ebfd20efd2a3821f9
|
31be090910e80191fcde676473a15a316e1e11a2
|
refs/heads/master
| 2022-11-05T21:41:06.610890
| 2019-12-09T02:28:20
| 2019-12-09T02:28:20
| 226,766,189
| 0
| 1
| null | 2022-10-28T03:39:02
| 2019-12-09T02:18:09
|
Python
|
UTF-8
|
Python
| false
| false
| 394
|
py
|
from django.urls import path, include
from . import views
from rest_framework import routers
from django.conf.urls import url
# router = routers.DefaultRouter()
# router.register(r'users', views.UserViewSet)
urlpatterns = [
# path('', views.index, name='index'),
# path('', include(router.urls))
url(r'^$', views.flight_list),
url(r'^(?P<pk>[0-9]+)$', views.flight_detail)
]
|
[
"fernandodaflon@gmail.com"
] |
fernandodaflon@gmail.com
|
da17794fd3bc959633b932fe700adc8c9e21337f
|
c08d383814f3becb750cb91ff890302bf098b3b0
|
/lesson03/fib.py
|
5a14545aad6fa6197efae6713877d901c3c9ce1d
|
[] |
no_license
|
wangwei96/python_study
|
03e137ce8808ca39c5bd19b8b1ea2424a167aaae
|
b355c0000d6aee3d9e846118d51128bcc9a3bc61
|
refs/heads/master
| 2021-05-06T13:13:46.826038
| 2018-01-02T07:25:06
| 2018-01-02T07:25:06
| 113,256,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
# Recursively compute the value of the lim-th term of the Fibonacci sequence
def fibnum(a=1,b=0,lim=4,lev=1):
if lev==lim:
return a
return fibnum(a+b,a,lim,lev+1)
# Read a number and call the function that computes that Fibonacci term
def fib(i):
if i>0:
print(fibnum(lim=i))
else:
        print('Please enter a valid value!!!')
fib(int(input()))
|
[
"965158007@qq.com"
] |
965158007@qq.com
|
944220470e54abb868ea7ab6dd1e61634a5a1f51
|
3e2bb496ee3f18dfad3506f871c69ec2a0d67fe9
|
/m2m-relations/articles/admin.py
|
bf4ea0245db8187328cd09190ea7052b0d2c877e
|
[] |
no_license
|
ns-m/netology_dj_py_Databases_2
|
0b80c8829e7272e2b65d1de0abdb16a296f29740
|
abc1aa198f30ac98b63c14cef735729130674f07
|
refs/heads/master
| 2023-05-31T11:27:00.368032
| 2020-05-24T16:26:22
| 2020-05-24T16:26:22
| 266,418,890
| 0
| 0
| null | 2021-06-10T22:57:09
| 2020-05-23T21:01:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,087
|
py
|
from django.contrib import admin
from django.core.exceptions import ValidationError
from django.forms import BaseInlineFormSet
from .models import Article, Section, ArticleSection
class ArticleSectionInlineFormset(BaseInlineFormSet):
def clean(self):
set_common_section = False
for form in self.forms:
common_section = form.cleaned_data.get('common_section')
if common_section:
if set_common_section:
                    raise ValidationError('Only one section can be marked as the main one!')
set_common_section = True
if not set_common_section:
            raise ValidationError('Please specify a main section!')
return super().clean()
class ArticleSectionInline(admin.TabularInline):
model = ArticleSection
formset = ArticleSectionInlineFormset
extra = 1
@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
inlines = [ArticleSectionInline]
@admin.register(Section)
class SectionAdmin(admin.ModelAdmin):
pass
|
[
"9117479@gmail.com"
] |
9117479@gmail.com
|
dee31b9f45064ce4a3c41f646e56a24c989fd8b4
|
56c5f2fde5c929ac4304c0c318b13f9fd1ddacec
|
/test/unit/manager/wsgi/ports/test_routes.py
|
a7b7808d87742acdc5febe9cd99cd064babb5313
|
[] |
no_license
|
soulhez/Goperation
|
5e7b9b67910deeabe12b46a05fcfc82dc1d3d723
|
64b2651229504f24e9c854b9e30da58cc7741176
|
refs/heads/master
| 2022-03-08T06:35:19.979125
| 2019-11-11T08:49:20
| 2019-11-11T08:49:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
import routes
from simpleservice.wsgi import router
from goperation.manager.wsgi.port.routers import Routers as port_routes
mapper = routes.Mapper()
port_route = port_routes()
port_route.append_routers(mapper)
testing_route = router.ComposingRouter(mapper)
for x in mapper.matchlist:
    print(x.name)
route_dict = mapper._routenames
for route_name in route_dict:
    print(route_name, route_dict[route_name].conditions.get('method'),
          route_dict[route_name].defaults.get('action'), route_dict[route_name].routepath)
|
[
"lolizeppelin@gmail.com"
] |
lolizeppelin@gmail.com
|
f9a929408b32170e178231ad8907c38aa8647599
|
9cef4ef20efd0eec18846242e78be0b9be144c30
|
/teacher_cade/day19/14.greenlet.py
|
e2d537f9ff67e3f0659a5afb381d8128caa9ab71
|
[] |
no_license
|
Vaild/python-learn
|
4e6511a62a40b6104b081e0f8fe30f7d829901f5
|
5d602daf3b4b7e42349b7d9251df1f4dd62c299c
|
refs/heads/master
| 2022-11-19T00:47:48.808384
| 2020-07-20T14:27:49
| 2020-07-20T14:27:49
| 279,044,379
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
#!/usr/bin/python3
# coding=utf-8
from greenlet import greenlet
import time
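# test1 and test2 run as greenlets that cooperatively hand control to each other via explicit switch() calls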
def test1():
while True:
print("---A--")
gr2.switch()
time.sleep(0.5)
def test2():
while True:
print("---B--")
gr1.switch()
time.sleep(0.5)
gr1 = greenlet(test1)
gr2 = greenlet(test2)
# switch to gr1 and start running it
gr1.switch()
|
[
"cubersongwenbo@gmail.com"
] |
cubersongwenbo@gmail.com
|
834f0e536ae907ee6244def6d566646281d05324
|
9171126f2b4b5bfe620fe48fbf696b011881a938
|
/upload_python_scripts_and_client/BatchUploadv3.py
|
90bde36eef19bdaad1ab82e585fe52492bdbc177
|
[
"MIT"
] |
permissive
|
voicebase-support/voicebase-support.github.io
|
f63acf9f6b18193ee5135266f75be08d966ca206
|
a0d2b129b97682d2f2d54603a8fb8bbe618938ca
|
refs/heads/master
| 2023-01-27T12:27:42.369113
| 2023-01-25T17:58:31
| 2023-01-25T17:58:31
| 80,883,051
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,879
|
py
|
# list.csv is a list of file names on local machine
# any extra metadata fields under 'extended' need to be indexed on the VB platform before upload
# command line example
# python BatchUploadv3.py --list list.csv --mediadir ./media --results ./res.csv --token --priority low
import argparse
import csv
import json
import os
from VoiceBaseClient import VoiceBaseClient
# ********* def main ***********
def main():
parser = argparse.ArgumentParser(
description = "Batch uploader to VoiceBase V3"
)
parser.add_argument(
'--list',
help = "path to csv list of local files (one per line)",
required = True
)
parser.add_argument(
'--mediadir',
help = "path to local media files",
required = False,
default = './'
)
parser.add_argument(
'--results',
help = "path to output csv file of files, media ids, and status",
required = True
)
parser.add_argument(
'--token',
help = "Bearer token for V3 API authentication",
required = True
)
parser.add_argument(
'--priority',
help = "job priority of the uploads (low, normal, high), default = low",
required = False,
default = 'low',
choices = ['low', 'normal', 'high']
)
args = parser.parse_args()
upload(args.list, args.mediadir, args.results, args.token, args.priority)
# ********* def upload ***********
def upload(list_path, mdir, results_path, token, priority):
client = VoiceBaseClient(token = token)
media = client.media()
counter = 0
with open(list_path, 'r') as list_file:
with open(results_path, 'w') as results_file:
results_writer = csv.writer(
results_file, delimiter = ',', quotechar = '"'
)
results_writer.writerow([ 'file', 'mediaId', 'status' ]) # write headers
for raw_filename in list_file:
filename = raw_filename.rstrip()
counter = counter + 1
md = {
"externalId": filename,
"extended": {
"uploadversion": "1"
}
}
m_data = json.dumps(md)
pathandfile = os.path.join(mdir, filename)
response = upload_one(media, pathandfile, filename,generate_configuration(priority),m_data)
media_id = response['mediaId']
status = response['status']
                results_writer.writerow([filename, media_id, status])
# ********* def generate config json ***********
def generate_configuration(priority):
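    # NOTE: the priority argument is accepted but not used in the returned configuration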
return json.dumps({
"transcript": {
"formatting" : {
"enableNumberFormatting" : False
}
}
})
# ********* def upload one ***********
def upload_one(media, filepath, filename, configuration, metadata):
    with open(filepath, 'rb') as media_file:  # binary mode: the payload is audio, not text
response = media.post(
media_file, filename, 'audio/mpeg', configuration = configuration, metadata = metadata
)
return response
if __name__ == "__main__":
main()
|
[
"lenore.alford@gmail.com"
] |
lenore.alford@gmail.com
|
3c3ea357ad2d4046b95cdcf5cca0e18967b13045
|
d3f147d0e7a3a9c10b79f8de00d884c031246013
|
/numan.py
|
9766bf3b178d29c35eff36c1a667da62788b652b
|
[] |
no_license
|
numanrayhan/numan
|
4509e8e51c373cb9a5608250e3dbab4383fea3d5
|
6a485d14c52b9753d79aab284046d84431a81274
|
refs/heads/main
| 2023-05-07T23:13:13.209815
| 2021-05-31T18:12:17
| 2021-05-31T18:12:17
| 372,589,801
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18
|
py
|
print("my output")
|
[
"noreply@github.com"
] |
noreply@github.com
|
e8591843cd5828be5194738ec22902074c0117ca
|
16c20abcf609629b925c9a438c3728800c0eb60d
|
/opencv/05/opencv-05_01.py
|
9b21102a8e93d409c5710a978ffcb4281f544813
|
[] |
no_license
|
AnDeoukKyi/tistory
|
66aa916daf2a2a4d0051796b9509cac3a03749ef
|
b98defeade2410723cf30353fd5190c44048c6b0
|
refs/heads/main
| 2023-08-13T12:13:18.228278
| 2021-10-19T13:37:58
| 2021-10-19T13:37:58
| 385,083,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
import cv2
img = cv2.imread("image.png")
cv2.imshow("origin", img)
#ROI1
ROI = img[100:150,50:150]
cv2.imshow("ROI1", ROI)
#ROI2
ROI = img[:150,50:]
cv2.imshow("ROI2", ROI)
cv2.waitKey(0)
|
[
"abdgf3@naver.com"
] |
abdgf3@naver.com
|
a60ad6b1c07224bf9f068c2ff88e18b2145189e0
|
fa92165be94abcf7add9e4f57d3b444587ee986d
|
/app.py
|
1e33022cce704a8a4f06e3ae4fa00755f40cb9ef
|
[] |
no_license
|
Diane10/dianefinal
|
2977ce46b136e377805276acb920ebf827e09f5f
|
1efa8483dacb4810561920fd8b8915e24a1105f6
|
refs/heads/main
| 2023-01-24T03:13:11.100006
| 2020-11-29T14:35:20
| 2020-11-29T14:35:20
| 317,114,593
| 0
| 0
| null | 2020-11-30T04:54:20
| 2020-11-30T04:54:20
| null |
UTF-8
|
Python
| false
| false
| 56,765
|
py
|
import os
import streamlit as st
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import streamlit as st
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score,plot_confusion_matrix,plot_roc_curve,plot_precision_recall_curve,precision_score,recall_score,precision_recall_curve,roc_auc_score,auc
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import pickle
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.model_selection import LeaveOneOut
from sklearn.preprocessing import StandardScaler
try:
from enum import Enum
from io import BytesIO, StringIO
from typing import Union
import pandas as pd
import streamlit as st
except Exception as e:
print(e)
import streamlit.components.v1 as stc
""" Common ML Dataset Explorer """
st.title("Machine Learning Tutorial App")
st.subheader("Explorer with Streamlit")
html_temp = """
<div style="background-color:#000080;"><p style="color:white;font-size:50px;padding:10px">ML is Awesome</p></div>
"""
st.markdown(html_temp,unsafe_allow_html=True)
st.set_option('deprecation.showfileUploaderEncoding', False)
st.subheader("Dataset")
datasetchoice = st.radio("Do you what to use your own dataset?", ("Yes", "No"))
if datasetchoice=='No':
def file_selector(folder_path='./datasets'):
filenames = os.listdir(folder_path)
selected_filename = st.selectbox("Select A file",filenames)
return os.path.join(folder_path,selected_filename)
filename = file_selector()
st.info("You Selected {}".format(filename))
# Read Data
df = pd.read_csv(filename)
# Show Dataset
if st.checkbox("Show Dataset"):
st.dataframe(df)
# Show Columns
if st.button("Column Names"):
st.write(df.columns)
# Show Shape
if st.checkbox("Shape of Dataset"):
data_dim = st.radio("Show Dimensions By ",("Rows","Columns"))
if data_dim == 'Rows':
st.text("Number of Rows")
st.write(df.shape[0])
elif data_dim == 'Columns':
st.text("Number of Columns")
st.write(df.shape[1])
else:
st.write(df.shape)
# Select Columns
if st.checkbox("Select Columns To Show"):
all_columns = df.columns.tolist()
selected_columns = st.multiselect("Select",all_columns)
new_df = df[selected_columns]
st.dataframe(new_df)
# Show Values
if st.button("Value Counts"):
st.text("Value Counts By Target/Class")
st.write(df.iloc[:,-1].value_counts())
# Show Datatypes
if st.button("Data Types"):
st.write(df.dtypes)
# Show Summary
if st.checkbox("Summary"):
st.write(df.describe().T)
## Plot and Visualization
st.subheader("Data Visualization")
# Correlation
# Seaborn Plot
if st.checkbox("Correlation Plot[Seaborn]"):
st.write(sns.heatmap(df.corr(),annot=True))
st.pyplot()
# Pie C
if st.checkbox("Pie Plot"):
all_columns_names = df.columns.tolist()
if st.button("Generate Pie Plot"):
st.success("Generating A Pie Plot")
st.write(df.iloc[:,-1].value_counts().plot.pie(autopct="%1.1f%%"))
st.pyplot()
# Count Plot
if st.checkbox("Plot of Value Counts"):
st.text("Value Counts By Target")
all_columns_names = df.columns.tolist()
primary_col = st.selectbox("Primary Columm to GroupBy",all_columns_names)
selected_columns_names = st.multiselect("Select Columns",all_columns_names)
if st.button("Plot"):
st.text("Generate Plot")
if selected_columns_names:
vc_plot = df.groupby(primary_col)[selected_columns_names].count()
else:
vc_plot = df.iloc[:,-1].value_counts()
st.write(vc_plot.plot(kind="bar"))
st.pyplot()
# Customizable Plot
st.subheader("Customizable Plot")
all_columns_names = df.columns.tolist()
type_of_plot = st.selectbox("Select Type of Plot",["area","bar","line","hist","box","kde"])
selected_columns_names = st.multiselect("Select Columns To Plot",all_columns_names)
if st.button("Generate Plot"):
st.success("Generating Customizable Plot of {} for {}".format(type_of_plot,selected_columns_names))
# Plot By Streamlit
if type_of_plot == 'area':
cust_data = df[selected_columns_names]
st.area_chart(cust_data)
elif type_of_plot == 'bar':
cust_data = df[selected_columns_names]
st.bar_chart(cust_data)
elif type_of_plot == 'line':
cust_data = df[selected_columns_names]
st.line_chart(cust_data)
# Custom Plot
elif type_of_plot:
cust_plot= df[selected_columns_names].plot(kind=type_of_plot)
st.write(cust_plot)
st.pyplot()
if st.button("End of Data Exploration"):
st.balloons()
    st.sidebar.subheader('Choose Classifier')
classifier_name = st.sidebar.selectbox(
'Choose classifier',
('KNN', 'SVM', 'Random Forest','Logistic Regression','gradientBoosting','Deep Learning','ADABoost','Unsupervised Learning(K-MEANS)')
)
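    # Label-encode every column (including the target) so arbitrary CSV columns become integer-valued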
label= LabelEncoder()
for col in df.columns:
df[col]=label.fit_transform(df[col])
    if classifier_name == 'Unsupervised Learning(K-MEANS)':  # must match the selectbox option above
        st.sidebar.subheader('Model Hyperparameter')
n_clusters= st.sidebar.number_input("number of clusters",2,10,step=1,key='clusters')
if st.sidebar.button("classify",key='classify'):
sc = StandardScaler()
X_transformed = sc.fit_transform(df)
pca = PCA(n_components=2).fit_transform(X_transformed) # calculation Cov matrix is embeded in PCA
kmeans = KMeans(n_clusters)
kmeans.fit(pca)
st.set_option('deprecation.showPyplotGlobalUse', False)
# plt.figure(figsize=(12,10))
plt.scatter(pca[:,0],pca[:,1], c=kmeans.labels_, cmap='rainbow')
            plt.title('Clustering Projection')
st.pyplot()
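    # The supervised branches below assume the (encoded) dataframe has a column named 'target'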
Y = df.target
X = df.drop(columns=['target'])
X_train, X_test, y_train, y_test = train_test_split( X, Y, test_size=0.33, random_state=8)
from sklearn.preprocessing import StandardScaler
sl=StandardScaler()
X_trained= sl.fit_transform(X_train)
X_tested= sl.fit_transform(X_test)
class_name=['yes','no']
    st.sidebar.subheader('Advanced Model Hyperparameter')
model_optimizer = st.sidebar.selectbox(
'Choose Optimizer',
('Cross Validation', 'Voting'))
if model_optimizer == 'Cross Validation':
cv= st.sidebar.radio("cv",("Kfold","LeaveOneOut"),key='cv')
n_splits= st.sidebar.slider("maximum number of splits",1,30,key='n_splits')
if st.sidebar.button("optimize",key='opt'):
if cv=='Kfold':
kfold= KFold(n_splits=n_splits)
score = cross_val_score(SVC(),X,Y,cv=kfold)
st.write("Accuracy:",score.mean())
if cv=='LeaveOneOut':
loo = LeaveOneOut()
score = cross_val_score(SVC(),X,Y,cv=loo)
st.write("Accuracy:",score.mean())
if model_optimizer == 'Voting':
        voting = st.sidebar.multiselect("Which algorithms do you want to use?",('LogisticRegression','DecisionTreeClassifier','SVC','KNeighborsClassifier','GaussianNB','LinearDiscriminantAnalysis','AdaBoostClassifier','GradientBoostingClassifier','ExtraTreesClassifier'))
estimator=[]
        # VotingClassifier expects (name, estimator) tuples; only these four options are wired up
        if 'LogisticRegression' in voting:
            estimator.append(('lr', LogisticRegression()))
        if 'DecisionTreeClassifier' in voting:
            estimator.append(('dt', DecisionTreeClassifier()))
        if 'SVC' in voting:
            estimator.append(('svc', SVC()))
        if 'KNeighborsClassifier' in voting:
            estimator.append(('knn', KNeighborsClassifier()))
if st.sidebar.button("optimize",key='opt'):
ensemble = VotingClassifier(estimator)
results = cross_val_score(ensemble, X, Y)
st.write(results.mean())
if classifier_name == 'Deep Learning':
if st.sidebar.button("classify",key='classify'):
model = Sequential()
model.add(Flatten())
model.add(Dense(units=25,activation='relu'))
model.add(Dense(units=15,activation='softmax'))
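            # 15 softmax output units assume the encoded target takes at most 15 distinct values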
model.compile(loss='sparse_categorical_crossentropy',optimizer='rmsprop', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=10)
test_loss, test_acc =model.evaluate(X_test, y_test, verbose=2)
st.write('Model accuracy: ',test_acc*100)
if classifier_name == 'SVM':
        st.sidebar.subheader('Model Hyperparameter')
c= st.sidebar.number_input("c(Reguralization)",0.01,10.0,step=0.01,key='c')
kernel= st.sidebar.radio("kernel",("linear","rbf"),key='kernel')
gamma= st.sidebar.radio("gamma(kernel coefficiency",("scale","auto"),key='gamma')
metrics= st.sidebar.multiselect("What is the metrics to plot?",('confusion matrix','roc_curve','precision_recall_curve'))
if st.sidebar.button("classify",key='classify'):
st.subheader("SVM result")
svcclassifier= SVC(C=c,kernel=kernel,gamma=gamma)
svcclassifier.fit(X_trained,y_train)
y_pred= svcclassifier.predict(X_tested)
acc= accuracy_score(y_test,y_pred)
st.write("Accuracy:",acc.round(2))
# st.write("precision_score:",precision_score(y_test,y_pred,average='micro').round(2))
st.write("recall_score:",recall_score(y_test,y_pred,average='micro').round(2))
if 'confusion matrix' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('confusion matrix')
plot_confusion_matrix(svcclassifier,X_tested,y_test)
st.pyplot()
if 'roc_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('plot_roc_curve')
                plot_roc_curve(svcclassifier,X_tested,y_test)
st.pyplot()
if 'precision_recall_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('precision_recall_curve')
                plot_precision_recall_curve(svcclassifier,X_tested,y_test)
st.pyplot()
if classifier_name == 'Logistic Regression':
        st.sidebar.subheader('Model Hyperparameter')
c= st.sidebar.number_input("c(Reguralization)",0.01,10.0,step=0.01,key='Logistic')
max_iter= st.sidebar.slider("maximum number of iteration",100,500,key='max_item')
metrics= st.sidebar.multiselect("Wht is the metrics to plot?",('confusion matrix','roc_curve','precision_recall_curve'))
if st.sidebar.button("classify",key='classify'):
st.subheader("Logistic Regression result")
Regression= LogisticRegression(C=c,max_iter=max_iter)
Regression.fit(X_trained,y_train)
y_prediction= Regression.predict(X_tested)
acc= accuracy_score(y_test,y_prediction)
st.write("Accuracy:",acc.round(2))
st.write("precision_score:",precision_score(y_test,y_prediction,average='micro').round(2))
st.write("recall_score:",recall_score(y_test,y_prediction,average='micro').round(2))
if 'confusion matrix' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('confusion matrix')
plot_confusion_matrix(Regression,X_tested,y_test)
st.pyplot()
if 'roc_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('plot_roc_curve')
plot_roc_curve(Regression,X_tested,y_test)
st.pyplot()
if 'precision_recall_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('precision_recall_curve')
                plot_precision_recall_curve(Regression,X_tested,y_test)
st.pyplot()
if classifier_name == 'Random Forest':
        st.sidebar.subheader('Model Hyperparameter')
n_estimators= st.sidebar.number_input("Number of trees in the forest",100,5000,step=10,key='estimators')
max_depth= st.sidebar.number_input("maximum depth of tree",1,20,step=1,key='max_depth')
bootstrap= st.sidebar.radio("Boostrap sample when building trees",("True","False"),key='boostrap')
metrics= st.sidebar.multiselect("What is the metrics to plot?",('confusion matrix','roc_curve','precision_recall_curve'))
if st.sidebar.button("classify",key='classify'):
st.subheader("Random Forest result")
model= RandomForestClassifier(n_estimators=n_estimators,max_depth=max_depth,bootstrap=bootstrap)
model.fit(X_trained,y_train)
y_prediction= model.predict(X_tested)
acc= accuracy_score(y_test,y_prediction)
st.write("Accuracy:",acc.round(2))
st.write("precision_score:",precision_score(y_test,y_prediction,average='micro').round(2))
st.write("recall_score:",recall_score(y_test,y_prediction,average='micro').round(2))
if 'confusion matrix' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('confusion matrix')
plot_confusion_matrix(model,X_tested,y_test)
st.pyplot()
if 'roc_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('plot_roc_curve')
plot_roc_curve(model,X_tested,y_test)
st.pyplot()
if 'precision_recall_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('precision_recall_curve')
                plot_precision_recall_curve(model,X_tested,y_test)
st.pyplot()
if classifier_name == 'KNN':
        st.sidebar.subheader('Model Hyperparameter')
n_neighbors= st.sidebar.number_input("Number of n_neighbors",5,30,step=1,key='neighbors')
leaf_size= st.sidebar.slider("leaf size",30,200,key='leaf')
weights= st.sidebar.radio("weight function used in prediction",("uniform","distance"),key='weight')
metrics= st.sidebar.multiselect("What is the metrics to plot?",('confusion matrix','roc_curve','precision_recall_curve'))
if st.sidebar.button("classify",key='classify'):
st.subheader("KNN result")
model= KNeighborsClassifier(n_neighbors=n_neighbors,leaf_size=leaf_size,weights=weights)
model.fit(X_trained,y_train)
y_prediction= model.predict(X_tested)
acc= accuracy_score(y_test,y_prediction)
st.write("Accuracy:",acc.round(2))
st.write("precision_score:",precision_score(y_test,y_prediction,average='micro').round(2))
st.write("recall_score:",recall_score(y_test,y_prediction,average='micro').round(2))
if 'confusion matrix' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('confusion matrix')
plot_confusion_matrix(model,X_tested,y_test)
st.pyplot()
if 'roc_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('plot_roc_curve')
plot_roc_curve(model,X_tested,y_test)
st.pyplot()
if 'precision_recall_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('precision_recall_curve')
                plot_precision_recall_curve(model,X_tested,y_test)
st.pyplot()
if classifier_name == 'ADABoost':
        st.sidebar.subheader('Model Hyperparameter')
n_estimators= st.sidebar.number_input("Number of trees in the forest",100,5000,step=10,key='XGBestimators')
seed= st.sidebar.number_input("learning rate",1,150,step=1,key='seed')
metrics= st.sidebar.multiselect("What is the metrics to plot?",('confusion matrix','roc_curve','precision_recall_curve'))
if st.sidebar.button("classify",key='classify'):
st.subheader("ADABoost result")
            model=AdaBoostClassifier(n_estimators=n_estimators,learning_rate=learning_rate)
model.fit(X_trained,y_train)
y_prediction= model.predict(X_tested)
acc= accuracy_score(y_test,y_prediction)
st.write("Accuracy:",acc.round(2))
st.write("precision_score:",precision_score(y_test,y_prediction,average='micro').round(2))
st.write("recall_score:",recall_score(y_test,y_prediction,average='micro').round(2))
if 'confusion matrix' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('confusion matrix')
plot_confusion_matrix(model,X_tested,y_test)
st.pyplot()
if 'roc_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('plot_roc_curve')
plot_roc_curve(model,X_tested,y_test)
st.pyplot()
if 'precision_recall_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('precision_recall_curve')
plot_precision_recall_curve(model,X_tested,y_test)
st.pyplot()
if classifier_name == 'gradientBoosting':
st.sidebar.subheader('Model Hyperparameter')
n_estimators= st.sidebar.number_input("Number of boosting stages",100,5000,step=10,key='GBestimators')
learning_rate= st.sidebar.number_input("learning rate",0.01,10.0,step=0.01,key='gb_lr')
metrics= st.sidebar.multiselect("What are the metrics to plot?",('confusion matrix','roc_curve','precision_recall_curve'))
if st.sidebar.button("classify",key='classify'):
st.subheader("gradientBoosting result")
model=GradientBoostingClassifier(n_estimators=n_estimators,learning_rate=learning_rate)
model.fit(X_trained,y_train)
y_prediction= model.predict(X_tested)
acc= accuracy_score(y_test,y_prediction)
st.write("Accuracy:",acc.round(2))
st.write("precision_score:",precision_score(y_test,y_prediction,average='micro').round(2))
st.write("recall_score:",recall_score(y_test,y_prediction,average='micro').round(2))
if 'confusion matrix' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('confusion matrix')
plot_confusion_matrix(model,X_tested,y_test)
st.pyplot()
if 'roc_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('plot_roc_curve')
plot_roc_curve(model,X_tested,y_test)
st.pyplot()
if 'precision_recall_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('precision_recall_curve')
plot_precision_recall_curve(model,X_tested,y_test)
st.pyplot()
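# Editor's note (observation, not a behaviour change): with average='micro',
# precision_score and recall_score aggregate over all classes and both reduce to
# plain accuracy, so the three numbers printed in each branch above will always
# match. A per-class breakdown could be shown instead with something like:
#
# from sklearn.metrics import classification_report
# st.text(classification_report(y_test, y_prediction))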
elif datasetchoice == 'Yes':
data_file = st.file_uploader("Upload CSV",type=['csv'])
# if st.button("Process"):
# if data_file is not None:
# file_details = {"Filename":data_file.name,"FileType":data_file.type,"FileSize":data_file.size}
# st.write(file_details)
# df = pd.read_csv(data_file)
# st.dataframe(df)
st.write("Note:if you want to do classification make sure you have target attributes")
def file_selector(dataset):
if dataset is not None:
file_details = {"Filename":dataset.name,"FileType":dataset.type,"FileSize":dataset.size}
st.write(file_details)
df = pd.read_csv(dataset)
return df
df = file_selector(data_file)
if df is None:
    st.stop()  # halt this script run until a CSV has been uploaded
st.dataframe(df)
if st.checkbox("Show Dataset"):
st.dataframe(df)
# Show Columns
if st.button("Column Names"):
st.write(df.columns)
# Show Shape
if st.checkbox("Shape of Dataset"):
data_dim = st.radio("Show Dimensions By ",("Rows","Columns"))
if data_dim == 'Rows':
st.text("Number of Rows")
st.write(df.shape[0])
elif data_dim == 'Columns':
st.text("Number of Columns")
st.write(df.shape[1])
else:
st.write(df.shape)
# Select Columns
if st.checkbox("Select Columns To Show"):
all_columns = df.columns.tolist()
selected_columns = st.multiselect("Select",all_columns)
new_df = df[selected_columns]
st.dataframe(new_df)
# Show Values
if st.button("Value Counts"):
st.text("Value Counts By Target/Class")
st.write(df.iloc[:,-1].value_counts())
# Show Datatypes
if st.button("Data Types"):
st.write(df.dtypes)
# Show Summary
if st.checkbox("Summary"):
st.write(df.describe().T)
## Plot and Visualization
st.subheader("Data Visualization")
# Correlation
# Seaborn Plot
if st.checkbox("Correlation Plot[Seaborn]"):
st.write(sns.heatmap(df.corr(),annot=True))
st.pyplot()
# Pie Chart
if st.checkbox("Pie Plot"):
all_columns_names = df.columns.tolist()
if st.button("Generate Pie Plot"):
st.success("Generating A Pie Plot")
st.write(df.iloc[:,-1].value_counts().plot.pie(autopct="%1.1f%%"))
st.pyplot()
# Count Plot
if st.checkbox("Plot of Value Counts"):
st.text("Value Counts By Target")
all_columns_names = df.columns.tolist()
primary_col = st.selectbox("Primary Columm to GroupBy",all_columns_names)
selected_columns_names = st.multiselect("Select Columns",all_columns_names)
if st.button("Plot"):
st.text("Generate Plot")
if selected_columns_names:
vc_plot = df.groupby(primary_col)[selected_columns_names].count()
else:
vc_plot = df.iloc[:,-1].value_counts()
st.write(vc_plot.plot(kind="bar"))
st.pyplot()
# Customizable Plot
st.subheader("Customizable Plot")
all_columns_names = df.columns.tolist()
type_of_plot = st.selectbox("Select Type of Plot",["area","bar","line","hist","box","kde"])
selected_columns_names = st.multiselect("Select Columns To Plot",all_columns_names)
if st.button("Generate Plot"):
st.success("Generating Customizable Plot of {} for {}".format(type_of_plot,selected_columns_names))
# Plot By Streamlit
if type_of_plot == 'area':
cust_data = df[selected_columns_names]
st.area_chart(cust_data)
elif type_of_plot == 'bar':
cust_data = df[selected_columns_names]
st.bar_chart(cust_data)
elif type_of_plot == 'line':
cust_data = df[selected_columns_names]
st.line_chart(cust_data)
# Custom Plot
elif type_of_plot:
cust_plot= df[selected_columns_names].plot(kind=type_of_plot)
st.write(cust_plot)
st.pyplot()
if st.button("End of Data Exploration"):
st.balloons()
st.sidebar.subheader('Choose Classifer')
classifier_name = st.sidebar.selectbox(
'Choose classifier',
('KNN', 'SVM', 'Random Forest','Logistic Regression','XGBOOST','Unsupervised Learning')
)
label= LabelEncoder()
for col in df.columns:
    df[col]=label.fit_transform(df[col])  # encodes every column; convenient for categorical data, but note it also discretizes numeric columns
if classifier_name == 'Unsupervised Learning':
st.sidebar.subheader('Model Hyperparameter')
n_clusters= st.sidebar.number_input("number of clusters",2,10,step=1,key='clusters')
if st.sidebar.button("classify",key='classify'):
sc = StandardScaler()
X_transformed = sc.fit_transform(df)
pca = PCA(n_components=2).fit_transform(X_transformed) # covariance computation is embedded in PCA
kmeans = KMeans(n_clusters)
kmeans.fit(pca)
st.set_option('deprecation.showPyplotGlobalUse', False)
# plt.figure(figsize=(12,10))
plt.scatter(pca[:,0],pca[:,1], c=kmeans.labels_, cmap='rainbow')
plt.title('Clustering Projection')
st.pyplot()
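# Editor's note: an optional sketch (not in the original) for choosing the number
# of clusters from the data instead of fixing it by hand; `pca` is the 2-D
# projection computed just above.
#
# from sklearn.metrics import silhouette_score
# for k in range(2, 11):
#     labels = KMeans(n_clusters=k).fit_predict(pca)
#     st.write(k, round(silhouette_score(pca, labels), 3))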
Y = df.target  # assumes the uploaded dataset has a column named 'target'
X = df.drop(columns=['target'])
X_train, X_test, y_train, y_test = train_test_split( X, Y, test_size=0.33, random_state=8)
from sklearn.preprocessing import StandardScaler
sl=StandardScaler()
X_trained= sl.fit_transform(X_train)
X_tested= sl.transform(X_test)  # reuse the scaler fitted on the training set; fit_transform here would leak test-set statistics
class_name=['yes','no']
if classifier_name == 'SVM':
st.sidebar.subheader('Model Hyperparameter')
c= st.sidebar.number_input("C (Regularization parameter)",0.01,10.0,step=0.01,key='c')
kernel= st.sidebar.radio("kernel",("linear","rbf"),key='kernel')
gamma= st.sidebar.radio("gamma (kernel coefficient)",("scale","auto"),key='gamma')
metrics= st.sidebar.multiselect("What are the metrics to plot?",('confusion matrix','roc_curve','precision_recall_curve'))
if st.sidebar.button("classify",key='classify'):
st.subheader("SVM result")
svcclassifier= SVC(C=c,kernel=kernel,gamma=gamma)
svcclassifier.fit(X_trained,y_train)
y_pred= svcclassifier.predict(X_tested)
acc= accuracy_score(y_test,y_pred)
st.write("Accuracy:",acc.round(2))
# st.write("precision_score:",precision_score(y_test,y_pred,average='micro').round(2))
st.write("recall_score:",recall_score(y_test,y_pred,average='micro').round(2))
if 'confusion matrix' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('confusion matrix')
plot_confusion_matrix(svcclassifier,X_tested,y_test)
st.pyplot()
if 'roc_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('plot_roc_curve')
plot_roc_curve(svcclassifier,X_tested,y_test)  # plot_roc_curve takes no 'normalize' argument
st.pyplot()
if 'precision_recall_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('precision_recall_curve')
plot_precision_recall_curve(svcclassifier,X_tested,y_test)
st.pyplot()
if classifier_name == 'Logistic Regression':
st.sidebar.subheader('Model Hyperparameter')
c= st.sidebar.number_input("C (Regularization parameter)",0.01,10.0,step=0.01,key='Logistic')
max_iter= st.sidebar.slider("maximum number of iterations",100,500,key='max_iter')
metrics= st.sidebar.multiselect("What are the metrics to plot?",('confusion matrix','roc_curve','precision_recall_curve'))
if st.sidebar.button("classify",key='classify'):
st.subheader("Logistic Regression result")
Regression= LogisticRegression(C=c,max_iter=max_iter)
Regression.fit(X_trained,y_train)
y_prediction= Regression.predict(X_tested)
acc= accuracy_score(y_test,y_prediction)
st.write("Accuracy:",acc.round(2))
st.write("precision_score:",precision_score(y_test,y_prediction,average='micro').round(2))
st.write("recall_score:",recall_score(y_test,y_prediction,average='micro').round(2))
if 'confusion matrix' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('confusion matrix')
plot_confusion_matrix(Regression,X_tested,y_test)
st.pyplot()
if 'roc_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('plot_roc_curve')
plot_roc_curve(Regression,X_tested,y_test)
st.pyplot()
if 'precision_recall_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('precision_recall_curve')
plot_precision_recall_curve(Regression,X_tested,y_test)
st.pyplot()
if classifier_name == 'Random Forest':
st.sidebar.subheader('Model Hyperparameter')
n_estimators= st.sidebar.number_input("Number of trees in the forest",100,5000,step=10,key='estimators')
max_depth= st.sidebar.number_input("maximum depth of tree",1,20,step=1,key='max_depth')
bootstrap= st.sidebar.radio("Bootstrap samples when building trees",("True","False"),key='bootstrap') == "True"  # convert the radio's string to a bool for sklearn
metrics= st.sidebar.multiselect("What are the metrics to plot?",('confusion matrix','roc_curve','precision_recall_curve'))
if st.sidebar.button("classify",key='classify'):
st.subheader("Random Forest result")
model= RandomForestClassifier(n_estimators=n_estimators,max_depth=max_depth,bootstrap=bootstrap)
model.fit(X_trained,y_train)
y_prediction= model.predict(X_tested)
acc= accuracy_score(y_test,y_prediction)
st.write("Accuracy:",acc.round(2))
st.write("precision_score:",precision_score(y_test,y_prediction,average='micro').round(2))
st.write("recall_score:",recall_score(y_test,y_prediction,average='micro').round(2))
if 'confusion matrix' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('confusion matrix')
plot_confusion_matrix(model,X_tested,y_test)
st.pyplot()
if 'roc_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('plot_roc_curve')
plot_roc_curve(model,X_tested,y_test)
st.pyplot()
if 'precision_recall_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('precision_recall_curve')
plot_precision_recall_curve(model,X_tested,y_test)
st.pyplot()
if classifier_name == 'KNN':
st.sidebar.subheader('Model Hyperparameter')
n_neighbors= st.sidebar.number_input("Number of n_neighbors",5,30,step=1,key='neighbors')
leaf_size= st.sidebar.slider("leaf size",30,200,key='leaf')
weights= st.sidebar.radio("weight function used in prediction",("uniform","distance"),key='weight')
metrics= st.sidebar.multiselect("What are the metrics to plot?",('confusion matrix','roc_curve','precision_recall_curve'))
if st.sidebar.button("classify",key='classify'):
st.subheader("KNN result")
model= KNeighborsClassifier(n_neighbors=n_neighbors,leaf_size=leaf_size,weights=weights)
model.fit(X_trained,y_train)
y_prediction= model.predict(X_tested)
acc= accuracy_score(y_test,y_prediction)
st.write("Accuracy:",acc.round(2))
st.write("precision_score:",precision_score(y_test,y_prediction,average='micro').round(2))
st.write("recall_score:",recall_score(y_test,y_prediction,average='micro').round(2))
if 'confusion matrix' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('confusion matrix')
plot_confusion_matrix(model,X_tested,y_test)
st.pyplot()
if 'roc_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('plot_roc_curve')
plot_roc_curve(model,X_tested,y_test)
st.pyplot()
if 'precision_recall_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('precision_recall_curve')
plot_precision_recall_curve(model,X_tested,y_test)
st.pyplot()
if classifier_name == 'XGBOOST':
st.sidebar.subheader('Model Hyperparameter')
n_estimators= st.sidebar.number_input("Number of boosting rounds",100,5000,step=10,key='XGBestimators')
seed= st.sidebar.number_input("random seed",1,150,step=1,key='seed')
metrics= st.sidebar.multiselect("What are the metrics to plot?",('confusion matrix','roc_curve','precision_recall_curve'))
if st.sidebar.button("classify",key='classify'):
st.subheader("XGBOOST result")
model= xgb.XGBClassifier(n_estimators=n_estimators,seed=seed)
model.fit(X_trained,y_train)
y_prediction= model.predict(X_tested)
acc= accuracy_score(y_test,y_prediction)
st.write("Accuracy:",acc.round(2))
st.write("precision_score:",precision_score(y_test,y_prediction,average='micro').round(2))
st.write("recall_score:",recall_score(y_test,y_prediction,average='micro').round(2))
st.write("ROC_AUC_score:",roc_auc_score(y_test,y_prediction,average='micro').round(2))
if 'confusion matrix' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('confusion matrix')
plot_confusion_matrix(model,X_tested,y_test)
st.pyplot()
if 'roc_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('plot_roc_curve')
plot_roc_curve(model,X_tested,y_test)
st.pyplot()
if 'precision_recall_curve' in metrics:
st.set_option('deprecation.showPyplotGlobalUse', False)
st.subheader('precision_recall_curve')
plot_precision_recall_curve(model,X_tested,y_test)
st.pyplot()
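# Editor's note: an optional refactoring sketch (not in the original). Every
# classifier branch above repeats the same three metric-plotting blocks; a helper
# like the one below could replace them. The plotting functions match the old
# sklearn API (plot_confusion_matrix / plot_roc_curve / plot_precision_recall_curve)
# already used in this script, which the imports at the top are assumed to provide.
#
# def show_metrics(fitted_model, X, y, selected):
#     st.set_option('deprecation.showPyplotGlobalUse', False)
#     if 'confusion matrix' in selected:
#         st.subheader('confusion matrix')
#         plot_confusion_matrix(fitted_model, X, y)
#         st.pyplot()
#     if 'roc_curve' in selected:
#         st.subheader('roc_curve')
#         plot_roc_curve(fitted_model, X, y)
#         st.pyplot()
#     if 'precision_recall_curve' in selected:
#         st.subheader('precision_recall_curve')
#         plot_precision_recall_curve(fitted_model, X, y)
#         st.pyplot()
#
# # usage inside any branch: show_metrics(model, X_tested, y_test, metrics)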
# # -*- coding: utf-8 -*-
# """Assignment3.ipynb
# """
# import pandas as pd
# data= pd.read_csv('https://raw.githubusercontent.com/Diane10/ML/master/assignment3.csv')
# # data.info()
# # data.isnull().sum()
# null_counts = data.isnull().sum().sort_values()
# selected = null_counts[null_counts < 8000 ]
# percentage = 100 * data.isnull().sum() / len(data)
# data_types = data.dtypes
# # data_types
# missing_values_table = pd.concat([null_counts, percentage, data_types], axis=1)
# # missing_values_table
# col=['CountryName','Date','StringencyLegacyIndexForDisplay','StringencyIndexForDisplay','ContainmentHealthIndexForDisplay','GovernmentResponseIndexForDisplay',
# 'EconomicSupportIndexForDisplay','C8_International travel controls','C1_School closing','C3_Cancel public events','C2_Workplace closing','C4_Restrictions on gatherings',
# 'C6_Stay at home requirements','C7_Restrictions on internal movement','H1_Public information campaigns','E1_Income support','C5_Close public transport','E2_Debt/contract relief','StringencyLegacyIndex','H3_Contact tracing','StringencyIndex','ContainmentHealthIndex','E4_International support','EconomicSupportIndex','E3_Fiscal measures','H5_Investment in vaccines','ConfirmedCases','ConfirmedDeaths']
# newdataset=data[col]
# newdataset= newdataset.dropna()
# from sklearn.preprocessing import LabelEncoder
# newdataset['CountryName']=LabelEncoder().fit_transform(newdataset['CountryName'])
# # # map features to their absolute correlation values
# # corr = newdataset.corr().abs()
# # # set equality (self correlation) as zero
# # corr[corr == 1] = 0
# # # of each feature, find the max correlation
# # # and sort the resulting array in ascending order
# # corr_cols = corr.max().sort_values(ascending=False)
# # # display the highly correlated features
# # display(corr_cols[corr_cols > 0.9])
# # len(newdataset)
# X=newdataset[['CountryName','StringencyLegacyIndexForDisplay','StringencyIndexForDisplay', 'StringencyIndex','StringencyLegacyIndex','ContainmentHealthIndexForDisplay','ContainmentHealthIndex','GovernmentResponseIndexForDisplay','ConfirmedCases','ConfirmedDeaths','EconomicSupportIndexForDisplay','E2_Debt/contract relief','EconomicSupportIndex','C3_Cancel public events','C1_School closing']]
# # X=newdataset[['CountryName','StringencyLegacyIndexForDisplay','StringencyIndexForDisplay', 'StringencyIndex','StringencyLegacyIndex','ContainmentHealthIndexForDisplay','ContainmentHealthIndex','GovernmentResponseIndexForDisplay','ConfirmedCases','ConfirmedDeaths']]
# # df_first_half = X[:1000]
# # df_second_half = X[1000:]
# # """Feature selector that removes all low-variance features."""
# from sklearn.feature_selection import VarianceThreshold
# selector = VarianceThreshold()
# x= selector.fit_transform(X)
# df_first_half = x[:5000]
# df_second_half = x[5000:]
# # """Create clusters/classes of similar records using features selected in (1), use an unsupervised learning algorithm of your choice."""
# # Commented out IPython magic to ensure Python compatibility.
# from sklearn.cluster import KMeans
# from sklearn.decomposition import PCA
# import pandas as pd
# from sklearn.preprocessing import MinMaxScaler
# from matplotlib import pyplot as plt
# import streamlit as st
# # wcss=[]
# # for i in range(1,11):
# # kmeans=KMeans(n_clusters=i, init='k-means++',random_state=0)
# # kmeans.fit(x)
# # wcss.append(kmeans.inertia_)
# # st.set_option('deprecation.showPyplotGlobalUse', False)
# # plt.plot(range(1,11),wcss)
# # plt.title('The Elbow Method')
# # plt.xlabel('Number of Clusters')
# # plt.ylabel('WCSS')
# # plt.show()
# # st.pyplot()
# model = KMeans(n_clusters = 6)
# pca = PCA(n_components=2).fit(x)
# pca_2d = pca.transform(x)
# model.fit(pca_2d)
# labels = model.predict(pca_2d)
# # labels
# # predicted_label = model.predict([[7.2, 3.5, 0.8, 1.6]])
# # pca = PCA(n_components=2).fit(df_first_half)
# # pca_2d = pca.transform(df_first_half)
# # pca_2d
# xs = pca_2d[:, 0]
# ys = pca_2d[:, 1]
# plt.scatter(xs, ys, c = labels)
# plt.scatter(model.cluster_centers_[:,0],model.cluster_centers_[:,1],color='purple',marker='*',label='centroid')
# kmeans = KMeans(n_clusters=10)
# kmeans.fit(df_first_half)
# plt.scatter(df_first_half[:,0],df_first_half[:,1], c=kmeans.labels_, cmap='rainbow')
# range_n_clusters = [2, 3, 4, 5, 6]
# # from sklearn.metrics import silhouette_samples, silhouette_score
# # import matplotlib.cm as cm
# # import numpy as np
# # for n_clusters in range_n_clusters:
# # # Create a subplot with 1 row and 2 columns
# # fig, (ax1, ax2) = plt.subplots(1, 2)
# # fig.set_size_inches(18, 7)
# # # The 1st subplot is the silhouette plot
# # # The silhouette coefficient can range from -1, 1 but in this example all
# # # lie within [-0.1, 1]
# # ax1.set_xlim([-0.1, 1])
# # # The (n_clusters+1)*10 is for inserting blank space between silhouette
# # # plots of individual clusters, to demarcate them clearly.
# # ax1.set_ylim([0, len(pca_2d) + (n_clusters + 1) * 10])
# # # Initialize the clusterer with n_clusters value and a random generator
# # # seed of 10 for reproducibility.
# # clusterer = KMeans(n_clusters=n_clusters, random_state=10)
# # cluster_labels = clusterer.fit_predict(pca_2d)
# # # The silhouette_score gives the average value for all the samples.
# # # This gives a perspective into the density and separation of the formed
# # # clusters
# # silhouette_avg = silhouette_score(pca_2d, cluster_labels)
# # print("For n_clusters =", n_clusters,
# # "The average silhouette_score is :", silhouette_avg)
# # # Compute the silhouette scores for each sample
# # sample_silhouette_values = silhouette_samples(pca_2d, cluster_labels)
# # y_lower = 10
# # for i in range(n_clusters):
# # # Aggregate the silhouette scores for samples belonging to
# # # cluster i, and sort them
# # ith_cluster_silhouette_values = \
# # sample_silhouette_values[cluster_labels == i]
# # ith_cluster_silhouette_values.sort()
# # size_cluster_i = ith_cluster_silhouette_values.shape[0]
# # y_upper = y_lower + size_cluster_i
# # color = cm.nipy_spectral(float(i) / n_clusters)
# # ax1.fill_betweenx(np.arange(y_lower, y_upper),
# # 0, ith_cluster_silhouette_values,
# # facecolor=color, edgecolor=color, alpha=0.7)
# # # Label the silhouette plots with their cluster numbers at the middle
# # ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# # # Compute the new y_lower for next plot
# # y_lower = y_upper + 10 # 10 for the 0 samples
# # ax1.set_title("The silhouette plot for the various clusters.")
# # ax1.set_xlabel("The silhouette coefficient values")
# # ax1.set_ylabel("Cluster label")
# # # The vertical line for average silhouette score of all the values
# # ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
# # ax1.set_yticks([]) # Clear the yaxis labels / ticks
# # ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# # # 2nd Plot showing the actual clusters formed
# # colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
# # ax2.scatter( pca_2d[:, 0], pca_2d[:, 1], marker='.', s=30, lw=0, alpha=0.7,
# # c=colors, edgecolor='k')
# # # Labeling the clusters
# # centers = clusterer.cluster_centers_
# # # Draw white circles at cluster centers
# # ax2.scatter(centers[:, 0], centers[:, 1], marker='o',
# # c="white", alpha=1, s=200, edgecolor='k')
# # for i, c in enumerate(centers):
# # ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1,
# # s=50, edgecolor='k')
# # ax2.set_title("The visualization of the clustered data.")
# # ax2.set_xlabel("Feature space for the 1st feature")
# # ax2.set_ylabel("Feature space for the 2nd feature")
# # plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
# # "with n_clusters = %d" % n_clusters),
# # fontsize=14, fontweight='bold')
# # plt.show()
# #km.cluster_centers_
# from sklearn import datasets
# from sklearn.manifold import TSNE
# import matplotlib.pyplot as plt
# from sklearn.preprocessing import MinMaxScaler
# scaler = MinMaxScaler()
# transformed = scaler.fit_transform(x)
# # Plotting 2d t-Sne
# x_axis = transformed[:,0]
# y_axis = transformed[:,1]
# kmeans = KMeans(n_clusters=4, random_state=42,n_jobs=-1)
# y_pred =kmeans.fit_predict(transformed)
# predicted_label = kmeans.predict([[7,7.2, 3.5, 0.8, 1.6,7.2, 3.5, 0.8, 1.6,7.2, 3.5, 0.8, 1.67, 7.2, 3.5]])
# predicted_label
# # from sklearn.manifold import TSNE
# # tsne = TSNE(random_state=17)
# # X_tsne = tsne.fit_transform(transformed)
# # plt.figure(figsize=(12,10))
# # plt.scatter(X_tsne[:, 0], X_tsne[:, 1], c=y_pred,
# # edgecolor='none', alpha=0.7, s=40,
# # cmap=plt.cm.get_cmap('nipy_spectral', 10))
# # plt.colorbar()
# # plt.title('cluster. t-SNE projection');
# # pca = PCA(n_components=2)
# # X_reduced = pca.fit_transform(transformed)
# # print('Projecting %d-dimensional data to 2D' % X.shape[1])
# # plt.figure(figsize=(12,10))
# # plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y_pred,
# # edgecolor='none', alpha=0.7, s=40,
# # cmap=plt.cm.get_cmap('nipy_spectral', 10))
# # plt.colorbar()
# # plt.title('cluster. PCA projection');
# # st.pyplot()
# # """https://www.kaggle.com/kashnitsky/topic-7-unsupervised-learning-pca-and-clustering"""
# # import seaborn as sns
# # import pickle
# # pickle.dump(kmeans,open('unsupervisedmodels.pkl','wb'))
# # """Create a platform where new records of countries can be classified in the clusters"""
# # Commented out IPython magic to ensure Python compatibility.
# # %%writefile app.py
# import streamlit as st
# import pickle
# import numpy as np
# # kmeans=pickle.load(open('unsupervisedmodels.pkl','rb'))
# def predict_kmeans(CountryName,StringencyLegacyIndexForDisplay,StringencyIndexForDisplay, StringencyIndex,StringencyLegacyIndex,ContainmentHealthIndexForDisplay,ContainmentHealthIndex,GovernmentResponseIndexForDisplay,ConfirmedCases,ConfirmedDeaths,EconomicSupportIndexForDisplay,E2_Debtcontractrelief,EconomicSupportIndex,C3_Cancelpublicevents,C1_Schoolclosing):
# input=np.array([[CountryName,StringencyLegacyIndexForDisplay,StringencyIndexForDisplay, StringencyIndex,StringencyLegacyIndex,ContainmentHealthIndexForDisplay,ContainmentHealthIndex,GovernmentResponseIndexForDisplay,ConfirmedCases,ConfirmedDeaths,EconomicSupportIndexForDisplay,E2_Debtcontractrelief,EconomicSupportIndex,C3_Cancelpublicevents,C1_Schoolclosing]]).astype(np.float64)
# prediction=kmeans.predict(input)
# return prediction
# def main():
# st.title("Records of countries classified in the clusters")
# html_temp = """
# <div style="background-color:#025246 ;padding:10px">
# <h2 style="color:white;text-align:center;">Unsupervised ML App </h2>
# </div>
# """
# st.markdown(html_temp, unsafe_allow_html=True)
# CountryName = st.text_input("CountryName","Type Here",key='0')
# StringencyLegacyIndexForDisplay = st.text_input("StringencyLegacyIndexForDisplay","Type Here",key='1')
# StringencyIndexForDisplay = st.text_input("StringencyIndexForDisplay","Type Here",key='2')
# StringencyIndex = st.text_input("StringencyIndex","Type Here",key='3')
# StringencyLegacyIndex = st.text_input("StringencyLegacyIndex","Type Here",key='4')
# ContainmentHealthIndexForDisplay = st.text_input("ContainmentHealthIndexForDisplay","Type Here",key='5')
# GovernmentResponseIndexForDisplay = st.text_input("GovernmentResponseIndexForDisplay","Type Here",key='6')
# ContainmentHealthIndex = st.text_input("ContainmentHealthIndex","Type Here",key='7')
# ConfirmedCases = st.text_input("ConfirmedCases","Type Here",key='8')
# ConfirmedDeaths = st.text_input("ConfirmedDeaths","Type Here",key='9')
# EconomicSupportIndexForDisplay = st.text_input("EconomicSupportIndexForDisplay","Type Here",key='9')
# E2_Debtcontractrelief = st.text_input("E2_Debtcontractrelief","Type Here",key='10')
# EconomicSupportIndex = st.text_input("EconomicSupportIndex","Type Here",key='11')
# C3_Cancelpublicevents = st.text_input("C3_Cancelpublicevents","Type Here",key='12')
# C1_Schoolclosing = st.text_input("C1_Schoolclosing","Type Here",key='13')
# safe_html="""
# <div style="background-color:#F4D03F;padding:10px >
# <h2 style="color:white;text-align:center;"> Your forest is safe</h2>
# </div>
# """
# danger_html="""
# <div style="background-color:#F08080;padding:10px >
# <h2 style="color:black ;text-align:center;"> Your forest is in danger</h2>
# </div>
# """
# if st.button("Predict"):
# output=predict_kmeans(CountryName,StringencyLegacyIndexForDisplay,StringencyIndexForDisplay, StringencyIndex,StringencyLegacyIndex,ContainmentHealthIndexForDisplay,ContainmentHealthIndex,GovernmentResponseIndexForDisplay,ConfirmedCases,ConfirmedDeaths,EconomicSupportIndexForDisplay,E2_Debtcontractrelief,EconomicSupportIndex,C3_Cancelpublicevents,C1_Schoolclosing)
# st.success('This country located in this cluster {}'.format(output))
# if __name__=='__main__':
# main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
d12df3fe82e2b7cd6f21fa29ba094b5242d3d3bc
|
d57980cc6f4e1147f6f4fe1bc1c68f8d731bcca5
|
/train.py
|
d8d544f2bedfb30954a8a55fb7f42dfce3477a97
|
[] |
no_license
|
fuding/CAGFace
|
244813b572953dc218b05b0e38cacc395c19d619
|
1436d44a089647ee62918b496d85b37a162d8e49
|
refs/heads/master
| 2020-11-24T15:11:09.480152
| 2019-11-25T08:35:40
| 2019-11-25T08:35:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,114
|
py
|
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from model import *
from dataloader import Data_Loader
from torchvision import transforms
from optimizer import Optimizer
import matplotlib.pyplot as plt
pretrained = True   # resume from a saved checkpoint instead of training from scratch
lr_start = 1e-16 #0.0000505
visualize = True    # preview network output vs. label every 10 iterations
dl = Data_Loader('/home/yo0n/바탕화면/Flicker',256,1).loader()
if(pretrained):
model = torch.load('./checkpoints/cand.pth')
"""
model = nn.Sequential(
model,
conv3x3(3,3),
nn.ReLU(inplace=True)
)
model = model.cuda()
"""
else:
model = CAGFace(1).cuda()
print(" --- model loaded --- ")
print(model)
criteria = nn.SmoothL1Loss()
#criteria = nn.L1Loss()
## optimizer
#optimizer = optim.SGD(model.parameters(), lr=lr_start)
optimizer = optim.Adam(model.parameters(), lr=lr_start)
for epoch in range(10):
    step = 0
    loss_lowest = 9999   # lowest running-average loss seen so far this epoch
    loss_avg = []
for im, lb_512, lb_1024 in tqdm(dl):
im = im.cuda()
out = model(im)
loss = criteria(out.cpu(), lb_512)  # (input, target) order; the SmoothL1 value is the same either way
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_avg.append(loss.item())
step += 1   # renamed from `iter`, which shadows the builtin
if(step % 10 == 0):
    print("step : ", step)
l = sum(loss_avg) / len(loss_avg)
print("loss : ",l)
outImage = out.data.cpu()
if visualize:
plt.figure(1)
plt.subplot(211)
plt.imshow(transforms.ToPILImage()(outImage.squeeze()))
plt.subplot(212)
plt.imshow(transforms.ToPILImage()(lb_512.cpu().squeeze()))
plt.pause(1)
plt.close("all")
if(l < loss_lowest):
loss_lowest = l
torch.save(model, "./checkpoints/"+str(epoch)+".pth")
print("improved!")
else:
torch.save(model, "./checkpoints/"+str(epoch)+"_update"+".pth")
print("epoch : ",epoch," \nloss : ",sum(loss_avg) / len(loss_avg))
torch.save(model, "./checkpoints/"+str(epoch)+"_final"+".pth")
|
[
"noreply@github.com"
] |
noreply@github.com
|
b94655700bbb6e94cf77d31130c311a69755a8b1
|
982b49c38e9e4184ef4d7e4fbc45d97ac433738c
|
/FeatureExtraction/extractExpressionFeatures.py
|
94cf92f8946934e0d4ed4f2b5a9618342e21ebe9
|
[] |
no_license
|
biswajitsc/LaSer
|
35fc236857615059678275a264954b61f0d3c1de
|
7484bc2a35dc07cd12eae5c1f1cbdb2088c582c7
|
refs/heads/master
| 2021-01-10T14:13:53.762777
| 2015-11-15T09:51:22
| 2015-11-15T09:51:22
| 40,284,558
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,640
|
py
|
# coding: utf-8
# NOTE: this script targets Python 2 (print statements, unicode()).
import re
import sys
import math
from convertMathMLExpression import *
def extract_MathMLUnigrams(mathML) :
unigrams = set()
for line in mathML :
words = line.split(' ')
for word in words :
if (len(word) > 0) :
unigrams.add(word)
print "Unigrams of MathML equations Extracted"
numDocs = len(mathML)
idf_scores = {}
unigrams_postinglist = {}
for unigram in unigrams :
unigrams_postinglist[unigram] = []
idf_scores[unigram] = 0
i = 0
for line in mathML :
i += 1
for unigram in unigrams :
string = str(unigram)
if string in line :
unigrams_postinglist[unigram].append((i, line.count(string)))
idf_scores[unigram] += 1
print "Unigram Features Postings List of MathML equations created"
return (unigrams, unigrams_postinglist, idf_scores)
def main() :
input_file = open(sys.argv[1],"r")
output_file_unigrams = open("../../Data/UnigramFeatures","w")
output_file_bigrams = open("../../Data/BigramFeatures","w")
output_file_trigrams = open("../../Data/TrigramFeatures","w")
output_file_expressions = open("../../Data/ExtractedExpressions","w")
output_file_idfs = open("../../Data/IDF-Scores","w")
data = input_file.read()
# data = data.replace("\n"," ")
# lines = data.split('<m:math')
lines = data.split('\n')
mathML = []
for line in lines :
temp_line = line
line = line.replace("<m:","<")
line = line.replace("</m:","</")
line = line.replace('\n', ' ')
symbol = unicode(line, "utf-8")
line = symbol.encode('ascii', 'backslashreplace')
# if len(line) == 0 :
# continue
# line = '<math' + line
line = line.replace('<?xml version="1.0"?>', "")
mathML.append(line)
# xmls = line.split('<?xml version="1.0"?>')
# for xml in xmls :
# xml = re.sub(' +',' ',xml)
# xml = xml.replace('\t', ' ')
# mathML.append(xml)
(unigrams_mathML, unigrams_postinglist, idf_scores) = extract_MathMLUnigrams(mathML)
expressions = convertEquation(mathML)
print "Num Expressions : ", len(expressions)
for expression in expressions :
output_file_expressions.write(expression.encode('utf-8') + '\n')
unigrams = set()
bigrams = set()
trigrams = set()
for line in expressions :
line = line.encode('utf-8')
words = line.split(' ')
for word in words :
if (len(word) > 0) :
unigrams.add(word)
print "Unigrams of expressions Extracted"
for line in expressions :
line = line.encode('utf-8')
words = line.split(' ')
if len(words) >= 2 :
i = 0
while (i < (len(words) - 1)) :
if (len(words[i]) > 0 and len(words[i + 1]) > 0) :
bigrams.add((words[i],words[i + 1]))
i += 1
print "Bigrams of expressions Extracted"
for line in expressions :
line = line.encode('utf-8')
words = line.split(' ')
if len(words) > 2 :
i = 0
while (i < (len(words) - 2)) :
if (len(words[i]) > 0 and len(words[i + 1]) > 0 and len(words[i + 2]) > 0) :
trigrams.add((words[i],words[i + 1],words[i + 2]))
i += 1
print "Trigrams of expressions Extracted"
print "Unigrams in MathML : ", len(unigrams_mathML), ", Unigrams in Expression : ", len(unigrams), ", Bigrams in Expression : ", len(bigrams), ", Trigrams in Expression : ", len(trigrams)
numDocs = len(mathML)
for unigram in unigrams :
unigrams_postinglist[unigram] = []
idf_scores[unigram] = 0
bigrams_postinglist = {}
for bigram in bigrams :
bigrams_postinglist[bigram] = []
idf_scores[bigram] = 0
trigrams_postinglist = {}
for trigram in trigrams :
trigrams_postinglist[trigram] = []
idf_scores[trigram] = 0
i = 0
for line in expressions :
line = line.encode('utf-8')
i += 1
for unigram in unigrams :
string = str(unigram)
if string in line :
unigrams_postinglist[unigram].append((i, line.count(string)))
idf_scores[unigram] += 1
print "Unigram Features Postings List created"
i = 0
for line in expressions :
line = line.encode('utf-8')
i += 1
if (i % 100 == 0) :
print str(i) + "th xml checked for bigrams"
for bigram in bigrams :
string = (str(bigram[0]) + ' ' + str(bigram[1]))
if string in line :
bigrams_postinglist[bigram].append((i, line.count(string)))
idf_scores[bigram] += 1
print "Bigram Features Postings List created"
i = 0
for line in expressions :
line = line.encode('utf-8')
i += 1
if (i % 100 == 0) :
print str(i) + "th xml checked for trigrams"
for trigram in trigrams :
string = (str(trigram[0]) + ' ' + str(trigram[1]) + ' ' + str(trigram[2]))
if string in line :
trigrams_postinglist[trigram].append((i, line.count(string)))
idf_scores[trigram] += 1
print "Trigram Features Postings List created"
i = 0
for unigram in unigrams_postinglist.keys() :
if len(unigrams_postinglist[unigram]) <= 5 :
unigrams_postinglist.pop(unigram, None)
i += 1
print i, " rare Unigram features removed"
i = 0
for bigram in bigrams_postinglist.keys() :
if len(bigrams_postinglist[bigram]) <= 5 :
bigrams_postinglist.pop(bigram, None)
i += 1
print i, " rare Bigram features removed"
i = 0
for trigram in trigrams_postinglist.keys() :
if len(trigrams_postinglist[trigram]) <= 5 :
trigrams_postinglist.pop(trigram, None)
i += 1
print i, " rare Trigram features removed"
output_file_unigrams.write(str(unigrams_postinglist))
output_file_bigrams.write(str(bigrams_postinglist))
output_file_trigrams.write(str(trigrams_postinglist))
for features in idf_scores :
idf_scores[features] = (1 + math.log(float(numDocs)/idf_scores[features])) # float division; every extracted feature occurs in at least one document, so the count is non-zero
output_file_idfs.write(str(idf_scores))
# i = 0
# weight_matrix = []
# for line in mathML :
# values = {}
# i += 1
# if (i % 100 == 0) :
# print str(i) + "th xml's weights written"
# for unigram in unigrams :
# for doc_id_weight_pair in unigrams_postinglist[unigram] :
# if doc_id_weight_pair[0] == i :
# values[unigram] = (idf_scores[unigram] * (1 + math.log(doc_id_weight_pair[1])))
# else :
# values[unigram] = idf_scores[unigram]
# for bigram in bigrams :
# for doc_id_weight_pair in bigrams_postinglist[bigram] :
# if doc_id_weight_pair[0] == i :
# values[bigram] = (idf_scores[bigram] * (1 + math.log(doc_id_weight_pair[1])))
# else :
# values[bigram] = idf_scores[bigram]
# for trigram in trigrams :
# for doc_id_weight_pair in trigrams_postinglist[trigram] :
# if doc_id_weight_pair[0] == i :
# values[trigram] = (idf_scores[trigram] * (1 + math.log(doc_id_weight_pair[1])))
# else :
# values[trigram] = idf_scores[trigram]
# weight_matrix.append(values)
# # output_file_weights.write(str(values) + '\n')
# return weight_matrix
if __name__ == "__main__" :
main()
|
[
"agnivo.saha@gmail.com"
] |
agnivo.saha@gmail.com
|
6f2c689cf82b43d48f1d508aa2010818e990e7a7
|
a80f73c8b5f2b807b4ec6d1c5c1c781ba0bfdc3a
|
/projecteuler/data_prob61.py
|
7acbbb2144278544687cee62f4142cd2504e23a9
|
[] |
no_license
|
Himanshu-Mishr/projecteuler
|
215d30c1b2742bb2e8f95336db3cdb4799f78680
|
419be91e480c9f29911f3370c443f0abb528f033
|
refs/heads/master
| 2021-01-13T02:30:24.313301
| 2014-09-07T05:22:39
| 2014-09-07T05:22:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,339
|
py
|
##### triangle number
tri = ['1035',
'1081',
'1128',
'1176',
'1225',
'1275',
'1326',
'1378',
'1431',
'1485',
'1540',
'1596',
'1653',
'1711',
'1770',
'1830',
'1891',
'1953',
'2016',
'2080',
'2145',
'2211',
'2278',
'2346',
'2415',
'2485',
'2556',
'2628',
'2701',
'2775',
'2850',
'2926',
'3003',
'3081',
'3160',
'3240',
'3321',
'3403',
'3486',
'3570',
'3655',
'3741',
'3828',
'3916',
'4005',
'4095',
'4186',
'4278',
'4371',
'4465',
'4560',
'4656',
'4753',
'4851',
'4950',
'5050',
'5151',
'5253',
'5356',
'5460',
'5565',
'5671',
'5778',
'5886',
'5995',
'6105',
'6216',
'6328',
'6441',
'6555',
'6670',
'6786',
'6903',
'7021',
'7140',
'7260',
'7381',
'7503',
'7626',
'7750',
'7875',
'8001',
'8128',
'8256',
'8385',
'8515',
'8646',
'8778',
'8911',
'9045',
'9180',
'9316',
'9453',
'9591',
'9730',
'9870']
##### square number
sqr = ['1024',
'1089',
'1156',
'1225',
'1296',
'1369',
'1444',
'1521',
'1600',
'1681',
'1764',
'1849',
'1936',
'2025',
'2116',
'2209',
'2304',
'2401',
'2500',
'2601',
'2704',
'2809',
'2916',
'3025',
'3136',
'3249',
'3364',
'3481',
'3600',
'3721',
'3844',
'3969',
'4096',
'4225',
'4356',
'4489',
'4624',
'4761',
'4900',
'5041',
'5184',
'5329',
'5476',
'5625',
'5776',
'5929',
'6084',
'6241',
'6400',
'6561',
'6724',
'6889',
'7056',
'7225',
'7396',
'7569',
'7744',
'7921',
'8100',
'8281',
'8464',
'8649',
'8836',
'9025',
'9216',
'9409',
'9604',
'9801']
##### pentagonal number
penta = ['1001',
'1080',
'1162',
'1247',
'1335',
'1426',
'1520',
'1617',
'1717',
'1820',
'1926',
'2035',
'2147',
'2262',
'2380',
'2501',
'2625',
'2752',
'2882',
'3015',
'3151',
'3290',
'3432',
'3577',
'3725',
'3876',
'4030',
'4187',
'4347',
'4510',
'4676',
'4845',
'5017',
'5192',
'5370',
'5551',
'5735',
'5922',
'6112',
'6305',
'6501',
'6700',
'6902',
'7107',
'7315',
'7526',
'7740',
'7957',
'8177',
'8400',
'8626',
'8855',
'9087',
'9322',
'9560',
'9801']
##### hexagonal number
hexa = ['1035',
'1128',
'1225',
'1326',
'1431',
'1540',
'1653',
'1770',
'1891',
'2016',
'2145',
'2278',
'2415',
'2556',
'2701',
'2850',
'3003',
'3160',
'3321',
'3486',
'3655',
'3828',
'4005',
'4186',
'4371',
'4560',
'4753',
'4950',
'5151',
'5356',
'5565',
'5778',
'5995',
'6216',
'6441',
'6670',
'6903',
'7140',
'7381',
'7626',
'7875',
'8128',
'8385',
'8646',
'8911',
'9180',
'9453',
'9730']
##### heptagonal number
hepta = ['1071',
'1177',
'1288',
'1404',
'1525',
'1651',
'1782',
'1918',
'2059',
'2205',
'2356',
'2512',
'2673',
'2839',
'3010',
'3186',
'3367',
'3553',
'3744',
'3940',
'4141',
'4347',
'4558',
'4774',
'4995',
'5221',
'5452',
'5688',
'5929',
'6175',
'6426',
'6682',
'6943',
'7209',
'7480',
'7756',
'8037',
'8323',
'8614',
'8910',
'9211',
'9517',
'9828']
##### octogonal number
octa = ['1045',
'1160',
'1281',
'1408',
'1541',
'1680',
'1825',
'1976',
'2133',
'2296',
'2465',
'2640',
'2821',
'3008',
'3201',
'3400',
'3605',
'3816',
'4033',
'4256',
'4485',
'4720',
'4961',
'5208',
'5461',
'5720',
'5985',
'6256',
'6533',
'6816',
'7105',
'7400',
'7701',
'8008',
'8321',
'8640',
'8965',
'9296',
'9633',
'9976']
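##### Editor's note: an optional sketch (not in the original file) showing how the
##### hard-coded 4-digit lists above can be generated from the standard figurate
##### number formula P(k, n) = n*((k-2)*n - (k-4)) / 2 used in Project Euler 61.
def figurate(k, n):
    """n-th k-gonal number: k=3 triangle, k=4 square, ..., k=8 octagonal."""
    return n * ((k - 2) * n - (k - 4)) // 2

def four_digit(k):
    """All 4-digit k-gonal numbers, as strings, in ascending order."""
    return [str(figurate(k, n)) for n in range(1, 200)
            if 1000 <= figurate(k, n) <= 9999]

# e.g. four_digit(3) reproduces `tri` above and four_digit(8) reproduces `octa`.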
|
[
"himanshu.m786@gmail.com"
] |
himanshu.m786@gmail.com
|
757cb43a1fa89e94d8822ea4661c87d1dccfd79a
|
aefc47e459f2911416cf8f632142abe452fcced9
|
/CIS8005_Assignment1/Chapter3_Excercise16.py
|
c3537df2d4bbf4f839efcb41286d00ec26e27cd2
|
[] |
no_license
|
lequan26111991/CIS8005-Data-Programming-for-Analytics
|
ac2f0b2fd3ea3f83aff9571cf62ffa78b04321ba
|
a17109adb6a194292db850ab018ed0946157aed5
|
refs/heads/master
| 2020-07-29T23:25:28.456404
| 2019-09-21T14:49:40
| 2019-09-21T14:49:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 870
|
py
|
''' Chapter 3 Exercise 16 '''
from turtle import *
pensize(3)
penup()
goto(-200, -50)
pendown()
'''Draw a Triangle '''
right(60)
circle(40, steps = 3)
'''Draw a Square '''
left(15)
penup()
goto(-100, -50)
pendown()
begin_fill()
color("blue")
circle(40, steps = 4)
end_fill()
'''Draw a pentagon'''
left(9)
penup()
goto(0, -50)
pendown()
begin_fill()
color("green")
circle(40, steps = 5)
end_fill()
'''Draw a hexagon'''
left(7)
penup()
goto(100, -50)
pendown()
begin_fill()
color("yellow")
circle(40, steps = 6)
end_fill()
'''Draw a circle'''
penup()
goto(200, -50)
pendown()
begin_fill()
color("purple")
circle(40)
end_fill()
color("green")
''' Draw Words'''
penup()
goto(-100, 50)
pendown()
write("Cool Colorful Shapes", font = ("Times", 18, "bold"))
done()
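'''Editor's note (not part of the original exercise): turtle's circle(radius,
steps=n) approximates the circle with an n-sided regular polygon, which is why
the shapes above all use circle(); a sixth shape, an octagon, could be added with:
penup(); goto(300, -50); pendown()
begin_fill(); color("orange")
circle(40, steps = 8)
end_fill()
'''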
|
[
"lequan26111991@gmail.com"
] |
lequan26111991@gmail.com
|
9bc70906c5a573ba42746d4a2f4efbf81e0e86c1
|
98f730ec6a43d8be4a34b0f2a44a9d35989d2287
|
/pynifi_client/models/tenants_entity.py
|
b4af3df3c70dc03de0e1a0bfb4fb63eb26b9a058
|
[] |
no_license
|
scottwr98/pynifi-client
|
9337a4f322536ee466d419a788b8b5948cdc62d7
|
013ac2ffa591284a0d6cbb9ed552681cc6f91165
|
refs/heads/master
| 2020-04-18T08:47:03.680749
| 2017-11-04T23:59:58
| 2017-11-04T23:59:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,219
|
py
|
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service. # noqa: E501
OpenAPI spec version: 1.4.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from pynifi_client.models.tenant_entity import TenantEntity # noqa: F401,E501
class TenantsEntity(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'users': 'list[TenantEntity]',
'user_groups': 'list[TenantEntity]'
}
attribute_map = {
'users': 'users',
'user_groups': 'userGroups'
}
def __init__(self, users=None, user_groups=None): # noqa: E501
"""TenantsEntity - a model defined in Swagger""" # noqa: E501
self._users = None
self._user_groups = None
self.discriminator = None
if users is not None:
self.users = users
if user_groups is not None:
self.user_groups = user_groups
@property
def users(self):
"""Gets the users of this TenantsEntity. # noqa: E501
:return: The users of this TenantsEntity. # noqa: E501
:rtype: list[TenantEntity]
"""
return self._users
@users.setter
def users(self, users):
"""Sets the users of this TenantsEntity.
:param users: The users of this TenantsEntity. # noqa: E501
:type: list[TenantEntity]
"""
self._users = users
@property
def user_groups(self):
"""Gets the user_groups of this TenantsEntity. # noqa: E501
:return: The user_groups of this TenantsEntity. # noqa: E501
:rtype: list[TenantEntity]
"""
return self._user_groups
@user_groups.setter
def user_groups(self, user_groups):
"""Sets the user_groups of this TenantsEntity.
:param user_groups: The user_groups of this TenantsEntity. # noqa: E501
:type: list[TenantEntity]
"""
self._user_groups = user_groups
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TenantsEntity):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
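# Editor's note: a small usage sketch (not part of the generated file). It only
# calls methods defined above; the TenantEntity construction details are an assumption.
#
# from pynifi_client.models.tenant_entity import TenantEntity
# tenants = TenantsEntity(users=[TenantEntity()], user_groups=[])
# print(tenants.to_dict())   # nested models are serialized via their own to_dict()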
|
[
"ajish@rootedinsights.com"
] |
ajish@rootedinsights.com
|
2a7cfcfdbd04f2e518be613466168c4984eeac5c
|
229de1ffeb88de97de4c3c2d8372ddab003a6fd4
|
/Theory/16_producer_consumer_threads.py
|
5a4982434f42d720468699fa9c0b253c6e4b8d82
|
[] |
no_license
|
KiraUnderwood/ParallelProgPython
|
f609cb2169b03e57a46e56ff09ce0f2d71edb369
|
5c8b92b3466553bd816fc95439570d101986e0bf
|
refs/heads/master
| 2021-01-14T02:09:11.191654
| 2020-02-23T18:15:26
| 2020-02-23T18:15:26
| 242,566,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
#!/usr/bin/env python3
""" Producers serving soup for Consumers to eat """
import queue
import threading
import time
serving_line = queue.Queue(maxsize=5) # shared by producer and consumers; queue.Queue is thread-safe for passing objects between threads
def soup_producer():
for i in range(20): # serve 20 bowls of soup
serving_line.put_nowait('Bowl #'+str(i))
print('Served Bowl #', str(i), '- remaining capacity:', \
serving_line.maxsize-serving_line.qsize())
time.sleep(0.2) # time to serve a bowl of soup
serving_line.put_nowait('no soup for you!') # one shutdown sentinel per consumer (two consumers are started below)
serving_line.put_nowait('no soup for you!')
def soup_consumer():
while True:
bowl = serving_line.get()
if bowl == 'no soup for you!': # sentinel received: stop this consumer thread
break
print('Ate', bowl)
time.sleep(0.3) # time to eat a bowl of soup
if __name__ == '__main__':
for i in range(2):
threading.Thread(target=soup_consumer).start()
threading.Thread(target=soup_producer).start()
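# Editor's note: an optional variant (not in the original). Deriving the sentinel
# count from a constant keeps producer and consumers in sync if more eaters are added:
#
# NUM_CONSUMERS = 2
# for _ in range(NUM_CONSUMERS):
#     serving_line.put_nowait('no soup for you!')   # at the end of soup_producer()
# ...
# for _ in range(NUM_CONSUMERS):
#     threading.Thread(target=soup_consumer).start()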
|
[
"kpodlesnaya@mail.ru"
] |
kpodlesnaya@mail.ru
|
efca081d1b784241ee5ddf78a2d22f15c97f0013
|
bff07ee1ee983c01acb5a5f626941bfea720f5d9
|
/EstruturaSequencial/ex004.py
|
a7b1266db4966d7ef9b542fb2469f96257b7a62e
|
[] |
no_license
|
aldrinpscastro/PythonExercicios
|
52fb61c2e6a70d23b5e5f6768438504bac85520f
|
96d065030e37624668db8ed1bb44bbfb52898763
|
refs/heads/master
| 2020-05-28T08:04:48.444370
| 2019-08-02T07:19:47
| 2019-08-02T07:19:47
| 188,928,117
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
# reads four bimonthly grades (prompts are in Portuguese) and prints their average
nota1 = float(input('Digite a primeira nota bimestral: '))
nota2 = float(input('Digite a segunda nota bimestral: '))
nota3 = float(input('Digite a terceira nota bimestral: '))
nota4 = float(input('Digite a quarta nota bimestral: '))
media = (nota1 + nota2 + nota3 + nota4) / 4
# format the average using a decimal comma (pt-BR convention)
mediastr = str(media).replace('.', ',')
print(f'A média das notas é: {mediastr}.')
|
[
"aldrinpdscastro@gmail.com"
] |
aldrinpdscastro@gmail.com
|
f3373b35d609b72af3d72b5f3fa8644a8a46377d
|
540597e8377f14d73a0e0c8716c67743876fac22
|
/todotracker/todolist/urls.py
|
29d2b4d40b41ecff47213b7a6a6f0e165ec9ce03
|
[] |
no_license
|
sram04/TodoApp
|
d88be6faa50d9508f7d80aaed8e44e1ba14af052
|
d60001cd9c4d8b8ed1465ed77439595f2e435901
|
refs/heads/master
| 2020-03-19T02:08:50.749507
| 2018-10-31T17:32:06
| 2018-10-31T17:32:06
| 135,603,115
| 0
| 0
| null | 2018-06-20T03:09:02
| 2018-05-31T15:35:41
|
HTML
|
UTF-8
|
Python
| false
| false
| 327
|
py
|
from rest_framework.routers import DefaultRouter
from .api import StatusViewSet, TaskItemViewSet, EventViewSet
router = DefaultRouter()
router.register(r'status', StatusViewSet)
router.register(r'taskitems', TaskItemViewSet, 'taskitem-list')
router.register(r'events', EventViewSet, 'events-list')
urlpatterns = router.urls
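# Editor's note: a minimal sketch (not part of this file) of how these routes are
# typically mounted in the project's root URLconf; the "todolist" import path and
# the "api/" prefix are assumptions.
#
# # todotracker/urls.py
# from django.urls import include, path
#
# urlpatterns = [
#     path('api/', include('todolist.urls')),
# ]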
|
[
"sairamendreddy@gmail.com"
] |
sairamendreddy@gmail.com
|
896387b71de62c3f33d2bb9ccadb050d69c70b63
|
b5155a2ece4ee4ca5a1a9e79d104f7b8914de508
|
/OnlineprogrammingCourse.py
|
6e34455d59867ea7591de979844e9162af909c18
|
[] |
no_license
|
DeepakKumarMD/online-course-management
|
106d5ee3d2bc547e6f077ed26f4fbdc03f336a04
|
7c120db176a787db06f5e90f28cc3f9ddd706af9
|
refs/heads/main
| 2023-03-30T16:39:18.192858
| 2021-04-02T09:17:26
| 2021-04-02T09:17:26
| 353,967,706
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29,487
|
py
|
from tkinter import *
from tkinter import ttk
import sqlite3
import tkinter.messagebox
from datetime import date
from tkinter import filedialog
import shutil
import os
from tkinter import Text, Tk
today = date.today()
print('software is running...')
firstw = Tk()
firstw.title("ONLINE COURSE MANAGEMENT")
firstw.geometry("1600x1000+0+0")
label = Label(text="ONLINE COURSE MANAGEMENT", font=("times new roman", 35),fg='white', bg="#2A1B3D")
label.pack(side=TOP, fill=X)
user1 = Label(text="USERNAME", font=("forte", 23), fg="#3B8BEB")
user1.place(x=610, y=120)
user = Entry(width=17, bd=5, font=("arial", 20))
user.place(x=570, y=200)
label.pack(side=TOP, fill=X)
user2 = Label(text="PASSWORD", font=("forte", 23),fg="#3B8BEB")
user2.place(x=610, y=280)
user3 = Entry(width=17, show="*", bd=5, font=("arial", 20))
user3.place(x=570, y=360)
firstw.configure(bg='#F9D342')
def second():
    global secondw
    secondw = Tk()  # NOTE: creates a second Tk() root; Toplevel(firstw) is the usual choice for extra windows
secondw.title("online course management")
secondw.geometry("1600x1000+0+0")
def distroy4():
secondw.destroy()
root()
def student():
student1 = Tk()
student1.title("STUDENT DETAILS")
def studentid():
rot = Tk()
rot.title("VISITORS'S LIST ")
rot.geometry("1600x1000+0+0")
mainlabel = Label(rot, text="STUDENT DETAILS", font=("times new roman", 35), bg="MediumOrchid2")
mainlabel.pack(side=TOP, fill=X)
chat1 = ttk.Treeview(rot, height=20, columns=('name', 'sur', 'fee', 'branch'), selectmode="extended")
chat1.heading('#0', text='ID', anchor=CENTER)
chat1.heading('#1', text=' NAME', anchor=W)
chat1.heading('#2', text='FEES', anchor=W)
chat1.heading('#3', text='COURSE', anchor=W)
chat1.heading('#4', text="LAST NAME", anchor=W)
chat1.column('#1', stretch=YES, minwidth=50, width=100)
chat1.column('#3', stretch=YES, minwidth=100, width=200)
chat1.column('#4', stretch=YES, minwidth=50, width=100)
chat1.column('#2', stretch=YES, minwidth=50, width=100)
chat1.column('#0', stretch=YES, minwidth=50, width=70)
chat1.place(x=470, y=130)
ttk.Style().configure("Treeview", background="black", foreground="coral1")
ttk.Style().configure("Treeview.Heading", background="blue", foreground="palevioletRed1")
rot.configure(background='medium spring green')
vsb = ttk.Scrollbar(rot, orient="vertical", command=chat1.yview)
vsb.place(x=1027, y=150, height=400 + 20)
chat1.configure(yscrollcommand=vsb.set)
conn = sqlite3.connect("abcd12345.db")
with conn:
cur = conn.cursor()
cur.execute('SELECT id ,name, fee , branch,sur FROM kistar ')
for row1 in cur.fetchall():
chat1.insert('', 0, text=row1[0], values=(row1[1], row1[2], row1[3], row1[4]))
def viewenquiry2():
rt = Tk()
rt.title("VISITORS'S LIST")
rt.geometry("1600x1000+0+0")
mainlabel = Label(rt, text="VISITOR", font=("times new roman", 35), bg="MediumOrchid2")
mainlabel.pack(side=TOP, fill=X)
chat1 = ttk.Treeview(rt, height=20, columns=('EMAIL', 'ENQUIRY', 'DATE'), selectmode="extended")
chat1.heading('#0', text='NAME', anchor=CENTER)
chat1.heading('#1', text='EMAIL', anchor=CENTER)
chat1.heading('#2', text='ENQUIRY', anchor=CENTER)
chat1.heading('#3', text="DATE", anchor=CENTER)
chat1.column('#1', stretch=YES, minwidth=50, width=100)
chat1.column('#3', stretch=YES, minwidth=50, width=100)
chat1.column('#2', stretch=YES, minwidth=50, width=300)
chat1.column('#0', stretch=YES, minwidth=50, width=70)
vsb = ttk.Scrollbar(rt, orient="vertical", command=chat1.yview)
vsb.place(x=955, y=170, height=400 + 20)
chat1.configure(yscrollcommand=vsb.set)
chat1.place(x=400, y=170)
ttk.Style().configure("Treeview", background="red", foreground="coral1")
ttk.Style().configure("Treeview.heading", background="blue", foreground="palevioletRed1")
rt.configure(background="#3B8BEB")
conn = sqlite3.connect("abcd12345.db")
with conn:
cur = conn.cursor()
cur.execute('SELECT * FROM golu')
for row in cur.fetchall():
chat1.insert('', 0, text=row[0], values=(row[1], row[2], row[3]))
def distroy5():
secondw.destroy()
window()
mainlabel = Label(secondw, text="ONLINE COURSE MANAGEMENT", font=("times new roman", 35), fg='white', bg="black")
mainlabel.pack(side=TOP, fill=X)
button = Button(secondw, width=13,height=5, font=("illuma black", 20), text="COURSE\nREGISTRATION\n FORM",
fg="snow", bg="#C3073F", command=distroy4)
button.place(x=150, y=350)
enquiry = Button(secondw, width=13,height=5, font=("illuma black", 20), text="FEE\nPAYMENT\n PORTAL",
fg="snow", bg="#C3073F", command=distroy5)
enquiry.place(x=420, y=350)
fee_details = Button(secondw, width=13,height=5, font=("illuma black", 20), text="VISITOR'S\n PORTAL", fg="snow", bg="#C3073F", command=enquiry1)
fee_details.place(x=700, y=350)
viewenquiry = Button(secondw, width=13,height=5, font=("illuma black", 20), text="VIEW ENQUIRY",fg="snow", bg="#C3073F",
command=viewenquiry2)
viewenquiry.place(x=950, y=350)
viewenquiry1 = Button(secondw, width=13,height=5, font=("illuma black", 20), text="COURSE\n&\nSTUDENT\n DETAILS ",
fg="snow", bg="#C3073F",
command=studentid)
viewenquiry1.place(x=1200, y=350)
def distroy():
firstw.destroy()
def login():
if user.get() == "deeku" and user3.get() == "12345":
second()
distroy()
else:
t = tkinter.messagebox.showinfo("INVALID USERNAME OR PASSWORD ",
"YOU HAVE ENTERED INVALID USERNAME OR PASSWORD ")
user.delete(0, END)
user3.delete(0, END)
def root():
root = Tk()
root.geometry("1600x1000+0+0")
root.title("online course management")
global entry1
global entry2
global entry3
global entry4
global entry5
global box
global name
global radio1
global radio2
name = StringVar()
global sur
sur = StringVar()
global gander
gander = IntVar()
global var1
var1 = IntVar()
global var2
var2 = IntVar()
global branch
branch = StringVar()
global rollno
rollno = StringVar()
global email
email = StringVar()
global course
course = StringVar()
global python
python = IntVar()
global java
java = IntVar()
global c
c = IntVar()
global d
d = IntVar()
global calculate
calculate = StringVar()
id = IntVar()
search = IntVar()
NAME = name.get()
SUR = sur.get()
EMAIL = email.get()
BRANCH = branch.get()
GANDER = gander.get()
PYTHON = python.get()
JAVA = java.get()
C = c.get()
D = d.get()
CALCULATE = calculate.get()
calculation2 = 2000
label = Label(root, text="REGISTRATION FORM", font=("arial", 25), bg="violetred1")
label.pack(side=TOP, fill=X)
label1 = Label(root, text="NAME:", font=("arial", 17))
label1.place(x=300, y=150)
label2 = Label(root, text="SURNAME:", font=("arial", 17))
label2.place(x=300, y=210)
label3 = Label(root, text="EMAIL:", font=("arial", 17))
label3.place(x=300, y=270)
label3 = Label(root, text="GENDER:", font=("arial", 17))
label3.place(x=300, y=330)
label4 = Label(root, text="BRANCH:", font=("arial", 17))
label4.place(x=300, y=390)
label4 = Label(root, text="COURSE", font=("arial", 17))
label4.place(x=300, y=450)
label4 = Label(root, text="TOTAL FEE", font=("arial", 17))
label4.place(x=300, y=520)
root.configure(background='#116466')
# ==============================entryfield========================================
entry5 = Entry(root, textvar=calculate, state="readonly", width=20, font=("arial", 15, "bold"), bd=5)
entry5.place(x=500, y=515)
entry1 = Entry(root, bd=5, width=20, textvar=name, font=("arial", 15))
entry1.place(x=500, y=150)
# entry22=Entry(main,bd=5, width=20,textvar=sam ,font=("arial",15))
# entry22.place(x=500,y=150)
entry2 = Entry(root, bd=5, width=20, textvar=sur, font=("arial", 15))
entry2.place(x=500, y=210)
entry3 = Entry(root, bd=5, width=20, textvar=email, font=("arial", 15))
entry3.place(x=500, y=270)
    # lookup field for a student's roll number (id); Entry has no 'text' option
    entry4 = Entry(root, bd=5, width=20, textvar=search, font=("arial", 15))
    entry4.place(x=800, y=150)
    search.set("")
# ================================radio buttton=======================================
radio1 = Radiobutton(root, text="MALE", variable=gander, value=1, font=("arial", 13))
radio1.place(x=510, y=340)
radio2 = Radiobutton(root, text="FEMALE", variable=gander, padx=20, value=0, font=("arial", 13))
radio2.place(x=570, y=340)
gander.set(3)
# ================================droplist======================================
box = ttk.Combobox(root, textvariable=branch, state="readonly", font=("arial", 12, "bold"), width=22)
box['values'] = ['SELECT', 'JAVA', 'C++', 'PYTHON', 'C']
box.current(0)
box.place(x=503, y=395)
# ===============================checkbutton====================================
checkbutton1 = Checkbutton(root, text="beginner", font=("helvetica bold", 10), variable=java)
checkbutton1.place(x=502, y=455)
checkbutton1 = Checkbutton(root, text="intermediate", font=("helvetica bold", 10), variable=c)
checkbutton1.place(x=590, y=455)
checkbutton1 = Checkbutton(root, text="advanced", font=("helvetica bold", 10), variable=d)
checkbutton1.place(x=700, y=455, )
# checkbutton1 = Checkbutton(root, text="PYTHON", variable=python)
# checkbutton1.place(x=650, y=455)
python.set(0)
java.set(0)
c.set(0)
d.set(0)
def dis():
root.destroy()
second()
# root.filename=filedialog.askopenfile(initialdir="/",title="select file",filetypes=(("jpeg files","*.jpg"),("all files","*.*")))
# print(root.filename)
# os.chdir('c:\\')
# shutil.move((root.filename),("C:\\Users\\HP\Desktop\\projectgui\\image"))
# =========================buttton==========================
button1 = Button(root, text="CALCULATE FEE", width=14, font=("helvetica bold", 15), bg="#FFCB9A",
command=calculation)
button1.place(x=800, y=510)
button12 = Button(root, text="BACK", width=17, font=("arial", 17), bg="#FFCB9A", command=dis)
button12.place(x=0, y=0)
button2 = Button(root, text="SUBMIT FORM", width=14, font=("helvetica bold", 15), bg="#FFCB9A", command=msg)
button2.place(x=600, y=630)
button3 = Button(root, text="RESET", width=14, font=("helvetica bold", 15), bg="#FFCB9A", command=golu)
button3.place(x=395, y=630)
button4 = Button(root, text="SEARCH", width=14, font=("helvetica bold", 15), bg="#FFCB9A", command=all)
button4.place(x=1100, y=150)
# button7 = Button(root, text="UPLOAD PHOTO", width=14, font=("arial", 10), bg="indianred1",command=file)
# button7.place(x=1100, y=210)
button4 = Button(root, text="UPDATE", width=14, font=("helvetica bold", 15), bg="#FFCB9A", command=update)
button4.place(x=1000, y=630)
button5 = Button(root, text="DELETE", width=14, font=("helvetica bold", 15,), bg="#FFCB9A", command=delete)
button5.place(x=800, y=630)
# button6=Button(root,text="ENQUIRY",width=14,font=("arial",10),bg="indianred1",command=window )
# button6.place(x=300 , y=630)
conn = sqlite3.connect("abcd12345.db")
with conn:
cur = conn.cursor()
cur.execute(
'CREATE TABLE IF NOT EXISTS kistar(id INTEGER primary key autoincrement ,name text,sur text,email, branch text,gander text, fee int, python int,java int,c int,d int)')
cur.execute("UPDATE SQLITE_SEQUENCE SET seq = 1000 WHERE name = 'kistar'")
cur.execute('CREATE TABLE IF NOT EXISTS golu (NAME TEXT, PHONE INT ,PURPOSE TEXT,DATE)')
cur.execute(
            'CREATE TABLE IF NOT EXISTS FEEINSTALLMENT (id INT, TOTAL INT, REMAIN INT, PAID INT, INSTALLMENT INT, DATE)')
def ka():
NAMEE = entry23.get()
PHONE = entry24.get()
PURPOSE = box2.get()
conn = sqlite3.connect("abcd12345.db")
with conn:
cur = conn.cursor()
cur.execute('INSERT INTO golu(NAME,PHONE,PURPOSE,DATE)VALUES(?,?,?,?)', (NAMEE, PHONE, PURPOSE, today))
conn.commit()
def r():
j()
ka()
def enquiry1():
enquiry1 = Tk()
enquiry1.title("ENQUIRY")
enquiry1.geometry("1600x1000+0+0")
purpose = StringVar()
global entry23
global entry24
global box2
def enquiry1destroy():
enquiry1.destroy()
second()
label22 = Label(enquiry1, text="ENQUIRY", font=("arial", 25), bg="violetred1")
label22.pack(side=TOP, fill=X)
label1 = Label(enquiry1, text="NAME:", font=("arial", 17))
label1.place(x=300, y=150)
label2 = Label(enquiry1, text="PHONE NO.:", font=("arial", 17))
label2.place(x=300, y=210)
label3 = Label(enquiry1, text="PURPOSE:", font=("arial", 17))
label3.place(x=300, y=270)
entry23 = Entry(enquiry1, bd=5, width=20, font=("arial", 15))
entry23.place(x=500, y=150)
button = Button(enquiry1, text="submit", width=15, font=("comic sans ms", 20),fg='white',bg="#BA5536", command=r)
button.place(x=500, y=320)
button1 = Button(enquiry1, text="<< BACK", width=30, bg="violetred1", command=enquiry1destroy)
button1.place(x=0, y=0)
entry24 = Entry(enquiry1, bd=5, width=20, font=("arial", 15))
entry24.place(x=500, y=210)
box2 = ttk.Combobox(enquiry1, textvariable=purpose, state="readonly", font=("arial", 12, "bold"), width=22)
box2['values'] = ['SELECT', 'TO LEARN PROGRAMMING', 'TO LEARN MACHINE LEARNING', 'FEE DETAILS']
box2.current(0)
box2.place(x=500, y=270)
enquiry1.configure(background="#F9ED4E")
def cat():
z = IntVar()
FE = entry25.get()
x = entry26.get()
y = entry29.get()
FE = entry25.get()
conn = sqlite3.connect("abcd12345.db")
with conn:
cur = conn.cursor()
cur.execute('SELECT fee FROM kistar WHERE id=?', (FE,))
for row24 in cur.fetchall():
entry26.configure(state="normal")
entry26.delete(0, END)
entry26.insert(0, row24)
entry26.configure(state="disable")
cur.execute(' SELECT SUM(INSTALLMENT) FROM FEEINSTALLMENT WHERE id=? GROUP BY id ', (FE,))
for row23 in cur.fetchall():
entry27.delete(0, END)
entry27.insert(0, row23)
ye = entry27.get()
z = int(float((entry26.get()))) - int(float((entry27.get())))
# cur.execute('INSERT INTO FEEINSTALLMENT(id , TOTAL,INSTALLMENT,PAID ,REMAIN, DATE)VALUES(?,?,?,?,?,?)',(FE, x, y, ye, z, today,))
entry28.configure(state="normal")
entry28.delete(0, END)
entry28.insert(0, z)
print(row23)
entry27.configure(state="disable")
entry26.configure(state="disable")
entry28.configure(state="disable")
conn.commit()
print(x)
print(FE)
print(today)
def reset2():
entry26.configure(state="normal")
entry25.configure(state="normal")
# entry24.configure(state="normal")
entry27.configure(state="normal")
entry28.configure(state="normal")
entry29.configure(state="normal")
entry26.delete(0, END)
entry25.delete(0, END)
entry27.delete(0, END)
entry28.delete(0, END)
entry29.delete(0, END)
# box2.set("SELECT")
entry27.configure(state="disable")
entry26.configure(state="disable")
entry28.configure(state="disable")
def fee_add():
z = IntVar()
FE = entry25.get()
x = entry26.get()
y = entry29.get()
entry27.configure(state="normal")
entry28.configure(state="normal")
entry26.configure(state="normal")
    # fee_add needs its own connection; conn/cur from root() are not in scope here
    conn = sqlite3.connect("abcd12345.db")
    cur = conn.cursor()
    cur.execute('INSERT INTO FEEINSTALLMENT(id, TOTAL, INSTALLMENT, DATE) VALUES(?,?,?,?)', (FE, x, y, today,))
cur.execute(' SELECT SUM(INSTALLMENT) FROM FEEINSTALLMENT WHERE id=? GROUP BY id ', (FE,))
for row23 in cur.fetchall():
entry27.delete(0, END)
entry27.insert(0, row23)
ye = entry27.get()
z = int(float((entry26.get()))) - int(float((entry27.get())))
cur.execute('UPDATE FEEINSTALLMENT SET PAID=? WHERE id=?', (ye, FE,))
cur.execute('UPDATE FEEINSTALLMENT SET REMAIN=? WHERE id=?', (z, FE,))
entry28.configure(state="normal")
entry28.delete(0, END)
entry28.insert(0, z)
print(row23)
entry27.configure(state="disable")
entry26.configure(state="disable")
entry28.configure(state="disable")
conn.commit()
print(x)
print(FE)
print(today)
def installment2():
if int(entry29.index("end")) > int(0):
fee_add()
else:
x = tkinter.messagebox.showinfo("NO FEE ADDED", "YOU HAVE NOT ADDED ANY FEE ")
def j():
PURPOSE = box2.get()
print(PURPOSE)
def window():
global main
global namee
global phone
global purpose
global entry23
global entry24
global entry25
global entry26
global entry27
global entry28
global box2
global key
global fee3
global KEY
global ley
global sey
global ADDFEE
global entry29
# entry29=IntVar()
# entry26=IntVar()
# entry27=IntVar()
# key=StringVar()
# fee3=StringVar()
# ADDFEE=IntVar()
main = Tk()
main.geometry("1600x1000+0+0")
main.title("enqiry")
namee = StringVar()
phone = IntVar()
purpose = StringVar()
fe = StringVar()
key = IntVar()
ley = StringVar()
sey = StringVar()
# NAMEE=namee.get()
# PHONE=phone.get()
# PURPOSE=purpose.get()
def distroy3():
main.destroy()
second()
button = Button(main, text="BACK", width=30, bg="#FF1E27", command=distroy3)
button.place(x=0, y=0)
label3 = Label(main, text="ENTER STUDENT ID", font=("arial", 17))
label3.place(x=100, y=100)
label45 = Label(main, text =" FEE AMOUNT :",font=("arial",17))
label45.place(x= 610,y=100)
button22 = Button(main, text="ENTER", width=15, font=("comic sans ms", 17), bg="#8BD8BD", command=cat)
button22.place(x=170, y=250)
button23 = Button(main, text="PAY", width=8, font=("comic sans ms", 20),bg="#8BD8BD", command=installment2)
button23.place(x=670, y=250)
entry29 = Entry(main, bd=5, width=20, font=("arial", 15))
entry29.place(x=650, y=170)
button28 = Button(main, text="RESET", width=26, font=("arial", 10), bg="#FF1E27", command=reset2)
button28.place(x=1150, y=0)
label31 = Label(main, text="TOTAL FEE", font=("arial", 17))
label31.place(x=900, y=550)
label32 = Label(main, text="PAID FEE", font=("arial", 17))
label32.place(x=600, y=550)
label33 = Label(main, text="REMAIN FEE", font=("arial", 17))
label33.place(x=300, y=550)
entry25 = Entry(main, bd=5, width=20, font=("arial", 15))
entry25.place(x=170, y=170)
entry26 = Entry(main, bd=5, width=20, font=("arial", 15))
entry26.place(x=900, y=600)
entry27 = Entry(main, bd=5, width=20, font=("arial", 15))
entry27.place(x=600, y=600)
entry28 = Entry(main, bd=5, width=20, font=("arial", 15))
entry28.place(x=300, y=600)
main.configure(background='#8000FF')
# entry27=Entry(main,bd=5,textvariable=fee3, state="readonly", width=20 ,font=("arial",15))
# entry27.place(x=960,y=400)
# entry28=Entry(main,bd=5, width=20 ,font=("arial",15))
# entry28.place(x=900,y=400)
# =====================================define charecter=====================
# ==================================function==============================
calculation2 = 2000
def calculation():
NAME = entry1.get()
SUR = entry2.get()
EMAIL = entry3.get()
BOX = box.get()
GANDER = gander.get()
PYTHON = python.get()
JAVA = java.get()
C = c.get()
D = d.get()
print(PYTHON)
print(GANDER)
CALCULATE = calculate.get()
if NAME == ("") and SUR == ("") and EMAIL == ("") and BOX == ("SELECT") and GANDER == (3) and JAVA == (
0) and PYTHON == (0) and C == (0) and D == (0):
kal = tkinter.messagebox.showinfo(" DETAILS INVALID", "FILL ALL THE DETAILS")
else:
global x
if box.get() == "JAVA" and gander.get() == 0:
x = (calculation2 - calculation2 * 20 / 100)
entry5.configure(state="normal")
entry5.delete(0, END)
entry5.insert(0, x)
entry5.configure(state="disable")
if box.get() == "JAVA" and gander.get() == 1:
x = (calculation2 - calculation2 * 10 / 100)
entry5.configure(state="normal")
entry5.delete(0, END)
entry5.insert(0, x)
entry5.configure(state="disable")
if box.get() == "PYTHON" and gander.get() == 1:
x = (calculation2)
entry5.configure(state="normal")
entry5.delete(0, END)
entry5.insert(0, x)
entry5.configure(state="disable")
if box.get() == "PYTHON" and gander.get() == 0:
x = (calculation2 - calculation2 * 10 / 100)
entry5.configure(state="normal")
entry5.delete(0, END)
entry5.insert(0, x)
entry5.configure(state="disable")
if box.get() == "C++" and gander.get() == 0:
x = (calculation2 - calculation2 * 10 / 100)
entry5.configure(state="normal")
entry5.delete(0, END)
entry5.insert(0, x)
entry5.configure(state="disable")
if box.get() == "C" and gander.get() == 1:
x = (calculation2)
entry5.configure(state="normal")
entry5.delete(0, END)
entry5.insert(0, x)
entry5.configure(state="disable")
if box.get() == "C" and gander.get() == 0:
x = (calculation2 - calculation2 * 10 / 100)
entry5.configure(state="normal")
entry5.delete(0, END)
entry5.insert(0, x)
entry5.configure(state="disable")
def msg():
    if branch.get() == "SELECT" or gander.get() == 3 or (
            python.get() == 0 and java.get() == 0 and c.get() == 0 and d.get() == 0):
        calculate.set("PLEASE FILL ALL")
    if "@" not in entry3.get() or ".com" not in entry3.get():
        kal = tkinter.messagebox.showinfo("INVALID DETAILS", "ENTER A VALID EMAIL ADDRESS")
        entry3.delete(0, END)
    else:
        msg = tkinter.messagebox.askyesno("Form filling confirmation",
                                          "WARNING: All data will be erased after 'YES' for a new applicant")
if msg > 0:
NAME = entry1.get()
SUR = entry2.get()
EMAIL = entry3.get()
BRANCH = box.get()
GANDER = gander.get()
PYTHON = python.get()
JAVA = java.get()
C = c.get()
D = d.get()
CALCULATE = calculate.get()
conn = sqlite3.connect("abcd12345.db")
with conn:
cur = conn.cursor()
cur.execute(
'INSERT INTO kistar (name,sur, email, branch, gander,fee ,python,java,c,d ) VALUES(?,?,?,?,?,?,?,?,?,?)',
(NAME, SUR, EMAIL, BRANCH, GANDER, CALCULATE, PYTHON, JAVA, C, D,))
golu()
def golu():
entry1.delete(0, END)
entry2.delete(0, END)
entry3.delete(0, END)
box.set("SELECT")
gander.set(3)
python.set(0)
java.set(0)
c.set(0)
d.set(0)
calculate.set("")
entry4.delete(0, END)
def search_id():
SEARCH = entry4.get()
conn = sqlite3.connect("abcd12345.db")
with conn:
cur = conn.cursor()
cur.execute('SELECT name FROM kistar WHERE id=?', (SEARCH,))
for row1 in cur.fetchone():
name.set(row1)
def search_sur():
SEARCH = entry4.get()
conn = sqlite3.connect("abcd12345.db")
with conn:
cur = conn.cursor()
cur.execute('SELECT sur FROM kistar WHERE id=?', (SEARCH,))
for row2 in cur.fetchone():
sur.set(row2)
def search_email():
SEARCH = entry4.get()
conn = sqlite3.connect("abcd12345.db")
with conn:
cur = conn.cursor()
cur.execute('SELECT email FROM kistar WHERE id=?', (SEARCH,))
for row3 in cur.fetchone():
email.set(row3)
def search_branch():
SEARCH = entry4.get()
conn = sqlite3.connect("abcd12345.db")
with conn:
cur = conn.cursor()
cur.execute('SELECT branch FROM kistar WHERE id=?', (SEARCH,))
for row4 in cur.fetchone():
branch.set(row4)
def search_gander():
SEARCH = entry4.get()
conn = sqlite3.connect("abcd12345.db")
with conn:
cur = conn.cursor()
cur.execute('SELECT gander FROM kistar WHERE id=?', (SEARCH,))
for row5 in cur.fetchone():
gander.set(row5)
def search_course():
SEARCH = entry4.get()
conn = sqlite3.connect("abcd12345.db")
with conn:
cur = conn.cursor()
cur.execute('SELECT python FROM kistar WHERE id=?', (SEARCH,))
for row6 in cur.fetchone():
python.set(row6)
cur.execute('SELECT java FROM kistar WHERE id=?', (SEARCH,))
for row7 in cur.fetchone():
java.set(row7)
cur.execute('SELECT c FROM kistar WHERE id=?', (SEARCH,))
for row8 in cur.fetchone():
c.set(row8)
cur.execute('SELECT d FROM kistar WHERE id=?', (SEARCH,))
for row9 in cur.fetchone():
d.set(row9)
cur.execute('SELECT fee FROM kistar WHERE id=?', (SEARCH,))
for row10 in cur.fetchone():
calculate.set(row10)
def update():
box1 = tkinter.messagebox.askyesno("CONFARMATION", "if you update you will be unable to see previous data again")
if box1 > 0:
SEARCH = entry4.get()
NAME = entry1.get()
SUR = entry2.get()
EMAIL = entry3.get()
BRANCH = box.get()
GENDER = gander.get()
FEE = calculate.get()
PYTHON = python.get()
JAVA = java.get()
C = c.get()
D = d.get()
CALCULATE = entry5.get()
conn = sqlite3.connect("abcd12345.db")
with conn:
cur = conn.cursor()
cur.execute('UPDATE kistar SET name=? WHERE id=?', (NAME, SEARCH,))
cur.execute('UPDATE kistar SET sur=? WHERE id=?', (SUR, SEARCH,))
cur.execute('UPDATE kistar SET email=? WHERE id=?', (EMAIL, SEARCH,))
cur.execute('UPDATE kistar SET branch=? WHERE id=?', (BRANCH, SEARCH,))
cur.execute('UPDATE kistar SET gander=? WHERE id=?', (GENDER, SEARCH,))
cur.execute('UPDATE kistar SET fee=? WHERE id=?', (FEE, SEARCH,))
cur.execute('UPDATE kistar SET python=? WHERE id=?', (PYTHON, SEARCH,))
cur.execute('UPDATE kistar SET java=? WHERE id=?', (JAVA, SEARCH,))
cur.execute('UPDATE kistar SET c=? WHERE id=?', (C, SEARCH,))
cur.execute('UPDATE kistar SET d=? WHERE id=?', (D, SEARCH,))
conn.commit()
def delete():
    box = tkinter.messagebox.askyesno("WARNING", "DELETED DATA CANNOT BE RECOVERED")
    if box > 0:
        # the record id comes from the search entry; delete only on confirmation
        SEARCH = entry4.get()
        conn = sqlite3.connect("abcd12345.db")
        with conn:
            cur = conn.cursor()
            cur.execute("DELETE FROM kistar WHERE id=?", (SEARCH,))
            conn.commit()
        golu()
def all():
search_id()
search_sur()
search_email()
search_branch()
search_gander()
search_course()
INQUIRY = Button(text="LOGIN", width=17, font=("arial", 20), bg="MediumOrchid2", command=login)
INQUIRY.place(x=560, y=480)
firstw.mainloop()
|
[
"noreply@github.com"
] |
noreply@github.com
|
8d24c43bf6bb7b1561a333f3874efa67e29412ad
|
9faae58c50c2697173d5aad03d8a988652acb7c9
|
/docs/conf.py
|
987bc3cdc23452e160eaebcd9a2687b723db5380
|
[
"MIT"
] |
permissive
|
aryanNaik123/gql
|
7d78e5495348394fa7e2462b1614eaaa49f9631e
|
d7ad2a52a36479a885aa14790f49ffb341146499
|
refs/heads/master
| 2022-12-28T09:48:58.954918
| 2020-10-06T23:02:43
| 2020-10-06T23:02:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,604
|
py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('./..'))
# -- Project information -----------------------------------------------------
project = 'gql 3'
copyright = '2020, graphql-python.org'
author = 'graphql-python.org'
# The full version, including alpha/beta/rc tags
from gql import __version__
release = __version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx_rtd_theme'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Output file base name for HTML help builder.
htmlhelp_basename = 'gql-3-doc'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# -- AutoDoc configuration -------------------------------------------------
# autoclass_content = "both"
autodoc_default_options = {
'members': True,
'inherited-members': True,
'special-members': '__init__',
'undoc-members': True,
'show-inheritance': True
}
autosummary_generate = True
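# A typical local build invocation for this configuration (illustrative;
# assumes sphinx and sphinx_rtd_theme are installed):
#   sphinx-build -b html docs docs/_build/html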
|
[
"noreply@github.com"
] |
noreply@github.com
|
2260196e7fd923b0fd097df46a0ff059efad88ed
|
7725595e01cdb454132ef8009726568f66148e86
|
/untitled2.py
|
f2c99aa4ef051b5660df637d91e1f9846492dd97
|
[] |
no_license
|
chintugupta07/cabmanagementsystem
|
0b2368efcf0ad19ede71fef3fd4434e78a185eca
|
7714463a636928c01142f5d514a0adc919e02c35
|
refs/heads/master
| 2022-11-15T10:06:09.013704
| 2020-07-09T17:07:51
| 2020-07-09T17:07:51
| 278,423,716
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,816
|
py
|
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
import sqlite3
import re
class login:
a=0
b=0
c=0
mobile_no=0
def signup(self):
top=Tk()
top.title("create form")
top.geometry("500x450+425+75")
self.var=IntVar()
Label(top,text="Register Form",font="Helvetica 12 bold",height="3",width="500",fg="white",bg="black").pack()
Label(top,text="Firstname").place(x=100,y=100)
self.firstname = Entry(top)
self.firstname.place(x=180,y=100)
Label(top,text="Lastname").place(x=100,y=150)
self.lastname=Entry(top)
self.lastname.place(x=180,y=150)
Label(top,text="Gender").place(x=100,y=200)
R1=Radiobutton(top,text="Male",variable=self.var,value=1,relief=RAISED)
R1.place(x=200,y=200)
R2=Radiobutton(top,text="Female",variable=self.var,value=0,relief=RAISED)
R2.place(x=300,y=200)
Label(top,text="Mobile_No.").place(x=100,y=250)
self.mobile_no=Entry(top)
self.mobile_no.place(x=180,y=250)
Label(top,text="Email").place(x=100,y=300)
self.email=Entry(top)
self.email.place(x=180,y=300)
Label(top,text="Password:").place(x=100,y=350)
self.password=Entry(top)
self.password.place(x=180,y=350)
Button(top,text="Submit",command=self.validate).place(x=170,y=400)
top.mainloop()
def validate(self):
temp = self.mobile_no.get()
l = len(temp)
temp1=self.email.get()
temp2=self.firstname.get()
temp3=self.lastname.get()
        if l != 10 or temp.isdigit() == False:
message=messagebox.showinfo("Warning...!!","INVALID MOBILE NUMBER")
pass
elif(temp1=='' or re.search('[@]',temp1) is None or re.search('[.]',temp1) is None):
message=messagebox.showinfo("Warning...!!","PLEASE ENTER VALID EMAIL")
elif(temp2==''):
message=messagebox.showinfo("Warning...!!","PLEASE ENTER FIRSTNAME")
elif(temp3==''):
message=messagebox.showinfo("Warning...!!","PLEASE ENTER LASTNAME")
else:
self.database()
def database(self):
name=self.firstname.get()
name1=self.lastname.get()
temp=self.var.get()
abc=int(temp)
        if abc == 1:
            g="Male"
        else:
            g="Female"
mb=self.mobile_no.get()
email=self.email.get()
passw=self.password.get()
conn=sqlite3.connect("untitled2.db")
c=conn.cursor()
#c.execute("CREATE TABLE form(firstname varchar(50), lastname varchar(50),gender text,mobile_no number,Email text,password text);")
c.execute("INSERT INTO form(firstname,lastname,gender,mobile_no,Email,password)values(?,?,?,?,?,?)",(name,name1,g,mb,email,passw))
c.execute("SELECT * from form")
for i in c:
print("name",i[0])
print("name1",i[1])
print("g",i[2])
print("mb",i[3])
print("email",i[4])
print("password",i[5])
c.close()
conn.commit()
def login(self):
top=Tk()
top.title("Login")
top.geometry("500x400+425+75")
Label(top,text="LOGIN PAGE",font="Helvetica 12 bold",height="3",width="500",fg="white",bg="black").pack()
n=Label(top, text="Email").place(x=100,y=100)
self.n1=Entry(top)
self.n1.place(x=180,y=100)
m=Label(top, text="Password").place(x=100,y=150)
self.m1=Entry(top)
self.m1.place(x=180,y=150)
button_1=Button(top,text='SUBMIT',fg='green',command=self.valid).place(x=170,y=200)
button_1=Button(top,text='SIGN UP',fg='green',command=self.signup).place(x=270,y=200)
top.mainloop()
def valid(self):
idd=self.n1.get()
pas=self.m1.get()
if(idd=='' or re.search('[@]',idd) is None or re.search('[.]',idd) is None):
message=messagebox.showinfo("Warning...!!","PLEASE ENTER VALID EMAIL")
elif(pas==''):
message=messagebox.showinfo("Warning...!!","PLEASE ENTER VALID PASSWORD")
else:
self.check()
def check(self):
conn=sqlite3.connect("untitled2.db")
c=conn.cursor()
if(self.n1.get() !='' and self.m1.get()!=''):
c.execute("select email,password from form where email=? and password=?",(self.n1.get(),self.m1.get()))
check = c.fetchone()
print(check)
if check is None:
message=messagebox.showinfo("Warning...","INVALID EMAIL ID & PASSWORD.")
elif check is not None :
self.set_trip()
def set_trip(self):
top=Tk()
top.title("Booking Request IN LPU")
top.geometry("500x600+425+75")
Label(top,text="BOOKING REQUEST",font="Helvetica 12 bold",height="3",width="500",fg="white",bg="black").pack()
lb1=Label(top,text="From Block NO.").place(x=100,y=150)
self.b=Entry(top,width=12)
self.b.place(x=200,y=150)
lb2=Label(top,text="To Block No.").place(x=100,y=200)
self.a=Entry(top,width=12)
self.a.place(x=200,y=200)
lb_date=Label(top,text="Date").place(x=100,y=250)
Var=IntVar()
Var.set(1)
spin=Spinbox(top,from_=1,to=31,width=10,textvariable=Var)
spin.place(x=200,y=250)
lb_month=Label(top,text="Month").place(x=100,y=300)
Var1=IntVar()
Var1.set(1)
spin=Spinbox(top,from_=1,to=12,width=10,textvariable=Var1)
spin.place(x=200,y=300)
lb_year=Label(top,text="Year").place(x=100,y=350)
Var2=IntVar()
Var2.set(2018)
spin=Spinbox(top,from_=2018,to=2020,width=10,textvariable=Var2)
spin.place(x=200,y=350)
button_1=Button(top,text='SUBMIT',fg='green',command=self.validd).place(x=150,y=400)
top.mainloop()
def validd(self):
fromm=self.b.get()
too=self.a.get()
if(fromm==''):
message=messagebox.showinfo("Warning...!!","PLEASE ENTER VALID ROUTE")
elif(too==''):
message=messagebox.showinfo("Warning...!!","PLEASE ENTER VALID ROUTE")
else:
self.fare()
def fare(self):
w=int(self.a.get())
q=int(self.b.get())
if w==q:
message=messagebox.showinfo("WARNING"," END AND START POINT OF TRIP ARE SAME ")
elif w>57 or w<1:
message=messagebox.showinfo("WARNING"," ENTER VALID BLOCK NUMBER FROM 1 TO 57 ")
elif q>57 or q<1:
message=messagebox.showinfo("WARNING"," ENTER VALID BLOCK NUMBER FROM 1 TO 57 ")
else:
if q>w:
self.c=(q-w)*2
else:
self.c=(w-q)*2
s="FARE IS :"+str(self.c)
message=messagebox.showinfo("THANK YOU",s)
def contact(self):
top=Tk()
top.title("Contact Us")
top.geometry("500x300+425+75")
Label(top,text="CONTACT US",font="Helvetica 12 bold",height="3",width="500",fg="white",bg="black").pack()
o=Label(top,text="KVM CAB DELHI").place(x=100,y=100)
o=Label(top,text="Plot No. 356 Near Miler ").place(x=100,y=120)
o=Label(top,text="Ganj, New Delhi. 148011").place(x=100,y=140)
o=Label(top,text="Email: KVMCAB@gmail.com").place(x=100,y=160)
o=Label(top,text="Mob. NO. 9875641235").place(x=100,y=180)
o=Label(top,text="Fax NO. 121454545").place(x=100,y=200)
top.mainloop()
def __init__(self):
root=Tk()
root.title("CAB MANAGEMENT SYSTEM")
root.geometry("500x400+425+125")
Label(root,text="WELCOME TO CAB BOOKING PORTAL",font="Helvetica 12 bold",height="3",width="500",fg="white",bg="black").pack()
Button(root,text="Login",bg="Yellow",width="15",height="3",command=self.login,relief=RAISED).place(x="210", y="100")
Button(root,text="New User",bg="Yellow",width="15",height="3",command=self.signup,relief=RAISED).place(x="210", y="160")
Button(root,text="Available Routes",bg="Yellow",width="15",height="3",command=self.set_trip,relief=RAISED).place(x="210", y="220")
Button(root,text="Contact Us",bg="Yellow",width="15",height="3",command=self.contact,relief=RAISED).place(x="210", y="280")
root.mainloop()
ob=login()
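# Hardening note (sketch, not part of the original app): database() stores
# passwords in plain text; hashing before the INSERT would be safer, e.g.:
# import hashlib
# passw = hashlib.sha256(passw.encode("utf-8")).hexdigest()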
|
[
"noreply@github.com"
] |
noreply@github.com
|
92b5f668546f93d13b399359990056c7b27defd6
|
592ae7e2c02f61d4241afa7be091dcb0202ff652
|
/Practical2/SQLite Practise.py
|
47379bf706453b14e4aa345160b452b9b1416efc
|
[] |
no_license
|
JamesKeating/Internet_Of_Things
|
fc4a0398d20dbb5b6042e365dd0509f60c7f859a
|
e0e0c733d9f4100cb95214c60a7588ea4b95a6c9
|
refs/heads/master
| 2021-07-03T09:43:19.691083
| 2017-09-22T15:30:33
| 2017-09-22T15:30:33
| 104,490,225
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
import sqlite3
db = sqlite3.connect("chinook.db")
c = db.cursor()
c.execute('select * from customers')
row1 = c.fetchone()
print(row1)
print(row1[3])
db.close()
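# Illustrative parameterized variant (the CustomerId column is part of the
# sample chinook.db schema; run before db.close()):
# c.execute('select * from customers where CustomerId = ?', (1,))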
|
[
"james.keating@ucdconnect.ie"
] |
james.keating@ucdconnect.ie
|
d1af6798fa61b5eb64f308f4719273047da1e155
|
6189f34eff2831e3e727cd7c5e43bc5b591adffc
|
/WebMirror/management/rss_parser_funcs/feed_parse_extractSleepysmutCom.py
|
315dbd399ffd084df7ad20e203281fee61738490
|
[
"BSD-3-Clause"
] |
permissive
|
fake-name/ReadableWebProxy
|
24603660b204a9e7965cfdd4a942ff62d7711e27
|
ca2e086818433abc08c014dd06bfd22d4985ea2a
|
refs/heads/master
| 2023-09-04T03:54:50.043051
| 2023-08-26T16:08:46
| 2023-08-26T16:08:46
| 39,611,770
| 207
| 20
|
BSD-3-Clause
| 2023-09-11T15:48:15
| 2015-07-24T04:30:43
|
Python
|
UTF-8
|
Python
| false
| false
| 541
|
py
|
def extractSleepysmutCom(item):
'''
Parser for 'sleepysmut.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
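# Illustrative call (the item shape is inferred from the fields accessed
# above; the extract/build helpers live elsewhere in this repo):
# item = {'title': 'Some Novel - Chapter 12', 'tags': ['PRC']}
# msg = extractSleepysmutCom(item)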
|
[
"something@fake-url.com"
] |
something@fake-url.com
|
5cae4351928b729521bafe551e04ae158fbbd2f3
|
d60acaac9e460c5693efe61449667b3c399c53c8
|
/diffeq/logisticbifurcation.py
|
392cc43dfa415350c9c23054e6d5784488977d9c
|
[] |
no_license
|
HussainAther/mathematics
|
53ea7fb2470c88d674faa924405786ba3b860705
|
6849cc891bbb9ac69cb20dfb13fe6bb5bd77d8c5
|
refs/heads/master
| 2021-07-22T00:07:53.940786
| 2020-05-07T03:11:17
| 2020-05-07T03:11:17
| 157,749,226
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,159
|
py
|
import matplotlib.pyplot as plt
import numpy as np
"""
Logistic map bifurcation
"""
def logistic(r, x):
"""
Logistic map function for nonlinear systems
"""
return r * x * (1 - x)
x = np.linspace(0, 1)
fig, ax = plt.subplots(1, 1)
ax.plot(x, logistic(2, x), "k")
def plotsystem(r, x0, n, ax=None):
"""
Plot the function and the y=x diagonal line.
"""
t = np.linspace(0, 1)
ax.plot(t, logistic(r, t), "k", lw=2)
ax.plot([0, 1], [0, 1], "k", lw=2)
# Recursively apply y=f(x) and plot two lines:
# (x, x) -> (x, y)
# (x, y) -> (y, y)
x = x0
for i in range(n):
y = logistic(r, x)
# Plot the two lines.
ax.plot([x, x], [x, y], "k", lw=1)
ax.plot([x, y], [y, y], "k", lw=1)
# Plot the positions with increasing
# opacity.
ax.plot([x], [y], "ok", ms=10,
alpha=(i + 1) / n)
x = y
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6),
sharey=True)
plotsystem(2.5, .1, 10, ax=ax1)
plotsystem(3.5, .1, 10, ax=ax2)
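# Worked fixed-point note (added for clarity): solving r*x*(1-x) = x gives
# x* = 0 and x* = 1 - 1/r, so for r = 2.5 the attracting fixed point is 0.6,
# which the left cobweb plot spirals into.
assert abs(logistic(2.5, 1 - 1 / 2.5) - (1 - 1 / 2.5)) < 1e-12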
n = 10000
r = np.linspace(2.5, 4.0, n)
iterations = 1000
last = 100
x = 1e-5 * np.ones(n)
# lyapunov = np.zeros(n)
# Use a fresh figure for the bifurcation diagram instead of reusing the
# cobweb axes above (the commented version also reserved ax2 for the
# Lyapunov plot):
fig, ax1 = plt.subplots(1, 1, figsize=(8, 6))
# fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 9),
#                                sharex=True)
for i in range(iterations):
x = logistic(r, x)
# We compute the partial sum of the
# Lyapunov exponent.
# lyapunov += np.log(abs(r - 2 * r * x))
# We display the bifurcation diagram.
if i >= (iterations - last):
ax1.plot(r, x, ",k", alpha=.25)
ax1.set_xlim(2.5, 4)
ax1.set_title("Bifurcation diagram")
# Display the Lyapunov exponent.
# Horizontal line.
# ax2.axhline(0, color="k", lw=.5, alpha=.5)
# Negative Lyapunov exponent.
# ax2.plot(r[lyapunov < 0],
# lyapunov[lyapunov < 0] / iterations,
# ".k", alpha=.5, ms=.5)
# Positive Lyapunov exponent.
# ax2.plot(r[lyapunov >= 0],
# lyapunov[lyapunov >= 0] / iterations,
# ".r", alpha=.5, ms=.5)
# ax2.set_xlim(2.5, 4)
# ax2.set_ylim(-2, 1)
# ax2.set_title("Lyapunov exponent")
# plt.tight_layout()
|
[
"shussainather@gmail.com"
] |
shussainather@gmail.com
|
0d633341fde2c9a14894b86f9176f60ad162fbec
|
33f8752eec25d8a621a0bf920455cc1b1e209a2c
|
/bvspca/core/migrations/0007_auto_20171128_1157.py
|
3ef8a24cd14686bf7a51fda80e7a399ccfe41b4e
|
[
"MIT"
] |
permissive
|
nfletton/bvspca
|
196d2a381a9e0cacf6ac4156ecee5c785e7d14a9
|
4f4bceb96ace010f1d4c273c44da4f0ab5ea7f6f
|
refs/heads/master
| 2023-07-06T12:03:15.037161
| 2023-06-29T16:59:16
| 2023-06-29T16:59:16
| 98,236,678
| 16
| 3
|
MIT
| 2023-03-21T12:55:02
| 2017-07-24T21:31:29
|
Python
|
UTF-8
|
Python
| false
| false
| 2,914
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-28 18:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import wagtail.contrib.table_block.blocks
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.embeds.blocks
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('core', '0006_parentpage'),
]
operations = [
migrations.CreateModel(
name='AdoptionCentre',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('menu_title', models.CharField(blank=True, max_length=100, verbose_name='menu title')),
('picture_blocks', wagtail.core.fields.StreamField((('animals_link', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(max_length=50)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('page', wagtail.core.blocks.PageChooserBlock()))))),))),
],
options={
'verbose_name': 'Adoption Centre ',
},
bases=('wagtailcore.page', models.Model),
),
migrations.AlterField(
model_name='contentpage',
name='body',
field=wagtail.core.fields.StreamField((('heading_block', wagtail.core.blocks.StructBlock((('heading_text', wagtail.core.blocks.CharBlock(classname='title', required=True)), ('size', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('', 'Select a header size'), ('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4')], required=False))))), ('paragraph_block', wagtail.core.blocks.RichTextBlock(icon='fa-paragraph', label='Paragraph')), ('image_block', wagtail.core.blocks.StructBlock((('image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('caption', wagtail.core.blocks.CharBlock(required=False)), ('attribution', wagtail.core.blocks.CharBlock(required=False))))), ('block_quote', wagtail.core.blocks.StructBlock((('text', wagtail.core.blocks.TextBlock()), ('attribute_name', wagtail.core.blocks.CharBlock(blank=True, label='e.g. Mary Berry', required=False))))), ('embed_block', wagtail.embeds.blocks.EmbedBlock(help_text='Insert an embed URL e.g https://www.youtube.com/embed/SGJFWirQ3ks', icon='fa-external-link-square', label='Embedded Media', template='core/blocks/embed_block.html')), ('table_block', wagtail.core.blocks.StructBlock((('table', wagtail.contrib.table_block.blocks.TableBlock()), ('caption', wagtail.core.blocks.CharBlock(required=False))))), ('raw_html', wagtail.core.blocks.StructBlock((('html', wagtail.core.blocks.RawHTMLBlock()),)))), blank=True, verbose_name='Page body'),
),
]
|
[
"nigel@thebluehut.com"
] |
nigel@thebluehut.com
|
78d13810a1e6bb93bb15484432df7fa156d11023
|
0b5eefd5ffe82115386967c7651f8b5597e8a290
|
/pytorch_fcn/tasks/grid_regression.py
|
14c8821ac2ba0cdced09787c55214a675854ddea
|
[] |
no_license
|
erictzeng/ssa-segmentation-release
|
18937313e734296d73f668c335b6fb7f0de8775a
|
273150518dae414e2444a37d350695c1d2f719d2
|
refs/heads/master
| 2020-07-25T15:16:29.157905
| 2019-09-29T02:32:40
| 2019-09-29T02:32:40
| 208,336,085
| 13
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,896
|
py
|
import logging
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from pytorch_fcn.data.util import Dispenser
from pytorch_fcn.data.util import JointDispenser
class GridRegression:
def __init__(self, net, source_dataset, target_dataset, source_val_dataset, target_val_dataset, *, batch_size, stride=256, name='gridregression'):
self.net = net
self.source_dataset = source_dataset
self.target_dataset = target_dataset
self.source_val_dataset = source_val_dataset
self.target_val_dataset = target_val_dataset
self.batch_size = batch_size
self.stride = stride
self.name = name
self.loss_fn = nn.MSELoss()
self.create_datasets()
self.create_head()
def create_datasets(self):
transform = transforms.Resize(1024)
crop_transform = self.net.transform
loaders = []
for dataset in [self.source_dataset, self.target_dataset]:
grid_dataset = GridRegressionWrapper(
dataset,
stride=self.stride,
transform=transform,
crop_transform=crop_transform
)
loader = DataLoader(
grid_dataset,
batch_size=self.batch_size // 2,
shuffle=True,
num_workers=4
)
loaders.append(loader)
val_loaders = []
for dataset in [self.source_val_dataset, self.target_val_dataset]:
grid_dataset = GridRegressionWrapper(
dataset,
stride=self.stride,
transform=transform,
crop_transform=crop_transform
)
loader = DataLoader(
grid_dataset,
batch_size=1,
shuffle=True,
num_workers=4
)
val_loaders.append(loader)
self.train_dispenser = JointDispenser(*loaders)
self.val_loaders = {
'source': val_loaders[0],
'target': val_loaders[1],
}
def create_head(self):
self.head = GridRegressionHead(self.net.out_dim)
self.net.attach_head(self.name, self.head)
def _predict_batch(self, im):
n, g, c, h, w = im.size()
im = im.view(n * g, c, h, w).cuda()
preds = self.net(im, task=self.name)
preds = preds.view(n * g, 2)
return preds
def step(self):
im, label = self.train_dispenser.next_batch()
label = label.view(-1, 2).cuda()
preds = self._predict_batch(im)
loss = self.loss_fn(preds, label)
return loss
def eval(self):
self.net.eval()
results = {}
for domain, loader in self.val_loaders.items():
correct = 0
total = 0
for im, label in loader:
with torch.no_grad():
label = label.view(-1, 2).cuda()
preds = self._predict_batch(im)
preds = preds.round()
correct += preds.eq(label).all(dim=1).sum().item()
total += label.size(0)
accuracy = correct / total
logging.info(f' {self.name}.{domain}: {accuracy}')
results[f'{self.name}.{domain}'] = accuracy
self.net.train()
return results
class GridRegressionHead(nn.Module):
def __init__(self, ft_dim):
super().__init__()
self.pool = nn.AdaptiveAvgPool2d((1, 1))
self.rot = nn.Conv2d(ft_dim, 2, 1)
def forward(self, x):
x = self.pool(x)
x = self.rot(x)
return x
class GridRegressionWrapper:
def __init__(self, dataset, stride=256, grid=(4, 2), transform=None, crop_transform=None):
self.dataset = dataset
self.stride = stride
self.grid = grid
self.transform = transform
self.crop_transform = crop_transform
def __len__(self):
return len(self.dataset)
def __getitem__(self, index):
im = self.dataset[index][0]
if self.transform is not None:
im = self.transform(im)
crops = []
targets = []
for x in range(self.grid[0]):
for y in range(self.grid[1]):
crop = self.crop(im, x, y)
if self.crop_transform is not None:
crop = self.crop_transform(crop)
crops.append(crop)
targets.append([float(x), float(y)])
im = torch.stack(crops, dim=0)
targets = torch.Tensor(targets)
return im, targets
def crop(self, im, x, y):
left = self.stride * x
right = self.stride * (x + 1)
up = self.stride * y
down = self.stride * (y + 1)
im = im.crop((left, up, right, down))
return im
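# Shape sketch (added note): with the default grid=(4, 2) and stride=256,
# GridRegressionWrapper.__getitem__ returns im of shape (8, C, H, W) after
# the crop transform and targets of shape (8, 2), one (x, y) cell per crop.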
|
[
"etzeng@eecs.berkeley.edu"
] |
etzeng@eecs.berkeley.edu
|
d972a62b9323c6f85d05028169be6448bfe93223
|
f9095daa91eff8d905fd07c19fab685efc8fbce0
|
/restro_chatbot/zomatopy.py
|
b3ce8a517c7c9d62ca84776a435173076f254a88
|
[] |
no_license
|
rajesh-1983/rasa-chatbot
|
9dc7caadc5530b149249a637be5c41225686f30e
|
c036dbbf2328bcd31e10717ec63b3cdbaca51dcd
|
refs/heads/main
| 2023-01-23T14:39:27.949677
| 2020-11-23T11:55:03
| 2020-11-23T11:55:03
| 314,994,844
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,994
|
py
|
import requests
import ast
base_url = "https://developers.zomato.com/api/v2.1/"
def initialize_app(config):
return Zomato(config)
class Zomato:
def __init__(self, config):
self.user_key = config["user_key"]
def get_categories(self):
"""
Takes no input.
Returns a dictionary of IDs and their respective category names.
"""
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "categories", headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
categories = {}
for category in a['categories']:
categories.update({category['categories']['id'] : category['categories']['name']})
return categories
def get_city_ID(self, city_name):
"""
Takes City Name as input.
Returns the ID for the city given as input.
"""
        # multi-word city names contain spaces, so strip them before isalpha()
        if city_name.replace(' ', '').isalpha() == False:
raise ValueError('InvalidCityName')
city_name = city_name.split(' ')
city_name = '%20'.join(city_name)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "cities?q=" + city_name, headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
if len(a['location_suggestions']) == 0:
raise Exception('invalid_city_name')
elif 'name' in a['location_suggestions'][0]:
city_name = city_name.replace('%20', ' ')
if str(a['location_suggestions'][0]['name']).lower() == str(city_name).lower():
return a['location_suggestions'][0]['id']
else:
raise ValueError('InvalidCityId')
def get_city_name(self, city_ID):
"""
Takes City ID as input.
Returns the name of the city ID given as input.
"""
self.is_valid_city_id(city_ID)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "cities?city_ids=" + str(city_ID), headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
if a['location_suggestions'][0]['country_name'] == "":
raise ValueError('InvalidCityId')
else:
temp_city_ID = a['location_suggestions'][0]['id']
if temp_city_ID == str(city_ID):
return a['location_suggestions'][0]['name']
def get_collections(self, city_ID, limit=None):
"""
Takes City ID as input. limit parameter is optional.
Returns dictionary of Zomato restaurant collections in a city and their respective URLs.
"""
#self.is_valid_city_id(city_ID)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
if limit == None:
r = (requests.get(base_url + "collections?city_id=" + str(city_ID), headers=headers).content).decode("utf-8")
else:
if str(limit).isalpha() == True:
raise ValueError('LimitNotInteger')
else:
r = (requests.get(base_url + "collections?city_id=" + str(city_ID) + "&count=" + str(limit), headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
collections = {}
for collection in a['collections']:
collections.update({collection['collection']['title'] : collection['collection']['url']})
return collections
def get_cuisines(self, city_ID):
"""
Takes City ID as input.
Returns a sorted dictionary of all cuisine IDs and their respective cuisine names.
"""
self.is_valid_city_id(city_ID)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "cuisines?city_id=" + str(city_ID), headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
if len(a['cuisines']) == 0:
raise ValueError('InvalidCityId')
temp_cuisines = {}
cuisines = {}
for cuisine in a['cuisines']:
temp_cuisines.update({cuisine['cuisine']['cuisine_id'] : cuisine['cuisine']['cuisine_name']})
for cuisine in sorted(temp_cuisines):
cuisines.update({cuisine : temp_cuisines[cuisine]})
return cuisines
def get_establishment_types(self, city_ID):
"""
Takes City ID as input.
Returns a sorted dictionary of all establishment type IDs and their respective establishment type names.
"""
self.is_valid_city_id(city_ID)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "establishments?city_id=" + str(city_ID), headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
temp_establishment_types = {}
establishment_types = {}
if 'establishments' in a:
for establishment_type in a['establishments']:
temp_establishment_types.update({establishment_type['establishment']['id'] : establishment_type['establishment']['name']})
for establishment_type in sorted(temp_establishment_types):
establishment_types.update({establishment_type : temp_establishment_types[establishment_type]})
return establishment_types
else:
raise ValueError('InvalidCityId')
def get_nearby_restaurants(self, latitude, longitude):
"""
Takes the latitude and longitude as inputs.
Returns a dictionary of Restaurant IDs and their corresponding Zomato URLs.
"""
try:
float(latitude)
float(longitude)
except ValueError:
raise ValueError('InvalidLatitudeOrLongitude')
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "geocode?lat=" + str(latitude) + "&lon=" + str(longitude), headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
nearby_restaurants = {}
for nearby_restaurant in a['nearby_restaurants']:
nearby_restaurants.update({nearby_restaurant['restaurant']['id'] : nearby_restaurant['restaurant']['url']})
return nearby_restaurants
def get_restaurant(self, restaurant_ID):
"""
Takes Restaurant ID as input.
Returns a dictionary of restaurant details.
"""
self.is_valid_restaurant_id(restaurant_ID)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "restaurant?res_id=" + str(restaurant_ID), headers=headers).content).decode("utf-8")
a = ast.literal_eval(r)
        if 'code' in a:
            if a['code'] == 404:
                raise ValueError('InvalidRestaurantId')
restaurant_details = {}
restaurant_details.update({"name" : a['name']})
restaurant_details.update({"url" : a['url']})
restaurant_details.update({"location" : a['location']['address']})
restaurant_details.update({"city" : a['location']['city']})
restaurant_details.update({"city_ID" : a['location']['city_id']})
restaurant_details.update({"user_rating" : a['user_rating']['aggregate_rating']})
restaurant_details = DotDict(restaurant_details)
return restaurant_details
def restaurant_search(self, query="", latitude="", longitude="", cuisines="", limit=5):
"""
Takes either query, latitude and longitude or cuisine as input.
Returns a list of Restaurant IDs.
"""
cuisines = "%2C".join(cuisines.split(","))
if str(limit).isalpha() == True:
raise ValueError('LimitNotInteger')
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "search?q=" + str(query) + "&count=" + str(limit) + "&lat=" + str(latitude) + "&lon=" + str(longitude) + "&cuisines=" + str(cuisines + "&sort=rating" + "&order=desc"), headers=headers).content).decode("utf-8")
        return r  # a = ast.literal_eval(r)
def get_location(self, query="", limit=5):
"""
Takes either query, latitude and longitude or cuisine as input.
Returns a list of Restaurant IDs.
"""
if str(limit).isalpha() == True:
raise ValueError('LimitNotInteger')
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "locations?query=" + str(query) + "&count=" + str(limit), headers=headers).content).decode("utf-8")
return r
def restaurant_search_by_keyword(self, query="", cuisines="", limit=5):
"""
Takes either query, latitude and longitude or cuisine as input.
Returns a list of Restaurant IDs.
"""
cuisines = "%2C".join(cuisines.split(","))
if str(limit).isalpha() == True:
raise ValueError('LimitNotInteger')
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "search?q=" + str(query) + "&count=" + str(limit) + "&cuisines=" + str(cuisines), headers=headers).content).decode("utf-8")
return r
def is_valid_restaurant_id(self, restaurant_ID):
"""
Checks if the Restaurant ID is valid or invalid.
If invalid, throws a InvalidRestaurantId Exception.
"""
restaurant_ID = str(restaurant_ID)
if restaurant_ID.isnumeric() == False:
raise ValueError('InvalidRestaurantId')
def is_valid_city_id(self, city_ID):
"""
Checks if the City ID is valid or invalid.
If invalid, throws a InvalidCityId Exception.
"""
city_ID = str(city_ID)
if city_ID.isnumeric() == False:
return True# raise ValueError('InvalidCityId')
def is_key_invalid(self, a):
"""
Checks if the API key provided is valid or invalid.
If invalid, throws a InvalidKey Exception.
"""
if 'code' in a:
if a['code'] == 403:
raise ValueError('InvalidKey')
def is_rate_exceeded(self, a):
"""
Checks if the request limit for the API key is exceeded or not.
If exceeded, throws a ApiLimitExceeded Exception.
"""
if 'code' in a:
if a['code'] == 440:
raise Exception('ApiLimitExceeded')
class DotDict(dict):
"""
Dot notation access to dictionary attributes
"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
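# Illustrative DotDict usage (not part of the original module):
# details = DotDict({"name": "Cafe", "city": "Delhi"})
# details.name            # attribute-style read -> "Cafe"
# details.rating = "4.5"  # attribute-style write via __setitem__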
|
[
"samal.rajesh@gmail.com"
] |
samal.rajesh@gmail.com
|
f5912e2123f410a7612cf682035db02dc8bc18e9
|
e530adc12d601a0c29fe103cc93b2a63b1675f1c
|
/cours_2/devoir2-1.py
|
0392bad3aff4efba5370ebdde9821584ea91ea7c
|
[] |
no_license
|
floflogreco/Python
|
b7ee3502dc0653c650371a9dd3a4f7edd1334365
|
3180777cecc651d5266443faea06b195d3f69b0f
|
refs/heads/master
| 2021-01-10T11:41:14.036704
| 2016-02-28T22:21:10
| 2016-02-28T22:21:10
| 51,670,505
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
def conversion(grade):
try:
grade = float(grade)
if grade >= 18 and grade <= 20:
print "A"
elif grade >= 16 and grade < 18:
print "B"
elif grade >= 14 and grade < 16:
print "C"
elif grade >= 12 and grade < 14:
print "D"
elif grade >= 0 and grade < 12:
print "F"
else:
print "Note doit etre entre 0 et 20"
    except ValueError:
        print "Il faut une valeur numérique."
conversion(raw_input("Encoder la cote sur 20 : "))
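# Mapping recap (from the branches above): 18-20 -> A, 16-18 -> B,
# 14-16 -> C, 12-14 -> D, 0-12 -> F; anything else is out of range.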
|
[
"flo@MacBook-Pro-de-Florian.local"
] |
flo@MacBook-Pro-de-Florian.local
|
b0391f312af0eaea6305f39574b9a3f17f511b59
|
b1e7481f8b5bf40c2547c95b1863e25b11b8ef78
|
/Kai/crab/NANOv7_NoveCampaign/2018/crab_cfg_2018_ElMu_A.py
|
6670c230a950ef43e35c8eec67ec84cd44951904
|
[
"Apache-2.0"
] |
permissive
|
NJManganelli/FourTopNAOD
|
3df39fd62c0546cdbb1886b23e35ebdc1d3598ad
|
c86181ae02b1933be59d563c94e76d39b83e0c52
|
refs/heads/master
| 2022-12-22T22:33:58.697162
| 2022-12-17T01:19:36
| 2022-12-17T01:19:36
| 143,607,743
| 1
| 1
|
Apache-2.0
| 2022-06-04T23:11:42
| 2018-08-05T11:40:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,524
|
py
|
import os
from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromCRIC
config = Configuration()
config.section_("General")
config.General.requestName = '2018_ElMu_A'
config.General.transferOutputs = True
config.General.transferLogs = True
config.section_("JobType")
config.JobType.allowUndistributedCMSSW = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'crab_PSet_2018_ElMu_A.py'
config.JobType.maxMemoryMB = 3000
config.JobType.maxJobRuntimeMin = 1315
config.JobType.numCores = 1
config.JobType.scriptExe = 'crab_script_2018_ElMu_A.sh'
config.JobType.inputFiles = ['crab_script_2018_ElMu_A.py',
os.path.join(os.environ['CMSSW_BASE'],'src/PhysicsTools/NanoAODTools/scripts/haddnano.py'),
]
config.JobType.outputFiles = [] #['hist.root']
config.JobType.sendPythonFolder = True
config.section_("Data")
config.Data.inputDataset = '/MuonEG/Run2018A-02Apr2020-v1/NANOAOD'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
if config.Data.splitting == 'FileBased':
config.Data.unitsPerJob = 1
# config.Data.totalUnits = $TOTAL_UNITS
# config.Data.userInputFiles = []
# config.Data.outLFNDirBase = '/store/user/{user}/NoveCampaign'.format(user=getUsernameFromCRIC())
config.Data.outLFNDirBase = '/store/group/fourtop/NoveCampaign'
config.Data.publication = True
config.Data.outputDatasetTag = 'NoveCampaign'
config.section_("Site")
config.Site.storageSite = 'T2_BE_IIHE'
|
[
"nicholas.james.manganelli@cern.ch"
] |
nicholas.james.manganelli@cern.ch
|
7e37cadb16e33d7025914cf45a90e8dc8e0cc047
|
4672ac3fd27c770273c6ba7327734f977dbc6fbd
|
/MessageApp/MessageApp/wsgi.py
|
b34ff558477211529bf17f286dff9910d8ae158f
|
[] |
no_license
|
SiddharthaMishra/ITWS2proj
|
eaee65cc5e7ce0f010991fcd2c805dcbf64089d5
|
8f68e2437bcee3e0ee41986827b5ffd33a00e471
|
refs/heads/master
| 2020-03-14T12:53:20.204248
| 2018-05-15T15:38:28
| 2018-05-15T15:38:28
| 131,621,202
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
WSGI config for MessageApp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "MessageApp.settings")
application = get_wsgi_application()
|
[
"sidm1999@gmail.com"
] |
sidm1999@gmail.com
|
799ed2778e3347fdfe1c20170364b25de640d56d
|
3d23630b055939c51ba7d117f74415326abfd483
|
/tcdev/service/_user.py
|
22d56fbcabd4e1d231789d02d0e72cdf17188769
|
[] |
no_license
|
cswxin/tcdev
|
fcaf305941f7a57ef232cc620c8d14e03584158a
|
299b66b4b224da105404e2de57435134e305b67b
|
refs/heads/master
| 2021-01-20T14:39:20.333742
| 2017-05-12T09:55:08
| 2017-05-12T09:55:08
| 90,641,660
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,672
|
py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
from django.contrib.auth.models import User, Group
import logging
logger = logging.getLogger('logger')
#===============================================================================
# Direct Model operations: get_or_create, get, filter, etc.
#===============================================================================
def create_user_by_params(**kwargs):
    '''
    @param kwargs: key=value keyword pairs
    @return: the created obj
    @note: create an object
    '''
    return User.objects.create(**kwargs)
def get_or_create_user_by_params(**kwargs):
    '''
    @param kwargs: key=value keyword pairs
    @return: tuple (obj, boolean)
    @note: get or create an object
    '''
    return User.objects.get_or_create(**kwargs)
def get_user_by_params(**kwargs):
    '''
    @param kwargs: key=value keyword pairs
    @return: obj or None
    @note: get an object
    '''
    try:
        return User.objects.get(**kwargs)
    except User.DoesNotExist:
        logger.error(u"Account object does not exist (%s)" % kwargs)
    except User.MultipleObjectsReturned:
        logger.error(u"Multiple Account records exist (%s)" % kwargs)
    return None
def get_users_by_params(**kwargs):
    '''
    @param kwargs: key=value keyword pairs
    @return: [obj, ...]
    @note: get a list of objects
    '''
    return User.objects.filter(**kwargs)
#===============================================================================
# Direct Model operations: get_or_create, get, filter, etc.
#===============================================================================
def create_group_by_params(**kwargs):
    '''
    @param kwargs: key=value keyword pairs
    @return: the created obj
    @note: create an object
    '''
    return Group.objects.create(**kwargs)
def get_or_create_group_by_params(**kwargs):
    '''
    @param kwargs: key=value keyword pairs
    @return: tuple (obj, boolean)
    @note: get or create an object
    '''
    return Group.objects.get_or_create(**kwargs)
def get_group_by_params(**kwargs):
    '''
    @param kwargs: key=value keyword pairs
    @return: obj or None
    @note: get an object
    '''
    try:
        return Group.objects.get(**kwargs)
    except Group.DoesNotExist:
        logger.error(u"Group object does not exist (%s)" % kwargs)
    except Group.MultipleObjectsReturned:
        logger.error(u"Multiple Group records exist (%s)" % kwargs)
    return None
def get_groups_by_params(**kwargs):
    '''
    @param kwargs: key=value keyword pairs
    @return: [obj, ...]
    @note: get a list of objects
    '''
    return Group.objects.filter(**kwargs)
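
# Added usage sketch (illustrative; requires a configured Django project,
# so the calls are shown as comments only):
#
#   user, created = get_or_create_user_by_params(username='alice')
#   admins = get_groups_by_params(name__startswith='admin')
#   u = get_user_by_params(username='bob')  # None if absent or ambiguous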
|
[
"OceAn@.(none)"
] |
OceAn@.(none)
|
ed2528da38b20df9cee6a54a28e6c2d709807177
|
6d38f69c99bd335754a035543fecc0258c3eb95c
|
/customSelectors/testSelector1.py
|
5fc4b93096912b9c483b697c7643372407afc397
|
[] |
no_license
|
gitter-badger/Command-Python-Code-2017
|
3be553cf2c98ee0e1dc7cbb1fddbabd315b87987
|
607ac03ca70f706d1714716058c2b5ed70402b81
|
refs/heads/master
| 2021-05-16T08:08:22.183149
| 2017-09-15T01:57:09
| 2017-09-15T01:57:09
| 104,015,605
| 0
| 0
| null | 2017-09-19T02:26:26
| 2017-09-19T02:26:26
| null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
from wpilib.command.command import Command
from wpilib.driverstation import DriverStation
from selector import Selector
class TestSelector(Command):
def __init__(self):
super().__init__()
self.testSelector = Selector([("row1", "op1", "op2", "op3")], (0,))
DriverStation.getInstance()
def initialize(self):
pass
def execute(self):
self.testSelector.selectorLogic()
""" def isFinished(self):
return not DriverStation.isDisabled() """
|
[
"amboyscout@aol.com"
] |
amboyscout@aol.com
|
35e747980377ad316e7fce095b226c1973eb94fb
|
03b7ff0f5f50b628c508be45a2e0d58b3f3a1325
|
/application/search/forms.py
|
4ef0f55e64dc4383f0d9c626b14edde31311edfa
|
[] |
no_license
|
matiastamsi/KalastajanKaveri
|
3600a57af9113ffb584a914e44c351416122b9e3
|
ad704d562e5b283f2c77808b2ce2f34120d30164
|
refs/heads/master
| 2022-09-13T14:27:14.178145
| 2022-09-02T03:44:08
| 2022-09-02T03:44:08
| 246,577,322
| 0
| 0
| null | 2022-09-02T03:44:09
| 2020-03-11T13:23:19
|
HTML
|
UTF-8
|
Python
| false
| false
| 238
|
py
|
from flask_wtf import FlaskForm
from wtforms import SelectField, BooleanField
class SearchForm(FlaskForm):
species = SelectField()
spot = SelectField()
orderBySize = BooleanField()
class Meta:
csrf = False
|
[
"matias.tamsi@gmail.com"
] |
matias.tamsi@gmail.com
|
3d1e485d4c0f308de487855b25b5b7b056570db7
|
fb803366dd0d26275359a00d68b6a36dc538d4ec
|
/venv/bin/easy_install
|
d704af6760b4ac52ba67a0a3feb58ef8eb9e1616
|
[] |
no_license
|
mlastovski/supremeaiarena
|
e0db3bd541be8abb66cff122c2709d8d6f264fb9
|
c2264babe3daefb78bad0ed26014c81589d4a01e
|
refs/heads/master
| 2021-04-23T14:35:34.984263
| 2020-05-11T10:48:04
| 2020-05-11T10:48:04
| 249,933,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
#!/Users/marklastovski/PycharmProjects/supremeaiarena_backend/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
|
[
"mark.lastovskyy@gmail.com"
] |
mark.lastovskyy@gmail.com
|
|
204b10c2e43f97a77e33f63c1875e7d84a5e018c
|
6d470c2eaf67091fa47033f10da5344b8586bacc
|
/doc/examples/scrollbar.py
|
c1f4592ffba1d754af3be86260667b42a7487370
|
[
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
djkool/OcempGUI3
|
5828dd5e8df2141256a23388a3993fb8c3c99731
|
43a68033cb0dbad10654231299cb762cd18b7c25
|
refs/heads/master
| 2023-05-13T11:57:42.415200
| 2021-06-08T07:18:36
| 2021-06-08T07:18:36
| 374,358,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,611
|
py
|
# ScrollBar examples.
from ocempgui.widgets import *
from ocempgui.widgets.Constants import *
def create_scrollbar_view ():
table = Table (2, 2)
table.spacing = 5
table.set_row_align (0, ALIGN_TOP)
table.set_row_align (1, ALIGN_TOP)
# Simple ScrollBars.
frame = HFrame (Label ("Simple ScrollBars"))
frame.spacing = 5
hscroll = HScrollBar (100, 400)
vscroll = VScrollBar (100, 400)
frame.children = hscroll, vscroll
table.add_child (0, 0, frame)
# Insensitive ScrollBars.
frame = HFrame (Label ("Insensitive ScrollBars"))
frame.spacing = 5
hscroll = HScrollBar (100, 400)
hscroll.sensitive = False
vscroll = VScrollBar (100, 400)
vscroll.sensitive = False
frame.children = hscroll, vscroll
table.add_child (0, 1, frame)
# ScrollBars with a null range.
frame = HFrame (Label ("ScrollBars without a range"))
frame.spacing = 5
hscroll = HScrollBar (100, 100)
vscroll = VScrollBar (100, 100)
frame.children = hscroll, vscroll
table.add_child (1, 0, frame)
# Small ScrollBars.
frame = HFrame (Label ("Tiny ScrollBars"))
frame.spacing = 5
hscroll = HScrollBar (10, 100)
vscroll = VScrollBar (10, 100)
frame.children = hscroll, vscroll
table.add_child (1, 1, frame)
return table
if __name__ == "__main__":
# Initialize the drawing window.
re = Renderer ()
re.create_screen (320, 280)
re.title = "ScrollBar examples"
re.color = (234, 228, 223)
re.add_widget (create_scrollbar_view ())
# Start the main rendering loop.
re.start ()
|
[
"andrewpeterson86@gmail.com"
] |
andrewpeterson86@gmail.com
|
3fff1e8913ecade61c264485b8d0d2ab2e8f1eef
|
ff768174490619c119d166273365dcc480e7201c
|
/tuiuiu/tuiuiuimages/migrations/0008_image_created_at_index.py
|
dbcc329edff45d13de512a5f638dee64e8a53c0d
|
[
"BSD-3-Clause",
"LicenseRef-scancode-public-domain"
] |
permissive
|
caputomarcos/tuiuiu.io
|
15ea9323be09b69efb6b88490c2bb558ffb4cc55
|
d8fb57cf95487e7fe1454b2130ef18acc916da46
|
refs/heads/master
| 2022-03-02T12:56:43.889894
| 2017-09-23T22:53:51
| 2017-09-23T22:53:51
| 102,543,365
| 3
| 1
|
NOASSERTION
| 2022-02-02T10:46:32
| 2017-09-06T00:30:06
|
Python
|
UTF-8
|
Python
| false
| false
| 457
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tuiuiuimages', '0007_image_file_size'),
]
operations = [
migrations.AlterField(
model_name='image',
name='created_at',
field=models.DateTimeField(db_index=True, verbose_name='Created at', auto_now_add=True),
),
]
|
[
"caputo.marcos@gmail.com"
] |
caputo.marcos@gmail.com
|
23ead0a556ade621a30eb8d777ca691cda284a13
|
f9c4548007d75d55b7bdf0e2ed3098a11e4083e1
|
/preprocessing_package.py
|
36548e87a4421f8e98e4dec015dbff517b8fc15f
|
[] |
no_license
|
knut-henning/MovementClassification_Thesis
|
bbfa460e451a558a96432106d1f7e80326bcce6e
|
7d0eb14d218f8e1ca07bef24c0ebfda950a9871c
|
refs/heads/main
| 2023-06-11T19:56:19.228175
| 2021-06-29T19:25:53
| 2021-06-29T19:25:53
| 356,034,467
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,395
|
py
|
# -*- coding: utf-8 -*-
""" Preprocessing package
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import timedelta
plt.style.use('seaborn')
__author__ = 'Knut-Henning Kofoed'
__email__ = 'knut-henning@hotmail.com'
def import_activity(data_path):
"""
Imports activity data from given path, adds 'Kalv' column with boolean expressions, corrects time and sets
datatype.
Parameters
----------
data_path : str
String with path and name of csv file to import.
Returns
-------
pd.DataFrame
Dataframe object of csv file.
"""
# Import csv
activity_data = pd.read_csv(data_path, header=0, delimiter=';', dtype=str)
# Finds instances in "Type dyr" that contains "kalv", sets column value to True
activity_data['Kalv'] = activity_data['Type dyr'].map(lambda x: 'kalv' in x)
# Removes ' from the datetime string (occurs in Nofence provided activity data)
activity_data['Tid'] = activity_data['Tid'].str.rstrip("'")
# Convert 'Tid' from 'str' to 'datetime'
activity_data['Tid'] = pd.to_datetime(activity_data['Tid'])
# Convert "Nofence ID" type from "str" to "int64"
activity_data['Nofence ID'] = activity_data['Nofence ID'].astype('int64')
return activity_data
def correction_activity(activity_data):
"""
Used to correct the activity classified data provided by Nofence, contains some typos and
unwanted spaces that needs correction before usage.
Parameters
----------
activity_data : pd.DataFrame
Activity DataFrame to do correction on.
Returns
-------
activity_data : pd.DataFrame
Corrected DataFrame.
"""
# Correct column names
columns = ['Nofence ID', 'ID', 'Type dyr', 'Tid', 'Aktivitet', 'Dier kalv', 'Kommentar', 'Kalv']
# Sets the correct column names
activity_data.columns = columns
# Removes unwanted spaces to the right of the words.
activity_data['Aktivitet'] = activity_data['Aktivitet'].str.rstrip()
activity_data['Type dyr'] = activity_data['Type dyr'].str.rstrip()
# Correct typos in column "Aktivitet"
activity_data['Aktivitet'] = activity_data['Aktivitet'].replace({'Beter slutt': 'Beiter slutt'})
# Removes rows that contain the word "Aktivitet" in the column "Aktivitet"
activity_data = activity_data[~activity_data['Aktivitet'].str.contains('Aktivitet')]
activity_data = activity_data.reset_index(drop=True)
return activity_data
def offset_time(activity_data, column='Tid', hour=-2, finetune=False, second=0):
"""
Offset time of datetime column. (mainly used to convert datetime from CEST to UTC)
Parameters
----------
activity_data : pd.DataFrame
Dataframe to offset datetime on
column : str, optional
Name of column to do the offset on. The default is 'Tid'.
hour : int, optional
Number of hours to offset. The default is -2.
finetune : boolean, optional
Specify if finetuning of seconds is wanted
second : int, optional
Number of seconds to finetune
Returns
-------
activity_data : pd.DataFrame
DataFrame with offset datetime values.
"""
if finetune:
activity_data[column] = activity_data[column] + pd.DateOffset(hours=hour, seconds=second)
else:
activity_data[column] = activity_data[column] + pd.DateOffset(hours=hour)
return activity_data
def start_stop_corr(activity_data):
"""
Mainly used for activity classification data provided by Nofence. For the later functions to work activity
registration has to contain blocks with "VIDEO START" and "VIDEO SLUTT" to work. This function also prints how
many rows has missing blocks with these strings.
Parameters
----------
activity_data : pd.DataFrame
Activity registration data.
Returns
-------
activity_data : pd.DataFrame
Corrected for "VIDEO START" and "VIDEO SLUTT".
"""
    missing = 0  # number of missing "VIDEO START"/"VIDEO SLUTT" rows inserted so far
row_iterator = activity_data.iterrows()
_, last = next(row_iterator) # First value of row_iterator
for i, row in row_iterator:
# Saves the index where "VIDEO START" AND "VIDEO SLUTT" is expected in the "Aktivitet" column
if (row['Nofence ID'] != last['Nofence ID']) & \
(row['Aktivitet'] != 'VIDEO START') & \
(last['Aktivitet'] != 'VIDEO SLUTT'):
df = pd.concat([pd.DataFrame({"Nofence ID": last['Nofence ID'],
"ID": last['ID'],
"Type dyr": last['Type dyr'],
"Tid": last['Tid'],
"Aktivitet": 'VIDEO SLUTT',
"Kommentar": '',
"Kalv": last['Kalv']},
index = [i + missing]
),
pd.DataFrame({"Nofence ID": row['Nofence ID'],
"ID": row['ID'],
"Type dyr": row['Type dyr'],
"Tid": row['Tid'],
"Aktivitet": 'VIDEO START',
"Kommentar": '',
"Kalv": row['Kalv']},
index = [i + missing + 1]
)
])
activity_data = pd.concat([activity_data.iloc[:df.index[0]],
df,
activity_data.iloc[df.index[0]:]
]).reset_index(drop=True)
missing += 2
# Saves the index where "VIDEO START" is expected in the "Aktivitet" column
elif (row['Nofence ID'] != last['Nofence ID']) & \
(row['Aktivitet'] != 'VIDEO START') & \
(last['Aktivitet'] == 'VIDEO SLUTT'):
df = pd.DataFrame({"Nofence ID": row['Nofence ID'],
"ID": row['ID'],
"Type dyr": row['Type dyr'],
"Tid": row['Tid'],
"Aktivitet": 'VIDEO START',
"Kommentar": '',
"Kalv": row['Kalv']},
index = [i + missing]
)
activity_data = pd.concat([activity_data.iloc[:df.index[0]],
df,
activity_data.iloc[df.index[0]:]
]).reset_index(drop=True)
missing += 1
# Saves the index where "VIDEO SLUTT" is expected in the "Aktivitet" column
elif (row['Nofence ID'] != last['Nofence ID']) & \
(last['Aktivitet'] != 'VIDEO SLUTT') & \
(row['Aktivitet'] == 'VIDEO START'):
df = pd.DataFrame({"Nofence ID": last['Nofence ID'],
"ID": last['ID'],
"Type dyr": last['Type dyr'],
"Tid": last['Tid'],
"Aktivitet": 'VIDEO SLUTT',
"Kommentar": '',
"Kalv": last['Kalv']},
index = [i + missing]
)
activity_data = pd.concat([activity_data.iloc[:df.index[0]],
df,
activity_data.iloc[df.index[0]:]
]).reset_index(drop=True)
missing += 1
last = row
# Checks if the last row contains "VIDEO SLUTT" in the column "Aktivitet"
if row['Aktivitet'] != 'VIDEO SLUTT':
df = pd.DataFrame({"Nofence ID": row['Nofence ID'],
"ID": row['ID'],
"Type dyr": row['Type dyr'],
"Tid": row['Tid'],
"Aktivitet": 'VIDEO SLUTT',
"Kommentar": '',
"Kalv": row['Kalv']},
index = [i + missing + 1]
)
activity_data = pd.concat([activity_data.iloc[:df.index[0]],
df,
activity_data.iloc[df.index[0]:]
]).reset_index(drop=True)
missing += 1
    print('Activity dataframe has {} missing rows with "VIDEO START/SLUTT"'.format(missing))
return activity_data
def unique_serials(activity_data):
"""
Creates a list of unique serials that the "Nofence ID" column in dataframe contains.
Parameters
----------
activity_data : pd.DataFrame
Activity registration data.
Returns
-------
serials : list
List of serials.
"""
serials = list(activity_data['Nofence ID'].unique())
print('Serials from dataframe: {}'.format(serials))
return serials
def activity_time_interval(activity_data):
"""
Makes a dataframe with all "VIDEO START" "VIDEO SLUTT" intervals
Parameters
----------
activity_data : pd.DataFrame
Activity registration data.
Returns
-------
start_stop : pd.DataFrame
Rows containing "VIDEO START/SLUTT"
"""
start_stop = activity_data[(activity_data['Aktivitet'] == 'VIDEO START') | \
(activity_data['Aktivitet'] == 'VIDEO SLUTT')]
return start_stop
def acc_time_corr(acc_data):
"""
    Gives the data better time resolution, since the timestamps originally
    only update every 32nd observation.
    Parameters
    ----------
    acc_data : pd.DataFrame
Accelerometer data.
Returns
-------
acc_data : pd.DataFrame
Accelerometer data with better time resolution.
"""
times = acc_data[['date', 'header_date']]
unique_time = times.drop_duplicates(subset=['header_date'])
unique_time.loc[:,'time_delta'] = unique_time.loc[:,'header_date'] - unique_time.loc[:,'header_date'].shift()
unique_time = unique_time.append({'time_delta': timedelta(seconds = 3)}, ignore_index=True)
time_iterator = unique_time.iterrows()
_, last = next(time_iterator) # First value of time_iterator
for i, time in time_iterator:
dt = time['time_delta'].total_seconds()
dt = dt / 32
df_dt = pd.to_timedelta(acc_data['index'].iloc[(i-1)*32:32+((i-1)*32)] * dt, unit='s')
acc_data['header_date'].iloc[(i-1)*32:32+((i-1)*32)] = acc_data['header_date'].iloc[(i-1)*32:32+((i-1)*32)] \
+ df_dt
acc_data.loc[:,'header_date'] = acc_data.header_date.dt.ceil(freq='s')
return acc_data
def import_aks(serials, start_stop, acc_names='kalvelykke'):
"""
    Select the relevant accelerometer data based on activity registration data.
Parameters
----------
serials : list
        List of serial numbers you want accelerometer data from.
start_stop : pd.DataFrame
Dataframe containing activity registration intervals.
acc_names : str, optional
String of letters before -ID. The default is "kalvelykke".
Returns
-------
start_slutt_acc : pd.DataFrame
        Accelerometer data from the time intervals and serials given by the serials and start_stop inputs.
"""
# Define column names
start_slutt_acc = pd.DataFrame(columns=['serial', 'date', 'header_date', 'index',
'x', 'y', 'z',
'xcal','ycal', 'zcal',
'norm'])
for serial in serials:
# Import files
df_acc = pd.read_csv('accelerometer\\{0}-{1}.csv'.format(acc_names, serial), header=1)
# Convert 'date' from str to datetime
df_acc['header_date'] = pd.to_datetime(df_acc['header_date'], format='%Y-%m-%dT%H:%M:%S')
# Makes a simple dataframe for all "VIDEO START/SLUTT" rows with selected serial
start_stop_ID = start_stop[(start_stop["Nofence ID"] == serial)]
# Makes simple dataframe for start and stop datetimes and combines to own interval dataframe
start_ID = start_stop_ID[(start_stop_ID["Aktivitet"] == 'VIDEO START')]
start_ID = start_ID['Tid'].reset_index(drop=True)
stop_ID = start_stop_ID[(start_stop_ID["Aktivitet"] == 'VIDEO SLUTT')]
stop_ID = stop_ID['Tid'].reset_index(drop=True)
intervals = pd.concat([start_ID, stop_ID], axis=1)
intervals.columns = ['start', 'stop']
# Combines all intervals to one dataframe with relevant data
for i in intervals.index:
df_interval = df_acc[(df_acc['header_date'] > intervals['start'][i]) & \
(df_acc['header_date'] <= intervals['stop'][i])]
df_interval = acc_time_corr(df_interval)
start_slutt_acc = start_slutt_acc.append(df_interval,
ignore_index=True)
return start_slutt_acc
def remove_dier(activity_data):
    """
    Removes the rows in activity registrations that contain "Dier start" or
    "Dier slutt" and that have "Kalv" == False.
    Parameters
    ----------
    activity_data : pd.DataFrame
        Activity registration data.
    Returns
    -------
    activity_data : pd.DataFrame
        Activity registration data without "Dier start" and "Dier slutt" where "Kalv" == False.
    """
    # Keep a row unless it is a "Dier start"/"Dier slutt" event on a non-calf
    # (the original expression mixed & and | precedence and kept too much).
    is_dier = activity_data['Aktivitet'].str.contains('Dier start') | \
              activity_data['Aktivitet'].str.contains('Dier slutt')
    activity_data = activity_data[~(is_dier & (activity_data['Kalv'] == False))]
    activity_data = activity_data.reset_index(drop=True)
    return activity_data
def connect_data(activity_data, start_slutt_acc):
"""
    Connects activity registrations and accelerometer data so that the accelerometer observations have labels.
Parameters
----------
activity_data : pd.DataFrame
Activity registration data.
start_slutt_acc : pd.DataFrame
Accelerometer data
Returns
-------
acc_act : pd.DataFrame
        Accelerometer data with labels.
"""
# Activities: Resting = 0, Movement = 1, Grazing = 2, Suckle = 3, Ruminate = 4
start_slutt_acc['kalv'] = np.nan
start_slutt_acc['aktivitet'] = np.nan
# Iterates through list of activity registrations
acc = 0 # Start activity
row_iterator = activity_data.iterrows()
_, last = next(row_iterator) # First Value of row_iterator
for i, row in row_iterator:
        # Makes a mask for the relevant time interval of accelerometer data that is to be labeled
mask = (start_slutt_acc['serial'] == last['Nofence ID']) & \
(start_slutt_acc['header_date'] > last['Tid']) & \
(start_slutt_acc['header_date'] <= row['Tid'])
if last['Aktivitet'] == 'VIDEO START':
            acc = 0  # the cow/calf is resting whenever a registration starts
start_slutt_acc.loc[mask, 'aktivitet'] = acc
elif last['Aktivitet'] == 'VIDEO SLUTT':
pass
elif last['Aktivitet'] == 'Legger seg':
acc = 0
start_slutt_acc.loc[mask, 'aktivitet'] = acc
elif last['Aktivitet'] == 'Reiser seg':
acc = 0
start_slutt_acc.loc[mask, 'aktivitet'] = acc
elif last['Aktivitet'] == 'Dier start':
start_slutt_acc.loc[mask, 'aktivitet'] = 3
elif last['Aktivitet'] == 'Dier slutt':
start_slutt_acc.loc[mask, 'aktivitet'] = acc
elif last['Aktivitet'] == 'Beiter start':
start_slutt_acc.loc[mask, 'aktivitet'] = 2
elif last['Aktivitet'] == 'Beiter slutt':
start_slutt_acc.loc[mask, 'aktivitet'] = acc
elif last['Aktivitet'] == 'Bevegelse start':
start_slutt_acc.loc[mask, 'aktivitet'] = 1
elif last['Aktivitet'] == 'Bevegelse slutt':
start_slutt_acc.loc[mask, 'aktivitet'] = acc
elif last['Aktivitet'] == 'Tygge start':
start_slutt_acc.loc[mask, 'aktivitet'] = 4
elif last['Aktivitet'] == 'Tygge slutt':
start_slutt_acc.loc[mask, 'aktivitet'] = acc
start_slutt_acc.loc[mask, 'kalv'] = last['Kalv'] # Makes a column that informs if data is calf or not
last = row
# Data has floating point precision errors that need correcting
acc_act = start_slutt_acc.round({'xcal': 3, 'ycal': 3, 'zcal': 3})
# Removes rows containing nan and converts the column "aktivitet" from float to int
acc_act = acc_act.dropna()
acc_act['aktivitet'] = acc_act['aktivitet'].astype('int64')
return acc_act
def select_serial(df_input, serial):
"""
Selects data based on serial
Parameters
----------
df_input : pd.DataFrame
DataFrame to do selection on.
    serial : int
Serial to select.
Returns
-------
    df_output : pd.DataFrame
Selected data based on serial.
"""
df_output = df_input[df_input['serial'] == serial]
return df_output
def save_dataframe(data, path, index=False):
"""
Saves data to given path as csv.
Parameters
----------
data : pd.DataFrame
Data to be saved.
path : str
Location and file name of data to be saved.
index : boolean, optional
Specifies if index is wanted or not. The default is False.
Returns
-------
None.
"""
data.to_csv(path, index=index)
if __name__ == '__main__':
pass
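
    # Added end-to-end sketch (illustrative; the CSV names below are
    # assumptions, not files shipped with the project):
    #
    #   activity = correction_activity(import_activity('activity.csv'))
    #   activity = offset_time(activity)                  # CEST -> UTC
    #   activity = start_stop_corr(activity)
    #   serials = unique_serials(activity)
    #   intervals = activity_time_interval(activity)
    #   acc = import_aks(serials, intervals)
    #   labelled = connect_data(remove_dier(activity), acc)
    #   save_dataframe(labelled, 'labelled_acc.csv')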
|
[
"knut-henning@hotmail.com"
] |
knut-henning@hotmail.com
|
e8b6d9e6ee98021eea15cbb842f0600d7552fee4
|
b7508f8ecda22db0acc169ea7299b0a0d614405e
|
/Huffman.py
|
2d1826bf5cbb53d41ded06d6827a99c08526a569
|
[
"Unlicense"
] |
permissive
|
SundarGopal/HuffmanEncoding
|
2ad3576df72cf577997c641a92d688fae1171d77
|
c2a4accf2675e0e44bdc607e3ef45d4ae56c776e
|
refs/heads/main
| 2023-01-07T23:19:57.098096
| 2020-11-07T18:38:27
| 2020-11-07T18:38:27
| 310,909,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,801
|
py
|
'''
@author [Sundar G]
@email [sundargopal17@gmail.com]
@desc [Huffman Encoding Scheme]
'''
#file = open('doc_aede9a09-f36f-421c-ac4f-1c76376016da.docx','r')
import docx as d
import heapq
import sys
file = d.Document('HuffmanAssignment.docx')
p = file.paragraphs
dictionary ={}
for i in p:
for j in i.text:
num = dictionary.keys()
k = j.lower()
if k in num:
dictionary[k] +=1
else:
dictionary[k]=1
print(dictionary)
counts = dictionary.values()
j = 0
alphanum_dict = {key: value for key, value in dictionary.items() if key.isalpha()}
num_dict = {key: value for key, value in dictionary.items() if key.isnumeric()}
alphanum_dict.update(num_dict) #alphanumerical dictionary
print('\n')
for i in counts:
j = j+i
print('Total is :{}'.format(j))
def huffman_encoding(frequency):
heap=[[weight,[symbol,'']] for symbol,weight in frequency.items()]
heapq.heapify(heap)
while len(heap)>1:
lo = heapq.heappop(heap)
hi = heapq.heappop(heap)
for pair in lo[1:]:
pair[1] = '0'+ pair[1]
for pair in hi[1:]:
pair[1] = '1'+ pair[1]
heapq.heappush(heap,[lo[0]+hi[0]]+lo[1:]+hi[1:])
return sorted(heapq.heappop(heap)[1:],key=lambda p:(len(p[-1]),p))
coding_scheme=huffman_encoding(dictionary)
print(coding_scheme)
print('\n')
print('\t|Letter/Number |Probability |Huffman Code |')
print('\t-----------------------------------------------')
for d in coding_scheme:
    # use the computed total j rather than a hardcoded document count
    print('\t|{}\t\t|{:.05f}\t|{}'.format(d[0], dictionary[d[0]]/float(j), d[1]))
s=0
for i in coding_scheme:
    s += dictionary[i[0]]/float(j)
print('Probability sum is:{}'.format(s))
new_dictionary = {}
for i in coding_scheme:
    new_dictionary[i[1]] = i[0]
name = list(input('Enter your name to encode:'))
roll_number = list(input('Enter your roll number to encode:'))
code1 = ''
code2 = ''
for i in name:
for d in coding_scheme:
if i==d[0]:
code1 +=d[1]
for j in roll_number:
for d in coding_scheme:
if j==d[0]:
code2 +=d[1]
print('Huffman Code for your name using the given coding scheme is: {}'.format(code1))
print('Huffman code for your roll number using the given coding scheme is: {}'.format(code2))
def huffman_decode(dictionary, text):
res = ''
while text:
for k in dictionary:
if text.startswith(k):
res += dictionary[k]
text = text[len(k):]
return res
a = huffman_decode(new_dictionary,code1)
b = huffman_decode(new_dictionary,code2)
print('\nDecoded entity 1 is :{}'.format(a))
print('Decoded entity 2 is :{}'.format(b))
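
# Added sanity check (illustrative): if every character of the input occurs in
# the source document, decoding the encoded name reproduces the input exactly.
if a == ''.join(name):
    print('Round-trip check passed: decode(encode(name)) == name')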
|
[
"noreply@github.com"
] |
noreply@github.com
|
93104fbe2660bf5252f22f0c831b363c1025c19e
|
6c7a481f937e3979e9570e13eabc71434c94cfbb
|
/cfehome/settings.py
|
508b835a11de10cd6fcd53dcc0f69ea427f60d64
|
[
"Apache-2.0"
] |
permissive
|
snoop2head/docker_django_sample
|
59c3ba4bd4e83ec924b87f0b672e57858e278ec9
|
25403f1a656e42e36ae48ca4d5f2c338c178a28f
|
refs/heads/master
| 2021-09-27T19:45:23.451562
| 2020-06-24T01:54:59
| 2020-06-24T01:54:59
| 250,829,449
| 1
| 0
|
Apache-2.0
| 2021-06-10T18:53:42
| 2020-03-28T15:27:50
|
Python
|
UTF-8
|
Python
| false
| false
| 3,182
|
py
|
"""
Django settings for cfehome project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "c(=5(z-ig4(y9e_%j9oo8q1v2yb490nr7s_gsznp^tasg6_sz&"
# DEBUG can be True/False or 1/0
DEBUG = int(os.environ.get("DEBUG", default=1))
ALLOWED_HOSTS = [
    # host names only; an entry with a scheme like "http://" never matches
    "docker-sample-dev.ap-northeast-2.elasticbeanstalk.com",
]
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "cfehome.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "cfehome.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = "/static/"
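
# Usage note (added): DEBUG is read from the environment, e.g.
#   DEBUG=0 python manage.py runserver   # production-like run
#   DEBUG=1 python manage.py runserver   # development run (the default)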
|
[
"young_ahn@yonsei.ac.kr"
] |
young_ahn@yonsei.ac.kr
|
a6898783cf31e5ca77accada2f38bcc5d263acfb
|
e2733ff96927008c6b8c2c9a0fc36d57e61ad451
|
/bcn_rp/migrations/0007_auto_20170325_1713.py
|
6dad1aeed31ad81e17ba2886f880b8d45b69c47e
|
[] |
no_license
|
miquel-corral/bcn_rp
|
9504a19ed7b5abcd3d8490470883d4e6be500806
|
00b8fa83c9bdbc095916868afe552ddf23d88b0d
|
refs/heads/master
| 2020-04-06T04:06:56.615536
| 2017-03-26T20:32:34
| 2017-03-26T20:32:34
| 83,038,943
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-25 17:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bcn_rp', '0006_auto_20170325_1125'),
]
operations = [
migrations.AlterField(
model_name='assessmentstakeholder',
name='name',
field=models.CharField(max_length=250),
),
]
|
[
"miquel.corral@gmail.com"
] |
miquel.corral@gmail.com
|
254c477f7b6696bb460c4e385c44a4647974b388
|
42f1e442ee88a2bb541de1ce2d30513a183a5c3e
|
/api/cli/predict.py
|
1b5c4741639dff905c556d0e45280e86a87eb68e
|
[
"MIT"
] |
permissive
|
yastai/YastAI
|
535e6edd8851a54d98d3be0479a7359db672996d
|
f5a05841126da4acd9b7250c5bf6f627ac1703d5
|
refs/heads/master
| 2022-12-04T15:50:20.163698
| 2020-08-24T06:44:53
| 2020-08-24T06:44:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
def predict():
raise NotImplementedError
if __name__ == "__main__":
import argparse
    ap = argparse.ArgumentParser(description="Consume trained model.")
args = vars(ap.parse_args())
predict()
|
[
"sebastian.borquez.g@gmail.com"
] |
sebastian.borquez.g@gmail.com
|
78689f753ab8940dfc5e35772d0880fff3150950
|
1f80f5f439e6fdbd80edeedb0e0bf1452ebb86d6
|
/vehicle/migrations/0001_initial.py
|
0c27e9db042a47cb3a854fa09fa7946450206925
|
[] |
no_license
|
asimejaz14/cabrooz-project
|
3465ddd3ddcd5071140dba59772e6d18277c1e45
|
47611d3fed2a66cfd908e31915ffea215b27485b
|
refs/heads/master
| 2023-07-23T06:50:30.507368
| 2023-07-18T12:01:00
| 2023-07-18T12:01:00
| 370,489,574
| 0
| 0
| null | 2023-07-18T11:59:22
| 2021-05-24T21:30:50
|
Python
|
UTF-8
|
Python
| false
| false
| 908
|
py
|
# Generated by Django 3.2.3 on 2021-05-29 08:24
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Vehicle',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vehicle_name', models.CharField(blank=True, max_length=200, null=True)),
('registration_number', models.CharField(blank=True, max_length=200, null=True, unique=True)),
('vehicle_color', models.CharField(blank=True, max_length=200, null=True)),
('vehicle_model', models.CharField(blank=True, max_length=200, null=True)),
('vehicle_maker', models.CharField(blank=True, max_length=200, null=True)),
],
),
]
|
[
"asim.ejaz14@gmail.com"
] |
asim.ejaz14@gmail.com
|
f9684b93c85517e725cb733ab16376c145a8c709
|
8d285d4933c58557f3362838993e03e9a12ba6c0
|
/examen02.py
|
68ac113244272ac3f5241693250cd511c59aa72a
|
[] |
no_license
|
mprotti/CursoPhyton
|
18b4cec996cbb4b6a5c46ac4a6f5af0925309a62
|
23c64a48c65d315e1b5644f64555fb66a0d1b4b6
|
refs/heads/master
| 2020-05-30T23:18:11.151205
| 2019-06-13T16:49:02
| 2019-06-13T16:49:02
| 190,014,055
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
#Write a Python class that reverses a string of words
#Input: "Mi Diario Python"
#Output: "Python Diario Mi"
class revierta():
def __init__(self, string):
self.string = string
self.invertida = string.split()[::-1]
self.invertida = ' '.join(self.invertida)
def __str__(self):
return ('Frase original: {} \nFrase revertida: {}'.format(self.string, self.invertida))
string = "Mi Diario Python"
a = revierta(string)
print(a)
|
[
"m21protti@gmail.com"
] |
m21protti@gmail.com
|
311abd6f195d36542f90a27ae366b5bbe1325dd5
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02586/s677278751.py
|
be1ac288360753caa4fa9d68ee573cb35be6d292
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 879
|
py
|
import sys
sys.setrecursionlimit(10**7)
readline = sys.stdin.buffer.readline
def readstr():return readline().rstrip().decode()
def readstrs():return list(readline().decode().split())
def readint():return int(readline())
def readints():return list(map(int,readline().split()))
def printrows(x):print('\n'.join(map(str,x)))
def printline(x):print(' '.join(map(str,x)))
r,c,k = readints()
a = [[0]*(c+1) for i in range(r+1)]
for i in range(k):
R,C,V = readints()
a[R][C] = V
# dp state: (row i, column j, l = number of items picked so far in row i, 0..3)
dp = [0]*(r+1)*(c+1)*4
for x in range((r+1)*(c+1)*4):
i = x//((c+1)*4)
l = x%((c+1)*4)
j = l//4
l %= 4
if i==0 or j==0:
continue
if l==0:
dp[x] = max(dp[i*(c+1)*4 + (j-1)*4], dp[(i-1)*(c+1)*4 + j*4 + 3])
else:
dp[x] = max(dp[i*(c+1)*4 + (j-1)*4 + l], dp[(i-1)*(c+1)*4 + j*4 + 3]+a[i][j], dp[i*(c+1)*4 + (j-1)*4 + l-1]+a[i][j])
print(dp[-1])
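
# Illustrative run (added; this appears to be AtCoder's "Picking Goods" DP):
#   echo '2 2 3
#   1 1 3
#   2 1 4
#   1 2 5' | python s677278751.py
# expected output: 8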
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
b70d7b4c692b7c651a29ec4e41e3a02cc4a36414
|
9efead193cfde1d6330288e1a6916b764eff9a80
|
/discreet_mathematics/reverse_linked_list.py
|
20b54829231a5346f665298694c67ecb0e5ea01f
|
[] |
no_license
|
lukorito/ds
|
f860d0c84060065362342bbc0fca6c2e63c68380
|
d1a7136d63b5c807c7b4e263caa097d813fa1a0c
|
refs/heads/main
| 2023-04-30T02:40:27.121008
| 2021-05-18T07:35:10
| 2021-05-18T07:35:10
| 314,171,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 813
|
py
|
# Input: 1->2->3->4->5->NULL
# Output: 5->4->3->2->1->NULL
# ITERATIVE
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def reverseList(self, head: ListNode) -> ListNode:
prev = None
current = head
while current is not None:
            nxt = current.next  # avoid shadowing the builtin next()
            current.next = prev
            prev = current
            current = nxt
return prev
# RECURSIVE
class Solution:
def reverseList(self, head: ListNode) -> ListNode:
if head is None or head.next is None:
return head
p = self.reverseList(head.next)
head.next.next=head
head.next = None
return p
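
# Added usage sketch (illustrative): build 1->2->3, reverse, read values back.
if __name__ == '__main__':
    head = ListNode(1, ListNode(2, ListNode(3)))
    rev = Solution().reverseList(head)  # the recursive version defined last
    vals = []
    while rev:
        vals.append(rev.val)
        rev = rev.next
    print(vals)  # expected: [3, 2, 1]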
|
[
"lukkitt@live.co.uk"
] |
lukkitt@live.co.uk
|
278c497eb215b7dcdf428ea2e4bf87f61ce8be94
|
c38555124875d475cac00703318983a492a91ff4
|
/scripts/ansi-to-gitlab-markdown.py
|
f34983c4d1202d4da22b72bc706364c1fa54d3f8
|
[] |
no_license
|
c-zuo/gcp-turbo
|
7e72de63fb012491dfc6a2239a0fa115f75027cb
|
1d034538d8e4c88b4d5643c7353036854ab1f4bb
|
refs/heads/master
| 2023-01-31T16:51:04.439322
| 2020-12-15T15:12:39
| 2020-12-15T15:12:39
| 321,696,872
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
#!/usr/bin/env python3
from sys import stdin
import re
addition = re.compile(r"(\x1B\[3\dm)(.+?)(\x1B\[0m)")
heading = re.compile(r"(\x1B\[1m)( *# .+)")
emphasis = re.compile(r"(\x1B\[1m)(.+?)(\x1B\[0m)")
ansi_seqs = re.compile(r"\x1B\[.+?m")
print('<pre>')
for line in stdin:
line = line.rstrip('\n')
line = heading.sub("<h4>\\2</h4>", line)
line = addition.sub("<b>\\2</b>", line)
line = emphasis.sub("<b>\\2</b>", line)
line = ansi_seqs.sub("", line)
print(line)
print('</pre>')
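
# Usage sketch (added; the command is hypothetical): pipe ANSI-coloured output
# through this filter to get GitLab-friendly HTML, e.g.
#   some-tool --color | scripts/ansi-to-gitlab-markdown.py > report.html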
|
[
"chenzuo@google.com"
] |
chenzuo@google.com
|
8f34b168e70a6d53b046eb78589ccb3cdef153e7
|
a93cfeed4c2a2833f1896bf5f39aa31955f21efe
|
/Data Structures/Sorting Algorithms/Selection.Sort.py
|
c162f4c55b60231f107988ad8b2a5a2489a1a4c3
|
[] |
no_license
|
akshat343/Python-Programming
|
ae83d05408fb67d51d388df22492dfe743596b2a
|
f5a1540770388e49d65536352ce1816c406d5229
|
refs/heads/master
| 2023-08-05T19:58:40.293770
| 2021-10-07T07:25:46
| 2021-10-07T07:25:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,173
|
py
|
# Author: Robin Singh
# Problem: selection sort algorithm
# Selection sort divides the input list into two sublists:
# 1.) a sorted sublist of items built up from left to right at the front of the list
# 2.) a sublist of the remaining unsorted items occupying the rest of the list
# Initially the sorted sublist is empty and the unsorted sublist is the entire input.
# Each pass finds the smallest element of the unsorted sublist, swaps it with
# the first unsorted element, and moves the sublist boundary one step right.
# This sorting technique is similar in spirit to insertion sort.
# Time complexity:
# BEST CASE: O(n^2)
# WORST CASE: O(n^2)
def selectionsort(a, n):
    for i in range(0, n-1):
        min_idx = i  # avoid shadowing the builtin min()
        for j in range(i+1, n):
            if a[j] < a[min_idx]:
                min_idx = j
        if min_idx != i:
            a[i], a[min_idx] = a[min_idx], a[i]
a =[5,3,2,6,89,42,11,75,2,8,9,0]
selectionsort(a,len(a))
print(a)
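
# Added check (illustrative): the in-place sort should agree with sorted().
assert a == sorted([5, 3, 2, 6, 89, 42, 11, 75, 2, 8, 9, 0])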
|
[
"Robin25Tech@gmail.com"
] |
Robin25Tech@gmail.com
|
fb9ace085f83b2c784be8b80bbbd36fd98b8cf71
|
d61fad70c07644b7ad6c1cb1d52fab9ab65db6f3
|
/PS2/proj2_code/projection_matrix.py
|
ae0ec16b1fa952e934218fca8cf72a33635110f2
|
[] |
no_license
|
meera1hahn/Computer_Vision_CS_6476
|
3991184c710dcb03c7e98fb064e2daedb76c0be8
|
fa812f4e125555d380e3cefbb6fe8bd0b24fb461
|
refs/heads/master
| 2023-08-30T23:56:31.462794
| 2020-08-12T04:54:16
| 2020-08-12T04:54:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,622
|
py
|
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from scipy.optimize import least_squares
from scipy.optimize import minimize
from scipy.linalg import rq
import time
def objective_func(x, **kwargs):
"""
Calculates the difference in image (pixel coordinates) and returns
it as a 2*n_points vector
Args:
- x: numpy array of 11 parameters of P in vector form
(remember you will have to fix P_34=1) to estimate the reprojection error
- **kwargs: dictionary that contains the 2D and the 3D points. You will have to
retrieve these 2D and 3D points and then use them to compute
the reprojection error.
Returns:
    - diff: A N_points-d vector (1-D numpy array) of differences between
projected and actual 2D points
"""
diff = None
##############################
# TODO: Student code goes here
P=np.array([[x[0],x[1],x[2],x[3]], [x[4],x[5],x[6],x[7]], [x[8],x[9],x[10],1]])
x_3d= kwargs["pts3d"]
x_2d= kwargs["pts2d"]
xp= projection(P,x_3d)
diff= xp-x_2d
diff=diff.flatten()
# raise NotImplementedError
##############################
return diff
def projection(P: np.ndarray, points_3d: np.ndarray) -> np.ndarray:
"""
    Computes projection from [X,Y,Z,1] in homogeneous coordinates to
    (x,y) in non-homogeneous image coordinates.
Args:
- P: 3x4 projection matrix
    - points_3d : n x 4 array of points [X_i,Y_i,Z_i,1] in homogeneous coordinates
      or n x 3 array of points [X_i,Y_i,Z_i]
    Returns:
    - projected_points_2d : n x 2 array of points in non-homogeneous image coordinates
"""
projected_points_2d = None
##############################
# TODO: Student code goes here
if(points_3d.shape[1]==3):
n_ones= np.ones((points_3d.shape[0],1))
points_3d= np.hstack((points_3d,n_ones))
projected_homo= np.dot(P,points_3d.T)
x=projected_homo[0]/projected_homo[2]
y=projected_homo[1]/projected_homo[2]
projected_points_2d = np.vstack((x,y)).T
# raise NotImplementedError
##############################
return projected_points_2d
def estimate_camera_matrix(pts2d: np.ndarray,
pts3d: np.ndarray,
initial_guess: np.ndarray) -> np.ndarray:
'''
    Calls least_squares from scipy.optimize and
    returns an estimate for the camera projection matrix
Args:
- pts2d: n x 2 array of known points (x_i, y_i) in image coordinates
- pts3d: n x 3 array of known points in 3D, (X_i, Y_i, Z_i, 1)
- initial_guess: 3x4 projection matrix initial guess
Returns:
- P: 3x4 estimated projection matrix
Note: Because of the requirements of scipy.optimize.least_squares
you will have to pass the projection matrix P as a vector.
Since we will fix P_34 to 1 you will not need to pass all 12
matrix parameters.
You will also have to put pts2d and pts3d into a kwargs dictionary
that you will add as an argument to least squares.
We recommend that in your call to least_squares you use
- method='lm' for Levenberg-Marquardt
- verbose=2 (to show optimization output from 'lm')
- max_nfev=50000 maximum number of function evaluations
- ftol \
- gtol --> convergence criteria
- xtol /
- kwargs -- dictionary with additional variables
for the objective function
'''
P = None
start_time = time.time()
##############################
# TODO: Student code goes here
p0=initial_guess.flatten()[:-1]
kwargs_dict={"pts2d":pts2d,"pts3d":pts3d}
ls= least_squares(objective_func, p0, method='lm', verbose=2, max_nfev=50000, kwargs=kwargs_dict)
P=np.array([[ls.x[0],ls.x[1],ls.x[2],ls.x[3]], [ls.x[4],ls.x[5],ls.x[6],ls.x[7]], [ls.x[8],ls.x[9],ls.x[10],1]])
# raise NotImplementedError
##############################
print("Time since optimization start", time.time() - start_time)
return P
def decompose_camera_matrix(P: np.ndarray) -> (np.ndarray, np.ndarray):
'''
Decomposes the camera matrix into the K intrinsic and R rotation matrix
Args:
- P: 3x4 numpy array projection matrix
Returns:
- K: 3x3 intrinsic matrix (numpy array)
- R: 3x3 orthonormal rotation matrix (numpy array)
hint: use scipy.linalg.rq()
'''
K = None
R = None
##############################
# TODO: Student code goes here
M=P[:,:-1]
K, R = rq(M)
# raise NotImplementedError
##############################
return K, R
def calculate_camera_center(P: np.ndarray,
K: np.ndarray,
R_T: np.ndarray) -> np.ndarray:
"""
Returns the camera center matrix for a given projection matrix.
Args:
- P: A numpy array of shape (3, 4) representing the projection matrix
Returns:
- cc: A numpy array of shape (1, 3) representing the camera center
location in world coordinates
"""
cc = None
##############################
# TODO: Student code goes here
inv= np.linalg.inv(np.dot(K,R_T))
I_t= np.dot(inv,P)
cc=-I_t[:,-1]
# raise NotImplementedError
##############################
return cc
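

# Added illustrative check (not part of the assignment): with the trivial
# camera P = [I | 0], a 3D point projects to (X/Z, Y/Z).
if __name__ == '__main__':
    P_demo = np.hstack([np.eye(3), np.zeros((3, 1))])
    pts_demo = np.array([[0.5, -0.5, 2.0]])
    print(projection(P_demo, pts_demo))  # expected: [[ 0.25 -0.25]]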
|
[
"tpasumarthi3@gatech.edu"
] |
tpasumarthi3@gatech.edu
|
e3f2b333eb10b93e318804ca96644a90273b6d39
|
0be7bbe668723f669257d759bec6da49bccc40c3
|
/数据处理/输入神经元的数据处理/3_GetPopularKeywordData.py
|
dd6a936b745eba80ee885fa877dc0716b655694c
|
[
"Apache-2.0"
] |
permissive
|
xingyu321/SZU_gwt_predict_network
|
57e8ded0a896c24068cde601d00cd164d88b116e
|
13e0191318f63cc8dae4d62ccd70fce87f5393ef
|
refs/heads/master
| 2022-11-13T07:53:02.455057
| 2020-07-13T06:15:46
| 2020-07-13T06:15:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 723
|
py
|
"""
这个脚本的目的是把数据中出现频率前1534名的关键词找出来
由2脚本可以知道前1534名的关键词正好就是出现次数不小于20的公文
"""
import json
# Read the data
with open('数据处理\\输入神经元的数据处理\\2_word_weights.json', 'r', encoding='utf-8') as f:
_data = json.loads(f.read())
new_data_list = {}
# Find and store the keywords that appear in at least 20 documents
for keyword in _data.keys():
if _data[keyword]['total_doc_num'] >= 20:
new_data_list[keyword] = _data[keyword]
# Write the data
with open('数据处理\\输入神经元的数据处理\\3_word_weights.json', 'a+', encoding='utf-8') as f:
f.write(json.dumps(new_data_list, ensure_ascii=False))
|
[
"56833537+cjw-bot@users.noreply.github.com"
] |
56833537+cjw-bot@users.noreply.github.com
|
6ae367f46c4e0630eabdc3e3c950b315dad2b7c9
|
7044947d6a1e5d0754af1c3b9ecb4407452d61c4
|
/pitstop/encodings/__init__.py
|
4ec53cf02b334023b962835cb60f2d0f39a05b81
|
[
"MIT"
] |
permissive
|
darvid/pitstop
|
1e2d3ea0f45b037f558ae1b4f28396bdfa89a51a
|
517fd3631b437bc5591cea10aacbecde7d21e9ac
|
refs/heads/master
| 2020-04-09T04:16:21.166646
| 2018-12-05T02:36:54
| 2018-12-05T02:36:54
| 160,016,392
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 76
|
py
|
"""Provides encoding interfaces for various configuration file formats."""
|
[
"david.gidwani@gmail.com"
] |
david.gidwani@gmail.com
|
ce1acce5ee2167090f5425a41c17edd5b087b61e
|
473727c4829597b0cba18126e2c56888ad8c71e3
|
/leetcode/213. House Robber II.py
|
3601d4ee49a093ee7d9c9a8023f85a59fba79672
|
[] |
no_license
|
unclemeth/python_au
|
d3fc99b01125ea0b6d3352e762df5556e21134fc
|
ff49977d39217614840fdc35e6460898a3e13fb6
|
refs/heads/master
| 2023-05-08T12:57:09.453617
| 2021-05-28T21:11:37
| 2021-05-28T21:11:37
| 302,641,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
class Solution:
    def rob(self, nums):
        # Houses form a circle, so the first and last house are adjacent:
        # rob either nums[:-1] (skip the last) or nums[1:] (skip the first).
        if not nums: return 0
        # with up to 3 houses on a circle, any two are adjacent: rob one house
        # (the original `< 3` check wrongly returned max(nums[:-1]) for 3 houses)
        if len(nums) <= 3:
            return max(nums)
        nums1 = nums[:-1]
        a = nums1[0]
        b = max(nums1[0], nums1[1])
        for i in range(2, len(nums1)):
            curr_max = max(a + nums1[i], b)
            a = b
            b = curr_max
        nums2 = nums[1:]
        c = nums2[0]
        d = max(nums2[0], nums2[1])
        for i in range(2, len(nums2)):
            curr_max = max(c + nums2[i], d)
            c = d
            d = curr_max
        return max(b, d)
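
# Added quick checks (illustrative):
# [2,3,2] is circular, so only one house can be robbed -> 3
# [1,2,3,1] -> rob houses 1 and 3 -> 4
assert Solution().rob([2, 3, 2]) == 3
assert Solution().rob([1, 2, 3, 1]) == 4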
|
[
"unclemeth@mail.ru"
] |
unclemeth@mail.ru
|
32f27d3345124aa64f6acab0a9804e45ee8ce109
|
975f7bb7a94d6dae5a2e13e27081f3ab3e7cc22e
|
/Contactmanager/Contactmanager/urls.py
|
c84a2b7c13e0f9e563aa8364764b104c3197d914
|
[] |
no_license
|
saipraneethkurmelkar/THIRDJANGO
|
91fa04d7bfcd06534015ab1acfe4780ca6c117c8
|
0fe9102f7491f9c7609a505baf51b13cc7929db5
|
refs/heads/master
| 2020-04-17T11:16:23.293622
| 2019-01-19T10:33:25
| 2019-01-19T10:33:25
| 166,534,503
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 855
|
py
|
"""Contactmanager URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from ContactsApp import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.show),
url(r'^add/', views.add),
]
|
[
"info@info.com"
] |
info@info.com
|
63fbe68ce66e9cf557af763efa4ef7d6f3c9431e
|
589c7b665871fd901070461af0529a83bed50dbd
|
/Python_LetsKodeIt/Exception-Handling.py
|
d9d332b3cd03448029645666b1562e4b8bb34f25
|
[] |
no_license
|
rajeshvelur/django-deployments-example
|
661ab09a0e8385ece9078a1125d8a25030f30e93
|
baa03f066a45fe89cd670889dfab4c705f286112
|
refs/heads/master
| 2020-04-01T08:25:17.248545
| 2019-06-20T15:31:05
| 2019-06-20T15:31:05
| 153,029,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
# def exceptions_handling():
# try:
# a = 30
# b = 0
# c = 20
#
# d = (a/b)-c
# print(d)
# except:
# print("Zero Division Exception")
# raise Exception("this is an exception")
# else:
# print("Bcos no exception found")
# finally:
# print("this will execute always")
#
#
# exceptions_handling()
# print(x)
def exceptions_handling1():
    car = {"make": "BMW", "model": "x3", "Year": 2016}
    try:
        print(car["year"])  # KeyError: the key is "Year", not "year"
    except KeyError:
        print("exception here")
    finally:
        print("the finally block always executes")
exceptions_handling1()
|
[
"rajesh.kumar1@spglobal.com"
] |
rajesh.kumar1@spglobal.com
|
d1dd1de24c57b4399179bb5d549a990b01e72380
|
754a7854a09fb9e3add486e32d0ccb86b02a8847
|
/14_ng.py
|
81fa21b37320b13e645093af48fbf3b37a3dda1d
|
[] |
no_license
|
AmanouToona/atcoder_Intermediate
|
724824d0aba932b0cbc09b1e05a91741a1435497
|
61a2f454a8a95a3dd775edaf59ec60ea95766711
|
refs/heads/master
| 2023-01-03T19:21:51.710058
| 2020-10-26T12:16:42
| 2020-10-26T12:16:42
| 267,562,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,146
|
py
|
# Square869120Contest #4 B - Buildings are Colorful!
import sys
N, K = map(int, sys.stdin.readline().strip().split())
a = list(map(int, sys.stdin.readline().strip().split()))
def dfs(height=a[0], cost=0, target=K, can_see=1, i=1):
if can_see == target:
return cost
    # when the search has reached the end of the list
if i == len(a):
return float('inf')
    # case: building i is not picked
if height >= a[i]:
cost1 = dfs(height=height, cost=cost, target=K, can_see=can_see, i=i + 1)
else:
cost1 = dfs(height=a[i], cost=cost, target=K, can_see=can_see + 1, i=i + 1)
# print('cost1', cost1, 'can_see', can_see, 'i', i)
    # case: building i is picked
cost += max(0, height + 1 - a[i])
can_see += 1
height = max(a[i], height + 1)
    if can_see == target:  # this is where it goes wrong (author's note)
# print('height', height)
# print('can_see', can_see)
# print('cost', cost)
return cost
cost2 = dfs(height=height, cost=cost, target=K, can_see=can_see, i=i + 1)
# print('cost2', cost2)
return min(cost1, cost2)
print(dfs(a[0], cost=0, target=K, can_see=1, i=1))
|
[
"amanou.toona@gmail.com"
] |
amanou.toona@gmail.com
|
f759243ca8cb43b3d912b30fca2464aca3d24563
|
fcbc308e67905631116f7f8bb8723fb615da328b
|
/week4/linked_list3.py
|
4e6ca97373fc75c67105e65bb7ace0e59d96bef7
|
[] |
no_license
|
Sally-A/Python_fundamentals
|
b08d8f4d838431c4a4bce9772d53e5fa130f5689
|
14707757b17170c4568635bdcb4f68acda7a7446
|
refs/heads/main
| 2023-08-10T11:33:41.516127
| 2021-09-11T17:17:18
| 2021-09-11T17:17:18
| 396,183,497
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
class Node:
def __init__(self, value):
self.value = value
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def append(self, value):
new_node = Node(value)
if self.head is None:
self.head = new_node
print("Head Node created:", self.head.value)
return
node = self.head
while node.next is not None:
node = node.next
node.next = new_node
print("Apeended new Node with value:", node.next.value)
llist = LinkedList()
llist.append("First Node")
llist.append("Second Node")
llist.append("Third Node")
|
[
"Sally.Anderson@gmx.com"
] |
Sally.Anderson@gmx.com
|
9be2c18bb860725099d2e3a23e3256236c2683db
|
de482bf09ff73d60f59924c2f67b74431d91bd6d
|
/detector.py
|
e75714e19d71ce17a710049ca8f5d3f0fda734d3
|
[] |
no_license
|
fulequn/MTCNN
|
2f42b2f11dc363595a2480ca97c6b7cce028563e
|
6abcc11e1b69b34a989291c5d0c4210f079f20d4
|
refs/heads/master
| 2022-04-03T20:22:31.933960
| 2020-02-13T11:49:55
| 2020-02-13T11:49:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,571
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@Author: JianboZhu
@Contact: jianbozhu1996@gmail.com
@Date: 2019/12/1
@Description:
"""
import os
import glob
import numpy as np
import cv2
from nets.mtcnn import p_net, o_net, r_net
from preprocess.utils import py_nms, process_image, convert_to_square
class Detector:
def __init__(self, weight_dir,
min_face_size=24,
                 threshold=None,  # only use bboxes with probability above threshold
scale_factor=0.65,
mode=3,
slide_window=False,
stride=2):
        # mode
        # 1: use p_net to generate data for r_net
        # 2: use p_net + r_net to generate data for o_net
        # 3: use p_net + r_net + o_net to produce the final result
assert mode in [1, 2, 3]
        # needed for the image pyramid
assert scale_factor < 1
        # not used for now
self.slide_window = slide_window
self.stride = stride
self.mode = mode
        # image pyramid: images must not be smaller than this
self.min_face_size = min_face_size
        # only use bboxes with probability above threshold
self.threshold = [0.6, 0.7, 0.7] if threshold is None else threshold
        # image pyramid: shrink the image by this factor at each level
self.scale_factor = scale_factor
self.p_net = None
self.r_net = None
self.o_net = None
self.init_network(weight_dir)
def init_network(self, weight_dir='saved_models'):
p_weights, r_weights, o_weights = self._get_weights(weight_dir)
print('PNet weight file is: {}'.format(p_weights))
self.p_net = p_net()
self.p_net.load_weights(p_weights)
if self.mode > 1:
self.r_net = r_net()
self.r_net.load_weights(r_weights)
if self.mode > 2:
self.o_net = o_net()
self.o_net.load_weights(o_weights)
def predict(self, image):
im_ = np.array(image)
if self.mode == 1:
return self.predict_with_p_net(im_)
elif self.mode == 2:
return self.predict_with_pr_net(im_)
elif self.mode == 3:
return self.predict_with_pro_net(im_)
else:
raise NotImplementedError('Not implemented yet')
def predict_with_p_net(self, im):
return self._detect_with_p_net(im)
def predict_with_pr_net(self, im):
boxes, boxes_c = self._detect_with_p_net(im)
return self._detect_with_r_net(im, boxes_c)
def predict_with_pro_net(self, im):
boxes, boxes_c = self._detect_with_p_net(im)
boxes, boxes_c = self._detect_with_r_net(im, boxes_c)
return self._detect_with_o_net(im, boxes_c)
def _detect_with_p_net(self, im):
# print('p_net_predict---')
net_size = 12
current_scale = float(net_size) / self.min_face_size # find initial scale
im_resized = process_image(im, current_scale)
current_height, current_width, _ = im_resized.shape
all_boxes = []
while min(current_height, current_width) > net_size:
inputs = np.array([im_resized])
# print('inputs shape: {}'.format(inputs.shape))
labels, bboxes = self.p_net.predict(inputs)
# labels = np.squeeze(labels)
# bboxes = np.squeeze(bboxes)
labels = labels[0]
bboxes = bboxes[0]
            # only keep bboxes whose probability exceeds the threshold
# print('labels',labels.shape)
# print('bboxes',bboxes.shape)
boxes = self._generate_bbox(labels[:, :, 1], bboxes, current_scale, self.threshold[0])
            # move one level down the image pyramid
current_scale *= self.scale_factor
im_resized = process_image(im, current_scale)
current_height, current_width, _ = im_resized.shape
if boxes.size == 0:
continue
keep = py_nms(boxes[:, :5], 0.7, 'union')
boxes = boxes[keep]
all_boxes.append(boxes)
if len(all_boxes) == 0:
return None, None
return self._refine_bboxes(all_boxes)
def _detect_with_r_net(self, im, dets):
h, w, c = im.shape
dets = convert_to_square(dets)
dets[:, 0:4] = np.round(dets[:, 0:4])
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self._pad(dets, w, h)
num_boxes = dets.shape[0]
cropped_ims = np.zeros((num_boxes, 24, 24, 3), dtype=np.float32)
for i in range(num_boxes):
tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)
tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = im[y[i]:ey[i] + 1, x[i]:ex[i] + 1, :]
cropped_ims[i, :, :, :] = (cv2.resize(tmp, (24, 24))-127.5) / 128
# cls_scores : num_data*2
# reg: num_data*4
# landmark: num_data*10
cls_scores, reg = self.r_net.predict(cropped_ims)
cls_scores = cls_scores[:,1]
keep_inds = np.where(cls_scores > self.threshold[1])[0]
if len(keep_inds) > 0:
boxes = dets[keep_inds]
boxes[:, 4] = cls_scores[keep_inds]
reg = reg[keep_inds]
# landmark = landmark[keep_inds]
else:
return None, None
keep = py_nms(boxes, 0.6)
boxes = boxes[keep]
boxes_c = self._calibrate_box(boxes, reg[keep])
return boxes, boxes_c
def _detect_with_o_net(self, im, dets):
h, w, c = im.shape
dets = convert_to_square(dets)
dets[:, 0:4] = np.round(dets[:, 0:4])
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self._pad(dets, w, h)
num_boxes = dets.shape[0]
cropped_ims = np.zeros((num_boxes, 48, 48, 3), dtype=np.float32)
for i in range(num_boxes):
tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)
tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = im[y[i]:ey[i] + 1, x[i]:ex[i] + 1, :]
cropped_ims[i, :, :, :] = (cv2.resize(tmp, (48, 48))-127.5) / 128
cls_scores, reg,landmark = self.o_net.predict(cropped_ims)
# prob belongs to face
cls_scores = cls_scores[:,1]
keep_inds = np.where(cls_scores > self.threshold[2])[0]
if len(keep_inds) > 0:
# pickout filtered box
boxes = dets[keep_inds]
boxes[:, 4] = cls_scores[keep_inds]
reg = reg[keep_inds]
landmark = landmark[keep_inds]
else:
return None, None, None
# width
w = boxes[:,2] - boxes[:,0] + 1
# height
h = boxes[:,3] - boxes[:,1] + 1
landmark[:,0::2] = (np.tile(w,(5,1)) * landmark[:,0::2].T + np.tile(boxes[:,0],(5,1)) - 1).T
landmark[:,1::2] = (np.tile(h,(5,1)) * landmark[:,1::2].T + np.tile(boxes[:,1],(5,1)) - 1).T
boxes_c = self._calibrate_box(boxes, reg)
boxes = boxes[py_nms(boxes, 0.6, "minimum")]
keep = py_nms(boxes_c, 0.6, "minimum")
boxes_c = boxes_c[keep]
landmark = landmark[keep]
return boxes, boxes_c, landmark
@staticmethod
def _refine_bboxes(all_boxes):
all_boxes = np.vstack(all_boxes)
# merge the detection from first stage
keep = py_nms(all_boxes[:, 0:5], 0.5, 'union')
all_boxes = all_boxes[keep]
boxes = all_boxes[:, :5]
bbw = all_boxes[:, 2] - all_boxes[:, 0] + 1
bbh = all_boxes[:, 3] - all_boxes[:, 1] + 1
# refine the boxes
boxes_c = np.vstack([all_boxes[:, 0] + all_boxes[:, 5] * bbw,
all_boxes[:, 1] + all_boxes[:, 6] * bbh,
all_boxes[:, 2] + all_boxes[:, 7] * bbw,
all_boxes[:, 3] + all_boxes[:, 8] * bbh,
all_boxes[:, 4]])
boxes_c = boxes_c.T
return boxes, boxes_c
@staticmethod
def _calibrate_box(bbox, reg):
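        # Apply the regression offsets: shift each box's corners by
        # reg scaled with that box's (w, h, w, h).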
bbox_c = bbox.copy()
w = bbox[:, 2] - bbox[:, 0] + 1
w = np.expand_dims(w, 1)
h = bbox[:, 3] - bbox[:, 1] + 1
h = np.expand_dims(h, 1)
reg_m = np.hstack([w, h, w, h])
aug = reg_m * reg
bbox_c[:, 0:4] = bbox_c[:, 0:4] + aug
return bbox_c
# @staticmethod
# def _convert_to_square(bbox):
#
# square_bbox = bbox.copy()
#
# h = bbox[:, 3] - bbox[:, 1] + 1
# w = bbox[:, 2] - bbox[:, 0] + 1
# max_side = np.maximum(h, w)
# square_bbox[:, 0] = bbox[:, 0] + w * 0.5 - max_side * 0.5
# square_bbox[:, 1] = bbox[:, 1] + h * 0.5 - max_side * 0.5
# square_bbox[:, 2] = square_bbox[:, 0] + max_side - 1
# square_bbox[:, 3] = square_bbox[:, 1] + max_side - 1
# return square_bbox
@staticmethod
def _pad(bboxes, w, h):
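        # Clip boxes that run past the image border to [0, w-1] x [0, h-1]
        # and record where the valid pixels should be pasted inside a blank
        # (tmph, tmpw) crop.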
tmpw, tmph = bboxes[:, 2] - bboxes[:, 0] + 1, bboxes[:, 3] - bboxes[:, 1] + 1
num_box = bboxes.shape[0]
dx, dy = np.zeros((num_box,)), np.zeros((num_box,))
edx, edy = tmpw.copy() - 1, tmph.copy() - 1
x, y, ex, ey = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]
tmp_index = np.where(ex > w - 1)
edx[tmp_index] = tmpw[tmp_index] + w - 2 - ex[tmp_index]
ex[tmp_index] = w - 1
tmp_index = np.where(ey > h - 1)
edy[tmp_index] = tmph[tmp_index] + h - 2 - ey[tmp_index]
ey[tmp_index] = h - 1
tmp_index = np.where(x < 0)
dx[tmp_index] = 0 - x[tmp_index]
x[tmp_index] = 0
tmp_index = np.where(y < 0)
dy[tmp_index] = 0 - y[tmp_index]
y[tmp_index] = 0
return_list = [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph]
return_list = [item.astype(np.int32) for item in return_list]
return return_list
@staticmethod
def _generate_bbox(cls_map, reg, scale, threshold, stride=2, cell_size=12):
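        # Map every feature-map cell whose face score exceeds `threshold`
        # back to a cell_size x cell_size window in the original image
        # (undoing the network stride and the pyramid scale), keeping the
        # score and the four regression offsets.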
t_index = np.where(cls_map > threshold)
# find nothing
if t_index[0].size == 0:
return np.array([])
# offset
dx1, dy1, dx2, dy2 = [reg[t_index[0], t_index[1], i] for i in range(4)]
reg = np.array([dx1, dy1, dx2, dy2])
score = cls_map[t_index[0], t_index[1]]
bbox = np.vstack([np.round((stride * t_index[1]) / scale),
np.round((stride * t_index[0]) / scale),
np.round((stride * t_index[1] + cell_size) / scale),
np.round((stride * t_index[0] + cell_size) / scale),
score,
reg])
return bbox.T
@staticmethod
def _get_weights(weights_dir):
# weights_files = glob.glob('{}/*.h5'.format(weights_dir))
# p_net_weight = None
# r_net_weight = None
# o_net_weight = None
# for wf in weights_files:
# if 'pnet' in wf:
# p_net_weight = wf
# elif 'rnet' in wf:
# r_net_weight = wf
# elif 'onet' in wf:
# o_net_weight = wf
# else:
# raise ValueError('No valid weights files !')
# print(p_net_weight,r_net_weight,o_net_weight)
# if p_net_weight is None and r_net_weight is None and o_net_weight is None:
# raise ValueError('No valid weights files !')
p_net_weight = os.path.join(weights_dir, 'pnet.h5')
r_net_weight = os.path.join(weights_dir, 'rnet.h5')
o_net_weight = os.path.join(weights_dir, 'onet.h5')
return p_net_weight, r_net_weight, o_net_weight
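# Minimal usage sketch (added for illustration, not part of the original
# module): assumes pnet.h5 / rnet.h5 / onet.h5 exist under 'saved_models'
# and that 'test.jpg' is a hypothetical input image.
if __name__ == '__main__':
    detector = Detector('saved_models', mode=3)
    img = cv2.imread('test.jpg')
    boxes, boxes_c, landmarks = detector.predict(img)
    print('calibrated boxes:', None if boxes_c is None else boxes_c.shape)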
|
[
"1191353802@qq.com"
] |
1191353802@qq.com
|
1df0bc1d14f1f081ed9efbbc55acd412710f0d32
|
82050bff6e809419a170a992924f33b09bdf26fb
|
/functions/braceParser.py
|
b0177cffac00b112bed9b12e2e323ff5c08669f0
|
[] |
no_license
|
Phoshi/OMGbot
|
f1ffb9ae4d98bfe04bb68647c48335afdf39b224
|
402a50dcaa570dad3d17da6b84a201518c85b550
|
refs/heads/master
| 2016-09-05T16:34:39.830212
| 2012-02-29T23:23:24
| 2012-02-29T23:23:24
| 1,910,305
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,716
|
py
|
# -*- coding: utf-8 -*-
#BraceParser.py
class countError(Exception):
def __init__(self, expr, msg):
self.expr=expr
self.msg=msg
def __str__(self):
return self.msg
def parseBraces(input):
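    # Single pass: collect top-level (...) groups until the first top-level
    # '{', then collect top-level {...} groups; braces inside unmatched
    # parentheses are skipped.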
matchArray=[]
lookingForConditionals=True
conditionalArray=[]
conditionalSyntax=["(",")"]
matchSyntax=["{","}"]
conditionalCount=0
conditionalPosition=-1
matchCount=0
matchPosition=-1
for position,letter in enumerate(input):
if letter==conditionalSyntax[0] and lookingForConditionals:
conditionalCount+=1
if conditionalCount==1:
conditionalPosition=position
elif letter==conditionalSyntax[1] and lookingForConditionals:
conditionalCount-=1
if conditionalCount==0:
conditionalArray.append(input[conditionalPosition+1:position])
if letter==matchSyntax[0]:
if conditionalCount!=0:
continue
lookingForConditionals=False
matchCount+=1
if matchCount==1:
matchPosition=position
elif letter==matchSyntax[1]:
if conditionalCount!=0:
continue
matchCount-=1
if matchCount==0:
matchArray.append(input[matchPosition+1:position])
    if conditionalCount!=0:
        raise countError(input,"Unbalanced parentheses!")
    elif matchCount!=0:
        raise countError(input,"Unbalanced braces!")
return (conditionalArray, matchArray)
if __name__=="__main__":
input="!if (dicks==true) AND (lasers==awesome) {superdicks;var dicks = false;} else {hyperdicks;var dicks = true;}"
print parseBraces(input)
|
[
"AHPhoshi@gmail.com"
] |
AHPhoshi@gmail.com
|
ae969e5baa7b48325cb6f6fdbe1d53881a439e63
|
c248b9e128eaf1fe8b49f5865e2a1e6f6dbf856d
|
/main_app/migrations/0004_finch_toys.py
|
b1a60f0048953a80b4e18f7025e7d40296a7956f
|
[] |
no_license
|
jenny-martin/finchcollector
|
8826c02689289a115e9c446631b7bc009c7beaf4
|
b6722d9ea018bf937608a3e0fa8791fa534f79c3
|
refs/heads/master
| 2020-06-12T01:09:42.708428
| 2019-06-27T18:49:29
| 2019-06-27T18:49:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
# Generated by Django 2.2 on 2019-06-27 05:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_app', '0003_auto_20190627_0504'),
]
operations = [
migrations.AddField(
model_name='finch',
name='toys',
field=models.ManyToManyField(to='main_app.Toy'),
),
]
|
[
"jennymartin@Jennys-MacBook-Pro.local"
] |
jennymartin@Jennys-MacBook-Pro.local
|
192bda4fe1e44cd80c1eec10fcbed5a8fa12c812
|
27648171f6e9675ea1a2716445d34a4346693a86
|
/rnn_mnist.py
|
6773ed60e49b792ecba3763595642991632370fd
|
[] |
no_license
|
kniranjankumar/toyexamples
|
fc2700baec30f2d2888e3cb4f3f4f7b648402986
|
f30df3f4265fd4adc9093503abbfd09f65716443
|
refs/heads/master
| 2020-03-06T19:35:37.809594
| 2018-03-30T20:15:38
| 2018-03-30T20:15:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,107
|
py
|
import tensorflow as tf
from tensorflow.contrib import rnn
import tensorboard
#import mnist dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist=input_data.read_data_sets("/tmp/data/",one_hot=True)
#define constants
#unrolled through 28 time steps
time_steps=28
#hidden LSTM units
num_units=128
#rows of 28 pixels
n_input=28
#learning rate for adam
learning_rate=0.001
#mnist is meant to be classified in 10 classes(0-9).
n_classes=10
#size of batch
batch_size=128
out_weights=tf.Variable(tf.random_normal([num_units,n_classes]))
out_bias=tf.Variable(tf.random_normal([n_classes]))
x = tf.placeholder(tf.float32,[None,time_steps,n_input])
y = tf.placeholder(tf.float32,[None,n_classes])
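# unstack [batch, time_steps, n_input] into a list of time_steps tensors of
# shape [batch, n_input], which is the input format rnn.static_rnn expects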
input = tf.unstack(x,time_steps,axis=1)
lstm_layer = rnn.BasicLSTMCell(num_units,forget_bias=1)
outputs,_ = rnn.static_rnn(lstm_layer,input,dtype=tf.float32)
print('out')
prediction = tf.layers.dense(inputs=outputs[-1],units=10)
# prediction=tf.matmul(outputs[-1],out_weights)+out_bias
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction,labels=y))
opt = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
#correct prediction
correct_pred = tf.equal(tf.argmax(prediction,1),tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred,tf.float32))
tf.summary.scalar('loss',loss)
merged = tf.summary.merge_all()
init = tf.global_variables_initializer()
with tf.Session() as sess:
train_writer = tf.summary.FileWriter('./train', sess.graph)
sess.run(init)
for i in range(1000):
batch_x,batch_y=mnist.train.next_batch(batch_size=batch_size)
batch_x=batch_x.reshape((batch_size,time_steps,n_input))
sess.run(opt, feed_dict={x: batch_x, y: batch_y})
if i %10==0:
acc=sess.run(accuracy,feed_dict={x:batch_x,y:batch_y})
los,summary_out=sess.run([loss,merged],feed_dict={x:batch_x,y:batch_y})
train_writer.add_summary(summary_out, i)
print("For iter ",iter)
print("Accuracy ",acc)
print("Loss ",los)
print("__________________")
|
[
"kniranjankumar.eee@gmail.com"
] |
kniranjankumar.eee@gmail.com
|
053cc4671f3315c7c3119626f671455faea3e89f
|
2c04c06f8604a58ac9828aa6ee157593363a4806
|
/day_16.py
|
88dc24649a2f7f043f43246829d7a61ca0fd9fa4
|
[] |
no_license
|
mtskillman/advent_of_code_2015
|
e3ee16cb26127bb16a0617129ea8a5568c55a6aa
|
9b3efe1e63a519040dffc5442a65db859fa7475d
|
refs/heads/master
| 2020-09-14T23:41:23.163003
| 2019-12-18T22:08:58
| 2019-12-18T22:08:58
| 223,296,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,911
|
py
|
class Sue(object):
def __init__(self):
self.data = {
"Sue": None,
'children': None,
'cats': None,
'samoyeds': None,
'pomeranians': None,
'akitas': None,
'vizslas': None,
'goldfish': None,
'trees': None,
'cars': None,
'perfumes': None
}
mysue = Sue()
mysue.data = {
"Sue": None,
'children': 3,
'cats': 7,
'samoyeds': 2,
'pomeranians': 3,
'akitas': 0,
'vizslas': 0,
'goldfish': 5,
'trees': 3,
'cars': 2,
'perfumes': 1
}
list_of_sues = []
with open('data.txt', 'r') as infile:
    mydata = infile.readlines()
for line in mydata:
newsue = Sue()
line = line.split(" ")
line[-1] = line[-1].strip("\n")
for i, value in enumerate(line):
if i % 2 == 1:
continue
else:
value = value.strip(":")
newsue.data[value] = int(line[i+1].strip(':,'))
list_of_sues.append(newsue)
for target in list_of_sues:
flag = 1
for k,v in target.data.items():
if v is None or k == "Sue":
continue
else:
dat = mysue.data[k]
if dat is None:
continue
else:
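                # cats/trees readings are "greater than" and
                # pomeranians/goldfish are "fewer than", so exact matches
                # are rejected for those fields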
if k == "cats" or k == "trees":
if v <= dat:
flag = 0
elif k == 'goldfish' or k == 'pomeranians':
if v >= dat:
flag = 0
elif dat != v:
flag = 0
if flag:
print(target.data['Sue'])
|
[
"noreply@github.com"
] |
noreply@github.com
|
75e88cff3fe29feac841a80d10159553491a9f31
|
8c11776fc0e57b64eaae57ad858ee1a17bc7e468
|
/classes/room.py
|
8c6806ab0319e5da4a5589d0afa80882dcc32578
|
[] |
no_license
|
sluisdejesus/weekend_02_homework
|
1e6c42c595b1d839b06a24a7de49fe00af46f93f
|
7147fae8d9f42deb9e2cd7c9ad26be6dafee2a93
|
refs/heads/main
| 2023-06-05T16:34:30.715855
| 2021-06-27T20:42:37
| 2021-06-27T20:42:37
| 380,834,438
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 688
|
py
|
class Room:
def __init__(self, name, capacity, entry_fee):
self.name = name
self.capacity = capacity
self.entry_fee = entry_fee
self.songlist = []
self.guest_list =[]
def guest_list_length(self):
return len(self.guest_list)
def add_guest(self, guest):
self.guest_list.append(guest)
    def check_guest_in(self, guest):
        self.capacity -= 1
        self.guest_list.append(guest)
    def check_guest_out(self, guest):
        self.capacity += 1
        self.guest_list.remove(guest)
def add_song(self, song):
self.songlist.append(song)
def song_list_length(self):
return len(self.songlist)
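# Minimal usage sketch (added for illustration; the string guest stands in
# for whatever guest object the rest of the homework passes in):
if __name__ == '__main__':
    room = Room('Blue Room', capacity=10, entry_fee=5)
    room.check_guest_in('Alice')
    room.add_song('Dancing Queen')
    print(room.guest_list_length(), room.song_list_length(), room.capacity)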
|
[
"sluisdejesus@gmail.com"
] |
sluisdejesus@gmail.com
|
b68050786382db1605fb5824277e5debdaa21223
|
c9829e9c06ac4f3c7de5056816373dd9239b32f9
|
/FIBD/fibd_nick2.py
|
e10303797467018ec1c395fe577e813fc289a4a1
|
[] |
no_license
|
KorfLab/Rosalind
|
74a6e741c7029adfa9b22d5726290aabbcd8c82a
|
ac6bcac289d56c88bcff524329b6905d7dc32c8e
|
refs/heads/master
| 2020-06-10T11:23:31.438577
| 2017-04-04T03:08:27
| 2017-04-04T03:08:27
| 75,968,327
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 889
|
py
|
#!/usr/bin/env python2.7
#fibd_nick2.py
#actual recursion which is bad for runtime
#adults
def rab_a(n,m):
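    # adults(n) = adults(n-1) + last month's newborns maturing,
    # minus the cohort born m months ago that dies of old age now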
if n < 2:
return 0
else:
return rab_a(n-1,m) + rab_y(n-1,m) - rab_y(n-m,m)
#young
def rab_y(n,m):
if n == 0:
return 0
elif n == 1:
return 1
else:
return rab_a(n-1,m)
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description =
"""Determine the number of rabbits given number of months in which they
reproduce and a reproductive age of one month and the number of months
they live.""")
parser.add_argument("n", type=int, help="integer number of months")
parser.add_argument("m", type=int, help="integer number of months rabbits live")
args = parser.parse_args()
n = args.n
m = args.m
result = rab_a(n,m) + rab_y(n,m)
print result
|
[
"noreply@github.com"
] |
noreply@github.com
|
c973341b4f11c03a21a805bbdb4c380ecfb3f7b0
|
064370ae71f0c36acafcf3e862623b16768f4f5f
|
/profiles/admin.py
|
606cf2db8bf04f7a1236c4d4d00b1c6818bea6ed
|
[] |
no_license
|
Seishin/thesis-server
|
c2d6310679c13e870f7563a58583db59377c6164
|
f50220083805d1249a3403c8c67842258d808ab3
|
refs/heads/master
| 2021-01-18T18:16:52.703827
| 2013-10-14T18:50:54
| 2013-10-14T18:50:54
| 13,569,870
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
from django.contrib import admin
from profiles.models import *
admin.site.register(Profile)
admin.site.register(Student)
admin.site.register(Faculty)
admin.site.register(Major)
admin.site.register(Mark)
|
[
"seishin90@gmail.com"
] |
seishin90@gmail.com
|
264bf2869c0e3418827933e5e74ceb8aca07fb31
|
467db1b130cf66ed614bf79aaf3d6c8e614ed279
|
/Quesko/Quesko/asgi.py
|
dbc1ed039d2a809f3ac90c0eaf2b5470c1633578
|
[] |
no_license
|
prabhat510/Quesko
|
c84b2894122ca8896dc910fcf7b7f76a1f2dfe91
|
39a1c7f4e9835b895b57a90267282ba84fa5756e
|
refs/heads/master
| 2023-06-06T22:29:31.102631
| 2021-06-29T04:36:13
| 2021-06-29T04:36:13
| 380,930,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
"""
ASGI config for Quesko project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Quesko.settings')
application = get_asgi_application()
|
[
"prabhatbhargava510@gmail.com"
] |
prabhatbhargava510@gmail.com
|
51bf928b89bd5bd6aae923b7e82be036bc0ee86b
|
6c646968638c7dc8702059cf9820ff5d8170b97f
|
/ML_HW3_Maxent.py
|
8f9da2f787e90ab14da66a24ba730af60a64c91d
|
[] |
no_license
|
AnushreeDesai35/ML_LogLinearModel
|
b131d9ed638c612e26ab9e1259993c23aeb19e01
|
1814f3dc177c80c99b65effddeb261a9ff7f1d89
|
refs/heads/master
| 2020-05-07T15:55:01.562397
| 2019-04-10T20:38:00
| 2019-04-10T20:38:00
| 180,659,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,877
|
py
|
#%% Change working directory from the workspace root to the ipynb file location. Turn this addition off with the DataScience.changeDirOnImportExport setting
import os
try:
os.chdir(os.path.join(os.getcwd(), 'ML_HW3_Anushree_Desai_Code'))
print(os.getcwd())
except:
pass
#%%
import gzip, pickle
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import pandas as pd
from sklearn.metrics import classification_report
import seaborn as sn
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
with gzip.open('mnist_rowmajor.pkl.gz', 'rb') as data_fh:
data = pickle.load(data_fh, encoding='latin1')
train_images = data['images_train']
train_labels = data['labels_train']
images_train, images_dev, labels_train, labels_dev = train_test_split(train_images, train_labels, test_size=0.20, random_state=4)
images_test = data['images_test']
labels_test = data['labels_test']
# print(len(images_train))
# print(len(images_dev))
# print(len(labels_train))
# print(len(labels_dev))
# print(len(images_test))
# print(len(labels_test))
TRAIN_LENGTH = len(images_train)
DEV_LENGTH = len(images_dev)
TEST_LENGTH = len(images_test)
#%%
# Feature 1: Signing images
feature1_training_set = np.empty((TRAIN_LENGTH, 784))
for idx, i in enumerate(images_train):
signed_image_train = list(map((lambda x: 1 if (x > 0) else 0), i))
feature1_training_set[idx] = signed_image_train
feature1_dev_set = np.empty((DEV_LENGTH, 784))
for idx, i in enumerate(images_dev):
signed_image_dev = list(map((lambda x: 1 if (x > 0) else 0), i))
feature1_dev_set[idx] = signed_image_dev
#%%
feature1_test_set = np.zeros((TEST_LENGTH, 784))
for idx, i in enumerate(images_test):
signed_image_test = list(map((lambda x: 1 if (x > 0) else 0), i))
feature1_test_set[idx] = signed_image_test
complete_training = np.zeros((60000, 784))
for idx, i in enumerate(train_images):
temp = list(map((lambda x: 1 if (x > 0) else 0), i))
complete_training[idx] = temp
#%%
# Feature 2: pairwise feature, 1 where two consecutive pixels are both > 0
def transform(row):
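    # arr[k] = 1 when consecutive pixels k and k+1 of the flattened
    # row-major image are both "on" (a crude adjacency feature)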
arr = np.zeros((783))
for k in range(len(row) - 1):
if(row[k] > 0 and row[k + 1] > 0):
arr[k] = 1
else:
arr[k] = 0
return arr
feature2_training_set = np.zeros((TRAIN_LENGTH, 783))
for idx, image in enumerate(images_train):
image = transform(image)
feature2_training_set[idx] = image
feature2_dev_set = np.zeros((DEV_LENGTH, 783))
for idx, image in enumerate(images_dev):
image = transform(image)
feature2_dev_set[idx] = image
#%%
def experimentEvaluation(y_correct, y_pred):
cm = confusion_matrix(y_correct.flatten(), y_pred.flatten())
df_cm = pd.DataFrame(cm.astype(int), range(10), range(10))
plt.figure(figsize = (10,10))
sn.heatmap(df_cm, annot=True, annot_kws={"size": 16}, fmt="d")
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.show()
accuracy = accuracy_score(y_correct.flatten(), y_pred.flatten())
print('Accuracy: ', accuracy)
print(classification_report(y_correct.flatten(), y_pred.flatten()))
#%%
# Configuration 1: feature = signed images, regularization = l1
logisticRegression = LogisticRegression(penalty='l1', solver='liblinear')  # liblinear supports the l1 penalty
logisticRegression.fit(feature1_training_set, labels_train)
predictionsConfig1 = logisticRegression.predict(feature1_dev_set)
experimentEvaluation(labels_dev, predictionsConfig1)
#%%
# Configuration 2: feature = signed images, regularization = l2
logisticRegression = LogisticRegression(penalty = 'l2')
logisticRegression.fit(feature1_training_set, labels_train)
predictionsConfig2 = logisticRegression.predict(feature1_dev_set)
experimentEvaluation(labels_dev, predictionsConfig2)
#%%
# Configuration 3: feature = transformed images, regularization = l1
logisticRegression = LogisticRegression(penalty='l1', solver='liblinear')  # liblinear supports the l1 penalty
logisticRegression.fit(feature2_training_set, labels_train)
predictionsConfig3 = logisticRegression.predict(feature2_dev_set)
experimentEvaluation(labels_dev, predictionsConfig3)
#%%
# Configuration 4: feature = transformed images, regularization = l2
logisticRegression = LogisticRegression(penalty = 'l2')
logisticRegression.fit(feature2_training_set, labels_train)
predictionsConfig4 = logisticRegression.predict(feature2_dev_set)
experimentEvaluation(labels_dev, predictionsConfig4)
#%%
# Testing on Test Data
training_set = np.concatenate((feature1_training_set, feature1_dev_set), axis=0)
# print(training_set.shape)
# print(np.concatenate((labels_train, labels_dev), axis=0).shape)
logisticRegression = LogisticRegression(penalty='l1', solver='liblinear')  # liblinear supports the l1 penalty
logisticRegression.fit(complete_training, train_labels)
predictions = logisticRegression.predict(feature1_test_set)
experimentEvaluation(labels_test, predictions.reshape((10000, 1)))
|
[
"anushreerdesai@gmail.com"
] |
anushreerdesai@gmail.com
|
90ca6cd3d511b423fe14fa6ce58530a108c3b1c8
|
525dbcabc7bc0103c25d31e665234d7288e2b109
|
/spider/ArticleSpider/ArticleSpider/spiders/fang.py
|
8555decd5c48703e0d538e3803b5ff019c346ff0
|
[] |
no_license
|
zihanbobo/spider
|
8ffce0bc33c2be52f27a3c5ede4953e903c2ae08
|
bfe14b64d4bc308e3625217ca10b8844628d0777
|
refs/heads/master
| 2022-10-29T19:05:39.504154
| 2020-06-22T07:20:12
| 2020-06-22T07:20:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,553
|
py
|
# -*- coding: utf-8 -*-
import scrapy
import re
import copy
from urllib import parse
class FangSpider(scrapy.Spider):
name = 'fang'
allowed_domains = ['fang.com','esf.fang.com']
start_urls = ['https://www.fang.com/SoufunFamily.htm']
def parse(self, response):
trs = response.xpath('//div[@class="outCont"]//tr')
province = None
for tr in trs:
tds = tr.xpath('.//td[not(@class)]')
province_text = tds[0].xpath('.//text()').extract_first()
province_text = re.sub('\s','',province_text)
if province_text:
province = province_text
            if province == "其它":  # "其它" means "Other"; skip that bucket
                continue
for a in tds[1].xpath('.//a'):
city = a.xpath('.//text()').extract_first()
city_link = a.xpath('.//@href').extract_first()
url_model = re.split('//',city_link)
scheme = url_model[0]
domain = url_model[1]
city_id = domain.split('.')[0]
if 'bj.' in domain:
new_house_link = 'https://newhouse.fang.com/house/s/'
esf_link = 'https://esf.fang.com/'
                else:
                    # build the new-house listing URL
                    new_house_link = scheme+'//'+city_id+'.newhouse.'+'fang.com/house/s/'
                    # build the second-hand (resale) listing URL
                    # esf_link = scheme+'//'+'city_id'+domain
                    esf_link = 'https://{}.zu.fang.com/house/a21/'.format(city_id)
print(esf_link)
# yield scrapy.Request(url=new_house_link,callback=self.parse_newhouse,\
# meta={'info':copy.deepcopy((province,city))})
# yield scrapy.Request(url=esf_link,callback=self.parse_esf,dont_filter=True,meta={'info': copy.deepcopy((province, city))})
break
break
def parse_newhouse(self,response):
# item = {}
# province,city = response.meta['info']
# item['provice'] = province
# item['city'] = city
# li_list = response.xpath('//div[contains(@class,"nl_con")]/ul/li')
# for li in li_list:
# item['name'] = li.xpath('.//div[@class="nlcd_name"]/a/text()').extract_first()
# if item['name']:
# item['name'] = item['name'].strip()
# item['price'] = li.xpath('.//div[contains(@class,"nhouse_price")]//text()').extract()
# if item['price']:
# item['price'] = re.sub('\s|\n|\t|广告','',''.join(item['price']))
# else:
# item['price'] = ''
# if li.xpath('.//div[contains(@class,"house_type")]/a/text()').extract():
# item['rooms'] = ','.join(list(filter(lambda x:x.endswith("居"),li.xpath('.//div[contains(@class,"house_type")]/a/text()').extract())))
# area = li.xpath('.//div[contains(@class,"house_type")]/text()').extract()
# if area:
# item['area'] = re.sub('\n|\t|\s|/|-', '', ''.join(area))
# else:
# item['area'] =''
# item['address'] = li.xpath('.//div[contains(@class,"address")]/a/@title').extract_first()
# district = re.sub('\n|\t|\s','',''.join(li.xpath('.//div[contains(@class,"address")]/a//text()').extract()))
# item['district'] = re.findall(r'.*\[(.+)\].*',district,re.S)
# if item['district']:
# item['district'] = item['district'][0]
# item['detail_link'] = li.xpath('.//div[contains(@class,"address")]/a/@href')
# if item['detail_link']:
# item['detail_link'] = 'https:'+item['detail_link'].extract_first()
# else:
# item['detail_link'] = ''
# item['sale'] = li.xpath('.//div[contains(@class,"fangyuan")]/span/text()').extract_first()
# yield item
# print(item)
print(response.url)
# next_url = response.xpath('//a[@class="next"]/@href').extract_first()
# # if next_url:
# # yield scrapy.Request(url=parse.urljoin(response.url,next_url),callback=self.parse_newhouse,meta={'info':copy.deepcopy((province,city))})
def parse_esf(self,response):
print(response.url)
# item = {}
# province, city = response.meta['info']
# item['provice'] = province
# item['city'] = city
# dls = response.xpath('//div[contains(@class,"shop_list")]/dl')
# for dl in dls:
# name = dl.xpath('./dd//a/span/text()').extract_first()
# if name:
# item['name'] = name.strip()
# else:
# item['name'] = ''
# info = ','.join(dl.xpath('.//p[@class="tel_shop"]/text()').extract())
#
# if info:
# infos = re.sub('\r|\n|\s','',info).split(',')
# rooms = None
# area = None
# toward = None
# floor = None
# year = None
# for inf in infos:
# if '厅' in inf:
# rooms = inf
# elif 'm' in inf:
# area = inf
# elif '层' in inf:
# floor = inf
# elif '向' in inf:
# toward = inf
# else :
# year = inf
# print(rooms, area, floor, toward, year)
|
[
"1723742002@qq.com"
] |
1723742002@qq.com
|
996bbdb7c1851bb11a386d797fe76ac642f526bd
|
8e096d61a991896d12ddcbea45481350610b70f9
|
/mysite/mysite/settings.py
|
3b6059f9f0d9af3357feffcb9d854df46efd2620
|
[] |
no_license
|
belac2014/Distributed-web
|
73f0317b55676e6f8b36dee082014c2047a8f814
|
dd73f5ff91d6a19866bf09a15feae3a5041bc999
|
refs/heads/master
| 2021-01-18T22:13:56.073045
| 2016-10-30T15:48:25
| 2016-10-30T15:48:25
| 72,358,270
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,166
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4$b()gjk^xcrqf4ez8%g@h0j4bvf6o-7fr&2xp7l9*vz!^^d!v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'US/Eastern'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
|
[
"noreply@github.com"
] |
noreply@github.com
|
552c2eeb9f0700849e6c8187b82028428a7bf597
|
a4ab4d9fa6cfcc40735d3ce36b44bb75761bafcb
|
/train.py
|
20890b22f01f1b24de05a897f501b3792d0bbb41
|
[
"MIT"
] |
permissive
|
RemusMaz/tensorflow-deeplab-v3
|
5d2ee1e262777820682a8cf874d3962de47a7488
|
ceef67876c73d80d225817e5fbf7d640b63a7a9a
|
refs/heads/master
| 2020-06-22T22:11:55.472815
| 2019-07-23T11:09:25
| 2019-07-23T11:09:25
| 198,413,089
| 0
| 0
|
MIT
| 2019-07-23T11:03:10
| 2019-07-23T11:03:10
| null |
UTF-8
|
Python
| false
| false
| 10,484
|
py
|
"""Train a DeepLab v3 model using tf.estimator API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tensorflow as tf
import deeplab_model
from utils import preprocessing
from tensorflow.python import debug as tf_debug
import shutil
parser = argparse.ArgumentParser()
parser.add_argument('--model_dir', type=str, default='./model',
help='Base directory for the model.')
parser.add_argument('--clean_model_dir', action='store_true',
help='Whether to clean up the model directory if present.')
parser.add_argument('--train_epochs', type=int, default=26,
help='Number of training epochs: '
'For 30K iteration with batch size 6, train_epoch = 17.01 (= 30K * 6 / 10,582). '
'For 30K iteration with batch size 8, train_epoch = 22.68 (= 30K * 8 / 10,582). '
'For 30K iteration with batch size 10, train_epoch = 25.52 (= 30K * 10 / 10,582). '
'For 30K iteration with batch size 11, train_epoch = 31.19 (= 30K * 11 / 10,582). '
'For 30K iteration with batch size 15, train_epoch = 42.53 (= 30K * 15 / 10,582). '
'For 30K iteration with batch size 16, train_epoch = 45.36 (= 30K * 16 / 10,582).')
parser.add_argument('--epochs_per_eval', type=int, default=1,
help='The number of training epochs to run between evaluations.')
parser.add_argument('--tensorboard_images_max_outputs', type=int, default=6,
help='Max number of batch elements to generate for Tensorboard.')
parser.add_argument('--batch_size', type=int, default=10,
help='Number of examples per batch.')
parser.add_argument('--learning_rate_policy', type=str, default='poly',
choices=['poly', 'piecewise'],
help='Learning rate policy to optimize loss.')
parser.add_argument('--max_iter', type=int, default=30000,
help='Number of maximum iteration used for "poly" learning rate policy.')
parser.add_argument('--data_dir', type=str, default='./dataset/',
help='Path to the directory containing the PASCAL VOC data tf record.')
parser.add_argument('--base_architecture', type=str, default='resnet_v2_101',
choices=['resnet_v2_50', 'resnet_v2_101'],
help='The architecture of base Resnet building block.')
parser.add_argument('--pre_trained_model', type=str, default='./ini_checkpoints/resnet_v2_101/resnet_v2_101.ckpt',
help='Path to the pre-trained model checkpoint.')
parser.add_argument('--output_stride', type=int, default=16,
choices=[8, 16],
help='Output stride for DeepLab v3. Currently 8 or 16 is supported.')
parser.add_argument('--freeze_batch_norm', action='store_true',
help='Freeze batch normalization parameters during the training.')
parser.add_argument('--initial_learning_rate', type=float, default=7e-3,
help='Initial learning rate for the optimizer.')
parser.add_argument('--end_learning_rate', type=float, default=1e-6,
help='End learning rate for the optimizer.')
parser.add_argument('--initial_global_step', type=int, default=0,
help='Initial global step for controlling learning rate when fine-tuning model.')
parser.add_argument('--weight_decay', type=float, default=2e-4,
help='The weight decay to use for regularizing the model.')
parser.add_argument('--debug', action='store_true',
help='Whether to use debugger to track down bad values during training.')
_NUM_CLASSES = 21
_HEIGHT = 513
_WIDTH = 513
_DEPTH = 3
_MIN_SCALE = 0.5
_MAX_SCALE = 2.0
_IGNORE_LABEL = 255
_POWER = 0.9
_MOMENTUM = 0.9
_BATCH_NORM_DECAY = 0.9997
_NUM_IMAGES = {
'train': 10582,
'validation': 1449,
}
def get_filenames(is_training, data_dir):
"""Return a list of filenames.
Args:
is_training: A boolean denoting whether the input is for training.
    data_dir: path to the directory containing the input data.
Returns:
A list of file names.
"""
if is_training:
return [os.path.join(data_dir, 'voc_train.record')]
else:
return [os.path.join(data_dir, 'voc_val.record')]
def parse_record(raw_record):
"""Parse PASCAL image and label from a tf record."""
keys_to_features = {
'image/height':
tf.FixedLenFeature((), tf.int64),
'image/width':
tf.FixedLenFeature((), tf.int64),
'image/encoded':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/format':
tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'label/encoded':
tf.FixedLenFeature((), tf.string, default_value=''),
'label/format':
tf.FixedLenFeature((), tf.string, default_value='png'),
}
parsed = tf.parse_single_example(raw_record, keys_to_features)
# height = tf.cast(parsed['image/height'], tf.int32)
# width = tf.cast(parsed['image/width'], tf.int32)
image = tf.image.decode_image(
tf.reshape(parsed['image/encoded'], shape=[]), _DEPTH)
image = tf.to_float(tf.image.convert_image_dtype(image, dtype=tf.uint8))
image.set_shape([None, None, 3])
label = tf.image.decode_image(
tf.reshape(parsed['label/encoded'], shape=[]), 1)
label = tf.to_int32(tf.image.convert_image_dtype(label, dtype=tf.uint8))
label.set_shape([None, None, 1])
return image, label
def preprocess_image(image, label, is_training):
"""Preprocess a single image of layout [height, width, depth]."""
if is_training:
# Randomly scale the image and label.
image, label = preprocessing.random_rescale_image_and_label(
image, label, _MIN_SCALE, _MAX_SCALE)
# Randomly crop or pad a [_HEIGHT, _WIDTH] section of the image and label.
image, label = preprocessing.random_crop_or_pad_image_and_label(
image, label, _HEIGHT, _WIDTH, _IGNORE_LABEL)
# Randomly flip the image and label horizontally.
image, label = preprocessing.random_flip_left_right_image_and_label(
image, label)
image.set_shape([_HEIGHT, _WIDTH, 3])
label.set_shape([_HEIGHT, _WIDTH, 1])
image = preprocessing.mean_image_subtraction(image)
return image, label
def input_fn(is_training, data_dir, batch_size, num_epochs=1):
"""Input_fn using the tf.data input pipeline for CIFAR-10 dataset.
Args:
is_training: A boolean denoting whether the input is for training.
data_dir: The directory containing the input data.
batch_size: The number of samples per batch.
num_epochs: The number of epochs to repeat the dataset.
Returns:
A tuple of images and labels.
"""
dataset = tf.data.Dataset.from_tensor_slices(get_filenames(is_training, data_dir))
dataset = dataset.flat_map(tf.data.TFRecordDataset)
if is_training:
    # When choosing shuffle buffer sizes, larger sizes result in better
    # randomness, while smaller sizes have better performance. Because this
    # is a relatively small dataset, we choose to shuffle the full epoch.
dataset = dataset.shuffle(buffer_size=_NUM_IMAGES['train'])
dataset = dataset.map(parse_record)
dataset = dataset.map(
lambda image, label: preprocess_image(image, label, is_training))
dataset = dataset.prefetch(batch_size)
# We call repeat after shuffling, rather than before, to prevent separate
# epochs from blending together.
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
images, labels = iterator.get_next()
return images, labels
def main(unused_argv):
# Using the Winograd non-fused algorithms provides a small performance boost.
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
if FLAGS.clean_model_dir:
shutil.rmtree(FLAGS.model_dir, ignore_errors=True)
# Set up a RunConfig to only save checkpoints once per training cycle.
run_config = tf.estimator.RunConfig().replace(save_checkpoints_secs=1e9)
model = tf.estimator.Estimator(
model_fn=deeplab_model.deeplabv3_model_fn,
model_dir=FLAGS.model_dir,
config=run_config,
params={
'output_stride': FLAGS.output_stride,
'batch_size': FLAGS.batch_size,
'base_architecture': FLAGS.base_architecture,
'pre_trained_model': FLAGS.pre_trained_model,
'batch_norm_decay': _BATCH_NORM_DECAY,
'num_classes': _NUM_CLASSES,
'tensorboard_images_max_outputs': FLAGS.tensorboard_images_max_outputs,
'weight_decay': FLAGS.weight_decay,
'learning_rate_policy': FLAGS.learning_rate_policy,
'num_train': _NUM_IMAGES['train'],
'initial_learning_rate': FLAGS.initial_learning_rate,
'max_iter': FLAGS.max_iter,
'end_learning_rate': FLAGS.end_learning_rate,
'power': _POWER,
'momentum': _MOMENTUM,
'freeze_batch_norm': FLAGS.freeze_batch_norm,
'initial_global_step': FLAGS.initial_global_step
})
for _ in range(FLAGS.train_epochs // FLAGS.epochs_per_eval):
tensors_to_log = {
'learning_rate': 'learning_rate',
'cross_entropy': 'cross_entropy',
'train_px_accuracy': 'train_px_accuracy',
'train_mean_iou': 'train_mean_iou',
}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=10)
train_hooks = [logging_hook]
eval_hooks = None
if FLAGS.debug:
debug_hook = tf_debug.LocalCLIDebugHook()
train_hooks.append(debug_hook)
eval_hooks = [debug_hook]
tf.logging.info("Start training.")
model.train(
input_fn=lambda: input_fn(True, FLAGS.data_dir, FLAGS.batch_size, FLAGS.epochs_per_eval),
hooks=train_hooks,
# steps=1 # For debug
)
tf.logging.info("Start evaluation.")
# Evaluate the model and print results
eval_results = model.evaluate(
# Batch size must be 1 for testing because the images' size differs
input_fn=lambda: input_fn(False, FLAGS.data_dir, 1),
hooks=eval_hooks,
# steps=1 # For debug
)
print(eval_results)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
[
"rishizek@gmail.com"
] |
rishizek@gmail.com
|
14c287e40ef9f07fe3dd6944c53d3460a99de7cb
|
85b7487c00cabf70cbcf180c5015ac4886e78fb1
|
/test/support/__init__.py
|
bdbdb8f5f19075bc664a148dbdf532d577c3550c
|
[] |
no_license
|
mkatsimpris/test_jpeg
|
7e686f27ac54db4128f4edbeb42b7cd284db0fa4
|
ee626d87e26a08d5ce80f73a883f00703ff34e70
|
refs/heads/master
| 2020-04-06T04:49:58.952565
| 2016-08-17T21:41:25
| 2016-08-17T21:41:25
| 49,828,665
| 3
| 2
| null | 2016-07-25T16:50:52
| 2016-01-17T17:58:21
|
Verilog
|
UTF-8
|
Python
| false
| false
| 254
|
py
|
from __future__ import absolute_import
from .jpeg_prep_cosim import prep_cosim
from .jpeg_v1_intf import JPEGEncV1
from .jpeg_v2_intf import JPEGEncV2
from .jpegenc_v1_top import convert as convertv1
from .utils import set_default_args, get_cli_args
|
[
"chris.felton@gmail.com"
] |
chris.felton@gmail.com
|
ea73fc00d79fab77b32fe0f24d9a8ff83f5dd9d9
|
a50b0a95ea78261db784f6b18c2c261586ade594
|
/561L_output_to_BigWig_GFF3.py
|
3d1e7e679dc81986444b2556a8b8f63e74d0cdee
|
[] |
no_license
|
moss-lab/561L
|
b0763d6a383afb167ac30ed6f033156ddbc10d60
|
16e3da9b8f48be0da3c25b5a33b97f6407e33b26
|
refs/heads/master
| 2022-02-11T01:17:54.895702
| 2021-11-05T18:45:58
| 2021-11-05T18:45:58
| 119,561,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,784
|
py
|
#!/usr/bin/python3.6 -w
# This program takes the output from the ZScore_calculation.pl program and
# converts it into BigWig tracks and a GFF3 annotation file.
#
# Usage:
#
# $ python3.6 thisScript.py inputfile
#
import sys # this will allow for the use of system argument inputs
import re
import pyBigWig
filename = sys.argv[1] # this should be the output of a z-score analysis in tab-delimited format
#output = sys.argv[2]
gene_coordinate = input('What are the coordinates of your gene (e.g., chr1:2555639..2565382): ')
#chromosome = str(input("What chromsome is your gene on? (e.g., chr1): "))
#genomic_start = int(input("What is the starting coordinate of your gene sequence? (e.g., 2555639; without commas): " ))
step_size = int(input("What is the step size for your data? (e.g., 1): "))
window_size = int(input("What is the window size for your data? (e.g., 120): "))
strand = input("What strand is your gene on (+ or -)?: ")
chromosome_data = re.split('\:', gene_coordinate)
chromosome = chromosome_data[0]
genomic_coordinates = re.split('\.\.', chromosome_data[1])
genomic_start = genomic_coordinates[0]
genomic_end = genomic_coordinates[1]
# Create and open output files for writing
MFE_wig = pyBigWig.open(filename+'.strand('+strand+')_MFE.bw', 'w')
zscore_wig = pyBigWig.open(filename+'.strand('+strand+')_zscore.bw', 'w')
pscore_wig = pyBigWig.open(filename+'.strand('+strand+')_pvalue.bw', 'w')
ED_wig = pyBigWig.open(filename+'.strand('+strand+')_Ed.bw', 'w')
fMFE_wig = pyBigWig.open(filename+'.strand('+strand+')_fMFE.bw', 'w')
gff3file = open(filename+'.strand('+strand+').gff3', 'w')
#corrected_file = open(filename+'.corrected.txt', 'w')
# Write header for corrected file:
#corrected_file.write("i\tj\tMFE\trandomMFE\tZscore\tPscore\tED\tfMFE\tSequence\tFold\tCentroid\t#A\t#G\t#C\t#U\n")
# chromosome sizes shared by all five BigWig headers
chrom_sizes = [("chr1",248956422),("chr2",242193529),("chr3",198295559),("chr4",190214555),("chr5",181538259),("chr6",170805979),("chr7",159345973),("chr8",145138636),("chr9",138394717),("chr10",133797422),("chr11",135086622),("chr12",133851895),("chr13",115169878),("chr14",107349540),("chr15",102531392),("chr16",90354753),("chr17",107349540),("chr18",78077248),("chr19",59128983),("chr20",63025520),("chr21",48129895),("chr22",51304566),("chrX",155270560),("chrY",59373566)]
MFE_wig.addHeader(chrom_sizes)
zscore_wig.addHeader(chrom_sizes)
pscore_wig.addHeader(chrom_sizes)
ED_wig.addHeader(chrom_sizes)
fMFE_wig.addHeader(chrom_sizes)
MFE_list = []
zscore_list = []
pscore_list = []
ED_list = []
fMFE_list = []
length = (int(genomic_end) - int(genomic_start)) + 1
#print(length)
with open(filename, 'r') as g:
if strand == "+": # Generating GFF3 file for forward strand
glines = g.readlines()[1:]
#print(row)
for row in glines:
if not row.strip():
continue
else:
gdata = row.split('\t') # this splits each row based on "tab"
#print(len(str(gdata[8])), window_size)
if ((len(str(gdata[8])) == int(window_size)) and (len(str(gdata[9])) == int(window_size)) and (len(str(gdata[10])) == int(window_size)) and (float(gdata[3]) != 0)) or ((len(str(gdata[9])) == window_size) and (len(str(gdata[10])) == window_size) and (len(str(gdata[11])) == window_size) and (float(gdata[4]) == 0)):
#print("Sequence in column 9")
#float(gdata[7])
gdata = row.split('\t') # this splits each row based on "tab"
if len(gdata) > 15:
print("Errors found in file")
print("Error in column six:", gdata)
gdata.remove(gdata[5])
#corrected_row = ('\t'.join(data))
print("Error removed:", gdata)
#print(row)
#icoordinate = int((int(data[0])+int(genomic_start)-1))
#jcoordinate = int(int(data[1])+int(genomic_start))
icoordinate = (int(gdata[0])-1)+int(genomic_start)
jcoordinate = (int(gdata[1])-1)+int(genomic_start)
# if strand == '-1':
# icoordinate = int(int(genomic_start)+(int(length)-int(data[1])))
# jcoordinate = int(int(genomic_start)+(int(length)-int(data[0])))
gMFE = float(gdata[2])
grand_MFE = float(gdata[3])
gzscore = gdata[4]
if gzscore == "Undef":
gzscore = float(00000)
else:
gzscore = float(gdata[4])
gpvalue = gdata[5]
#try:
# pvalue = float(gdata[5])
#except ValueError:
# print(str(gdata[5]))
# pvalue =float(0)
gED = float(gdata[6])
gfMFE = float(gdata[7])
gsequence = gdata[8]
gfold = gdata[9]
gcentroid = gdata[10]
gfA = gdata[11]
gfG = gdata[12]
gfC = gdata[13]
gfU = gdata[14]
#corrected_file.write(gdata[0]+'\t'+gdata[1]+'\t'+gdata[2]+'\t'+gdata[3]+'\t'+gdata[4]+'\t'+gdata[5]+'\t'+gdata[6]+'\t'+gdata[7]+'\t'+gdata[8]+'\t'+gdata[9]+'\t'+gdata[10]+'\t'+gdata[11]+'\t'+gdata[12]+'\t'+gdata[13]+'\t'+gdata[14])
gff3file.write(chromosome+'\t'+'.'+'\t'+'sequence_attribute'+'\t'+str(icoordinate)+'\t'+str(jcoordinate)+'\t'+'.'+'\t'+strand+'\t'+'.\t'+'MFE='+str(gMFE)+';'+'Z-score='+str(gzscore)+';'+'P-value='+str(gpvalue)+';'+'EnsDiv='+str(gED)+';'+'fMFE='+str(gfMFE)+';'+'Sequence='+gsequence+';'+'MFE_Fold='+gfold+';'+'Centroid='+gcentroid+'\n')
else:
#print("passed length test")
icoordinate = (int(gdata[0])-1)+int(genomic_start)
jcoordinate = (int(gdata[1])-1)+int(genomic_start)
# if strand == '-1':
# icoordinate = int(int(genomic_start)+(int(length)-int(data[1])))
# jcoordinate = int(int(genomic_start)+(int(length)-int(data[0])))
gMFE = float(gdata[2])
grand_MFE = float(gdata[3])
gzscore = gdata[4]
if gzscore == "Undef":
gzscore = float(00000)
else:
gzscore = float(gdata[4])
gpvalue = gdata[5]
try:
gpvalue = float(gdata[5])
except ValueError:
print(str(gdata[5]))
gpvalue =float(0)
gED = float(gdata[6])
gfMFE = float(gdata[7])
gsequence = gdata[8]
gfold = gdata[9]
gcentroid = gdata[10]
gfA = gdata[11]
gfG = gdata[12]
gfC = gdata[13]
gfU = gdata[14]
#corrected_file.write(gdata[0]+'\t'+gdata[1]+'\t'+gdata[2]+'\t'+gdata[3]+'\t'+gdata[4]+'\t'+gdata[5]+'\t'+gdata[6]+'\t'+gdata[7]+'\t'+gdata[8]+'\t'+gdata[9]+'\t'+gdata[10]+'\t'+gdata[11]+'\t'+gdata[12]+'\t'+gdata[13]+'\t'+gdata[14])
gff3file.write(chromosome+'\t'+'.'+'\t'+'sequence_attribute'+'\t'+str(icoordinate)+'\t'+str(jcoordinate)+'\t'+'.'+'\t'+strand+'\t'+'.\t'+'MFE='+str(gMFE)+';'+'Z-score='+str(gzscore)+';'+'P-value='+str(gpvalue)+';'+'EnsDiv='+str(gED)+';'+'fMFE='+str(gfMFE)+';'+'Sequence='+gsequence+';'+'MFE_Fold='+gfold+';'+'Centroid='+gcentroid+'\n')
else:
#print("else")
if len(gdata) > 14:
print("Errors found in file")
print("Error in column five:", gdata)
gdata.remove(gdata[4])
#corrected_row = ('\t'.join(data))
print("Error removed:", gdata)
#print(row)
#icoordinate = int((int(data[0])+int(genomic_start)-1))
#jcoordinate = int(int(data[1])+int(genomic_start))
icoordinate = (int(gdata[0])-1)+int(genomic_start)
jcoordinate = (int(gdata[1])-1)+int(genomic_start)
# if strand == '-1':
# icoordinate = int(int(genomic_start)+(int(length)-int(data[1])))
# jcoordinate = int(int(genomic_start)+(int(length)-int(data[0])))
gMFE = float(gdata[2])
#rand_MFE = float(gdata[3])
gzscore = gdata[3]
if gzscore == "Undef":
gzscore = float(00000)
else:
gzscore = float(gdata[3])
gpvalue = gdata[4]
#try:
# pvalue = float(gdata[5])
#except ValueError:
# print(str(gdata[5]))
# pvalue =float(0)
gED = float(gdata[5])
gfMFE = float(gdata[6])
gsequence = gdata[7]
gfold = gdata[8]
gcentroid = gdata[9]
gfA = gdata[10]
gfG = gdata[11]
gfC = gdata[12]
gfU = gdata[13]
#corrected_file.write(gdata[0]+'\t'+gdata[1]+'\t'+gdata[2]+'\t'+gdata[3]+'\t'+gdata[4]+'\t'+gdata[5]+'\t'+gdata[6]+'\t'+gdata[7]+'\t'+gdata[8]+'\t'+gdata[9]+'\t'+gdata[10]+'\t'+gdata[11]+'\t'+gdata[12]+'\t'+gdata[13]+'\t'+gdata[14])
gff3file.write(chromosome+'\t'+'.'+'\t'+'sequence_attribute'+'\t'+str(icoordinate)+'\t'+str(jcoordinate)+'\t'+'.'+'\t'+strand+'\t'+'.\t'+'MFE='+str(gMFE)+';'+'Z-score='+str(gzscore)+';'+'P-value='+str(gpvalue)+';'+'EnsDiv='+str(gED)+';'+'fMFE='+str(gfMFE)+';'+'Sequence='+gsequence+';'+'MFE_Fold='+gfold+';'+'Centroid='+gcentroid+'\n')
else:
#print(len(gdata))
icoordinate = (int(gdata[0])-1)+int(genomic_start)
jcoordinate = (int(gdata[1])-1)+int(genomic_start)
# if strand == '-1':
# icoordinate = int(int(genomic_start)+(int(length)-int(data[1])))
# jcoordinate = int(int(genomic_start)+(int(length)-int(data[0])))
gMFE = float(gdata[2])
#grand_MFE = float(gdata[3])
gzscore = gdata[3]
if gzscore == "Undef":
gzscore = float(00000)
else:
gzscore = float(gdata[3])
gpvalue = gdata[3]
try:
gpvalue = float(gdata[4])
except ValueError:
print(str(gdata[4]))
gpvalue =float(0)
gED = float(gdata[5])
gfMFE = float(gdata[6])
gsequence = gdata[7]
gfold = gdata[8]
gcentroid = gdata[9]
gfA = gdata[10]
gfG = gdata[11]
gfC = gdata[12]
gfU = gdata[13]
#corrected_file.write(gdata[0]+'\t'+gdata[1]+'\t'+gdata[2]+'\t'+gdata[3]+'\t'+gdata[4]+'\t'+gdata[5]+'\t'+gdata[6]+'\t'+gdata[7]+'\t'+gdata[8]+'\t'+gdata[9]+'\t'+gdata[10]+'\t'+gdata[11]+'\t'+gdata[12]+'\t'+gdata[13]+'\t'+gdata[14])
gff3file.write(chromosome+'\t'+'.'+'\t'+'sequence_attribute'+'\t'+str(icoordinate)+'\t'+str(jcoordinate)+'\t'+'.'+'\t'+strand+'\t'+'.\t'+'MFE='+str(gMFE)+';'+'Z-score='+str(gzscore)+';'+'P-value='+str(gpvalue)+';'+'EnsDiv='+str(gED)+';'+'fMFE='+str(gfMFE)+';'+'Sequence='+gsequence+';'+'MFE_Fold='+gfold+';'+'Centroid='+gcentroid+'\n')
if strand == "-": #Generating GFF3 file for reverse strand
glines = g.readlines()[1:]
for row in glines:
if not row.strip():
continue
else:
gdata = row.split('\t') # this splits each row based on "tab"
if ((len(str(gdata[8])) == int(window_size)) and (len(str(gdata[9])) == int(window_size)) and (len(str(gdata[10])) == int(window_size)) and (float(gdata[3]) != 0)) or ((len(str(gdata[9])) == window_size) and (len(str(gdata[10])) == window_size) and (len(str(gdata[11])) == window_size) and (float(gdata[4]) == 0)):
#float(gdata[7])
if len(gdata) > 15:
print("Errors found in file")
print("Error in column six:", gdata)
gdata.remove(gdata[5])
#corrected_row = ('\t'.join(data))
print("Error removed:", gdata)
#print(row)
#icoordinate = int((int(data[0])+int(genomic_start)-1))
#jcoordinate = int(int(data[1])+int(genomic_start))
#print(data)
#print(gdata)
#print(data)
icoordinate = int(int(genomic_start)+(int(length)-int(gdata[1])))
jcoordinate = int(int(genomic_start)+(int(length)-int(gdata[0])))
gMFE = gdata[2]
g_rand_MFE = gdata[3]
gzscore = gdata[4]
if gzscore == "Undef":
gzscore = 00000
gpvalue = gdata[5]
gED = gdata[6]
gfMFE = gdata[7]
gsequence = gdata[8]
gfold = gdata[9]
gcentroid = gdata[10]
fA = gdata[11]
fG = gdata[12]
fC = gdata[13]
fU = gdata[14]
#corrected_file.write(gdata[0]+'\t'+gdata[1]+'\t'+gdata[2]+'\t'+gdata[3]+'\t'+gdata[4]+'\t'+gdata[5]+'\t'+gdata[6]+'\t'+gdata[7]+'\t'+gdata[8]+'\t'+gdata[9]+'\t'+gdata[10]+'\t'+gdata[11]+'\t'+gdata[12]+'\t'+gdata[13]+'\t'+gdata[14])
gff3file.write(chromosome+'\t'+'.'+'\t'+'sequence_attribute'+'\t'+str(icoordinate)+'\t'+str(jcoordinate)+'\t'+'.'+'\t'+strand+'\t'+'.\t'+'MFE='+str(gMFE)+';'+'Z-score='+str(gzscore)+';'+'P-value='+str(gpvalue)+';'+'EnsDiv='+str(gED)+';'+'fMFE='+str(gfMFE)+';'+'Sequence='+gsequence+';'+'MFE_Fold='+gfold+';'+'Centroid='+gcentroid+'\n')
else:
icoordinate = int(int(genomic_start)+(int(length)-int(gdata[1])))
jcoordinate = int(int(genomic_start)+(int(length)-int(gdata[0])))
gMFE = gdata[2]
g_rand_MFE = gdata[3]
gzscore = gdata[4]
if gzscore == "Undef":
gzscore = 00000
gpvalue = gdata[5]
gED = gdata[6]
gfMFE = gdata[7]
gsequence = gdata[8]
gfold = gdata[9]
gcentroid = gdata[10]
fA = gdata[11]
fG = gdata[12]
fC = gdata[13]
fU = gdata[14]
#corrected_file.write(gdata[0]+'\t'+gdata[1]+'\t'+gdata[2]+'\t'+gdata[3]+'\t'+gdata[4]+'\t'+gdata[5]+'\t'+gdata[6]+'\t'+gdata[7]+'\t'+gdata[8]+'\t'+gdata[9]+'\t'+gdata[10]+'\t'+gdata[11]+'\t'+gdata[12]+'\t'+gdata[13]+'\t'+gdata[14])
gff3file.write(chromosome+'\t'+'.'+'\t'+'sequence_attribute'+'\t'+str(icoordinate)+'\t'+str(jcoordinate)+'\t'+'.'+'\t'+strand+'\t'+'.\t'+'MFE='+str(gMFE)+';'+'Z-score='+str(gzscore)+';'+'P-value='+str(gpvalue)+';'+'EnsDiv='+str(gED)+';'+'fMFE='+str(gfMFE)+';'+'Sequence='+gsequence+';'+'MFE_Fold='+gfold+';'+'Centroid='+gcentroid+'\n')
else:
if len(gdata) > 14:
print("Errors found in file")
print("Error in column five:", gdata)
gdata.remove(gdata[4])
#corrected_row = ('\t'.join(data))
print("Error removed:", gdata)
#print(row)
#icoordinate = int((int(data[0])+int(genomic_start)-1))
#jcoordinate = int(int(data[1])+int(genomic_start))
#print(data)
#print(gdata)
#print(data)
icoordinate = int(int(genomic_start)+(int(length)-int(gdata[1])))
jcoordinate = int(int(genomic_start)+(int(length)-int(gdata[0])))
gMFE = gdata[2]
#g_rand_MFE = gdata[3]
gzscore = gdata[3]
if gzscore == "Undef":
gzscore = 00000
gpvalue = gdata[4]
gED = gdata[5]
gfMFE = gdata[6]
gsequence = gdata[7]
gfold = gdata[8]
gcentroid = gdata[9]
fA = gdata[10]
fG = gdata[11]
fC = gdata[12]
fU = gdata[13]
#corrected_file.write(gdata[0]+'\t'+gdata[1]+'\t'+gdata[2]+'\t'+gdata[3]+'\t'+gdata[4]+'\t'+gdata[5]+'\t'+gdata[6]+'\t'+gdata[7]+'\t'+gdata[8]+'\t'+gdata[9]+'\t'+gdata[10]+'\t'+gdata[11]+'\t'+gdata[12]+'\t'+gdata[13]+'\t'+gdata[14])
gff3file.write(chromosome+'\t'+'.'+'\t'+'sequence_attribute'+'\t'+str(icoordinate)+'\t'+str(jcoordinate)+'\t'+'.'+'\t'+strand+'\t'+'.\t'+'MFE='+str(gMFE)+';'+'Z-score='+str(gzscore)+';'+'P-value='+str(gpvalue)+';'+'EnsDiv='+str(gED)+';'+'fMFE='+str(gfMFE)+';'+'Sequence='+gsequence+';'+'MFE_Fold='+gfold+';'+'Centroid='+gcentroid+'\n')
else:
icoordinate = int(int(genomic_start)+(int(length)-int(gdata[1])))
jcoordinate = int(int(genomic_start)+(int(length)-int(gdata[0])))
gMFE = gdata[2]
#g_rand_MFE = gdata[3]
gzscore = gdata[3]
if gzscore == "Undef":
gzscore = 00000
gpvalue = gdata[4]
gED = gdata[5]
gfMFE = gdata[6]
gsequence = gdata[7]
gfold = gdata[8]
gcentroid = gdata[9]
fA = gdata[10]
fG = gdata[11]
fC = gdata[12]
fU = gdata[13]
#corrected_file.write(gdata[0]+'\t'+gdata[1]+'\t'+gdata[2]+'\t'+gdata[3]+'\t'+gdata[4]+'\t'+gdata[5]+'\t'+gdata[6]+'\t'+gdata[7]+'\t'+gdata[8]+'\t'+gdata[9]+'\t'+gdata[10]+'\t'+gdata[11]+'\t'+gdata[12]+'\t'+gdata[13]+'\t'+gdata[14])
gff3file.write(chromosome+'\t'+'.'+'\t'+'sequence_attribute'+'\t'+str(icoordinate)+'\t'+str(jcoordinate)+'\t'+'.'+'\t'+strand+'\t'+'.\t'+'MFE='+str(gMFE)+';'+'Z-score='+str(gzscore)+';'+'P-value='+str(gpvalue)+';'+'EnsDiv='+str(gED)+';'+'fMFE='+str(gfMFE)+';'+'Sequence='+gsequence+';'+'MFE_Fold='+gfold+';'+'Centroid='+gcentroid+'\n')
with open(filename, 'r') as f:
if strand == "+": #Generating BW tracks for forward strand.
genomic_start = int(genomic_start)
lines = f.readlines()[1:]
for row in lines:
if not row.strip():
continue
else:
data = row.split('\t') # this splits each row based on "tab"
if ((len(str(data[8])) == int(window_size)) and (len(str(data[9])) == int(window_size)) and (len(str(data[10])) == int(window_size)) and (float(data[3]) != 0)) or ((len(str(data[9])) == window_size) and (len(str(data[10])) == window_size) and (len(str(data[11])) == window_size) and (float(data[4]) == 0)):
if len(data) > 15:
#print("Errors found in file")
#print("Error in column six:", data)
data.remove(data[5])
#corrected_row = ('\t'.join(data))
#print("Error removed:", data)
i = data[0]
j = data[1]
genomic_end = int(genomic_start)+int(step_size)
MFE = float(data[2])
MFE_list.append(MFE)
if data[4] == "Undef":
                            zscore = 0.0
else:
zscore = float(data[4])
zscore_list.append(zscore)
pscore = float(data[5])
pscore_list.append(pscore)
ED = float(data[6])
ED_list.append(ED)
fMFE = float(data[7])
fMFE_list.append(fMFE)
#print(len(data))
else:
i = data[0]
j = data[1]
genomic_end = int(genomic_start)+int(step_size)
MFE = float(data[2])
MFE_list.append(MFE)
if data[4] == "Undef":
                            zscore = 0.0
else:
zscore = float(data[4])
zscore_list.append(zscore)
pscore = float(data[5])
pscore_list.append(pscore)
ED = float(data[6])
ED_list.append(ED)
fMFE = float(data[7])
fMFE_list.append(fMFE)
else:
if len(data) > 14:
#print("Errors found in file")
#print("Error in column five:", data)
data.remove(data[4])
#corrected_row = ('\t'.join(data))
#print("Error removed:", data)
i = data[0]
j = data[1]
genomic_end = int(genomic_start)+int(step_size)
MFE = float(data[2])
MFE_list.append(MFE)
if data[3] == "Undef":
                            zscore = 0.0
else:
zscore = float(data[3])
zscore_list.append(zscore)
pscore = float(data[4])
pscore_list.append(pscore)
ED = float(data[5])
ED_list.append(ED)
fMFE = float(data[6])
fMFE_list.append(fMFE)
#print(len(data))
else:
i = data[0]
j = data[1]
genomic_end = int(genomic_start)+int(step_size)
MFE = float(data[2])
MFE_list.append(MFE)
if data[3] == "Undef":
                            zscore = 0.0
else:
zscore = float(data[3])
zscore_list.append(zscore)
pscore = float(data[4])
pscore_list.append(pscore)
ED = float(data[5])
ED_list.append(ED)
fMFE = float(data[6])
fMFE_list.append(fMFE)
if strand == "-":
lines = reversed(open(filename).readlines()[1:])
start = genomic_start
genomic_start = int(start) + int(window_size)
for row in lines:
if not row.strip():
continue
else:
data = row.split('\t') # this splits each row based on "tab"
                if ((len(str(data[8])) == int(window_size)) and (len(str(data[9])) == int(window_size)) and (len(str(data[10])) == int(window_size)) and (float(data[3]) != 0)) or ((len(str(data[9])) == int(window_size)) and (len(str(data[10])) == int(window_size)) and (len(str(data[11])) == int(window_size)) and (float(data[4]) == 0)):
if len(data) > 15:
#print("Errors found in file")
#print("Error in column six:", data)
data.remove(data[5])
#corrected_row = ('\t'.join(data))
#print("Error removed:", data)
#print(row)
#print(len(data))
#print(row)
i = data[0]
j = data[1]
#genomic_start = int(genomic_start)+int(window_size)
MFE = float(data[2])
MFE_list.append(MFE)
if data[4] == "Undef":
                            zscore = 0.0
else:
zscore = float(data[4])
zscore_list.append(zscore)
pscore = float(data[5])
pscore_list.append(pscore)
ED = float(data[6])
ED_list.append(ED)
fMFE = float(data[7])
fMFE_list.append(fMFE)
else:
i = data[0]
j = data[1]
#genomic_start = int(genomic_start)+int(window_size)
MFE = float(data[2])
MFE_list.append(MFE)
if data[4] == "Undef":
                            zscore = 0.0
else:
zscore = float(data[4])
zscore_list.append(zscore)
pscore = float(data[5])
pscore_list.append(pscore)
ED = float(data[6])
ED_list.append(ED)
fMFE = float(data[7])
fMFE_list.append(fMFE)
else:
if len(data) > 14:
#print("Errors found in file")
#print("Error in column five:", data)
data.remove(data[4])
#corrected_row = ('\t'.join(data))
#print("Error removed:", data)
#print(row)
#print(len(data))
#print(row)
i = data[0]
j = data[1]
#genomic_start = int(genomic_start)+int(window_size)
MFE = float(data[2])
MFE_list.append(MFE)
if data[3] == "Undef":
                            zscore = 0.0
else:
zscore = float(data[3])
zscore_list.append(zscore)
pscore = float(data[4])
pscore_list.append(pscore)
ED = float(data[5])
ED_list.append(ED)
fMFE = float(data[6])
fMFE_list.append(fMFE)
else:
i = data[0]
j = data[1]
#genomic_start = int(genomic_start)+int(window_size)
MFE = float(data[2])
MFE_list.append(MFE)
if data[3] == "Undef":
                            zscore = 0.0
else:
zscore = float(data[3])
zscore_list.append(zscore)
pscore = float(data[4])
pscore_list.append(pscore)
ED = float(data[5])
ED_list.append(ED)
fMFE = float(data[6])
fMFE_list.append(fMFE)
#print(MFE_list)
#print(chromosome)
#print(step_size)
MFE_wig.addEntries(chromosome, genomic_start, values=MFE_list, span=step_size, step=step_size)
zscore_wig.addEntries(chromosome, genomic_start, values=zscore_list, span=step_size, step=step_size)
pscore_wig.addEntries(chromosome, genomic_start, values=pscore_list, span=step_size, step=step_size)
ED_wig.addEntries(chromosome, genomic_start, values=ED_list, span=step_size, step=step_size)
fMFE_wig.addEntries(chromosome, genomic_start, values=fMFE_list, span=step_size, step=step_size)
MFE_wig.close()
zscore_wig.close()
pscore_wig.close()
ED_wig.close()
fMFE_wig.close()
|
[
"randrews@iastate.edu"
] |
randrews@iastate.edu
|
8bd4bf5943f0f605a35ed8fb4e0e76f7c9860468
|
b781f91398860c1ecfd4d69a4c64b770e40ab602
|
/time.py
|
1b419b58c02feb893740ed5702d17d6e80ddc156
|
[] |
no_license
|
lclarke98/Hue-motion-sensor
|
90bc53aef710cac9d8156af8585eefcd465dbdbc
|
3a99e8a608997e713167f27beb8750723e155df3
|
refs/heads/master
| 2022-04-08T13:13:09.982197
| 2020-01-01T22:09:25
| 2020-01-01T22:09:25
| 196,085,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
import datetime
from astral import Astral
city_name = 'London'
a = Astral()
a.solar_depression = 'civil'
city = a[city_name]
print('Information for %s/%s\n' % (city_name, city.region))
timezone = city.timezone
print('Timezone: %s' % timezone)
print('Latitude: %.02f; Longitude: %.02f\n' % \
(city.latitude, city.longitude))
# Latitude: 51.60; Longitude: 0.08   (sample output of the print above)
sun = city.sun(date=datetime.date(2019, 7, 9), local=True)
print('Dawn: %s' % str(sun['dawn']))
print('Sunrise: %s' % str(sun['sunrise']))
print('Noon: %s' % str(sun['noon']))
print('Sunset: %s' % str(sun['sunset']))
print('Dusk: %s' % str(sun['dusk']))
|
[
"48065423+lclarke98@users.noreply.github.com"
] |
48065423+lclarke98@users.noreply.github.com
|
3d7b976dfd0d9a0a52763a1c2002dca8ee09d178
|
4f1f639ab5ca26a5e9efb57902bdf8ff62443446
|
/t/vektivaSmarWi/smarwiEmulator.py
|
956c0d9136d5e2429d2db2d90b99e33827ceeba9
|
[
"BSD-3-Clause"
] |
permissive
|
CESNET/siotgateway
|
9965ae67d7c86014b6bb799d03b1e0a5063c11c6
|
f3f4bc59a27760dddb818ae83c7c3371a7c2d757
|
refs/heads/master
| 2023-04-15T09:38:47.509990
| 2019-11-03T20:09:26
| 2019-11-03T20:09:26
| 133,476,838
| 1
| 1
|
BSD-3-Clause
| 2018-11-04T20:51:18
| 2018-05-15T07:26:52
|
C++
|
UTF-8
|
Python
| false
| false
| 16,112
|
py
|
import argparse
import http.server
import json
import logging
import paho.mqtt.client as mqtt
import re
import signal
import socketserver
import sys
import time
from threading import Thread, Lock, Event, current_thread
DEFAULT_HTTP_SERVER_PORT = 8080
# HTTP server runs on localhost
DEFAULT_MQTT_URL = "localhost"
DEFAULT_MQTT_PORT = 1883
# Class Thread extended to be able to stop running thread
class StoppableThread(Thread):
"""Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition."""
def __init__(self, *args, **kwargs):
super(StoppableThread, self).__init__(*args, **kwargs)
self._stopEvent = Event()
def stop(self):
self._stopEvent.set()
def stopped(self):
return self._stopEvent.is_set()
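# Usage sketch for StoppableThread (illustrative, not part of the original
# emulator): the target callable must poll stopped() itself, e.g. a worker
# loop of the form
#   while not current_thread().stopped(): do_work()
# and callers end it with thread.stop(); thread.join()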
# Class for handling smarwis
class VektivaSmarwiHandler(mqtt.Client):
def __init__(self, router, *args):
logging.info("Vektiva Smarwi handler instantiated.")
self._smarwisData = []
self._threads = {}
self._router = router
mqtt.Client.__init__(self, *args)
def run(self, url, port):
self.connect(url, port, 60)
self.subscribe("ion/#")
self.loop_start()
def on_connect(self, mqttc, obj, flags, rc):
pass
#print("rc: "+str(rc))
def on_message(self, mqttc, obj, msg):
topicSplit = msg.topic.split("/")
if topicSplit and topicSplit[-1] == "cmd":
macAddr = topicSplit[-2][1:] # Second from the end and cut the first character out
msg2 = str(msg.payload)[2:-1]
msg2 = msg2.replace(";", "/") # in case of open;50 messages
route = "/" + macAddr + "/" + msg2
logging.info("MQTT message sending to router (" + route + ")")
self._router.route(route)
def on_publish(self, mqttc, obj, mid):
pass
#print("mid: "+str(mid))
def on_subscribe(self, mqttc, obj, mid, granted_qos):
pass
#print("Subscribed: "+str(mid)+" "+str(granted_qos))
def on_log(self, mqttc, obj, level, string):
pass
#print(string)
def smarwis(self):
return self._smarwisData
def addSmarwi(self, smarwi):
if (isinstance(smarwi, Smarwi) and self.getSmarwi(smarwi.getMAC()) == None):
logging.info("Smarwi (" + smarwi.getMAC() + ") added to the local database.")
smarwi.on(self)
self._smarwisData.append(smarwi)
return "OK"
return "Err"
def removeSmarwi(self, macAddr):
for smarwi in self._smarwisData:
if macAddr == smarwi.getMAC():
smarwi.eraseMessages(self)
self._smarwisData.remove(smarwi)
return "OK"
return "Err"
def getSmarwi(self, macAddr):
for smarwi in self._smarwisData:
if macAddr == smarwi.getMAC():
return smarwi
def route(self, reqType, body, route):
macAddr = route[0]
smarwi = self.getSmarwi(macAddr)
if (hasattr(smarwi, route[1])): # Checks if function for the second part of URL exists
method = getattr(smarwi, route[1]) # if exists, proper method is called
if (route[1] == "error"):
errno = route[2] if len(route) > 2 else "10"
thread = StoppableThread(target=smarwi.error, kwargs={'mqttClient':self, 'errno':errno})
else:
thread = StoppableThread(target=method, kwargs={'mqttClient':self})
oldThread = self._threads.get(macAddr)
        if (oldThread == None or not oldThread.is_alive()): # if there is no old thread
self._threads.update({macAddr : thread})
else:
oldThread.stop()
self._threads.update({macAddr: thread})
thread.start()
return "OK"
return "Err"
class Router:
def __init__(self, mqttUrl, mqttPort):
logging.info("Router instantiated.")
self._smarwiHandler = VektivaSmarwiHandler(self)
self._smarwiHandler.run(mqttUrl, mqttPort)
def serializeSmarwis(self, smarwi):
properties = smarwi.getProperties()
return properties
def route(self, route, reqType = "GET", body = None):
# Splits route to list by slash
routeParts = route.split('/')
# Filters out empty strings e.g. in case of /open
routeFiltered = list(filter(None, routeParts))
if (routeFiltered and hasattr(self, routeFiltered[0])): # If list is not empty and checks the first route part
method = getattr(self, routeFiltered[0]) # If exists, proper method is called
return method(reqType = reqType, body = body, route = routeFiltered[1:])
elif (routeFiltered and re.search("^[a-fA-F0-9]{12}$", routeFiltered[0]) != None) and len(routeFiltered) > 1:
return self.control(reqType = reqType, body = body, route = routeFiltered)
else: # Else method index is called
return self.index(reqType = "GET", body = body, route = routeFiltered)
def index(self, reqType, body, route):
file = open("index.html", "r")
page = file.read()
file.close()
return page
def jquery(self, reqType, body, route):
jqueryFileName = "jquery.min.js"
try:
open(jqueryFileName, "r")
except IOError:
logging.critical(jqueryFileName + " file does not appear to exist.")
return
file = open(jqueryFileName, "r")
page = file.read()
file.close()
return page
# GET
# returns list of currently existing Smarwi devices
# POST
# creates a new device and adds it to the list
# DELETE
# deletes a Smarwi device with the MAC address equal to the MAC address specified in the URL
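    # Example calls against a running emulator (the MAC address below is an
    # illustrative value; the default port 8080 comes from this file):
    #   GET    http://localhost:8080/devices                        -> JSON list
    #   POST   http://localhost:8080/devices   body: {"macAddr": "aabbccddeeff"}
    #   DELETE http://localhost:8080/devices/aabbccddeeff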
def devices(self, reqType, body, route):
if (reqType == "GET"):
jsonSmarwis = json.dumps(self._smarwiHandler.smarwis(), default=self.serializeSmarwis)
return jsonSmarwis
elif (reqType == "POST"):
logging.debug(body)
smarwiJson = json.loads(body)
if (smarwiJson.get("macAddr") == None or (re.search("^[a-fA-F0-9]{12}$", smarwiJson.get("macAddr")) == None)):
return "Err"
macAddr = smarwiJson.get("macAddr").lower()
smarwi = Smarwi(macAddr)
return self._smarwiHandler.addSmarwi(smarwi)
if (reqType == "DELETE"):
return self._smarwiHandler.removeSmarwi(route[0].lower())
def control(self, reqType, body, route):
route[0] = route[0].lower()
return self._smarwiHandler.route(reqType, body, route)
class VektivaHTTPHandler(http.server.BaseHTTPRequestHandler):
def __init__(self, router, *args):
self._router = router
http.server.BaseHTTPRequestHandler.__init__(self, *args)
def do_GET(self):
resp = self._router.route(self.path)
self.send_response(200)
if (resp == None or "<html>" in resp):
self.send_header('content-type', 'text/html')
else:
self.send_header('content-type', 'application/json')
self.send_header('content-type', 'charset="utf-8"')
if (resp != None):
self.send_header('content-length', len(resp))
self.end_headers()
if (resp != None):
self.wfile.write(resp.encode())
def do_POST(self):
contentLen = int(self.headers['content-length'])
postBody = self.rfile.read(contentLen).decode('utf-8')
resp = self._router.route(self.path, "POST", postBody)
self.send_response(200)
if (resp == None or "<html>" in resp):
self.send_header('content-type', 'text/html')
else:
self.send_header('content-type', 'application/json')
self.send_header('content-type', 'charset="utf-8"')
if (resp != None):
self.send_header('content-length', len(resp))
self.end_headers()
if (resp != None):
self.wfile.write(resp.encode())
def do_DELETE(self):
resp = self._router.route(self.path, "DELETE")
self.send_response(200)
if (resp == None or "<html>" in resp):
self.send_header('content-type', 'text/html')
else:
self.send_header('content-type', 'application/json')
self.send_header('content-type', 'charset="utf-8"')
if (resp != None):
self.send_header('content-length', len(resp))
self.end_headers()
if (resp != None):
self.wfile.write(resp.encode())
class http_server:
def __init__(self, port, router):
logging.info("HTTP server instantiated.")
signal.signal(signal.SIGINT, self.emulatorShutdown)
signal.signal(signal.SIGTERM, self.emulatorShutdown)
def handler(*args):
VektivaHTTPHandler(router, *args)
self._router = router
socketserver.TCPServer.allow_reuse_address = True
self.httpd = socketserver.TCPServer(("", port), handler)
self.httpd.serve_forever()
def emulatorShutdown(self, signum, frame):
logging.info("Emulator is shutting down.")
def serverShutdown(server):
server.shutdown()
thread = StoppableThread(target=serverShutdown, kwargs={'server':self.httpd})
thread.start()
self.httpd.server_close()
logging.info("Server was shutted down.")
devicesJSONString = self._router.devices("GET", "", "")
devices = json.loads(devicesJSONString)
logging.debug("Devices: " + devicesJSONString)
for device in devices:
logging.info("Device " + device["macAddr"] + " is being deleted.")
self._router.devices("DELETE", "", [device["macAddr"]])
logging.info("All devices has been deleted.")
#Smarwi class representing standalone device
class Smarwi:
def __init__(self, macAddr):
self._macAddr = macAddr
self._online = False
self._isOpen = False
self._lock = Lock()
self._errno = "0"
self._errorScheduled = False
self._fixed = True
self._statusno = 250
self._ok = 1
self._ro = 0
def getMAC(self):
return self._macAddr
def getProperties(self):
return {
'macAddr': self._macAddr,
'online': self._online,
'isOpen': self._isOpen,
'errno': self._errno,
'errorScheduled': self._errorScheduled,
'fixed': self._fixed,
'statusno': self._statusno,
'ok': self._ok,
'ro': self._ro
}
def stop(self, mqttClient):
self._lock.acquire()
try:
if self._online:
self._fixed = False
self._errno = "0"
self._errorScheduled = False
self._statusno = 250
self._ok = 1
self._ro = 0
self.status(mqttClient)
finally:
self._lock.release()
def fix(self, mqttClient):
self._lock.acquire()
try:
if self._online:
self._statusno = 250
self._ok = 1
self._ro = 0
self._fixed = True
self.status(mqttClient)
finally:
self._lock.release()
def status(self, mqttClient):
if self._online:
mqttClient.publish("ion/dowaroxby/%" + self._macAddr + "/status",
't:swr\n'\
's:' + str(self._statusno) + '\n'\
'e:' + ("0" if self._errorScheduled else self._errno) + '\n'\
'ok:' + ("1" if self._errorScheduled or self._errno == "0" else "0") + '\n'\
'ro:' + str(self._ro) + '\n'\
'pos:' + ("o" if self._isOpen else "c") + '\n'\
'fix:' + ("1" if self._fixed else "0") + '\n'\
'a:-98\n'\
'fw:3.4.1-15-g3d0f\n'\
'mem:25344\n'\
'up:583521029\n'\
'ip:268446218\n'\
'cid:xsismi01\n'\
'rssi:-56\n'\
'time:' + str(int(time.time())) + '\n'\
'wm:1\n'\
'wp:1\n'\
'wst:3\n')
def error(self, mqttClient, errno = "10"):
self._lock.acquire()
if (self._online):
self._errno = errno
self._errorScheduled = True
self._lock.release()
def on(self, mqttClient):
self._lock.acquire()
try:
if not (self._online):
self._online = True
mqttClient.publish("ion/dowaroxby/%" + self._macAddr + "/online", "1", retain = True)
self.status(mqttClient)
finally:
self._lock.release()
def off(self, mqttClient):
self._lock.acquire()
try:
if (self._online):
self._online = False
mqttClient.publish("ion/dowaroxby/%" + self._macAddr + "/online", "0", retain = True)
self.status(mqttClient)
finally:
self._lock.release()
def eraseMessages(self, mqttClient):
self._lock.acquire()
try:
mqttClient.publish("ion/dowaroxby/%" + self._macAddr + "/online", '''''', retain = True)
mqttClient.publish("ion/dowaroxby/%" + self._macAddr + "/status", '''''', retain = True)
self._online = False
finally:
self._lock.release()
def open(self, mqttClient):
self._lock.acquire()
try:
# If error happened, SmarWi can not be controlled. It waits until "stop" message
if ((not self._errorScheduled) and (self._errno != "0")):
return
if (self._isOpen):
self._statusno = 212
self._fixed = True
self._isOpen = True
# Sending the first message
self.status(mqttClient)
#Before the second step, check if error should happen. If so,
#error message is published and then method ends.
if (self._errorScheduled and self._errno != "0"):
self._statusno = 10
self._ok = 0
self._errorScheduled = False
self.status(mqttClient)
return
time.sleep(3)
if current_thread().stopped():
return
self._statusno = 210
self.status(mqttClient)
time.sleep(3)
if current_thread().stopped():
return
self._statusno = 250
self.status(mqttClient)
else:
self._statusno = 200
self._fixed = True
self._isOpen = True
# Sending the first message
self.status(mqttClient)
#Before the second step, check if error should happen. If so,
#error message is published and then method ends.
if (self._errorScheduled and self._errno != "0"):
self._statusno = 10
self._ok = 0
self._errorScheduled = False
self.status(mqttClient)
return
time.sleep(3)
if current_thread().stopped():
return
self._statusno = 210
self.status(mqttClient)
time.sleep(3)
if current_thread().stopped():
return
self._statusno = 250
self.status(mqttClient)
finally:
self._lock.release()
def close(self, mqttClient):
self._lock.acquire()
try:
# If error happened, SmarWi can not be controlled. It waits until "stop"
if ((not self._errorScheduled) and (self._errno != "0")):
return
if (self._isOpen):
self._statusno = 220
self._isOpen = True
self._fixed = True
self.status(mqttClient)
#Before the second step, check if error should happen. If so,
#error message is published and then method ends.
if (self._errorScheduled and self._errno != "0"):
self._statusno = 10
self._ok = 0
self._errorScheduled = False
self.status(mqttClient)
return
time.sleep(3)
if current_thread().stopped():
return
self._statusno = 230
self._isOpen = False
self.status(mqttClient)
time.sleep(3)
if current_thread().stopped():
return
self._statusno = 250
self.status(mqttClient)
else:
self._statusno = 232
self._isOpen = False
self._fixed = True
self.status(mqttClient)
#Before the second step, check if error should happen. If so,
#error message is published and then method ends.
if (self._errorScheduled and self._errno != "0"):
self._statusno = 10
self._ok = 0
self._errorScheduled = False
self.status(mqttClient)
return
time.sleep(3)
if current_thread().stopped():
return
self._statusno = 232
self._isOpen = True
self.status(mqttClient)
time.sleep(3)
if current_thread().stopped():
return
self._statusno = 234
self._isOpen = True
self.status(mqttClient)
time.sleep(3)
if current_thread().stopped():
return
self._statusno = 230
self._isOpen = False
self.status(mqttClient)
time.sleep(3)
if current_thread().stopped():
return
self._statusno = 250
self._isOpen = False
self.status(mqttClient)
finally:
self._lock.release()
class main:
def __init__(self, httpServerPort, mqttUrl, mqttPort):
self.router = Router(mqttUrl, mqttPort)
self.server = http_server(httpServerPort, self.router)
if __name__ == '__main__':
logging.basicConfig(level = logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument("--http-port", metavar="HTTP_SERVER_PORT",
type=int, help="The emulator will run on specified port. The default port is 8080.")
parser.add_argument("--mqtt-port", metavar="MQTT_BROKER_PORT",
type=int, help="The MQTT client will attempt to connect on the specified port. The default port is 1883.")
parser.add_argument("--mqtt-url", metavar="MQTT_BROKER_URL",
help="The MQTT client will attempt to connect to the specified URL. The default URL is localhost.")
args = parser.parse_args()
httpServerPort = args.http_port if args.http_port is not None else DEFAULT_HTTP_SERVER_PORT
mqttUrl = args.mqtt_url if args.mqtt_url is not None else DEFAULT_MQTT_URL
mqttPort = args.mqtt_port if args.mqtt_port is not None else DEFAULT_MQTT_PORT
m = main(httpServerPort, mqttUrl, mqttPort)
|
[
"xbedna62@stud.fit.vutbr.cz"
] |
xbedna62@stud.fit.vutbr.cz
|
a02b765eefb1c6726e25ba1b5d0361858f7e85fa
|
c015cd73e1e8c48b4d4aea3c871835bef02941bd
|
/testing.py
|
118b6cd2e58873b1731d4bc1471ae5bb5059df07
|
[] |
no_license
|
RichaYadav/python
|
02fa218f9774f4a90df02ac4e9dcf33fc7813612
|
13fa1a0648a6e24ef7cd791cea3fb1fcbc3c66d2
|
refs/heads/master
| 2021-01-23T04:14:46.423229
| 2017-03-25T19:12:06
| 2017-03-25T19:12:06
| 86,180,524
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
import json
from pprint import pprint
from db import database
class Testit:
    def parse_insert(self):
        with open("test2.json") as file: # close the file deterministically
            json_data = json.load(file)
        pprint(json_data)
#for r in json_data['results']:
# kind = r['kind']
Testit().parse_insert()
|
[
"noreply@github.com"
] |
noreply@github.com
|
240c3495c4ce8e4a18d227a9481367db872addca
|
dd8db5054f3e9cb3aa434cdf72c68a23bf4ab4cb
|
/botCode.py
|
520e4981b7a2402707338ce9a4a9a9b852f2d298
|
[] |
no_license
|
Skinbow/MathBot
|
60f10bfdfcb23581a5318c314706899b4b6fedb6
|
a7917c3002434381db0a30391ae098fe75710e72
|
refs/heads/master
| 2020-05-15T18:39:21.012128
| 2019-04-20T18:05:49
| 2019-04-20T18:05:49
| 182,435,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,862
|
py
|
# -*- coding: utf-8 -*-
import config
import telebot
import random
import threading
flag = 0
x = 0
y = 0
score = 0
# 0 - the game has not started
# 1 - the player is waiting for a question
# 2 - we are waiting for an answer
# 3 - paused
# 10 - ask whether to start over
# 11 - wait for the answer
# 12 - start over
TimeOut = False
waitingThread = threading.Thread(target=None)
bot = telebot.TeleBot(config.token)
def SignalTimeIsOut():
global TimeOut
TimeOut = True
def WaitForTimeOut(id):
global TimeOut
while True:
if TimeOut == True:
TimeIsOut(id)
def TimeIsOut(id):
    global thread1, TimeOut
    thread1.join()
    TimeOut = False
    bot.send_message(id, "Time is up!")
    bot.send_message(id, "A new problem:")
    push(id)
def getText(message):
checkPlayersAnswer(message)
def push(id):
global x, y, flag, thread1
x = random.randint(0, 100)
y = random.randint(0, 100)
keyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True)
button1 = telebot.types.KeyboardButton(str(x + y))
r = random.randint(0,200)
while r == x + y:
r = random.randint(0,200)
button2 = telebot.types.KeyboardButton(str(r))
if random.randint(0,2) == 0:
keyboard.add(button1)
keyboard.add(button2)
else:
keyboard.add(button2)
keyboard.add(button1)
ms = bot.send_message(id, str(x) + " + " + str(y), reply_markup=keyboard)
bot.register_next_step_handler(ms, getText)
flag = 2
thread1 = threading.Timer(4.0, SignalTimeIsOut)
thread1.start()
'''def deleteScore(id):
fname = "/Users/mikhail/Documents/Programming/Python/Bots/HenryBot/bot2/scores_" + str(id) + "_.txt"
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
if file_len(fname) > 50:
file = open(fname, "r")
min = int(file[0])
minIndex = 0'''
def saveScore(id):
global score
file = open("/Users/mikhail/Documents/Programming/Python/Bots/HenryBot/bot2/data/scores_" + str(id) + "_.txt", "a")
file.write(str(score) + "\n")
file.close()
def getHighScores(id):
file = open("/Users/mikhail/Documents/Programming/Python/Bots/HenryBot/bot2/data/scores_" + str(id) + "_.txt", "r")
scores = file.readlines()
file.close()
n = 0
for i in scores:
scores[n] = int(i)
n += 1
scores.sort(reverse=True)
bot.send_message(id, "Рекорды:")
for n in range(5):
try:
bot.send_message(id, str(scores[n]))
except:
break
def initiateGame(message):
global flag
global score
score = 0
bot.send_message(message.chat.id, "Игра началась :)")
flag = 1
bot.send_message(message.chat.id, "Счёт: " + str(score))
waitingThread.start()
push(message.chat.id)
return
def checkPlayersAnswer(message):
global flag
global score
global thread1
try:
if int(message.text) == x+y:
bot.send_message(message.chat.id, "Молодец!")
flag = 1
score += 1
bot.send_message(message.chat.id, "Счёт: " + str(score))
thread1.join()
push(message.chat.id)
else:
bot.send_message(message.chat.id, "Ты проиграл!!!")
#deleteScore(message.chat.id)
saveScore(message.chat.id)
score = 0
flag = 0
except:
bot.send_message(message.chat.id, "Ты проиграл!!!")
#deleteScore(message.chat.id)
saveScore(message.chat.id)
score = 0
flag = 0
@bot.message_handler(commands=["start", "stop", "pause", "resume", "get_my_scores"])
def react_to_commands(message):
    global flag
    global waitingThread
    global score
if message.text == "/start":
if flag == 0:
waitingThread = threading.Thread(target=WaitForTimeOut, args=(message.chat.id,))
initiateGame(message)
else:
flag = 10
react_to_text(message)
elif message.text == "/stop":
flag = 0
waitingThread.join()
saveScore(message.chat.id)
score = 0
elif message.text == "/pause":
flag = 3
elif message.text == "/resume":
flag = 1
push(message.chat.id)
elif message.text == "/get_my_scores":
getHighScores(message.chat.id)
#flag = 20
#react_to_text(message)
else:
bot.send_message(message.chat.id, "Неверная команда")
@bot.message_handler(content_types=["text"])
def react_to_text(message):
global flag
    #bot.send_message(message.chat.id, "The game has started :)")
if flag == 2:
checkPlayersAnswer(message)
elif flag == 10:
bot.send_message(message.chat.id, "Вы действительно хотите начать заново? (Y/N)")
flag = 11
elif flag == 11:
if message.text.lower() == 'y':
saveScore(message.chat.id)
flag = 1
initiateGame(message)
elif message.text.lower() == 'n':
flag = 2
else:
            bot.send_message(message.chat.id, 'Type Y to answer "yes" or N to answer "no".')
#elif flag == 20:
    # bot.send_message(message.chat.id, "How many ")
if __name__ == '__main__':
bot.polling(none_stop=True)
|
[
"noreply@github.com"
] |
noreply@github.com
|
819f04b29755ef76d76e6ed4a25e45463c984b64
|
359bdc553338ff1d6a79b97fd2cb944d7dffe9d0
|
/db_repository/versions/054_migration.py
|
f7603392694742787cb8c69c6f8cbc0ce40244ff
|
[] |
no_license
|
HumanInteractionVirtuallyEnhanced/PythonAnywhere
|
3f8776906d1f41fb89baee95a1729f6ba03f55b6
|
d9f2fbc7d59d302b38728de5f0dd16830cd72860
|
refs/heads/master
| 2021-01-10T17:55:45.561457
| 2015-10-06T18:07:57
| 2015-10-06T18:07:57
| 43,767,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,111
|
py
|
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
user = Table('user', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('nickname', String(length=64)),
Column('email', String(length=120)),
Column('fb_id', Integer),
Column('role', SmallInteger, default=ColumnDefault(0)),
Column('is_private', Boolean, default=ColumnDefault(True)),
Column('recentLoc', String),
Column('recentLatLon', String),
Column('apsToken', String),
Column('fbfriends', String),
Column('recTime', DateTime),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
post_meta.tables['user'].columns['recTime'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
post_meta.tables['user'].columns['recTime'].drop()
|
[
"rijul.gupta@yale.edu"
] |
rijul.gupta@yale.edu
|
19f97b46e444e83a2f72744a9002611efe7ccf0a
|
69e5676a801c5446ddec5e1cfd8daf527dbb3ab9
|
/stringcheese/wrangling/get_fficutout.py
|
917178ace1351e624d1dfa092c3a7e383136a123
|
[
"MIT"
] |
permissive
|
lgbouma/stringcheese
|
96d8d48aaa8da9da92744401bba5498399758636
|
e7f5919335f18d54f331e67f4df1a48e4904526d
|
refs/heads/master
| 2020-07-07T10:11:29.697659
| 2020-03-29T19:05:52
| 2020-03-29T19:05:52
| 203,321,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
import requests
from astroquery.mast import Tesscut
def get_fficutout(c_obj, cutoutdir=None, sector=None):
# c_obj (SkyCoord): location of target star
print('beginning download tesscut for {}'.format(repr(c_obj)))
try:
tab = Tesscut.download_cutouts(c_obj, size=20, sector=sector,
path=cutoutdir)
except (requests.exceptions.HTTPError,
requests.exceptions.ConnectionError) as e:
print('got {}, try again'.format(repr(e)))
tab = Tesscut.download_cutouts(c_obj, size=20, sector=sector,
path=cutoutdir)
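# Usage sketch (hedged: the coordinates, directory and sector below are
# illustrative assumptions, not values from this repository):
#   from astropy.coordinates import SkyCoord
#   get_fficutout(SkyCoord(ra=84.3, dec=-80.5, unit='deg'),
#                 cutoutdir='/tmp', sector=1)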
|
[
"bouma.luke@gmail.com"
] |
bouma.luke@gmail.com
|
eab276022ecd0a31712c12691147b17b34c02bb8
|
8c2fa488008257c9fd69b86b45e9a9842b70fdff
|
/PS2/Disparity_with_noise/contrast_disparity/disparity_with_contrast.py
|
cf01e106bb020548f7b4793a7e8a8da4897d456d
|
[] |
no_license
|
dheeraj141/Computer-Vision-Udacity-810-Problem-Sets
|
bf002f3c4e6fad274ec3a159f2c44a89b6828123
|
7b3439083f5706b552ad17cd5ab0d721def80cc9
|
refs/heads/master
| 2021-06-24T12:11:55.355716
| 2021-06-13T00:37:37
| 2021-06-13T00:37:37
| 208,757,980
| 4
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,507
|
py
|
import numpy as np
import cv2 as cv
import ps12 as ps
import sys
import math
import multiprocessing as mp
#main function to calculate SSD
def check_img(img):
if img is None:
return 0
else:
return 1
def add_gaussian_noise(mean, sd, left_image):
h,w = left_image.shape
noise = np.random.normal(mean, sd, (h,w))
left = np.asarray(left_image)
left = left.astype('float64')
left+=noise
left =left.astype('uint8')
return left
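# e.g. noisy = add_gaussian_noise(0, 15, gray_img) adds zero-mean Gaussian
# noise with standard deviation 15 (the same values used further below in
# calculate_SSD_over_range; "gray_img" is an illustrative name)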
# function to calculate the SSD of the two images
# inputs : left_image , right_img direction to calculate SSD
# Direction 0 => from left to right and 1 means right to left_image
# window size should be odd for symmetry
# output : return the disparity map
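# Minimal usage sketch (hedged: file names, window size and disparity range
# below are illustrative assumptions, not values from this script):
#   left = cv.imread("left.png", cv.IMREAD_GRAYSCALE)
#   right = cv.imread("right.png", cv.IMREAD_GRAYSCALE)
#   disp = calculate_SSD(left, right, 0, 9, 40)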
def calculate_SSD(left_image, right_image,direction, window_size,max_disparity):
e1 = cv.getTickCount()
left = np.asarray(left_image)
right = np.asarray(right_image)
h,w = left_image.shape
window_size_half = int(window_size/2)
disparity_left =np.zeros((h,w))
#breakpoint()
for i in range(window_size_half,h - window_size_half):
#l = [0]*left_image.shape[1]
for j in range(window_size_half,w - window_size_half):
min_distance = 65535
min_j = 0
for disparity in range(max_disparity):
distance = 0
                temp = 0
for l in range(-window_size_half, window_size_half):
for m in range(-window_size_half, window_size_half):
if(direction == 0):
temp= int(left[i+l, j+m]) - int(right[i+l, (j+m)-disparity])
else:
temp= int(right[i+l, j+m]) - int(left[i+l, (j+m+disparity)%w])
distance += temp*temp
if (distance <min_distance):
min_distance=distance
min_j = disparity
disparity_left[i,j] = min_j
e2 = cv.getTickCount()
print("time taken is ", (e2-e1)/cv.getTickFrequency())
return disparity_left
def add_contrast(image):
new_image = np.zeros(image.shape, image.dtype)
alpha = 1.0 # Simple contrast control
beta = 0 # Simple brightness control
# Initialize values
print(' Basic Linear Transforms ')
print('-------------------------')
try:
alpha = float(input('* Enter the alpha value [1.0-3.0]: '))
beta = int(input('* Enter the beta value [0-100]: '))
except ValueError:
print('Error, not a number')
# Do the operation new_image(i,j) = alpha*image(i,j) + beta
# Instead of these 'for' loops we could have used simply:
# new_image = cv.convertScaleAbs(image, alpha=alpha, beta=beta)
# but we wanted to show you how to access the pixels :)
for y in range(image.shape[0]):
for x in range(image.shape[1]):
new_image[y,x] = np.clip(alpha*image[y,x] + beta, 0, 255)
return new_image
def calculate_SSD_over_range(left_gray, right_gray):
left_gray_noise = add_gaussian_noise(0,15, left_gray)
right_gray_noise = add_gaussian_noise(0,15, right_gray)
for i in range(4,17,4):
print("Calculate disparity image with noise for window size {}".format(i))
disparity_left = calculate_SSD(left_gray_noise,right_gray_noise,0,i,40)
disparity_right = calculate_SSD(left_gray_noise,right_gray_noise,1,i,40)
disparity_left = ps.threshold_image(disparity_left,40)
disparity_right = ps.threshold_image(disparity_right,40)
#ps.display_image("disparity image", disparity_left)
file_name_left = "disparity_image_left" + "window_size" + str(i)+ "noise"
file_name_right = "disparity_image_right" + "window_size" + str(i)+"noise"
ps.save_image(file_name_left, disparity_left)
ps.save_image(file_name_right, disparity_right)
def main(argv):
    if(len(argv) < 2):
        print("not enough parameters\n")
        print("usage disparity_with_contrast.py <path to left image> <path to right image>\n")
return -1
left_image = cv.imread(argv[0],cv.IMREAD_COLOR)
right_image = cv.imread(argv[1], cv.IMREAD_COLOR)
x = check_img(left_image)
y = check_img(right_image)
if (x == 0 or y == 0):
print("Error opening image\n")
return -1
left_gray = cv.cvtColor(left_image,cv.COLOR_BGR2GRAY)
right_gray = cv.cvtColor(right_image, cv.COLOR_BGR2GRAY)
left_contrast_increased = add_contrast(left_gray)
disparity_left = calculate_SSD(left_contrast_increased ,right_gray,0,8,40)
disparity_right = calculate_SSD(left_contrast_increased ,right_gray,1,8,40)
disparity_left = ps.threshold_image(disparity_left,40)
disparity_right = ps.threshold_image(disparity_right,40)
#ps.display_image("disparity image", disparity_left)
file_name_left = "disparity_image_left" + "window_size" + str(12)+ "CONTRAST"
file_name_right = "disparity_image_right" + "window_size" + str(12)+"CONTRAST"
ps.save_image(file_name_left, disparity_left)
ps.save_image(file_name_right, disparity_right)
# if someone imports this module, this guard makes sure that it does not run
# on its own
if __name__ == "__main__":
main(sys.argv[1:])
|
[
"dhiru5040@gmail.com"
] |
dhiru5040@gmail.com
|
1a6a6d190573f01bbf7d17a030aaa51dbe22656f
|
51dcd31096526bfa6aeae4baea9f0f45657c6623
|
/ocean/tests/util.py
|
dba4670d5424620103eed74a400c139a2532af16
|
[] |
no_license
|
sopac/ocean-portal-docker
|
eba5de774e5a2b3e9b019440c39e7f0041715dd9
|
159aeba7143e66fdd9ed253de935407f898b4873
|
refs/heads/master
| 2021-01-20T08:07:58.698449
| 2017-09-10T09:24:04
| 2017-09-10T09:24:04
| 90,103,531
| 1
| 5
| null | 2017-12-13T03:30:45
| 2017-05-03T03:19:57
|
Python
|
UTF-8
|
Python
| false
| false
| 826
|
py
|
#
# (c) 2012 Commonwealth of Australia
# Australian Bureau of Meteorology, COSPPac COMP
# All Rights Reserved
#
# Authors: Danielle Madeley <d.madeley@bom.gov.au>
import os
import os.path
from glob import glob
import pytest
from ocean.config import get_server_config
config = get_server_config()
def clear_cache(product, filetype='*'):
cachedir = config['outputDir']
s = os.path.join(cachedir, '%s*.%s' % (product, filetype))
for d in glob(s):
try:
os.unlink(d)
except IOError:
raise
def unique(iterable):
__tracebackhide__ = True
vals = set()
for i in iterable:
if i in vals: return False
vals.add(i)
return True
def get_file_from_url(url):
bn = os.path.basename(url)
return os.path.join(config['outputDir'], bn)
|
[
"sachindras@spc.int"
] |
sachindras@spc.int
|
a8d257e4514cb20cbd1b1b0e338ee8cdb6d97d59
|
80ab528c686fb2867fb35f067e0ea42cb29faed9
|
/playground/playground/urls.py
|
6268eb7111707afe3dff5070f8a766d6c4d0939e
|
[] |
no_license
|
HajimeK/playground
|
1f11698f5967adf53d2d2276457e62dbbccb8d81
|
586d335210e3588b0b907fc0a087eaf6a62cba84
|
refs/heads/master
| 2021-01-10T17:46:08.886233
| 2016-03-29T14:43:29
| 2016-03-29T14:43:29
| 54,985,621
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'playground.views.home', name='home'),
# url(r'^playground/', include('playground.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
|
[
"Boz@Hajime-no-MacBook-Air.local"
] |
Boz@Hajime-no-MacBook-Air.local
|
329c0354906ea7def69064595589467c698a674a
|
d2720ce687c6000b06255d51824770e0f91e04ca
|
/stepper.py
|
267c5e17ffb11e2af50b1bca497cfccc928eda5c
|
[] |
no_license
|
patildayananda/Raspberry-Pi
|
92eae96579b599b23640f2c9431a31fded3c7ed4
|
835ace7b9d6843ef8697e6b8b6efe7cf44f29282
|
refs/heads/master
| 2021-06-02T00:48:11.897537
| 2016-08-17T06:39:05
| 2016-08-17T06:39:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,646
|
py
|
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
coil_A_1_pin = 24 # pink
coil_A_2_pin = 23 # orange
coil_B_1_pin = 4 # blue
coil_B_2_pin = 17 # yellow
enable_pin = 18 # enable pin (assumed GPIO number: the original used enable_pin without ever defining it; adjust to your wiring)
# adjust if different
StepCount = 8
Seq = range(0, StepCount) # seq size is of 8
Seq[0] = [0,1,0,0]
Seq[1] = [0,1,0,1]
Seq[2] = [0,0,0,1] # store value for steps
Seq[3] = [1,0,0,1]
Seq[4] = [1,0,0,0]
Seq[5] = [1,0,1,0]
Seq[6] = [0,0,1,0]
Seq[7] = [0,1,1,0]
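# the eight rows above form one half-step cycle; forward() replays the cycle
# "steps" times and backwards() replays it in reverse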
GPIO.setup(enable_pin, GPIO.OUT)
GPIO.setup(coil_A_1_pin, GPIO.OUT)
GPIO.setup(coil_A_2_pin, GPIO.OUT)
GPIO.setup(coil_B_1_pin, GPIO.OUT)
GPIO.setup(coil_B_2_pin, GPIO.OUT)
GPIO.output(enable_pin, 1)
def setStep(w1, w2, w3, w4):
GPIO.output(coil_A_1_pin, w1)
GPIO.output(coil_A_2_pin, w2)
GPIO.output(coil_B_1_pin, w3)
GPIO.output(coil_B_2_pin, w4)
def forward(delay, steps):
for i in range(steps):
for j in range(StepCount): # access the seq. list one by one(0-7)
setStep(Seq[j][0], Seq[j][1], Seq[j][2], Seq[j][3])
time.sleep(delay)
def backwards(delay, steps):
for i in range(steps):
for j in reversed(range(StepCount)): # access the seq. list one by one(7-0)
setStep(Seq[j][0], Seq[j][1], Seq[j][2], Seq[j][3])
time.sleep(delay)
if __name__ == '__main__':
while True:
delay = raw_input("Time Delay (ms)?")
steps = raw_input("How many steps forward? ")
forward(int(delay) / 1000.0, int(steps)) #function call for f/w
steps = raw_input("How many steps backwards? ")
backwards(int(delay) / 1000.0, int(steps)) #function call for b/w
|
[
"noreply@github.com"
] |
noreply@github.com
|
077dd4703b1e8f583618f83d20f53a9b6442c9c0
|
105c59fdd7e87d0d826177b4eba34ad7f6d22231
|
/tests/scripts/tree.py
|
479ec8df167885e5e44bb64b7bd2516a8ee52e1f
|
[
"MIT"
] |
permissive
|
Jesse-McDonald/EM-MAGIC
|
5af40e7b6b1f06b7ef9a39b4488cba5348af9445
|
5c0ef082bce75ba2bcc795a556b8738dc30fc503
|
refs/heads/master
| 2020-03-23T20:53:12.321459
| 2018-08-02T21:51:26
| 2018-08-02T21:51:26
| 142,068,953
| 0
| 0
|
MIT
| 2018-08-02T21:51:28
| 2018-07-23T21:01:02
|
C++
|
UTF-8
|
Python
| false
| false
| 433
|
py
|
#Returns the paths of all files in a directory and all sub directories relative to start directory
import os
def tree(directory,target="f"):
paths=[]
for currentDir,dirs,files in os.walk(directory):
if target=="f":
for file in files:
paths.append(currentDir+"/"+file)
for dir in dirs:
paths+=(tree(dir))
if target=="d":
paths.append(currentDir)
for dir in dirs:
paths+=(tree(dir,"d"))
return paths
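# Minimal usage sketch (hedged: "." is an illustrative start directory):
if __name__ == "__main__":
    # print every file path found under the current directory
    for path in tree(".", target="f"):
        print(path)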
|
[
"jamcdonald@lcmail.lcsc.edu"
] |
jamcdonald@lcmail.lcsc.edu
|
08fd520672df3be648ba10e90ded14bc29eb24ed
|
41c0259d0717443a641e7e5c883c8ff12d418c30
|
/week/9/writeup/part1.py
|
a0c811b57eba66af9b860bded74c28796ad3daf8
|
[] |
no_license
|
kodirupe/389Rfall18
|
9cae986e86c81a583ae82ab6c9f3601b1a517f18
|
d3f4639692c36893e8b5ffa177db592dfa89756c
|
refs/heads/master
| 2021-07-25T09:10:50.381596
| 2018-12-12T02:06:02
| 2018-12-12T02:06:02
| 147,029,368
| 0
| 0
| null | 2018-09-01T20:10:59
| 2018-09-01T20:10:59
| null |
UTF-8
|
Python
| false
| false
| 846
|
py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# importing a useful library -- feel free to add any others you find necessary
import hashlib
import string
# this will work if you place this script in your writeup folder
wordlist = open("../probable-v2-top1575.txt", 'r')
# a string equal to 'abcdefghijklmnopqrstuvwxyz'.
salts = string.ascii_lowercase
hash_fp = open("../hashes",'r')
hashes = hash_fp.readlines()
stripped_hashes = []
for x in hashes:
stripped_hashes.append(x.strip())
password = wordlist.readlines()
strip_pass = []
for x in password:
strip_pass.append(x.strip())
for salt in salts:
for x in strip_pass:
salt_password = salt+x
        h = hashlib.sha512(salt_password.encode()) # encode: sha512 needs bytes on Python 3
for y in stripped_hashes:
if(y == h.hexdigest()):
print("Salt: " + salt + "\n" + "Password: " + x)
|
[
"rupe.kodi@gmail.com"
] |
rupe.kodi@gmail.com
|
897e083576693619a543b424205541faf51766de
|
2e912d2551874e238219a9cc4c84eed2422efe69
|
/util.py
|
3b31453d5da7401ce5b4b7b1fffa6fbad944798d
|
[
"MIT"
] |
permissive
|
snowfox1939/texstitch
|
a3ab631c9aa710b04187c134e8904424c45fd5b7
|
a719d675b2bded9e85641d570d461a552619ad36
|
refs/heads/master
| 2020-06-01T23:16:09.288981
| 2017-08-09T01:10:55
| 2017-08-09T01:10:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,508
|
py
|
from PIL import Image, ImageTk
from tkinter import filedialog as tk_fd
import tkinter as tk
import os
FILES_IMG = (("Image files", ("*.jpg", "*.png")), ("All files", "*.*"))
FILES_STITCH = (("Json file", ("*.json")), ("All files", "*.*"))
data_path = None
def input_int(prompt, imin=None, imax=None):
'''
Prompt the user to input an integer, and don't stop prompting until a valid
integer is given.
prompt - Prompt given to user
imin - Minimum value that will be accepted
imax - Maximum value that will be accepted
'''
while True:
i = input(prompt)
try:
val = int(i)
if imin is not None and val < imin:
print("Number should be at least {}".format(imin))
elif imax is not None and val > imax:
print("Number should be at most {}".format(imax))
else:
return val
except ValueError:
print("Not a valid integer!")
_icon_cache = {}
def load_icon(name):
if data_path is not None and data_path != "":
name = os.path.join(data_path, name)
if name in _icon_cache:
return _icon_cache[name]
img = Image.open(name)
imgtk = ImageTk.PhotoImage(img)
_icon_cache[name] = imgtk
return imgtk
def get_in_filename(initialdir, title, filetypes):
root = tk.Tk()
root.withdraw()
value = None
# while value is None or value == "" or value == ():
value = tk_fd.askopenfilename(
initialdir=initialdir,
title=title,
filetypes=filetypes)
root.destroy()
return value
def get_out_filename(initialdir, title, filetypes):
root = tk.Tk()
root.withdraw()
value = None
# while value is None or value == "" or value == ():
value = tk_fd.asksaveasfilename(
initialdir=initialdir,
title=title,
filetypes=filetypes)
root.destroy()
return value
def get_directory(initialdir, title):
root = tk.Tk()
root.withdraw()
value = None
# while value is None or value == "" or value == ():
value = tk_fd.askdirectory(
initialdir=initialdir,
title=title)
root.destroy()
return value
def get_many_files(initialdir, title, filetypes):
root = tk.Tk()
root.withdraw()
value = None
# while value is None or len(value) == 0:
value = tk_fd.askopenfilenames(
initialdir=initialdir,
title=title,
filetypes=filetypes)
root.destroy()
return value
|
[
"jellonator00@gmail.com"
] |
jellonator00@gmail.com
|
cf1dec6881d70e2ef54464bca3024d4a54e679c1
|
8a4cde6d1cf568b168100d561c6d6062f919160f
|
/stacks.dynamically.py
|
a118b5b56555ae37144633b50f01e9f79daf8e12
|
[] |
no_license
|
yash-saini/Python_Programes
|
0a4146aa687b9f27a1f0490875db832303e41cc7
|
5312f151c7c04bfac736769bdff77503cb98627b
|
refs/heads/master
| 2020-12-19T15:40:43.415349
| 2020-06-19T16:30:40
| 2020-06-19T16:30:40
| 235,777,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 762
|
py
|
class stacks():
    def __init__(self,l=None):
        self.l=l if l is not None else [] # avoid a shared mutable default argument
        self.choice()
def choice(self):
while 1:
u=input('1=continue 2=exit')
if u==1:
n=input('push=1 pop=2')
if n==1:
                    data=input('data to be inserted')
self.push(data)
elif n==2:
self.pop()
else:
print 'invalid input'
else:
break
def push(self,data=0):
self.l.append(data)
print self.l
def pop(self):
self.l.pop()
print self.l
def main():
    c=input('enter the list')
    s=stacks(c)

if __name__ == '__main__':
    main() # actually run the demo when executed directly
|
[
"noreply@github.com"
] |
noreply@github.com
|
570990629c0c9b3a5c2630cefadd7527dacb12c3
|
e9b91d2eb84fefcf9f245249b49c6c7967dc81d2
|
/udemy-data-science-course/9-data-projects/2-stock-analysis.py
|
2d0be8a7c87627db58decb47c77c23d43467d77d
|
[] |
no_license
|
sidchilling/python-data-analysis-learning
|
ea78b8f3db29f4699835b15df0f63b2a07f7fa6d
|
674a65f614c897635c4f4a463b7c08facf643b5b
|
refs/heads/master
| 2021-01-11T23:02:44.792783
| 2017-04-20T15:05:40
| 2017-04-20T15:05:40
| 78,539,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,574
|
py
|
from __future__ import division
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import matplotlib as mtp
mtp.use('TkAgg')
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import time
# 1. What was the change in price of the stock over time?
# 2. What was the daily return of the stock on average?
# 3. What was the moving average of the various stocks?
# 4. What was the correlation between different stocks' closing prices?
# 5. What was the correlation between different stocks' daily returns?
# 6. How much value we put at risk by investing in a particular stock?
# 7. How can we attempt to predict future stock behaviour?
sns.set_style('whitegrid') # setting the style as white
from pandas_datareader.data import DataReader
from datetime import datetime
import sys
def print_dataframes(dfs, num = 5):
for stock in dfs.keys():
print '--- DataFrame for {} ---'.format(stock)
print dfs[stock].head(n = num)
def flatten_axes(axes):
axes_array = []
for row in range(0, len(axes)):
for col in range(0, len(axes[row])):
axes_array.append(axes[row][col])
return axes_array
def convert_date(d):
# there might be timestamp data if we fetch the data from internet
# there will not be timestamp data if we fetch from files
if ' ' in d:
d = d.split(' ')[0].strip()
return datetime.strptime(d, '%Y-%m-%d').strftime("%d %b '%y")
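# e.g. convert_date('2017-04-20 00:00:00') -> "20 Apr '17"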
tech_list = ['AAPL', 'GOOG', 'MSFT', 'AMZN']
end = datetime.utcnow()
start = datetime(end.year - 1, end.month, end.day)
dfs = {} # Map to store the dataframes for each stock
for stock in tech_list:
fetch_data = False
# first check whether this can be read from local file
try:
dfs[stock] = pd.read_csv('{}.csv'.format(stock))
except:
fetch_data = True
if fetch_data or dfs[stock].empty:
# fetch data from Yahoo
print 'Fetching data for: {}'.format(stock)
dfs[stock] = DataReader(name = stock, data_source = 'yahoo',
start = start, end = end)
# save it locally
dfs[stock].to_csv('{}.csv'.format(stock))
print_dataframes(dfs)
print dfs[tech_list[0]].describe()
# Set Date as Index
for stock in dfs.keys():
dfs[stock] = dfs[stock].reset_index()
dfs[stock]['Date'] = dfs[stock]['Date'].apply(str)
dfs[stock]['Date'] = dfs[stock]['Date'].apply(convert_date)
dfs[stock] = dfs[stock].set_index(keys = ['Date'])
print_dataframes(dfs)
def make_subplots(rows = 2, cols = 2):
fig, axes = plt.subplots(nrows = rows, ncols = 2)
plt.subplots_adjust(wspace = 1, hspace = 1)
axes_array = flatten_axes(axes)
return (fig, axes_array)
# Historical trend of closing prices
(fig, axes_array) = make_subplots()
index = 0
for stock in tech_list:
dfs[stock]['Adj Close'].plot(legend = True, title = '{} Adj Close Trend'.format(stock),
ax = axes_array[index], y = 'Date')
index = index + 1
plt.show()
# Show the Volume trend of AAPL
dfs['AAPL']['Volume'].plot(legend = True, figsize = (10, 4),
title = 'AAPL Volume Trend')
plt.show()
# Calculate moving average for all the stocks
ma_days = [10, 20, 50, 70, 100]
for ma in ma_days:
column_name = 'MA for {} days'.format(ma)
for stock in dfs.keys():
dfs[stock][column_name] = pd.rolling_mean(arg = dfs[stock]['Adj Close'],
window = ma)
print_dataframes(dfs, num = 100)
def plot_moving_averages(ma_days, close = True):
(fig, axes_array) = make_subplots()
index = 0
for stock in dfs.keys():
col_names = ['Adj Close'] if close else []
for ma in ma_days:
col_names.append('MA for {} days'.format(ma))
dfs[stock][col_names].plot(legend = True, title = '{} MA'.format(stock),
ax = axes_array[index])
index = index + 1
plt.show()
# Plot the Moving averages for all the stocks for Adj Close, 10, and 20
plot_moving_averages(ma_days = [10, 20])
# Plot the moving averages for all stocks for 50, 70, 100
plot_moving_averages(ma_days = [50, 70, 100], close = False)
## Daily Returns and Risk of the Stock
for stock in dfs.keys():
dfs[stock]['Daily Return'] = dfs[stock]['Adj Close'].pct_change()
print_dataframes(dfs)
(fig, axes_array) = make_subplots()
index = 0
for stock in dfs.keys():
dfs[stock]['Daily Return'].plot(legend = True, title = 'Daily Return {}'.format(stock),
linestyle = '--', marker = 'o',
ax = axes_array[index])
index = index + 1
plt.show()
# Show the daily returns on a histogram
(fig, axes_array) = make_subplots()
index = 0
for stock in dfs.keys():
g = sns.distplot(a = dfs[stock]['Daily Return'].dropna(), bins = 100, hist = True,
kde = True, rug = False, ax = axes_array[index])
g.set_title('{}'.format(stock))
index = index + 1
plt.show()
## Make a DataFrame of all the Adj Close prices for each stock
closing_dfs = DataFrame()
for stock in dfs.keys():
adj_close = dfs[stock]['Adj Close']
adj_close.name = '{}'.format(stock)
closing_dfs = pd.concat([closing_dfs, adj_close], axis = 1)
print closing_dfs.head()
tech_returns = closing_dfs.pct_change()
print tech_returns.head()
# Show correlation between same stock
sns.jointplot(x = 'GOOG', y = 'GOOG', data = tech_returns, kind = 'scatter',
color = 'seagreen')
plt.show()
# Correlation between GOOG and MSFT
sns.jointplot(x = 'GOOG', y = 'MSFT', data = tech_returns, kind = 'scatter',
color = 'seagreen')
plt.show()
# Show correlation between all the stocks
sns.pairplot(data = tech_returns.dropna())
plt.show()
# Show correlation using PairGrid to control the types of graphs
returns_fig = sns.PairGrid(data = tech_returns.dropna())
returns_fig.map_upper(plt.scatter, color = 'purple')
returns_fig.map_lower(sns.kdeplot, cmap = 'cool_d')
returns_fig.map_diag(plt.hist, bins = 30)
plt.show()
# Correlation between Closing prices
returns_fig = sns.PairGrid(data = closing_dfs.dropna())
returns_fig.map_upper(plt.scatter, color = 'purple')
returns_fig.map_lower(sns.kdeplot, cmap = 'cool_d')
returns_fig.map_diag(plt.hist, bins = 30)
plt.show()
sns.linearmodels.corrplot(tech_returns.dropna(), annot = True)
plt.show()
sns.linearmodels.corrplot(closing_dfs.dropna(), annot = True)
plt.show()
## Quantify Risk
area = np.pi * 20 # so that the points that we draw are visible
plt.scatter(x = tech_returns.dropna().mean(),
y = tech_returns.dropna().std(),
s = area)
plt.xlabel('Expected Return')
plt.ylabel('Risk')
for label, x, y in zip(tech_returns.columns, tech_returns.dropna().mean(),
tech_returns.dropna().std()):
plt.annotate(label, xy = (x, y), xytext = (50, 50),
textcoords = 'offset points', ha = 'right', va = 'bottom',
arrowprops = {'arrowstyle' : '-', 'connectionstyle' : 'arc3,rad=-0.3'})
plt.show()
## Value at Risk
sns.distplot(a = dfs['AAPL']['Daily Return'].dropna(), bins = 100,
hist = True, kde = True, rug = False, color = 'purple')
plt.show()
## Bootstap Method
# Print the Quantiles
print tech_returns['AAPL'].dropna().quantile(0.05) # this means that for 95% times, this will be your worst loss
# Do the above for all the stocks
for col in tech_returns.columns:
stock = '{}'.format(col)
print '{}. Risk: {}'.format(stock,
tech_returns[stock].dropna().quantile(0.05))
## Monte-Carlo Method
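# (In the loop below each simulated day multiplies the previous price by
# 1 + drift + shock, with drift = mu*dt and shock drawn from a normal
# distribution centred on mu*dt with scale sigma*sqrt(dt): a simple
# discretized geometric-Brownian-motion step.)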
def stock_monte_carlo(start_price, days, mu, sigma, dt):
price = np.zeros(days)
price[0] = start_price
shock = np.zeros(days)
drift = np.zeros(days)
for x in xrange(1, days):
shock[x] = np.random.normal(loc = mu * dt, scale = sigma * np.sqrt(dt))
drift[x] = mu * dt
price[x] = price[x - 1] + (price[x - 1] * (drift[x] + shock[x]))
return price
# Run the Monte carlo method for Google 100 times starting with the
# first opening price
start_price = dfs['GOOG']['Open'][0]
days = 365
dt = 1 / days
mu = tech_returns['GOOG'].mean()
sigma = tech_returns['GOOG'].std()
for run in xrange(100):
plt.plot(stock_monte_carlo(start_price = start_price, days = days,
mu = mu, sigma = sigma, dt = dt))
plt.xlabel('Days')
plt.ylabel('Price')
plt.title('Monte Carlo Analysis for Google')
plt.show()
# Run 10,000 Monte-Carlo Simulations for all stocks
for stock in dfs.keys():
runs = 10000
print 'Running {} Monte-Carlo Simulations for {}'.format(runs, stock)
simulations = np.zeros(runs)
start_price = dfs[stock]['Open'][0]
days = 365
dt = 1 / days
start_time = time.time()
mu = tech_returns[stock].mean()
sigma = tech_returns[stock].std()
for run in xrange(runs):
simulations[run] = stock_monte_carlo(start_price = start_price,
days = days, mu = mu,
sigma = sigma, dt = dt)[days - 1]
# in the previous step we are taking the final day's simulated price
q = np.percentile(simulations, 1)
plt.hist(simulations, bins = 200)
plt.figtext(x = 0.6, y = 0.8, s = 'Start Price: ${}'.format(round(float(start_price), 2)))
# Mean ending Price
plt.figtext(x = 0.6, y = 0.7,
s = 'Mean Final Price: ${}'.format(round(float(simulations.mean()), 2)))
# Variance of the price (with 99% confidence interval)
plt.figtext(x = 0.6, y = 0.6,
s = 'VaR(0.99): ${}'.format(round(float(start_price - q), 2)))
    # Display the 1% quantile
plt.figtext(x = 0.15, y = 0.6,
s = 'q(0.99): {}'.format(round(float(q), 2)))
# Plot a line at the 1% quantile
plt.axvline(x = q, linewidth = 4, color = 'r')
# Title
plt.title('Final Price Distribution for {} after {} days'.format(stock, days),
weight = 'bold')
end_time = time.time()
print 'Time taken to run {} simulations for {}: {}'.format(runs, stock, (end_time - start_time))
plt.show()
|
[
"siddharthsaha@Webonises-Macbook-Pro.local"
] |
siddharthsaha@Webonises-Macbook-Pro.local
|
8c55b1b583c89eaaf63961ca00dde5c69b6b67c5
|
5e5799e0ccce7a72d514fbc76dcb0a2108013f18
|
/Textfile2DefDomGeom.py
|
710ab37655fe1cd3158b6347c04304f6a2e29644
|
[] |
no_license
|
sourcery-ai-bot/dash
|
6d68937d225473d06a18ef64079a4b3717b5c12c
|
e1d1c3a601cd397d2508bfd4bb12bdb4e878cd9a
|
refs/heads/master
| 2023-03-07T17:15:39.174964
| 2011-03-01T17:11:21
| 2011-03-01T17:11:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,140
|
py
|
#!/usr/bin/env python
#
# Use doms.txt or nicknames.txt file to create a default-dom-geometry file and
# print the result to sys.stdout
#
# URL: http://icecube.wisc.edu/~testdaq/database_files/nicknames.txt
import sys
from DefaultDomGeometry import DefaultDomGeometryReader, DomsTxtReader, \
NicknameReader
if __name__ == "__main__":
if len(sys.argv) < 2:
raise SystemExit("Please specify a file to load!")
if len(sys.argv) > 2:
raise SystemExit("Too many command-line arguments!")
if sys.argv[1].endswith("nicknames.txt"):
newGeom = NicknameReader.parse(sys.argv[1])
elif sys.argv[1].endswith("doms.txt"):
newGeom = DomsTxtReader.parse(sys.argv[1])
else:
raise SystemExit("File must be 'nicknames.txt' or 'doms.txt'," +
" not '%s'" % sys.argv[1])
oldDomGeom = DefaultDomGeometryReader.parse()
# rewrite the 64-DOM strings to 60 DOM strings plus 32 DOM icetop hubs
newGeom.rewrite(False)
oldDomGeom.rewrite()
oldDomGeom.mergeMissing(newGeom)
# dump the new default-dom-geometry data to sys.stdout
oldDomGeom.dump()
|
[
"dglo@icecube.wisc.edu"
] |
dglo@icecube.wisc.edu
|
3c1e2609185afba2ced84ebd4fc0350d03478685
|
a0fe82f6134fa6f0423d95116ffb5c4a15f6a299
|
/Eduspace/student/views/student.py
|
fb79a9c91e56322d53d0a27a17ea7dff2f19beb9
|
[] |
no_license
|
akshaykrsinghal/demorepository
|
bdc8bc81b4944ba2effcafc8f108a335a27fce4b
|
b845150740a4e7127718e2265c5e55d60b1c11c6
|
refs/heads/master
| 2022-12-19T17:07:38.027458
| 2020-10-10T10:37:46
| 2020-10-10T10:37:46
| 302,876,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect
from ..models import *
def student(request):
a=request.session['rollno']
# image=user.objects.filter(rollno=a)
image=user.objects.filter(rollno=a)
data={'images':image}
print(image)
return render(request,'dashboard.html',data)
|
[
"59149089+Akshaykumarsinghal@users.noreply.github.com"
] |
59149089+Akshaykumarsinghal@users.noreply.github.com
|
695d512109a6db109ae5579759132098cdb07cf1
|
b28e4c21d12f14bd0baeaada80ff813fa9b9c57a
|
/setup.py
|
545213907c4424bbe605de770e4e8ed9785be7ad
|
[
"MIT"
] |
permissive
|
bikegriffith/sanic
|
005c250b2d9d7ddb17995f3b6b084338bd7c7d81
|
385158b84d2c31e33e778a687936acc19efed950
|
refs/heads/master
| 2023-04-05T16:40:54.371995
| 2016-10-15T02:55:03
| 2016-10-15T02:55:03
| 70,962,717
| 0
| 0
|
MIT
| 2023-04-04T00:18:32
| 2016-10-15T03:05:37
|
Python
|
UTF-8
|
Python
| false
| false
| 573
|
py
|
"""
Sanic
"""
from setuptools import setup
setup(
name='Sanic',
version="0.1.0",
url='http://github.com/channelcat/sanic/',
license='BSD',
author='Channel Cat',
author_email='channelcat@gmail.com',
description='A microframework based on uvloop, httptools, and learnings of flask',
packages=['sanic'],
platforms='any',
install_requires=[
'uvloop>=0.5.3',
'httptools>=0.0.9',
'ujson>=1.35',
],
classifiers=[
        'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
],
)
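# A quick local sanity check of this metadata (standard setuptools commands,
# not part of the original file):
#
#   pip install .            # install the package with its pinned requirements
#   python setup.py check    # validate metadata fields such as the classifiers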
|
[
"channelcat@gmail.com"
] |
channelcat@gmail.com
|
0945e2340abb7961a09bf19356b325727714a0a7
|
b92b0e9ba2338ab311312dcbbeefcbb7c912fc2e
|
/build/shogun_lib/examples/undocumented/python_modular/kernel_spherical_modular.py
|
ef002d63c31f4dc1896ca111b2223acffcd201b9
|
[] |
no_license
|
behollis/muViewBranch
|
384f8f97f67723b2a4019294854969d6fc1f53e8
|
1d80914f57e47b3ad565c4696861f7b3213675e0
|
refs/heads/master
| 2021-01-10T13:22:28.580069
| 2015-10-27T21:43:20
| 2015-10-27T21:43:20
| 45,059,082
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 919
|
py
|
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
parameter_list=[[traindat,testdat, 1.0],[traindat,testdat, 5.0]]
def kernel_spherical_modular(fm_train_real=traindat, fm_test_real=testdat, sigma=1.0):
from shogun.Features import RealFeatures
from shogun.Kernel import MultiquadricKernel
from shogun.Distance import EuclidianDistance
feats_train=RealFeatures(fm_train_real)
feats_test=RealFeatures(fm_test_real)
distance=EuclidianDistance(feats_train, feats_train)
kernel=MultiquadricKernel(feats_train, feats_train, sigma, distance)
km_train=kernel.get_kernel_matrix()
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
return km_train,km_test,kernel
if __name__=='__main__':
print('Spherical')
kernel_spherical_modular(*parameter_list[0])
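# For reference, a NumPy-only sketch of the kernel entries computed above,
# assuming the common multiquadric form k(x, y) = sqrt(||x - y||^2 + sigma^2)
# (an assumption about MultiquadricKernel's formula, not verified here):
import numpy as np
def multiquadric_matrix(X, Y, sigma):
    # X and Y hold one sample per column, matching shogun's feature layout
    sq_dists = ((X[:, :, None] - Y[:, None, :]) ** 2).sum(axis=0)
    return np.sqrt(sq_dists + sigma ** 2)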
|
[
"prosen@305cdda6-5ce1-45b3-a98d-dfc68c8b3305"
] |
prosen@305cdda6-5ce1-45b3-a98d-dfc68c8b3305
|
ec1e5dbc338ecf43d1bd53ded885b1450fb0c5be
|
da570c2047d335b3553e63c27ac7f60b57b28b7e
|
/images/urls.py
|
6c3df5aaf6b9ca607cd5fbcabe80ae605ee575b6
|
[
"MIT"
] |
permissive
|
mfannick/viewImages
|
8c799fc52566de03f4909d36f5ccc50e7fff9564
|
27e447faff455fba306ef3e677d5f2f63160065e
|
refs/heads/master
| 2021-09-09T11:53:42.786004
| 2019-10-14T09:21:16
| 2019-10-14T09:21:16
| 214,357,014
| 0
| 0
| null | 2021-09-08T01:21:15
| 2019-10-11T06:11:06
|
Python
|
UTF-8
|
Python
| false
| false
| 425
|
py
|
from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    url(r'^$', views.homePage, name='homePage'),
    url(r'^search/', views.searchImageByCategory, name='searchImageByCategory'),
    url(r'^description/(\d+)', views.imageDescription, name='imageDescription')
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
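# Resulting routes (paths shown for illustration):
#   /                   -> views.homePage
#   /search/            -> views.searchImageByCategory
#   /description/<id>   -> views.imageDescription, e.g. /description/5
# These can also be resolved by name, e.g. reverse('imageDescription', args=[5])
# (assuming Django's django.urls.reverse is available in this project's version).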
|
[
"mfannick1@gmail.com"
] |
mfannick1@gmail.com
|
82bb97b65913316755124594969ad638d47401ba
|
657a0e7550540657f97ac3f7563054eb4da93651
|
/Boilermake2018/Lib/site-packages/chatterbot/logic/low_confidence.py
|
bb8ebfd230f6cde219dcb021a1575d6b17714cb8
|
[
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0"
] |
permissive
|
TejPatel98/voice_your_professional_email
|
faf4d2c104e12be61184638913ebe298893c5b37
|
9cc48f7bcd6576a6962711755e5d5d485832128c
|
refs/heads/master
| 2022-10-15T03:48:27.767445
| 2019-04-03T16:56:55
| 2019-04-03T16:56:55
| 179,291,180
| 0
| 1
|
CC0-1.0
| 2022-10-09T13:00:52
| 2019-04-03T13:01:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,930
|
py
|
from __future__ import unicode_literals
from chatterbot.conversation import Statement
from .best_match import BestMatch
class LowConfidenceAdapter(BestMatch):
"""
Returns a default response with a high confidence
when a high confidence response is not known.
:kwargs:
* *threshold* (``float``) --
The low confidence value that triggers this adapter.
Defaults to 0.65.
        * *default_response* (``str``) or (``iterable``) --
          The response returned by this logic adapter.
        * *response_selection_method* (``str``) or (``callable``)
          The response selection method.
          Defaults to ``get_first_response``.
"""
def __init__(self, **kwargs):
super(LowConfidenceAdapter, self).__init__(**kwargs)
self.confidence_threshold = kwargs.get('threshold', 0.65)
default_responses = kwargs.get(
'default_response', "I'm sorry, I do not understand."
)
# Convert a single string into a list
if isinstance(default_responses, str):
default_responses = [
default_responses
]
self.default_responses = [
Statement(text=default) for default in default_responses
]
def process(self, input_statement):
"""
Return a default response with a high confidence if
a high confidence response is not known.
"""
# Select the closest match to the input statement
closest_match = self.get(input_statement)
# Choose a response from the list of options
response = self.select_response(input_statement, self.default_responses)
        # Return the default with high confidence only when the best match falls below the threshold
if closest_match.confidence < self.confidence_threshold:
response.confidence = 1
else:
response.confidence = 0
return response
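# Typical wiring for this adapter in the ChatterBot versions that ship it
# (a sketch; the bot name and threshold value are illustrative):
#
#   from chatterbot import ChatBot
#   bot = ChatBot(
#       'Example Bot',
#       logic_adapters=[{
#           'import_path': 'chatterbot.logic.LowConfidenceAdapter',
#           'threshold': 0.65,
#           'default_response': "I'm sorry, I do not understand."
#       }]
#   )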
|
[
"tpa244@uky.edu"
] |
tpa244@uky.edu
|