| blob_id (string, 40) | directory_id (string, 40) | path (string, 2-616) | content_id (string, 40) | detected_licenses (list, 0-69) | license_type (2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (246 classes) | content (string, 2-10.3M) | authors (list, 1) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1bc2faa9d32323f38e27fe0dd5fc27f43aae4909
|
0acfb421ed222d1e9f2b9eaebec589ccf44a19cf
|
/base_example/products/products/dependencies.py
|
58517a6f97a4c85c9b5866cb0fdb67fcd3c38618
|
[] |
no_license
|
pap/simplebank
|
bd155a9d74996428613cbc028e7edcc6f0554489
|
0b0e8a9def25d2a8b466e0eb149567fd6e7b2de8
|
refs/heads/master
| 2020-12-02T08:13:01.311836
| 2017-07-10T13:59:50
| 2017-07-10T15:26:05
| 96,788,715
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,960
|
py
|
from nameko.extensions import DependencyProvider
import redis
from products.exceptions import NotFound
REDIS_URI_KEY = 'REDIS_URI'
class StorageWrapper:
"""
Product storage
A very simple example of a custom Nameko dependency. Simplified
implementation of products database based on Redis key value store.
Handling the product ID increments or keeping sorted sets of product
names for ordering the products is out of the scope of this example.
"""
NotFound = NotFound
def __init__(self, client):
self.client = client
def _format_key(self, product_id):
return 'products:{}'.format(product_id)
def _from_hash(self, document):
return {
'id': document[b'id'].decode('utf-8'),
'title': document[b'title'].decode('utf-8'),
'passenger_capacity': int(document[b'passenger_capacity']),
'maximum_speed': int(document[b'maximum_speed']),
'in_stock': int(document[b'in_stock'])
}
def get(self, product_id):
product = self.client.hgetall(self._format_key(product_id))
if not product:
raise NotFound('Product ID {} does not exist'.format(product_id))
else:
return self._from_hash(product)
def list(self):
keys = self.client.keys(self._format_key('*'))
for key in keys:
yield self._from_hash(self.client.hgetall(key))
def create(self, product):
self.client.hmset(
self._format_key(product['id']),
product)
def decrement_stock(self, product_id, amount):
return self.client.hincrby(
self._format_key(product_id), 'in_stock', -amount)
class Storage(DependencyProvider):
def setup(self):
self.client = redis.StrictRedis.from_url(
self.container.config.get(REDIS_URI_KEY))
def get_dependency(self, worker_ctx):
return StorageWrapper(self.client)
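# --- Added sketch (not part of the original file): a minimal Nameko service
# wiring in the Storage dependency above. ProductsService and its method are
# illustrative assumptions, not the original project's code.
from nameko.rpc import rpc

class ProductsService:
    name = 'products'
    storage = Storage()

    @rpc
    def get(self, product_id):
        # each worker receives a StorageWrapper from Storage.get_dependency()
        return self.storage.get(product_id)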
|
[
"odesassossegado@gmail.com"
] |
odesassossegado@gmail.com
|
c10fc81b2955f4a8bf4b961258893fbe33897bee
|
0cb075ce26f4b1562347f04fe8612e2acf726863
|
/calendars/views/occurrences.py
|
a52e6640b9394399f8fa43698fc3b0f13cdfdc51
|
[
"BSD-2-Clause"
] |
permissive
|
wkrause13/django-calendar
|
84c1d05f721025ff8ebc11da3f6590709d1750e6
|
5cec7f8ac49637a02e331064d470255d1cbaf096
|
refs/heads/master
| 2020-12-25T17:37:54.881236
| 2013-01-03T10:44:56
| 2013-01-03T10:44:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,941
|
py
|
# -*- coding: utf-8 -*-
'''
Created on Mar 20, 2011
@author: Mourad Mourafiq
@copyright: Copyright © 2011
other contributors:
'''
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext, Context, loader
from django.http import Http404, HttpResponse, HttpResponseRedirect, HttpResponseServerError, HttpResponseForbidden, HttpResponseNotAllowed
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.views.decorators.csrf import csrf_exempt, requires_csrf_token
from django.utils import simplejson
from calendars.utilis import fetch_from_url_occ, errors_as_json
from calendars.forms.occurrence import OccurrenceForm
from calendars.models.cals import Occurrence
@login_required
def view_occ_date(request, event_slug):
"""
view an occurrence by date (for non persisted occurrences)
"""
(event, err, occurrence) = fetch_from_url_occ(request, event_slug)
if err:
return err
if not occurrence.cancelled:
c = RequestContext(request, {'occurrence': occurrence,
})
return render_to_response('calendars/occurrence_view.html', c)
@csrf_exempt
@requires_csrf_token
@login_required
def cancel_occ_date(request, event_slug):
"""
cancel a non persisted occurrence
"""
(event, err, occurrence) = fetch_from_url_occ(request, event_slug)
if err:
return err
if not occurrence.cancelled:
next = event.get_absolute_url()
occurrence.cancel()
return HttpResponseRedirect(next)
@login_required
def edit_occ_date(request, event_slug):
"""
edit an unpersisted occurrence
"""
(event, err, occurrence) = fetch_from_url_occ(request, event_slug)
if err:
return err
if not occurrence.cancelled:
form = OccurrenceForm(data=request.POST or None, instance=occurrence)
if request.method == 'POST':
if form.is_valid():
occurrence = form.save(commit=False)
occurrence.event = event
occurrence.save()
if not request.is_ajax():
return HttpResponseRedirect(occurrence.get_absolute_url())
response = ({'success':'True'})
else:
response = errors_as_json(form)
if request.is_ajax():
json = simplejson.dumps(response, ensure_ascii=False)
return HttpResponse(json, mimetype="application/json")
return render_to_response('calendars/occurrence_edit.html', {
'occ_form': form,
'occurrence': occurrence,
'action' : occurrence.get_edit_url(),
'event' : occurrence.event,
}, context_instance=RequestContext(request))
@login_required
def view_occ(request, occurrence_id):
"""
view an occurrence with its id (for persisted occurrences)
"""
occurrence = get_object_or_404(Occurrence, id=occurrence_id)
if not occurrence.cancelled:
c = RequestContext(request, {'occurrence': occurrence,
})
return render_to_response('calendars/occurrence_view.html', c)
@csrf_exempt
@requires_csrf_token
@login_required
def cancel_occ(request, occurrence_id):
"""
cancel a persisted occurrence
"""
occurrence = get_object_or_404(Occurrence, id=occurrence_id)
next = occurrence.event.get_absolute_url()
occurrence.cancel()
return HttpResponseRedirect(next)
@login_required
def reactivate_occ(request, occurrence_id):
"""
reactivate an occurrence
"""
occurrence = get_object_or_404(Occurrence, id=occurrence_id)
occurrence.uncancel()
return HttpResponseRedirect(occurrence.get_absolute_url())
@login_required
def edit_occ(request, occurrence_id):
"""
edit a persisted occurrence
"""
occurrence = get_object_or_404(Occurrence, id=occurrence_id)
if not occurrence.cancelled:
form = OccurrenceForm(data=request.POST or None, instance=occurrence)
if request.method == 'POST':
if form.is_valid():
occurrence = form.save()
if not request.is_ajax():
return HttpResponseRedirect(occurrence.get_absolute_url())
response = ({'success':'True'})
else:
response = errors_as_json(form)
if request.is_ajax():
json = simplejson.dumps(response, ensure_ascii=False)
return HttpResponse(json, mimetype="application/json")
return render_to_response('calendars/occurrence_edit.html', {
'occ_form': form,
'occurrence': occurrence,
'action' : occurrence.get_edit_url(),
'event' : occurrence.event,
}, context_instance=RequestContext(request))
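# Compatibility note (added): this view module targets old Django APIs.
# django.utils.simplejson was removed in Django 1.5, HttpResponse's mimetype=
# argument in 1.7 (use content_type=), render_to_response in 3.0, and
# request.is_ajax() in 4.0, so the file runs unmodified only on similarly old Django.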
|
[
"pipado@pipado"
] |
pipado@pipado
|
7f9f289a51777d3b315128656891fedee5087005
|
47fa5e7d9ebd04d3c9fe1a7a4c4fb1bd62050f5e
|
/geothmetic_meandian.py
|
35220f896931ff0b05ecf6f4037230e388aabd3b
|
[] |
no_license
|
setalyas/odd-means
|
438c1826d669f48100017c5014af8d3475c85d8e
|
db53c535e1e52ef7e70123d11860eef56dd183b1
|
refs/heads/master
| 2023-03-21T23:02:22.483373
| 2021-03-11T03:39:46
| 2021-03-11T03:39:46
| 235,227,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,751
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 11 03:10:11 2021
@author: setat
Geothmetic meandian: calculate the arithmetic mean, median, and geometric mean
for a list of numbers, until they converge.
From https://xkcd.com/2435/
"""
from scipy.stats import gmean
import numpy as np
trial = np.array([1, 1, 2, 3, 5])
print(np.mean(trial))
print(np.median(trial))
print(gmean(trial))
def gmdn_step(a_list):
"""Take a list one step towards the geothmetic meandian.
Parameters
----------
a_list : list
A list of positive numbers.
Returns
-------
gmdned : list
    The geometric mean, arithmetic mean, and median of the list,
    sorted in ascending order."""
if sum(np.array(a_list) > 0) != len(a_list):
raise Exception("Sorry, only positive number lists allowed.")
a_mean = np.mean(a_list)
g_mean = gmean(a_list)
medn = np.median(a_list)
gmdned = np.array([g_mean, a_mean, medn])
return sorted(gmdned)
print(gmdn_step(trial))
print(gmdn_step(gmdn_step(trial)))
def gmdn(a_list, places):
"""Take a list to its geothmethic meandian.
Parameters
----------
a_list : list
A list of numbers.
places : int
Desired precision for how many decimal places the averages should match
to.
Returns
-------
gmdned : list
    The three averages after convergence (all within 10**(-places) of
    one another)."""
counter = 0
gmdned = a_list
while max(gmdned) - min(gmdned) > 10 ** (-places):
gmdned = gmdn_step(gmdned)
counter += 1
print("Converged within {} steps.".format(counter))
return gmdned
gmdn(trial, 3)
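# (Added check) Per xkcd 2435, F(1, 1, 2, 3, 5) converges to roughly 2.089,
# so gmdn(trial, 3) should report its step count and return values near that.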
|
[
"setatimmi@gmail.com"
] |
setatimmi@gmail.com
|
9fbddab470ce95d6a31bb446fcd8a7ee812aa1d0
|
5399dd4580ea3f528753bc8b52a981743d62f8bb
|
/keras/keras26_LSTM_hamsu.py
|
10977c12c7ffbf0c14ef67ef7f6d8b6f2e3211d9
|
[] |
no_license
|
iwillbeaprogramer/Study
|
3ac7c118ffe3981d78b4ad263cb62432eae13970
|
3bfe571da5bbfc545b994e5878e217f9306bde14
|
refs/heads/main
| 2023-05-07T16:31:05.564973
| 2021-05-27T14:50:00
| 2021-05-27T14:50:00
| 324,044,441
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,598
|
py
|
# keras23_LSTM3_scale recoded in the functional API style
import numpy as np
x = np.array([[1,2,3],[2,3,4],[3,4,5],[4,5,6],[5,6,7],[6,7,8],[7,8,9],[8,9,10],[9,10,11],[10,11,12],[20,30,40],[30,40,50],[40,50,60]])
y = np.array([4,5,6,7,8,9,10,11,12,13,50,60,70])
x_pred = np.array([50,60,70]).reshape(1,3,1)
from tensorflow.keras.models import Sequential,Model
from tensorflow.keras.layers import Dense,LSTM,Input
from sklearn.metrics import r2_score
x = x.reshape(13,3,1)
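# (Added note) Keras LSTM layers expect 3-D input shaped
# (samples, timesteps, features): here 13 samples of 3 timesteps, 1 feature each.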
inputs = Input(shape = (3,1))
lstm = LSTM(1024,activation='relu')(inputs)
dense1 = Dense(512,activation='relu')(lstm)
dense2 = Dense(256,activation='relu')(dense1)
dense3 = Dense(128,activation='relu')(dense2)
dense4 = Dense(64,activation='relu')(dense3)
dense5 = Dense(32,activation='relu')(dense4)
dense6 = Dense(8,activation='relu')(dense5)
dense7 = Dense(4,activation='relu')(dense6)
outputs = Dense(1)(dense7)
model = Model(inputs,outputs)
# model = Sequential()
# model.add(LSTM(1024,input_shape=(3,1),activation='relu'))
# model.add(Dense(512,activation='relu'))
# model.add(Dense(256,activation='relu'))
# model.add(Dense(128,activation='relu'))
# model.add(Dense(64,activation='relu'))
# model.add(Dense(32,activation='relu'))
# model.add(Dense(16,activation='relu'))
# model.add(Dense(8,activation='relu'))
# model.add(Dense(4,activation='relu'))
# model.add(Dense(1))
model.compile(loss='mse',optimizer='adam')
model.fit(x,y,epochs=500,batch_size=1)
loss = model.evaluate(x,y,batch_size=1)
y_pred = model.predict(x_pred)
print(y_pred)
print('loss : ',loss)
'''
[[81.13962]]
[[80.14889]]
loss : 0.05985087901353836
'''
|
[
"wisixicidi@gmail.com"
] |
wisixicidi@gmail.com
|
bc34826491a3f48e8a8c9762836e87d8d850ba07
|
da87574f78a9e8151747a5e55359ca71e1c19470
|
/pyramid_google_login/exceptions.py
|
a49184ddf174061ea6df30dc32eb21750ecd2ff8
|
[] |
no_license
|
mtlpy/pyramid_google_login
|
2bfc1adaaa76a60a1b26f433479a0a85893314c0
|
d71444a701d7a6cb82dc2a277d41f637c86a4d83
|
refs/heads/master
| 2021-09-28T03:23:07.326642
| 2018-11-13T21:32:48
| 2018-11-13T21:32:48
| 42,904,496
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 101
|
py
|
class Base(Exception):
pass
class AuthFailed(Base):
pass
class ApiError(Base):
pass
|
[
"hadrien@ectobal.com"
] |
hadrien@ectobal.com
|
2a46cba90659a56d1af070ee76242a046edd72a9
|
ff12b271c7538f0621b88e567b315d5bb44166af
|
/ambari_monitor/hbase_monitor/hbase_monitor_v2/conn_db.py
|
d483eee20dceb6a566d0b5d5b49a331740dd2f1d
|
[] |
no_license
|
witnesslq/big_data_operation
|
23ca6afd2f69fbe2b4f9debea4bd2f49f6d4a1c8
|
829422bfd3c52fbd99e0b54e3da7b9ac7ec4f3cd
|
refs/heads/main
| 2023-06-06T22:17:15.572951
| 2021-07-13T14:34:18
| 2021-07-13T14:34:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,630
|
py
|
#!/usr/bin/env python
# -*-coding:utf-8 -*-
# ***************************************************************************
# File name   : conn_db.py
# Description : migrate Hive tables
# Input tables :
# Output tables:
# Author      : hyn
# Created     : 2020-08-08
# Change log  :
# Modified    :
# ***************************************************************************
# Usage: python conn_db.py
# ***************************************************************************
import os
import sys
from datetime import datetime
import datetime as date_time
import pymysql
mysql_sh = "mysql -h 172.19.168.22 -P 3308 -u zhao -pzhao zhao -e ' "
# Connect to the database
def conn_db():
conn = pymysql.connect(host="192.168.195.233", port=20031, user="csapdmcfg", passwd="iEXIMt3w!TFL9vkO", db="csapdmcfg", charset="utf8")
return conn
# Query data
def select(sql):
conn = conn_db()
cursor = conn.cursor()
cursor.execute(sql)
result = cursor.fetchall()
cursor.close()
conn.close()
# print result
return result
# Insert or update data
def insert(sql):
conn = conn_db()
cursor = conn.cursor()
cursor.execute(sql)
result = cursor.fetchall()
conn.commit()
cursor.close()
conn.close()
# print type(result)
# print result
return result
# Batch insert or update data
def insert_batch(sql_list):
conn = conn_db()
cursor = conn.cursor()
for sql in sql_list:
cursor.execute(sql)
conn.commit()
cursor.close()
conn.close()
# result = cursor.fetchall()
# print type(result)
# print result
return
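# Added usage sketch (table names and SQL are illustrative, not from the original):
# rows = select("SELECT id, name FROM some_table LIMIT 10")
# insert("UPDATE some_table SET name = 'x' WHERE id = 1")
# insert_batch(["INSERT INTO t VALUES (1)", "INSERT INTO t VALUES (2)"])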
|
[
"1342831695@qq.com"
] |
1342831695@qq.com
|
f0eefe22562432df713f9a164d1362e2892d2ea0
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03495/s527457667.py
|
af89959a94f603b8e66e9c604d4ff5d4f266dce7
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
n, k = map(int, input().split())
a = list(map(int, input().split()))
ball = {}
for i in a:
if i in ball:
ball[i] += 1
else:
ball[i] = 1
ball = sorted(ball.items(), key=lambda x: -x[1])
ans = 0
if len(ball) > k:
for i in range(k):
ans += ball[i][1]
ans = n - ans
print(ans)
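# (Added explanation) With at most k distinct values allowed, it is optimal to
# keep the k most frequent values and rewrite every other ball, so the answer
# is n minus the sum of the k largest counts.
# Example: n=5, k=2, a=[1,1,2,2,3] -> keep 1 and 2, rewrite one ball -> 1.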
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
b072a1621c0b87142854230caf8d9c62cc21b634
|
64de13c52b6ac0643bfd8a6c9d4edefc48ea763d
|
/Modules/logparser.py
|
1197a28b0798f947eb2005dfc8d9d95e6e53a76c
|
[] |
no_license
|
AhmedAyman1196/PenHelper
|
35671330aa6dbfd2f7427aedd2148bd3cdc8dd8d
|
e46efe9dd9ce2a74082e3ccd9c35560bedb957b9
|
refs/heads/master
| 2020-12-13T21:02:47.299460
| 2020-03-26T13:06:59
| 2020-03-26T13:06:59
| 234,529,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,031
|
py
|
import re
class parser:
def __init__(self, file):
self.file = file
f = open(file, "r")
self.content = f.read().split("\n") # split log file by line
# urlRe = re.compile(r'(http://|https).+') --> extracting urls (work in progress)
ipRe = re.compile(r'\d?\d?\d\.\d?\d?\d\.\d?\d?\d\.\d?\d?\d')
ipDict = dict() # this will map each IP to its number of appearances
for i in self.content:
try :
ip = ipRe.search(i).group(0)
if ip in ipDict:
ipDict[ip] += 1
else :
ipDict[ip] = 1
except AttributeError:
    # the regex found no IP on this line; skip it
    pass
self.ipDict = ipDict
def printFile(self):
print("file is " + self.file)
def printIps(self):
res = ""
print("2y 7aga")
for i in self.ipDict :
res += "IP : " + i +"\nAppearences : " + str(self.ipDict[i]) +"\n"
return res
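# Added usage sketch ('access.log' is an illustrative path):
# p = parser('access.log')
# p.printFile()
# print(p.printIps())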
|
[
"ahmed_ayman1196@yahoo.com"
] |
ahmed_ayman1196@yahoo.com
|
e989dc83551e2ec60c25bdbda0740cea8c246b3a
|
9a5ba94d5b5742030e2bd94d303c8b528515ff32
|
/python_scripts/blueRapidTest.py
|
e683682ec9e7397f1a2c8a542ebdf86b635da3ba
|
[] |
no_license
|
locknono/twitter-sampling
|
8b0167585ac34a5f5413cd72c57a091269dbbd72
|
41b4122386345bce90179c2867519f8e50784efc
|
refs/heads/master
| 2023-01-24T07:27:26.408360
| 2022-12-20T10:09:41
| 2022-12-20T10:09:41
| 172,686,848
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,640
|
py
|
import g
import json
from blueRapid import blueRapid, Point
import logging
import numpy as np
from blueRapidEstimate import getRalationshipList, compareRelationshipList
if __name__ == '__main__':
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
idLdaDict = {}
estimates = []
disks = []
with open(g.ldaDir + 'idLdaDict.json', 'r', encoding='utf-8') as f:
idLdaDict = json.loads(f.read())
with open(g.dataPath + 'blueNoise/samplePoints-500-2485-0.12732489624429985.json', 'r', encoding='utf-8') as f:
points = json.loads(f.read())
disks = []
for p in points:
disk = []
disk.append(Point(p['id'], idLdaDict[p['id']]))
for p2 in p['pointsInDisk']:
disk.append(Point(p2['id'], idLdaDict[p2['id']]))
disks.append(disk)
originalValues = np.full(g.topicNumber, 0).tolist()
for k in idLdaDict:
for i in range(len(idLdaDict[k])):
originalValues[i] = idLdaDict[k][i]
l1 = getRalationshipList(originalValues)
ratioList = []
for i in range(0, 10):
estimates = blueRapid(disks, dimension=g.topicNumber, delta=0.05, c=1)
if estimates is not None:
l2 = getRalationshipList(estimates)
ratio = compareRelationshipList(l1, l2)
ratioList.append(ratio)
else:
ratioList.append(None)
total = 0  # avoid shadowing the built-in sum()
count = 0
for v in ratioList:
    if v is not None:
        total += v
        count += 1
try:
    print('same ratio:' + str(total / count))
except Exception as e:
print(e)
|
[
"guozy0103@outlook.com"
] |
guozy0103@outlook.com
|
1e4ec69660f5980e00461dbe5783a03c23174204
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4131/codes/1758_1580.py
|
c80833ecede95654db5b447bb5eb5803ca08197f
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
from numpy import *
v=array(eval(input("notas:")))*1.0
n=array(eval(input("alunos:")))
i=0
faltas=0
aprovados=0
reprovados=0
soma=0
while(i<size(v)):
if(v[i]==-1.0):
faltas+=1
if(v[i]>=6):
aprovados+=1
soma+=v[i]
if(v[i]<6.0 and v[i]!=-1.0):
reprovados+=1
soma+=v[i]
if(v[i]==max(v)):
nome = n[i]
i=i+1
print(faltas)
print(aprovados)
print(reprovados)
print(round(soma/(aprovados+reprovados),2))
print(nome)
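# (Added example run; the prompts take Python literals because of eval())
# notas: [7.0, -1, 5.5, 9.0]   alunos: ["ana", "bia", "caio", "duda"]
# -> faltas (absences) = 1, aprovados (passed) = 2, reprovados (failed) = 1,
#    mean = round(21.5 / 3, 2) = 7.17, nome (top student) = "duda"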
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
4a9a1969193c0c950911bbba6aaef225095e8732
|
5cbf1d3b17d04931d6e90ad2e79fbb8c668cca21
|
/page/setting_page.py
|
1d867934592a9f772a8ed4abf96c57befb268eb0
|
[] |
no_license
|
zqx7629487/test_bnhl_login
|
b1d91aeb7bd61e7a851e65fc6190de3092cbb5e7
|
4ad320369d614cd3f1c917866b6eb9bd6b7c6588
|
refs/heads/master
| 2020-05-29T20:42:31.879683
| 2019-05-30T10:54:36
| 2019-05-30T10:54:36
| 189,360,546
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 569
|
py
|
from Base.base import Base
from page.page_element import Elements
class SettingPage(Base):
def __init__(self, driver):
Base.__init__(self, driver)
def go_to_logout(self, tag=1):
# Swipe the screen
self.slide_page()
# Tap the logout button
self.click_element(Elements.setting_logout_btn_id)
if int(tag) == 1:
# Tap to confirm logout
self.click_element(Elements.setting_acc_quit_btn_id)
else:
# Tap to cancel logout
self.click_element(Elements.setting_dis_quit_btn_id)
|
[
"wtfqxl0@qq.com"
] |
wtfqxl0@qq.com
|
087a2ceba2542308335379769432cd401ccaefea
|
7c3fc7c125961d931b773691890812ee2af7f4d8
|
/entries/request.py
|
d04deb926e6e0723b767b94071c9ffe5b980b369
|
[] |
no_license
|
jamalbrowning/daily_journal_server
|
67397683184bc69daaeeb71bd2553a2a5aeba1e7
|
c6eb42ffebe9e7c033d13fbfa48b61568d36c940
|
refs/heads/main
| 2023-01-06T09:47:51.371369
| 2020-10-30T00:37:40
| 2020-10-30T00:37:40
| 303,887,764
| 0
| 0
| null | 2020-10-30T00:37:41
| 2020-10-14T03:02:29
|
Python
|
UTF-8
|
Python
| false
| false
| 3,762
|
py
|
import sqlite3
import json
from models.entry import Entry
from models.mood import Mood
def get_all_entries():
with sqlite3.connect("./dailyjournal.db") as conn:
conn.row_factory = sqlite3.Row
db_cursor = conn.cursor()
db_cursor.execute("""
SELECT
a.id,
a.concept,
a.entry,
a.date,
a.moodId,
m.label mood_label
FROM entries a
JOIN moods m
ON m.id = a.moodId
""")
entries = []
dataset = db_cursor.fetchall()
for row in dataset:
entry = Entry(row['id'], row['concept'], row['entry'], row['date'], row['moodId'])
mood = Mood(row['moodId'], row['mood_label'])
entry.mood = mood.__dict__
entries.append(entry.__dict__)
return json.dumps(entries)
def get_single_entry(id):
with sqlite3.connect("./dailyjournal.db") as conn:
conn.row_factory = sqlite3.Row
db_cursor = conn.cursor()
# Use a ? parameter to inject a variable's value
# into the SQL statement.
db_cursor.execute("""
SELECT
a.id,
a.concept,
a.entry,
a.date,
a.moodId
FROM entries a
WHERE a.id = ?
""", ( id, ))
data = db_cursor.fetchone()
entry = Entry(data['id'], data['concept'], data['entry'], data['date'], data['moodId'])
return json.dumps(entry.__dict__)
def search_for_entry(search_term):
with sqlite3.connect("./dailyjournal.db") as conn:
conn.row_factory = sqlite3.Row
db_cursor = conn.cursor()
db_cursor.execute("""
SELECT
a.id,
a.concept,
a.entry,
a.date,
a.moodId
FROM entries a
WHERE a.entry LIKE ?
""", ( '%'+search_term+'%', ))
entries = []
dataset = db_cursor.fetchall()
for row in dataset:
entry = Entry(row['id'], row['concept'], row['entry'], row['date'], row['moodId'])
entries.append(entry.__dict__)
return json.dumps(entries)
def delete_entry(id):
with sqlite3.connect("./dailyjournal.db") as conn:
db_cursor = conn.cursor()
db_cursor.execute("""
DELETE FROM entries
WHERE id = ?
""", (id, ))
def new_journal_entry(new_entry):
with sqlite3.connect("./dailyjournal.db") as conn:
db_cursor = conn.cursor()
db_cursor.execute("""
INSERT INTO entries
( concept, entry, date, moodId )
VALUES
( ?, ?, ?, ?);
""", (new_entry['concept'],new_entry['entry'],
new_entry['date'],new_entry['moodId'], ))
# The `lastrowid` property on the cursor will return
# the primary key of the last thing that got added to
# the database.
id = db_cursor.lastrowid
# Add the `id` property to the entry dictionary that
# was sent by the client so that the client sees the
# primary key in the response.
new_entry['id'] = id
return json.dumps(new_entry)
def update_entry(id, new_entry):
with sqlite3.connect("./dailyjournal.db") as conn:
db_cursor = conn.cursor()
db_cursor.execute("""
UPDATE entries
SET
concept = ?,
entry = ?,
date = ?,
moodId = ?
WHERE id = ?
""", (new_entry['concept'], new_entry['entry'],
new_entry['date'], new_entry['moodId'], id, ))
rows_affected = db_cursor.rowcount
if rows_affected == 0:
return False
else:
return True
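# Added usage sketch (field values are illustrative):
# new_journal_entry({'concept': 'SQL', 'entry': 'Joined the moods table',
#                    'date': '2020-10-29', 'moodId': 1})
# update_entry(1, {...}) returns True when a row was updated, else False.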
|
[
"jamalebrowning@gmail.com"
] |
jamalebrowning@gmail.com
|
8497b6a892e539cd478c1a918d43d95089605987
|
20343e8a8435b3f839d5abd0c4063cf735f43341
|
/Experiment/ConfigDialog/StudentInfoGridTable.py
|
e5f398f3666aef303c0be98cf270feb4f0160c84
|
[] |
no_license
|
alading241/MoDeng
|
948f2099e2f7e4548d6e477b6e06b833bdf4f9bb
|
01819e58943d7d1a414714d64aa531c0e99dfe22
|
refs/heads/master
| 2021-05-23T11:39:41.326804
| 2020-04-05T06:06:01
| 2020-04-05T06:06:01
| 253,269,397
| 1
| 0
| null | 2020-04-05T15:38:33
| 2020-04-05T15:38:33
| null |
UTF-8
|
Python
| false
| false
| 3,910
|
py
|
# coding=utf-8
import wx.grid as grid
import wx
class StudentInfoGridTable(grid.GridTableBase):  # PyGridTableBase is the deprecated classic-wxPython alias
    def __init__(self, datas):
        grid.GridTableBase.__init__(self)
self.datas = datas
self.colLabels = [u'姓名', u'性别', u'学校', u'专业', u'年级']
self.isModified = False
self.odd = grid.GridCellAttr()
self.odd.SetReadOnly(False)
self.odd.SetBackgroundColour('yellow')
self.even = grid.GridCellAttr()
self.even.SetReadOnly(False)
def SetValue(self, row, col, value):
print(str(row) + ";" + str(col) + ";" + value)
def innerSetValue(row, col, value):
try:
self.datas[row][col] = value
except IndexError:
# add a new row
self.datas.append([''] * self.GetNumberCols())
innerSetValue(row, col, value)
# tell the grid we've added a row
msg = grid.GridTableMessage(self, # The table
grid.GRIDTABLE_NOTIFY_ROWS_APPENDED, # what we did to it
1 # how many
)
self.GetView().ProcessTableMessage(msg)
innerSetValue(row, col, value)
def GetAttr(self, row, col, kind):
attr = [self.even, self.odd][row % 2]
attr.IncRef()
return attr
def GetNumberRows(self):
return len(self.datas)
def GetNumberCols(self):
return len(self.colLabels)
def GetColLabelValue(self, col):
return self.colLabels[col]
def GetRowLabelValue(self, row):
return str(row)
def GetValue(self, row, col):
return self.datas[row][col]
def IsModified(self):
return self.isModified
def InsertRows(self, pos=1, newData=None):
if newData is None:
newData = [u'', u'', u'', u'', u'']
self.datas.insert(pos, newData)
self.isModified = True
gridView = self.GetView()
gridView.BeginBatch()
insertMsg = wx.grid.GridTableMessage(self, wx.grid.GRIDTABLE_NOTIFY_ROWS_INSERTED, pos, 1)
gridView.ProcessTableMessage(insertMsg)
gridView.EndBatch()
getValueMsg = wx.grid.GridTableMessage(self, wx.grid.GRIDTABLE_REQUEST_VIEW_GET_VALUES)
gridView.ProcessTableMessage(getValueMsg)
# if self.onGridValueChanged:
# self.onGridValueChanged()
return True
def AppendRows(self, newData=None):
if newData is None:
newData = [u'居里夫人1', u'女', u'WUST', u'化学', u'研3']
self.datas.append(newData)
self.isModified = True
gridView = self.GetView()
gridView.BeginBatch()
appendMsg = wx.grid.GridTableMessage(self, wx.grid.GRIDTABLE_NOTIFY_ROWS_APPENDED, 1)
gridView.ProcessTableMessage(appendMsg)
gridView.EndBatch()
getValueMsg = wx.grid.GridTableMessage(self, wx.grid.GRIDTABLE_REQUEST_VIEW_GET_VALUES)
gridView.ProcessTableMessage(getValueMsg)
# if self.onGridValueChanged:
# self.onGridValueChanged()
return True
def DeleteRows(self, pos=0, numRows=1):
if self.datas is None or len(self.datas) == 0:
return False
for rowNum in range(0, numRows):
self.datas.remove(self.datas[pos + rowNum])
gridView = self.GetView()
gridView.BeginBatch()
deleteMsg = wx.grid.GridTableMessage(self, wx.grid.GRIDTABLE_NOTIFY_ROWS_DELETED, pos, numRows)
gridView.ProcessTableMessage(deleteMsg)
gridView.EndBatch()
getValueMsg = wx.grid.GridTableMessage(self, wx.grid.GRIDTABLE_REQUEST_VIEW_GET_VALUES)
gridView.ProcessTableMessage(getValueMsg)
# if self.onGridValueChanged:
# self.onGridValueChanged()
return True
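# Added usage sketch (parent_frame is an assumed, pre-existing wx.Frame):
# data = [[u'Alice', u'F', u'WUST', u'CS', u'Y1']]
# table = StudentInfoGridTable(data)
# grid_view = wx.grid.Grid(parent_frame)
# grid_view.SetTable(table, takeOwnership=True)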
|
[
"189916591@qq.com"
] |
189916591@qq.com
|
ade4e9790cde04ce39668c5e9228287df47dccf4
|
b7ea1c6fd5e83c045f21ffb3157c782bba01f9fc
|
/example.py
|
4a81ef5ab11d1fd3243b13b18166dda8fd59ed9c
|
[] |
no_license
|
aditya803/Coursera-Github-Course
|
fe0a94942d8137bbfbff905b028e1656c513c352
|
53f3926de052d77942b27cf1a75424551ea9b205
|
refs/heads/master
| 2022-11-13T02:18:16.312247
| 2020-07-02T18:13:21
| 2020-07-02T18:13:21
| 276,713,982
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 51
|
py
|
def git_operation():
print("yeet")
git_operation()
|
[
"adityapan25@gmail.com"
] |
adityapan25@gmail.com
|
884b08fd9998783a567a073b0aa6b2f9398141c2
|
d819727b9a9864f4c8ff1010f0a322b4fe878252
|
/Practice/a3Variable Reassignment.py
|
94a678b4e729a022c82c475c5813ddc36a47d708
|
[] |
no_license
|
KostaPapa/Python_Works
|
e32f40db163cc4fc6851f19c07cd7e002342f12e
|
03ae6a3e6b5430a4971b04ed9402eac2359b959b
|
refs/heads/master
| 2021-01-21T01:59:39.363518
| 2014-10-12T00:23:23
| 2014-10-12T00:23:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
import os
def variable_reassignment ():
''' This function will reassign a new value to a variable. '''
dollar = 3000 # Initial value of the variable dollar
print "I had $" , dollar , "in my account."
dollar = 1000000 # New value of the variable dollar
print "Now I have $", dollar, "in my account."
variable_reassignment ()
os.system ("pause")
|
[
"kp967@nyu.edu"
] |
kp967@nyu.edu
|
ded5374c01de41b2b14b1533154547bb1ed7d5d2
|
d5df2e08a1d4064d0c34db7e1e23823b44fa62fa
|
/demo/person/tests/project/infrastructure/environments/test_loader.py
|
3e0f1747196f2c412ae2068a729eb24865c627ee
|
[
"MIT"
] |
permissive
|
sharof2000/ms-fastapi-template
|
42c1f68f3260e8facad6984abada9f3d56363e05
|
5bbd6903305db07cc18330ec86fb04ca518e9dab
|
refs/heads/main
| 2023-07-06T21:28:13.558790
| 2021-08-13T13:52:12
| 2021-08-13T13:52:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 515
|
py
|
import pytest
from project.infrastructure.environments.loader import Configs
def test_loader_success():
configs = Configs()
assert configs.get_by_key("redis")
def test_loader_error_file():
configs = Configs()
assert pytest.raises(FileNotFoundError, configs.get_by_key, "security_token", "unknow_path")
def test_loader_error_config():
path = "./project/infrastructure/__init__.py"
configs = Configs()
assert pytest.raises(Exception, configs.get_by_key, "security_token", path)
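# (Added note) pytest.raises also works as a context manager, which reads more
# clearly than the callable form above, e.g.:
# with pytest.raises(FileNotFoundError):
#     configs.get_by_key("security_token", "unknow_path")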
|
[
"farlley@live.com"
] |
farlley@live.com
|
3778a108dab9ee7a1c7d4da33e2ebcc070b23d85
|
d2384f082395812f58c1f4611db9f6032b661b6f
|
/word_count.py
|
3d7ec1e76f0b9f7ac6730af7c5648e1dfcfd2b85
|
[] |
no_license
|
JudePark96/2020-datascience-kmu-wordcount
|
2c5208a17cc5c3f9332a46577a57aa24676c9a70
|
044d6a0b43469159f9c32ff5f65a8874d0d7fbc4
|
refs/heads/master
| 2022-12-17T06:54:40.287244
| 2020-09-20T14:49:02
| 2020-09-20T14:49:02
| 295,360,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,460
|
py
|
# -*- coding: utf-8 -*-
# Problem 1: read a document and print the 1,000 most frequent words in order of frequency
__author__ = 'JudePark'
__email__ = 'judepark@kookmin.ac.kr'
__repository__ = 'https://github.com/JudePark96/2020-datascience-kmu-wordcount'
import logging
import re
import sys
from collections import defaultdict
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
vocab = defaultdict(int)
for idx, line in enumerate(sys.stdin):
# keep only ASCII characters (English letters plus some punctuation)
line = re.sub(r'[^\x00-\x7F]+', '', line.strip().lower())
# e.g. "길동 is one of Tom’s best friends." tokenizes to
# ['is', 'one', 'of', 'tom', 's', 'best', 'friends'] only if the '.' is
# stripped, so remove '.' and similar punctuation here
line = re.sub(r'[_<>,\(\)\.\'%]', '', line).split(' ')
    for word in line:
        # vocab is a defaultdict(int), so missing keys start at 0
        vocab[word] += 1
if idx % 1000 == 0:
logger.info(f'loop index: {idx}')
logger.info(f'vocabulary size: {len(vocab)}')
k = 1000
top_k = sorted(vocab, key=vocab.get, reverse=True)[:k]
for item in top_k:
print(f'{item}\t{vocab[item]}')
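# (Added note) The script reads the document from stdin, e.g.:
#   python word_count.py < input.txt > counts.tsv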
|
[
"judepark@kookmin.ac.kr"
] |
judepark@kookmin.ac.kr
|
61d23996c3389a4f24122fb099018555888ddc99
|
f5cebd54885635081aae848a75f332f326a80fad
|
/nets/vgg.py
|
2c1ca145761c20ece4dc07d150182f207ad31576
|
[] |
no_license
|
maomingyang/Siamese-pytorch
|
11ac86365cac88f2b727c5799f700146aa4b4c5e
|
63779013a21cd14c9117c13f11737b55aa957757
|
refs/heads/master
| 2022-12-10T17:25:11.856575
| 2020-09-07T05:32:31
| 2020-09-07T05:32:31
| 293,669,321
| 2
| 0
| null | 2020-09-08T01:26:12
| 2020-09-08T01:26:12
| null |
UTF-8
|
Python
| false
| false
| 2,366
|
py
|
import torch
import torch.nn as nn
try:
    from torchvision.models.utils import load_state_dict_from_url
except ImportError:
    # removed from torchvision in newer releases; torch.hub has the same helper
    from torch.hub import load_state_dict_from_url
class VGG(nn.Module):
def __init__(self, features, num_classes=1000):
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, batch_norm=False, in_channels = 3):
layers = []
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs = {
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']
}
def VGG16(pretrained, in_channels, **kwargs):
model = VGG(make_layers(cfgs["D"], batch_norm = False, in_channels = in_channels), **kwargs)
if pretrained:
state_dict = load_state_dict_from_url("https://download.pytorch.org/models/vgg16-397923af.pth", model_dir="./model_data")
model.load_state_dict(state_dict)
return model
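# Added usage sketch:
# model = VGG16(pretrained=False, in_channels=3, num_classes=1000)
# out = model(torch.randn(1, 3, 224, 224))  # -> tensor of shape (1, 1000)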
|
[
"noreply@github.com"
] |
maomingyang.noreply@github.com
|
2689ebd6f20aa132cad611d4ab5f248f9fdf380d
|
55868885e97a08b21a98f3dc8e2f81939ec6761d
|
/BadgePy/subscriber.py
|
7f0f8b06c0b81e1b716273223f0220626eff1184
|
[
"MIT"
] |
permissive
|
orrinjelo/OpenWest2018
|
a1c6305a429c86806a098fff1e91920db240b348
|
13503465c0c2cf74c3bbf77d476b2ab5e69085bf
|
refs/heads/master
| 2020-03-19T12:40:43.449446
| 2018-06-08T15:42:27
| 2018-06-08T15:42:27
| 136,533,151
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 985
|
py
|
import paho.mqtt.client as mqtt
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("feeds/color1")
client.subscribe("feeds/color2")
client.subscribe("feeds/effect")
client.subscribe("feeds/effectspeed")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
print(msg.topic+" "+str(msg.payload))
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("10.155.0.214", 1883, 60)
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
client.loop_forever()
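# (Added note) This uses the paho-mqtt 1.x callback API; paho-mqtt >= 2.0
# requires mqtt.Client(mqtt.CallbackAPIVersion.VERSION1) for these
# on_connect/on_message signatures to keep working.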
|
[
"orrin.jelo@gmail.com"
] |
orrin.jelo@gmail.com
|
b8430e364b3b0c9d528b71bbfe6536dd57a0cb27
|
d275949ee4c551b7a513e6bd0af048d8aad8b26e
|
/user/api/serializers.py
|
611d7eabe1287302056339cc18bfdf7c4648375d
|
[] |
no_license
|
djwesleyborges/apiPaintBall
|
e6a429adbf1e5dde0e901b987751317aa2489dda
|
a57f7325b69d5c339f276026e30791267bd0c0b1
|
refs/heads/master
| 2022-12-12T02:03:26.879059
| 2019-12-05T18:13:51
| 2019-12-05T18:13:51
| 172,746,032
| 0
| 0
| null | 2022-04-22T22:51:41
| 2019-02-26T16:14:57
|
Python
|
UTF-8
|
Python
| false
| false
| 2,575
|
py
|
from user.models import User
from perfil.models import Perfil
from rest_framework import serializers
from rest_framework.serializers import ModelSerializer
from perfil.api.serializers import PerfilSerializer
class UserListSerializer(ModelSerializer):
perfil = PerfilSerializer()
class Meta:
model = User
fields = ('id', 'first_name', 'last_name', 'email', 'perfil')
# read_only_fields = ('email',)
def update(self, instance, validated_data):
if validated_data.get('perfil').get('phone') is not None:
instance.perfil.phone = validated_data.get('perfil').get('phone')
if validated_data.get('perfil').get('city') is not None:
instance.perfil.city = validated_data.get('perfil').get('city')
instance.perfil.save()
return instance
class UserRegistrationSerializer(serializers.ModelSerializer):
    """Serializes registration requests and creates a new user."""
    perfil = PerfilSerializer()
# Ensure passwords are at least 8 characters long, no longer than 128
# characters, and can not be read by the client.
password = serializers.CharField(
max_length=128,
min_length=8,
write_only=True
)
# The client should not be able to send a token along with a registration
# request. Making `token` read-only handles that for us.
token = serializers.CharField(max_length=255, read_only=True)
class Meta:
model = User
# List all of the fields that could possibly be included in a request
# or response, including fields specified explicitly above.
fields = ['email', 'token', 'password', 'first_name', 'last_name', 'perfil']
def create(self, validated_data):
# Use the `create_user` method we wrote earlier to create a new user.
perfil = validated_data['perfil']
del validated_data['perfil']
per = Perfil.objects.create(**perfil)
user = User.objects.create_user(**validated_data)
user.perfil = per
user.save()
return user
# return User.objects.create_user(**validated_data)
def update(self, instance, validated_data):
if instance.perfil.phone is not None:
instance.perfil.phone = validated_data.get('perfil').get('phone')
if instance.perfil.state is not None:
instance.perfil.state = validated_data.get('perfil').get('state')
if instance.perfil.city is not None:
instance.perfil.city = validated_data.get('perfil').get('city')
return instance
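# Added usage sketch (field values and the perfil payload are illustrative):
# s = UserRegistrationSerializer(data={'email': 'a@b.c', 'password': 'secret123',
#     'first_name': 'Ana', 'last_name': 'B', 'perfil': {...}})
# if s.is_valid():
#     user = s.save()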
|
[
"djwesleyborges@gmail.com"
] |
djwesleyborges@gmail.com
|
593b64e70ff28f81f0068b7138ba2e9ba482f7f3
|
38d48857ffadf7917465036ae56d2e48a7b709ac
|
/plots.py
|
33c64a9309dcb012c720fab6acaa26d6c6f9bfe3
|
[] |
no_license
|
tlaure/PopModel2
|
b13ea0c52c9b26739644d9734c010ced6cafa1c5
|
d08ecf865d12494d900d020d04365ee7596cb7c1
|
refs/heads/master
| 2020-04-10T08:18:28.771995
| 2018-12-12T06:02:01
| 2018-12-12T06:02:01
| 160,902,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,897
|
py
|
def plotPyramid(x1,y1) :
import matplotlib.pyplot as plt
import numpy as np
y = np.arange(x1.size)
fig, axes = plt.subplots(ncols=2, sharey=True)
axes[0].barh(y, x1/1000, align='center', color='xkcd:steel blue')
axes[0].set(title='Number of Male')
axes[1].barh(y, y1/1000, align='center', color='xkcd:purplish')
axes[1].set(title='Number of Female')
axes[0].invert_xaxis()
axes[0].yaxis.tick_right()
plt.show()
def plot2Pyramid(x1,x2,y1,y2) :
import matplotlib.pyplot as plt
import numpy as np
y = np.arange(x1.size)
fig, axes = plt.subplots(ncols=2, sharey=True)
axes[0].barh(y, x2/1000, align='center', alpha=0.8, edgecolor='grey', facecolor='white')
axes[0].barh(y, x1/1000, align='center', alpha=0.3, edgecolor='white', facecolor='blue')
axes[0].set(title='Number of Male')
axes[1].barh(y, y2/1000, align='center', alpha=0.8, edgecolor='grey', facecolor='white')
axes[1].barh(y, y1/1000, align='center', alpha=0.3, edgecolor='white', facecolor='red')
axes[1].set(title='Number of Female')
axes[0].invert_xaxis()
axes[0].yaxis.tick_right()
plt.show()
def plotFun(y):
import matplotlib.pyplot as plt
plt.plot(y)
plt.show()
def plotFunL(y,x):
import matplotlib.pyplot as plt
plt.plot(y,x)
plt.show()
def plotNhor(array,step):
import matplotlib.pyplot as plt
for i in range(0,array[:,0].size,step):
plt.plot(array[i,:],label=str(i))
plt.show()
def plotNvert(array,step):
import matplotlib.pyplot as plt
import numpy as np
for i in range(0,array[0,:].size,step):
plt.plot(array[:,i],label=str(i))
plt.legend([np.arange(array[0,:].size,step)])
plt.show()
def scatterPlot(Array):
import matplotlib.pyplot as plt
plt.scatter(Array[:,0], Array[:,1])
plt.show()
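# Added usage sketch:
# import numpy as np
# male = np.array([500e3, 480e3, 300e3])
# female = np.array([490e3, 470e3, 330e3])
# plotPyramid(male, female)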
|
[
"thomas.laure@me.com"
] |
thomas.laure@me.com
|
27662a4fcf03d06563de81b5403cb694f76230a8
|
420aeef7c5408ceced77ad5d26d2b3ae7bcb1dcf
|
/Semiprime.py
|
9a3dc5bd6cb91bdf0481787253140cb5eefb61f3
|
[] |
no_license
|
Sagarmk079/Python-programs
|
bb629c8ecbbd6af3220b97cd68d577fa5cc14bdf
|
f8c80de10d44ccf82a7c6758465b07be2d3b62fe
|
refs/heads/master
| 2021-07-22T23:11:58.614385
| 2021-07-22T05:20:06
| 2021-07-22T05:20:06
| 207,059,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 805
|
py
|
import math
def checkSemiprime(num):
cnt = 0
for i in range(2, int(math.sqrt(num)) + 1):
while num % i == 0:
num /= i
cnt += 1 # Increment count
# of prime number
# If count is greater than 2,
# break loop
if cnt >= 2:
break
# If number is greater than 1, add it to
# the count variable as it indicates the
# number remain is prime number
if(num > 1):
cnt += 1
# Return '1' if count is equal to '2' else
# return '0'
return cnt == 2
c=0
n=int(input())
l=int((n/2)+1)
for i in range(2,l):
value1=checkSemiprime(i)
value2=checkSemiprime(n-i)
if(value1==True and value2==True):
print("Yes")
c=1
break
if(c==0):
print("No")
|
[
"sagarmk079@gmail.com"
] |
sagarmk079@gmail.com
|
573e195a6ee0cf64d44548b0b3cf38f0233749b1
|
7c843f80a08db6725fd8d2e85099d9e6c13f6426
|
/lib/yllibInterface.py
|
ec7a24ba3216459a602d8f2be161df434745f8a3
|
[] |
no_license
|
wanfade/scaffolding_Seg
|
e983c1d1cdd60efcd7d381728c277993a1cf4721
|
12ba8892eb44d3ce47fa2609973b0510904c4753
|
refs/heads/master
| 2023-03-16T05:57:28.808341
| 2017-11-25T13:53:11
| 2017-11-25T13:53:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
# -*- coding: utf-8 -*-
import sys
from os.path import abspath,join,dirname
yllibPath = abspath(join(dirname(abspath(__file__)),'./yl'))
if yllibPath not in sys.path:
sys.path = [yllibPath] + sys.path
import tool
import ylimg as imglib
import ylml as mllib
import ylnp as nplib
from tool import *
from ylimg import *
from ylml import *
from ylnp import *
if __name__ == '__main__':
pass
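# (Added note) Prepending ./yl to sys.path lets the bare imports above resolve
# to the bundled yl/ package without installing it.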
|
[
"ylxx@live.com"
] |
ylxx@live.com
|
f256f72a4fd3aca0163f277da7fc1482de8abfd9
|
a14862ce7c4b0adfffe2c136ccd93205e6b4c8a9
|
/getElementsByID.py
|
d698baa4813a92abff2b93bcb5c220ab09bd79f7
|
[] |
no_license
|
Ahmadaj84/GetElementByID-
|
ab99accdf95fbf3cc43ef39b5319860c210f9f76
|
e7aa5bea19e2a231d828d70e2a74d04960dfe0aa
|
refs/heads/main
| 2023-05-07T03:22:16.798586
| 2021-05-31T11:13:57
| 2021-05-31T11:13:57
| 372,476,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 614
|
py
|
import csv
# open the HTML file as text
f = open ('html.txt', 'r')
lst = []
# loop over the html text to get all the elements which have an id
for l in f:
if "id" in l:
lst.append(l)
# open a new file to write the ids of the elements
with open('out.csv' , 'w', newline = '')as file:
writer = csv.writer(file)
for i in lst:
if "input " in i:
# if the line contains "input", split on '"'; index 5 is usually the id
print(i.split('"')[5])
# write the id to the csv file
writer.writerow([i.split('"')[5]])
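# (Added worked example of the index-5 heuristic)
# '<input type="text" name="q" id="search">'.split('"')
# -> ['<input type=', 'text', ' name=', 'q', ' id=', 'search', '>']
# Index 5 is the id only when id is the third quoted attribute, so this is
# fragile; an HTML parser would be more robust.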
|
[
"noreply@github.com"
] |
Ahmadaj84.noreply@github.com
|
b6b30a5211de514ccc29a84165549898bf818ab2
|
080c13cd91a073457bd9eddc2a3d13fc2e0e56ae
|
/GIT-USERS/TOM2/cs41long_lambda_mud_server/adv_project/settings.py
|
262f1bf5e8db671d85bf94889ff91cb1a1be8b13
|
[] |
no_license
|
Portfolio-Projects42/UsefulResourceRepo2.0
|
1dccc8961a09347f124d3ed7c27c6d73b9806189
|
75b1e23c757845b5f1894ebe53551a1cf759c6a3
|
refs/heads/master
| 2023-08-04T12:23:48.862451
| 2021-09-15T12:51:35
| 2021-09-15T12:51:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,698
|
py
|
"""
Django settings for adv_project project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', cast=bool)
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'adventure',
'api',
'corsheaders',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'django.contrib.sites',
'allauth',
'allauth.account',
'rest_auth.registration',
]
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
SITE_ID = 1
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
ROOT_URLCONF = 'adv_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'adv_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
# DATABASES = {}
import dj_database_url
# DATABASES['default'] = dj_database_url.config(default=config('DATABASE_URL'), conn_max_age=600)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
db_from_env = dj_database_url.config(conn_max_age=500)
# DATABASES['default'].update(db_from_env)
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
# DATABASES['default'] = dj_database_url.config(default=config('DATABASE_URL'))
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
from rest_framework.authentication import SessionAuthentication, BasicAuthentication, TokenAuthentication
REST_FRAMEWORK = {
# 'DEFAULT_PERMISSION_CLASSES': [
# 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',
# ],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
}
CORS_ORIGIN_ALLOW_ALL=True
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
import django_heroku
django_heroku.settings(locals())
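# (Added note) decouple.config() reads these values from environment variables
# or an .env file; a minimal illustrative .env would be:
#   SECRET_KEY=change-me
#   DEBUG=True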
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
1ae4875a6b7dfa4a2bd80d10ab50d019aef43b4b
|
63eee042ccdb29c1959fa81e958efe974d14105b
|
/demo/app.py
|
cce7b5a882895551644334d3225196d4d64ab8ea
|
[] |
no_license
|
pycaret/pycaret-demo-td
|
487a5c5c93204ba06b76a6e7039e8a0a6cbab165
|
da64d3f62002f702a4f7a200de3995a02e4a3023
|
refs/heads/master
| 2022-11-30T02:04:30.320190
| 2020-07-29T13:46:35
| 2020-07-29T13:46:35
| 283,319,671
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,216
|
py
|
from flask import Flask,request, url_for, redirect, render_template, jsonify
from pycaret.regression import *
import pandas as pd
import pickle
import numpy as np
app = Flask(__name__)
model=load_model('deployment_28042020')
@app.route('/')
def home():
return render_template("home.html")
@app.route('/predict',methods=['POST'])
def predict():
int_features=[x for x in request.form.values()]
final=np.array(int_features)
col = ['age', 'sex', 'bmi', 'children', 'smoker', 'region']
data_unseen = pd.DataFrame([final], columns = col)
print(int_features)
print(final)
prediction=predict_model(model, data=data_unseen, round = 0)
prediction=int(prediction.Label[0])
return render_template('home.html',pred='Expected Bill will be {}'.format(prediction))
@app.route('/predict_api',methods=['POST'])
def predict_api():
'''
For direct API calls through requests
'''
data = request.get_json(force=True)
data_unseen = pd.DataFrame([data])
prediction = predict_model(model, data=data_unseen)
output = prediction.Label[0]
return jsonify(output)
if __name__ == '__main__':
app.run(host='0.0.0.0', debug = True)
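# Added usage sketch (host, port, and feature values are illustrative):
# curl -X POST http://localhost:5000/predict_api \
#      -H "Content-Type: application/json" \
#      -d '{"age": 30, "sex": "male", "bmi": 27.5, "children": 1,
#           "smoker": "no", "region": "southwest"}'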
|
[
"noreply@github.com"
] |
pycaret.noreply@github.com
|
8ff3ceae1eb8d3bc91dc5680fc75b817371da33d
|
e1269d7a1722a2c8900c9459554827759f0817e7
|
/solution/std_deviation.py
|
ba7692f4393170a7a27dc4f38f2a237d9d39e64f
|
[
"MIT"
] |
permissive
|
1701Payal/AsmiClass
|
e7c108fd6c165c484b0974d28969718e361240e9
|
024b55bdbc4e9e2d3f15438c5124cff98f63124c
|
refs/heads/master
| 2023-04-29T16:28:28.147434
| 2021-05-22T15:21:00
| 2021-05-22T15:21:00
| 369,841,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
import math
# list of elements to calculate mean
import csv
with open('data.csv', newline='') as f:
reader = csv.reader(f)
file_data = list(reader)
data = file_data[0]
#Step1
# finding mean
def mean(data):
n= len(data)
total =0
for x in data:
total += int(x)
mean = total / n
return mean
# Steps 2 & 3
squared_list= []
for number in data:
print(int(number))
a = int(number) - mean(data)
a= a**2
squared_list.append(a)
# Step 4
total = 0  # avoid shadowing the built-in sum()
for i in squared_list:
    total = total + i
# divide the sum of squares by (n - 1) for the sample variance
# Step 5
result = total / (len(data) - 1)
#Step6
# getting the deviation by taking square root of the result
std_deviation = math.sqrt(result)
print(std_deviation)
# print("derived using predefined function ",statistics.stdev(data))
|
[
"freelance2020project@gmail.com"
] |
freelance2020project@gmail.com
|
dace69fb2b01d9af69f69069555401deba70980e
|
f32f5e0d4c9523febcf8fed370e583cb18a22fe1
|
/day_26/coding exercises/exercise 4/main.py
|
0cabd33a98008270e4053e10c7e0ec3433405be0
|
[] |
no_license
|
guptaraghav01/100DaysOfCode
|
3b1cd4aa3e38a787096a705dba4bd1732184fc3b
|
f6b2e611c02f980116af7107070e6c88267a2ed4
|
refs/heads/master
| 2023-04-12T17:49:33.688269
| 2021-05-11T18:38:31
| 2021-05-11T18:38:31
| 345,348,770
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
sentence = "What is the Airspeed Velocity of an Unladen Swallow?"
# Don't change code above 👆
# Write your code below:
result = {word: len(word) for word in sentence.split()}
print(result)
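# (Added) Expected output:
# {'What': 4, 'is': 2, 'the': 3, 'Airspeed': 8, 'Velocity': 8, 'of': 2,
#  'an': 2, 'Unladen': 7, 'Swallow?': 8}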
|
[
"raghavgupta0112@gmail.com"
] |
raghavgupta0112@gmail.com
|
6c60dd674feb3ae9f0e96aa74a816af5e89af811
|
d66155cd936eb2cc88962756c6acb45a94ed09bc
|
/_bili_caicai/05特征选择02嵌入法.py
|
864e6f35ae4183048d4ce8b368a10225732fdd7f
|
[] |
no_license
|
wangyaochong/learn-python
|
fdf6b163095bc9fb8701e8414f0b1c02d47aedfb
|
2d5ee4c01ea01244c9996c938af1e6e9ed69ea03
|
refs/heads/master
| 2022-09-09T17:26:54.040111
| 2020-05-29T11:04:48
| 2020-05-29T11:04:48
| 265,125,972
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,046
|
py
|
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier as RFC
from zipfile import ZipFile
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import cross_val_score
# Filter methods are usually tried first; embedded methods are slow to run
file = ZipFile('./digit recognizor.zip')
f = file.open('digit recognizor.csv')
df = pd.read_csv(f)
f.close()
file.close()
df.info()
x = df.iloc[:, 1:]
y = df.iloc[:, 0]
rfc = RFC(n_estimators=10, random_state=0)
x_result = SelectFromModel(rfc, threshold=0.005).fit_transform(x, y)
print(x_result.shape)
# use a learning curve to find the best threshold
# threshold = np.linspace(0, rfc.fit(x, y).feature_importances_.max(), 10)  # run this coarse scan on the first pass,
threshold = np.linspace(0, 0.001, 10)  # then narrow to a concrete range
score = []
for i in threshold:
x_result = SelectFromModel(rfc, threshold=i).fit_transform(x, y)
score.append(cross_val_score(rfc, x_result, y, cv=5).mean())
plt.plot(threshold, score)
plt.show()
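# (Added note) SelectFromModel refits a clone of rfc for every threshold, which
# is why this learning-curve loop is slow; the coarse-then-narrow scan in the
# comments above keeps it tractable.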
|
[
"1162025261@qq.com"
] |
1162025261@qq.com
|
c05e4d3a81d0f595ef5688ff14ccf5bbe8773e2b
|
816fb0dc384216f9bdfcebe59f15f175f1c70e03
|
/blog/migrations/0001_initial.py
|
f488b2d76d2ea4c455ca703834adb039ea787b94
|
[] |
no_license
|
Arton-o/myfirst_project
|
24414ae359f2cac2a05980eeb5037be27d7c1d1b
|
4782eb980e1e397ff979f0c79caa0a70a5fb8d39
|
refs/heads/master
| 2020-12-13T19:22:19.425509
| 2020-01-17T08:27:58
| 2020-01-17T08:27:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 986
|
py
|
# Generated by Django 3.0.2 on 2020-01-14 02:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"atsumi.19@iamas.ac.jp"
] |
atsumi.19@iamas.ac.jp
|
3d3689106688934bb73954a5bffcf59896666ec8
|
fd63e5cfc880432d7bdb7c36481266d681539f76
|
/mix-list/mixlist01.py
|
49b73e895bda9394f7cbfd5d1b742f37891b73e4
|
[] |
no_license
|
MasterOlaga/mycode
|
de8425e5f32affcd803ecb44cb6bdba1f327f774
|
69f6b8747f1dbc5ae4762a39fb5aaa0b4650ab63
|
refs/heads/main
| 2023-04-10T18:57:48.585811
| 2021-05-04T12:05:11
| 2021-05-04T12:05:11
| 361,783,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
#!/usr/bin/env python3
my_list = [ "192.168.0.5", 5060, "UP" ]
iplist = [ 5060, "80", 55, "10.0.0.1", "10.20.30.1", "ssh" ]
print("The first item in the list (IP): " + my_list[0] )
print("The second item in the list (port): " + str(my_list[1]) )
print("The last item in the list (state): " + my_list[2] )
print("IP addresses: " + iplist[3] + ", and " + iplist[4])
print("IP addresses:", iplist[3], ", and", iplist[4])
print(f"IP addresses: {iplist[3]}, and {iplist[4]}")
|
[
"xcurl2009@hotmail.com"
] |
xcurl2009@hotmail.com
|
93bdceb56925c84adec55189bbf50fec88fccbe7
|
94d218e6b4cd6a4a426442fd98fc905e9acf5f6e
|
/myrecommendationsystem/pages/models.py
|
11fa98f89f6a34cee0a75c7141afd657d6c907f0
|
[
"MIT"
] |
permissive
|
rukesh-shrestha/Book-Recommendation-System
|
777c9bf12162b105345ed065bc66fcd162f4f27d
|
64c9cfafcff978d54abb15ba16001ec18c75e4b8
|
refs/heads/master
| 2023-07-21T13:43:48.883356
| 2023-07-13T07:28:03
| 2023-07-13T07:28:03
| 375,590,438
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,075
|
py
|
from django.db import models
from django.contrib.auth.models import Permission, User
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.urls import reverse
class Book(models.Model):
name = models.CharField(blank=False,max_length=100)
author = models.CharField(blank=False,max_length=100,default="Mr.Rukesh Shrestha")
descriptions = models.TextField(null=False,default="Four teenagers in detention discover an old video game console with a game they’ve never heard of. When they decide to play, they are immediately sucked into the jungle world of Jumanji in the bodies of their avatars (Dwayne Johnson, Jack Black, Kevin Hart, and Karen Gillan). They’ll have to complete the adventure of their lives filled with fun, thrills and danger or be stuck in the game forever!",max_length=500)
genre = models.CharField(max_length=50,default="Biography and Memoirs")
image = models.ImageField(upload_to='')
num_visits = models.IntegerField(default=0)
last_visits = models.DateTimeField(blank=True,null=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('detailhome',args=[str(self.id)])
class Myrating(models.Model):
user = models.ForeignKey(User,on_delete=models.CASCADE)
places = models.ForeignKey(Book,on_delete=models.CASCADE)
rating = models.IntegerField(default=1,validators=[MaxValueValidator(5),MinValueValidator(0)])
def __str__(self):
return str(self.user)
# Create your models here.
class Contactus(models.Model):
name = models.CharField(max_length=50,null=True,blank=False)
emails = models.EmailField(max_length=200,null=True,blank=False)
subjects = models.CharField(max_length=100,null=True,blank=False)
descriptions = models.TextField(max_length=400,null=True,blank=False)
class Aboutus(models.Model):
descriptions_page = models.TextField(null=False,default="This is the amazing book. You must read it.")
descriptions_person = models.TextField(null=False,default="People vary in terms of their physical appearance and personalities, and the words that are used to describe them are just as varied. Some words are better suited to describing the physical appearance of someone, some are best used to describe the person’s style, and others are ideal for describing the person’s character traits.")
footerdetail = models.TextField(null=False,default="Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.")
quotes = models.TextField(default="Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.")
facebook = models.URLField(max_length=500,default="https://www.facebook.com/rukesh.shrestha.94651/")
linkedin = models.URLField(max_length=500,default="https://www.linkedin.com/in/rukesh-shrestha-07914b1a3/")
instragram = models.URLField(max_length=500,default="https://www.instagram.com/____rukesh____/")
image = models.ImageField(upload_to='')
|
[
"rukesh.shrestha11@gmail.com"
] |
rukesh.shrestha11@gmail.com
|
e2959bbf4b73f8f6af1087ddbe37c105d3ea8bf5
|
9b066b7c2bc2f59fca3415e867c46a8949f42867
|
/cci_partner/__openerp__.py
|
24407d7f005eb6f0df6bfae77e93dd796d9d9948
|
[] |
no_license
|
OdooPython/OpenERPOdoo
|
ccaa48666e0ca9690e0a07b2d05728200614136c
|
85c0936d17854f184b761c03f3b085e4c95566a1
|
refs/heads/master
| 2021-05-05T21:30:24.146839
| 2018-08-17T09:26:11
| 2018-08-17T09:26:11
| 115,533,848
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,230
|
py
|
# -*- coding: utf-8 -*-
{
'name': "CCI PARTENAIRE",
'description': """
CCI PARTENAIRE.
========================
This module manages partners through a well-defined workflow (draft, awaiting validation, validated, refused), so that the administrator has the right to validate the addition of partners.
""",
'author': "Marwa BEN MESSAOUD & Salwa KSILA & Houssem ABID I-way",
"website": "http://www.i-way.tn.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
# for the full list
'category': 'Partner',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base','sale','mail'],
# always loaded
'data': [
'security/ir.model.access.csv',
'views/res_partner_view.xml',
#'secteur_data.xml',
'views/res_partner_workflow.xml',
'views/res_partner_operateur_view.xml',
'views/res_partner_contact_view.xml',
#'views/product_view.xml',
],
# only loaded in demonstration mode
'demo': [
],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"marwa.benmessaoud@iway-tn.com"
] |
marwa.benmessaoud@iway-tn.com
|
f0201724254415022c8ee3d82158e0094dabfdad
|
a36a3f4e41cbbe111529921c30b2d75442c7ed64
|
/TIPS_Data_Analysis/TIPS_Data_Analysis/Leak_Analysis.py
|
c2433ba0314b59d0062bbe145c5639042009a88d
|
[
"MIT"
] |
permissive
|
rnsheehan/TIPS_Data_Analysis
|
47b5f8bf1bb8a945ed702a7ac5e52abc9b1d1862
|
8909fe9306a1ba57fa42cf2f41b5ed6ecbb60dda
|
refs/heads/master
| 2022-08-02T05:14:06.140928
| 2020-05-25T14:18:06
| 2020-05-25T14:18:06
| 266,798,110
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29,087
|
py
|
import os
import glob
import re
import sys # access system routines
import math
import scipy
import numpy as np
import matplotlib.pyplot as plt
import Common
import Plotting
# variables that tell you what data is in each column
CURR_VAL = 0; # column containing current data
DFB_VAL = 1; SOA_VAL = 3; EAM_VAL = 5; PWR_VAL = 7; # columns containing measured data
DFB_ERR = 2; SOA_ERR = 4; EAM_ERR = 6; PWR_ERR = 8; # columns containing errors in measured data
# This module should include methods for reading the leakage analysis data
# and plotting the leakage data for various plots
# R. Sheehan 7 - 11 - 2017
class sweep_params(object):
# class that contains parameters for a sweep
# temperature = temperature of device during sweep
# EAM_bias = voltage across EAM during sweep
# current = current across section of device during sweep, section may be DFB or SOA
# sweep_type tells you if sweep is over DFB or SOA section
# if sweep_device == DFB: current is SOA current
# if sweep_device == SOA: current is DFB current
# constructor
# define default arguments inside
def __init__(self):
try:
self.temperature = 20.0 # device temperature
self.EAM_bias = 0.0 # EAM bias
self.static_device_current = 0.0 # current in other section of device
self.sweep_device = "" # string that tells you if sweep is over DFB section or SOA section.
self.static_device = "" # string that tells you if sweep is over DFB section or SOA section.
except TypeError:
print("Type Error in Leak_Analysis.sweep_params(object) instantiation")
# return a string the describes the class
def __str__(self):
return self.sweep_device + ", " + self.static_device + " Current = " + str(self.static_device_current) + " (mA), EAM bias = " + str(self.EAM_bias) + " (V)"
def read_Leak_data(leak_file, correct_power = True, loud = False):
# read the measured leakage data from the file
# Data is stored in columns of the form
# 0. Current (mA)
# 1. V_{DFB} (V)
# 2. \DeltaV_{DFB} (V)
# 3. V_{SOA} (V)
# 4. \Delta V_{SOA} (V)
# 5. I_{EAM} (mA)
# 6. \Delta I_{EAM} (mA)
# 7. P_{out} (dBm)
# 8. \Delta P_{out} (dBm)
# correct_power decides whether or not to correct the power being read from the file
# Due to an oversight the optical power was measured through the 10% arm of the power splitter after it had passed through the VOA
# While this will not affect the behaviour of the device it will mean that the measured power is less than it should be.
# When plotting the measured power remember to convert it to the 90% value and add 1 dB due to the IL of the VOA.
# R. Sheehan 7 - 11 - 2017
try:
if glob.glob(leak_file):
numbers = Common.extract_values_from_string(leak_file)
parameters = sweep_params()
parameters.temperature = float(numbers[1])
parameters.EAM_bias = float(numbers[3])
if leak_file.find("DFB") > 0:
parameters.static_device_current = float(numbers[2]) # current across DFB section
parameters.sweep_device = "SOA"; parameters.static_device = "DFB";
elif leak_file.find("SOA") > 0:
parameters.static_device_current = float(numbers[2]) # current across SOA section
parameters.sweep_device = "DFB"; parameters.static_device = "SOA";
else:
raise Exception
delim = '\t'
data = Common.read_matrix(leak_file, delim)
data = Common.transpose_multi_col(data)
if correct_power == True:
# adjust the power reading to the 90% value
# add insertion loss due to the VOA
slope = 1.0038; intercept = 9.4697; # values obtained from fit
insertion_loss = 0.8 # insertion loss due to VOA in dB
power_column = 7
for i in range(0, len(data[power_column]), 1):
data[power_column][i] = insertion_loss + intercept + slope*data[power_column][i]
return [parameters, data]
else:
raise Exception
except Exception:
print("Error: Leak_Analysis.read_Leak_data")
def get_Leak_label(quantity):
# return a string to serve as a label on a plot based on the value of quantity
# R. Sheehan 8 - 11 - 2017
try:
c2 = True if quantity > 0 and quantity < 9 else False
if c2:
if quantity == 1:
return 'DFB Voltage $V_{DFB}$ (V)'
elif quantity == 2:
return 'DFB Voltage Variation $\Delta V_{DFB}$ (V)'
elif quantity == 3:
return 'SOA Voltage $V_{SOA}$ (V)'
elif quantity == 4:
return 'SOA Voltage Variation $\Delta V_{SOA}$ (V)'
elif quantity == 5:
return 'EAM Current $I_{EAM}$ (mA)'
elif quantity == 6:
return 'EAM Current Variation $\Delta I_{EAM}$ (mA)'
elif quantity == 7:
return 'Optical Power $P_{out}$ (dBm)'
elif quantity == 8:
return 'Optical Power Variation $\Delta P_{out}$ (dBm)'
else:
raise Exception
else:
raise Exception
except Exception:
print("Error: Leak_Analysis.get_Leak_label()")
def get_Leak_name(quantity):
# return a string to serve as a name for a file containing a plot based on the value of quantity
# R. Sheehan 9 - 11 - 2017
try:
c2 = True if quantity > 0 and quantity < 9 else False
if c2:
if quantity == 1:
return 'DFB_Voltage'
elif quantity == 2:
return 'Delta_DFB_Voltage'
elif quantity == 3:
return 'SOA_Voltage'
elif quantity == 4:
return 'Delta_SOA_Voltage'
elif quantity == 5:
return 'EAM_Current'
elif quantity == 6:
return 'Delta_EAM_Current'
elif quantity == 7:
return 'Optical_Power'
elif quantity == 8:
return 'Delta_Optical_Power'
else:
raise Exception
else:
raise Exception
except Exception:
print("Error: Leak_Analysis.get_Leak_label()")
def get_Leak_plot_range(quantity):
# return a list to serve as a plot range for a plot based on the value of quantity
# R. Sheehan 9 - 11 - 2017
try:
c2 = True if quantity > 0 and quantity < 9 else False
if c2:
if quantity == 1:
return [0, 180, 0, 2.1]
elif quantity == 2:
return [0, 180, 1.0e-4, 1.0]
elif quantity == 3:
return [0, 180, 0, 2.6]
elif quantity == 4:
return [0, 180, 1.0e-4, 1.0]
elif quantity == 5:
return [0, 180, -4, 0.0]
elif quantity == 6:
return [0, 180, 1.0e-6, 1.0]
elif quantity == 7:
return [0, 180, -90, 0.0]
elif quantity == 8:
return [0, 180, 0, 0.01]
else:
raise Exception
else:
raise Exception
except Exception:
print("Error: Leak_Analysis.get_Leak_label()")
def plot_Leak_quantity(leak_data_1, leak_data_2, quantity, loud = False):
# plot a measured quantity from the leakage analysis
# sweep_type of each data set has to be the same
# R. Sheehan 8 - 11 - 2017
try:
        c1 = True if leak_data_1[0].sweep_device == leak_data_2[0].sweep_device else False
        c2 = True if quantity > 0 and quantity < 9 else False
        c3 = True if leak_data_1[0].static_device_current == leak_data_2[0].static_device_current else False
if c1 and c2 and c3:
hv_data = []; labels = []; marks = [];
hv_data.append([ leak_data_1[1][CURR_VAL], leak_data_1[1][quantity] ] );
marks.append(Plotting.labs_lins[0]);
labels.append('$V_{EAM}$ = %(v1)0.2f V'%{ "v1":leak_data_1[0].EAM_bias } )
hv_data.append([leak_data_2[1][CURR_VAL], leak_data_2[1][quantity] ]);
marks.append(Plotting.labs_lins[1]);
labels.append('$V_{EAM}$ = %(v1)0.2f V'%{"v1":leak_data_2[0].EAM_bias})
arguments = Plotting.plot_arg_multiple()
arguments.loud = loud
arguments.crv_lab_list = labels
arguments.mrk_list = marks
arguments.x_label = leak_data_1[0].sweep_device + ' Current (mA)'
arguments.y_label = get_Leak_label(quantity)
arguments.plt_range = get_Leak_plot_range(quantity)
arguments.plt_title = leak_data_1[0].static_device + ' Current = ' + str(leak_data_1[0].static_device_current) + ' (mA)'
arguments.fig_name = get_Leak_name(quantity) + '_I' + leak_data_1[0].static_device + '_' + str(leak_data_1[0].static_device_current).replace('.0','')
if quantity%2 == 0 and quantity < PWR_ERR: arguments.log_y = True
Plotting.plot_multiple_curves(hv_data, arguments)
# error in this case is very small, best to do separate analysis on error data
#Plotting.plot_multiple_curves_with_errors(hv_data, arguments)
del hv_data; del labels; del marks; del arguments;
else:
raise Exception
except Exception:
print("Error: Leak_Analysis.plot_Leak_quantity")
def SOA_Sweep_Plots(DFB_Current, plot_errors = False, loud = False):
# make plots of the data for the SOA Sweep
# R. Sheehan 8 - 11 - 2017
try:
files = glob.glob("TIPS_1_EAM_Lk_T_20_IDFB_%(v)d_VEAM*"%{"v":DFB_Current})
if files:
the_data = []
for f in files:
the_data.append( read_Leak_data(f) )
if plot_errors:
plot_Leak_quantity(the_data[1], the_data[0], DFB_ERR, loud)
plot_Leak_quantity(the_data[1], the_data[0], EAM_ERR, loud)
plot_Leak_quantity(the_data[1], the_data[0], PWR_ERR, loud)
else:
plot_Leak_quantity(the_data[1], the_data[0], DFB_VAL, loud)
plot_Leak_quantity(the_data[1], the_data[0], EAM_VAL, loud)
plot_Leak_quantity(the_data[1], the_data[0], PWR_VAL, loud)
else:
raise Exception
except Exception:
print("Error: Leak_Analysis.SOA_Sweep_Plots")
def DFB_Sweep_Plots(SOA_Current, plot_errors = False, loud = False):
# make plots of the data for the DFB Sweep
# R. Sheehan 8 - 11 - 2017
try:
files = glob.glob("TIPS_1_EAM_PC_T_20_ISOA_%(v)d_VEAM*"%{"v":SOA_Current})
if files:
the_data = []
for f in files:
the_data.append( read_Leak_data(f) )
if plot_errors:
plot_Leak_quantity(the_data[1], the_data[0], SOA_ERR, loud)
plot_Leak_quantity(the_data[1], the_data[0], EAM_ERR, loud)
plot_Leak_quantity(the_data[1], the_data[0], PWR_ERR, loud)
else:
plot_Leak_quantity(the_data[1], the_data[0], SOA_VAL, loud)
plot_Leak_quantity(the_data[1], the_data[0], EAM_VAL, loud)
plot_Leak_quantity(the_data[1], the_data[0], PWR_VAL, loud)
else:
raise Exception
except Exception:
print("Error: Leak_Analysis.DFB_Sweep_Plots")
def Error_Statistics(error_data, drop_terms = 1, neglect_zeroes = False, scale = True, scale_factor = 1000.0):
# compute the average value and standard deviation of a set of measured error values
# error_data should be input as numpy array
# use np.asarray(input)
# R. Sheehan 9 - 11 - 2017
# if you want to drop the first element use error_data[1:]
    # In certain situations the error data may be dominated by zero values; too many zeroes will give an incorrect estimate of the mean and std. dev
# if neglect_zeroes == True compute mean and std. dev of non-zero error_data only
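    # The zero-excluding branch below uses the corrected two-pass formula of
    # Numerical Recipes in C, sect. 14.1:
    #   Var = [ sum_j (x_j - xbar)^2 - (1/N) * ( sum_j (x_j - xbar) )^2 ] / (N - 1)
    # The subtracted term cancels the round-off error that accumulates when the
    # first-pass mean xbar is not exact.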
try:
if error_data is not None:
if neglect_zeroes == False:
# compute mean and std. dev using all data in the array
average = np.mean(error_data[drop_terms:])
std_dev = np.std(error_data[drop_terms:], ddof = 1)
#std_dev = np.max(error_data[drop_terms:]) - np.min(error_data[drop_terms:])
else:
# compute mean and std. dev neglecting zero values
# use the corrected two-pass formula given in NRinC, sect. 14.1
                # first step is to ensure that there is more than one non-zero term
count = 0
for j in range(0, len(error_data), 1):
if math.fabs(error_data[j]) > 0.0:
count += 1
if count > 1:
count1 = count2 = 0
average = std_dev = ep = 0.0;
# first pass compute the average
for j in range(0, len(error_data), 1):
if math.fabs(error_data[j]) > 0.0:
average += error_data[j]
count1 += 1
average = average/count1
# second pass compute the variance
for j in range(0, len(error_data), 1):
if math.fabs(error_data[j]) > 0.0:
s = error_data[j] - average
ep += s
std_dev += s**2 # variance is stored in std_dev for now
count2 += 1
std_dev = (std_dev - (ep**2/count2))/(count2-1)
std_dev = math.sqrt(std_dev)
else:
# revert to whatever numpy wants to do if there's not enough data points
# in fact numpy will just return zero values under the same circumstances so may as well not bother with the numpy call
#print("Not enough data points to use corrected two-pass formula\nreverting to numpy"
#average = np.mean(error_data[drop_terms:])
#std_dev = np.std(error_data[drop_terms:], ddof = 1)
average = std_dev = 0.0
if scale == True:
average = scale_factor*average;
std_dev = scale_factor*std_dev;
return [ average, std_dev ]
else:
raise Exception
except Exception:
print("Error: Leak_Analysis.Error_Statistics()")
def Print_Error_Analysis(static_device_name, static_device_current):
# go through the measured errors for each data set and determine the average measured error
# along with std. deviation.
# Make a plot of the results
# R. Sheehan 9 - 11 - 2017
DATA_HOME = "C:/Users/Robert/Research/EU_TIPS/Data/Exp-2/Leak_SWP/"
try:
if os.path.isdir(DATA_HOME):
os.chdir(DATA_HOME)
files = glob.glob("TIPS_1_EAM_*_T_20_I%(v1)s_%(v2)d_VEAM*"%{"v1":static_device_name, "v2":static_device_current})
if files:
drop_vals = 1
scale_data = False
neglect_zeroes = False
scale_factor = 1.0
volt_units = " (uV)" if scale_factor == 1.0e+6 else " (V)"
current_units = " (nA)" if scale_factor == 1.0e+6 else " (mA)"
for f in files:
data = read_Leak_data(f)
print(data[0])
print("DFB voltage error: ",Error_Statistics( np.asarray(data[1][2]), drop_vals, neglect_zeroes, scale_data, scale_factor )[0],volt_units)
print("SOA voltage error: ",Error_Statistics( np.asarray(data[1][4]), drop_vals, neglect_zeroes, scale_data, scale_factor )[0],volt_units)
print("EAM current error: ",Error_Statistics( np.asarray(data[1][6]), drop_vals, neglect_zeroes, scale_data, scale_factor )[0],current_units)
print("Optical power error: ",Error_Statistics( np.asarray(data[1][8]), drop_vals, neglect_zeroes, False )[0]," (dBm)\n")
del data;
else:
raise Exception
except Exception:
print("Error: Leak_Analysis.Print_Error_Analysis()")
def Get_Error_Analysis_Data(static_device_name, static_device_current, eam_bias, quantity, remove_zeroes = False, loud = False):
# go through the measured errors for each data set and determine the average measured error
# along with std. deviation versus the applied current
# Make a plot of the results
# R. Sheehan 9 - 11 - 2017
DATA_HOME = "C:/Users/Robert/Research/EU_TIPS/Data/Exp-2/Leak_SWP/"
try:
if os.path.isdir(DATA_HOME):
os.chdir(DATA_HOME)
c1 = True if quantity > 1 and quantity < 9 else False
files = glob.glob("TIPS_1_EAM_*_T_20_I%(v1)s_%(v2)d_VEAM_%(v3)0.2f.txt"%{"v1":static_device_name, "v2":static_device_current, "v3":eam_bias})
if files and c1:
drop_vals = 0
ret_data = []
for f in files:
data = read_Leak_data(f)
dfb_err = Error_Statistics( np.asarray(data[1][quantity]), drop_vals, remove_zeroes, False) # no scaling is being applied when error statistics are computed
ret_data.append(data[0].static_device_current);
ret_data.append(data[0].temperature);
ret_data.append( math.fabs(dfb_err[0]) );
ret_data.append( math.fabs(dfb_err[1]) );
if loud:
scale_factor = 1.0e+3
volt_units = " (uV)" if scale_factor == 1.0e+6 else " (mV)"
current_units = " (nA)" if scale_factor == 1.0e+6 else " (uA)"
print(data[0])
print("DFB voltage error: ",Error_Statistics( np.asarray(data[1][2]), drop_vals, remove_zeroes, True, scale_factor),volt_units)
print("SOA voltage error: ",Error_Statistics( np.asarray(data[1][4]), drop_vals, remove_zeroes, True, scale_factor),volt_units)
print("EAM current error: ",Error_Statistics( np.asarray(data[1][6]), drop_vals, remove_zeroes, True, scale_factor),current_units)
print("Optical power error: ",Error_Statistics( np.asarray(data[1][8]), drop_vals, remove_zeroes, False)," (dBm)\n")
del data;
return ret_data
else:
raise Exception
except Exception:
print("Error: Leak_Analysis.Get_Error_Analysis_Data()")
def Gather_Error_Analysis_Data(static_dev_name, eam_bias, quantity, remove_zeroes = False):
# gather together all the error analysis data in a format suitable for plotting
# this function returns the average error for each static device current
# value being computed is the average error over the swept current
# R. Sheehan 9 - 11 - 2017
DATA_HOME = "C:/Users/Robert/Research/EU_TIPS/Data/Exp-2/Leak_SWP/"
try:
if os.path.isdir(DATA_HOME):
os.chdir(DATA_HOME)
dev_current = [0, 50, 100, 140, 150, 160, 170, 180]
error_data = []
for ii in dev_current:
data = Get_Error_Analysis_Data(static_dev_name, ii, eam_bias, quantity, remove_zeroes)
error_data.append(data); del data;
del dev_current;
error_data = Common.transpose_multi_col(error_data)
return error_data
else:
raise Exception
except Exception:
print("Error: Leak_Analysis.Gather_Error_Analysis_Data()")
def plot_Error_Analysis_data(swept_device_name, static_device_name, quantity, remove_zeroes = False, loud = False):
# make plots of the gathered error analysis data
# the error is averaged over all static_device_current values
# what is the scaling being applied when the plot is being made?
try:
VEAM = 0.0
ds1 = Gather_Error_Analysis_Data(static_device_name, VEAM, quantity, remove_zeroes)
VEAM = -0.5
ds2 = Gather_Error_Analysis_Data(static_device_name, VEAM, quantity, remove_zeroes)
if ds1 is not None and ds2 is not None:
hv_data = []; labels = []; marks = [];
hv_data.append([ds1[0], ds1[2]]); labels.append('$V_{EAM}$ = 0 (V)'); marks.append(Plotting.labs_pts[0])
hv_data.append([ds2[0], ds2[2]]); labels.append('$V_{EAM}$ = -0.5 (V)'); marks.append(Plotting.labs_pts[1])
arguments = Plotting.plot_arg_multiple()
arguments.loud = loud
arguments.crv_lab_list = labels; arguments.mrk_list = marks;
arguments.x_label = static_device_name + ' Current (mA)'
arguments.y_label = get_Leak_label(quantity)
#arguments.plt_range = get_Leak_plot_range(quantity) if quantity < PWR_ERR else None
arguments.plt_range = get_Leak_plot_range(quantity)
arguments.plt_title = 'Average Error while current sweeps across ' + swept_device_name
if quantity < PWR_ERR: arguments.log_y = True
arguments.fig_name = get_Leak_name(quantity-1) + '_' + swept_device_name + '_Sweep_Error'
Plotting.plot_multiple_curves(hv_data, arguments)
del ds1; del ds2; del hv_data; del marks; del labels;
else:
raise Exception
except Exception:
print("Error: Leak_Analysis.plot_Error_Analysis_data()")
def SOA_Sweep_Error_Plots(remove_zeroes = False, loud = False):
# Make a plot of the averaged measured error for each measurement
# R. Sheehan 15 - 11 - 2017
try:
swept_device_name = 'SOA'; static_device_name = 'DFB';
plot_Error_Analysis_data(swept_device_name, static_device_name, DFB_ERR, remove_zeroes, loud)
plot_Error_Analysis_data(swept_device_name, static_device_name, SOA_ERR, remove_zeroes, loud)
plot_Error_Analysis_data(swept_device_name, static_device_name, EAM_ERR, remove_zeroes, loud)
plot_Error_Analysis_data(swept_device_name, static_device_name, PWR_ERR, remove_zeroes, loud)
except Exception:
print("Error: Leak_Analysis.SOA_Sweep_Error_Plots()")
def DFB_Sweep_Error_Plots(remove_zeroes = False, loud = False):
# Make a plot of the averaged measured error for each measurement
# R. Sheehan 15 - 11 - 2017
try:
swept_device_name = 'DFB'; static_device_name = 'SOA';
plot_Error_Analysis_data(swept_device_name,static_device_name, DFB_ERR, remove_zeroes, loud)
plot_Error_Analysis_data(swept_device_name,static_device_name, SOA_ERR, remove_zeroes, loud)
plot_Error_Analysis_data(swept_device_name,static_device_name, EAM_ERR, remove_zeroes, loud)
plot_Error_Analysis_data(swept_device_name,static_device_name, PWR_ERR, remove_zeroes, loud)
except Exception:
print("Error: Leak_Analysis.SOA_Sweep_Error_Plots()")
def run_Leak_sweep_plots_all(error_plots = False):
# short script for plotting all measured data
Ival = [0, 50, 100, 140, 150, 160, 170, 180]
for ii in Ival:
SOA_Sweep_Plots(ii,error_plots)
DFB_Sweep_Plots(ii,error_plots)
def Compare_Higher_Bias(static_device, quantity, loud = False):
# compare data measured at higher bias
DATA_HOME = "C:/Users/Robert/Research/EU_TIPS/Data/Exp-2/Leak_SWP/Higher_Bias/"
try:
if os.path.isdir(DATA_HOME):
os.chdir(DATA_HOME)
files = glob.glob("TIPS_1_EAM*T_20_I%(v1)s*VEAM*"%{"v1":static_device})
if files:
for f in files: print(f)
else:
raise Exception
else:
raise EnvironmentError
except EnvironmentError:
print("Error: Leak_Analysis.Compare_Higher_Bias()")
print("Cannot find",DATA_HOME)
except Exception:
print("Error: Leak_Analysis.Compare_Higher_Bias()")
def Compare_Higher_Temperature(static_device, eam_bias, loud = False):
# compare data measured over higher temperatures
# read in data
# loop over quantity to make all the necessary plots
# power data at T = 25, 30 was collected without the need for correction
# R. Sheehan 16 - 11 - 2017
DATA_HOME = "C:/Users/Robert/Research/EU_TIPS/Data/Exp-2/Leak_SWP/Higher_Temperature/"
try:
if os.path.isdir(DATA_HOME):
os.chdir(DATA_HOME)
files = glob.glob("TIPS_1_EAM*T*I%(v1)s*VEAM_%(v2)0.2f.txt"%{"v1":static_device, "v2":eam_bias})
if files:
# read in all data
the_data = []
for i in range(0, len(files), 1):
numbers = Common.extract_values_from_string(files[i])
if int(numbers[1]) == 20:
correct_power = True
else:
correct_power = False
the_data.append( read_Leak_data(files[i], correct_power) )
del numbers
# loop over quantity to make the necessary plots
quantity = [DFB_VAL, SOA_VAL, EAM_VAL, PWR_VAL]
#eam_str = "%(v2)0.2f"%{"v2":eam_bias}
for q in quantity:
hv_data = []; labels = []; marks = [];
for i in range(0, len(the_data), 1):
hv_data.append( [ the_data[i][1][CURR_VAL], the_data[i][1][q] ] );
marks.append(Plotting.labs_lins[i])
labels.append('T = %(v1)0.0f C'%{"v1":the_data[i][0].temperature})
arguments = Plotting.plot_arg_multiple()
arguments.loud = loud
arguments.crv_lab_list = labels
arguments.mrk_list = marks
arguments.x_label = the_data[0][0].sweep_device + ' Current (mA)'
arguments.y_label = get_Leak_label(q)
#arguments.plt_range = get_Leak_plot_range(q)
arguments.plt_title = the_data[0][0].static_device + ' Current = ' + str(the_data[0][0].static_device_current) + ' (mA)'
arguments.fig_name = get_Leak_name(q) + '_I' + the_data[0][0].static_device + '_' + str(the_data[0][0].static_device_current).replace('.0','') + '_VEAM_' + '%(v2)0.2f'%{"v2":eam_bias} + '.png'
#if q%2 == 0 and q < PWR_ERR: arguments.log_y = True
Plotting.plot_multiple_curves(hv_data, arguments)
del hv_data; del labels; del marks;
del the_data; del files;
else:
raise Exception
else:
raise EnvironmentError
except EnvironmentError:
print("Error: Leak_Analysis.Compare_Higher_Temperature()")
print("Cannot find",DATA_HOME)
except Exception:
print("Error: Leak_Analysis.Compare_Higher_Temperature()")
def Make_Leak_plots():
# call the functions needed to generate the plots for TIPS Exp 2
# R. Sheehan 30 - 8 - 2017
DATA_HOME = "C:/Users/Robert/Research/EU_TIPS/Data/Exp-2/Leak_SWP/"
try:
if os.path.isdir(DATA_HOME):
os.chdir(DATA_HOME)
#ii = 170
#SOA_Sweep_Plots(ii, True, True)
#run_Leak_sweep_plots_all(error_plots = False)
#static_device_name = 'DFB'
#static_device_current = 180
#eam_bias = 0
#quantity = DFB_ERR
#loud = True
#remove_zeroes = False
#Get_Error_Analysis_Data(static_device_name, static_device_current, eam_bias, quantity, remove_zeroes, loud)
#remove_zeroes = True
#Get_Error_Analysis_Data(static_device_name, static_device_current, eam_bias, quantity, remove_zeroes, loud)
#remove_zeroes = True; loud = False
#SOA_Sweep_Error_Plots(remove_zeroes, loud)
#DFB_Sweep_Error_Plots(remove_zeroes, loud)
static_device = 'DFB'
quantity = DFB_VAL
loud = False
#Compare_Higher_Bias(static_device, DFB_VAL, loud)
eam_bias = -0.5
Compare_Higher_Temperature(static_device, eam_bias, loud)
else:
raise EnvironmentError
except EnvironmentError:
print("Error: Leak_Analysis.Make_Leak_plots()")
print("Cannot find",DATA_HOME)
except Exception:
print("Error: Leak_Analysis.Make_Leak_plots()")
|
[
"robertnsheehan@gmail.com"
] |
robertnsheehan@gmail.com
|
a1d7168df36367a6a9de58b2eef43b6e2a6c0481
|
14a58f0c6d0bcfeeb308a8a8719d0e9e728ee48e
|
/tests/test_custom.py
|
13238835eb34053c78b53794b33cf1e7e3e11830
|
[
"MIT"
] |
permissive
|
wesselb/lab
|
262da5a30c1b3a78e576014d9b752aae52959774
|
275d041bdd47bbbad1fce5a10bbce0d7beceefdb
|
refs/heads/master
| 2023-06-08T11:04:03.523207
| 2023-05-27T10:15:07
| 2023-05-27T10:15:07
| 127,299,861
| 62
| 6
|
MIT
| 2023-09-01T09:53:02
| 2018-03-29T14:02:14
|
Python
|
UTF-8
|
Python
| false
| false
| 4,677
|
py
|
import jax
import jax.numpy as jnp
import lab as B
import numpy as np
import pytest
import tensorflow as tf
import torch
from autograd import grad
from fdm import check_sensitivity, gradient
from lab.custom import (
toeplitz_solve,
s_toeplitz_solve,
bvn_cdf,
s_bvn_cdf,
expm,
s_expm,
logm,
s_logm,
)
from lab.tensorflow.custom import as_tf
from lab.torch.custom import as_torch
from plum import isinstance
# noinspection PyUnresolvedReferences
from .util import approx, check_lazy_shapes, check_function, PSD
def test_as_tf(check_lazy_shapes):
assert isinstance(as_tf(B.randn()), B.TFNumeric)
assert isinstance(as_tf((B.randn(),))[0], B.TFNumeric)
def test_as_torch(check_lazy_shapes):
assert isinstance(as_torch(B.randn()), B.TorchNumeric)
assert isinstance(as_torch((B.randn(),))[0], B.TorchNumeric)
def check_grad(f, args, kw_args=None, rtol=1e-8):
"""Check the gradients of a function.
Args:
f (function): Function to check gradients of.
args (tuple): Arguments to check `f` at.
kw_args (tuple, optional): Keyword arguments to check `f` at. Defaults
to no keyword arguments.
rtol (float, optional): Relative tolerance. Defaults to `1e-8`.
"""
# Default to no keyword arguments.
if kw_args is None:
kw_args = {}
# Get the associated function in LAB.
lab_f = getattr(B, f.__name__)
def create_f_i(i, args_):
# Create a function that only varies the `i`th argument.
def f_i(x):
return B.mean(lab_f(*(args_[:i] + (x,) + args_[i + 1 :]), **kw_args))
return f_i
# Walk through the arguments.
for i in range(len(args)):
# Numerically compute gradient.
f_i = create_f_i(i, args)
numerical_grad = gradient(f_i)(args[i])
# Check AutoGrad gradient.
autograd_grad = grad(f_i)(args[i])
approx(numerical_grad, autograd_grad, rtol=rtol)
# Check TensorFlow gradient.
tf_args = tuple([as_tf(arg) for arg in args])
f_i = tf.function(create_f_i(i, tf_args), autograph=False)
with tf.GradientTape() as t:
t.watch(tf_args[i])
tf_grad = t.gradient(f_i(tf_args[i]), tf_args[i]).numpy()
approx(numerical_grad, tf_grad, rtol=rtol)
# Check PyTorch gradient.
torch_args = tuple([as_torch(arg, grad=False) for arg in args])
f_i = torch.jit.trace(create_f_i(i, torch_args), torch_args[i])
arg = torch_args[i].requires_grad_(True)
f_i(arg).backward()
approx(numerical_grad, arg.grad, rtol=rtol)
# Check JAX gradient.
jax_args = tuple([jnp.asarray(arg) for arg in args])
f_i = create_f_i(i, jax_args)
jax_grad = jax.jit(jax.grad(f_i))(jax_args[i])
approx(numerical_grad, jax_grad, rtol=rtol)
def test_toeplitz_solve(check_lazy_shapes):
check_sensitivity(
toeplitz_solve, s_toeplitz_solve, (B.randn(3), B.randn(2), B.randn(3))
)
check_sensitivity(
toeplitz_solve, s_toeplitz_solve, (B.randn(3), B.randn(2), B.randn(3, 4))
)
check_grad(toeplitz_solve, (B.randn(3), B.randn(2), B.randn(3)))
check_grad(toeplitz_solve, (B.randn(3), B.randn(2), B.randn(3, 4)))
def test_bvn_cdf(check_lazy_shapes):
check_sensitivity(bvn_cdf, s_bvn_cdf, (B.rand(3), B.rand(3), B.rand(3)))
check_grad(bvn_cdf, (B.rand(3), B.rand(3), B.rand(3)))
# Check that function runs on both `float32`s and `float64`s.
a, b, c = B.rand(3), B.rand(3), B.rand(3)
approx(
B.bvn_cdf(a, b, c),
B.bvn_cdf(B.cast(np.float32, a), B.cast(np.float32, b), B.cast(np.float32, c)),
)
    # Check that, in JAX, the function checks the shapes of the inputs.
with pytest.raises(ValueError):
B.bvn_cdf(
B.rand(jnp.float32, 2), B.rand(jnp.float32, 3), B.rand(jnp.float32, 3)
)
with pytest.raises(ValueError):
B.bvn_cdf(
B.rand(jnp.float32, 3), B.rand(jnp.float32, 2), B.rand(jnp.float32, 3)
)
with pytest.raises(ValueError):
B.bvn_cdf(
B.rand(jnp.float32, 3), B.rand(jnp.float32, 3), B.rand(jnp.float32, 2)
)
def test_expm(check_lazy_shapes):
check_sensitivity(expm, s_expm, (B.randn(3, 3),))
check_grad(expm, (B.randn(3, 3),))
def test_logm_forward(check_lazy_shapes):
# This test can be removed once the gradient is implemented and the below test
# passes.
check_function(B.logm, (PSD(3),))
@pytest.mark.xfail
def test_logm(check_lazy_shapes):
mat = B.eye(3) + 0.1 * B.randn(3, 3)
check_sensitivity(logm, s_logm, (mat,))
check_grad(logm, (mat,))
|
[
"wessel.p.bruinsma@gmail.com"
] |
wessel.p.bruinsma@gmail.com
|
dec25661ceb49ad799ce51d961b2b3a7d6f6b199
|
757366e7b025066d04775436c7d3115dc1d95bd2
|
/modeller/factors/factor_percentage_consecutive_requests.py
|
6322c0f2a366687b4ec347b01236ebe361d34c77
|
[] |
no_license
|
ryanchin18/heimdall
|
1f0ae927dc3fa44074395df0c31f84c461e58f8b
|
053548da33cd3997101229e0c0a4cd652f60e46b
|
refs/heads/master
| 2021-01-23T12:38:00.120467
| 2016-09-11T04:37:24
| 2016-09-11T04:37:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,333
|
py
|
from modeller.factors import BaseFactor
class FactorPercentageConsecutiveRequests(BaseFactor):
"""
To further elucidate the requester's interaction with a given site we additionally
consider how many of the requests made were consecutive as another window onto
frequency.
"""
def __init__(self, session, session_graph, traffic_record):
BaseFactor.__init__(self, session, session_graph, traffic_record)
self._FACTOR_INDEX = 4
self._FACTOR_KEY = "PercentageConsecutiveRequests"
pass
def compute(self):
"""
Compute the Percentage Consecutive Requests
Variables Required:
* Total number of requests (TR) (Number of edges)
* Number of consecutive requests (CR)
Calculation:
Percentage Consecutive Requests (PCR) = (CR / TR) * 100
Possible Analysis:
If the calculated value is close to 100, that implies the
requester consecutively requested for a specific resource.
"""
tr = self._session_graph.graph.num_edges()
cr = self._session_graph.get_graph_property('consecutive_requests')
        # guard against empty sessions (no edges) to avoid a ZeroDivisionError
        pcr = (float(cr) / float(tr) * 100) if tr else 0.0
self.append_graph_factor('float', pcr)
print "Percentage Consecutive Requests : ", pcr
pass
pass
|
[
"ivone.perera@gmail.com"
] |
ivone.perera@gmail.com
|
e0741c9d552f0336469f31757144187784c60621
|
16bf89ec6848c78f4aad3e87b5e386bb86f92815
|
/Mypython-code-master/class.py
|
c128c49809f24aef457bfe26cb8af46defc888df
|
[] |
no_license
|
95ankitmishra/Program-data_structure
|
bae865e72800ade5ed99feb516015820ac876805
|
fc2e9a09c5e382a718b44d9e1ee03cad3442f2f7
|
refs/heads/master
| 2020-06-17T22:18:02.785493
| 2019-07-09T21:00:53
| 2019-07-09T21:00:53
| 196,077,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
y=10
print("outside function",y)
def f1():
global y
    y=5
print("inside function",y)
f1()
print("outside function",y)
|
[
"noreply@github.com"
] |
95ankitmishra.noreply@github.com
|
c98cff74e222c45dd648fc96417716977a4dd826
|
24773154047043586c002d35195b2d5fb345fe02
|
/oktaduosync/__init__.py
|
173dc1db4cfd09c3496521ab045922bb4d0bb7f6
|
[] |
no_license
|
sargun/oktaduosync
|
2494fd84f7562da1995807874e1fc6eb69c31d61
|
1ea2408c3a5469e46fd49c3fd9ef2e106090f3b6
|
refs/heads/master
| 2018-12-29T22:03:49.851402
| 2014-11-20T00:54:39
| 2014-11-20T00:54:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,146
|
py
|
import logging
import duo, okta
logger = logging.getLogger(__name__)
class OktaDuoSync(object):
def __init__(self, config):
self.config = config
self.okta = okta.Okta(self.config['okta']['token'])
self.duo = duo.Duo(self.config['duo'])
def sync(self):
logger.info('Beginning user sync')
okta_group = self.get_okta_group()
okta_users = okta_group.users
duo_users = self.get_duo_users()
self.sync_users(okta_users, duo_users)
def sync_users(self, okta_users, duo_users):
okta_user_dict = {}
duo_user_dict = {}
for i in okta_users:
okta_user_dict[i.email] = i
for i in duo_users:
duo_user_dict[i.email] = i
#Users that exist in okta, that don't exist in duo
users_to_add = set(okta_user_dict.keys()) - set(duo_user_dict.keys())
#Users to delete
users_to_del = set(duo_user_dict.keys()) - set(okta_user_dict.keys())
for key in users_to_add:
self.add_user(okta_user_dict[key])
for key in users_to_del:
self.del_user(duo_user_dict[key])
def del_user(self, duo_user):
logger.info('Deleting user from Duo : %s', duo_user)
self.duo.duo.delete_user(duo_user.user_id)
logger.info('Success')
def add_user(self, okta_user):
logger.info('Adding user to Duo: %s', okta_user)
user = self.duo.duo.add_user(okta_user.prefix,
realname = okta_user.realname,
email = okta_user.email)
params = {'username': user['username'], 'email': user['email']}
self.duo.duo.json_api_call('POST',
'/admin/v1/users/enroll',
params)
logger.info('Success')
def get_duo_users(self):
return self.duo.get_users()
def get_okta_group(self):
sync_group = self.config['okta']['group']
groups = self.okta.get_group(q = sync_group)
        # materialise the filter so len() and indexing below also work on Python 3
        groups = list(filter(lambda x: x.name == sync_group, groups))
if len(groups) == 0:
raise Exception('Group: {0} for synchronization not found'.format(sync_group))
elif len(groups) > 1:
raise Exception('Too many synchronization groups found')
return groups[0]
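# A minimal usage sketch (placeholder values, not a real configuration); the
# keys mirror what __init__ and get_okta_group() read from `config`:
#
#   config = {
#       'okta': {'token': 'OKTA_API_TOKEN', 'group': 'Duo Users'},
#       'duo': {},  # whatever duo.Duo() expects
#   }
#   OktaDuoSync(config).sync()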
|
[
"sargun@sargun.me"
] |
sargun@sargun.me
|
e92b8e4b15c288eff358cd8ca006e1290b4b5e34
|
36b04c5d9ae181a780d44764dd85bcc784e72101
|
/cifar10_resnet/ensemble/saved_models/load_model.py
|
5334b291c6071e015285bbce9ab03b85e6023c65
|
[] |
no_license
|
chengning-zhang/Ensemble-Method-in-Image-Classification
|
9783b6b30f3e174fad1504a44cca57093f8c8730
|
a7c4176334f8e7701fe9cae77fc31b3b6ed0704d
|
refs/heads/master
| 2022-12-30T09:07:30.546966
| 2020-10-21T20:03:51
| 2020-10-21T20:03:51
| 112,964,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
from keras.models import load_model
from resnet import *
if __name__ == "__main__":
# Training parameters
batch_size = 64
epochs = 2
data_augmentation = True
num_classes = 10
    n = 3
    # Model version
    # Orig paper: version = 1 (ResNet v1), Improved ResNet: version = 2 (ResNet v2)
    version = 1
    # Subtracting pixel mean improves accuracy
    substract_pixel_mean = True
(x_train, y_train), (x_test, y_test), input_shape = prepare_data_for_resnet(substract_pixel_mean)
filename = "cifar10_resnet_model.01.h5"
model = load_model(filename)
|
[
"zhang623@wisc.edu"
] |
zhang623@wisc.edu
|
7f4727eec13919575df59195f7ce6f62cde82742
|
1973098bfe8f7251702ba7b9b922152ee50a3f40
|
/mysite/mysite/settings.py
|
c6e9f7c06409ba4ddc23d259c89f0429d9bc9ecf
|
[] |
no_license
|
eunsong8784/my-first-blog
|
3e0dd043bfc58b9534b6d67f9e450ab2c821c8c1
|
3af4621d719d57e3ca52279efa111dbfeb348792
|
refs/heads/master
| 2023-04-08T20:32:52.516410
| 2021-04-22T07:20:47
| 2021-04-22T07:20:47
| 359,710,129
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,433
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-bz%*fccij7l1b+#q1$dnl^=(j&3k&2wrc$ppn^e2sf(!8-m@^g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'blog.apps.BlogConfig',
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': str(BASE_DIR / 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'ko'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [ BASE_DIR / "static", ]
STATIC_ROOT = str(BASE_DIR / 'staticfiles')  # collectstatic target; must not overlap STATICFILES_DIRS
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"sh1997@snu.ac.kr"
] |
sh1997@snu.ac.kr
|
3fabae89d8e5b8b4ba507cdc9d0fcfd6a3198290
|
a6d966f362b5add0faabc0aaab1b6b6464307338
|
/ResNet/resnet.py
|
8298b08e18b8877e6b9f027b68f4022da0dabe03
|
[] |
no_license
|
jusepv/paper_implementation
|
772ed1d6e8353bd72c3abc3a0d13aee298dffe64
|
edd9b686866bde71be77786bff2f2e11f66603a2
|
refs/heads/main
| 2023-03-29T18:01:25.825261
| 2021-03-26T11:00:52
| 2021-03-26T11:00:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,495
|
py
|
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import argparse
import numpy as np
# BasicBlock, BottleNeck and conv1x1 are referenced below but were never
# imported; assuming the sibling `block` module defines them (torchvision-style)
from .block import BasicBlock, BottleNeck, conv1x1
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False):
super(ResNet, self).__init__()
self.inplanes = 64
#inputs = 64x224x224
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
#outputs.shape = 64x112x112
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
#inputs = 64x112x112
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
#outputs = 64x56x56
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1,1))
self.fc = nn.Linear(512*block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, BottleNeck):
nn.init.constant_(m.bn3.weight,0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
#self._make_layer(bottleneck, 64,3)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride), #conv1x1(64,256,1)
nn.BatchNorm2d(planes * block.expansion) #batchnorm2d(256)
            ) #downsample -> used to match the channel count
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
            # e.g. range(1, 3) when blocks == 3
            layers.append(block(self.inplanes, planes)) # appended (blocks - 1) times
return nn.Sequential(*layers)
#self.layer1 = [
# Bottleneck(64,64,1,downsample)
# Bottleneck(256,64)
#Bottlenect(256,64) ]
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
model = ResNet(BasicBlock, [2,2,2,2], **kwargs) # 2*(2+2+2+2) + 1(conv1) + 1(fc) = 16+ 2
return model
def resnet50(pretrained=False, **kwargs):
model = ResNet(BottleNeck, [3,4,6,3], **kwargs) # 3 * (3+4+6+3) + 1(conv1) + 1(fc) = 50
return model
def resnet152(pretrained=False, **kwargs):
    model = ResNet(BottleNeck, [3,8,36,3], **kwargs) # 3 * (3+8+36+3) + 2 = 152
    return model
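# A minimal smoke test, assuming the sibling `block` module defines
# torchvision-style BasicBlock/BottleNeck/conv1x1 (run as a module, e.g.
# `python -m ResNet.resnet`, so the relative import resolves):
if __name__ == '__main__':
    net = resnet18(num_classes=10)
    out = net(torch.randn(1, 3, 224, 224))
    print(out.shape)  # expected: torch.Size([1, 10])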
|
[
"jinju4948@naver.com"
] |
jinju4948@naver.com
|
290390532c65f675cb7a528a1cbd51f63a642701
|
a0c50f2da1fb49dc69337dbc059e0ed6d0a49ef7
|
/server/dataserver.py
|
ec818d7e0d72ce884959a40bab1c801583439fbf
|
[
"MIT"
] |
permissive
|
adminlove520/product-recommend
|
4d4cf87ace9261bfec741a60f59f3c3fb61d5ad2
|
4ac0c855c960ce7f9b8dbc316f184a3810297a84
|
refs/heads/master
| 2022-04-08T12:41:38.852850
| 2020-03-13T10:49:24
| 2020-03-13T10:49:24
| 256,377,399
| 1
| 0
|
MIT
| 2020-04-17T02:08:16
| 2020-04-17T02:08:16
| null |
UTF-8
|
Python
| false
| false
| 1,121
|
py
|
from aliyun.log import LogClient
from aliyun.log import GetLogsRequest
import pymysql
class ossdataserver:
def __init__(self,endpoint,accessKeyId,accessKey,basename,tablename):
        self.endpoint = endpoint #http://oss-cn-hangzhou.aliyuncs.com
        # AccessKeyId of the user's access key pair.
        self.accessKeyId = accessKeyId
        # AccessKeySecret of the user's access key pair.
        self.accessKey = accessKey
self.basename = basename
self.tablename = tablename
self.client = LogClient(self.endpoint, self.accessKeyId, self.accessKey)
def close(self):
#self.client.shutdown()
pass
# def database(self,basename,tablename):
# self.basename=basename
# self.tablename=tablename
class mysqlserver:
def __init__(self,host,port,user,passwd,db):
self.host=host
self.port=port
self.user=user
self.passwd=passwd
self.db=db
self.conn = pymysql.connect(self.host,port=self.port,user=self.user,passwd=self.passwd,db=self.db)
def close(self):
self.conn.close()
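# A minimal usage sketch (placeholder credentials and names, not real endpoints):
#
#   logs = ossdataserver('http://oss-cn-hangzhou.aliyuncs.com',
#                        'ACCESS_KEY_ID', 'ACCESS_KEY_SECRET',
#                        basename='recommend', tablename='behavior')
#   db = mysqlserver('127.0.0.1', 3306, 'user', 'passwd', 'recommend')
#   ...  # query via db.conn / logs.client
#   db.close()
#   logs.close()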
|
[
"2282420654@qq.com"
] |
2282420654@qq.com
|
41af35eaeb4c92034c8a2580cbad30a47d7bb7ec
|
689fcced10cc920c263e4d85bed5a51f85c76abb
|
/aragwas_server/gwasdb/migrations/0001_initial.py
|
4899285d5b7e268d3c50007dfc43290786692c94
|
[
"MIT"
] |
permissive
|
1001genomes/AraGWAS
|
ddb10ea3e476c8cee31e75f9db6dc2bd79f7f487
|
be02c0480bf18228b07853740e63f249fe31d7e5
|
refs/heads/master
| 2022-12-29T00:12:59.936918
| 2020-08-31T16:32:04
| 2020-08-31T16:32:04
| 82,693,787
| 13
| 9
|
MIT
| 2022-12-06T20:20:08
| 2017-02-21T15:11:31
|
Vue
|
UTF-8
|
Python
| false
| false
| 3,971
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11b1 on 2017-02-27 15:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Association',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('maf', models.FloatField()),
('pvalue', models.FloatField()),
('beta', models.FloatField(blank=True, null=True)),
('odds_ratio', models.FloatField(blank=True, null=True)),
('confidence_interval', models.CharField(blank=True, max_length=255, null=True)),
],
),
migrations.CreateModel(
name='Gene',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('chromosome', models.IntegerField()),
('start_position', models.IntegerField()),
('end_position', models.IntegerField()),
('description', models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Genotype',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('version', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Phenotype',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('date', models.DateTimeField(blank=True, null=True)),
('arapheno_link', models.URLField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='SNP',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('chromosome', models.IntegerField()),
('position', models.IntegerField()),
('annotation', models.CharField(max_length=255)),
('gene', models.ManyToManyField(to='gwasdb.Gene')),
('genotype', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='gwasdb.Genotype')),
],
),
migrations.CreateModel(
name='Study',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('transformation', models.CharField(max_length=255)),
('method', models.CharField(max_length=255)),
('publications', models.URLField(blank=True, null=True)),
('easygwas_link', models.URLField(blank=True, null=True)),
('genotype', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='gwasdb.Genotype')),
],
),
migrations.AddField(
model_name='association',
name='snp',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='gwasdb.SNP'),
),
migrations.AddField(
model_name='association',
name='study',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='gwasdb.Study'),
),
]
|
[
"timeu@users.noreply.github.com"
] |
timeu@users.noreply.github.com
|
bab3187381fd4b1c1d2fdf5633a0a4cd53bd72b3
|
ad2a685b28cd62b4ecb807f5eef1bd048d2c1821
|
/test/model/test_demo.py
|
5b0620da882de1a2e46bdd015438f614d732a798
|
[] |
no_license
|
InternetDataMiningLaboratory/qianfu
|
1657e5f721d74767bbef7ac84e9f54ba3ff5d762
|
9b4e653cfa7b1da98e8ffb5c76a0496dc75c6ef0
|
refs/heads/master
| 2021-01-10T17:55:20.147478
| 2016-03-22T09:34:54
| 2016-03-22T09:34:54
| 52,708,996
| 0
| 2
| null | 2016-03-22T09:34:54
| 2016-02-28T06:54:33
|
Python
|
UTF-8
|
Python
| false
| false
| 651
|
py
|
# -*- coding: utf-8 -*-
#
# Author: jimin.huang
#
'''
The test of ``DemoObject``
'''
from test.test_database import TestModel
from nose.tools import assert_equal
class TestDemoObject(TestModel):
'''
The test class of ``DemoObject`` which inherits from ``TestModel``
'''
def test_get_object_by_id(self):
'''
The test of ``DemoObject.get_object_by_id``
'''
from model.demo_action import DemoObject
# Mock the return value of ``connection.get``
self.mock_db.get.return_value = 'test'
# Assert the result
assert_equal(DemoObject.get_object_by_id(1), 'test')
|
[
"windworship2@163.com"
] |
windworship2@163.com
|
15bbb9337422a280ee086239d4255805318811aa
|
fc6bc070063c19298d0406d6d850345be6c373e8
|
/source.py
|
462841c4fa6cd44b1baca93dbf17408efa6be0c2
|
[] |
no_license
|
malarjeyanthi/python-programming
|
f46808aa27f72d1cd59ee636cf5cfc0389395f8b
|
61c2bb227121eda9842e0c5b0320beb47c88ef1f
|
refs/heads/master
| 2020-07-12T00:27:26.162568
| 2019-12-09T18:27:31
| 2019-12-09T18:27:31
| 204,675,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
print("user details")
a=input("enter name:")
b=int(input("enter phone:"))
source=int(input("enter the source:"))
destination=int(input("enter the destination:"))
if source<0 or destination<0:
print("give valid number")
exit()
elif source==0 or destination==0:
print("source and destination are not valid")
exit()
else:
    dis=abs(destination-source)
    print("1.car\n 2.bike\n 3.bus\n")
    # accept the bare menu number or the full menu text (e.g. "1" or "1.car")
    select=input()
    if select in ("1","1.car"):
        x=dis*4
        print(x)
    elif select in ("2","2.bike"):
        x=dis*3
        print(x)
    elif select in ("3","3.bus"):
        x=dis*4
        print(x)
    else:
        print("invalid")
        exit()
    print(a,'\n',b,'\n',source,'\n',destination,'\n',dis,'\n',x)
|
[
"noreply@github.com"
] |
malarjeyanthi.noreply@github.com
|
2d73c2e24886dc741ce1d0a7e7c2efb1f6f6cda2
|
0e383ccac5fdf21dc5059502b9aae26412fd6a88
|
/shared_lib/readers.py
|
3ce6f7460240524ae740f15057482645e5bbafea
|
[
"MIT"
] |
permissive
|
jimsrc/seatos
|
63c8ad99f2b5d4ae5f203cdc8f8e061948f257f4
|
e775dba1a2a96ff44b837cf8d85101ccfef302b1
|
refs/heads/master
| 2021-01-02T08:38:51.349670
| 2017-09-01T01:59:35
| 2017-09-01T01:59:35
| 99,040,968
| 0
| 1
| null | 2017-09-01T01:59:36
| 2017-08-01T20:33:55
|
Python
|
UTF-8
|
Python
| false
| false
| 23,271
|
py
|
#!/usr/bin/env ipython
# -*- coding: utf-8 -*-
from scipy.io.netcdf import netcdf_file
import numpy as np
from numpy.linalg import norm
from datetime import datetime, timedelta
from h5py import File as h5
import os, sys, h5py, argparse
#--- shared libs
from shared.ShiftTimes import ShiftCorrection, ShiftDts
import shared.console_colors as ccl
from shared.shared_funcs import nans, My2DArray, selecc_window_ii
import shared.shared_funcs as sf
#+++++++++++++++++++++++++++++++++++++
#---- auxiliary functions for the
#---- data-handlers below
#+++++++++++++++++++++++++++++++++++++
def calc_beta(Temp, Pcc, B):
"""
    We take the OMNI definition of beta, from:
http://omniweb.gsfc.nasa.gov/ftpbrowser/magnetopause/Reference.html
http://pamela.roma2.infn.it/index.php
Beta = [(4.16*10**-5 * Tp) + 5.34] * Np/B**2 (B in nT)
"""
beta = ((4.16*10**-5 * Temp) + 5.34) * Pcc/B**2
return beta
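# Quick sanity check of calc_beta with synthetic (not measured) values:
#   Temp = 1e5 K, Pcc = 5 cm^-3, B = 5 nT
#   -> beta = ((4.16e-5 * 1e5) + 5.34) * 5 / 5**2 = 9.5 * 0.2 = 1.9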
def dates_from_omni(t):
time = []
n = len(t)
for i in range(n):
yyyy = t[i][0]
mm = t[i][1]
dd = t[i][2]
HH = t[i][3]
MM = t[i][4]
SS = t[i][5]
uSS = t[i][6]
time += [datetime(yyyy, mm, dd, HH, MM, SS, uSS)]
return time
def date_to_utc(fecha):
utc = datetime(1970, 1, 1, 0, 0, 0, 0)
sec_utc = (fecha - utc).total_seconds()
return sec_utc
def utc_from_omni(file):
t = np.array(file.variables['time'].data)
dates = dates_from_omni(t)
n = len(dates)
time = np.zeros(n)
for i in range(n):
time[i] = date_to_utc(dates[i])
return time
def read_hsts_data(fname, typic, ch_Eds):
"""
code adapted from ...ch_Eds_smoo2.py
"""
f = h5(fname, 'r')
# initial date
datestr = f['date_ini'].value
yyyy, mm, dd = map(int, datestr.split('-'))
INI_DATE = datetime(yyyy, mm, dd)
# final date
datestr = f['date_end'].value
yyyy, mm, dd = map(int, datestr.split('-'))
END_DATE = datetime(yyyy, mm, dd)
date = INI_DATE
tt, rr = [], []
ntot, nt = 0, 0
while date < END_DATE:
yyyy, mm, dd = date.year, date.month, date.day
path = '%04d/%02d/%02d' % (yyyy, mm, dd)
try:
dummy = f[path] # test if this exists!
except:
date += timedelta(days=1) # next day...
continue
ntanks = f['%s/tanks'%path][...]
cc = ntanks>150.
ncc = cc.nonzero()[0].size
        if ncc>1: # more than one data point has >150 tanks
time = f['%s/t_utc'%path][...] # utc secs
cts, typ = np.zeros(96, dtype=np.float64), 0.0
for i in ch_Eds:
Ed = i*20.+10.
cts += f['%s/cts_temp-corr_%04dMeV'%(path,Ed)][...]
typ += typic[i] # escalar
cts_norm = cts/typ
#aux = np.nanmean(cts_norm[cc])
tt += [ time[cc] ]
rr += [ cts_norm[cc] ]
ntot += 1 # files read ok
nt += ncc # total nmbr ok elements
date += timedelta(days=1) # next day...
#--- converting tt, rr to 1D-numpy.arrays
t, r = nans(nt), nans(nt)
ini, end = 0, 0
for i in range(ntot):
ni = len(tt[i])
t[ini:ini+ni] = tt[i]
r[ini:ini+ni] = rr[i]
ini += ni
f.close()
return t, r
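# A minimal usage sketch (hypothetical file name and channel choice): `typic`
# holds the typical (normalisation) counts per energy channel and `ch_Eds` the
# channel indices to accumulate:
#
#   t_utc, rate = read_hsts_data('hsts.h5', typic, ch_Eds=[3, 4, 5])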
class _read_auger_scals(object):
"""
reads different versions of corrected-scalers
"""
def __init__(self, fname_inp, data_name):
self.fname_inp = fname_inp
self.data_name = data_name
def read(self):
with h5py.File(self.fname_inp,'r') as f:
if 'auger' in f.keys():
return self.read_i()
elif 't_utc' in f.keys():
return self.read_ii()
else:
raise SystemExit('\
---> no reader setup for this version scaler file!\
')
def read_i(self):
"""
read first version of processed
corrected-scalers.
"""
f5 = h5py.File(self.fname_inp, 'r')
t_utc = f5['auger/time_seg_utc'][...].copy() #data_murdo[:,0]
CRs = f5['auger/sc_wAoP_wPres'][...].copy() #data_murdo[:,1]
print " -------> variables leidas!"
VARS = {
'CRs.'+self.data_name : {
'value' : CRs,
'lims' : [-1.0, 1.0],
'label' : 'Auger Scaler rate [%]',
},
}
return t_utc, VARS
def _pair_yyyymm(self, f, kname):
years = map(int, f[kname].keys())
ly, lm = [], []
for year in years:
months = map(int, f[kname+'/%04d'%year].keys())
nm = len(months)
ly += [year]*nm
lm += months
return zip(ly,lm)
def read_ii(self):
"""
read 2nd version of processed correctd-scalers.
We do NOT read the geop-height-corrected scalers, because
seems unphysical (i.e. geop height is not a parameter
for scalers correction!). So just use pressure-corrected ones.
"""
f = h5py.File(self.fname_inp,'r')
years_and_months = self._pair_yyyymm(f, 't_utc')
t_utc = My2DArray((3,), dtype=np.float32)
CRs = My2DArray((3,), dtype=np.float32)
n = 0
for yyyy, mm in years_and_months:
nt = f['t_utc/%04d/%02d'%(yyyy,mm)].size
t_utc[n:n+nt] = f['t_utc/%04d/%02d'%(yyyy,mm)][...]
CRs[n:n+nt] = f['wAoP_wPrs/%04d/%02d'%(yyyy,mm)][...]
n += nt
print " --> Auger scalers leidos!"
VARS = {
'CRs.'+self.data_name : {
'value' : CRs[:n],
'lims' : [-1.0, 1.0],
'label' : 'Auger Scaler rate [%]',
},
}
return t_utc[:n], VARS
def get_all_bartels():
dates = {}
ok2read = False
i = 0
for line in open('./bartels.txt','r').readlines():
if line in ('','\n'): continue
if line.startswith('Post L1 Insertion'): # cut here
ok2read = True
continue
if line.startswith(' *-Seconds'):
ok2read = False
continue
if ok2read:
#print line.split()
mm,dd,yyyy = map(int,line.split()[1].split('/'))
dates[i] = {
'bartel' : int(line.split()[0]), # Bartels rotation number
'date' : datetime(yyyy, mm, dd),
'ACEepoch' : float(line.split()[4]),
}
#print yyyy,mm,dd, dates[i]['ACEepoch']
i += 1
return dates
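# NOTE: inferred from the parsing above, a data row of 'bartels.txt' is
# assumed to look like
#   <bartel rotation number>  <mm/dd/yyyy>  <...>  <...>  <ACE epoch seconds>
# with the ACE epoch taken from the 5th whitespace-separated column.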
def deduce_fnms(bartels, ini, end, subdir=''):
fnms = []
n = len(bartels)
for i in range(n-1):
date = bartels[i]['date']
date_next = bartels[i+1]['date']
if date_next>=ini: #and date<end:
bart = bartels[i]['bartel'] # bartel rotation number
fnms += [subdir+'/mag_data_1sec_{bart}.hdf'.format(**locals())]
if date_next>end:
break ## FINISHED!
return fnms
def calc_rmsB(t_inp, B, width=3600., fgap=0.2, res_o=60):
"""
* t
time in seconds (be UTC-sec, GPS-sec, ACEepoch-sec, etc,
doesn't matter).
* B
vector such that, Bx=B[:,0], By=B[:,1], and Bz=B[:,2].
* width:
time size in seconds, of the width on which
we'll calculate the rmsB.
* fgap:
fraction of gaps that we'll tolerate.
    * res_o:
        output time resolution. Note that processing 1-sec data
        point by point is VERY expensive; the alternative used here
        is to process one data point every `res_o` points (i.e. with
        1-min cadence for res_o=60). NOTE: `width` must be an
        INTEGER number of seconds!!
"""
# to convert numpy warnings to errors
#np.seterr(all='raise')
t = t_inp.copy() # don't touch input data!
c1 = t<t[0] + 0.5*width
c2 = t>t[-1] - 0.5*width
# initial/final indexes on which we'll work
ini, end = c1.nonzero()[0][-1], c2.nonzero()[0][0]
# index list
t_indexes = np.arange(ini+1, end, res_o)
# outputs
rmsB = np.zeros(t_indexes.size, dtype=B.dtype)
rmsB_para = np.zeros(t_indexes.size, dtype=B.dtype)
rmsB_perp = np.zeros(t_indexes.size, dtype=B.dtype)
tnew = np.zeros(t_indexes.size, dtype=np.float64)
# half the size of width in number of index units
w2 = int(0.5*width)
for i, i_ in zip(t_indexes, range(t_indexes.size)):
tnew[i_] = t[i]
ts_ = slice(i-w2,i+w2+1) # time slice
ccg = ~np.isnan(B[ts_,0]) # False for gap values
# time indexes having good data, in our `ts_` window
ti = ts_.start + ccg.nonzero()[0] # {numpy.array} one-dimensional
# too many gaps
if (~ccg).nonzero()[0].size > (fgap*2*w2):
rmsB[i_] = np.nan
continue
#NOTE: a.std() is equivalent to np.sqrt(((a - a.mean())**2).sum()/a.size)
Bo = np.mean(B[ti,:], axis=0) # local Bo in the window `width`
dB = B[ti,:] - Bo # deviation of `B` from `Bo`
# parallel component of `dB` on `Bo`
dB_para = np.dot(dB, Bo/norm(Bo))
# perp component is `dB` minus the parallel part
"""
NOTE: np.outer() is the "outer product" of two vectors, so that
dB_para[0]*Bo/norm(Bo) is the parallel component of `dB` in
vector form (recall that `Bo` has a (3,) shape).
Then:
>>> dB[j,:] - np.outer(dB_para, Bo/norm(Bo))[j,:]
is the perpendicular component of `dB` for the time
index `j`.
"""
# rmsB
dB_perp = dB - np.outer(dB_para, Bo/norm(Bo))
ms = (np.square(dB)).sum()
ms /= 1.*ti.size
rmsB[i_] = np.sqrt(ms)
# rmsB (parallel)
ms = np.square(dB_para).sum()/(1.*ti.size)
rmsB_para[i_] = np.sqrt(ms)
# rmsB (perpendicular)
ms = np.square(dB_perp).sum()/(1.*ti.size)
rmsB_perp[i_] = np.sqrt(ms)
return tnew, rmsB, rmsB_para, rmsB_perp
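#--- usage sketch for calc_rmsB (illustrative only, with synthetic 1-sec data;
#    the values below are made up, not taken from the real ACE files):
#
#   t  = np.arange(0., 7200., 1.0)                    # 2 hr of 1-sec timestamps
#   B  = np.random.randn(t.size, 3) + [5., 0., 0.]    # noisy field around Bx=5 nT
#   tn, rms, rms_para, rms_perp = calc_rmsB(t, B, width=3600., fgap=0.2, res_o=60)
#   # `tn` has ~1-min cadence; `rms` fluctuates around sqrt(3) for
#   # unit-variance noise in each of the 3 components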
#+++++++++++++++++++++++++++++++++++++
#----------- data handlers -----------
#+++++++++++++++++++++++++++++++++++++
class _data_ACE(object):
"""
to read the .nc file of ACE data, built from ASCII versions
"""
def __init__(self, **kws):
self.fname_inp = kws['input']
def load(self, data_name, **kws):
f_sc = netcdf_file(self.fname_inp, 'r')
print " leyendo tiempo..."
t_utc = utc_from_omni(f_sc)
print " Ready."
tb = kws['tb'] # datetimes of borders of all structures
bd = kws['bd'] # borders of the structures we will use
#+++++++++++++++++++++++++++++++++++++++++++
B = f_sc.variables['Bmag'].data.copy()
Vsw = f_sc.variables['Vp'].data.copy()
Temp = f_sc.variables['Tp'].data.copy()
Pcc = f_sc.variables['Np'].data.copy()
rmsB = f_sc.variables['dBrms'].data.copy()
alphar = f_sc.variables['Alpha_ratio'].data.copy()
beta = calc_beta(Temp, Pcc, B)
rmsBoB = rmsB/B
print " -------> variables leidas!"
#------------------------------------ VARIABLES
VARS = {}
        # variable, file name, vertical limits, ylabel
VARS['B.'+data_name] = {
'value' : B,
'lims' : [5., 18.],
'label' : 'B [nT]'
}
VARS['V.'+data_name] = {
'value' : Vsw,
'lims' : [300., 650.],
'label' : 'Vsw [km/s]'
}
VARS['rmsBoB.'+data_name] = {
'value' : rmsBoB,
'lims' : [0.01, 0.2],
'label' : 'rms($\hat B$/|B|) [1]'
}
VARS['rmsB.'+data_name] = {
'value' : rmsB,
'lims' : [0.05, 2.0],
'label' : 'rms($\hat B$) [nT]'
}
VARS['beta.'+data_name] = {
'value' : beta,
'lims' : [0.001, 5.],
'label' : '$\\beta$ [1]'
}
VARS['Pcc.'+data_name] = {
'value' : Pcc,
'lims' : [2, 17.],
'label' : 'proton density [#/cc]'
}
VARS['Temp.'+data_name] = {
'value' : Temp,
'lims' : [1e4, 4e5],
'label' : 'Temp [K]'
}
VARS['AlphaRatio.'+data_name] = {
'value' : alphar,
'lims' : [1e-3, 0.1],
'label' : 'alpha ratio [1]'
}
#self.nvars = len(VARS.keys())
#---------
#self.aux = aux = {}
#aux['SELECC'] = self.SELECC
"""
NOTE: `bd` and `tb` have been shifted if
`self.FITLER['CorrShift']`==True in the
events_mgr() class.
"""
return {
't_utc' : t_utc,
'VARS' : VARS,
}
def grab_block(self, vname=None, **kws):
return selecc_window_ii(**kws)
class _data_Auger_BandMuons(object):
"""
for muon band of Auger charge histograms
"""
def __init__(self, **kws):
self.fname_inp = kws['input']
def load(self, data_name, **kws):
"""
        to read the Auger charge-histogram data
"""
f5 = h5(self.fname_inp, 'r')
ch_Eds = (10, 11, 12, 13)
# get the global-average histogram
nEd = 50
typic = np.zeros(nEd, dtype=np.float32)
for i in range(nEd):
Ed = i*20.+10.
typic[i] = f5['mean/corr_%04dMeV'%Ed].value
t_utc, CRs = read_hsts_data(self.fname_inp, typic, ch_Eds)
print " -------> variables leidas!"
VARS = {} #[]
VARS['CRs.'+data_name] = {
'value' : CRs,
'lims' : [-1.0, 1.0],
'label' : 'Auger (muon band) [%]'
}
return {
't_utc' : t_utc,
'VARS' : VARS,
}
def grab_block(self, vname=None, **kws):
return selecc_window_ii(**kws)
class _data_Auger_BandScals(object):
"""
    for the scaler (low-energy) band of Auger charge histograms
"""
def __init__(self, **kws):
self.fname_inp = kws['input']
def load(self, data_name, **kws):
"""
        to read the Auger charge-histogram data
"""
f5 = h5(self.fname_inp, 'r')
ch_Eds = (3, 4, 5)
# get the global-average histogram
nEd = 50
typic = np.zeros(nEd, dtype=np.float32)
for i in range(nEd):
Ed = i*20.+10.
typic[i] = f5['mean/corr_%04dMeV'%Ed].value
t_utc, CRs = read_hsts_data(self.fname_inp, typic, ch_Eds)
print " -------> variables leidas!"
VARS = {} #[]
VARS['CRs.'+data_name] = {
'value' : CRs,
'lims' : [-1.0, 1.0],
            'label' : 'Auger (scaler band) [%]'
}
return {
't_utc' : t_utc,
'VARS' : VARS,
}
def grab_block(self, vname=None, **kws):
return selecc_window_ii(**kws)
class _data_ACE_o7o6(object):
def __init__(self, **kws):
self.fname_inp = kws['input']
def load(self, data_name, **kws):
self.f_sc = netcdf_file(self.fname_inp, 'r')
print " leyendo tiempo..."
t_utc = utc_from_omni(self.f_sc)
print " Ready."
#++++++++++++++++++++++++++++++++++++++++++++++++
o7o6 = self.f_sc.variables['O7toO6'].data.copy()
print " -------> variables leidas!"
#----------------------- VARIABLES
self.t_utc = t_utc
self.VARS = VARS = {}
        # variable, file name, vertical limits, ylabel
VARS['o7o6'] = {
'value' : o7o6,
'lims' : [0.0, 1.5],
'label' : 'O7/O6 [1]'
}
return {
't_utc' : t_utc,
'VARS' : VARS,
}
def grab_block(self, vname=None, **kws):
return selecc_window_ii(**kws)
class _data_Auger_scals(object):
def __init__(self, **kws):
self.fname_inp = kws['input']
def load(self, data_name, **kws):
"""
        we only load the Auger Scalers
"""
opt = {
'fname_inp' : self.fname_inp,
'data_name' : data_name,
}
"""
the class `_read_auger_scals` reads both versions of
scalers (old & new).
"""
sc = _read_auger_scals(**opt)
t_utc, VARS = sc.read()
return {
't_utc' : t_utc,
'VARS' : VARS,
}
def grab_block(self, vname=None, **kws):
return selecc_window_ii(**kws)
class _data_McMurdo(object):
def __init__(self, **kws):
self.fname_inp = kws['input']
def load(self, data_name, **kws):
fname_inp = self.fname_inp
data_murdo = np.loadtxt(fname_inp)
        t_utc = data_murdo[:,0]
CRs = data_murdo[:,1]
print " -------> variables leidas!"
VARS = {}
VARS['CRs.'+data_name] = {
'value' : CRs,
'lims' : [-8.0, 1.0],
'label' : 'mcmurdo rate [%]'
}
return {
't_utc' : t_utc,
'VARS' : VARS,
}
def grab_block(self, vname=None, **kws):
return selecc_window_ii(**kws)
#--- reader for ACE 1seg MAG data
class _data_ACE1sec(object):
"""
the parameters below are for the processing of deduced
observables, such as "rmsB".
They are used in `self.grab_block()`.
"""
width = 3600. # [sec] time width of rmsB-calculation
fgap = 0.2 # [1] gap-fraction to tolerate
res_o = 60 # [sec] output resolution
def __init__(self, **kws):
self.dir_inp = kws['input']
self.now = None
#@profile
def load(self, **kws):
import cython_wrapper
self.cw = cython_wrapper
        # contains: bartels rotation numbers, ACEepochs, and datetimes.
self.bartels = get_all_bartels() # {dict}
self.nbartels = len(self.bartels)
self.dname = dname = kws['data_name']
VARS = {}
"""
        the keys of `VARS` will be used to iterate on the
possible values of `vname` in `self.grab_block()`.
"""
VARS['Bmag.'+dname] = {
'value' : None,
'lims' : [5., 18.],
'label' : 'B [nT]',
}
VARS['rmsB.'+dname] = {
'value' : None,
'lims' : [0.5, 11.],
'label' : 'rms($\hat B$) [nT]',
}
VARS['rmsB_ratio.'+dname] = {
'value' : None,
'lims' : [0.5, 50.],
'label' : '$\delta B^2_{{\perp}} / \delta B^2_{{\parallel}}$'+\
' ($\Delta t:$ {dt:2.1f} hr)'.format(dt=self.width/3600.),
}
return {
# this is the period for available data in our input directory
#'t_utc' : [883180800, 1468713600], # [utc sec]
't_utc' : [sf.date2utc(self.bartels[0]['date']),
sf.date2utc(self.bartels[self.nbartels-1]['date'])], # [utc sec]
'VARS' : VARS,
}
#@profile
def grab_block(self, vname=None, **kws):
# alias
OneDay = timedelta(days=1) # {timedelta}
# time extent of queried data, in terms of the
# size of the structure
nbef, naft = kws['nwndw']
# range of requested data
tini = kws['tini'] - nbef*OneDay # {datetime}
tend = kws['tend'] + naft*OneDay # {datetime}
# if the bounds of the events are out of the
# boundaries of the available data, return error
assert self.bartels[0]['date']<=tini and \
self.bartels[self.nbartels-1]['date']>=tend,\
"""
[-] ERROR:
# no data for this `vname` in
# this time window!!
--- window of available data:
ini: {d_ini}
end: {d_end}
--- window of requested data:
ini: {r_ini}
end: {r_end}
""".format(
r_ini = tini,
r_end = tend,
d_ini = self.bartels[0]['date'],
d_end = self.bartels[self.nbartels-1]['date'],
)
# -- deduce fnm_ls
        subdir = '/media/hdd_extern_hegea/data_ace/mag_data_1sec'
fnm_ls = deduce_fnms(self.bartels, tini, tend, subdir)
for fnm in fnm_ls:
print fnm
assert os.path.isfile(fnm)
# -- deduce ace_ini, ace_end
ace_ini = sf.date2ACEepoch(tini)
ace_end = sf.date2ACEepoch(tend)
m = self.cw.mag_l2(fnm_ls) # cython function
m.indexes_for_period(ace_ini, ace_end)
#NOTE: make `copy()` to avoid memory overlapping? (maybe
# some weird numpy implementation)
t_ace = m.return_var('ACEepoch').copy() # [ACE epoch seconds]
varname = vname.replace('.'+self.dname,'') # remove '.ACE1sec'
if varname.startswith('rmsB') and self.now!=(tini,tend):
"""
only do the rms calculation if we didn't
for this period (tini,tend) already!
"""
# deduced quantity
Bx = m.return_var('Bgse_x').copy()
By = m.return_var('Bgse_y').copy()
Bz = m.return_var('Bgse_z').copy()
cc = Bx<-900. # True for gaps
# fill gaps with NaNs
Bx[cc], By[cc], Bz[cc] = np.nan, np.nan, np.nan
self.t_out, self.rmsB, self.rmsB_para, self.rmsB_perp = calc_rmsB(
t_inp = t_ace,
B = np.array([Bx,By,Bz]).T,
width = self.width,
fgap = self.fgap,
res_o = self.res_o,
)
"""
NOTE: `t_out` is supposed to have a time resolution
of `res_o`. This can be tested by printing:
>>> print np.unique(t_out[1:]-t_out[:-1])
"""
# to avoid doing the calculation for the
# next rms quantity, in this same period (tini,tend).
self.now = (tini, tend)
if varname=='rmsB':
t_out = self.t_out
var = self.rmsB
elif varname=='rmsB_ratio':
t_out = self.t_out
var = np.square(self.rmsB_perp/self.rmsB_para)
else:
var = m.return_var(varname).copy()
t_out = t_ace
#assert len(var)!=1 and var!=-1, ' ## wrong varname!'
if type(var)==int:
assert var!=-1, " ## error: wrong varname "
cc = var<-100.
var[cc] = np.nan # put NaN in flags
t_utc = t_out + 820454400.0 # [utc sec] ACEepoch -> UTC-sec
kws.pop('data') # because its 'data' does not make sense here, and
# therefore we can replace it below.
return selecc_window_ii(
data=[t_utc, var],
**kws
)
#+++++++++++++++++++++++++++++++++++++
#------------ testing --------------
#+++++++++++++++++++++++++++++++++++++
def main():
ini, end = datetime(2005,1,1), datetime(2005,6,1)
bartels = get_all_bartels()
if __name__=='__main__':
main()
#EOF
|
[
"jimmy.ilws@gmail.com"
] |
jimmy.ilws@gmail.com
|
5155217b001f024d04577a620ef84c24673c7f6a
|
aac55459f0aa63942c7304a5324f3b7f6fe9874e
|
/removefiles.py
|
60c26887603ea894bfbc9dac33148062e5800804
|
[] |
no_license
|
RkTm100/Remove-files-project
|
d52b78193a95240cdf8e9a8dbcb631dd45796228
|
de8d23bb6f3e1a2c5e9a81b9f7f1b5793f553196
|
refs/heads/main
| 2023-06-23T02:11:27.466118
| 2021-07-15T01:56:09
| 2021-07-15T01:56:09
| 386,130,243
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,315
|
py
|
import os
import shutil
import time
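# the code below calls three helpers that this file never defines; these are
# minimal sketches with assumed semantics (creation time as the "age", and
# shutil/os doing the actual removal) so the script can run:
def get_file_or_folder_age(path):
    # seconds since the epoch at which the file/folder was created
    return os.path.getctime(path)

def remove_folder(path):
    # delete the folder and everything inside it
    if os.path.isdir(path):
        shutil.rmtree(path)
        print(f"removed folder: {path}")

def remove_file(path):
    # delete a single file
    if os.path.isfile(path):
        os.remove(path)
        print(f"removed file: {path}")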
def main():
deleted_files=0
deleted_folders=0
path="C:\Users\asish\OneDrive\Desktop\backup files for class\backfiles removing folder testing"
days=30
seconds=time.time(-days*24*60*60)
if os.path.exists(path):
for root_folder, folders, files in os.walk(path):
if seconds >= get_file_or_folder_age(root_folder):
remove_folder(root_folder)
deleted_folders+=1
break
else:
for folder in folders:
folder_path=os.path.join(root_folder,folder)
if seconds >= get_file_or_folder_age(folder_path):
remove_folder(folder_path)
deleted_folders+=1
for file in files:
file_path=os.path.join(root_folder,file)
if seconds >= get_file_or_folder_age(file_path):
remove_file(file_path)
deleted_files+=1
    else:
        print("there are no files or folders")
    print(f"total folders deleted: {deleted_folders}")
    print(f"total files deleted: {deleted_files}")
main()
|
[
"noreply@github.com"
] |
RkTm100.noreply@github.com
|
46465500fcd64b9c80c677b23f3c2d5ec50ef1f0
|
b7e0ea14903ac611413e490c741f5b7e4ffb29df
|
/MySQL数据库命令.py
|
d4c6c66dc2a7bd74f2e64fba37b1caaab13b8d63
|
[] |
no_license
|
pointworld/python
|
c729d6fc029edf28a3b331adf7de888af0216646
|
06bee716f2615526757a67dcd35be59abc31ff41
|
refs/heads/master
| 2021-07-13T13:14:55.540038
| 2017-10-09T14:45:50
| 2017-10-09T14:45:50
| 103,842,172
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,636
|
py
|
A collection of common programming commands
MySQL database commands
Log in to MySQL:
mysql -h localhost -u root -p
    localhost: IP address;
    root: user name;
    database: database name (optional; if given, it follows -p)
Drop a database:
mysqladmin -u root -pwrf956750621 drop awesome
Initialize a database:
mysql -u root -p<password> < D:\computer_learning\backup\schema.sql
Two ways to log in:
1. mysql -u root -p
2. mysql -h localhost -u root -p database_name
List databases:
show databases;
Select a database:
use database_name;
List tables:
show tables;
Show the columns of a table:
show columns from table_name;
describe table_name;
Export an entire database:
mysqldump -u user_name -p database_name > path_file_name
e.g.: mysqldump -u root -p test_db > d:/test_db.sql
Export a single table:
mysqldump -u user_name -p database_name table_name > /tmp/file_name
e.g.: mysqldump -u root -p test_db table1 > d:/table1.sql
Export only the database structure:
mysqldump -u user_name -p -d --add-drop-table database_name > file_name
e.g.: mysqldump -u root -p -d --add-drop-table test_db > test_db.sql
Import a database:
source file_name;
or
mysql -u user_name -p database_name < file_name
e.g.:
source /tmp/bbs.sql;
source d:/bbs.sql;
mysql -u root -p bbs < "d:/bbs.sql"
mysql -u root -p bbs < "/tmp/bbs.sql"
Load a text file into a table (same for Excel files):
load data infile "tables.txt" into table table_name;
e.g.:
load data infile "/tmp/bbs.txt" into table bbs;
load data infile "/tmp/bbs.xls" into table bbs;
load data infile "d:/bbs.txt" into table bbs;
load data infile "d:/bbs.xls" into table bbs;
Export a table to a text file (same for Excel files):
select * into outfile "path_file_name" from table_name;
e.g.:
select * into outfile "/tmp/bbs.txt" from bbs;
select * into outfile "/tmp/bbs.xls" from bbs where id=1;
select * into outfile "d:/bbs.txt" from bbs;
select * into outfile "d:/bbs.xls" from bbs where id=1;
Create a database only if it does not already exist:
create database if not exists database_name;
e.g.: create database if not exists bbs
Create a database:
create database database_name;
e.g.: create database bbs;
Drop a database:
drop database database_name;
e.g.: drop database bbs;
Create a table:
mysql> create table <table_name> ( <column 1 name> <col. 1 type> <col. 1 details>, <column 2 name> <col. 2 type> <col. 2 details>, ...);
e.g.: create table bbs (id int not null auto_increment primary key, name char(16) not null default "jack", date_year date not null);
Delete rows from a table:
delete from table_name;
e.g.:
delete from bbs;
delete from bbs where id=2;
Drop a table from a database:
drop table table_name;
e.g.:
drop table test_db;
rm -f database_name/table_name.* (on Linux)
e.g.:
rm -rf bbs/accp.*
Insert data into a table:
insert into table_name set column_name1=value1,column_name2=value2;
e.g.: insert into bbs set name="jack",date_year="1993-10-01";
insert into table_name values (column1,column2,...);
e.g.: insert into bbs values ("2","jack","1993-10-02");
insert into table_name (column_name1,column_name2,...) values (value1,value2);
e.g.: insert into bbs (name,date_year) values ("jack","1993-10-01");
Query data from a table:
select * from table_name;
e.g.: select * from bbs where id=1;
Update data in a table:
update table_name set col_name=new_value where id=1;
e.g.: update bbs set name="tom" where name="jack";
Add a column:
alter table table_name add column field_name datatype not null default "1";
e.g.: alter table bbs add column tel char(16) not null;
Add several columns (the keyword "column" may be omitted):
alter table table_name add column field_name1 datatype,add column field_name2 datatype;
e.g.: alter table bbs add column tel char(16) not null,add column address text;
Drop a column:
alter table table_name drop field_name;
e.g.: alter table bbs drop tel;
Change a column's data type:
alter table table_name modify id int unsigned;    -- change the type of column id to int unsigned
alter table table_name change id sid int unsigned;    -- rename column id to sid and change its type to int unsigned
Change a column's default value:
alter table table_name modify column_name datatype not null default "";
e.g.: alter table test_db modify name char(16) not null default "yourname";
Rename a table:
alter table table_name rename as new_table_name;
e.g.: alter table bbs rename as bbs_table;
rename table old_table_name to new_table_name;
e.g.: rename table test_db to accp;
Copy the structure of an existing table (without its data):
create table table2 select * from table1 where 1<>1;
e.g.: create table test_db select * from accp where 1<>1;
Get the current time:
select now();
Get the current user:
select user();
Get the server version:
select version();
Create an index:
alter table table1 add index ind_id(id);
create index ind_id on table1(id);
create unique index ind_id on table1(id);    -- create a unique index
Drop an index:
drop index idx_id on table1;
alter table table1 drop index ind_id;
Concatenate columns or strings (join id with ":" and column name with "="):
select concat(id,':',name,'=') from table;
LIMIT (select rows 10 through 19):
select * from bbs order by id limit 9,10;
(limit 9,10 skips the first 9 rows of the result and returns the next 10)
Add an administrator account:
grant all on *.* to user@localhost identified by "password";
Create a table only if it does not already exist:
create table if not exists students(……);
Copy a table (with its data):
create table table2 select * from table1;
e.g.: create table test_db select * from accp;
Grant a user remote access to MySQL:
grant all privileges on *.* to "root"@"%" identified by "password" with grant option;
or modify the host field of the user table in the mysql database:
use mysql;
select user,host from user;
update user set host="%" where user="user_name";
Show current status:
show status;
Show current client connections:
show processlist;
(as root this lists all threads; the number of user connections matches the Threads_connected value in show status;)
QQ mail authorization password: xgyphxmyyntjbfbg
|
[
"onerf@sina.com"
] |
onerf@sina.com
|
515ba7852c21cbbd7571153357208c1a9f99f9b1
|
e82eeedeb4a90ab6faf368da2eb3d9d579b72e2a
|
/video_uploader/video/migrations/0002_remove_video_name.py
|
ddda84b01ea94d51244f006bafc32b7955bac575
|
[] |
no_license
|
anuragregmi/video-uploader-backend
|
75c69202ad6c3dc4c0552d2414996e0ef1b28964
|
5d746837a7234c6720e04e46c3ec807fa5fe150d
|
refs/heads/master
| 2023-07-05T10:40:13.174805
| 2021-08-10T07:57:41
| 2021-08-10T07:57:41
| 393,454,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
# Generated by Django 3.2.5 on 2021-08-06 16:45
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('video', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='video',
name='name',
),
]
|
[
"anuragregmi@protonmail.com"
] |
anuragregmi@protonmail.com
|
2ee77d79324b60b7b8ff9fb60468d0490a613e67
|
be328c1bc9e185ef378518d3e5a72ca354761235
|
/rast.py
|
8526e15b9f5c7c47c82f62c8ad81b4917609e0ef
|
[] |
no_license
|
gdmike/gdtest
|
9e38a6461621aa546d7d44bf324f74644b26d1da
|
40bff4b46092c8df5bfb1e88a1b68288232344ad
|
refs/heads/master
| 2020-04-01T12:39:02.226655
| 2013-08-09T18:44:57
| 2013-08-09T18:44:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
import sys
from math import cos, pi
args = sys.argv[1:20]
x = [float(i) for i in args]
s0 = 0.0
for i in xrange(len(x)):
t = x[i]
s0 += t*t - 10*cos(2.0 * pi * x[i])
print s0 + len(x)*10
|
[
"engineering@mikevargas.com"
] |
engineering@mikevargas.com
|
d4cb207b0b113e1652f2c36c3180ac1111e4a42e
|
5fc359e00cf5e4d4f7aaf8500e3277ee761ed95b
|
/task1.py
|
18ad829254c34cba99cc004b930a7c0f465bd980
|
[] |
no_license
|
sattwik21/spectrum-task-2
|
753c9923fe7bd2376c258f8d50c28369a3fa7895
|
abae659d7d6f98271b1e6ad74eed32498efee0ed
|
refs/heads/master
| 2022-12-24T04:54:18.468270
| 2020-10-01T05:14:09
| 2020-10-01T05:14:09
| 265,745,250
| 1
| 1
| null | 2020-10-01T05:14:10
| 2020-05-21T03:27:46
|
Python
|
UTF-8
|
Python
| false
| false
| 187
|
py
|
def fun1(a,b):
def fun2():
s = a+b
return s
c = fun2()
return c + 5
a = int(input("enter value of a:"))
b = int(input("enter value of b:"))
result = fun1(a,b)
print(result)
|
[
"sattwikpalai21@gmail.com"
] |
sattwikpalai21@gmail.com
|
97b5f0ef79377fda71bc581d5362b3ce878e0112
|
38ee3fcf01955180881c9b16f2c04bd3367a19f9
|
/Examples/life.py
|
ad6c2b7e3d43292c9336a52a03ad90fe15f69c41
|
[] |
no_license
|
DriesDD/python-workshop
|
bb0566cb9e89d5e97c4d860e0174653c230fadef
|
3b62f237838c421da85923ef286fe2cf840a661b
|
refs/heads/main
| 2023-01-16T01:50:20.331434
| 2020-11-24T21:09:30
| 2020-11-24T21:09:30
| 315,424,182
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,068
|
py
|
"""Game of Life simulation.
Conway's game of life is a classic cellular automaton created in 1970 by John
Conway. https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life
Exercises
1. Can you identify any Still Lifes, Oscillators, or Spaceships?
2. How can you make the simulation faster? Or bigger?
3. How would you modify the initial state?
4. Try changing the rules of life :)
5. Try making it interactive! Make it possible to draw more cells, or pause or speed up the game.
"""
from random import choice
from turtle import *
cells = {}
def square(x, y, size, name):
"""Draw square at `(x, y)` with side length `size` and fill color `name`.
The square is oriented so the bottom left corner is at (x, y).
"""
import turtle
turtle.up()
turtle.goto(x, y)
turtle.down()
turtle.color(name)
turtle.begin_fill()
for count in range(4):
turtle.forward(size)
turtle.left(90)
turtle.end_fill()
def initialize():
"Randomly initialize the cells."
for x in range(-200, 200, 10):
for y in range(-200, 200, 10):
cells[x, y] = False
for x in range(-50, 50, 10):
for y in range(-50, 50, 10):
cells[x, y] = choice([True, False])
def step():
"Compute one step in the Game of Life."
neighbors = {}
for x in range(-190, 190, 10):
for y in range(-190, 190, 10):
count = -cells[x, y]
for h in [-10, 0, 10]:
for v in [-10, 0, 10]:
count += cells[x+h, y+v]
neighbors[x, y] = count
for cell, count in neighbors.items():
if cells[cell]:
if count < 2 or count > 3:
cells[cell] = False
elif count == 3:
cells[cell] = True
def draw():
"Draw all the squares."
step()
clear()
for (x, y), alive in cells.items():
color = 'green' if alive else 'black'
square(x, y, 10, color)
update()
ontimer(draw, 100)
setup(420, 420, 370, 0)
hideturtle()
tracer(False)
initialize()
draw()
done()
|
[
"driesdedecker@gmail.com"
] |
driesdedecker@gmail.com
|
a517580985bfc806ff4b6ef05aadef44df036c49
|
823ee0f0a4d9902b31d9d5846f6ed0e95126708c
|
/models.py
|
d1d67db2408d03d658f95ebf3f421ba16a6aa3a5
|
[] |
no_license
|
z-cabric/blogz
|
e10486255b273bf04eae4f280cc6fa0f02343ac4
|
ab3f167494aefb6a8b4a7e09c15325867a2b2643
|
refs/heads/master
| 2023-01-11T14:31:48.714831
| 2018-02-05T03:50:03
| 2018-02-05T03:50:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 670
|
py
|
from app import db
class Blog(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(40))
body = db.Column(db.String(500))
owner_id = db.Column(db.Integer, db.ForeignKey("user.id"))
def __init__(self, title, body, owner):
self.title = title
self.body = body
self.owner_id = owner
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(32))
password = db.Column(db.String(32))
blogs = db.relationship("Blog", backref="owner")
def __init__(self, username, password):
self.username = username
self.password = password
|
[
"cabricz@gmail.com"
] |
cabricz@gmail.com
|
ac96698f9e823501b4e9678648199c29f14f8d32
|
236c6d7f53d61dfbddefa3b6574f181ccef72e60
|
/lessons/lesson18/demo/migrations/0001_initial.py
|
5c3a99d8634bbc9b10b49ec5bf5f502928440c92
|
[] |
no_license
|
maxchv/LearnPython
|
94193c9770dc7b788099076316a1dbd6a5112cf4
|
561f74f39644cd6694ef36869beb7ddb6ff006dc
|
refs/heads/master
| 2022-07-07T18:31:02.274159
| 2022-06-15T15:10:40
| 2022-06-15T15:10:40
| 65,457,162
| 1
| 7
| null | 2019-10-23T05:42:30
| 2016-08-11T09:30:53
|
Python
|
UTF-8
|
Python
| false
| false
| 677
|
py
|
# Generated by Django 4.0.4 on 2022-05-11 18:48
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('description', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
],
),
]
|
[
"maxshaptala@gmail.com"
] |
maxshaptala@gmail.com
|
36f5343ff6a33617ffdbabae7e6f01da48966d40
|
a199d70c94ab465da2b4ca504f6fb6d69cf5eba0
|
/Python/ex9/ex9.py
|
4b90547461172385bff7ba86a82e48be4413cffa
|
[] |
no_license
|
robgoyal/LearnCodeTheHardWay
|
abd4f8665a618589c6bed41eb083d65904b1a857
|
b67f35060d72abc1b8ca1b258c4962f6c3530d4d
|
refs/heads/master
| 2021-01-20T20:21:32.106441
| 2016-08-08T20:07:02
| 2016-08-08T20:07:02
| 61,711,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
#!/bin/python2
# Printing, Printing, Printing
# Here's some new strange stuff, remember type it exactly.
days = "Mon Tue Wed Thu Fri Sat Sun"
months = "Jan\nFeb\nMar\nApr\nMay\nJun\nJul\nAug"
print "Here are the days: ", days
print "Here are the months: ", months
print """
There's something going on here.
With the three double-quotes.
We'll be able to type as much as we like.
Even 4 lines if we want, or 5, or 6.
"""
|
[
"robin_goyal@hotmail.com"
] |
robin_goyal@hotmail.com
|
52336540f344c0aeb41dc50f505dcc622075cca4
|
94ec56a9c81085e04f15382a6d3ba0e8944fe589
|
/chapter1/month_name.py
|
65bbacda11490141e8ac4a885f7a3ed5975c5b70
|
[] |
no_license
|
indraputra147/pythonworkbook
|
39ccd4bb6b8ee24debed13938c88f119819603b0
|
af2ed0879234085ef3e2b6fc747f84dee703bc97
|
refs/heads/master
| 2022-08-28T18:31:46.151942
| 2020-05-25T12:04:06
| 2020-05-25T12:04:06
| 262,615,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 547
|
py
|
def mn(x):
if x == 1:
return "January"
elif x == 2:
return "February"
elif x == 3:
return "March"
elif x == 4:
return "April"
elif x == 5:
return "May"
elif x == 6:
return "June"
elif x == 7:
return "July"
elif x == 8:
return "August"
elif x == 9:
return "September"
elif x == 10:
return "October"
elif x == 11:
return "November"
elif x == 12:
return "December"
else:
return "Wrong input!"
|
[
"indraputra785@gmail.com"
] |
indraputra785@gmail.com
|
eb9c6c4846c223dc0bc16732c3ae5abb061b94d9
|
e84242b4e00b2afdcda6d9b68292631c3c86d9f1
|
/hangar_2019/vinogradov.py
|
bfe7181d8335edf9fb5ed44ce09f9ddd4b9a056b
|
[] |
no_license
|
Gleb-bit/astrobox-project
|
ac12b92255febafd196cf2ba717ecd4aa3771fb5
|
de6a74db001a4d4e9456d8946a741164190b32ae
|
refs/heads/main
| 2023-03-18T18:19:32.730946
| 2021-02-27T15:49:07
| 2021-03-03T16:06:31
| 346,435,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,016
|
py
|
from astrobox.core import Drone
ASTEROIDS_DRONES = {}
class VinogradovDrone(Drone):
def on_born(self):
if len(ASTEROIDS_DRONES) == 0:
self._fill_holder()
asteroid = self.choose_asteroid()
self.move_at(asteroid)
def on_stop_at_asteroid(self, asteroid):
if not self.mothership.is_full:
self.load_from(asteroid)
def on_load_complete(self):
asteroid = self.choose_asteroid()
if not self.is_full and asteroid is not None:
self.move_at(asteroid)
else:
self.move_at(self.my_mothership)
def on_stop_at_mothership(self, mothership):
self.unload_to(mothership)
def on_unload_complete(self):
asteroid = self.choose_asteroid()
if asteroid is not None:
self.move_at(asteroid)
def _fill_holder(self):
for asteroid in self.asteroids:
if asteroid.payload > 0:
if asteroid not in ASTEROIDS_DRONES:
ASTEROIDS_DRONES[asteroid] = []
    def choose_asteroid(self):
        # drop empty asteroids this drone was assigned to; collect them first,
        # because popping while iterating over the dict raises RuntimeError
        emptied = [aster for aster, drones in ASTEROIDS_DRONES.items()
                   if self in drones and aster.is_empty]
        for aster in emptied:
            ASTEROIDS_DRONES.pop(aster)
asteroids_params = [asteroid for asteroid in self.asteroids if not asteroid.is_empty]
asteroids_params.sort(key=lambda ast: self.distance_to(ast)/ast.payload)
if len(asteroids_params) > 0:
for sorted_asteroid in asteroids_params:
asteroid_drones = ASTEROIDS_DRONES[sorted_asteroid]
free_space = [drone.free_space for drone in asteroid_drones if drone != self]
free_space.append(self.free_space)
free_space_sum = sum(free_space)
if sorted_asteroid.payload >= free_space_sum*.8:
ASTEROIDS_DRONES[sorted_asteroid].append(self)
return sorted_asteroid
return asteroids_params[0]
drone_class = VinogradovDrone
|
[
"suguby@gmail.com"
] |
suguby@gmail.com
|
1d75cf8dfda5387bbd92d44b25bf36a51f150889
|
6dd01979146e130c94cab4326baab54eeb3f3f68
|
/hangman.py
|
d65f3b0a1a733b07042e1276e78a61e6940351e7
|
[] |
no_license
|
harshitanand/6.00.1x-Computer-Programming-Using-Python
|
0f884ac13ac9e1b1eb2fed2ff125493ad3bfba4f
|
792c1d4dc7703d25e760dcb47e9004857779ed96
|
refs/heads/master
| 2016-09-15T14:32:26.016967
| 2014-10-29T19:17:27
| 2014-10-29T19:17:27
| 25,719,583
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,176
|
py
|
def hangman(secretWord):
print 'Welcome to the game, Hangman!', '\nI am thinking of a word that is ' + str(len(secretWord)) + ' letters long.'
lettersGuessed = []
win = False
miss = 8
while True:
print '-------------'
if miss == 0:
print 'Sorry, you ran out of guesses. The word was ' + secretWord + '.'
            break
win = isWordGuessed(secretWord, lettersGuessed)
if win == True:
print 'Congratulations, you won!'
            break
print 'You have ' + str(miss) + ' guesses left.', '\nAvailable letters: ' + getAvailableLetters(lettersGuessed)
guess = raw_input('Please guess a letter: ').lower()
if guess in lettersGuessed:
print 'Oops! You\'ve already guessed that letter: ', getGuessedWord(secretWord, lettersGuessed)
else:
lettersGuessed.append(guess)
lettersGuessed.sort()
if guess not in secretWord:
print 'Oops! That letter is not in my word: ',
miss -= 1
else:
print 'Good guess: ',
print getGuessedWord(secretWord, lettersGuessed)
|
[
"harshitanand94@gmail.com"
] |
harshitanand94@gmail.com
|
0a9b94032f274d1b3137ec9c09acdaf1e39f6998
|
70a9233ceddcbabe7dcce5484606401e567f6ba9
|
/message_board/posts/forms.py
|
a4bc741e35d125f682f3526b1d9fe2622ce38f9f
|
[
"MIT"
] |
permissive
|
robalford/message_board
|
e736ef6a4c2a3381ca83d712d62982b1f5a7f60e
|
744f1f524ee8a60c5b799fc7a9d911818a519bf1
|
refs/heads/master
| 2021-08-23T22:04:13.100410
| 2017-12-06T19:30:02
| 2017-12-06T19:30:02
| 111,590,468
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
from django import forms
from .models import Post
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = ['post', ]
widgets = {'post': forms.TextInput}
class LoatheForm(forms.Form):
post_id = forms.CharField(widget=forms.HiddenInput)
|
[
"robertsalford@gmail.com"
] |
robertsalford@gmail.com
|
f7d6c4be894a80d920335e88119b8b8f5cb8e7ba
|
325bee18d3a8b5de183118d02c480e562f6acba8
|
/germany/germany_l/germany/ScriptDir/Move_2_Nas.py
|
ad52465414998306bcd3649c403736db8a51f842
|
[] |
no_license
|
waynecanfly/spiderItem
|
fc07af6921493fcfc21437c464c6433d247abad3
|
1960efaad0d995e83e8cf85e58e1db029e49fa56
|
refs/heads/master
| 2022-11-14T16:35:42.855901
| 2019-10-25T03:43:57
| 2019-10-25T03:43:57
| 193,424,274
| 4
| 0
| null | 2022-11-04T19:16:15
| 2019-06-24T03:00:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,962
|
py
|
# -*- coding: utf-8 -*-
import pymysql
from ftplib import FTP
import os
class Move2Nas(object):
num = 0
def __init__(self):
self.conn = pymysql.connect(host="10.100.4.99", port=3306, db="opd_common", user="root", passwd="OPDATA", charset="utf8")
self.cursor = self.conn.cursor()
def get_fiscal_year(self, file_name):
"""获取财年"""
sql = "select fiscal_year from non_financial_statement_index where report_id=%s"
self.cursor.execute(sql, file_name.split(".")[0])
result = self.cursor.fetchone()
if result:
return str(result[0])
else:
sql = "select fiscal_year from financial_statement_index where report_id=%s"
self.cursor.execute(sql, file_name.split(".")[0])
result = self.cursor.fetchone()
if result:
return str(result[0])
else:
return "0000"
def ftpconnect(self, host, username, password):
"""建立连接"""
ftp = FTP()
ftp.connect(host, 21)
ftp.login(username, password)
print(ftp.getwelcome())
return ftp
def uploadfile(self, ftp, remotepath, localpath):
"""从本地上传文件到FTP"""
bufsize = 1024
fp = open(localpath, 'rb')
ftp.storbinary('STOR ' + remotepath, fp, bufsize)
ftp.set_debuglevel(0)
fp.close()
def Move2NasMain(self, LocalDir, NasDir):
ftp = self.ftpconnect("10.100.4.102", "admin", "originp123")
dir_list = os.listdir(LocalDir)
for temp in dir_list:
fiscal_year = self.get_fiscal_year(temp)
try:
ftp.mkd(NasDir + fiscal_year)
except:
pass
self.num += 1
self.uploadfile(ftp, NasDir + fiscal_year + "/" + temp, LocalDir + "/" + temp)
print("已上传%s个文件到NAS服务器" % self.num)
ftp.quit()
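# usage sketch (both paths are hypothetical placeholders; NasDir is expected
# to end with '/' since the fiscal-year folder name is appended to it):
#   Move2Nas().Move2NasMain("/data/reports", "/reports_by_year/")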
|
[
"1370153124@qq.com"
] |
1370153124@qq.com
|
73e68b63e46a4312ffdc6922e6c83c88b9c0bc31
|
f9b9b12b8dbe853b9a201cb95cd780916c67901e
|
/Backend/splitpay/splitpay/asgi.py
|
edfdbdcbe1f908a3896c4f31208e996264286fee
|
[
"MIT"
] |
permissive
|
tanmayb104/SplitPay-HCI
|
d83c5215ddd0e815ee0c40f7166551ac2defcc11
|
b7a22639670706fb7b173c25a7ebbb84e929cecb
|
refs/heads/main
| 2023-07-04T06:03:55.961814
| 2021-08-05T07:18:49
| 2021-08-05T07:18:49
| 373,949,358
| 3
| 0
|
MIT
| 2021-06-04T20:10:38
| 2021-06-04T20:10:38
| null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
ASGI config for splitpay project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'splitpay.settings')
application = get_asgi_application()
|
[
"tanmaybansal104@gmail.com"
] |
tanmaybansal104@gmail.com
|
dc417bf09f73d438048aee47429084185e48c7c6
|
68e05d3644962fbde0d5aac4ba0037589da4be8d
|
/parsr.py
|
d3b050d630763fa82d554dc332141a0c07318455
|
[] |
no_license
|
shubhamchdhary/ExampleLanguage
|
89a00fded24fe01dd4409e153a3834dd5ae67ac6
|
6281a553ea712968cb8efce0431041588148676d
|
refs/heads/master
| 2023-02-02T08:29:43.552479
| 2020-12-20T07:15:48
| 2020-12-20T07:15:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,348
|
py
|
# This is a parser file.
import lexr as lx
#Function for addition.
def ADD(*args):
t = 0
for ii in args:
t += ii
return t
#Function for subtraction.
def SUB(*args):
t = args[0]
for ii in args[1:]:
t -= ii
return t
#Function for division.
def DIV(*args):
t = args[0]
for ii in args[1:]:
t /= ii
return t
#Function for multiplication.
def MUL(*args):
t = 1
for ii in args:
t *= ii
return t
#Function for defining integer.
def INT(args):
    # parse an assignment like "x = 5"; Python strings are immutable, so
    # return the (name, value) pair instead of writing back into the string
    op = args.replace(' ','')
    equal_sign = op.find('=')
    name = op[:equal_sign]
    value = int(op[equal_sign+1:])
    return name, value
#This is Parser.
func_vars = ["INT","STR","ADD","SUB","MUL","DIV","PRINT"]
t_list = []
#tempVar = [0,1,2,3,4,5,6,7,8,9]
def parser(args):
tokens = lx.lexer(args)
# print(values)
if tokens[0] in func_vars:
if tokens[0] == 'PRINT':
print(tokens[1])
elif tokens[0] != 'PRINT':
for ii in tokens[1]:
if ii in [str(i) for i in range(0,10)]:
operand = int(ii)
#t_list = []
t_list.append(operand)
# print(t_list)
if tokens[0] == 'ADD':
print(ADD(*t_list))
t_list.clear()
if tokens[0] == 'SUB':
print(SUB(*t_list))
t_list.clear()
if tokens[0] == 'MUL':
print(MUL(*t_list))
t_list.clear()
if tokens[0] == 'DIV':
print(DIV(*t_list))
t_list.clear()
else:
print('CommandError : Invalid command.')
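# usage sketch -- assuming lx.lexer("ADD 2 3") returns something like
# ['ADD', '2 3'] (the real lexr module is not shown here):
#   parser("ADD 2 3")    # would print 5
#   parser("PRINT hi")   # would print tokens[1]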
|
[
"shubhamchaudharybrg@gmail.com"
] |
shubhamchaudharybrg@gmail.com
|
89cf0d50fb6e2df3124bee8b77421b3fd186c0fb
|
3940b4a507789e1fbbaffeb200149aee215f655a
|
/lc/primes.py
|
c910b9b14a132ba9a5b1670d224af1c8c2d70824
|
[] |
no_license
|
akimi-yano/algorithm-practice
|
15f52022ec79542d218c6f901a54396a62080445
|
1abc28919abb55b93d3879860ac9c1297d493d09
|
refs/heads/master
| 2023-06-11T13:17:56.971791
| 2023-06-10T05:17:56
| 2023-06-10T05:17:56
| 239,395,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
def setup():
primes = []
for num in range(2,100000+1):
is_prime = True
for p in primes:
if num%p == 0:
is_prime = False
break
if is_prime:
primes.append(num)
return primes
primes = setup()
def prime_factors(num):
    # distinct prime factors; divide each factor out so that a large prime
    # cofactor left after the sqrt cutoff is still reported
    ans = []
    n = num
    for prime in primes:
        if prime * prime > n:
            break
        if n % prime == 0:
            ans.append(prime)
            while n % prime == 0:
                n //= prime
    if n > 1:
        ans.append(n)  # the remaining cofactor is prime
    if len(ans) < 1:
        return [num]
    return ans
print(prime_factors(4))
print(prime_factors(6))
print(prime_factors(15))
print(prime_factors(35))
print(prime_factors(3*13*29*43*111))
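# expected output:
#   [2]
#   [2, 3]
#   [3, 5]
#   [5, 7]
#   [3, 13, 29, 37, 43]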
|
[
"akimi.mimi.yano@gmail.com"
] |
akimi.mimi.yano@gmail.com
|
fc2e147737ded9347e22939f14736a0c9337dd6a
|
10f29942422d7f3b9899952d446a48069f70b631
|
/gitpull/apps.py
|
cb96b7c0cfa4f286774eb0f33f4fd32ef59d1b6f
|
[] |
no_license
|
manish-eng/pms
|
dd52a92e29b94fa8044183c8efdc3d7bda4aab0e
|
741f813f69a2f88699b20b7ec5983accf163e42a
|
refs/heads/master
| 2020-09-03T09:45:21.281844
| 2019-11-15T08:55:40
| 2019-11-15T08:55:40
| 219,437,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 89
|
py
|
from django.apps import AppConfig
class GitpullConfig(AppConfig):
name = 'gitpull'
|
[
"manish.kumar@digivalet.com"
] |
manish.kumar@digivalet.com
|
10f210817797689493a8e304960d8b07abdc6eb0
|
7df7952dfb484d8c629a49f89cac181da833bb76
|
/Ejercicios/TercTrim/Ejercicio7/Colecciones.py
|
d3f68aaf6d08750b6fc896bed785cd4f776cd540
|
[
"Apache-2.0"
] |
permissive
|
IhToN/DAW1-PRG
|
491d7111943116ee5c0e8f00b4017dd28b5fe17f
|
0fd7943b972a4e2e6293fdd5c539b168a19c0ea5
|
refs/heads/master
| 2020-05-30T08:43:20.188261
| 2017-05-25T07:49:31
| 2017-05-25T07:49:31
| 68,364,100
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 959
|
py
|
from Ejercicios.TercTrim.Ejercicio7 import Clases
from Ejercicios.TercTrim.Ejercicio7.ControlBD import ControlBD
class ColeccionClientes:
def __init__(self):
self.cbd = ControlBD()
self.clientes = dict()
self.cargar_clientes()
def obtener_cliente(self, dni):
return self.clientes[dni]
def cargar_clientes(self):
for cliente in self.cbd.obtener_clientes():
ncliente, nombre, telefono, direccion, ciudad = cliente
self.clientes[cliente[0]] = Clases.Cliente(ncliente, nombre, telefono, direccion, ciudad)
self.cbd.descoenctar()
def nuevo_cliente(self, ncliente, nombre, telefono, direccion, ciudad):
cliente = Clases.Cliente(ncliente, nombre, telefono, direccion, ciudad)
if ncliente not in self.clientes:
self.clientes[ncliente] = cliente
self.cbd.insertar_cliente(cliente)
self.cbd.descoenctar()
return cliente
|
[
"atalgaba@gmail.com"
] |
atalgaba@gmail.com
|
72c760eb56c3884eafcc21d150e1f4c00bf88b25
|
285e8a59b01b1eed7f647f1c2947ab07d50f1c2e
|
/mpact/mpact/export_to_CSV.py
|
116a29ea863056b856c3dd20d8f986bfabac8bb2
|
[] |
no_license
|
willchenko/mpact
|
2fb6586b0fefde735fdd03c5052f1027f85d032b
|
12ad2cc55fe72f111d36106f1f49422168cf2c29
|
refs/heads/main
| 2023-04-13T12:29:31.131556
| 2021-04-26T19:01:47
| 2021-04-26T19:01:47
| 360,989,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,649
|
py
|
# export dict's as a csv
import csv
import os
def create_new_problem(problem_name,mmdb_path):
problem_path = os.path.join(mmdb_path,r"problems",problem_name)
os.mkdir(problem_path)
os.chdir(problem_path)
create_pathway_csv_table()
create_reaction_csv_table()
create_metabolite_csv_table()
return
def create_pathway_csv_table():
with open('pathway_table.csv','w',newline='') as file:
writer = csv.writer(file)
writer.writerow(['id','name','rxns','product_id','product_tags','precursor_ids','litref','notes'])
return
def create_reaction_csv_table():
with open('reaction_table.csv','w',newline='') as file:
writer = csv.writer(file)
writer.writerow(['id','name','rxn_str','kegg_id','bigg_id','ec_number','notes'])
return
def create_metabolite_csv_table():
with open('metabolite_table.csv','w',newline='') as file:
writer = csv.writer(file)
writer.writerow(['id','name','formula','charge','kegg_id','bigg_id','notes'])
return
def add_to_pathway_csv_table(problem_name,pathway_dict, mmdb_path):
problem_path = os.path.join(mmdb_path,r"problems",problem_name)
os.chdir(problem_path)
for path in pathway_dict['pathways']:
with open('pathway_table.csv','a',newline='') as file:
writer = csv.writer(file)
writer.writerow([pathway_dict[path]['id'],pathway_dict[path]['name'],pathway_dict[path]['rxns'],pathway_dict[path]['product_id'][:-2],'',pathway_dict[path]['precursor_ids'][:-2],'',''])
return
def add_to_reaction_csv_table(problem_name,reaction_dict, mmdb_path):
problem_path = os.path.join(mmdb_path,r"problems",problem_name)
os.chdir(problem_path)
for rxn in reaction_dict['reactions']:
with open('reaction_table.csv','a',newline='') as file:
writer = csv.writer(file)
writer.writerow([reaction_dict[rxn]['bigg_rxn'],reaction_dict[rxn]['bigg_name'],reaction_dict[rxn]['bigg_string'],reaction_dict[rxn]['kegg_rxn'],reaction_dict[rxn]['bigg_rxn'],'',''])
return
def add_to_metabolite_csv_table(problem_name,metabolite_dict, mmdb_path):
problem_path = os.path.join(mmdb_path,r"problems",problem_name)
os.chdir(problem_path)
with open('metabolite_table.csv','a',newline='') as file:
writer = csv.writer(file)
for met in metabolite_dict['metabolites']:
writer.writerow([metabolite_dict[met]['bigg_id'][:-2],met,metabolite_dict[met]['formula'],metabolite_dict[met]['charge'],metabolite_dict[met]['kegg_id'],metabolite_dict[met]['bigg_id'],''])
return
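# usage sketch (problem name and mmdb path are hypothetical placeholders; the
# *_dict arguments follow the key layout the functions above expect):
#   create_new_problem("demo_problem", "/path/to/mmdb")
#   add_to_pathway_csv_table("demo_problem", pathway_dict, "/path/to/mmdb")
#   add_to_reaction_csv_table("demo_problem", reaction_dict, "/path/to/mmdb")
#   add_to_metabolite_csv_table("demo_problem", metabolite_dict, "/path/to/mmdb")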
|
[
"noreply@github.com"
] |
willchenko.noreply@github.com
|
bb8fa43a7bfe9505237f700a8ee96895d2c4759d
|
fdd82539504bef18dac31be6cc5ac4c322a54e53
|
/Validating_Postal_Codes.py
|
a87c2f0b52b1cdd693769ebb3250a3d80f941b00
|
[] |
no_license
|
the-preeti-singh/PythonWithHackerrank
|
0c8521b7a091e1aba12c33412e5ef15727209887
|
fb606ffd712eefde69669eec38289ab0741f1e8b
|
refs/heads/master
| 2018-10-21T04:12:52.262321
| 2018-08-03T17:07:10
| 2018-08-03T17:07:10
| 119,407,106
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 112
|
py
|
import re
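# HackerRank "Validating Postal Codes": a valid code is a six-digit integer
# starting with 1-9 that contains fewer than two "alternating repetitive
# digit" pairs (a digit repeated two positions later). The lookahead
# (?=(\d)\d\1) counts those pairs without consuming characters, so
# overlapping pairs are all found.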
P = raw_input()
print len(re.findall(r'(?=(\d)\d\1)',P)) < 2 and bool(re.match(r'^[1-9][0-9]{5}$',P))
|
[
"noreply@github.com"
] |
the-preeti-singh.noreply@github.com
|
63a59f8b9288d2e727ad38525c0981576555d3a4
|
0779a098cbd060a35143390afa171225f2974fef
|
/bin/hateno-update
|
1d57d71adcb3121a80df70bf6c631c6b6f4de0fa
|
[
"MIT"
] |
permissive
|
JeremyHeleine/Hateno
|
14943d9d5017bedd05c057b1e08975e8f08ed912
|
0a5e4068b69b871df6069a7737fb15d670ff37de
|
refs/heads/master
| 2022-11-08T09:31:14.324035
| 2021-08-22T20:47:26
| 2021-08-22T20:47:26
| 249,705,288
| 0
| 1
|
MIT
| 2022-10-17T09:35:06
| 2020-03-24T12:42:28
|
Python
|
UTF-8
|
Python
| false
| false
| 977
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
This script uses the Manager class to update a simulations folder.
'''
import argparse
from hateno.folder import Folder
from hateno.manager import Manager
from hateno.ui import UI
from hateno.utils import utils
def addArguments(parser):
pass
def main(args):
folder = utils.findFolder()
if folder is None:
print('No Hateno folder found')
return
with Manager(folder) as manager:
n_simulations = manager.getSimulationsNumber()
if n_simulations == 0:
print('This folder is empty.')
return
ui = UI()
infos_line = ui.addTextLine('Updating the simulations list…')
progress_bar = ui.addProgressBar(n_simulations)
manager.update(callback = progress_bar.update)
ui.removeItem(progress_bar)
infos_line.text = 'Simulations list updated'
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'Update a simulations folder.')
addArguments(parser)
main(parser.parse_args())
|
[
"jeremy.heleine@gmail.com"
] |
jeremy.heleine@gmail.com
|
|
0f049c3cb4578e72deb75294061f8abf410f3f3c
|
879f4ef926cbcc7dc8ee6625b63fd9a4635539fb
|
/models/helper.py
|
c83b717f71d6b4a7cc108e05b18bde472add5345
|
[] |
no_license
|
KrakenCode/PyMarket
|
758138e18a65b5f043557e2cce9b28cd4f6b305f
|
572d4fa59e5c593eea333e9be35acd76a34dc749
|
refs/heads/master
| 2020-03-17T15:42:37.858615
| 2018-05-03T00:07:49
| 2018-05-03T00:07:49
| 133,721,008
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 896
|
py
|
from models.user import User
from models.product import Product
from models.customer import Customer
from models.address import Address
from models.creditcard import CreditCard
from models.shoppingcart import ShoppingCart
from models.cartitem import CartItem
from models.productbatch import ProductBatch
from models.purchaseorder import PurchaseOrder
from models.incomingtransaction import IncomingTransaction
from models.outgoingtransaction import OutgoingTransaction
model_classes = {
'User': User,
'Product': Product,
'Customer': Customer,
'Address': Address,
'CreditCard': CreditCard,
'ShoppingCart': ShoppingCart,
'CartItem': CartItem,
'ProductBatch': ProductBatch,
'PurchaseOrder': PurchaseOrder,
'IncomingTransaction': IncomingTransaction,
'OutgoingTransaction': OutgoingTransaction
}
|
[
"daltonsumrall@gmail.com"
] |
daltonsumrall@gmail.com
|
a4469bce9b75969f349a26f3a6ebba64c8c31cb8
|
deeeeb5f90a99bd3c071b42e9b87a78e526fe837
|
/demo_test/demo_test/settings.py
|
a9eac1c3b9509a91104fb972b4009c4f1915a663
|
[] |
no_license
|
Rounak21/Demo_serializer_test
|
c4e48adc6db602c6353fe50a735283ff0062fd38
|
bb389d2ec947333ba9ae0f6863d1dd9f6648f121
|
refs/heads/master
| 2022-11-10T06:41:21.892828
| 2020-06-21T10:39:11
| 2020-06-21T10:39:11
| 273,846,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,142
|
py
|
"""
Django settings for demo_test project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+zhh%*3#l&su=g_0s^um3dg3gsokpa@e2h1rv*)khb+tq81z94'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'demo_serializer',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'demo_test.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'demo_test.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"babisantos34@gmail.com"
] |
babisantos34@gmail.com
|
300adea8e18706917d08603580493fab395ee6f5
|
2e29b5855d427d701ad1b613529fe613a16bbca9
|
/pinax/stripe/migrations/0018_subscriptionitem.py
|
208202628202d810d010af379faf82a99169335e
|
[
"MIT"
] |
permissive
|
baseup/pinax-stripe
|
dacd3d62adc38ac40e9ef10769e4fb425ea9d018
|
6ac7053a1bb67f0c254eeb3b87441335df59a8e2
|
refs/heads/master
| 2022-05-22T06:57:10.793400
| 2022-03-09T07:40:53
| 2022-03-09T07:40:53
| 145,524,522
| 0
| 0
| null | 2018-08-21T07:30:59
| 2018-08-21T07:30:59
| null |
UTF-8
|
Python
| false
| false
| 1,311
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2018-10-23 06:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('pinax_stripe', '0017_auto_20181004_0801'),
]
operations = [
migrations.CreateModel(
name='SubscriptionItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('stripe_id', models.CharField(max_length=191, unique=True)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('created', models.DateTimeField(blank=True, null=True)),
('metadata', jsonfield.fields.JSONField(blank=True, null=True)),
('quantity', models.IntegerField()),
('plan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Plan')),
('subscription', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pinax_stripe.Subscription')),
],
options={
'abstract': False,
},
),
]
|
[
"w.jbarreraact@gmail.com"
] |
w.jbarreraact@gmail.com
|
e93607e3f8cedbe714f6544a3fc4138ec54f8498
|
24cbfefc98c5c34dc2d29021889c06d40734108d
|
/LeetCodeEasy/findUnsortedSubarray.py
|
02a1f546fdc37d7863df130127a14b855d934427
|
[] |
no_license
|
surmayi/CodePython
|
6bbf5967de9b72aa6e8915725fda53d6c3d82fc5
|
c0dd25471e34a2c7ce2d1ce6a10dbd6e0f7042f1
|
refs/heads/master
| 2022-05-16T11:59:39.289329
| 2022-05-02T19:33:48
| 2022-05-02T19:33:48
| 234,530,224
| 0
| 0
| null | 2020-06-05T18:48:33
| 2020-01-17T10:55:34
|
Python
|
UTF-8
|
Python
| false
| false
| 975
|
py
|
class Solution(object):
def findUnsortedSubarray(self, nums):
if nums[0]> nums[-1]:
return len(nums)
if len(nums) in[0,1]:
return 0
l=len(nums)
frontind =0
backind =l-1
i=0
frontdone =False
backdone = False
while(i<l-1):
if(frontdone is False and nums[i]<=nums[i+1]):
frontind+=1
else:
frontdone=True
if(backdone is False and nums[l-i-1]>=nums[l-i-2]):
backind-=1
else:
backdone =True
i+=1
if frontind>=backind:
return 0
temp = nums[frontind:backind+1]
tmin = min(temp)
tmax = max(temp)
while frontind>0 and nums[frontind-1]>tmin:
frontind-=1
while backind<l-1 and nums[backind+1]<tmax:
print(backind)
backind+=1
return backind-frontind+1
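# e.g. Solution().findUnsortedSubarray([2, 6, 4, 8, 10, 9, 15]) -> 5
#      (sorting the subarray [6, 4, 8, 10, 9] makes the whole list sorted)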
|
[
"surmayi7@gmail.com"
] |
surmayi7@gmail.com
|
dcfba9a7338513aac165adb376398954d32bedf7
|
6cd0e360aef530b47dacbf2468f3dab44cd5ed0d
|
/atlatlTest/settings.py
|
42c2c29f08f08b0fd473135348ce7de465ff14d7
|
[] |
no_license
|
hughybarra/atlatlTest
|
16e0f943d8692f4d0c8642a8aad963148b9e02d2
|
42a2ffff6534e464e3685bfa4458bfd208d163a2
|
refs/heads/master
| 2021-01-22T09:16:40.145504
| 2014-07-06T01:43:30
| 2014-07-06T01:43:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,996
|
py
|
"""
Django settings for atlatlTest project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=#($&ca1&5(t=_dos#vkslysa5en7k%vf5-#m*gkn5o=8h)zu!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myApp',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'atlatlTest.urls'
WSGI_APPLICATION = 'atlatlTest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
|
[
"hugh.ybarra@gmaillcom"
] |
hugh.ybarra@gmaillcom
|
2a79ebc762ce1b21a171102de8c4921db995312e
|
afc8d5a9b1c2dd476ea59a7211b455732806fdfd
|
/Configurations/ggH_SF/Full2018/DNN/configuration_2018.py
|
b9aa3c9058017e04aa91fb0713b0804969ea1ced
|
[] |
no_license
|
latinos/PlotsConfigurations
|
6d88a5ad828dde4a7f45c68765081ed182fcda21
|
02417839021e2112e740607b0fb78e09b58c930f
|
refs/heads/master
| 2023-08-18T20:39:31.954943
| 2023-08-18T09:23:34
| 2023-08-18T09:23:34
| 39,819,875
| 10
| 63
| null | 2023-08-10T14:08:04
| 2015-07-28T07:36:50
|
Python
|
UTF-8
|
Python
| false
| false
| 917
|
py
|
# example of configuration file
treeName= 'Events'
tag = 'ggH_SF_split2'
# used by mkShape to define output directory for root files
outputDir = 'rootFile'
# file with TTree aliases
aliasesFile = 'aliases.py'
# file with list of variables
variablesFile = 'variables.py'
# file with list of cuts
cutsFile = 'cuts_all.py'
# file with list of samples
samplesFile = 'samples.py'
# file with list of samples
plotFile = 'plot.py'
# luminosity to normalize to (in 1/fb)
lumi = 35.867
# used by mkPlot to define output directory for plots
# different from "outputDir" to do things more tidy
# outputDirPlots = '~/www/plotCR'
outputDirPlots = 'plot_'+tag+'_DNN_signal'
# used by mkDatacards to define output directory for datacards
outputDirDatacard = 'datacards'
# structure file for datacard
structureFile = 'structure.py'
# nuisances file for mkDatacards and for mkShape
nuisancesFile = 'nuisances.py'
|
[
"davide.di.croce@cern.ch"
] |
davide.di.croce@cern.ch
|
9817974e6268ed72a04ce521f186f689f4307d49
|
0de3a3e453f49edadf9ecc9aebb04981394a05af
|
/day10-OO/exo01.py
|
9e3b25a8e8e71e59114a0c418e936b6b1dd56b0e
|
[] |
no_license
|
LU-Jingyu/aid-month01-updated
|
17e0ae31d3a38944544693a2b1eb3138714337a9
|
8df88bbf29d4cb07c10cf5accd5172b56e96d75a
|
refs/heads/main
| 2023-07-03T04:25:12.653062
| 2021-08-11T08:51:13
| 2021-08-11T08:51:13
| 393,339,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 653
|
py
|
"""
"""
import Student
"""
Use the Student class to enter student records in a loop
"""
# dict_prod = {}
# while True:
#     nom = input("Enter product name: ")
#     if nom == "":
#         break
#     prix = input("Enter unit price: ")
#     dict_prod[nom] = prix
# for k, v in dict_prod.items():
#     print("Product: " + k, "Unit price: " + v)
list_stud = []
while True:
name = input("请输入学生姓名: ")
if name == "":
break
sex = input("请输入性别: ")
age = input("请输入年龄: ")
list_stud.append(Student.Student(name, sex, age))
for item in list_stud:
item.print()
if list_stud:  # guard against an empty class list
    list_stud[0].print()
|
[
"724027461@qq.com"
] |
724027461@qq.com
|
57b410d649fbf712a2dc614d4c684ae1b830064c
|
46900ffadf08b92f656ff5d0a0b71a7717e4415c
|
/old_trash/renthop_src/stacking/stacking_no_mngr_medians.py
|
0813eec07659754692009389b16c91cbee03e00e
|
[] |
no_license
|
ubbikk/kaggle
|
9765ca9530d139525752b4286c20e971b0a97be7
|
cc7ea173ad8215a42108a973abe2cf0095517588
|
refs/heads/master
| 2021-01-19T10:32:44.615755
| 2017-11-11T19:28:42
| 2017-11-11T19:28:42
| 82,200,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36,496
|
py
|
import json
import os
import traceback
from time import time, sleep
import seaborn as sns
import pandas as pd
from collections import OrderedDict
import sys
from matplotlib import pyplot
from scipy.sparse import coo_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.model_selection import StratifiedKFold
import numpy as np
import xgboost as xgb
from sklearn.metrics import log_loss
from xgboost import plot_importance
from sklearn.model_selection import train_test_split
from scipy.stats import boxcox
from scipy.spatial import KDTree
import math
from pymongo import MongoClient
TARGET = u'interest_level'
TARGET_VALUES = ['low', 'medium', 'high']
MANAGER_ID = 'manager_id'
BUILDING_ID = 'building_id'
LATITUDE = 'latitude'
LONGITUDE = 'longitude'
PRICE = 'price'
BATHROOMS = 'bathrooms'
BEDROOMS = 'bedrooms'
DESCRIPTION = 'description'
DISPLAY_ADDRESS = 'display_address'
STREET_ADDRESS = 'street_address'
LISTING_ID = 'listing_id'
PRICE_PER_BEDROOM = 'price_per_bedroom'
F_COL = u'features'
CREATED_MONTH = "created_month"
CREATED_DAY = "created_day"
CREATED_MINUTE = 'created_minute'
CREATED_HOUR = 'created_hour'
DAY_OF_WEEK = 'dayOfWeek'
CREATED = 'created'
LABEL = 'lbl'
BED_NORMALIZED = 'bed_norm'
BATH_NORMALIZED = 'bath_norm'
COL = 'normalized_features'
NEI_1 = 'nei1'
NEI_2 = 'nei2'
NEI_3 = 'nei3'
NEI = 'neighbourhood'
BORO = 'boro'
INDEX_COPY = 'index_copy'
FEATURES = [u'bathrooms', u'bedrooms', u'building_id', u'created',
u'description', u'display_address', u'features',
u'latitude', u'listing_id', u'longitude', MANAGER_ID, u'photos',
u'price', u'street_address']
sns.set(color_codes=True)
sns.set(style="whitegrid", color_codes=True)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.max_rows', 5000)
train_file = '../../data/redhoop/train.json'
test_file = '../../data/redhoop/test.json'
train_geo_file = '../../data/redhoop/with_geo/train_geo.json'
test_geo_file = '../../data/redhoop/with_geo/test_geo.json'
rent_file = '../../data/neis_from_renthop_lower.json'
seeds_fp = '../../seeds.json'
splits_big_fp='../../splits_big.json'
splits_small_fp='../../splits_small.json'
magic_file = '../../data/redhoop/listing_image_time.csv'
# train_file = '../data/redhoop/train.json'
# test_file = '../data/redhoop/test.json'
# train_geo_file = '../data/redhoop/with_geo/train_geo.json'
# test_geo_file = '../data/redhoop/with_geo/test_geo.json'
# rent_file = 'with_geo/data/neis_from_renthop_lower.json'
# seeds_fp = '../../seeds.json'
# splits_big_fp='../../splits_big.json'
# splits_small_fp='../../splits_small.json'
#########################################################################################
# Mongo Control
#########################################################################################
SEEDS = json.load(open(seeds_fp))
SPLITS_BIG=json.load(open(splits_big_fp))
SPLITS_SMALL=json.load(open(splits_small_fp))
def getN(mongo_host, name, experiment_max_time):
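    # Work-claiming protocol, as implemented below: each worker asks Mongo for
    # the next split index N; claims marked finished are skipped, stale
    # unfinished claims (older than experiment_max_time) are re-claimed, and
    # otherwise a fresh claim N = len(existing claims) is registered.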
client = MongoClient(mongo_host, 27017)
db = client[name]
    collection = db['splits_control']  # the ".format(name)" here was a no-op
res = [x for x in collection.find()]
res.sort(key=lambda s: s['N'])
for con in res:
if (not con['finished']) and (time()-con['time'] > experiment_max_time):
N = con['N']
collection.replace_one({'N': N}, {'N': N, 'time': time(), 'finished': False})
return N
N = len(res)
collection.insert_one({'N': N, 'time': time(), 'finished': False})
return N
def split_from_N(df, N):
N=N%5
return df.loc[SPLITS_BIG[N],:], df.loc[SPLITS_SMALL[N], :]
def complete_split_mongo(N, name, mongo_host, probs, test_indexes, losses, importance, f_names):
client = MongoClient(mongo_host, 27017)
db = client[name]
collection = db['probs']
collection.insert_one({'N': N, 'val': probs, 'index':test_indexes})
collection = db['losses']
collection.insert_one({'N': N, 'val': losses})
collection = db['importance']
collection.insert_one({'N': N, 'val': importance})
collection = db['features']
collection.insert_one({'N': N, 'val': f_names})
    collection = db['splits_control']  # the ".format(name)" here was a no-op
collection.replace_one({'N': N}, {'N': N, 'time': time(), 'finished': True})
def get_probs_from_est(estimator, proba, test_df):
classes = [x for x in estimator.classes_]
res = {}
for cl in classes:
p=proba[:, classes.index(cl)]
res[cl] = [a.item() for a in p]
return res, [x for x in test_df.index.values]
def complete_split_file(ii, l, name):
fp = name + '_results.json'
ii_fp = name + '_importance.json'
with open(fp, 'w+') as f:
json.dump(l, f)
with open(ii_fp, 'w+') as f:
json.dump(ii, f)
#########################################################################################
# Mongo Control
#########################################################################################
#########################################################################################
# Writing Results
#########################################################################################
def write_results(N, name, mongo_host, probs, test_indexes, l_results, ii_importance, f_names):
losses = l_results[len(l_results) - 1]
importance = ii_importance[len(ii_importance) - 1]
retries = 5
while retries >= 0:
try:
complete_split_mongo(N, name, mongo_host, probs, test_indexes, losses, importance, f_names)
break
except:
traceback.print_exc()
retries -= 1
sleep(30)
try:
complete_split_file(ii_importance, l_results, name)
except:
traceback.print_exc()
def out(l, loss, l_1K, loss1K, num, t):
print '\n\n'
print '#{}'.format(num)
if loss1K is not None:
print 'loss1K {}'.format(loss1K)
print 'avg_loss1K {}'.format(np.mean(l_1K))
print get_3s_confidence_for_mean(l_1K)
print
print 'loss {}'.format(loss)
print 'avg_loss {}'.format(np.mean(l))
print get_3s_confidence_for_mean(l)
print 'std {}'.format(np.std(l))
print 'time {}'.format(t)
def get_3s_confidence_for_mean(l):
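    # mean +/- 3 standard errors, where SE = std / sqrt(n)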
std = np.std(l) / math.sqrt(len(l))
m = np.mean(l)
start = m - 3 * std
end = m + 3 * std
return '3s_confidence: [{}, {}]'.format(start, end)
#########################################################################################
# Writing Results
#########################################################################################
#########################################################################################
# loading data
#########################################################################################
def load_df(file, geo_file):
df = pd.read_json(file)
geo = pd.read_json(geo_file)
df[NEI] = geo[NEI]
df['tmp'] = df[NEI].apply(transform_geo_to_rent)
df[NEI_1] = df['tmp'].apply(lambda s: None if s is None else s[0])
df[NEI_2] = df['tmp'].apply(lambda s: None if s is None else s[1])
df[NEI_3] = df['tmp'].apply(lambda s: None if s is None else s[2])
normalize_bed_bath(df)
return basic_preprocess(df)
def load_train():
df = load_df(train_file, train_geo_file)
df[LABEL] = 'train'
return df
def load_test():
df = load_df(test_file, test_geo_file)
df[LABEL] = 'test'
return df
def load_rent():
m = json.load(open(rent_file))
res = {}
for boro, boro_m in m.iteritems():
for sub_boro, neis in boro_m.iteritems():
for n in neis:
res[n] = [n, sub_boro, boro]
return res
def basic_preprocess(df):
df['num_features'] = df[u'features'].apply(len)
df['num_photos'] = df['photos'].apply(len)
df['word_num_in_descr'] = df['description'].apply(lambda x: len(x.split(' ')))
df["created"] = pd.to_datetime(df["created"])
# df["created_year"] = df["created"].dt.year
df[CREATED_MONTH] = df["created"].dt.month
df[CREATED_DAY] = df["created"].dt.day
df[CREATED_HOUR] = df["created"].dt.hour
df[CREATED_MINUTE] = df["created"].dt.minute
df[DAY_OF_WEEK] = df['created'].dt.dayofweek
bc_price, tmp = boxcox(df['price'])
df['bc_price'] = bc_price
df[INDEX_COPY] = df.index.values
return df
def fix_index(df):
df.index = df[INDEX_COPY]
#########################################################################################
# loading data
#########################################################################################
#########################################################################################
# Creating Neis
#########################################################################################
def normalize_bed_bath(df):
df['bed_bath']=df[[BEDROOMS, BATHROOMS]].apply(lambda s: (s[BEDROOMS], s[BATHROOMS]), axis=1)
def norm(s):
bed=s[0]
bath=s[1]
if bed==0:
if bath>=1.5:
return [0,2.0]
elif bed==1:
if bath>=2.5:
return [1,2.0]
elif bed==2:
if bath>=3.0:
return [2,3.0]
elif bed==3:
if bath>=4.0:
return [3,4.0]
elif bed==4:
if bath==0:
return [4,1]
elif bath>=4.5:
return [4,4.5]
elif bed>=5:
if bath <=1.5:
return [5,1.5]
elif bath <=2.5:
return [5,2.5]
elif bath <=3.5:
return [5,3]
else:
return [5,4]
return [bed, bath]
df['bed_bath']=df['bed_bath'].apply(norm)
df[BED_NORMALIZED]=df['bed_bath'].apply(lambda s:s[0])
df[BATH_NORMALIZED]=df['bed_bath'].apply(lambda s:s[1])
EXACT_MAP = {
'gramercy': 'gramercy park',
'clinton': "hell's kitchen",
'turtle bay': 'midtown east',
'tudor city': 'midtown east',
'sutton place': 'midtown east',
'hamilton heights': 'west harlem',
'bedford stuyvesant': 'bedford-stuyvesant',
'hunters point': 'long island city',
'battery park': 'battery park city',
'manhattanville': 'west harlem',
'carnegie hill': 'upper east side',
'stuyvesant town': 'stuyvesant town - peter cooper village',
'downtown': 'downtown brooklyn',
'morningside heights': 'west harlem',
'spuyten duyvil': 'riverdale',
'prospect lefferts gardens': 'flatbush',
'greenwood': 'greenwood heights',
'fort hamilton': 'bay ridge',
'high bridge': 'highbridge',
'columbia street waterfront district': 'carroll gardens',
'ocean parkway': 'midwood',
'north riverdale': 'riverdale',
'astoria heights': 'astoria',
'tremont': 'mount hope',
'homecrest': 'sheepshead bay',
'new utrecht': 'borough park',
'fieldston': 'riverdale',
'georgetown': 'upper east side',
'tottenville': 'washington heights',
'hillcrest': 'kew gardens hills',
'oakland gardens': 'forest hills',
'pomonok': 'washington heights',
'wingate': 'east flatbush',
'fordham': 'fordham manor',
'forest hills gardens': 'forest hills',
'columbus circle': "hell's kitchen"
}
SPECIAL = {
'midtown': ('midtown', 'midtown manhattan', 'manhattan'),
'harlem': ('harlem', 'upper manhattan', 'manhattan')
}
ONLY_SECOND = {
'castle hill': ('2nd', 'east bronx', 'bronx'),
'throggs neck': ('2nd', 'east bronx', 'bronx'),
'soundview': ('2nd', 'east bronx', 'bronx'),
'port morris': ('2nd', 'east bronx', 'bronx'),
}
ONLY_THIRD = {
'queens village': ('3rd', '3rd', 'queens'),
'laurelton': ('3rd', '3rd', 'queens')
}
def transform_geo_to_rent(s):
if s is None:
return s
s = s.lower()
rent = load_rent()
if s in rent:
return rent[s]
if s in EXACT_MAP:
return rent[EXACT_MAP[s]]
if s in SPECIAL:
return SPECIAL[s]
return ('not_mapped_yet', 'not_mapped_yet', 'not_mapped_yet')
#########################################################################################
# Creating Neis
#########################################################################################
#########################################################################################
# MNGR HCC
#########################################################################################
def hcc_encode(train_df, test_df, variable, binary_target, k=5, f=1, g=1, r_k=0.01, folds=5):
"""
See "A Preprocessing Scheme for High-Cardinality Categorical Attributes in
Classification and Prediction Problems" by Daniele Micci-Barreca
"""
prior_prob = train_df[binary_target].mean()
hcc_name = "_".join(["hcc", variable, binary_target])
seed = int(time())
print 'seed hcc {}'.format(seed)
skf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=seed)
for big_ind, small_ind in skf.split(np.zeros(len(train_df)), train_df['interest_level']):
big = train_df.iloc[big_ind]
small = train_df.iloc[small_ind]
grouped = big.groupby(variable)[binary_target].agg({"size": "size", "mean": "mean"})
grouped["lambda"] = 1 / (g + np.exp((k - grouped["size"]) / f))
grouped[hcc_name] = grouped["lambda"] * grouped["mean"] + (1 - grouped["lambda"]) * prior_prob
if hcc_name in small.columns:
del small[hcc_name]
small = pd.merge(small, grouped[[hcc_name]], left_on=variable, right_index=True, how='left')
small.loc[small[hcc_name].isnull(), hcc_name] = prior_prob
small[hcc_name] = small[hcc_name] * np.random.uniform(1 - r_k, 1 + r_k, len(small))
train_df.loc[small.index, hcc_name] = small[hcc_name]
grouped = train_df.groupby(variable)[binary_target].agg({"size": "size", "mean": "mean"})
grouped["lambda"] = 1 / (g + np.exp((k - grouped["size"]) / f))
grouped[hcc_name] = grouped["lambda"] * grouped["mean"] + (1 - grouped["lambda"]) * prior_prob
test_df = pd.merge(test_df, grouped[[hcc_name]], left_on=variable, right_index=True, how='left')
test_df.loc[test_df[hcc_name].isnull(), hcc_name] = prior_prob
return train_df, test_df, hcc_name
def process_mngr_categ_preprocessing(train_df, test_df):
col = MANAGER_ID
new_cols = []
for df in [train_df, test_df]:
df['target_high'] = df[TARGET].apply(lambda s: 1 if s == 'high' else 0)
df['target_medium'] = df[TARGET].apply(lambda s: 1 if s == 'medium' else 0)
for binary_col in ['target_high', 'target_medium']:
train_df, test_df, new_col = hcc_encode(train_df, test_df, col, binary_col)
new_cols.append(new_col)
return train_df, test_df, new_cols
#########################################################################################
# MNGR HCC
#########################################################################################
#########################################################################################
# MNGR NUM
#########################################################################################
def process_manager_num(train_df, test_df):
mngr_num_col = 'manager_num'
df = train_df.groupby(MANAGER_ID)[MANAGER_ID].count()
# df[df<=1]=-1
df = df.apply(float)
df = df.to_frame(mngr_num_col)
train_df = pd.merge(train_df, df, left_on=MANAGER_ID, right_index=True)
test_df = pd.merge(test_df, df, left_on=MANAGER_ID, right_index=True, how='left')
return train_df, test_df, [mngr_num_col]
#########################################################################################
# MNGR NUM
#########################################################################################
#########################################################################################
# BID HCC
#########################################################################################
def process_bid_categ_preprocessing(train_df, test_df):
col = BUILDING_ID
new_cols = []
for df in [train_df, test_df]:
df['target_high'] = df[TARGET].apply(lambda s: 1 if s == 'high' else 0)
df['target_medium'] = df[TARGET].apply(lambda s: 1 if s == 'medium' else 0)
for binary_col in ['target_high', 'target_medium']:
train_df, test_df, new_col = hcc_encode(train_df, test_df, col, binary_col)
new_cols.append(new_col)
return train_df, test_df, new_cols
#########################################################################################
# BID HCC
#########################################################################################
#########################################################################################
# BID NUM
#########################################################################################
def process_bid_num(train_df, test_df):
bid_num_col = 'bid_num'
df = train_df.groupby(BUILDING_ID)[BUILDING_ID].count()
# df[df<=1]=-1
df = df.apply(float)
df = df.to_frame(bid_num_col)
train_df = pd.merge(train_df, df, left_on=BUILDING_ID, right_index=True)
test_df = pd.merge(test_df, df, left_on=BUILDING_ID, right_index=True, how='left')
return train_df, test_df, [bid_num_col]
#########################################################################################
# BID NUM
#########################################################################################
#########################################################################################
# Listing id
#########################################################################################
def process_listing_id(train_df, test_df):
return train_df, test_df, [LISTING_ID]
#########################################################################################
# Listing id
#########################################################################################
#########################################################################################
# NEI 123
#########################################################################################
def dummy_col(col_name, val):
return '{}_{}'.format(col_name, val)
def get_dummy_cols(col_name, col_values):
return ['{}_{}'.format(col_name, val) for val in col_values]
def process_nei123(train_df, test_df):
df = pd.concat([train_df, test_df])
normalize_bed_bath(df)
sz = float(len(df))
# neis_cols = [NEI_1, NEI_2, NEI_3]
new_cols = []
for col in [NEI_1, NEI_2]:
new_col = 'freq_of_{}'.format(col)
df[new_col] = df.groupby(col)[PRICE].transform('count')
df[new_col] = df[new_col] / sz
new_cols.append(new_col)
beds_vals = [0, 1, 2, 3]
for col in [NEI_1, NEI_2, NEI_3]:
for bed in beds_vals:
new_col = 'freq_of_{}, with bed={}'.format(col, bed)
df[new_col] = df.groupby([col, BED_NORMALIZED])[PRICE].transform('count')
df[new_col] = df[new_col] / sz
new_cols.append(new_col)
for col in [NEI_1, NEI_2]:
new_col = 'median_ratio_of_{}'.format(col)
df['tmp'] = df.groupby([col, BEDROOMS])[PRICE].transform('median')
df[new_col] = df[PRICE] - df['tmp']
df[new_col] = df[new_col] / df['tmp']
new_cols.append(new_col)
for col in [NEI_1, NEI_2, NEI_3]:
vals = set(df[col])
if None in vals:
vals.remove(None)
df = pd.get_dummies(df, columns=[col])
dummies = get_dummy_cols(col, vals)
new_cols += dummies
df_to_merge = df[[LISTING_ID] + new_cols]
train_df = pd.merge(train_df, df_to_merge, on=LISTING_ID)
test_df = pd.merge(test_df, df_to_merge, on=LISTING_ID)
return train_df, test_df, new_cols
#########################################################################################
# NEI 123
#########################################################################################
#########################################################################################
# MNGR AVG PRICE
#########################################################################################
def process_mngr_avg_median_price(train_df, test_df):
df = pd.concat([train_df, test_df])
bed_bath_median = 'bed_bath_median'
df[bed_bath_median] = df.groupby([BED_NORMALIZED, BATH_NORMALIZED])[PRICE].transform('median')
bed_bath_diff = 'bed_bath_diff'
df[bed_bath_diff]=df[PRICE]-df[bed_bath_median]
bed_bath_raio = 'bed_bath_ratio'
df[bed_bath_raio]=df[bed_bath_diff]/df['bed_bath_median']
group_by = df.groupby(MANAGER_ID)[bed_bath_diff]
df['gr_by_mngr_bed_bath_diff_median']= group_by.transform('median')
df['gr_by_mngr_bed_bath_diff_quantile_0.25']= group_by.transform('quantile', 0.25)
df['gr_by_mngr_bed_bath_diff_quantile_0.75']= group_by.transform('quantile', 0.75)
df['gr_by_mngr_bed_bath_diff_mean']= group_by.transform('mean')
group_by = df.groupby(MANAGER_ID)[bed_bath_raio]
df['gr_by_mngr_bed_bath_ratio_median']= group_by.transform('median')
df['gr_by_mngr_bed_bath_ratio_quantile_0.25']= group_by.transform('quantile', 0.25)
df['gr_by_mngr_bed_bath_ratio_quantile_0.75']= group_by.transform('quantile', 0.75)
df['gr_by_mngr_bed_bath_ratio_mean']= group_by.transform('mean')
new_cols= ['bed_bath_diff','bed_bath_ratio','bed_bath_median',
'gr_by_mngr_bed_bath_diff_median','gr_by_mngr_bed_bath_diff_mean',
'gr_by_mngr_bed_bath_diff_quantile_0.25','gr_by_mngr_bed_bath_diff_quantile_0.75',
'gr_by_mngr_bed_bath_ratio_median', 'gr_by_mngr_bed_bath_ratio_mean' ,
'gr_by_mngr_bed_bath_ratio_quantile_0.25', 'gr_by_mngr_bed_bath_ratio_quantile_0.75'
]
df_to_merge = df[[LISTING_ID] + new_cols]
train_df = pd.merge(train_df, df_to_merge, on=LISTING_ID)
test_df = pd.merge(test_df, df_to_merge, on=LISTING_ID)
return train_df, test_df, new_cols
#########################################################################################
# MNGR AVG PRICE
#########################################################################################
#########################################################################################
# OTHER MEDIANS
#########################################################################################
def process_other_mngr_medians(train_df, test_df):
features = ['num_features', 'num_photos', 'word_num_in_descr', BED_NORMALIZED, BATH_NORMALIZED]
df = pd.concat([train_df, test_df])
new_cols = []
for f in features:
col = 'get_by_mngr_{}_mean'.format(f)
df[col] = df.groupby(MANAGER_ID)[f].transform('mean')
new_cols.append(col)
if f in [BATH_NORMALIZED, BED_NORMALIZED]:
continue
col = 'get_by_mngr_{}_median'.format(f)
new_cols.append(col)
df[col] = df.groupby(MANAGER_ID)[f].transform('median')
df_to_merge = df[[LISTING_ID] + new_cols]
train_df = pd.merge(train_df, df_to_merge, on=LISTING_ID)
test_df = pd.merge(test_df, df_to_merge, on=LISTING_ID)
return train_df, test_df, new_cols
#########################################################################################
# OTHER MEDIANS
#########################################################################################
#########################################################################################
# OTHER MEDIANS NEW
#########################################################################################
def get_main_value(s):
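    # Returns the value covering at least ~66% of the group, else None, e.g.
    # get_main_value([9, 9, 9, 17]) -> 9; get_main_value([1, 2, 3, 4, 5, 6]) -> None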
n = int(0.66*len(s))
vals = {k:0 for k in set(s)}
for x in s:
vals[x]+=1
for k,v in vals.iteritems():
if v>=n:
return k
def process_other_mngr_medians_new(train_df, test_df):
df = pd.concat([train_df, test_df])
total_minutes_col='total_minutes'
    df[total_minutes_col] = df[CREATED_MINUTE] + 60 * df[CREATED_HOUR]  # minutes since midnight (was 24*hour, a unit slip)
features = [PRICE, LATITUDE, LONGITUDE, total_minutes_col]
new_cols = []
for f in features:
col = 'get_by_mngr_{}_mean'.format(f)
df[col] = df.groupby(MANAGER_ID)[f].transform('mean')
new_cols.append(col)
col = 'get_by_mngr_{}_median'.format(f)
new_cols.append(col)
df[col] = df.groupby(MANAGER_ID)[f].transform('median')
main_hour='main_hour'
bl = df.groupby(MANAGER_ID)[CREATED_HOUR].apply(get_main_value).to_frame(main_hour)
df = pd.merge(df, bl, left_on=MANAGER_ID, right_index=True)
new_cols.append(main_hour)
df_to_merge = df[[LISTING_ID] + new_cols]
train_df = pd.merge(train_df, df_to_merge, on=LISTING_ID)
test_df = pd.merge(test_df, df_to_merge, on=LISTING_ID)
return train_df, test_df, new_cols
#########################################################################################
# OTHER MEDIANS NEW
#########################################################################################
#########################################################################################
# FEATURES
#########################################################################################
GROUPING_MAP = OrderedDict(
[('elevator', {'vals': ['elevator'], 'type': 'in'}),
('hardwood floors', {'vals': ['hardwood'], 'type': 'in'}),
('cats allowed', {'vals': ['cats'], 'type': 'in'}),
('dogs allowed', {'vals': ['dogs'], 'type': 'in'}),
('doorman', {'vals': ['doorman', 'concierge'], 'type': 'in'}),
('dishwasher', {'vals': ['dishwasher'], 'type': 'in'}),
('laundry in building', {'vals': ['laundry'], 'type': 'in'}),
('no fee', {'vals': ['no fee', 'no broker fee', 'no realtor fee'], 'type': 'in'}),
('reduced fee', {'vals': ['reduced fee', 'reduced-fee', 'reducedfee'], 'type': 'in'}),
('fitness center', {'vals': ['fitness'], 'type': 'in'}),
('pre-war', {'vals': ['pre-war', 'prewar'], 'type': 'in'}),
('roof deck', {'vals': ['roof'], 'type': 'in'}),
('outdoor space',
{'vals': ['outdoor space', 'outdoor-space', 'outdoor areas', 'outdoor entertainment'], 'type': 'in'}),
('common outdoor space',
{'vals': ['common outdoor', 'publicoutdoor', 'public-outdoor', 'common-outdoor'], 'type': 'in'}),
('private outdoor space', {'vals': ['private outdoor', 'private-outdoor', 'privateoutdoor'], 'type': 'in'}),
('dining room', {'vals': ['dining'], 'type': 'in'}),
('high speed internet', {'vals': ['internet'], 'type': 'in'}),
('balcony', {'vals': ['balcony'], 'type': 'in'}),
('swimming pool', {'vals': ['swimming', 'pool'], 'type': 'in'}),
('new construction', {'vals': ['new construction'], 'type': 'in'}),
('terrace', {'vals': ['terrace'], 'type': 'in'}),
('exclusive', {'vals': ['exclusive'], 'type': 'equal'}),
('loft', {'vals': ['loft'], 'type': 'in'}),
('garden/patio', {'vals': ['garden'], 'type': 'in'}),
('wheelchair access', {'vals': ['wheelchair'], 'type': 'in'}),
('fireplace', {'vals': ['fireplace'], 'type': 'in'}),
('simplex', {'vals': ['simplex'], 'type': 'in'}),
('lowrise', {'vals': ['lowrise', 'low-rise'], 'type': 'in'}),
('garage', {'vals': ['garage'], 'type': 'in'}),
('furnished', {'vals': ['furnished'], 'type': 'equal'}),
('multi-level', {'vals': ['multi-level', 'multi level', 'multilevel'], 'type': 'in'}),
('high ceilings', {'vals': ['high ceilings', 'highceilings', 'high-ceilings'], 'type': 'in'}),
('parking space', {'vals': ['parking'], 'type': 'in'}),
('live in super', {'vals': ['super'], 'vals2': ['live', 'site'], 'type': 'two'}),
('renovated', {'vals': ['renovated'], 'type': 'in'}),
('green building', {'vals': ['green building'], 'type': 'in'}),
('storage', {'vals': ['storage'], 'type': 'in'}),
('washer', {'vals': ['washer'], 'type': 'in'}),
('stainless steel appliances', {'vals': ['stainless'], 'type': 'in'})])
def normalize_df(df):
df[COL] = df[F_COL].apply(lambda l: [x.lower() for x in l])
def lambda_in(in_arr):
def is_in(l):
for f in l:
for t in in_arr:
if t in f:
return 1
return 0
return is_in
def lambda_equal(val):
def is_equal(l):
for f in l:
if f.strip() == val:
return 1
return 0
return is_equal
def lambda_two_arr(arr1, arr2):
def is_in(l):
for f in l:
for x in arr1:
for y in arr2:
if x in f and y in f:
return 1
return 0
return is_in
def process_features(df):
normalize_df(df)
new_cols = []
for col, m in GROUPING_MAP.iteritems():
new_cols.append(col)
tp = m['type']
if tp == 'in':
df[col] = df[COL].apply(lambda_in(m['vals']))
elif tp == 'equal':
df[col] = df[COL].apply(lambda_equal(m['vals'][0]))
elif tp == 'two':
df[col] = df[COL].apply(lambda_two_arr(m['vals'], m['vals2']))
else:
raise Exception()
return df, new_cols
#########################################################################################
# FEATURES
#########################################################################################
####################################################
#MAGIC
#######################################################
def process_magic(train_df, test_df):
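    # The Renthop "magic" feature: timestamps scraped from listing images.
    # The next line patches one known-bad timestamp (row 80240) before the
    # calendar features are derived from the image upload date.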
image_date = pd.read_csv(magic_file)
image_date.loc[80240,"time_stamp"] = 1478129766
# image_date.loc[image_date['Listing_Id']==7119094, "time_stamp"] = 1478129766
image_date["img_date"] = pd.to_datetime(image_date["time_stamp"], unit="s")
image_date["img_days_passed"] = (image_date["img_date"].max() - image_date["img_date"]).astype(
"timedelta64[D]").astype(int)
image_date["img_date_month"] = image_date["img_date"].dt.month
image_date["img_date_week"] = image_date["img_date"].dt.week
image_date["img_date_day"] = image_date["img_date"].dt.day
image_date["img_date_dayofweek"] = image_date["img_date"].dt.dayofweek
image_date["img_date_dayofyear"] = image_date["img_date"].dt.dayofyear
image_date["img_date_hour"] = image_date["img_date"].dt.hour
image_date["img_date_minute"] = image_date["img_date"].dt.minute
image_date["img_date_second"] = image_date["img_date"].dt.second
image_date["img_date_monthBeginMidEnd"] = image_date["img_date_day"].apply(
lambda x: 1 if x < 10 else 2 if x < 20 else 3)
df = pd.concat([train_df, test_df])
df = pd.merge(df, image_date, left_on=LISTING_ID, right_on='Listing_Id')
new_cols = ["img_days_passed","img_date_month","img_date_week",
"img_date_day","img_date_dayofweek","img_date_dayofyear",
"img_date_hour", "img_date_monthBeginMidEnd",
"img_date_minute", "img_date_second"]#+["img_date", "time_stamp"]
df_to_merge = df[[LISTING_ID] + new_cols]
train_df = pd.merge(train_df, df_to_merge, on=LISTING_ID)
test_df = pd.merge(test_df, df_to_merge, on=LISTING_ID)
return train_df, test_df, new_cols
####################################################
#MAGIC
#######################################################
def shuffle_df(df):
return df.iloc[np.random.permutation(len(df))]
def get_loss_at1K(estimator):
results_on_test = estimator.evals_result()['validation_1']['mlogloss']
return results_on_test[999]
def loss_with_per_tree_stats(train_df, test_df, new_cols):
features, test_df, train_df = process_split(train_df, test_df, new_cols)
train_target, test_target = train_df[TARGET].values, test_df[TARGET].values
del train_df[TARGET]
del test_df[TARGET]
train_df = train_df[features]
test_df = test_df[features]
train_arr, test_arr = train_df.values, test_df.values
print features
seed = int(time())
print 'XGB seed {}'.format(seed)
estimator = xgb.XGBClassifier(n_estimators=1000,
                                  objective='multi:softprob',  # 'mlogloss' is an eval metric, not a training objective
subsample=0.8,
colsample_bytree=0.8,
seed=seed)
eval_set = [(train_arr, train_target), (test_arr, test_target)]
estimator.fit(train_arr, train_target, eval_set=eval_set, eval_metric='mlogloss', verbose=False)
proba = estimator.predict_proba(test_arr)
loss = log_loss(test_target, proba)
loss1K = get_loss_at1K(estimator)
return loss, loss1K, xgboost_per_tree_results(estimator), \
estimator.feature_importances_, get_probs_from_est(estimator, proba, test_df), features
def process_split(train_df, test_df, new_cols):
features = []
features += new_cols
train_df, test_df, new_cols = process_mngr_categ_preprocessing(train_df, test_df)
train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
features += new_cols
train_df, test_df, new_cols = process_bid_categ_preprocessing(train_df, test_df)
train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
features += new_cols
return features, test_df, train_df
def process_all_name(train_df, test_df):
features = ['bathrooms', 'bedrooms', 'latitude',
'longitude', 'price',
'num_features', 'num_photos', 'word_num_in_descr',
"created_month", "created_day",
CREATED_HOUR, CREATED_MINUTE, DAY_OF_WEEK]
train_df, test_df, new_cols = process_manager_num(train_df, test_df)
train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
features += new_cols
train_df, test_df, new_cols = process_bid_num(train_df, test_df)
train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
features += new_cols
train_df, test_df, new_cols = process_listing_id(train_df, test_df)
train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
features += new_cols
train_df, test_df, new_cols = process_nei123(train_df, test_df)
train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
features += new_cols
train_df, new_cols = process_features(train_df)
train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
features+=new_cols
# train_df, test_df, new_cols = process_mngr_avg_median_price(train_df, test_df)
# train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
# features += new_cols
#
#
# train_df, test_df, new_cols = process_other_mngr_medians(train_df, test_df)
# train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
# features += new_cols
#
#
# train_df, test_df, new_cols = process_other_mngr_medians_new(train_df, test_df)
# train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
# features += new_cols
train_df, test_df, new_cols = process_magic(train_df, test_df)
train_df, test_df = shuffle_df(train_df), shuffle_df(test_df)
features += new_cols
return train_df, test_df, features
def xgboost_per_tree_results(estimator):
results_on_test = estimator.evals_result()['validation_1']['mlogloss']
results_on_train = estimator.evals_result()['validation_0']['mlogloss']
return {
'train': results_on_train,
'test': results_on_test
}
def do_test_xgboost(name, mongo_host, experiment_max_time=15*60):
all_losses = []
l_results_per_tree = []
losses_at_1K = []
train_df = load_train()
test_df = load_test()
train_df, test_df, features = process_all_name(train_df, test_df)
fix_index(train_df)
fix_index(test_df)
ii_importance = []
for counter in range(15):
cur_time = time()
N = getN(mongo_host, name, experiment_max_time)
train, test = split_from_N(train_df.copy(), N)
loss, loss1K, losses_per_tree, importance, probs_data, f_names = \
loss_with_per_tree_stats(train, test, features)
probs, test_indexes = probs_data
ii_importance.append(importance.tolist())
cur_time = time() - cur_time
all_losses.append(loss)
losses_at_1K.append(loss1K)
l_results_per_tree.append(losses_per_tree)
out(all_losses, loss, losses_at_1K, loss1K, counter, cur_time)
write_results(N, name, mongo_host, probs,test_indexes, l_results_per_tree, ii_importance, f_names)
do_test_xgboost('stacking_no_mngr_medians', sys.argv[1])
|
[
"dd.petrovskiy@gmail.com"
] |
dd.petrovskiy@gmail.com
|
ee8c553e504a4d6bea4de956dab956ce19d12a5f
|
7f77a1e5b005bb57a43276ce65e18743776aa4e6
|
/user/wechat.py
|
1ae74343f0dab9dc0251118b62bb94624b1907bd
|
[] |
no_license
|
lihuigang/python
|
aba3944e5f9ea4bae5a98a4fff1e87a0ed8723b1
|
e71c80782b1398ce0dcdaf8c572ac33811d843bd
|
refs/heads/master
| 2021-04-27T13:40:37.740391
| 2018-03-23T08:25:04
| 2018-03-23T08:25:04
| 116,439,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 42
|
py
|
# WeChat login
import itchat
itchat.login()
|
[
"lihuigang0104@lihuigang.com"
] |
lihuigang0104@lihuigang.com
|
2f5c2879a35b47476100a62654cda58a38c71639
|
fa16e1d0ed18fd6f9e25bc57f83eec3a4b559e28
|
/sotaai/rl/gym_wrapper.py
|
29d26980a2f9d529b2f34c63ab88d1bc33b4f2a2
|
[] |
no_license
|
stateoftheartai/sotaai-beta
|
83d2d9d01b9321d20ba36f66c6160daa8883adc3
|
0013716ca644cc0434774f1ebc16106dfdd6d100
|
refs/heads/master
| 2023-03-31T04:51:48.009977
| 2021-03-30T22:08:39
| 2021-03-30T22:08:39
| 353,137,512
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
# -*- coding: utf-8 -*-
# Author: Tonio Teran <tonio@stateoftheart.ai>
# Copyright: Stateoftheart AI PBC 2021.
'''OpenAI's Gym library wrapper.'''
import gym # pylint: disable=unused-import
|
[
"noreply@github.com"
] |
stateoftheartai.noreply@github.com
|
89fd42deb21f848164c189a464567bc999be8cb9
|
b37b4d39a205ef8229742c4c5c6a96a227557b32
|
/cycleGAN/generator.py
|
182c6ccafb4ff2f9307a213ac9a91d3d456be59c
|
[] |
no_license
|
aedismorah/cycleGAN
|
89c03767ed5f4c0873ef5e5a8904b72b335b5d2d
|
b77d32815123b6535bf26748ff18fd8d4678a99a
|
refs/heads/main
| 2023-03-02T12:20:03.883130
| 2021-02-14T11:35:54
| 2021-02-14T11:35:54
| 336,785,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,368
|
py
|
import torch.nn as nn
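# The block names roughly follow the CycleGAN paper's appendix: c7s1 is a 7x7
# conv with stride 1, dk a 3x3 stride-2 (downsampling) conv, Rk a two-conv
# residual block, and uk a fractionally-strided (upsampling) conv, each
# followed by instance norm.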
def c7s1(in_channels, out_channels):
return nn.Sequential(
nn.ReflectionPad2d(2),
nn.Conv2d(in_channels, out_channels, kernel_size=7, stride=1),
nn.InstanceNorm2d(out_channels),
nn.ReLU(True)
)
def dk(in_channels, out_channels):
return nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2),
nn.InstanceNorm2d(out_channels),
nn.ReLU(True)
)
def Rk(in_channels, out_channels):
return nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1),
nn.ReflectionPad2d(1),
nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1),
nn.InstanceNorm2d(out_channels)
)
def uk(in_channels, out_channels):
return nn.Sequential(
nn.ConvTranspose2d(in_channels, out_channels, kernel_size=3, stride=2),
nn.InstanceNorm2d(out_channels),
nn.ReLU(True)
)
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
self.first = nn.Sequential(
c7s1(3, 64),
dk(64, 128),
dk(128, 256)
)
self.R1 = Rk(256, 256)
self.R2 = Rk(256, 256)
self.R3 = Rk(256, 256)
self.R4 = Rk(256, 256)
self.R5 = Rk(256, 256)
self.R6 = Rk(256, 256)
self.R7 = Rk(256, 256)
self.R8 = Rk(256, 256)
self.R9 = Rk(256, 256)
self.last = nn.Sequential(
uk(256, 128),
uk(128, 64),
c7s1(64, 3),
nn.Conv2d(3, 3, kernel_size=2, stride=1)
)
self.tanh=nn.Tanh()
self.relu=nn.ReLU()
def forward(self, input):
x1 = self.first(input)
x2 = self.relu(x1 + self.R1(x1))
x3 = self.relu(x2 + self.R2(x2))
x4 = self.relu(x3 + self.R3(x3))
x5 = self.relu(x2 + x4 + self.R4(x4))
x6 = self.relu(x5 + self.R5(x5))
x7 = self.relu(x6 + self.R6(x6))
x8 = self.relu(x3 + x5 + x7 + self.R7(x7))
x9 = self.relu(x8 + self.R8(x8))
x10 = self.relu(x2 + x6 + x9 + self.R9(x9))
x = self.last(x10) + input
return self.tanh(x)
|
[
"noreply@github.com"
] |
aedismorah.noreply@github.com
|
5bcca5b2ff7d223ee53b29f2949498ac1dfe992c
|
06c2d172b28cfa3705e76ee53cb002c256b895ce
|
/goodBoyBigHands.py
|
074621c4f92bb68439089655ca0a03e983043898
|
[] |
no_license
|
charss/algolympics2021
|
2288de652f9e4d0705d6271e6fdbf5112a2eb7ba
|
527557d88bbcbbb2c78989bea1b6b58861d40843
|
refs/heads/main
| 2023-03-05T21:10:04.243770
| 2021-02-13T13:37:36
| 2021-02-13T13:37:36
| 338,304,801
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
import re
fin = []
temp1 = []
l = int(input())
text = ''
for i in range(l):
# for i in range(32, 127):
# text += chr(i)
text = input().lower().split()
for s in text:
x = re.sub("[^a-zA-Z]","", s)
if x != '':
fin.append(x)
# print(fin)  # debug output; only GOOD BOY!/BIG HANDS! should be printed
n = int(input())
for i in range(n):
l = int(input())
for i in range(l):
temp1 = []
text = input().lower().split()
for s in text:
x = re.sub("[^a-zA-Z]","", s)
if x != '':
temp1.append(x)
        # print(temp1)  # debug output
temp = [x for x in temp1 if x in fin]
        # print(temp)  # debug output
if len(temp) >= len(temp1) / 2:
print("GOOD BOY!")
else:
print("BIG HANDS!")
|
[
"charleskylereyes16@gmail.com"
] |
charleskylereyes16@gmail.com
|
9d0d06450dae426bb8132ab21d7681e52090a89b
|
dc59a18b105dc12a951416b9f72c92b8960e612b
|
/tSimMicrometer.py
|
9027788df0b883f31fed15a938e119e48a525460
|
[] |
no_license
|
DanMailman/SCADAVIS
|
539b122890720fc0d998511e53956e54ac01d589
|
59c9b1da396131fca9f05fc6879f99ab5f52f419
|
refs/heads/main
| 2023-06-15T12:56:35.310516
| 2021-07-02T00:58:34
| 2021-07-02T00:58:34
| 377,230,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,070
|
py
|
from sys import dont_write_bytecode
from Utilities import LimitVal
class tSimMicrometer:
dictDefaultConfig = { 'Min': 0, 'Max': 100, 'Units': 'mm'}
def __init__(self, oDevToMeasure, dictConfig = dictDefaultConfig):
# TODO: What if Min != 0
print(f'tSimMicrometer(): INIT!')
self.oDev = oDevToMeasure
self.dictConfig = dictConfig
self.dictSCADA = { 'pos': { 'get': self.GetMeas }}
def GetMeas(self):
return LimitVal(self.oDev.dictSCADA['pos']['get'](),
self.dictConfig['Min'],
self.dictConfig['Max'])
    def GetUnits(self):
        # self.sUnits is never defined; the units live in the config dict
        return self.dictConfig['Units']
if __name__ == "__main__":
class tMMDevice:
dictDefaultConfig = { 'Min': 0, 'Max': 10, 'Units': 'mm'}
def __init__(self,dictConfig = dictDefaultConfig):
self.dictConfig = dictConfig
self.nVal = 0
self.dictSCADA = { 'pos': { 'get': self.GetMeas } }
def GetMeas(self):
self.nVal +=1
return self.nVal
def demo():
oMMeter = tSimMicrometer(tMMDevice())
for i in range(10):
print(f"oMMeter: {oMMeter.dictSCADA['pos']['get']()} {oMMeter.dictConfig['Units']}")
demo()
|
[
"dmailman@gmail.com"
] |
dmailman@gmail.com
|
d374f557f75fbfa71084b996ee9029ef19b9f982
|
fa7e0fd1013762eac8d878b70582544a5598600d
|
/django_3_2_18/users/views.py
|
748e20b3538248543b14b3b7547299dacb80c910
|
[] |
no_license
|
gy0109/django_2.18
|
b8c48005027720ab46e7e050ff139b57437ff5f6
|
0c9971fae249a4f1932b3fc20d29fc7232b244ab
|
refs/heads/master
| 2020-04-24T01:41:05.553801
| 2019-02-21T12:37:36
| 2019-02-21T12:37:36
| 171,608,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,666
|
py
|
import json
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
# Create your views here. (views)
# /user/index/
def index(request):
"""
index视图部分
:param request: 接收到的请求对象
:return: 响应对象
"""
return HttpResponse('hello world!')
def weather(request, city, year):
print('city=%s' % city)
print('year=%s' % year)
return HttpResponse('ok!')
def weather1(request, city, year):
print('city=%s' % city)
print('year=%s' % year)
return HttpResponse('ok! weather1! ')
def weather2(request, city, year):
print('city=%s' % city)
print('year=%s' % year)
a = request.GET.get('a')
b = request.GET.get('b')
a_list = request.GET.getlist('a')
print(a, b, a_list)
    return HttpResponse('ok! weather2!')
def get_post_params(request):
    # Get form data from the POST request
a = request.POST.get('a')
b = request.POST.get('b')
a_list = request.POST.getlist('a')
print(a, b, a_list)
return HttpResponse('get_post')
def get_body_json(request):
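    # Expects a JSON request body such as {"a": 1, "b": 2} (illustrative
    # payload only); prints request metadata plus both fields.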
json_str = request.body.decode()
req_data = json.loads(json_str)
print(request.META['CONTENT_TYPE'])
print(request.META['SERVER_NAME'])
print(request.method)
print(request.user)
print(request.encoding)
print(request.path)
    print(request.FILES)  # Django exposes uploads as request.FILES; request.files does not exist
print(req_data['a'])
print(req_data['b'])
return HttpResponse('OK get_body')
# Custom response body
def response_json(request):
json_dict = {'name': 'gy', 'age': 12}
# return HttpResponse('OK', content_type='text/html', status=200)
return JsonResponse(json_dict)
|
[
"1974326896@qq.com"
] |
1974326896@qq.com
|
7f0bca736406ad4c6513c80772cdfc7f57e1b426
|
bc9f60ecf1d3faff2fb893a015eee1a002512dd3
|
/newLstm/lstm.py
|
0e16680f877218b44bba7fa100a2981ea6abbae4
|
[] |
no_license
|
syahmifouzi/Lessons
|
933e93004e90f65ff1cecc18b1987700d21ead28
|
6777026d20ddbd3a583d5084aac7acb36ca7da32
|
refs/heads/master
| 2020-04-11T00:00:53.764217
| 2019-06-04T07:00:37
| 2019-06-04T07:00:37
| 161,372,386
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,391
|
py
|
import random
import numpy as np
import math
import sys
import json
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
def sigmoid(x):
return 1. / (1 + np.exp(-x))
def sigmoid_derivative(values):
return values*(1-values)
def tanh_derivative(values):
return 1. - values ** 2
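# NOTE: both derivative helpers take the already-activated values, not the raw
# pre-activations: if s = sigmoid(x) then ds/dx = s * (1 - s), and if
# t = tanh(x) then dt/dx = 1 - t ** 2.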
# creates a uniform random array w/ values in [a, b) and shape args
# (seed once here: reseeding inside rand_arr on every call would make
#  wg, wi, wf and wo all identical)
np.random.seed(0)
def rand_arr(a, b, *args):
    return np.random.rand(*args) * (b - a) + a
#fp = 0
#fc = 0
class LstmParam:
def __init__(self, mem_cell_ct, x_dim):
self.mem_cell_ct = mem_cell_ct
# print("self.mem_cell_ct", self.mem_cell_ct)
self.x_dim = x_dim
# print("self.x_dim", self.x_dim)
concat_len = x_dim + mem_cell_ct
# print("concat_len", concat_len)
# weight matrices
self.wg = rand_arr(-0.1, 0.1, mem_cell_ct, concat_len)
self.wi = rand_arr(-0.1, 0.1, mem_cell_ct, concat_len)
self.wf = rand_arr(-0.1, 0.1, mem_cell_ct, concat_len)
self.wo = rand_arr(-0.1, 0.1, mem_cell_ct, concat_len)
# print("self.wg", self.wg)
# print("self.wi", self.wi)
# print("self.wf", self.wf)
# print("self.wo", self.wo)
# print("self.wo", np.shape(self.wo))
# bias terms
self.bg = rand_arr(-0.1, 0.1, mem_cell_ct)
self.bi = rand_arr(-0.1, 0.1, mem_cell_ct)
self.bf = rand_arr(-0.1, 0.1, mem_cell_ct)
self.bo = rand_arr(-0.1, 0.1, mem_cell_ct)
# print("self.bo", self.bo)
# print("self.bo", np.shape(self.bo))
# diffs (derivative of loss function w.r.t. all parameters)
self.wg_diff = np.zeros((mem_cell_ct, concat_len))
self.wi_diff = np.zeros((mem_cell_ct, concat_len))
self.wf_diff = np.zeros((mem_cell_ct, concat_len))
self.wo_diff = np.zeros((mem_cell_ct, concat_len))
self.bg_diff = np.zeros(mem_cell_ct)
self.bi_diff = np.zeros(mem_cell_ct)
self.bf_diff = np.zeros(mem_cell_ct)
self.bo_diff = np.zeros(mem_cell_ct)
# print("self.wo_diff", np.shape(self.wo_diff))
#json_dump = json.dumps({'bg': self.bg}, cls=NumpyEncoder)
#f = open('wg.txt', 'w')
#json.dump(json_dump, f)
#f.close()
def apply_diff(self, lr = 1):
#if self.wg_diff[0] == 0:
# print("its zero")
# print("self.wg B4", self.wg)
# print("lr", lr)
# print("self.wg_diff", self.wg_diff)
self.wg -= lr * self.wg_diff
# print("self.wg AFTER", self.wg)
self.wi -= lr * self.wi_diff
self.wf -= lr * self.wf_diff
self.wo -= lr * self.wo_diff
self.bg -= lr * self.bg_diff
self.bi -= lr * self.bi_diff
self.bf -= lr * self.bf_diff
self.bo -= lr * self.bo_diff
# reset diffs to zero
self.wg_diff = np.zeros_like(self.wg)
self.wi_diff = np.zeros_like(self.wi)
self.wf_diff = np.zeros_like(self.wf)
self.wo_diff = np.zeros_like(self.wo)
self.bg_diff = np.zeros_like(self.bg)
self.bi_diff = np.zeros_like(self.bi)
self.bf_diff = np.zeros_like(self.bf)
self.bo_diff = np.zeros_like(self.bo)
#print("self.wg_diff", self.wg_diff)
class LstmState:
def __init__(self, mem_cell_ct, x_dim):
self.g = np.zeros(mem_cell_ct)
# print("self.g", self.g)
self.i = np.zeros(mem_cell_ct)
self.f = np.zeros(mem_cell_ct)
self.o = np.zeros(mem_cell_ct)
self.s = np.zeros(mem_cell_ct)
self.h = np.zeros(mem_cell_ct)
self.bottom_diff_h = np.zeros_like(self.h)
self.bottom_diff_s = np.zeros_like(self.s)
class LstmNode:
def __init__(self, lstm_param, lstm_state):
# store reference to parameters and to activations
self.state = lstm_state
# print("self.state", self.state)
self.param = lstm_param
# print("self.param", self.param)
# non-recurrent input concatenated with recurrent input
self.xc = None
# print("self.param", self.param)
def bottom_data_is(self, x, s_prev = None, h_prev = None):
# if this is the first lstm node in the network
if s_prev is None: s_prev = np.zeros_like(self.state.s)
if h_prev is None: h_prev = np.zeros_like(self.state.h)
# save data for use in backprop
self.s_prev = s_prev
self.h_prev = h_prev
# print("self.s_prev", self.s_prev)
# print("self.h_prev", self.h_prev)
# concatenate x(t) and h(t-1)
xc = np.hstack((x, h_prev))
# print("x", x)
# print("h_prev", h_prev)
# print("xc", xc)
self.state.g = np.tanh(np.dot(self.param.wg, xc) + self.param.bg)
self.state.i = sigmoid(np.dot(self.param.wi, xc) + self.param.bi)
self.state.f = sigmoid(np.dot(self.param.wf, xc) + self.param.bf)
self.state.o = sigmoid(np.dot(self.param.wo, xc) + self.param.bo)
self.state.s = self.state.g * self.state.i + s_prev * self.state.f
self.state.h = self.state.s * self.state.o
self.xc = xc
def top_diff_is(self, top_diff_h, top_diff_s):
#global fp
#global fc
#fc += 1
#print("calling", fc)
# notice that top_diff_s is carried along the constant error carousel
# print("top_diff_h", top_diff_h)
ds = self.state.o * top_diff_h + top_diff_s
do = self.state.s * top_diff_h
di = self.state.g * ds
dg = self.state.i * ds
df = self.s_prev * ds
# diffs w.r.t. vector inside sigma / tanh function
di_input = sigmoid_derivative(self.state.i) * di
df_input = sigmoid_derivative(self.state.f) * df
do_input = sigmoid_derivative(self.state.o) * do
dg_input = tanh_derivative(self.state.g) * dg
# diffs w.r.t. inputs
#if fp == 0:
#print("self.param.wg_diff B4", self.param.wg_diff)
#print("di_input", dg_input)
#print("self.xc", self.xc)
#print("OUTER",np.outer(dg_input, self.xc))
# print("OUTER SHAPE", np.shape(np.outer(di_input, self.xc)))
#print("self.param.wg_diff B4", self.param.wg_diff[0][0])
# print("di_input", np.shape(di_input))
# print("self.xc", np.shape(self.xc))
# sys.exit()
self.param.wi_diff += np.outer(di_input, self.xc)
# print("self.param.wi_diff AFTER", self.param.wi_diff)
self.param.wf_diff += np.outer(df_input, self.xc)
self.param.wo_diff += np.outer(do_input, self.xc)
self.param.wg_diff += np.outer(dg_input, self.xc)
self.param.bi_diff += di_input
self.param.bf_diff += df_input
self.param.bo_diff += do_input
self.param.bg_diff += dg_input
#if fp == 0:
#print("self.param.wg_diff AFTER", self.param.wg_diff)
# fp = fp + 1
#if fp == 0:
# fp = fp + 1
# compute bottom diff
dxc = np.zeros_like(self.xc)
dxc += np.dot(self.param.wi.T, di_input)
dxc += np.dot(self.param.wf.T, df_input)
dxc += np.dot(self.param.wo.T, do_input)
dxc += np.dot(self.param.wg.T, dg_input)
# save bottom diffs
self.state.bottom_diff_s = ds * self.state.f
self.state.bottom_diff_h = dxc[self.param.x_dim:]
# print("dxc", dxc)
# print("self.param.x_dim", self.param.x_dim)
# print("self.state.bottom_diff_h", self.state.bottom_diff_h)
class LstmNetwork():
def __init__(self, lstm_param):
self.lstm_param = lstm_param
self.lstm_node_list = []
# input sequence
self.x_list = []
def y_list_is(self, y_list, loss_layer):
"""
Updates diffs by setting target sequence
with corresponding loss layer.
Will *NOT* update parameters. To update parameters,
call self.lstm_param.apply_diff()
"""
assert len(y_list) == len(self.x_list)
# print("self.x_list", self.x_list)
idx = len(self.x_list) - 1
# print("idx", idx)
# first node only gets diffs from label ...
loss = loss_layer.loss(self.lstm_node_list[idx].state.h, y_list[idx])
# print("state_h", self.lstm_node_list[idx].state.h)
# print("y_list", y_list)
diff_h = loss_layer.bottom_diff(self.lstm_node_list[idx].state.h, y_list[idx])
# here s is not affecting loss due to h(t+1), hence we set equal to zero
diff_s = np.zeros(self.lstm_param.mem_cell_ct)
# print("diff_h", diff_h)
# print("loss", loss)
self.lstm_node_list[idx].top_diff_is(diff_h, diff_s)
idx -= 1
### ... following nodes also get diffs from next nodes, hence we add diffs to diff_h
### we also propagate error along constant error carousel using diff_s
while idx >= 0:
loss += loss_layer.loss(self.lstm_node_list[idx].state.h, y_list[idx])
diff_h = loss_layer.bottom_diff(self.lstm_node_list[idx].state.h, y_list[idx])
diff_h += self.lstm_node_list[idx + 1].state.bottom_diff_h
diff_s = self.lstm_node_list[idx + 1].state.bottom_diff_s
self.lstm_node_list[idx].top_diff_is(diff_h, diff_s)
idx -= 1
#global fp
#fp = 0
return loss
def x_list_clear(self):
self.x_list = []
def x_list_add(self, x):
self.x_list.append(x)
if len(self.x_list) > len(self.lstm_node_list):
# need to add new lstm node, create new state mem
lstm_state = LstmState(self.lstm_param.mem_cell_ct, self.lstm_param.x_dim)
self.lstm_node_list.append(LstmNode(self.lstm_param, lstm_state))
# get index of most recent x input
idx = len(self.x_list) - 1
if idx == 0:
# no recurrent inputs yet
self.lstm_node_list[idx].bottom_data_is(x)
else:
s_prev = self.lstm_node_list[idx - 1].state.s
h_prev = self.lstm_node_list[idx - 1].state.h
# print("h_prev", h_prev)
self.lstm_node_list[idx].bottom_data_is(x, s_prev, h_prev)
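# A minimal usage sketch (an addition, not part of the original module):
# ToyLossLayer is an assumed helper -- squared loss on the first unit of h --
# and any object exposing loss(pred, label) and bottom_diff(pred, label)
# would work in its place.
if __name__ == "__main__":
    class ToyLossLayer:
        @classmethod
        def loss(cls, pred, label):
            return (pred[0] - label) ** 2

        @classmethod
        def bottom_diff(cls, pred, label):
            diff = np.zeros_like(pred)
            diff[0] = 2 * (pred[0] - label)
            return diff

    lstm_param = LstmParam(100, 50)          # 100 memory cells, x_dim = 50
    lstm_net = LstmNetwork(lstm_param)
    y_list = [-0.5, 0.2, 0.1, -0.5]          # one target per timestep
    inputs = [np.random.random(50) for _ in y_list]
    for epoch in range(10):
        for x in inputs:
            lstm_net.x_list_add(x)
        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        print(loss)
        lstm_net.x_list_clear()
        lstm_param.apply_diff(lr=0.1)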
|
[
"syahmi.fouzi@gmail.com"
] |
syahmi.fouzi@gmail.com
|
b2155da0b6053ea84fa3ba2291e9437337804439
|
17a2201de4ea55752b3707ec416278934eb1785f
|
/src/authentication/admin.py
|
859cf6016e8e17a1829a9492a0bfe87dd3e8062f
|
[] |
no_license
|
thepsalmist/role_based_access_control
|
134eb4022c8e6ebefe209e09e14978925de6cc49
|
8b0f28fe555ee2f8e4a43c0c14735e43082da495
|
refs/heads/master
| 2023-03-05T00:49:45.210097
| 2021-02-14T19:15:53
| 2021-02-14T19:15:53
| 338,590,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 809
|
py
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import CustomUser
class CustomUserAdmin(UserAdmin):
model = CustomUser
list_display = (
"email",
"is_staff",
"is_active",
)
list_filter = (
"email",
"is_staff",
"is_active",
)
fieldsets = (
(None, {"fields": ("email", "password")}),
("Permissions", {"fields": ("is_staff", "is_active")}),
)
add_fieldsets = (
(
None,
{
"classes": ("wide",),
"fields": ("email", "password1", "password2", "is_staff", "is_active"),
},
),
)
search_fields = ("email",)
ordering = ("email",)
admin.site.register(CustomUser, CustomUserAdmin)
|
[
"76.thealchemist@gmail.com"
] |
76.thealchemist@gmail.com
|
d8d8b3a049def033de5d8a9e4ba1045238c02c95
|
2c649895227ac385897c6103df57d65f5381cea1
|
/Faq/views.py
|
367c773395a3052279f742d3779815598d31f9cc
|
[] |
no_license
|
ifekeyz/KendraGroup
|
59b71773da37287fec16d1da513d834df6572f3d
|
885e54af6aeb6a576368b3bd745ce0a791f4ee2b
|
refs/heads/master
| 2023-07-16T13:56:12.447060
| 2021-09-06T02:16:39
| 2021-09-06T02:16:39
| 403,452,560
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 744
|
py
|
from django.shortcuts import render, redirect
from .models import FAQs, Message
from django.contrib import messages
# Create your views here.
def index(request):
faqs = FAQs.objects.all()
context = {
'faqs':faqs
}
return render(request, 'faq/faq.html',context)
def message(request):
if request.method == 'POST':
name = request.POST['name']
email = request.POST['email']
subject = request.POST['subject']
message = request.POST['message']
message = Message(name=name, email=email,subject=subject, message=message)
message.save()
        messages.success(request, 'Your request has been received, a team member will get back')
    return redirect('about')
|
[
"feranmi.micheal@gmail.com"
] |
feranmi.micheal@gmail.com
|
609a202b9806a22d3f07180acfcc1ea99da4b3d4
|
ab8714c73f34a21ae3756e3e8f90a673e6ffd4d5
|
/env/src/orders/admin.py
|
f53b75532f73ae8fcdf31503dc9375668f96bac3
|
[] |
no_license
|
ambaresthete/AnybodyCook
|
f89b858f9a2a02d68c6c8ddf0dfa9fb92c1f1451
|
94af7320e3a6ed32822c5f288d8ba18e12cf6fa7
|
refs/heads/master
| 2020-03-20T21:53:22.014763
| 2018-11-19T17:24:29
| 2018-11-19T17:24:29
| 137,766,205
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117
|
py
|
from django.contrib import admin
from .models import Order
# Register your models here.
admin.site.register(Order)
|
[
"t.ambareesh@github.com"
] |
t.ambareesh@github.com
|
3115dba334b96fae367bec65fe0d65a2afd69de0
|
c9fbb498e0077d69069a8ca122ef30f76b7576f8
|
/pyesm/components/pism/time_bounds.py
|
c145901cc5c5e10e68432e9f88ed71e2122730ae
|
[] |
no_license
|
pgierz/pyesm
|
f465e526b5ccb46248a02d80a94283dc9f644d20
|
de479a5337dae1e377807318792db559ef7e542a
|
refs/heads/master
| 2021-06-06T06:16:34.316245
| 2019-11-11T07:52:33
| 2019-11-11T07:52:33
| 164,526,660
| 0
| 0
| null | 2021-06-01T23:18:06
| 2019-01-08T01:18:47
|
Python
|
UTF-8
|
Python
| false
| false
| 759
|
py
|
import numpy as np
from pyesm.core.time_control.esm_calendar import Date
time_array = np.empty((24, 1))
time_bounds_array = np.empty((24, 2))
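# One row per month across 1850-1851: time_array holds each month's midpoint
# and time_bounds_array its [start, end], both as 1-based day counts from
# 1850-01-01.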
date1 = Date("1850-01-01")
date2 = Date("1851-01-01")
end_day = 0
for year_number, date in enumerate([date1, date2]):
for index, month in enumerate(date._calendar.monthnames):
index += year_number * 12
length_of_this_month = date._calendar.day_in_month(date.year, month)
end_day += length_of_this_month
middle_day = end_day - (length_of_this_month / 2)
start_day = 1 + end_day - length_of_this_month
time_array[index] = middle_day
time_bounds_array[index, 0], time_bounds_array[index, 1] = start_day, end_day
print(time_array)
print(time_bounds_array)
|
[
"pgierz@awi.de"
] |
pgierz@awi.de
|
f65d7f4ed9bbaf5c442a94a94788f83ebbb04726
|
8e0cf61eb6d7b68077be2d6676bed8444e105a07
|
/P3/multiagent/multiAgentsNoSabe.py
|
3c2b786dc1d0446fa67b8bb5b19eaf891c21b564
|
[] |
no_license
|
marcrabat/AI
|
487e623c1d3919c2defe7b3daf98fb434d5203a8
|
8e4254342107835e9d4133ee7f32f5e1aac4215e
|
refs/heads/master
| 2021-09-10T11:17:50.642321
| 2018-03-25T12:36:19
| 2018-03-25T12:36:19
| 118,330,539
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,975
|
py
|
# multiAgents.py
# --------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from util import manhattanDistance
from game import Directions
import random, util, sys
from game import Agent
class ReflexAgent(Agent):
"""
A reflex agent chooses an action at each choice point by examining
its alternatives via a state evaluation function.
The code below is provided as a guide. You are welcome to change
it in any way you see fit, so long as you don't touch our method
headers.
"""
def getAction(self, gameState):
"""
You do not need to change this method, but you're welcome to.
getAction chooses among the best options according to the evaluation function.
Just like in the previous project, getAction takes a GameState and returns
some Directions.X for some X in the set {North, South, West, East, Stop}
"""
# Collect legal moves and successor states
legalMoves = gameState.getLegalActions()
# Choose one of the best actions
scores = [self.evaluationFunction(gameState, action) for action in legalMoves]
bestScore = max(scores)
bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]
chosenIndex = random.choice(bestIndices) # Pick randomly among the best
"Add more of your code here if you want to"
return legalMoves[chosenIndex]
def evaluationFunction(self, currentGameState, action):
"""
Design a better evaluation function here.
The evaluation function takes in the current and proposed successor
GameStates (pacman.py) and returns a number, where higher numbers are better.
The code below extracts some useful information from the state, like the
remaining food (newFood) and Pacman position after moving (newPos).
newScaredTimes holds the number of moves that each ghost will remain
scared because of Pacman having eaten a power pellet.
Print out these variables to see what you're getting, then combine them
to create a masterful evaluation function.
"""
# Useful information you can extract from a GameState (pacman.py)
successorGameState = currentGameState.generatePacmanSuccessor(action)
newPos = successorGameState.getPacmanPosition()
newFood = successorGameState.getFood()
newGhostStates = successorGameState.getGhostStates()
newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]
'''
python pacman.py -p ReflexAgent -l testClassic
Try out your reflex agent on the default mediumClassic layout with one ghost or two (and animation off to speed up the display):
python pacman.py --frameTime 0 -p ReflexAgent -k 1
python pacman.py --frameTime 0 -p ReflexAgent -k 2
'''
"*** YOUR CODE HERE ***"
remainingFood = newFood.asList()
maxScore = sys.maxint
if len(remainingFood) == 0:
return maxScore
closer_food = min([util.manhattanDistance(newPos, food) for food in remainingFood])
        # Distance to the nearest ghost.
        closer_ghost = min([util.manhattanDistance(newPos, ghost.getPosition()) for ghost in newGhostStates])
score = successorGameState.getScore() + (closer_ghost-closer_food)
return score
def scoreEvaluationFunction(currentGameState):
"""
This default evaluation function just returns the score of the state.
The score is the same one displayed in the Pacman GUI.
This evaluation function is meant for use with adversarial search agents
(not reflex agents).
"""
return currentGameState.getScore()
class MultiAgentSearchAgent(Agent):
"""
This class provides some common elements to all of your
multi-agent searchers. Any methods defined here will be available
to the MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent.
You *do not* need to make any changes here, but you can if you want to
add functionality to all your adversarial search agents. Please do not
remove anything, however.
Note: this is an abstract class: one that should not be instantiated. It's
only partially specified, and designed to be extended. Agent (game.py)
is another abstract class.
"""
def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'):
self.index = 0 # Pacman is always agent index 0
self.evaluationFunction = util.lookup(evalFn, globals())
self.depth = int(depth)
class MinimaxAgent(MultiAgentSearchAgent):
"""
Your minimax agent (question 2)
"""
    def minimax(self, gameState, depth, agent):
        # Terminal test: depth limit reached or the game has ended.
        if depth == self.depth or gameState.isWin() or gameState.isLose():
            return self.evaluationFunction(gameState)
        num_agents = gameState.getNumAgents()
        next_agent = (agent + 1) % num_agents
        # The search depth grows only after every agent has moved once.
        next_depth = depth + 1 if next_agent == 0 else depth
        if agent == 0:
            # Pacman maximizes.
            best_score = float('-inf')
            for action in gameState.getLegalActions(agent):
                score = self.minimax(gameState.generateSuccessor(agent, action), next_depth, next_agent)
                best_score = max(best_score, score)
        else:
            # Ghosts minimize.
            best_score = float('inf')
            for action in gameState.getLegalActions(agent):
                score = self.minimax(gameState.generateSuccessor(agent, action), next_depth, next_agent)
                best_score = min(best_score, score)
        return best_score
def getAction(self, gameState):
"""
Returns the minimax action from the current gameState using self.depth
and self.evaluationFunction.
Here are some method calls that might be useful when implementing minimax.
gameState.getLegalActions(agentIndex):
Returns a list of legal actions for an agent
agentIndex=0 means Pacman, ghosts are >= 1
gameState.generateSuccessor(agentIndex, action):
Returns the successor game state after an agent takes an action
gameState.getNumAgents():
Returns the total number of agents in the game
"""
"*** YOUR CODE HERE ***"
        pacman = 0
        legal_actions = gameState.getLegalActions(pacman)
        # Score each root action with minimax and return the best one.
        scores = [self.minimax(gameState.generateSuccessor(pacman, action), 0, 1)
                  for action in legal_actions]
        return legal_actions[scores.index(max(scores))]
class AlphaBetaAgent(MultiAgentSearchAgent):
"""
Your minimax agent with alpha-beta pruning (question 3)
"""
def getAction(self, gameState):
"""
Returns the minimax action using self.depth and self.evaluationFunction
"""
"*** YOUR CODE HERE ***"
util.raiseNotDefined()
class ExpectimaxAgent(MultiAgentSearchAgent):
"""
Your expectimax agent (question 4)
"""
def getAction(self, gameState):
"""
Returns the expectimax action using self.depth and self.evaluationFunction
All ghosts should be modeled as choosing uniformly at random from their
legal moves.
"""
"*** YOUR CODE HERE ***"
util.raiseNotDefined()
def betterEvaluationFunction(currentGameState):
"""
Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
evaluation function (question 5).
DESCRIPTION: <write something here so we know what you did>
"""
"*** YOUR CODE HERE ***"
util.raiseNotDefined()
# Abbreviation
better = betterEvaluationFunction
|
[
"marc.rabat01@estudiant.upf.edu"
] |
marc.rabat01@estudiant.upf.edu
|
9d5a6ba6446140a91ac8195ae11cdf52026435c4
|
ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1
|
/res/packages/scripts/scripts/client/tutorial/control/quests/__init__.py
|
e0557fa51737e0fce1a443bfd0b2043093d7aa71
|
[] |
no_license
|
webiumsk/WOT-0.9.20.0
|
de3d7441c5d442f085c47a89fa58a83f1cd783f2
|
811cb4e1bca271372a1d837a268b6e0e915368bc
|
refs/heads/master
| 2021-01-20T22:11:45.505844
| 2017-08-29T20:11:38
| 2017-08-29T20:11:38
| 101,803,045
| 0
| 1
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 3,036
|
py
|
# 2017.08.29 21:51:51 Central Europe (summer time)
# Embedded file name: scripts/client/tutorial/control/quests/__init__.py
from tutorial.control.lobby.context import LobbyBonusesRequester
from tutorial.control.quests import queries
from tutorial.data.effects import EFFECT_TYPE
from tutorial.control import ControlsFactory
from tutorial.control import context as core_ctx
from tutorial.control import functional as core_func
from tutorial.control.chains import functional as chains_func
from tutorial.control.lobby import functional as lobby_func
from tutorial.control.quests import functional as quests_func
class QuestsControlsFactory(ControlsFactory):
def __init__(self):
effects = {EFFECT_TYPE.ACTIVATE: core_func.FunctionalActivateEffect,
EFFECT_TYPE.DEACTIVATE: core_func.FunctionalDeactivateEffect,
EFFECT_TYPE.GLOBAL_ACTIVATE: core_func.FunctionalGlobalActivateEffect,
EFFECT_TYPE.GLOBAL_DEACTIVATE: core_func.FunctionalGlobalDeactivateEffect,
EFFECT_TYPE.SET_GUI_ITEM_CRITERIA: core_func.FunctionalSetGuiItemCriteria,
EFFECT_TYPE.SET_ACTION: core_func.FunctionalSetAction,
EFFECT_TYPE.REMOVE_ACTION: core_func.FunctionalRemoveAction,
EFFECT_TYPE.REFUSE_TRAINING: core_func.FunctionalRefuseTrainingEffect,
EFFECT_TYPE.REQUEST_BONUS: core_func.FunctionalRequestBonusEffect,
EFFECT_TYPE.NEXT_CHAPTER: core_func.FunctionalNextChapterEffect,
EFFECT_TYPE.CLEAR_SCENE: core_func.FunctionalClearScene,
EFFECT_TYPE.GO_SCENE: core_func.GoToSceneEffect,
EFFECT_TYPE.SHOW_HINT: chains_func.FunctionalShowHint,
EFFECT_TYPE.CLOSE_HINT: chains_func.FunctionalCloseHint,
EFFECT_TYPE.SHOW_WINDOW: quests_func.ShowSharedWindowEffect,
EFFECT_TYPE.SELECT_VEHICLE_IN_HANGAR: quests_func.SelectVehicleInHangar,
EFFECT_TYPE.SAVE_TUTORIAL_SETTING: quests_func.SaveTutorialSettingEffect,
EFFECT_TYPE.SAVE_ACCOUNT_SETTING: quests_func.SaveAccountSettingEffect,
EFFECT_TYPE.RUN_TRIGGER: quests_func.QuestsFunctionalRunTriggerEffect,
EFFECT_TYPE.SHOW_UNLOCKED_CHAPTER: chains_func.FunctionalShowUnlockedChapter,
EFFECT_TYPE.SHOW_AWARD_WINDOW: chains_func.FunctionalShowAwardWindow,
EFFECT_TYPE.ENTER_QUEUE: chains_func.FunctionalSwitchToRandom}
_queries = {'awardWindow': queries.AwardWindowContentQuery}
ControlsFactory.__init__(self, effects, _queries)
def createBonuses(self, completed):
return LobbyBonusesRequester(completed)
def createSoundPlayer(self):
return core_ctx.NoSound()
def createFuncScene(self, sceneModel):
return core_func.FunctionalScene(sceneModel)
def createFuncInfo(self):
return lobby_func.FunctionalLobbyChapterInfo()
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\tutorial\control\quests\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:51:51 Central Europe (summer time)
|
[
"info@webium.sk"
] |
info@webium.sk
|
b24be479fbc1e14fe1264446b6a3b99c4514a9f2
|
2dd30033a83d8684b304eda9b2d535bdffea2cc6
|
/src/motor.py
|
012e9b29003a03ad679c543c6647b5d8c8bd9e71
|
[
"MIT"
] |
permissive
|
dmaicher/rpi-quadcopter
|
e83ff8610a3c99244706a9f7f660727a3d1d196e
|
096572ba27bc1dd5ca5db17ddefae35ce859eba9
|
refs/heads/master
| 2020-04-18T22:12:11.205896
| 2014-05-03T12:31:42
| 2014-05-03T12:31:42
| 19,201,046
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
class Motor:
MIN_TICK = 150
MAX_TICK = 600
DIFF_TICK = MAX_TICK - MIN_TICK
def __init__(self, id, pwm, channel):
self.id = id
self.power = 0 #percent 0-100
self.pwm = pwm
self.channel = channel
self.pwm_set_power()
def change_power(self, diff):
self.set_power(self.power + diff)
def set_power(self, power):
self.power = max(0, min(100, power))
self.pwm_set_power()
    def __str__(self):
        return "motor #" + str(self.id) + ", " + str(self.power)
def pwm_set_power(self):
self.pwm.setPWM(self.channel, 0, int(self.MIN_TICK + self.power/100.0 * self.DIFF_TICK))
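
# Minimal usage sketch (assumes a PCA9685-style PWM driver object exposing
# setPWM(channel, on, off), such as the Adafruit servo driver; the names
# below are illustrative, not part of this module):
#
#   pwm = PWM(0x40)                  # hypothetical driver instance
#   motor = Motor("1", pwm, channel=0)
#   motor.set_power(50)              # tick = 150 + 0.5 * 450 = 375
#   motor.change_power(-20)          # clamped to [0, 100] -> 30% power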
|
[
"mail@dmaicher.de"
] |
mail@dmaicher.de
|
400bd9c3f0c6ff4b9ec361588aa8bc0ee3f82070
|
31c738876a8037d106a767f0761505282de842c1
|
/django_ical/feedgenerator.py
|
232fb705e415df63b05957ca3d7b9f253bb587ab
|
[] |
no_license
|
mick88/django-ical
|
e8ddc84e2ac5b3922915393f0fefc638c6371559
|
94558a542ce2baa687cf50ab2f989537caa4b0ed
|
refs/heads/master
| 2021-01-10T06:37:34.786239
| 2015-02-15T17:41:45
| 2015-02-15T17:41:45
| 43,001,011
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,939
|
py
|
#:coding=utf-8:
"""
iCalendar feed generation library -- used for generating
iCalendar feeds.
Sample usage:
>>> from django_ical import feedgenerator
>>> from datetime import datetime
>>> feed = feedgenerator.ICal20Feed(
... title=u"My Events",
... link=u"http://www.example.com/events.ical",
... description=u"A iCalendar feed of my events.",
... language=u"en",
... )
>>> feed.add_item(
... title="Hello",
... link=u"http://www.example.com/test/",
...     description="Testing.",
... start_datetime=datetime(2012, 5, 6, 10, 00),
... end_datetime=datetime(2012, 5, 6, 12, 00),
... )
>>> fp = open('test.ical', 'w')
>>> feed.write(fp, 'utf-8')
>>> fp.close()
For definitions of the iCalendar format see:
http://www.ietf.org/rfc/rfc2445.txt
"""
from icalendar import Calendar, Event
from django.utils.feedgenerator import SyndicationFeed
__all__ = (
'ICal20Feed',
'DefaultFeed',
)
FEED_FIELD_MAP = (
('product_id', 'prodid'),
('method', 'method'),
('title', 'x-wr-calname'),
('description', 'x-wr-caldesc'),
('timezone', 'x-wr-timezone'),
)
ITEM_EVENT_FIELD_MAP = (
# 'item_guid' becomes 'unique_id' when passed to the SyndicationFeed
('unique_id', 'uid'),
('title', 'summary'),
('description', 'description'),
('start_datetime', 'dtstart'),
('end_datetime', 'dtend'),
('updated', 'last-modified'),
('created', 'created'),
('timestamp', 'dtstamp'),
('transparency', 'transp'),
('location', 'location'),
('geolocation', 'geo'),
('link', 'url'),
('organizer', 'organizer'),
('rrule', 'rrule'),
)
class ICal20Feed(SyndicationFeed):
u"""
iCalendar 2.0 Feed implementation.
"""
mime_type = 'text/calendar; charset=utf8'
def write(self, outfile, encoding):
u"""
Writes the feed to the specified file in the
specified encoding.
"""
cal = Calendar()
cal.add('version', '2.0')
cal.add('calscale', 'GREGORIAN')
for ifield, efield in FEED_FIELD_MAP:
val = self.feed.get(ifield)
if val is not None:
cal.add(efield, val)
self.write_items(cal)
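        # Older releases of the icalendar package exposed as_string(); newer
        # ones provide to_ical(), so fall back between the two.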
to_ical = getattr(cal, 'as_string', None)
if not to_ical:
to_ical = cal.to_ical
outfile.write(to_ical())
def write_items(self, calendar):
"""
Write all events to the calendar
"""
for item in self.items:
event = Event()
for ifield, efield in ITEM_EVENT_FIELD_MAP:
val = item.get(ifield)
if val is not None:
event.add(efield, val)
calendar.add_component(event)
DefaultFeed = ICal20Feed
|
[
"Michal@Michal-pc"
] |
Michal@Michal-pc
|
ae8959b431a7168d2ad9a743c5bb013247a0bc6a
|
96956ec099a1213047e067b70773ca9f642ef7b7
|
/item/migrations/0003_item_owner.py
|
3a7007c5df8fefb59f6e803044fa0c3e52c9eeb8
|
[] |
no_license
|
svyatoslavgalas/mp_vision
|
107701e3c703b733d8d8768115f07cce9837ea66
|
3db17fb28cb4630c72e8db40f4fe64bb68e36b6a
|
refs/heads/main
| 2023-08-17T04:09:25.884660
| 2021-09-27T09:09:33
| 2021-09-27T09:09:33
| 410,812,906
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
# Generated by Django 3.2.7 on 2021-09-26 19:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('item', '0002_alter_item_category'),
]
operations = [
migrations.AddField(
model_name='item',
name='owner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='items', to=settings.AUTH_USER_MODEL, verbose_name='Владелец'),
),
]
|
[
"v*?%4=463pyU[d*"
] |
v*?%4=463pyU[d*
|
0a5ffbe378c430de0e47891dad38a24150a9bbf2
|
67c6db3455463a00b30fafc83240de3fbe1fb7d6
|
/Topik10_HIMPUNAN (DATA SET)/Tugas1.py
|
a8adc0cbb31570d7c05951d2e640e9a11c5eb259
|
[] |
no_license
|
rbnset/Teknik_Pemrogaman
|
fe5e2439b9af27696154af9b2c108f7b7e29edb0
|
36ecfd494804363d3a6d61011480be508e378325
|
refs/heads/main
| 2023-08-19T01:04:10.327289
| 2021-07-19T03:14:31
| 2021-07-19T03:14:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,214
|
py
|
# Favorite food sets program
Nasgor = {'Robin', 'Taufiq', 'Iqbal', 'Bayu', 'Dwi'}
Bakso = {'Galih', 'Taufiq', 'Niken', 'Anggie', 'Dwi'}
Mie = {'Robin', 'Dwi', 'Bayu', 'Nimas'}
Nasgor.add('Argo')
Bakso.remove('Taufiq')
print('Daftar yang menyukai Nasgor : ', Nasgor)
print('Daftar yang menyukai Bakso : ', Bakso)
print('Daftar yang menyukai Mie : ', Mie)
print()
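# In Python sets, `&` is the intersection (people who like both foods) and
# `^` is the symmetric difference (people who like exactly one of the two).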
print('Daftar yang menyukai Nasgor dan Bakso : ', (Nasgor & Bakso))
print('Jumlah yang menyukai Nasgor dan Bakso : ', len(Nasgor & Bakso))
print('Daftar yang tidak menyukai Nasgor dan Bakso : ', (Nasgor ^ Bakso))
print('Jumlah yang tidak menyukai Nasgor dan Bakso : ', len(Nasgor ^ Bakso))
print()
print('Daftar yang menyukai Nasgor dan Mie : ', (Nasgor & Mie))
print('Jumlah yang menyukai Nasgor dan Mie : ', len(Nasgor & Mie))
print('Daftar yang tidak menyukai Nasgor dan Mie : ', (Nasgor ^ Mie))
print('Jumlah yang tidak menyukai Nasgor dan Mie : ', len(Nasgor ^ Mie))
print()
print('Daftar yang menyukai Bakso dan Mie : ', (Bakso & Mie))
print('Jumlah yang menyukai Bakso dan Mie : ', len(Bakso & Mie))
print('Daftar yang tidak menyukai Bakso dan Mie : ', (Bakso ^ Mie))
print('Jumlah yang tidak menyukai Bakso dan Mie : ', len(Bakso ^ Mie))
|
[
"rbn.setiyawan@gmail.com"
] |
rbn.setiyawan@gmail.com
|
5ba7f81e1658fabbaf3412669f7616ed03993089
|
0d4885832213478e621d6b58a894d1d3fbf10e89
|
/catkin_ws/src/testing/src/test_import.py
|
3a90e40fc7eec7c31e5895366519fea9fdc44a6c
|
[] |
no_license
|
dtbinh/ros_basic
|
38be51e0cc157a2a7a69065f771a3cb5dea6cd31
|
5c9b2de1d01b84924a75f5e7f725f43d09033e33
|
refs/heads/master
| 2022-07-28T13:35:37.192202
| 2020-05-27T15:45:41
| 2020-05-27T15:45:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 821
|
py
|
#! /usr/bin/env python
import rospy
import time
from common_dir.common_things import cool, CmdVelPub
if __name__ == '__main__':
cool('TheConstruct')
stop_time = 1
move_time = 3
rospy.init_node('test_import', log_level=rospy.INFO)
move_object = CmdVelPub()
rospy.loginfo("Starting...")
move_object.move_robot(direction="stop")
time.sleep(stop_time)
rospy.loginfo("Forwards...")
move_object.move_robot(direction="forwards")
time.sleep(move_time)
rospy.loginfo("Stopping...")
move_object.move_robot(direction="stop")
time.sleep(stop_time)
rospy.loginfo("Forwards...")
move_object.move_robot(direction="backwards")
time.sleep(move_time)
rospy.loginfo("Stopping...")
move_object.move_robot(direction="stop")
|
[
"osong@ucsd.edu"
] |
osong@ucsd.edu
|
0af08865ace2e4eed3d32921381267d797d9d853
|
411b56e76febcf809e85505bb38387a8e5518ab3
|
/django_date_picker/wsgi.py
|
61b1acc38e71532d4a9b693c29586a71e9e445dd
|
[] |
no_license
|
Ngahu/Django-Date-picker-
|
865835ee674c0f65d4deab2ca21f48d2b88d2254
|
ec32ce697bb4778eb563741ac3a7f4de26edf825
|
refs/heads/master
| 2020-05-23T23:24:38.445389
| 2019-05-16T10:15:05
| 2019-05-16T10:15:05
| 186,993,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
"""
WSGI config for django_date_picker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_date_picker.settings")
application = get_wsgi_application()
|
[
"ngahu@joels-MacBook-Pro.local"
] |
ngahu@joels-MacBook-Pro.local
|
35bf39dd863f6c5d6245f5da78ea0fdf09490e02
|
161a765674b837cf4a2f48879a987dce3f40d15f
|
/api_dev/conftest.py
|
b2d36162d3cf30037f9f43e1065cff26c17a45e0
|
[
"MIT"
] |
permissive
|
ManhTai01/api
|
19662cae94f9182dabb183a37bf7715be04254d9
|
09d7a8dd0a11c1552c34cd9006c5afae94897afc
|
refs/heads/master
| 2023-05-23T17:09:36.339576
| 2021-06-14T12:19:13
| 2021-06-14T12:19:13
| 376,811,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
import pytest
from api_dev.users.models import User
from api_dev.users.tests.factories import UserFactory
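
# Route MEDIA_ROOT to a per-test temporary directory so uploaded files in
# tests never touch the real media folder (autouse applies it to every test).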
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> User:
return UserFactory()
|
[
"lemanhtaictbp@gmail.com"
] |
lemanhtaictbp@gmail.com
|
d1e5895ff33ca6ce5a15f6feacdd7059200c09c0
|
a94941c225c3029755efe3bb56eb13e1c8bcfde5
|
/examples/batch_compute_LiNaMgAl_rvd+neq.py
|
942d239904b9fd2b7eadcee0d4aa00c4fcff8861
|
[] |
no_license
|
shuhebing/cavd
|
80d3d48ec19cfde727d67ecb8d74deac084c19a1
|
e5146e89a0cfdf35bcc5677808c9cc4ea96dae95
|
refs/heads/master
| 2022-11-12T06:51:32.110927
| 2019-12-14T14:47:17
| 2019-12-14T14:47:24
| 276,862,815
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,001
|
py
|
import os
import cavd
# Reference values obtained from data mining
std_surf_list = [0.8018, 1.1705, 0.8435, 0.6119]
std_list = [2.0562, 2.4439, 2.0644, 1.8552]
ions = ["Li", "Na", "Mg", "Al"]
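# For each mobile ion, sweep every ordered CIF in its folder, run cavd.AllCom8
# with that ion's reference values, and log per-file results and failures to
# CSV files under a results/ subdirectory.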
for i in range(len(ions)):
filenames=[]
path = "../../Li_Na_Mg_Al_cifs_order_revise/" + ions[i] + "/"
    if not os.path.exists(path+"results"):
        os.mkdir(path+"results")
        print("results directory created successfully!")
    else:
        print(path+"results already exists!")
output_path = path+"results/"
com_status = open(output_path+"channel_channel_com_status_" + ions[i] + ".csv","w")
com_status.write('filename,status\n')
results = open(output_path+"com_results_" + ions[i] + ".csv","w")
results.write('filename\tradii\tsymmSymbolInCif\tsymmNumInCif\tsymprec\tvoids_num\tsym_opt_num\tuni_voids_num\tnei_dises\tRT_a\tRT_b\tRT_c\tR_j\talpha\tmigrate_rad\ta\tb\tc\tdimofNetwork\tdimofChannels\trecover_rate\trecover_state\tmigrate_mindis\tCoordinates\n')
for j in os.listdir(path):
if ".cif" in j:
filenames.append(j)
for filename in filenames:
filename = path+filename
print(filename)
try:
radii,symm_sybol,symm_number,symprec,voids_num,sym_opt_num,uni_voids_num,minRad,migrant_alpha,nei_dises,migrant_radius,conn_val,connect,dim_network,dims_channel,recover_rate,recover_state,migrate_mindis,coordination_list = cavd.AllCom8(filename,std_surf_list[i],ions[i],True,True,None)
results.write(filename)
results.write('\t')
results.write(str(radii))
results.write('\t')
results.write(symm_sybol)
results.write('\t')
results.write(str(symm_number))
results.write('\t')
results.write(str(symprec))
results.write('\t')
results.write(str(voids_num))
results.write('\t')
results.write(str(sym_opt_num))
results.write('\t')
results.write(str(uni_voids_num))
results.write('\t')
results.write(str(nei_dises))
results.write('\t')
results.write(str(conn_val[0]))
results.write('\t')
results.write(str(conn_val[1]))
results.write('\t')
results.write(str(conn_val[2]))
results.write('\t')
results.write(str(minRad))
results.write('\t')
results.write(str(migrant_alpha))
results.write('\t')
results.write(str(migrant_radius))
results.write('\t')
results.write(str(connect[0]))
results.write('\t')
results.write(str(connect[1]))
results.write('\t')
results.write(str(connect[2]))
results.write('\t')
results.write(str(dim_network))
results.write('\t')
results.write(str(dims_channel))
results.write('\t')
results.write(str(recover_rate))
results.write('\t')
results.write(str(recover_state))
results.write('\t')
results.write(str(migrate_mindis))
results.write('\t')
whole_coords = []
for coord in coordination_list:
nei_list = []
for nei in coord["coord_nei"]:
near_tmp = (nei[0].species_string,nei[1])
nei_list.append(near_tmp)
tmp = (coord["label"],nei_list)
whole_coords.append(tmp)
results.write(str(whole_coords))
results.write("\n")
print(filename+" compute complete!")
out = filename+','+'compute complete!'+'\n'
com_status.write(out)
except Exception as e:
print(filename,str(e))
com_status.write(filename+','+str(e)+'\n')
continue
print(ions[i]+" contained file compute completed!")
print()
print("All File compute completed!")
|
[
"yeanjiang11@qq.com"
] |
yeanjiang11@qq.com
|
37d97636dfa963b89a41c572f266ecd760ddab16
|
f6670ecfba1471f4ebb12f3bf63a4d5c7fcfda83
|
/Search.py
|
50a1b42aec225d9298364f9fc48c66885012b368
|
[] |
no_license
|
wfyhz/alfred-pkmanager
|
b4d3eebd1ce5412b06c1829f14670a27f14e2454
|
c0e08f49855c75ede1f4439598d2c455424071a5
|
refs/heads/master
| 2022-12-30T06:53:27.260156
| 2020-10-25T16:31:57
| 2020-10-25T16:31:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,771
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# --------------------------------------
# Created by Konfido on 2020-07-16
# --------------------------------------
import re
import Config
from Items import Items, Display
from Utils import Utils as U
C = Config.Config().configs
class File():
def __init__(self):
self.path = ""
self.file_name = ""
self.yaml = ""
self.content = ""
self.title = ""
@staticmethod
def get_yaml_item(item, content):
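        # Extract the value of `item:` (singular or plural key) from the YAML
        # front matter block delimited by `---` markers.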
match = re.search(
r'^---.*?\b{}s?: (.*?)\n.*?---'.format(item), content, re.I | re.S)
return match.group(1) if match is not None else None
def get_file_title(self, path):
""" yaml_title > level_one_title > file_name """
self.path = path
self.file_name = U.get_file_name(self.path)
all_text = U.get_file_content(self.path)
match = re.search(r'(---.*?---)([\s\S]+)', all_text, re.I | re.S)
if match and len(match.groups()) == 2:
self.yaml = match.group(1)
self.content = match.group(2).strip()
else:
U.log(self.file_name)
self.yaml, self.content = "", all_text
        yaml_title = self.get_yaml_item("title", self.yaml)
        h1_match = re.search(r'^# (.+)$', self.content, re.M)
        level_one_title = h1_match.group(1).strip() if h1_match else None
        self.title = yaml_title or level_one_title or self.file_name or ""
return self.title
@classmethod
def get_file_info(cls, path):
""" get file's info in a dict """
cls.get_file_title(cls, path)
folder = path.split("/")[-2] if len(path.split('/'))>2 else "<root>"
size = U.get_file_meta(cls.path, "st_size")
ctime_float = U.get_file_meta(cls.path, "st_birthtime")
mtime_float = U.get_file_meta(cls.path, "st_mtime")
cdate_string = U.format_date(ctime_float, "%Y-%m-%d")
mdate_string = U.format_date(mtime_float, "%Y-%m-%d")
file_infos = {
'path': cls.path,
'file_name': cls.file_name,
'yaml': cls.yaml,
'content': cls.content,
'title': cls.title,
'folder': folder,
'cdate': cdate_string,
'mdate': mdate_string,
'ctime': ctime_float,
'mtime': mtime_float,
'size': size,
'synonyms': cls.get_yaml_item('synonyms', cls.yaml),
'hidden': cls.get_yaml_item('hidden', cls.yaml)
}
return file_infos
class Search():
""" Handle all search problems"""
@classmethod
def get_sorted_files(cls, paths, reverse=True):
"""Get info_list of all files sorted by modification time in reserve """
all_paths = U.get_all_files_path(paths)
if not all_paths:
return None
else:
matched_list = list()
for path in all_paths:
info = File.get_file_info(path)
                if info["hidden"] != 'True':
matched_list.append(info)
sorted_files = sorted(
matched_list, key=lambda k: k['mtime'], reverse=reverse)
return sorted_files
@classmethod
def title_search(cls, search_terms, dicted_files):
"""Only refer to the first word of search terms"""
def _synonyms_search(search_terms):
# Return a list of matched notes' title
synonyms = U.json_load(U.path_join(Config.CONFIG_DIR, 'synonyms.json'))
key = []
for k in list(synonyms.keys()):
for s in synonyms[k]:
if search_terms[0].lower() in s.lower():
key.append(k)
return key
matched_list = []
for f in dicted_files:
if f['title'].lower() in [t.lower() for t in _synonyms_search(search_terms)]:
matched_list.append(f)
elif search_terms[0].lower() in f['title'].lower():
matched_list.append(f)
return matched_list
@classmethod
def and_search(cls, search_terms, dicted_files):
def _matched(terms, string):
for term in terms:
if not re.search(term, string, re.I):
return False
return True
matched_list = []
for f in dicted_files:
if _matched(search_terms, f['content']):
matched_list.append(f)
return matched_list
@classmethod
def or_search(cls, search_terms, dicted_files):
def _matched(terms, file):
for term in terms:
if re.search(term, file, re.I):
return True
return False
matched_list = []
for f in dicted_files:
if _matched(search_terms, f['content']):
matched_list.append(f)
return matched_list
@classmethod
def metric_search(cls, metric, keys, dicted_files):
# Search notes by keys of specific metrics (tag, snippet ...)
matched_notes = []
for f in dicted_files:
has_keys = []
# Check YAML frontier to get note's assigned metrics
match = File.get_yaml_item(metric, f["content"])
if match:
has_keys.extend(match.strip('[]').split(', '))
# Check content to get note's specific metrics
if metric in ["tag","tags"] and not C["search_tag_yaml_only"]:
has_keys.extend(re.findall(r'\b#(.*?)\b', f['content'], re.I))
elif metric == "language" and not C["search_snippet_yaml_only"]:
has_keys.extend(re.findall(r'```(\S+)', f['content'], re.I))
if not has_keys:
continue
else:
for k in keys:
if k in has_keys:
matched_notes.append(f)
return matched_notes
@staticmethod
def markdown_links_search(path, filename=False):
"Get a list of Markdown links which contained in the file (by given path/filename)"
abs_path = path if not filename else U.get_abspath(path, query_dict=True)
content = U.get_file_content(abs_path)
# exclude images link: ![]() and url: [](https://)
links_info = re.findall(
r'(?<!!)(\[(.*?)\]\(((?!http).*?md)\))', content)
link_list = [l[2] for l in links_info]
return link_list
@staticmethod
def backlinks_search(filename):
"Query dict with path/filename to get a backlink list"
filename = U.get_file_name(filename, with_ext=True)
backlinks = U.json_load(U.path_join(Config.CONFIG_DIR, 'backlinks.json'))
        matched_list = backlinks[filename] if filename in backlinks else []
return matched_list
|
[
"konfido.du@outlook.com"
] |
konfido.du@outlook.com
|
9f7d68ae346094a658eaf03f008763831f5e1c0d
|
230a41bc0f05a4e2a02a4d671f0d48e517833b1f
|
/youtube_api.py
|
d2891f5c18d5f7150f8cfcc21e3dc053507288f2
|
[] |
no_license
|
FuckBrains/autoTube
|
7c9288d6ba09799b91592e2950aa96ff75c0526c
|
4bbd6e319f7b6143f64f43e13c8ef66d04b03781
|
refs/heads/master
| 2023-04-18T05:48:31.653287
| 2021-04-30T07:56:09
| 2021-04-30T07:56:09
| 363,264,201
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,887
|
py
|
import httplib2
import http.client as httplib
import os
import sys
import time
import random
import logging
import settings
import game_config
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaFileUpload
from google_auth_oauthlib.flow import InstalledAppFlow
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
# Explicitly tell the underlying HTTP transport library not to retry, since
# we are handling retry logic ourselves.
httplib2.RETRIES = 1
# Maximum number of times to retry before giving up.
MAX_RETRIES = 10
# Always retry when these exceptions are raised.
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected,
httplib.IncompleteRead, httplib.ImproperConnectionState,
httplib.CannotSendRequest, httplib.CannotSendHeader,
httplib.ResponseNotReady, httplib.BadStatusLine)
# Always retry when an apiclient.errors.HttpError with one of these status
# codes is raised.
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the Google Developers Console at
# https://console.developers.google.com/.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
# This OAuth 2.0 access scope allows an application to upload files to the
# authenticated user's YouTube channel, but doesn't allow other types of access.
YOUTUBE_UPLOAD_SCOPE = "https://www.googleapis.com/auth/youtube.upload"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
with information from the Developers Console
https://console.developers.google.com/
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
"""
VALID_PRIVACY_STATUSES = ("public", "private", "unlisted")
SCOPES = ['https://www.googleapis.com/auth/youtube']
scopes = ['https://www.googleapis.com/auth/youtube.readonly', 'https://www.googleapis.com/auth/youtube.upload']
API_SERVICE_NAME = 'youtube'
API_VERSION = 'v3'
def get_authenticated_service(game):
# flow = InstalledAppFlow.from_client_secrets_file(CLIENT_SECRETS_FILE, SCOPES)
# credentials = flow.run_console()
# return build(API_SERVICE_NAME, API_VERSION, credentials=credentials)
flow = flow_from_clientsecrets(settings.RESULT_DIRECTORY + game['client_secrets_file'],
scopes,
message=MISSING_CLIENT_SECRETS_MESSAGE)
storage = Storage(settings.RESULT_DIRECTORY + game['credentials'])
credentials = storage.get()
if credentials is None or credentials.invalid:
args = argparser.parse_args()
args.noauth_local_webserver = True
credentials = run_flow(flow, storage, args)
return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
http=credentials.authorize(httplib2.Http()))
def initialize_upload(youtube, file_path, title, description, category, tags, language):
body = dict(
snippet=dict(
title=title,
description=description,
tags=tags,
categoryId=category,
defaultLanguage=language,
defaultAudioLanguage=language
),
status=dict(
privacyStatus="public"
)
)
# Call the API's videos.insert method to create and upload the video.
insert_request = youtube.videos().insert(
part=",".join(body.keys()),
body=body,
media_body=MediaFileUpload(file_path, chunksize=1024*1024, resumable=True)
)
video_id = resumable_upload(insert_request)
return video_id
# This method implements an exponential backoff strategy to resume a
# failed upload.
def resumable_upload(insert_request):
response = None
error = None
retry = 0
while response is None:
try:
logging.info("Uploading file...")
status, response = insert_request.next_chunk()
if 'id' in response:
logging.info("Video id '%s' was successfully uploaded." %
response['id'])
return response['id']
else:
exit("The upload failed with an unexpected response: %s" % response)
except HttpError as e:
if e.resp.status in RETRIABLE_STATUS_CODES:
error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status,
e.content)
else:
raise
except RETRIABLE_EXCEPTIONS as e:
error = "A retriable error occurred: %s" % e
except Exception as e:
if error is not None:
logging.error(e)
if error is not None:
logging.error(error)
retry += 1
if retry > MAX_RETRIES:
exit("No longer attempting to retry.")
max_sleep = 2 ** retry
sleep_seconds = random.random() * max_sleep
logging.info("Sleeping %f seconds and then retrying..." % sleep_seconds)
time.sleep(sleep_seconds)
def upload_video(game, title, file_path, description, category, tags, fallback_title):
logging.info('Starting youtube auth service')
youtube = get_authenticated_service(game)
language = game['language']
try:
logging.info('Starting upload process')
video_id = initialize_upload(youtube, file_path, title,
description, category, tags, language)
logging.info('video uploaded')
return video_id
except HttpError as e:
logging.error("An HTTP error %d occurred:\n%s" % (e.resp.status, e.content))
if str(e.content).find('invalidTitle'):
video_id = initialize_upload(youtube, file_path, fallback_title,
description, category, tags, language)
logging.info('video uploaded with fallback title')
return video_id
def upload_thumbnail(game, video_id, file):
try:
youtube = get_authenticated_service(game)
youtube.thumbnails().set(
videoId=video_id,
media_body=file
).execute()
logging.info('Thumbnail uploaded')
except HttpError as e:
logging.warning("An HTTP error %d occurred:\n%s" % (e.resp.status, e.content))
def get_video_thumbnail(video_id):
thumbnails_game_credentials = game_config.GAMES['marbella_vice'] #marbella_vice credentials are the ones with read videos scope access
try:
youtube = get_authenticated_service(thumbnails_game_credentials)
request = youtube.videos().list(part="snippet", id=video_id
)
response = request.execute()
thumbnail_url = response['items'][0]['snippet']['thumbnails']['maxres']['url']
return thumbnail_url
except HttpError as e:
logging.warning("An HTTP error %d occurred:\n%s" % (e.resp.status, e.content))
|
[
"chuski1212@gmail.com"
] |
chuski1212@gmail.com
|
1d5855b109ac9744bb8b5e63efff59fe7fbe918a
|
8a8d24cbd349ab9d590dd8de5b82f6b664131a37
|
/dangdangwang/spiders/dangdang.py
|
d8cd4716af842cf91e8afcac2140a386f200ad86
|
[] |
no_license
|
gaoxiaofan0530/dangdang-redis
|
2a06d54a58207b7ce222e2a0a052d308cb734b9c
|
8e0b8496d27d79d1e0705698f3a196491b111359
|
refs/heads/master
| 2020-06-22T23:31:19.649543
| 2019-07-23T13:06:49
| 2019-07-23T13:06:49
| 198,431,153
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,959
|
py
|
# -*- coding: utf-8 -*-
import re
import requests
import scrapy
from ..items import DangdangwangItem
from scrapy_redis.spiders import RedisCrawlSpider
class DangdangSpider(RedisCrawlSpider):
name = 'dangdang'
redis_key = 'dangdang:start_urls'
allowed_domains = ['dangdang.com']
# start_urls = ['http://search.dangdang.com/?key=python']
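    # Crawl flow: open every product detail page on the current search-result
    # page, then follow the "next page" link recursively.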
def parse(self, response):
url_list = response.xpath('//p[@class="name"]/a/@href').extract()
for url in url_list:
print(url)
yield scrapy.Request(url=url,callback=self.parse_item)
next_url = response.xpath('//li[@class="next"]/a/@href').extract_first()
next_url = 'http://search.dangdang.com'+next_url
print('详情页:' + next_url)
if next_url != '':
yield scrapy.Request(url=next_url,callback=self.parse)
    def parse_item(self, response):
        urls = str(response)
        m = re.search('http://product.dangdang.com/\d+.html', urls)
        # Match the URL against a regex to decide whether this is an e-book
        # Not an e-book (physical product page)
        if m:
            item = DangdangwangItem()
            # Get the book title
            book_name = response.xpath('//div[@class="name_info"]/h1/@title').extract_first()
            item['book_name'] = book_name
            # Get the book price
            book_price = response.xpath('//p[@id="dd-price"]/text()')[1].extract().strip()
            item['book_price'] = book_price
            # # Get the book headline
            # book_introduction = response.xpath('//div[@id="product_info"]//h2/span/text()').extract_first().strip()
            # # book_introduction = ''.join(response.xpath('//div[@id="content"]/div[@class="descrip"]/text()').extract())
            # if book_introduction == '':
            #     item['book_introduction'] = '该书没有标明标题'
            # else:
            #     item['book_introduction'] = re.sub('\s', '', book_introduction)  # strip all whitespace characters
            # Get the parent tags that hold author, publisher and publication date
            spans = response.xpath('//div[@class="messbox_info"]/span')
            count = 0
            book_authors = ''  # author(s)
            book_publishinghouse = ''  # publisher
            book_publisheddate = ''  # publication date
            for span in spans:  # the three child spans hold author, publisher and publication date
                if count == 0:  # author(s)
                    ass = span.css('a')
                    for a in ass:  # there may be several authors
                        book_authors += a.css('::text').extract_first() + ','
                if count == 1:  # publisher
                    ass = span.css('a')
                    if ass is None:
                        book_publishinghouse = "没有标明出版社"
                    else:
                        book_publishinghouse = str(span.css('a::text').extract_first())
                if count == 2:  # publication date
                    book_publisheddate = str(span.css('::text').extract_first())
                count += 1
            item['book_authors'] = book_authors
            # Check whether the author field is empty
            if item['book_authors'] == '':
                item['book_authors'] = '暂无作者信息'
            else:
                item['book_authors'] = book_authors[:-1]  # slice off the trailing comma
            # item['book_publishinghouse'] = book_publishinghouse
            # Check whether the publisher is empty
            book_publishinghouse = re.sub(' ', '', book_publishinghouse)  # some publisher values contain spaces; strip them with a regex
            item['book_publishinghouse'] = book_publishinghouse
            if item['book_publishinghouse'] == '':  # publisher is an empty string
                item['book_publishinghouse'] = '没有标明出版社'
            elif item['book_publishinghouse'] == 'None':  # publisher equals 'None' (the string, not the keyword)
                if item['book_authors'] == '':  # 'None' means nothing was scraped; some books also omit the author
                    item['book_publishinghouse'] = response.xpath(
                        '//div[@class="messbox_info"]/span/a/text()').extract_first().strip()
                # item['book_publishinghouse'] = book_publishinghouse
                # item['book_publishinghouse'] = '没有标明出版社'
                if item['book_publishinghouse'] == '':
                    item['book_publishinghouse'] = '没有标明出版社'
                else:
                    item['book_publishinghouse'] = book_publishinghouse
            item['book_publisheddate'] = book_publisheddate
            # Check whether the publication date is empty
            book_publisheddate = re.sub(' ', '', book_publisheddate)
            item['book_publisheddate'] = book_publisheddate
            if item['book_publisheddate'] is None:
                item['book_publisheddate'] = '没有标明出版时间'
            elif item['book_authors'] != '' and item['book_publishinghouse'] == 'None' or item[
                    'book_publishinghouse'] == '':
                # author present but publisher missing
                item['book_publisheddate'] = response.xpath('//div[@class="messbox_info"]/span/a/text()')[1].extract().strip()
            elif item['book_authors'] == '' and item['book_publishinghouse'] == '' and item['book_publisheddate'] == '':
                item['book_authors'] = ''
                item['book_publishinghouse'] = ''
                item['book_publisheddate'] = ''
            elif item['book_authors'] == '' and item['book_publishinghouse'] == '':
                # both author and publisher are missing
                item['book_publisheddate'] = response.xpath(
                    '//div[@class="messbox_info"]/span/a/text()').extract_first().strip()
            elif item['book_authors'] == '':
                # author is missing
                item['book_publisheddate'] = response.xpath('//div[@class="messbox_info"]/span/a/text()')[
                    1].extract().strip()
            else:
                item['book_publisheddate'] = book_publisheddate
            if item['book_publisheddate'] == '':
                item['book_publisheddate'] = '没有标明出版时间'
            else:
                item['book_publisheddate'] = book_publisheddate
            # Get the number of comments
            item['book_commentsnumbers'] = ''.join(response.xpath('//a[@id="comm_num_down"]/text()').extract())
            # Get the book's URL
            item['book_url'] = response.url
            yield item
        else:
            # E-book product page
            item = DangdangwangItem()
            # Get the book title
            book_name = response.xpath('//div[@id="productBookDetail"]/h1/span/@title').extract_first()
            item['book_name'] = book_name
            # Get the book price via the JSON API endpoint
            urls = response.url
            num = ''.join(re.findall('\d', urls))
            url = 'http://e.dangdang.com/media/api.go?action=getMedia&deviceSerialNo=html5&macAddr=' \
                  'html5&channelType=html5&permanentId=20190513102517003402311866144833876&returnType=j' \
                  'son&channelId=70000&clientVersionNo=6.8.0&platformSource=DDDS-P&fromPlatform=106&device' \
                  'Type=pconline&token=&refAction=browse&saleId=' + num + '&promotionType=1'
            print(url)
            price = requests.get(url)  # send the request
            info = price.json()  # parse the JSON
            price1 = info['data']['mediaSale']
            price2 = dict(price1['mediaList'][0])  # locate the record in the JSON
            price3 = price2['salePrice']  # extract the price
            book_price = price3
            item['book_price'] = book_price
            # # Get the book introduction
            # book_introduction = price2['descs']
            # item['book_introduction'] = book_introduction
            # if book_introduction == '':
            #     item['book_introduction'] = '该书没有标明简介'
            # else:
            #     book_introduction = ''.join(book_introduction)
            #     item['book_introduction'] = re.sub('\s', '', book_introduction)  # strip all whitespace characters
            # Get the parent tag that holds author, publisher and publication date
            spans = response.xpath('//div[@class="explain_box"]')
            count = 0
            book_authors = ''  # author(s)
            for span in spans:  # iterate over the tags and their children
                if count == 0:  # author(s)
                    ass = span.css('p#author')
                    for a in ass:  # there may be several authors
                        book_authors += a.css('span a::text').extract_first() + ','
                count += 1
            # Check whether the author field is empty
            if book_authors == '':
                item['book_authors'] = '没有标明作者'
            else:
                item['book_authors'] = book_authors[:-1]  # slice off the trailing comma
            # Get the publisher
            book_publishinghouse = response.xpath('//p[@id="publisher"]/span/a/text()').extract_first()
            # Check whether the publisher is empty
            if book_publishinghouse == '':
                item['book_publishinghouse'] = '该书没有标明出版社'
            elif book_publishinghouse is None:
                item['book_publishinghouse'] = '该书没有标明出版社'
            else:
                book_publishinghouse = response.xpath(
                    '//p[@id="publisher"]/span/a/text()').extract_first().strip()
                item['book_publishinghouse'] = book_publishinghouse
                # Check again whether the publisher is empty
                if item['book_publishinghouse'] == '':
                    item['book_publishinghouse'] = '没有标明出版社'
                else:
                    item['book_publishinghouse'] = book_publishinghouse
            # Get the publication date
            book_publisheddate = response.xpath('//div[@class="explain_box"]/p/text()')[2].extract()
            # Check whether the publication date is empty
            if book_publisheddate == '':
                item['book_publisheddate'] = '没有标明出版时间'
            else:
                nian = book_publisheddate[5:9]  # year part of the date
                yue = book_publisheddate[10:12]  # month part of the date
                # ri = book_publisheddate[13:15]  # day part of the date
                book_publisheddate = '出版时间:' + nian + '年' + yue + '月'
                item['book_publisheddate'] = book_publisheddate
            # Get the number of comments
            book_commentsnumber = response.xpath('//div[@class="count_per"]/em/text()')[0].extract()
            # item['book_commentsnumbers'] = book_commentsnumbers[0:1]
            book_commentsnumbers = re.sub('人评论', '', book_commentsnumber)  # drop the '人评论' suffix with a regex
            item['book_commentsnumbers'] = book_commentsnumbers
            # Get the book's URL
            item['book_url'] = response.url
            yield item
|
[
"gaoxiaofan0419@gmail.com"
] |
gaoxiaofan0419@gmail.com
|