Column schema for the records below (type and observed range per column):

| Column | Type | Range / values |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 2–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–69 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–118 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | lengths 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | lengths 2–10.3M |
| authors | list | lengths 1–1 |
| author_id | string | lengths 0–212 |
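
Each record below is one row of this schema, shown as labelled fields followed by the file `content`. As a minimal sketch of how rows like these might be consumed downstream (the JSON Lines file name `records.jsonl` and the filter thresholds are hypothetical, not part of the data):

```python
import json

def iter_small_permissive_python(path="records.jsonl", max_bytes=10_000):
    """Yield (repo_name, path, content) for small, permissively licensed Python rows."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            if (row.get("language") == "Python"
                    and row.get("license_type") == "permissive"
                    and not row.get("is_vendor")
                    and not row.get("is_generated")
                    and row.get("length_bytes", 0) <= max_bytes):
                yield row["repo_name"], row["path"], row["content"]

if __name__ == "__main__":
    for repo, file_path, content in iter_small_permissive_python():
        print(repo, file_path, len(content))
```
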
blob_id: eae4139235f9f8a9e3db84a997374aaec24d1ad3
directory_id: 25d48f4bd5daa18346c4e5722ba293b7f9e1ed17
path: /advice_app/apps.py
content_id: 418aeb8865140663ab9c3359ea6ddf0b20b0a317
detected_licenses: [] | license_type: no_license | repo_name: collins-kipkoech/advice-forum
snapshot_id: 4c66775dc6cccbd22e302996cd924c2e17ea3d76 | revision_id: 76a3256bb776978738cfe04d06455eb4b84c9a68 | branch_name: refs/heads/master
visit_date: 2023-01-25T05:40:29.027979 | revision_date: 2020-12-10T10:00:02 | committer_date: 2020-12-10T10:00:02
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 94 | extension: py
content:
from django.apps import AppConfig

class AdviceAppConfig(AppConfig):
    name = 'advice_app'
authors: ["collinskipkoech95@gmail.com"] | author_id: collinskipkoech95@gmail.com

blob_id: a26f167e87d65c7455661ef744c0b729f55514f3
directory_id: 7b74ed82584ad4b321782b33077efb4755253d8b
path: /ForumMediaScraper/__init__.py
content_id: ffa479e3a232cf050e934bb3be1a197fe5a76d18
detected_licenses: ["MIT"] | license_type: permissive | repo_name: jesseVDwolf/ForumMediaScraper
snapshot_id: b64768dcafb72eeab591ea791a6e5341669ecb61 | revision_id: 097e2af26a31bf510503d3133007ff31e29f986f | branch_name: refs/heads/master
visit_date: 2020-09-02T06:10:26.010033 | revision_date: 2020-03-16T19:20:29 | committer_date: 2020-03-16T19:20:29
github_id: 219,151,771 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 53 | extension: py
content:
from .Scraper import SeleniumScraper, ScraperConfig
authors: ["jessevanderwolf@gmail.com"] | author_id: jessevanderwolf@gmail.com

blob_id: dcbe0bf334ec32945138f9a38f9e12e4932ceb1d
directory_id: 7a8254888cc804918c9dc37e5ac98020bcd1e263
path: /Y2Project/jobs/migrations/0004_auto_20180423_1100.py
content_id: 2287671c679348c8aed2d557e8d883db6b039d04
detected_licenses: [] | license_type: no_license | repo_name: X00136016/2ndYearProj
snapshot_id: 70e78b6501f7b4bf2841ef8fa0a8ecb0192c9389 | revision_id: 8e7bcc4ec593413f97d5237c6e72f783beba56a2 | branch_name: refs/heads/master
visit_date: 2021-04-06T02:51:35.412247 | revision_date: 2018-04-23T11:03:52 | committer_date: 2018-04-23T11:03:52
github_id: 125,371,752 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 722 | extension: py
content:
# Generated by Django 2.0.4 on 2018-04-23 11:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0003_auto_20180420_1316'),
]
operations = [
migrations.AlterField(
model_name='category',
name='slug',
field=models.CharField(max_length=30, unique=True),
),
migrations.AlterField(
model_name='company',
name='slug',
field=models.CharField(max_length=30, unique=True),
),
migrations.AlterField(
model_name='job',
name='slug',
field=models.CharField(max_length=30, unique=True),
),
]
authors: ["post159510@gmail.com"] | author_id: post159510@gmail.com

blob_id: b1c9cec48e52149224b7f6b46ab40418c4a04bd4
directory_id: 53fab060fa262e5d5026e0807d93c75fb81e67b9
path: /backup/user_268/ch119_2020_03_29_20_51_10_410341.py
content_id: ec2aff375f7597950ddebfff3175c1b7883f2146
detected_licenses: [] | license_type: no_license | repo_name: gabriellaec/desoft-analise-exercicios
snapshot_id: b77c6999424c5ce7e44086a12589a0ad43d6adca | revision_id: 01940ab0897aa6005764fc220b900e4d6161d36b | branch_name: refs/heads/main
visit_date: 2023-01-31T17:19:42.050628 | revision_date: 2020-12-16T05:21:31 | committer_date: 2020-12-16T05:21:31
github_id: 306,735,108 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 213 | extension: py
content:
def fatorial (n):
    fat=1
    while n>0:
        fat*=n
        n-=1
    return fat

def calcula_euler(x,n):
    a=0
    e=0
    while a<n:
        e+= 1*(x**a)/fatorial(a)
        a+=1
    return e
authors: ["you@example.com"] | author_id: you@example.com

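The two helpers above compute the truncated Taylor series e**x ≈ sum of x**a / a! over the first n terms. A small usage sketch (the call values are illustrative, not part of the original file), restating the functions so the snippet runs on its own:

```python
import math

def fatorial(n):
    fat = 1
    while n > 0:
        fat *= n
        n -= 1
    return fat

def calcula_euler(x, n):
    e, a = 0, 0
    while a < n:
        e += (x ** a) / fatorial(a)
        a += 1
    return e

# Ten terms already agree with math.exp(1) to about six decimal places.
print(calcula_euler(1, 10))  # 2.7182815255731922
print(math.exp(1))           # 2.718281828459045
```
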
blob_id: 58384bc02df8bda751e75beae7ca9478f6527032
directory_id: 91157da2a8f415d956a70c6ba50c9871ea3ea484
path: /COIRequestReal/mysite/mysite/urls.py
content_id: 0006442896c9a08556d5a1d11db67825943f771d
detected_licenses: [] | license_type: no_license | repo_name: hussainjhaveri/COIWebsite-repository
snapshot_id: b2cf5b3f0d6ea26eeee19793b00f5e102dfd633c | revision_id: 9af534990d6e360a2d918686f6b1eb95fbbc0c81 | branch_name: refs/heads/master
visit_date: 2022-12-13T22:01:23.766216 | revision_date: 2020-08-27T18:00:30 | committer_date: 2020-08-27T18:00:30
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 1,172 | extension: py
content:
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.views import LoginView
from django.urls import path, include
from django.contrib.auth import views as auth_views
urlpatterns = [
path('admin/', admin.site.urls),
path('polls/', include('polls.urls')),
#path('login/', auth_views.LoginView.as_view(), name='login'),
#path('polls/', include('django.contrib.auth.urls')),
path('accounts/', include('accounts.urls')),
path('login/', LoginView.as_view(), name="login_url"),
]
|
[
"CAlfano1999@gmail.com"
] |
CAlfano1999@gmail.com
|
blob_id: 1277622ac4f58f872e1be501b2991954f9e631fa
directory_id: a4d09564fdd8d5c805fef82cdcbc1950fcfb9aa1
path: /whatsup/whatsup/urls.py
content_id: b11f316e7ba29fd8174ff17f2dcf23358615253f
detected_licenses: ["MIT"] | license_type: permissive | repo_name: ed-commits/whats-up
snapshot_id: f644ad87719e44ca15ef53382eb3af6851eae9d4 | revision_id: 6e6e13a6d73420da625d278fab31171e09511d98 | branch_name: refs/heads/main
visit_date: 2023-02-01T13:26:44.545310 | revision_date: 2020-12-17T14:17:21 | committer_date: 2020-12-17T14:17:21
github_id: 315,723,594 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 994 | extension: py
content:
"""whatsup URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('django.contrib.auth.urls')),
path('app/', include('whatsupapp.urls')),
]
from django.views.generic import RedirectView
urlpatterns += [
path('', RedirectView.as_view(url='app/', permanent=True)),
]
authors: ["edward.alexander.cant@gmail.com"] | author_id: edward.alexander.cant@gmail.com

blob_id: f0a83f9fdaa016a77c36ded1c32bac9542760796
directory_id: aaf3b75157eca3c17ad88696ae7d75caf849782e
path: /manage.py
content_id: 1f1bc122b8263e997c092e9d702539c1ddc46f41
detected_licenses: [] | license_type: no_license | repo_name: shiluanzzz/flask_movie
snapshot_id: 0dba2ff99ad37795e5f3fcd69223a78fa7fd67eb | revision_id: 98b2ba04295533ca32d94c8e396fe1a51bbad2b9 | branch_name: refs/heads/master
visit_date: 2020-06-19T11:29:39.213224 | revision_date: 2019-07-23T16:49:16 | committer_date: 2019-07-23T16:49:16
github_id: 196,692,534 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 110 | extension: py
content:
# -*- coding:utf-8 -*-
# __author__ = "shitou6"
from app import app
if __name__ == '__main__':
    app.run()
authors: ["shiluanzzz@foxmai.com"] | author_id: shiluanzzz@foxmai.com

blob_id: 8c127a8f0667ec8e0a1946eab3c2c673b709a627
directory_id: e785232fbbf1b105e7b6dd6dfcfc84cc9877f98a
path: /Initializers/RandomInitializer.py
content_id: dc7a7d884abb091f89355ae0438d29d8b5258114
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: firestrand/pyOptimize
snapshot_id: 243db846fcd3a7a9d9ae12b06c9dd4edea5c21a3 | revision_id: 03f8e7094e4ffa1836685842f218cd148b038e44 | branch_name: refs/heads/master
visit_date: 2023-07-20T10:30:24.584858 | revision_date: 2023-07-11T17:40:22 | committer_date: 2023-07-11T17:40:22
github_id: 236,263,435 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 441 | extension: py
content:
import numpy as np
import numpy.typing as npt
from Initializers.Initializer import Initializer

class RandomInitializer(Initializer):
    def __init__(self, dimensions: int):
        super().__init__(dimensions)

    def next(self, n: int, lower_bound: npt.ArrayLike, upper_bound: npt.ArrayLike) -> npt.ArrayLike:
        rand_value = np.random.random(self.dimensions)
        return rand_value * (upper_bound - lower_bound) + lower_bound
authors: ["92124324+tsilvers-ms@users.noreply.github.com"] | author_id: 92124324+tsilvers-ms@users.noreply.github.com

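`RandomInitializer.next` above draws one uniform point inside the box defined by the bound arrays (the `n` argument is unused in this version). A minimal usage sketch; the base `Initializers.Initializer` class is not part of this record, so a stand-in that only stores `dimensions` is assumed:

```python
import numpy as np

class Initializer:  # stand-in for Initializers.Initializer (assumed to just store dimensions)
    def __init__(self, dimensions: int):
        self.dimensions = dimensions

class RandomInitializer(Initializer):  # same logic as the record above
    def next(self, n, lower_bound, upper_bound):
        rand_value = np.random.random(self.dimensions)
        return rand_value * (upper_bound - lower_bound) + lower_bound

init = RandomInitializer(dimensions=3)
point = init.next(1, np.array([-5.0, 0.0, 10.0]), np.array([5.0, 1.0, 20.0]))
print(point)  # one point drawn uniformly inside the per-dimension bounds
```
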
blob_id: fa89f8908b26b40626e1695e044d65aae04fbdce
directory_id: 5a74fbfb317c116a911e3e6d404ed050bdd2baab
path: /problems/email.py
content_id: 1e40b136a9d2965aa4c0d069e58d60bac296a8d0
detected_licenses: [] | license_type: no_license | repo_name: yorkypy/selise-intervie
snapshot_id: 39571fd14efca77240ee33d2ab770c86cb9103da | revision_id: 82da5fc6399652bb080cd3977d6a4c60c47db2a1 | branch_name: refs/heads/master
visit_date: 2023-01-23T05:14:52.903886 | revision_date: 2020-12-05T10:50:53 | committer_date: 2020-12-05T10:50:53
github_id: 318,727,091 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 483 | extension: py
content:
'''Python code to generate emails from a list of names'''
def generateEmail(names):
    email=[]
    temp='@gmail.com'
    for i in range(len(names)):
        for j in range(len(names[i])-1,-1,-1):
            if names[i][j] != ' ':
                temp=names[i][j]+temp
        email.append(temp.lower())
        temp='@gmail.com'
    return email

#Driver Code
if __name__ == "__main__":
    print(generateEmail(['Nima Yonten', 'Sangay Choden', 'Pema Norbu', 'Rangdrel Loop']))
authors: ["nimayonten@gmail.com"] | author_id: nimayonten@gmail.com

blob_id: 0d8a1b443a8d3029209e6655fe7608dc24209d93
directory_id: 8f5c1ad76f3f9aa67d6720154b4884c9fab2ecbc
path: /toontown/parties/DistributedPartyCannonActivityAI.py
content_id: 49b0b8129521331eae5f0c50f38c13862667377b
detected_licenses: [] | license_type: no_license | repo_name: RegDogg/ttr-2014-dev
snapshot_id: eb0d9da3e91b9504b83804c27e1a00d87a0b7220 | revision_id: 8a392ea4697cf15bd83accd01dcf26d0f87557eb | branch_name: refs/heads/master
visit_date: 2023-07-13T02:40:56.171517 | revision_date: 2021-07-12T00:31:28 | committer_date: 2021-07-12T00:31:28
github_id: 372,103,145 | star_events_count: 2 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 3,355 | extension: py
content:
from direct.directnotify import DirectNotifyGlobal
from toontown.parties.DistributedPartyActivityAI import DistributedPartyActivityAI
from toontown.toonbase import TTLocalizer
import PartyGlobals
class DistributedPartyCannonActivityAI(DistributedPartyActivityAI):
notify = DirectNotifyGlobal.directNotify.newCategory("DistributedPartyCannonActivityAI")
def __init__(self, air, parent, activityTuple):
DistributedPartyActivityAI.__init__(self, air, parent, activityTuple)
self.cloudColors = {}
self.cloudsHit = {}
def setMovie(self, todo0, todo1):
pass
def setLanded(self, toonId):
avId = self.air.getAvatarIdFromSender()
if avId != toonId:
self.air.writeServerEvent('suspicious', avId=avId, issue='Toon tried to land someone else!')
return
if not avId in self.toonsPlaying:
self.air.writeServerEvent('suspicious', avId=avId, issue='Toon tried to land while not playing the cannon activity!')
return
self.toonsPlaying.remove(avId)
reward = self.cloudsHit[avId] * PartyGlobals.CannonJellyBeanReward
if reward > PartyGlobals.CannonMaxTotalReward:
reward = PartyGlobals.CannonMaxTotalReward
av = self.air.doId2do.get(avId, None)
if not av:
self.air.writeServerEvent('suspicious', avId=avId, issue='Toon tried to award beans while not in district!')
return
# TODO: Pass a msgId(?) to the client so the client can use whatever localizer it chooses.
# Ideally, we shouldn't even be passing strings that *should* be localized.
self.sendUpdateToAvatarId(avId, 'showJellybeanReward', [reward, av.getMoney(), TTLocalizer.PartyCannonResults % (reward, self.cloudsHit[avId])])
av.addMoney(reward)
self.sendUpdate('setMovie', [PartyGlobals.CANNON_MOVIE_LANDED, avId])
del self.cloudsHit[avId]
def b_setCannonWillFire(self, cannonId, rot, angle, toonId):
self.toonsPlaying.append(toonId)
self.cloudsHit[toonId] = 0
self.sendUpdate('setCannonWillFire', [cannonId, rot, angle])
def cloudsColorRequest(self):
avId = self.air.getAvatarIdFromSender()
self.sendUpdateToAvatarId(avId, 'cloudsColorResponse', [self.cloudColors.values()])
def requestCloudHit(self, cloudId, r, g, b):
avId = self.air.getAvatarIdFromSender()
if not avId in self.toonsPlaying:
self.air.writeServerEvent('suspicious', avId=avId, issue='Toon tried to hit cloud in cannon activity they\'re not using!')
return
self.cloudColors[cloudId] = [cloudId, r, g, b]
self.sendUpdate('setCloudHit', [cloudId, r, g, b])
self.cloudsHit[avId] += 1
def setToonTrajectoryAi(self, launchTime, x, y, z, h, p, r, vx, vy, vz):
self.sendUpdate('setToonTrajectory', [self.air.getAvatarIdFromSender(), launchTime, x, y, z, h, p, r, vx, vy, vz])
def setToonTrajectory(self, todo0, todo1, todo2, todo3, todo4, todo5, todo6, todo7, todo8, todo9, todo10):
pass
def updateToonTrajectoryStartVelAi(self, vx, vy, vz):
avId = self.air.getAvatarIdFromSender()
self.sendUpdate('updateToonTrajectoryStartVel', [avId, vx, vy, vz])
def updateToonTrajectoryStartVel(self, todo0, todo1, todo2, todo3):
pass
authors: ["regdogg.acr@gmail.com"] | author_id: regdogg.acr@gmail.com

blob_id: 1d79d3999f2cd00590a314b2dd60798da2fd4bad
directory_id: 8bc451f94f1c944ee53819b8e8bdf369bc637bcf
path: /python/copyLocalRemote.py
content_id: 43569583f75d64f414bc0fd4b5da7d5782243b92
detected_licenses: [] | license_type: no_license | repo_name: vigneshpalanivelr/all_scripts
snapshot_id: e1b78bd7f90458e6fb19aa1838496da0fa22371e | revision_id: c865157b44be7b2e8ae8fbe1f5d6182e09f3f302 | branch_name: refs/heads/master
visit_date: 2022-11-24T01:47:10.563315 | revision_date: 2020-04-26T09:02:16 | committer_date: 2020-05-29T10:32:55
github_id: 198,067,913 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 9,163 | extension: py
content:
#!/bin/python2
import os
import sys
# import yum
# import glob
import yaml
import json
import time
import stat
import shutil
import fnmatch
import argparse
import requests
import fileinput
import subprocess
# import prettytable
#import custom modules
# sys.path.append(os.path.dirname('/var/lib/jenkins/workspace/playbook-provisioning-job/all_scripts/python/pySetenv/variables'))
# sys.path.append(os.path.dirname('/var/lib/jenkins/workspace/playbook-provisioning-job/all_scripts/python/pySetenv/packages'))
# sys.path.append(os.path.dirname('/root/all_scripts/python/pySetenv/variables/'))
# sys.path.append(os.path.dirname('/root/all_scripts/python/pySetenv/packages/'))
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/pySetenv/variables/' )
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/pySetenv/packages/' )
import logger
import global_vars
# print sys.path
class loadLogDirectory():
def __init__(self):
if not os.path.exists(logDirectory): os.makedirs(logDirectory)
class CopyExtract(object):
def __init__(self):
execLog.debug('Object - Created Class Object')
pass
def copy_remote(self, allDirs=None, source='../', destination=None):
execLog.debug('Action - Started coping to Remote path')
for i in allDirs:
self.copytree(source+i, destination+i)
execLog.debug('Done - Completed coping to Remote path')
def copy_destination(self, allDirs=None, source=None, yumDir=None, initDir=None, systmdDir=None, RHEL=None, repos=None):
execLog.debug('Action - Started coping to Destination path')
for i in allDirs:
if 'repo' in i:
execLog.debug('Action - Found Repo files')
for basename, filename in self.find_files(source+i, '*repo*'):
self.copytree(filename , yumDir, permission = variables['yumDirPer'])
if 'epel' in basename and 'epel' in repos:
self.find_replace(yumDir+'/'+basename, 'OS_VERSION', RHEL)
self.get_GPG_KEY(
variables['repositories']['epel']['repo']+variables['repositories']['epel']['gpgurl']+RHEL,
variables['GPG_KEY_Dir']+variables['repositories']['epel']['gpgkey']+RHEL
)
if 'jenkins' in basename and 'jenkins' in repos:
self.find_replace(yumDir+'/'+basename, 'JENKINS_REPO', variables['repositories']['jenkins']['repo'])
self.find_replace(yumDir+'/'+basename, 'JENKINS_GPGKEY', variables['repositories']['jenkins']['gpgkey'])
self.get_GPG_KEY(
variables['repositories']['jenkins']['repo']+variables['repositories']['jenkins']['gpgurl'],
variables['GPG_KEY_Dir']+variables['repositories']['jenkins']['gpgkey']
)
if 'artifactory' in basename and 'artifactory' in repos:
self.find_replace(yumDir+'/'+basename, 'ARTIFACTORY_REPO', variables['repositories']['artifactory']['repo'])
if 'jfrog-cw' in basename and 'jfrog-cw' in repos:
self.find_replace(yumDir+'/'+basename, 'CW_LOCAL_REPO', variables['repositories']['jfrog-cw']['repo'])
self.find_replace(yumDir+'/'+basename, 'OS_VERSION', RHEL)
if 'jfrog-epel' in basename and 'jfrog-epel' in repos:
self.find_replace(yumDir+'/'+basename, 'EPEL_LOCAL_REPO', variables['repositories']['jfrog-epel']['repo'])
self.find_replace(yumDir+'/'+basename, 'OS_VERSION', RHEL)
if 'jfrog-jenkins' in basename and 'jfrog-jenkins' in repos:
self.find_replace(yumDir+'/'+basename, 'JENKINS_LOCAL_REPO', variables['repositories']['jfrog-jenkins']['repo'])
self.find_replace(yumDir+'/'+basename, 'OS_VERSION', RHEL)
if 'jfrog-postgresql' in basename and 'jfrog-postgresql' in repos:
self.find_replace(yumDir+'/'+basename, 'POSTGRESQL_LOCAL_REPO', variables['repositories']['jfrog-postgresql']['repo'])
self.find_replace(yumDir+'/'+basename, 'OS_VERSION', RHEL)
elif 'service' in i and RHEL == '6':
execLog.debug('Action - Found Service files for RHEL {}'.format(RHEL))
for basename, filename in self.find_files(source+i, '*Initd*'):
self.copytree(filename , initDir, permission = variables['initDirPer'])
elif 'service' in i and RHEL == '7':
execLog.debug('Action - Found Service files for RHEL {}'.format(RHEL))
for basename, filename in self.find_files(source+i, '*Initd*'):
self.copytree(filename , initDir, permission = variables['initDirPer'])
elif 'service' in i and RHEL == '8':
execLog.debug('Action - Found Service files for RHEL {}'.format(RHEL))
for basename, filename in self.find_files(source+i, '*Services*'):
self.copytree(filename , systmdDir, permission = variables['systemdDirPer'])
elif 'python' in i:
execLog.debug('Action - Found Python files')
for basename, filename in self.find_files(source+i, '*.py*'):
self.permission_restore(filename, variables['pythonPer'],)
execLog.info('Permisn - {} : {} : {}'.format(source+i, filename, variables['pythonPer']))
execLog.debug('Done - Completed coping to Destination path')
def copytree(self, src, dst, symlinks = False, ignore = None, permission = None):
# Ref : https://stackoverflow.com/questions/1868714/how-do-i-copy-an-entire-directory-of-files-into-an-existing-directory-using-pyth
if not os.path.exists(dst):
os.makedirs(dst)
shutil.copystat(src, dst)
execLog.info('New File - {} : {}'.format(src, dst))
if os.path.isdir(src):
copy_list = os.listdir(src)
elif os.path.isfile(src):
copy_list = [src.split('/')[-1]]
src = '/'.join(src.split('/')[:-1])
if ignore:
exclude = ignore(src, copy_list)
copy_list = [x for x in copy_list if x not in exclude]
execLog.warning('Files Excluded List: {}'.format(exclude))
for item in copy_list:
s = os.path.join(src, item)
d = os.path.join(dst, item)
if symlinks and os.path.islink(s):
if os.path.lexists(d):
os.remove(d)
os.symlink(os.readlink(s), d)
try:
st = os.lstat(s)
mode = stat.S_IMODE(st.st_mode)
os.lchmod(d, mode)
except:
pass # lchmod not available
elif os.path.isdir(s):
self.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
if permission == None:
execLog.info('Copying - {} : {}'.format(s, d))
elif permission != None:
self.permission_restore(d , permission)
execLog.info('Copying - {} : {} : {}'.format(s, d, permission))
def find_files(self, directory, regex):
for root, dirs, files in os.walk(directory):
for basename in files:
if fnmatch.fnmatch(basename, regex):
filename = os.path.join(root, basename)
yield basename, filename
def find_replace(self, filename, textToSearch, textToReplace):
with open(filename,'r+') as file:
filedata = file.read()
filedata = filedata.replace(textToSearch, textToReplace)
file.truncate(0)
file.seek(0)
file.write(filedata)
execLog.info('Find Re - {} : {} : {}'.format(filename, textToSearch, textToReplace))
def permission_restore(self, file, permission):
subprocess.call(['chmod', permission, file])
def get_GPG_KEY(self, url, key_file):
with open(key_file, 'w') as gpg_file:
gpg_file.write(requests.get(url).text)
execLog.info('Set GPG - {}'.format(key_file))
if __name__ == '__main__':
# Argparse Argments and variables defination
parser = argparse.ArgumentParser(description='Copy scripts from local to remote and enable services and repos')
parser.add_argument('RHEL' ,action='store' ,help='RHEL Major Version' ,choices=['6','7','8'] )
parser.add_argument('YAMLvarFile' ,action='store_const' ,help='Load Variables from Ansible Vars' ,const='../ansible/vars/vars.yml' )
parser.add_argument('-repos' ,action='append' ,help='Add list of repos to enable' ,dest='repos' ,default=[] )
# arguments = parser.parse_args(['7','-repos','epel','-repos','jenkins','-repos','artifactory'])
arguments = parser.parse_args()
RHEL = arguments.RHEL
YAMLvarFile = arguments.YAMLvarFile
repos = arguments.repos
# Load variables from ansible vars
variables = global_vars.get_ansible_vars(YAMLvarFile)
logDirectory = variables['scriptHomeDir']+'/'+variables['scriptsDir']+'/'+variables['logsDir']
# Execute a class object to make log dir
loadLogDirectory()
print 'Created Log Directory : {}'.format(logDirectory)
# Define logging module, File Handler & Stream Handler
# Define Log file name for later use
execLogger = 'cp-local-remote-log' + time.strftime('-%Y-%m-%d-%Hh-%Mm-%Ss-%Z') + '.log'
execLog = logger.setupLogger('Copy Local to Remote', logDirectory +'/'+ execLogger)
execLog.debug('Object - Successfully Loadded Ansible Vars')
# Creating class object
copy_extract = CopyExtract()
copy_extract.copy_remote(
allDirs = [variables['srcPythonDir'],variables['srcRepoDir'],variables['srcServicesDir']],
source = '../',
destination = variables['scriptHomeDir']+'/'+variables['scriptsDir']+'/'
)
copy_extract.copy_destination(
allDirs = [variables['srcPythonDir'],variables['srcRepoDir'],variables['srcServicesDir']],
source = variables['scriptHomeDir']+'/'+variables['scriptsDir']+'/',
yumDir = variables['yumDir'],
initDir = variables['initDir'],
systmdDir = variables['systemdDir'],
RHEL = RHEL,
repos = repos
)
authors: ["vignesh.palanivelr@gmail.com"] | author_id: vignesh.palanivelr@gmail.com

blob_id: 2548483638d7dd06d52cb259f10dc85706bedab2
directory_id: 1c51468a57ee27a56a206f4781d83e40c5906dc9
path: /IT6036_AppSecurityand ServiceSideDevelopment_SecureWebApp/NZataglance_WebApp/nzataglance_StudentCopy/nzataglance/settings.py
content_id: 372c293c5c654ff4e4a11eef8986ebd04d4fda6b
detected_licenses: [] | license_type: no_license | repo_name: 91031431/IT6036_Project_security_WebApp
snapshot_id: c17b23c2611c12011d9af47e415bc8da083eee95 | revision_id: c4dc45607fd2d340e31daa3d7e3a9454f0453fac | branch_name: refs/heads/master
visit_date: 2020-09-13T00:40:08.546683 | revision_date: 2019-11-28T05:38:45 | committer_date: 2019-11-28T05:38:45
github_id: 222,608,442 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 3,394 | extension: py
content:
"""
Django settings for nzataglance project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$9w@wu45qbjfy@%cejqt+zma9-9y!u&9w_yfu#5#^8wux1urkw'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'home',
'story',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'nzataglance.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'nzataglance.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
# Redirect to home URL after login (Default redirects to /accounts/profile/)
LOGIN_REDIRECT_URL ='/'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
authors: ["91031431@learn.cpp.ac.nz"] | author_id: 91031431@learn.cpp.ac.nz

blob_id: 713619704f16c067823bf3168e44b435e5b19360
directory_id: f31e73f282c645206a3d8d666fb774a4c17256ab
path: /lab_exam/stripped_lab_exam_0_files/python/test_linear.py
content_id: e489076d36e59268218b27bf34fe5f4eb963089a
detected_licenses: [] | license_type: no_license | repo_name: ATanggara/Statistical_Machine_Learning_2019
snapshot_id: e59d13efbfa18362dea670d9c79859030c2f5668 | revision_id: 746fbea75d25ebfe5a08f1e9569dc24518b1213f | branch_name: refs/heads/master
visit_date: 2020-04-28T08:47:19.644026 | revision_date: 2019-04-08T00:54:39 | committer_date: 2019-04-08T00:54:39
github_id: 175,141,766 | star_events_count: 3 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 1,697 | extension: py
content:
import numpy as np
import traceback
import sys
import linear
def test_linear(x, y, w_true):
################################################
#
# INPUTS:
# x: input data, shape d by n
# y: output targets, shape n by 1
# w_true: true weights, shape d by 1, or None
#
# OUTPUTS:
# w: linear regression weights, d by 1
#
################################################
d, n = x.shape
assert y.shape == (n, 1)
try:
# call the student's code:
w_student = linear.linear_least_squares(x, y)
except:
# the student's code raised an exception:
traceback.print_exc()
return False
assert w_student.shape == (d, 1)
if w_true is not None:
# check if the correct weights have been calculated:
w_matches = np.allclose(w_true, w_student)
print('w_matches', w_matches)
return w_matches
else:
# check if the predictions match on the training data:
y_student = np.dot(x.T, w_student)
y_matches = np.allclose(y, y_student)
print('y_matches', y_matches)
return y_matches
def random_data(d, n):
print('d = %i n = %i' % (d, n))
# linear model weights:
w_true = np.random.randn(d, 1)
# input datapoints:
x = np.random.randn(d, n)
# scalar outputs:
y = np.dot(x.T, w_true)
return x, y, w_true
if __name__ == '__main__':
total_marks = 0
print('First test:')
x, y, w = random_data(d=2, n=3)
if test_linear(x=x, y=y, w_true=w):
print('passed')
total_marks = total_marks + 1
else:
print('failed')
print()
print('Second test:')
x, y, w = random_data(d=2, n=1)
if test_linear(x=x, y=y, w_true=None):
print('passed')
total_marks = total_marks + 1
else:
print('failed')
print()
print('Tests completed, total_marks =', total_marks)
authors: [""] | author_id: ""

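The harness above imports a `linear` module and calls `linear.linear_least_squares(x, y)`, which is not included in this record. A sketch of an implementation that would satisfy both tests (exact weights on the noise-free over-determined case, matching predictions on the under-determined one), assuming the d-by-n / n-by-1 shapes documented in `test_linear`:

```python
import numpy as np

def linear_least_squares(x, y):
    """Return w (d by 1) minimising ||x.T @ w - y||^2 for x of shape (d, n) and y of shape (n, 1)."""
    # lstsq also returns the minimum-norm solution when n < d (the second test case).
    w, *_ = np.linalg.lstsq(x.T, y, rcond=None)
    return w
```

Saved as `linear.py` next to the test, a module like this would let both checks in `test_linear.py` pass.
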
blob_id: 786f3bbb1beef416669dead093cf35a4cc4a0190
directory_id: 5e861593f0c43e2a863c282c49f9bc4d15ef57f8
path: /config.py
content_id: 607540c37b73c789a7d13df6eef1fdde69512632
detected_licenses: [] | license_type: no_license | repo_name: wddzz/Taobao-Spider
snapshot_id: 2e974a61d65cdc7ebf8aea491801b45dbd75df81 | revision_id: 4749ab5c7073c1c0b8baa78b73b2cdcd174d3c8c | branch_name: refs/heads/master
visit_date: 2020-03-14T16:21:05.059612 | revision_date: 2018-05-01T12:36:21 | committer_date: 2018-05-01T12:36:21
github_id: 131,696,572 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 68 | extension: py
content:
MONGO_URL = "localhost"
MONGO_DB = "taobao"
MONGO_TABLE = "product"
authors: ["406452653@qq.com"] | author_id: 406452653@qq.com

blob_id: ad32732cdc2c11b4cb11a0fd2ce443d0845a1ff0
directory_id: aac44b0f483abd424a2237d1ace5c71b2c58fcf7
path: /collective/recipe/zopeinstancemultiplier/tests/test_docs.py
content_id: b0ec06207ba1de5c95c700460a549a2fc9ae7b3a
detected_licenses: [] | license_type: no_license | repo_name: collective/collective.recipe.zopeinstancemultiplier
snapshot_id: 5d3b2bde3772435e92e885a2e59c96b9b09129f2 | revision_id: 5c296aad241f8801d4b619fc2d954b53abd8253c | branch_name: refs/heads/master
visit_date: 2023-08-24T08:39:40.697499 | revision_date: 2017-07-18T20:01:42 | committer_date: 2017-07-18T20:01:42
github_id: 97,505,280 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 1,385 | extension: py
content:
# -*- coding: utf-8 -*-
"""Doctest runner."""
__docformat__ = 'restructuredtext'
from zope.testing import renormalizing
import doctest
import unittest
import zc.buildout.testing
import zc.buildout.tests
optionflags = (doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_ONLY_FIRST_FAILURE)
def setUp(test):
zc.buildout.testing.buildoutSetUp(test)
# Install the recipe in develop mode
zc.buildout.testing.install_develop('collective.recipe.zopeinstancemultiplier', test)
# Install any other recipes that should be available in the tests
def test_suite():
suite = unittest.TestSuite((
doctest.DocFileSuite(
'../../../../README.rst',
setUp=setUp,
tearDown=zc.buildout.testing.buildoutTearDown,
optionflags=optionflags,
checker=renormalizing.RENormalizing([
# If want to clean up the doctest output you
# can register additional regexp normalizers
# here. The format is a two-tuple with the RE
# as the first item and the replacement as the
# second item, e.g.
# (re.compile('my-[rR]eg[eE]ps'), 'my-regexps')
zc.buildout.testing.normalize_path,
]),
),
))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
authors: ["rafaelbco@gmail.com"] | author_id: rafaelbco@gmail.com

blob_id: 747ee26960ee81836192aa2bc3f54b34ff0cb185
directory_id: b72cbafda25941a411a06bd688004fb6ec8b0397
path: /easy/107.二叉树的层次遍历II.py
content_id: a69e85a80af385b83429b308ff23e60d41529079
detected_licenses: [] | license_type: no_license | repo_name: GGjin/algorithm_python
snapshot_id: 92d8ea04c8298bb2fcb65299d8f925a0c750209d | revision_id: de36824647a10199963c64b0d32b8fb581c9eef6 | branch_name: refs/heads/master
visit_date: 2023-05-06T22:21:55.505057 | revision_date: 2021-05-28T07:14:44 | committer_date: 2021-05-28T07:14:44
github_id: 276,314,598 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 1,176 | extension: py
content:
# Given a binary tree, return the bottom-up level order traversal of its nodes' values
# (i.e. level by level from the layer containing the leaves up to the layer containing
# the root, traversing each level from left to right).
#
# For example:
# Given the binary tree [3,9,20,null,null,15,7],
#
#     3
#    / \
#   9  20
#     /  \
#    15   7
#
#
# its bottom-up level order traversal is:
#
# [
#   [15,7],
#   [9,20],
#   [3]
# ]
#
# Related Topics: Tree, Breadth-First Search
# 👍 364 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
# Definition for a binary tree node.
from typing import List

class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

class Solution:
    def levelOrderBottom(self, root: TreeNode) -> List[List[int]]:
        res = []

        def dfs(root: TreeNode, depth: int):
            if not root:
                return res
            if len(res) < depth + 1:
                res.append([])
            res[depth].append(root.val)
            dfs(root.left, depth + 1)
            dfs(root.right, depth + 1)

        dfs(root, 0)
        return res[::-1]
# leetcode submit region end(Prohibit modification and deletion)
authors: ["gg.jin.yu@gmail.com"] | author_id: gg.jin.yu@gmail.com

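A small driver, not part of the original record, that builds the [3,9,20,null,null,15,7] example from the problem statement and checks the expected output (it assumes `TreeNode` and `Solution` from the file above are in scope):

```python
root = TreeNode(3)
root.left = TreeNode(9)
root.right = TreeNode(20)
root.right.left = TreeNode(15)
root.right.right = TreeNode(7)

print(Solution().levelOrderBottom(root))  # [[15, 7], [9, 20], [3]]
```
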
blob_id: 961442e0a4c5e31226154616a33f17f55e8b0e2e
directory_id: 1652e4eed966da8c1eeccb29e29c87d74985de6d
path: /randomdatatools/xmldata.py
content_id: a86f48f0abcb0f45d61425ddaa824bdfb3e75f23
detected_licenses: [] | license_type: no_license | repo_name: mchamlee/randomdatatools
snapshot_id: 7f7beab4a96e8235e9d81b829a242108e2954094 | revision_id: 6711594e45219adfe7d4bae4d3d1253d168133ef | branch_name: refs/heads/master
visit_date: 2020-04-27T05:17:59.894218 | revision_date: 2019-03-27T06:07:37 | committer_date: 2019-03-27T06:07:37
github_id: 174,077,439 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 3,317 | extension: py
content:
import os
from pathlib import Path
import glob
from xml.etree import ElementTree as ET
def xml_split(file_to_split, tag_to_split_on, splits_to_generate=0):
"""Splits an XML file into multiple files. Places output files in output subdirectory of input file location.
Args:
file_to_split: Path to the file to split.
tag_to_split_on: Tag/element within the XML to split on. Should be a top-level tag, just under the root.
splits_to_generate: Number of files (splits) to generate, distribution round robin. If not specified, will
split each found tag_to_split_on into it's own file.
"""
# Quickly open and close file to capture file name/path
input_file = open(file_to_split)
input_file_name = input_file.name.replace(Path(input_file.name).name, Path(input_file.name).stem)
input_file.close()
split_tag = tag_to_split_on
num_files = splits_to_generate
context = ET.iterparse(file_to_split, events=('end',))
if num_files == 0:
index = 0
for event, elem in context:
if elem.tag == split_tag:
index += 1
with open(input_file_name + '_' + str(index).rjust(4, '0') + '.xml', 'wb') as f:
f.write(b"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
f.write(ET.tostring(elem))
else:
xml_element_tree_list = [None for i in range(num_files)]
tree_num = 0
for event, elem in context:
if elem.tag == split_tag:
if xml_element_tree_list[tree_num] is None:
xml_element_tree_list[tree_num] = ET.Element('XMLELEMENT')
xml_element_tree_list[tree_num].extend(elem.iter(split_tag))
else:
xml_element_tree_list[tree_num].extend(elem.iter(split_tag))
if tree_num >= (num_files - 1):
tree_num = 0
else:
tree_num += 1
for file_num in range(num_files):
if xml_element_tree_list[file_num] is not None:
with open(input_file_name + '_' + str(file_num).rjust(4, '0') + '.xml', 'wb') as f:
f.write(ET.tostring(xml_element_tree_list[file_num]))
def xml_combine(files_to_combine, tag_to_combine_on):
"""Combines multiple XML files into one. Places output file in output subdirectory of input file location.
Args:
files_to_combine: Path containing the files to combine.
tag_to_combine_on: Tag/element within the XML to combine on. Ideally a top-level tag, just under the root.
"""
tag_to_find = ".//" + tag_to_combine_on
xml_files = glob.glob(files_to_combine + "/*.xml")
xml_element_tree = ET.Element('XMLELEMENT')
# See if output subdir exists, and if not create it
if not os.path.exists(files_to_combine + "/output"):
os.makedirs(files_to_combine + "/output")
for xml_file in xml_files:
data = ET.parse(xml_file).getroot()
if data.tag != tag_to_combine_on:
data = ET.parse(xml_file).find(tag_to_find)
xml_element_tree.append(data)
if xml_element_tree is not None:
output_file = open(files_to_combine + "/output/output.xml", 'wb')
output_file.write(ET.tostring(xml_element_tree))
authors: ["matthew.chamlee@gmail.com"] | author_id: matthew.chamlee@gmail.com

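A usage sketch for the two helpers above (the file and tag names are hypothetical, and the module is assumed to be importable as `randomdatatools.xmldata`): `xml_split` writes the numbered part files next to the input file, while `xml_combine` reads every `.xml` file in a directory and writes `output/output.xml` inside it.

```python
from randomdatatools.xmldata import xml_split, xml_combine

# One output file per <record> element found in the export.
xml_split("exports/customers.xml", "record")

# Or distribute the <record> elements round-robin across exactly four files.
xml_split("exports/customers.xml", "record", splits_to_generate=4)

# Recombine every .xml file in the directory under a single root element.
xml_combine("exports", "record")
```
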
blob_id: 8a94d031c49228851ad8353b0268e43fedd56a90
directory_id: 8eca8fbc9bb38599f040485a7cbe741b665afb78
path: /flats_tools.py
content_id: 389fc00cffb4f6b74a391adf8bac36f76b9a21e6
detected_licenses: [] | license_type: no_license | repo_name: andrrew-c/property-data-scraper
snapshot_id: f06b2469814942c306e016837250cdfe8a166c2d | revision_id: 441927ba0fe65726ce022a56d15d5ebb24df632a | branch_name: refs/heads/master
visit_date: 2022-11-15T18:42:47.597552 | revision_date: 2020-07-02T19:03:32 | committer_date: 2020-07-02T19:03:32
github_id: 276,642,795 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 53,342 | extension: py
content:
"""
Name: Flats Tools - Property Data
Purpose: This script contains a number of functions which are used to control
the web scraper.
Description:
Author: Andrew Craik
Date: February 2018
"""
import requests
from bs4 import BeautifulSoup as bs
import json
import sqlite3
from datetime import datetime
import time
import collections
import os
import sys
## Random
import random
import csv
# Debug
import pdb
#import testing_db_update as tdu
def getLocationID(url_search, url_find, postcode):
"""
Returns the outcode when given a postcode and some soup
The outcode is a code used by the property website from postcode->outcode
Returns string in format "OUTCODE^xxxxx"
Inputs:
url_search: string holding the first part of the property website search
url_find: string containing the 'find properties' url - used to check whether searching a REGION or an OUTCODE
postcode: The postcode of interest
"""
## Create full URL
url_location = url_search + "?searchLocation={}&locationIdentifier=&useLocationIdentifier=false&buy=For+sale".format(postcode)
## Make request, then soup
res_location = requests.get(url_location)
#with open('res.txt', 'wb') as f: f.write(res_location.content)
#print("******** Writing out to a text file")
soup = bs(res_location.content, 'html.parser')
## Find the outcode dictionary
location_id = soup.find_all('input', id="locationIdentifier")
## Outcode
rm_outcode = location_id[0]['value']
##################################################
## Some places need a 'REGION' some 'OUTCODE'
##################################################
## Need to test a request
sCode = "OUTCODE REGION".split()
locationIdentifier = [i + rm_outcode[rm_outcode.find('^'):len(rm_outcode)] for i in sCode]
## Try both
for i in range(len(sCode)):
## Make request
res = requests.get(url_find, params={'locationIdentifier':locationIdentifier[i], 'index':0})
## Request status
res_stat = str(res.raise_for_status)
status = res_stat[res_stat.find('[')+1:res_stat.find(']')]
## Check that the status works
if not status == '404':
print("Using location code type '{}'".format(sCode[i]))
return locationIdentifier[i]
## Else, need to raise an error
print("ERROR (getLocationID): no match found in '{}'".format(sCode))
def getPropertyURLs(soup):
"""
DISCONTINUED
Returns a list of the property URLs for a given postcode (or area)
"""
## Properties (?)
props1 = soup.find_all('a', class_='propertyCard-link')
props2 = [i for i in props1 if i.find('address')]
## Extract properties URLs
prop_urls = [i['href'] for i in props2 if i['href']!='']
#
return prop_urls
def getPropertyBasicInfo(url, soup):
"""
Returns a list of dictionaries holding basic information on each property
in the search results
"""
##Now pull out some basic information on all properites
scripts = soup.find_all('script')
sc = [s.text for s in scripts if str(s).find('jsonModel')!=-1 and str(s).find('properties')!=-1]
## Extract JSON text string from scripts
try:
js_txt = [s[s.find('{'):] for s in sc][0]
js_dict = json.loads(js_txt)
## All the other information comes from the individual
## We take, ID, summary, display address
##, property description, property URL, property images
## Init property info list
props = []
## Loop through each property in JSON data
for p in js_dict['properties']:
## Added or reduced - separate
addOrReduceDate = ''
## If 'on' not found in string (i.e. 'on dd/mm/yyyy')
if p['addedOrReduced'].find('on') == -1:
addOrReduce = p['addedOrReduced']
## Else, date string contained
else:
addOrReduce = p['addedOrReduced'][:p['addedOrReduced'].find('on')-1]
addOrReduceDate = p['addedOrReduced'][p['addedOrReduced'].find('on')+3:]
###
## Extract information on property
props.append({'propertyId':p['id']
, 'propURL':url+p['propertyUrl']
, 'displayAddress':p['displayAddress']
, 'propertyDescription':p['propertyTypeFullDescription']
, 'listingUpdateReason':p['listingUpdate']['listingUpdateReason']
, 'listingUpdateDate':p['listingUpdate']['listingUpdateDate']
, 'addedOrReduced':addOrReduce
, 'addedOrReducedDate':addOrReduceDate
})
## Returns the list
return props
except:
return []
def getAllPropertyURLs(postcode, url_find, url, result_per_page, payload):
"""
Given a URL this function will return a list of the unique URLs
related to properties.
For example, URL will relate to the search terms for a single postcode area,
this function will iterate through each of the pages to get all of the properties URLs
and return those as a list, removing duplicates.
"""
## Init list to hold all URLs
all_props = []
## Init loop stop to False
blank_results = False
## Init idx to zero
idx = 0
## Sleep time in second
sleeptime = 2
## While resulsts are not blank
while not blank_results:
## Sleep time between iterations
print("getAllPropertyURLs: Sleeping for {} seconds.".format(sleeptime))
## Update query payload
payload.update({'index':idx*result_per_page})
## Make request
print("\nMaking request for postcode {}, page = {}".format(postcode, idx+1))
res = requests.get(url_find, params=payload)
time.sleep(sleeptime)
#with open('res.txt', 'wb') as f: f.write(res.content)
#print("******** Writing out to a text file")
## Soup
print("Making soup for postcode '{}'".format(postcode))
soup = bs(res.content, 'html.parser')
## Get properties results for a single iteration
print("Getting properties URLs")
new_list = getPropertyBasicInfo(url, soup)
#
print("\nThere are {:,} properties in the current list".format(len(all_props)))
## If there are properties in the soup
if len(new_list) > 0:
## Add URLs to list
all_props.extend(new_list)
print("Current list of properties now = {:,}".format(len(all_props)))
#
## Iterate index loop
idx += 1
else:
## Else, we have run out of pages, return list
final_result = []
#
final_result.append(all_props[0])
for p in range(1, len(all_props)):
if all_props[p]['propertyId'] not in [i['propertyId'] for i in final_result]:
final_result.append(all_props[p])
#
return final_result
def sleepProgram(bigProcess=False):
"""
For big processes we should have 15 seconds inclued
For others, no need (e.g. iterating through serach results)
"""
## Make sure process doesn't throttle website
rand = random.uniform(0,1)
if bigProcess and rand>0.8:
sleep = 15
print("Big process, sleeping for {} seconds.".format(sleep))
time.sleep(sleep)
else:
if rand>0.6:
sleep = 5
print("getPropertyInfo: Sleeping for {} seconds".format(sleep))
time.sleep(sleep)
else:
time.sleep(1)
def getPropertyInfo_TidyVars(basic_dict):
pass
#return dictionary
## Price
tidy = {'price':{None:-9}}
for k in basic_dict.keys():
## If this key should be tidied
if k in tidy.keys():
if basic_dict[k] in tidy[k].keys():
#
basic_dict.update({k:tidy[k][basic_dict[k]]})
return basic_dict
def getPropertyInfo(c, basic_dict, index, ofSize):
"""
Purpose: Get the property information for a single property (given the basic information scraped from the search results)
Given a base URL (for the website) and a 'properties specific' URL, return a dictionary of attributes for the properties.
c: sqlite3 cursor for a connection
basic_dict: dictionary holding the basic information for a single property gained from the search results
index: integer holding the index of the current property within the postcode
ofSize: integer holding the total size of properties in the postcode
"""
#
## Separate URL for this properties
#
full_url = basic_dict['propURL']
## If listing update date contains a string - recode it
#
if basic_dict['listingUpdateDate'] != None:
basic_dict.update({'listingUpdateDate2':basic_dict['listingUpdateDate']})
basic_dict.update({'listingUpdateDate':None})
#
## Else, give it something
else:
basic_dict.update({'listingUpdateDate2':None})
## Copy original for final
final_result = dict.copy(basic_dict)
## If property has chanegs
if propertyShouldBeUpdated(c, basic_dict):
## Updating property information to log
print("Property {} of {} ({:.1%}) with ID {} being updated/loaded".format(index+1, ofSize, (index+1)/ofSize, basic_dict['propertyId']))
prop_res = requests.get(full_url)
sleepProgram(True)
## Trying to make soup for properties - strugglingencoding = r.encoding if 'charset' in r.headers.get('content-type', '').lower() else None
soup_prop = bs(prop_res.content, 'html.parser')
#print("Length of soup = {}".format(len(soup_prop)))
#with open('property_info.txt', 'wb') as f: f.write(prop_res.content)
########################################################
## Now with properties soup - let's get the information
########################################################
prop_info = [i for i in soup_prop.find_all('script') if i.text.find('(function(k,v)')!=-1]
#
#print("Length of properties info for a single properties = {}".format(len(prop_info)))
if len(prop_info)>0:
## Need to turn the key information into a dictionary
js_text = prop_info[0].text[prop_info[0].text.find('property'):]
try:
prop_dict = json.loads(js_text[js_text.find('{'):js_text.find(')')])
#
## Key features (held separately) and full description
other_sections = soup_prop.find_all('div', class_='sect')
kf_string = 'Key features'
fd_string = 'Full description'
## Key features (if there) and full description, separate
kf_l = [i.text for i in other_sections if i.text.find(kf_string)!=-1]
fd_l = [i.text[i.text.find(fd_string)+len(fd_string):] for i in other_sections if i.text.find(fd_string)!=-1]
## Extract text from first element of list, if there is any
kf_full = extractFirstIfListHasIt(kf_l)
#
kf = None
if kf_full != None:
if len(kf_full) > 0:
kf = getKeyFeatures(kf_full, kf_string)
tenure, fd = extractTenureAndDescription(fd_l)
## Images
#print("Getting image URLs")
images = soup_prop.find_all('meta', itemprop='contentUrl')
## Now isolate the image content
imgs = [i['content'] for i in images]
#
#
#print("\nURL = {}".format(prop_url)) # and number of floor plans = {}".format(prop_url, prop_dict['floorplanCount']))
## Floor plan
## Reset floor plan list
fp_imgs = []
if prop_dict['floorplanCount'] >= 1:
## List all image links
all_images = soup_prop.find_all('img')
## Isolate the 'src' tag from any tags which contain 'FLOOR' - unique list
fp_imgs = list(set([i['src'] for i in all_images if i.prettify().upper().find('FLOOR')!=-1]))
#
## Update dictionary: Key features, full description, images
prop_dict.update({'tenure':tenure
, 'fullDescription':fd
, 'key_features': kf
, 'imageURLs':imgs
, 'floorplan':fp_imgs})
## Bring together
final_result.update(flatten(prop_dict))
## Tidy up vars
#
final_result = getPropertyInfo_TidyVars(final_result)
## Finally, return dictionary
return final_result
except:
strUpdate = "ERROR: Please check dictionary for property with URL '{}'\n".format(full_url)
strStars = "*"*len(strUpdate)
print("{0}\n{1}\n{0}".format(strStars, strUpdate))
## Add to errors list
#errors.append(basic_dict)
#
return {}
## Else, property should not be updated in DB
else:
#print("No need to request property information")
#
lastSeen = {'propertyId':basic_dict['propertyId'], 'lastFoundInSearch':datetime.today().strftime('%Y%m%d')}
return lastSeen
def getKeyFeatures(string, ignore_string):
"""
Return list of key features
"""
## Split string into list
str_list = [i.replace(ignore_string, '').replace("'",'').replace('"','') for i in string.split('\n')]
str_list2 = [i for i in str_list if i != '']
## Return values
#
return str_list2
def extractFirstIfListHasIt(listname, info='text'):
"""
Returns the first element's text or other information
"""
if len(listname)>0:
## If user hasn't specified type of info, then get text
if info == 'text':
return listname[0]
## Otherwise, get the specified information
else:
return listname[0][info]
else:
return None
def extractTenureAndDescription(listname):
"""
Returns the first element's text or other information
"""
ctr = "\0".split()
charsToRemove = str.maketrans(dict.fromkeys(ctr))
#
## String used to find tenure
tenure_string = 'Tenure:'
## If list has some information in it, look for tenure and/or full description
if len(listname)>0:
## Simpler to type
fullstring = listname[0]
## TENURE ##
## If tenure string in text
if fullstring.find(tenure_string)!=-1:
## Remove the tenure string from text
t1 = fullstring[fullstring.find(tenure_string)+len(tenure_string):]
## Now keep tenure type to output
tenure_type = t1[:t1.find('\n')].replace(' ', '').upper()
#
## Now get the rest of the string
fullDescription = t1[t1.find('\n')+1:].strip().replace('"', "'").translate(charsToRemove)
return tenure_type, fullDescription
## Else, tenure string not found
else:
## Else, no tenure, but there is a 'full description'
fullDescription = fullstring[fullstring.find('\n')+1:].strip().replace('"', "'")
return None, fullDescription
else:
return None, None
def getPropertyVariables(dictionary):
######################################
## properties information
######################################
propertyId = dictionary['propertyId']
propURL = dictionary['propURL']
added = dictionary['added']
listingUpdateReason = dictionary['listingUpdateReason']
listingUpdateDate = dictionary['listingUpdateDate']
listingUpdateDate2 = dictionary['listingUpdateDate2']
addedOrReduced = dictionary['addedOrReduced']
addedOrReducedDate = dictionary['addedOrReducedDate']
propertyDescription = dictionary['propertyDescription']
fullDescription = dictionary['fullDescription']
propertyType = dictionary['propertyType']
propertySubType = dictionary['propertySubType']
price = float(dictionary['price'])
beds = int(dictionary['beds'])
tenure = dictionary['tenure']
soldSTC = 1 if dictionary['soldSTC'] == True else 0
retirement = dictionary['retirement']
preOwned = dictionary['preOwned']
ownership = dictionary['ownership']
auctionOnly = 1 if dictionary['auctionOnly'] == True else 0
letAgreed = 1 if dictionary['letAgreed'] == True else 0
lettingType = dictionary['lettingType']
furnishedType = dictionary['furnishedType']
minSizeFt = dictionary['minSizeFt']
maxSizeFt = dictionary['maxSizeFt']
minSizeAc = dictionary['minSizeAc']
maxSizeAc = dictionary['maxSizeAc']
businessForSale = 1 if dictionary['businessForSale'] == True else 0
priceQualifier = dictionary['priceQualifier']
currency = dictionary['currency']
## Return values
return propertyId, propURL, added \
, listingUpdateReason, listingUpdateDate, listingUpdateDate2, addedOrReduced, addedOrReducedDate \
, propertyDescription, fullDescription, propertyType, propertySubType \
, price, beds, tenure \
, soldSTC, retirement, preOwned, ownership \
, auctionOnly, letAgreed, lettingType, furnishedType \
, minSizeFt, maxSizeFt, minSizeAc, maxSizeAc \
, businessForSale, priceQualifier, currency
def dict_factory(cursor, row):
""" Returns a dictionary from row ('header':value) """
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def SQLGetMostRecentRecord(c, tablename, propertyId):
""" With cursor and propertyID return dictoinary"""
## Check whether any variables differ
sql_return = """SELECT *
FROM {}
WHERE propertyId = {}
GROUP BY propertyId
HAVING MAX(timestamp)
""".format(tablename, propertyId)
## Get results from table for this properties
result = runSQLCommands(c, [sql_return], getResults=True)[0]
## Result used to name columns too
#
## Get dictionary of most recent update
if result != None:
return dict_factory(c, result)
## Else, return empty dictionary
else:
return {}
def SQLGetAllRecords(c, tablename, propertyId, valuename):
## Initialise result
# records_values = []
## SQL to be executed - returns all values that match the latest timestamp
sql_return = """SELECT A.{1}
FROM {2} AS A
INNER JOIN
(SELECT DISTINCT propertyID, timestamp
FROM {2}
WHERE propertyID = {3}
GROUP BY propertyId
HAVING COUNT(*) > 1 AND MAX(timestamp)
) AS B
ON A.propertyID = B.propertyID
and A.timestamp = B.timestamp """.format("propertyId", valuename, tablename, propertyId)
## Get results from table for this properties
result = runSQLCommands(c, [sql_return], getResults=True)
## c.execute(sql_return)
## result = c.fetchall()
## Put values into single list, or empty list []
records_values = [i[0] for i in result] if result!=None else []
## Return list holding values (either list of property features, or image URLs
return records_values
def propertyShouldBeUpdated(c, dictionary):
"""
With SQL connection check whether table should be updated
A property should be upadted if:
- propertyid not in DB
- if the listing update date in scraped data is after the last one
"""
#
## Property ID we're interested in
propid = dictionary['propertyId']
## SQL to pull out from DB
sql = """SELECT propertyId
, listingUpdateDate
, addedOrReducedDate
, added
, listingUpdateDate2
, timestamp
FROM property
WHERE propertyId = {}
GROUP BY propertyId
HAVING timestamp = MAX(timestamp)
""".format(propid)
## Excecute SQL
c.execute(sql)
result = c.fetchone()
## If property doesn't exist already in DB, then update it (function ends)
if result == None:
return True
## listingUpdateDate2: Else, check scraped dictionary update date matches the DB, return False
elif dictionary['listingUpdateDate2'] == result[4]:
return False
# We need to check the data in the scraped data
else:
## Scraped date from website
newListDate = dictionary['listingUpdateDate2'] if dictionary['listingUpdateDate'] == None else dictionary['listingUpdateDate']
###########################################################
## Check if listing update date contains a string
## This is something that was added after the program
## was first written
###########################################################
if newListDate == None:
if result[5] != '':
## dbDate is integer (or 'None')
dbDate = datetime.strptime(result[5][:8], '%Y%m%d')
## If scraped date is after
if newListDate > dbDate:
return True
## Else, no need to update
else:
return False
## If string contains a time element
elif newListDate.find('T') != -1:
####################################
## Isolate date from from DB
####################################
## If the 'addedOrReducedDate' is null then the best I can do is check the 'added' date
if result[2]=='':
## Added date
if result[3] != 'None':
dbDate = datetime.strptime(str(result[3]), '%Y%m%d')
## Else, there are no dates in DB, use the timestamp
else:
print("Called by '{}'.\nThere are no dates for propertyid '{}', using timestamp.\nWorth checking".format(__file__, result[0]))
dbDate = datetime.strptime(result[5][:8], '%Y%m%d')
## Else, take the date from 'addedOrReducedDate'
else:
dbDate = datetime.strptime(result[2], '%d/%m/%Y')
## Now, extract the date and compare with DB value (addedOrReducedDate)
day = newListDate[8:10]
month = newListDate[5:7]
year = newListDate[:4]
## Now combine into date string (which we'll use to write to DB)
datestring = '/'.join([day, month, year])
## Create python date object to compare with addedOrReducedDate
newListDate = datetime.strptime(datestring, '%d/%m/%Y')
## If scraped date is after the DB
if newListDate > dbDate:
return True
## Else, no need to update
else:
return False
###
## Else, the string for listingUpdateDate is in the numeric format, compare this
else:
## dbDate is integer (or 'None')
dbDate = result[1]
## If scraped date is after
if newListDate > dbDate:
return True
## Else, no need to update
else:
return False
def SQLtoUpdateTable(c, sql_statements, timestamp, dictionary, forceRun=False):
""" If SQL statements are over 1000 or forced to run, execute commands
Return empty list if this happens"""
## Just check if force run has been provided - execute updates
if forceRun and sql_statements != None:
## Run the commands
runSQLCommands(c, sql_statements, 'to update DB')
## Reset list
return []
## If, for whatever reason, the dictionary is null, return an empty list
if dictionary == None:
return []
## Else, do something
else:
## If SQL commands >= 1000
if forceRun or len(sql_statements) > 2000:
print("Forcerun = {} and len(sql_statements)={}".format(forceRun, len(sql_statements)))
## Run the commands
runSQLCommands(c, sql_statements, 'to update DB')
## Reset list
return []
## Else, DB is not to be run, return updated SQL statements
else:
## Init sql commands (individual ones)
sql_command_property = ''
sql_command_location = ''
sql_command_features = []
sql_command_images = []
## If property should be added to DB
## (i.e. listingUpdateDate is > than DB or ID not in DB)
#
## I chose to check for propURL as it will not be in the dictionary
## if the property hasn't been updated
if dictionary.get('propURL') != None:
sql_command_property = SQLProperty(dictionary, timestamp)
sql_command_location = SQLLocation(dictionary, timestamp)
sql_command_features = SQLFeatures(dictionary, timestamp)
sql_command_images = SQLImages(dictionary, timestamp)
sql_commands = [sql_command_property, sql_command_location] \
+ sql_command_features + sql_command_images
## Else, no need to update this property in DB
else:
#print("Only updating 'last seen' date for ID = {}".format(dictionary['propertyId']))
sql_commands = [SQLPropertyUpdateLastSeen(dictionary, timestamp)]
## Sql statements should be extended with list
sql_statements.extend(sql_commands)
## Sql commands
return sql_statements
def runSQLCommands(c, sql_commands, string, getResults=False):
"""
This will attempt to execute the SQL commands to the cursor (c)
if getResults is True then the function will return the values
"""
## Init boolean to keep retrying, keep count of number
committment_needed = True
iAttempts = 0
## Integer holding number of updates (to print to output)
iCnt = 0
## Keep trying until committment = False (i.e. successfully run)
while committment_needed:
## Iterate number of attempts
iAttempts += 1
print("Attempt {} to {}\n".format(iAttempts, string))
## Loop though each command, execute and commit
for s in sql_commands:
## Number of SQL statements executed
iCnt += 1
#print(iCnt)
if iCnt%100 == 0:
print("runSQLCommands: Updating DB iteration {}".format(iCnt))
try:
c.execute(s)
except sqlite3.OperationalError as e:
#print(s)
print("ERROR: '{}'\nWill wait, then try again - 15 seconds".format(e))
time.sleep(15)
break
except ValueError as e:
print(e)
next
except:
raise
            ## We've run through all the SQL commands - turn off the infinite loop
committment_needed = False
## Now, we seem to have executed the SQL commands
##- so let's try and commit the changes
## Seems like all is working - let's get some committment going in our lives!
print("Committing changes to DB")
c.connection.commit()
## Once, the query has been run, If user wants, results, return them
if getResults:
res = c.fetchall()
return res
def update_table_old(con, c, timestamp, dictionary):
####################################################
### THINK THIS IS NO LONGER USED - 2018-03-04
####################################################
## Init sql commands (individual ones)
sql_command_property = ''
sql_command_location = ''
sql_command_features = []
sql_command_images = []
## If property should be added to DB
## (i.e. listingUpdateDate is > than DB or ID not in DB)
if dictionary.get('listingUpdateDate') != None:
sql_command_property = SQLProperty(dictionary, timestamp)
sql_command_location = SQLLocation(dictionary, timestamp)
sql_command_features = SQLFeatures(dictionary, timestamp)
sql_command_images = SQLImages(dictionary, timestamp)
sql_commands = [sql_command_property, sql_command_location] \
+ sql_command_features + sql_command_images
for s in sql_commands:
#
c.execute(s)
## Init boolean and counter
committment_needed = False
iAttempts = 0
## Keep trying
while committment_needed:
iAttempts += 1
print("Attempt {} to commit changes to DB".format(iAttempts))
try:
con.commit()
committment_needed = False
except sqlite3.OperationalError:
print("Database locked. Will wait, then try again - 15 seconds")
time.sleep(15)
## Else, no need to update this property in DB
else:
#print("Only updating 'last seen' date for ID = {}".format(dictionary['propertyId']))
sql_command_property = SQLPropertyUpdateLastSeen(dictionary, timestamp)
c.execute(sql_command_property)
con.commit()
def update_lastFoundInSearch(con, c, dictionary):
    ## Date to record as the 'last seen in search' date (today, as YYYYMMDD)
    today = datetime.today().strftime('%Y%m%d')
    ## Only update the most recent snapshot of this property
    sql = """ UPDATE property
              SET lastFoundInSearch = "{0}"
              WHERE propertyId = {1}
                AND timestamp = (SELECT MAX(timestamp)
                                 FROM property
                                 WHERE propertyId = {1})
          """.format(today, dictionary['propertyId'])
    runSQLCommands(c, [sql], 'to update lastFoundInSearch')
    ## c.execute(sql)
    ## con.commit()
#
def flatten(d, parent_key='', sep='_'):
""" Flatten out nested dictionary"""
items = []
for k, v in d.items():
#new_key = parent_key + sep + k if parent_key else k
new_key = k
if isinstance(v, collections.MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
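## Editor's note (illustrative input only): flatten() keeps just the innermost keys, e.g.
##   flatten({'location': {'latitude': 51.5, 'longitude': -0.1}, 'price': 250000})
##   -> {'latitude': 51.5, 'longitude': -0.1, 'price': 250000}
## Because new_key = k, duplicate keys at different nesting levels would overwrite one another.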
def SQLPropertyUpdateLastSeen(dictionary, timestamp):
tablename = 'property'
var = 'lastFoundInSearch'
today = timestamp[:8]
sql = """ UPDATE {0}
SET {1} = '{2}'
WHERE propertyID = {3}
AND timestamp = (SELECT timestamp
FROM {0}
WHERE propertyID = {3}
GROUP BY propertyID
HAVING MAX(timestamp) = timestamp)
""".format(tablename, var, today, dictionary['propertyId'])
#print(sql)
#
return sql
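## Editor's note (hypothetical values): for propertyId 12345678 and timestamp '20180304_101500'
## the statement built above reads, in outline,
##   UPDATE property SET lastFoundInSearch = '20180304'
##   WHERE propertyID = 12345678 AND timestamp = (SELECT timestamp FROM property
##         WHERE propertyID = 12345678 GROUP BY propertyID HAVING MAX(timestamp) = timestamp)
## so only the most recent snapshot of that property gets its 'last seen' date refreshed.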
def SQLProperty(dictionary, timestamp, changes='None'):
"""
Returns the sql command to insert a properties record
"""
pass
######################################
## properties information
######################################
propertyId, propURL, added \
, listingUpdateReason, listingUpdateDate, listingUpdateDate2, addedOrReduced, addedOrReducedDate \
, propertyDescription, fullDescription, propertyType, propertySubType \
, price, beds, tenure \
, soldSTC, retirement, preOwned, ownership \
, auctionOnly, letAgreed, lettingType, furnishedType \
, minSizeFt, maxSizeFt, minSizeAc, maxSizeAc \
, businessForSale, priceQualifier, currency = getPropertyVariables(dictionary)
#
## properties information
sql = """INSERT INTO property
(propertyId, timestamp, propURL, added
, listingUpdateReason, listingUpdateDate, listingUpdateDate2, addedOrReduced, addedOrReducedDate
, propertyDescription, fullDescription, propertyType, propertySubType
, price, beds, tenure
, soldSTC, retirement, preOwned, ownership
, auctionOnly, letAgreed, lettingType, furnishedType
, minSizeFt, maxSizeFt, minSizeAc, maxSizeAc
, businessForSale, priceQualifier, currency, changes)
VALUES ({}, "{}", "{}", "{}"
, "{}", "{}", "{}", "{}", "{}"
, "{}", "{}", "{}", "{}"
, {}, {}, "{}"
, {}, "{}", "{}", "{}"
, {}, {}, "{}", "{}"
, "{}", "{}", "{}", "{}"
, {}, "{}", "{}", "{}" )
""".format(propertyId, timestamp, propURL, added
, listingUpdateReason, listingUpdateDate, listingUpdateDate2, addedOrReduced, addedOrReducedDate
, propertyDescription, fullDescription, propertyType, propertySubType
, price, beds, tenure
, soldSTC, retirement, preOwned, ownership
, auctionOnly, letAgreed, lettingType, furnishedType
, minSizeFt, maxSizeFt, minSizeAc, maxSizeAc
, businessForSale, priceQualifier, currency, changes)
#
return sql
def SQLLocation(dictionary, timestamp):
########################################
## Location
########################################
# ## Pull out the values
#loc_d = dictionary['location']
propertyId = int(dictionary['propertyId'])
displayAddress = dictionary['displayAddress']
postcode = dictionary['postcode']
country = dictionary['country']
lat = dictionary['latitude']
long = dictionary['longitude']
## Location information
sql = """INSERT INTO location (propertyId, timestamp, displayAddress, postcode, country, latitude, longitude)
VALUES ({}, "{}", "{}", "{}", "{}", "{}", "{}")
""".format(propertyId, timestamp, displayAddress, postcode, country, lat, long)
return sql
def SQLImages(dictionary, timestamp):
"""
Returns the a list of sql commands to insert a new image record
"""
pass
sql = []
##########################################
## properties images (including floorplan)
##########################################
propertyId = dictionary['propertyId']
imageURLs = dictionary['imageURLs']
floorplanURLs = dictionary['floorplan']
## properties images
for i in range(len(imageURLs)):
#
sql.append("""INSERT INTO images (propertyId, timestamp, num, imgType, imgURL)
VALUES ({}, "{}", {}, "{}", "{}")
""".format(propertyId, timestamp, i+1, 'Image', imageURLs[i]))
## properties floor plans
for f in range(len(floorplanURLs)):
#
sql.append("""INSERT INTO images (propertyId, timestamp, num, imgType, imgURL)
VALUES ({}, "{}", {}, "{}", "{}")
""".format(propertyId, timestamp, f+1, 'Floorplan', floorplanURLs[f]))
return sql
def SQLFeatures(dictionary, timestamp):
"""
Returns a list of sql commmands to insert a new feature
"""
pass
#########################################
## Key features
#########################################
kfs = dictionary['key_features']
propertyId = dictionary['propertyId']
sql = []
if kfs != None:
for kf in range(len(kfs)):
sql.append("""INSERT INTO features (propertyId, timestamp, num, feature)
VALUES ({}, "{}", {}, "{}")
""".format(propertyId, timestamp, kf+1, kfs[kf]))
return sql
def SQLpropExists(c, propertyId):
""" Return True if ID exist in table, False, otherwise"""
## Select ID from properties table
sql = """SELECT propertyId FROM properties where propertyId = {0}""".format(propertyId)
## Execute and fetch results
result = runSQLCommands(c, [sql], getResults=True)[0]
## c.execute(sql)
## result = c.fetchone()
## If there is at least observation, return True
if result != None:
#sql_property(propertyId)
return True
## Else, properties not found, return False
else:
return False
def create_tables(dbname):
"""
This function creates a database (if not already existing)
and then creates the required tables (if they don't exist
"""
conn = sqlite3.connect(dbname)
c = conn.cursor()
## Location information
c.execute("""CREATE TABLE IF NOT EXISTS location
(propertyId INTEGER
, timestamp STRING
, displayAddress STRING
, postcode STRING
, country STRING
, latitude STRING
, longitude STRING
, changes STRING)""")
## properties information
c.execute("""CREATE TABLE IF NOT EXISTS property
(propertyId LONG
, timestamp STRING
, propURL STRING
, added STRING
, listingUpdateReason STRING
            , listingUpdateDate STRING
            , listingUpdateDate2 STRING
, addedOrReduced STRING
, addedOrReducedDate STRING
, propertyDescription STRING
, fullDescription STRING
, propertyType STRING
, propertySubType STRING
, price DOUBLE
, beds INTEGER
, tenure STRING
, soldSTC BOOLEAN
, retirement STRING
, preOwned STRING
, ownership STRING
, auctionOnly INTEGER
, letAgreed BOOLEAN
, lettingType STRING
, furnishedType STRING
, minSizeFt STRING
, maxSizeFt STRING
, minSizeAc STRING
, maxSizeAc STRING
, businessForSale BOOLEAN
, priceQualifier STRING
, currency STRING
, changes STRING)""")
## Key features
c.execute("""CREATE TABLE IF NOT EXISTS features
(propertyId LONG
, timestamp STRING
, num INTEGER
, feature STRING)""")
## Images
c.execute("""CREATE TABLE IF NOT EXISTS images
(propertyId LONG
, timestamp STRING
, num INTEGER
, imgType STRING
, imgURL STRING)""")
## Postcode/Outcode lookup
sql = """CREATE TABLE IF NOT EXISTS postcode
(postcode STRING
, timestamp STRING
, outcode STRING)"""
c.execute(sql)
## Commit changes
conn.commit()
class FileOperations:
def __init__(self, path, filename):
self.filename = filename
self.path = path
self.fullpath = path + filename
"""Initialise file operation to write out log"""
## If log path doesn't exist
if not os.path.isdir(path):
## output note to user and create directory
print("Path {} doesn't exist\nCreating it in {}".format(path, currentpath))
os.mkdir(path)
## Open files
def openFile(self):
self.file = open(self.fullpath, 'wt')
## Close files when done
def closeFile(self):
self.file.close()
def setAsLog(self):
print("Setting standard python output to '{}'".format(self.fullpath))
sys.stdout = self.file
def TidyUp(exittype, e, con, c, sql_statements, timestamp, prop_info):
if len(prop_info)>0:
print("\nSome information is available in prop_info with {} properties.\nThis will be output to connection".format(len(prop_info)))
## For each property in property information
for i in range(len(prop_info)):
## Try and get the SQL code to update the information retrieved
try:
result = SQLtoUpdateTable(c, sql_statements, timestamp, prop_info[i])
if result == []:
sql_statements = []
except:
print("ERROR: Error in python code trying to update property with ID {}".format(prop_info[i]['propertyId']))
raise
next
## Run the commands
print("Called by TidyUp. Commit changes to DB")
runSQLCommands(c, sql_statements, 'to commit changes to DB')
else:
print("No information was written to connection")
print("\nClosing connection to DB and log")
con.commit()
con.close()
try:
logfile.closeFile()
except:
print("Log file wasn't open - nothing to close")
def TidyUpAll(prop_info, dbname):
""" Tidy up all information as a last chance saloon (bit hard-coded)
"""
#
con = sqlite3.connect(dbname)
c = con.cursor()
sql_statements = [] ## Init
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
if len(prop_info)>0:
print("\nSome information is available in prop_info with {} properties.\nThis will be output to connection".format(len(prop_info)))
## For each property in property information
for i in range(len(prop_info)):
print(i)
## If the list is not []
if prop_info[i] != None:
try:
                    result = SQLtoUpdateTable(c, sql_statements, timestamp, prop_info[i])
print(len(result))
if result == []:
sql_statements = []
except:
print("ERROR: Error in python code trying to update property with ID {}".format(prop_info[i]['propertyId']))
#raise
next
finally:
pass
print("Called by TidyUpAll. Commit changes to DB")
## Run the commands
runSQLCommands(c, sql_statements, 'to commit changes to DB')
def variableExists(c, table, var):
"""
Returns true if variable exists in table
c - SQL cursor
table - string table name
var = string, variable name
"""
# Command - execute
sql = """ PRAGMA table_info('{}');""".format(table)
c.execute(sql)
## Get results
res = c.fetchall()
## Create list of var names
varnames = [var[1] for var in res]
result = var in varnames
return result
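## Editor's note: PRAGMA table_info returns one row per column in the form
## (cid, name, type, notnull, dflt_value, pk), which is why var[1] above picks out the column
## name; e.g. variableExists(c, 'property', 'lastFoundInSearch') becomes True once AddColumn()
## below has added that column.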
def AddColumn(dbname, table, columnName):
con = sqlite3.connect(dbname)
c = con.cursor()
## Property table - add lastFoundInSearch
if not variableExists(c, table, columnName):
sql = """ ALTER TABLE {}
ADD {} STRING
""".format(table, columnName)
c.execute(sql)
con.commit()
con.close()
def DeleteStringAdded(dbname):
tables = ['location', 'images', 'features', 'property']
sql = """ DELETE FROM {}
WHERE propertyid IN
(SELECT DISTINCT propertyID
FROM property
WHERE addedOrReducedDate LIKE '%d%'
)
"""
sqlcommands = []
for t in tables:
sqlcommands.append(sql.format(t))
con = sqlite3.connect(dbname)
c = con.cursor()
for s in sqlcommands:
c.execute(s)
#print(s)
con.commit()
def clearOldFilesWithPrefix(prefix='', ask=True):
"""
Deletes all files in working folder which match the prefix given
Where the last 8 characters (YYYYMMDD) are not today
"""
files = os.listdir()
## All matches
matches = [f for f in files if f[:len(prefix)]==prefix]
# All matches - excluding today
today = datetime.today().strftime('%Y%m%d')
oldMatches = [m for m in matches if m[-len(today):]!=today]
## Delete all files
[print("{}".format(i)) for i in oldMatches]
if len(oldMatches)>0:
if ask:
check = input('Do you want to delete these {} file(s)?\nY/N? '.format(len(oldMatches))).upper()
if not ask or check == 'Y':
[os.unlink(f) for f in oldMatches]
print("Files deleted.")
return oldMatches
def LoadPostcodes(filename):
""" Reads in .csv file delimited by comma and quote '"'
and returns a list of postcodes and labels for the postcodes"""
## Init data to append to
data = []
## Open .csv file
with open(filename, 'rt') as file:
## Get reader object to iterate over
        reader = csv.reader(file, dialect='excel', quotechar='"')
        ## Loop over each line in file
        for r in reader:
data.append(r)
#
postcodes = [p[0] for p in data[1:]]
labels = [p[1] for p in data[1:]]
return postcodes, labels
def ReturnRandomSelection(seed, havelist, num):
#
## Set seed
random.seed(seed)
## Create list of random numbers matching the size of the list
indices = [i for i in range(len(havelist))]
    ## Cap the sample size at the length of the list
    minnum = min(len(indices), num)
    #
## Get random sample
sample = random.sample(indices, minnum)
## Return these items
final = [havelist[i] for i in sample]
return final
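## Editor's note (illustrative): ReturnRandomSelection(42, ['SW1A', 'SE1', 'N1', 'E14'], 2)
## hands back two of the four outcodes; which two depends only on the seed, so repeat runs
## with the same seed pick the same postcodes, and a num larger than the list simply returns
## every item in a shuffled order.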
def SelectRandomPostcodes2(seed, pcodesfile, dbname, num=1):
"""
Updated 2019-02-05:
Read in postcodes file (saved as a .csv)
Check whether any postcodes are missing from the DB
If so, these are prioritised
If not, randomly select, the number (num) provided by user
Changes:
Brought back the logic to check the postcodes in the list against the DB
"""
#pdb.set_trace()
## Get list of all postcodes from list
postcodes, labels = LoadPostcodes(pcodesfile)
## Get list of postcode areas from DB
con = sqlite3.connect(dbname)
c = con.cursor()
sql = """ SELECT DISTINCT SUBSTR(postcode, 1,INSTR(postcode, ' ')-1)
FROM location"""
## Run query (with re-connection function)
results = runSQLCommands(c, [sql], 'to select random postcodes', getResults=True)
## Isolate the interesting part of the postcode from the results
results = [r[0] for r in results]
## Get list of postcodes with no data in DB
#nodata_postcodes = [p for p in postcodes if p not in results]
nodata_postcodes = results
## If user provided -1, return all postcodes
if num == -1:
return results
else:
## Select random sample of postcodes from DB
#sample = ReturnRandomSelection(seed, results, num)
sample = ReturnRandomSelection(seed, nodata_postcodes, num)
return sample
def SelectRandomPostcodes(seed, pcodesfile, dbname, num=1):
"""
Read in postcodes file (saved as a .csv)
Check whether any postcodes are missing from the DB
If so, these are prioritised
If not, randomly select, the number (num) provided by user
"""
## Get list of all postcodes from list
#postcodes, labels = LoadPostcodes(pcodesfile)
## Get list of postcode areas from DB
con = sqlite3.connect(dbname)
c = con.cursor()
sql = """ SELECT DISTINCT SUBSTR(postcode, 1,INSTR(postcode, ' ')-1)
FROM location"""
## Run query (with re-connection function)
results = runSQLCommands(c, [sql], 'to select random postcodes', getResults=True)
## Isolate the interesting part of the postcode from the results
results = [r[0] for r in results]
## If user provided -1, return all postcodes
if num == -1:
return results
else:
## Select random sample of postcodes from DB
sample = ReturnRandomSelection(seed, results, num)
return sample
def SelectRandomPostcodesOld(seed, pcodesfile, dbname, num=1):
    ### THIS HAS BEEN DECOMMISSIONED BECAUSE SOME POSTCODES WERE
### NOT APPEARING AS THEY DID IN THE LIST
### e.g. SW1 actually was SW1A, SW1B, ...,
### DATE: March 2018
### Author: Andrew Craik
"""
Read in postcodes file (saved as a .csv)
Check whether any postcodes are missing from the DB
If so, these are prioritised
If not, randomly select, the number (num) provided by user
"""
## Get list of all postcodes from list
postcodes, labels = LoadPostcodes(pcodesfile)
## Get list of postcode areas from DB
con = sqlite3.connect(dbname)
c = con.cursor()
sql = """ SELECT DISTINCT SUBSTR(postcode, 1,INSTR(postcode, ' ')-1)
FROM location"""
## Run query (with re-connection function)
results = runSQLCommands(c, [sql], 'to select random postcodes', getResults=True)
## Isolate the interesting part of the postcode from the results
results = [r[0] for r in results]
## Get list of postcodes with no data in DB
nodata_postcodes = [p for p in postcodes if p not in results]
## If 'nodata' list is null then write the list out to a file
if nodata_postcodes == []:
print("All postcodes in the .csv file '{}' are in the DB.\nSelecting {} of them.".format(pcodesfile), num)
## Randomly select from results
sample = ReturnRandomSelection(seed, results, num)
## Else, select a random set of postcodes from list
else:
## Randomly select from (hopefully decreasing list of postcodes NOT in DB
print("There are {} postcodes in the .csv file which are not in the DB\nWill select {} of them".format(len(nodata_postcodes), num))
sample = ReturnRandomSelection(seed, nodata_postcodes, num)
return sample
pass
## Create DB tables
if __name__ == '__main__':
pass
#SelectRandomPostcodes(seed, pcodes, dbname, 2)
|
[
"andrrew-c@users.noreply.github.com"
] |
andrrew-c@users.noreply.github.com
|
120d9dd50c836104fd6d730d447ccc95fa97a548
|
61693f629c148400a885c7731ee28c18a3f1e36b
|
/ifnclass/ifnplot.py
|
ecd6f665d4aa01d5c27d3e417db548cb60433bf6
|
[] |
no_license
|
dakirby/IFNmodeling
|
c95b58d07a142cc5cb19aae05f19d42cad1d411c
|
5996de12c9071b0e54371da50bb031b80dc34050
|
refs/heads/master
| 2022-12-14T10:18:39.570071
| 2021-09-09T14:29:50
| 2021-09-09T14:29:50
| 136,951,791
| 0
| 0
| null | 2022-12-08T12:01:53
| 2018-06-11T16:20:34
|
Python
|
UTF-8
|
Python
| false
| false
| 24,205
|
py
|
from numpy import ndarray, shape
import matplotlib.pyplot as plt
try:
from .ifndata import IfnData
from .ifnmodel import IfnModel
except (ImportError, ModuleNotFoundError):
from ifndata import IfnData
from ifnmodel import IfnModel
from numpy import linspace, logspace, float64, divide
import time
import os
class Trajectory:
"""
Documentation - A Trajectory object is an augmented IfnData object which simply includes metainformation on the
desired plotting style
Parameters
----------
data (IfnData): the data to be plotted
plot_type (string): the type of matplotlib plotting function to call - can be one of 'plot', 'scatter', 'errorbar'
line_style (string): the argument to pass to the plotting function for colour and style (eg. line_style = 'k--')
if plot_type == 'scatter' then line_style can be up to two characters, one for color and the
other for marker shape. line_style[0] will be passed as colour and line_style[1] as marker
shape in scatter()
label (string): the label to use in the plot legend for this trajectory; default is None, in which case default
plotting choices will be used
Methods
-------
t(): returns times for a TimecoursePlot
y(): returns responses for a TimecoursePlot
d(): return the doses for a DoseresponsePlot
z(): returns the responses for a DoseresponsePlot
"""
# Initializer / Instance Attributes
def __init__(self, data: IfnData, plot_type: str, line_style, label='', **kwargs):
self.data = data
self.plot_type = plot_type
self.line_style = line_style
self.label = label
self.timeslice = kwargs.get('timeslice', None)
self.doseslice = kwargs.get('doseslice', None)
self.dose_species = kwargs.get('dose_species', None)
self.dose_norm = kwargs.get('dose_norm', 1)
self.color = kwargs.get('color', None)
self.linewidth = kwargs.get('linewidth', 1.5)
self.alpha = kwargs.get('alpha', 1)
def t(self): # times
if self.timeslice is None:
if type(self.data.data_set.columns[0]) == str:
try:
return [int(el) for el in self.data.data_set.columns if el != 'Dose_Species' and el != 'Dose (pM)']
except ValueError:
return [float(el) for el in self.data.data_set.columns if el != 'Dose_Species' and el != 'Dose (pM)']
else:
return [el for el in self.data.data_set.columns if el != 'Dose_Species' and el != 'Dose (pM)']
else:
return [self.timeslice]
def y(self): # timecourse response values
idx = self.t()
if self.doseslice is None:
return self.data.data_set[idx].values[0]
else:
try:
return self.data.data_set.loc[self.dose_species].loc[self.doseslice].values
except KeyError:
print(self.data.data_set)
print(self.data.data_set.loc[self.dose_species])
print(self.data.data_set.loc[self.dose_species].index)
raise KeyError
def d(self): # doses
if self.timeslice is None:
return divide(self.data.data_set.loc[self.dose_species].index.values, self.dose_norm)
else:
return divide(self.data.data_set.loc[self.dose_species].index.values, self.dose_norm)
def z(self): # dose-response response values
if self.timeslice is None:
return self.data.data_set.xs(self.dose_species).values
else:
try:
return self.data.data_set.xs(self.dose_species).loc[:, self.timeslice].values
except (KeyError, TypeError):
try:
if type(self.timeslice) == list:
temp = [str(el) for el in self.timeslice]
else:
temp = str(self.timeslice)
return self.data.data_set.xs(self.dose_species).loc[:, temp].values
except KeyError:
print(self.data.data_set.xs(self.dose_species).columns)
#print(self.data.data_set.xs(self.dose_species).loc[:, self.timeslice])
raise KeyError("Something went wrong indexing times")
#return self.data.data_set.xs(self.dose_species).loc[:, [float(el) for el in self.timeslice][0]].values
class TimecoursePlot:
"""
Documentation - A TimecoursePlot holds IfnModels and IfnData instances which are to be plotted in a
Matplotlib figure.
Parameters
----------
shape (tuple): tuple of ints for nrows and ncols in the figure
Attributes
----------
nrows (int): the number of subplot rows in the figure
    ncols (int): the number of subplot columns in the figure
fig (matplotlib figure): the figure to plot
axes (matplotlib axes): the axes object to plot
trajectories (list): a list of Trajectory objects describing trajectories, experimental or simulated
subplot_indices (list): a list of subplot locations
Methods
-------
add_trajectory(self, data: IfnData, plot_type: str, line_style, subplot_idx, label='')
Builds a Trajectory instance and adds it to the TimecoursePlot object's list of trajectories. Remembers which
subplot to put this trajectory in, given by subplot_idx = (row, col)
- row and col in subplot_idx are 0-indexed
remove_trajectory(index: int)
Removes a trajectory from the TimecoursePlot instance, as indicated by its index in self.trajectories list
"""
# Initializer / Instance Attributes
def __init__(self, shape, figsize=[6.4, 4.8]):
self.nrows = shape[0]
self.ncols = shape[1]
self.fig, self.axes = plt.subplots(nrows=self.nrows, ncols=self.ncols, figsize=figsize)
self.trajectories = []
self.subplot_indices = []
if self.nrows > 1 and self.ncols > 1:
for row in range(self.nrows):
for column in range(self.ncols):
if row == self.nrows - 1:
self.axes[row][column].set_xlabel('Time (min)')
if column == 0:
self.axes[row][column].set_ylabel('Response')
elif self.ncols > 1:
for column in range(self.ncols):
self.axes[column].set_xlabel('Time (min)')
if column == 0:
self.axes[column].set_ylabel('Response')
elif self.nrows > 1:
for row in range(self.nrows):
if row == 0:
self.axes[row].set_xlabel('Time (min)')
self.axes[row].set_ylabel('Response')
else:
self.axes.set_xlabel('Time (min)')
self.axes.set_ylabel('Response')
self.match_axes = False
# Instance methods
def add_trajectory(self, data: IfnData, plot_type: str, line_style, subplot_idx: tuple, label='', **kwargs):
t = Trajectory(data, plot_type, line_style, label=label, **kwargs)
self.trajectories.append(t)
if self.nrows == 1 and self.ncols == 1:
self.subplot_indices.append((None, None))
elif self.nrows == 1:
self.subplot_indices.append((None, subplot_idx[1]))
elif self.ncols == 1:
self.subplot_indices.append((subplot_idx[0], None))
else:
self.subplot_indices.append(subplot_idx)
def get_axis_object(self, idx):
if idx == (None, None):
return self.axes
elif idx[0] == None:
return self.axes[idx[1]]
elif idx[1] == None:
return self.axes[idx[0]]
else:
return self.axes[idx[0]][idx[1]]
def remove_trajectory(self, index):
del self.trajectories[index]
del self.subplot_indices[index]
def show_figure(self, show_flag=True, save_flag=False, save_dir=''):
for trajectory_idx in range(len(self.trajectories)):
trajectory = self.trajectories[trajectory_idx]
plt_idx = self.subplot_indices[trajectory_idx]
ax = self.get_axis_object(plt_idx)
if trajectory.plot_type == 'plot':
if type(trajectory.line_style) == str:
if trajectory.color is not None:
ax.plot(trajectory.t(), [el[0] for el in trajectory.y()], trajectory.line_style,
label=trajectory.label, color=trajectory.color, linewidth=trajectory.linewidth,
alpha=trajectory.alpha)
ax.legend()
else:
ax.plot(trajectory.t(), [el[0] for el in trajectory.y()], trajectory.line_style,
label=trajectory.label, linewidth=trajectory.linewidth, alpha=trajectory.alpha)
ax.legend()
else:
ax.plot(trajectory.t(), [el[0] for el in trajectory.y()], c=trajectory.line_style,
label=trajectory.label, linewidth=trajectory.linewidth, alpha=trajectory.alpha)
ax.legend()
elif trajectory.plot_type == 'scatter':
if type(trajectory.line_style) == str:
if trajectory.color is not None:
plot_times = trajectory.t()
plot_responses = [el[0] for el in trajectory.y()]
ax.scatter(plot_times, plot_responses, marker=trajectory.line_style, label=trajectory.label, color=trajectory.color)
ax.legend()
else:
ax.scatter(trajectory.t(), [el[0] for el in trajectory.y()], c=trajectory.line_style[0],
marker=trajectory.line_style[1], label=trajectory.label)
ax.legend()
else:
ax.scatter(trajectory.t(), [el[0] for el in trajectory.y()], c=trajectory.line_style,
label=trajectory.label)
ax.legend()
elif trajectory.plot_type == 'errorbar':
x = trajectory.t()
y = [el[0] for el in trajectory.y()]
sigmas = [el[1] for el in trajectory.y()]
if type(trajectory.line_style) == str:
if trajectory.color is not None:
ax.errorbar(x, y, yerr=sigmas, fmt=trajectory.line_style,
label=trajectory.label, color=trajectory.color)
else:
ax.errorbar(x, y, yerr=sigmas, fmt=trajectory.line_style, label=trajectory.label)
else:
ax.errorbar(x, y, yerr=sigmas, fmt='--', label=trajectory.label, color=trajectory.line_style)
ax.legend()
if self.match_axes:
ymax = 0
ymin = 0
for ax in self.axes.flat:
if ymax < ax.get_ylim()[1]:
ymax = ax.get_ylim()[1]
if ymin > ax.get_ylim()[0]:
ymin = ax.get_ylim()[0]
for ax in self.axes.flat:
ax.set_ylim([ymin, ymax])
if save_flag:
if save_dir == '':
plt.savefig(os.path.join(save_dir, 'fig{}.pdf'.format(int(time.time()))))
else:
plt.savefig(save_dir)
if show_flag:
plt.legend()
plt.show()
return self.fig, self.axes
def save_figure(self, save_dir=''):
        fig, axes = self.show_figure(show_flag=False, save_flag=True, save_dir=save_dir)
return fig, axes
class DoseresponsePlot:
"""
Documentation - A DoseresponsePlot holds IfnModels and IfnData instances which are to be plotted in a
Matplotlib figure.
Parameters
----------
shape (tuple): tuple of ints for nrows and ncols in the figure
Attributes
----------
nrows (int): the number of subplot rows in the figure
    ncols (int): the number of subplot columns in the figure
fig (matplotlib figure): the figure to plot
axes (matplotlib axes): the axes object to plot
trajectories (list): a list of Trajectory objects describing trajectories, experimental or simulated
subplot_indices (list): a list of subplot locations
Methods
-------
add_trajectory(self, data: IfnData, time: float, plot_type: str, line_style, subplot_idx, observable_species, label='')
Builds a Trajectory instance and adds it to the DoseresponsePlot object's list of trajectories. Remembers which
subplot to put this trajectory in, given by subplot_idx = (row, col). Time for dose-response curve must be given
- row and col in subplot_idx are 0-indexed
remove_trajectory(index: int)
Removes a trajectory from the DoseresponsePlot instance, as indicated by its index in self.trajectories list
"""
# Initializer / Instance Attributes
def __init__(self, shape, figsize=[6.4, 4.8]):
self.nrows = shape[0]
self.ncols = shape[1]
self.fig, self.axes = plt.subplots(nrows=self.nrows, ncols=self.ncols, figsize=figsize)
if self.nrows > 1 and self.ncols > 1:
for row in range(self.nrows):
for column in range(self.ncols):
self.axes[row][column].set(xscale='log', yscale='linear')
if row == self.nrows - 1:
self.axes[row][column].set_xlabel('Dose (pM)')
if column == 0:
self.axes[row][column].set_ylabel('Response')
elif self.ncols > 1:
for column in range(self.ncols):
self.axes[column].set(xscale='log', yscale='linear')
self.axes[column].set_xlabel('Dose (pM)')
if column == 0:
self.axes[column].set_ylabel('Response')
elif self.nrows > 1:
for row in range(self.nrows):
self.axes[row].set(xscale='log', yscale='linear')
if row == 0:
self.axes[row].set_xlabel('Dose (pM)')
self.axes[row].set_ylabel('Response')
else:
self.axes.set(xscale='log', yscale='linear')
self.axes.set_xlabel('Dose (pM)')
self.axes.set_ylabel('Response')
self.trajectories = []
self.subplot_indices = []
# Instance methods
def add_trajectory(self, data: IfnData, time, plot_type: str, line_style, subplot_idx: tuple,
dose_species: str, label='', dn: float = 1., **kwargs):
t = Trajectory(data, plot_type, line_style, label=label, timeslice=time, dose_species=dose_species,
dose_norm=dn, **kwargs)
self.trajectories.append(t)
if self.nrows == 1 and self.ncols == 1:
self.subplot_indices.append((None, None))
elif self.nrows == 1:
self.subplot_indices.append((None, subplot_idx[1]))
elif self.ncols == 1:
self.subplot_indices.append((subplot_idx[0], None))
else:
self.subplot_indices.append(subplot_idx)
def get_axis_object(self, idx):
if idx == (None, None):
return self.axes
elif idx[0] == None:
return self.axes[idx[1]]
elif idx[1] == None:
return self.axes[idx[0]]
else:
return self.axes[idx[0]][idx[1]]
def remove_trajectory(self, index):
del self.trajectories[index]
del self.subplot_indices[index]
def show_figure(self, show_flag=True, save_flag=False, save_dir='', tight=False):
for trajectory_idx in range(len(self.trajectories)):
trajectory = self.trajectories[trajectory_idx]
plt_idx = self.subplot_indices[trajectory_idx]
ax = self.get_axis_object(plt_idx)
if trajectory.plot_type == 'plot':
x = trajectory.d()
if type(trajectory.z()[0]) == tuple:
z = [el[0] for el in trajectory.z()]
else:
z = trajectory.z()
if x[0] == 0:
x = x[1:]
z = z[1:]
if type(trajectory.line_style) == str:
if trajectory.color is not None:
ax.plot(x, z, trajectory.line_style, label=trajectory.label, color=trajectory.color,
linewidth=trajectory.linewidth, alpha=trajectory.alpha)
else:
ax.plot(x, z, trajectory.line_style, label=trajectory.label, linewidth=trajectory.linewidth,
alpha=trajectory.alpha)
else:
ax.plot(x, z, c=trajectory.line_style, label=trajectory.label, linewidth=trajectory.linewidth,
alpha=trajectory.alpha)
ax.legend()
elif trajectory.plot_type == 'scatter':
x = trajectory.d()
z = [el[0] for el in trajectory.z()]
if x[0] == 0:
x = x[1:]
z = z[1:]
if len(trajectory.line_style) == 2:
if trajectory.color is not None:
ax.scatter(x, z, marker=trajectory.line_style[1], label=trajectory.label, color=trajectory.color)
else:
ax.scatter(x, z, c=trajectory.line_style[0],
marker=trajectory.line_style[1], label=trajectory.label)
elif len(trajectory.line_style) == 1:
ax.scatter(x, z, c=trajectory.line_style[0], label=trajectory.label)
else:
try:
ax.scatter(x, z, c=[trajectory.line_style], label=trajectory.label)
except:
print("Could not interpret line style")
raise
elif trajectory.plot_type == 'errorbar':
x = trajectory.d()
z = [el[0] for el in trajectory.z()]
if x[0] == 0:
x = x[1:]
z = z[1:]
sigmas = [el[1] for el in trajectory.z()]
if type(trajectory.line_style) == str:
if trajectory.color is not None:
ax.errorbar(x, z, yerr=sigmas, fmt=trajectory.line_style,
label=trajectory.label, color=trajectory.color, alpha=trajectory.alpha)
else:
ax.errorbar(x, z, yerr=sigmas, fmt=trajectory.line_style, label=trajectory.label, alpha=trajectory.alpha)
else:
ax.errorbar(x, z, yerr=sigmas, fmt='--', label=trajectory.label, color=trajectory.line_style, alpha=trajectory.alpha)
ax.legend()
elif trajectory.plot_type == 'envelope':
x = trajectory.d()
z = [el[0] for el in trajectory.z()]
if x[0] == 0:
x = x[1:]
z = z[1:]
sigmas = [el[1] for el in trajectory.z()]
z_plus_sigma = [z[i] + sigmas[i] for i in range(len(sigmas))]
z_minus_sigma = [z[i] - sigmas[i] for i in range(len(sigmas))]
if type(trajectory.line_style) == str:
if trajectory.color is not None:
ax.plot(x, z, trajectory.line_style, label=trajectory.label, color=trajectory.color,
linewidth=trajectory.linewidth, alpha=1.0)
ax.fill_between(x, z_plus_sigma, z_minus_sigma, facecolor=trajectory.color,
alpha=trajectory.alpha)
else:
ax.plot(x, z, trajectory.line_style, label=trajectory.label, color=trajectory.color,
linewidth=trajectory.linewidth, alpha=1.0)
ax.fill_between(x, z_plus_sigma, z_minus_sigma, facecolor=trajectory.color,
alpha=trajectory.alpha)
else:
ax.plot(x, z, c=trajectory.line_style, label=trajectory.label, linewidth=trajectory.linewidth,
alpha=1.0)
ax.fill_between(x, z_plus_sigma, z_minus_sigma, facecolor=trajectory.line_style,
alpha=trajectory.alpha)
ax.legend()
if tight:
plt.tight_layout()
if save_flag:
if save_dir == '':
plt.savefig(os.path.join(save_dir, 'fig{}.pdf'.format(int(time.time()))))
else:
plt.savefig(save_dir)
if show_flag:
plt.show()
return self.fig, self.axes
def save_figure(self, save_dir='', tight=False):
fig, axes = self.show_figure(show_flag=False, save_flag=True, save_dir=save_dir, tight=tight)
return fig, axes
if __name__ == '__main__':
testData = IfnData("20181113_B6_IFNs_Dose_Response_Bcells")
testModel = IfnModel('Mixed_IFN_ppCompatible')
# testParameters = {'kpa': 4.686e-05, 'kSOCSon': 2.687e-06, 'kd4': 0.236, 'k_d4': 0.2809, 'R1': 108, 'R2': 678} 'kpa': 1e-07,
#best_fit_old20min = {'kSOCSon': 1e-07, 'krec_a1': 0.0001, 'krec_a2': 0.001, 'R1': 1272, 'R2': 1272,
# 'krec_b1': 1.0e-05, 'krec_b2': 0.0001, 'kpu':1E-2, 'kpa':1E-7}
best_fit_old20min = {'kpu': 0.004, 'kpa': 1e-6,
'R2': 1742, 'R1': 1785,
'k_d4': 0.06, 'kd4': 0.3,
'k_a2': 8.3e-13, 'k_a1': 4.98e-14,
'ka4': 0.001,
'kSOCS': 0.01, 'kSOCSon': 2e-3, 'SOCSdeg': 0.2,
'kint_b': 0.0, 'kint_a': 0.04}
dose = 1000 # in pM
best_fit_old20min.update({'Ib': dose * 6.022E23 * 1E-5 * 1E-12, 'Ia': 0})
testModel.set_parameters(best_fit_old20min)
print(testModel.parameters)
tc = testModel.timecourse(list(linspace(0, 60)), 'TotalpSTAT', return_type='dataframe',
dataframe_labels=['Beta', dose])
tcIfnData = IfnData('custom', df=tc, conditions={'Beta': {'Ia': 0}})
testplot = TimecoursePlot((1, 1))
testplot.add_trajectory(tcIfnData, 'plot', 'g', (0, 0))
testplot.show_figure()
exit()
dra = testModel.doseresponse([0, 2.5, 5, 15, 30, 60], 'TotalpSTAT', 'Ia', list(logspace(-3, 6)),
parameters={'Ib': 0}, return_type='dataframe', dataframe_labels='Alpha')
drb = testModel.doseresponse([0, 2.5, 5, 15, 30, 60], 'TotalpSTAT', 'Ib', list(logspace(-3, 6)),
parameters={'Ia': 0}, return_type='dataframe', dataframe_labels='Beta')
draIfnData = IfnData('custom', df=dra, conditions={'Alpha': {'Ib': 0}})
drbIfnData = IfnData('custom', df=drb, conditions={'Beta': {'Ia': 0}})
testplot = DoseresponsePlot((1, 1))
testplot.add_trajectory(draIfnData, '15', 'plot', 'r:', (0, 0), 'Alpha', dn=1)
testplot.add_trajectory(drbIfnData, '15', 'plot', 'g:', (0, 0), 'Beta', dn=1)
testplot.add_trajectory(draIfnData, '30', 'plot', 'r--', (0, 0), 'Alpha', dn=1)
testplot.add_trajectory(drbIfnData, '30', 'plot', 'g--', (0, 0), 'Beta', dn=1)
testplot.add_trajectory(draIfnData, '60', 'plot', 'r', (0, 0), 'Alpha', dn=1)
testplot.add_trajectory(drbIfnData, '60', 'plot', 'g', (0, 0), 'Beta', dn=1)
testtraj = testplot.show_figure()
|
[
"39995281+dakirby@users.noreply.github.com"
] |
39995281+dakirby@users.noreply.github.com
|
d5a065d71589b63422d232773704e9ae5c49e792
|
97b6ba4ce0dab62629fa11d491d3c0ae95e38aa6
|
/SVM/SMOWithKernel/SMOWithKernel.py
|
d8d965ffda17ee4a743940d3114c995e9f0cb0be
|
[] |
no_license
|
WCC-wcc/MachineLearningInAction
|
36303296c81002c5e114ed04076dc6969fd442a8
|
b3d57757a575fd981b2f5f8fa81d67a905c1f829
|
refs/heads/master
| 2020-08-30T18:52:12.825731
| 2019-10-30T07:32:37
| 2019-10-30T07:32:37
| 218,461,864
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,163
|
py
|
from numpy import *
import matplotlib.pyplot as plt
# Function to read the data file
def loadDataSet(fileName):
dataMat = []
labelMat = []
fr = open(fileName)
for line in fr.readlines():
lineArr = line.strip().split('\t')
dataMat.append([float(lineArr[0]),float(lineArr[1])])
labelMat.append(float(lineArr[2]))
return dataMat,labelMat
# Kernel transformation function
def kernelTrans(X,A,kTup): # the inputs are two numeric arrays and a tuple; kTup describes the kernel: the first element is the kernel type, the second is the user-supplied sigma
    # X is the full data set, A is a single row of the data set
    m,n = shape(X)
    K = mat(zeros((m,1)))  # initialise K, a column vector holding the kernel value between every row of the data set and the row A in turn
    if kTup[0] == 'lin' :  # linear case: just take the vector inner product
        K = X * A.T
    elif kTup[0] == 'rbf' :  # Gaussian radial basis function kernel
        for j in range(m) :
            deltaRow = X[j,:] - A  # first compute (x - y)
            K[j] = deltaRow * deltaRow.T  # then (x - y) ^ 2; since (x - y) is a vector, deltaRow * deltaRow.T gives the squared norm
        K = exp(K / (-kTup[1] ** 2))  # the RBF kernel is K(x,y) = exp( - ( x - y ) ^ 2 / 2 * sigma ^ 2); the constant factor of 2 is ignored here
else :
raise NameError('There is a problem about kernel.')
return K
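# Editor's note (illustrative values, not part of the original script): with the formula used
# above, two 1-D points x = [0] and y = [2] with sigma = 1 give exp(-(2 - 0)**2 / 1**2) = exp(-4),
# roughly 0.018, while identical points give exactly 1 - distant points contribute almost nothing.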
# Helper function: randomly pick an integer inside a given range
def selectJrand(i,m): # the input i is the index of the first alpha, m is the total number of alphas
    j = i  # this function randomly selects a different alpha index
while(j == i):
j = int(random.uniform(0,m))
return j
# Helper function: clip alpha values that fall below L or above H, so that alpha stays inside its feasible interval
def clipAlpha(aj,L,H):
if aj < L:
aj = L
if aj > H:
aj = H
return aj
# Build a data structure that keeps the important values together (note the double underscores around __init__); everything is then handled through one object
class optStruct:
    def __init__(self,dataMatIn,classLabels,C,toler,kTup) :
        self.X = dataMatIn  # the data samples
        self.labelMat = classLabels  # the class labels
        self.C = C  # the constant C
        self.tol = toler  # the tolerance
        self.m = shape(dataMatIn)[0]  # the number of data points
        self.alphas = mat(zeros((self.m,1)))  # one alpha per data point
        self.b = 0  # the constant b
        self.eCache = mat(zeros((self.m,2)))  # an m * 2 error cache: the first column flags whether the cached value is valid, the second column holds the actual E value
        self.K = mat(zeros((self.m,self.m)))  # initialise the kernel matrix K, a 100 * 100 matrix for this data set
        for i in range(self.m) :  # compute the kernel values between the full data set and each data vector in turn
            self.K[:,i] = kernelTrans(self.X,self.X[i,:],kTup)  # the return value is one column of values, assigned to the matching column self.K[:,i]
# Compute and return the error value E for record k; note that E keeps changing during training: E = wx + b - y with w = alphai * yi * xi (i = 1,2...100)
def calcEk(oS,k) :
    fXk = float(multiply(oS.alphas,oS.labelMat).T * oS.K[:,k] + oS.b)  # multiply(oS.alphas,oS.labelMat) is the element-wise product
Ek = fXk - float(oS.labelMat[k])
return Ek
# Choose the alpha for the inner loop, so that each optimisation step uses the maximum step size
def selectJ(i,oS,Ei) : # i - index of data point i, oS - the data structure, Ei - error of data point i
    maxK = -1
    maxDeltaE = 0
    Ej = 0
    oS.eCache[i] = [1,Ei]  # update the error cache with Ei and mark the entry as valid
    validEcacheList = nonzero(oS.eCache[:,0].A)[0]  # indices of the entries whose cached error is non-zero, as a list (i.e. the Ej values that have already been computed)
    # oS.eCache is a 100 * 2 matrix
    # oS.eCache[:,0] takes the first column of the 100 rows
    # oS.eCache[:,0].A converts the matrix to an array type (100 * 1)
    # nonzero(oS.eCache[:,0].A) finds the non-zero entries; its return value matches the dimensions of oS.eCache[:,0].A
    # for a 100 * 1 array the first dimension corresponds to rows and the second to columns
    if (len(validEcacheList)) > 1 :  # loop through and find the largest step Ek
        for k in validEcacheList :  # at this point the cache already holds Ej information
            if k == i :  # skip i itself, it would waste time
                continue
            Ek = calcEk(oS,k)  # compute the error Ek
            deltaE = abs(Ei - Ek)  # compute the step size
            if (deltaE > maxDeltaE) :  # keep the maximum step size
                maxK = k
                maxDeltaE = deltaE  # found a new maxDeltaE
                Ej = Ek
        return maxK,Ej  # maxK - index of the data point with the largest step, Ej - error of data point j
    else :  # if no Ej value has been computed yet, pick j at random first
        j = selectJrand(i,oS.m)  # randomly choose the index of alpha_j
        Ej = calcEk(oS,j)
        return j,Ej  # j - index of data point j, Ej - error of data point j
# Compute the error value and refresh the cache
def updateEk(oS,k) :
Ek = calcEk(oS,k)
oS.eCache[k] = [1,Ek]
# Optimisation routine for the decision boundary - the optimised (Platt) SMO inner loop
def innerL(i,oS) : # the inputs are the index i and the data structure oS
Ei = calcEk(oS,i)
    if ((oS.labelMat[i] * Ei < -oS.tol) and (oS.alphas[i] < oS.C)) or ((oS.labelMat[i] * Ei > oS.tol) and (oS.alphas[i] > 0)) :  # check the KKT conditions here
        # test against the positive and the negative margin, and at the same time check whether alpha[i] has hit the 0 or C bound; on the bound it cannot be decreased or increased any further
        # so there is nothing left to optimise
        # the three KKT conditions are compared together
        # alpha > 0   yi * (wx + b) <= 1   (what is tested here is the violated case)
        # alpha < C   yi * (wx + b) >= 1
        j,Ej = selectJ(i,oS,Ei)  # heuristic selection of the second alpha
        alphaIold = oS.alphas[i].copy()  # python passes all lists by reference, so allocate explicit copies to be able to compare the old and new alpha values
        alphaJold = oS.alphas[j].copy()  # record alphaIold and alphaJold; they are the initial feasible solution
        if (oS.labelMat[i] != oS.labelMat[j]) :  # next compute L and H, used to clip alphas[j] into the range 0-C
            # y1 and y2 can combine in four ways: same sign (1 1) (-1 -1) or opposite sign (1 -1) (-1 1)
            L = max(0,oS.alphas[j] - oS.alphas[i])  # opposite-sign case
            H = min(oS.C,oS.C + oS.alphas[j] - oS.alphas[i])
        else :  # same-sign case
            L = max(0,oS.alphas[j] + oS.alphas[i] - oS.C)
            H = min(oS.C,oS.alphas[j] + oS.alphas[i])
        if L == H :
            print('L == H')  # if L and H are equal, give up on this pair of alphas
return 0
eta = 2.0 * oS.K[i,j] - oS.K[i,i] - oS.K[j,j]
        # eta is the optimal amount by which to change alpha[j], eta = (K11+K22-2*K12); the negated value is used here
        if eta >= 0:  # when eta >= 0 the objective is not a proper quadratic, so the derivative trick cannot be used to find the extremum over alpha - skip this pair
print('eta >= 0')
return 0
        oS.alphas[j] -= oS.labelMat[j] * (Ei - Ej) / eta  # alpha2_new = alpha2_old + y2(E1-E2)/(k11+k22-2k12)
        oS.alphas[j] = clipAlpha(oS.alphas[j],L,H)  # clip alpha_j to the lower bound L and the upper bound H
        updateEk(oS,j)  # refresh the error cache
        if (abs(oS.alphas[j] - alphaJold) < 0.00001) :  # check how much alpha_j moved; if the change is too small, skip this pair
print('j not moving enough')
return 0
oS.alphas[i] += oS.labelMat[j] * oS.labelMat[i] * (alphaJold - oS.alphas[j])
#alpha1_old * y1 + alpha2_old * y2 = alpha1_new * y1 + alpha2_new * y2
#alpha1_new = alpha1_old + y1 * y2 *(alpha2_old - alpha2_new)
        updateEk(oS,i)  # refresh the error cache
b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.K[i,i] - oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.K[i,j]
b2 = oS.b - Ej - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.K[i,j] - oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.K[j,j]
        # update b1 and b2
#y1 = wx1 + b1
# w = alphai * yi * xi (1 <= i <= m)
#y1 = alphai * yi * ki1 +b1 (1 <= i <= m)
#b1_new = y1 - alphai * yi * ki1 - alpha1_new * k11 * y1 - alpha2_new *k21 *y2 (3 <= i <= m)
#E1 = g(x1) - y1
#g(xi) = wxi + bi (1 <= i <= m)
#E1 = g(x1) - y1 = wx1 + b1 - y1 = alphai * yi * ki1 +b1 - y1 (1 <= i <= m)
#E1 = alphai * yi * ki1 + alpha1_old * k11 * y1 + alpha2_old *k21 *y2 + b_old - y1 (3 <= i <= m)
#b1_new = b_old - (-alpha_old1 + alpha1_new) * y1 * k11 - (-alpha2_old + alpha2_new) * y2 * k12 -E1
#b2_new = b_old - (-alpha_old1 + alpha1_new) * y1 * k12 - (-alpha2_old + alpha2_new) * y2 * k22 -E2
if (0 < oS.alphas[i]) and (oS.C > oS.alphas[i]):
oS.b = b1
elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]) :
oS.b = b2
else :
oS.b = (b1 + b2) / 2.0
return 1
else :
return 0
def smoP(dataMatIn,classLabels,C,toler,maxIter,kTup = ('lin',0)) : # new argument kTup; the Gaussian kernel is used here
    oS = optStruct(mat(dataMatIn),mat(classLabels).transpose(),C,toler,kTup)  # load everything into the data structure
iter = 0
entireSet = True
alphaPairsChanged = 0
    while (iter < maxIter) and ((alphaPairsChanged > 0) or (entireSet)) :  # stop once the iteration count exceeds the maximum, or once a full pass over the whole set changes no alphas
alphaPairsChanged = 0
if entireSet :
            for i in range(oS.m) :  # loop over every sample; i is the first alpha selected
                alphaPairsChanged += innerL(i,oS)  # call innerL to select the second alpha and count whether any alpha pair changed
print('fullSet, iter: %d i: %d,pairs changed %d' % (iter,i,alphaPairsChanged))
iter += 1
else :
            noBoundIs = nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]  # oS.alphas.A > 0 tests whether each alpha is greater than 0, returning True or False
            # oS.alphas.A < C tests whether each alpha is less than C, returning True or False
            # (oS.alphas.A > 0) * (oS.alphas.A < C) multiplies the two boolean arrays element-wise
            # nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0] extracts the matching indices as a list
# print(noBoundIs)
            for i in noBoundIs :  # loop over all of the non-bound values
                alphaPairsChanged += innerL(i,oS)  # call innerL to select the second alpha and count whether any alpha pair changed
print('non-bound, iter: %d i: %d,pairs changed %d' % (iter,i,alphaPairsChanged))
iter += 1
        if entireSet : entireSet = False  # alternate between the non-bound loop and the full pass: this pass went over the whole set, so the next pass only visits the non-bound points
        # over many sub-optimisations the bound variables tend to stay on the bound while the non-bound variables tend to keep moving, so this heuristic choice saves time; the algorithm keeps sweeping the non-bound set
        # until every non-bound variable satisfies the KKT conditions (is self-consistent), after which it goes back to a pass over the whole set to look for variables that violate the KKT conditions to use as the first variable to optimise
        # note that the algorithm makes at most one consecutive pass over the whole set, but may make several consecutive passes over the non-bound set
elif (alphaPairsChanged == 0) :
entireSet = True
print('iteration number : %d' % iter)
return oS.b,oS.alphas
def calculateW(dataArr,labelArr,alphas): # dataArr is a 100 * 2 array; the w generated below is a 2 * 1 array initialised to [0,0]
X = mat(dataArr)
labelMat = mat(labelArr).transpose()
m,n = shape(X)
w = zeros((n,1))
for i in range(m):
w += multiply(alphas[i] * labelMat[i],X[i,:].T)
return w.tolist()
    # Mw = matrix(zeros(shape(dataArr)[1]))  # shape(dataArr)[0] describes the first dimension of dataArr, shape(dataArr)[1] is the number of values in each record
    # for i in range (shape(alpha)[0]):
    #     if alpha[i]>0:
    #         Mw += multiply(labelArr[i]*alpha[i],dataArr[i])  # compute the value of w: w = alphai * yi * xi (1 <= i <= m) whenever alphai > 0
    # w = Mw.T.tolist()  # transpose w and convert it to a 2*1 list
    # return w
# Plot the data set and the best-fit separating line from the simple SVM
def drawing(dataArr,labelArr,alphas):
    n = shape(labelArr)[0]  # number of points; the data falls into four groups: class 1, class -1, class 1 SVs and class -1 SVs
xcord1 = []; ycord1 = []
xcord2 = []; ycord2 = []
xcord3 = []; ycord3 = []
xcord4 = []; ycord4 = []
for i in range(n):
if int(labelArr[i])== 1:
if alphas[i]>0:
xcord3.append(dataArr[i][0]); ycord3.append(dataArr[i][1])
else:
xcord1.append(dataArr[i][0]); ycord1.append(dataArr[i][1])
else:
if alphas[i]>0:
xcord4.append(dataArr[i][0]); ycord4.append(dataArr[i][1])
else:
xcord2.append(dataArr[i][0]); ycord2.append(dataArr[i][1])
fig = plt.figure()
ax = fig.add_subplot(111)
    ax.scatter(xcord1, ycord1, s=40, c='yellow', marker='s',label='class 1')  # start plotting
ax.scatter(xcord2, ycord2, s=40, c='green',label='class -1')
ax.scatter(xcord3, ycord3, s=40, c='red',marker='s',label='SV 1')
ax.scatter(xcord4, ycord4, s=40, c='red',label='SV -1')
ax.legend(loc='best')
    # x = arange(2.7, 6.6, 0.1)  # take 40 x values for plotting; this part draws the separating hyperplane
    # y1 = (b+(calculateW(dataArr,labelArr,alphas)[0][0])*x)/(-calculateW(dataArr,labelArr,alphas)[1][0])  # w * x + b = 0 is the separating hyperplane; in two dimensions solve for the y expression
    # y = mat(y1).T  # w1x1 + w2x2 + b = 0  =>  x2 = (b + w1x1)/(-w2) with w1 = w[0][0] and w2 = w[1][0]
    # ax.plot(x, y,'-')  # plot the x,y relationship
plt.xlabel('X1')
plt.ylabel('X2');
# plt.savefig('SMO.png',dpi=2000)
plt.show()
# Radial basis test function that uses the kernel for classification
def testRbf(k1 = 1.5):
dataArr,labelArr = loadDataSet('testSetRBF.txt')
b,alphas = smoP(dataArr,labelArr,200,0.0001,10000,('rbf',k1))
dataMat = mat(dataArr)
labelMat = mat(labelArr).transpose()
    svInd = nonzero(alphas.A > 0)[0]  # find the indices of the support vectors
    sVs = dataMat[svInd]  # keep the x,y values of the support vectors
    labelSV = labelMat[svInd]  # class labels of the support vectors
print('there are %d Support Vectors' % shape(sVs)[0])
m,n = shape(dataMat)
errorCount = 0
for i in range(m) :
        kernelEval = kernelTrans(sVs,dataMat[i,:],('rbf',k1))  # w = alpha * label * x; the first step is to evaluate the kernel between the support vectors and each row vector
        predict = kernelEval.T * multiply(labelSV,alphas[svInd]) + b  # label = w * x + b; then multiply that product by (alpha * label), where the alpha and label values all belong to the SVs
        if (sign(predict) != sign(labelArr[i])) :  # sign(predict) is the sign function: -1 when predict < 0, +1 when predict > 0
            errorCount += 1  # the prediction was wrong, so add one to the error count
print('the training error rate is %f ' % (float(errorCount / m)))
dataArr,labelArr = loadDataSet('testSetRBF2.txt')
dataMat = mat(dataArr)
labelMat = mat(labelArr).transpose()
errorCount = 0
m,n = shape(dataMat)
for i in range(m) :
kernelEval = kernelTrans(sVs,dataMat[i,:],('rbf',k1))
predict = kernelEval.T * multiply(labelSV,alphas[svInd]) + b
if (sign(predict) != sign(labelArr[i])) :
errorCount += 1
print('the test error rate is %f ' % (float(errorCount / m)))
drawing(dataArr,labelArr,alphas)
testRbf(k1 = 1.3);
|
[
"41832588+WCC-wcc@users.noreply.github.com"
] |
41832588+WCC-wcc@users.noreply.github.com
|
9fafc54a808fe954f6a9cced561d54308e404226
|
aec28a032dd5788d9201d6325f2efa285116696e
|
/big/plot_grid3d.py
|
8e763a48aa2b2c2ce84dab68bc3abbc78087d1f9
|
[] |
no_license
|
pletzer/pyterp_tests
|
346addfe89ff14613e986ca2b9a14206f9b41d45
|
56be0634d8f7402ce5322a6a67c1843a593d31de
|
refs/heads/master
| 2020-05-29T08:50:40.072549
| 2017-07-20T03:23:34
| 2017-07-20T03:23:34
| 69,289,048
| 1
| 3
| null | 2017-03-22T19:15:02
| 2016-09-26T20:15:44
|
Python
|
UTF-8
|
Python
| false
| false
| 3,031
|
py
|
import vtk
import argparse
import iris
import numpy
import math
iris.FUTURE.netcdf_promote = True
parser = argparse.ArgumentParser(description='Plot grid in 3d')
parser.add_argument('--src_file', type=str, dest='src_file', default='coords_CF_ORCA12_GO6-2.nc',
help='Source data file name')
parser.add_argument('--dst_file', type=str, dest='dst_file', default='dst.nc',
help='Destination data file name')
parser.add_argument('--src_field', type=str, dest='src_field', default='ocndept',
                    help='Source data field name')
parser.add_argument('--dst_field', type=str, dest='dst_field', default='pointData',
help='Destination data field name')
args = parser.parse_args()
def readCube(filename, fieldname):
cube = None
cubes = iris.load(filename)
for cb in cubes:
if cb.var_name == fieldname:
cube = cb
return cube
def createPipeline(cube, pngfile, color=(1.,1.,1.), radius=1.0, show_mesh_as_surface=False, nlines=10):
n0, n1 = cube.data.shape
numPoints = n0 * n1
sg = vtk.vtkStructuredGrid()
pt = vtk.vtkPoints()
#pt.SetNumberOfPoints(numPoints)
coords = cube.coords()
lats = coords[0].points
lons = coords[1].points
steps0 = n0 // nlines
steps1 = n1 // nlines
k = 0
for i1 in range(0, n1, steps1):
for i0 in range(0, n0, steps0):
x = radius * math.cos(lats[i0, i1] * math.pi/180.) * math.cos(lons[i0, i1] * math.pi/180.)
y = radius * math.cos(lats[i0, i1] * math.pi/180.) * math.sin(lons[i0, i1] * math.pi/180.)
z = radius * math.sin(lats[i0, i1] * math.pi/180.)
pt.InsertPoint(k, x, y, z)
k += 1
sg = vtk.vtkStructuredGrid()
    sg.SetDimensions(1, len(range(0, n0, steps0)), len(range(0, n1, steps1)))  # dimensions must match the number of points actually inserted
sg.SetPoints(pt)
mp, ac = None, None
# show mesh as a surface
if show_mesh_as_surface:
mp = vtk.vtkDataSetMapper()
mp.SetInputData(sg)
ac = vtk.vtkActor()
ac.SetMapper(mp)
# show the grid as tubes
ed = vtk.vtkExtractEdges()
et = vtk.vtkTubeFilter()
em = vtk.vtkPolyDataMapper()
ea = vtk.vtkActor()
et.SetRadius(0.01)
ed.SetInputData(sg)
et.SetInputConnection(ed.GetOutputPort())
em.SetInputConnection(et.GetOutputPort())
ea.SetMapper(em)
ea.GetProperty().SetColor(color)
return [ea], em, et, ed, mp, sg
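# Editor's note (illustrative values only): in the lat/lon conversion above, a grid point at
# lat = 0, lon = 90 with radius = 1 maps to (x, y, z) = (0, 1, 0); drawing the source grid at
# radius 0.99 and the destination grid at 1.05 keeps the two spherical shells visually separate.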
def render(actors):
# rendering stuff
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
for a in actors:
if a is not None:
renderer.AddActor(a)
renderer.SetBackground(.0, .0, .0)
renderWindow.Render()
renderWindowInteractor.Start()
src_cube = readCube(args.src_file, args.src_field)
src_pipeline = createPipeline(src_cube, pngfile='src.png', color=(0.5, 0.5, 0.5), radius=0.99, show_mesh_as_surface=False)
#dst_cube = readCube(args.dst_file, args.dst_field)
#dst_pipeline = createPipeline(dst_cube, pngfile='dst.png', color=(1.0, 0.0, 0.0), radius=1.05)
render(src_pipeline[0]) # + dst_pipeline[0])
|
[
"pletzera@niwa-1007520.niwa.local"
] |
pletzera@niwa-1007520.niwa.local
|
3e10823cddf7156502afb121f4d4ca37fa2d458b
|
ba57226113a8a9c73af79b6d6033769299c7f05b
|
/setup_bgmapi.py
|
a1f053b8079db1077148ab913813573a0bdbf5e3
|
[
"MIT",
"BSD-2-Clause"
] |
permissive
|
wattlebird/Bangumi_Spider
|
fe0e8f95b37f89510d4053ba1fd1b3193faebf7b
|
61ede2551e697f6851ab51b357b843e4e61f4afa
|
refs/heads/master
| 2023-08-02T17:32:03.277992
| 2023-07-27T13:35:28
| 2023-07-27T13:35:28
| 26,780,910
| 27
| 7
|
BSD-2-Clause
| 2022-10-29T12:15:24
| 2014-11-17T22:42:45
|
Python
|
UTF-8
|
Python
| false
| false
| 207
|
py
|
from setuptools import setup, find_packages
setup(
name = 'project',
version = '1.0',
packages = find_packages(),
entry_points = {'scrapy': ['settings = bgmapi.settings']},
)
|
[
"geniusxiaoguai@gmail.com"
] |
geniusxiaoguai@gmail.com
|
a9f68c3ba6d77e4afa5766a4916619102fbe1640
|
0eb0535abcf8c1a70aacad76a031992bfd0bc745
|
/Intervals/Intervals.py
|
4d16cfd32e0ceca0f38d302df51c7d22a814e9ee
|
[] |
no_license
|
gyllbane99/CS313E
|
eb2d35166e121cec2800dc6e3f54a5d9ab6098d0
|
715d1d8e1579d20edc691c3f4235f98ce5a8df78
|
refs/heads/master
| 2020-09-26T07:14:34.328646
| 2020-04-15T17:59:01
| 2020-04-15T17:59:01
| 226,200,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,276
|
py
|
# File: Intervals.py
# Description: This program takes intervals from a text file, orders them and then collapses the intervals that overlaps.
# Student Name: Brock Brennan
# Student UT EID: btb989
# Course Name: CS 313E
# Unique Number: 50205
# Date Created: 9/8/2019
# Date Last Modified: 9/9/2018
def readfile():
#Reads file and removes new line character
file = open("Intervals.txt", "r")
intervalslist = file.readlines()
intervalslist = [x.replace('\n', '') for x in intervalslist ]
    intervalslist.sort()
return intervalslist
def converttotuple(intervalslist):
#Converts items of the list into tuples and splits them using the space in the middle.
intervalslisttuple = []
for item in intervalslist:
smallint = []
x = item.split(" ")
firstnumber = int(x[0])
secondnumber = int(x[1])
smallint.append(firstnumber)
smallint.append(secondnumber)
x = tuple(smallint)
intervalslisttuple.append(x)
return intervalslisttuple
def collapsetheintervals(intervalslist):
# Takes each tuple piece by piece and compares the values then files them into the final list for printing.
collapsedlist = []
for tup in intervalslist:
if collapsedlist:
lower = collapsedlist[-1]
if tup[0] <= lower[1]:
highest = max(lower[1], tup[1])
collapsedlist[-1] = (lower[0], highest)
else:
collapsedlist.append(tup)
else:
collapsedlist.append(tup)
return collapsedlist
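# Editor's note (illustrative only): given the sorted tuples [(1, 4), (2, 6), (8, 10)],
# (2, 6) overlaps the running interval (1, 4) and the two merge into (1, 6); (8, 10) starts
# after 6, so it is appended unchanged, giving [(1, 6), (8, 10)].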
def printfinalList(collapsedlist):
#Prints the intervals out
print("Non-intersecting Intervals:")
for tup in (collapsedlist):
print((tup),end="\n")
def main():
#Read all of the intervals off of the file and then set them in a list
intervals = readfile()
#Convert the elements of the lists to ints and then place them into tuples.
tup = converttotuple(intervals)
#Sort the Tuple List
tup.sort()
#Collapse the possible intervals
collapsed = collapsetheintervals(tup)
#Prints out the intervals in proper format
printfinalList(collapsed)
main()
|
[
"noreply@github.com"
] |
gyllbane99.noreply@github.com
|
2b7790cc834c668d9144482a4c26723262e9a0c1
|
906448c0fd113471f19b56f549387a8c2b4123cf
|
/tienda_zapatos/app/migrations/0001_initial.py
|
b868fecaf001b284608acbebe1cdc366ebba4df3
|
[] |
no_license
|
usac201504229/proyectos_980
|
c79158556d12dfb0791f81b3f29e7b7c92fca8f6
|
243c730989ab57571e42b87b2f0351d8cea72c3f
|
refs/heads/master
| 2023-01-24T20:50:35.691650
| 2020-11-19T03:29:55
| 2020-11-19T03:29:55
| 312,479,527
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,165
|
py
|
# Generated by Django 3.0 on 2020-11-12 19:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Marca',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Producto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=50)),
('precio', models.ImageField(upload_to='')),
('descripcion', models.TimeField()),
('nuevo', models.BooleanField()),
('fecha_fabricacion', models.DateField()),
('marca', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='app.Marca')),
],
),
]
|
[
"2468064780101@ingenieria.usac.edu.gt"
] |
2468064780101@ingenieria.usac.edu.gt
|
c89d6f6dc63c1dd1ed236bd4f6ae25a485a2197e
|
2029d339e0e45ae37b814dbbfca8c8b3b1876f52
|
/users/views.py
|
703aead4fac582774698e3f73c68f6d80484393b
|
[] |
no_license
|
lijiancheng0614/Emoji
|
e1f2fbc9b141c38ebf6a38a2733637e55db69723
|
143d069f681f62f52c6f577fd6a7503b957dfeab
|
refs/heads/master
| 2022-08-31T17:17:31.452666
| 2022-08-09T02:25:49
| 2022-08-09T02:25:49
| 130,447,575
| 1
| 1
| null | 2022-08-09T02:25:50
| 2018-04-21T06:43:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,819
|
py
|
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Permission
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.template.response import TemplateResponse
from django.utils.functional import lazy
from django.views.generic import CreateView
from .forms import UserCreationForm
from users.models import User
reverse_lazy = lambda name=None, *args: lazy(reverse, str)(name, args=args)
class CreateUser(CreateView):
template_name = 'users/register.html'
model = User
form_class = UserCreationForm
success_url = reverse_lazy('core:recent-pins')
def get(self, request, *args, **kwargs):
if not settings.ALLOW_NEW_REGISTRATIONS:
messages.error(request, "The admin of this service is not allowing new registrations.")
return HttpResponseRedirect(reverse('core:recent-pins'))
return super(CreateUser, self).get(request, *args, **kwargs)
def form_valid(self, form):
redirect = super(CreateUser, self).form_valid(form)
permissions = Permission.objects.filter(codename__in=['add_pin', 'add_image'])
user = authenticate(username=form.cleaned_data['username'],
password=form.cleaned_data['password'])
user.user_permissions = permissions
login(self.request, user)
return redirect
@login_required
def logout_user(request):
logout(request)
messages.success(request, 'You have successfully logged out.')
return HttpResponseRedirect(reverse('core:recent-pins'))
def private(request):
return TemplateResponse(request, 'users/private.html', None)
|
[
"lijiancheng0614@gmail.com"
] |
lijiancheng0614@gmail.com
|
a201377ec8cb7c619f536ed48d881da13e7b47a6
|
3fb042540a62670300a668ab79381e7cc7739a5e
|
/src/ex00.py
|
223147f9da5b9bd8c7360eb7692fee7355ea539e
|
[
"MIT"
] |
permissive
|
maberf/python
|
830a18986a86cb6da05a991c4886dbc9685ce60a
|
8c72074e4c6bd631c524d79b443428b20460b8d8
|
refs/heads/master
| 2023-04-18T20:40:55.664409
| 2023-04-01T23:48:34
| 2023-04-01T23:48:34
| 295,516,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 49
|
py
|
import sys
print(sys.path)
print('Olá Dummie!')
|
[
"71026050+maberf@users.noreply.github.com"
] |
71026050+maberf@users.noreply.github.com
|
59aeba235b6cae5ff20d5ed769307e56effba754
|
2dd26e031162e75f37ecb1f7dd7f675eeb634c63
|
/nemo/collections/common/metrics/perplexity.py
|
9e1c21737ec8caea46c8850c32dd23fe499f6394
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/NeMo
|
1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1
|
c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7
|
refs/heads/main
| 2023-08-21T15:28:04.447838
| 2023-08-21T00:49:36
| 2023-08-21T00:49:36
| 200,722,670
| 7,957
| 1,986
|
Apache-2.0
| 2023-09-14T18:49:54
| 2019-08-05T20:16:42
|
Python
|
UTF-8
|
Python
| false
| false
| 3,420
|
py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.distributions.categorical import Categorical
from torchmetrics import Metric
__all__ = ['Perplexity']
class Perplexity(Metric):
"""
This class computes mean perplexity of distributions in the last dimension of inputs. It is a wrapper around
:doc:`torch.distributions.Categorical.perplexity<pytorch:distributions>` method. You have to provide either
``probs`` or ``logits`` to the :meth:`update` method. The class computes perplexities for distributions passed to
:meth:`update` method in ``probs`` or ``logits`` arguments and averages the perplexities. Reducing results between
all workers is done via SUM operations.
See `PyTorch Lightning Metrics <https://pytorch-lightning.readthedocs.io/en/stable/ecosystem/metrics.html>`_ for the metric usage instructions.
Args:
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step.
process_group:
Specify the process group on which synchronization is called. default: ``None`` (which selects the entire
world)
validate_args:
If ``True`` values of :meth:`update` method parameters are checked. ``logits`` has to not contain NaNs and
``probs`` last dim has to be valid probability distribution.
"""
full_state_update = True
def __init__(self, dist_sync_on_step=False, process_group=None, validate_args=True):
super().__init__(dist_sync_on_step=dist_sync_on_step, process_group=process_group)
self.validate_args = validate_args
self.add_state('perplexities_sum', torch.tensor(0.0, dtype=torch.float64), dist_reduce_fx='sum')
# Total number of distributions seen since last reset
self.add_state('num_distributions', torch.tensor(0, dtype=torch.int64), dist_reduce_fx='sum')
def update(self, probs=None, logits=None):
"""
Updates :attr:`perplexities_sum` and :attr:`num_distributions`.
Args:
probs: A ``torch.Tensor`` which innermost dimension is valid probability distribution.
logits: A ``torch.Tensor`` without NaNs.
"""
d = Categorical(
None if probs is None else probs.detach(),
None if logits is None else logits.detach(),
validate_args=self.validate_args,
)
ppl = d.perplexity()
self.num_distributions += ppl.numel()
self.perplexities_sum += ppl.sum()
def compute(self):
"""
Returns perplexity across all workers and resets to 0 :attr:`perplexities_sum` and :attr:`num_distributions`.
"""
if self.num_distributions.eq(0):
return None
return self.perplexities_sum / self.num_distributions
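if __name__ == '__main__':
    # Minimal usage sketch; the tensor shape (batch=2, seq_len=8, vocab=30) is an
    # illustrative assumption, not something prescribed by this module.
    metric = Perplexity()
    metric.update(logits=torch.randn(2, 8, 30))
    print(metric.compute())  # mean perplexity over the 16 distributions seen so far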
|
[
"noreply@github.com"
] |
NVIDIA.noreply@github.com
|
c33906ce3a52e69dcca086bc09ff33f8783031e0
|
60bd7af45fd53c20930e79961dd2fc8c47e67328
|
/LC/30daysChallenge/week3/ConstructBSTFromPreorder.py
|
4252c7dfa0686a5c3eca9fe014e11f825b505c41
|
[] |
no_license
|
albertopha/ds-algo
|
36d6a3c20f4a52b096c8aec276355ae28a770a60
|
51b379b1be406cd27c92f26f44633c87360c7de3
|
refs/heads/master
| 2022-12-27T06:27:42.352855
| 2022-12-24T01:44:48
| 2022-12-24T01:44:48
| 181,027,160
| 2
| 0
| null | 2021-03-14T18:01:12
| 2019-04-12T14:44:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def bstFromPreorder(self, preorder: List[int]) -> TreeNode:
if len(preorder) == 0:
return
root = TreeNode(preorder[0])
self.findChildren(root, preorder, 1, len(preorder)-1)
return root
def findChildren(self, root: TreeNode, preorder: List[int], start: int, end: int) -> None:
if start > end or not root:
return
root_val = root.val
new_start = start
new_end = end+1
for i in range(start, end+1):
if root_val > preorder[i]:
root.left = TreeNode(preorder[i])
new_start = i
break
for i in range(start, end+1):
if root_val < preorder[i]:
root.right = TreeNode(preorder[i])
new_end = i
break
if root.left:
self.findChildren(root.left, preorder, new_start+1, new_end-1)
if root.right:
self.findChildren(root.right, preorder, new_end+1, end)
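# Minimal usage sketch, assuming the judge-provided TreeNode and List definitions
# from the commented header above (values are illustrative only):
#   root = Solution().bstFromPreorder([8, 5, 1, 7, 10, 12])
#   root.val, root.left.val, root.right.val   # -> 8, 5, 10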
|
[
"albertopha@hotmail.com"
] |
albertopha@hotmail.com
|
57727b270f34c3828bb09c4ccb03db3ef86953b9
|
55628a9a08a6b6646b4a8aa74bedbf2e3fd7d850
|
/.history/test_20200123001430.py
|
039d1a2e7afe254c5c708d761701cc99e526961b
|
[] |
no_license
|
StRobertCHSCS/final-project-team
|
c115dc11b318f7ac782c94860a8801bb558bd107
|
48907e72813c4dd3b48ff36f794f6fce04533219
|
refs/heads/master
| 2020-12-03T22:35:37.833893
| 2020-01-31T04:05:38
| 2020-01-31T04:05:38
| 231,506,873
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,208
|
py
|
# from time import ctime
# current_time = ctime()
# # with open('scoress.txt', 'r+') as high_score_file:
# # high_scores = [0, 0, 0, 0, 0]
# # for item in high_scores:
# # high_score_file.write(str(high_scores[item]) + "\n")
# # high_score_file.close()
# def replace_line(line_num, text):
# lines = open('scoress.txt', 'r').readlines()
# if len(lines)> 0:
# lines[0] = text
# else:
# lines = text
# lines.close()
# out = open('scoress.txt', 'w')
# out.write(lines + "\n")
# out.close()
# high_score = 0
# with open('scoress.txt', 'r+') as high_score_file:
# current_score = (input("high score?" ))
# high_score = high_score_file.readline()
# print(high_score)
# if current_score > high_score:
# sentence = (" High score of " + str(current_score) + " at " + current_time + "\n")
# high_score_file.write(sentence)
# # with open('scoress.txt', 'r+') as high_score_file:
# # for i in range (7):
# # print(high_score_file.readline(i))
# # print ("--")
# # for line in high_score_file:
# # print (line)
# '''
# import json
# with open("score.json", "r+") as foo:
# for i in range (3):
# score = (input("score; "))
# json.dump(score + "\n", foo)
# #if score > int(foo.readline()):
# '''
"""
Show how to use exceptions to save a high score for a game.
Sample Python/Pygame Programs
Simpson College Computer Science
http://programarcadegames.com/
http://simpson.edu/computer-science/
"""
def get_high_score():
# Default high score
high_score = 0
# Try to read the high score from a file
try:
high_score_file = open("scoress.txt", "r")
high_score = int(high_score_file.read())
high_score_file.close()
print("The high score is", high_score)
except IOError:
# Error reading file, no high score
print("There is no high score yet.")
except ValueError:
# There's a file there, but we don't understand the number.
print("I'm confused. Starting with no high score.")
return high_score
def save_high_score(new_high_score):
try:
# Write the file to disk
high_score_file = open("scoress.txt", "w")
high_score_file.write(str(new_high_score))
high_score_file.close()
except IOError:
# Hm, can't write it.
print("Unable to save the high score.")
def main():
""" Main program is here. """
# Get the high score
high_score = get_high_score()
# Get the score from the current game
current_score = 0
try:
# Ask the user for his/her score
current_score = int(input("What is your score? "))
except ValueError:
# Error, can't turn what they typed into a number
print("I don't understand what you typed.")
# See if we have a new high score
if current_score > high_score:
# We do! Save to disk
print("Yea! New high score!")
save_high_score(current_score)
else:
print("Better luck next time.")
# Call the main function, start up the game
if __name__ == "__main__":
main()
|
[
"clementina1023@gmail.com"
] |
clementina1023@gmail.com
|
906896361bd2865a171b902df5c155d80d72b068
|
354e921858ad0fdcefa683484d6b7d16c39e9eaa
|
/pyfair/model/model_calc.py
|
05e51d5a712decf770f917aa20052d477413f357
|
[
"MIT"
] |
permissive
|
welshie1/pyfair
|
27936f7a92e9e16c567fc11f3458a0ce83bcf136
|
ca8aae80ca8a89bbfb8d435b480c5985e95efdff
|
refs/heads/master
| 2020-06-12T19:57:42.615948
| 2019-06-29T13:33:40
| 2019-06-29T13:33:40
| 194,408,562
| 0
| 0
| null | 2019-06-29T13:26:50
| 2019-06-29T13:26:50
| null |
UTF-8
|
Python
| false
| false
| 1,592
|
py
|
class FairCalculations(object):
'''A class to perform calculations'''
# TODO confirm accuracy of these calculations.
def __init__(self):
self._data = None
# Lookup table (no leaf nodes required)
self._function_dict = {
'Risk' : self._calculate_multiplication,
'Loss Event Frequency' : self._calculate_multiplication,
'Threat Event Frequency' : self._calculate_multiplication,
'Vulnerability' : self._calculate_step,
'Loss Magnitude' : self._calculate_addition,
'Primary Loss' : self._calculate_multiplication,
'Secondary Loss' : self._calculate_multiplication,
}
def calculate(self, parent_name, child_1_data, child_2_data):
'''General function for dispatching calculations'''
target_function = self._function_dict[parent_name]
calculated_result = target_function(parent_name, child_1_data, child_2_data)
return calculated_result
def _calculate_step(self, parent_name, child_1_data, child_2_data):
'''Create a bool series, which can be multiplied as 0/1 value'''
return child_1_data > child_2_data
def _calculate_addition(self, parent_name, child_1_data, child_2_data):
'''Calculate sum of two columns'''
return child_1_data + child_2_data
def _calculate_multiplication(self, parent_name, child_1_data, child_2_data):
'''Calculate product of two columns'''
return child_1_data * child_2_data
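if __name__ == '__main__':
    # Minimal usage sketch; the pandas Series inputs and their values are assumed
    # purely to illustrate how the lookup table dispatches a calculation.
    import pandas as pd
    calc = FairCalculations()
    loss_event_frequency = pd.Series([0.5, 1.0, 2.0])
    loss_magnitude = pd.Series([100.0, 250.0, 400.0])
    # 'Risk' dispatches to the multiplication rule: an element-wise product.
    print(calc.calculate('Risk', loss_event_frequency, loss_magnitude))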
|
[
"theonaunheim@gmail.com"
] |
theonaunheim@gmail.com
|
a01ecbdc9503afd1d6c1a8eb5afb1c1480e39032
|
f03193db41bfa790d07b617d9a65c787cbff780c
|
/app01/stark.py
|
99d3df297887afec9d230dc51c57d1d567c2bf2a
|
[] |
no_license
|
w7374520/starkall
|
d1e9cbc1df7be910127e1272e9f19068222937e5
|
c455f8e12629dc5e7abfe1b8aa5b4ee126415cde
|
refs/heads/master
| 2020-05-18T06:09:15.323179
| 2019-04-15T06:32:50
| 2019-04-15T06:32:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 721
|
py
|
from stark.service.sites import site,ModelStark
from django.utils.safestring import mark_safe
from app01.models import Book, Publish, Author, AuthorDetail
from django.shortcuts import HttpResponse, render, redirect
class BookConfig(ModelStark):
list_display = ["title", "price", "publish", "authors"]
list_display_links = ["title", "publish", "authors"]
search_fields = ["title", "price"]
list_filter = ["publish", "authors"]
    # batch action (applies to all selected rows)
def patch_init(self, request, queryset):
queryset.update(price=100)
    patch_init.desc = "价格初始化"  # display label: "Price initialization"
actions = [patch_init]
site.register(Book, BookConfig)
site.register(Publish)
site.register(Author)
site.register(AuthorDetail)
|
[
"1079264692@qq.com"
] |
1079264692@qq.com
|
eb95018d472eadb3cd500298d47b6c2f17952a99
|
1e696cfacf608326dd8efe60126bc0a3bab25750
|
/supervisor/store/data.py
|
b810493bb3af9943b92a576565e854ed62f91218
|
[
"Apache-2.0"
] |
permissive
|
KiraD5268/supervisor
|
8b3105d6af090653156be72e0d39f65d922e0be1
|
1bf38bdc999f8430cc32b2a3bad6fea927ba3d91
|
refs/heads/main
| 2023-01-12T14:19:00.324792
| 2020-11-19T08:24:40
| 2020-11-19T08:24:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,483
|
py
|
"""Init file for Supervisor add-on data."""
import logging
from pathlib import Path
from typing import Any, Dict
import voluptuous as vol
from voluptuous.humanize import humanize_error
from ..addons.validate import SCHEMA_ADDON_CONFIG
from ..const import (
ATTR_LOCATON,
ATTR_REPOSITORY,
ATTR_SLUG,
REPOSITORY_CORE,
REPOSITORY_LOCAL,
)
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import JsonFileError
from ..resolution.const import ContextType, IssueType
from ..utils.json import read_json_file
from .utils import extract_hash_from_path
from .validate import SCHEMA_REPOSITORY_CONFIG
_LOGGER: logging.Logger = logging.getLogger(__name__)
class StoreData(CoreSysAttributes):
"""Hold data for Add-ons inside Supervisor."""
def __init__(self, coresys: CoreSys):
"""Initialize data holder."""
self.coresys: CoreSys = coresys
self.repositories: Dict[str, Any] = {}
self.addons: Dict[str, Any] = {}
def update(self):
"""Read data from add-on repository."""
self.repositories.clear()
self.addons.clear()
# read core repository
self._read_addons_folder(self.sys_config.path_addons_core, REPOSITORY_CORE)
# read local repository
self._read_addons_folder(self.sys_config.path_addons_local, REPOSITORY_LOCAL)
# add built-in repositories information
self._set_builtin_repositories()
# read custom git repositories
for repository_element in self.sys_config.path_addons_git.iterdir():
if repository_element.is_dir():
self._read_git_repository(repository_element)
def _read_git_repository(self, path):
"""Process a custom repository folder."""
slug = extract_hash_from_path(path)
# exists repository json
repository_file = Path(path, "repository.json")
try:
repository_info = SCHEMA_REPOSITORY_CONFIG(read_json_file(repository_file))
except JsonFileError:
_LOGGER.warning(
"Can't read repository information from %s", repository_file
)
return
except vol.Invalid:
_LOGGER.warning("Repository parse error %s", repository_file)
return
# process data
self.repositories[slug] = repository_info
self._read_addons_folder(path, slug)
def _read_addons_folder(self, path, repository):
"""Read data from add-ons folder."""
try:
# Generate a list without artefact, safe for corruptions
addon_list = [
addon
for addon in path.glob("**/config.json")
if ".git" not in addon.parts
]
except OSError as err:
self.sys_resolution.create_issue(
IssueType.CORRUPT_REPOSITORY, ContextType.SYSTEM
)
_LOGGER.critical(
"Can't process %s because of Filesystem issues: %s", repository, err
)
self.sys_capture_exception(err)
return
for addon in addon_list:
try:
addon_config = read_json_file(addon)
except JsonFileError:
_LOGGER.warning("Can't read %s from repository %s", addon, repository)
continue
# validate
try:
addon_config = SCHEMA_ADDON_CONFIG(addon_config)
except vol.Invalid as ex:
_LOGGER.warning(
"Can't read %s: %s", addon, humanize_error(addon_config, ex)
)
continue
# Generate slug
addon_slug = f"{repository}_{addon_config[ATTR_SLUG]}"
# store
addon_config[ATTR_REPOSITORY] = repository
addon_config[ATTR_LOCATON] = str(addon.parent)
self.addons[addon_slug] = addon_config
def _set_builtin_repositories(self):
"""Add local built-in repository into dataset."""
try:
builtin_file = Path(__file__).parent.joinpath("built-in.json")
builtin_data = read_json_file(builtin_file)
except JsonFileError:
_LOGGER.warning("Can't read built-in json")
return
# core repository
self.repositories[REPOSITORY_CORE] = builtin_data[REPOSITORY_CORE]
# local repository
self.repositories[REPOSITORY_LOCAL] = builtin_data[REPOSITORY_LOCAL]
|
[
"noreply@github.com"
] |
KiraD5268.noreply@github.com
|
d032f87c76a40aec68f5ae53e43964a0d195e85e
|
bde0a0efad2c3e117f4ca01dcc8a7001725c800d
|
/src/annalist_root/annalist/views/site.py
|
1357583a187ac6e1f1a62b0cf740c3b7b6fd2d0f
|
[
"MIT"
] |
permissive
|
josemanuelgp/annalist
|
2eb0e1535214121acd3369dde00479b368f673fe
|
f3324ea457dfbde1cea6a1df301946dc92cdaaf3
|
refs/heads/master
| 2021-01-17T03:40:39.422881
| 2015-01-31T14:09:37
| 2015-01-31T14:25:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,820
|
py
|
"""
Annalist site views
"""
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
from django.conf import settings
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.core.urlresolvers import resolve, reverse
from annalist.identifiers import ANNAL, RDFS
from annalist.exceptions import Annalist_Error, EntityNotFound_Error
from annalist import message
from annalist import util
from annalist.models.site import Site
from annalist.views.displayinfo import DisplayInfo
from annalist.views.generic import AnnalistGenericView
from annalist.views.confirm import ConfirmView
class SiteView(AnnalistGenericView):
"""
View class to handle requests to the annalist site home URI
"""
def __init__(self):
super(SiteView, self).__init__()
self.help = "site-help"
return
# GET
def get(self, request):
"""
Create a rendering of the current site home page, containing (among other things)
a list of defined collections.
"""
viewinfo = DisplayInfo(self, "view")
viewinfo.get_site_info(self.get_request_host())
viewinfo.check_authorization("view")
if viewinfo.http_response:
return viewinfo.http_response
resultdata = viewinfo.sitedata
resultdata.update(viewinfo.context_data())
# log.info("SiteView.get: site_data %r"%(self.site_data()))
return (
self.check_site_data() or
self.render_html(resultdata, 'annalist_site.html') or
self.error(self.error406values())
)
# POST
def post(self, request):
"""
Process options to add or remove a collection in an Annalist site
"""
log.debug("site.post: %r"%(request.POST.lists()))
if "remove" in request.POST:
collections = request.POST.getlist("select", [])
if collections:
# Get user to confirm action before actually doing it
auth_required = (
self.authorize("ADMIN", None) and # either of these..
self.authorize("DELETE_COLLECTION", None)
)
return (
auth_required or
ConfirmView.render_form(request,
action_description= message.REMOVE_COLLECTIONS%{'ids': ", ".join(collections)},
action_params= request.POST,
confirmed_action_uri= self.view_uri('AnnalistSiteActionView'),
cancel_action_uri= self.view_uri('AnnalistSiteView'),
title= self.site_data()["title"]
)
)
else:
return self.redirect_info(
self.view_uri("AnnalistSiteView"),
info_message=message.NO_COLLECTIONS_SELECTED, info_head=message.NO_ACTION_PERFORMED
)
if "new" in request.POST:
# Create new collection with name and label supplied
new_id = request.POST["new_id"]
new_label = request.POST["new_label"]
log.debug("New collection %s: %s"%(new_id, new_label))
if not new_id:
return self.redirect_error(
self.view_uri("AnnalistSiteView"),
error_message=message.MISSING_COLLECTION_ID
)
if not util.valid_id(new_id):
return self.redirect_error(
self.view_uri("AnnalistSiteView"),
error_message=message.INVALID_COLLECTION_ID%{'coll_id': new_id}
)
# Create new collection with name and label supplied
auth_required = (
self.authorize("ADMIN", None) and # either of these..
self.authorize("CREATE_COLLECTION", None)
)
if auth_required:
return auth_required
coll_meta = (
{ RDFS.CURIE.label: new_label
, RDFS.CURIE.comment: ""
})
coll = self.site().add_collection(new_id, coll_meta)
# Create full permissions in new collection for creating user
user = self.request.user
user_id = user.username
user_uri = "mailto:"+user.email
user_name = "%s %s"%(user.first_name, user.last_name)
user_description = "User %s: permissions for %s in collection %s"%(user_id, user_name, new_id)
coll.create_user_permissions(
user_id, user_uri,
user_name, user_description,
user_permissions=["VIEW", "CREATE", "UPDATE", "DELETE", "CONFIG", "ADMIN"]
)
return self.redirect_info(
self.view_uri("AnnalistSiteView"),
info_message=message.CREATED_COLLECTION_ID%{'coll_id': new_id}
)
return self.error(self.error400values())
class SiteActionView(AnnalistGenericView):
"""
View class to perform completion of confirmed action requested from site view
"""
def __init__(self):
super(SiteActionView, self).__init__()
return
# POST
def post(self, request):
"""
Process options to complete action to add or remove a collection
"""
log.debug("siteactionview.post: %r"%(request.POST))
if "remove" in request.POST:
log.debug("Complete remove %r"%(request.POST.getlist("select")))
auth_required = (
self.authorize("ADMIN", None) and # either of these..
self.authorize("DELETE_COLLECTION", None)
)
if auth_required:
return auth_required
coll_ids = request.POST.getlist("select")
for coll_id in coll_ids:
err = self.site().remove_collection(coll_id)
if err:
return self.redirect_error(
self.view_uri("AnnalistSiteView"),
error_message=str(err))
return self.redirect_info(
self.view_uri("AnnalistSiteView"),
info_message=message.COLLECTIONS_REMOVED%{'ids': ", ".join(coll_ids)}
)
else:
return self.error(self.error400values())
return HttpResponseRedirect(self.view_uri("AnnalistSiteView"))
# End.
|
[
"gk-github@ninebynine.org"
] |
gk-github@ninebynine.org
|
b0fb5f97fa751d8b36016df8af5b05eb14efb3bc
|
aeabc9a7b3e8d67fba5e6f8933a96b8a4bb51d2e
|
/camp/opus/track.py
|
d062c9c842f4d807c4608236556ae0c0b93d9887
|
[
"Apache-2.0"
] |
permissive
|
YixuanFranco/camp
|
7926defa85c48aa1fef6481e8f6ba0019a6c2856
|
e2d1ecb57fbbd337028d48239bf20e2cdf8f65d4
|
refs/heads/master
| 2020-12-25T11:58:23.401292
| 2016-02-04T16:46:03
| 2016-02-04T16:46:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,279
|
py
|
"""
Copyright 2016, Michael DeHaan <michael.dehaan@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# see tests/opus.py for how all this works.
#
# reminder example:
#
# track1 = song.add_track(Track(name="melodica", midi_channel=1))
class Track(object):
def __init__(self, song=None, name=None, midi_channel=None):
self.name = name
self.song = song
self.midi_channel = midi_channel
assert isinstance(self.name, str)
# CAMP doesn't *HAVE* to be used for MIDI, but I'd prefer these get
# checked anyway - if not doing MIDI, just make up a channel number.
# Channel numbers do *NOT* have to be unique between tracks, but we'll
# avoid setting a default to avoid surprises.
assert type(self.midi_channel) == int
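if __name__ == "__main__":
    # Minimal usage sketch; Song is assumed to live elsewhere in camp.opus, so only
    # a bare Track is constructed here.
    melodica = Track(name="melodica", midi_channel=1)
    print(melodica.name, melodica.midi_channel)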
|
[
"michael.dehaan@gmail.com"
] |
michael.dehaan@gmail.com
|
d302df19f553b76825f1950985d688173622b927
|
a90ea1a3f103b1dbb8e6132f3f2eb20c48db4671
|
/Hurricane project.py
|
2d786faf0597edaf608bbbc139f1995dd97ad6ff
|
[] |
no_license
|
badinplaid737/Codecademy-Projects
|
2f9618ad3138b472822a256df46de441a35da135
|
a786eb155b03a460f4adbecd7e51fd84c20a29e3
|
refs/heads/main
| 2023-08-18T21:33:40.574439
| 2021-10-04T23:13:43
| 2021-10-04T23:13:43
| 405,109,893
| 0
| 0
| null | 2021-09-21T21:35:32
| 2021-09-10T14:32:07
|
Python
|
UTF-8
|
Python
| false
| false
| 10,473
|
py
|
import pandas as pd
import numpy as np
# names of hurricanes
names = ['Cuba I', 'San Felipe II Okeechobee', 'Bahamas', 'Cuba II', 'CubaBrownsville', 'Tampico', 'Labor Day', 'New England', 'Carol', 'Janet', 'Carla', 'Hattie', 'Beulah', 'Camille', 'Edith', 'Anita', 'David', 'Allen', 'Gilbert', 'Hugo', 'Andrew', 'Mitch', 'Isabel', 'Ivan', 'Emily', 'Katrina', 'Rita', 'Wilma', 'Dean', 'Felix', 'Matthew', 'Irma', 'Maria', 'Michael']
# months of hurricanes
months = ['October', 'September', 'September', 'November', 'August', 'September', 'September', 'September', 'September', 'September', 'September', 'October', 'September', 'August', 'September', 'September', 'August', 'August', 'September', 'September', 'August', 'October', 'September', 'September', 'July', 'August', 'September', 'October', 'August', 'September', 'October', 'September', 'September', 'October']
# years of hurricanes
years = [1924, 1928, 1932, 1932, 1933, 1933, 1935, 1938, 1953, 1955, 1961, 1961, 1967, 1969, 1971, 1977, 1979, 1980, 1988, 1989, 1992, 1998, 2003, 2004, 2005, 2005, 2005, 2005, 2007, 2007, 2016, 2017, 2017, 2018]
# maximum sustained winds (mph) of hurricanes
max_sustained_winds = [165, 160, 160, 175, 160, 160, 185, 160, 160, 175, 175, 160, 160, 175, 160, 175, 175, 190, 185, 160, 175, 180, 165, 165, 160, 175, 180, 185, 175, 175, 165, 180, 175, 160]
# areas affected by each hurricane
areas_affected = [['Central America', 'Mexico', 'Cuba', 'Florida', 'The Bahamas'], ['Lesser Antilles', 'The Bahamas', 'United States East Coast', 'Atlantic Canada'], ['The Bahamas', 'Northeastern United States'], ['Lesser Antilles', 'Jamaica', 'Cayman Islands', 'Cuba', 'The Bahamas', 'Bermuda'], ['The Bahamas', 'Cuba', 'Florida', 'Texas', 'Tamaulipas'], ['Jamaica', 'Yucatn Peninsula'], ['The Bahamas', 'Florida', 'Georgia', 'The Carolinas', 'Virginia'], ['Southeastern United States', 'Northeastern United States', 'Southwestern Quebec'], ['Bermuda', 'New England', 'Atlantic Canada'], ['Lesser Antilles', 'Central America'], ['Texas', 'Louisiana', 'Midwestern United States'], ['Central America'], ['The Caribbean', 'Mexico', 'Texas'], ['Cuba', 'United States Gulf Coast'], ['The Caribbean', 'Central America', 'Mexico', 'United States Gulf Coast'], ['Mexico'], ['The Caribbean', 'United States East coast'], ['The Caribbean', 'Yucatn Peninsula', 'Mexico', 'South Texas'], ['Jamaica', 'Venezuela', 'Central America', 'Hispaniola', 'Mexico'], ['The Caribbean', 'United States East Coast'], ['The Bahamas', 'Florida', 'United States Gulf Coast'], ['Central America', 'Yucatn Peninsula', 'South Florida'], ['Greater Antilles', 'Bahamas', 'Eastern United States', 'Ontario'], ['The Caribbean', 'Venezuela', 'United States Gulf Coast'], ['Windward Islands', 'Jamaica', 'Mexico', 'Texas'], ['Bahamas', 'United States Gulf Coast'], ['Cuba', 'United States Gulf Coast'], ['Greater Antilles', 'Central America', 'Florida'], ['The Caribbean', 'Central America'], ['Nicaragua', 'Honduras'], ['Antilles', 'Venezuela', 'Colombia', 'United States East Coast', 'Atlantic Canada'], ['Cape Verde', 'The Caribbean', 'British Virgin Islands', 'U.S. Virgin Islands', 'Cuba', 'Florida'], ['Lesser Antilles', 'Virgin Islands', 'Puerto Rico', 'Dominican Republic', 'Turks and Caicos Islands'], ['Central America', 'United States Gulf Coast (especially Florida Panhandle)']]
# damages (USD($)) of hurricanes
damages = ['Damages not recorded', '100M', 'Damages not recorded', '40M', '27.9M', '5M', 'Damages not recorded', '306M', '2M', '65.8M', '326M', '60.3M', '208M', '1.42B', '25.4M', 'Damages not recorded', '1.54B', '1.24B', '7.1B', '10B', '26.5B', '6.2B', '5.37B', '23.3B', '1.01B', '125B', '12B', '29.4B', '1.76B', '720M', '15.1B', '64.8B', '91.6B', '25.1B']
# deaths for each hurricane
deaths = [90,4000,16,3103,179,184,408,682,5,1023,43,319,688,259,37,11,2068,269,318,107,65,19325,51,124,17,1836,125,87,45,133,603,138,3057,74]
#converting damages list
def convert_damages_data(damages):
"""Convert damages data from string to float and return converted data as a list."""
conversion = {"M": 1000000, "B": 1000000000}
updated_damages =[]
for damage in damages:
if damage.find('M') != -1:
updated_damages.append(float(damage[0:damage.find('M')])*conversion["M"])
elif damage.find('B') != -1:
updated_damages.append(float(damage[0:damage.find('B')])*conversion["B"])
else:
updated_damages.append(damage)
return updated_damages
# update damages data
updated_damages = convert_damages_data(damages)
# 2
# Create a Table
def create_dictionary(names, months, years, max_sustained_winds,areas_affected, updated_damages, deaths):
hurricanes={}
num_of_hurricanes=len(names)
for i in range(num_of_hurricanes):
hurricanes[names[i]]={"Name": names[i], "Month":months[i], "Year":years[i], "Max Sustained Wind": max_sustained_winds[i],"Areas Affected": areas_affected[i], "Damages": updated_damages[i], "Deaths":deaths[i]}
return hurricanes
# Create and view the hurricanes dictionary
hurricanes = create_dictionary(names, months, years, max_sustained_winds, areas_affected, updated_damages, deaths)
# 3
# Organizing by Year
def create_year_directory(hurricanes):
hurricanes_by_year={}
for cane in hurricanes:
current_year=hurricanes[cane]["Year"]
current_cane=hurricanes[cane]
if current_year not in hurricanes_by_year:
hurricanes_by_year[current_year]=[current_cane]
else:
hurricanes_by_year[current_year].append(current_cane)
return hurricanes_by_year
# create a new dictionary of hurricanes with year and key
hurricanes_by_year=create_year_directory(hurricanes)
print(hurricanes_by_year[1932])
# 4
# Counting Damaged Areas
def count_affected_areas(hurricanes):
affected_areas_count={}
for cane in hurricanes:
for area in hurricanes[cane]["Areas Affected"]:
if area not in affected_areas_count:
affected_areas_count[area]=1
else:
affected_areas_count[area]+=1
return affected_areas_count
# create dictionary of areas to store the number of hurricanes involved in
affected_areas_count=count_affected_areas(hurricanes)
# 5
# Calculating Maximum Hurricane Count
def most_affected_areas(affected_areas_count):
max_area="Central America"
max_area_count=0
for area in affected_areas_count:
if affected_areas_count[area]>max_area_count:
max_area=area
max_area_count=affected_areas_count[area]
return max_area, max_area_count
# find most frequently affected area and the number of hurricanes involved in
max_area, max_area_count=most_affected_areas(affected_areas_count)
print(max_area, max_area_count)
# 6
# Calculating the Deadliest Hurricane
def highest_mortality(hurricanes):
max_mortality_cane = 'Cuba I'
max_mortality = 0
for cane in hurricanes:
if hurricanes[cane]['Deaths'] > max_mortality:
max_mortality_cane = cane
max_mortality = hurricanes[cane]['Deaths']
return max_mortality_cane, max_mortality
# find highest mortality hurricane and the number of deaths
max_mortality_cane, max_mortality=highest_mortality(hurricanes)
print(max_mortality_cane, max_mortality)
# 7
# Rating Hurricanes by Mortality
def catagorize_by_mortality(hurricanes):
mortality_scale={0:0, 1:100, 2:500, 3:1000, 4:10000}
hurricanes_by_mortality={0:[], 1:[] ,2:[], 3:[], 4:[], 5:[]}
for cane in hurricanes:
num_deaths=hurricanes[cane]["Deaths"]
if num_deaths==mortality_scale[0]:
hurricanes_by_mortality[0].append(hurricanes[cane])
elif num_deaths>mortality_scale[0] and num_deaths <=mortality_scale[1]:
hurricanes_by_mortality[1].append(hurricanes[cane])
        elif num_deaths>mortality_scale[1] and num_deaths<=mortality_scale[2]:  # 100 < deaths <= 500
hurricanes_by_mortality[2].append(hurricanes[cane])
elif num_deaths>mortality_scale[2]and num_deaths<=mortality_scale[3]:
hurricanes_by_mortality[3].append(hurricanes[cane])
elif num_deaths>mortality_scale[3] and num_deaths<=mortality_scale[4]:
hurricanes_by_mortality[4].append(hurricanes[cane])
else:
hurricanes_by_mortality[5].append(hurricanes[cane])
return hurricanes_by_mortality
# categorize hurricanes in new dictionary with mortality severity as key
hurricanes_by_mortality=catagorize_by_mortality(hurricanes)
print(hurricanes_by_mortality[5])
# 8 Calculating Hurricane Maximum Damage
def maximum_damage(hurricanes):
max_damage_cane="Cuba I"
max_damage=0
for cane in hurricanes:
if hurricanes[cane]["Damages"]=="Damages not recorded":
pass
elif hurricanes[cane]["Damages"]>max_damage:
max_damage_cane=cane
max_damage=hurricanes[cane]["Damages"]
return max_damage_cane, max_damage
# find highest damage inducing hurricane and its total cost
max_damage_cane, max_damage=maximum_damage(hurricanes)
print(max_damage_cane, max_damage)
# 9
# Rating Hurricanes by Damage
def hurricanes_by_damage(hurricanes):
damage_scale = {0: 0, 1: 100000000, 2: 1000000000, 3: 10000000000, 4: 50000000000}
canes_by_damage={0:[], 1:[], 2:[], 3:[], 4:[], 5:[]}
for cane in hurricanes:
tot_damage=hurricanes[cane]["Damages"]
if tot_damage=="Damages not recorded":
canes_by_damage[0].append(hurricanes[cane])
elif tot_damage==damage_scale[0]:
canes_by_damage[0].append(hurricanes[cane])
elif tot_damage > damage_scale[0] and tot_damage <= damage_scale[1]:
canes_by_damage[1].append(hurricanes[cane])
elif tot_damage > damage_scale[1] and tot_damage <= damage_scale[2]:
canes_by_damage[2].append(hurricanes[cane])
elif tot_damage > damage_scale[2] and tot_damage <= damage_scale[3]:
canes_by_damage[3].append(hurricanes[cane])
elif tot_damage > damage_scale[3] and tot_damage <= damage_scale[4]:
canes_by_damage[4].append(hurricanes[cane])
else:
canes_by_damage[5].append(hurricanes[cane])
return canes_by_damage
# categorize hurricanes in new dictionary with damage severity as key
canes_by_damage=hurricanes_by_damage(hurricanes)
print(canes_by_damage[5])
|
[
"noreply@github.com"
] |
badinplaid737.noreply@github.com
|
76ca0659bc2a888ba9d30a93427fa3fbf9282b19
|
af34730d1d0129a4381649d9a63b2966ab496fa1
|
/LAB6/zad1.py
|
009f6724957e42cc9188490e562d5db63d5d77e5
|
[] |
no_license
|
panjacob/Artificial-intelligence
|
e84958d2d94f062edf59034e61df711edc027432
|
378b5244a172104cacb59c041891071c4ae55e91
|
refs/heads/master
| 2023-02-21T10:15:56.834349
| 2021-01-12T12:33:34
| 2021-01-12T12:33:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,042
|
py
|
from LAB6.algorithms import hillclimb, hillclimb_random
from LAB6.functions import sphere_f, booth_f, mc_cormick_f
from LAB6.utilis import avg_plot
results_mc_cormick_f_rand = []
results_booth_f_rand = []
results_sphere_f_rand = []
results_mc_cormick_f_hillclimb = []
results_booth_f_hillclimb = []
results_sphere_f_hillclimb = []
for n in range(0, 20):
results_mc_cormick_f_hillclimb.append(
hillclimb(function=mc_cormick_f, domain_x=(-1.5, 4), domain_y=(-3, 4), attempts=100000,
accuracy_float_points_x=5, accuracy_float_points_y=5))
results_booth_f_hillclimb.append(
hillclimb(function=booth_f, domain_x=(-10, 10), domain_y=(-10, 10), attempts=10000,
accuracy_float_points_x=0, accuracy_float_points_y=0))
results_sphere_f_hillclimb.append(
hillclimb(function=sphere_f, domain_x=(-1000, 1000), domain_y=(1, 100), attempts=100000,
accuracy_float_points_x=1, accuracy_float_points_y=0))
results_mc_cormick_f_rand.append(
hillclimb_random(function=mc_cormick_f, domain_x=(-1.5, 4), domain_y=(-3, 4), attempts=100000,
accuracy_float_points_x=5, accuracy_float_points_y=5))
results_booth_f_rand.append(
hillclimb_random(function=booth_f, domain_x=(-10, 10), domain_y=(-10, 10), attempts=10000,
accuracy_float_points_x=0,
accuracy_float_points_y=0))
results_sphere_f_rand.append(
hillclimb_random(function=sphere_f, domain_x=(-1000, 1000), domain_y=(1, 100), attempts=100000,
accuracy_float_points_x=1, accuracy_float_points_y=0))
avg_plot(results_mc_cormick_f_hillclimb, 'results_mc_cormick_f HILLCLIMB')
avg_plot(results_booth_f_hillclimb, 'results_booth_f HILLCLIMB')
avg_plot(results_sphere_f_hillclimb, 'results_sphere_f HILLCLIMB')
avg_plot(results_mc_cormick_f_rand, 'results_mc_cormick_f RAND')
avg_plot(results_booth_f_rand, 'results_booth_f RAND')
avg_plot(results_sphere_f_rand, 'results_sphere_f RAND')
|
[
"kwiatkowskijakub@protonmail.com"
] |
kwiatkowskijakub@protonmail.com
|
bf3f3144ec9f3583ccdd3b4ee75f9f33fb4b2722
|
91e722e72cde9e913803c9d811afb7e971bcafd5
|
/squisha.py
|
4dec736ea5921757748e811ef977cd849bbbca41
|
[] |
no_license
|
gbishop16/PythonTest
|
ba0b80e619a2322417e6192c5377781afdcd9b85
|
9169608d98229367f4c414b818508032784242f1
|
refs/heads/main
| 2023-04-19T12:49:23.395975
| 2021-04-29T21:28:42
| 2021-04-29T21:28:42
| 358,661,381
| 0
| 0
| null | 2021-04-16T16:38:45
| 2021-04-16T16:38:44
| null |
UTF-8
|
Python
| false
| false
| 1,821
|
py
|
import numpy as np
#Here we are going to do two steps
# 1 - Calculate the Output
# 2 - Compare the output to the expected Output
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def sigmoid_derrivative(x):
return x * (1 - x)
# Grid of training inputs
training_inputs = np.array([
    [1,0,1,1,0],
    [0,1,1,0,1],
    [1,1,0,1,1],
    [0,1,0,0,0],
    [1,1,1,0,0],
    [1,0,0,0,1]
])
# .T transposes the 1 x 6 row into a 6 x 1 column vector
training_outputs = np.array(
[[1,0,1,1,0,0]]
).T
np.random.seed(2)
# How many weights do we have?
# in1 through in5 each need their own weight... so 5!
# Create random weights
synaptic_weights = 2 * np.random.random((5,1)) - 1
#print('Random starting synaptic weights')
#print(synaptic_weights)
for iteration in range(1):
input_layer = training_inputs
# Take each input and multiply it times our weights
unsquished_outputs = np.dot(input_layer, synaptic_weights)
# print('Sum of Weights and Inputs')
# print(unsquished_outputs)
# Squish our result between 0 and 1 by using our normalizing fn
normalized_outputs = sigmoid(unsquished_outputs)
# print('Normalized (Squished) Outputs')
# print(normalized_outputs)
# print('Derrivative of normalized outputs')
# print(sigmoid_derrivative(normalized_outputs))
# 2 - Calc error by checking the training outputs with our calulated ones
error = training_outputs - normalized_outputs
# print('Error')
# print(error)
adjustments = error * sigmoid_derrivative(normalized_outputs)
print('Adjustments')
print(adjustments)
print(input_layer.T)
print(np.dot(input_layer.T, adjustments))
print(synaptic_weights)
synaptic_weights += np.dot(input_layer.T, adjustments)
# print('New Weights')
print('Final synaptic weights')
print(synaptic_weights)
|
[
"gqbishop@gmail.com"
] |
gqbishop@gmail.com
|
4711899bace9f735a03aa47cdd6369e47250c52b
|
3c9905785bf67f39c8c2d4e43911bfca3fac7e7c
|
/ssg_wargame/floppy/floppy_ex.py
|
2026905962306529509cbf589a71f145ee806e4f
|
[] |
no_license
|
kommadot/CTF_STUDY
|
c31a3fd63f45674a41b25a21d2632c849f165d91
|
d7ccb33297f8bccc5b9b47a0958478bc8bfb1998
|
refs/heads/master
| 2022-07-09T17:04:14.286922
| 2022-06-27T07:00:54
| 2022-06-27T07:00:54
| 127,427,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
from pwn import *
s = process('./floppy')
s.recvuntil('>')
s.sendline('1')
s.recv()
s.sendline('1')
s.recvuntil('>')
s.sendline('2')
s.recvuntil('data')
s.send('1234')
s.recvuntil('tion:')
s.send('1234')
s.recvuntil('>')
s.sendline('4')
s.recvuntil('Data')
s.sendline('1')
s.recvuntil('tion:')
s.send('a'*17)
s.recvuntil('>')
s.sendline('3')
s.recvuntil('a'*16)
stackleak = u32(s.recv(4))
print(hex(stackleak))
|
[
"tlagyqls7@naver.com"
] |
tlagyqls7@naver.com
|
d021b744115764d2277dc2d4614b68fc97f495bd
|
bcab6bf51597d4fb8ba7abad8e5e33434cd8aa0a
|
/verts_package/__init__.py
|
cd1f1451ba0456486c767efacec29c163ece17c8
|
[] |
no_license
|
OpenSolutionsFinland/Verts_package
|
847d14230a310f9096a4edbb731fe531d68db5d6
|
583dad43a5e1f43fbc1dbbb73d49608ed1832b62
|
refs/heads/master
| 2016-09-05T09:11:17.272150
| 2013-08-16T04:38:22
| 2013-08-16T04:38:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36
|
py
|
import package_report
import package
|
[
"jezco@Jesses-MacBook-Pro.local"
] |
jezco@Jesses-MacBook-Pro.local
|
23bf9810cb81c88e936da29a45a53a82b77e55da
|
10decc1620bf5607e9c57296114026ff4bba4551
|
/leadmanager/leads/migrations/0001_initial.py
|
d1d321c13518b22c04f69b6f2d15d3f4e79e3eab
|
[
"MIT"
] |
permissive
|
GeorgHs/PreSteps_BuchungsTrainerDjangoAPI
|
785797d94fa14489a7375c7e89999f39f52f90dd
|
de1609923e235cb2463463da220157c19e090c97
|
refs/heads/main
| 2023-03-30T15:54:10.083151
| 2021-04-08T15:44:53
| 2021-04-08T15:44:53
| 336,791,318
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
# Generated by Django 3.1.6 on 2021-02-07 13:50
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Lead',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=100)),
('email', models.EmailField(max_length=100, unique=True)),
('message', models.CharField(blank=True, max_length=500)),
('create_at', models.DateTimeField(auto_now_add=True)),
],
),
]
|
[
"Georg.Hertzsch@msg-gillardon.de"
] |
Georg.Hertzsch@msg-gillardon.de
|
147a37571e91159aee5ad94006330608a5b884fa
|
4091f79b6537d091241f66ce02c79f701e07967e
|
/HW_PT2/RELAYplate.py
|
8f722a49d2061c3cee7b167b529e288cd3b7f0a1
|
[] |
no_license
|
habibiam/opto_luna
|
b0b3c29ce1fdb1e730d4c40a7dd91fe8a13d492b
|
1ee2cf09e0e03a046a43c69d35cb09e82f05c457
|
refs/heads/master
| 2020-04-15T12:47:47.547115
| 2017-02-03T22:50:11
| 2017-02-03T22:50:11
| 65,699,360
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,209
|
py
|
import spidev
import time
import string
import site
import sys
from numbers import Number
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
#Initialize
if (sys.version_info < (2,7,0)):
sys.stderr.write("You need at least python 2.7.0 to use this library")
exit(1)
GPIO.setmode(GPIO.BCM)
RELAYbaseADDR=24
ppFRAME = 25
ppINT = 22
GPIO.setup(ppFRAME,GPIO.OUT)
GPIO.output(ppFRAME,False) #Initialize FRAME signal
time.sleep(.001) #let Pi-Plate reset SPI engine if necessary
GPIO.setup(ppINT, GPIO.IN, pull_up_down=GPIO.PUD_UP)
spi = spidev.SpiDev()
spi.open(0,1)
localPath=site.getsitepackages()[0]
helpPath=localPath+'/piplates/RELAYhelp.txt'
#helpPath='RELAYhelp.txt' #for development only
RPversion=1.1
# Version 1.0 - initial release
# Version 1.1 - adjusted timing on command functions to compensate for RPi SPI changes
RMAX = 2000
MAXADDR=8
relaysPresent = range(8)
#==============================================================================#
# HELP Functions #
#==============================================================================#
def Help():
help()
def HELP():
help()
def help():
valid=True
try:
f=open(helpPath,'r')
while(valid):
Count=0
while (Count<20):
s=f.readline()
if (len(s)!=0):
print s[:len(s)-1]
Count = Count + 1
if (Count==20):
Input=raw_input('press \"Enter\" for more...')
else:
Count=100
valid=False
f.close()
except IOError:
print ("Can't find help file.")
def getPMrev():
return RPversion
#==============================================================================#
# RELAY Functions #
#==============================================================================#
def relayON(addr,relay):
VerifyADDR(addr)
VerifyRELAY(relay)
ppCMDr(addr,0x10,relay,0,0)
def relayOFF(addr,relay):
VerifyADDR(addr)
VerifyRELAY(relay)
ppCMDr(addr,0x11,relay,0,0)
def relayTOGGLE(addr,relay):
VerifyADDR(addr)
VerifyRELAY(relay)
ppCMDr(addr,0x12,relay,0,0)
def relayALL(addr,relays):
VerifyADDR(addr)
assert ((relays>=0) and (relays<=127)),"Argument out of range. Must be between 0 and 127"
ppCMDr(addr,0x13,relays,0,0)
def relaySTATE(addr):
VerifyADDR(addr)
resp=ppCMDr(addr,0x14,0,0,1)
return resp[0]
#==============================================================================#
# LED Functions #
#==============================================================================#
def setLED(addr):
VerifyADDR(addr)
resp=ppCMDr(addr,0x60,0,0,0)
def clrLED(addr):
VerifyADDR(addr)
resp=ppCMDr(addr,0x61,0,0,0)
def toggleLED(addr):
VerifyADDR(addr)
resp=ppCMDr(addr,0x62,0,0,0)
#==============================================================================#
# SYSTEM Functions #
#==============================================================================#
def getID(addr):
global RELAYbaseADDR
VerifyADDR(addr)
addr=addr+RELAYbaseADDR
id=""
arg = range(4)
resp = []
arg[0]=addr;
arg[1]=0x1;
arg[2]=0;
arg[3]=0;
ppFRAME = 25
GPIO.output(ppFRAME,True)
null = spi.writebytes(arg)
count=0
# time.sleep(.01)
while (count<20):
dummy=spi.xfer([00],500000,20)
if (dummy[0] != 0):
num = dummy[0]
id = id + chr(num)
count = count + 1
else:
count=20
GPIO.output(ppFRAME,False)
return id
def getHWrev(addr):
global RELAYbaseADDR
VerifyADDR(addr)
resp=ppCMDr(addr,0x02,0,0,1)
rev = resp[0]
whole=float(rev>>4)
point = float(rev&0x0F)
return whole+point/10.0
def getFWrev(addr):
global RELAYbaseADDR
VerifyADDR(addr)
resp=ppCMDr(addr,0x03,0,0,1)
rev = resp[0]
whole=float(rev>>4)
point = float(rev&0x0F)
return whole+point/10.0
def getVersion():
return RPversion
#==============================================================================#
# LOW Level Functions #
#==============================================================================#
def VerifyRELAY(relay):
assert ((relay>=1) and (relay<=7)),"Relay number out of range. Must be between 1 and 7"
def VerifyADDR(addr):
assert ((addr>=0) and (addr<MAXADDR)),"RELAYplate address out of range"
addr_str=str(addr)
assert (relaysPresent[addr]==1),"No RELAYplate found at address "+addr_str
def ppCMDr(addr,cmd,param1,param2,bytes2return):
global RELAYbaseADDR
arg = range(4)
resp = []
arg[0]=addr+RELAYbaseADDR;
arg[1]=cmd;
arg[2]=param1;
arg[3]=param2;
GPIO.output(ppFRAME,True)
null=spi.xfer(arg,300000,60)
#null = spi.writebytes(arg)
if bytes2return>0:
time.sleep(.0001)
for i in range(0,bytes2return):
dummy=spi.xfer([00],500000,20)
resp.append(dummy[0])
time.sleep(.001)
GPIO.output(ppFRAME,False)
time.sleep(.001)
return resp
def getADDR(addr):
global RELAYbaseADDR
resp=ppCMDr(addr,0x00,0,0,1)
return resp[0]-RELAYbaseADDR
def quietPoll():
global relaysPresent
ppFoundCount=0
for i in range (0,8):
relaysPresent[i]=0
rtn = getADDR(i)
if (rtn==i):
relaysPresent[i]=1
ppFoundCount += 1
#RESET(i)
def RESET(addr):
VerifyADDR(addr)
resp=ppCMDr(addr,0x0F,0,0,0)
time.sleep(.10)
quietPoll()
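# Minimal usage sketch, assuming a Raspberry Pi with a RELAYplate at address 0.
# These calls drive real hardware, so they are left as comments:
#   quietPoll()            # discover which of the 8 addresses have a board present
#   relayON(0, 1)          # energize relay 1 on the board at address 0
#   print(relaySTATE(0))   # bit mask of the currently energized relays
#   relayOFF(0, 1)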
|
[
"noreply@github.com"
] |
habibiam.noreply@github.com
|
dd953dd9a5015782428fe48bd1d2a3e97abca454
|
23177554c77dbaffd51fd67d2223503980395d19
|
/Hangman.py
|
bdbb93746f1406f8684256a18b4ce10288d4e84f
|
[] |
no_license
|
Aakash1311/Python
|
b9bb62aad73590365dc38572b0bc53880ee65163
|
869f2ea7c767a0c2d801b6223c60bc5d533d88c8
|
refs/heads/master
| 2023-04-30T02:54:21.862781
| 2021-05-20T12:19:30
| 2021-05-20T12:19:30
| 363,636,141
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,179
|
py
|
import random
def hangman():
word_list = ["python", "java", "computer", "hacker", "painter"]
random_number = random.randint(0, 4)
word = word_list[random_number]
wrong_guesses = 0
stages = ["", "________ ", "| | ", "| 0 ", "| /|\ ", "| / \ ", "|"]
remaining_letters = list(word)
letter_board = ["__"] * len(word)
win = False
print('Welcome to Hangman')
while wrong_guesses < len(stages) - 1:
print('\n')
guess = input("Guess a letter")
if guess in remaining_letters:
character_index = remaining_letters.index(guess)
letter_board[character_index] = guess
remaining_letters[character_index] = '$'
else:
wrong_guesses += 1
print((' '.join(letter_board)))
print('\n'.join(stages[0: wrong_guesses + 1]))
if '__' not in letter_board:
print('You win! The word was:')
print(' '.join(letter_board))
win = True
break
if not win:
print('\n'.join(stages[0: wrong_guesses]))
        print('You lose! The word was {}'.format(word))
hangman()
|
[
"Singhaakash084@gmail.com"
] |
Singhaakash084@gmail.com
|
5a520ace4bc20effd2e6510434cbd0883db604d1
|
1985e1265986b5ba04fef111f1f4dd2f29525f70
|
/postgresql_example/queries.py
|
3c9fa4f7b92e03e8642ba821ec83b53e7c5fdc81
|
[
"MIT"
] |
permissive
|
AshleyBrooks213/Unit3Sprint2
|
a21eca835c9ccf6871d5158b3df10058356d67c5
|
48e35f1a33c5ba50caa5f8f94df244e7f1f4e6b4
|
refs/heads/main
| 2023-05-10T21:47:41.164837
| 2021-05-25T17:27:16
| 2021-05-25T17:27:16
| 370,768,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
"""Holds my PosGreSQL example queries"""
# SQL Create Table Query
CREATE_TABLE_STATEMENT = """
CREATE TABLE test_table (
id SERIAL PRIMARY KEY,
name VARCHAR(40) NOT NULL,
data JSONB
);
"""
#SQL Insert Values Query
INSERT_STATEMENT = """
INSERT INTO test_table(name, data) VALUES
(
'A Row',
null
),
(
'Another Row, with Json',
'{"a": 1, "b": ["leanves, "more leaves", "even more leaves"]}'::JSONB
);
"""
|
[
"noreply@github.com"
] |
AshleyBrooks213.noreply@github.com
|
0cb2d43aa43b94d6fe24a210644786fcedeb5ef9
|
7d59a4dec57d09b7f899d6d0db2de4a4decfaa55
|
/2020-11/2020-11-13-TabuSearch/tabu_tsp.py
|
b521881a2d76cc79cf82da8a546f978677b5af92
|
[
"MIT"
] |
permissive
|
MinWu-DoctorFive/Code
|
df41bcd248179f4378954d51d3076d79d5dfaca1
|
fc3790b9b30b8acd6c46ab4b4f0ce9aa7892e4aa
|
refs/heads/main
| 2023-01-31T21:11:03.247698
| 2020-12-16T03:13:31
| 2020-12-16T03:13:31
| 343,963,649
| 1
| 1
|
MIT
| 2021-03-03T01:27:32
| 2021-03-03T01:27:31
| null |
UTF-8
|
Python
| false
| false
| 5,613
|
py
|
from itertools import combinations
import os,sys,copy
import numpy as np
import time
import matplotlib.pyplot as plt
from GetData import *
from tqdm import tqdm
class Tabu():
def __init__(self,disMatrix,max_iters=50,maxTabuSize=10):
"""parameters definition"""
self.disMatrix = disMatrix
self.maxTabuSize = maxTabuSize
self.max_iters = max_iters
self.tabu_list=[]
def get_route_distance(self,route):
'''
        Description: function to calculate the total distance of a route (the evaluation function).
        parameters: route : list
        return : total distance : float
'''
routes = [0] + route + [0] # add the start and end point
total_distance = 0
for i,n in enumerate(routes):
if i != 0 :
total_distance = total_distance + self.disMatrix[last_pos][n]
last_pos = n
return total_distance
def exchange(self,s1,s2,arr):
"""
        function to swap the positions of two elements in arr
Args: int,int,list
s1 : target 1
s2 : target 2
arr : target array
        Output: list
current_list : target array
"""
current_list = copy.deepcopy(arr)
index1 , index2 = current_list.index(s1) , current_list.index(s2) # get index
current_list[index1], current_list[index2]= arr[index2] , arr[index1]
return current_list
def generate_initial_solution(self,num=10,mode='greedy'):
"""
        function to get the initial solution; there are two different ways to generate route_init.
Args:
num : int
the number of points
mode : string
"greedy" : advance step by choosing optimal one
"random" : randomly generate a series number
        Output: list
s_init : initial solution route_init
"""
if mode == 'greedy':
route_init=[0]
for i in range(num):
best_distance = 10000000
for j in range(num+1):
if self.disMatrix[i][j] < best_distance and j not in route_init:
best_distance = self.disMatrix[i][j]
best_candidate = j
route_init.append(best_candidate)
route_init.remove(0)
if mode == 'random':
route_init = np.arange(1,num+1) #init solution from 1 to num
np.random.shuffle(route_init) #shuffle the list randomly
return list(route_init)
def tabu_search(self,s_init):
"""tabu search"""
s_best = s_init
bestCandidate = copy.deepcopy(s_best)
routes , temp_tabu = [] , [] # init
routes.append(s_best)
while(self.max_iters):
self.max_iters -= 1 # Number of iterations
neighbors = copy.deepcopy(s_best)
for s in combinations(neighbors, 2):
sCandidate = self.exchange(s[0],s[1],neighbors) # exchange number to generate candidates
if s not in self.tabu_list and self.get_route_distance(sCandidate) < self.get_route_distance(bestCandidate):
bestCandidate = sCandidate
temp_tabu = s
if self.get_route_distance(bestCandidate) < self.get_route_distance(s_best): # record the best solution
s_best = bestCandidate
if temp_tabu not in self.tabu_list:
self.tabu_list.append(temp_tabu)
if len(self.tabu_list) > self.maxTabuSize :
self.tabu_list.pop(0)
routes.append(bestCandidate)
return s_best, routes
if __name__ == "__main__":
np.random.seed(2020)
    customerNum = 10 # how many customer points to generate
data=GetData()
    tsp_data = data.generate_locations(num_points=customerNum+1,map_size=100) # randomly place points on a 100*100 map; customerNum+1 adds one depot point
dismatrix = data.get_euclidean_distance_matrix(tsp_data.locations)
# data.plot_nodes(tsp_data.locations)
""" Tabu :
        disMatrix : the distance matrix from 0 to X; 0 represents the starting and stopping point.
for example: disMatrix = [[0,3,4,...
1,0,5,...
3,5,0,...]]
that means the distance from 0 to 0 is 0, from 0 to 1 is 3,... from 1 to 3 is 5....
max_iters : maximum iterations
        maxTabuSize : maximum size of the tabu list
"""
    tsp = Tabu(disMatrix=dismatrix ,max_iters=20,maxTabuSize=10) # set the parameters
# two different way to generate initial solution
# num : the number of points
s_init = tsp.generate_initial_solution(num=customerNum,mode='greedy') # mode = "greedy" or "random"
print('init route : ' , s_init)
print('init distance : ' , tsp.get_route_distance(s_init))
start = time.time()
best_route , routes = tsp.tabu_search(s_init) # tabu search
end = time.time()
print('best route : ' , best_route)
print('best best_distance : ' , tsp.get_route_distance(best_route))
print('the time cost : ',end - start )
# plot the result changes with iterations
results=[]
for i in routes:
results.append(tsp.get_route_distance(i))
plt.plot(np.arange(len(results)) , results)
plt.show()
# plot the route
data.plot_route(tsp_data.locations,[0]+best_route+[0])
|
[
"noreply@github.com"
] |
MinWu-DoctorFive.noreply@github.com
|
c345ca9a14392bdf35f7190437bb582816f536b5
|
60dae997c1c460a5c058940528669865fa44bb6d
|
/venv/lib/python3.5/types.py
|
c9d27e80838075844f2e347ead9a0fb8adc8c1c9
|
[] |
no_license
|
abhay97ps/ReversiBot
|
5ba5e79e23659313de5004708642fdda508c0769
|
efc31516006a4819276231088f5a96acb3f4a57f
|
refs/heads/master
| 2020-03-07T04:43:37.003946
| 2018-05-09T03:24:48
| 2018-05-09T03:24:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29
|
py
|
F:/usr/lib/python3.5/types.py
|
[
"aakashv000@gmail.com"
] |
aakashv000@gmail.com
|
f9cb8c6add31241c4678d8c603fe6ab76c594042
|
1f780ec1f753be1628f7fc5bac23a9d6f594a8be
|
/asyncbb/ethereum/test/test_faucet.py
|
6854d62bab53a288815d92490abf5adefd15fb79
|
[] |
no_license
|
tristan/asyncbb-eth
|
93833dc5b4d078c80eb3d9576d38f3fb9a66ce08
|
8371398076322a24e970fe0650b43d364c28d2cb
|
refs/heads/master
| 2020-06-10T22:36:06.634248
| 2017-02-19T10:34:25
| 2017-02-19T10:34:25
| 75,856,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,722
|
py
|
import asyncio
import os
import subprocess
import unittest
from ethereum.abi import ContractTranslator
from ethereum.utils import sha3
from tornado.escape import json_decode
from asyncbb.handlers import BaseHandler
from asyncbb.ethereum import EthereumMixin
from asyncbb.test.base import AsyncHandlerTest
from tornado.testing import gen_test
from asyncbb.ethereum.client import JsonRPCClient
from testing.common.database import get_path_of
from .parity import requires_parity
from .faucet import FaucetMixin, data_decoder
from .geth import requires_geth
class Handler(EthereumMixin, BaseHandler):
async def get(self, addr):
balance = await self.eth.eth_getBalance(addr)
self.write(str(balance))
class FaucetTest(FaucetMixin, AsyncHandlerTest):
def get_urls(self):
return [(r'^/(0x.+)$', Handler)]
@gen_test(timeout=10)
@requires_parity
async def test_parity_faucet_connection(self):
addr = '0x39bf9e501e61440b4b268d7b2e9aa2458dd201bb'
val = 761751855997712
await self.faucet(addr, val)
resp = await self.fetch('/{}'.format(addr))
self.assertEqual(resp.body.decode('utf-8'), str(val))
@gen_test(timeout=10)
@requires_geth
async def test_geth_faucet(self):
addr = '0x39bf9e501e61440b4b268d7b2e9aa2458dd201bb'
val = 761751855997712
await self.faucet(addr, val)
resp = await self.fetch('/{}'.format(addr))
self.assertEqual(resp.body.decode('utf-8'), str(val))
class ContractTest(FaucetMixin, AsyncHandlerTest):
def get_urls(self):
return []
@unittest.skipIf(get_path_of("solc") is None, "couldn't find solc compiler, skipping test")
@gen_test(timeout=60)
@requires_parity(pass_parity='node')
async def test_deploy_contract(self, *, node):
client = JsonRPCClient(node.dsn()['url'])
sourcecode = b"contract greeter{string greeting;function greeter(string _greeting) public{greeting=_greeting;}function greet() constant returns (string){return greeting;}}"
#source_fn = os.path.join(node.get_data_directory(), 'greeting.sol')
#with open(source_fn, 'wb') as wf:
# wf.write(sourcecode)
source_fn = '<stdin>'
contract_name = 'greeter'
constructor_args = [b'hello world!']
args = ['solc', '--combined-json', 'bin,abi', '--add-std'] # , source_fn]
#output = subprocess.check_output(args, stderr=subprocess.PIPE)
process = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, stderrdata = process.communicate(input=sourcecode)
output = json_decode(output)
contract = output['contracts']['{}:{}'.format(source_fn, contract_name)]
bytecode = data_decoder(contract['bin'])
contract_interface = json_decode(contract['abi'])
translator = ContractTranslator(contract_interface)
constructor_call = translator.encode_constructor_arguments(constructor_args)
bytecode += constructor_call
tx_hash, contract_address = await self.deploy_contract(bytecode)
tx_receipt = await client.eth_getTransactionReceipt(tx_hash)
self.assertIsNotNone(tx_receipt)
code = await client.eth_getCode(contract_address)
self.assertIsNotNone(code)
self.assertNotEqual(data_decoder(code), b'')
# call the contract and check the result
res = await client.eth_call(from_address='0x39bf9e501e61440b4b268d7b2e9aa2458dd201bb', to_address=contract_address, data=sha3('greet()'))
result = translator.decode_function_result('greet', data_decoder(res))
self.assertEqual(result[0], constructor_args[0])
|
[
"tristan@bakkenbaeck.no"
] |
tristan@bakkenbaeck.no
|
2a958a04c29b32972ba720e0bf136c454d89520f
|
f6794da6d91204047c31789d28872381b8db73bf
|
/structs_and_algs/queue.py
|
1e2a595a958e0595111fe199c3e29bf4c0fea3d5
|
[] |
no_license
|
mericar/pyPlay
|
357b16eb6d6103bbcad0d8dc30e2a84d08f80ad3
|
6857c11d0ca7ebdaabeb0989a9492b9ec9a9d438
|
refs/heads/master
| 2021-06-18T05:11:22.315657
| 2017-06-26T07:51:17
| 2017-06-26T07:51:17
| 46,304,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
class Queue:
    def __init__(self, front=None):
        # Start empty unless an initial front element is supplied
        self.q = [front] if front is not None else []
def enqueue(self, new_entity):
self.q.append(new_entity)
def first(self):
return self.q[0]
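    # Note: list.pop(0) is O(n); collections.deque would make dequeue O(1)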
def dequeue(self):
return self.q.pop(0)
|
[
"mirkocarich@gmail.com"
] |
mirkocarich@gmail.com
|
98b06251948d5f524eae90478a5c1dd10a1d57cd
|
7f41ef1cfa30e02e7887bcf16e6587c0052ed4ba
|
/flaskapi/pyapi/logging/apiicefire.py
|
7d714513d903562685ff4cc7bdae48c7c271ed10
|
[] |
no_license
|
bmyers624/pyapi
|
353b4ddf50eb0290d8a161a872520020e50b0cd1
|
fcd97e2f247c314f7f76e0fb68cf410f9c706f7a
|
refs/heads/master
| 2022-11-06T23:59:47.246268
| 2020-07-10T22:05:54
| 2020-07-10T22:05:54
| 276,757,132
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 824
|
py
|
#!/usr/bin/python3
import logging
import requests
import argparse
import pprint
BOOK = "https://www.anapioficeandfirie.com/api/books"
def main():
logging.basicConfig(filename='icefire.log', format='%(levelname)s:%(asctime)s %(message)s',\
datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)
try:
logging.info('Scripting started')
icefire = requests.get(BOOK + "/" + args.bookno)
pprint.pprint(icefire.json())
logging.info("API Response Code -" + str(icefire))
except Exception as err:
logging.critical(err)
finally:
logging.info("Program ended")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--bookno', help='Enter the book number (integer) to look up.')
args = parser.parse_args()
main()
|
[
"b.myers624@gmail.com"
] |
b.myers624@gmail.com
|
8ffd200a40777796426fd2536c915d0fa9d9ddf9
|
6ccdab14aca949c571ef5f63dea001cb9565e23e
|
/app/api/monitoring.py
|
e21a7d646f8880c74a66c03e740f7cdc40bcefa5
|
[] |
no_license
|
unmade/hangman
|
bb87cb3ca3952791f61715e15db3805c6cd4f403
|
28eef30411e06446cd5252143c4d1b0e6a418483
|
refs/heads/master
| 2021-07-10T09:56:45.988497
| 2020-02-09T14:51:56
| 2020-02-09T14:51:56
| 239,015,111
| 0
| 0
| null | 2021-03-26T17:39:01
| 2020-02-07T20:17:40
|
Python
|
UTF-8
|
Python
| false
| false
| 332
|
py
|
from fastapi import APIRouter
from app import db
router = APIRouter()
@router.get("/ping")
async def ping():
"""Health check for service"""
return {"status": "OK"}
@router.get("/ping/db")
def ping_db():
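    # Report failures in the response body so the health-check endpoint itself never raises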
try:
db.ping_db()
except Exception:
return {"status": "ERROR"}
return {"status": "OK"}
|
[
"lesha.maslakov@gmail.com"
] |
lesha.maslakov@gmail.com
|
f17fd033520cfda90e7a79bc7778f3f55c05d53f
|
ffe9783bffe016db125b3bc0416776bf057d999d
|
/OML/logs.py
|
5cf201b237f5195d705db0f04ff7c0d4d9a9e7a2
|
[] |
no_license
|
hrmello/OperationalizingMachineLearning
|
836a221353b231796b28c9b0aa17852467fe235c
|
9fec131a7435ae5f97edc06ab2e6fd9fc2615f2f
|
refs/heads/master
| 2022-12-20T05:28:58.329562
| 2020-10-07T18:45:52
| 2020-10-07T18:45:52
| 300,402,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
from azureml.core import Workspace
from azureml.core.webservice import Webservice
# Requires the config to be downloaded first to the current working directory
ws = Workspace.from_config()
# Set with the deployment name
name = "voting-deploy"
# load existing web service
service = Webservice(name=name, workspace=ws)
logs = service.get_logs()
service.update(enable_app_insights=True)
for line in logs.split('\n'):
print(line)
|
[
"hrmello@outlook.com"
] |
hrmello@outlook.com
|
b564186a86551fe1998854e70937d0ba90f29676
|
e7397ed7c8a79b1df463205ccf8358a183f51d68
|
/venv/Lib/site-packages/pdfdocument/document.py
|
247abee33a04553e4c81c232e08d028a645cb3c2
|
[] |
no_license
|
yzx6151211/test
|
7515a1e15fa16d1ba35a6af3afcd331e579427c0
|
59ffb77c240dc7e3f18d45e0d0863b9080c9fe40
|
refs/heads/master
| 2023-03-31T10:57:11.523739
| 2021-04-08T02:00:12
| 2021-04-08T02:00:12
| 355,732,822
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,264
|
py
|
# coding=utf-8
import copy
import sys
import unicodedata
from functools import reduce
from reportlab.lib import colors
from reportlab.lib.enums import TA_RIGHT
from reportlab.lib.fonts import addMapping
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.units import cm, mm
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus import (
BaseDocTemplate,
CondPageBreak,
Frame,
KeepTogether,
NextPageTemplate,
PageBreak,
PageTemplate,
Paragraph as _Paragraph,
Spacer,
Table,
)
from reportlab.platypus.flowables import HRFlowable
PY2 = sys.version_info[0] < 3
if PY2:
string_type = unicode # noqa
else:
string_type = str
def register_fonts_from_paths(
regular, italic=None, bold=None, bolditalic=None, font_name="Reporting"
):
"""
Pass paths to TTF files which should be used for the PDFDocument
"""
pdfmetrics.registerFont(TTFont("%s" % font_name, regular))
pdfmetrics.registerFont(TTFont("%s-Italic" % font_name, italic or regular))
pdfmetrics.registerFont(TTFont("%s-Bold" % font_name, bold or regular))
pdfmetrics.registerFont(
TTFont("%s-BoldItalic" % font_name, bolditalic or bold or regular)
)
addMapping("%s" % font_name, 0, 0, "%s" % font_name)
addMapping("%s" % font_name, 0, 1, "%s-Italic" % font_name)
addMapping("%s" % font_name, 1, 0, "%s-Bold" % font_name)
addMapping("%s" % font_name, 1, 1, "%s-BoldItalic" % font_name)
class Empty(object):
pass
def sanitize(text):
REPLACE_MAP = [
(u"&", "&"),
(u"<", "<"),
(u">", ">"),
(u"ç", "ç"),
(u"Ç", "Ç"),
(u"\n", "<br />"),
(u"\r", ""),
]
for p, q in REPLACE_MAP:
text = text.replace(p, q)
return text
def normalize(text):
"""
Some layers of reportlab, PDF or font handling or whatever cannot handle
german umlauts in decomposed form correctly. Normalize everything to
NFKC.
"""
if not isinstance(text, string_type):
text = string_type(text)
return unicodedata.normalize("NFKC", text)
def MarkupParagraph(txt, *args, **kwargs):
if not txt:
return _Paragraph(u"", *args, **kwargs)
return _Paragraph(normalize(txt), *args, **kwargs)
def Paragraph(txt, *args, **kwargs):
if not txt:
return _Paragraph(u"", *args, **kwargs)
return _Paragraph(sanitize(normalize(txt)), *args, **kwargs)
class BottomTable(Table):
"""
This table will automatically be moved to the bottom of the page using the
BottomSpacer right before it.
"""
pass
class BottomSpacer(Spacer):
def wrap(self, availWidth, availHeight):
my_height = availHeight - self._doc.bottomTableHeight
if my_height <= 0:
return (self.width, availHeight)
else:
return (self.width, my_height)
class RestartPageBreak(PageBreak):
"""
Insert a page break and restart the page numbering.
"""
pass
class ReportingDocTemplate(BaseDocTemplate):
def __init__(self, *args, **kwargs):
BaseDocTemplate.__init__(self, *args, **kwargs)
self.bottomTableHeight = 0
self.bottomTableIsLast = False
self.numPages = 0
self._lastNumPages = 0
self.setProgressCallBack(self._onProgress_cb)
# For batch reports with several PDFs concatenated
self.restartDoc = False
self.restartDocIndex = 0
self.restartDocPageNumbers = []
def afterFlowable(self, flowable):
self.numPages = max(self.canv.getPageNumber(), self.numPages)
self.bottomTableIsLast = False
if isinstance(flowable, BottomTable):
self.bottomTableHeight = reduce(lambda p, q: p + q, flowable._rowHeights, 0)
self.bottomTableIsLast = True
elif isinstance(flowable, RestartPageBreak):
self.restartDoc = True
self.restartDocIndex += 1
self.restartDocPageNumbers.append(self.page)
# here the real hackery starts ... thanks Ralph
def _allSatisfied(self):
""" Called by multi-build - are all cross-references resolved? """
if self._lastNumPages < self.numPages:
return 0
return BaseDocTemplate._allSatisfied(self)
def _onProgress_cb(self, what, arg):
if what == "STARTED":
self._lastNumPages = self.numPages
self.restartDocIndex = 0
# self.restartDocPageNumbers = []
def page_index(self):
"""
Return the current page index as a tuple (current_page, total_pages)
This is the ugliest thing I've done in the last two years.
For this I'll burn in programmer hell.
At least it is contained here.
(Determining the total number of pages in reportlab is a mess
anyway...)
"""
current_page = self.page
total_pages = self.numPages
if self.restartDoc:
if self.restartDocIndex:
current_page = (
current_page
- self.restartDocPageNumbers[self.restartDocIndex - 1]
+ 1
)
if len(self.restartDocPageNumbers) > self.restartDocIndex:
total_pages = (
self.restartDocPageNumbers[self.restartDocIndex]
- self.restartDocPageNumbers[self.restartDocIndex - 1]
+ 1
)
else:
total_pages = self.restartDocPageNumbers[0]
if self.bottomTableHeight:
total_pages -= 1
if self.bottomTableIsLast and current_page == 1:
total_pages = max(1, total_pages)
# Ensure total pages is always at least 1
total_pages = max(1, total_pages)
return (current_page, total_pages)
def page_index_string(self):
"""
Return page index string for the footer.
"""
current_page, total_pages = self.page_index()
return self.PDFDocument.page_index_string(current_page, total_pages)
def dummy_stationery(c, doc):
pass
class PDFDocument(object):
show_boundaries = False
_watermark = None
def __init__(self, *args, **kwargs):
self.doc = ReportingDocTemplate(*args, **kwargs)
self.doc.PDFDocument = self
self.story = []
self.font_name = kwargs.get("font_name", "Helvetica")
self.font_size = kwargs.get("font_size", 9)
def page_index_string(self, current_page, total_pages):
return "Page %(current_page)d of %(total_pages)d" % {
"current_page": current_page,
"total_pages": total_pages,
}
def generate_style(self, font_name=None, font_size=None):
self.style = Empty()
self.style.fontName = font_name or self.font_name
self.style.fontSize = font_size or self.font_size
_styles = getSampleStyleSheet()
self.style.normal = _styles["Normal"]
self.style.normal.fontName = "%s" % self.style.fontName
self.style.normal.fontSize = self.style.fontSize
self.style.normal.firstLineIndent = 0
# normal.textColor = '#0e2b58'
self.style.heading1 = copy.deepcopy(self.style.normal)
self.style.heading1.fontName = "%s" % self.style.fontName
self.style.heading1.fontSize = 1.5 * self.style.fontSize
self.style.heading1.leading = 2 * self.style.fontSize
# heading1.leading = 10*mm
self.style.heading2 = copy.deepcopy(self.style.normal)
self.style.heading2.fontName = "%s-Bold" % self.style.fontName
self.style.heading2.fontSize = 1.25 * self.style.fontSize
self.style.heading2.leading = 1.75 * self.style.fontSize
# heading2.leading = 5*mm
self.style.heading3 = copy.deepcopy(self.style.normal)
self.style.heading3.fontName = "%s-Bold" % self.style.fontName
self.style.heading3.fontSize = 1.1 * self.style.fontSize
self.style.heading3.leading = 1.5 * self.style.fontSize
self.style.heading3.textColor = "#666666"
# heading3.leading = 5*mm
self.style.small = copy.deepcopy(self.style.normal)
self.style.small.fontSize = self.style.fontSize - 0.9
self.style.smaller = copy.deepcopy(self.style.normal)
self.style.smaller.fontSize = self.style.fontSize * 0.75
self.style.bold = copy.deepcopy(self.style.normal)
self.style.bold.fontName = "%s-Bold" % self.style.fontName
self.style.boldr = copy.deepcopy(self.style.bold)
self.style.boldr.alignment = TA_RIGHT
self.style.right = copy.deepcopy(self.style.normal)
self.style.right.alignment = TA_RIGHT
self.style.indented = copy.deepcopy(self.style.normal)
self.style.indented.leftIndent = 0.5 * cm
self.style.tablenotes = copy.deepcopy(self.style.indented)
self.style.tablenotes.fontName = "%s-Italic" % self.style.fontName
self.style.paragraph = copy.deepcopy(self.style.normal)
self.style.paragraph.spaceBefore = 1
self.style.paragraph.spaceAfter = 1
self.style.bullet = copy.deepcopy(self.style.normal)
self.style.bullet.bulletFontName = "Symbol"
self.style.bullet.bulletFontSize = 7
self.style.bullet.bulletIndent = 6
self.style.bullet.firstLineIndent = 0
self.style.bullet.leftIndent = 15
self.style.numberbullet = copy.deepcopy(self.style.normal)
self.style.numberbullet.bulletFontName = self.style.paragraph.fontName
self.style.numberbullet.bulletFontSize = self.style.paragraph.fontSize
self.style.numberbullet.bulletIndent = 0
self.style.numberbullet.firstLineIndent = 0
self.style.numberbullet.leftIndent = 15
# alignment = TA_RIGHT
# leftIndent = 0.4*cm
# spaceBefore = 0
# spaceAfter = 0
self.style.tableBase = (
("FONT", (0, 0), (-1, -1), "%s" % self.style.fontName, self.style.fontSize),
("TOPPADDING", (0, 0), (-1, -1), 0),
("BOTTOMPADDING", (0, 0), (-1, -1), 1),
("LEFTPADDING", (0, 0), (-1, -1), 0),
("RIGHTPADDING", (0, 0), (-1, -1), 0),
("FIRSTLINEINDENT", (0, 0), (-1, -1), 0),
("VALIGN", (0, 0), (-1, -1), "TOP"),
)
self.style.table = self.style.tableBase + (
("ALIGN", (1, 0), (-1, -1), "RIGHT"),
)
self.style.tableLLR = self.style.tableBase + (
("ALIGN", (2, 0), (-1, -1), "RIGHT"),
("VALIGN", (0, 0), (-1, 0), "BOTTOM"),
)
self.style.tableHead = self.style.tableBase + (
(
"FONT",
(0, 0),
(-1, 0),
"%s-Bold" % self.style.fontName,
self.style.fontSize,
),
("ALIGN", (1, 0), (-1, -1), "RIGHT"),
("TOPPADDING", (0, 0), (-1, -1), 1),
("BOTTOMPADDING", (0, 0), (-1, -1), 2),
("LINEABOVE", (0, 0), (-1, 0), 0.2, colors.black),
("LINEBELOW", (0, 0), (-1, 0), 0.2, colors.black),
)
self.style.tableOptional = self.style.tableBase + (
(
"FONT",
(0, 0),
(-1, 0),
"%s-Italic" % self.style.fontName,
self.style.fontSize,
),
("ALIGN", (1, 0), (-1, -1), "RIGHT"),
("BOTTOMPADDING", (0, 0), (-1, -1), 5),
("RIGHTPADDING", (1, 0), (-1, -1), 2 * cm),
)
def init_templates(self, page_fn, page_fn_later=None):
self.doc.addPageTemplates(
[
PageTemplate(id="First", frames=[self.frame], onPage=page_fn),
PageTemplate(
id="Later", frames=[self.frame], onPage=page_fn_later or page_fn
),
]
)
self.story.append(NextPageTemplate("Later"))
def init_report(self, page_fn=dummy_stationery, page_fn_later=None):
frame_kwargs = {
"showBoundary": self.show_boundaries,
"leftPadding": 0,
"rightPadding": 0,
"topPadding": 0,
"bottomPadding": 0,
}
full_frame = Frame(2.6 * cm, 2 * cm, 16.4 * cm, 25 * cm, **frame_kwargs)
self.doc.addPageTemplates(
[
PageTemplate(id="First", frames=[full_frame], onPage=page_fn),
PageTemplate(
id="Later", frames=[full_frame], onPage=page_fn_later or page_fn
),
]
)
self.story.append(NextPageTemplate("Later"))
self.generate_style()
def init_confidential_report(self, page_fn=dummy_stationery, page_fn_later=None):
if not page_fn_later:
page_fn_later = page_fn
def _first_page_fn(canvas, doc):
page_fn(canvas, doc)
doc.PDFDocument.confidential(canvas)
doc.PDFDocument.watermark("CONFIDENTIAL")
self.init_report(page_fn=_first_page_fn, page_fn_later=page_fn_later)
def init_letter(
self,
page_fn=dummy_stationery,
page_fn_later=None,
address_y=None,
address_x=None,
):
frame_kwargs = {
"showBoundary": self.show_boundaries,
"leftPadding": 0,
"rightPadding": 0,
"topPadding": 0,
"bottomPadding": 0,
}
address_frame = Frame(
address_x or 2.6 * cm,
address_y or 20.2 * cm,
16.4 * cm,
4 * cm,
**frame_kwargs
)
rest_frame = Frame(2.6 * cm, 2 * cm, 16.4 * cm, 18.2 * cm, **frame_kwargs)
full_frame = Frame(2.6 * cm, 2 * cm, 16.4 * cm, 25 * cm, **frame_kwargs)
self.doc.addPageTemplates(
[
PageTemplate(
id="First", frames=[address_frame, rest_frame], onPage=page_fn
),
PageTemplate(
id="Later", frames=[full_frame], onPage=page_fn_later or page_fn
),
]
)
self.story.append(NextPageTemplate("Later"))
self.generate_style()
def watermark(self, watermark=None):
self._watermark = watermark
def restart(self):
self.story.append(NextPageTemplate("First"))
self.story.append(RestartPageBreak())
def p(self, text, style=None):
self.story.append(Paragraph(text, style or self.style.normal))
def h1(self, text, style=None):
self.story.append(Paragraph(text, style or self.style.heading1))
def h2(self, text, style=None):
self.story.append(Paragraph(text, style or self.style.heading2))
def h3(self, text, style=None):
self.story.append(Paragraph(text, style or self.style.heading3))
def small(self, text, style=None):
self.story.append(Paragraph(text, style or self.style.small))
def smaller(self, text, style=None):
self.story.append(Paragraph(text, style or self.style.smaller))
def p_markup(self, text, style=None):
self.story.append(MarkupParagraph(text, style or self.style.normal))
def ul(self, items):
for item in items:
self.story.append(MarkupParagraph(item, self.style.bullet, bulletText=u"•"))
def spacer(self, height=0.6 * cm):
self.story.append(Spacer(1, height))
def table(self, data, columns, style=None):
self.story.append(Table(data, columns, style=style or self.style.table))
def hr(self):
self.story.append(HRFlowable(width="100%", thickness=0.2, color=colors.black))
def hr_mini(self):
self.story.append(HRFlowable(width="100%", thickness=0.2, color=colors.grey))
def mini_html(self, html):
"""Convert a small subset of HTML into ReportLab paragraphs
Requires lxml and BeautifulSoup."""
import lxml.html
import lxml.html.soupparser
TAG_MAP = {
"strong": "b",
"em": "i",
"br": "br", # Leave br tags alone
}
BULLETPOINT = u"•"
def _p(text, list_bullet_point, style=None):
if list_bullet_point:
self.story.append(
MarkupParagraph(
text,
style or self.style.paragraph,
bulletText=list_bullet_point,
)
)
else:
self.story.append(MarkupParagraph(text, style or self.style.paragraph))
def _remove_attributes(element):
for key in element.attrib:
del element.attrib[key]
def _handle_element(element, list_bullet_point=False, style=None):
_remove_attributes(element)
if element.tag in TAG_MAP:
element.tag = TAG_MAP[element.tag]
if element.tag in ("ul",):
for item in element:
_handle_element(
item, list_bullet_point=BULLETPOINT, style=self.style.bullet
)
list_bullet_point = False
elif element.tag in ("ol",):
for counter, item in enumerate(element):
_handle_element(
item,
list_bullet_point=u"{}.".format(counter + 1),
style=self.style.numberbullet,
)
list_bullet_point = False
elif element.tag in ("p", "li"):
for tag in reversed(list(element.iterdescendants())):
_remove_attributes(tag)
if tag.tag in TAG_MAP:
tag.tag = TAG_MAP[tag.tag]
else:
tag.drop_tag()
_p(
lxml.html.tostring(element, method="xml", encoding=string_type),
list_bullet_point,
style,
)
else:
if element.text:
_p(element.text, list_bullet_point, style)
for item in element:
_handle_element(item, list_bullet_point, style)
if element.tail:
_p(element.tail, list_bullet_point, style)
soup = lxml.html.soupparser.fromstring(html)
_handle_element(soup)
def pagebreak(self):
self.story.append(PageBreak())
def bottom_table(self, data, columns, style=None):
obj = BottomSpacer(1, 1)
obj._doc = self.doc
self.story.append(obj)
self.story.append(BottomTable(data, columns, style=style or self.style.table))
def append(self, data):
self.story.append(data)
def generate(self):
self.doc.multiBuild(self.story)
def confidential(self, canvas):
canvas.saveState()
canvas.translate(18.5 * cm, 27.4 * cm)
canvas.setLineWidth(3)
canvas.setFillColorRGB(1, 0, 0)
canvas.setStrokeGray(0.5)
p = canvas.beginPath()
p.moveTo(10, 0)
p.lineTo(20, 10)
p.lineTo(30, 0)
p.lineTo(40, 10)
p.lineTo(30, 20)
p.lineTo(40, 30)
p.lineTo(30, 40)
p.lineTo(20, 30)
p.lineTo(10, 40)
p.lineTo(0, 30)
p.lineTo(10, 20)
p.lineTo(0, 10)
canvas.drawPath(p, fill=1, stroke=0)
canvas.restoreState()
def draw_watermark(self, canvas):
if self._watermark:
canvas.saveState()
canvas.rotate(60)
canvas.setFillColorRGB(0.9, 0.9, 0.9)
canvas.setFont("%s" % self.style.fontName, 120)
canvas.drawCentredString(195 * mm, -30 * mm, self._watermark)
canvas.restoreState()
def draw_svg(self, canvas, path, xpos=0, ypos=0, xsize=None, ysize=None):
from reportlab.graphics import renderPDF
from svglib.svglib import svg2rlg
drawing = svg2rlg(path)
xL, yL, xH, yH = drawing.getBounds()
if xsize:
drawing.renderScale = xsize / (xH - xL)
if ysize:
drawing.renderScale = ysize / (yH - yL)
renderPDF.draw(drawing, canvas, xpos, ypos, showBoundary=self.show_boundaries)
def next_frame(self):
self.story.append(CondPageBreak(20 * cm))
def start_keeptogether(self):
self.keeptogether_index = len(self.story)
def end_keeptogether(self):
keeptogether = KeepTogether(self.story[self.keeptogether_index :])
self.story = self.story[: self.keeptogether_index]
self.story.append(keeptogether)
def address_head(self, text):
self.smaller(text)
self.spacer(2 * mm)
def address(self, obj, prefix=""):
if type(obj) == dict:
data = obj
else:
data = {}
for field in (
"company",
"manner_of_address",
"first_name",
"last_name",
"address",
"zip_code",
"city",
"full_override",
):
attribute = "%s%s" % (prefix, field)
data[field] = getattr(obj, attribute, u"").strip()
address = []
if data.get("company", False):
address.append(data["company"])
title = data.get("manner_of_address", "")
if title:
title += u" "
if data.get("first_name", False):
address.append(
u"%s%s %s"
% (title, data.get("first_name", ""), data.get("last_name", ""))
)
else:
address.append(u"%s%s" % (title, data.get("last_name", "")))
address.append(data.get("address"))
address.append(u"%s %s" % (data.get("zip_code", ""), data.get("city", "")))
if data.get("full_override"):
address = [
l.strip()
for l in data.get("full_override").replace("\r", "").splitlines()
]
self.p("\n".join(address))
|
[
"523882246@qq.com"
] |
523882246@qq.com
|
cfeefa710d82c14c84f1b09f78d2f06d7bc04a4a
|
625f2f86f2b2e07cb35204d9b3232427bf462a09
|
/data/HIRun2017XeXe/HIMinimumBias10_XeXeRun2017_13Dec2017_v1/crabConfig.py
|
8d4c3810aa384ccce2a13748b25967252606e33f
|
[] |
no_license
|
ttrk/production
|
abb84c423a076fd9966276b7ed4350936c755e0b
|
f8a64c9c38de215802799365f0f7a99e1ee78276
|
refs/heads/master
| 2023-02-08T23:48:56.355141
| 2023-01-26T08:46:22
| 2023-01-26T08:46:22
| 52,877,406
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,800
|
py
|
from WMCore.Configuration import Configuration
config = Configuration()
config.section_('General')
config.General.requestName = 'HIMinimumBias10_XeXeRun2017_13Dec2017_v1'
config.General.transferLogs = False
config.section_('JobType')
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'runForestAOD_XeXe_DATA_94X.py'
config.JobType.maxMemoryMB = 2500 # request high memory machines.
config.JobType.maxJobRuntimeMin = 2500 # request longer runtime.
# forest_CMSSW_9_4_1
# https://github.com/CmsHI/cmssw/commit/09a79bd943eda5136a9de73943553dd2bfd30f3e
# runForestAOD_XeXe_DATA_94X.py commit + turn off trees not related to photon production analysis + add tree for standard photons
# https://github.com/CmsHI/cmssw/commit/09a79bd943eda5136a9de73943553dd2bfd30f3e
# JSON file : /afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions17/HI/Cert_304899-304907_5TeV_PromptReco_XeXe_Collisions17_JSON.txt
# related : https://hypernews.cern.ch/HyperNews/CMS/get/hi-general/4437.html
config.section_('Data')
config.Data.inputDataset = '/HIMinimumBias10/XeXeRun2017-13Dec2017-v1/AOD'
config.Data.inputDBS = 'global'
config.Data.lumiMask = 'Cert_304899-304907_5TeV_PromptReco_XeXe_Collisions17_JSON.txt'
#config.Data.runRange = '304899-304906'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 10
config.Data.totalUnits = -1
config.Data.publication = False
config.Data.outputDatasetTag = 'XeXeRun2017-13Dec2017-v1-photonFOREST'
config.Data.outLFNDirBase = '/store/user/katatar/HIRun2017XeXe/'
# https://github.com/richard-cms/production/commits/2016-03-17-reRECO-foresting/crabConfig.py
config.section_('Site')
config.Site.storageSite = "T2_US_MIT"
config.Site.whitelist = ["T2_US_MIT"]
config.section_("Debug")
config.Debug.extraJDL = ["+CMS_ALLOW_OVERFLOW=False"]
|
[
"tatark@mit.edu"
] |
tatark@mit.edu
|
431bec1f65b0f2f9845fd0cd4fa3323157ce83d2
|
e9e6803260c681fd5f9eb7179f51783685f62e68
|
/oops_employee.py
|
14b085594fd05dbbb36465be830f0b856fc7397c
|
[] |
no_license
|
http-www-testyantra-com/Anusha_Sony
|
74f06f60ede11c0500eb0e58dc2cd381f7966d99
|
a64f4b026661294b4da3be50493fb094c0b7db80
|
refs/heads/master
| 2020-11-24T04:35:10.396054
| 2019-12-15T19:48:17
| 2019-12-15T19:48:17
| 227,967,194
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
class Company:
CompanyName="LT"
Location="Bangalore"
Hike_Rate=10
def __init__(self,Name, age,id,sal):
self.Name=Name
self.id=id
self.sal=sal
self.age=age
def display_employedetails(self):
print(self.Name,self.age,self.id,self.sal)
def modify_details(self,Name="",age=0,id=0,sal=0 ):
if Name!="":
self.Name=Name
if id!=0:
self.id=id
if age!=0:
self.age=age
if sal!=0:
self.sal=sal
@classmethod
    def modify_hike(cls, new=0):
        # Update the class-level hike rate only when a non-zero value is supplied
        if new != 0:
            cls.Hike_Rate = new
        return cls.Hike_Rate
@staticmethod
def get_hike():
new = float(input("enter the new hike"))
return new
@staticmethod
def success():
print("Employee details has been updated successfully")
@staticmethod
def failure():
print("Employee details modification has been failed")
ob=Company("anusha",20,1121,10000)
ob.display_employedetails()
|
[
"anushanvi91@gmail.com"
] |
anushanvi91@gmail.com
|
d596b39e16214c2187363402bb1cc5cfbd030dd2
|
39d100d1ed768ab4bdc768dc70e68d4bf943f233
|
/tgmate/views/admin_view/message.py
|
01e5eed946237b3093a365c59c1db8e628247eca
|
[] |
no_license
|
ivan-koryshkin/tgmate
|
702b5c465a3435be134d858cc5fbd0f5ca8fd1f3
|
7ae1f5125ac19f00c53d557c70dbbdbe99886cac
|
refs/heads/master
| 2023-08-30T09:20:04.947011
| 2021-11-09T13:21:17
| 2021-11-09T13:21:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 87
|
py
|
from flask_admin.contrib.sqla import ModelView
class MessageView(ModelView):
pass
|
[
"ivan.koryshkin@gmail.com"
] |
ivan.koryshkin@gmail.com
|
6b0cd2f19b9a1b1405cfb0781b19e6c544ac8a43
|
2a45507fff25c42cad05b52d83d011fea0909be5
|
/Codeforces/Python/Indian Coin Change.py
|
7254c2721508e7cd6403b69022eb32ca68ff52f7
|
[
"MIT"
] |
permissive
|
Sharayu1071/Daily-Coding-DS-ALGO-Practice
|
dc8256e76d43952f679236df904f597908fbda13
|
2c424b33a1385085f97b98d6379d6cd9cc71b1bd
|
refs/heads/main
| 2023-08-30T17:49:44.312613
| 2021-10-03T04:21:21
| 2021-10-03T04:21:21
| 412,973,714
| 3
| 0
|
MIT
| 2021-10-03T04:18:20
| 2021-10-03T04:18:19
| null |
UTF-8
|
Python
| false
| false
| 680
|
py
|
denominations = [1, 2, 5, 10, 20, 50, 100, 500, 1000] #denominations of indian currency
n=len(denominations)
def Coin_Change(V):
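    # Greedy selection is optimal here because the Indian denominations form a canonical coin system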
change = [] # to store the change currencies
for deno in range(n-1 , -1, -1): #reverse for loop
while (V >= denominations[deno]):
V -= denominations[deno]
change.append(denominations[deno])
print("Minimal no. of coins needed:", len(change))
print("Changed currency: ", end=" ")
for i in change:
print(i, end=" ")
# Driver Code
V= int(input("Enter the amount: "))
Coin_Change(V)
# Sample
# Enter the amount: 72
# Minimal no. of coins needed: 3
# Changed currency: 50 20 2
|
[
"1218.muskangupta@gmail.com"
] |
1218.muskangupta@gmail.com
|
363eebe781fa7846f4e80639033db7c5abf5cddf
|
807022b4aebd9c4b2e8b0f5b7c209cf21c697381
|
/ocr_structuring/core/non_template/utils/bol_utils/table_items/table_handler/element_handler.py
|
82587a41931bc5700533f5ccd6b2b83495a80ec7
|
[] |
no_license
|
imfifc/myocr
|
4abc5480222f4828072857fbb84236f4a494b851
|
71ba56084aabfa8b07ddc1842bcac5cdbd71212c
|
refs/heads/master
| 2022-12-13T13:04:51.488913
| 2020-09-02T09:07:09
| 2020-09-02T09:07:09
| 292,232,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28,938
|
py
|
import re
from collections import defaultdict
from typing import Dict, List
import editdistance as ed
import numpy as np
import pandas as pd
from ocr_structuring.core.non_template.utils.bol_utils.table_items.table_handler.keyrow_handler import KeyrowGroup
from ocr_structuring.core.non_template.utils.bol_utils.utils.time_counter import record_time
from ocr_structuring.core.utils.bbox import BBox
from ocr_structuring.core.utils.node_item_group import NodeItemGroup
from ocr_structuring.utils.logging import logger
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
from itertools import chain
from .field_handler import Field
from ocr_structuring.core.utils.node_item import NodeItem
from . import element_common_rules as element_common_rules
from collections import OrderedDict
from ocr_structuring.core.non_template.utils.bol_utils.utils.structures import ElementTextType
TOTAL_STATS_REGEX = [
'(Total|TOTAL)s?_{,2}:[0-9\._]*L?',
'TOTAL_{,2}:([0-9]*(PCS|KG)){1,}.*',
'Total.{,4}Package. *',
    'Subtotal.{,2}Order.*Q.{,2}ty.*',
'SUB-TOTAL',
'TOTALCustomer',
'Subtotal_Order_Q\'ty\ .',
'Total:[0-9\.]*',
    'TOTAL_QUANTITY.*',
'SAY.{,3}TOTAL.{,3}ONE'
]
class Block():
def __init__(self, fid, row_order, header_name, header_type, line_content: List[str],
line_item: List[NodeItemGroup], update=True):
        self.fid = fid  # fid records which field (column) this block belongs to
self.row_order = row_order
self.header_name = header_name
self.header_type = header_type
self.line_content = line_content
self.line_item = line_item
self.node_items = list(chain(*[line.node_items for line in self.line_item]))
if update:
self.line_content, self.line_item = self.extract_text_from_node_items(node_items=self.node_items)
@property
def bbox(self):
_bbox: BBox = self.line_item[0].bbox
for i in range(1, len(self.line_item)):
_bbox.merge_(self.line_item[i].bbox)
return _bbox
@property
def content(self):
return self.line_content
def update_line_content(self):
        # Call this after the node_item data has been updated to refresh the line content
self.line_content, self.line_item = self.extract_text_from_node_items(node_items=self.node_items)
def extract_text_from_node_items(self, node_items, thresh=None):
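        # Group node_items into visual rows by vertical proximity, then read each row left to right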
def h_dis(node1, node2):
return abs(node1.bbox.cx - node2.bbox.cx)
def v_algin(node1, node2, thresh):
if abs(node1.bbox.cy - node2.bbox.cy) < thresh:
return True
return False
if thresh is None:
thresh = node_items[0].bbox.height // 2
sorted_nodes = sorted(node_items, key=lambda x: x.bbox.top)
rows = []
while len(sorted_nodes) > 0:
current_row = []
delete_idxs = []
node = sorted_nodes[0]
del sorted_nodes[0]
current_row.append(node)
for idx, node in enumerate(sorted_nodes):
target = sorted(current_row, key=lambda target: h_dis(target, node))[0]
if v_algin(node, target, thresh):
current_row.append(node)
delete_idxs.append(idx)
current_row = sorted(current_row, key=lambda x: x.bbox.left)
rows.append(current_row)
delete_idxs = sorted(delete_idxs, key=lambda x: -x)
for idx in delete_idxs:
del sorted_nodes[idx]
row_texts = []
for row in rows:
texts = [it.text for it in row]
split = '_'
row_text = split.join(texts)
row_texts.append(row_text)
return row_texts, [NodeItemGroup(row) for row in rows]
class ElementGroup():
def __init__(self, cfg, header_type, header_group, fields, rows, keyrow_config, filterrow_config,
block_update_config,
node_items: Dict[str, NodeItem],
company_name=None):
        # Initialization: store the table's basic structural information
self.cfg = cfg
self.header_type = header_type
self.header_group = header_group
self.fields: List[Field] = fields
self.rows = rows
self.keyrow_config = keyrow_config
self.keyrow_group = KeyrowGroup(keyrow_config, company_name=company_name)
self.filterrow_config = filterrow_config
self.node_items = node_items
self.block_update_config: set = block_update_config
@record_time
def assign_block(self, row_info: Dict[str, str], fields_info: Dict[str, str]):
"""
        This function extracts a set of features for every row, e.g. how many fields the row spans,
        using all of the table's node_items.
        Results:
            block_info: Dict[str, str], records which block every uid belongs to
            block_list: List[BlockItem], each holding a group of nodes and their column number
                        (nodes in different columns are split into different blocks)
        :param row_info: records which row every node_item belongs to
        :param fields_info: records which field (column) every node_item belongs to
:return:
"""
        # step1: build the table
        # uid is the node_item id
        # rid is the row id, i.e. which row this node belongs to
        # fid is the field id, i.e. which column this node belongs to
node_info = pd.DataFrame(
[
(uid, row_idx, fields_info[uid]) for uid, row_idx in row_info.items()
],
columns=['uid', 'rid', 'fid']
)
node_info.index = node_info.uid
# 做一些相关信息的准备:
node_info = self.preprocess_node_info(node_info)
# 对每个行,在每个列涉及到的text 的信息进行记录
self.row_content_in_each_fields = self.get_row_content_in_each_fields(node_info)
# 尝试过滤掉表尾部的一些元素
node_info = self.filter_redudant_content(node_info)
# step2, 对每个行,检查是否是key row
# 检查结果更新在node_info 这个dataframe 当中
find_keyrow, possible_key_row = self.assign_keyrow(node_info)
# step3,过滤掉possible_key_row 最大的那一个之后的内容
if find_keyrow:
start_filter_line = max(possible_key_row)
else:
start_filter_line = 0
node_info, possible_key_row = self.filter_redudant_line(start_filter_line, node_info,
possible_key_row=possible_key_row)
if len(self.row_content_in_each_fields) == 0:
# 说明没有一条有用的记录
return False, None, None
# step4 ,如果有 key_row , 则按照key_row , 对node 进行分类
if find_keyrow and len(possible_key_row) > 0:
blocks_in_row = self.assign_row_to_block(possible_key_row, node_info)
else:
# 认为表头一下只有一行数据
# 注意,要传入 应该是所有剩余有效行的最小的一行
first_row_not_redundant = min([self.rows[rid].order for rid in self.row_content_in_each_fields])
blocks_in_row = self.assign_row_to_block({first_row_not_redundant}, node_info)
# 对于同一个fields,同一个row 的,会被分为一个block
return True, possible_key_row, blocks_in_row
def preprocess_node_info(self, node_info):
# 获取每个node 的数据
self.parse_element_info(node_info)
self.parse_row_info(node_info)
self.parse_field_info(node_info)
return node_info
def parse_element_info(self, node_info):
node_info.loc[:, 'text'] = node_info.uid.map(lambda x: self.node_items[x].text)
# node_info.loc[:, 'clean_text'] = node_info.text.map(lambda x: re.sub('[^0-9A-Za-z\-\.]', '', x))
node_info.loc[:, 'xmin'] = node_info.uid.map(lambda x: self.node_items[x].bbox.left)
def _text_type(text: str):
text = text.lower()
text = text.replace('_', '')
if re.sub('[^0-9\-\./_]', '', text) == text:
# 对于 4.23
return ElementTextType.ALL_NUM
if re.sub('[0-9]', '', text) == text:
# 文本当中不存在数字;
return ElementTextType.ALL_CHAR
if re.sub('[0-9a-z]', '', text) == text:
# 文本当中只包含特殊字符
return ElementTextType.ALL_SPEC
filter_spec = re.sub('[^0-9a-z]', '', text)
# 如果filter_spec 前面全是数字,后面全是字母
if re.match('^[0-9]{1,}[a-z]{1,}$', filter_spec):
return ElementTextType.NUM2CHAR
return ElementTextType.MIX_NUM_CHAR
node_info.loc[:, 'text_type'] = node_info.text.map(lambda x: _text_type(x))
def parse_row_info(self, node_info):
# 对row 进行排序,获取row 从上至下的行数
for idx, (row_id, row) in enumerate(sorted(self.rows.items(), key=lambda x: x[1].bbox.cy)):
row.order = idx
node_info.loc[:, 'row_order'] = node_info.rid.map(lambda x: self.rows[x].order)
# 抽取一些统计信息
# stats1 : 每个行,涉及到的列数,总共对应的node 的数量
node_info.loc[:, 'num_uid_in_row'] = node_info.rid.map(node_info.groupby('rid').uid.count())
node_info.loc[:, 'num_fid_in_row'] = node_info.rid.map(
node_info.groupby('rid').fid.apply(lambda x: len(x.unique()))
)
# 对row 当中的node_item 按照xmin 进行排序
for rowid, row_item in self.rows.items():
row_item.sort(lambda x: x.bbox.rect[0])
def parse_field_info(self, node_info):
# 对列,获取header
node_info.loc[:, 'header_type'] = node_info.fid.map(lambda x: self.fields[x].header.head_type)
node_info.loc[:, 'header_content'] = node_info.fid.map(lambda x: self.fields[x].header.key_node.content)
def filter_redudant_content(self, node_info):
useless_row_id = []
for filter_config in self.filterrow_config['filter_content']:
# 遍历所有的过滤配置
regex_list = filter_config['regex']
adaptive_fields = filter_config['adaptive_fields']
for rid, rid_content in self.row_content_in_each_fields.copy().items():
# 遍历所有的行
for fid, content_info in rid_content.items():
# 遍历这些行涉及到的列
header_type = content_info['header_type']
if header_type not in adaptive_fields:
continue
row_content_in_field = content_info['content']
# print('debug, ', regex_list, row_content_in_field)
useless = False
for regex in regex_list:
if re.match(regex, row_content_in_field):
useless = True
break
if useless:
del self.row_content_in_each_fields[rid]
row_order = content_info['row_order']
logger.info('{} is not useful'.format(row_content_in_field))
useless_row_id.append(row_order)
break
for rid, rid_content in self.rows.copy().items():
row_content = rid_content.content()
# print('debugger', row_content)
need_ignore = False
for regex in self.filterrow_config['filter_content_in_line']:
if re.search(regex, row_content, re.IGNORECASE):
need_ignore = True
if need_ignore:
del self.row_content_in_each_fields[rid]
useless_row_id.append(rid_content.order)
useless_row_id = set(useless_row_id)
if not useless_row_id:
return node_info
node_info = node_info[~node_info.row_order.isin(useless_row_id)]
# TODO: 自适应的去除尾部的内容
return node_info
def filter_redudant_line(self, start_filter_line, node_info, possible_key_row=None):
# 从行的角度筛选数据
ignore_bg_lines = []
for idx, (bg_texts, ed_thresh) in enumerate(self.filterrow_config['filter_lines']):
bg_texts = re.sub('[^0-9A-Za-z]', '', bg_texts).lower()
self.filterrow_config['filter_lines'][idx] = (bg_texts, ed_thresh)
# 建立row_order 对于rid 的字典
row_order_id_map = {self.rows[rid].order: rid for rid in self.row_content_in_each_fields}
# 按照从小到大排序
row_order_id_map = OrderedDict(sorted(row_order_id_map.items(), key=lambda x: x[0]))
after_filter_row = False # 在一个过滤行之后的所有内容, 会会被过滤掉
for order, rid in row_order_id_map.items():
# 遍历每一行
if after_filter_row:
ignore_bg_lines.append(order)
del self.row_content_in_each_fields[rid]
continue
row = self.rows[rid]
if row.order < start_filter_line:
continue
row_content = row.content()
row_content = re.sub('[^0-9A-Za-z]', '', row_content).lower()
logger.info('this print used to check rows need filter: {}'.format(row_content))
filtered_by_line_rule = False
# print('debug',row_content)
for bg_texts, ed_thresh in self.filterrow_config['filter_lines']:
dist = ed.eval(row_content, bg_texts)
if dist < ed_thresh:
del self.row_content_in_each_fields[rid]
ignore_bg_lines.append(row.order)
after_filter_row = True
filtered_by_line_rule = True
break
if filtered_by_line_rule:
# 已经认为是一个需要过滤的行了,这里就不做考虑了
continue
for comb in self.filterrow_config['filter_comb']:
# 拿到每一个comb 的配置
matched_count = 0
for header_type_list, regex_config in comb:
if isinstance(header_type_list, self.header_group.header_types):
header_type_list = [header_type_list]
at_least_succeed = False
for header_type in header_type_list:
# 遍历所有的在这次配置当中的header_type
if at_least_succeed:
break
if isinstance(regex_config, list):
# 如果对某个内容配置为list
regex_list = regex_config
# 获取这一行涉及到的这个类型的type
content = [fid_info['content'] for fid, fid_info in
self.row_content_in_each_fields[rid].items()
if fid_info['header_type'] == header_type]
for regex in regex_list:
if any([re.search(regex, text, re.IGNORECASE) is not None for text in content]):
matched_count += 1
break
elif isinstance(regex_config, dict):
regex_list = regex_config['content_regex']
header_regex_list = regex_config['header_regex']
content_list = [(fid, fid_info['content']) for fid, fid_info in
self.row_content_in_each_fields[rid].items() if
fid_info['header_type'] == header_type]
# 根据fid ,获取每个content 对应的header 的内容
content_list = [(self.fields[fid].header.key_node.content, fid_content) \
for fid, fid_content in content_list]
# 从这些content 当中挑选 符合 header_regex_list 的内容
content_satisfy_header_regex = []
for header_content, field_content in content_list:
satisfy_regex = False
for header_regex in header_regex_list:
if re.search(header_regex, header_content, re.IGNORECASE):
satisfy_regex = True
break
if satisfy_regex:
content_satisfy_header_regex.append(field_content)
if len(content_satisfy_header_regex) == 0:
# 说明这一行没有一个列满足header_regex 的条件
continue
for regex in regex_list:
if any([re.search(regex, text, re.IGNORECASE) is not None for text in
content_satisfy_header_regex]):
matched_count += 1
at_least_succeed = True
break
if matched_count == len(comb):
logger.info('filtered {} by filter_comb'.format(self.rows[rid].content()))
del self.row_content_in_each_fields[rid]
ignore_bg_lines.append(row.order)
after_filter_row = True
break
node_info = node_info[~node_info.row_order.isin(ignore_bg_lines)]
if possible_key_row is not None:
possible_key_row = possible_key_row - set(ignore_bg_lines)
return node_info, possible_key_row
def get_row_content_in_each_fields(self, node_info):
"""
        Return a dict that records, for every row, the text it contains in each field,
        grouping the nodes of each row/field pair into a NodeItemGroup.
"""
row_content_in_each_fields = defaultdict(dict)
for (rid, fid), data in node_info.groupby(['rid', 'fid']):
data = data.sort_values(by='xmin')
row_content = '_'.join(data.text)
element_group = NodeItemGroup(data.uid.map(lambda x: self.node_items[x]).to_list())
row_order = data.row_order.values[0]
for node in element_group.node_items:
node.row_order = row_order
header_type = data.header_type.values[0]
row_content_in_each_fields[rid][fid] = {'header_type': header_type,
'content': row_content,
'row_order': row_order,
'element_group': element_group
}
return row_content_in_each_fields
def assign_keyrow(self, node_info: pd.DataFrame):
"""
        :param node_info: pd.DataFrame recording, for every node, its uid and the ids of the row and column it belongs to
:return:
"""
success, possible_key_row = self.keyrow_group.assign_key_row(node_info, self.fields, self.rows,
self.header_group)
if success:
return success, possible_key_row
return False, set()
@staticmethod
def load_rule(func_name):
if isinstance(func_name, str):
function = getattr(element_common_rules, func_name, None)
assert function is not None, 'must set right function name {}'.format(func_name)
return function
else:
function = func_name
return function
def assign_row_to_block(self, possible_key_row, node_info):
"""
        Purpose: assign each data row to a record (block).
        Available information:
            node_info: records every node_item, which row and column it belongs to, and its basic attributes
            possible_key_row: records which rows are key rows; a key row is generally treated as one block
                              (one record is called one block)
            self.row_content_in_each_fields: records, for every row, what it contains in each field
        The information finally stored in a block is similar to a paragraph: all rows belonging to the
        block contribute their content and bbox and are merged into a node_item_group, keeping the
        related metadata as well.
        The difficult part is how to do the assignment:
            rule1: rows that do not intersect any key_row are merged upwards
            rule2: rows that do intersect a key_row (sufficient overlap on the y-axis) are merged with the nearest one
            rule3: cases that cannot be handled here are left to company_rules later on
        :return: node_info: records the block id every node_item belongs to,
                 blocks: records what every block contains
"""
before_key_row, row_group, row_order_id_map = self.assign_row_to_key_row(possible_key_row)
row_group = sorted(row_group.items(), key=lambda x: x[0])
row_group = self.common_filter_total_line(row_group, row_order_id_map)
blocks_in_row = []
for row_id, row in row_group:
auto_remove_tail = False
if row_id == row_group[-1][0]:
# 说明是最后一个group
auto_remove_tail = True
blocks = self.build_blocks(row_id, row, row_order_id_map, node_info, auto_remove_tail=auto_remove_tail)
blocks_in_row.append(blocks)
return blocks_in_row
def common_filter_total_line(self, row_group, row_order_id_map):
        # If a line starting with one of the total-related regexes appears only in the last group,
        # treat it as the grand total and filter out everything after it
first_total_appear_id = -1
first_total_appear_line = 0
for group_id, group in row_group:
find_total = False
for group_idx, row in enumerate(group):
row_content = self.rows[row_order_id_map[row]].content()
for total_regex in TOTAL_STATS_REGEX:
# print('debug common filter total line', row_content , total_regex , re.search(total_regex,row_content))
if re.search(total_regex, row_content):
find_total = True
first_total_appear_line = group_idx
break
if find_total:
first_total_appear_id = group_id
break
# print('debug common filter total line', first_total_appear_id, row_group)
if first_total_appear_id == row_group[-1][0]:
# 说明最后一列存在total 的情况
row_group[-1] = (
row_group[-1][0],
row_group[-1][1][:first_total_appear_line]
)
if row_group[-1][1] == []:
row_group = row_group[:-1]
return row_group
def assign_row_to_key_row(self, possible_key_row, ):
"""
        :param possible_key_row: group rows by key row, assigning every row to one of the key rows
:return:
"""
_BEFORE_ROW = -1
_AFTER_ROW = 1e4
# 首先对数据进行排序
# 注意,这个程序不能反复执行
possible_key_row = list(possible_key_row)
possible_key_row.sort()
# 加入一前以后,便于后面设计算法
possible_key_row.insert(0, _BEFORE_ROW)
possible_key_row.append(_AFTER_ROW)
# 建立row_order 对于rid 的字典
row_order_id_map = {self.rows[rid].order: rid for rid in self.row_content_in_each_fields}
# 按照从小到大排序
row_order_id_map = OrderedDict(sorted(row_order_id_map.items(), key=lambda x: x[0]))
matched_keyrow = []
cur_key_row_idx = 0
for row_order, rid in row_order_id_map.items():
if row_order == possible_key_row[cur_key_row_idx + 1]:
cur_key_row_idx += 1
matched_keyrow.append(possible_key_row[cur_key_row_idx])
else:
if possible_key_row[cur_key_row_idx + 1] == _AFTER_ROW:
matched_keyrow.append(possible_key_row[cur_key_row_idx])
else:
# 需要计算一下和下一个位置上的key_row 的iou
next_keyrow_id = row_order_id_map[possible_key_row[cur_key_row_idx + 1]]
if self.judge_intersection(rid, next_keyrow_id):
cur_key_row_idx += 1
matched_keyrow.append(possible_key_row[cur_key_row_idx])
before_key_row = []
row_group = defaultdict(list)
for row_order, match_id in zip(row_order_id_map.keys(), matched_keyrow):
if match_id == _BEFORE_ROW:
before_key_row.append(row_order)
else:
row_group[match_id].append(row_order)
return before_key_row, row_group, row_order_id_map
def judge_intersection(self, row_id, keyrow_id):
"""
        Check whether this row overlaps the given key row on the y-axis.
        :param row_id: id of a data row
        :param keyrow_id: id of a key row
:return:
"""
bbox1 = self.rows[row_id].bbox.rect
bbox2 = self.rows[keyrow_id].bbox.rect
# 判断y 方向上的iou
ymin_b1, ymax_b1 = bbox1[1], bbox1[3]
ymin_b2, ymax_b2 = bbox2[1], bbox2[3]
iou = (min(ymax_b1, ymax_b2) - max(ymin_b1, ymin_b2)) / (max(ymax_b1, ymax_b2) - min(ymin_b1, ymin_b2))
iou = max(iou, 0)
if iou > 0.5:
return True
else:
return False
def build_blocks(self, row_id, rows, row_order_id_map, node_info, auto_remove_tail=False):
"""
        :param row_id: row order of the key row that anchors this record
        :param rows: list of row orders belonging to this record
        :param row_order_id_map: mapping from row order to row id
        :param node_info:
        :param auto_remove_tail: set True for the last group; extra filtering rules are applied
                                 to drop trailing table-footer content
:return:
"""
lines_in_field = defaultdict(list)
useful_row = [True] * len(rows)
if auto_remove_tail and len(rows) >= 2:
# 需要自适应的去除一些不需要的信息
# rule1 , 计算rows 之间的间隔,如果存在一个很大的间隔,对后面的内容不考虑
# 拿到每个行的top
row_bottom = [self.rows[row_order_id_map[rid]].bbox.bottom for rid in rows]
row_height = [self.rows[row_order_id_map[rid]].bbox.height for rid in rows]
row_height_diff = np.diff(row_bottom) > 5 * np.mean(row_height)
after_useless = False
for idx in range(1, len(rows)):
if row_height_diff[idx - 1] == True:
after_useless = True
if after_useless == True:
useful_row[idx] = False
for row, is_useful in zip(rows, useful_row):
if not is_useful:
continue
row_info = self.row_content_in_each_fields[row_order_id_map[row]]
for fid, field_info in row_info.items():
# header_name = self.fields[fid].header.name
lines_in_field[fid].append(
{
'line_item': field_info['element_group'],
'line_content': field_info['content']
}
)
row_info = {}
for fid, field_info in lines_in_field.items():
line_content = [line['line_content'] for line in field_info]
line_item = [line['line_item'] for line in field_info]
header_name = self.fields[fid].header.name
header_type = self.fields[fid].header.head_type
update = False
if header_type in [self.header_type[htype] for htype in
self.cfg.ELEMENT_HANDLER.get('block_update_config', [])]:
logger.info('set update True for {}'.format(header_type))
update = True
row_info[fid] = Block(fid, row_id, header_name, header_type, line_content, line_item, update=update)
return row_info
|
[
"1193690571@qq.com"
] |
1193690571@qq.com
|
617850ebf299ad30915a18f564628a69243c5d24
|
dfc686228834750216b2cd6eea14d2a6d12422e4
|
/django2.2/reh/django_pro/store/orders/views.py
|
881709a343148285a92835c84be8b89ae69bf6eb
|
[] |
no_license
|
Parth-Ps/python
|
8466e8856bf301908544eb60ae4a68338ccf4550
|
bb448c2a7996d17883214fe8eb11caa61e211400
|
refs/heads/master
| 2023-01-22T13:30:50.507021
| 2020-12-02T07:59:53
| 2020-12-02T07:59:53
| 317,788,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,026
|
py
|
from django.shortcuts import render, redirect
from django.views.generic import View
from django.views.generic.list import ListView
from django.contrib import messages
from django.views.generic.edit import FormView, CreateView
from django.urls import reverse
from .forms import AddressForm, UserAddressForm
from .models import UserAddress, UserCheckout, Order
from products.mixins import LoginRequiredMixin
class UserAddressCreateView(CreateView):
form_class = UserAddressForm
template_name = 'forms.html'
def get_success_url(self):
return reverse('cart:cart_checkout')
def get_user_checkout(self):
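        # The checkout id is expected to already be in the session; resolve it to a UserCheckout record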
user_checkout_id = self.request.session['user_checkout_id']
return UserCheckout.objects.get(id=user_checkout_id)
def form_valid(self, form, *args, **kwargs):
form.instance.user = self.get_user_checkout()
return super(UserAddressCreateView, self).form_valid(form, *args, **kwargs)
class AddressFormView(FormView):
form_class = AddressForm
template_name = 'orders/address_select.html'
def dispatch(self, request, *args, **kwargs):
b_address, s_address = self.get_address()
if not (b_address.exists() and s_address.exists()):
messages.success(self.request, 'Please add an address before continuing')
return redirect('cart:add_address')
return super(AddressFormView, self).dispatch(request, *args, **kwargs)
def get_address(self, *args, **kwargs):
user_checkout = self.request.session['user_checkout_id']
b_address = UserAddress.objects.filter(
type=UserAddress.BILLING, user_id=user_checkout)
s_address = UserAddress.objects.filter(
type=UserAddress.SHIPPING, user_id=user_checkout)
return b_address, s_address
def get_form(self):
form = super(AddressFormView, self).get_form()
b_address, s_address = self.get_address()
form.fields['billing_address'].queryset = b_address
form.fields['shipping_address'].queryset = s_address
return form
def form_valid(self, form, *args, **kwargs):
billing_address = form.cleaned_data['billing_address']
shipping_address = form.cleaned_data['shipping_address']
self.request.session['billing_address_id'] = billing_address.id
self.request.session['shipping_address_id'] = shipping_address.id
return super(AddressFormView, self).form_valid(form, *args, **kwargs)
def get_success_url(self):
return reverse('cart:cart_checkout')
class ConfirmOrderView(View):
def post(self, request):
order = Order.objects.get(id=request.session['order_id'])
if request.POST.get('complete_order'):
order.complete_order()
return redirect('accounts:payment')
class OrdersList(LoginRequiredMixin, ListView):
model = Order
template_name = 'orders/orders_list.html'
def get_queryset(self):
return Order.objects.filter(user__user=self.request.user)
|
[
"parth0129.certificate@gmail.com"
] |
parth0129.certificate@gmail.com
|
2fcddc203d429f2ba2f344a2ef75518082dad2e1
|
1b2bcf9b944e390462b54a64f28f66435a9c62bf
|
/shakespearean_pokemon/exceptions.py
|
2892df99c0f91ff301437c7957ca0df929ebbc07
|
[] |
no_license
|
dalehp/shakespearean_pokemon
|
484fccd15e52882ca7cf19be28a77657006953af
|
31a7e707795fe7977bf6a7b2975d4933ea7d1282
|
refs/heads/master
| 2022-06-02T06:02:16.352253
| 2020-05-04T07:44:49
| 2020-05-04T07:44:49
| 260,740,428
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
class InvalidPokemonError(Exception):
pass
class UnknownAPIError(Exception):
"""Unexpected errors from external API. Should probably handle this better for
certain scenarios, e.g. retry on 503?"""
pass
class TooManyRequestsError(Exception):
pass
|
[
"dale@Dales-MBP.lan"
] |
dale@Dales-MBP.lan
|
18985bcfa506725407f81a6972befff206a7eb72
|
9813636bf3d99ca727baddc9791b4c02e0532457
|
/book_api_django/book_api/models.py
|
ffee109af22725969cc2ca72c08ae9841055e62d
|
[] |
no_license
|
amfueger/django-starter-code
|
bb791564ee06b2b0cc298c0293c72d595738de5f
|
ce362355af1aadeb921fb6cb3804f2d543a536b8
|
refs/heads/master
| 2020-04-16T21:52:21.518309
| 2018-11-17T00:30:41
| 2018-11-17T00:30:41
| 165,943,460
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
from django.db import models
# Create your models here.
class Book(models.Model):
title = models.CharField(max_length=100)
author = models.CharField(max_length=100)
year = models.IntegerField(default=0)
def __str__(self):
return self.title
|
[
"Jimmyhaff3@gmail.com"
] |
Jimmyhaff3@gmail.com
|
f9b0fe3e32e67ea52512f7e359041022a22cc2ec
|
e0c5cb0500a1200168e4e2a5b8897715a0c51a15
|
/MSI_calculator.py
|
944461f880080ed81dff269244c3e095b22e0ee9
|
[] |
no_license
|
OriolGraCar/Naivebayes_AGB
|
e976d52cf4272fee992574ba14e677ed3d506a3d
|
d6f873e87101a8434faa2b70e44a748d8a911a03
|
refs/heads/master
| 2021-05-03T10:21:59.849711
| 2018-02-28T08:46:24
| 2018-02-28T08:46:24
| 120,532,697
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,200
|
py
|
import math
def entropy(splicing_info):
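    # Weighted conditional entropy, in bits, of tissues given the 'up'/'down' regulation classes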
total_tissues = len(splicing_info['up'])
prob_up = 0
prob_down = 0
count_up = 0
count_down = 0
#count_NA = 0
conditional_prob_up = []
conditional_prob_down = []
summatory_up = 0
summatory_down = 0
for element in splicing_info['up']:
conditional_prob_up.append(splicing_info['up'][element])
count_up += splicing_info['up'][element]
for element in splicing_info['down']:
conditional_prob_down.append(splicing_info['down'][element])
count_down += splicing_info['down'][element]
#for element in splicing_info['NA']:
# count_NA += splicing_info['NA'][element]
prob_up = count_up/(count_up + count_down)# + count_NA)
prob_down = count_down/(count_up + count_down)# + count_NA)
summatory_up = sum([(x/count_up)*(math.log(x/count_up)/math.log(2)) for x in conditional_prob_up if x != 0])
summatory_up = summatory_up * prob_up
summatory_down = sum([(x/count_down)*(math.log(x/count_down)/math.log(2)) for x in conditional_prob_down if x != 0])
summatory_down = summatory_down * prob_down
return -(summatory_up + summatory_down)
def calculate_MSI(input_file,output_file):
START = True
tissue_list = []
different_tissue = set()
all_info = {}
result = []
with open(input_file) as fl:
for line in fl:
line = line.rstrip()
data = line.split("\t")
all_info = {}
if START:
tissue_list = data
START = False
for element in data:
different_tissue.add(element)
else:
all_info[data[0]] = {}
all_info[data[0]]['up'] = {}
all_info[data[0]]['down'] = {}
all_info[data[0]]['NA'] = {}
for different in different_tissue:
all_info[data[0]]['up'][different] = 0
all_info[data[0]]['down'][different] = 0
all_info[data[0]]['NA'][different] = 0
for i in range(1,len(data)):
all_info[data[0]][data[i]][tissue_list[i-1]] += 1
result.append([data[0],entropy(all_info[data[0]])])
with open(output_file,'w') as out:
result = sorted(result, key = lambda x: x[1])
for element in result:
out.write("%s\t%s\n" % (element[0],element[1]))
if __name__ == "__main__":
#testing
print(calculate_MSI('training.txt','information_gained.txt'))
|
[
"noreply@github.com"
] |
OriolGraCar.noreply@github.com
|
e36b9e898b42d2ec4030c7b698009d6432b9283d
|
59e2cdec9e271fd05f9c99f485f1a5c020979402
|
/src/edit.py
|
ea66ac3736ba296c96d804345f1094b7b4a99c0a
|
[] |
no_license
|
zwaltman/LSystem
|
c886e8d560a175f0875e2cb43ae5a4d4d4d6785f
|
4406cbb54a802afb2c7ba7bf4bd3eabedffdcba8
|
refs/heads/master
| 2020-04-17T12:05:21.697188
| 2018-05-28T05:46:54
| 2018-05-28T05:46:54
| 65,895,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,773
|
py
|
"""
Editor for L-systems objects
Used for creation of new systems and modifying previously saved ones
"""
import LSystemObject
from printer import printVars, printConsts, printRules, printStart
from modify import addVar, deleteVar, addConst, deleteConst, addRule, deleteRule, addStart, deleteStart
from load import loadSystem
from save import save
def edit(systemName=None):
"""
Editor for L-systems
"""
head = ("\n // LSystem Generator // LSystem Editor //\n\n" +
" 'exit': description\n" +
" 'view': description\n" +
" 'delete': description\n" +
" 'add': description\n" +
" 'save': description\n")
print head
# Initiate system
system = LSystemObject.LSystemObject()
# Import attributes from system file, if file name provided
if systemName:
loadSystem(system, systemName)
# View, modify system settings
while True:
input = raw_input('> ').split()
if input:
cmd = input[0]
args = input[1:]
if cmd == 'exit':
return
elif cmd == 'view':
if not args:
printVars(system)
printConsts(system)
printRules(system)
printStart(system)
elif args[0] == 'variables':
printVars(system)
elif args[0] == 'constants':
printConsts(system)
elif args[0] == 'rules':
printRules(system)
elif args[0] == 'start':
printStart(system)
else:
print " Error: %s is not a valid system attribute. See help for guidelines." % args[0]
elif cmd == 'add':
if not args:
print "Error: argument required. See help for guidelines."
elif args[0] == 'variable':
addVar(system, args[1])
elif args[0] == 'constant':
addConst(system, args[1])
elif args[0] == 'rule':
addRule(system, args[1], args[2])
elif args[0] == 'start':
addStart(system, args[1])
else:
print "Error: %s is not a valid system attribute. See help for guidelines." % args[0]
elif cmd == 'delete':
if not args:
print "Error: two arguments required. See help for guidelines."
elif args[0] == 'all':
# Wipe entire system by creating new instance of LSystem
system = LSystemObject.LSystemObject()
elif args[0] == 'variable':
if args[1]:
deleteVar(system, int(args[1]) - 1)
else:
deleteVar(system)
elif args[0] == 'constant':
if args[1]:
deleteConst(system, int(args[1]) - 1)
else:
deleteConst(system)
elif args[0] == 'rule':
if args[1]:
deleteRule(system, args[1])
else:
deleteRule(system)
elif args[0] == 'start':
deleteStart()
elif cmd == 'save':
if args:
save(system, args[0])
else:
print "Error: please provide a file name"
else:
                print " Error: %s is not a valid command. See help for guidelines." % cmd
|
[
"z.waltman@gmail.com"
] |
z.waltman@gmail.com
|
91917955537aa8f8ffc9ebad84b1e29e516d770b
|
f2c64c03c0e6d84daffb7464df471ef33944f6a8
|
/keras_kit/__init__.py
|
a7388c89b4d34fc23a78bf9defc6c67395afcfef
|
[
"MIT"
] |
permissive
|
bigboyooo/NER-toolkits
|
c290b279743f3968913d422ebdfde6625ff47861
|
bd568b62b30672fa014308413b7715ec11e8064a
|
refs/heads/master
| 2020-06-05T16:49:32.178363
| 2019-06-11T15:12:25
| 2019-06-11T15:12:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 72
|
py
|
# _*_ coding: utf-8 _*_
from . import common
from . import transformer
|
[
"lyssym@sina.com"
] |
lyssym@sina.com
|
c96b38abe78c574577aa444659c3ca827a2d4b36
|
fc68ed9f0ca806c5f2473ffb5ffd0430a29393a7
|
/bomtool/pngen.py
|
67c65b730cfbe4421ac011a9b5c083d9fc64fb74
|
[
"MIT"
] |
permissive
|
iromero91/bomtool
|
f48bf3894323fcf09eb6641db020d3a667d50dcf
|
fd967d84a220d7876d98cc02b072ffc71f33c767
|
refs/heads/master
| 2021-06-27T11:41:23.947454
| 2021-02-16T15:59:39
| 2021-02-16T15:59:39
| 67,799,755
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,265
|
py
|
def RC(value, tolerance=5.0, power=None, package="0603",pkgcode="07"):
res = {"manufacturer": "Yageo"}
suffix = ['R', 'K', 'M']
digits = 3 if tolerance < 5 else 2
while value >= 1000:
value = value/1000.
suffix.pop(0)
suffix = suffix[0]
whole = str(int(value))
decimal = str(round((value-int(value)) * 10**(digits-len(whole))))
v_str = (whole+suffix+decimal).strip("0")
if v_str == "R": v_str = "0R"
if tolerance == 0.5:
t_str = 'D'
elif tolerance == 1.0:
t_str = 'F'
else:
t_str = 'J'
res["MPN"] = "RC{}{}R-{}{}L".format(package, t_str, pkgcode, v_str)
return res
_cc_voltages = {6300: '5', 10000: '6', 16000: '7', 25000: '8', 50000: '9'}
def CC_XxR(value, tolerance=10, voltage=16, package='0603', pkgcode='R', dielectric="X7R"):
res = {"manufacturer": "Yageo"}
c_pf = int(value * 1e12)
exp = 0
while c_pf >= 100:
exp += 1
c_pf /= 10
if package > '0603' and pkgcode == 'R':
pkgcode = 'K'
c_str = str(int(c_pf))+str(exp)
v_mv = round(voltage*1e3)
v_str = _cc_voltages.get(v_mv, '9')
t_str = 'K'
res["MPN"] = "CC{}{}{}{}{}BB{}".format(package, t_str, pkgcode, dielectric, v_str, c_str)
return res
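
# --- Minimal usage sketch (added illustration, not part of the original module) ---
# The expected part numbers below were worked out by hand from the functions
# above and follow Yageo's published numbering scheme; treat them as examples.
if __name__ == "__main__":
    print(RC(4700))        # {'manufacturer': 'Yageo', 'MPN': 'RC0603JR-074K7L'}  (4.7k, 5%, 0603)
    print(CC_XxR(100e-9))  # {'manufacturer': 'Yageo', 'MPN': 'CC0603KRX7R7BB104'}  (100 nF, X7R, 16 V, 0603)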
|
[
"jose.cyborg@gmail.com"
] |
jose.cyborg@gmail.com
|
a7a12afb1c022ae8a75143cb8063ae50fcb75dae
|
32b28c9d13797c17ddd215d762dab59d1eaf6ae7
|
/src/utils/bitcoin_address.py
|
5ceecf176aa3496c6d1e3f5dbab4fa849b13f827
|
[] |
no_license
|
pierce403/hashbounty
|
15ee3fff21c519d5eaf4480f30bbde357dc2da40
|
f918c999ee41406db74697f79c45f43b36f5b9ad
|
refs/heads/master
| 2021-01-19T16:33:18.872478
| 2011-07-02T02:37:57
| 2011-07-02T02:37:57
| 1,846,117
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,571
|
py
|
import re
from hashlib import sha256
import math
def is_valid(a):
return get_bcaddress_version(a) is not None
class AddressValidator(object):
def __call__(self, form, field):
if not is_valid(field.data):
from wtforms.validators import ValidationError
raise ValidationError(u'This address is not valid.')
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
def b58encode(v):
""" encode v, which is a string of bytes, to base58.
"""
long_value = 0L
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# Bitcoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == '\0': nPad += 1
else: break
return (__b58chars[0]*nPad) + result
def b58decode(v, length):
""" decode v into a string of len bytes
"""
long_value = 0L
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = ''
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result
def get_bcaddress_version(strAddress):
""" Returns None if strAddress is invalid. Otherwise returns integer version of address. """
addr = b58decode(strAddress,25)
if addr is None: return None
version = addr[0]
checksum = addr[-4:]
vh160 = addr[:-4] # Version plus hash160 is what is checksummed
h3=sha256(sha256(vh160).digest()).digest()
if h3[0:4] == checksum:
return ord(version)
return None
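
# --- Hedged example (added note) ---
# is_valid() returns True only when the Base58Check checksum embedded in the
# address matches, e.g. for the well-known genesis-block address:
#     is_valid('1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa')   # True  (version byte 0)
#     is_valid('1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNb')   # False (checksum no longer matches)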
|
[
"jure.vrscaj@gmail.com"
] |
jure.vrscaj@gmail.com
|
b42a2b08cb31f2974ae2b3a6c1c3ca86a3b16b17
|
5f10ca2439551040b0af336fd7e07dcc935fc77d
|
/Binary tree/二叉树的构建/109. Convert Sorted List to Binary Search Tree.py
|
fc1c80d96dc7d82272ce983078633f7b7bd7e5cd
|
[] |
no_license
|
catdog001/leetcode2.0
|
2715797a303907188943bf735320e976d574f11f
|
d7c96cd9a1baa543f9dab28750be96c3ac4dc731
|
refs/heads/master
| 2021-06-02T10:33:41.552786
| 2020-04-08T04:18:04
| 2020-04-08T04:18:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,443
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/2/27 10:21
# @Author : LI Dongdong
# @FileName: 109. Convert Sorted List to Binary Search Tree.py
''''''
'''
Problem analysis
1. Requirement: Given a singly linked list where elements are sorted in ascending order, convert it to a height balanced BST.
For this problem, a height-balanced binary tree is defined as a binary tree in which the depth of the two subtrees of every node never differ by more than 1.
Example:
Given the sorted linked list: [-10,-3,0,5,9],
One possible answer is: [0,-3,9,-10,null,5], which represents the following height balanced BST:
0
/ \
-3 9
/ /
-10 5
2. Understanding: use the node values of the linked list to create a height-balanced BST
3. Type: build a tree
4. Confirm input/output and edge cases:
input: linkedlist head, linkedlist node is order? Y, repeated? N, number of node? N node value range? N
output: root of tree with definition
corner case:
head of linkedlist is None: return None
5. Methods and analysis: brute force - cut the linked list; convert the linked list to an array + binary split; inorder simulation
time complexity order: O(N)
space complexity order:
6. How it could be tested
'''
'''
A. brute force - cut linkedlist
Method:
recursion(list head) return root
end: head is None
slow and fast pointer, when fast is None or fast.next is None, slow in mid
pre_slow.next = None
root = slow
root.left = recursion(head)
root.right = recursion(slow.next)
time O(NlogN) space O(N)
Common pitfall: it must be "while fast and fast.next" (and, not or) -- both have to be non-None; if either is missing it fails
'''
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def sortedListToBST(self, head: ListNode) -> TreeNode:
if not head: # corner case
return None
if not head.next:
return TreeNode(head.val)
slow = fast = head
while fast and fast.next:
prev_s = slow
slow = slow.next
fast = fast.next.next
prev_s.next = None
root = TreeNode(slow.val)
root.left = self.sortedListToBST(head)
root.right = self.sortedListToBST(slow.next)
return root
'''
B. Convert the linked list to an array + binary split
Method:
1. traveral linkedlist to save node val in list # time complexity O(N) space O(N)
2. self.binary(list) recursion, return root # time O(N) sO(N)
end: list is None
mid = len(list) // 2
root.left = binary(left list)
root.right = binary(right list)
return root
tO(N) sO(N) O(N) to keep the output, and O(logN) for the recursion stack
Common pitfall: it is root = TreeNode(nodeList[mid]), not root = TreeNode(mid)
'''
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def sortedListToBST(self, head: ListNode) -> TreeNode:
if not head: # corner case
return None
nodeList = self.transfer(head)
root = self.dfs(nodeList)
return root
def transfer(self, head): # output [nodes' val]
if not head: # corner case
return []
res = []
while head:
res.append(head.val)
head = head.next
return res
def dfs(self, nodeList): # return tree root node
if not nodeList: # corner case
return None
if len(nodeList) == 1:
return TreeNode(nodeList[0])
mid = len(nodeList) // 2
root = TreeNode(nodeList[mid])
root.left = self.dfs(nodeList[:mid])
root.right = self.dfs(nodeList[mid + 1:])
return root
'''
test code
corner case: None -> None
check main then helper()
[-10,-3,0,5,9]
1. sortedListToBST
nodelist = [-10,-3,0,5,9]
root
return root
2. transfer()
res = []
head = -10
loop 1:
res = [-10]
head = -3
loop 2:
res = [-10, -3]
head = 0
loop 3:
res = [-10, -3, 0]
head = 5
loop 4
res = [-10, -3, 0, 5]
head = 9
loop 5
res = [-10, -3, 0, 5,9]
head = None
return res
3. dfs()
mid = 5 // 2 = 2
root = 0
root.left = dfs([-10, 3])
mid = 2 // 2= 1
root = 3
root.left = dfs([:mid]) = dfs[-10]
return treeNode(-10)
root.right = dfs([mid + 1:]) = dfs []
return None
0
/ \
3 9
/ \ /
-10 N 5
root.left = dfs([3:]) = dfs[5, 9]
mid = 2// 2 = 1
root = 9
root.left = dfs(5)
root.right = None
return root
'''
'''
C. inorder simulation
Method:
1. iterative over the linkedlist to find its length
2. fnd the mid elem by (start + end) // 2
3. recurse on the left half by using start, mid -1 as starting and ending points
4. head as root, and head = head.next
5. recurse on the right half by using mid + 1, end as starting and ending points
time O(N) sO(N): while recursion use O(logN)
'''
class Solution:
def findSize(self, head):
tmp = head
res = 0
while tmp:
tmp = tmp.next
res += 1
return res
def sortedListToBST(self, head):
# Get the size of the linked list first
size = self.findSize(head)
# Recursively form a BST out of linked list from l --> r
def convert(l, r):
nonlocal head
#
# Invalid case
if l > r:
return None
mid = (l + r) // 2
# First step of simulated inorder traversal. Recursively form
# the left half
left = convert(l, mid - 1)
# Once left half is traversed, process the current node
node = TreeNode(head.val)
node.left = left
# Maintain the invariance mentioned in the algorithm
head = head.next
# Recurse on the right hand side and form BST out of them
node.right = convert(mid + 1, r)
return node
return convert(0, size - 1)
|
[
"lidongdongbuaa@gmail.com"
] |
lidongdongbuaa@gmail.com
|
6ef336fe2b4c4c6570f5f608a03e7fb1f0c3b6fa
|
a44d6c8ddf4d43b8f623053ff8e4863d9a5f40d6
|
/readlikeme/wsgi.py
|
fa0c6764c3e6eff3c7b8c7ce9e0fa1b1c6f702b8
|
[] |
no_license
|
martolini/readlikeme
|
c5bdd31f6233203454ee46c600e6add7936da39e
|
69e2509a5b747553fe723d890429444fbda24f14
|
refs/heads/master
| 2021-03-19T07:12:26.034555
| 2014-03-18T20:54:06
| 2014-03-18T20:54:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
WSGI config for readlikeme project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "readlikeme.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
[
"msroed@gmail.com"
] |
msroed@gmail.com
|
4163cd7fd42ae455103ea2f04a36f4bd743d26a8
|
f895e1a0ca2499cbe733bde0584d5244ddb2e438
|
/system.py
|
343ad1411ed8ce3cbaaae7ded5a3549cf7793bcc
|
[] |
no_license
|
rcackerman/security-camera
|
7d4c1f597bd6f50e1b6da69b58e1ab5978e39858
|
3411d985400f3cb2f8d5e23162e883285b1efd4f
|
refs/heads/master
| 2021-01-13T13:59:24.634613
| 2015-09-05T14:57:20
| 2015-09-05T14:57:20
| 40,516,281
| 0
| 1
| null | 2015-09-05T14:57:15
| 2015-08-11T02:12:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,087
|
py
|
'''
TODO:
* Make sure NTP is running so we have an accurate time
* Automatically deal with time zone! (DST etc)
'''
import os

# Credentials come from the environment (the original sketch used a
# JavaScript-style process.env lookup).
S3_KEY = os.environ.get('S3_KEY')
S3_SECRET = os.environ.get('S3_SECRET')

MIN_DIFFERENT_FRAMES = 4  # minimum number of different frames to trigger alert

active = True  # Is the security camera on?
streak = 0  # Is there motion?

# List of photo files
# maybe this should be a FIFO queue of length TRIGGER_LENGTH?
pastPhotos = []

# save the S3 urls
recording = []


def onOff(state):
    '''
    Toggle the system on or off
    Maybe with Twilio?
    '''
    global active
    active = state
    if active:
        runApp()


def saveToS3(file):
    '''
    Save an image file to S3.
    '''
    # save the file to S3 with a timestamp
    # Do we need to get the metadata from the image?
    # Maybe put it in a folder so we don't hit S3's file count limits?
    # then save the URL for sending out later
    # recording.append(s3_url)
    pass


def sendAlert():
    '''
    Send us a message after the recording is over.
    '''
    msg = 'Motion detected!\n'
    for photo in recording:
        msg += photo + '\n'
    # send msg -- twilio? email? both?


def runApp():
    '''
    Monitor the camera
    '''
    # How many FPS does this give us?
    # Should we instead run this once per second and just do nothing if we are inactive?
    # https://docs.python.org/2/library/threading.html#timer-objects
    while active:
        # take a photo
        # pastPhotos.append(photo)
        # if there is a previous photo, compare it to the last photo
        # is there significant change?
        # if yes:
        #     streak += 1
        #     if streak >= MIN_DIFFERENT_FRAMES:
        #         save everything to S3 (do we need a filename w/timestamp by now?)
        #         once everything is saved:
        #         pastPhotos.empty()???  # This won't work..... need to save at least one photo for comparison.
        #     else:
        #         queue up the different photo in case we want it
        #         pastPhotos.append(photo)
        # if no significant change:
        #     if we had a streak, and it's over, we need to send off an alert
        #     sendAlert()
        #     streak = 0  # reset the streak
        pass
|
[
"mahatm@gmail.com"
] |
mahatm@gmail.com
|
f648966895199062456e5e080b63bc975caf9b28
|
648a6e11bea6a15bd677ecf152592edd71203734
|
/Lab6/task-6.1.4.py
|
b95f37d2fd863d0bf10179c439c17e8fea91ce6c
|
[] |
no_license
|
paulkirwan6/AllLabs
|
70975233c2a545e0acf116f9326bc1153fd4d497
|
5c660eb352e61047519e87deaa5a3f0a0155785e
|
refs/heads/master
| 2020-09-20T12:13:17.593871
| 2019-11-27T18:59:30
| 2019-11-27T18:59:30
| 224,473,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 700
|
py
|
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import SocketServer as S
import datetime  # for the current time, generated dynamically
class S(BaseHTTPRequestHandler):
def set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
self.set_headers()
        self.wfile.write(str(datetime.datetime.now()))
def run(server_class=HTTPServer, handler_class=S, port=8000):
server_address = ('',port)
print 'Starting server'
httpd = server_class(server_address, handler_class)
print 'Server up'
httpd.serve_forever()
if __name__ == "__main__":
from sys import argv
if len(argv) == 2:
run(port=int(argv[1]))
else:
run()
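
# --- Usage note (added illustration) ---
# Start the server and fetch the current time, e.g.:
#     python task-6.1.4.py 8000
#     curl http://localhost:8000/     -> responds with the server's current datetime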
|
[
"paulkirwan6@gmail.com"
] |
paulkirwan6@gmail.com
|
347dbf0216b68f16364f68c9c54ed2359479483c
|
9793d734de4283cec67ca1c398a7e71fddbdb19d
|
/test/ccp/test_ccp_inputs.py
|
8344778b53a40f78841ad29b887700f026a69d8b
|
[] |
no_license
|
K-Molloy/whitebeam
|
40adcb9e94c59db66da24b65b0d5b7ac01a4f740
|
ef6bf6cbdff1f7713aead37a5f609fe2cfae8f28
|
refs/heads/main
| 2023-02-17T12:39:33.545920
| 2021-01-18T07:18:15
| 2021-01-18T07:18:15
| 327,152,651
| 0
| 0
| null | 2021-01-11T06:23:17
| 2021-01-06T00:10:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,187
|
py
|
from whitebeam.base import CCPTreeClassifier
from sklearn.model_selection import train_test_split
import numpy as np
import pytest
from test.utils import generate_binary_classification
# ---------------
# Expected Error
# ---------------
@pytest.mark.parametrize("param_alpha", [2+3j, "3", []])
def test_alpha_alpha_data_types_fail(param_alpha):
X_train, X_test, y_train, y_test = generate_binary_classification()
# should fail DURING THE <model.fit> CALL
with pytest.raises(Exception) as e_info:
model = CCPTreeClassifier(alpha=param_alpha)
model.fit(X_train, y_train)
@pytest.mark.parametrize("param_depth", [2+3j, "3", []])
def test_alpha_depth_data_types_fail(param_depth):
X_train, X_test, y_train, y_test = generate_binary_classification()
# should fail DURING THE <model.fit> CALL
with pytest.raises(Exception) as e_info:
model = CCPTreeClassifier(max_depth=param_depth)
model.fit(X_train, y_train)
# for some reason, a complex number does not raise an error here :/
# neither does an empty list
# Perhaps the split condition gets ignored in some way
@pytest.mark.parametrize("param_split", ["3"])
def test_alpha_split_data_types_fail(param_split):
X_train, X_test, y_train, y_test = generate_binary_classification()
# should fail DURING THE <model.fit> CALL
with pytest.raises(Exception) as e_info:
model = CCPTreeClassifier(min_samples_split=param_split)
model.fit(X_train, y_train)
@pytest.mark.parametrize("param_leaf", [2+3j, "3", []])
def test_alpha_leaf_data_types_fail(param_leaf):
X_train, X_test, y_train, y_test = generate_binary_classification()
# should fail DURING THE <model.fit> CALL
with pytest.raises(Exception) as e_info:
model = CCPTreeClassifier(min_samples_leaf= param_leaf)
model.fit(X_train, y_train)
# ---------------
# Expected Pass
# ---------------
@pytest.mark.parametrize("param_alpha", [True, 3.2])
def test_alpha_alpha_data_types_pass(param_alpha):
X_train, X_test, y_train, y_test = generate_binary_classification()
model = CCPTreeClassifier(alpha=param_alpha)
model.fit(X_train, y_train)
@pytest.mark.parametrize("param_depth", [True, 3.2])
def test_alpha_depth_data_types_pass(param_depth):
X_train, X_test, y_train, y_test = generate_binary_classification()
model = CCPTreeClassifier(max_depth=param_depth)
model.fit(X_train, y_train)
# for some reason, a complex number does not raise an error here :/
# Additionally, passing a list as split triggers a deprecation warning from numpy arrays
@pytest.mark.parametrize("param_split", [True, 3.2, 3+2j, []])
def test_alpha_split_data_types_pass(param_split):
X_train, X_test, y_train, y_test = generate_binary_classification()
model = CCPTreeClassifier(min_samples_split=param_split)
model.fit(X_train, y_train)
@pytest.mark.parametrize("param_leaf", [True, 3.2])
def test_alpha_leaf_data_types_pass(param_leaf):
X_train, X_test, y_train, y_test = generate_binary_classification()
    # should pass during the <model.fit> call
model = CCPTreeClassifier(min_samples_leaf= param_leaf)
model.fit(X_train, y_train)
|
[
"kieran.b.molloy@outlook.com"
] |
kieran.b.molloy@outlook.com
|
8713742e4a25b2f594904e6f2e0e9a8afae4c048
|
b7c6a4ceb4f6f980f8dbc3c273402d3a70001e38
|
/materialize_threats/mx/models/Text.py
|
6dbdf9fbefa3283ba543ddb6afdc3d5e690985aa
|
[
"Apache-2.0"
] |
permissive
|
ikszero/materialize_threats
|
e1924e7fe93c7eea382f4f47f2f97752eb5f09ff
|
fe811dc4c719baa1258a55bb43eb8a465e050da1
|
refs/heads/master
| 2022-11-27T10:16:56.843204
| 2020-08-03T05:11:19
| 2020-08-03T05:11:19
| 285,853,255
| 0
| 1
|
Apache-2.0
| 2020-08-07T14:49:07
| 2020-08-07T14:49:06
| null |
UTF-8
|
Python
| false
| false
| 1,189
|
py
|
from ..utils import MxConst
from .Styles import Styles
class Text:
def __init__(self, anchor, family, size, text, color):
self.anchor = anchor
self.family = family
self.size = size
self.text = text
self.color = color
def get_mx_style(self):
align = MxConst.CENTER if self.anchor == MxConst.MIDDLE else MxConst.START
# TODO: add right
margin = (
"margin-top:4px;" if self.anchor == MxConst.MIDDLE else "margin-left:4px;"
)
rescaled_size = 10.0 * (self.size / 14.0)
return Styles.TEXT.format(
align=align,
margin=margin,
size=rescaled_size,
family=self.family or MxConst.DEFAULT_FONT_FAMILY,
color=self.color or MxConst.DEFAULT_FONT_COLOR,
)
@staticmethod
def from_xml(text):
if text != None:
text = text.replace("<", "<").replace(">", ">")
else:
text = ""
return Text(
text = text,
anchor=None,
family=MxConst.DEFAULT_FONT_FAMILY,
size=MxConst.DEFAULT_TEXT_SIZE,
color=None,
)
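
# --- Hedged usage sketch (added illustration) ---
# from_xml() un-escapes the < / > entities found in the source XML, e.g.:
#     t = Text.from_xml("&lt;b&gt;label&lt;/b&gt;")
#     t.text   # -> "<b>label</b>"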
|
[
"33043272+jacob-salassi@users.noreply.github.com"
] |
33043272+jacob-salassi@users.noreply.github.com
|
f717594b82fedbfdf3ca400ff7a19b81fcb4c4ef
|
1f2262bf0f03c010fdfa595f7d6d1e0dd215f817
|
/myapp/migrations/0054_remove_checkoutcart_country.py
|
235bbe69057d2a633aa447ebf0fb518dd6c17b60
|
[] |
no_license
|
nabin-01/Ecommerce_Django
|
ea27d13491e2dbeb0a2c2e52d6372f3d0b07905b
|
104a3a33672e30234fe776e0504bb6487aa78a9a
|
refs/heads/main
| 2023-04-15T18:30:26.408146
| 2021-05-02T20:08:56
| 2021-05-02T20:08:56
| 358,646,257
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 335
|
py
|
# Generated by Django 3.1.7 on 2021-04-24 16:41
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('myapp', '0053_checkoutcart_country'),
]
operations = [
migrations.RemoveField(
model_name='checkoutcart',
name='country',
),
]
|
[
"nabinghimire1001@gmail.com"
] |
nabinghimire1001@gmail.com
|
553206a6a06c245cc0ea6f8521414d4baf2819d8
|
e3b3ae9aec7e1a2eacdf0176c0b1a6bbf8834f75
|
/imperialtometric.py
|
15d1e003f6841b45135e5558d4a40136851e7902
|
[] |
no_license
|
carolinascarneiro/pp-lesson-projects
|
9f5954790372bbb7f5cb312c376c1d5366dd5cb2
|
7e89d01a461e6edc0c2f5d53bb42caff4fd80edd
|
refs/heads/main
| 2023-06-30T04:11:43.876987
| 2021-08-01T16:11:43
| 2021-08-01T16:11:43
| 389,911,117
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86
|
py
|
#Convert from feet+inches to cm
feet = 5
inches = 3
cm = 30.48*feet + 2.54*inches
print(cm)
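# Worked check (added note): with the exact factors, 5 ft = 152.4 cm and
# 3 in = 7.62 cm, so the script prints about 160.02 (roughly 1.6 m).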
|
[
"carneiro.carolina@yahoo.com.br"
] |
carneiro.carolina@yahoo.com.br
|
5ebb4852fd107ae15be7b6ad2880aa9fcda1c20e
|
24815c399098b90a4d825f60a78148c8f895848d
|
/test_tdd_demo1a.py
|
ce4bad4abab1ff3d6a4f6a0e9c1f9aca57fe4532
|
[] |
no_license
|
kevinelong/tdd_demo
|
710c250a2f65a5df5e765d74ab89244b2d93e8d9
|
e9a70f0ba34d0c1ec0633697eed54b7ed9d1b6c7
|
refs/heads/master
| 2022-11-13T12:05:35.807818
| 2020-07-02T18:53:33
| 2020-07-02T18:53:33
| 270,855,427
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,325
|
py
|
"""
MARKETING REQUEST FOR DEVELOPMENT:
We want a function called "backwards",
that returns its input reversed.
e.g. "ABC" becomes "CBA"
"""
"""
ENGINEERING FUNCTIONAL REQUIREMENTS:
1. The identifier "backwards" must point to a function.
2. The function must accept a string as input.
3. The output value from the function must be the input reversed.
"""
"""
THE TDD PROCESS (Repeat for each functional requirement):
- Red = Make a failing test; see it FAIL.
- Green = Do the minimum required to make it pass; see it PASS.
- Refactor = Make it work well; see it still passes.
"""
# def backwards(): pass
# def backwards(text): return True
# def backwards(text): return text[::-1]
def backwards(text):
letters = list(text)
letters.reverse()
return "".join(letters)
suite: dict = {
"1. Is Function": lambda: callable(backwards),
"2. Accepts String": lambda: backwards("ABC"),
"3. Is Reversed": lambda: "ZYX" == backwards("XYZ"),
}
# SIMPLE TEST FRAMEWORK
red, green = (u"\u001b[31mFAIL-RED: ", u"\u001b[32mPASS-GREEN: ")
for name, test in suite.items():
try:
print(f"{green if test() else red} : {name}")
except Exception as message:
print(f"{red}{name} MESSAGE: {message}")
''
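
# Expected output when run directly (added note): all three checks print under
# PASS-GREEN, since backwards is callable, returns a value for "ABC", and
# turns "XYZ" into "ZYX".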
|
[
"kevinelong@gmail.com"
] |
kevinelong@gmail.com
|
d92a5039ae80d5127a0597cbd9b592de97baa478
|
d6185183eea8a99f486f346c8b03b3dc3b71bd54
|
/api/serializers.py
|
777656971ce300f5d824d6f07adf9e55abdf0a1d
|
[] |
no_license
|
dohcgle/squad
|
98acbd5461cb48b75a373a135bdd3c0eb8a952f3
|
e24606276dcc84550f0e5d0d569cb9d1f111b9c7
|
refs/heads/main
| 2023-07-05T17:27:37.827912
| 2021-08-24T10:07:02
| 2021-08-24T10:07:02
| 398,609,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 512
|
py
|
from rest_framework import serializers
from .models import Members, Squad, Power
class PowerSerializer(serializers.ModelSerializer):
class Meta:
model = Power
exclude = ('id', )
class MembersSerializer(serializers.ModelSerializer):
powers = PowerSerializer(many=True)
class Meta:
model = Members
exclude = ('id', )
class SquadSerializer(serializers.ModelSerializer):
members = MembersSerializer()
class Meta:
model = Squad
fields = '__all__'
|
[
"utn1002@gmail.com"
] |
utn1002@gmail.com
|
52daf9e37da6d60f8e9103ebb8997114695211b5
|
e2d03079c2483ff91f473a4580f1ce4661c14f80
|
/pelican/plugins/pelican-semantic-ui/pelican-semantic-ui.py
|
b49b62cb27ee511b2cda82f7cc2f06dcf782b1ba
|
[
"MIT"
] |
permissive
|
hielsnoppe/pelican-semantic-ui
|
d19599a33086f7a0713827fdfb8167560765d2e2
|
23976c3cc1a065bc94cf4d098dc0aafae68988bf
|
refs/heads/main
| 2023-01-01T01:33:07.252830
| 2020-10-26T22:10:51
| 2020-10-26T22:10:51
| 307,516,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 532
|
py
|
from pelican import signals
from jinja2 import ChoiceLoader, FileSystemLoader
def add_loader(pelican):
'''
https://stackoverflow.com/questions/30109097/trying-to-override-jinja-choicereloader-in-a-pelican-plugin
'''
pelican.env.loader = ChoiceLoader([
pelican.env.loader,
#PackageLoader('Semantic-UI-Jinja2', 'templates'),
FileSystemLoader('/home/niels/Projects/public/Semantic-UI-Jinja2/semantic_ui_jinja2/templates'),
])
def register():
signals.generator_init.connect(add_loader)
|
[
"mail@nielshoppe.de"
] |
mail@nielshoppe.de
|
a1ea9882410206d97dde501efb90db8f3844d88c
|
61a4f0ef43e31f77d17d41a0f0250cf4d10cc45b
|
/directory/migrations/0002_auto_20180215_2103.py
|
52a16e771442cef9e59c29a1a3088db9a0baf6b3
|
[
"MIT"
] |
permissive
|
for-each-org/taf-members
|
f09dcf9044db7abd070ecda2c8c55fedf06eba54
|
a9e9663acc8ae5a321965a6340c639ede0c09754
|
refs/heads/master
| 2021-05-09T00:31:39.855553
| 2018-03-03T19:06:17
| 2018-03-03T19:06:17
| 119,747,094
| 0
| 0
|
MIT
| 2018-03-03T19:06:18
| 2018-01-31T21:41:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,647
|
py
|
# Generated by Django 2.0.2 on 2018-02-15 21:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('directory', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='SocialField',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(max_length=256)),
('url', models.URLField()),
],
),
migrations.DeleteModel(
name='User',
),
migrations.RemoveField(
model_name='profile',
name='social',
),
migrations.AddField(
model_name='profile',
name='owner',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AlterField(
model_name='profile',
name='profile_img_url',
field=models.URLField(max_length=256),
),
migrations.AlterField(
model_name='profile',
name='website',
field=models.URLField(max_length=256),
),
migrations.AddField(
model_name='socialfield',
name='profile',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='directory.Profile'),
),
]
|
[
"drewatkinson5@gmail.com"
] |
drewatkinson5@gmail.com
|
adcc26d378e12b39c36bbc0edf78f14655faeafa
|
7b87f276dcc09f372a6f8cd74e5a8b91f1025bde
|
/work/app.py
|
ddcc6311f528a2c07c6a9586a9ebe9cda1894162
|
[] |
no_license
|
hyeljang/code_test
|
45d5cad83da330c4973e613790168a3b790151aa
|
55e9da4a4a884b6a11b879513db538f56e420de5
|
refs/heads/master
| 2022-09-24T22:50:14.756728
| 2020-06-09T08:06:19
| 2020-06-09T08:06:19
| 265,852,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
from pymongo import MongoClient  # import pymongo (the package needs to be installed first)
from flask import Flask, render_template, jsonify, request
app = Flask(__name__)
client = MongoClient('localhost', 27017)  # MongoDB runs on port 27017
db = client.dbsparta  # create/use a database named 'dbsparta'
# The part that serves the HTML
@app.route('/')
def home():
return render_template('index.html')
# The part that acts as the API
@app.route('/shoppings', methods=['POST'])
def write_order():
name_receive = request.form['name_give']
qty_receive = request.form['qty_give']
address_receive = request.form['address_give']
phone_receive = request.form['phone_give']
orders1 = {'name': name_receive, 'qty': qty_receive,
'address': address_receive, 'phone': phone_receive}
db.orders.insert_one(orders1)
return jsonify({'result': 'success', 'msg': '주문이 정상적으로 접수되었습니다.'})
@app.route('/shoppings', methods=['GET'])
def read_orders():
orders_list = list(db.orders.find({}, {'_id': 0}))
print(orders_list)
return jsonify({'result': 'success', 'order_list': orders_list})
if __name__ == '__main__':
app.run('0.0.0.0', port=5000, debug=True)
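
# --- Usage sketch (added illustration; the field names come from the handlers
# above, the sample values are made up) ---
# Place an order:
#     curl -X POST http://localhost:5000/shoppings \
#          -d "name_give=Alice" -d "qty_give=2" \
#          -d "address_give=Seoul" -d "phone_give=010-1234-5678"
# List all orders:
#     curl http://localhost:5000/shoppings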
|
[
"hyeljang@naver.com"
] |
hyeljang@naver.com
|
2dd6a7d5978b6b8bab205615d8932e5a579079c7
|
bd22572b4fe1bdf7d38e4a37ccb6da52cf5dbb08
|
/bot-engine/EngineStates/InternalVariableFlow.py
|
ef0c1110e0e3c0d003b4e02ba3526caeef1d926a
|
[] |
no_license
|
dannysheyn/bonter
|
a8d4a5dba5be5e43374bcd702d81793c6cc07faa
|
2d2ebc13e4fdcd1b1a3ef6b0cd5cbf0b9d53a4db
|
refs/heads/master
| 2023-08-25T22:59:17.176394
| 2021-11-08T17:58:09
| 2021-11-08T17:58:09
| 333,482,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,566
|
py
|
from botEngine import BotEngine
from callableBox import *
import userMessages
from EngineStates import *
from api import API
from userGeneratedBot import BOX_TYPE_INTERNAL_VARIABLE
from CurrentUser import CurrentUser
class InternalVariableFlow:
@staticmethod
def internal_variable_assignment_start_flow(update: Update, context: CallbackContext):
'''
goal: embedded a value into a variable
'''
query = update.callback_query
query.answer()
text = 'Please enter the name of the variable you want to embed with a value'
query.message.reply_text(text=text)
return GET_INTERNAL_VARIABLE
@staticmethod
def internal_variable_assignment_name(update: Update, context: CallbackContext):
variable_name = update.message.text
user_generated_bot = CurrentUser.get_current_user(update)
if variable_name in user_generated_bot.internal_client_variables or variable_name in user_generated_bot.user_variables:
update.message.reply_text(text='The variable name you gave is already in use, please try again.')
return BotEngine.edit_bot(update, context)
else:
user_generated_bot.user_variables['internal_variable'] = variable_name
            update.message.reply_text(text='Please choose how you want to provide the value:\n'
'1) Now\n'
'2) API during run time.')
return INTERNAL_VARIABLE_SOURCE
@staticmethod
def internal_variable_assignment_source(update: Update, context: CallbackContext):
user_choice = update.message.text
if user_choice == "1":
update.message.reply_text(text='Please enter the value you want to insert to the variable')
return INTERNAL_VARIABLE_INSTANT_VALUE
elif user_choice == "2":
update.message.reply_text(text='Please enter the api uri you want to use.\n'
'Note: you can use the ${variable_name} in the uri given, BUT you will need to add'
                                     ' a default value in the following manner: ${variable_name:default_value} examples:\n'
' ${city:new york}, ${name:Alice}')
return INTERNAL_VARIABLE_API_VALUE
else:
update.message.reply_text(text='Not 1 or 2 was given as an answer.\n Please try again')
return BotEngine.edit_bot(update, context)
@staticmethod
def internal_variable_instant_value(update: Update, context: CallbackContext):
query = update.callback_query
query.answer()
user_variable_value = update.message.text
user_generated_bot = CurrentUser.get_current_user(update)
internal_variable_name = user_generated_bot.user_variables['internal_variable']
user_generated_bot.internal_client_variables[internal_variable_name] = user_variable_value
update.message.reply_text(
            text=f'Value added to variable ${{{internal_variable_name}}} with the value: {user_variable_value}.\n'
f'Now you can use the ${{{internal_variable_name}}} everywhere in your bot '
f'making.')
return BotEngine.edit_bot(update, context)
@staticmethod
def internal_variable_api_value_start(update: Update, context: CallbackContext):
url = update.message.text
user_generated_bot = CurrentUser.get_current_user(update)
user_generated_bot.apis.append(API(uri=url))
update.message.reply_text(text=userMessages.HEADER_MESSAGE)
return INTERNAL_VARIABLE_API_HEADERS
@staticmethod
def internal_variable_api_headers(update: Update, context: CallbackContext):
authorization = update.message.text
user_generated_bot = CurrentUser.get_current_user(update)
api_obj = user_generated_bot.apis[-1]
if authorization.find('=') == -1:
text = 'No headers where given continuing to next stage...\n'
update.message.reply_text(text=text)
else:
BotEngine.header_extractor(authorization, api_obj)
text = 'Now we will show you the return value from the api call,' \
               ' and you will choose which value you want from the call'
update.message.reply_text(text=text)
query_variables = re.findall(r'\$\{.+?\}', api_obj.uri)
if query_variables: # if in the url there are variables in the form of ${var}
api_obj.uri_to_check = BotEngine.sub_variables_with_defualt_values(query_variables, api_obj.uri)
response = user_generated_bot.apis[-1].get_api_response()
text = "We just made a request with the parameters that you provided " \
"and got the following response\n\n"
update.message.reply_text(text=text)
if response.status_code == 200:
pretty_response = json.loads(response.text)
pretty_response = json.dumps(pretty_response, indent=2)
user_generated_bot.apis[-1].response = pretty_response
BotEngine.send_long_message(update=update, pretty_response=pretty_response)
text_get_key = f'What key in the data would you like it to be inserted to the variable {user_generated_bot.user_variables["internal_variable"]}.' \
f'please enter the Json expressions which evaluate to your key for example:\n' \
f'[origin] or [destination][flight_num] and etc...\n ' \
'If the key are inside an array you need to index it, for example: [0][origin] or ' \
'[destination][1]\n ' \
'You can use nesting for dictionaries as well, for example: [data][name] or [0][data][' \
'time]\n '
update.message.reply_text(text=text_get_key)
return INTERNAL_VARIABLE_API_MAPPING
else:
text = 'Status code was not 200, please try to request the same values in postman and try again later'
update.message.reply_text(text=text)
return BotEngine.edit_bot(update, context)
@staticmethod
def internal_variable_api_mapping(update: Update, context: CallbackContext):
expression = update.message.text # I.E: [0][data][time]
user_generated_bot = CurrentUser.get_current_user(update)
user_generated_bot.apis[-1].expressions = [expression]
internal_variable = user_generated_bot.user_variables["internal_variable"]
try:
user_generated_bot.apis[-1].validate_keys(save_response=False, use_uri_to_check=True)
item = tuple(user_generated_bot.apis[-1].key_expression.items())
key, value = item[0][0], item[0][1]
user_generated_bot.apis[-1].key_expression[internal_variable] = value
del user_generated_bot.apis[-1].key_expression[key]
except Exception as e:
error_message = "We couldn't validate one of your expressions\n" \
f"Original error is: \n {e}\n" \
f"Please try to write those expressions again according to the rules"
update.message.reply_text(text=error_message)
return INTERNAL_VARIABLE_API_MAPPING
key_expression_mapping = user_generated_bot.apis[-1].key_expression_map()
ref_keys_text = "Your keys have been validated and we have saved them\n" \
"Here is the key-expression mapping (In this format {Key} = {Expression}):\n" \
f"{key_expression_mapping}"
update.message.reply_text(text=ref_keys_text)
box_number = user_generated_bot.add_box(box_msg=f'Internal variable api: {key},'
f' Note: this box will be invisible to the user',
box_type=BOX_TYPE_INTERNAL_VARIABLE,
api_obj=user_generated_bot.apis[-1])
# add to graph
# support add edge to and from this box
update.message.reply_text(
text=f'You have successfully created an internal variable assignment, with the number '
f'of {box_number} '
f'Please remember that the box cannot print and every edge from it is in the '
f'following syntax:\n '
f'{box_number}.0->destination')
return BotEngine.edit_bot(update, context)
|
[
"59560231+ArtemKuznetsovv@users.noreply.github.com"
] |
59560231+ArtemKuznetsovv@users.noreply.github.com
|
dc83acb459fcffe1afe7db43b56887dcd5a13f98
|
c7c3daeaa29fc8c955ea6c143d73f7fe34f0ca6b
|
/Pn/Newtoy10000batch/calc_err.py
|
f98b16236cd5dae79b053205f5efd7b832062d26
|
[] |
no_license
|
hanzhuoran/FETtest
|
9e678f9c6c6ccf6b0fb3fc8063a32b5439299883
|
d0104ca3d5dc06152ad583089a837f3425fb5c00
|
refs/heads/master
| 2020-03-22T10:27:11.010594
| 2018-07-20T18:27:54
| 2018-07-20T18:27:54
| 139,903,416
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
import glob
import matplotlib.pyplot as plt
import openmc
import os
import numpy as np
import openmc.capi as capi
import scipy.misc as scm
import scipy.special as scp
import generate_xml as genxml
import helperfunction as hf
import dataprocess as dp
import generateplot as gp
import sys
plnnum = 100
porder = 100
pln,plnmid = hf.get_planes(plnnum)
N = [10000]
for n in N:
dp.process_data(plnnum,porder,pln,n)
|
[
"zhuoranh@mit.edu"
] |
zhuoranh@mit.edu
|
bf0c684d00618e404e4f0cf0988ac822ca07783b
|
55fad148029f51073d15efe0f7053cf9c890c797
|
/parser/tasks.py
|
ee9b78983e428c9c38941aef6da0435fc79a928e
|
[] |
no_license
|
MishaAntonkin/domovichok
|
f0dba4570258a896a3254e99d67e7ad3241ccdf7
|
51afbd9c031fde97553c17b70dace02a57fcf029
|
refs/heads/master
| 2022-12-18T03:29:05.621168
| 2018-06-02T19:05:09
| 2018-06-02T19:05:09
| 126,003,055
| 0
| 0
| null | 2022-12-08T00:56:17
| 2018-03-20T10:51:19
|
Python
|
UTF-8
|
Python
| false
| false
| 184
|
py
|
from datetime import datetime
from celer import celery_app
@celery_app.task
def now():
with open('asome', 'a+') as f:
f.write('string\n')
print(str(datetime.now()))
|
[
"mishaAntonkin@gmail.com"
] |
mishaAntonkin@gmail.com
|
dd88f4ab6f1e21d5f809655c618724a9cce5a20d
|
537d28fb2142331e27c84ebf2c16bad77aceb24e
|
/keras/lstmTest/p054_UnivariateMLP.py
|
8d753d5fc810757620cacc1001d07366250eefb2
|
[] |
no_license
|
gema0000/bit2019
|
c27c3cec8d8d3a0907ade41523ce1c5ee86337b6
|
2f44ad3956b387186935374d9a488ad40a13bcaf
|
refs/heads/master
| 2020-07-03T05:19:41.051447
| 2019-10-26T23:56:25
| 2019-10-26T23:56:25
| 201,796,021
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,657
|
py
|
# univariate data preparation
from numpy import array
from keras.models import Sequential
from keras.layers import Dense
# split a univariate sequence into samples
def split_sequence(sequence, n_steps):
X, y = list(), list()
for i in range(len(sequence)):
# find the end of this pattern
end_ix = i + n_steps
# check if we are beyond the sequence
if end_ix > len(sequence)-1:
break
# gather input and output parts of the pattern
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
X.append(seq_x)
y.append(seq_y)
return array(X), array(y)
# define input sequence
raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90]
# choose a number of time steps
n_steps = 3
# split into samples
X, y = split_sequence(raw_seq, n_steps)
# summarize the data
for i in range(len(X)):
print(X[i], y[i])
# define model
model = Sequential()
model.add(Dense(100, activation='relu', input_dim=n_steps))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
# fit model
model.fit(X, y, epochs=2000, verbose=0)
# demonstrate prediction
x_input = array([70, 80, 90])
x_input = x_input.reshape((1, n_steps))
yhat = model.predict(x_input, verbose=0)
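# (Added note) With the model fit on the 10..90 sequence, the prediction for
# [70, 80, 90] is expected to land close to 100 (the exact value varies per run).
print(yhat)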
|
[
"gema0000@naver.com"
] |
gema0000@naver.com
|
0b9e1610455523e45cd4142a03cf3aa9e462915c
|
0f1ffdf3ce215b170e201e8d3d7a103fdb3ec1cf
|
/examples/tenant_tutorial/tenant_tutorial/urls_tenants.py
|
302ac2a76c019007d753a6a177ead0285ac18d5c
|
[
"MIT"
] |
permissive
|
simo97/django-tenants
|
4afa6062e632ad29ec8bbbd2bcf49244fb693ffe
|
386152dfcfdfb0084d471495fd8fcd95762a451f
|
refs/heads/master
| 2020-06-30T15:08:14.788197
| 2019-11-08T12:31:25
| 2019-11-08T12:31:25
| 200,866,875
| 1
| 0
|
MIT
| 2019-11-08T12:31:26
| 2019-08-06T14:22:51
|
Python
|
UTF-8
|
Python
| false
| false
| 425
|
py
|
from customers.views import TenantView, TenantViewRandomForm, TenantViewFileUploadCreate
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('', TenantView.as_view(), name="index"),
path('sample-random/', TenantViewRandomForm.as_view(), name="random_form"),
path('upload-file/', TenantViewFileUploadCreate.as_view(), name="upload_file"),
path('admin/', admin.site.urls),
]
|
[
"tom@twt.me.uk"
] |
tom@twt.me.uk
|
7c6c64e07b7e7b162308256268e3047bf5cb9801
|
c6d4db3208e915ffcca08325b167b6901f4cf3c9
|
/ABC144/c.py
|
580ba5e3d35f14eafeacfc6562ddd91e42d20af2
|
[] |
no_license
|
NagaTaku/atcoder_abc_edition
|
183db46774a642ec4ab2b38cace7d73312ca5fe0
|
d018fcea03681bb78b159c0934dfe02794fe5df6
|
refs/heads/develop
| 2023-03-28T16:13:59.610494
| 2021-04-05T08:41:54
| 2021-04-05T08:41:54
| 259,724,404
| 1
| 0
| null | 2020-08-21T08:05:13
| 2020-04-28T18:52:53
|
Python
|
UTF-8
|
Python
| false
| false
| 461
|
py
|
import math
def make_divisors(n):
lower_divisors , upper_divisors = [], []
i = 1
while i*i <= n:
if n % i == 0:
lower_divisors.append(i)
if i != n // i:
upper_divisors.append(n//i)
i += 1
return lower_divisors + upper_divisors[::-1]
n = int(input())
yaku = make_divisors(n)
Min = float('inf')
for i in range(math.ceil(len(yaku)/2)):
Min = min(Min, yaku[i]+yaku[-(i+1)])
print(Min-2)
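# Worked example (added note): for n = 10 the divisors are [1, 2, 5, 10]; the
# candidate pair sums are 1+10 = 11 and 2+5 = 7, so Min = 7 and the program
# prints Min - 2 = 5.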
|
[
"greeeen0315@gmail.com"
] |
greeeen0315@gmail.com
|
7cf56b169537088e8957df7107db39c5313491c4
|
cda3810d054a9769a2e0d95613fac970bf0f08a4
|
/source_codes/modeling_trial.py
|
26e65411623632cb779ec48a288b67b8429cf553
|
[] |
no_license
|
iocak28/Fantasy_Basketball_ML
|
bb97d41d910a4216496aae8eaadf163fce9a7f69
|
ef8156292e5a50316039fb3dbf6c78cb0bfaa73e
|
refs/heads/master
| 2022-04-11T10:58:29.288289
| 2020-04-02T06:52:09
| 2020-04-02T06:52:09
| 241,824,074
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,533
|
py
|
# Modeling Code
import pandas as pd
import numpy as np
import time
# from basketball_reference_web_scraper import client
import os
import gc
import datetime
import sklearn
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.model_selection import RandomizedSearchCV
from sklearn.tree import DecisionTreeRegressor
from itertools import product
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import TimeSeriesSplit
from sklearn.ensemble import RandomForestRegressor
import xgboost
from matplotlib import pyplot
#from keras.callbacks import ModelCheckpoint
#from keras.models import Sequential
#from keras.layers import Dense, Activation, Flatten, Dropout
##from talos.model import layers
#from keras.regularizers import l1
# gc collect
gc.collect()
# Configuration
feature = 'C:/Users/iocak/OneDrive/Masaüstü/WI20/ECE 271B/Project/model_data/'
# Read Data
dataset = pd.read_parquet(feature + 'all_data.parquet')
# Train Test Split
season_end_year_list = [2015, 2017]
train = dataset[(dataset['season_end_year'] >= season_end_year_list[0]) &
(dataset['season_end_year'] < season_end_year_list[1])].drop(
columns = ['date', 'opponent', 'team', 'name', 'slug', 'season_end_year', 'fantasy_point'])
train_labels = dataset[(dataset['season_end_year'] >= season_end_year_list[0]) &
(dataset['season_end_year'] < season_end_year_list[1])][['fantasy_point']]
test = dataset[(dataset['season_end_year'] == season_end_year_list[1])].drop(
columns = ['date', 'opponent', 'team', 'name', 'slug', 'season_end_year', 'fantasy_point'])
test_labels = dataset[(dataset['season_end_year'] == season_end_year_list[1])][['fantasy_point']]
# Scale data
scaler = StandardScaler()
# Fit on training set only.
scaler.fit(train)
# Apply transform to both the training set and the test set.
train_n = scaler.transform(train)
test_n = scaler.transform(test)
# PCA
## Make an instance of the Model
pca = PCA(.90)
pca.fit(train_n)
train_n = pca.transform(train_n)
test_n = pca.transform(test_n)
# Plot % Variance explained
variance = pca.explained_variance_ratio_ #calculate variance ratios
var=np.cumsum(np.round(pca.explained_variance_ratio_, decimals=3)*100)
var #cumulative sum of variance explained with [n] features
pyplot.ylabel('% Variance Explained')
pyplot.xlabel('# of Features')
pyplot.title('PCA Analysis')
pyplot.ylim(30,100.5)
pyplot.style.context('seaborn-whitegrid')
pyplot.plot(var)
# Try a basic model
## Linear Regression
### Create linear regression object
regr = linear_model.LinearRegression()
### Train the model using the training sets
regr.fit(train_n, train_labels)
### Make predictions using the testing set
y_pred = regr.predict(test_n)
### The coefficients
print('Coefficients: \n', regr.coef_)
### General Error & Bias
lr_err = np.mean(np.abs(y_pred - test_labels)) / np.mean(test_labels)
lr_bias = np.mean((y_pred - test_labels)) / np.mean(test_labels)
## Lasso
### Create lasso object
regr = linear_model.Lasso(alpha = 0.1)
### Train the model using the training sets
regr.fit(train_n, train_labels)
### Make predictions using the testing set
y_pred = regr.predict(test_n)
### The coefficients
print('Coefficients: \n', regr.coef_)
### General Error & Bias
lr_err = np.mean(np.abs(y_pred[:, None] - test_labels)) / np.mean(test_labels)
lr_bias = np.mean((y_pred[:, None] - test_labels)) / np.mean(test_labels)
## Decision Tree
# Time Series CV
max_depth = [5, 10, 50]
min_samples_split = [2, 5]
min_samples_leaf = [1, 5]
max_features = ['auto']
parameters = {'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'max_features': max_features}
clf = GridSearchCV(DecisionTreeRegressor(),
parameters,
n_jobs=-1,
cv = TimeSeriesSplit(max_train_size=None, n_splits=3))
clf.fit(X = train_n, y = train_labels)
tree_model = clf.best_estimator_
### Train the model using the training sets
tree_model.fit(train_n, train_labels)
### Make predictions using the testing set
y_pred = tree_model.predict(test_n)
### General Error & Bias
np.mean(np.abs(y_pred[:, None] - test_labels)) / np.mean(test_labels)
np.mean((y_pred[:, None] - test_labels)) / np.mean(test_labels)
# ideas:
'''
- try rf, xgb, nn if they don't work, try them again by handpicking some features
- selective pca, or some other dim reduction
'''
## Random Forest
# Time Series CV
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 5)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(2, 100, num = 5)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Method of selecting samples for training each tree
bootstrap = [True, False]
parameters = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
clf = GridSearchCV(RandomForestRegressor(),
parameters,
n_jobs=-1,
cv = TimeSeriesSplit(max_train_size=None, n_splits=3),
verbose = 2)
clf.fit(X = train_n, y = train_labels)
tree_model = clf.best_estimator_
### Train the model using the training sets
tree_model.fit(train_n, train_labels)
### Make predictions using the testing set
y_pred = tree_model.predict(test_n)
### General Error & Bias
np.mean(np.abs(y_pred[:, None] - test_labels)) / np.mean(test_labels)
np.mean((y_pred[:, None] - test_labels)) / np.mean(test_labels)
# xgboost
parameters = {'nthread':[-1], #when use hyperthread, xgboost may become slower
'objective':['reg:linear'],
'learning_rate': [.03, 0.05, .07], #so called `eta` value
'max_depth': [5, 6, 7],
'min_child_weight': [1, 4, 10],
'silent': [1],
'subsample': [0.7],
'colsample_bytree': [0.7],
'n_estimators': [50, 100]}
xgb1 = xgboost.XGBRegressor()
clf = GridSearchCV(xgb1,
parameters,
n_jobs=-1,
cv = TimeSeriesSplit(max_train_size=None, n_splits=3),
verbose = 2)
clf.fit(X = train, y = train_labels)
clf.best_estimator_
best_model = clf.best_estimator_
### Train the model using the training sets
best_model.fit(train, train_labels)
### Make predictions using the testing set
y_pred = best_model.predict(test)
### General Error & Bias
print(np.mean(np.abs(y_pred[:, None] - test_labels)) / np.mean(test_labels))
print(np.mean((y_pred[:, None] - test_labels)) / np.mean(test_labels))
# plot feature importance
xgboost.plot_importance(best_model)
pyplot.figure(figsize=(200,50))
pyplot.show()
feat_imp = pd.DataFrame({'col_name' : train.columns,
'feature_imp' : best_model.feature_importances_})
feat_imp = feat_imp.sort_values(by = 'feature_imp', ascending=False)
|
[
"iocak28@gmail.com"
] |
iocak28@gmail.com
|
bcb65764a20736e44cc15c3d9f9062023ebe1e13
|
7c89da72f81c4e99130cf49a8ca48119beabff39
|
/knn.py
|
110287582bbdd3df364d2f6947c0c964798d546d
|
[] |
no_license
|
ianyehwork/Machine-Learning-Movie-Success-Prediction
|
cdb2add2be7d361de2b5b7362647a39bf8e66bd5
|
7d2ecdaa7998333926450b6f08ab5abc7a93e1ab
|
refs/heads/master
| 2022-12-05T17:49:29.556571
| 2020-08-23T02:00:44
| 2020-08-23T02:00:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,148
|
py
|
"""
This program is used for finding the best knn of different roi label
@author: gwang25
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import accuracy_score
from sklearn import metrics
from sklearn.preprocessing import scale
# use iteration to evaluate different values of k for the models, then return the average accuracy based on the cross validation
def cv_knn(dateframe, k_range, cv_times, model_evaluation, feature_number=10):
k_scores = []
feature_array = ['budget','directors_popularity_score', 'casts_popularity_score', 'runtime', 'keywords_popularity_score', 'release_year', 'casts_vote_score', 'release_month', 'directors_vote_score', 'keywords_vote_score']
if feature_number == 10:
knn_attrbiutes = dateframe.loc[:,['budget', 'release_year', 'release_month', 'genres_popularity_score', 'genres_vote_score', 'keywords_popularity_score','keywords_vote_score', 'casts_popularity_score', 'casts_vote_score', 'directors_popularity_score','directors_vote_score']]
else:
feature_choose = feature_array[0:feature_number]
knn_attrbiutes = dateframe.loc[:,feature_choose]
knn_attrbiutes.info()
print(feature_choose)
#.values will change to array, .ravel() convert that array shape to (n,) which is required
knn_label = dateframe.loc[:, ['return_on_investment_label']].values.ravel()
for k in k_range:
knn = KNeighborsClassifier(n_neighbors=k)
scores = cross_val_score(knn, X=knn_attrbiutes, y=knn_label, cv = cv_times, scoring = model_evaluation)
k_scores.append(scores.mean())
index_max_score = np.argmax(k_scores)
print("Choose "+str(k_range[index_max_score])+" as k, "+"the "+ model_evaluation + " is "+str(k_scores[index_max_score]))
# plot to see clearly
plt.plot(k_range, k_scores)
label = "Cross-Validated "+ model_evaluation
plt.xlabel("Value of K for KNN")
plt.ylabel(label)
path = 'graph/CV of knn '+ model_evaluation + '.png'
plt.savefig(path)
plt.show()
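
# --- Hedged example call (added illustration) ---
# cv_knn can be exercised with any of the labelled frames loaded further below, e.g.:
#     cv_knn(movies_processed_four_percentile_label, range(1, 31), 10, 'accuracy')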
# Outputs of knn_new are accuracy, balanced_accuracy, cohen_kappa
def knn_new(dateframe, k_range, k_fold, origin_attribute=False):
#dataframe
#'budget'
if origin_attribute:
knn_attrbiutes = dateframe.loc[:,['budget','runtime', 'release_year', 'release_month', 'popularity', 'vote_average']]
else:
knn_attrbiutes = dateframe.loc[:,['budget', 'runtime', 'casts_popularity_score', 'casts_vote_score', 'directors_popularity_score']]
#knn_attrbiutes = dateframe.loc[:, ['budget', 'runtime', 'release_year', 'release_month', 'genres_popularity_score', 'genres_vote_score', 'keywords_popularity_score', 'keywords_vote_score', 'casts_popularity_score', 'casts_vote_score', 'directors_popularity_score', 'directors_vote_score', 'genres_roi_score','keywords_roi_score','casts_roi_score','directors_roi_score']]
#narray
print(knn_attrbiutes.columns)
knn_label = dateframe.loc[:, ['return_on_investment_label']].values.ravel()
#the different of StratifiedShuffleSplit from stratifiedKold is the random index
shufflesplit = StratifiedKFold(n_splits = k_fold, shuffle=True, random_state=42)
mean_accuracy_list = []
mean_balanced_accuracy_list = []
mean_cohen_kappa_list = []
for k in k_range:
knn_classifier = KNeighborsClassifier(n_neighbors=k, weights="distance", algorithm="kd_tree")
accuracy = []
balanced_accuracy = []
cohen_kappa = []
for train_index, test_index in shufflesplit.split(knn_attrbiutes, knn_label):
scale_attributes_train = scale(knn_attrbiutes.loc[train_index])
scale_attributes_test = scale(knn_attrbiutes.loc[test_index])
predict_test = knn_classifier.fit(scale_attributes_train, knn_label[train_index]).predict(scale_attributes_test)
accuracy.append(accuracy_score(knn_label[test_index], predict_test))
balanced_accuracy.append(balanced_accuracy_score(knn_label[test_index], predict_test))
cohen_kappa.append(cohen_kappa_score(knn_label[test_index], predict_test, weights= "quadratic"))
mean_accuracy_list.append(np.mean(accuracy))
mean_balanced_accuracy_list.append(np.mean(balanced_accuracy))
mean_cohen_kappa_list.append(np.mean(cohen_kappa))
index_max_accuracy = np.argmax(mean_accuracy_list)
index_max_balanced_accuracy = np.argmax(mean_balanced_accuracy_list)
index_max_cohen_kappa = np.argmax(mean_cohen_kappa_list)
print("Choose " + str(k_range[index_max_accuracy]) + " as k, " + "the accuracy is " + str(
mean_accuracy_list[index_max_accuracy]))
print("Choose " + str(k_range[index_max_balanced_accuracy]) + " as k, " + "the balanced_accuracy is " + str(
mean_balanced_accuracy_list[index_max_balanced_accuracy]))
print("Choose " + str(k_range[index_max_cohen_kappa]) + " as k, " + "the cohen_kappa is " + str(
mean_cohen_kappa_list[index_max_cohen_kappa]))
plt.figure(0)
plt.plot(k_range, mean_balanced_accuracy_list)
label = "Cross-Validated " + "balanced accuracy"
plt.xlabel("Value of K for KNN")
plt.ylabel(label)
path = 'graph/CV of knn ' + "balanced accuracy" + '.png'
plt.savefig(path)
plt.figure(1)
plt.plot(k_range, mean_cohen_kappa_list)
label = "Cross-Validated " + "cohen kappa"
plt.xlabel("Value of K for KNN")
plt.ylabel(label)
path = 'graph/CV of knn ' + "cohen kappa" + '.png'
plt.savefig(path)
print(mean_cohen_kappa_list[index_max_balanced_accuracy])
#load dataset
#Using percentile to create label of return on investment
movies_processed = pd.read_csv('data/movies_meta_data_after_processing.csv')
knn_features = movies_processed.loc[:,['budget', 'runtime', 'release_year', 'release_month', 'popularity', 'vote_average']]
knn_feature = scale(knn_features)
movies_processed.isnull().sum()
movies_processed_four_percentile_label = pd.read_csv('data/movies_meta_data_after_processing_percentile_4_label.csv')
movies_processed_three_percentile_label = pd.read_csv('data/movies_meta_data_after_processing_percentile_3_label.csv')
#Using clustering to create label of return on investment
movies_processed_four_cluster_label = pd.read_csv('data/movies_meta_data_after_processing_with_4_cluster_label.csv')
movies_processed_three_cluster_label = pd.read_csv('data/movies_meta_data_after_processing_with_3_cluster_label.csv')
movies_processed_three_cluster_label['return_on_investment_label'].value_counts()
movies_processed_four_cluster_label['return_on_investment_label'].value_counts()
movies_processed_three_percentile_label.return_on_investment_label.value_counts()
#set the range of k
k_range = range(1,31)
k_fold = 10
# f1 may be better for unbalanced data
#accuracy, precision_macro
#scoring = ['accuracy', 'precision'] multiple metric evaluation
# model_evaluation = "accuracy"
#all features
knn_new(movies_processed_four_percentile_label, k_range, k_fold)
knn_new(movies_processed_four_percentile_label, k_range, k_fold, origin_attribute = True)
knn_new(movies_processed_four_cluster_label, k_range, k_fold)
knn_new(movies_processed_four_cluster_label, k_range, k_fold, origin_attribute = True)
movies_processed_four_cluster_label.describe()
movies_processed_four_cluster_label.groupby('return_on_investment_label').size()
movies_processed_four_cluster_label = movies_processed_four_cluster_label.drop(columns=['popularity','vote_average','return_on_investment','genres_roi_score','keywords_roi_score','casts_roi_score','directors_roi_score'])
from pandas.plotting import parallel_coordinates
plt.figure(figsize=(30,10))
movies_processed_four_cluster_label.loc[:, ['budget', 'runtime', 'release_year', 'release_month', 'genres_popularity_score', 'genres_vote_score', 'keywords_popularity_score', 'keywords_vote_score', 'casts_popularity_score', 'casts_vote_score', 'directors_popularity_score', 'directors_vote_score']] = scale(movies_processed_four_cluster_label.loc[:, ['budget', 'runtime', 'release_year', 'release_month', 'genres_popularity_score', 'genres_vote_score', 'keywords_popularity_score', 'keywords_vote_score', 'casts_popularity_score', 'casts_vote_score', 'directors_popularity_score', 'directors_vote_score']])
parallel_coordinates(movies_processed_four_cluster_label, "return_on_investment_label", color = ('Red', 'Yellow', 'Blue', 'Purple'))
plt.title('Parallel Coordinates Plot', fontsize=20, fontweight='bold')
plt.xlabel('Features', fontsize=5)
plt.ylabel('Features values', fontsize=15)
plt.legend(loc=1, prop={'size': 15}, frameon=True,shadow=True, facecolor="white", edgecolor="black")
plt.savefig('graph/CV of knn ' + "Parallel Coordinates Plot" + '.png')
plt.show()
import seaborn as sns
plt.figure(1)
sns.pairplot(movies_processed_four_cluster_label, hue='return_on_investment_label', size=4, markers=["o", "s", "D", "+"])
plt.savefig('graph/CV of knn ' + "PairPlot" + '.png')
plt.show()
# #Over-sampling method. It creates synthetic samples of the minority class
# #imblearn python package is used to over-sample the minority classes
# from imblearn.over_sampling import SMOTE
# smote = SMOTE('minority')
#
# knn_attrbiutes = movies_processed_four_percentile_label.loc[:,['budget', 'genres_popularity_score', 'genres_vote_score', 'keywords_popularity_score','keywords_vote_score', 'casts_popularity_score', 'casts_vote_score', 'directors_popularity_score','directors_vote_score']]
# #.values will change to array, .ravel() convert that array shape to (n,) which is required
# knn_label = movies_processed_four_percentile_label.loc[:, ['return_on_investment_label']]#.values.ravel()
# x_train = knn_attrbiutes
# y_train = knn_label
# x_train.shape
# y_train.shape
# X_sm, y_sm = smote.fit_sample(x_train, y_train)
# print(X_sm.shape, y_sm.shape)
|
[
"wanggy522@gmail.com"
] |
wanggy522@gmail.com
|
689deef17fde9d2207c3074bea557590888e9d3c
|
2c5752e9ba3b6563cc51d84b0076df32f101a0ab
|
/IDRUSD/FIX FIX.py
|
dc0af4cef972df7f8b6522f529741c1647351794
|
[] |
no_license
|
rulisastra/Skripsi-Laptop-Github
|
75d42e82bfd42976c89570ca326894c7cabb0033
|
aa26ae87cdffe8e213997cc17ca505d200625a4c
|
refs/heads/master
| 2022-04-08T00:49:37.215486
| 2020-01-27T06:10:59
| 2020-01-27T06:10:59
| 206,923,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,866
|
py
|
import pandas as pd
import numpy as np
import time
import matplotlib.pyplot as plt
from numpy.linalg import inv
from numpy.linalg import cholesky
from scipy.stats import norm
#%% data preparation
# IDRUSD.csv = 64% | 71#
# data baru mulai 27sep2017.csv = 0.0
# data baru all = 21%
# data = 56%
# Ether mulai 1jul2017 = 52%
# Ether = 55%
# Litecoin mulai 1jan2017.csv = 55%
# NEO jul2017 = 62%
# NEO all = 63
# BTCIDR 1sept2017 | 74%
# Currency Converter.csv = 70%
# Currency Converter - copy.csv 73%, but it dropped to 54% with the new model
# 1000 data.csv = 72% but the MSE went up
# USDIDR 2009_2019 = 48% with the new model
data = pd.read_csv('USDIDR 1000 data.csv', # USDIDR 2009_2019 /// USDIDR 2000_2010 cut /// USDIDR 2000_2019 all
usecols=[1],
engine='python',
delimiter=',',
decimal=".",
thousands=',',
header=None,
names=['date','value'] )
data['value'] = data['value'].values
data['value'] = data['value'].astype('float32')
def pisahData(data,a,b):
if((a+b)!=1):
print("pemisahan tidak valid")
else:
train = []
test = []
train_size = int(len(data)*a)
train = data[0:train_size-1]
test = data[train_size-1:len(data)]
return np.array(train),np.array(test)
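
# Quick worked example (illustrative, not part of the original script): with 10
# samples and a 0.8/0.2 split, train_size = 8, so the training set gets indices
# 0..6 (7 rows) and the test set gets indices 7..9 (3 rows); note that one
# sample at the boundary ends up in the test set rather than the training set.
_tr_demo, _te_demo = pisahData(np.arange(10), 0.8, 0.2)
assert len(_tr_demo) == 7 and len(_te_demo) == 3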
# activation function and its derivative
def tanh(x):
return (1-np.exp(-2*x))/(1+np.exp(-2*x))
def dtanh(x):
return (1-tanh(x)**2)
def normalize(data, scale):
normalized = []
for i in range(len(data)):
a = (min(scale))+(data[i]-min(data))*(max(scale)-min(scale))/(max(data)-min(data))
normalized.append(a)
return np.array(normalized)
scale = (-1,1)
normalized = normalize(data,scale)
def denormalize(normalized, data, scale):
denormalized = []
for i in range(len(normalized)):
a = ((normalized[i]-min(scale))*(max(data)-min(data)))/(max(scale)-min(scale))+min(data)
denormalized.append(a)
return np.array(denormalized)
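
# Quick sanity check (illustrative): normalising into (-1, 1) and denormalising
# back should reproduce the original values.
_vals = np.array([10000., 12000., 14000.])
assert np.allclose(denormalize(normalize(_vals, (-1, 1)), _vals, (-1, 1)), _vals)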
# =============================================================================
# def norm(x, scale):
# normalized = []
# for i in range(len(x)):
# a = (min(scale))+(x[i]-min(x))*(max(scale)-min(scale))/(max(x)-min(x))
# normalized.append(a)
# return np.array(normalized)
# scale = (-1,1)
# normalized = normalize(x,scale)
# =============================================================================
# normalise the data reshaped into a single column (one value per row)
data_raw = np.reshape(normalize(data['value'],(-1,1)),(-1,1))
# split into training and test data
train_data, test_data = pisahData(data_raw, 0.8, 0.2) #8:2 = 71%
# =============================================================================
# train_data = train_data.reshape(-1,1) # single column, stacked vertically
# test_data = test_data.reshape(-1,1)
# =============================================================================
plt.plot(data, c='g', ls=None)
plt.xlabel('Data ke-')
plt.ylabel('Harga (USD)')
plt.title('Harga Nilai Tukar IDR/USD')
plt.show()
plt.plot(data_raw, c='y', ls=None)
plt.title('Nilai Tukar IDR/USD [Ternormalisasi]')
plt.xlabel('Data ke-')
plt.ylabel('Jangkauan Harga (-1,1)')
plt.show()
plt.plot(data, c='b', label='test', ls=None)
plt.plot(data[1:3499], c='g', label='training', ls=None)
plt.title('Pembagian Data [7:3]')
plt.xlabel('Data ke-')
plt.ylabel('Harga (USD)')
plt.legend()
plt.show()
plt.plot(data_raw, c='b', label='test', ls=None)
plt.plot(train_data[0:-1], c='r', label='training', ls=None)
plt.title('Pembagian Data [7:3]')
plt.xlabel('Data ke-')
plt.ylabel('Jangkauan Harga')
plt.legend()
plt.show()
# create sliding windows of length windowSize for the input
def createDataset(data, windowSize):
dataX, dataY = [],[]
for i in range(len(data)-windowSize):
a = []
for j in range(i, i+windowSize):
a.append(data[j,0])
dataX.append(a)
dataY.append(data[i+windowSize,0])
return np.array(dataX), np.array(dataY)
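
# Worked example (illustrative): with windowSize = 3 a series [0,1,2,3,4,5]
# becomes the inputs [[0,1,2],[1,2,3],[2,3,4]] and the targets [3,4,5].
_series = np.arange(6, dtype=float).reshape(-1, 1)
_dx, _dy = createDataset(_series, 3)
assert _dx.shape == (3, 3) and list(_dy) == [3.0, 4.0, 5.0]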
# ===================================================
#%%
windowSize = 5 # 5 70%
epoch = 100 # 100
hidden_dim = 7 # 7,9, ---->>> 73% dengan wn=5 dan hd = 9 dan 7:3
#%%
trainX, trainY = createDataset(train_data,windowSize)
testX, testY = createDataset(test_data, windowSize)
#%% TRAINING ==== uses trainX & trainY ====
# INITIALISATION: number of neurons in each layer
batch_dim = trainX.shape[0] # take the number of rows (n) from trainX(n,m)
input_dim = windowSize
output_dim = 1
alpha = .001
np.random.seed(1) # 1 =72%
# WEIGHTS === initialise the starting weights (rows, columns)
synapse_0 = 2*np.random.random((input_dim,hidden_dim)) - 1 # random initialisation of the starting weights
synapse_h = 2*np.random.random((hidden_dim,hidden_dim)) - 1 # in the interval [-1,1]
synapse_1 = 2*np.random.random((hidden_dim,output_dim)) - 1 # random.random ->> interval [0,1]
synapse_0_update = np.zeros_like(synapse_0) # zero array with the same shape as synapse_0
synapse_1_update = np.zeros_like(synapse_1)
synapse_h_update = np.zeros_like(synapse_h)
# log the mse of each epoch
mse_all = []
rmse_all = []
# initialisation before training
jumlah_w = (input_dim*hidden_dim)+(hidden_dim*hidden_dim)+(hidden_dim*output_dim)
Q = 1*np.identity(jumlah_w) # process-noise covariance
R = 1*np.identity(output_dim) # measurement (observation) noise covariance, size output_dim
P = 1*np.identity(jumlah_w) # covariance of the state-vector estimate
#%% EVALUATION ====
def mse(x,y):
mse = []
for i in range(len(y)):
a = (x[i]-y[i])**2
mse.append(a)
mse = float((sum(mse)/len(y)))
return mse
def mae(x,y):
mae = []
for i in range(len(y)):
a = abs(y[i]-x[i])
mae.append(a)
mae = float(sum(mae)/len(y))
return mae
def rmse(x,y):
rmse = []
for i in range(len(y)):
a = (x[i]-y[i])**2
rmse.append(a)
rmse = float((sum(rmse)/len(y))**0.5)
return rmse
def mape(x,y):
mape = []
for i in range(len(y)):
a = abs((x[i]-y[i])/x[i])
mape.append(a)
mape = float((sum(mape))/len(y))*100
return mape
def dstat(x,y):
dstat = 0
n = len(y)
for i in range(n-1):
if(((x[i+1]-y[i])*(y[i+1]-y[i]))>0):
dstat += 1
Dstat = (1/float(n-2))*float(dstat)*100
return float(Dstat)
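
# Tiny check of the error metrics (illustrative): a perfect prediction gives
# zero error, and a single miss of 2 on three points gives mse = 4/3.
_true = np.array([1.0, 2.0, 3.0])
_pred = np.array([1.0, 2.0, 5.0])
assert mse(_true, _true) == 0.0 and mae(_true, _true) == 0.0 and mape(_true, _true) == 0.0
assert abs(mse(_true, _pred) - 4.0/3.0) < 1e-9 and abs(rmse(_true, _pred) - (4.0/3.0)**0.5) < 1e-9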
#%% START OF EPOCHS ============ TRAINING ===================
epsilon = 0.98 # forgetting factor, following Dash 2014
gamma = 1.98 # to minimise the error caused by fluctuations (Dash)
start_time = time.time()
for i in range(epoch):
index = 0
layer_2_value = []
context_layer = np.full((batch_dim,hidden_dim),0)
    layer_h_deltas = np.zeros(hidden_dim) # previous context layer
sigmas_concat = []
while(index+batch_dim<=trainX.shape[0]):
        # input and output
X = trainX[index:index+batch_dim,:]
Y = trainY[index:index+batch_dim]
index = index+batch_dim
        # feed the input ~> previous hidden
layer_1 = tanh(np.dot(X,synapse_0) + np.dot(context_layer,synapse_h))
# hidden ~> output
layer_2 = tanh(np.dot(layer_1,synapse_1))
layer_2_value.append(layer_2)
        # compute the output error
        layer_2_error = layer_2 - Y[:,None] # note: y is replaced by the Y matrix here
        # error at the output layer -> layer-2 deltas (fed into the context layer from the hidden layer)
layer_2_delta = layer_2_error*dtanh(layer_2)
        # how much the error influences the layer
layer_1_error = np.dot(layer_2_delta,synapse_1.T)
        # error at the hidden layer -> layer-1 delta (fed into the hidden layer from the context layer)
layer_1_delta = (layer_1_error + np.dot(layer_h_deltas,synapse_h.T)) * dtanh(layer_1)
# calculate weight update
synapse_1_update = np.dot(np.atleast_2d(layer_1).T,(layer_2_delta))
synapse_h_update = np.dot(np.atleast_2d(context_layer).T,(layer_1_delta))
synapse_0_update = np.dot(X.T,(layer_1_delta))
#%% concatenate weight
synapse_0_c = np.reshape(synapse_0,(-1,1))
synapse_h_c = np.reshape(synapse_h,(-1,1))
synapse_1_c = np.reshape(synapse_1,(-1,1))
w_concat = np.concatenate((synapse_0_c,synapse_h_c,synapse_1_c), axis=0)
w_concat_transpose = w_concat.T
        synapse_0_masuk = np.reshape(synapse_0_update,(1,-1)) # one row, laid out sideways
synapse_h_masuk = np.reshape(synapse_h_update,(1,-1))
synapse_1_masuk = np.reshape(synapse_1_update,(1,-1))
masuk = np.concatenate((synapse_0_masuk,synapse_h_masuk,synapse_1_masuk), axis=1)
#%% Unscented Kalman Filter without filterpy
# X_ = masuk # myu
X_ = w_concat_transpose
n = X_.size # julier versi masalah 'dimension of problem'
L = X_.ndim #2
beta = 2.
kappa = 0 # dash
lambda_ = alpha**2 * (n + kappa) - n # bisoi, dash
#%% SIGMA POINTS around mean
        U = cholesky((n + lambda_)*P) # same as np.sqrt here
sigmas = np.zeros((2*n+1, n))
sigmas[0] = X_
        # then....
        for k in range(n): # stacked downwards.. giving 121x60 (same as the library)
sigmas[k+1] = np.subtract(X_, -U[k])
sigmas[n+k+1] = np.subtract(X_, U[k])
        # take the eye (diagonal) values from sigmas
sigmas_reshape_1 = sigmas[0]
sigmas_reshape_2 = sigmas[1:n+1,:] * np.eye(n)
sigmas_reshape_3 = sigmas[n+1::,:] * np.eye(n)
        # sigmas with 3 columns (3 sigma points)
ones = np.reshape(np.ones(n),(-1,1))
sigmas_1 = np.reshape(sigmas_reshape_1.T,(-1,1))
sigmas_2 = np.dot(sigmas_reshape_2,ones)
sigmas_3 = np.dot(sigmas_reshape_3,ones)
sigmas_concat = np.concatenate((sigmas_1,sigmas_2,sigmas_3), axis=1)
        #%% SIGMA WEIGHTS from Merwe (values stay fixed)
c_ = .5 / (n + lambda_)
# Wm = np.full(2*n+1, c_)
Wm = np.full(2*n+1, 1 / (2*(n + lambda_)))
Wc = Wm # size (121,) atau (n,)
Wc[0] = lambda_ / (n + lambda_) + (1 - alpha**2 + beta)
Wm[0] = lambda_ / (n + lambda_)
        #%% SIGMA unscented transform (holding the values via eye(P)) into measurement space
        Mz = np.dot(Wm,sigmas)
        # covariance into measurement space as well
kmax, n = sigmas.shape
Pz = np.zeros(n)
for k in range(kmax):
c = np.subtract(sigmas[k],Mz)
Pz = Wc[k] * np.outer(c, c)
        Pz += R # acts as S
        # cross-covariance between the weights before and after the unscented transform
Pxz = np.zeros(sigmas.shape) # Tut
for k in range(kmax):
            cc = np.subtract(X_,Mz) # acts as T
Pxz = Wc[k] * np.outer(cc, c)
# Kalman gain
K1 = np.dot(Pxz,inv(P))
        Knorm = np.reshape(norm(K1[:,0],(-1,1)),(-1,1)) # just an extra
        K = np.reshape(K1[:,0],(-1,1))
        # update the covariance
P = P - np.dot(K,np.dot(Pz.sum(),K.T))
P += Q
#%%
innovation = ((Y-layer_2).sum()/len(layer_2_error))
w_concat_new = w_concat + np.dot(K,innovation)
        # assign the weights
synapse_0 = w_concat_new[0:(input_dim*hidden_dim),0]
synapse_h = w_concat_new[(input_dim*hidden_dim):(input_dim*hidden_dim)+(hidden_dim*hidden_dim),0]
synapse_1 = w_concat_new[(input_dim*hidden_dim)+(hidden_dim*hidden_dim):w_concat_new.shape[0],0]
        # reshape the weights back
synapse_0 = np.reshape(synapse_0,(input_dim,hidden_dim))
synapse_h = np.reshape(synapse_h,(hidden_dim,hidden_dim))
synapse_1 = np.reshape(synapse_1,(hidden_dim,output_dim))
# reset update
synapse_0_update *= 0
synapse_1_update *= 0
synapse_h_update *= 0
# update context layer
layer_h_deltas = layer_1_delta # future_layer_1_delta
context_layer = layer_1 # prev_hidden_layer
layer_2_value = np.reshape(layer_2_value,(-1,1))
mse_epoch = mse(trainY,layer_2_value)
mse_all.append(mse_epoch)
rmse_epoch = rmse(trainY,layer_2_value)
rmse_all.append(rmse_epoch)
run_time = time.time() - start_time
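
# Side note (illustrative, self-contained): in the Van der Merwe scaled
# sigma-point scheme used above, the mean weights Wm always sum to 1, e.g.:
_n_demo, _alpha_demo, _kappa_demo = 4, 1e-3, 0.0
_lam_demo = _alpha_demo**2 * (_n_demo + _kappa_demo) - _n_demo
_wm_demo = np.full(2*_n_demo + 1, 1.0/(2*(_n_demo + _lam_demo)))
_wm_demo[0] = _lam_demo/(_n_demo + _lam_demo)
assert abs(_wm_demo.sum() - 1.0) < 1e-6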
#%% how large is the loss???
plt.plot(sigmas_1,label='sigma utama (mean)', marker='o', ls='-')
plt.plot(sigmas_2,label='sigma lain', marker='x', ls=None)
plt.plot(sigmas_3,label='sigma lain', marker='x', ls=None)
plt.title('SIGMAS')
plt.xlabel('sigmas ke-')
plt.ylabel('value')
plt.legend()
plt.show()
plt.plot(mse_all, marker='x')
plt.title('Loss (MSE)')
plt.xlabel('Epoch')
plt.ylabel('Loss (MSE)')
plt.show()
np.savetxt('loss_ukf.csv', mse_all, delimiter=';')
# plt.plot(trainY, c='r', marker='o', label='true') # testY[0:50] for plotting, see the notes
plt.plot(layer_2_value, c='y', marker='o', label='layer_2_value (predict)')
plt.plot(trainY, c='r', marker='o', label='trainY (sebenarnya)')
plt.title('Prediksi Training')
plt.xlabel('data ke-')
plt.ylabel('value')
plt.legend()
plt.show()
plt.plot(layer_2_error,label='error', marker = 'x',c='g')
plt.title('ERROR')
plt.xlabel('data ke-')
plt.ylabel('value')
plt.legend()
plt.show()
mse_pred = mse(trainY,layer_2_value)
mae_pred = mae(trainY,layer_2_value)
rmse_pred = rmse(trainY,layer_2_value)
mape_pred = mape(trainY,layer_2_value)
dstat_pred = dstat(trainY,layer_2_value)
scoring = [mse_pred,rmse_pred,mae_pred,mape_pred,dstat_pred,run_time]
np.savetxt('loss_ukf_train.csv', mse_all, delimiter=';')
print("Training mse : " , mse_pred)
print("Training mape : ", mape_pred)
print("Training mae: " , mae_pred)
print("Training rmse : ", rmse_pred)
print("Training dstat : " , dstat_pred)
print("Training runtime : ", run_time)
#%% now try it ============ PREDICTION ===================
batch_predict = testX.shape[0] # take the number of rows (n) from testX(n,m)
context_layer_p = np.full((batch_predict,hidden_dim),0) # array filled with zeros of shape [batch_predict x hidden_dim]
y_pred = [] # final predicted Y values
index = 0
mse_pred_all = []
while(index+batch_predict<=testX.shape[0]):
X = testX[index:index+batch_predict,:]
layer_1p = tanh(np.dot(X,synapse_0)+np.dot(context_layer_p,synapse_h))
layer_2p = tanh(np.dot(layer_1p,synapse_1))
y_pred.append(layer_2p)
context_layer_p = layer_1p
index = index+batch_predict
y_pred = denormalize(np.reshape(y_pred,(-1,1)), data['value'], (-1,1))
testYseb = testY.reshape(-1,1)
testY = denormalize(testY, data['value'], (-1,1))
mse_pred = mse(testY,y_pred)
mse_pred_all.append(mse_pred)
rmse_pred = rmse(testY,y_pred)
mae_pred = mae(testY,y_pred)
mape_pred = mape(testY,y_pred)
dstat_pred = dstat(testY,y_pred)
scoring = [mse_pred,rmse_pred,mae_pred,mape_pred,dstat_pred,run_time]
plt.plot(testYseb, label='true') # testY[0:50] for plotting, see the notes
plt.plot(layer_2p, label='prediction')
plt.xlabel('Data ke-')
plt.ylabel('jangkauan')
plt.title('Jangkauan data uji keseluruhan 1000 data')
plt.legend()
plt.show()
plt.plot(testY[50:100], marker='o', label='true') # testY[0:50] for plotting, see the notes
plt.plot(y_pred[50:100], marker='o', label='prediction')
plt.title('HASIL UJI dengan metode RNN-UKF 50 data awal')
plt.xlabel('Data ke-')
plt.ylabel('Harga')
plt.legend()
plt.show()
print('Kalman dim: ', K.ndim)
print('Kalman size: ', K.size)
print('Kalman shape: ', K.shape)
#scoring = [mse_pred,rmse_pred,mae_pred,dstat_pred,run_time]
print("mse : " , mse_pred)
print("rmse : ", rmse_pred)
print("mape : ", mape_pred)
print("mae: " , mae_pred)
print("dstat : " , dstat_pred)
print("runtime : ", run_time)
#%%
np.savetxt('bobot_input.csv', synapse_0, delimiter=',')
np.savetxt('bobot_hidden.csv', synapse_h, delimiter=',')
np.savetxt('bobot_output.csv', synapse_1, delimiter=',')
np.savetxt('loss_ukf_uji.csv', mse_pred_all, delimiter=';')
|
[
"rulisastra@gmail.com"
] |
rulisastra@gmail.com
|
96d1e168aeaa36bf400dc56c4da42a6b03fc6da4
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_divorcing.py
|
7d0123ef97df6f655c1258d8cbe1d63c8aeb6693
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
# class header
class _DIVORCING():
def __init__(self,):
self.name = "DIVORCING"
        self.definitions = 'divorce'  # quoted; the bare name would raise a NameError
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['divorce']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
b880237ef7e09503c0984673529a8d5613255356
|
f48da6cabf525dc1bc63e58d1f3cc8ed7404b852
|
/venv/bin/pip3.5
|
4ed4c88c564904304d661f48a6d6a61ec5aab9e0
|
[] |
no_license
|
wangbingde/Fly_game
|
9c02a7afa0038546b3b58ad0dac7a472860f8ad2
|
91028541f008c2bc292f0fd4b7af1b6fa4b1d238
|
refs/heads/master
| 2020-04-08T10:06:10.746405
| 2018-11-27T00:58:41
| 2018-11-27T00:58:41
| 159,254,110
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
5
|
#!/home/wang/PycharmProjects/Fly_game/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.5'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.5')()
)
|
[
"1254817017@qq.com"
] |
1254817017@qq.com
|
bb50184d47fad6f414d4f76e397f897a1994c154
|
a474ddeebc2c1b7347aa257ebafc535b1fee42c1
|
/scripts/serato_autotags.py
|
12348b96a5f23d87fd51d2c0cdc9cc3e79799afa
|
[
"MIT",
"CC-BY-3.0",
"CC-BY-SA-4.0"
] |
permissive
|
Christilut/serato-tags
|
8eed466228b33b6a35056755690aac69da142c1a
|
20068085751c3a5cb27efad99639f1872a54ff8f
|
refs/heads/master
| 2020-12-11T16:17:09.495640
| 2019-11-17T02:01:29
| 2019-11-17T02:01:29
| 233,895,511
| 1
| 0
|
MIT
| 2020-01-14T17:20:43
| 2020-01-14T17:20:42
| null |
UTF-8
|
Python
| false
| false
| 3,186
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import configparser
import io
import shutil
import subprocess
import os
import tempfile
import struct
import sys
import mutagen
FMT_VERSION = 'BB'
def readbytes(fp):
for x in iter(lambda: fp.read(1), b''):
if x == b'\00':
break
yield x
def parse(fp):
version = struct.unpack(FMT_VERSION, fp.read(2))
assert version == (0x01, 0x01)
for i in range(3):
data = b''.join(readbytes(fp))
yield float(data.decode('ascii'))
def dump(bpm, autogain, gaindb):
data = struct.pack(FMT_VERSION, 0x01, 0x01)
for value, decimals in ((bpm, 2), (autogain, 3), (gaindb, 3)):
data += '{:.{}f}'.format(value, decimals).encode('ascii')
data += b'\x00'
return data
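
# Illustrative round-trip sketch (not part of the original script): dump() and
# parse() are inverses on the three float fields.
def _dump_parse_roundtrip():
    payload = dump(120.0, 0.0, -3.5)
    bpm, autogain, gaindb = parse(io.BytesIO(payload))
    assert (bpm, autogain, gaindb) == (120.0, 0.0, -3.5)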
def main(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('file', metavar='FILE')
parser.add_argument('-e', '--edit', action='store_true')
args = parser.parse_args(argv)
tagfile = mutagen.File(args.file)
if tagfile is not None:
try:
tag = tagfile['GEOB:Serato Autotags']
except KeyError:
print('File is missing "GEOB:Serato Autotags" tag')
return 1
else:
fp = io.BytesIO(tag.data)
else:
fp = open(args.file, mode='rb')
with fp:
bpm, autogain, gaindb = parse(fp)
if args.edit:
editor = shutil.which(os.getenv('EDITOR', 'vi'))
if not editor:
print('No suitable $EDITOR found.', file=sys.stderr)
return 1
with tempfile.NamedTemporaryFile() as f:
f.write((
'bpm: {}\n'
'autogain: {}\n'
'gaindb: {}\n'
).format(bpm, autogain, gaindb).encode('ascii'))
f.flush()
status = subprocess.call((editor, f.name))
f.seek(0)
output = f.read()
if status != 0:
                print('Command execution failed with status: {}'.format(status),
file=sys.stderr)
return 1
cp = configparser.ConfigParser()
try:
cp.read_string('[Autotags]\n' + output.decode())
bpm = cp.getfloat('Autotags', 'bpm')
autogain = cp.getfloat('Autotags', 'autogain')
gaindb = cp.getfloat('Autotags', 'gaindb')
except Exception:
print('Invalid input, no changes made', file=sys.stderr)
return 1
new_data = dump(bpm, autogain, gaindb)
if tagfile:
if tagfile is not None:
tagfile['GEOB:Serato Autotags'] = mutagen.id3.GEOB(
encoding=0,
mime='application/octet-stream',
desc='Serato Autotags',
data=new_data,
)
tagfile.save()
else:
with open(args.file, mode='wb') as fp:
fp.write(new_data)
else:
print('BPM: {}'.format(bpm))
print('Auto Gain: {}'.format(autogain))
print('Gain dB: {}'.format(gaindb))
return 0
if __name__ == '__main__':
sys.exit(main())
|
[
"jan.holthuis@ruhr-uni-bochum.de"
] |
jan.holthuis@ruhr-uni-bochum.de
|
2e90533c77034f1799cb7fbaf0505fbc69ef0fe0
|
7ef179d708f9e92054dff9f1f5245cb25fa1b066
|
/api-inference-community/tests/test_nlp.py
|
95e4be5e45a81377341a21028d6b50a2f5bf4fcd
|
[
"Apache-2.0"
] |
permissive
|
ycemsubakan/huggingface_hub
|
8982286d3948188345c0714bfb3518af9a10d199
|
5585292287083dcbb71c0515b4df924078d677a6
|
refs/heads/main
| 2023-05-14T16:39:42.043630
| 2021-06-04T12:05:38
| 2021-06-04T12:05:38
| 374,215,794
| 0
| 0
|
Apache-2.0
| 2021-06-05T21:31:29
| 2021-06-05T21:31:29
| null |
UTF-8
|
Python
| false
| false
| 17,715
|
py
|
import json
from unittest import TestCase
from api_inference_community.validation import normalize_payload_nlp
from parameterized import parameterized
from pydantic.error_wrappers import ValidationError
class ValidationTestCase(TestCase):
def test_malformed_input(self):
bpayload = b"\xc3\x28"
with self.assertRaises(UnicodeDecodeError):
normalize_payload_nlp(bpayload, "tag")
def test_accept_raw_string_for_backward_compatibility(self):
query = "funny cats"
bpayload = query.encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(bpayload, "tag")
self.assertEqual(processed_params, {})
self.assertEqual(normalized_inputs, query)
class QuestionAnsweringValidationTestCase(TestCase):
def test_valid_input(self):
inputs = {"question": "question", "context": "context"}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "question-answering"
)
self.assertEqual(processed_params, {})
self.assertEqual(inputs, normalized_inputs)
def test_missing_input(self):
inputs = {"question": "question"}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "question-answering")
class SentenceSimilarityValidationTestCase(TestCase):
def test_valid_input(self):
source_sentence = "why is the sky blue?"
sentences = ["this is", "a list of sentences"]
inputs = {"source_sentence": source_sentence, "sentences": sentences}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "sentence-similarity"
)
self.assertEqual(processed_params, {})
self.assertEqual(inputs, normalized_inputs)
def test_missing_input(self):
source_sentence = "why is the sky blue?"
inputs = {"source_sentence": source_sentence}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "sentence-similarity")
class ConversationalValidationTestCase(TestCase):
def test_valid_inputs(self):
past_user_inputs = ["Which movie is the best ?"]
generated_responses = ["It's Die Hard for sure."]
text = "Can you explain why ?"
inputs = {
"past_user_inputs": past_user_inputs,
"generated_responses": generated_responses,
"text": text,
}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "conversational"
)
self.assertEqual(processed_params, {})
self.assertEqual(inputs, normalized_inputs)
class TableQuestionAnsweringValidationTestCase(TestCase):
def test_valid_input(self):
query = "How many stars does the transformers repository have?"
table = {
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
}
inputs = {"query": query, "table": table}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "table-question-answering"
)
self.assertEqual(processed_params, {})
self.assertEqual(inputs, normalized_inputs)
def test_invalid_table_input(self):
query = "How many stars does the transformers repository have?"
table = {
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512"],
}
inputs = {"query": query, "table": table}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "table-question-answering")
def test_invalid_question(self):
query = "How many stars does the transformers repository have?"
table = "Invalid table"
inputs = {"query": query, "table": table}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "table-question-answering")
def test_invalid_query(self):
query = {"not a": "query"}
table = {
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
}
inputs = {"query": query, "table": table}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "table-question-answering")
def test_no_table(self):
query = "How many stars does the transformers repository have?"
inputs = {
"query": query,
}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "table-question-answering")
def test_no_query(self):
table = {
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
}
inputs = {"table": table}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "table-question-answering")
class SummarizationValidationTestCase(TestCase):
def test_no_params(self):
bpayload = json.dumps({"inputs": "whatever"}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "summarization"
)
self.assertEqual(processed_params, {})
self.assertEqual(normalized_inputs, "whatever")
def test_valid_min_length(self):
params = {"min_length": 10}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "summarization"
)
self.assertEqual(processed_params, params)
self.assertEqual(normalized_inputs, "whatever")
def test_invalid_negative_min_length(self):
params = {"min_length": -1}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "summarization"
)
def test_invalid_large_min_length(self):
params = {"min_length": 1000}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "summarization"
)
def test_invalid_type_min_length(self):
params = {"min_length": "invalid"}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "summarization"
)
def test_valid_max_length(self):
params = {"max_length": 10}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "summarization"
)
self.assertEqual(processed_params, params)
self.assertEqual(normalized_inputs, "whatever")
def test_invalid_negative_max_length(self):
params = {"max_length": -1}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "summarization"
)
def test_invalid_large_max_length(self):
params = {"max_length": 1000}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "summarization"
)
def test_invalid_type_max_length(self):
params = {"max_length": "invalid"}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "summarization"
)
def test_invalid_min_length_larger_than_max_length(self):
params = {"min_length": 20, "max_length": 10}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "summarization"
)
class ZeroShotValidationTestCase(TestCase):
def test_single_label(self):
params = {"candidate_labels": "happy"}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "zero-shot-classification"
)
self.assertEqual(processed_params, params)
self.assertEqual(normalized_inputs, "whatever")
def test_list_labels(self):
params = {"candidate_labels": ["happy", "sad"]}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "zero-shot-classification"
)
self.assertEqual(processed_params, params)
self.assertEqual(normalized_inputs, "whatever")
def test_empty_list(self):
params = {"candidate_labels": []}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "zero-shot-classification")
def test_no_params(self):
bpayload = json.dumps({"inputs": "whatever"}).encode("utf-8")
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "zero-shot-classification")
def test_multi_label(self):
params = {"candidate_labels": "happy", "multi_label": True}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "zero-shot-classification"
)
self.assertEqual(processed_params, params)
self.assertEqual(normalized_inputs, "whatever")
def test_multi_label_wrong_type(self):
params = {"candidate_labels": "happy", "multi_label": "wrong type"}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "zero-shot-classification")
class FillMaskValidationTestCase(TestCase):
def test_no_params(self):
bpayload = json.dumps({"inputs": "whatever"}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "fill-mask"
)
self.assertEqual(processed_params, {})
self.assertEqual(normalized_inputs, "whatever")
def test_top_k(self):
params = {"top_k": 10}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "fill-mask"
)
self.assertEqual(processed_params, params)
self.assertEqual(normalized_inputs, "whatever")
def test_top_k_invalid_value(self):
params = {"top_k": 0}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "fill-mask")
def test_top_k_wrong_type(self):
params = {"top_k": "wrong type"}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "fill-mask")
def make_text_generation_test_case(tag):
def valid_params():
return [
("max_new_tokens", 10),
("top_k", 5),
("top_p", 0.5),
("max_time", 20.0),
("repetition_penalty", 50.0),
("temperature", 10.0),
("return_full_text", True),
("num_return_sequences", 2),
]
def invalid_params():
return [
("min_length", 1000),
("min_length", 0),
("min_length", "invalid"),
("max_length", 1000),
("max_length", 0),
("max_length", "invalid"),
("top_k", 0),
("top_k", "invalid"),
("top_p", -0.1),
("top_p", 2.1),
("top_p", "invalid"),
("max_time", -0.1),
("max_time", 120.1),
("max_time", "invalid"),
("repetition_penalty", -0.1),
("repetition_penalty", 200.1),
("repetition_penalty", "invalid"),
("temperature", -0.1),
("temperature", 200.1),
("temperature", "invalid"),
("return_full_text", "invalid"),
("num_return_sequences", -1),
("num_return_sequences", 100),
]
class TextGenerationTestCase(TestCase):
def test_no_params(self):
bpayload = json.dumps({"inputs": "whatever"}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(bpayload, tag)
self.assertEqual(processed_params, {})
self.assertEqual(normalized_inputs, "whatever")
@parameterized.expand(valid_params())
def test_valid_params(self, param_name, param_value):
params = {param_name: param_value}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
normalized_inputs, processed_params = normalize_payload_nlp(bpayload, tag)
self.assertEqual(processed_params, params)
self.assertEqual(normalized_inputs, "whatever")
@parameterized.expand(invalid_params())
def test_invalid_params(self, param_name, param_value):
params = {param_name: param_value}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, tag)
return TextGenerationTestCase
class Text2TextGenerationTestCase(
make_text_generation_test_case("text2text-generation")
):
pass
class TextGenerationTestCase(make_text_generation_test_case("text-generation")):
pass
class TasksWithOnlyInputStringTestCase(TestCase):
def test_feature_extraction_accept_string_no_params(self):
bpayload = json.dumps({"inputs": "whatever"}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "feature-extraction"
)
self.assertEqual(processed_params, {})
self.assertEqual(normalized_inputs, "whatever")
def test_fill_mask_accept_string_no_params(self):
bpayload = json.dumps({"inputs": "whatever"}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "fill-mask"
)
self.assertEqual(processed_params, {})
self.assertEqual(normalized_inputs, "whatever")
def test_text_classification_accept_string_no_params(self):
bpayload = json.dumps({"inputs": "whatever"}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "text-classification"
)
self.assertEqual(processed_params, {})
self.assertEqual(normalized_inputs, "whatever")
def test_token_classification_accept_string_no_params(self):
bpayload = json.dumps({"inputs": "whatever"}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "token-classification"
)
self.assertEqual(processed_params, {})
self.assertEqual(normalized_inputs, "whatever")
def test_translation_accept_string_no_params(self):
bpayload = json.dumps({"inputs": "whatever"}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "translation"
)
self.assertEqual(processed_params, {})
self.assertEqual(normalized_inputs, "whatever")
|
[
"noreply@github.com"
] |
ycemsubakan.noreply@github.com
|
32cea5092cf30db70772ef04e5e944197f432c08
|
488a5ccce62226f00f2630f0af99e938c3a1f586
|
/djingles/html/common.py
|
76bfa171f7376d711a9fc6c1fd14fde17c716794
|
[] |
no_license
|
vivsh/djingles
|
c0286c8fedc677f6090a425c91f8b76467fea11d
|
4e83ad66feb84492403cc9d7b05545ba708c7a42
|
refs/heads/master
| 2022-09-14T06:53:17.640075
| 2022-09-05T09:43:43
| 2022-09-05T09:43:43
| 130,049,023
| 0
| 0
| null | 2022-08-31T08:08:20
| 2018-04-18T10:59:54
|
Python
|
UTF-8
|
Python
| false
| false
| 6,299
|
py
|
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_str as force_text
from django.forms.models import ModelChoiceIteratorValue
import re
import json
from jinja2.utils import markupsafe
__all__ = ['html_json', 'html_attrs', "Element", "CssClassList", "CssStyle", 'add_css_class', 'empty']
def html_json(values):
content = json.dumps(values)
return markupsafe.Markup(content)
def html_attrs(*args, **kwargs):
attr = HtmlAttr()
attr.update(*args, **kwargs)
return str(attr)
def add_css_class(original_class, *css_classes):
css = CssClassList()
css.append(original_class)
css.append(css_classes)
return str(css)
class CssClassList(object):
def __init__(self):
self.classes = []
def __iter__(self):
return iter(self.classes)
def __len__(self):
return len(self.classes)
def copy(self):
value = CssClassList()
value.classes.extend(self.classes)
return value
def append(self, value):
if isinstance(value, str):
value = re.sub(r'\s+', ' ', value.strip())
if len(value) == 1:
value = value[0]
if isinstance(value, (tuple, list)):
for val in value:
self.append(val)
else:
if value not in self.classes:
self.classes.append(value)
def __contains__(self, item):
return item in self.classes
def __str__(self):
return " ".join(str(c) for c in self.classes if c)
class CssStyle(dict):
def render(self):
return ";".join("%s:%s" % (key.replace("_", "-"), value) for (key, value) in self.items())
def __str__(self):
return self.render()
def copy(self):
return CssStyle(super(CssStyle, self).copy())
def _normalize(key):
if key.endswith("_"):
key = key[:-1]
key = key.replace("__", ":").replace("_", "-")
return key
class HtmlAttr(object):
def __init__(self):
self.attrs = {}
self.styles = CssStyle()
self.classes = CssClassList()
def copy(self):
attr = HtmlAttr()
attr.attrs = self.attrs.copy()
attr.styles = self.styles.copy()
attr.classes = self.classes.copy()
return attr
def dict(self):
return dict(self)
def __setitem__(self, key, value):
self.set(key, value)
def __getitem__(self, item):
return dict(self)[item]
def __len__(self):
return len(dict(self))
def get(self, key):
return dict(self).get(key)
def set(self, key, value):
key = _normalize(key)
if key in {"class"}:
self.classes.append(value)
elif key == "style":
if isinstance(value, str):
result = {}
pairs = value.split(";")
for p in pairs:
k, v = p.split(":", 1)
result[k] = v
value = result
self.styles.update(value)
else:
self.attrs[key] = value
def update(self, *args, **attrs):
values = {}
values.update(*args, **attrs)
for k, v in values.items():
self.set(k, v)
def __iter__(self):
for k, v in self.attrs.items():
yield k, v
if self.classes:
yield "class", str(self.classes)
if self.styles:
yield "style", self.styles.render()
def render(self):
pairs = []
for key, value in self:
if value is None or value is False:
continue
if value is True:
pairs.append(key)
else:
if isinstance(value, ModelChoiceIteratorValue):
value = value.value
if not isinstance(value, (str, bytes)):
value = html_json(value)
pairs.append("%s='%s'" % (key, str(value)))
return " ".join(pairs)
def __str__(self):
return self.render()
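
# Illustrative example (not part of the original module): trailing and double
# underscores in keyword names are normalised by HtmlAttr, and the class/style
# keys get special handling; plain attributes render first, then class, then style.
def _html_attr_example():
    rendered = html_attrs(class_="btn", data_toggle="modal", disabled=True, style="color:red")
    assert rendered == "data-toggle='modal' disabled class='btn' style='color:red'"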
class Element(object):
def __init__(self, tag):
self.tag = tag
self.attrib = HtmlAttr()
self.children = []
def __call__(self, **kwargs):
el = self.copy()
el.attrib.update(kwargs)
return el
def __getitem__(self, item):
el = self.copy()
if not isinstance(item, (list, tuple)):
item = [item]
for c in item:
el.append(c)
return el
def copy(self):
el = self.__class__(self.tag)
el.attrib = self.attrib.copy()
el.children = self.children[:]
return el
def mutate(self, tag):
el = tag.copy()
el.attrib.update(self.attrib.copy())
el.children = self.children[:]
return el
def append(self, child):
if child is None:
return
if isinstance(child, (list, tuple)):
for c in child:
self.append(c)
else:
self.children.append(child)
def convert_to_text(self, el, *args, **kwargs):
return el.render(*args, **kwargs) if hasattr(el, 'render') else force_text(el)
def render_children(self, *args, **kwargs):
return "".join(filter(None, (self.convert_to_text(c, *args, **kwargs)for c in self.children)))
def render(self, ctx=None):
if self.attrib.get('if') is False:
return None
attrs = self.attrib
content = self.render_children(ctx)
tag = _normalize(self.tag)
return u"<{tag} {attrs}>{content}</{tag}>".format(**locals())
def __str__(self):
return self.render()
def __html__(self):
return self.render()
class Empty(Element):
def render(self, *args, **kwargs):
return self.render_children(*args, **kwargs)
empty = Empty("none")
for name in "html body link meta div span form section article aside main ul li ol dl dd dt p a strong "\
"i fieldset legend b em input select button label nav textarea " \
"table tbody tfoot thead tr td th figure caption img".split(" "):
__all__.append(name)
globals()[name] = Element(name)
if __name__ == '__main__':
print(input(type="radio", checked=False).render())
|
[
"vivek@levimind.com"
] |
vivek@levimind.com
|
1471ddbaac8e77ebb94373b674d3baffc1ddc0b8
|
dc9b0ea6714c29651cfd8b494862f31f07d85f28
|
/project22_bio_bot/test2/test11.py
|
156b2facbdb8a16006b767f34cf2755aafe51c3b
|
[] |
no_license
|
Papashanskiy/PythonProjects
|
c228269f0aef1677758cb6e2f1acdfa522da0a02
|
cf999867befa7d8213b2b6675b723f2b9f392fd7
|
refs/heads/master
| 2022-12-12T15:23:56.234339
| 2019-02-10T09:14:56
| 2019-02-10T09:14:56
| 148,336,536
| 0
| 0
| null | 2022-12-08T03:01:04
| 2018-09-11T15:10:44
|
Python
|
UTF-8
|
Python
| false
| false
| 6,068
|
py
|
import sqlite3
import random
import string
# db questions
# u (username text, var int, count_r_a int)
# q (question text, a_1 text, a_2 text, a_3 text, a_4 text, r_a int)
# v (username text, q_n text) q_v - question number (string: '[1, 2, 7, 8]')
class BioCheck:
db_name = 'questions.db'
MY_VOWELS = 'aieou'
COUNT_VARIANTS = 0
COUNT_QUESTIONS = 0
def __init__(self, is_external=False):
conn, c = self.db_connect()
a = c.execute('SELECT * FROM q').fetchall()
count = 0
for _ in a:
count += 1
self.COUNT_QUESTIONS = count
        self.COUNT_VARIANTS = count // 20  # integer division so random.randint() receives an int
def db_connect(self):
try:
conn = sqlite3.connect(self.db_name)
c = conn.cursor()
except sqlite3.Error as e:
print(e)
print("Не удалось подключиться к базе данных!")
exit(1)
return conn, c
    def check(self, answers, variant):
        # TODO: unfinished stub - it only opens a connection and does not check any answers yet
        conn, c = self.db_connect()
def create_table(self):
conn, c = self.db_connect()
c.execute('CREATE TABLE IF NOT EXISTS q (question text, a_1 text, a_2 text, a_3 text, a_4 text, r_a int)')
conn.commit()
for i in range(100):
try:
q, a_1, a_2, a_3, a_4, r_a = self.randomize()
params = (q, a_1, a_2, a_3, a_4, r_a)
c.execute('INSERT INTO q VALUES (?, ?, ?, ?, ?, ?)', params)
conn.commit()
except sqlite3.Error as e:
print(e)
                print(f'Could not insert data into the database {self.db_name}')
exit(1)
def create_table_u(self):
conn, c = self.db_connect()
c.execute('CREATE TABLE IF NOT EXISTS u (username text, var int, count_r_a int)')
conn.commit()
def show_teble(self):
conn, c = self.db_connect()
a = c.execute('SELECT * FROM q').fetchall()
count = 0
for i in a:
count += 1
print(count)
def randomize(self):
q = ''.join(random.choice(string.ascii_lowercase + string.whitespace) for _ in range(random.randint(60, 90))).capitalize()
a_1 = ''.join(random.choice(string.ascii_lowercase + string.whitespace) for _ in range(random.randint(30, 40))).capitalize()
a_2 = ''.join(random.choice(string.ascii_lowercase + string.whitespace) for _ in range(random.randint(30, 40))).capitalize()
a_3 = ''.join(random.choice(string.ascii_lowercase + string.whitespace) for _ in range(random.randint(30, 40))).capitalize()
a_4 = ''.join(random.choice(string.ascii_lowercase + string.whitespace) for _ in range(random.randint(30, 40))).capitalize()
r_a = random.randint(1, 4)
return q, a_1, a_2, a_3, a_4, r_a
def randomize_answers(self):
username = ''
for _ in range(random.randint(2, 5)):
username += username.join(random.choice(string.ascii_lowercase))
username += username.join(random.choice(self.MY_VOWELS))
username = username.capitalize()
username += ' '
last_name = ''
for _ in range(random.randint(2, 5)):
last_name += last_name.join(random.choice(string.ascii_lowercase))
last_name += last_name.join(random.choice(self.MY_VOWELS))
last_name = last_name.capitalize()
username += last_name
variant = random.randint(1, self.COUNT_VARIANTS)
answers = []
for _ in range(20):
answers.append(random.randint(1, 4))
return username, variant, answers
class BioVariant:
DB_NAME = 'variant.db'
MY_VOWELS = 'aieou'
def db_connect(self):
try:
conn = sqlite3.connect(self.DB_NAME)
c = conn.cursor()
except sqlite3.Error as e:
print(e)
print("Не удалось подключиться к базе данных!")
exit(1)
return conn, c
def create_table_v(self):
conn, c = self.db_connect()
c.execute('CREATE TABLE IF NOT EXISTS v (username text, q_n text)')
conn.commit()
    # Random variant
def add_info_v(self):
conn, c = self.db_connect()
bio = BioCheck()
count_q = bio.COUNT_QUESTIONS
username = ''
answers = []
answers_str = ''
for _ in range(10):
username = self.name()
for i in range(20):
answers.append(str(random.randint(1, count_q)))
answers_str = ' '.join(answers)
            del answers[:]  # clear the answers before building the next user's set
params = [username, answers_str]
try:
c.execute('INSERT INTO v VALUES(?, ?)', params)
conn.commit()
except sqlite3.Error as e:
print(e)
print("Не удалось добавить данные в таблицу v")
def look_into_v(self):
conn, c = self.db_connect()
try:
a = c.execute('SELECT rowid, * FROM v').fetchall()
except sqlite3.Error as e:
print(e)
print("Не удалось получить данные из таблицы v")
for i in a:
print(i[0], i[1])
def name(self):
username = ''
for _ in range(random.randint(2, 5)):
username += username.join(random.choice(string.ascii_lowercase))
username += username.join(random.choice(self.MY_VOWELS))
username = username.capitalize()
username += ' '
last_name = ''
for _ in range(random.randint(2, 5)):
last_name += last_name.join(random.choice(string.ascii_lowercase))
last_name += last_name.join(random.choice(self.MY_VOWELS))
last_name = last_name.capitalize()
username += last_name
return username
def main():
conn = BioCheck()
variant = BioVariant()
variant.look_into_v()
if __name__ == '__main__':
main()
|
[
"apashanskiy@gmail.com"
] |
apashanskiy@gmail.com
|
6fee4924bd89b10be4caafb0e7033431b22856f4
|
87d5b21265c381104de8f45aa67842a4adc880eb
|
/213. House Robber II.py
|
2368f8538a98c87b43048c5284734ee38d79b8d4
|
[] |
no_license
|
MYMSSENDOG/leetcodes
|
ac047fe0d951e0946740cb75103fc94aae967166
|
8a52a417a903a0742034161471a084bc1e494d68
|
refs/heads/master
| 2020-09-23T16:55:08.579319
| 2020-09-03T19:44:26
| 2020-09-03T19:44:26
| 225,543,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
class Solution:
def rob(self, nums) -> int:
if len(nums) <= 3:
return max(nums)
dp = [0] * len(nums)
dp[0] = nums[0]
dp[1] = max(nums[:2])
dp2 = [0] * len(nums)
dp2[1] = nums[1]
ret = 0
for i in range(2, len(nums)):
dp[i] = max(dp[i - 2] + nums[i], dp[i - 1])
ret = max(ret, dp[-2])
for i in range(2, len(nums)):
dp2[i] = max(dp2[i - 2] + nums[i], dp2[i - 1])
ret = max(ret, dp2[-1])
return ret
sol = Solution()
nums = [2,1,1,2]
print(sol.rob(nums))
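
# Alternative sketch (illustrative, same idea): the circular case is usually
# reduced to two linear passes, which needs only O(1) extra space.
def rob_linear(houses):
    prev, curr = 0, 0
    for x in houses:
        prev, curr = curr, max(curr, prev + x)
    return curr

def rob_circular(nums):
    if len(nums) == 1:
        return nums[0]
    # either skip the first house or skip the last one
    return max(rob_linear(nums[1:]), rob_linear(nums[:-1]))

assert rob_circular([2, 1, 1, 2]) == 3 == sol.rob([2, 1, 1, 2])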
|
[
"fhqmtkfkd@naver.com"
] |
fhqmtkfkd@naver.com
|
a03702a61d3caee05419c9c79a3a8f918f429b9b
|
7e2144cbbd53e693755c7d277252d8add867dfe8
|
/src/success_backup_check/tests/test_documentation.py
|
cc33e33bfedac7bc420d9a3a06ae03014dce807f
|
[
"MIT"
] |
permissive
|
linuxluigi/success-backup-check
|
bf7c2cf33aa9d4b39171e46010946e491ecf3a37
|
aa3be2dbd8b0106b931bf226614e05af68034077
|
refs/heads/master
| 2020-06-29T21:00:21.381307
| 2019-08-07T19:34:05
| 2019-08-07T19:34:05
| 74,410,317
| 0
| 0
|
MIT
| 2019-08-07T19:34:06
| 2016-11-21T22:10:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,237
|
py
|
import os
import pytest
import manuel.ignore
import manuel.codeblock
import manuel.doctest
import manuel.testing
def make_manuel_suite(ns):
"""
Prepare Manuel test suite.
Test functions are injected in the given namespace.
"""
# Wrap function so pytest does not expect an spurious "self" fixture.
def _wrapped(func, name):
wrapped = lambda: func()
wrapped.__name__ = name
return wrapped
# Collect documentation files
cd = os.path.dirname
path = cd(cd(cd(cd(__file__))))
doc_path = os.path.join(path, 'docs')
files = sorted(os.path.join(doc_path, f) for f in os.listdir(doc_path))
files = [f for f in files if f.endswith('.rst') or f.endswith('.txt')]
# Create manuel suite
m = manuel.ignore.Manuel()
m += manuel.doctest.Manuel()
m += manuel.codeblock.Manuel()
# Copy tests from the suite to the global namespace
suite = manuel.testing.TestSuite(m, *files)
for i, test in enumerate(suite):
name = 'test_doc_%s' % i
ns[name] = pytest.mark.documentation(_wrapped(test.runTest, name))
return suite
try:
make_manuel_suite(globals())
except OSError:
print('Documentation files not found: disabling tests!')
|
[
"Steffen.Exler@gmail.com"
] |
Steffen.Exler@gmail.com
|
b813ebcbec7f13c5a86fd3eaae64d01fb3fcd474
|
02067187903c83e354cc4d9146b4c272ccfb9636
|
/tests/test.py
|
43ec383b9ec307b5fef402c89232cf5d674bbc67
|
[] |
no_license
|
japoorv/vehicle_routing_problem
|
777520e4255237a2871194f0fa2b63e9336f0c60
|
3cc77ff54cac26a5568cab78bd131882da9e5ba6
|
refs/heads/master
| 2022-04-29T03:57:00.668458
| 2020-04-30T11:45:38
| 2020-04-30T11:45:38
| 254,881,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
import os
import requests
def sol():
name=[i for i in os.listdir() if i.endswith('.csv')]
for i in name:
response=requests.post('https://vehicleroutingproblem.herokuapp.com/handleUpload',files=dict(datax=open(i,'r')))
file_output=open(i[:-4]+'.txt','w')
file_output.write(response.content.decode('utf-8'))
file_output.close()
return
if __name__=='__main__':
sol()
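
# Hardening sketch (illustrative, not part of the original script): close the
# uploaded file handles and fail loudly on HTTP errors via raise_for_status().
def sol_checked():
    for name in (f for f in os.listdir() if f.endswith('.csv')):
        with open(name, 'r') as fh:
            response = requests.post(
                'https://vehicleroutingproblem.herokuapp.com/handleUpload',
                files=dict(datax=fh))
        response.raise_for_status()  # raise if the upload did not return 2xx
        with open(name[:-4] + '.txt', 'w') as out:
            out.write(response.text)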
|
[
"japoorv0@gmail.com"
] |
japoorv0@gmail.com
|
14f23765586203a4d9958eed8733c452add248a8
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/224/users/4353/codes/1577_996.py
|
89cd099d56c2d7ae2b56fdad07e0cf80becca714
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 85
|
py
|
print ("*****")
print("(* o o *)")
print("* ^ *")
print("* --- *")
print ("*****")
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
b344650697ece688f1f383bbc57df9ee24f05e01
|
8efe56ee34c455a6b1336897f6d457acbc9c10f9
|
/examples/torch/ppo_pendulum.py
|
34d699874c3aa6ef232a2318b190b4cb7df41cec
|
[
"MIT"
] |
permissive
|
neurips2020submission11699/metarl
|
ab18d11e708bf569d76cb2fab2bcce089badd111
|
ae4825d21478fa1fd0aa6b116941ea40caa152a5
|
refs/heads/master
| 2022-10-15T22:03:09.948673
| 2020-06-11T19:22:55
| 2020-06-11T19:30:58
| 268,410,657
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,794
|
py
|
#!/usr/bin/env python3
"""This is an example to train a task with PPO algorithm (PyTorch).
Here it runs InvertedDoublePendulum-v2 environment with 100 iterations.
"""
import torch
from metarl import wrap_experiment
from metarl.envs import MetaRLEnv
from metarl.experiment import LocalRunner
from metarl.experiment.deterministic import set_seed
from metarl.torch.algos import PPO
from metarl.torch.policies import GaussianMLPPolicy
from metarl.torch.value_functions import GaussianMLPValueFunction
@wrap_experiment
def ppo_pendulum(ctxt=None, seed=1):
"""Train PPO with InvertedDoublePendulum-v2 environment.
Args:
ctxt (metarl.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
env = MetaRLEnv(env_name='InvertedDoublePendulum-v2')
runner = LocalRunner(ctxt)
policy = GaussianMLPPolicy(env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
algo = PPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
max_path_length=100,
discount=0.99,
center_adv=False)
runner.setup(algo, env)
runner.train(n_epochs=100, batch_size=10000)
ppo_pendulum(seed=1)
|
[
"neurips2020submission11699@gmail.com"
] |
neurips2020submission11699@gmail.com
|
19a92659db1770093afeb8f64755fb59a0e6cae8
|
a8abdb08b72089171e3471cc50954ca3d778f4c3
|
/api/core/migrations/0009_diskwipe_started.py
|
8cb6e8b0be1e9aff796d66f0de11f8b54645ac62
|
[
"MIT"
] |
permissive
|
wh1te909/imagebackups
|
195b86f267750c2230253373edd7d0b08e579e13
|
a0f94b6571a8ed9083cd4a02bf28ce4bfc2882c0
|
refs/heads/master
| 2023-01-13T18:42:24.413592
| 2020-08-01T23:16:14
| 2020-08-01T23:16:14
| 222,405,465
| 9
| 4
|
MIT
| 2023-01-06T02:18:27
| 2019-11-18T09:01:38
|
Python
|
UTF-8
|
Python
| false
| false
| 493
|
py
|
# Generated by Django 2.2.7 on 2019-11-13 07:31
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('core', '0008_backupjob_celery_id'),
]
operations = [
migrations.AddField(
model_name='diskwipe',
name='started',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
|
[
"dcparsi@gmail.com"
] |
dcparsi@gmail.com
|
5d8281c8b7a716e074bc0bfda5f76f158d0a4d82
|
786de89be635eb21295070a6a3452f3a7fe6712c
|
/Translator/tags/V00-02-38/src/TestPutNDArray.py
|
4130dbd1d1754ef949c9a0492943eb16895b4fd7
|
[] |
no_license
|
connectthefuture/psdmrepo
|
85267cfe8d54564f99e17035efe931077c8f7a37
|
f32870a987a7493e7bf0f0a5c1712a5a030ef199
|
refs/heads/master
| 2021-01-13T03:26:35.494026
| 2015-09-03T22:22:11
| 2015-09-03T22:22:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 758
|
py
|
import psana
import numpy as np
class TestPutNDArray(object):
def __init__(self):
self.src = self.configSrc('cspadsrc')
self.cc=-1
self.allevts = -1
def begincalibcycle(self, evt, env):
self.cc += 1
self.ii=-1
def event(self, evt, env):
self.ii += 1
self.allevts += 1
cspad = evt.get(psana.CsPad.DataV2, self.src)
if cspad is None:
print "Translator.TestPutNDArray: cc=%5d evt=%5d allevt=%7d: NO CSPAD" % (self.cc, self.ii, self.allevts)
return
arr = np.ones((2,3),np.float32)
evt.put(arr,self.src, "myarray")
print "Translator.TestPutNDArray: cc=%5d evt=%5d allevt=%7d" % (self.cc, self.ii, self.allevts)
|
[
"davidsch@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] |
davidsch@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7
|
48c21e7a403c1dc08cc450ffe204cbb00e4114ad
|
d39788764b2e098884b97a1f1a1f8bca4991f592
|
/passwd/secondpaswd.py
|
c32c200c65790a51aec8784166ada6684735ad83
|
[] |
no_license
|
dlasche/mycode
|
c7a574aa944f94927b8e83a9bce177f8774f54d0
|
9f021d61e53eab0a5f66e1c640762127516b19f9
|
refs/heads/main
| 2023-05-31T02:16:59.414997
| 2021-06-18T14:44:53
| 2021-06-18T14:44:53
| 374,736,263
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 887
|
py
|
#!/usr/bin/python3
## Try a real world test with getpass
## import Paramiko so we can talk SSH
import paramiko # allows Python to ssh
import os # low level operating system commands
import getpass # we need this to accept passwords
def main():
## where to connect to
    t = paramiko.Transport(("10.10.2.3", 22)) ## IP and port of bender, passed as a (host, port) tuple
## how to connect (see other labs on using id_rsa private / public keypairs)
t.connect(username="bender", password=getpass.getpass()) # notice the password references getpass
## Make an SFTP connection object
sftp = paramiko.SFTPClient.from_transport(t)
## copy our firstpasswd.py script to bender
sftp.put("firstpassword.py", "firstpassword.py") # move file to target location home directory
## close the connection
sftp.close() # close the connection
if __name__ == "__main__":
main()
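
## Key-based variant (illustrative sketch, not part of the lab script): the same
## transfer can authenticate with an RSA private key instead of a password.
## The keyfile path below is an assumption.
def main_with_key(keyfile="~/.ssh/id_rsa"):
    key = paramiko.RSAKey.from_private_key_file(os.path.expanduser(keyfile))
    t = paramiko.Transport(("10.10.2.3", 22))
    t.connect(username="bender", pkey=key)   # key-based auth, no password prompt
    sftp = paramiko.SFTPClient.from_transport(t)
    sftp.put("firstpassword.py", "firstpassword.py")
    sftp.close()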
|
[
"dlasche@gmail.com"
] |
dlasche@gmail.com
|