| blob_id (string, 40-40) | directory_id (string, 40-40) | path (string, 2-616) | content_id (string, 40-40) | detected_licenses (list, 0-69 items) | license_type (string, 2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40-40) | revision_id (string, 40-40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, 2-10.3M) | authors (list, 1-1 items) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
11173664d5518f6141f609f431c7e0e39c8928a5
|
87e2472fdbdfd841c5e140da302a905d0a7ada58
|
/Coordinator/ParseCor.py
|
a4dd58221d59fc21fa6462bcdf6a63c9804eb283
|
[] |
no_license
|
quanglys/BPA
|
7218a4793e3ad9fe777d4dfa1ee793d3e68a5996
|
f572a562fe9613e2f83b006cbc3d8ea96620dddf
|
refs/heads/master
| 2021-01-18T20:39:24.190274
| 2017-04-03T05:40:41
| 2017-04-03T05:40:41
| 86,983,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,160
|
py
|
import argparse


def createParse():
    parser = argparse.ArgumentParser()
    # NOTE: argparse's type=bool is a known pitfall -- bool('False') is True,
    # so any non-empty value passed to -DEBUG parses as True.
    parser.add_argument('-DEBUG', dest='DEBUG', type=bool, default=False)
    parser.add_argument('-ext', dest='ext', type=str, default='')
    parser.add_argument('-k', dest='k', type=int, default=4)
    parser.add_argument('-ROW', dest='ROW', type=int, default=1)
    parser.add_argument('-IP_SERVER', dest='IP_SERVER', type=str, default=None)
    parser.add_argument('-NUMBER_NODE', dest='NUMBER_NODE', type=int, default=1)
    parser.add_argument('-NUM_MONITOR', dest='NUM_MONITOR', type=int, default=120)
    parser.add_argument('-TIME_CAL_NETWORK', dest='TIME_CAL_NETWORK', type=float, default=3.0)
    return parser
def readConfig(fName: str):
    data = ''
    try:
        with open(fName, 'r') as f:
            while True:
                temp = f.readline().replace('\n', '')
                if temp == '':  # stop at the first blank line (or EOF)
                    break
                data += temp + ' '
    except Exception:
        return None
    data = data.rstrip()
    if len(data) == 0:
        return None
    tokens = data.split()  # plain split() avoids empty tokens from repeated spaces
    parser = createParse()
    return parser.parse_args(tokens)
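# Minimal usage sketch (editor's addition; the file name 'node.cfg' and its
# contents are hypothetical -- readConfig() expects whitespace-separated
# argparse tokens, read line by line until a blank line or EOF):
#   node.cfg contains:  -k 8 -IP_SERVER 127.0.0.1
#   args = readConfig('node.cfg')
#   print(args.k, args.IP_SERVER)  # -> 8 127.0.0.1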
|
[
"quanglys@gmail.com"
] |
quanglys@gmail.com
|
a25593377ee2f85c93e81f693a02f302e5bc24a6
|
b05566468ea03d79432453afe4a00ec2217b84e5
|
/pytorch/evaluation.py
|
569e36557d183bd94f22d6f7cf0e3be624949efe
|
[] |
no_license
|
nianfudong/GCS
|
3a9d474f1f76ff48305b6834287aa518f2b68d40
|
38667e4227fbdaa824cdcf02359b94e09e1112c2
|
refs/heads/master
| 2023-03-09T07:03:55.033178
| 2021-02-24T07:42:00
| 2021-02-24T07:42:00
| 92,651,407
| 0
| 1
| null | 2021-02-24T07:36:27
| 2017-05-28T09:20:27
|
MATLAB
|
UTF-8
|
Python
| false
| false
| 1,546
|
py
|
import os
import math
#inFileTruth = open('F:/wflw/WFLW_annotations/valGroundtruth.txt','r')
#inFilePred = open('F:/wflw/modelv1/results/modelmse/msepredVal.txt','r')
inFileTruth = open('F:/wflw/WFLW_annotations/testGroundtruth.txt','r')
inFilePred = open('F:/wflw/modelv1/results/modelgcs/gcswingpredTest_1.5.txt','r')
allTruth = inFileTruth.readlines()
allPred = inFilePred.readlines()
allError = 0
failureNum = 0
for ix in range(len(allTruth)):
curTruthLine = allTruth[ix]
curTruthLandmark = curTruthLine.split(' ')[1:-1]
curPredLine = allPred[ix]
curPredLandmark = curPredLine.split(' ')[1:-1]
leftx = float(curTruthLandmark[120])
lefty = float(curTruthLandmark[121])
rightx = float(curTruthLandmark[144])
righty = float(curTruthLandmark[145])
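    # Editor's note (assumption): indices 120/121 and 144/145 are the flattened
    # x/y of landmarks 60 and 72 in the 98-point WFLW layout, i.e. the outer
    # eye corners, so `norm` below is the inter-ocular normalization distance.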
norm = math.sqrt((rightx-leftx) * (rightx-leftx) + (righty - lefty) * (righty - lefty))
error = 0
for i in range(0,98):
predX = float(curPredLandmark[i*2])
predY = float(curPredLandmark[i * 2 + 1])
truthX = float(curTruthLandmark[i * 2])
truthY = float(curTruthLandmark[i * 2 + 1])
dist = math.sqrt((truthX-predX) * (truthX-predX) + (truthY - predY) * (truthY - predY))
normdist = dist / norm
error += normdist
if normdist > 0.1:
failureNum += 1
error = error/98.0
allError += error
print("mean error is: " + str(allError / 2500.0))
print("failure rate is: " + str(failureNum / (2500.0 * 98.0)))
|
[
"noreply@github.com"
] |
nianfudong.noreply@github.com
|
8f27136187598f06ad8585512871e82615f64483
|
7e87e190d917735f2e31934d7fe210ee0cd73ba2
|
/src/log_manager.py
|
ef4ab9d6b9eb8402ef5b3bca49f3a359bf796002
|
[
"MIT"
] |
permissive
|
daruuro/instagrambot
|
e70db5d4aa9b257d9837711bb123fd3e3ea100f7
|
e1428606bec294576482acda273129faf3a22947
|
refs/heads/master
| 2023-04-07T23:31:11.313746
| 2015-07-28T04:20:35
| 2015-07-28T04:20:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 41
|
py
|
import sys
#TODO: self manage log file
|
[
"amabdalla10@gmail.com"
] |
amabdalla10@gmail.com
|
ab73af80761eab3522a833fbf90b61db4370b423
|
1d443a2e051e4d5e8924285148652c22c2ab55e4
|
/annotationDatabase/shared/models.py
|
a1422f87b3032df0e2bd47e1ef9cdb50a4d7fdd2
|
[] |
no_license
|
erikwestra/ripple-annotation-database
|
e21ec83330c1d9b041444939d44859ae4a50d07f
|
a7d49d463ea97900333885dd29cb2e70c1a0fdb9
|
refs/heads/master
| 2021-01-01T19:02:01.020302
| 2015-02-18T03:05:44
| 2015-02-18T03:05:44
| 19,088,181
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,944
|
py
|
""" annotationDatabase.shared.models
    This module defines the shared database models used by the Ripple
Annotation Database.
"""
import hashlib
import uuid
from django.db import models
#############################################################################
class User(models.Model):
""" A signed-up user of the public interface.
"""
id = models.AutoField(primary_key=True)
username = models.TextField(unique=True, db_index=True)
password_salt = models.TextField()
password_hash = models.TextField()
blocked = models.BooleanField(default=False)
def set_password(self, password):
""" Encrypt the given password and store it into this User object.
"""
self.password_salt = uuid.uuid4().hex
self.password_hash = hashlib.md5(password +
self.password_salt).hexdigest()
def is_password_correct(self, password):
""" Return True if and only if the given password is correct.
"""
hash = hashlib.md5(password + self.password_salt).hexdigest()
return (hash == self.password_hash)
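    # Editor's note: this is Python 2-era code -- on Python 3, hashlib.md5()
    # requires bytes, so password and salt would need .encode() before hashing;
    # salted MD5 is also considered too weak for password storage today
    # (Django's built-in password hashers are the usual alternative).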
#############################################################################
class AnnotationKey(models.Model):
""" A single unique annotation key used by one or more annotations.
"""
id = models.AutoField(primary_key=True)
key = models.TextField(unique=True, db_index=True)
#############################################################################
class AnnotationValue(models.Model):
""" A single unique annotation value used by one or more annotations.
"""
id = models.AutoField(primary_key=True)
value = models.TextField(unique=True, db_index=True)
#############################################################################
class Account(models.Model):
""" A reference to a Ripple account.
"""
id = models.AutoField(primary_key=True)
address = models.TextField(unique=True, db_index=True)
owner = models.ForeignKey(User, null=True)
#############################################################################
class AnnotationBatch(models.Model):
""" A single batch of uploaded annotations.
"""
id = models.AutoField(primary_key=True)
timestamp = models.DateTimeField()
user_id = models.TextField()
#############################################################################
class Annotation(models.Model):
""" A single uploaded annotation value.
Note that the Annotation records are never deleted or overwritten; they
provide an audit trail of the changes made to the annotation values
over time.
"""
id = models.AutoField(primary_key=True)
batch = models.ForeignKey(AnnotationBatch)
account = models.ForeignKey(Account)
key = models.ForeignKey(AnnotationKey)
value = models.ForeignKey(AnnotationValue, null=True)
hidden = models.BooleanField(default=False)
hidden_at = models.DateTimeField(null=True)
hidden_by = models.TextField(null=True)
#############################################################################
class CurrentAnnotation(models.Model):
""" A single annotation currently in use.
There is one and only one CurrentAnnotation record for every
combination of account and annotation key. This is distinct from
the Annotation record, which holds annotations which may once have
applied but have now been overwritten.
"""
id = models.AutoField(primary_key=True)
account = models.ForeignKey(Account)
key = models.ForeignKey(AnnotationKey)
value = models.ForeignKey(AnnotationValue)
class Meta:
unique_together = [
["account", "key"],
]
index_together = [
["key", "value"],
]
#############################################################################
class AnnotationTemplate(models.Model):
""" A single uploaded annotation template.
"""
id = models.AutoField(primary_key=True)
name = models.TextField(unique=True, db_index=True)
#############################################################################
class AnnotationTemplateEntry(models.Model):
""" A single annotation entry within an annotation template.
Note that the "choices" field holds the available choices as a JSON
string.
"""
id = models.AutoField(primary_key=True)
template = models.ForeignKey(AnnotationTemplate)
annotation = models.ForeignKey(AnnotationKey)
public = models.NullBooleanField()
label = models.TextField()
type = models.TextField(choices=[("choice", "choice"),
("field", "field")],
default="field")
default = models.TextField(null=True)
choices = models.TextField(null=True)
field_size = models.IntegerField(null=True)
field_required = models.NullBooleanField()
field_min_length = models.IntegerField(null=True)
field_max_length = models.IntegerField(null=True)
#############################################################################
class Client(models.Model):
""" A client system authorized to use the Annotation Database.
"""
id = models.AutoField(primary_key=True)
name = models.TextField(unique=True, db_index=True)
auth_token = models.TextField(unique=True, db_index=True)
#############################################################################
class Session(models.Model):
""" An active session within the Authentication app.
"""
id = models.AutoField(primary_key=True)
session_token = models.TextField()
user = models.ForeignKey(User)
last_access = models.DateTimeField()
|
[
"ewestra@gmail.com"
] |
ewestra@gmail.com
|
581b001c43949dbb7ebcdac5baf8d0a9a270ae0f
|
17e27c3131beb91947d0246372f8810894b4e99b
|
/pH-array/create-pH-array-fast-384.py
|
1e1e25f79beb6e539d66bd1cb1ce75584f646ff5
|
[] |
no_license
|
jhprinz/robots
|
b0a16a5ebcc609d6d0bd411b45706f1f702446a6
|
e2e234a8198044d3d09f1ec5ed429a3bf6062616
|
refs/heads/master
| 2021-01-22T16:17:23.567963
| 2015-10-26T16:16:30
| 2015-10-26T16:16:30
| 24,623,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,961
|
py
|
#!/usr/bin/env python
"""
Create pH arrays along rows of 384-well plate.
Optimized for speed.
"""
# TODO: Replace this table with a module that computes buffer recipes automatically.
filename = 'citric-phosphate-24.txt'
infile = open(filename, 'r')
lines = infile.readlines()
infile.close()
conditions = list()
for line in lines:
# ignore comments
if line[0] == '#': continue
    # process data
elements = line.split()
entry = dict()
entry['pH'] = float(elements[0])
entry['citric acid'] = float(elements[1])
entry['sodium phosphate'] = float(elements[2])
# Adjust for 0.1M sodium phosphate.
entry['sodium phosphate'] *= 2
total = entry['citric acid'] + entry['sodium phosphate']
entry['citric acid'] /= total
entry['sodium phosphate'] /= total
# Store entry.
conditions.append(entry)
def aspirate(RackLabel, RackType, position, volume, tipmask, LiquidClass='Water free dispense'):
if (volume < 3.00) or (volume > 1000.0):
raise Exception("Aspirate volume outside of 3-1000 uL (asked for %.3f uL)" % volume)
return 'A;%s;;%s;%d;;%f;%s;;%d\r\n' % (RackLabel, RackType, position, volume, LiquidClass, tipmask)
def dispense(RackLabel, RackType, position, volume, tipmask, LiquidClass='Water free dispense'):
if (volume < 3.00) or (volume > 1000.0):
raise Exception("Dispense volume > 1000 uL (asked for %.3f uL)" % volume)
return 'D;%s;;%s;%d;;%f;%s;;%d\r\n' % (RackLabel, RackType, position, volume, LiquidClass, tipmask)
def washtips():
return 'W;\r\n' # queue wash tips
assay_volume = 100.0 # assay volume (uL)
buffer_volume = assay_volume
assay_RackType = '4ti-0203' # black 384-well plate with clear bottom
volume_consumed = dict()
volume_consumed['citric acid'] = 0.0
volume_consumed['sodium phosphate'] = 0.0
# Build worklist.
worklist = ""
class TransferQueue(object):
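    # Editor's note: the queue accumulates dispense operations and, once the
    # next transfer would push the running total past MAX_VOLUME, _flush()
    # turns them into a single aspirate followed by the queued dispenses and
    # one tip wash -- far fewer washes than one aspirate per destination well.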
def __init__(self, SourceRackLabel, SourceRackType, SourcePosition, tipmask):
self.SourceRackLabel = SourceRackLabel
self.SourceRackType = SourceRackType
self.SourcePosition = SourcePosition
self.tipmask = tipmask
self.worklist = ""
self.cumulative_volume = 0.0
self.MAX_VOLUME = 950.0
self.queue = list()
return
def transfer(self, DestRackLabel, DestRackType, DestPosition, volume):
if (self.cumulative_volume + volume > self.MAX_VOLUME):
self._flush()
item = (DestRackLabel, DestRackType, DestPosition, volume)
self.queue.append(item)
self.cumulative_volume += volume
def _flush(self):
self.worklist += aspirate(self.SourceRackLabel, self.SourceRackType, self.SourcePosition, self.cumulative_volume + 0.01, self.tipmask)
for item in self.queue:
(DestRackLabel, DestRackType, DestPosition, volume) = item
self.worklist += dispense(DestRackLabel, DestRackType, DestPosition, volume, self.tipmask)
self.worklist += washtips()
# Clear queue.
self.queue = list()
self.cumulative_volume = 0.0
def write(self):
self._flush()
worklist = self.worklist
self.worklist = ""
return worklist
citric_acid_queue = TransferQueue('0.1M Citric Acid', 'Trough 100ml', 1, 1)
sodium_phosphate_queue = TransferQueue('0.1M Sodium Phosphate', 'Trough 100ml', 2, 2)
nrows = 16 # number of rows in plate
ncols = 24 # number of columns in plate
# Build worklist.
worklist = ""
for row_index in range(nrows):
print "Row %d :" % row_index
for (condition_index, condition) in enumerate(conditions):
# destination well of assay plate
col_index = condition_index
destination_position = nrows * col_index + row_index + 1
if (destination_position > nrows*ncols):
raise Exception("destination position out of range (%d)" % destination_position)
print " well %3d : pH : %8.1f" % (destination_position, condition['pH'])
# citric acid
volume = condition['citric acid']*buffer_volume
volume_consumed['citric acid'] += volume
citric_acid_queue.transfer('Assay Plate', assay_RackType, destination_position, volume)
# sodium phosphate
volume = condition['sodium phosphate']*buffer_volume
volume_consumed['sodium phosphate'] += volume
sodium_phosphate_queue.transfer('Assay Plate', assay_RackType, destination_position, volume)
# Write to worklist.
worklist += citric_acid_queue.write()
worklist += "B;\r\n" # ensure all citric acid pipetting is performed before sodium phosphate pipetting begins
worklist += sodium_phosphate_queue.write()
# Write worklist.
worklist_filename = 'ph-worklist-fast-384.gwl'
outfile = open(worklist_filename, 'w')
outfile.write(worklist)
outfile.close()
# Report total volumes.
print "citric acid: %8.3f uL" % volume_consumed['citric acid']
print "sodium phosphate: %8.3f uL" % volume_consumed['sodium phosphate']
|
[
"choderaj@mskcc.org"
] |
choderaj@mskcc.org
|
47f59cc217b45edd370bebd08b1eb1133309fde2
|
73ea55d9e5cd1e6f17da62e204d4f719138090fa
|
/src.bkp/util.py
|
2aefddf74d953e7c74b5dcd230cd967b80235ee5
|
[
"BSD-2-Clause"
] |
permissive
|
Rafagd/Tomiko
|
32be186923a4333e496ba65c97681fc21947bd96
|
fd44ce1cfe1b4571aeac81382793c7d5338d6701
|
refs/heads/master
| 2023-07-09T04:28:55.844270
| 2023-07-02T19:32:17
| 2023-07-02T19:32:17
| 75,745,778
| 0
| 2
|
BSD-2-Clause
| 2018-04-25T02:56:29
| 2016-12-06T15:43:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,113
|
py
|
from collections.abc import MutableSet
class classproperty(object):
def __init__(self, getter):
self.getter = getter
def __get__(self, instance, owner):
return self.getter(owner)
class ttl_set(MutableSet):
    def __init__(self, values=(), ttl=10):  # avoid the mutable-default [] pitfall
        self.data = {}  # item (stored upper-cased) -> remaining ticks
        self.ttl = ttl
        for value in values:
            self.add(value)
def __contains__(self, item):
return item.upper() in self.data
def __len__(self):
return len(self.data)
def __iter__(self):
for word in self.data:
yield word
def __repr__(self):
return repr(self.data)
def __str__(self):
return ' '.join(self.data).strip()
def add(self, item):
self.data[item.upper()] = self.ttl
def tick(self):
new_data = {}
for item in self.data:
new_ttl = self.data[item] - 1
if new_ttl >= 0:
new_data[item] = new_ttl
self.data = new_data
    def discard(self, item):
        self.data.pop(item.upper(), None)  # MutableSet.discard must not raise when absent
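# Minimal usage sketch (editor's addition): entries are stored upper-cased with
# a countdown; tick() decrements every entry and drops the ones that expire.
#   words = ttl_set(['hello'], ttl=2)
#   'hello' in words          # -> True
#   for _ in range(3): words.tick()
#   'hello' in words          # -> False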
|
[
"rafagd@gmail.com"
] |
rafagd@gmail.com
|
cb15b50f01539b5d6261c95a0bd792a75da4119a
|
781b9a4a1098f3ac339f97eb1a622924bcc5914d
|
/Exercices/S1_05_FonctionsRecursives/COURS-EMILIEN/programmes/fact_recursif_infini.py
|
3efdc8470329e85417b9c2a4f1980f3da61be950
|
[] |
no_license
|
xpessoles/Informatique
|
24d4d05e871f0ac66b112eee6c51cfa6c78aea05
|
e8fb053c3da847bd0a1a565902b56d45e1e3887c
|
refs/heads/main
| 2023-08-30T21:10:56.788526
| 2023-08-30T20:17:38
| 2023-08-30T20:17:38
| 375,464,331
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 75
|
py
|
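# Editor's note: the missing base case is deliberate -- calling this function
# recurses until Python raises RecursionError, which is what this course
# example (fact_recursif_infini) is meant to demonstrate.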
def fact_recursif(n):
return n*fact_recursif(n-1)
fact_recursif(6)
|
[
"emilien.durif@gmail.com"
] |
emilien.durif@gmail.com
|
cf389500c49895e7a7ca3d5f8f147d8b288639a5
|
b73dd400937d30b29a9c48bdbd3e81c7a035d076
|
/Games/Tools/Writing-Tool/test.py
|
6959d2fd42039c9966c5ded87d9f5a8dfe844a5e
|
[] |
no_license
|
Exodus111/Projects
|
772ffbc94f4396b8afded1b6b5095f4b084fdd7a
|
732853897ae0048909efba7b57ea456e6aaf9e10
|
refs/heads/master
| 2020-05-21T19:26:03.972178
| 2017-03-26T10:04:35
| 2017-03-26T10:04:35
| 61,034,978
| 1
| 0
| null | 2017-03-26T10:04:36
| 2016-06-13T12:36:00
|
Rust
|
UTF-8
|
Python
| false
| false
| 117
|
py
|
def foo(a,b):
return a+b
mydict = {}
tupleargs = (5,5)
mydict["func"] = foo
print(mydict["func"](*tupleargs))
|
[
"aurelioxxx@hotmail.com"
] |
aurelioxxx@hotmail.com
|
e594bac5f12675514e6e1d9ef4343c0400678389
|
67536595f3bf945965ace96e1ee432da6f797ec6
|
/area/choose-area.py
|
8fdbfbcd3f29d929e984f9ab6b94086db1742e44
|
[] |
no_license
|
loweffortwizard/Python-Activities
|
a1fe80fecfe02f217cf0b3712e60f2bc19a184b4
|
3e644c089fccc5e6308e320d314d9692c3bf48f1
|
refs/heads/master
| 2020-09-11T19:40:44.003429
| 2019-11-16T23:14:21
| 2019-11-16T23:14:21
| 222,170,478
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,275
|
py
|
'''
Expand tasks 1 & 2; write a program that asks a
user if they would like to calculate the area of a
square or a rectangle. Depending upon which choice
the user picks, the program then asks for either one
or two measurements to be entered
'''
'''
prog will calculate area of a triangle,
based on user input
https://stackoverflow.com/questions/14607890/area-of-a-triangle-python-27
https://www.programiz.com/python-programming/examples/area-triangle
'''
#importing libs
import time
import sys
import math
#wait def
def wait():
time.sleep(1)
#closing prog
def endprog(txt):
    if txt.lower() != 'y':  # str.lower() returns a new string; use its result directly
        sys.exit()
#closing prog option
def userdecision():
userchoice = str(input("If you wish to use again, press \"y\": "))
return userchoice
def main():
choose = str(input("Type 1 for square, 2 for triangle: "))
#making choice for square or triangle
if(choose == '2'):
#while true, run prog
while(True):
inputside1 = int(input("Enter side 1: "))
#get input from user, save as int in var inputside1
wait()
#wait 1 second
print(inputside1)
#print above
wait()
#wait 1 second
inputside2 = int(input("Enter side 2: "))
#get input from user, save as int in var inputside2
wait()
#wait 1 second
print(inputside2)
#print above
wait()
#wait 1 second
inputside3 = int(input("Enter side 3: "))
            #get input from user, save as int in var inputside3
wait()
#wait 1 second
print(inputside3)
#print above
wait()
#wait 1 second
half = (inputside1 + inputside2 + inputside3) / 2
            #half = the semi-perimeter: the three sides summed, divided by 2
area = (half*(half - inputside1)*(half - inputside2)*(half - inputside3))**0.5
            #area via Heron's formula: sqrt(s*(s-a)*(s-b)*(s-c))
print(area)
#print area
wait()
#wait 1 second
endprog(userdecision())
#prompt user decision
elif(choose == '1'):
#while true, run prog
while(True):
inputside1 = int(input("Enter length: "))
#get input from user, save as int in var inputside1
wait()
#wait 1 second
print(inputside1)
#print above
inputside2 = int(input("Enter width: "))
#get input from user, save as int in var inputside2
wait()
#wait 1 second
print(inputside2)
#print above
area = inputside1 * inputside2
#var area has value of inputside1 X inputside2
print(area)
#print area
wait()
#wait 1 second
endprog(userdecision())
#prompt user decision
else:
print("Error: Please type 1 or 2. ")
main()
main()
#end of prog
|
[
"noreply@github.com"
] |
loweffortwizard.noreply@github.com
|
498ced78e53463ef6ffb262399292c326c9d8b33
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/abc026/B/4966923.py
|
fc8d3d588929434bc763643e48d9daa1c10272ee
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
import math
n = int(input())
r = [int(input()) for _ in range(n)]
pi = math.pi
r.sort(reverse=True)
R = 0
for i in range(len(r)):
    # use i directly: r.index(r[i]) returns the first match and miscounts duplicate radii
    if i % 2 == 0:  # alternate ring colors: add the outermost ring, subtract the next, ...
        R += r[i]**2
    else:
        R -= r[i]**2
print(R*pi)
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
a7984c6bdffc5ad0f808b6d6a088c19a2a42375e
|
019214e2f5fc2345490d115db187632816b525e4
|
/KB_construction/knowledge_from_structured_data/copy_CBDB_from_sqlite_to_mysql.py
|
fa6ba5182429d78d48078f19684f8cfc4c5d8c87
|
[] |
no_license
|
wangbq18/KBQA_on_Chronicle
|
565afbad21c658492c0aea9834e94f298515a6e5
|
018c032c90307d23daae2e1bc5d16f3afaca184d
|
refs/heads/master
| 2020-04-10T11:49:48.256025
| 2018-11-09T06:16:53
| 2018-11-09T06:16:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,542
|
py
|
# encoding=utf-8
"""
@author: SimmerChan
@contact: 7698590@qq.com
@file: copy_CBDB_from_sqlite_to_mysql.py
@time: 2017/10/27 21:09
@desc: Convert the CBDB data to simplified Chinese characters and store it in a MySQL database
"""
import sqlite3
import pymysql
from traditional2simple import tradition2simple
import re
import traceback
from collections import OrderedDict, defaultdict
import pyodbc
def dict_factory(cursor, row):
"""
    Convert a sqlite record (row) into a dict keyed by column name
:param cursor:
:param row:
:return:
"""
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
# TODO: regular-expression filters
chinese_pattern = re.compile(u'[\u4E00-\u9FA5]+')
question_mark_pattern = re.compile(u'\?')
illegal_pattern = re.compile(u'[^\u0000-\u9FA5]+')
space_pattern = re.compile(u' ')
# TODO: connect to the local MySQL CBDB database
mysql_db = pymysql.connect(host="localhost", user="root", db="CBDB", use_unicode=True, charset="utf8mb4")
# TODO: read the CBDB database with sqlite3
sqlite_db = sqlite3.connect(r'E:\SimmerChan\lab\mywork\resources\20170424CBDBauUserSqlite.db')
row_factory = sqlite_db.row_factory
# TODO: connect to the CBDB Access database via pyodbc
conn_str = (
r'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};'
r'DBQ=E:\SimmerChan\lab\mywork\resources\20170829CBDBavBase\20170829CBDBavBase.mdb;'
)
cnxn = pyodbc.connect(conn_str)
crsr = cnxn.cursor()
mysql_cursor = mysql_db.cursor()
sqlite_cursor = sqlite_db.cursor()
# TODO: fetch the MySQL reserved keywords
mysql_cursor.execute('SELECT * FROM mysql.help_keyword')
reserved_keywords = [i[1] for i in mysql_cursor.fetchall()]
# TODO: fetch all table names
name_of_all_tables = [i[0] for i in sqlite_cursor.execute("select name from sqlite_master where type = 'table' order by name").fetchall()]
# TODO: collect each table's primary key, and note the tables without one
pk_of_table = defaultdict(list)
for table_name in name_of_all_tables:
for row in crsr.statistics(table_name, unique=True):
if row[5] is not None:
if row[5].replace(' ', '').lower().find('primarykey') != -1:
pk_of_table[table_name].append(row[8])
if len(pk_of_table[table_name]) == 0:
for row in crsr.statistics(table_name, unique=True):
pk_of_table[table_name].append(row[8])
for k, v in sorted(pk_of_table.iteritems(), key=lambda item: item[0]):
if v[0] is not None:
pk_str = 'primary key ('
for pk in v:
pk_str += pk + ','
pk_str = pk_str[:-1] + ')'
pk_of_table[k] = pk_str
else:
del pk_of_table[k]
# TODO: record tables that have duplicate keys
# table_with_dk = defaultdict(tuple)
name_of_table_with_PK = pk_of_table.keys()
print 'Total tables which contain a PK: {0}'.format(len(name_of_table_with_PK))
for index, table_name in enumerate(name_of_table_with_PK):
print '{0}.Table {1} transferring...................'.format(index + 1, table_name)
    # TODO: restore sqlite's default row factory
sqlite_db.row_factory = row_factory
sqlite_cursor = sqlite_db.cursor()
    # TODO: get the type of every field in this table
field_types = OrderedDict()
field_with_type = ''
fields_str = ''
values_str = ''
for i in sqlite_cursor.execute("PRAGMA TABLE_INFO({0})".format(table_name)).fetchall():
        # TODO: backtick-quote field names that contain spaces or are reserved words
field_name = i[1]
if field_name.upper() in reserved_keywords or space_pattern.search(field_name) is not None:
field_with_type += '`' + field_name + '`'
fields_str += '`' + field_name + '`,'
else:
field_with_type += field_name
fields_str += field_name + ','
values_str += '%s,' + ' '
if i[2] == u'CHAR':
field_types[field_name] = u'TEXT'
field_with_type += ' ' + u'TEXT' + ','
else:
field_types[field_name] = i[2]
field_with_type += ' ' + i[2] + ','
fields_str = fields_str[:-1]
values_str = values_str[:-2]
    # TODO: create the corresponding table in MySQL
field_with_type = field_with_type[:-1]
create_command = "create table {0} ({1}, {2}) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci".format(table_name, field_with_type, pk_of_table[table_name])
try:
mysql_cursor.execute(create_command)
except pymysql.err.InternalError:
print '{0}.Table {1} transferred successfully!\n'.format(index + 1, table_name)
continue
except pymysql.err.ProgrammingError:
traceback.print_exc()
print create_command
for k, v in field_types.iteritems():
print k, v
exit()
    # TODO: fetch all records of this table, convert CHAR-typed values to simplified Chinese, and insert them into the MySQL table
sqlite_db.row_factory = dict_factory
sqlite_cursor = sqlite_db.cursor()
records = sqlite_cursor.execute("select * from {0}".format(table_name)).fetchall()
insert_command = "insert into {0} ({1}) values ({2})".format(table_name, fields_str, values_str)
values_list = list()
for record in records:
values = list()
for k, v in field_types.iteritems():
if (v.find('CHAR') != -1 or v.find('TEXT') != -1) and record[k] is not None and chinese_pattern.search(record[k]) is not None:
values.append(tradition2simple(record[k]))
else:
values.append(record[k])
# try:
# mysql_cursor.execute(insert_command, values)
# except (pymysql.err.InternalError, pymysql.err.DataError):
# for v in values_list:
# print v
# print create_command
# print insert_command
# traceback.print_exc()
# exit()
#
# except pymysql.err.IntegrityError, e:
    # # TODO: record tables with duplicate keys; check whether the traditional-to-simplified conversion caused them
# if table_name not in table_with_dk:
# table_with_dk[table_name] = e
# continue
values_list.append(tuple(values))
# mysql_db.commit()
try:
mysql_cursor.executemany(insert_command, values_list)
mysql_db.commit()
except (pymysql.err.InternalError, pymysql.err.DataError, pymysql.err.IntegrityError):
for v in values_list:
print v
print create_command
print insert_command
traceback.print_exc()
exit()
print '{0}.Table {1} transferred successfully!\n'.format(index + 1, table_name)
print
name_of_table_without_PK = list()
for n in name_of_all_tables:
if n not in name_of_table_with_PK:
name_of_table_without_PK.append(n)
print 'Total tables which don\'t contain a PK: {0}'.format(len(name_of_table_without_PK))
for index, table_name in enumerate(name_of_table_without_PK):
print '{0}.Table {1} transferring...................'.format(index + 1, table_name)
    # TODO: restore sqlite's default row factory
sqlite_db.row_factory = row_factory
sqlite_cursor = sqlite_db.cursor()
    # TODO: get the type of every field in this table
field_types = OrderedDict()
field_with_type = ''
fields_str = ''
values_str = ''
for i in sqlite_cursor.execute("PRAGMA TABLE_INFO({0})".format(table_name)).fetchall():
        # TODO: backtick-quote field names that contain spaces or are reserved words
field_name = i[1]
if field_name.upper() in reserved_keywords or space_pattern.search(field_name) is not None:
field_with_type += '`' + field_name + '`'
fields_str += '`' + field_name + '`,'
else:
field_with_type += field_name
fields_str += field_name + ','
values_str += '%s,' + ' '
if i[2] == u'CHAR':
field_types[field_name] = u'TEXT'
field_with_type += ' ' + u'TEXT' + ','
else:
field_types[field_name] = i[2]
field_with_type += ' ' + i[2] + ','
fields_str = fields_str[:-1]
values_str = values_str[:-2]
    # TODO: create the corresponding table in MySQL
field_with_type = field_with_type[:-1]
create_command = "create table {0} ({1}) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci".format(table_name, field_with_type)
try:
mysql_cursor.execute(create_command)
except pymysql.err.InternalError:
print '{0}.Table {1} transferred successfully!\n'.format(index + 1, table_name)
continue
except pymysql.err.ProgrammingError:
traceback.print_exc()
print create_command
for k, v in field_types.iteritems():
print k, v
exit()
    # TODO: fetch all records of this table, convert CHAR-typed values to simplified Chinese, and insert them into the MySQL table
sqlite_db.row_factory = dict_factory
sqlite_cursor = sqlite_db.cursor()
records = sqlite_cursor.execute("select * from {0}".format(table_name)).fetchall()
insert_command = "insert into {0} ({1}) values ({2})".format(table_name, fields_str, values_str)
values_list = list()
for record in records:
values = list()
for k, v in field_types.iteritems():
if (v.find('CHAR') != -1 or v.find('TEXT') != -1) and record[k] is not None and chinese_pattern.search(record[k]) is not None:
values.append(tradition2simple(record[k]))
else:
values.append(record[k])
# try:
# mysql_cursor.execute(insert_command, values)
# except (pymysql.err.InternalError, pymysql.err.DataError):
# for v in values_list:
# print v
# print create_command
# print insert_command
# traceback.print_exc()
# exit()
#
# except pymysql.err.IntegrityError, e:
    # # TODO: record tables with duplicate keys; check whether the traditional-to-simplified conversion caused them
# if table_name not in table_with_dk:
# table_with_dk[table_name] = e
# continue
values_list.append(tuple(values))
# mysql_db.commit()
try:
mysql_cursor.executemany(insert_command, values_list)
mysql_db.commit()
except (pymysql.err.InternalError, pymysql.err.DataError, pymysql.err.IntegrityError):
for v in values_list:
print v
print create_command
print insert_command
traceback.print_exc()
exit()
print '{0}.Table {1} transferred successfully!\n'.format(index + 1, table_name)
# print 'Duplicate Table name with example error message:\n'
# for k, v in table_with_dk.iteritems():
# print k, v
# TODO: close the database connections
mysql_db.close()
sqlite_db.close()
|
[
"7698590@qq.com"
] |
7698590@qq.com
|
63bb8c9ff2c252b8d223665c0abcbf9b1ba3cfed
|
4325b3b69555788b70c678b3e14c45d7cc68a55a
|
/face_recognition.py
|
e3991eab90216308aebdc96a607b0f8290efd024
|
[] |
no_license
|
Jappan07/Facial_Recognition_System
|
1b63e1eedc302c2fca2052316c94337c09b2e80d
|
cc6494e9670b4c9aecc2007a0370a7c2801c714d
|
refs/heads/master
| 2022-11-14T15:35:34.744303
| 2020-06-10T19:19:33
| 2020-06-10T19:19:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,454
|
py
|
import cv2
import numpy as np
import os
########## KNN CODE ############
def distance(v1, v2):
    # Euclidean distance
return np.sqrt(((v1-v2)**2).sum())
def knn(train, test, k=5):
dist = []
for i in range(train.shape[0]):
# Get the vector and label
ix = train[i, :-1]
iy = train[i, -1]
# Compute the distance from test point
d = distance(test, ix)
dist.append([d, iy])
# Sort based on distance and get top k
dk = sorted(dist, key=lambda x: x[0])[:k]
# Retrieve only the labels
labels = np.array(dk)[:, -1]
# Get frequencies of each label
output = np.unique(labels, return_counts=True)
# Find max frequency and corresponding label
index = np.argmax(output[1])
return output[0][index]
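# Minimal self-test sketch for knn() (editor's addition; synthetic data --
# train rows are [features..., label], test is a bare feature vector):
#   _train = np.array([[0.0, 0.0, 0], [0.1, 0.0, 0], [1.0, 1.0, 1]])
#   knn(_train, np.array([0.05, 0.0]), k=3)  # -> 0.0 (majority label)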
################################
#Init Camera
cap = cv2.VideoCapture(0)
# Face Detection
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
skip = 0
dataset_path = '/Users/jappanjeetsingh/Desktop/FacialRecognitionSystem/data/'
face_data = []
labels = []
class_id = 0 # Labels for the given file
names = {} #Mapping btw id - name
# Data Preparation
for fx in os.listdir(dataset_path):
if fx.endswith('.npy'):
#Create a mapping btw class_id and name
names[class_id] = fx[:-4]
print("Loaded "+fx)
data_item = np.load(dataset_path+fx)
face_data.append(data_item)
#Create Labels for the class
target = class_id*np.ones((data_item.shape[0],))
class_id += 1
labels.append(target)
face_dataset = np.concatenate(face_data,axis=0)
face_labels = np.concatenate(labels,axis=0).reshape((-1,1))
print(face_dataset.shape)
print(face_labels.shape)
trainset = np.concatenate((face_dataset,face_labels),axis=1)
print(trainset.shape)
# Testing
while True:
ret,frame = cap.read()
if ret == False:
continue
faces = face_cascade.detectMultiScale(frame,1.3,5)
if(len(faces)==0):
continue
for face in faces:
x,y,w,h = face
#Get the face ROI
offset = 10
face_section = frame[y-offset:y+h+offset,x-offset:x+w+offset]
face_section = cv2.resize(face_section,(100,100))
#Predicted Label (out)
out = knn(trainset,face_section.flatten())
#Display on the screen the name and rectangle around it
pred_name = names[int(out)]
cv2.putText(frame,pred_name,(x,y-10),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0),2,cv2.LINE_AA)
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,255),2)
cv2.imshow("Faces",frame)
key = cv2.waitKey(1) & 0xFF
if key==ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
[
"jappanjeet.99@gmail.com"
] |
jappanjeet.99@gmail.com
|
18e9fa15b3b4b66178c5906fb12f6025d5a60cc1
|
66ab7147772963e5aed972273037715121fa4123
|
/theme_hort/models/res_partner.py
|
b5559c3d107c8b21d1e9ab0ef71674676b0a3f04
|
[] |
no_license
|
OdooBulgaria/trust-themes
|
3c5e9f460724acc634819a977d81f07f3127fdca
|
efeaee432ffa8464d533076b0fa6a88ae2424d32
|
refs/heads/master
| 2021-01-16T21:52:52.000138
| 2016-04-12T20:51:22
| 2016-04-12T20:51:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,591
|
py
|
# -*- coding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2016 Trustcode - www.trustcode.com.br #
# Danimar Ribeiro <danimaribeiro@gmail.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
from openerp import api, fields, models
from openerp.exceptions import Warning
class ResPartner(models.Model):
_inherit = 'res.partner'
gender = fields.Selection([('0', 'Masculino'), ('1', 'Feminino')],
string=u"Sexo")
date_birth = fields.Date(string=u"Data de Nascimento")
join_events = fields.Boolean(string=u"Gostaria de participar em eventos")
produce_ids = fields.Many2many(
comodel_name='product.product', string="Produz",
relation="product_product_res_partner_rel_produces",
help="Itens que o parceiro produz")
interest_in_ids = fields.Many2many(
comodel_name='product.product', string="Tem interesse",
relation="product_product_res_partner_rel_interest",
help="Itens que o parceiro gostaria de adquirir")
post_category_ids = fields.Many2many(
comodel_name='blog.post.category', string="Temas de interesse",
relation="blog_post_category_res_partner_rel",
help="Temas que o parceiro tem interesse")
|
[
"danimaribeiro@gmail.com"
] |
danimaribeiro@gmail.com
|
7215a16e4e6627a856c4d33235a46b86a998d951
|
6ca1ccd426079260cd69defe77ff2dda2621f994
|
/core/routines/indices/colony_name_index.py
|
e30aee96fd2f248d45131b95eb4b75a49ba5f2eb
|
[] |
no_license
|
OdysseyScorpio/GlitterBot
|
d7307e27b6a760f65e96b1b41b8545a3973490c3
|
b9a9ef6ddda18c6eeab0401b1ec8de05d251ad22
|
refs/heads/master
| 2022-12-18T14:21:32.077798
| 2020-12-06T20:58:06
| 2020-12-06T20:58:06
| 253,093,614
| 0
| 0
| null | 2022-12-08T05:28:13
| 2020-04-04T20:34:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,751
|
py
|
from collections import Counter
from lib.database import Database
from lib.gwpcc.consts import KEY_COLONY_FULL_TEXT_INDEX, KEY_COLONY_METADATA, \
KEY_COLONY_INDEX_BY_ID
from lib.log import Logger
def update():
db = Database().connection
Logger().log.debug('Clearing existing colony name indices')
pipe = db.pipeline()
for key_to_delete in db.scan_iter(KEY_COLONY_FULL_TEXT_INDEX.format('*'), 10000):
pipe.delete(key_to_delete)
pipe.execute()
Logger().log.debug('Fetching master Colony ID index')
# Load the colony index
colony_index = db.lrange(KEY_COLONY_INDEX_BY_ID, 0, -1)
# For each colony
pipe = db.pipeline()
for colony_hash in colony_index:
pipe.hgetall(KEY_COLONY_METADATA.format(colony_hash))
colony_results = dict(zip(colony_index, pipe.execute()))
pipe = db.pipeline()
data_keys = ['BaseName', 'Planet', 'FactionName']
Logger().log.debug('Building colony indices')
    # For each colony, score its name fields letter by letter
    # (e.g. a BaseName of 'Aba' adds 2 to the 'a' index and 1 to the 'b' index)
for colony_hash, colony_data in colony_results.items():
# Now split the new name and update the indices
for data_key in data_keys:
try:
# Count how many times a letter occurs in the word
scores = Counter(str(colony_data[data_key]).lower())
for letter, score in scores.items():
pipe.zincrby(KEY_COLONY_FULL_TEXT_INDEX.format(letter), score, colony_hash)
except KeyError as e:
Logger().log.error('Error processing Colony: {}, Error was {}'.format(colony_hash, e))
break
# Execute
Logger().log.debug('Writing out colony indices to database')
pipe.execute()
Logger().log.debug('Finished colony indices')
|
[
"martyn.robert.green@gmail.com"
] |
martyn.robert.green@gmail.com
|
acfbf39fbaf3c6428824f782380af0caf1c695ad
|
9479b32b03d5fa5e36652854dac10670e069eca7
|
/random_walks/ch2_graphs/pygal_die/die_visual.py
|
b72238eb8451fa8dfda15730161ccb217792150c
|
[] |
no_license
|
qetennyson/maththeme_ct18
|
265c8e9d479e7f13db88f705bb0a7f30805ddccd
|
1f2dc321e2a57c34ab9438e4120c01afa2ae9751
|
refs/heads/master
| 2020-04-08T00:53:25.208158
| 2018-12-10T21:27:49
| 2018-12-10T21:27:49
| 158,872,179
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
from main import Die
# Create a d6
die = Die()
# Make some rolls, and store results in a list
results = []
for roll_num in range(100):
result = die.roll()
results.append(result)
print(results)
|
[
"quincytennyson8@gmail.com"
] |
quincytennyson8@gmail.com
|
a210ba0463235061902c9f2b6534784cb3e3bba1
|
2d3cbf5933567ce3c3dcb8f004f1571067742f87
|
/tools/graph-convert/make-test-graph
|
ed77670651b53a786e66495f2c5d0a3cbf8da5d8
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
KatanaGraph/katana
|
9de617944264873198ec7db5ed022d356b1a529a
|
350b6606da9c52bc82ff80f64ffdde8c4bfdacce
|
refs/heads/master
| 2022-06-24T02:50:16.426847
| 2022-03-29T12:23:22
| 2022-03-29T12:23:22
| 310,108,707
| 85
| 83
|
NOASSERTION
| 2023-08-09T00:07:55
| 2020-11-04T20:19:01
|
C++
|
UTF-8
|
Python
| false
| false
| 3,723
|
#!/usr/bin/env python3
import random, sys, getopt
head = '''<?xml version="1.0" encoding="UTF-8"?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
'''
edge_properties = '''<key id="str" for="edge" attr.name="str" attr.type="string"/>
<graph id="G" edgedefault="undirected">
'''
tail = '''</graph>
</graphml>
'''
numNodeProperties = 10
numNodes = 10_000
numEdges = 100_000
def print_node_properties(numProperties: int) :
for i in range(numProperties) :
print(f'<key id="p{i:02}" for="node" attr.name="p{i:02}" attr.type="long"/>')
def print_nodes(numNodes: int) :
"""
    Print numNodes nodes, each with numNodeProperties integer properties in [Start, Stop] such that each property sums to 0 across all nodes
"""
Start = -1_000_000
Stop = 1_000_000
randInt = [[random.randrange(Start, Stop+1) for iter in range(numNodes-1)]
for i in range(numNodeProperties)]
for p in range(numNodeProperties) :
randInt[p].append( 0 - sum(randInt[p]) )
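    # Editor's note: drawing numNodes-1 values at random and appending the
    # negated sum pins each property's total to exactly 0, a cheap invariant
    # that downstream tests of the generated graph can assert.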
for i in range(numNodes) :
print(f'<node id="n{i:04}">')
for p in range(numNodeProperties) :
print(f'<data key="p{p:02}">{randInt[p][i]}</data>')
print('</node>')
def print_edges(numNodes: int, numEdges: int) :
"""
Print edges between numNodes nodes, each with a str property of length [0,100] of a's (positive) or b's (negative) that sum to 0
"""
Start = -100
Stop = 100
randInt = [random.randrange(Start, Stop+1) for iter in range(numEdges-1)]
randInt.append( 0 - sum(randInt) )
for i in range(len(randInt)) :
nodeCycle = int(i / numNodes) + 1
print(f'<edge id="e{i:04}" source="n{i%numNodes:04}" target="n{(i+nodeCycle)%numNodes:04}">')
if(randInt[i] == 0) :
print('<data key="str"></data> </edge>')
elif(randInt[i] > 0) :
print(f'<data key="str">{"a" * randInt[i]}</data> </edge>')
else :
print(f'<data key="str">{"b" * -randInt[i]}</data> </edge>')
######################################################################
## Parse options and usage
def usage():
print("./gen_graph.py [-h] [-n numNodes] [-e numEdges] [-p numNodeProperties]")
print("-n (node) is the number of nodes in the graph (default 1,000)")
print("-p (node_prop) is the number of properties for each node (default 10)")
print("-e (edge) is the number of edges (default 10,000, less than nunNodes @@ 2")
print("-h is for help")
def parse_args(args):
global numNodes, numEdges, numNodeProperties
try:
opts, pos_args = getopt.getopt(sys.argv[1:], "he:n:p:", ["help", "edge=", "node=", "node_prop="])
except getopt.GetoptError:
# print help information and exit:
usage()
sys.exit(2)
opt_update = False
for o, a in opts:
if o in ("-e", "--edge"):
numEdges = int(a)
elif o in ("-n", "--node"):
numNodes = int(a)
elif o in ("-p", "--node_prop"):
numNodeProperties = int(a)
elif o in ("-h", "--help"):
usage()
sys.exit()
else :
print("Option error (%s)" % o)
usage()
sys.exit()
return pos_args
if __name__ == '__main__':
pos_args = parse_args(sys.argv[1:])
assert numEdges <= (numNodes * numNodes), "At most numNode*numNode edges"
print(head)
print_node_properties(numNodeProperties)
print(edge_properties)
print_nodes(numNodes)
print_edges(numNodes, numEdges)
print(tail)
|
[
"ddn0@users.noreply.github.com"
] |
ddn0@users.noreply.github.com
|
|
91781dad9345e1a76f1f332e73a93b3815ff68d6
|
6986ec00b5fcd644c1ef17c40e9ea1997f1c4ebf
|
/setup.py
|
45c985795148d311a6dc2c9605ce5d219a2a0f7a
|
[] |
no_license
|
chnyangjie/wiz_ali_ecs
|
db3f7e2fdf11462488ceecda6389d111228bebb1
|
08d39ab6d3bab1e96d96e5bd9b92d1404a98d12a
|
refs/heads/master
| 2023-08-30T21:30:17.971788
| 2021-10-09T06:30:52
| 2021-10-09T06:30:52
| 415,223,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,578
|
py
|
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='wiz-ali-ecs',
version='1.0.0',
description='wiz-ali-ecs',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://chnyangjie.github.io/',
author='chnyangjie',
author_email='chnyangjie@gmail.com',
classifiers=[
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only',
],
keywords='python ali log sdk wrapper',
package_dir={'wiz_ali_ecs': 'src/wiz_ali_ecs'},
packages=find_packages(where='src'),
python_requires='>=3.6, <4',
install_requires=['aliyun-python-sdk-ecs'],
py_modules=['wiz_ali_ecs'],
project_urls={
'Bug Reports': 'https://github.com/chnyangjie/wiz_ali_ecs/issues',
'Say Thanks!': 'https://github.com/chnyangjie/wiz_ali_ecs/issues',
'Source': 'https://github.com/chnyangjie/wiz_ali_ecs',
},
)
|
[
"yangjie@xueqiu.com"
] |
yangjie@xueqiu.com
|
668a92e99a99f8d68bc880947a617b6f9a4e2b82
|
d24ac419bb3d78eb10e97196c7be206a90f34d2c
|
/CHMODHelper.py
|
79d61048ca4e02960f167f1ff0f21ca672597162
|
[] |
no_license
|
AsherNoor/PyAlly
|
7b89d3e58c28cc7903c7c8bc5a7dfec2da52be1d
|
7412665880df1757a3f50f886a0366d22be8f930
|
refs/heads/main
| 2023-06-23T01:43:17.655641
| 2021-07-21T17:40:59
| 2021-07-21T17:40:59
| 363,929,888
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,606
|
py
|
"""
--------------
| The PyAlly |
---------------
| CHMOD Helper|
---------------
"""
import _pyally_main as pyally
# ---- Defining Functions ----
# The Intro Function
def chmod_menu():
print(""""
-----------------
| CHMOD Helper |
----------------------------------------------------
| Helps you figure out the CHMOD permissions code. |
----------------------------------------------------
""")
    chmod() #-- calling the CHMOD options function
# The CHMOD Options Function
def chmod():
print("\n-----------------------"
"\nChoose The Permissions"
"\n-----------------------"
"\n1: Execute [--x ]"
"\n2: Write [ -w- ]"
"\n3: Execute & Write [ -wx ]"
"\n4: Read [ r-- ]"
"\n5: Read & Execute [ r-x ]"
"\n6: Read & Write [ rw- ]"
"\n7: Read, Write, & Execute [ rwx ]" "\n")
# UI's choices.
# user
u = int(input(" What is your choice for USER: "))
# group
g = int(input(" What is your choice for GROUP: "))
# everyone
e = int(input(" What is your choice for EVERYONE: "))
    # Concatenating the digits into the numeric code
uicode = str(u)+str(g)+str(e)
code = uicode
vcode = (u, g, e)
# code test
#print("\nTest Code:"+ code)
    # Displaying the numeric results (e.g. 7,5,4 -> "754" -> rwxr-xr--)
print("\nThis is your numeric CHMOD code: "+ code)
# Call the visual function
visual(vcode)
# Creating the Visuals of the permissions chosen
def visual(vcode):
print("This will be your file's permissions: ", end="")
for x in vcode:
if (x == 1):
print("--x", end="")
elif (x == 2):
print("-w-", end="")
elif (x == 3):
print("-wx", end="")
elif (x == 4):
print("r--", end="")
elif (x == 5):
print("r-x", end="")
elif (x == 6):
print("rw-", end="")
elif (x == 7):
print("rwx", end="")
else:
print("Invalid Option")
# Call the again function
internal_loop()
#-- Internal Loop Function
def internal_loop():
loop_answer = input("\nBack to the CHMOD? [y/n] : ")
if loop_answer.lower()== 'y':
chmod()
else:
pyally.mini_menu()
# ----- End of Defining Functions ----
# The Main Guard in ALL the files please.
'''------------------
CALLING THE FUNCTIONS
----------------------'''
#-- Using a Main Guard to prevent it from running when Imported.
if __name__ == '__main__':
chmod_menu() # <-- calling the intro function
|
[
"noreply@github.com"
] |
AsherNoor.noreply@github.com
|
74b71c6f9eed1dded2c4498fee94c29e4efddc81
|
a1fde11f38a7718760b9fbbadf4dd427a13d0241
|
/Python00/com/test01/type01.py
|
d48f53cf38b9ea9f130b84641635aff069dd5e03
|
[] |
no_license
|
moonhyeji/Python
|
f69d889504b8a1def95e6bd80d849514aca9cbdf
|
c8e143cbf79c2a38c9a7a964f231873cef361d92
|
refs/heads/main
| 2023-06-02T07:31:56.855243
| 2021-06-14T07:09:20
| 2021-06-14T07:09:20
| 372,847,316
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
#number
#integers
a = 100
print(a)
print(type(a))
print(int(9.8))
print(int(7/6))
print(int('5')+1) #6
#floats
b = 123.45
print(b)
print(type(b))
print(float(4)) #4.0
print(float(3+2)) #5.0
print(type(float('1.2'))) #<class 'float'>
# binary, octal, hexadecimal
c = 0b1111 #binary
print(c)
d = 0o77 #octal
print(d)
e = 0xff #hexadecimal
print(e)
|
[
"mhj5601@gmail.com"
] |
mhj5601@gmail.com
|
4ccd40c4bd69d5140f4f56aac67c8c94dca387d0
|
a0b2acfb5d49da2b313fa2b8ee8eb5bdba359ced
|
/models/user.py
|
1df44aae4589a292da389f36dc55bd6fa6d671f0
|
[] |
no_license
|
mdcravero/FastAPI-SQLAlchemy-Example
|
79a9d1f242a793b5a196a72b7280d25f16abb722
|
2082ba1c5190249013078129a004d2d73c645a39
|
refs/heads/main
| 2023-08-29T11:06:11.034056
| 2021-11-04T15:37:21
| 2021-11-04T15:37:21
| 424,646,824
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
from sqlalchemy import Table, Column
from sqlalchemy.sql.sqltypes import Integer, String
from config.db import meta, engine
users = Table("users", meta,
Column("id", Integer, primary_key=True),
Column("name", String(255)),
Column("email", String(255)),
Column("password", String(255))
)
meta.create_all(engine)
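# Minimal usage sketch (editor's addition; assumes `engine` from config.db is
# an ordinary SQLAlchemy engine -- newer SQLAlchemy versions also require an
# explicit conn.commit()):
#   with engine.connect() as conn:
#       conn.execute(users.insert().values(
#           name="Ada", email="ada@example.com", password="store-a-hash-here"))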
|
[
"mdcravero@gmail.com"
] |
mdcravero@gmail.com
|
ae97c4d6d9945a19d54b777cb5d7fdc24dcc82ad
|
d72b83d536a124f9de9d5ae870f38f9edfe7f531
|
/PyPoll/main.py
|
2c7e2b4d3854a75a10085199f33585ff333378d8
|
[] |
no_license
|
grecia1534/Python-Challenge
|
52c90fe3e0191a37710439fe03d57521e375760b
|
15083e61a7ed4c654a51e57f9cd75c6c999fd065
|
refs/heads/master
| 2022-12-03T21:35:11.503407
| 2020-08-13T14:41:49
| 2020-08-13T14:41:49
| 287,136,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,482
|
py
|
# Imports
import csv
from pathlib import Path
# Assign file location with the pathlib library
csv_file_path = Path("python-challenge", "PyPoll", "election_data.csv")
# Declare Variables
total_votes = 0
khan_votes = 0
correy_votes = 0
li_votes = 0
otooley_votes = 0
# Open csv in default read mode with context manager
with open(csv_file_path,newline="", encoding="utf-8") as elections:
# Store data under the csvreader variable
csvreader = csv.reader(elections,delimiter=",")
# Skip the header so we iterate through the actual values
header = next(csvreader)
# Iterate through each row in the csv
for row in csvreader:
# Count the unique Voter ID's and store in variable called total_votes
total_votes +=1
        # There are four candidates; when a row's name matches, increment that candidate's counter
        # These counts feed the percent-of-vote calculations in the print statements below
if row[2] == "Khan":
khan_votes +=1
elif row[2] == "Correy":
correy_votes +=1
elif row[2] == "Li":
li_votes +=1
elif row[2] == "O'Tooley":
otooley_votes +=1
# To find the winner we want to make a dictionary out of the two lists we previously created
candidates = ["Khan", "Correy", "Li","O'Tooley"]
votes = [khan_votes, correy_votes,li_votes,otooley_votes]
# Zip the candidates (keys) together with their vote totals (values)
# Return the winner using a max function of the dictionary
dict_candidates_and_votes = dict(zip(candidates,votes))
key = max(dict_candidates_and_votes, key=dict_candidates_and_votes.get)
# Compute each candidate's percentage of the total vote
khan_percent = (khan_votes/total_votes) *100
correy_percent = (correy_votes/total_votes) * 100
li_percent = (li_votes/total_votes)* 100
otooley_percent = (otooley_votes/total_votes) * 100
# Print the summary table
print(f"Election Results")
print(f"----------------------------")
print(f"Total Votes: {total_votes}")
print(f"----------------------------")
print(f"Khan: {khan_percent:.3f}% ({khan_votes})")
print(f"Correy: {correy_percent:.3f}% ({correy_votes})")
print(f"Li: {li_percent:.3f}% ({li_votes})")
print(f"O'Tooley: {otooley_percent:.3f}% ({otooley_votes})")
print(f"----------------------------")
print(f"Winner: {key}")
print(f"----------------------------")
# Output files
# Assign output file location and with the pathlib library
output_file = Path("python-challenge", "PyPoll", "Election_Results_Summary.txt")
with open(output_file,"w") as file:
# Write methods to print to Elections_Results_Summary
file.write(f"Election Results")
file.write("\n")
file.write(f"----------------------------")
file.write("\n")
file.write(f"Total Votes: {total_votes}")
file.write("\n")
file.write(f"----------------------------")
file.write("\n")
file.write(f"Khan: {khan_percent:.3f}% ({khan_votes})")
file.write("\n")
file.write(f"Correy: {correy_percent:.3f}% ({correy_votes})")
file.write("\n")
file.write(f"Li: {li_percent:.3f}% ({li_votes})")
file.write("\n")
file.write(f"O'Tooley: {otooley_percent:.3f}% ({otooley_votes})")
file.write("\n")
file.write(f"----------------------------")
file.write("\n")
file.write(f"Winner: {key}")
file.write("\n")
file.write(f"----------------------------")
|
[
"noreply@github.com"
] |
grecia1534.noreply@github.com
|
e3c75de79ef03dfb3cffac420da401b587d9470a
|
345c61e821d2b386da84875d22a3b2ccd88b9108
|
/ML/DDoS_prediction_linear_regression.py
|
d4819e90a052152be27dfb16b44fbd241f8b820a
|
[] |
no_license
|
amit1809/CMPE_272_mini_project
|
bb411165324d199ac702eafa37cf126831d531fe
|
fca025c57b029cc0bf795503a0e32d730bf64237
|
refs/heads/master
| 2022-06-03T20:18:25.132688
| 2020-04-27T00:49:19
| 2020-04-27T00:49:19
| 255,184,545
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,074
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import findspark
findspark.init('/home/aarav/Amit/SJSU/CMPE_272/mini_project/spark_setup/spark-2.4.5-bin-hadoop2.6')
# In[2]:
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
# In[4]:
data = spark.read.csv('../dataset/kaggle_small_dataset/applicationlayer-ddos-dataset/train_mosaic.csv', header=True, inferSchema=True)
# In[ ]:
# In[6]:
data.limit(10).toPandas()
# In[7]:
data.count()
# In[8]:
data.groupBy("Label").count().show()
# In[10]:
from pyspark.ml.feature import StringIndexer
indexer = StringIndexer(inputCol="Label", outputCol="LabelIndex")
indexed = indexer.fit(data).transform(data)
new_data = indexed.drop("Label")
new_data.limit(10).toPandas()
# In[11]:
#feature_columns = new_data.columns['Destination_Port','Flow_Duration','Total_Fwd_Packets','Total_Backward_Packets','Total_Length_of_Fwd_Packets','Total_Length_of_Bwd_Packets'] # here we omit the final 2 columns
#feature_columns = ['Destination_Port','Flow_Duration','Total_Fwd_Packets','Total_Backward_Packets','Total_Length_of_Fwd_Packets','Total_Length_of_Bwd_Packets']
feature_columns = data.columns[:-2]
from pyspark.ml.feature import VectorAssembler
assembler = VectorAssembler(inputCols=feature_columns,outputCol="features")
# In[12]:
data_2 = assembler.transform(new_data)
# In[13]:
data_2.select("features").show(truncate=False)
# In[14]:
data_2.limit(10).toPandas()
# In[15]:
train, test = data_2.randomSplit([0.7, 0.3])
# In[16]:
from pyspark.ml.regression import LinearRegression
# In[ ]:
# In[17]:
algo = LinearRegression(featuresCol="features", labelCol="LabelIndex")
# In[18]:
model = algo.fit(train)
# In[19]:
evaluation_summary = model.evaluate(test)
# In[21]:
evaluation_summary.meanAbsoluteError
# In[22]:
evaluation_summary.rootMeanSquaredError
# In[23]:
evaluation_summary.r2
# In[24]:
predictions = model.transform(test)
# In[27]:
predictions.select(predictions.columns[75:]).limit(20).toPandas()
# In[ ]:
|
[
"amitsharma1809@yahoo.com"
] |
amitsharma1809@yahoo.com
|
a8d5c666c135aecea542dde00f2a0ba42357d7ce
|
783070b66238376d7d00c9ef56b644528ec9b8ed
|
/index numbering.py
|
91fe56dd2b9390cd7b51f9da8d13660917601760
|
[] |
no_license
|
muhammed94munshid/code-kata-beginner-2
|
4e2edb88f904e0c221b535f48db890acdcbc22b6
|
948ecfd6a552e5dc8252f6c0068dfbd72b6609d8
|
refs/heads/master
| 2021-04-29T20:18:08.259392
| 2018-05-24T06:29:54
| 2018-05-24T06:29:54
| 121,594,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 64
|
py
|
a = [2,1,3,4]
for idx, val in enumerate(a):
print(idx, val)
|
[
"noreply@github.com"
] |
muhammed94munshid.noreply@github.com
|
c397938205740b3cb75505b0afe70b317a4d9e7e
|
0b08faea1f02f153815672f9862f734cba9e254e
|
/netrep/conv_layers.py
|
2dc743cdf0cec687cb93f08ff59a801a321c7a4c
|
[] |
no_license
|
robot-ai-machinelearning/netrep
|
d3a9003f9652394107696d9a524a2a8ff38cdf93
|
775be8de79e76fb1f0388da55d560808a8f4d50c
|
refs/heads/main
| 2023-04-04T01:15:03.628464
| 2021-04-13T16:24:04
| 2021-04-13T16:24:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,472
|
py
|
import numpy as np
import itertools
from netrep.validation import check_equal_shapes
from tqdm import tqdm
def convolve_metric(metric, X, Y):
"""
Computes representation metric between convolutional layers,
convolving activations with boundary conditions.
Parameters
----------
metric : Metric
Specifies metric to compute.
X : ndarray
Activations from first layer (images x height x width x channel)
Y : ndarray
Activations from second layer (images x height x width x channel)
Returns
-------
dists : ndarray
Matrix with shape (height x width). Holds `metric.score()` for
X and Y, convolving over the two spatial dimensions.
"""
# Inputs are (images x height x width x channel) tensors, holding activations.
X, Y = check_equal_shapes(X, Y, nd=4, zero_pad=metric.zero_pad)
m, h, w, c = X.shape
# Flattened Y tensor.
Yf = Y.reshape(-1, c)
# Compute metric over all possible offsets.
pbar = tqdm(total=(w * h))
dists = np.full((h, w), -1.0)
for i, j in itertools.product(range(h), range(w)):
# Apply shift to X tensor, then flatten.
shifts = (i - (h // 2), j - (w // 2))
Xf = np.roll(X, shifts, axis=(1, 2)).reshape(-1, c)
# Fit and evaluate metric.
metric.fit(Xf, Yf)
dists[i, j] = metric.score(Xf, Yf)
# Update progress bar.
pbar.update()
pbar.close()
return dists
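
# A minimal usage sketch, assuming any object with a `zero_pad` attribute and
# `fit(X, Y)` / `score(X, Y)` methods works as `metric`. _StubMetric below is
# a hypothetical stand-in (mean squared difference), not one of netrep's real
# metric classes.
if __name__ == "__main__":
    class _StubMetric:
        zero_pad = True

        def fit(self, X, Y):
            pass  # a real metric would fit its alignment here

        def score(self, X, Y):
            return float(np.mean((X - Y) ** 2))

    X = np.random.randn(8, 5, 5, 3)
    Y = np.random.randn(8, 5, 5, 3)
    print(convolve_metric(_StubMetric(), X, Y))  # -> (5, 5) matrix of scores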
|
[
"alex.h.willia@gmail.com"
] |
alex.h.willia@gmail.com
|
70ba6e86892477c6ddef73f412d46549962df3c3
|
d1319156173d34de58a4e1ad93fbe1d320b47978
|
/diabetes_detection/programs/store_v11/controler.py
|
17f000a0fbc880684ba5670cc7013c8e7ab8be11
|
[] |
no_license
|
mzminhaz5683/early-prediction_of_diabetes_using_ann_and_machine_learning
|
545e6cc325f1ab876586a72b2106933230b84d06
|
4a2f7729aff19c2eca497f9a3aabd4facd13cc75
|
refs/heads/master
| 2023-05-11T21:17:36.552446
| 2023-05-02T12:28:31
| 2023-05-02T12:28:31
| 308,662,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,631
|
py
|
all = 0 #0/1
####################################################################################################
# file handling
####################################################################################################
file_description = 0 #0/1
save_column_name = 0 #0/1
save_all_data = 0 #0/1
####################################################################################################
# data handling
####################################################################################################
class_creating = 1 # 0/1
multi_level_Data_Handling = 1 #0/1
####################################################################################################
# data checking
####################################################################################################
hit_map = 0 #0/1/2
hist_plot = 0 #0/1/2
skew_plot = 0 #0/1/2/3 /4 for seeing the transformation effect only
scatter_plot = 0 #0/1
missing_data = 0 #0/1/2
####################################################################################################
# data transformation
####################################################################################################
log_normalization_on_target = 1 #0/1
individual_normalization_show = 0 #0/1
####################################################################################################
# model controller
####################################################################################################
rndm_state = 42 # 0 best: 10
n_estimators = 10 # 10 best: 15
criterion = 'gini' # entropy , gini best: gini
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
project_version = 3 # 3/5
resut_file_name = 'model'
####################################################################################################
# ANN model controller
####################################################################################################
# saved_model_dir = './output/checkpoint/saved/{0}.h5'.format(test_parameters)
test_parameters = '87.7_ann_model'
activate_train = 0 #0/1
target_acc = 80
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
initial_weight = 0.01
alpha_lrelu = 0 # 0/0.1
leakyRelu = 1 #0/1
train_epochs = 300
dropout = [0.10, 0.15, 0.20]
#dns = [32, 32, 64, 128, 128, 256]
dns = [32, 64, 64, 128, 128, 128, 256]
|
[
"mz.minhaz5683@gmail.com"
] |
mz.minhaz5683@gmail.com
|
417369e8c19b91a7e99753191883b4f726ae0e9c
|
e95fedca6baf7027593c96bc86f2c2780a41d9d1
|
/.history/score_sinko_20200614175203.py
|
4f47234de231b111dd0dbb02c007078e2bb7a6a6
|
[] |
no_license
|
jeongeonp/SinK-DaT
|
1769258b02196b666bcc12e81b4e1ac88e7f2f90
|
fd48080b57bc30d9fb62491e06db144ce1f0f4e1
|
refs/heads/master
| 2023-01-02T19:51:44.534677
| 2020-10-30T09:33:04
| 2020-10-30T09:33:04
| 271,836,598
| 1
| 0
| null | 2020-10-07T07:26:57
| 2020-06-12T15:54:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,972
|
py
|
import nltk, re, pprint
from nltk.tokenize import word_tokenize
from urllib import request
from bs4 import BeautifulSoup
import pickle
ROOT_URL = "http://korlex.pusan.ac.kr/search/WebApplication2/KorLex_SearchPage.aspx"
# def fetch_option_data(symbol, datadate, expiration_date):
# response = requests.get(ROOT_URL, params={"symbol": symbol, "datadate": datadate, "expirationDate": expiration_date})
# return response.json()
# data = fetch_option_data('spx', '2018-06-01', '2018-06-15')
# for item in data:
# print("AskPrice:", item['AskPrice'], "Last Price:", item["LastPrice"])
def get_ch(string):
print(string)
ans = [] #(char,num) num=0 for KOR // num=1 for CH // num=2 for ASCII(includes Eng alphabet) // num=3 for something else
for char in string:
num=0
try:
str_enc = char.encode('ascii','strict')
num=2
except:
try:
str_enc = char.encode('gbk','strict')
num=1
except:
try:
str_enc = char.encode("euc_kr",'strict')
num=0
except:
num=3
ans.append((char,num))
return ans
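
# A quick usage sketch: tag each character of a mixed-script string
# (2 = ASCII, 1 = gbk-encodable, 0 = euc_kr-encodable, 3 = none of these).
print(get_ch("a美한"))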
def score_with_korlex(word):
score = 0
return score
def score_with_hanja_level(hanja):
print("Scoring hanja" + hanja + "\n")
score = 0
ROOT_URL = "https://hanja.dict.naver.com/hanja?q=" + hanja
soup = BeautifulSoup(request.urlopen(ROOT_URL).read(), 'html.parser')
res = soup.find_all('a', href=True)
found = False
for a in res:
if a['href'].startswith("/level/read/"):
level = a['href'][11:][0]
found = True
print("The level of " + hanja + " is" + level + "\n")
if not Found:
print(hanja + " has no level specified\n")
# return score
score_with_hanja_level('美'('utf8'))
str1="妥當하다"
str_enc = str1.encode('gbk','strict')
print(str_enc)
|
[
"minarainbow@kaist.ac.kr"
] |
minarainbow@kaist.ac.kr
|
dfe08c9a17f106ce6328a75afa8fd1b37c5400f6
|
72a97182e131ddf1863a9b2fc06e908f3cdcbdba
|
/python/concurrency/asyn_requests/asynrequests01.py
|
148adfe0acfc14772f7aba313566f0cbdd12e589
|
[
"MIT"
] |
permissive
|
cuongnb14/cookbook
|
d07beaf55fec896041451394ba593d6ee8244cbe
|
e4cadc329ead0780d46f0aa22a421d2c765d1039
|
refs/heads/master
| 2020-05-21T23:31:44.214451
| 2020-05-01T14:16:58
| 2020-05-01T14:16:58
| 65,518,104
| 0
| 2
| null | 2017-06-21T11:25:32
| 2016-08-12T02:51:17
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 521
|
py
|
from concurrent.futures import ProcessPoolExecutor
from requests import Session
from requests_futures.sessions import FuturesSession
def callback(future):
print(type(future))
response = future.result()
print(response.json()["args"])
session = FuturesSession(executor=ProcessPoolExecutor(max_workers=10),
session=Session())
for i in range(10):
future_response = session.get('http://httpbin.org/get?foo=' + str(i))
future_response.add_done_callback(callback)
print("done")
|
[
"cuongnb14@gmail.com"
] |
cuongnb14@gmail.com
|
8007a0e1e22c93246d4438e85bedb1bffaa7ac7e
|
d681e088b554a8697d5d3231173a9c800760780f
|
/ex021.py
|
568b9e02bef01866c96a90acba565bc6b3781d5c
|
[] |
no_license
|
thiagocosta-dev/Atividades-Python-CursoEmVideo
|
0870ef5b0d97dd8d9bc4fe30def66bb8b05c2abf
|
4dfeab066ecbe8eb5789965d878db58487e3fdbe
|
refs/heads/main
| 2023-04-15T00:00:15.225495
| 2021-04-10T00:59:07
| 2021-04-10T00:59:07
| 353,154,646
| 0
| 0
| null | 2021-03-31T13:34:01
| 2021-03-30T22:11:57
|
Python
|
UTF-8
|
Python
| false
| false
| 241
|
py
|
'''
CHALLENGE 021
Write a Python program that opens and plays an mp3 file.
'''
import pygame
pygame.init()
pygame.mixer.music.load('')  # path to the mp3 file still to be filled in
pygame.mixer.music.play()
# Block until playback finishes.
while pygame.mixer.music.get_busy():
    pygame.time.Clock().tick(10)
''' Did not read the mp3 file, fix later
'''
|
[
"noreply@github.com"
] |
thiagocosta-dev.noreply@github.com
|
a1a316fe037f35b388273f7f75c79d3a3a287dde
|
01dc7ee49e22bd919952779d3eb4f2796eee7dd7
|
/month.py
|
c2faf965e0c71377f659e730b3405c0406de8438
|
[] |
no_license
|
bobbyesh/budget
|
c2c5a7fa170defc81ef1e53b9cb0e7a8dba0914c
|
c1e55ecca48701b7a815718194731c42bf165dec
|
refs/heads/master
| 2020-03-13T18:01:29.114820
| 2018-04-30T01:30:16
| 2018-04-30T01:30:16
| 131,228,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,046
|
py
|
import datetime
import copy
class Month:
def __init__(self, month, year):
self.month = month
self.year = year
self.datetime = datetime.datetime(year, month, 1)
def __str__(self):
return self.datetime.strftime('%b %Y')
def __repr__(self):
return str(self)
def copy(self) -> 'Month':
return copy.deepcopy(self)
@staticmethod
def from_datetime(d):
return Month(d.month, d.year)
def __eq__(self, other):
return self.month == other.month and self.year == other.year
def __add__(self, other):
new = self.datetime + other
return Month.from_datetime(new)
def __sub__(self, other):
return self.datetime - other.datetime
def __lt__(self, other):
return self.datetime < other.datetime
def __le__(self, other):
return self.datetime <= other.datetime
def __gt__(self, other):
return self.datetime > other.datetime
def __ge__(self, other):
return self.datetime >= other.datetime
def next(self) -> 'Month':
temp = self.datetime + datetime.timedelta(days=30)
month = temp.month
year = temp.year
if month == self.month:
if month == 12:
year += 1
month = 1
else:
month += 1
self.datetime = datetime.datetime(year, month, 1)
self.month = month
self.year = year
return self
def __hash__(self):
return hash(frozenset((self.month, self.year, self.datetime)))
class Monthly:
def __init__(self):
self.__month = None
self.start_month = None
@property
def month(self):
if self.__month is None:
raise ValueError('You must set property "month" before using object of class ' + self.__class__.__name__)
return self.__month
@month.setter
def month(self, x: Month):
self.__month = x
if self.start_month is None:
self.start_month = x.copy()
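
# A brief usage sketch (assumption: equality depends only on month/year;
# note that next() mutates the Month in place and returns it).
if __name__ == "__main__":
    m = Month(12, 2020)
    print(m)                    # Dec 2020
    print(m.next())             # Jan 2021
    print(m == Month(1, 2021))  # True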
|
[
"eshleman@pdx.edu"
] |
eshleman@pdx.edu
|
f41cfc7242aa91f4fa20d4f79abbb7d7c4ca20d2
|
bfa3c9a29ce4199dd35e2e8da0877755430506d9
|
/DjangoAPI/EmployeeApp/migrations/0001_initial.py
|
679e552a2fd5560e91a2d7c55398184884941075
|
[] |
no_license
|
ykt27/HR2_Project
|
6f46730e8c4022b4fa8d270810297cb492c05519
|
7ed17faaf33f0882d936a025e2df920e539df96c
|
refs/heads/master
| 2023-06-11T02:59:59.864694
| 2021-07-03T15:44:14
| 2021-07-03T15:44:14
| 374,412,045
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,303
|
py
|
# Generated by Django 3.2 on 2021-06-06 11:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Departments',
fields=[
('DepartmentId', models.AutoField(primary_key=True, serialize=False)),
('DepartmentName', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Employee_Files',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('workexperienceUpload', models.FileField(null=True, upload_to='')),
('CVUpload', models.FileField(null=True, upload_to='')),
('otherUpload', models.FileField(null=True, upload_to='')),
('uploaded_at', models.DateField(max_length=100)),
('EmployeeName', models.CharField(default='nnnnn', max_length=100, null=True)),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('username', models.CharField(max_length=150, unique=True)),
('first_name', models.CharField(max_length=150, null=True)),
('last_name', models.CharField(max_length=150, null=True)),
('department', models.CharField(max_length=250, null=True)),
('joindate', models.DateField(auto_now_add=True, null=True)),
('email', models.EmailField(max_length=254, null=True, unique=True)),
('age', models.IntegerField(null=True)),
('phonenumber', models.CharField(max_length=150, null=True, unique=True)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('is_superuser', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Recruit',
fields=[
('RecruitmentId', models.AutoField(primary_key=True, serialize=False)),
('CandidateName', models.CharField(max_length=100)),
('PhotoFileName', models.FileField(null=True, upload_to='')),
('Contact', models.CharField(default='0912345678', max_length=100)),
('Email', models.EmailField(default='example@gmail.com', max_length=100)),
('DateOfBirth', models.CharField(default='19-09-1998', max_length=100)),
('Gender', models.CharField(choices=[('male', 'MALE'), ('female', 'FEMALE'), ('other', 'OTHER')], max_length=100)),
('EmergencyContactName', models.CharField(max_length=100)),
('EmergencyPhone', models.CharField(max_length=100)),
('Citizenship', models.CharField(max_length=100)),
('Rase', models.CharField(max_length=100)),
('Education', models.CharField(max_length=100)),
('EmployeeType', models.CharField(choices=[('Full_Time', 'FULL_TIME'), ('Part_Time', 'PART_TIME'), ('Contract', 'CONTACT'), ('Intern', 'INTERN')], max_length=100)),
('Shift', models.CharField(choices=[('Night_Shift', 'NIGHT_SHIFT'), ('Morning_Sift', 'MORING_SHIFT'), ('Contract', 'CONTACT'), ('Intern', 'INTERN')], max_length=100)),
('Department', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='EmployeeApp.departments')),
],
),
migrations.CreateModel(
name='Employees',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('EmployeeName', models.CharField(max_length=100, null=True)),
('DateOfJoining', models.DateField(blank=True, null=True)),
('PhotoFileName', models.ImageField(blank=True, null=True, upload_to='employee_images')),
('Status', models.CharField(choices=[('active', 'ACTIVE'), ('resign', 'RESIGN'), ('vacation', 'VACATION'), ('sick_leave', 'SICK_LEAVE'), ('fired', 'FIRED'), ('layoff', 'LAYOFF')], default='ACTIVE', max_length=100, null=True)),
('StatusDescription', models.CharField(blank=True, max_length=500, null=True)),
('Contact', models.CharField(default='0912345678', max_length=100, null=True)),
('Email', models.EmailField(default='example@gmail.com', max_length=100)),
('DateOfBirth', models.DateField(max_length=100, null=True)),
('Gender', models.CharField(choices=[('male', 'MALE'), ('female', 'FEMALE'), ('other', 'OTHER')], max_length=100, null=True)),
('EmergencyContactName', models.CharField(max_length=100, null=True)),
('EmergencyPhone', models.CharField(max_length=100, null=True)),
('Citizenship', models.CharField(max_length=100, null=True)),
('Race', models.CharField(max_length=100, null=True)),
('Education', models.CharField(max_length=100, null=True)),
('Salary', models.CharField(default='00,000.00', max_length=16, null=True)),
('EmployeeType', models.CharField(choices=[('full_time', 'FULL_TIME'), ('part_time', 'PART_TIME'), ('contract', 'CONTACT'), ('intern', 'INTERN')], max_length=100, null=True)),
('Shift', models.CharField(choices=[('morning_shift', 'MORNING_SHIFT'), ('night_shift', 'NIGHT_SHIFT'), ('afternoon_shift', 'AFTERNOON_SHIFT')], max_length=100, null=True)),
('Work_Location', models.CharField(max_length=100, null=True)),
('Address', models.CharField(max_length=100, null=True)),
('department', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='EmployeeApp.departments')),
('employee_file', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='EmployeeApp.employee_files')),
],
),
migrations.CreateModel(
name='Employee_Recordes',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('warning', models.CharField(choices=[('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5'), ('6', '6')], default='PRESENT', max_length=15)),
('Disciplinary_Description', models.CharField(blank=True, max_length=500, null=True)),
('employees', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='EmployeeApp.employees')),
],
),
migrations.CreateModel(
name='Attendance',
fields=[
('AttendanceId', models.AutoField(primary_key=True, serialize=False)),
('status', models.CharField(choices=[('PRESENT', 'PRESENT'), ('ABSENT', 'ABSENT'), ('LATE_COME', 'LATE_COME'), ('EARLY_LEAVE', 'EARLY_LEAVE')], default='PRESENT', max_length=15)),
('date', models.DateField(max_length=100)),
('employees', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='EmployeeApp.employees')),
],
),
]
|
[
"yaredtefera206@gmail.com"
] |
yaredtefera206@gmail.com
|
9445e355726ba5bcde92702c64a225d414b23225
|
bec7ec738d78be94a84d443c2dd2fd86258ed3a7
|
/invsolve/__init__.py
|
949a4f845c8b70742a3a535391947cf2d8398f5a
|
[
"MIT"
] |
permissive
|
danassutula/model_parameter_inference
|
95070cdbadf24052dc2a873005b7ae9afc8b3ede
|
4ee415f181e815085660dfe722bd861c99da0cd9
|
refs/heads/master
| 2021-08-09T01:07:50.400590
| 2020-07-20T13:44:49
| 2020-07-20T13:44:49
| 204,201,824
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
from . import config
from . import functions
from . import invsolve
from . import measure
from . import project
from . import utility
from .invsolve import InverseSolver
from .invsolve import InverseSolverBasic
# Get configured logger
logger = config.logger
|
[
"sutula.danas@gmail.com"
] |
sutula.danas@gmail.com
|
e1a5d93d5c03eb755c17953e6d16479525e318c3
|
77c190314ed9c4e186c15d18894fea8352da161e
|
/Pbank/src/model.py
|
c35314e03a814e4338cf96426b36046c06ab8f23
|
[] |
no_license
|
Alleinx/Widget
|
9d0d13e4c6aa8b55d13244cce881dd84a1644c95
|
0c115ef94be435e1465ea8366f5128cee7124dc4
|
refs/heads/master
| 2022-01-18T16:50:37.527926
| 2022-01-08T20:18:36
| 2022-01-08T20:18:36
| 174,493,629
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,282
|
py
|
import data_util as dao
class Project(object):
'''
abc class, used to define a general interface for Project
'''
def __init__(self, name: str, description: str):
self.name: str = name
self.description: str = description
self._bills: list = []
        # used to store a snapshot of the data stored in the db.
def add_bill(self, bill):
raise NotImplementedError
def update_bill(self, old_bill, new_bill):
raise NotImplementedError
def delete_bill(self, bill):
raise NotImplementedError
def get_all_bills(self) -> list:
return self._bills
def __repr__(self):
return f'(Project: {self.name}; Project description: {self.description})'
def __str__(self):
return f'{self.name}'
class GeneralProject(Project):
def __init__(self, name: str, description: str):
super().__init__(name, description)
def add_bill(self, bill):
bill = Bill(*bill)
self._bills.append(bill)
def update_bill(self, old_bill, new_bill):
pass
def delete_bill(self, bill):
pass
class Bill(object):
'''
Used to store transfer record
'''
def __init__(self, bill_index: int, title: str,
note: str, time: str, amount: float):
self.bill_index = bill_index
self.amount = amount
self.time = time
self.title = title
self.note = note
def __str__(self):
return '(\'{self.title}\', \'{self.note}\', \'{self.time}\', {self.amount})'.format(
self=self)
def __repr__(self):
return f'({self.time}: Bill[{self.bill_index}] <Title>: {self.title}, <Amount>: {self.amount}, <Note>: {self.note})'
@property
def bill_index(self):
return self._bill_index
@bill_index.setter
def bill_index(self, value):
if not isinstance(value, int):
raise ValueError('bill index should be an integer')
if value < 0:
raise ValueError('bill index should >= 0')
else:
self._bill_index = value
@property
def title(self):
return self._title
@title.setter
def title(self, title: str):
'''
If no title is provided, should not be created
'''
        if title is None:
            raise ValueError('Bill must have a Title or tag')
        if not isinstance(title, str):
            raise ValueError('Title should be a str')
        self._title = title
class ProjectAbstractFactory(object):
'''
abc class, used to define a general interface for ProjectFactory
'''
def __init__(self, dao):
self.data_accessor = dao
self.project_list = None
self._init()
def create_project(self):
raise NotImplementedError
def delete_project(self):
raise NotImplementedError
def update_project(self):
raise NotImplementedError
def _init(self):
raise NotImplementedError
class GeneralProjectFactory(ProjectAbstractFactory):
'''
Project Factory
'''
def __init__(self):
super().__init__(dao.GeneralProjectDAO())
def _init(self):
self.project_list = dict()
for project in self.data_accessor.project_list:
project_description = self.data_accessor.get_project_description(
project)
new_project = GeneralProject(project, project_description)
project_bill = self.data_accessor.get_project_bill(project)
for item in project_bill:
new_project.add_bill(item)
self.project_list[new_project.name] = new_project
def create_project(self, project_name: str,
description='Project Description') -> Project:
'''
This method tends to create a new project
'''
if project_name in self.project_list:
raise ValueError('Project {} already exist.'.format(project_name))
else:
            project = GeneralProject(project_name, description)
self.project_list[project_name] = project
self.data_accessor.create_project(project_name, description)
return project
def delete_project(self, project_name: str):
project_name = project_name.lower()
        if project_name not in self.project_list:
            raise ValueError(f'{project_name} doesn\'t exist.')
del self.project_list[project_name]
self.data_accessor.delete_project(project_name)
def update_project(
self, target_project_name: str, new_project_name: str = None,
new_project_desc: str = None) -> bool:
if not new_project_name and not new_project_desc:
# nothing to update
return False
target_project_name = target_project_name.lower()
        if target_project_name not in self.project_list:
            raise ValueError(f'{target_project_name} doesn\'t exist.')
        if new_project_name is not None:
            new_name = new_project_name.lower()
            if new_name in self.project_list:
                raise ValueError(f'{new_name} already exists.')
project = self.project_list[target_project_name]
del self.project_list[target_project_name]
self.project_list[new_name] = project
if new_project_desc is not None:
if new_project_name is not None:
self.project_list[new_name].description = new_project_desc
else:
self.project_list[target_project_name].description = new_project_desc
self.data_accessor.update_project(
target_project_name, new_project_name, new_project_desc)
return True
def display_project_info(self) -> list:
"""For Menu Displaying
Returns:
list -- a list contains all project in the db.
"""
return [item for item in self.project_list.values()]
if __name__ == "__main__":
project_manager = GeneralProjectFactory()
name = 'hello'
print(project_manager.project_list)
project = project_manager.project_list[name]
print(project.name)
print(project._bills)
|
[
"l630003061@mail.uic.edu.hk"
] |
l630003061@mail.uic.edu.hk
|
6a03a29e3040057e1b145f4ad72f5afc6f054504
|
680b17844c73ddf165414bfe6678131d0830ca7c
|
/Processing/Sketches/Noise_2D/Noise_2D.pyde
|
7da8b6eb0ac5ee632327801154611005fe07dae7
|
[] |
no_license
|
vishangshah/Archive
|
a5a3ad66a9e34aba4289b59da927cc7773bf3cc1
|
e871e76a5011b88dff03b5c62753237d5a90ac5d
|
refs/heads/master
| 2020-03-19T22:42:03.208521
| 2018-06-11T21:04:16
| 2018-06-11T21:04:16
| 136,978,334
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
pyde
|
# Nature of Code
# 2D Perlin Noise
x = 0
y = 0
xoff = 0.0
yoff = 0.0
size(600,600)
loadPixels()
for x in range(0, width):
yoff = 0.0
for y in range(0, height):
bright = float(map(noise(xoff, yoff), 0, 1, 0, 255))
pixels[x+y * width] = color(bright)
yoff += 0.01
xoff += 0.01
updatePixels()
|
[
"noreply@github.com"
] |
vishangshah.noreply@github.com
|
77bb596361645a9ae90378e7bb3ae955df9e48b5
|
f38180bcf74d794b31ad100d07564986a41cff87
|
/src_layout/src/mypackage/subpackage_a/__init__.py
|
d9a8c258fae0cb7d4e61fb285af8c0a9f8979c55
|
[] |
no_license
|
willprice/pytest-coverage-examples
|
51a4ba3daabf8c7426dee2b6a92ea8e8669e46bf
|
ae591f2017fa8dd9807a93d393db170526d262de
|
refs/heads/master
| 2020-04-14T22:24:32.406073
| 2019-01-05T09:22:21
| 2019-01-05T09:22:21
| 164,161,342
| 8
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24
|
py
|
from .module_a import a
|
[
"will.price94@gmail.com"
] |
will.price94@gmail.com
|
3694520e27f74361826be8a154921b0637dab620
|
6c59dc1951eb099ee8228d813b3ceba9125af7a4
|
/sources/train_light2.py
|
624c5dfc204ef6e51ae0de94f4e46abf6dc293e5
|
[] |
no_license
|
alxshine/notice_me_senpai
|
09e524343d7a2a7d8dca6c07e918a054d5fadee3
|
70ee3b4f1139efffef8e4b1792a5896fd65fc6ad
|
refs/heads/master
| 2021-09-10T03:05:09.544088
| 2018-03-20T20:54:49
| 2018-03-20T20:54:49
| 115,210,261
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,671
|
py
|
import sys
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
def weighted_loss(logits, labels, num_classes, head=None):
with tf.name_scope('loss_1'):
logits = tf.reshape(logits, (-1, num_classes))
epsilon = tf.constant(value=1e-10)
logits = logits + epsilon
        # construct a one-hot label array
label_flat = tf.reshape(labels, (-1, 1))
label_flat = tf.cast(label_flat, tf.int32)
labels = tf.reshape(tf.one_hot(label_flat, depth=num_classes), (-1, num_classes))
coefficients = tf.cast(tf.constant([0.1]), tf.float32)
# calculate via median frequency:
# coeff = median_freq/freq(c)
# with freq(c) = number of pixels of class c divided by total number of pixels in image
# median_freq = median of all class freq
unique, counts = np.unique(label_flat, return_counts=True)
median_map = dict(zip(unique,counts))
print(median_map)
#coefficients =
#coefficients = tf.cast(label_flat, tf.float32)
softmax = tf.nn.softmax(logits)
cross_entropy = -tf.reduce_sum(tf.multiply(labels * tf.log(softmax + epsilon), coefficients), reduction_indices=[1])
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
return loss
def softmax(target, axis, name=None):
with tf.name_scope(name, 'softmax', values=[target]):
max_axis = tf.reduce_max(target, axis, keep_dims=True)
target_exp = tf.exp(target-max_axis)
normalize = tf.reduce_sum(target_exp, axis, keep_dims=True)
softmax = target_exp / normalize
return softmax
def cnn_model_fn(features, labels, mode):
input_layer = tf.reshape(features["x"], [-1, 750, 1000, 1])
conv = tf.layers.conv2d(
inputs=input_layer,
filters=3,
kernel_size=[3,3],
padding='same',
activation=tf.nn.relu)
#pool1 = tf.layers.max_pooling2d(
#conv,
#[2,2],
#[2,2])
conv2 = tf.layers.conv2d(
conv,
1,
[3,3],
padding='same',
activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(
conv2,
[5,5],
[5,5])
conv3 = tf.layers.conv2d(
pool2,
20,
[3,3],
padding='same',
activation=tf.nn.relu)
pool3 = tf.layers.max_pooling2d(
conv3,
[5,5],
[5,5])
    # pool3 comes out 30x40x20 for the 750x1000 input (pool1 above is
    # commented out), so the flatten/unflatten sizes must match that grid.
    flat = tf.reshape(pool3, [-1, 30*40*20])
    dense1 = tf.layers.dense(flat, 30*40*20)
    dense2 = tf.layers.dense(dense1, 30*40*20)
    dense3 = tf.layers.dense(dense2, 30*40*20)
    unflat = tf.reshape(dense3, [-1, 30, 40, 20])
dc1 = tf.layers.conv2d_transpose(
unflat,
20,
[3,3],
padding='same',
activation=tf.nn.relu)
dc2 = tf.layers.conv2d_transpose(
conv2,
3,
[3,3],
padding='same',
activation=tf.nn.relu)
dc3 = tf.layers.conv2d_transpose(
dc2,
1,
[3,3],
padding='same',
activation=tf.nn.relu)
ups3 = tf.image.resize_images(dc1, [750, 1000])
dc4 = tf.layers.conv2d_transpose(
ups3,
1,
[3,3],
padding='same',
activation=tf.nn.relu)
norm = tf.div(
tf.subtract(dc4, tf.reduce_min(dc4)),
tf.subtract(tf.reduce_max(dc4), tf.reduce_min(dc4)))
logits = tf.layers.dense(inputs=norm, units=1)
num_classes = 1
output = dc3
#output =
#predictions = {
#"classes": output,
#"probabilities": output
#}
predictions = {
# Generate predictions (for PREDICT and EVAL mode)
#"classes": tf.argmax(input=output, axis=1),
#"classes": tf.nn.softmax(output, name="softmax_tensor"),
"classes": output,
#tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=labels),
# Add `softmax_tensor` to the graph. It is used for PREDICT and by the
# `logging_hook`.
"probabilities": tf.nn.softmax(output, name="softmax_tensor")
#"probabilities": weighted_loss(logits, labels, num_classes)
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
#loss = tf.losses.hinge_loss(labels, output)
loss = weighted_loss(output, labels, num_classes)
#loss = tf.losses.mean_squared_error(labels, output)
#loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output,labels=tf.squeeze(labels)))
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(
loss=loss,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
eval_metric_ops = {
"accuracy": tf.metrics.accuracy(
labels=labels, predictions=predictions["classes"])}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
#create estimator
print("Creating estimator...")
classifier = tf.estimator.Estimator(
model_fn=cnn_model_fn)
print("Done")
#load training data
print("Loading features...")
features = np.load("../dataset/extracted.npy")
print("Done")
print("Loading truth maps...")
maps = np.load("../dataset/maps.npy")
print("Done")
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": features[:780]},
y=maps[:780],
batch_size=3,
num_epochs=10,
shuffle=True)
print("Training classifier...")
classifier.train(
input_fn=train_input_fn,
steps=20000)
print("Done")
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": features[780:]},
y=maps[780:],
num_epochs=1,
batch_size=3,
shuffle=False)
print("Evaluating...")
eval_results = classifier.evaluate(input_fn=eval_input_fn)
print("Done, results: {}".format(eval_results))
predictions = classifier.predict(eval_input_fn)
incorrect_pixels = 0
total_pixels = 0
index = 780
for p in predictions:
pred = p['classes']
pred[pred>pred.mean()] = 1
pred[pred<1] = 0
incorrect_pixels += np.count_nonzero(pred-maps[index:index+1])
total_pixels += np.prod(pred.shape)
if incorrect_pixels/total_pixels < 0.2:
plt.figure()
plt.subplot(131)
plt.imshow(maps[index,:,:,0],cmap='gray')
plt.title("truth")
plt.subplot(132)
plt.imshow(pred[:,:,0],cmap='gray')
plt.title("prediction")
diff = pred - maps[index]
plt.subplot(133)
plt.imshow(diff[:,:,0])
plt.colorbar()
plt.title("diff")
incorrect_rate = np.sum(np.abs(diff))/np.prod(diff.shape)
print("Accuracy: {}%".format((1-incorrect_rate)*100))
plt.show()
index += 1
incorrect_rate = incorrect_pixels/total_pixels
print("Accuracy: {}%".format((1-incorrect_rate)*100))
if __name__ == "__main__":
main(sys.argv)
|
[
"stephanie.autherith@student.uibk.ac.at"
] |
stephanie.autherith@student.uibk.ac.at
|
a83a2734bba442edff8190aaf67269d44ff2bc71
|
55887401e4bb082dcc40bed7e585cd10adf1b37e
|
/ugv/script/move_circle_server.py
|
90c962744ed153ef540f426c88ebebd5fd324ad0
|
[] |
no_license
|
ajay2810/ugv
|
b2f7fa7078aa3c81f4e53a3ca99d066ce6256557
|
1809e93459cea97b8db181b66ac3c58d5a410a9b
|
refs/heads/master
| 2020-12-14T01:26:14.865780
| 2020-01-17T16:48:57
| 2020-01-17T16:48:57
| 234,591,978
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 754
|
py
|
#!/usr/bin/env python
import rospy
from ugv.srv import *
from geometry_msgs.msg import Twist
PI = 3.1415926535897
def handle_move_circle(req):
pub = rospy.Publisher('/cmd_vel',Twist, queue_size = 10)
vel_msg = Twist()
speed = req.s
radius = req.r
vel_msg.linear.x = speed
vel_msg.linear.y = 0
vel_msg.linear.z = 0
vel_msg.angular.x = 0
vel_msg.angular.y = 0
vel_msg.angular.z = speed/radius
    # Move the robot in a circle until shutdown, then stop and reply.
    while not rospy.is_shutdown():
        pub.publish(vel_msg)
    vel_msg.linear.x = 0
    vel_msg.angular.z = 0
    pub.publish(vel_msg)
    return MoveCircleResponse()
def move_circle_server():
rospy.init_node('move_circle_server')
    s = rospy.Service('move_circle', MoveCircle, handle_move_circle)
rospy.spin()
if __name__ == "__main__":
move_circle_server()
|
[
"noreply@github.com"
] |
ajay2810.noreply@github.com
|
c467c50821869c7e41b81df3df09888967cfbd6d
|
3938fb52e6150ab9a7c04081f01b4dc93d892ba5
|
/projectTeam/services/teamservice.py
|
bc9b95914abdb0ae3bb2a86e1f6406cf01d405b1
|
[] |
no_license
|
flsyaoair/guiren
|
7675bfc897fb4ebdcccb6e31568cdaa8fd51847e
|
1910be9b359f07538fb54e7849cdd039bad4a0da
|
refs/heads/master
| 2016-09-06T05:26:43.722215
| 2015-05-10T08:13:07
| 2015-05-10T08:13:07
| 31,943,106
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,152
|
py
|
# -*- coding: UTF-8 -*-
from projectTeam.models import UserProfile,database
from projectTeam.models.userprofile import UserStatus
from sqlalchemy.sql.elements import not_
from projectTeam.models.member import Member
from projectTeam.services import userservice, projectservice, mailservice
from projectTeam.powerteamconfig import *
def member_candidate(project_id):
session = database.get_session()
projectMember = session.query(Member.UserId).filter(Member.ProjectId == project_id)
candidate = session.query(UserProfile).filter(UserProfile.Status == UserStatus.Enabled,not_(UserProfile.UserId.in_(projectMember)))
session.close()
return candidate
def member_in_project(project_id):
session = database.get_session()
projectMember = session.query(Member.UserId).filter(Member.ProjectId == project_id)
memberList = session.query(UserProfile).filter(UserProfile.Status == UserStatus.Enabled,UserProfile.UserId.in_(projectMember))
session.close()
return memberList
def add_member(project_id,email):
session = database.get_session()
user = userservice.get(email)
member = Member()
member.ProjectId = project_id
member.UserId = user.UserId
session.add(member)
session.commit()
session.close()
if ENABLE_MAIL_NOTICE:
p = projectservice.get(project_id)
body = mailservice.render_mail_template('Team/AddMember.html',ProjectName=p.ProjectName,SystemUrl=HOST)
        # Subject translates to: "<project> team welcomes you :)"
        mailservice.send_mail(email, p.ProjectName + u'项目组欢迎您的加入:)', body)
def remove_member(project_id,user_id):
session = database.get_session()
session.query(Member).filter(Member.ProjectId == project_id,Member.UserId == user_id).delete()
session.commit()
session.close()
if ENABLE_MAIL_NOTICE:
p = projectservice.get(project_id)
u = userservice.get_user_by_id(user_id)
body = mailservice.render_mail_template('Team/RemoveMember.html',ProjectName=p.ProjectName,SystemUrl=HOST)
        # Subject translates to: "You have been removed from the <project> team"
        mailservice.send_mail(u.Email, u'您已经被' + p.ProjectName + u'项目组移除', body)
|
[
"fls@csst.com"
] |
fls@csst.com
|
aa7c961a91809ac1640e616e5b27f0c73d8d214b
|
e16dd8206e51f9952877d2bb83d8893adb239e65
|
/example/pcf8591_temp_5v.py
|
84ce6e26c6508c7e0e763ac4bc57403b28cbde30
|
[] |
no_license
|
devdio/raspi-example
|
20bbccd6ccbdd83773d1b143b238dccb57f8482d
|
f6fde3c29fc7907c7f8b6b2179fae8b109374088
|
refs/heads/master
| 2020-04-16T18:14:52.875695
| 2019-01-16T06:55:29
| 2019-01-16T06:55:29
| 165,810,770
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
# -*- coding: utf-8 -*-
import smbus
import time
address = 0x48
A0 = 0x40
A1 = 0x41
A2 = 0x42
A3 = 0x43
bus = smbus.SMBus(1)
while True:
bus.write_byte(address,A0)
value = bus.read_byte(address)
aout = value*3.3/255
#5v
aout = aout*2
print("AOUT:%1.3f TEMP:%d" %(aout, aout*100))
time.sleep(0.5)
|
[
"iamtopaz@gmail.com"
] |
iamtopaz@gmail.com
|
b836fb5c5bb4a87dec7cd18c8caddcd9b08b5d1c
|
1fb2c8ccfee70d141924c9b77cf98e46af2e5112
|
/desktop/core/ext-py/cx_Oracle-6.4.1/samples/PLSQLCollection.py
|
97ebdfdeb3f991d92709091257db7232e1f5ca51
|
[
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"MIT",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] |
permissive
|
criteo-forks/hue
|
bcfa215e654c72eb4bd0a40bfbddcd63b51d30cd
|
752b305d2f336974a2e5c2294f146e338fdd3edf
|
refs/heads/release-4.11.0-criteo
| 2023-09-01T11:53:48.372630
| 2023-08-09T14:55:05
| 2023-08-09T14:55:05
| 170,473,946
| 2
| 6
|
Apache-2.0
| 2023-09-07T08:32:40
| 2019-02-13T08:54:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,367
|
py
|
#------------------------------------------------------------------------------
# Copyright 2016, 2017, Oracle and/or its affiliates. All rights reserved.
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# PLSQLCollection.py
#
# Demonstrate how to get the value of a PL/SQL collection from a stored
# procedure.
#
# This feature is new in cx_Oracle 5.3 and is only available in Oracle
# Database 12.1 and higher.
#------------------------------------------------------------------------------
from __future__ import print_function
import cx_Oracle
import SampleEnv
connection = cx_Oracle.connect(SampleEnv.MAIN_CONNECT_STRING)
# create new empty object of the correct type
# note the use of a PL/SQL type defined in a package
typeObj = connection.gettype("PKG_DEMO.UDT_STRINGLIST")
obj = typeObj.newobject()
# call the stored procedure which will populate the object
cursor = connection.cursor()
cursor.callproc("pkg_Demo.DemoCollectionOut", (obj,))
# show the indexes that are used by the collection
print("Indexes and values of collection:")
ix = obj.first()
while ix is not None:
print(ix, "->", obj.getelement(ix))
ix = obj.next(ix)
print()
# show the values as a simple list
print("Values of collection as list:")
print(obj.aslist())
|
[
"noreply@github.com"
] |
criteo-forks.noreply@github.com
|
e7010eb0ec3612d5ff2545497bc07acfc5f826a8
|
acb232098753214ccee894e5dc909c3f9075aa1b
|
/Back_end(Python,SQL)/src/main/execution.py
|
62485f6bc254cdf134619283d9668ebbbb5912a9
|
[] |
no_license
|
Balaji4397/Singlestop-Application
|
f26146526d21b9ba350220421e2f5d81e8b4dc6f
|
c544d799ecb7c27474e17781497533de23a71819
|
refs/heads/main
| 2022-12-24T09:26:13.805138
| 2020-10-07T08:46:44
| 2020-10-07T08:46:44
| 301,973,645
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 938
|
py
|
# import sys,os.path
# sys.path.append(os.path.join(os.path.dirname(os.path.dirname(__file__)),'main'))
from connection import connect_DB
class execution(connect_DB):
def __init__(self):
"""Establish database connection"""
super().__init__()
self.mycursor = self.conn.cursor()
    def executes(self, query):
        """SQL COMMAND EXECUTION"""
        try:
            verb = query.split(" ")[0]
            if verb in ("CREATE", "INSERT", "DROP", "DELETE", "UPDATE", "ALTER"):
                # Write statements need an explicit commit.
                self.mycursor.execute(query)
                self.conn.commit()
                return self.mycursor
            else:
                # Read statements: execute and hand the cursor back to the caller.
                self.mycursor.execute(query)
                return self.mycursor
        except Exception:
            return 0
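
if __name__ == "__main__":
    # A hedged usage sketch: assumes the database configured in
    # connection.connect_DB is reachable; "SELECT 1" is just an
    # illustrative read statement.
    db = execution()
    cur = db.executes("SELECT 1")
    if cur:
        print(cur.fetchone())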
|
[
"balaji.a.arunachalam@accenture.com"
] |
balaji.a.arunachalam@accenture.com
|
648b292243859193447a8ccb29d8118e0b264689
|
187634d7ab397c584f99644dadcae0c6022208f1
|
/Daphne/word2vec-nlp-tutorial/main/CNN.py
|
18f78bd38ca6f9371186c14f4f5dbc5a0a4c8bb7
|
[] |
no_license
|
jguti21/Kaggle
|
3fbecbc57b32366132cb063d41c8041e9b91ea30
|
7762c0709382fbd1fb8771278be550b5a4d54496
|
refs/heads/master
| 2021-08-10T11:39:18.117542
| 2020-08-12T14:48:32
| 2020-08-12T14:48:32
| 211,645,658
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,727
|
py
|
import os
os.chdir("C:/Users/daphn/Documents/Kaggle/word2vec-nlp-tutorial")
import pandas as pd
train = pd.read_csv("data/labeledTrainData.tsv", header=0,
delimiter="\t", quoting=3)
# Add the extra data
extra = pd.read_csv('data/extradata.csv',
encoding="latin-1")
extra = extra.drop(['Unnamed: 0', 'type', 'file'],
axis=1)
extra.columns = ["review", "sentiment"]
# drop the unlabeled half (rows tagged 'unsup' are meant for unsupervised learning)
extra = extra[extra.sentiment != 'unsup']
extra['sentiment'] = extra['sentiment'].map({'pos': 1,
'neg': 0})
# MERGE
train = pd.concat([train, extra]).reset_index(drop=True)
# Inspection of the training set
# for sentiment:
# - 1 is positive
# - 0 is negative
positive = train[train["sentiment"] == 1]
# The sample is equally distributed in positive and
# negative reviews
len(positive) / len(train)
train["characters"] = train["review"].str.len()
# Longest review has 13 710 characters
max(train["characters"])
# Shortest review has 32 characters
min(train["characters"])
# The shortest review is:
short = train[train["characters"] == min(train["characters"])]["review"]
print(list(short))
# Average number of letters is 1 329 characters
train["characters"].mean()
##
import numpy as np
import pandas as pd
import re
import nltk
import spacy
import string
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
import num2words
from emot.emo_unicode import UNICODE_EMO, EMOTICONS
####### CLEANING ########
# Remove the emojis
def remove_emoji(string):
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
"]+", flags=re.UNICODE)
return emoji_pattern.sub(r'', string)
train["review"] = train["review"] \
.apply(lambda review: remove_emoji(review))
# Exchange emoticons for their meaning
def convert_emoticons(text):
for emot in EMOTICONS:
text = re.sub(u'('+emot+')', "_".join(EMOTICONS[emot].replace(",","").split()), text)
return text
train["review"] = train["review"] \
.apply(lambda review: convert_emoticons(review))
# To lower case
train["review_cleaned"] = train["review"].str.lower()
# Remove HTML
from bs4 import BeautifulSoup
def remove_html(review):
    review = BeautifulSoup(review, "html.parser").get_text()
    return review
train["review_cleaned"] = train["review_cleaned"].apply(
lambda review: remove_html(review)
)
# Removing the punctuation
train["review_cleaned"] = train["review_cleaned"].str.translate(
str.maketrans("", "", string.punctuation)
)
# Removing stop words
from nltk.corpus import stopwords
STOPWORDS = set(stopwords.words('english'))
def remove_stopwords(text):
"""custom function to remove the stopwords"""
return " ".join([word for word in str(text).split()
if word not in STOPWORDS])
train["review_cleaned"] = train["review_cleaned"].apply(
lambda review: remove_stopwords(review))
# Removal of the too frequent words
from collections import Counter
cnt = Counter()
for review in train["review_cleaned"].values:
for word in review.split():
cnt[word] += 1
# 142 478 unique words in the dictionnary
# I will rearrange a little b/c good and bad are in there
FREQWORDS = set([w for (w, wc) in cnt.most_common(50)
if w not in ["good", "bad", "like", "worst",
"great", "love", "best"]])
def remove_freqwords(review):
"""custom function to remove the frequent words"""
return " ".join([word for word in str(review).split()
if word not in FREQWORDS])
train["review_cleaned"] = train["review_cleaned"].apply(
lambda text: remove_freqwords(text)
)
# Removal of rare words
# probably 50 000 is a bit too much
n_rare_words = 230000
RAREWORDS = set([w for (w, wc) in cnt.most_common()[:-n_rare_words-1:-1]])
def remove_rarewords(review):
"""custom function to remove the rare words"""
return " ".join([word for word in str(review).split()
if word not in RAREWORDS])
train["review_cleaned"] = train["review_cleaned"] \
.apply(lambda review: remove_rarewords(review))
# Stemming
# Stemming is the process of reducing inflected
# (or sometimes derived) words to their word stem, base or root form
# from nltk.stem.porter import PorterStemmer
# stemmer = PorterStemmer()
# def stem_words(review):
# return " ".join([stemmer.stem(word)
# for word in review.split()])
#
# train["review_cleaned"] = train["review_cleaned"] \
# .apply(lambda review: stem_words(review))
# Lemmatization
# Lemmatization is similar to stemming in reducing
# inflected words to their word stem but differs
# in the way that it makes sure the root word
# (also called as lemma) belongs to the language.
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet # we need a corpus to
# know what is the type of the word
lemmatizer = WordNetLemmatizer()
wordnet_map = {"N": wordnet.NOUN,
"V": wordnet.VERB,
"J": wordnet.ADJ,
"R": wordnet.ADV}
def lemmatize_words(text):
pos_tagged_text = nltk.pos_tag(text.split())
return " ".join([lemmatizer.lemmatize(word,
wordnet_map.get(pos[0],
wordnet.NOUN))
for word, pos in pos_tagged_text])
train["review_cleaned"] = train["review_cleaned"] \
.apply(lambda review: lemmatize_words(review))
from nltk import word_tokenize
tokens = [word_tokenize(sen) for sen in train.review_cleaned]
train['tokens'] = tokens
# Replace numbers
# import num2words
# ...
# Collect the cleaned reviews into a plain list of strings
docs = train["review_cleaned"].to_list()
# CNN tutorial
#train["review_final"] = train["review_cleaned"].str.split()
train['Pos'] = np.where(train["sentiment"] == 1, 1, 0)
train['Neg'] = np.where(train["sentiment"] == 0, 1, 0)
from sklearn.model_selection import train_test_split
data_train, data_test = train_test_split(train,
test_size=0.10,
random_state=42)
all_training_words = [word for tokens in data_train["tokens"]
for word in tokens]
training_sentence_lengths = [len(tokens) for tokens
in data_train["tokens"]]
TRAINING_VOCAB = sorted(list(set(all_training_words)))
print("%s words total, with a vocabulary size of %s" % (len(all_training_words), len(TRAINING_VOCAB)))
print("Max sentence length is %s" % max(training_sentence_lengths))
all_test_words = [word for tokens in data_test["tokens"]
for word in tokens]
test_sentence_lengths = [len(tokens) for tokens
in data_test["tokens"]]
TEST_VOCAB = sorted(list(set(all_test_words)))
print("%s words total, with a vocabulary size of %s" % (len(all_test_words), len(TEST_VOCAB)))
print("Max sentence length is %s" % max(test_sentence_lengths))
from gensim import models
# https://github.com/mmihaltz/word2vec-GoogleNews-vectors
word2vec_path = './data/word2vec-GoogleNews-vectors/GoogleNews-vectors-negative300.bin.gz'
# Some explanation: https://mccormickml.com/2016/04/12/googles-pretrained-word2vec-model-in-python/
word2vec = models.KeyedVectors.load_word2vec_format(word2vec_path, binary=True)
# Keras works with tensorflow
# for it to work you will need to dl the lastest version of virtual studio c++
# https://support.microsoft.com/fr-fr/help/2977003/the-latest-supported-visual-c-downloads
# Then for NVIDIA you will definitely need this one
# https://towardsdatascience.com/installing-tensorflow-with-cuda-cudnn-and-gpu-support-on-windows-10-60693e46e781
# and this
# https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html#install-windows
# and this (here you are interested in the VCS root definition)
# https://www.quora.com/How-does-one-install-TensorFlow-to-use-with-PyCharm
from keras.layers import Dense, Dropout, Reshape, Flatten, concatenate, Input, Conv1D, GlobalMaxPooling1D, Embedding
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model
MAX_SEQUENCE_LENGTH = 1289
EMBEDDING_DIM = 300
# Tokenize and Pad sequences
tokenizer = Tokenizer(num_words=len(TRAINING_VOCAB),
lower=True, char_level=False)
tokenizer.fit_on_texts(data_train["review_cleaned"].tolist())
training_sequences = tokenizer.texts_to_sequences(
data_train["review_cleaned"].tolist())
train_word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(train_word_index))
# This function transforms a list of num_samples sequences (lists of integers) into a 2D Numpy array
train_cnn_data = pad_sequences(training_sequences,
maxlen=MAX_SEQUENCE_LENGTH)
train_embedding_weights = np.zeros(
(len(train_word_index)+1, EMBEDDING_DIM))
for word,index in train_word_index.items():
train_embedding_weights[index, :] = word2vec[word] if word in word2vec else np.random.rand(EMBEDDING_DIM)
print(train_embedding_weights.shape)
test_sequences = tokenizer.texts_to_sequences(
data_test["review_cleaned"].tolist())
test_cnn_data = pad_sequences(test_sequences,
maxlen=MAX_SEQUENCE_LENGTH)
# Convolutional Neural Networks
# https://www.youtube.com/watch?v=9aYuQmMJvjA
# Historically used for image processing, but CNNs have been out-performing
# Recurrent Neural Networks on sequence tasks.
# High-level explanation: a CNN accepts 2D and 3D input.
# Image => 2D array of pixels => then convolutions on this array,
# i.e. try to locate features in an x-by-x window (also called a kernel):
# shapes, curves, corners, etc.
# Once done, slide the window,
# then condense the image by keeping the results of the convolutions,
# then pool by keeping the max value of each region.
# Each layer tries to identify patterns in the convolutions from the
# layer before.
# The tutorial is about imagery and gives the preprocessing steps for it.
# You have to be careful about the balance of the training data, otherwise
# the NN will optimize for the over-represented class and get stuck.
# YOU NEED TO SHUFFLE THE DATA before training!
def ConvNet(embeddings, max_sequence_length, num_words, embedding_dim, labels_index):
embedding_layer = Embedding(num_words,
embedding_dim,
weights=[embeddings],
input_length=max_sequence_length,
trainable=False)
sequence_input = Input(shape=(max_sequence_length,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
convs = []
filter_sizes = [2, 3, 4, 5, 6]
for filter_size in filter_sizes:
l_conv = Conv1D(filters=200,
kernel_size=filter_size,
activation='relu')(embedded_sequences)
l_pool = GlobalMaxPooling1D()(l_conv)
convs.append(l_pool)
l_merge = concatenate(convs, axis=1)
x = Dropout(0.1)(l_merge)
x = Dense(128, activation='relu')(x)
x = Dropout(0.2)(x)
preds = Dense(labels_index, activation='sigmoid')(x)
model = Model(sequence_input, preds)
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['acc'])
model.summary()
return model
label_names = ['Pos', 'Neg']
y_train = data_train[label_names].values
x_train = train_cnn_data
y_tr = y_train
model = ConvNet(train_embedding_weights, MAX_SEQUENCE_LENGTH, len(train_word_index)+1, EMBEDDING_DIM,
len(list(label_names)))
# OUTPUT
# Model: "model_1"
# __________________________________________________________________________________________________
# Layer (type) Output Shape Param # Connected to
# ==================================================================================================
# input_1 (InputLayer) (None, 50) 0
# __________________________________________________________________________________________________
# embedding_1 (Embedding) (None, 50, 300) 24237600 input_1[0][0]
# __________________________________________________________________________________________________
# conv1d_1 (Conv1D) (None, 49, 200) 120200 embedding_1[0][0]
# __________________________________________________________________________________________________
# conv1d_2 (Conv1D) (None, 48, 200) 180200 embedding_1[0][0]
# __________________________________________________________________________________________________
# conv1d_3 (Conv1D) (None, 47, 200) 240200 embedding_1[0][0]
# __________________________________________________________________________________________________
# conv1d_4 (Conv1D) (None, 46, 200) 300200 embedding_1[0][0]
# __________________________________________________________________________________________________
# conv1d_5 (Conv1D) (None, 45, 200) 360200 embedding_1[0][0]
# __________________________________________________________________________________________________
# global_max_pooling1d_1 (GlobalM (None, 200) 0 conv1d_1[0][0]
# __________________________________________________________________________________________________
# global_max_pooling1d_2 (GlobalM (None, 200) 0 conv1d_2[0][0]
# __________________________________________________________________________________________________
# global_max_pooling1d_3 (GlobalM (None, 200) 0 conv1d_3[0][0]
# __________________________________________________________________________________________________
# global_max_pooling1d_4 (GlobalM (None, 200) 0 conv1d_4[0][0]
# __________________________________________________________________________________________________
# global_max_pooling1d_5 (GlobalM (None, 200) 0 conv1d_5[0][0]
# __________________________________________________________________________________________________
# concatenate_1 (Concatenate) (None, 1000) 0 global_max_pooling1d_1[0][0]
# global_max_pooling1d_2[0][0]
# global_max_pooling1d_3[0][0]
# global_max_pooling1d_4[0][0]
# global_max_pooling1d_5[0][0]
# __________________________________________________________________________________________________
# dropout_1 (Dropout) (None, 1000) 0 concatenate_1[0][0]
# __________________________________________________________________________________________________
# dense_1 (Dense) (None, 128) 128128 dropout_1[0][0]
# __________________________________________________________________________________________________
# dropout_2 (Dropout) (None, 128) 0 dense_1[0][0]
# __________________________________________________________________________________________________
# dense_2 (Dense) (None, 2) 258 dropout_2[0][0]
# ==================================================================================================
# Total params: 25,566,986
# Trainable params: 1,329,386
# Non-trainable params: 24,237,600
# __________________________________________________________________________________________________
# Train CNN
num_epochs = 3
batch_size = 34
hist = model.fit(x_train, y_tr, epochs=num_epochs, validation_split=0.1,
shuffle=True, batch_size=batch_size)
import matplotlib.pyplot as plt
# plt.plot(hist.history['loss'])
# plt.plot(hist.history['val_loss'])
# plt.title('model train vs validation loss')
# plt.ylabel('loss')
# plt.xlabel('epoch')
# plt.legend(['train', 'validation'], loc='upper right')
# plt.show()
# Test CNN
predictions = model.predict(test_cnn_data, batch_size=1024, verbose=1)
labels = [1, 0]
prediction_labels=[]
for p in predictions:
prediction_labels.append(labels[np.argmax(p)])
sum(data_test.sentiment == prediction_labels)/len(prediction_labels)
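# A short add-on sketch, assuming sklearn (already a dependency via
# train_test_split above) is acceptable here: a confusion matrix shows
# where the argmax predictions land, beyond raw accuracy.
from sklearn.metrics import confusion_matrix
print(confusion_matrix(data_test.sentiment, prediction_labels))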
# By reducing the number of words in the vocabulary (removing the 150 000
# rarest words), we actually gained: 0.01 in accuracy
data_test.sentiment.value_counts()
labels = ["Pos_cnn", "Neg_cnn"]
df_predictions = pd.DataFrame(data=predictions, columns=labels)
data_test.reset_index(drop=True, inplace=True)
df_predictions.reset_index(drop=True, inplace=True)
hh = pd.concat([data_test, df_predictions], axis=1)
hh["threshold"] = np.where(
(hh["Pos_cnn"] < 0.6) & (hh["Pos_cnn"] > 0.4), True, False)
# Some manual corrections could be added
sum(hh["threshold"] == True)
cut = hh[hh["threshold"] == True]
#################################################################################
# Trying to score
test = pd.read_csv("data/testData.tsv", header=0,
delimiter="\t", quoting=3)
test["review_cleaned"] = test["review"].str.lower()
# Remove the emojis
test["review_cleaned"] = test["review_cleaned"] \
.apply(lambda review: remove_emoji(review))
# Exchange emoticons for their meaning
test["review_cleaned"] = test["review_cleaned"] \
.apply(lambda review: convert_emoticons(review))
# Remove HTML
test["review_cleaned"] = test["review_cleaned"].apply(
lambda review: remove_html(review)
)
# Removing the punctuation
test["review_cleaned"] = test["review_cleaned"].str.translate(
str.maketrans("", "", string.punctuation)
)
# Removing stop words
STOPWORDS = set(stopwords.words('english'))
test["review_cleaned"] = test["review_cleaned"].apply(
lambda review: remove_stopwords(review))
# Removal of the too frequent words
test["review_cleaned"] = test["review_cleaned"].apply(
lambda text: remove_freqwords(text)
)
# Removal of rare words
test["review_cleaned"] = test["review_cleaned"] \
.apply(lambda review: remove_rarewords(review))
# Lemmatization
test["review_cleaned"] = test["review_cleaned"] \
.apply(lambda review: lemmatize_words(review))
#test["review_final"] = test["review_cleaned"].str.split()
# Apply the model
test_sequences = tokenizer.texts_to_sequences(
test["review_cleaned"].tolist())
test_cnn_data = pad_sequences(test_sequences,
maxlen=MAX_SEQUENCE_LENGTH)
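# The tokenizer and MAX_SEQUENCE_LENGTH fitted on the training data are
# reused here so the word indices line up with the embedding matrix.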
predictions = model.predict(test_cnn_data, batch_size=1024, verbose=1)
labels = ["Pos", "Neg"]
prediction_labels=[]
for p in predictions:
prediction_labels.append(labels[np.argmax(p)])
df_predictions = pd.DataFrame(data=predictions, columns=labels)
essai = pd.concat([test, df_predictions], axis=1)
essai["threshold"] = np.where((essai["Pos"] < 0.6) & (essai["Pos"] > 0.4), True, False)
# Finding the review in test and train
sub_train = train[["review", "sentiment"]]
sub_train = sub_train.rename(columns={"sentiment": "true_sentiment"})
mergedStuff = pd.merge(essai, sub_train, on=['review'], how='left')
len(mergedStuff)
sum(mergedStuff["true_sentiment"] == 1)
sum(mergedStuff["true_sentiment"] == 0)
mergedStuff["Pos"] = np.where(mergedStuff["true_sentiment"] == 1, 1, mergedStuff["Pos"])
mergedStuff["Pos"] = np.where(mergedStuff["true_sentiment"] == 0, 0, mergedStuff["Pos"])
mergedStuff["threshold"] = np.where((mergedStuff["Pos"] < 0.6) & (mergedStuff["Pos"] > 0.4), True, False)
mergedStuff = mergedStuff[["id", "review", "Pos", "Neg", "threshold"]]
#mergedStuff.to_excel("data/manual_classification.xlsx", index=False)
# Some manual corrections could be added
sum(mergedStuff["threshold"] == True)
# Read back
# Actually lost a bit of accuracy. My understanding of positive and negative is not strong enough
# The gem of this boring reading:
# "I've heard a lot about Porno Holocaust and its twin film Erotic Nights Of The Living Dead.
# Both films are interchangeable and were filmed at the same time on the same location with
# the same actors changing clothes for each film (and taking them off).
# If you are expecting the D'Amato genius displayed in films like Buio Omega
# or Death Smiles on Murder, you won't find it here. Nonetheless this film has a charm
# that exploitation fans will not be able to resist. Where else will you see hardcore sex mixed
# with a zombie/monster and his enormous penis that strangles and chokes women to death? Only from D'Amato.
# There is some amount of gore in which many of the men are bludgeoned to death.
# The film is set on a beautiful tropical island. As far as I know there is no subtitled version,
# so if you don't speak Italian you wont know what is going on...but who cares right?
# In all honesty, Gore fans will probably fast forward through the hardcore sex.
# And if anyone is actually watching this for the sex only, will for sure be offended instantly.
# I can just imagine modern day porn fans tracing back through D'Amato's output and coming across this atrocity!
# Out of the two I find Erotic Nights Of The Living Dead far superior.
# But, don't bother watching either if they are cut. Porno Holocaust is extremely low budget as expected.
# Even the monster looks no where as good as George Eastman's character in Anthropophagus.
# The film is worth watching for laughs and to complete your D'Amato film quest."
essai = pd.read_excel("data/manual_classification.xlsx")
essai["Pos"] = np.where(essai["true_sentiment"] == 1, 1, essai["Pos"])
essai["Pos"] = np.where(essai["true_sentiment"] == -1, 0, essai["Pos"])
essai["sentiment"] = np.where(essai["Pos"] > essai["Neg"], 1, 0)
# Alternative: submit the raw probability, thresholded at the extremes.
# That method carries no gain because the misclassified reviews drag the
# score down more than the well-classified ones add.
#essai["sentiment"] = essai["Pos"]
#essai["sentiment"] = np.where(essai["sentiment"] > 0.80, 1, essai["sentiment"])
#essai["sentiment"] = np.where(essai["sentiment"] < 0.20, 0, essai["sentiment"])
# Submission file
submission = essai[["id", "sentiment"]]
submission.to_csv("data/submission_cnn_padded_rounding.csv", index=False, quoting=3)
|
[
"daphne.aurouet@gmail.com"
] |
daphne.aurouet@gmail.com
|
848ffc2a07ccec2f5c3ea110fe29732f4cf1c6d6
|
81407be1385564308db7193634a2bb050b4f822e
|
/the-python-standard-library-by-example/abc/abc_abstractproperty_rw_deco.py
|
03ad06129d6e94565992e68b63a5f0ee11503bc5
|
[
"MIT"
] |
permissive
|
gottaegbert/penter
|
6db4f7d82c143af1209b4259ba32145aba7d6bd3
|
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
|
refs/heads/master
| 2022-12-30T14:51:45.132819
| 2020-10-09T05:33:23
| 2020-10-09T05:33:23
| 305,266,398
| 0
| 0
|
MIT
| 2020-10-19T04:56:02
| 2020-10-19T04:53:05
| null |
UTF-8
|
Python
| false
| false
| 565
|
py
|
# -*- coding: utf-8 -*-
import abc
class Base(object):
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def value(self):
return 'Should never see this'
@value.setter
def value(self, newvalue):
return
class Implementation(Base):
_value = 'Default value'
@property
def value(self):
return self._value
@value.setter
def value(self, newvalue):
self._value = newvalue
i = Implementation()
print('Implementation.value:', i.value)
i.value = 'New value'
print('Changed value:', i.value)
|
[
"350840291@qq.com"
] |
350840291@qq.com
|
de4adada837f7a051656dc8fa8d52f87ebd17d35
|
c8d4e73901ed6ae6670d94d0c4d3c4f828a1293b
|
/venv/Scripts/qiniupy-script.py
|
b923ca076bbcaa68571e68f79336fe2773763d74
|
[] |
no_license
|
a2395299624/danibbs
|
c43591eae06ea3314c9e1a8c10de8ca4229dff4c
|
28150f67bc9c801bdd57ac531d1bc12df58f736a
|
refs/heads/master
| 2022-07-19T04:10:31.576997
| 2020-05-15T16:34:06
| 2020-05-15T16:34:06
| 264,239,758
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
Python
| false
| false
| 415
|
py
|
#!F:\练习项目\BSS论坛\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'qiniu==7.2.8','console_scripts','qiniupy'
__requires__ = 'qiniu==7.2.8'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('qiniu==7.2.8', 'console_scripts', 'qiniupy')()
)
|
[
"2395299624@qq.com"
] |
2395299624@qq.com
|
c70efa89b88a7dbf20fe99858b4f08ca22d415c5
|
8e7a2b9efbc0d25111f01f4cddb781961032685a
|
/python-1025/spider/20180122/program/count_words.py
|
e525a169b86df878f815e31a52019faf1ddbbc5a
|
[] |
no_license
|
Dituohgasirre/python
|
e044aa2e1fb2233b6ccd59701b834ab01e4e24c2
|
05f036d2723f75cd89e4412aaed7ee0ba5d3a502
|
refs/heads/master
| 2023-06-03T13:50:18.641433
| 2021-06-17T10:23:40
| 2021-06-17T10:23:40
| 366,942,423
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 866
|
py
|
# 21. Fetch the page explaining Unix from wikipedia.org, then write a script to the requirement below:
#
# compute the 10 English words that appear most often on that page
import re
import requests
def count_words(words):
res = {}
for word in words:
res[word] = res.get(word, 0) + 1
return res
def sort_words(words_count):
return sorted(words_count.items(), key=lambda x: x[1], reverse=True)
def show(words):
for word, count in words:
print('%03d %s' % (count, word))
if __name__ == '__main__':
n = 10
url = 'https://en.wikipedia.org/wiki/Unix'
r = requests.get(url)
clean_text = re.sub(r'<[a-zA-Z0-9]+(?:\s+[^>]+)?>|</[a-zA-Z0-9]+>', '', r.text)
words = re.findall('[A-Za-z]+', clean_text)
words_count = count_words(words)
sorted_words = sort_words(words_count)
show(sorted_words[:n])
|
[
"linfeiji4729289@126.com"
] |
linfeiji4729289@126.com
|
7fefe4623630146e254d408deef9f9690a88617d
|
f99523da17e00531464dd7a1f9ebeee5230b31b4
|
/src/custom_range.py
|
e8df421062d69259de2e7293ae48adfcf7c28b02
|
[] |
no_license
|
wazeem27/learn_python
|
7d5e579847c5f0244607ef87d7b9c00ac5a43eee
|
504117c5e31fdc3a607ba03a06cf56ae78cb42ec
|
refs/heads/master
| 2021-03-21T01:06:04.618871
| 2020-05-20T09:15:35
| 2020-05-20T09:15:35
| 247,250,213
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
"""Custom range"""
class MyRange:
def __init__(self, start, end, step=1):
self.value = start
self.start = start
self.end = end
self.step = step
self._ok = True
self.__validate()
def __iter__(self):
return self
def __next__(self):
if self.step == 0:
raise ValueError
if self.value >= self.end and self.step > 0 or self.value <= self.end and self.step < 0:
raise StopIteration
if self._ok:
current = self.value
self.value += self.step
return current
raise StopIteration
def validate(self):
if self.step == 0:
raise ValueError("step cannot be zero value")
if self.start < self.end:
if not self.end - (self.start + self.step) < (self.end - self.start):
self._ok = False
else:
if not self.end - (self.start + self.step) > (self.end - self.start):
self._ok = False
__validate = validate
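# Illustrative behaviour (not part of the original file): MyRange(1, 10, 2)
# yields 1, 3, 5, 7, 9, matching the builtin range. The demo below uses a
# step inconsistent with start/end, so __validate marks it invalid and the
# loop prints nothing.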
myrange = MyRange(1, 10, -2)
for i in myrange:
print(i)
|
[
"wazeem27@gmail.com"
] |
wazeem27@gmail.com
|
6f8859acb994cee06b7008118063b2ab54d7fa3d
|
1875c16dee46ab528b5a227c09c5743d8d10e982
|
/pyacs/gts/Sgts_methods/frame.py
|
d8e1a7be230a456908093ee78436d63313befd75
|
[] |
no_license
|
CorentinPeutin/pyacs
|
56a8d2b3941bb7f921c447167c40d89157a502ed
|
c82aecd2c95a5d635170ed6750131cb49e28e570
|
refs/heads/main
| 2023-04-16T13:41:53.042830
| 2021-05-03T14:56:28
| 2021-05-03T14:56:28
| 363,963,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,509
|
py
|
###################################################################
def frame(self,frame=None,euler=None,w=None,verbose=False):
###################################################################
"""
Rotates the time series according to an Euler pole.
User must provide either frame, euler or w.
:param frame: str, implemented values are 'soam','nas','nazca','inca','nas_wrt_soam','inca_wrt_soam'.
:param euler: Euler values provided either as a \
string 'euler_lon/euler_lat/euler_w', a list [euler_lon,euler_lat,euler_w] or \
a 1D numpy array np.array([euler_lon,euler_lat,euler_w])
:param w: rotation rate vector in rad/yr, provided either as a \
string 'wx/wy/wz', a list [wx,wy,wz] or \
a 1D numpy array np.array([wx,wy,wz])
:return: the new Sgts instance in new frame
:ref: All values for frames are from Nocquet et al., Nat Geosc., 2014.
"""
# import
import numpy as np
import pyacs.lib.euler
from pyacs.gts.Sgts import Sgts
from pyacs.gts.Gts import Gts
# check arguments are OK
if [frame,euler,w].count(None) != 2:
print('!!! ERROR: define either argument frame, euler or w ')
return(None)
# Euler poles taken from pygvel_pole_info.py
lEuler={}
lEuler['soam']=[-132.21,-18.83,0.121]
lEuler['nas']=[-97.52,6.48,0.359]
lEuler['nazca']=[-94.4,61.0,0.57]
lEuler['inca']=[-103.729,-1.344,0.1659]
lEuler['nas_wrt_soam']=[-83.40,15.21,0.287]
lEuler['inca_wrt_soam']=[-63.76,22.47,0.092]
# check frame case is OK
if ( frame not in list(lEuler.keys())) and ( frame is not None):
print("!!! ERROR: requested frame ",frame," not known")
print("!!! ERROR: available frames are: ", list(lEuler.keys()))
return(None)
# initialize new gts
New_Sgts=Sgts(read=False)
# convert to Euler vector whatever the provided argument
# case frame
if frame is not None:
euler_vector=np.array(lEuler[frame])
# case w as rotation rate vector
    if w is not None:
if ( isinstance(w,str) ) and '/' in w:
w=np.array(list(map(float,w.split('/'))))
if isinstance(w,list):
w=np.array(w)
if not isinstance(w,np.ndarray):
print('!!! ERROR: argument w not understood: ',w)
return(None)
euler_vector=np.array(pyacs.lib.euler.rot2euler([w[0],w[1],w[2]]))
# case euler vector
if euler is not None:
if ( isinstance(euler,str) ) and '/' in euler:
euler=np.array(list(map(float,euler.split('/'))))
if isinstance(euler,list):
euler=np.array(euler)
if not isinstance(euler,np.ndarray):
print('!!! ERROR: argument euler not understood: ',euler)
return(None)
euler_vector=np.array(euler)
# converts the gts
for gts in self.lGts():
if verbose:print("-- Processing ",gts.code)
try:
new_gts=gts.remove_pole(euler_vector,pole_type='euler',in_place=False, verbose=verbose)
except (RuntimeError, TypeError, NameError):
print("!!! Error processing ",gts.code)
continue
if isinstance(new_gts,Gts):
New_Sgts.append(new_gts)
else:
print("!!! Error processing ",gts.code, "!!! No time series created.")
return(New_Sgts)
|
[
"noreply@github.com"
] |
CorentinPeutin.noreply@github.com
|
89951f9437e4b2416e6850b4e761b90f18b32759
|
b4afa58299ceea4e46cb212634638b0bced1f3cb
|
/python_data.py
|
3d9a2e813c859995dc0792348c7d71e785585c78
|
[] |
no_license
|
uncleguanghui/tesla_route_planning
|
17bceb4df9c770d8e2e7f533817b26bc91b50056
|
a29fd82bff836719c35030df32a9225cc76dfe32
|
refs/heads/master
| 2022-10-31T18:46:27.764787
| 2020-06-15T02:00:38
| 2020-06-15T02:00:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,993
|
py
|
locations = [
["Albany_NY", 42.710356, -73.819109, 131.0],["Edison_NJ", 40.544595, -74.334113, 159.0],["Dayton_OH", 39.858702, -84.277027, 133.0],["Boise_ID", 43.592251, -116.27942, 143.0],["Lumberton_NC", 34.667629, -79.002343, 105.0],["Albuquerque_NM", 35.108486, -106.612804, 175.0],["Newark_DE", 39.662265, -75.692027, 120.0],
["West_Lebanon_NH", 43.623536, -72.3258949, 153.0],["West_Wendover_NV", 40.738399, -114.058998, 106.0],["Salina_KS", 38.877342, -97.618699, 177.0],["Glen_Allen_VA", 37.66976, -77.461414, 128.0],["Beaver_UT", 38.249149, -112.652524, 109.0],["Pleasant_Prairie_WI", 42.518715, -87.950428, 144.0],["Independence_MO", 39.040814, -94.369265, 107.0],["Redondo_Beach_CA", 33.894227, -118.367407, 114.0],["Yuma_AZ", 32.726686, -114.619093, 116.0],["Milford_CT", 41.245823, -73.009059, 130.0],["Liverpool_NY", 43.102424, -76.187446, 138.0],
["Columbia_MO", 38.957778, -92.252761, 109.0],["Harrisburg_PA", 40.277134, -76.823255, 141.0],["Turkey_Lake_FL", 28.514873, -81.500189, 133.0],["Lake_City_FL", 30.181405, -82.679605, 86.0],["Fremont_CA", 37.394181, -122.149858, 151.0],["Bozeman_MT", 45.70007, -111.06329, 105.0],["Peru_IL", 41.348503, -89.126115, 158.0],["Pendleton_OR", 45.64655, -118.68198, 82.0],["Ann_Arbor_MI", 42.241125, -83.766522, 103.0],["Needles_CA", 34.850835, -114.624329, 102.0],["Lebec_CA", 34.98737, -118.946272, 180.0],
["s", 33.79382, -84.39713, 145.0],["Winnemucca_NV", 40.958869, -117.746501, 114.0],["Queens_NY", 40.66179, -73.79282, 149.0],["Country_Club_Hills_IL", 41.585206, -87.721114, 88.0],["Flagstaff_AZ", 35.174151, -111.663194, 144.0],["Norfolk_VA", 36.860525, -76.207467, 128.0],["Uvalde_TX", 29.112378, -99.75208, 140.0],["Tannersville_PA", 41.045431, -75.312237, 174.0],["Centralia_WA", 46.729872, -122.977392, 159.0],["Southampton_NY", 40.891909, -72.426995, 104.0],["Seaside_CA", 36.61697, -121.843973, 110.0],
["Dublin_CA", 37.703163, -121.925304, 175.0],["Lexington_KY", 38.017955, -84.420664, 122.0],["Napa_CA", 38.235578, -122.263886, 155.0],["Augusta_ME", 44.347885, -69.786042, 129.0],["Nephi_UT", 39.678111, -111.841003, 141.0],["Green_River_UT", 38.993577, -110.140513, 146.0],["Plattsburgh_NY", 44.704537, -73.491829, 169.0],["Hooksett_NH", 43.109066, -71.477768, 121.0],["Cisco_TX", 32.374263, -99.007197, 181.0],["Cadillac_MI", 44.28254, -85.40306, 111.0],["Cranbury_NJ", 40.32244, -74.4869, 103.0],
["Charlotte_NC", 35.34075, -80.76579, 164.0],["Indio_CA", 33.741291, -116.215029, 167.0],["Alexandria_LA", 31.312424, -92.446436, 160.0],["Maumee_OH", 41.57833, -83.664593, 134.0],["Ellensburg_WA", 46.976918, -120.54162, 139.0],["Savannah_GA", 32.135885, -81.212853, 100.0],["Holbrook_AZ", 34.922962, -110.145558, 132.0],["Fresno_CA", 36.835455, -119.91058, 113.0],["Newburgh_NY", 41.499616, -74.071324, 194.0],["Temecula_CA", 33.52421, -117.152568, 98.0],["South_Burlington_VT", 44.46286, -73.179308, 117.0],
["Folsom_CA", 38.642291, -121.18813, 148.0],["Gardnerville_NV", 38.696385, -119.548525, 100.0],["London_KY", 37.14916, -84.11385, 190.0],["Casa_Grande_AZ", 32.878773, -111.681694, 158.0],["San_Marcos_TX", 29.827707, -97.979685, 126.0],["Corsicana_TX", 32.068583, -96.448248, 125.0],["El_Centro_CA", 32.760837, -115.532486, 128.0],["Onalaska_WI", 43.879042, -91.188428, 130.0],["Darien_CT", 41.080103, -73.46135, 150.0],["Sandy_OR", 45.402786, -122.294371, 119.0],["Superior_MT", 47.192149, -114.888901, 125.0],
["Manteca_CA", 37.782622, -121.228683, 128.0],["Ocala_FL", 29.140981, -82.193938, 127.0],["Santa_Rosa_NM", 34.947013, -104.647997, 164.0],["Santee_SC", 33.485858, -80.475763, 188.0],["South_Salt_Lake_City_UT", 40.720352, -111.888712, 167.0],["Sparks_NV", 39.541124, -119.442336, 188.0],["Allentown_PA", 40.588118, -75.560089, 183.0],["Knoxville_TN", 35.901319, -84.149634, 126.0],["Moab_UT", 38.573122, -109.552368, 121.0],["Denver_CO", 39.77512, -104.794648, 160.0],["Brandon_FL", 27.940665, -82.323525, 146.0],
["Rapid_City_SD", 44.105601, -103.212569, 128.0],["West_Yellowstone_MT", 44.656089, -111.099022, 135.0],["Burlington_WA", 48.509743, -122.338681, 121.0],["Cheyenne_WY", 41.161085, -104.804955, 179.0],["Dedham_MA", 42.236461, -71.178325, 146.0],["West_Springfield_MA", 42.130914, -72.621435, 139.0],["Port_St._Lucie_FL", 27.31293, -80.406743, 129.0],["Somerset_PA", 40.017517, -79.07712, 133.0],["San_Rafael_CA", 37.963357, -122.515699, 89.0],["St._Joseph_MI", 42.056357, -86.456352, 124.0],["San_Mateo_CA", 37.5447, -122.29011, 118.0],
["Vienna_VA", 38.931919, -77.239564, 84.0],["Brentwood_TN", 35.9696, -86.804159, 183.0],["Ukiah_CA", 39.1481, -123.208604, 153.0],["Aurora_IL", 41.760671, -88.309184, 105.0],["San_Diego_CA", 32.902166, -117.193699, 102.0],["Hawthorne_CA", 33.921063, -118.330074, 158.0],["Grove_City_OH", 39.877253, -83.063448, 155.0],["Gallup_NM", 35.505278, -108.828094, 161.0],["Butte_MT", 45.981226, -112.507161, 84.0],["Grants_Pass_OR", 42.460931, -123.324124, 118.0],["Queensbury_NY", 43.328388, -73.679992, 118.0],["Colorado_Springs_CO", 38.837573, -104.824889, 158.0],
["Highland_Park_IL", 42.17434, -87.816626, 138.0],["Hays_KS", 38.900543, -99.319142, 156.0],["St._Charles_MO", 38.78216, -90.5329, 115.0],["Paramus_NJ", 40.957892, -74.073976, 114.0],["Lone_Tree_CO", 39.563776, -104.875651, 188.0],["Cleveland_OH", 41.519427, -81.493146, 146.0],["Bellmead_TX", 31.582287, -97.109152, 132.0],["Seabrook_NH", 42.895248, -70.869299, 108.0],["Missoula_MT", 46.914375, -114.031924, 114.0],["Watertown_NY", 43.979585, -75.954114, 166.0],["Atascadero_CA", 35.486585, -120.666378, 94.0],["Murdo_SD", 43.886915, -100.716887, 121.0],
["Burbank_CA", 34.174754, -118.300803, 179.0],["Sunnyvale_CA", 37.405893, -121.987945, 150.0],["Laurel_MD", 39.095382, -76.858319, 115.0],["Oakdale_MN", 44.964892, -92.961249, 130.0],["Buffalo_NY", 42.968675, -78.69568, 146.0],["Culver_City_CA", 33.986765, -118.390162, 120.0],["Fountain_Valley_CA", 33.70275, -117.934297, 125.0],["Macon_GA", 32.833485, -83.625813, 160.0],["Baxter_MN", 46.378836, -94.256378, 142.0],["Madison_WI", 43.12669, -89.306829, 151.0],["Angola_IN", 41.699048, -85.000326, 129.0],["Effingham_IL", 39.137114, -88.563468, 131.0],
["Quartzsite_AZ", 33.660784, -114.241801, 123.0],["Gilroy_CA", 37.02445, -121.56535, 155.0],["Kennewick_WA", 46.198035, -119.162687, 157.0],["Hamilton_Township_NJ", 40.195539, -74.641375, 110.0],["Duluth_MN", 46.784467, -92.10232, 184.0],["Terre_Haute_IN", 39.443345, -87.331737, 146.0],["Egg_Harbor_Township_NJ", 39.393663, -74.562619, 79.0],["Las_Vegas_NV", 36.165906, -115.138655, 84.0],["Mammoth_Lakes_CA", 37.644519, -118.965499, 97.0],["Strasburg_VA", 39.00496, -78.337848, 176.0],["Wickenburg_AZ", 33.970281, -112.731503, 164.0],["Limon_CO", 39.268975, -103.708626, 126.0],
["East_Greenwich_RI", 41.660517, -71.497242, 107.0],["Riviera_Beach_FL", 26.77825, -80.109586, 113.0],["Erie_PA", 42.049602, -80.086345, 144.0],["Kingman_AZ", 35.191331, -114.065592, 98.0],["Okeechobee_FL", 27.60089, -80.82286, 135.0],["Big_Timber_MT", 45.83626, -109.94341, 166.0],["Tucumcari_NM", 35.15396, -103.7226, 147.0],["Baton_Rouge_LA", 30.423892, -91.154637, 173.0],["The_Dalles_OR", 45.611941, -121.208249, 178.0],["Greenwich_CT", 41.041538, -73.671661, 138.0],["Dallas_TX", 32.832466, -96.837638, 101.0],["Perry_OK", 36.289315, -97.325935, 144.0],
["Syosset_NY", 40.7999, -73.51524, 106.0],["Cranberry_PA", 40.683508, -80.108327, 148.0],["Greenville_SC", 34.729509, -82.366353, 96.0],["Tonopah_NV", 38.069801, -117.232243, 119.0],["Mountville_SC", 34.39359, -82.028798, 132.0],["Pearl_MS", 32.274159, -90.151048, 141.0],["Louisville_KY", 38.211962, -85.67319, 177.0],["Buellton_CA", 34.614555, -120.188432, 155.0],["Sheboygan_WI", 43.749753, -87.746971, 116.0],["Bethesda_MD", 39.023876, -77.144352, 106.0],["Victoria_TX", 28.766853, -96.978988, 165.0],["Grand_Rapids_MI", 42.914231, -85.533057, 125.0],["Tifton_GA", 31.448847, -83.53221, 185.0],
["Richfield_UT", 38.78799, -112.085173, 176.0],["Columbus_TX", 29.690066, -96.537727, 142.0],["Indianapolis_IN", 39.702238, -86.07959, 91.0],["Triadelphia_WV", 40.06076, -80.602742, 115.0],["Normal_IL", 40.508562, -88.984738, 162.0],["Burlingame_CA", 37.593182, -122.367483, 130.0],["Mountain_View_CA", 37.415328, -122.076575, 133.0],["South_Hill_VA", 36.748516, -78.103517, 149.0],["Chicago_IL", 41.890872, -87.654214, 144.0],["Brooklyn_NY", 40.68331, -74.006508, 115.0],["Buttonwillow_CA", 35.400105, -119.397796, 166.0],["Beatty_NV", 36.913695, -116.754463, 127.0],
["Asheville_NC", 35.531428, -82.604495, 163.0],["Corning_CA", 39.92646, -122.1984, 134.0],["Shreveport_LA", 32.478594, -93.75437, 95.0],["Farmington_NM", 36.766315, -108.144266, 143.0],["Billings_MT", 45.734046, -108.604932, 119.0],["Matthews_NC", 35.140024, -80.719776, 110.0],["Twin_Falls_ID", 42.597887, -114.455249, 146.0],["Vacaville_CA", 38.366645, -121.958136, 123.0],["St._Augustine_FL", 29.924286, -81.416018, 137.0],["Lake_Charles_LA", 30.199071, -93.248782, 134.0],["Tinton_Falls_NJ", 40.226408, -74.093572, 113.0],["Stanfield_AZ", 32.949077, -111.991933, 92.0],
["Grand_Junction_CO", 39.090758, -108.604325, 107.0],["Coeur_d'Alene_ID", 47.708479, -116.794283, 113.0],["Lindale_TX", 32.470885, -95.450473, 135.0],["Orlando_FL", 28.617982, -81.387995, 124.0],["Binghamton_NY", 42.145542, -75.902081, 157.0],["Hagerstown_MD", 39.605859, -77.733324, 121.0],["DeFuniak_Springs_FL", 30.720702, -86.116677, 123.0],["Slidell_LA", 30.266552, -89.760156, 124.0],["Kingsland_GA", 30.790734, -81.663625, 130.0],["Catoosa_OK", 36.167631, -95.766044, 132.0],["Port_Huron_MI", 42.998817, -82.428935, 86.0],["Marathon_FL", 24.72611, -81.047912, 154.0],
["Goodland_KS", 39.326258, -101.725107, 140.0],["Cherry_Valley_IL", 42.243893, -88.978895, 101.0],["Truckee_CA", 39.327438, -120.20741, 158.0],["Monterey_CA", 36.612153, -121.897995, 165.0],["Blue_Ash_OH", 39.224642, -84.383507, 127.0],["Rocky_Mount_NC", 35.972904, -77.846845, 180.0],["Inyokern_CA", 35.646451, -117.812644, 178.0],["Sagamore_Beach_MA", 41.781195, -70.540289, 114.0],["West_Hartford_CT", 41.722672, -72.759717, 106.0],["Hinckley_MN", 46.009797, -92.93137, 169.0],["Bowling_Green_KY", 36.955196, -86.438854, 145.0],["Oxnard_CA", 34.238115, -119.178084, 104.0],
["Auburn_AL", 32.627837, -85.445105, 111.0],["Costa_Mesa_CA", 33.673925, -117.882412, 119.0],["Roseville_CA", 38.771208, -121.266149, 138.0],["East_Brunswick_NJ", 40.415938, -74.444713, 153.0],["Bellevue_WA", 47.62957, -122.148073, 145.0],["St._George_UT", 37.126463, -113.601737, 183.0],["Buckeye_AZ", 33.443011, -112.556876, 154.0],["San_Juan_Capistrano_CA", 33.498538, -117.66309, 135.0],["Oklahoma_City_OK", 35.461664, -97.65144, 87.0],["Lima_OH", 40.726668, -84.071932, 159.0],["Weatherford_OK", 35.53859, -98.66012, 116.0],["Ritzville_WA", 47.116294, -118.368328, 118.0],
["Trinidad_CO", 37.134167, -104.519352, 121.0],["Denton_TX", 33.231373, -97.166412, 154.0],["Sweetwater_TX", 32.450591, -100.392455, 145.0],["Champaign_IL", 40.146204, -88.259828, 144.0],["Gillette_WY", 44.292984, -105.526325, 135.0],["Barstow_CA", 34.849124, -117.085459, 127.0],["Mobile_AL", 30.671556, -88.118644, 98.0],["Glenwood_Springs_CO", 39.552676, -107.340171, 125.0],["Miner_MO", 36.893583, -89.533986, 153.0],["Eureka_CA", 40.778885, -124.188383, 135.0],["Plantation_FL", 26.108605, -80.252444, 113.0],["Idaho_Falls_ID", 43.485152, -112.05205, 142.0],
["Utica_NY", 43.113878, -75.206857, 133.0],["Fort_Myers_FL", 26.485574, -81.787149, 106.0],["Yucca_AZ", 34.879736, -114.131562, 131.0],["Albert_Lea_MN", 43.68606, -93.357721, 92.0],["Sheridan_WY", 44.804582, -106.956345, 95.0],["Sulphur_Springs_TX", 33.137098, -95.603229, 151.0],["Villa_Park_IL", 41.907415, -87.973023, 129.0],["Mayer_AZ", 34.32753, -112.11846, 142.0],["Gila_Bend_AZ", 32.943675, -112.734081, 131.0],["Mishawaka_IN", 41.717337, -86.18863, 109.0],["Tempe_AZ", 33.421676, -111.897331, 187.0],["Silverthorne_CO", 39.631467, -106.070818, 163.0],
["Huntsville_TX", 30.716158, -95.565944, 154.0],["Price_UT", 39.600831, -110.831666, 163.0],["Lone_Pine_CA", 36.60059, -118.061916, 105.0],["Amarillo_TX", 35.189016, -101.931467, 98.0],["Woodburn_OR", 45.15313, -122.881254, 139.0],["Primm_NV", 35.610678, -115.388014, 115.0],["Lincoln_City_OR", 44.957751, -124.010966, 136.0],["Blanding_UT", 37.625618, -109.473842, 148.0],["Brattleboro_VT", 42.838443, -72.565798, 107.0],["Springfield_OR", 44.082607, -123.037458, 92.0],["Cabazon_CA", 33.931316, -116.820082, 169.0],["Pocatello_ID", 42.899615, -112.435248, 96.0],
["Mt._Shasta_CA", 41.310222, -122.31731, 103.0],["Decatur_GA", 33.793198, -84.285394, 128.0],["Bend_OR", 44.03563, -121.308473, 186.0],["Coalinga_CA", 36.254143, -120.23792, 159.0],["Wytheville_VA", 36.945693, -81.054651, 142.0],["Chattanooga_TN", 35.038644, -85.19593, 113.0],["Port_Orange_FL", 29.108571, -81.034603, 165.0],["Wichita_KS", 37.60878, -97.33314, 154.0],["Macedonia_OH", 41.313663, -81.517018, 159.0],["Tremonton_UT", 41.70995, -112.198576, 99.0],["Plymouth_NC", 35.850587, -76.756116, 107.0],["Petaluma_CA", 38.242676, -122.625023, 134.0],
["Lafayette_IN", 40.41621, -86.814089, 126.0],["Detroit_OR", 44.73704, -122.151999, 99.0],["Palo_Alto_CA", 37.394011, -122.150347, 124.0],["Mojave_CA", 35.068595, -118.174576, 169.0],["Eau_Claire_WI", 44.77083, -91.43711, 142.0],["Mitchell_SD", 43.701129, -98.0445, 125.0],["Lee_MA", 42.295745, -73.239226, 151.0],["Houston_TX", 29.980687, -95.421547, 124.0],["East_Liberty_OH", 40.303817, -83.550529, 145.0],["Tallahassee_FL", 30.510908, -84.247841, 182.0],["Lovelock_NV", 40.179476, -118.472135, 168.0],["Ardmore_OK", 34.179106, -97.165632, 143.0],
["Baker_City_OR", 44.782882, -117.812306, 163.0],["Woodbridge_VA", 38.64082, -77.29633, 97.0],["Rocklin_CA", 38.80086, -121.210529, 125.0],["Elko_NV", 40.836301, -115.790859, 108.0],["Reno_NV", 39.489732, -119.794179, 142.0],["Lusk_WY", 42.75625, -104.45267, 136.0],["Shamrock_TX", 35.226765, -100.24836, 173.0],["Tooele_UT", 40.684466, -112.269008, 126.0],["Salisbury_MD", 38.4016, -75.56489, 108.0],["Council_Bluffs_IA", 41.220921, -95.835579, 165.0],["Topeka_KS", 39.04438, -95.760267, 122.0],["Rancho_Cucamonga_CA", 34.113584, -117.529427, 108.0],
["Worthington_MN", 43.63385, -95.595647, 108.0],["Mauston_WI", 43.795551, -90.059358, 138.0],["Warsaw_NC", 34.994625, -78.13567, 135.0]
]
cut_down = [["Pleasant_Prairie_WI", 42.518715, -87.950428, 144.0],
["Peru_IL", 41.348503, -89.126115, 158.0],
["Ann_Arbor_MI", 42.241125, -83.766522, 103.0],
["Country_Club_Hills_IL", 41.585206, -87.721114, 88.0],
["Cadillac_MI", 44.28254, -85.40306, 111.0],
["Onalaska_WI", 43.879042, -91.188428, 130.0],
["Highland_Park_IL", 42.17434, -87.816626, 138.0],
["Oakdale_MN", 44.964892, -92.961249, 130.0],
["Baxter_MN", 46.378836, -94.256378, 142.0],
["Madison_WI", 43.12669, -89.306829, 151.0],
["Sheboygan_WI", 43.749753, -87.746971, 116.0],
["Albert_Lea_MN", 43.68606, -93.357721, 92.0],
["Council_Bluffs_IA", 41.220921, -95.835579, 165.0],
["Worthington_MN", 43.63385, -95.595647, 108.0],
["Mauston_WI", 43.795551, -90.059358, 138.0]]
def get_locations():
return locations
|
[
"cpd@Chinmays-MBP.fios-router.home"
] |
cpd@Chinmays-MBP.fios-router.home
|
f7bb7ab366a5e55809506a53c69ce6a612561a69
|
540c4a2efb0c5f5f59df4b8a94c7f30dd8f4a53d
|
/example2.py
|
ec7203d8b408777c42c3b025fa2f6fb46cff57d1
|
[
"MIT"
] |
permissive
|
salisu14/learn-python-programming
|
797ea3ab79530602b90f861bcab6988c779c5172
|
a1d3d8206850199d8fab1321a432bd3f1fc0a6a1
|
refs/heads/main
| 2023-04-03T22:21:50.522192
| 2021-04-19T14:25:25
| 2021-04-19T14:25:25
| 359,467,267
| 2
| 0
|
MIT
| 2021-04-19T13:20:35
| 2021-04-19T13:20:35
| null |
UTF-8
|
Python
| false
| false
| 1,275
|
py
|
#!bin/usr/env python3
def fBank(bvn, bankName):
f = bankName.pop(0)
print(f'{f} BVN')
print(bvn)
def accBank(bvn, bankName):
a = bankName.pop(1)
print(f'{a} BVN')
print(bvn)
def gtBank(bvn, bankName):
g = bankName.pop(2)
print(f'{g} BVN')
print(bvn)
def zBank(bvn, bankName):
z = bankName.pop()
print(f'{z} BVN')
print(bvn)
def nsFound():
print('Error no such bvn found in the database!')
def main():
choice = "y"
while choice.lower() == "y" or choice.lower() == "yes":
bankName = ['FirstBank', 'AccessBank', 'GTBank', 'Zenith Bank']
bvn = input("Enter ur Bvn Number: ")
if bvn.startswith("22") and len(bvn) == 11:
fBank(bvn, bankName)
elif bvn.startswith("23") and len(bvn) == 11:
accBank(bvn, bankName)
elif bvn.startswith("24") and len(bvn) == 11:
gtBank(bvn, bankName)
elif bvn.startswith("25") and len(bvn) == 11:
zBank(bvn, bankName)
elif len(bvn) != 11:
print('bvn must be 11 digits')
else:
nsFound()
print()
choice = input("Enter again? (y/n): ")
print()
print("Bye!")
if __name__ == "__main__":
main()
|
[
"ibrahimsafiyan@yahoo.com"
] |
ibrahimsafiyan@yahoo.com
|
57cc0303090d481fbda65739352c2411e2351acf
|
0473e3da4a0eeabbc9832fc5ca622b45ea5bbea4
|
/teste_2.py
|
141e0daa2be4007dad0e548a0c6f324dba97a15e
|
[] |
no_license
|
diegojsk/MAP3121-Numerico-EP1-2019
|
e5228baae9650ce46f94c561d5857bcf4f945e50
|
58cff9508b6c52febf4ca2ca51cd00121b2a7b01
|
refs/heads/master
| 2022-08-31T13:55:16.605436
| 2022-07-23T21:36:14
| 2022-07-23T21:36:14
| 183,908,832
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
from main import resolver_sist
import numpy as np
A = np.array([[3/10, 3/5, 0],
[1/2, 0, 1],
[4/10, 4/5, 0]])
W = np.array([[3/5, 0],
[0, 1],
[4/5, 0]])
H = np.array([[1/2, 1, 0],
[1/2, 0, 1]])
np.set_printoptions(precision=3, suppress=True)
_H = resolver_sist(W, A)
print(_H)
# [[0.5 1. 0. ]
# [0.5 0. 1. ]]
|
[
"felipegmelo@usp.br"
] |
felipegmelo@usp.br
|
f32275c28250460f83bc6676890ce5333979af5a
|
7c148402e01212b59540568a9767607c13ec891d
|
/pages/chat_page.py
|
2209d2713d2ddaac2e3ebb8fad855a1c5aa64337
|
[] |
no_license
|
timbortnik/behave_web
|
63994c7e8855fa1efddbd7204615330aaf3ee3de
|
d9133d57d4142e1a5247d0d380539949a16fe53d
|
refs/heads/master
| 2021-01-18T18:26:03.566837
| 2018-03-06T12:09:37
| 2018-03-06T12:09:37
| 41,996,054
| 3
| 5
| null | 2018-03-06T12:09:38
| 2015-09-06T09:05:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,455
|
py
|
# -*- coding: UTF-8 -*-
from .base_page import Page
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
import os
import random
class ChatPage(Page):
"""
Chat page
"""
url = '/chat'
unique_name = 'test_' + str(random.random())
def open_home_room(self):
self.set_home_room().click()
def set_home_room(self):
self.context.wait.until(EC.presence_of_element_located((By.ID, 'status_dropdown')))
xpath = "//a[@aria-label='" + self.context.hipchat_full_name + "']"
return self.context.driver.find_element_by_xpath(xpath)
def upload_attach(self):
img_path = os.getcwd() + '/swap/Selenium.txt'
self.context.driver.find_element_by_id("fileInput").send_keys(img_path)
self.context.driver.find_element_by_id("hc-message-input").send_keys(self.unique_name, Keys.ENTER)
xpath_uname = "//span[@class='description'][text()='" + self.unique_name + "']"
self.context.wait.until(lambda driver: driver.find_element_by_xpath(xpath_uname))
def check_attach_by_name(self):
for i in self.context.driver.find_elements_by_css_selector('div.msg-status.msg-confirmed.hc-msg-file'):
if i.find_element_by_css_selector('span.description').text == self.unique_name:
return i.find_element_by_css_selector('div.file-meta').text
|
[
"noreply@github.com"
] |
timbortnik.noreply@github.com
|
0692580528c7b459ca232a659b3932db1c8dd6ed
|
ead0ab89d5c54695d747245812d239aac73c5861
|
/extractEntities.py
|
2d806249fedbf8c1fd0389edeca5a4b7b7a8d89c
|
[
"Apache-2.0"
] |
permissive
|
dtu-02819-projects-fall2014/02819
|
8ec506b4a2525e83eabaddcc257894c1ad2b624b
|
871eea7cd059b0b9d01290c05046741d5c8f5399
|
refs/heads/master
| 2021-01-15T09:04:01.201050
| 2014-11-30T23:31:27
| 2014-11-30T23:31:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,309
|
py
|
# -*- coding: utf-8 -*-
"""
Module for gathering papers entities from WikiLit and storing them to DB
for later processing.
"""
from gensim import corpora, models
from urllib import urlopen
import csv
import nltk.corpus
import mongo_db
def extract_entities():
"""Reads the webpage, extracts paper entities as a list of dictionaries,
and stores in the database"""
url = "http://wikilit.referata.com/" + \
"wiki/Special:Ask/" + \
"-5B-5BCategory:Publications-5D-5D/" + \
"-3FHas-20author%3DAuthor(s)/-3FYear/" + \
"-3FPublished-20in/-3FAbstract/-3FHas-20topic%3DTopic(s)/" + \
"-3FHas-20domain%3DDomain(s)/" + \
"format%3D-20csv/limit%3D-20500/offset%3D0"
web = urlopen(url)
lines = csv.reader(web, delimiter=',', quotechar='"')
header = []
papers = []
for row in lines:
line = [unicode(cell, 'utf-8') for cell in row]
if not header:
header = line
continue
papers.append(dict(zip(header, line)))
    abstracts = []
    for paper in papers:
        abstracts.append(paper['Abstract'])
mongo_db.save_to_mongo(papers, "wikilit_mining", "papers")
#mongo_db.save_to_mongo(abstracts, "wikilit_mining", "abstracts")
extract_entities()
|
[
"psyllost@gmail.com"
] |
psyllost@gmail.com
|
276ad906e5d0cc17b394f8c5b51dd12e5735ddab
|
25f2cf8702238374f7ddcc2dbedc072f34eafe0b
|
/week1/practicial1/problem4.py
|
c77e12c339fd2a968f4d0a1f91ae2572ec5650ff
|
[] |
no_license
|
tigranmovsisyan123/introtopython
|
40c53e649341173651e6139cca6c3b59d495eceb
|
e7a03036834aea76aa92ab9daa6e162a4b88e075
|
refs/heads/master
| 2020-08-26T16:13:48.260647
| 2019-12-14T12:18:17
| 2019-12-14T12:18:17
| 216,320,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 90
|
py
|
#Problem 4
ab=3
ac=4
bc=(ab**2 + ac**2)**0.5
print("the hypotenuse of triagnle abc =",bc)
|
[
"arm27102@mail.ru"
] |
arm27102@mail.ru
|
e16bb6db37f6de30ff19ed00ae5f7977cf184ca5
|
4138aa9600dbf1e3294fdae9171c40be1e4a3435
|
/utils/box_utils.py
|
1d25675139d8ea162c38fd3d18ee5a4776f3dd41
|
[
"Apache-2.0"
] |
permissive
|
jtpils/single-network-panoptic-segmentation
|
5b1c5a9bfcc673ce8b3e56c18f5727d303764550
|
891f13b8bca0f41e298900fe1c73bc3035caef5d
|
refs/heads/master
| 2020-12-01T22:05:36.768028
| 2019-06-07T13:56:17
| 2019-06-07T13:56:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,289
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Tensorflow Object Detection API code adapted by Daan de Geus
import tensorflow as tf
EPSILON=1e-7
"""Faster RCNN box coder.
Faster RCNN box coder follows the coding schema described below:
ty = (y - ya) / ha
tx = (x - xa) / wa
th = log(h / ha)
tw = log(w / wa)
where x, y, w, h denote the box's center coordinates, width and height
respectively. Similarly, xa, ya, wa, ha denote the anchor's center
coordinates, width and height. tx, ty, tw and th denote the anchor-encoded
center, width and height respectively.
See http://arxiv.org/abs/1506.01497 for details.
"""
def get_center_coordinates_and_sizes(boxes):
with tf.variable_scope("GetCenterCoordinatesAndSizes"):
xmin, ymin, xmax, ymax = tf.unstack(tf.transpose(boxes))
width = xmax - xmin
height = ymax - ymin
ycenter = ymin + height / 2.
xcenter = xmin + width / 2.
return [ycenter, xcenter, height, width]
def encode_boxes(boxes, anchors, scale_factors=None):
"""Encode a box collection with respect to anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded.
anchors: BoxList of anchors.
scale_factors: Factors to scale the encoded boxes (float, float, float, float).
Returns:
a tensor representing N anchor-encoded boxes of the format
[ty, tx, th, tw].
"""
with tf.variable_scope("EncodeBoxes"):
# Convert anchors and boxes to the center coordinate representation.
xmin_a, ymin_a, xmax_a, ymax_a = tf.unstack(tf.transpose(anchors))
wa = xmax_a - xmin_a
ha = ymax_a - ymin_a
ycenter_a = ymin_a + ha / 2.
xcenter_a = xmin_a + wa / 2.
xmin, ymin, xmax, ymax = tf.unstack(tf.transpose(boxes))
w = xmax - xmin
h = ymax - ymin
ycenter = ymin + h / 2.
xcenter = xmin + w / 2.
# Avoid NaN in division and log below.
ha += EPSILON
wa += EPSILON
h += EPSILON
w += EPSILON
tx = (xcenter - xcenter_a) / wa
ty = (ycenter - ycenter_a) / ha
tw = tf.log(w / wa)
th = tf.log(h / ha)
# Scales location targets as used in paper for joint training.
if scale_factors:
ty *= scale_factors[0]
tx *= scale_factors[1]
th *= scale_factors[2]
tw *= scale_factors[3]
return tf.transpose(tf.stack([ty, tx, th, tw]))
def decode_boxes(encoded_boxes, anchors, scale_factors=None):
"""Decode relative codes to boxes.
Args:
encoded_boxes: encoded boxes with relative coding to anchors [N, 4]
anchors: anchors [N, 4]
scale_factors: Factors to scale the decoded boxes (float, float, float, float).
Returns:
boxes: decoded boxes [N, 4]
"""
with tf.variable_scope("DecodeBoxes"):
xmin, ymin, xmax, ymax = tf.unstack(tf.transpose(anchors))
wa = xmax - xmin
ha = ymax - ymin
ycenter_a = ymin + ha / 2.
xcenter_a = xmin + wa / 2.
ty, tx, th, tw = tf.unstack(tf.transpose(encoded_boxes))
if scale_factors:
ty /= scale_factors[0]
tx /= scale_factors[1]
th /= scale_factors[2]
tw /= scale_factors[3]
w = tf.exp(tw) * wa
h = tf.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
return tf.transpose(tf.stack([xmin, ymin, xmax, ymax]))
def normalize_boxes(boxes, orig_height, orig_width):
"""
Args:
boxes: input boxes [N, 4] [x_min, y_min, x_max, y_max]
orig_height: original image height for input boxes
orig_width: original image width for input boxes
Returns: normalized boxes
"""
with tf.variable_scope("NormalizeBoxes"):
orig_height = tf.cast(orig_height, tf.float32)
orig_width = tf.cast(orig_width, tf.float32)
boxes = tf.cast(boxes, tf.float32)
x_min, y_min, x_max, y_max = tf.split(boxes, num_or_size_splits=4, axis=1)
x_min = x_min / orig_width
y_min = y_min / orig_height
x_max = x_max / orig_width
y_max = y_max / orig_height
return tf.concat([x_min, y_min, x_max, y_max], axis=1)
def resize_normalized_boxes(norm_boxes, new_height, new_width):
"""
Resize normalized boxes to a given set of coordinates
Args:
norm_boxes: normalized boxes [N, 4] [x_min, y_min, x_max, y_max] (between 0 and 1)
new_height: new height for the normalized boxes
new_width: new width for the normalized boxes
Returns: Resized boxes
"""
with tf.variable_scope("ResizeNormBoxes"):
x_min, y_min, x_max, y_max = tf.split(norm_boxes, num_or_size_splits=4, axis=1)
x_min = x_min * new_width
y_min = y_min * new_height
x_max = x_max * new_width
y_max = y_max * new_height
return tf.concat([x_min, y_min, x_max, y_max], axis=1)
def flip_normalized_boxes_left_right(boxes):
"""
Flips boxes that are already normalized from left to right
Args:
boxes: normalized boxes
Returns: Flipped boxes
"""
with tf.variable_scope("FlipBoxesLeftRight"):
boxes = tf.stack([1 - boxes[:, 2], boxes[:, 1],
1 - boxes[:, 0], boxes[:, 3]], axis=-1)
return boxes
def convert_input_box_format(boxes):
with tf.variable_scope("ConvertInputBoxFormat"):
boxes = tf.reshape(boxes, [-1, 4])
return tf.transpose([boxes[:, 0],
boxes[:, 1],
boxes[:, 0]+boxes[:, 2],
boxes[:, 1]+boxes[:, 3]])
def calculate_ious(boxes_1, boxes_2):
with tf.variable_scope("CalculateIous"):
x_min_1, y_min_1, x_max_1, y_max_1 = tf.split(boxes_1, 4, axis=1)
x_min_2, y_min_2, x_max_2, y_max_2 = tf.unstack(boxes_2, axis=1)
max_x_min = tf.maximum(x_min_1, x_min_2)
max_y_min = tf.maximum(y_min_1, y_min_2)
min_x_max = tf.minimum(x_max_1, x_max_2)
min_y_max = tf.minimum(y_max_1, y_max_2)
x_overlap = tf.maximum(0., min_x_max - max_x_min)
y_overlap = tf.maximum(0., min_y_max - max_y_min)
overlaps = x_overlap * y_overlap
area_1 = (x_max_1 - x_min_1) * (y_max_1 - y_min_1)
area_2 = (x_max_2 - x_min_2) * (y_max_2 - y_min_2)
ious = overlaps / (area_1 + area_2 - overlaps)
return ious
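# Illustrative check (hypothetical boxes): [0, 0, 2, 2] and [1, 1, 3, 3]
# overlap in a 1x1 square; each box has area 4, so
# IoU = 1 / (4 + 4 - 1) = 1/7 ~ 0.143.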
def calculate_ious_2(boxes_1, boxes_2):
with tf.variable_scope("CalculateIous"):
x_min_1, y_min_1, x_max_1, y_max_1 = tf.split(boxes_1, 4, axis=1)
x_min_2, y_min_2, x_max_2, y_max_2 = tf.split(boxes_2, 4, axis=1)
x_min_2 = tf.squeeze(x_min_2, 1)
y_min_2 = tf.squeeze(y_min_2, 1)
x_max_2 = tf.squeeze(x_max_2, 1)
y_max_2 = tf.squeeze(y_max_2, 1)
max_x_min = tf.maximum(x_min_1, x_min_2)
max_y_min = tf.maximum(y_min_1, y_min_2)
min_x_max = tf.minimum(x_max_1, x_max_2)
min_y_max = tf.minimum(y_max_1, y_max_2)
x_overlap = tf.maximum(0., min_x_max - max_x_min)
y_overlap = tf.maximum(0., min_y_max - max_y_min)
overlaps = x_overlap * y_overlap
area_1 = (x_max_1 - x_min_1) * (y_max_1 - y_min_1)
area_2 = (x_max_2 - x_min_2) * (y_max_2 - y_min_2)
ious = overlaps / (area_1 + area_2 - overlaps)
return ious
def clip_to_img_boundaries(boxes, image_shape):
"""
Args:
boxes: decoded boxes with relative coding to anchors [N, 4]
image_shape: shape of the image [2], (height, width)
Returns:
Boxes that have been clipped to the image boundaries [N, 4]
"""
with tf.variable_scope("ClipToImgBoundaries"):
xmin, ymin, xmax, ymax = tf.unstack(tf.transpose(boxes))
hi, wi = tf.cast(image_shape[0], tf.float32), tf.cast(image_shape[1], tf.float32)
# xmin = tf.maximum(tf.minimum(xmin, wi - 1.), 0.)
# ymin = tf.maximum(tf.minimum(ymin, hi - 1.), 0.)
#
# xmax = tf.maximum(tf.minimum(xmax, wi - 1.), 0.)
# ymax = tf.maximum(tf.minimum(ymax, hi - 1.), 0.)
xmin = tf.maximum(tf.minimum(xmin, wi), 0.)
ymin = tf.maximum(tf.minimum(ymin, hi), 0.)
xmax = tf.maximum(tf.minimum(xmax, wi), 0.)
ymax = tf.maximum(tf.minimum(ymax, hi), 0.)
return tf.transpose(tf.stack([xmin, ymin, xmax, ymax]))
def convert_xyxy_to_yxyx_format(boxes):
with tf.variable_scope("ConvertXyxyToYxyxFormat"):
xmin, ymin, xmax, ymax = tf.unstack(tf.transpose(boxes))
return tf.transpose(tf.stack([ymin, xmin, ymax, xmax]))
def convert_yxyx_to_xyxy_format(boxes):
with tf.variable_scope("ConvertYxyxToXyxyFormat"):
ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(boxes))
return tf.transpose(tf.stack([xmin, ymin, xmax, ymax]))
def pad_boxes_and_return_num(boxes, pad_size):
with tf.variable_scope("PadBoxesReturnNum"):
num_boxes = tf.shape(boxes)[0]
shape = [[0, pad_size - num_boxes], [0, 0]]
boxes_pad = tf.pad(boxes, shape)
return boxes_pad, num_boxes
|
[
"d.c.d.geus@student.tue.nl"
] |
d.c.d.geus@student.tue.nl
|
9d9569b8ca91fd527549be789e0a61897ebfbf90
|
a3ce3c594ee51a6fc8b58bf92337cba136d145d9
|
/hetero_teleop_twist/teleop_twist_keyboard.py
|
51b6b1fd164c68a5bbb945773127765ee70c07b3
|
[] |
no_license
|
MonsterMaster007/hetero-system
|
be09f976280a6fdcd6be34d9e65122ff00b927a6
|
2b11142b7918da33b02327ef9161c8588a8e1a91
|
refs/heads/master
| 2022-11-17T12:48:47.037264
| 2020-07-21T13:29:09
| 2020-07-21T13:29:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,898
|
py
|
#!/usr/bin/env python
from __future__ import print_function
import threading
import roslib; roslib.load_manifest('teleop_twist_keyboard')
import rospy
from geometry_msgs.msg import Twist
import sys, select, termios, tty
msg = """
Reading from the keyboard and Publishing to Twist!
---------------------------
Moving around:
u i o
j k l
m , .
For Holonomic mode (strafing), hold down the shift key:
---------------------------
U I O
J K L
M < >
t : take off
anything else : stop
q/z : increase/decrease max speeds by 10%
w/x : increase/decrease only linear speed by 10%
e/c : increase/decrease only angular speed by 10%
CTRL-C to quit
"""
moveBindings = {
'i':(1,0,0,0),
'o':(1,0,0,-1),
'j':(0,0,0,1),
'l':(0,0,0,-1),
'u':(1,0,0,1),
',':(-1,0,0,0),
'.':(-1,0,0,1),
'm':(-1,0,0,-1),
'O':(1,-1,0,0),
'I':(1,0,0,0),
'J':(0,1,0,0),
'L':(0,-1,0,0),
'U':(1,1,0,0),
'<':(-1,0,0,0),
'>':(-1,-1,0,0),
'M':(-1,1,0,0),
}
speedBindings={
'q':(1.1,1.1),
'z':(.9,.9),
'w':(1.1,1),
'x':(.9,1),
'e':(1,1.1),
'c':(1,.9),
}
class PublishThread(threading.Thread):
def __init__(self, rate):
super(PublishThread, self).__init__()
self.publisher = rospy.Publisher('cmd_vel', Twist, queue_size = 1)
self.x = 0.0
self.y = 0.0
self.z = 0.0
self.th = 0.0
self.speed = 0.0
self.turn = 0.0
self.condition = threading.Condition()
self.done = False
# Set timeout to None if rate is 0 (causes new_message to wait forever
# for new data to publish)
if rate != 0.0:
self.timeout = 1.0 / rate
else:
self.timeout = None
self.start()
def wait_for_subscribers(self):
i = 0
while not rospy.is_shutdown() and self.publisher.get_num_connections() == 0:
if i == 4:
print("Waiting for subscriber to connect to {}".format(self.publisher.name))
rospy.sleep(0.5)
i += 1
i = i % 5
if rospy.is_shutdown():
raise Exception("Got shutdown request before subscribers connected")
def update(self, x, y, z, th, speed, turn):
self.condition.acquire()
self.x = x
self.y = y
self.z = z
self.th = th
self.speed = speed
self.turn = turn
# Notify publish thread that we have a new message.
self.condition.notify()
self.condition.release()
def stop(self):
self.done = True
self.update(0, 0, 0, 0, 0, 0)
self.join()
def run(self):
twist = Twist()
while not self.done:
self.condition.acquire()
# Wait for a new message or timeout.
self.condition.wait(self.timeout)
# Copy state into twist message.
twist.linear.x = self.x * self.speed
twist.linear.y = self.y * self.speed
twist.linear.z = self.z * self.speed
twist.angular.x = 0
twist.angular.y = 0
twist.angular.z = self.th * self.turn
self.condition.release()
# Publish.
self.publisher.publish(twist)
# Publish stop message when thread exits.
twist.linear.x = 0
twist.linear.y = 0
twist.linear.z = 0
twist.angular.x = 0
twist.angular.y = 0
twist.angular.z = 0
self.publisher.publish(twist)
def getKey(key_timeout):
tty.setraw(sys.stdin.fileno())
rlist, _, _ = select.select([sys.stdin], [], [], key_timeout)
if rlist:
key = sys.stdin.read(1)
else:
key = ''
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
return key
def vels(speed, turn):
return "currently:\tspeed %s\tturn %s " % (speed,turn)
if __name__=="__main__":
settings = termios.tcgetattr(sys.stdin)
rospy.init_node('teleop_twist_keyboard')
speed = rospy.get_param("~speed", 0.5)
turn = rospy.get_param("~turn", 1.0)
repeat = rospy.get_param("~repeat_rate", 0.0)
key_timeout = rospy.get_param("~key_timeout", 0.0)
rospy.set_param('uav_take_off', False)
if key_timeout == 0.0:
key_timeout = None
pub_thread = PublishThread(repeat)
x = 0
y = 0
z = 0
th = 0
status = 0
try:
pub_thread.wait_for_subscribers()
pub_thread.update(x, y, z, th, speed, turn)
print(msg)
print(vels(speed,turn))
while(1):
key = getKey(key_timeout)
if key in moveBindings.keys():
x = moveBindings[key][0]
y = moveBindings[key][1]
z = moveBindings[key][2]
th = moveBindings[key][3]
elif key in speedBindings.keys():
speed = speed * speedBindings[key][0]
turn = turn * speedBindings[key][1]
print(vels(speed,turn))
if (status == 14):
print(msg)
status = (status + 1) % 15
elif (key == 't'):
rospy.set_param('uav_take_off', True)
else:
# Skip updating cmd_vel if key timeout and robot already
# stopped.
if key == '' and x == 0 and y == 0 and z == 0 and th == 0:
continue
x = 0
y = 0
z = 0
th = 0
if (key == '\x03'):
break
pub_thread.update(x, y, z, th, speed, turn)
except Exception as e:
print(e)
finally:
pub_thread.stop()
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
|
[
"zhijun_lyu@163.com"
] |
zhijun_lyu@163.com
|
f7f9bb59e970cedcf8c9e2df930779fa407845fb
|
dee1c7a882cc64e300079c5936ae24a9f0c00883
|
/safer.py
|
c384ed737ab140f33d38d1535945a9b32dfbf19e
|
[
"MIT"
] |
permissive
|
jayd2446/safer
|
a33460c425056fad5e0681ba0c28328465a47c95
|
7d641741c87fce24e489daed5e3ca045d313d01a
|
refs/heads/master
| 2022-04-21T07:06:14.152109
| 2020-04-19T17:19:02
| 2020-04-19T17:19:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,734
|
py
|
# -*- coding: utf-8 -*-
"""
✏️safer: a safer file writer ✏️
-------------------------------
No more partial writes or corruption! ``safer`` writes a whole file or
nothing.
``safer.writer()`` and ``safer.printer()`` are context managers that open a
file for writing or printing: if an Exception is raised, then the original file
is left unaltered.
Install ``safer`` from the command line using
`pip <https://pypi.org/project/pip/>`_:
.. code-block:: bash
pip install safer
Tested on Python 2.7, and 3.4 through 3.8.
"""
from __future__ import print_function
import contextlib
import functools
import os
import shutil
import tempfile
try:
from pathlib import Path
except ImportError:
Path = None
__version__ = '1.0.0'
__all__ = 'writer', 'printer'
@contextlib.contextmanager
def writer(
file,
mode='w',
create_parent=False,
delete_failures=True,
**kwargs
):
"""
A context manager that yields {result}, but leaves the file unchanged
if an exception is raised.
It uses an extra temporary file which is renamed over the file only after
the context manager exits successfully: this requires as much disk space
as the old and new files put together.
If ``mode`` contains either ``'a'`` (append), or ``'+'`` (update), then
the original file will be copied to the temporary file before writing
starts.
Arguments:
file:
Path to the file to be opened
mode:
Mode string passed to ``open()``
create_parent:
If true, create the parent directory of the file if it doesn't exist
delete_failures:
If true, the temporary file is deleted if there is an exception
kwargs:
Keywords passed to ``open()``
"""
copy = '+' in mode or 'a' in mode
if not copy and 'r' in mode:
raise IOError('File not open for writing')
if Path and isinstance(file, Path):
file = str(file)
elif not isinstance(file, str):
raise IOError('`file` argument must be a string')
parent = os.path.dirname(os.path.abspath(file))
if not os.path.exists(parent) and create_parent:
os.makedirs(parent)
fd, out = tempfile.mkstemp(dir=parent)
os.close(fd)
if copy and os.path.exists(file):
shutil.copy2(file, out)
try:
with open(out, mode, **kwargs) as fp:
yield fp
except Exception:
if delete_failures and os.path.exists(out):
try:
os.remove(out)
except Exception:
pass
raise
if not copy:
if os.path.exists(file):
shutil.copymode(file, out)
else:
os.chmod(out, 0o100644)
os.rename(out, file)
@functools.wraps(writer)
@contextlib.contextmanager
def printer(*args, **kwargs):
with writer(*args, **kwargs) as fp:
yield functools.partial(print, file=fp)
printer.__doc__ = printer.__doc__.format(
result='a function that prints to the opened file'
)
writer.__doc__ = writer.__doc__.format(
result='a writable stream returned from open()'
)
writer._examples = """\
# dangerous
with open(file, 'w') as fp:
json.dump(data, fp) # If this fails, the file is corrupted
# safer
with safer.writer(file) as fp:
json.dump(data, fp) # If this fails, the file is unaltered
"""
printer._examples = """\
# dangerous
with open(file, 'w') as fp:
for item in items:
print(item, file=fp)
# Prints a partial file if ``items`` raises an exception while iterating
# or any ``item.__str__()`` raises an exception
# safer
with safer.printer(file) as print:
for item in items:
print(item)
# Either the whole file is written, or nothing
"""
|
[
"tom@swirly.com"
] |
tom@swirly.com
|
766117b27177f267b4fa4d5a9383a0ba83bfff65
|
cbf044cc969826c544e413943c37408ec913c3f7
|
/game.py
|
eaa455b8556e8cae752abc32c7cff55d882de9a0
|
[] |
no_license
|
jessapp/guessing-game
|
ed30ee55173d26e4237fbe73afae5694d9d06c1a
|
18ca2068a6f9bf6a1eb7cbd178e8d73b05e608fb
|
refs/heads/master
| 2021-04-29T02:48:33.439288
| 2017-01-03T23:49:12
| 2017-01-03T23:49:12
| 78,051,971
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,583
|
py
|
# Put your code here
from random import randint
print "Hello! What's your name?"
name = raw_input("> ")
play_again = True
best_score = 100
while play_again == True:
number = randint(1, 100)
print "%s, I'm thinking of a number between 1 and 100. \nTry to guess my number." % (name)
num_guesses = 0
guessed = False
while not guessed:
try:
guess = int(raw_input("> "))
except ValueError:
print "That's not a valid number! Try Again! (Hint: is it a decimal? I don't like decimals.)"
continue
num_guesses += 1
if guess == number:
print "Congratulations! You guessed the number in %s guesses!" % (num_guesses)
guessed = True
if num_guesses < best_score:
best_score = num_guesses
print "This is your best score yet!"
else:
print "Your best score so far is %s" % (best_score)
print "Would you like to play again?"
play_again = raw_input("Y or N: ").upper()
if play_again == "Y":
play_again = True
elif play_again == "N":
play_again = False
else:
print "That's not a valid answer! Try again."
elif guess < 1 or guess > 100:
print "Can't you read?! Guess again, in the range!"
elif guess < number:
print "Too Low!"
elif guess > number:
print "Too High!"
else:
print "Sorry, I don't understand that input!"
|
[
"no-reply@hackbrightacademy.com"
] |
no-reply@hackbrightacademy.com
|
59b097fed4b070a9ac55dd082bbc48b565900828
|
ceb5a20968cc5698ae5c49b18fa881511219553b
|
/net.py
|
77ada951333c57a5696822cc3ed7b64ce0251cf1
|
[] |
no_license
|
teju85/capsnet
|
37d7d48c1f808d612f657085203cfe0025dcd2f3
|
49d242eb3358d18bd5959cda13fd89f7624eb1a2
|
refs/heads/master
| 2021-01-24T02:34:43.731878
| 2018-04-27T16:24:32
| 2018-04-27T16:24:32
| 122,854,433
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,588
|
py
|
#!/usr/bin/env python
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST, CIFAR10
from torchvision.transforms import ToTensor
from torch.autograd import Variable
from torch.autograd import profiler
import time
from torch.optim import Adam, SGD
def __get_datasets(root):
"""
Helper function to download, prepare and return the MNIST datasets for
training/testing, respectively.
Args:
. root - folder where to download and prepare the dataset
Output: training and testing sets, respectively
"""
if root == "mnist":
trainset = MNIST(root, train=True, download=True, transform=ToTensor())
testset = MNIST(root, train=False, download=True, transform=ToTensor())
elif root == "cifar10":
trainset = CIFAR10(root, train=True, download=True, transform=ToTensor())
testset = CIFAR10(root, train=False, download=True, transform=ToTensor())
else:
trainset, testset = None, None
return trainset, testset
def get_loaders(args):
    """
    Download and prepare DataLoader wrappers for training and testing sets.
    Args:
    . args - all commandline args passed; uses args.root, args.batch_size,
      args.test_batch_size and args.shuffle
    Output: dataloaders for training set and testing set, respectively
    """
trainset, testset = __get_datasets(args.root)
pin_memory = True if torch.cuda.is_available() else False
train_loader = DataLoader(trainset, batch_size=args.batch_size,
shuffle=args.shuffle, pin_memory=pin_memory)
test_loader = DataLoader(testset, batch_size=args.test_batch_size,
shuffle=args.shuffle, pin_memory=pin_memory)
return train_loader, test_loader
# TODO: parameterize the hard-coded Linear dimensions
class Reconstructor(nn.Module):
def __init__(self, nCaps, capsDim, outDim, outImgDim):
super(Reconstructor, self).__init__()
self.nCaps = nCaps
self.capsDim = capsDim
self.fc1 = nn.Linear(nCaps*capsDim, 512)
self.fc2 = nn.Linear(512, 1024)
self.fc3 = nn.Linear(1024, outDim)
self.outImgDim = outImgDim
def forward(self, x, labels):
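        # Build a one-hot mask from the labels so only the target capsule's
        # activity vector is passed on to the decoder.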
idx = Variable(torch.zeros(x.size(0), self.nCaps), requires_grad=False)
if x.is_cuda:
idx = idx.cuda()
idx.scatter_(1, labels.view(-1, 1), 1) # one-hot vector!
idx = idx.unsqueeze(dim=-1)
activities = x * idx
activities = activities.view(x.size(0), self.nCaps*self.capsDim)
x = F.relu(self.fc1(activities))
x = F.relu(self.fc2(x))
x = F.sigmoid(self.fc3(x))
x = x.view(x.size(0), self.outImgDim[0],
self.outImgDim[1], self.outImgDim[2])
return x
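# squash() below implements the capsule nonlinearity
#   v = (||s||^2 / (1 + ||s||^2)) * (s / ||s||):
# short vectors shrink toward zero and long vectors approach unit length, so a
# capsule's norm can be read as an existence probability.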
def squash(x, dim=-1):
norm = x.norm(dim=dim, keepdim=True)
norm2 = norm * norm
scale = norm2 / (1 + norm2) / norm
x = scale * x
return x
class ConvCapsule(nn.Module):
def __init__(self, inC, outC, capsDim, stride, kernel):
super(ConvCapsule, self).__init__()
self.outC = outC
self.capsDim = capsDim
self.c1 = nn.Conv2d(inC, outC*capsDim, kernel_size=kernel,stride=stride)
def forward(self, x):
out = self.c1(x)
N, _, H, W = out.size()
out = out.view(N, self.outC, self.capsDim, H, W)
out = out.permute([0, 1, 3, 4, 2])
a, b, c, d, e = out.size()
out = out.contiguous()
out = out.view(a, b*c*d, e)
out = squash(out)
return out
class Capsule(nn.Module):
def __init__(self, nOutCaps, outCapsDim, nInCaps, inCapsDim, nRouting, detach):
super(Capsule, self).__init__()
self.nOutCaps = nOutCaps
self.outCapsDim = outCapsDim
self.nInCaps = nInCaps
self.inCapsDim = inCapsDim
self.r = nRouting
self.W = nn.Parameter(torch.zeros(nInCaps, inCapsDim, nOutCaps * outCapsDim))
self.detach = detach
        nn.init.kaiming_uniform_(self.W)
def forward(self, u):
if torch.cuda.is_available():
b = torch.empty(u.size(0), self.nInCaps, self.nOutCaps, device="cuda")
else:
b = torch.empty(u.size(0), self.nInCaps, self.nOutCaps)
b.zero_()
b = Variable(b)
u1 = u.unsqueeze(dim=-1)
#uhat = u1.matmul(self.W)
uhat = torch.sum(u1 * self.W, dim=2)
uhat = uhat.view(uhat.size(0), self.nInCaps, self.nOutCaps, self.outCapsDim)
uhat_d = uhat.detach() if self.detach else uhat
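        # Routing-by-agreement: softmax the logits b into coupling
        # coefficients c, form the weighted vote s, squash it into v, and
        # raise b for votes uhat that agree (dot product) with v. Detached
        # votes are used on all but the last iteration when self.detach is set.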
for i in range(self.r):
c = F.softmax(b, dim=-1)
c = c.unsqueeze(-1)
if i == self.r - 1:
s = torch.sum(c * uhat, dim=1)
else:
s = torch.sum(c * uhat_d, dim=1)
v = squash(s)
if i != self.r - 1:
v1 = v.unsqueeze(1)
a = torch.sum(uhat_d * v1, dim=-1)
b = b + a
return v
class MarginLoss(nn.Module):
def __init__(self, mplus, _lambda, mminus, recon_weight):
super(MarginLoss, self).__init__()
self.mplus = mplus
self._lambda = _lambda
self.mminus = mminus
self.recon_weight = recon_weight
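    # Margin loss per class k:
    #   T_k * relu(m+ - ||v_k||)^2 + lambda * (1 - T_k) * relu(||v_k|| - m-)^2
    # summed over classes and averaged over the batch; a weighted MSE
    # reconstruction term is added when a decoder output is present.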
def forward(self, output, data, label):
pred, recon, x = output
if pred.is_cuda:
idx = torch.empty(pred.size(), device="cuda")
else:
idx = torch.empty(pred.size())
idx.zero_()
idx = idx.scatter_(1, label.data.view(-1, 1), 1.0) # one-hot!
idx = Variable(idx)
loss_plus = F.relu(self.mplus - pred).pow(2) * idx
loss_minus = F.relu(pred - self.mminus).pow(2) * (1. - idx)
loss = loss_plus + (self._lambda * loss_minus)
lval = loss.sum(dim=1).mean()
if recon is not None:
lval = lval + self.recon_weight * F.mse_loss(recon, data)
return lval
class MnistCapsuleNet(nn.Module):
def __init__(self, detach, nrouting):
super(MnistCapsuleNet, self).__init__()
self.c1 = nn.Conv2d(1, 256, kernel_size=9)
self.convcaps = ConvCapsule(inC=256, outC=32, capsDim=8, stride=2,
kernel=9)
self.caps = Capsule(10, 16, 32*6*6, 8, nrouting, detach)
imSize = 28
self.decoder = Reconstructor(nCaps=10, capsDim=16, outDim=imSize*imSize,
outImgDim=(1, imSize, imSize))
def forward(self, x, labels=None):
x = self.c1(x)
x = F.relu(x)
x = self.convcaps(x)
x = self.caps(x)
pred = x.norm(dim=-1)
if labels is not None:
recon = self.decoder(x, labels)
else:
recon = None
return pred, recon, x
class Cifar10CapsuleNet(nn.Module):
def __init__(self, detach, nrouting):
super(Cifar10CapsuleNet, self).__init__()
self.c1 = nn.Conv2d(3, 256, kernel_size=9)
self.convcaps = ConvCapsule(inC=256, outC=32, capsDim=8, stride=2,
kernel=9)
self.caps = Capsule(10, 16, 32*8*8, 8, nrouting, detach)
imSize = 32
self.decoder = Reconstructor(nCaps=10, capsDim=16, outDim=3*imSize*imSize,
outImgDim=(3, imSize, imSize))
def forward(self, x, labels=None):
x = self.c1(x)
x = F.relu(x)
x = self.convcaps(x)
x = self.caps(x)
pred = x.norm(dim=-1)
if labels is not None:
recon = self.decoder(x, labels)
else:
recon = None
return pred, recon, x
def get_model(args):
if args.root == "mnist":
model = MnistCapsuleNet(not args.no_detach, args.nrouting)
elif args.root == "cifar10":
model = Cifar10CapsuleNet(not args.no_detach, args.nrouting)
else:
model = None
return model
def get_loss(args):
if args.root == "mnist":
pixels = 28
elif args.root == "cifar10":
pixels = 32
else:
pixels = 1
loss = MarginLoss(args.mplus, args.mlambda, args.mminus,
args.lambda_recon * pixels * pixels)
return loss
def get_optimizer(args, model):
    if args.adam:
        optimizer = Adam(model.parameters(), lr=args.lr)
    else:
        optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.mom)
    return optimizer
def train(epoch_id, model, loader, loss, optimizer, recon, max_idx):
    start = time.time()
    model.train()  # test() switches to eval mode; switch back before training
    loss_val = 0.0
    accuracy = 0.0
for idx, (data, label) in enumerate(loader):
if torch.cuda.is_available():
data, label = data.cuda(), label.cuda()
data, label = Variable(data), Variable(label)
optimizer.zero_grad()
if recon:
output = model(data, label)
else:
output = model(data)
lval = loss(output, data, label)
lval.backward()
optimizer.step()
loss_val += lval.item()
_, pred = output[0].data.max(dim=-1) # argmax
accuracy += pred.eq(label.data.view_as(pred)).float().sum()
if idx == max_idx:
break
loss_val /= len(loader.dataset)
accuracy /= len(loader.dataset)
total = time.time() - start
print("Train epoch:%d time(s):%.3f loss=%.8f accuracy:%.4f" % \
(epoch_id, total, loss_val, accuracy))
def test(epoch_id, model, loader, loss):
start = time.time()
model.eval()
loss_val = 0.0
accuracy = 0.0
for idx, (data, label) in enumerate(loader):
if torch.cuda.is_available():
data, label = data.cuda(), label.cuda()
data, label = Variable(data), Variable(label)
output = model(data)
        loss_val += loss(output, data, label).item()
_, pred = output[0].data.max(1) # argmax
accuracy += pred.eq(label.data.view_as(pred)).float().sum()
loss_val /= len(loader.dataset)
accuracy /= len(loader.dataset)
total = time.time() - start
print("Test epoch:%d time(s):%.3f loss=%.8f accuracy:%.4f" % \
(epoch_id, total, loss_val, accuracy))
if __name__ == "__main__":
import argparse
print("Parsing args...")
parser = argparse.ArgumentParser(description="Capsnet Benchmarking")
parser.add_argument("-adam", default=False, action="store_true",
help="Use ADAM as the optimizer (Default SGD)")
parser.add_argument("-batch-size", type=int, default=256,
help="Input batch size for training")
parser.add_argument("-epoch", type=int, default=50, help="Training epochs")
parser.add_argument("-lambda-recon", type=float, default=0.0005,
help="Reconstruction-loss weight")
parser.add_argument("-lr", type=float, default=0.1, help="Learning Rate")
parser.add_argument("-max-idx", type=int, default=-1,
help="Max batches to run per epoch (debug-only)")
parser.add_argument("-mom", type=float, default=0.9,
help="Momentum (SGD only)")
parser.add_argument("-mlambda", type=float, default=0.5,
help="MarginLoss lambda")
parser.add_argument("-mminus", type=float, default=0.1, help="MarginLoss m-")
parser.add_argument("-mplus", type=float, default=0.9, help="MarginLoss m+")
parser.add_argument("-no-detach", default=False, action="store_true",
help="Don't detach uhat while routing except last iter")
parser.add_argument("-no-test", default=False, action="store_true",
help="Don't run validation (debug-only)")
parser.add_argument("-nrouting", type=int, default=3,
help="Num routing iterations")
parser.add_argument("-profile", default=False, action="store_true",
help="Profile the runtimes to gather perf info")
parser.add_argument("-no-recon", default=False, action="store_true",
help="Disable reconstruction loss")
parser.add_argument("-root", type=str, choices=("mnist", "cifar10"),
default="mnist",
help="Directory where to download the mnist dataset")
parser.add_argument("-seed", type=int, default=12345,
help="Random seed for number generation")
parser.add_argument("-shuffle", default=False, action="store_true",
help="To shuffle inputs during training/testing or not")
parser.add_argument("-test-batch-size", type=int, default=128,
help="Input batch size for testing")
args = parser.parse_args()
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
print("Loading datasets...")
train_loader, test_loader = get_loaders(args)
print("Preparing model/loss-function/optimizer...")
profiler.emit_nvtx(enabled=args.profile)
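    # NOTE: profiler.emit_nvtx is a context manager; constructing it here
    # without a `with` block most likely does not actually enable NVTX ranges.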
model = get_model(args)
loss = get_loss(args)
    optimizer = get_optimizer(args, model)
if torch.cuda.is_available():
model.cuda()
loss.cuda()
print("Training loop...")
for idx in range(0, args.epoch):
train(idx, model, train_loader, loss, optimizer, not args.no_recon,
args.max_idx)
if not args.no_test:
test(idx, model, test_loader, loss)
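# Example invocations (sketches; the datasets are downloaded on first run):
#   python net.py -root mnist -batch-size 128 -epoch 5 -adam -lr 0.001
#   python net.py -root cifar10 -no-recon -nrouting 3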
|
[
"rao.thejaswi@gmail.com"
] |
rao.thejaswi@gmail.com
|
3fb08eaf53531d5b3136f276a073e1ebba731c66
|
b38dea8822f021824be40412c2a54794afc877d7
|
/tests/test_collection_manager.py
|
99d0e6b586fd58de15e5131ecda5a8d8bad54f9a
|
[] |
no_license
|
alixaxel/kvlite
|
1f47679a61406d55d779d78ffa1d10d330adc20d
|
400ea9c6813d6a70f848147c669d2e9bb02002c1
|
refs/heads/master
| 2021-01-18T08:47:18.230763
| 2013-04-21T06:54:10
| 2013-04-21T06:54:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,687
|
py
|
import sys
if '' not in sys.path:
sys.path.append('')
import unittest
from kvlite import CollectionManager
class KvliteCollectionManagerTests(unittest.TestCase):
def test_wrong_uri(self):
URI = None
self.assertRaises(RuntimeError, CollectionManager, URI)
def test_mysql_manager(self):
URI = 'mysql://kvlite_test:eixaaghiequ6ZeiBahn0@localhost/kvlite_test'
collection_name = 'kvlite_test'
manager = CollectionManager(URI)
if collection_name in manager.collections():
manager.remove(collection_name)
self.assertNotIn(collection_name, manager.collections())
manager.create(collection_name)
self.assertIn(collection_name, manager.collections())
manager.remove(collection_name)
self.assertNotIn(collection_name, manager.collections())
def test_sqlite_manager(self):
URI = 'sqlite://tests/db/testdb.sqlite'
collection_name = 'kvlite_test'
manager = CollectionManager(URI)
if collection_name in manager.collections():
manager.remove(collection_name)
self.assertNotIn(collection_name, manager.collections())
manager.create(collection_name)
self.assertIn(collection_name, manager.collections())
manager.remove(collection_name)
self.assertNotIn(collection_name, manager.collections())
def test_unsupported_backend(self):
URI = 'backend://database'
self.assertRaises(RuntimeError, CollectionManager, (URI))
if __name__ == '__main__':
unittest.main()
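# Note: the MySQL test assumes a reachable local kvlite_test database with the
# credentials hard-coded above; the sqlite test writes tests/db/testdb.sqlite.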
|
[
"ownport@gmail.com"
] |
ownport@gmail.com
|
3e57601ccc89b25441772e567a75e48ff6bea6dd
|
615b3d65acdcebe557b2b03ca1b661db40bffcdd
|
/TitanicBinaryClassifier.py
|
d8a50fa7acb232a57b48851f4e1a65e00255601d
|
[
"MIT"
] |
permissive
|
psengupta1973/MachineLearning_py
|
c210a3cae70a5a7aef184a73d48d43b8640dcb56
|
98dfda55693353e641ed150b66fcab8170593c8b
|
refs/heads/master
| 2020-03-23T18:10:39.515883
| 2018-12-06T09:17:32
| 2018-12-06T09:17:32
| 141,893,815
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,189
|
py
|
# Using Logistic Regression for binary classification of Titanic passengers in categories of Survived (1) or not (0)
# based on input features e.g. Pclass,Sex,Age,SibSp,Parch,Fare,Cabin and Embarked etc.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
from LogisticRegressor import LogisticRegressor
class TitanicBinaryClassifier:
########### main method runs the steps of training & prediction ###########
def __init__(self, epoch=100, alpha=0.3, reg=10):
# LOAD data
xLabels = ['Pclass','Sex','Age','SibSp','Parch','Ticket','Fare','Embarked']
yLabel = 'Survived'
classTags = ['Not Survived', 'Survived']
data = pd.read_csv('input/titanic_train.csv')
X, y = self.preprocessTitanicData(data, xLabels, yLabel)
#self.plot(X, y, xLabels, yLabel, classTags)
classifier = LogisticRegressor(numOfIterations=epoch, learningRate=alpha, regularizer=reg, scalingNeeded=True, biasNeeded=True, verbose=True)
print('\nTRAINING:\n') # TRAIN the model (i.e. theta here)
classifier.train(X, y) # alpha is learning rate for gradient descent
classifier.saveModel('model/titanic_classifier.model')
classifier.loadModel('model/titanic_classifier.model')
        print('\nVALIDATION:\n')
yPred = classifier.validate(X, y) # VALIDATE model with training data
self.writeOutput(X, yPred, 'output/titanic_validation.csv')
#self.plot(X, y, xLabels, yLabel, classTags) # Plot after validation
print('\nPREDICTION:\n') # PREDICT with trained model using test data
data = pd.read_csv("input/titanic_test.csv")
X, y = self.preprocessTitanicData(data, xLabels, yLabel, training=False)
yPred = classifier.predict(X)
indexField = data['PassengerId'].values.reshape(data.shape[0], 1)
#self.plot(X, yPred, xLabels, yLabel, classTags) # Plot after prediction
#printData(X, yPred, xLabels, yLabel)
self.writeOutput(indexField, yPred, 'output/titanic_prediction.csv', colHeaders=['PassengerId', 'Survived'])
def writeOutput(self, X, y, fileName, delim=',', colHeaders=None):
if colHeaders is None:
print(' Headless Write in ', fileName)
data = np.hstack([X, y])
np.savetxt(fileName, data, fmt='%.d', delimiter=delim)
else:
self.printData(X, y, colHeaders[0:len(colHeaders)-1], colHeaders[len(colHeaders)-1], delim=',', fileName=fileName)
print('Output written to ', fileName)
return
# Print house prices with specific number of columns
def printData(self, X, y, xLabels, yLabel, delim='\t', fileName=None):
rows, cols = X.shape
if (rows != y.shape[0]) :
return
headLine = ''
colheads = len(xLabels)
for c in range(0, colheads):
headLine += xLabels[c] + delim
headLine += yLabel +str('\n')
bodyLine = ''
for r in range(0, rows):
for c in range(0, cols):
bodyLine += str(X[r, c]) + delim
bodyLine += str(y[r,0])
bodyLine += str('\n')
if fileName is None:
print(headLine)
print (bodyLine)
else:
with open(fileName, "w") as f:
f.write(headLine)
f.write(bodyLine)
# Plotting dataset
def plot(self, X, y, xLabels, yLabel, classLabels):
plt.figure(figsize=(15,4), dpi=100)
y = y.ravel()
rows, cols = X.shape
if cols != len(xLabels):
return
for c in range(0, cols):
plt.subplot(1, cols, c+1)
Xy0 = X[y == 0][:, c]
Xy1 = X[y == 1][:, c]
plt.scatter(range(1, Xy0.shape[0]+1), Xy0, color='r', label=classLabels[0])
plt.scatter(range(1, Xy1.shape[0]+1), Xy1, color='b', label=classLabels[1])
plt.xlabel('Passenger #')
plt.ylabel(xLabels[c])
plt.legend()
plt.show()
def preprocessTitanicData(self, data, xLabels, yLabel=None, training=True):
y = None
if training:
y = data[yLabel].values
y = y.reshape(len(y), 1)
y = y.astype('int64')
data = data[xLabels]
data['Sex'] = data['Sex'].map({'male':1, 'female':0})
data['Embarked'] = data['Embarked'].map({'C':1, 'Q':2, 'S':3})
meanAge = np.mean(data['Age'])
data['Age'] = data['Age'].fillna(meanAge)
data['Ticket'] = pd.to_numeric(data['Ticket'], errors='coerce')
data['Ticket'] = data['Ticket'].fillna(0.0)
X = data.values.astype('int64')
return X, y
if __name__ == '__main__':
    TitanicBinaryClassifier(epoch=100, alpha=0.544, reg=1)
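# Expects the Kaggle Titanic CSVs at input/titanic_train.csv and
# input/titanic_test.csv; writes output/titanic_validation.csv and
# output/titanic_prediction.csv, and saves the trained model under
# model/titanic_classifier.model.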
|
[
"noreply@github.com"
] |
psengupta1973.noreply@github.com
|
327f38630fcf0d0b156f9b662c837293bff9d49b
|
0d49e8bfc6cf4bd390b5aa3103d6cb780305a09a
|
/analysis.py
|
64c35320313fa5be3df7653887216f217c40e772
|
[] |
no_license
|
djlee9812/basketball-shot
|
6b5faec6288194a9c587cf99804dee445910e227
|
7ae26b14be7ce0353c955fc3830e73b888815c83
|
refs/heads/master
| 2022-09-22T23:48:54.591415
| 2020-06-07T03:18:02
| 2020-06-07T03:18:02
| 266,488,667
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,785
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, Circle
from mpl_toolkits.mplot3d import Axes3D
import mpl_toolkits.mplot3d.art3d as art3d
from multiprocessing import Pool
from simulate import Ball
import time
import os
plt.style.use('seaborn')
def data_gen(speeds, phis, h, thetas, omegas):
for i, v in enumerate(speeds):
for j, phi in enumerate(phis):
yield i, j, v, phi, h, thetas[i,j], omegas[i,j]
def compute_shot(args):
i, j, v, phi, h, theta, omega = args
ball = Ball(15, 0, h, v, phi, theta, omega)
return i, j, ball.score
def noisy_data_gen(speeds, phis, h, thetas, omegas, num_trial):
for i, v in enumerate(speeds):
for j, phi in enumerate(phis):
yield i, j, v, phi, h, thetas[i,j], omegas[i,j], num_trial
def noisy_compute_shot(args):
i, j, v, phi, h, theta, omega, num_trial = args
count = 0
for t in range(num_trial):
theta_i = theta + np.random.normal(0, 1.2)
phi_i = phi + np.random.normal(0, 3)
v_i = v + np.random.normal(0, 0.6)
ball = Ball(15, 0, h, v_i, phi_i, theta_i, omega)
count += ball.score
if t > 3 and count == 0: break
return i, j, count
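# noisy_compute_shot models shooter variability: each trial perturbs theta,
# phi and v with zero-mean Gaussian noise (sigmas 1.2 deg, 3 deg and 0.6 ft/s),
# and a grid cell is abandoned early once the first few trials all miss.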
def free_throws(height, noise=False, data_file="temp.npz", save=False):
nspeed = 100
nphi = 100
speeds = np.linspace(23, 34, nspeed)
phis = np.linspace(35, 70, nphi)
thetas = np.zeros((nspeed, nphi))
omegas = 5 * np.ones((nspeed, nphi))
scored = np.zeros((nphi, nspeed))
start = time.time()
pool = Pool()
if noise:
num_trials = 10
results = pool.map(noisy_compute_shot,
noisy_data_gen(speeds, phis, height, thetas,
omegas, num_trials))
else:
results = pool.map(compute_shot, data_gen(speeds, phis, height, thetas,
omegas))
for i, j, score in results:
scored[j, i] = score
np.savez(data_file, speeds=speeds, phis=phis, thetas=thetas,
omegas=omegas, scored=scored)
print(np.round((time.time() - start)/(nspeed*nphi),3), "secs per shot")
plot_data(data_file)
if not save:
os.remove(data_file)
def plot_data(data_file):
data = np.load(data_file)
speeds, phis, thetas, omegas, scored = [data[arr] for arr in data.files]
plt.figure()
X, Y = np.meshgrid(speeds, phis)
plt.pcolormesh(X, Y, scored)
plt.xlabel("Speeds [ft/s]")
plt.ylabel("Launch Angle [deg]")
plt.title("Free Throws at Various Launch Speeds and Angles")
plt.tight_layout()
plt.show()
data.close()
if __name__ == "__main__":
# free_throws(6)
plot_data("data.npz")
|
[
"dongjoon.wow@gmail.com"
] |
dongjoon.wow@gmail.com
|
5d0a3052244721daea9285d697ee1ef8a39f1b90
|
dd792d3b72583eb87b713049e60e281a4185a3ba
|
/models/docker/eDN/model/run_model.py
|
19f73d3bd0fd3e24ed285b2ae97a325749743daf
|
[
"MIT"
] |
permissive
|
TsotsosLab/SMILER
|
59b9c3f5b7a7d9109924d4bce6d3edb5d03222e8
|
427ce783a4477db550dd8fe064a38e3b71db346e
|
refs/heads/master
| 2023-07-22T18:39:26.495739
| 2022-11-25T21:35:07
| 2022-11-25T21:35:07
| 162,616,795
| 41
| 19
|
NOASSERTION
| 2023-07-06T21:37:59
| 2018-12-20T18:32:31
|
MATLAB
|
UTF-8
|
Python
| false
| false
| 1,465
|
py
|
#!/usr/bin/env python
import os
import pickle
import numpy as np
from scipy import misc, ndimage
from eDNSalModel import EDNSaliencyModel
from liblinearutil import load_model
from smiler_tools.runner import run_model
os.environ['GLOG_minloglevel'] = '3' # Suppress logging.
if __name__ == "__main__":
desc_file_path = 'slmBestDescrCombi.pkl'
with open(desc_file_path) as fp:
desc = pickle.load(fp)
nFeatures = np.sum([
d['desc'][-1][0][1]['initialize']['n_filters'] for d in desc
if d != None
])
# load SVM model and whitening parameters
svm_path = 'svm-slm-cntr'
svm = load_model(svm_path)
whiten_path = 'whiten-slm-cntr'
with open(whiten_path) as fp:
whitenParams = np.asarray(
[map(float, line.split(' ')) for line in fp]).T
# assemble svm model
svmModel = {'svm': svm, 'whitenParams': whitenParams}
biasToCntr = (svm.get_nr_feature() - nFeatures) == 1
def eDNsaliency(image_path):
img = misc.imread(image_path, mode='RGB')
# compute saliency map
model = EDNSaliencyModel(desc, svmModel, biasToCntr)
salMap = model.saliency(img, normalize=False)
salMap = salMap.astype('f')
# normalize and save the saliency map to disk
normSalMap = (255.0 / (salMap.max() - salMap.min()) *
(salMap - salMap.min())).astype(np.uint8)
return normSalMap
run_model(eDNsaliency)
|
[
"tkunic@rocketmail.com"
] |
tkunic@rocketmail.com
|
0029f4576ea567e6e2b893539535522728b4bab9
|
f3c09c5059ae7f228dc07e64dbc256c290a65bcf
|
/backend/src/lectures/models.py
|
425c7925c215729fb042ba620f3760bc96c3e2cb
|
[] |
no_license
|
Rados13/Hall-of-Fame
|
a1005295d2c846fc6a7a6c61efc0abc455db0617
|
41ae774fa80311fde152a6b9e6b0675690ac7b3e
|
refs/heads/master
| 2023-01-12T20:56:31.453275
| 2021-02-04T08:27:49
| 2021-02-04T08:27:49
| 246,289,430
| 1
| 1
| null | 2023-01-05T19:38:42
| 2020-03-10T12:01:50
|
Python
|
UTF-8
|
Python
| false
| false
| 681
|
py
|
from HallOfFame.settings import AUTH_USER_MODEL
from djongo import models
from django import forms
class Lecture(models.Model):
lecture = models.ForeignKey(AUTH_USER_MODEL, models.PROTECT, blank=False, null=False)
main_lecture = models.BooleanField(default=True)
objects = models.DjongoManager()
# def __iter__(self):
# yield 'lecture_id', self.lecture
# yield 'main_lecture', self.main_lecture
# class Meta:
# abstract = True
class LectureForm(forms.ModelForm):
class Meta:
model = Lecture
fields = ('lecture', 'main_lecture')
|
[
"radoslawszuma@gmail.com"
] |
radoslawszuma@gmail.com
|
e27582701ca561e08b0957f378b19cb2d02a55b7
|
4167af383bd1f8bc7807435d0f877027e21015e2
|
/OnlineShopping/design/models.py
|
e14077342e47cba1fe6d6250350887c66df0aab3
|
[] |
no_license
|
papry/shop
|
1bfa15dd6455b9d474e263661d58016da2d3e0c8
|
e009025bbbb2d5a12526f807e37bedbcc384e79d
|
refs/heads/master
| 2020-06-16T11:42:49.421777
| 2019-07-11T17:12:37
| 2019-07-11T17:12:37
| 195,559,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,593
|
py
|
from django.db import models
class category(models.Model):
    Category_name = models.CharField(max_length=250, default="")
class Supplier(models.Model):
    Supplier_name = models.CharField(max_length=250, default="")
    Address = models.CharField(max_length=250, default="")
    Phone_no = models.IntegerField(default=2000)
class Product(models.Model):
    Category = models.ForeignKey(category, on_delete=models.CASCADE)
    Supplier = models.ForeignKey(Supplier, on_delete=models.CASCADE)
    Product_name = models.CharField(max_length=250, default="")
    description = models.CharField(max_length=250, default="")
    Stock = models.IntegerField(default=20000)
    Price = models.IntegerField(default=0)
class Admin(models.Model):
    Product = models.ForeignKey(Product, on_delete=models.CASCADE)
    Admin_name = models.CharField(max_length=250, default="")
    Password = models.CharField(max_length=250, default="")
class Customer(models.Model):
    Product = models.ForeignKey(Product, on_delete=models.CASCADE)
    Customer_name = models.CharField(max_length=250, default="")
    Email = models.CharField(max_length=250, default="")
    Password = models.CharField(max_length=250, default="")
    Phone_no = models.CharField(max_length=250, default="")
class Payment(models.Model):
    Customer = models.ForeignKey(Customer, on_delete=models.CASCADE)
    Payment_type = models.CharField(max_length=250, default="")
    Payment_date = models.CharField(max_length=250, default="")
    Quantity = models.IntegerField(default=0)
    Amount = models.IntegerField(default=0)
|
[
"52271203+papry@users.noreply.github.com"
] |
52271203+papry@users.noreply.github.com
|
d4d2e535b3c074389af1df2de93a66c43c2933d7
|
7eed34bb9d3054cec1135f07caf5ba3b9098b177
|
/logging/LogWithWarning.py
|
6e37221d99c35dcb38dc25da5178558b8ff34aad
|
[] |
no_license
|
Rocia/Learning-python
|
108ede736e245d427c8c09e8a9c258356d876c50
|
46985466130b62a4c8df0fa61a3e7e91bd370d48
|
refs/heads/master
| 2020-12-30T22:36:03.705676
| 2018-10-30T01:37:42
| 2018-10-30T01:37:42
| 80,645,173
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,002
|
py
|
import logging
import sys
LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
if len(sys.argv) > 1:
level_name = sys.argv[1]
level = LEVELS.get(level_name, logging.NOTSET)
logging.basicConfig(level=level)
logging.debug('This is a debug message')
logging.info('This is an info message')
logging.warning('This is a warning message')
logging.error('This is an error message')
logging.critical('This is a critical error message')
'''
$ python logging_level_example.py debug
DEBUG:root:This is a debug message
INFO:root:This is an info message
WARNING:root:This is a warning message
ERROR:root:This is an error message
CRITICAL:root:This is a critical error message
$ python logging_level_example.py info
INFO:root:This is an info message
WARNING:root:This is a warning message
ERROR:root:This is an error message
CRITICAL:root:This is a critical error message
'''
|
[
"rocia.fernandes@gmail.com"
] |
rocia.fernandes@gmail.com
|
c26461e3c73ec95c419007538058e74fa9a1bb53
|
7f59e2c4e771c19378e9839406c220d3985e7efe
|
/python-daemon/marvin_python_daemon/management/engine.py
|
799c3a1fd75de86cf7a48aab538f9e000281cd6c
|
[
"Apache-2.0"
] |
permissive
|
apache/incubator-marvin
|
c6ff32d50eb01ccd84266587d79f562a9e371496
|
58fdccf2e677041a13966ddbdd96d484edf3b474
|
refs/heads/develop
| 2023-08-30T12:46:56.973102
| 2022-11-18T15:27:52
| 2022-11-18T15:27:52
| 148,087,939
| 112
| 77
|
Apache-2.0
| 2023-03-07T05:45:59
| 2018-09-10T02:27:54
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 8,874
|
py
|
#!/usr/bin/env python
# coding=utf-8
# Copyright [2020] [Apache Software Foundation]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import os
import sys
import time
import os.path
import subprocess
import multiprocessing
from ..common.profiling import profiling
from ..common.data import MarvinData
from ..common.log import get_logger
from ..common.config import Config, load_conf_from_file
logger = get_logger('management.engine')
CLAZZES = {
"acquisitor": "AcquisitorAndCleaner",
"tpreparator": "TrainingPreparator",
"trainer": "Trainer",
"evaluator": "MetricsEvaluator",
"ppreparator": "PredictionPreparator",
"predictor": "Predictor",
"feedback": "Feedback"
}
ARTIFACTS = {
"AcquisitorAndCleaner": [],
"TrainingPreparator": ["initialdataset"],
"Trainer": ["dataset"],
"MetricsEvaluator": ["dataset", "model"],
"PredictionPreparator": ["model", "metrics"],
"Predictor": ["model", "metrics"],
"Feedback": []
}
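# ARTIFACTS maps each action class to the persisted artifacts it must reload
# before executing: e.g. the Trainer needs the prepared "dataset", while the
# Predictor needs both the trained "model" and its "metrics".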
def dryrun(config, action, profiling):
# setting spark configuration directory
os.environ["SPARK_CONF_DIR"] = os.path.join(
os.environ["SPARK_HOME"], "conf")
os.environ["YARN_CONF_DIR"] = os.environ["SPARK_CONF_DIR"]
params = read_file('engine.params')
messages_file = read_file('engine.messages')
feedback_file = read_file('feedback.messages')
if action == 'all':
pipeline = ['acquisitor', 'tpreparator', 'trainer',
'evaluator', 'ppreparator', 'predictor', 'feedback']
else:
pipeline = [action]
_dryrun = MarvinDryRun(config=config, messages=[
messages_file, feedback_file])
initial_start_time = time.time()
for step in pipeline:
_dryrun.execute(clazz=CLAZZES[step],
params=params, profiling_enabled=profiling)
logger.info("Total Time : {:.2f}s".format(
time.time() - initial_start_time))
class MarvinDryRun(object):
def __init__(self, config, messages):
self.predictor_messages = messages[0]
self.feedback_messages = messages[1]
self.pmessages = []
self.package_name = config['marvin_package']
def execute(self, clazz, params, profiling_enabled=False):
self.print_start_step(clazz)
_Step = dynamic_import("{}.{}".format(self.package_name, clazz))
kwargs = generate_kwargs(self.package_name, _Step, params)
step = _Step(**kwargs)
def call_online_actions(step, msg, msg_idx):
            if profiling_enabled:
                with profiling(output_path=".profiling", uid=clazz) as prof:
                    result = step.execute(input_message=msg, params=params)
                    prof.disable()  # stop collecting before leaving the context
                logger.info(
                    "\nProfile images created in {}\n".format(prof.image_path))
            else:
                result = step.execute(input_message=msg, params=params)
            return result
if clazz == 'PredictionPreparator':
for idx, msg in enumerate(self.predictor_messages):
self.pmessages.append(call_online_actions(step, msg, idx))
elif clazz == 'Feedback':
for idx, msg in enumerate(self.feedback_messages):
self.pmessages.append(call_online_actions(step, msg, idx))
elif clazz == 'Predictor':
self.execute("PredictionPreparator", params)
self.pmessages = self.messages if not self.pmessages else self.pmessages
for idx, msg in enumerate(self.pmessages):
call_online_actions(step, msg, idx)
else:
            if profiling_enabled:
                with profiling(output_path=".profiling", uid=clazz) as prof:
                    step.execute(params=params)
                    prof.disable()  # stop collecting before leaving the context
                logger.info(
                    "\nProfile images created in {}\n".format(prof.image_path))
            else:
                step.execute(params=params)
self.print_finish_step()
def print_finish_step(self):
logger.info("STEP TAKES {:.4f} (seconds) ".format(
(time.time() - self.start_time)))
def print_start_step(self, name):
logger.info("MARVIN DRYRUN - STEP [{}]".format(name))
self.start_time = time.time()
def dynamic_import(clazz):
components = clazz.split('.')
mod = __import__(components[0])
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
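# e.g. dynamic_import("my_engine.Trainer") imports the top-level package and
# walks the remaining dotted path with getattr, returning the class object
# (the dotted name is illustrative, not taken from a real engine).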
def read_file(filename):
fname = os.path.join("", filename)
if os.path.exists(fname):
logger.info("Engine file {} loaded!".format(filename))
with open(fname, 'r') as fp:
return json.load(fp)
else:
logger.info("Engine file {} doesn't exists...".format(filename))
return {}
def generate_kwargs(package_name, clazz, params=None, initial_dataset='initialdataset', dataset='dataset', model='model', metrics='metrics'):
kwargs = {}
kwargs["persistence_mode"] = 'local'
kwargs["default_root_path"] = os.path.join(
os.getenv('MARVIN_DATA_PATH'), '.artifacts')
kwargs["is_remote_calling"] = True
_artifact_folder = package_name.replace(
'marvin_', '').replace('_engine', '')
_artifacts_to_load = ARTIFACTS[clazz.__name__]
logger.debug("clazz: {0}, artifacts to load: {1}".format(clazz, str(_artifacts_to_load)))
if params:
kwargs["params"] = params
if dataset in _artifacts_to_load:
kwargs["dataset"] = clazz.retrieve_obj(os.path.join(kwargs["default_root_path"],
_artifact_folder, dataset))
if initial_dataset in _artifacts_to_load:
kwargs["initial_dataset"] = clazz.retrieve_obj(os.path.join(kwargs["default_root_path"],
_artifact_folder, initial_dataset))
if model in _artifacts_to_load:
kwargs["model"] = clazz.retrieve_obj(os.path.join(kwargs["default_root_path"],
_artifact_folder, model))
if metrics in _artifacts_to_load:
kwargs["metrics"] = clazz.retrieve_obj(os.path.join(kwargs["default_root_path"],
_artifact_folder, metrics))
return kwargs
class MarvinEngineServer(object):
@classmethod
def create(self, config, action, port, workers, rpc_workers, params, pipeline):
package_name = config['marvin_package']
def create_object(act):
clazz = CLAZZES[act]
_Action = dynamic_import("{}.{}".format(package_name, clazz))
kwargs = generate_kwargs(package_name, _Action, params)
return _Action(**kwargs)
root_obj = create_object(action)
previous_object = root_obj
if pipeline:
for step in list(reversed(pipeline)):
previous_object._previous_step = create_object(step)
previous_object = previous_object._previous_step
server = root_obj._prepare_remote_server(
port=port, workers=workers, rpc_workers=rpc_workers)
logger.info(
"Starting GRPC server [{}] for {} Action".format(port, action))
server.start()
return server
def engine_server(config, action, max_workers, max_rpc_workers):
logger.info("Starting server ...")
# setting spark configuration directory
os.environ["SPARK_CONF_DIR"] = os.path.join(
os.environ["SPARK_HOME"], "conf")
os.environ["YARN_CONF_DIR"] = os.environ["SPARK_CONF_DIR"]
params = read_file('engine.params')
metadata = read_file('engine.metadata')
    default_actions = {action['name']: action for action in metadata['actions']}
if action == 'all':
action = default_actions
else:
action = {action: default_actions[action]}
servers = []
for action_name in action.keys():
# initializing server configuration
engine_server = MarvinEngineServer.create(
config=config,
action=action_name,
port=action[action_name]["port"],
workers=max_workers,
rpc_workers=max_rpc_workers,
params=params,
pipeline=action[action_name]["pipeline"]
)
servers.append(engine_server)
return servers
|
[
"cardosolucas61.lcs@gmail.com"
] |
cardosolucas61.lcs@gmail.com
|
a1c9c5f9f613b30cfd954f6e409d7d177b5624dc
|
fb9430533beaac089c86643978aba2b9a3520940
|
/api/scrapy/migrations/0004_auto_20190620_2004.py
|
5865e4ae284344a713011aec7ca67c167bafa19e
|
[] |
no_license
|
danielaguiladev/sistemasDistribuidos
|
3eaf0209192ba2c7e5b4f7afac596dd51d81408b
|
b5bd2d285f74f263a18ca764ed1afbdc54e77474
|
refs/heads/master
| 2022-12-10T10:52:26.103099
| 2019-06-23T20:47:16
| 2019-06-23T20:47:16
| 188,890,763
| 0
| 0
| null | 2022-09-23T22:24:32
| 2019-05-27T18:21:49
|
Python
|
UTF-8
|
Python
| false
| false
| 379
|
py
|
# Generated by Django 2.1.9 on 2019-06-20 20:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scrapy', '0003_pagina_rank'),
]
operations = [
migrations.AlterField(
model_name='pagina',
name='titulo',
field=models.CharField(max_length=500),
),
]
|
[
"sandro.oliveira@hotmart.com"
] |
sandro.oliveira@hotmart.com
|
63458ff3643479aa3802996a82f4d552211d7544
|
5d0e76e3c741adc120ce753bacda1e723550f7ac
|
/500. Keyboard Row.py
|
f6a10f1aceaf75210657ff71c086096061726f32
|
[] |
no_license
|
GoldF15h/LeetCode
|
d8d9d5dedca3cce59f068b94e2edf986424efdbf
|
56fcbede20e12473eaf09c9d170c86fdfefe7f87
|
refs/heads/main
| 2023-08-25T12:31:08.436640
| 2021-10-20T04:36:23
| 2021-10-20T04:36:23
| 392,336,347
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 601
|
py
|
def sol(words):
    rows = ['qwertyuiop', 'asdfghjkl', 'zxcvbnm']
    op = []
    for i in words:
        curWord = i.lower()
        for curRow in rows:
            # print(curWord, curRow)
            isAns = True
            for curChr in curWord:
                # print(curChr, end=' ')
                if curChr not in curRow:
                    isAns = False
                    break
            # print()
            if isAns:
                op.append(i)
    return op
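# A set-based sketch of the same check (equivalent for non-empty words): a
# word can be typed on one row iff its letters form a subset of that row.
def sol_sets(words):
    rows = [set('qwertyuiop'), set('asdfghjkl'), set('zxcvbnm')]
    return [w for w in words if any(set(w.lower()) <= r for r in rows)]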
if __name__ == "__main__" :
l = list( x.strip('"') for x in input().strip('[]').split(',') )
print(sol(l))
|
[
"todsapon.singsunjit@gmail.com"
] |
todsapon.singsunjit@gmail.com
|
c5520f29f722d2f60680f169174492076f580824
|
898eb5f4d2511901e4a8db27ef2940f92033f065
|
/1/mass_sanity.py
|
90a51acf25bce15d5911e1750ce01c33ea4fb1f8
|
[
"MIT"
] |
permissive
|
Migelo/mpa_garching
|
b4e15a824be1a13c247848e0d73439ee2d599470
|
86d0eacb7ae4bbf0f382002d94610f799dd73b62
|
refs/heads/master
| 2023-07-25T15:39:34.985620
| 2023-07-18T14:14:50
| 2023-07-18T14:14:50
| 95,018,912
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,006
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import pygad as pg
from scipy import stats
import glob
from multiprocessing import Pool
import utils
s, h, g = pg.prepare_zoom('/ptmp/mpa/naab/REFINED/M1196/SF_X/4x-2phase/out/snap_M1196_4x_470', gas_trace='/u/mihac/data/M1196/4x-2phase/gastrace_disc', star_form=None)
s1, h1, g1 = pg.prepare_zoom('/ptmp/mpa/naab/REFINED/M1196/SF_X/4x-2phase/out/snap_M1196_4x_070', gas_trace=None, star_form=None)
R200_frac, Tcrit, rhocrit = [.15, '2e4 K', '1e-2 u/cm**3']
R200, M200 = pg.analysis.virial_info(s1)
s1_ism = s1.gas[pg.BallMask(R200_frac*R200) & \
pg.ExprMask('(temp < "%s") & (rho > "%s")' % (Tcrit,rhocrit)) ]
s1_mass = s1_ism['mass'].sum()
s_ism = s.gas[pg.BallMask(R200_frac*R200) & \
pg.ExprMask('(temp < "%s") & (rho > "%s")' % (Tcrit,rhocrit)) ]
s_mass = s_ism['mass'].sum()
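# s1_ism / s_ism select the ISM in the two snapshots loaded above: gas within
# 0.15 * R200 (R200 taken from the _070 snapshot) that is colder than 2e4 K
# and denser than 1e-2 u/cm**3.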
print((g.stars['mass'].sum() - g1.stars['mass'].sum()) / (s.gas['mass_at_infall'].sum() - s.gas['mass_at_ejection'].sum()))
|
[
"miha@filetki.si"
] |
miha@filetki.si
|
96e37625042374b17555deb0b7701d1fb33ae61a
|
8dc84558f0058d90dfc4955e905dab1b22d12c08
|
/third_party/android_sdk/public/platform-tools/systrace/catapult/common/py_utils/py_utils/refactor/__init__.py
|
9ba7aafe94dcee9e282650b09f8042e0de2c7d04
|
[
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
meniossin/src
|
42a95cc6c4a9c71d43d62bc4311224ca1fd61e03
|
44f73f7e76119e5ab415d4593ac66485e65d700a
|
refs/heads/master
| 2022-12-16T20:17:03.747113
| 2020-09-03T10:43:12
| 2020-09-03T10:43:12
| 263,710,168
| 1
| 0
|
BSD-3-Clause
| 2020-05-13T18:20:09
| 2020-05-13T18:20:08
| null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
../../../../../../../../../../../.cipd/pkgs/82/_current/platform-tools/systrace/catapult/common/py_utils/py_utils/refactor/__init__.py
|
[
"arnaud@geometry.ee"
] |
arnaud@geometry.ee
|
fa37911eb19ad739ecb1ecc4d559184d9b828cb4
|
3de5038f4998c493a1f5cc058fb359c36c019936
|
/Test/SQL.py
|
2f11a97d95b751fc16a5de9c3225ce5f2dd6d58d
|
[] |
no_license
|
vivek-gour/MyProjects
|
880158d7c5ca040c41e5e565f082b64b66d4dbba
|
e3106b09cf9b27672c2ca212c4ce014beac4585b
|
refs/heads/master
| 2021-05-29T16:57:33.453304
| 2015-09-07T06:25:03
| 2015-09-07T06:25:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
__author__ = 'vivek.gour'
import pymssql
conn = pymssql.connect(server='175.41.138.226:3784', user='CEVA_READER', password='fe3A7deF4889a', database='live_billingrating', as_dict=True)
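# as_dict=True makes the cursor return rows as dicts keyed by column name,
# which the row['InvoiceNumber'] / row['BillingAmount'] lookups below rely on.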
cursor = conn.cursor()
cursor.execute("Select top 10 * from invoicebilling")
row = cursor.fetchone()
while row:
print "Invoice = %s, Amount = %s" % (row['InvoiceNumber'],row['BillingAmount'])
row = cursor.fetchone()
conn.close()
|
[
"vivek.gour@searce.com"
] |
vivek.gour@searce.com
|
5a39ecc8039d63fdab59f78bc1b8d36960daf619
|
e066cb735cb7bcfa539999b8451f9015659a4926
|
/Mobile Embedded/Source_code/camera.py
|
0d7eea1b421973760bb501e8e301cdad2035614a
|
[] |
no_license
|
DazhiLi-hub/Dazhi-Project
|
4eef060f1d08278c7fec74dc2b9f773fbee75308
|
68f191832384dfb8e7b5b2a23d186c1d5956dc28
|
refs/heads/master
| 2023-07-19T08:41:02.785586
| 2021-09-08T07:46:39
| 2021-09-08T07:46:39
| 240,947,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,239
|
py
|
from Tkinter import *
import time
from picamera import PiCamera
from picamera.array import PiRGBArray
import cv2
from PIL import Image,ImageTk
def camera_setup():
camera=PiCamera()
camera.resolution=(640,480)
camera.framerate=30
rawCapture=PiRGBArray(camera,size=(640,480))
time.sleep(0.1)
'''
for frame in camera.capture_continuous(rawCapture,format="bgr",use_video_port=True):
image=frame.array
cv2.imshow("Frame",image)
key=cv2.waitKey(1) & 0xFFF
rawCapture.truncate(0)
if key == ord("q"):
break
'''
def video_loop():
    success, image = camera.read()  # grab a frame from the cv2.VideoCapture below
    if success:
        cv2image = cv2.cvtColor(image, cv2.COLOR_BGR2RGBA)  # OpenCV BGR -> PIL RGBA
        current_image = Image.fromarray(cv2image)
        imgtk = ImageTk.PhotoImage(image=current_image)
        panel.imgtk = imgtk  # keep a reference so Tkinter does not drop the image
        panel.config(image=imgtk)
    root.after(1, video_loop)  # reschedule unconditionally to keep the loop alive
camera = cv2.VideoCapture(0)
root = Tk()
root.title("opencv + tkinter")
#root.protocol('WM_DELETE_WINDOW', detector)
panel = Label(root) # initialize image panel
panel.pack(padx=10, pady=10)
root.config(cursor="arrow")
video_loop()
root.mainloop()
camera.release()
cv2.destroyAllWindows()
|
[
"61103944+DazhiLi-hub@users.noreply.github.com"
] |
61103944+DazhiLi-hub@users.noreply.github.com
|
b6ed7de6e924c9d26f8715f9c246bbccdd0fbd09
|
6ff85b80c6fe1b3ad5416a304b93551a5e80de10
|
/Python/Algorithm/LoopExit.py
|
0a1375af2ad8e2890c7bd7c2d0fc4444e93de2e4
|
[
"MIT"
] |
permissive
|
maniero/SOpt
|
c600cc2333e0a47ce013be3516bbb8080502ff2a
|
5d17e1a9cbf115eaea6d30af2079d0c92ffff7a3
|
refs/heads/master
| 2023-08-10T16:48:46.058739
| 2023-08-10T13:42:17
| 2023-08-10T13:42:17
| 78,631,930
| 1,002
| 136
|
MIT
| 2023-01-28T12:10:01
| 2017-01-11T11:19:24
|
C#
|
UTF-8
|
Python
| false
| false
| 190
|
py
|
cont = 0
n = 0
total = 0
while True:
n = int(input("Digite 999 para parar"))
if n == 999: break
cont += 1
total += n
print(total)
#https://pt.stackoverflow.com/q/350241/101
|
[
"noreply@github.com"
] |
maniero.noreply@github.com
|
e839214c1b0de2c25b6aa79840b0d97e371c5ea9
|
765831c8fd7599ec55e445a29effef4520e082e2
|
/store/migrations/0001_initial.py
|
46626fddd80734d5750e9ab81980a9b32f9ed12d
|
[] |
no_license
|
naborit/coviessentials
|
4f0b2e850febe2fcee5d8eece0eb19c5b0a41301
|
98bbd0de26d4840bdbe44f462b2f614965029d82
|
refs/heads/master
| 2023-06-18T22:49:05.161911
| 2021-07-24T17:38:47
| 2021-07-24T17:38:47
| 389,159,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 721
|
py
|
# Generated by Django 3.2.4 on 2021-06-20 09:24
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('price', models.IntegerField(default=0)),
('description', models.CharField(default=' ', max_length=200)),
('image', models.ImageField(upload_to='products/')),
],
),
]
|
[
"naboritdutta007@gmail.com"
] |
naboritdutta007@gmail.com
|
a60d2141b736e4debbe112d48f950d21a12ece63
|
011b1f69d6d4b6cd6a837da9b35d85f09fcc3b9c
|
/build/catkin_generated/order_packages.py
|
8cb9af5fb6790b1365684594a35668d4dccba209
|
[] |
no_license
|
Young-Geo/Car
|
53b6494aee3959588aa0222e27d9abbd40889d81
|
2cdfc8b5c92a9767a96980d209af92f3f877071e
|
refs/heads/master
| 2021-09-04T00:27:46.027369
| 2018-01-13T08:38:13
| 2018-01-13T08:38:13
| 108,971,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
# generated from catkin/cmake/template/order_packages.context.py.in
source_root_dir = "/home/pi/Car/src"
whitelisted_packages = "".split(';') if "" != "" else []
blacklisted_packages = "".split(';') if "" != "" else []
underlay_workspaces = "/home/pi/Car/devel;/opt/ros/indigo".split(';') if "/home/pi/Car/devel;/opt/ros/indigo" != "" else []
|
[
"anxan524@126.com"
] |
anxan524@126.com
|
9593d313764adf2c2a0ed60d41f1b809b0f8cd12
|
7b03ef2c0ee7aefbb6d8243340e0b2f12e3f6126
|
/ex13.py
|
4e3adc4375f7282fe69ba2144bdc519a52475f0d
|
[] |
no_license
|
mrama030/Learning_Python_Exercises
|
d6211bf2c38b11f490c7e361dbaa376771fbad72
|
e95653db7d269191a5763ed50105303a77dfab32
|
refs/heads/master
| 2020-04-21T14:19:10.366350
| 2019-02-08T16:06:49
| 2019-02-08T16:06:49
| 169,630,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
from sys import argv
script, first, second, third = argv
print "The script is called: ", script
print "The first variable is: ", first
print "The second variable is: ", second
print "The third variable is: ", third
# Call script with 3 parameters:
# python ex13.py stuff things orange
|
[
"mrama030@uottawa.ca"
] |
mrama030@uottawa.ca
|
4ddc2772953e6d92e6c4584b63b9aa0456c129f0
|
1755cc4bc27a0b75165e8d643d91cc9b45a17aef
|
/ex1/ex1.py
|
25dace0ad18049e48578dc938916b176d9004c6a
|
[] |
no_license
|
andrealmar/learn-python-the-hard-way
|
dcd2635e430474ab20dd79501dc458e649eb29b8
|
85d76c3365ababab0000169f2bfab7fcf7a344a6
|
refs/heads/master
| 2021-01-13T12:07:28.023870
| 2017-02-10T19:31:04
| 2017-02-10T19:31:04
| 78,069,523
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
# -*- coding: utf-8 -*-
print "Hello World"
print "Hello Again"
print "I like typing this"
print "This is fun"
print 'Yay! Printing'
print "I'd much better rather you 'not'."
print 'i "said" do not touch this.'
print "testando utf 8 ççççc'áááééé"
|
[
"andre@y7mail.com"
] |
andre@y7mail.com
|
0f48852b0884b2321c31d1598a0b4376351ddbcf
|
e483b0515cca39f4ddac19645f03fc1695d1939f
|
/google/ads/google_ads/v1/proto/services/customer_client_link_service_pb2.py
|
e3d64431e4f6d39ec6700f77f73964e16f721547
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
BrunoWMello/google-ads-python
|
0af63d2ca273eee96efd8a33252d27112c049442
|
9b074a037d10f0c1208a00d5d41a8e5e25405f28
|
refs/heads/master
| 2020-05-27T04:37:47.669144
| 2019-05-24T17:07:31
| 2019-05-24T17:07:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 16,567
|
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v1/proto/services/customer_client_link_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v1.proto.resources import customer_client_link_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_customer__client__link__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v1/proto/services/customer_client_link_service.proto',
package='google.ads.googleads.v1.services',
syntax='proto3',
serialized_options=_b('\n$com.google.ads.googleads.v1.servicesB\036CustomerClientLinkServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V1.Services\312\002 Google\\Ads\\GoogleAds\\V1\\Services\352\002$Google::Ads::GoogleAds::V1::Services'),
serialized_pb=_b('\nIgoogle/ads/googleads_v1/proto/services/customer_client_link_service.proto\x12 google.ads.googleads.v1.services\x1a\x42google/ads/googleads_v1/proto/resources/customer_client_link.proto\x1a\x1cgoogle/api/annotations.proto\x1a google/protobuf/field_mask.proto\x1a\x1egoogle/protobuf/wrappers.proto\"5\n\x1cGetCustomerClientLinkRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t\"\x88\x01\n\x1fMutateCustomerClientLinkRequest\x12\x13\n\x0b\x63ustomer_id\x18\x01 \x01(\t\x12P\n\toperation\x18\x02 \x01(\x0b\x32=.google.ads.googleads.v1.services.CustomerClientLinkOperation\"\xed\x01\n\x1b\x43ustomerClientLinkOperation\x12/\n\x0bupdate_mask\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12G\n\x06\x63reate\x18\x01 \x01(\x0b\x32\x35.google.ads.googleads.v1.resources.CustomerClientLinkH\x00\x12G\n\x06update\x18\x02 \x01(\x0b\x32\x35.google.ads.googleads.v1.resources.CustomerClientLinkH\x00\x42\x0b\n\toperation\"t\n MutateCustomerClientLinkResponse\x12P\n\x06result\x18\x01 \x01(\x0b\x32@.google.ads.googleads.v1.services.MutateCustomerClientLinkResult\"7\n\x1eMutateCustomerClientLinkResult\x12\x15\n\rresource_name\x18\x01 \x01(\t2\xd4\x03\n\x19\x43ustomerClientLinkService\x12\xcd\x01\n\x15GetCustomerClientLink\x12>.google.ads.googleads.v1.services.GetCustomerClientLinkRequest\x1a\x35.google.ads.googleads.v1.resources.CustomerClientLink\"=\x82\xd3\xe4\x93\x02\x37\x12\x35/v1/{resource_name=customers/*/customerClientLinks/*}\x12\xe6\x01\n\x18MutateCustomerClientLink\x12\x41.google.ads.googleads.v1.services.MutateCustomerClientLinkRequest\x1a\x42.google.ads.googleads.v1.services.MutateCustomerClientLinkResponse\"C\x82\xd3\xe4\x93\x02=\"8/v1/customers/{customer_id=*}/customerClientLinks:mutate:\x01*B\x85\x02\n$com.google.ads.googleads.v1.servicesB\x1e\x43ustomerClientLinkServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V1.Services\xca\x02 Google\\Ads\\GoogleAds\\V1\\Services\xea\x02$Google::Ads::GoogleAds::V1::Servicesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_customer__client__link__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,])
_GETCUSTOMERCLIENTLINKREQUEST = _descriptor.Descriptor(
name='GetCustomerClientLinkRequest',
full_name='google.ads.googleads.v1.services.GetCustomerClientLinkRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v1.services.GetCustomerClientLinkRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=275,
serialized_end=328,
)
_MUTATECUSTOMERCLIENTLINKREQUEST = _descriptor.Descriptor(
name='MutateCustomerClientLinkRequest',
full_name='google.ads.googleads.v1.services.MutateCustomerClientLinkRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='customer_id', full_name='google.ads.googleads.v1.services.MutateCustomerClientLinkRequest.customer_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operation', full_name='google.ads.googleads.v1.services.MutateCustomerClientLinkRequest.operation', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=331,
serialized_end=467,
)
_CUSTOMERCLIENTLINKOPERATION = _descriptor.Descriptor(
name='CustomerClientLinkOperation',
full_name='google.ads.googleads.v1.services.CustomerClientLinkOperation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='update_mask', full_name='google.ads.googleads.v1.services.CustomerClientLinkOperation.update_mask', index=0,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='create', full_name='google.ads.googleads.v1.services.CustomerClientLinkOperation.create', index=1,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='update', full_name='google.ads.googleads.v1.services.CustomerClientLinkOperation.update', index=2,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='operation', full_name='google.ads.googleads.v1.services.CustomerClientLinkOperation.operation',
index=0, containing_type=None, fields=[]),
],
serialized_start=470,
serialized_end=707,
)
_MUTATECUSTOMERCLIENTLINKRESPONSE = _descriptor.Descriptor(
name='MutateCustomerClientLinkResponse',
full_name='google.ads.googleads.v1.services.MutateCustomerClientLinkResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='google.ads.googleads.v1.services.MutateCustomerClientLinkResponse.result', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=709,
serialized_end=825,
)
_MUTATECUSTOMERCLIENTLINKRESULT = _descriptor.Descriptor(
name='MutateCustomerClientLinkResult',
full_name='google.ads.googleads.v1.services.MutateCustomerClientLinkResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v1.services.MutateCustomerClientLinkResult.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=827,
serialized_end=882,
)
_MUTATECUSTOMERCLIENTLINKREQUEST.fields_by_name['operation'].message_type = _CUSTOMERCLIENTLINKOPERATION
_CUSTOMERCLIENTLINKOPERATION.fields_by_name['update_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_CUSTOMERCLIENTLINKOPERATION.fields_by_name['create'].message_type = google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_customer__client__link__pb2._CUSTOMERCLIENTLINK
_CUSTOMERCLIENTLINKOPERATION.fields_by_name['update'].message_type = google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_customer__client__link__pb2._CUSTOMERCLIENTLINK
_CUSTOMERCLIENTLINKOPERATION.oneofs_by_name['operation'].fields.append(
_CUSTOMERCLIENTLINKOPERATION.fields_by_name['create'])
_CUSTOMERCLIENTLINKOPERATION.fields_by_name['create'].containing_oneof = _CUSTOMERCLIENTLINKOPERATION.oneofs_by_name['operation']
_CUSTOMERCLIENTLINKOPERATION.oneofs_by_name['operation'].fields.append(
_CUSTOMERCLIENTLINKOPERATION.fields_by_name['update'])
_CUSTOMERCLIENTLINKOPERATION.fields_by_name['update'].containing_oneof = _CUSTOMERCLIENTLINKOPERATION.oneofs_by_name['operation']
_MUTATECUSTOMERCLIENTLINKRESPONSE.fields_by_name['result'].message_type = _MUTATECUSTOMERCLIENTLINKRESULT
DESCRIPTOR.message_types_by_name['GetCustomerClientLinkRequest'] = _GETCUSTOMERCLIENTLINKREQUEST
DESCRIPTOR.message_types_by_name['MutateCustomerClientLinkRequest'] = _MUTATECUSTOMERCLIENTLINKREQUEST
DESCRIPTOR.message_types_by_name['CustomerClientLinkOperation'] = _CUSTOMERCLIENTLINKOPERATION
DESCRIPTOR.message_types_by_name['MutateCustomerClientLinkResponse'] = _MUTATECUSTOMERCLIENTLINKRESPONSE
DESCRIPTOR.message_types_by_name['MutateCustomerClientLinkResult'] = _MUTATECUSTOMERCLIENTLINKRESULT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetCustomerClientLinkRequest = _reflection.GeneratedProtocolMessageType('GetCustomerClientLinkRequest', (_message.Message,), dict(
DESCRIPTOR = _GETCUSTOMERCLIENTLINKREQUEST,
__module__ = 'google.ads.googleads_v1.proto.services.customer_client_link_service_pb2'
,
__doc__ = """Request message for
[CustomerClientLinkService.GetCustomerClientLink][google.ads.googleads.v1.services.CustomerClientLinkService.GetCustomerClientLink].
Attributes:
resource_name:
The resource name of the customer client link to fetch.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.services.GetCustomerClientLinkRequest)
))
_sym_db.RegisterMessage(GetCustomerClientLinkRequest)
MutateCustomerClientLinkRequest = _reflection.GeneratedProtocolMessageType('MutateCustomerClientLinkRequest', (_message.Message,), dict(
DESCRIPTOR = _MUTATECUSTOMERCLIENTLINKREQUEST,
__module__ = 'google.ads.googleads_v1.proto.services.customer_client_link_service_pb2'
,
__doc__ = """Request message for
[CustomerClientLinkService.MutateCustomerClientLink][google.ads.googleads.v1.services.CustomerClientLinkService.MutateCustomerClientLink].
Attributes:
customer_id:
The ID of the customer whose customer link are being modified.
operation:
The operation to perform on the individual CustomerClientLink.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.services.MutateCustomerClientLinkRequest)
))
_sym_db.RegisterMessage(MutateCustomerClientLinkRequest)
CustomerClientLinkOperation = _reflection.GeneratedProtocolMessageType('CustomerClientLinkOperation', (_message.Message,), dict(
DESCRIPTOR = _CUSTOMERCLIENTLINKOPERATION,
__module__ = 'google.ads.googleads_v1.proto.services.customer_client_link_service_pb2'
,
__doc__ = """A single operation (create, update) on a CustomerClientLink.
Attributes:
update_mask:
FieldMask that determines which resource fields are modified
in an update.
operation:
The mutate operation.
create:
Create operation: No resource name is expected for the new
link.
update:
Update operation: The link is expected to have a valid
resource name.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.services.CustomerClientLinkOperation)
))
_sym_db.RegisterMessage(CustomerClientLinkOperation)
MutateCustomerClientLinkResponse = _reflection.GeneratedProtocolMessageType('MutateCustomerClientLinkResponse', (_message.Message,), dict(
DESCRIPTOR = _MUTATECUSTOMERCLIENTLINKRESPONSE,
__module__ = 'google.ads.googleads_v1.proto.services.customer_client_link_service_pb2'
,
__doc__ = """Response message for a CustomerClientLink mutate.
Attributes:
result:
A result that identifies the resource affected by the mutate
request.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.services.MutateCustomerClientLinkResponse)
))
_sym_db.RegisterMessage(MutateCustomerClientLinkResponse)
MutateCustomerClientLinkResult = _reflection.GeneratedProtocolMessageType('MutateCustomerClientLinkResult', (_message.Message,), dict(
DESCRIPTOR = _MUTATECUSTOMERCLIENTLINKRESULT,
__module__ = 'google.ads.googleads_v1.proto.services.customer_client_link_service_pb2'
,
__doc__ = """The result for a single customer client link mutate.
Attributes:
resource_name:
Returned for successful operations.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.services.MutateCustomerClientLinkResult)
))
_sym_db.RegisterMessage(MutateCustomerClientLinkResult)
DESCRIPTOR._options = None
_CUSTOMERCLIENTLINKSERVICE = _descriptor.ServiceDescriptor(
name='CustomerClientLinkService',
full_name='google.ads.googleads.v1.services.CustomerClientLinkService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=885,
serialized_end=1353,
methods=[
_descriptor.MethodDescriptor(
name='GetCustomerClientLink',
full_name='google.ads.googleads.v1.services.CustomerClientLinkService.GetCustomerClientLink',
index=0,
containing_service=None,
input_type=_GETCUSTOMERCLIENTLINKREQUEST,
output_type=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_customer__client__link__pb2._CUSTOMERCLIENTLINK,
serialized_options=_b('\202\323\344\223\0027\0225/v1/{resource_name=customers/*/customerClientLinks/*}'),
),
_descriptor.MethodDescriptor(
name='MutateCustomerClientLink',
full_name='google.ads.googleads.v1.services.CustomerClientLinkService.MutateCustomerClientLink',
index=1,
containing_service=None,
input_type=_MUTATECUSTOMERCLIENTLINKREQUEST,
output_type=_MUTATECUSTOMERCLIENTLINKRESPONSE,
serialized_options=_b('\202\323\344\223\002=\"8/v1/customers/{customer_id=*}/customerClientLinks:mutate:\001*'),
),
])
_sym_db.RegisterServiceDescriptor(_CUSTOMERCLIENTLINKSERVICE)
DESCRIPTOR.services_by_name['CustomerClientLinkService'] = _CUSTOMERCLIENTLINKSERVICE
# @@protoc_insertion_point(module_scope)
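# --- Editor's usage sketch (appended, not generated code): building the mutate
# request defined above. The customer id below is a hypothetical placeholder.
def _example_mutate_request():
    link = google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_customer__client__link__pb2.CustomerClientLink()
    operation = CustomerClientLinkOperation(create=link)
    return MutateCustomerClientLinkRequest(customer_id='1234567890', operation=operation)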
|
[
"noreply@github.com"
] |
BrunoWMello.noreply@github.com
|
a51eb96b25d9042539d8720c01a2f9e03183ed1d
|
3b89e48ba2a7288026d13cea7aaf897f9e1ab22f
|
/youtube/index/views.py
|
4867b08379eaf9e6b6e9892e7c4e592acc2db39e
|
[] |
no_license
|
b11901/Youtube_Notes
|
50aeb0a5183e76405eac0037994978ae6293d4e4
|
ef78c87e8c71273a048ae29ea5d47719f5870391
|
refs/heads/master
| 2022-04-23T12:09:20.950145
| 2020-04-15T08:28:17
| 2020-04-15T08:28:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,015
|
py
|
from django.shortcuts import render,HttpResponseRedirect,redirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from index.models import Notes,Liked
# Create your views here.
def index(request):
return render(request,'index/index.html')
def search(request):
from . import search_yt
#list for thumbnail src
thumb = []
link,description,title,thumbnail_link = search_yt.search_youtube(request.POST['query'])
#Preparing the thumbnail src
for thumbnail in thumbnail_link:
thumb.append("http://img.youtube.com/vi/"+thumbnail[9:]+"/hqdefault.jpg")
#Preparing the base link
base_link = []
for l in link:
base_link.append(l[28:])
zip_elem = zip(title,description,link,thumb,base_link)
context = {
'query' : request.POST['query'],
'zip_elem' : zip_elem,
'range' : range(len(title)),
}
return render(request,'index/search.html',context)
#Video Template
def video(request, link):
    base_link = 'https://www.youtube.com/embed/'
    custom_link = 'http://127.0.0.1:8000/video/' + link
    link = base_link + link
    context = {
        'link': link,
    }
    if request.user.is_authenticated:
        notes = Notes.objects.filter(user=request.user, url=custom_link)
        print(custom_link)
        context.update(
            {
                "note": notes
            }
        )
    return render(request, 'index/video.html', context)
@login_required
def saveNote(request):
    if request.method == 'POST':
        note_text = request.POST['note']  # a trailing comma here previously turned this into a tuple
        video_url = request.META.get('HTTP_REFERER')
        note = Notes(user=request.user, note=note_text, url=video_url)
        note.save()
        return HttpResponseRedirect(video_url)
    return redirect('/')
def likeVideo(request, link):
    like = Liked(user=request.user, url=link)  # the model imported above is Liked, not Like
    like.save()
    return redirect('/')
def test(request):
return render(request, 'index/test.html',{})
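# --- Editor's sketch: a sturdier way to pull the video id than the fixed-offset
# slicing used in search(), assuming links like https://www.youtube.com/watch?v=ID.
from urllib.parse import urlparse, parse_qs

def video_id_from_link(link):
    """Return the YouTube video id from a watch URL, or None if absent."""
    values = parse_qs(urlparse(link).query).get('v')
    return values[0] if values else None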
|
[
"noreply@github.com"
] |
b11901.noreply@github.com
|
b30402b9452fc6e2688da5103fc27f0989c3529a
|
43575c1324dc0760958a110d7f056bce88422a03
|
/listing/Removing a node from a linked list using a tail reference.py
|
68fcf477889f50bd34e9026ec5c371e1463fe53e
|
[] |
no_license
|
nicolas4d/Data-Structures-and-Algorithms-Using-Python
|
1ffd74d26f09de2057bdc53998a56e56ed77c1de
|
a879ce6fd4033867783ee487d57d459b029eb5f8
|
refs/heads/master
| 2020-09-24T12:48:30.726766
| 2019-12-31T03:15:44
| 2019-12-31T03:15:44
| 225,761,970
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
# Given the head and tail references, removes a target from a linked list.
predNode = None
curNode = head
while curNode is not None and curNode.data != target :
predNode = curNode
curNode = curNode.next
if curNode is not None :
if curNode is head :
head = curNode.next
else :
predNode.next = curNode.next
if curNode is tail :
tail = predNode
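# --- Editor's sketch: the same removal wrapped as a reusable function, with a
# minimal node type assuming only the .data/.next attributes used above.
class Node:
    def __init__(self, data, next=None):
        self.data = data
        self.next = next

def remove(head, tail, target):
    predNode, curNode = None, head
    while curNode is not None and curNode.data != target:
        predNode, curNode = curNode, curNode.next
    if curNode is not None:
        if curNode is head:
            head = curNode.next
        else:
            predNode.next = curNode.next
        if curNode is tail:
            tail = predNode
    return head, tail

# Example: removing 2 from 1 -> 2 -> 3 keeps head at 1 and tail at 3.
tail = Node(3)
head = Node(1, Node(2, tail))
head, tail = remove(head, tail, 2)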
|
[
"nicolas4d@foxmail.com"
] |
nicolas4d@foxmail.com
|
7b22e507c17e21f41fde1bc26f2020162a415d67
|
14f880edf737b9c0e4bdc23de71d23ad5d5650c4
|
/Kalman_Filter.py
|
281eecef177eefbe12dfea8153b17eca74942eaf
|
[] |
no_license
|
moralesarias94/AIRobotics
|
ce686b853a63b1e2cf694eba3e395799d546fed2
|
89777245ce12112f7ff608a17b62a3d92c8de3c7
|
refs/heads/master
| 2020-12-13T07:08:03.435625
| 2017-03-09T18:21:39
| 2017-03-09T18:21:39
| 83,629,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,373
|
py
|
# Write a function 'kalman_filter' that implements a multi-
# dimensional Kalman Filter for the example given
from math import *
class matrix:
# implements basic operations of a matrix class
def __init__(self, value):
self.value = value
self.dimx = len(value)
self.dimy = len(value[0])
if value == [[]]:
self.dimx = 0
def zero(self, dimx, dimy):
# check if valid dimensions
if dimx < 1 or dimy < 1:
raise ValueError, "Invalid size of matrix"
else:
self.dimx = dimx
self.dimy = dimy
self.value = [[0 for row in range(dimy)] for col in range(dimx)]
def identity(self, dim):
# check if valid dimension
if dim < 1:
raise ValueError, "Invalid size of matrix"
else:
self.dimx = dim
self.dimy = dim
self.value = [[0 for row in range(dim)] for col in range(dim)]
for i in range(dim):
self.value[i][i] = 1
    def show(self):
        for i in range(self.dimx):
            print(self.value[i])
        print(' ')
def __add__(self, other):
# check if correct dimensions
if self.dimx != other.dimx or self.dimy != other.dimy:
raise ValueError, "Matrices must be of equal dimensions to add"
else:
# add if correct dimensions
res = matrix([[]])
res.zero(self.dimx, self.dimy)
for i in range(self.dimx):
for j in range(self.dimy):
res.value[i][j] = self.value[i][j] + other.value[i][j]
return res
def __sub__(self, other):
# check if correct dimensions
if self.dimx != other.dimx or self.dimy != other.dimy:
raise ValueError, "Matrices must be of equal dimensions to subtract"
else:
# subtract if correct dimensions
res = matrix([[]])
res.zero(self.dimx, self.dimy)
for i in range(self.dimx):
for j in range(self.dimy):
res.value[i][j] = self.value[i][j] - other.value[i][j]
return res
def __mul__(self, other):
# check if correct dimensions
if self.dimy != other.dimx:
raise ValueError, "Matrices must be m*n and n*p to multiply"
else:
# subtract if correct dimensions
res = matrix([[]])
res.zero(self.dimx, other.dimy)
for i in range(self.dimx):
for j in range(other.dimy):
for k in range(self.dimy):
res.value[i][j] += self.value[i][k] * other.value[k][j]
return res
def transpose(self):
# compute transpose
res = matrix([[]])
res.zero(self.dimy, self.dimx)
for i in range(self.dimx):
for j in range(self.dimy):
res.value[j][i] = self.value[i][j]
return res
# Thanks to Ernesto P. Adorio for use of Cholesky and CholeskyInverse functions
def Cholesky(self, ztol=1.0e-5):
# Computes the upper triangular Cholesky factorization of
# a positive definite matrix.
res = matrix([[]])
res.zero(self.dimx, self.dimx)
for i in range(self.dimx):
S = sum([(res.value[k][i])**2 for k in range(i)])
d = self.value[i][i] - S
if abs(d) < ztol:
res.value[i][i] = 0.0
else:
if d < 0.0:
raise ValueError, "Matrix not positive-definite"
res.value[i][i] = sqrt(d)
for j in range(i+1, self.dimx):
S = sum([res.value[k][i] * res.value[k][j] for k in range(self.dimx)])
if abs(S) < ztol:
S = 0.0
res.value[i][j] = (self.value[i][j] - S)/res.value[i][i]
return res
def CholeskyInverse(self):
# Computes inverse of matrix given its Cholesky upper Triangular
# decomposition of matrix.
res = matrix([[]])
res.zero(self.dimx, self.dimx)
# Backward step for inverse.
for j in reversed(range(self.dimx)):
tjj = self.value[j][j]
S = sum([self.value[j][k]*res.value[j][k] for k in range(j+1, self.dimx)])
res.value[j][j] = 1.0/tjj**2 - S/tjj
for i in reversed(range(j)):
res.value[j][i] = res.value[i][j] = -sum([self.value[i][k]*res.value[k][j] for k in range(i+1, self.dimx)])/self.value[i][i]
return res
def inverse(self):
aux = self.Cholesky()
res = aux.CholeskyInverse()
return res
def __repr__(self):
return repr(self.value)
########################################
# Implement the filter function below
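# (Editor's note) The loop below implements the standard Kalman filter
# equations; process noise Q is omitted in this course example:
#   measurement update: y = z - H x,  S = H P H^T + R,  K = P H^T S^-1,
#                       x <- x + K y,  P <- (I - K H) P
#   prediction:         x <- F x + u,  P <- F P F^T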
def kalman_filter(x, P):
for n in range(len(measurements)):
# measurement update
print("Measurement: ")
z = matrix([[measurements[n]]])
print("Z: ")
z.show()
y = z - H * x
print("Y: ")
y.show()
s = H * P * H.transpose() + R
print("S: ")
s.show()
k = P * H.transpose() * s.inverse()
print("K: ")
k.show()
x = x + (k * y)
print("X: ")
x.show()
P = (I - k * H) * P
print("P: ")
P.show()
# prediction
print("Prediction: ")
x = F * x + u
print("X: ")
x.show()
P = F * P * F.transpose()
print("P: ")
P.show()
return x,P
############################################
### use the code below to test your filter!
############################################
measurements = [1, 2, 3]
x = matrix([[0.], [0.]]) # initial state (location and velocity)
P = matrix([[1000., 0.], [0., 1000.]]) # initial uncertainty
u = matrix([[0.], [0.]]) # external motion
F = matrix([[1., 1.], [0, 1.]]) # next state function
H = matrix([[1., 0.]]) # measurement function
R = matrix([[1.]]) # measurement uncertainty
I = matrix([[1., 0.], [0., 1.]]) # identity matrix
print(kalman_filter(x, P))
# output should be:
# x: [[3.9996664447958645], [0.9999998335552873]]
# P: [[2.3318904241194827, 0.9991676099921091], [0.9991676099921067, 0.49950058263974184]]
|
[
"moralesarias94@gmail.com"
] |
moralesarias94@gmail.com
|
ac52f423008777fa7ade108342ad39c8e7a59772
|
634f86d2e9a534566b4e120c986c079ffb246804
|
/relevate_web_app/apps/api/urls.py
|
09f6d94f65240a4222f3e69397e76e21bdac5e4b
|
[] |
no_license
|
jhock/Relevate
|
dcbb32a11c44766a55291dec1ed8b1f68fb32236
|
8296c49dfa8771b47965c24b6b49a2b6e8ace6cf
|
refs/heads/master
| 2023-01-19T14:13:56.756661
| 2019-08-12T22:19:02
| 2019-08-12T22:19:02
| 105,825,724
| 1
| 0
| null | 2023-01-13T22:30:25
| 2017-10-04T22:31:12
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 210
|
py
|
from django.conf.urls import url
from .views.feed_view import FeedView
urlpatterns = [
url(r'^post-feeds/$', FeedView.as_view()),
url(r'^post-feeds/(?P<feed_index>[0-9]+)/$', FeedView.as_view()),
]
|
[
"joshua.a.hock@gmail.com"
] |
joshua.a.hock@gmail.com
|
86a8f212e08c881bf5a6eab8dd112c2593d3e0c6
|
470fb2e2b02881c029ed6c50368936cc2e4826c8
|
/sorting_iterative.py
|
d6e6d0b09f460a1ce536d333d182d610c79e760d
|
[] |
no_license
|
asha952/sorting
|
0ace7346241f3affec9def9a5dffec60de285607
|
1f2d942942ebb9c73edca6f118df7112d98cacd0
|
refs/heads/main
| 2023-01-30T07:29:38.873438
| 2020-12-10T00:18:26
| 2020-12-10T00:18:26
| 308,815,434
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,002
|
py
|
#!python
sorted_list = [1, 2, 3, 4, 5]
unsorted_list = [4, 1, 5, 3, 2]
zeroes_list = [0, 0, 0]
def is_sorted(items):
    """Return a boolean indicating whether given items are in sorted order.
    Running time: O(n) worst case - each adjacent pair is compared once.
    Memory usage: O(1) - only an index is kept."""
    # Check that all adjacent items are in order, return early if not
    for it in range(len(items) - 1):
        if items[it] > items[it + 1]:
            return False
    return True
def bubble_sort(items):
    """Sort given items by swapping adjacent items that are out of order, and
    repeating until all items are in sorted order.
    Running time: O(n^2) - up to n-1 passes, each comparing adjacent pairs.
    Memory usage: O(1) - swaps happen in place."""
    num_items = len(items)
    for i in range(num_items - 1):
        for j in range(0, num_items - i - 1):
            if items[j] > items[j + 1]:
                items[j], items[j + 1] = items[j + 1], items[j]
def selection_sort(items):
    """Sort given items by finding minimum item, swapping it with first
    unsorted item, and repeating until all items are in sorted order.
    Running time: O(n^2) - a linear minimum search per position.
    Memory usage: O(1) - swaps happen in place."""
    for i in range(len(items) - 1):
        # Find minimum item in unsorted items
        min_index = i
        for j in range(i + 1, len(items)):
            if items[j] < items[min_index]:
                min_index = j
        # Swap it with first unsorted item
        items[i], items[min_index] = items[min_index], items[i]
def insertion_sort(items):
    """Sort given items by taking first unsorted item, inserting it in sorted
    order in front of items, and repeating until all items are in order.
    Running time: O(n^2) worst case, O(n) when nearly sorted.
    Memory usage: O(1) - items shift in place."""
    for i in range(1, len(items)):
        # Take first unsorted item
        current = items[i]
        j = i - 1
        # Insert it in sorted order in front of items
        while j >= 0 and items[j] > current:
            items[j + 1] = items[j]
            j -= 1
        items[j + 1] = current
print(unsorted_list)
bubble_sort(unsorted_list)
print(unsorted_list)
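# Editor's quick check (sketch): these should hold after the fixes above.
assert is_sorted(sorted_list)
assert is_sorted(unsorted_list)  # bubble_sort ran on it above
assert not is_sorted([2, 1, 3])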
|
[
"noreply@github.com"
] |
asha952.noreply@github.com
|
daa019c6d9b925790a732b23389802cbe1adc9a2
|
d9ae8abae85a36934f69659bea698f658578dbba
|
/worldmodel/agent/ActorCritic.py
|
fbc9d733262947c578dbfad1a5107fa459c21120
|
[] |
no_license
|
mahkons/WorldModel
|
d2dc56da36dcd7edbf45362510805526e37e0dae
|
c15bc0df4fc4e2c2ea92154943c510efaafeb858
|
refs/heads/master
| 2020-09-24T01:40:49.375248
| 2020-01-27T12:04:59
| 2020-01-27T12:04:59
| 225,631,975
| 0
| 0
| null | 2020-01-27T12:05:00
| 2019-12-03T13:53:02
|
Python
|
UTF-8
|
Python
| false
| false
| 5,375
|
py
|
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as T
import torch.nn.functional as F
from worldmodel.agent.ReplayMemory import Transition
from workflow.params import GAMMA, TAU, BATCH_SIZE, PRIORITY_DECR, MIN_MEMORY
class Actor(nn.Module):
def __init__(self, state_sz, action_sz, hidden_sz):
super(Actor, self).__init__()
self.fc1 = nn.Linear(state_sz + hidden_sz, 256)
self.fc2 = nn.Linear(256, 128)
self.fc3 = nn.Linear(128, action_sz)
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.xavier_uniform_(self.fc3.weight)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return torch.tanh(x)
class Critic(nn.Module):
def __init__(self, state_sz, action_sz, hidden_sz):
super(Critic, self).__init__()
self.fc1 = nn.Linear(state_sz + hidden_sz + action_sz, 256)
self.fc2 = nn.Linear(256, 128)
self.fc3 = nn.Linear(128, 1)
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.xavier_uniform_(self.fc3.weight)
def forward(self, state, action):
x = F.relu(self.fc1(torch.cat([state, action], dim=1)))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
class ControllerAC(nn.Module):
def __init__(self, state_sz, action_sz, hidden_sz, memory, actor_lr=1e-4, critic_lr=1e-4, device='cpu'):
super(ControllerAC, self).__init__()
self.state_sz = state_sz
self.action_sz = action_sz
self.hidden_sz = hidden_sz
self.memory = memory
self.device = device
self.actor = Actor(state_sz, action_sz, hidden_sz).to(device)
self.target_actor = Actor(state_sz, action_sz, hidden_sz).to(device)
self.critic = Critic(state_sz, action_sz, hidden_sz).to(device)
self.target_critic = Critic(state_sz, action_sz, hidden_sz).to(device)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=actor_lr)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=critic_lr)
self.steps_done = 0
def select_action(self, state):
with torch.no_grad():
return self.actor(state).to(torch.device('cpu')).numpy().squeeze(0)
def hard_update(self):
self.target_actor.load_state_dict(self.actor.state_dict())
self.target_critic.load_state_dict(self.critic.state_dict())
def soft_update_net(self, local_model, target_model):
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
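            # Polyak averaging: target <- TAU * local + (1 - TAU) * target,
            # so targets track the online nets slowly for stable bootstrapping.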
target_param.data.copy_(TAU * local_param.data + (1.0 - TAU) * target_param.data)
def soft_update(self):
self.soft_update_net(self.actor, self.target_actor)
self.soft_update_net(self.critic, self.target_critic)
def combine_errors(self, td_error, model_error):
# return td_error
return model_error
def optimize_critic(self, positions, weights):
state, action, reward, next_state, done, model_error = self.memory.get_transitions(positions)
state_action_values = self.critic(state, action)
with torch.no_grad():
noise = torch.empty(action.shape).data.normal_(0, 0.2).to(self.device) # from SafeWorld
noise = noise.clamp(-0.5, 0.5)
next_action = (self.target_actor(next_state) + noise).clamp(-1., 1.)
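            # Clipped noise on the target action resembles TD3-style target
            # policy smoothing, regularizing the critic's value targets.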
next_values = self.target_critic(next_state, next_action).squeeze(1)
expected_state_action_values = (next_values * GAMMA * (1 - done)) + reward
td_error = (expected_state_action_values.unsqueeze(1) - state_action_values).squeeze(1) # TODO clamp add?
self.memory.update(positions, self.combine_errors(torch.abs(td_error), torch.abs(model_error)))
loss = F.smooth_l1_loss(state_action_values.squeeze() * weights, expected_state_action_values * weights)
self.critic_optimizer.zero_grad()
loss.backward()
self.critic_optimizer.step()
def optimize_actor(self, positions, weights):
state, action, reward, next_state, done, model_error = self.memory.get_transitions(positions)
predicted_action = self.actor(state)
value = self.critic(state, predicted_action) # TODO remove or not remove (1 - done)?
loss = -(value.squeeze() * weights).mean()
self.actor_optimizer.zero_grad()
loss.backward()
self.actor_optimizer.step()
def optimize(self):
if len(self.memory) < MIN_MEMORY:
return
positions, weights = self.memory.sample_positions(BATCH_SIZE)
weights = weights.to(self.device)
self.optimize_critic(positions, weights)
self.optimize_actor(positions, weights)
self.soft_update()
def save_model(self, path):
torch.save(self, path)
@staticmethod
def load_model(path, *args, **kwargs):
cnt = torch.load(path, map_location='cpu')
cnt.to(cnt.device)
cnt.actor_optimizer = torch.optim.Adam(cnt.actor.parameters(), lr=kwargs['actor_lr'])
cnt.critic_optimizer = torch.optim.Adam(cnt.critic.parameters(), lr=kwargs['critic_lr'])
return cnt
|
[
"mah.kons@gmail.com"
] |
mah.kons@gmail.com
|
fd58ffa80be6039c1aeff77f5eeb8a77075dacfa
|
b3f5c18efe5aed5f3daeb0991d092263ea080a44
|
/final.py
|
bb3bef880557ba6751fa182beaf464728f617646
|
[] |
no_license
|
Polestar574/class107dhruv
|
fb5b661e72c3207aa6a718404d84ac589f939dda
|
0cb906e6f280bbc15be2b81c1d052fa2b7bfa77d
|
refs/heads/main
| 2023-07-24T17:34:35.268982
| 2021-09-09T10:59:03
| 2021-09-09T10:59:03
| 404,686,871
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
import pandas as pd
import csv
import plotly.graph_objects as go
df=pd.read_csv("data.csv")
student_df=df.loc[df['student_id']=="TRL_987"]
print(student_df.groupby("level")["attempt"].mean())
fig=go.Figure(go.Bar(
x=student_df.groupby("level")["attempt"].mean(),
y=['Level 1','Level 2','Level 3','Level 4'],
orientation='h'
))
fig.show()
|
[
"noreply@github.com"
] |
Polestar574.noreply@github.com
|
78c8f872dc05c84e31c46f7701ce9411d0144b6f
|
b18aec8b07131e93b2618e96bc5dfd6ba8e6cd62
|
/by_isp_coverage/parsers/flynet_parser.py
|
11e3a2e2afbe9f3a3b185b82fee27d5619febf8e
|
[
"MIT"
] |
permissive
|
MrLokans/isp-coverage-map
|
f7f5e4a4fbbfc79d301b81538f33e9084aa182a8
|
9dc5157e778fcff6d8b502e44a0707f0b96a54d3
|
refs/heads/master
| 2020-05-21T20:46:38.571080
| 2016-10-21T16:30:42
| 2016-10-21T16:30:42
| 60,363,672
| 1
| 2
| null | 2016-10-21T16:27:33
| 2016-06-03T16:54:59
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,602
|
py
|
import time
import grequests
from by_isp_coverage.parsers.base import BaseParser
from by_isp_coverage.connection import Connection
STREET_ID_REGEX = r"this,\"(?P<_id>\d+)\""
class FlynetParser(BaseParser):
PARSER_NAME = "flynet"
PARSER_URL = "https://flynet.by"
def __init__(self, coordinate_obtainer=None, validator=None):
self.coordinate_obtainer = coordinate_obtainer
self.validator = validator
def get_all_connected_streets(self):
ltrs = "0123456789абвгдеёжзийклмнопрстуфхцчшщэюя"
streets = set()
u = self.PARSER_URL + '/connection/searcher.php'
rs = (grequests.get(u, params={"what": "str",
"limit": 1500,
"timestamp": int(time.time()),
"street": l,
"q": 1,
})
for l in ltrs)
results = grequests.map(rs)
for response in results:
streets.update(self._streets_from_api_response(response))
streets.discard('')
return streets
def _streets_from_api_response(self, resp):
text = resp.text
        if not text:
            return set()
streets = text.split('\n')
results = {s.split('|')[0] for s in streets}
return results
def _houses_from_api_response(self, resp):
text = resp.text
        if not text:
            return set()
houses = text.split('\n')
results = {h.split('|')[0] for h in houses}
return results
def _house_list_for_street(self, street):
numbers = list(range(1, 10))
house_numbers = set()
u = self.PARSER_URL + '/connection/searcher.php'
rs = (grequests.get(u, params={"what": "house",
"limit": 1500,
"timestamp": int(time.time()),
"street": street,
"q": n,
})
for n in numbers)
results = grequests.map(rs)
for response in results:
house_numbers.update(self._houses_from_api_response(response))
house_numbers.discard('')
return house_numbers
def __connections_from_street(self, street):
region = u"Минск"
city = u"Минск"
status = u"Есть подключение"
for h in self._house_list_for_street(street):
yield Connection(provider=self.PARSER_NAME, region=region,
city=city, street=street, status=status,
house=h)
def get_connections(self):
streets = self.get_all_connected_streets()
for street in streets:
connections = self.__connections_from_street(street)
if self.validator:
yield from self.validator.validate_connections(connections)
else:
yield from connections
def get_points(self):
streets = self.get_all_connected_streets()
data = [(s, self._house_list_for_street(s)) for s in streets]
return self.coordinate_obtainer.get_points(data)
if __name__ == '__main__':
from by_isp_coverage.coordinate_obtainer import CoordinateObtainer
parser = FlynetParser(CoordinateObtainer())
# points = parser.get_points()
# print(points)
# print(list(parser.get_connections()))
print(list(parser.get_points()))
|
[
"trikster1911@gmail.com"
] |
trikster1911@gmail.com
|
b045ee0d46d32f91ed11af104ded569a3ab680c8
|
9a0a39437158bb3c875deefff46b75e44f6002c4
|
/modules/site_info.py
|
765b2b40d5fe02464e79f9cf07c064029d8274fe
|
[] |
no_license
|
maryam98/Pedgene
|
fba67c213b9b7dfd26075cbe76cb8378415e11ef
|
a116dad27989f434edc8b46f6bdf5e23517ca332
|
refs/heads/main
| 2023-03-05T21:10:56.149332
| 2021-02-14T16:59:22
| 2021-02-14T16:59:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
import builtwith
def Fetch_info(domain,GREEN):
res = builtwith.builtwith("http://"+domain)
    for i in res:
        print(f"{GREEN} {i}", res[i])
|
[
"noreply@github.com"
] |
maryam98.noreply@github.com
|
c32069fe3308a420538cc2ddf69ade490f2b9409
|
764c34ee4728dd39f7ace474e14d1d8d5a55efd1
|
/main.py
|
2987e3d9b6dd8e10771f43abd82a41e605124013
|
[] |
no_license
|
JoshiRah/floof_analyzis
|
d2efdaf2ea440a2a25225ad7258855bab2ca83ce
|
811c71c55de967c09b6ac5c8418fcd21d40b9843
|
refs/heads/master
| 2023-03-17T07:38:16.194548
| 2021-03-07T16:45:40
| 2021-03-07T16:45:40
| 345,399,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
import requests
import matplotlib.pyplot as plt
rounds = 10
floofs = [0] * 123                # hit counter per fox image
floofNames = list(range(1, 124))  # fox images are numbered 1..123
for i in range(rounds):
response = requests.get("https://randomfox.ca/floof/")
fox = response.json()
link = fox['link']
splittedLink = link.split('=')
floofNumber = splittedLink[1]
    floofs[int(floofNumber) - 1] += 1  # image N is counted at index N-1
    print('Progress', i, 'of', rounds-1)
plt.bar(floofNames, floofs, label='Count of floof')
plt.legend(loc='upper left')
plt.show()
|
[
"joshua.rahmlow@gmail.com"
] |
joshua.rahmlow@gmail.com
|
286e976e365502aef139c46909861245e0c587ff
|
b29359b1e4cdc9492e11e85f46d967739f4810fd
|
/app_final.py
|
9cea417118187aaf8633341a10d0d9b561b6e02d
|
[] |
no_license
|
pavankm96/Streamlit-Model-Deployment
|
decc62f31e78f68aacda75c58d64026afccfbb12
|
73284beeabd1779fbf0d7dbd920c85cbec497d8e
|
refs/heads/main
| 2023-06-20T04:10:50.216120
| 2021-07-20T01:51:28
| 2021-07-20T01:51:28
| 387,637,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,224
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 23 14:40:02 2021
@author: Aravind
"""
import streamlit as st
from PIL import Image
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import pickle
from textblob import TextBlob
from wordcloud import WordCloud
import re
import matplotlib.pyplot as plt
import seaborn as sns
import streamlit.components.v1 as comp
import requests
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, confusion_matrix
from nltk.stem.snowball import SnowballStemmer
from pickle import load, dump
#sess = tf.Session()
#set_session(sess)
data=pd.read_csv("C:/Users/Pavan K M/Datasets/polarity_updated.csv",encoding = "utf-8")
st.markdown("<h1 style='text-align: center;'> <img src='https://placeit-assets0.s3-accelerate.amazonaws.com/custom-pages/landing-page-medical-logo-maker/Pharmacy-Logo-Maker-Red.png' alt='' width='120' height='120'</h1>", unsafe_allow_html=True)
#data=pd.read_csv('D:/Data Science/Project/ExcelR Project/streamlit/new_data.csv')
data_review=pd.DataFrame(columns=['Reviews'],data=data)
title_html = '<p style="font-family:Imprint MT Shadow; text-align:center;background-color:#1561;border-radius: 0.4rem; text-font:Bodoni MT Poster Compressed; color:Black; font-size: 60px;">Apna-MediCare</p>'
st.markdown(title_html, unsafe_allow_html=True)  # assigning to st.title would shadow Streamlit's title()
#st.sidebar.title("Drug Name")
#st.text_input("Drug","Type Here")
#st.text_input("Condition","Type Here")
#st.text_input('SideEffect')
#st.text_input('Previous Reviews')
######model_lr=pickle.load(open('D:\Data Science\Project\ExcelR Project\Medicines Side Effect Analysis/logisitc.pkl','rb'))
######tfidf=pickle.load(open('D:\Data Science\Project\ExcelR Project\Medicines Side Effect Analysis/TfidfVectorizer.pkl','rb'))
#x=data['Reviews'].values.astype('U')
#y=data['Analysis']
#x=x.astype
#y=y.astype
#x_train, x_test, y_train, y_test=train_test_split(x,y, test_size=0.20, random_state=42)
#vectorizer =TfidfVectorizer()
#model=Pipeline([('tfidf', TfidfVectorizer()),
#('logistic', LogisticRegression(max_iter=500)),
#])
# Feed the training data through the pipeline
#model.fit(x_train, y_train)
#prediction_log_test=model.predict(x_test)
#accuracy_score=accuracy_score(y_test,prediction_log_test )
#def predict_model_lr(reviews):
#results =model_lr.predict([reviews])
#return results[0]
activities=["Medicine Name","Condition","Clear"]
choice = st.sidebar.selectbox("Select Your Activity", activities)
#if choice=="NONE":
def Average(lst):
try:
return sum(lst) / len(lst)
except:
pass
if choice=="Medicine Name":
#st.write("Top MostRecent Drugs")
raw_text = st.text_area("Enter the Medicine Name")
Analyzer_Choice = st.selectbox("Select the Activities", [" ","Show Related Drug Conditions"])
if st.button("Analyzer"):
if Analyzer_Choice =="Show Related Drug Conditions":
#st.success("Fetching Top Conditions")
data_top_condition=data[(data['Condition']=='Analysis') & (data['Drug']==str(raw_text))]
data_top_condition=data[data['Drug']==raw_text]
data_top_condition=data_top_condition.groupby(['Drug','Condition']).agg('mean').reset_index()
data_top_condition=data_top_condition.sort_values(by=['Condition'], ascending=False).head(5)
#data_top_condition=data_top_condition.head(5)
data_top_condition_list=data_top_condition['Condition'].tolist()
#comp.html("<b> Condition: </b>")
for i in data_top_condition_list:
st.markdown(i)
Analyzer_Choice = st.selectbox("Reviews", [" ","Show Top Reviews","Visualize the Sentiment Analysis"])
if st.button("Reviews"):
if Analyzer_Choice =="Visualize the Sentiment Analysis":
data_top_positive=data[(data['Analysis']=='Positive') & (data['Drug']==str(raw_text))]
data_top_positive=data_top_positive
data_top_positive_list=data_top_positive['Satisfaction_Real'].tolist()
#st.markdown(Average(data_top_positive_list))
data_top_negative=data[(data['Analysis']=='Negative') & (data['Drug']==str(raw_text))]
data_top_negative=data_top_negative
data_top_negative_list=data_top_negative['Satisfaction_Real'].tolist()
#st.markdown(Average(data_top_negative_list))
data_top_neutral=data[(data['Analysis']=='Neutral') & (data['Drug']==str(raw_text))]
data_top_neutral=data_top_neutral
data_top_neutral_list=data_top_neutral['Satisfaction_Real'].tolist()
#st.markdown(Average(data_top_neutral_list))
st.text("Below are the Observation plotted")
rating={'avg_rat':[Average(data_top_positive_list),Average(data_top_negative_list),Average(data_top_neutral_list)],
'rat':['Positive','Negative','Neutral']}
df_rating=pd.DataFrame(rating)
#plt.bar(df_rating.avg_rat, df_rating.rat)
st.bar_chart(df_rating['avg_rat'])
st.text("0:Positive, 1:Neutral, 2:Negative")
st.write("Total average rating=",df_rating['avg_rat'].mean())
if Analyzer_Choice =="Show Top Reviews":
#st.success("Fetching Top Reviews")
data_top_positive=data[(data['Analysis']=='Positive') & (data['Drug']==str(raw_text))]
data_top_positive=data_top_positive
data_top_positive_list=data_top_positive['Reviews'].tolist()
comp.html("<b>Positive:</b>")
for i in data_top_positive_list:
st.markdown(i)
comp.html("<b>Average Positive Review Rating:</b>")
data_top_positive_list=data_top_positive['Satisfaction_Real'].tolist()
st.markdown(Average(data_top_positive_list))
data_top_negative=data[(data['Analysis']=='Negative') & (data['Drug']==str(raw_text))]
data_top_negative=data_top_negative
data_top_negative_list=data_top_negative['Reviews'].tolist()
comp.html("<b> Negative: </b>")
for i in data_top_negative_list:
st.markdown(i)
comp.html("<b>Average Negative Review Rating:</b>")
data_top_negative_list=data_top_negative['Satisfaction_Real'].tolist()
st.markdown(Average(data_top_negative_list))
data_top_neutral=data[(data['Analysis']=='Neutral') & (data['Drug']==str(raw_text))]
data_top_neutral=data_top_neutral
data_top_neutral_list=data_top_neutral['Reviews'].tolist()
comp.html("<b> Neutral: </b>")
for i in data_top_neutral_list:
st.markdown(i)
comp.html("<b>Average Neutral Review Rating:</b>")
data_top_neutral_list=data_top_neutral['Satisfaction_Real'].tolist()
st.markdown(Average(data_top_neutral_list))
comp.html("<br>")
st.text("Below are the Observation plotted")
rating={'avg_rat':[Average(data_top_positive_list),Average(data_top_negative_list),Average(data_top_neutral_list)],
'rat':['Positive','Negative','Neutral']}
df_rating=pd.DataFrame(rating)
#plt.bar(df_rating.avg_rat, df_rating.rat)
st.bar_chart(df_rating['avg_rat'])
st.text("0:Positive, 1:Neutral, 2:Negative")
st.write("Total average rating=",df_rating['avg_rat'].mean())
#comp.html("<html><head><link rel=""stylesheet"" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css"><style>.checked {color: orange;}</style></head><body><h2>Star Rating</h2><span class="fa fa-star checked"></span><span class="fa fa-star checked"></span><span class="fa fa-star checked"></span><span class="fa fa-star"></span><span class="fa fa-star"></span></body></html>"")
#df=pd.DataFrame(data_top_neutral)
#st.bar_chart(df)
#fig,(ax1,ax2)=plt.subplots(1,2, figsize=(10,4))
#fig.suptitle('Sentiment Analysis')
#df['data_top_neutral'].value_counts().plot.bar(ax=ax1, color='tomato', ec="black")
#st.write(sns.countplot(x=["df"], data=df))
#st.pyplot(use_container_width=True)
#def Show_Top_Reviews(raw_text):
# data_top_Reviews=data[(data['Reviews']=='Analysis') & (data['Drug']==str(raw_text))]
# data_top_Reviews=data[data['Drug']==raw_text]
# Reviews_grouped=data_top_Reviews.groupby(['Drug','Reviews']).agg('mean').reset_index()
# data_top_Reviews_df=Reviews_grouped.sort_values(by=['Reviews'], ascending=False)
#data_top_condition=data_top_condition.head(5)
# data_top_Reviews_list=data_top_Reviews_df['Reviews'].tolist()
#st.bar_chart(data_top_Reviews_list)
#comp.html("<b> Condition: </b>")
# for i in data_top_Reviews_list:
# st.markdown(i)
# data_top_Reviews_list=Show_Top_Reviews(raw_text)
# st.write(data_top_Reviews_list)
# df=pd.DataFrame(data['Reviews'])
# def getPolarity(text):
#return TextBlob(text).sentiment.polarity
# df['Polarity']=df['Reviews'].apply (getPolarity)
#def getAnalysis(score):
# if score>0.02:
# return 'Positive'
# elif score==0:
# return 'Neutral'
# else:
# return 'Negative'
#df['Analysis']= df['Polarity'].appy(getAnalysis)
#return df
# st.write(sns.countplot(x=["Reviews"], data=df))
#st.pyplot(use_container_width=True)
# if Analyzer_Choice=="Generate WorldCloud":
# st.success("Create the WorldCloud")
#else:
#df_plot_Analysis():
#st.success("Generating Visualisation for Sentiment Analysis")
# Analyzer_Choice = st.selectbox("Sentiment_Analysis", [" ","Sentiment Analysis"])
# if st.button("Sentiment_Analysis"):
# if Analyzer_Choice =="Sentiment Analysis":
# data_top_Reviews=data[(data['Reviews']=='Analysis') & (data['Drug']==str(Analyzer_Choice))]
# data_top_Reviews=data[data['Drug']==raw_text]
# Reviews_grouped=data_top_Reviews.groupby(['Drug','Reviews']).agg('mean').reset_index()
# data_top_Reviews_df=Reviews_grouped.sort_values(by=['Reviews'], ascending=False)
# top_Reviews=data_top_Reviews_df['Reviews'].tolist()
# st.write(top_Reviews)
# def getPolarity(text):
# return TextBlob(text).sentiment.polarity
# df['Polarity']=df['Reviews'].apply (getPolarity)
# def getAnalysis(score):
# if score>0.02:
# return 'Positive'
# elif score==0:
# return 'Neutral'
# else:
# return 'Negative'
# df['Analysis']= df['Polarity'].appy(getAnalysis)
# return df
# st.write(sns.countplot(x=["Reviews"], data=df))
# st.pyplot(use_container_width=True)
#st.bar_chart(df)
#if Analyzer_Choice =="Visualize the Sentiment Analysis":
# st.success("Create the Sentiment Analysis")
if choice=="Condition":
#st.write("Top Most Condition")
raw_text = st.text_area("Enter the Condition")
Analyzer_Choice = st.selectbox("Select the Activities", [" ","Show Condition Related Medicines"])
if st.button("Analyzer"):
data_top_Drug=data[(data['Drug']=='Analysis') & (data['Condition']==str(raw_text))]
data_top_Drug=data[data['Condition']==raw_text]
data_top_Drug=data_top_Drug.groupby(['Condition','Drug']).agg('mean').reset_index()
data_top_Drug=data_top_Drug.sort_values(by=['Drug'], ascending=True).head(5)
data_top_Drug_list=data_top_Drug['Drug'].tolist()
for i in data_top_Drug_list:
st.markdown(i)
if Analyzer_Choice =="Show Condition Related Drugs":
st.success("Fetching Top Condition")
Analyzer_Choice = st.selectbox("Reviews", [" ","Show Top Reviews","Visualize the Sentiment Analysis"])
if st.button("Reviews"):
if Analyzer_Choice =="Visualize the Sentiment Analysis":
data_top_positive=data[(data['Analysis']=='Positive') & (data['Condition']==str(raw_text))]
data_top_positive=data_top_positive.head(5)
data_top_positive_list=data_top_positive['Satisfaction_Real'].tolist()
#st.markdown(Average(data_top_positive_list))
data_top_negative=data[(data['Analysis']=='Negative') & (data['Condition']==str(raw_text))]
data_top_negative=data_top_negative.head(5)
data_top_negative_list=data_top_negative['Satisfaction_Real'].tolist()
#st.markdown(Average(data_top_negative_list))
data_top_neutral=data[(data['Analysis']=='Neutral') & (data['Condition']==str(raw_text))]
data_top_neutral=data_top_neutral.head(5)
data_top_neutral_list=data_top_neutral['Satisfaction_Real'].tolist()
#st.markdown(Average(data_top_neutral_list))
st.text("Below are the Observation plotted")
rating={'avg_rat':[Average(data_top_positive_list),Average(data_top_negative_list),Average(data_top_neutral_list)],
'rat':['Positive','Negative','Neutral']}
df_rating=pd.DataFrame(rating)
#plt.bar(df_rating.avg_rat, df_rating.rat)
st.bar_chart(df_rating['avg_rat'])
st.text("0:Positive, 1:Neutral, 2:Negative")
st.write("Total average rating=",df_rating['avg_rat'].mean())
if Analyzer_Choice =="Show Top Reviews":
#st.success("Fetching Top Reviews")
data_top_positive=data[(data['Analysis']=='Positive') & (data['Condition']==str(raw_text))]
data_top_positive=data_top_positive.head(5)
data_top_positive_list=data_top_positive['Reviews'].tolist()
comp.html("<b>Positive:</b>")
for i in data_top_positive_list:
st.markdown(i)
data_top_negative=data[(data['Analysis']=='Negative') & (data['Condition']==str(raw_text))]
data_top_negative=data_top_negative.head(5)
data_top_negative_list=data_top_negative['Reviews'].tolist()
comp.html("<b> Negative: </b>")
for i in data_top_negative_list:
st.markdown(i)
data_top_neutral=data[(data['Analysis']=='Neutral') & (data['Condition']==str(raw_text))]
data_top_neutral=data_top_neutral.head(5)
data_top_neutral_list=data_top_neutral['Reviews'].tolist()
comp.html("<b> Neutral: </b>")
for i in data_top_neutral_list:
st.markdown(i)
# if Analyzer_Choice =="Generate WorldCloud":
# st.success("Create the WorldCloud")
# if Analyzer_Choice=="Visualize the Sentiment Analysis":
# st.success("Create the Sentiment Analysis")
#Background color
page_bg_img = '''
<style>
body {
background-image: url("https://wallpapercave.com/download/medic-wallpapers-wp4331260?nocache=1");
background-size: cover;
}
</style>
'''
st.markdown(page_bg_img, unsafe_allow_html=True)
def stem_tokens(tokens, stemmer):
stemmed = []
for item in tokens:
stemmed.append(stemmer.stem(item))
return stemmed
stemmer = SnowballStemmer('english')
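# Editor's usage sketch for the helper above:
# stem_tokens(['running', 'ran', 'runs'], stemmer) -> ['run', 'ran', 'run']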
|
[
"noreply@github.com"
] |
pavankm96.noreply@github.com
|
6ea61955b09ac51df0e86c28f926d3e2aa4ed6ac
|
b87ab91f3626dd244cb528e54132e7966a5dbe1f
|
/lab7/main.py
|
38acb98c4b3463b3d973f7532936a8e4b589dafa
|
[] |
no_license
|
paekva/ML
|
b20100d6af6059d0b02b68139f6cd5aefad19ef3
|
fea1bc58e0d088d556b6f60198143d5b3492b8a0
|
refs/heads/master
| 2021-04-07T15:09:25.129874
| 2020-04-04T16:11:45
| 2020-04-04T16:11:45
| 248,685,675
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,038
|
py
|
import numpy
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
import draw
top_words = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)
max_review_length = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)
embedding_vector_length = 32
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length, input_length=max_review_length))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
H = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3, batch_size=64)
scores = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
draw.draw_graphics(H)
|
[
"paekva@yandex.ru"
] |
paekva@yandex.ru
|
f8b8f5593741a9bd06cc06e4cd1d04bf154a9f0a
|
0f6250f164177cafe2f990dc07732d2dfd9a2888
|
/Homework 1/.history/codinghwq2_20210602130053.py
|
64427409dec736e7bb3bf5d4ae130852f1ad856a
|
[] |
no_license
|
Dustyik/AI-sem8
|
42f1f233671752c139e1d98f2b98717c6264933d
|
fae83d8003312a9eaf78322ce5d8efef8ee293ef
|
refs/heads/main
| 2023-07-02T02:34:16.429629
| 2021-08-01T10:25:11
| 2021-08-01T10:25:11
| 372,744,165
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,251
|
py
|
#flight search engine
#flight - starting city + time
#city - strings and time
#1. Good choice of state - Current City and Current Time
from search import Problem, breadth_first_search
class Flight:
def __init__(self, start_city, start_time, end_city, end_time):
self.start_city = start_city
self.start_time = start_time
self.end_city = end_city
self.end_time = end_time
def __str__(self):
return str((self.start_city, self.start_time))+ "->"+ str((self.end_city, self.end_time))
def matches(self, city, time):
#returns boolean whether city and time match those of the flights, flight leaves city past the time argument
return (self.start_city == city and self.start_time >= time)
flightDB = [Flight("Rome", 1, "Paris", 4),
Flight("Rome", 3, "Madrid", 5),
Flight("Rome", 5, "Istanbul", 10),
Flight("Paris", 2, "London", 4),
Flight("Paris", 5, "Oslo", 7),
Flight("Paris", 5, "Istanbul", 9),
Flight("Madrid", 7, "Rabat", 10),
Flight("Madrid", 8, "London", 10),
Flight("Istanbul", 10, "Constantinople", 10)]
def find_itinerary(start_city, start_time, end_city, deadline):
    # A minimal breadth-first search over (city, time) states, reading flightDB
    # directly (the imported Problem/breadth_first_search wrappers are left
    # unused here). Returns a list of Flights, or None if no itinerary exists.
    frontier = [((start_city, start_time), [])]
    while frontier:
        (city, time), path = frontier.pop(0)
        if city == end_city and time <= deadline:
            return path
        for flight in flightDB:
            if flight.matches(city, time) and flight.end_time <= deadline:
                frontier.append(((flight.end_city, flight.end_time), path + [flight]))
    return None
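# Editor's sketch: Rome at time 1 to London by time 10 finds
# Rome->Madrid (3,5) then Madrid->London (8,10).
if __name__ == "__main__":
    itinerary = find_itinerary("Rome", 1, "London", 10)
    for flight in itinerary or []:
        print(flight)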
|
[
"chiayik_tan@mymail.sutd.edu.sg"
] |
chiayik_tan@mymail.sutd.edu.sg
|
ddeea5f69f707a1666beff0ad4724ca510765f27
|
d21ca4cc1727875ac2bd2b83d96d03236c2e90e6
|
/lncrnadbtable/cms_plugins.py
|
06fc749af974950b8dbdf442ffdb15d10ed7d9b2
|
[] |
no_license
|
bluecerebudgerigar/lncrnadb-table
|
61356054a9e6a960c1d4356d38783e88ddbba3b6
|
5306694cf339361c0b1ca277213a8e4caba25d47
|
refs/heads/master
| 2016-09-10T03:59:02.773858
| 2014-04-14T15:06:46
| 2014-04-14T15:06:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,774
|
py
|
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from models import Annotation, Expression, Species, Literature, Nomenclature, Sequences, Associatedcomp
from forms import AnnotationForm, ExpressionForm, SpeciesForm, LiteratureForm, NomenclatureForm, SequencesForm, AssociatedcompForm
from django.utils import simplejson
from utils import static_url
from django.http import HttpResponseRedirect
import re
class AssociatedcompPlugin(CMSPluginBase):
model = Associatedcomp
form = AssociatedcompForm
render_template = "cms/plugins/associatedcomp.html"
text_enabled = True
fieldsets = (
(None, {
'fields': ('name',)
}),
(_('Headers'), {
'fields': (('headers_top', 'headers_left', 'headers_bottom'),)
}),
(None, {
'fields': ('table_data', 'csv_upload')
})
)
def render(self, context, instance, placeholder):
try:
data = simplejson.loads(instance.table_data)
except:
data = "error"
context.update({
'name': instance.name,
'data': data,
'instance':instance,
})
return context
def icon_src(self, instance):
return static_url("img/table.png")
def response_change(self, request, obj):
response = super(AssociatedcompPlugin, self).response_change(request, obj)
if 'csv_upload' in request.FILES.keys():
self.object_successfully_changed = False
return response
class SequencesPlugin(CMSPluginBase):
model = Sequences
form = SequencesForm
render_template = "cms/plugins/sequences.html"
text_enabled = True
fieldsets = (
(None, {
'fields': ('name',)
}),
(None, {
'fields':('sequence_prefix',)
}),
(_('Headers'), {
'fields': (('headers_top', 'headers_left', 'headers_bottom'),)
}),
(None, {
'fields': ('table_data', 'csv_upload')
})
)
def render(self, context, instance, placeholder):
try:
data = simplejson.loads(instance.table_data)
except:
data = "error"
context.update({
'name': instance.name,
'data': data,
'instance':instance,
})
return context
def icon_src(self, instance):
return static_url("img/table.png")
def response_change(self, request, obj):
response = super(SequencesPlugin, self).response_change(request, obj)
if 'csv_upload' in request.FILES.keys():
self.object_successfully_changed = False
return response
class NomenclaturePlugin(CMSPluginBase):
model = Nomenclature
form = NomenclatureForm
render_template = "cms/plugins/nomenclature.html"
text_enabled = True
fieldsets = (
(None, {
'fields': ('name',)
}),
(_('Headers'), {
'fields': (('headers_top', 'headers_left', 'headers_bottom'),)
}),
(None, {
'fields': ('table_data', 'csv_upload')
})
)
def render(self, context, instance, placeholder):
try:
data = simplejson.loads(instance.table_data)
except:
data = "error"
context.update({
'name': instance.name,
'data': data,
'instance':instance,
})
return context
def icon_src(self, instance):
return static_url("img/table.png")
def response_change(self, request, obj):
response = super(NomenclaturePlugin, self).response_change(request, obj)
if 'csv_upload' in request.FILES.keys():
self.object_successfully_changed = False
return response
class AnnotationPlugin(CMSPluginBase):
model = Annotation
form = AnnotationForm
render_template = "cms/plugins/annotation.html"
text_enabled = True
fieldsets = (
(None, {
'fields': ('name',)
}),
(_('Headers'), {
'fields': (('headers_top', 'headers_left', 'headers_bottom'),)
}),
(None, {
'fields': ('table_data', 'csv_upload')
})
)
def render(self, context, instance, placeholder):
try:
data = instance.table_data
data = simplejson.loads(data)
except:
data = "error"
context.update({
'name': instance.name,
            'data': data,
'instance':instance,
})
return context
def icon_src(self, instance):
return static_url("img/table.png")
def response_change(self, request, obj):
response = super(AnnotationPlugin, self).response_change(request, obj)
if 'csv_upload' in request.FILES.keys():
self.object_successfully_changed = False
return response
class ExpressionPlugin(CMSPluginBase):
model = Expression
form = ExpressionForm
render_template = "cms/plugins/expression.html"
text_enabled = True
fieldsets = (
(None, {
'fields': ('name',)
}),
(_('Headers'), {
'fields': (('headers_top', 'headers_left', 'headers_bottom'),)
}),
(None, {
'fields': ('table_data', 'csv_upload')
})
)
def render(self, context, instance, placeholder):
try:
#$ print instance.table_data
#instance.table_data = instance.table_data.replace("is","are")
data = simplejson.loads(instance.table_data)
#if type(data) == list:
# print data
# data = [[x.replace("is","are") for x in i] for i in data]
except:
data = "error"
context.update({
'name': instance.name,
'data': data,
'instance':instance,
'json_data': instance.table_data,
})
return context
def icon_src(self, instance):
return static_url("img/table.png")
def response_change(self, request, obj):
response = super(ExpressionPlugin, self).response_change(request, obj)
if 'csv_upload' in request.FILES.keys():
self.object_successfully_changed = False
return response
class SpeciesPlugin(CMSPluginBase):
model = Species
form = SpeciesForm
render_template = "cms/plugins/species.html"
text_enabled = True
fieldsets = (
(None, {
'fields': ('name',)
}),
(_('Headers'), {
'fields': (('headers_top', 'headers_left', 'headers_bottom'),)
}),
(None, {
'fields': ('table_data', 'csv_upload')
})
)
def render(self, context, instance, placeholder):
try:
data = instance.table_data
data = simplejson.loads(data)
except:
data = "error"
context.update({
'name': instance.name,
            'data': data,
'instance':instance,
})
return context
def icon_src(self, instance):
return static_url("img/table.png")
def response_change(self, request, obj):
response = super(SpeciesPlugin, self).response_change(request, obj)
if 'csv_upload' in request.FILES.keys():
self.object_successfully_changed = False
return response
class LiteraturePlugin(CMSPluginBase):
model = Literature
form = LiteratureForm
render_template = "cms/plugins/literature.html"
text_enabled = True
fieldsets = (
(None, {
'fields': ('name',)
}),
(_('Headers'), {
'fields': (('headers_top', 'headers_left', 'headers_bottom'),)
}),
(None, {
'fields': ('table_data', 'csv_upload')
})
)
def render(self, context, instance, placeholder):
try:
data = instance.table_data
            data = simplejson.loads(data)
except:
data = "error"
context.update({
'name': instance.name,
            'data': data,
'instance':instance,
})
return context
def icon_src(self, instance):
return static_url("img/table.png")
def response_change(self, request, obj):
response = super(LiteraturePlugin, self).response_change(request, obj)
if 'csv_upload' in request.FILES.keys():
self.object_successfully_changed = False
return response
plugin_pool.register_plugin(AnnotationPlugin)
plugin_pool.register_plugin(ExpressionPlugin)
plugin_pool.register_plugin(SpeciesPlugin)
plugin_pool.register_plugin(LiteraturePlugin)
plugin_pool.register_plugin(NomenclaturePlugin)
plugin_pool.register_plugin(SequencesPlugin)
plugin_pool.register_plugin(AssociatedcompPlugin)
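# --- Editor's sketch: the plugins above repeat one render pattern; a shared
# mixin like this (name is hypothetical) could collapse the duplication.
class JSONTableRenderMixin(object):
    def render(self, context, instance, placeholder):
        try:
            data = simplejson.loads(instance.table_data)
        except Exception:
            data = "error"
        context.update({
            'name': instance.name,
            'data': data,
            'instance': instance,
        })
        return context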
#data_2 = obj.table_data
#print type(data_2)
#data_2 = data_2.replace("ky","kyness")
#obj.table_data = data_2
## data_2 = request.POST.get("table_data")
## print data_2
## data_2 = data_2.replace("ky", "kyness")
## print data_2
## request.POST.__setitem__("table_data", data_2)
## data_2 = request.POST.get("table_data")
## print data_2
## print dir(obj)
##
|
[
"bluecerebudgerigar@gmail.com"
] |
bluecerebudgerigar@gmail.com
|
25992b96b12a2511eca80384eee352c586c60f3f
|
77b717487523312623e158dba52bb2c61b18b6c3
|
/workshops/25_query_service_main.py
|
19b19b3e60517268f374a9a0a1c3bc2d8c102652
|
[] |
no_license
|
jakubbujny/docker-workshops
|
f9b917a39f6db95c7ea89cdbf649f894e4395d2b
|
3e46d4fe4d2b01b3ed20c0a3e6fcaa58d99c9e5d
|
refs/heads/master
| 2021-07-24T04:27:39.739700
| 2017-11-05T13:53:40
| 2017-11-05T13:53:40
| 108,516,452
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
import pika
import json
import os
import sys
import time
from pymongo import MongoClient
import flask
import pprint
#We need mongo connection here
#We need query endpoint here
|
[
"jakub.bujny@ac-project.net"
] |
jakub.bujny@ac-project.net
|
f6d2f3c15738863e04e5a411298efee81f3bb8fa
|
d8b7436e85e43163759a4482b5cde547c1f09dd1
|
/services/svc_topic.py
|
d1bba218c1e76b7bd13b7ccb630b4b6ef81e4bef
|
[] |
no_license
|
ujued/witalk
|
1bb3379a04b21cdc2083dadd9695b43b5e299e4a
|
4f19d3b13aa42a332d4ad838170d11e4e40e532b
|
refs/heads/master
| 2021-01-24T21:35:48.475088
| 2018-04-04T01:42:14
| 2018-04-04T01:42:14
| 123,273,164
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 655
|
py
|
from flask import current_app, session
from threading import current_thread
def price(id):
    row = column('price', id)  # column() takes (column_name, id); the return was also missing
    return row[0] if row else None
def column(column_name, id):
conn = current_app.connections[current_thread()]
c_row = conn.execute('select %s from topic where id=%d' % (column_name, id)).first()
if c_row:
return c_row
else:
return None
def add_topic():
pass
def goodtopic(id):
"""
return boolean
"""
if id in current_app.good_topic_ids : return True
return False
def good_operate(op, id):
if op == 'non':
current_app.good_topic_ids.remove(id)
elif op == 'to':
current_app.good_topic_ids.append(id)
else : return
|
[
"ujued@qq.com"
] |
ujued@qq.com
|
e67b09956c5110bda1d7cc018446ff7e6b008a33
|
c046e4c4c010f4845acd8f527f4eb89347d9b035
|
/tests/test_extraneous_whitespace.py
|
003c3b181fe83108a1a71f9941bb6e20c5594e9f
|
[
"MIT"
] |
permissive
|
sturmianseq/krllint
|
7ad1d6e0febf081682e8b1d6608a81f86789931f
|
2f9376cdae14c201364d9c31b4c19a8ff2f708d2
|
refs/heads/master
| 2022-01-10T15:03:20.755423
| 2019-03-02T16:42:25
| 2019-03-02T16:42:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,666
|
py
|
# -*- coding: utf-8 -*-
from unittest import TestCase
from importlib import reload
from krllint import config
from krllint.reporter import Category, MemoryReporter
from krllint.linter import _create_arg_parser, Linter
class ExtraneousWhiteSpaceTestCase(TestCase):
TEST_INPUT = ["foo bar\n"]
FIXED_INPUT = ["foo bar\n"]
def test_rule_without_fix(self):
cli_args = _create_arg_parser().parse_args(["test_rule_without_fix"])
reload(config)
config.REPORTER = MemoryReporter
linter = Linter(cli_args, config)
lines, reporter = linter.lint_lines("test_rule_without_fix", self.TEST_INPUT)
self.assertEqual(reporter.found_issues[Category.CONVENTION], 1)
self.assertEqual(reporter.found_issues[Category.REFACTOR], 0)
self.assertEqual(reporter.found_issues[Category.WARNING], 0)
self.assertEqual(reporter.found_issues[Category.ERROR], 0)
self.assertEqual(reporter.found_issues[Category.FATAL], 0)
self.assertEqual(lines, self.TEST_INPUT)
self.assertEqual(reporter.messages[0].line_number, 0)
self.assertEqual(reporter.messages[0].column, 3)
self.assertEqual(reporter.messages[0].message, "superfluous whitespace")
self.assertEqual(reporter.messages[0].code, "superfluous-whitespace")
def test_rule_with_fix(self):
cli_args = _create_arg_parser().parse_args(["--fix", "test_rule_with_fix"])
reload(config)
config.REPORTER = MemoryReporter
linter = Linter(cli_args, config)
lines, _ = linter.lint_lines("test_rule_with_fix", self.TEST_INPUT)
self.assertEqual(lines, self.FIXED_INPUT)
|
[
"d4nuu8@gmail.com"
] |
d4nuu8@gmail.com
|
161794d774b8032d4ea9f5efe224e3dc64eb9229
|
a85357e58f8a598a997ddea78fd9d81b02fa8f79
|
/ring0/pwnage/tool.py
|
c988726b11315d18d046f241b812d879482900e8
|
[] |
no_license
|
0xchase/ctfs
|
edcfc266c5535deebcb037f8f726e1ebd4e7aff0
|
49be9404299400c855996a43cc9b87ce70b70138
|
refs/heads/master
| 2022-09-08T17:57:39.488841
| 2022-09-06T13:31:03
| 2022-09-06T13:31:03
| 189,076,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
#!/usr/bin/python3
# TODO: Make a fuzz script that automatically finds the length. Also contains
# various shellcodes. Can encode for bad characters.
|
[
"chasekanipe@gmail.com"
] |
chasekanipe@gmail.com
|
2ae5fbee0abe004c9dfb14725c6701764d2373e9
|
fbf352b3701607c24d2c49d16712bce9213d3926
|
/supper/models.py
|
633126a31dc6ce6533535965556b022c53d7da8c
|
[] |
no_license
|
roussieau/Treasury
|
8bb1e5473459b762d2f362323c5b66ca544da3c2
|
e98443b72061fdaf76da0f84ebf2b4d6700b708a
|
refs/heads/master
| 2020-03-27T18:09:02.336036
| 2019-12-28T12:02:06
| 2019-12-28T12:02:06
| 146,901,586
| 0
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 814
|
py
|
from django.db import models
from django.core.validators import MinValueValidator
# Create your models here.
class Day(models.Model):
date = models.DateField()
week = models.IntegerField(default=0)
visible = models.BooleanField(default=False)
def __str__(self):
return 'S{} - {: %d/%m/%y}'.format(self.week, self.date)
def presence(self, user):
return Participation.objects.filter(user=user, day=self).exists()
class Participation(models.Model):
user = models.ForeignKey('users.CustomUser',on_delete=models.CASCADE)
day = models.ForeignKey(Day, on_delete=models.CASCADE)
weight = models.IntegerField(default=1, validators=[MinValueValidator(1)])
def __str__(self):
        return '{} at the supper on {: %d/%m}'.format(self.user.get_full_name(), self.day.date)
|
[
"julian.roussieau@student.uclouvain.be"
] |
julian.roussieau@student.uclouvain.be
|
14bbb84f7da9c9815b81361b1dd37edb29746f63
|
2dbc9f6a98c097ef205ca9f014608f57df16e0f2
|
/pyyincheng/自动化运维/day1/3.morefiledircmp/2.filescmp.py
|
2ae84820500907bd7d0d90672fa9e37fc36989a8
|
[] |
no_license
|
qqzmr/pynumbertheory
|
59500c50c15f0f7f668be550458e0e6c8f29e254
|
29e83e678379d86db551c205462260b41aa26160
|
refs/heads/master
| 2021-06-29T00:15:45.199916
| 2020-09-22T07:58:12
| 2020-09-22T07:58:12
| 157,073,353
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
import filecmp
import os
# cmpfiles can compare files but not directories; the first returned list
# holds the matching files, the second the mismatched ones
print(filecmp.cmpfiles("./cyzonespider1","./cyzonespider2",
os.listdir("./cyzonespider1")))
print(filecmp.cmpfiles("./cyzonespider1","./cyzonespider2",
['1.txt', 'scrapy.cfg', 'starts.py']))
|
[
"379896832@qq.com"
] |
379896832@qq.com
|
8675870657f4e23e740fd364aea0913844121570
|
3f8f986ce8de3fc378655c71f46e25ac5dac33cd
|
/obywatele/migrations/0030_uzytkownik_phone.py
|
f5a790bea7a778138fe31e09af6d3556a5af6692
|
[
"MIT"
] |
permissive
|
soma115/wikikracja
|
ffdc97ec4b3f72c981c1d3cbe0108673797e5a0e
|
ff9530e4ab7b38623c097deb2beb120211fcd950
|
refs/heads/master
| 2023-07-24T15:46:31.284999
| 2023-07-06T17:03:40
| 2023-07-06T17:03:40
| 176,117,874
| 5
| 2
|
MIT
| 2023-07-06T17:03:41
| 2019-03-17T15:05:20
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 468
|
py
|
# Generated by Django 3.1.12 on 2021-09-11 18:38

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('obywatele', '0029_auto_20210821_2201'),
    ]

    operations = [
        migrations.AddField(
            model_name='uzytkownik',
            name='phone',
            field=models.TextField(blank=True, help_text='Phone number', max_length=50, null=True, verbose_name='Phone number'),
        ),
    ]
|
[
"robert.fialek@gmail.com"
] |
robert.fialek@gmail.com
|
d68d2bde985e9616226335ff5e0881a34fbb535d
|
9d2a06bdf5228edffc789e4112e2b41517e5df7c
|
/foe/views.py
|
c862a24ce29b33821dbdea9da77bcf49e1c45405
|
[] |
no_license
|
JosmanPS/FOE-app
|
db985b023e07655d94eb7eb76c96642ef01610fe
|
157a567f91ec01f67bcbebe5c05159ea44319ad0
|
refs/heads/master
| 2020-06-06T19:13:35.463567
| 2015-07-23T22:25:52
| 2015-07-23T22:25:52
| 39,058,892
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,409
|
py
|
# -*- coding: utf-8 -*-
from django.shortcuts import redirect, render, render_to_response, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from .forms import *
from .models import *
from django.utils.text import slugify

#
# FOE, main site
#


def index(request):
    args = dict()
    return render_to_response('foe/main/index.html', args)


@login_required
def registro_oe(request):
    args = dict()
    usuario = request.user
    oe_usuario = OrganizacionEstudiantil(usuario=usuario)
    oe = OrganizacionEstudiantil.objects.filter(usuario=usuario)
    args['completo'] = False
    if request.method == 'POST':
        print(usuario)
        if oe:
            form = OEForm(request.POST, request.FILES, instance=oe[0])
        else:
            form = OEForm(request.POST, request.FILES, instance=oe_usuario)
        if form.is_valid():
            f = form.save()
            f.slug = slugify(f.nombre)
            f.save()
            return redirect(reverse('registro_oe'))
    else:
        if oe:
            form = OEForm(instance=oe[0])
        else:
            form = OEForm(instance=oe_usuario)
    args['form'] = form
    return render(request, "foe/forms/registroOE.html", args)


@login_required
def registro_comite(request):
    args = dict()
    usuario = request.user
    cm_usuario = Comite(usuario=usuario)
    cm = Comite.objects.filter(usuario=usuario)
    if request.method == 'POST':
        print(usuario)
        if cm:
            form = ComiteForm(request.POST, request.FILES, instance=cm[0])
        else:
            form = ComiteForm(request.POST, request.FILES, instance=cm_usuario)
        print(request.FILES)
        print(form.is_valid())
        if form.is_valid():
            form.save()
            return redirect('/')
    else:
        if cm:
            form = ComiteForm(instance=cm[0])
        else:
            form = ComiteForm(instance=cm_usuario)
    args['form'] = form
    return render(request, "foe/forms/comite.html", args)


@login_required
def miembros_oe(request):
    args = dict()
    usuario = request.user
    oe = get_object_or_404(OrganizacionEstudiantil, usuario=usuario)
    m_oe = Miembro(organizacion_estudiantil=oe)
    m = Miembro.objects.filter(organizacion_estudiantil=oe)
    if request.method == 'POST':
        print(usuario)
        if m:
            form = MiembroForm(request.POST, request.FILES, instance=m[0])
        else:
            form = MiembroForm(request.POST, request.FILES, instance=m_oe)
        print(request.FILES)
        print(form.is_valid())
        if form.is_valid():
            form.save()
            return redirect('/')
    else:
        if m:
            form = MiembroForm(instance=m[0])
        else:
            form = MiembroForm(instance=m_oe)
    args['form'] = form
    return render(request, "foe/forms/miembro.html", args)


@login_required
def datos_bancarios(request):
    args = dict()
    usuario = request.user
    oe = get_object_or_404(OrganizacionEstudiantil, usuario=usuario)
    m_oe = DatosBancarios(organizacion_estudiantil=oe)
    m = DatosBancarios.objects.filter(organizacion_estudiantil=oe)
    if request.method == 'POST':
        print(usuario)
        if m:
            form = BancarioForm(request.POST, request.FILES, instance=m[0])
        else:
            form = BancarioForm(request.POST, request.FILES, instance=m_oe)
        print(request.FILES)
        print(form.is_valid())
        if form.is_valid():
            form.save()
            return redirect('/')
    else:
        if m:
            form = BancarioForm(instance=m[0])
        else:
            form = BancarioForm(instance=m_oe)
    args['form'] = form
    return render(request, "foe/forms/datos-bancarios.html", args)


def directorio(request):
    args = dict()
    oes = OrganizacionEstudiantil.objects.all()
    # order_by returns a new queryset, so the result must be reassigned
    oes = oes.order_by('clasificacion', 'nombre')
    args['organizaciones'] = oes
    return render(request, "foe/main/directorio.html", args)


def perfil_oe(request, oe_slug):
    args = dict()
    oe = get_object_or_404(
        OrganizacionEstudiantil, slug=oe_slug)
    args['oe'] = oe
    args['logo_url'] = oe.logo._get_url()
    args['plan_trabajo_url'] = oe.plan_trabajo._get_url()
    args['presupuesto_url'] = oe.presupuesto._get_url()
    return render(request, "foe/main/perfil.html", args)
|
[
"josman@localhost.localdomain"
] |
josman@localhost.localdomain
|
0dc321c6cd6f8c7a77bbd827084f09160d9bd5ca
|
e82b761f53d6a3ae023ee65a219eea38e66946a0
|
/All_In_One/addons/HaydeeTools/HaydeeNodeMat.py
|
18962c5bc62d268ca0be953b091ad844fedf5d4e
|
[] |
no_license
|
2434325680/Learnbgame
|
f3a050c28df588cbb3b14e1067a58221252e2e40
|
7b796d30dfd22b7706a93e4419ed913d18d29a44
|
refs/heads/master
| 2023-08-22T23:59:55.711050
| 2021-10-17T07:26:07
| 2021-10-17T07:26:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,738
|
py
|
# -*- coding: utf-8 -*-
# <pep8 compliant>
import bpy
import os
from mathutils import Vector
#Nodes Layout
NODE_FRAME = 'NodeFrame'
#Nodes Shaders
BSDF_DIFFUSE_NODE = 'ShaderNodeBsdfDiffuse'
BSDF_EMISSION_NODE = 'ShaderNodeEmission'
BSDF_GLOSSY_NODE = 'ShaderNodeBsdfGlossy'
PRINCIPLED_SHADER_NODE = 'ShaderNodeBsdfPrincipled'
BSDF_TRANSPARENT_NODE = 'ShaderNodeBsdfTransparent'
SHADER_ADD_NODE = 'ShaderNodeAddShader'
SHADER_MIX_NODE = 'ShaderNodeMixShader'
#Nodes Color
RGB_MIX_NODE = 'ShaderNodeMixRGB'
INVERT_NODE = 'ShaderNodeInvert'
#Nodes Input
TEXTURE_IMAGE_NODE = 'ShaderNodeTexImage'
SHADER_NODE_FRESNEL = 'ShaderNodeFresnel'
SHADER_NODE_NEW_GEOMETRY = 'ShaderNodeNewGeometry'
#Nodes Outputs
OUTPUT_NODE = 'ShaderNodeOutputMaterial'
#Nodes Vector
NORMAL_MAP_NODE = 'ShaderNodeNormalMap'
#Nodes Convert
SHADER_NODE_MATH = 'ShaderNodeMath'
SHADER_NODE_SEPARATE_RGB = 'ShaderNodeSeparateRGB'
SHADER_NODE_COMBINE_RGB = 'ShaderNodeCombineRGB'
# Node Groups
NODE_GROUP = 'ShaderNodeGroup'
NODE_GROUP_INPUT = 'NodeGroupInput'
NODE_GROUP_OUTPUT = 'NodeGroupOutput'
SHADER_NODE_TREE = 'ShaderNodeTree'
# Node Custom Groups
HAYDEE_NORMAL_NODE = 'Haydee Normal'
# Sockets
NODE_SOCKET_COLOR = 'NodeSocketColor'
NODE_SOCKET_FLOAT = 'NodeSocketFloat'
NODE_SOCKET_FLOAT_FACTOR = 'NodeSocketFloatFactor'
NODE_SOCKET_SHADER = 'NodeSocketShader'
NODE_SOCKET_VECTOR = 'NodeSocketVector'
DEFAULT_PBR_POWER = .5
def load_image(textureFilepath, forceNewTexture=False):
    image = None
    if textureFilepath:
        textureFilename = os.path.basename(textureFilepath)
        fileRoot, fileExt = os.path.splitext(textureFilename)
        if (os.path.exists(textureFilepath)):
            print("Loading Texture: " + textureFilename)
            image = bpy.data.images.load(filepath=textureFilepath, check_existing=not forceNewTexture)
        else:
            # missing texture: create a placeholder image pointing at the path
            print("Warning. Texture not found " + textureFilename)
            image = bpy.data.images.new(
                name=textureFilename, width=1024, height=1024, alpha=True,
                float_buffer=False)
            image.source = 'FILE'
            image.filepath = textureFilepath
    return image
def create_material(obj, useAlpha, mat_name, diffuseFile, normalFile, specularFile, emissionFile):
    obj.data.materials.clear()
    material = bpy.data.materials.get(mat_name)
    if not material:
        material = bpy.data.materials.new(mat_name)
    material.use_nodes = True
    if useAlpha:
        material.blend_method = 'BLEND'
    obj.data.materials.append(material)
    create_cycle_node_material(material, useAlpha, diffuseFile, normalFile, specularFile, emissionFile)
def create_cycle_node_material(material, useAlpha, diffuseFile, normalFile, specularFile, emissionFile):
    # Nodes
    node_tree = material.node_tree
    node_tree.nodes.clear()
    col_width = 200

    diffuseTextureNode = node_tree.nodes.new(TEXTURE_IMAGE_NODE)
    diffuseTextureNode.label = 'Diffuse'
    diffuseTextureNode.image = load_image(diffuseFile)
    diffuseTextureNode.location = Vector((0, 0))

    specularTextureNode = node_tree.nodes.new(TEXTURE_IMAGE_NODE)
    specularTextureNode.label = 'Roughness Specular Metalic'
    specularTextureNode.color_space = 'NONE'
    specularTextureNode.image = load_image(specularFile)
    specularTextureNode.location = diffuseTextureNode.location + Vector((0, -450))

    normalTextureRgbNode = node_tree.nodes.new(TEXTURE_IMAGE_NODE)
    normalTextureRgbNode.label = 'Haydee Normal'
    normalTextureRgbNode.color_space = 'NONE'
    normalTextureRgbNode.image = load_image(normalFile)
    if normalTextureRgbNode.image:
        normalTextureRgbNode.image.use_alpha = False
    normalTextureRgbNode.location = specularTextureNode.location + Vector((0, -300))

    normalTextureAlphaNode = node_tree.nodes.new(TEXTURE_IMAGE_NODE)
    normalTextureAlphaNode.label = 'Haydee Normal Alpha'
    normalTextureAlphaNode.image = load_image(normalFile, True)
    if normalTextureAlphaNode.image:
        normalTextureAlphaNode.image.use_alpha = True
    normalTextureAlphaNode.color_space = 'NONE'
    normalTextureAlphaNode.location = specularTextureNode.location + Vector((0, -600))

    haydeeNormalMapNode = node_tree.nodes.new(NODE_GROUP)
    haydeeNormalMapNode.label = 'Haydee Normal Converter'
    haydeeNormalMapNode.node_tree = haydee_normal_map()
    haydeeNormalMapNode.location = normalTextureRgbNode.location + Vector((col_width * 1.5, 0))

    normalMapNode = node_tree.nodes.new(NORMAL_MAP_NODE)
    normalMapNode.location = haydeeNormalMapNode.location + Vector((col_width, 100))

    emissionTextureNode = node_tree.nodes.new(TEXTURE_IMAGE_NODE)
    emissionTextureNode.label = 'Emission'
    emissionTextureNode.image = load_image(emissionFile)
    emissionTextureNode.location = diffuseTextureNode.location + Vector((0, 260))

    separateRgbNode = node_tree.nodes.new(SHADER_NODE_SEPARATE_RGB)
    separateRgbNode.location = specularTextureNode.location + Vector((col_width * 1.5, 60))

    roughnessPowerNode = node_tree.nodes.new(SHADER_NODE_MATH)
    roughnessPowerNode.operation = 'POWER'
    roughnessPowerNode.inputs[1].default_value = DEFAULT_PBR_POWER
    roughnessPowerNode.location = separateRgbNode.location + Vector((col_width, 200))

    specPowerNode = node_tree.nodes.new(SHADER_NODE_MATH)
    specPowerNode.operation = 'POWER'
    specPowerNode.inputs[1].default_value = DEFAULT_PBR_POWER
    specPowerNode.location = separateRgbNode.location + Vector((col_width, 50))

    metallicPowerNode = node_tree.nodes.new(SHADER_NODE_MATH)
    metallicPowerNode.operation = 'POWER'
    metallicPowerNode.inputs[1].default_value = DEFAULT_PBR_POWER
    metallicPowerNode.location = separateRgbNode.location + Vector((col_width, -100))

    alphaMixNode = None
    transparencyNode = None
    pbrShaderNode = None
    pbrColorInput = None
    pbrRoughnessInput = None
    pbrReflectionInput = None
    pbrMetallicInput = None

    pbrShaderNode = node_tree.nodes.new(PRINCIPLED_SHADER_NODE)
    # pbrShaderNode.location = roughnessPowerNode.location + Vector((200, 100))
    pbrShaderNode.location = diffuseTextureNode.location + Vector((col_width * 4, -100))
    pbrColorInput = 'Base Color'
    pbrRoughnessInput = 'Roughness'
    pbrReflectionInput = 'Specular'
    pbrMetallicInput = 'Metallic'

    emissionNode = node_tree.nodes.new(BSDF_EMISSION_NODE)
    emissionNode.inputs['Color'].default_value = (0, 0, 0, 1)
    emissionNode.location = pbrShaderNode.location + Vector((100, 100))

    addShaderNode = node_tree.nodes.new(SHADER_ADD_NODE)
    addShaderNode.location = emissionNode.location + Vector((250, 0))

    outputNode = node_tree.nodes.new(OUTPUT_NODE)
    outputNode.location = addShaderNode.location + Vector((500, 200))

    if useAlpha:
        alphaMixNode = node_tree.nodes.new(SHADER_MIX_NODE)
        alphaMixNode.location = pbrShaderNode.location + Vector((600, 300))
        transparencyNode = node_tree.nodes.new(BSDF_TRANSPARENT_NODE)
        transparencyNode.location = alphaMixNode.location + Vector((-250, -100))

    # Links Input
    links = node_tree.links
    if emissionFile and os.path.exists(emissionFile):
        links.new(emissionTextureNode.outputs['Color'], emissionNode.inputs['Color'])
    links.new(diffuseTextureNode.outputs['Color'], pbrShaderNode.inputs[pbrColorInput])
    links.new(specularTextureNode.outputs['Color'], separateRgbNode.inputs['Image'])
    if normalFile and os.path.exists(normalFile):
        links.new(normalTextureRgbNode.outputs['Color'], haydeeNormalMapNode.inputs['Color'])
        links.new(normalTextureAlphaNode.outputs['Alpha'], haydeeNormalMapNode.inputs['Alpha'])
        links.new(haydeeNormalMapNode.outputs['Normal'], normalMapNode.inputs['Color'])
    links.new(emissionNode.outputs['Emission'], addShaderNode.inputs[0])
    links.new(addShaderNode.outputs['Shader'], outputNode.inputs['Surface'])
    if useAlpha:
        links.new(diffuseTextureNode.outputs['Alpha'], alphaMixNode.inputs['Fac'])
        links.new(transparencyNode.outputs['BSDF'], alphaMixNode.inputs[1])
        links.new(addShaderNode.outputs['Shader'], alphaMixNode.inputs[2])
        links.new(alphaMixNode.outputs['Shader'], outputNode.inputs['Surface'])
    links.new(specularTextureNode.outputs['Color'], separateRgbNode.inputs['Image'])
    links.new(separateRgbNode.outputs['R'], roughnessPowerNode.inputs[0])
    links.new(separateRgbNode.outputs['G'], specPowerNode.inputs[0])
    links.new(separateRgbNode.outputs['B'], metallicPowerNode.inputs[0])
    if specularFile and os.path.exists(specularFile):
        links.new(roughnessPowerNode.outputs[0], pbrShaderNode.inputs[pbrRoughnessInput])
        links.new(specPowerNode.outputs[0], pbrShaderNode.inputs[pbrReflectionInput])
        if pbrMetallicInput:
            links.new(metallicPowerNode.outputs[0], pbrShaderNode.inputs[pbrMetallicInput])
    links.new(normalMapNode.outputs['Normal'], pbrShaderNode.inputs['Normal'])
    links.new(pbrShaderNode.outputs[0], addShaderNode.inputs[1])
def haydee_normal_map():
    if HAYDEE_NORMAL_NODE in bpy.data.node_groups:
        return bpy.data.node_groups[HAYDEE_NORMAL_NODE]

    # create a group
    node_tree = bpy.data.node_groups.new(HAYDEE_NORMAL_NODE, SHADER_NODE_TREE)

    separateRgbNode = node_tree.nodes.new(SHADER_NODE_SEPARATE_RGB)
    separateRgbNode.location = Vector((0, 0))

    invertRNode = node_tree.nodes.new(INVERT_NODE)
    invertRNode.inputs[0].default_value = 0
    invertRNode.location = separateRgbNode.location + Vector((200, 40))

    invertGNode = node_tree.nodes.new(INVERT_NODE)
    invertGNode.inputs[0].default_value = 1
    invertGNode.location = separateRgbNode.location + Vector((200, -60))

    SpaceChange = node_tree.nodes.new(NODE_FRAME)
    SpaceChange.name = 'R & G Space Change'
    SpaceChange.label = 'R & G Space Change'

    mathMultiplyRNode = node_tree.nodes.new(SHADER_NODE_MATH)
    mathMultiplyRNode.parent = SpaceChange
    mathMultiplyRNode.operation = 'MULTIPLY'
    mathMultiplyRNode.inputs[1].default_value = 2
    mathMultiplyRNode.location = invertGNode.location + Vector((250, -100))

    mathMultiplyGNode = node_tree.nodes.new(SHADER_NODE_MATH)
    mathMultiplyGNode.parent = SpaceChange
    mathMultiplyGNode.operation = 'MULTIPLY'
    mathMultiplyGNode.inputs[1].default_value = 2
    mathMultiplyGNode.location = invertGNode.location + Vector((250, -250))

    mathSubstractRNode = node_tree.nodes.new(SHADER_NODE_MATH)
    mathSubstractRNode.parent = SpaceChange
    mathSubstractRNode.operation = 'SUBTRACT'
    mathSubstractRNode.inputs[1].default_value = 1
    mathSubstractRNode.location = mathMultiplyRNode.location + Vector((200, 0))

    mathSubstractGNode = node_tree.nodes.new(SHADER_NODE_MATH)
    mathSubstractGNode.parent = SpaceChange
    mathSubstractGNode.operation = 'SUBTRACT'
    mathSubstractGNode.inputs[1].default_value = 1
    mathSubstractGNode.location = mathMultiplyGNode.location + Vector((200, 0))

    BCalc = node_tree.nodes.new(NODE_FRAME)
    BCalc.name = 'B Calc'
    BCalc.label = 'B Calc'

    mathPowerRNode = node_tree.nodes.new(SHADER_NODE_MATH)
    mathPowerRNode.parent = BCalc
    mathPowerRNode.operation = 'POWER'
    mathPowerRNode.inputs[1].default_value = 2
    mathPowerRNode.location = mathSubstractRNode.location + Vector((200, 0))

    mathPowerGNode = node_tree.nodes.new(SHADER_NODE_MATH)
    mathPowerGNode.parent = BCalc
    mathPowerGNode.operation = 'POWER'
    mathPowerGNode.inputs[1].default_value = 2
    mathPowerGNode.location = mathSubstractGNode.location + Vector((200, 0))

    mathAddNode = node_tree.nodes.new(SHADER_NODE_MATH)
    mathAddNode.parent = BCalc
    mathAddNode.operation = 'ADD'
    mathAddNode.location = mathPowerGNode.location + Vector((200, 60))

    mathSubtractNode = node_tree.nodes.new(SHADER_NODE_MATH)
    mathSubtractNode.parent = BCalc
    mathSubtractNode.operation = 'SUBTRACT'
    mathSubtractNode.inputs[0].default_value = 1
    mathSubtractNode.location = mathAddNode.location + Vector((200, 0))

    mathRootNode = node_tree.nodes.new(SHADER_NODE_MATH)
    mathRootNode.parent = BCalc
    mathRootNode.operation = 'POWER'
    mathRootNode.inputs[1].default_value = .5
    mathRootNode.location = mathSubtractNode.location + Vector((200, 0))

    combineRgbNode = node_tree.nodes.new(SHADER_NODE_COMBINE_RGB)
    combineRgbNode.location = mathRootNode.location + Vector((200, 230))

    # Input/Output
    group_inputs = node_tree.nodes.new(NODE_GROUP_INPUT)
    group_inputs.location = separateRgbNode.location + Vector((-200, -100))
    group_outputs = node_tree.nodes.new(NODE_GROUP_OUTPUT)
    group_outputs.location = combineRgbNode.location + Vector((200, 0))

    # group_inputs.inputs.new(NODE_SOCKET_SHADER, 'Shader')
    input_color = node_tree.inputs.new(NODE_SOCKET_COLOR, 'Color')
    input_color.default_value = (.5, .5, .5, 1)
    input_alpha = node_tree.inputs.new(NODE_SOCKET_COLOR, 'Alpha')
    input_alpha.default_value = (.5, .5, .5, 1)
    output_value = node_tree.outputs.new(NODE_SOCKET_COLOR, 'Normal')

    # Links Input
    links = node_tree.links
    links.new(group_inputs.outputs['Color'], separateRgbNode.inputs['Image'])
    links.new(group_inputs.outputs['Alpha'], invertGNode.inputs['Color'])
    links.new(separateRgbNode.outputs['R'], invertRNode.inputs['Color'])
    links.new(invertRNode.outputs['Color'], mathMultiplyRNode.inputs[0])
    links.new(invertGNode.outputs['Color'], mathMultiplyGNode.inputs[0])
    links.new(mathMultiplyRNode.outputs[0], mathSubstractRNode.inputs[0])
    links.new(mathMultiplyGNode.outputs[0], mathSubstractGNode.inputs[0])
    links.new(mathSubstractRNode.outputs[0], mathPowerRNode.inputs[0])
    links.new(mathSubstractGNode.outputs[0], mathPowerGNode.inputs[0])
    links.new(mathPowerRNode.outputs['Value'], mathAddNode.inputs[0])
    links.new(mathPowerGNode.outputs['Value'], mathAddNode.inputs[1])
    links.new(mathAddNode.outputs['Value'], mathSubtractNode.inputs[1])
    links.new(mathSubtractNode.outputs['Value'], mathRootNode.inputs[0])
    links.new(invertRNode.outputs['Color'], combineRgbNode.inputs['R'])
    links.new(invertGNode.outputs['Color'], combineRgbNode.inputs['G'])
    links.new(mathRootNode.outputs['Value'], combineRgbNode.inputs['B'])
    links.new(combineRgbNode.outputs['Image'], group_outputs.inputs['Normal'])
    return node_tree
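For readers following the 'B Calc' frame above: the node group rebuilds the blue channel of a tangent-space normal map from red and green using the unit-length constraint x^2 + y^2 + z^2 = 1. A minimal standalone sketch of the same math (plain Python, no Blender API; the function name is ours, and the max() clamp is an addition the node group itself does not perform):

import math

def reconstruct_normal_z(r, g):
    # remap the stored [0, 1] channel values to [-1, 1] vector components
    x = 2.0 * r - 1.0
    y = 2.0 * g - 1.0
    # unit-length constraint gives z = sqrt(1 - x^2 - y^2);
    # the clamp guards against rounding error in the texture data
    return math.sqrt(max(0.0, 1.0 - x * x - y * y))

# a flat-facing texel stores (0.5, 0.5), so z should come out as 1.0
print(reconstruct_normal_z(0.5, 0.5))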
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|