blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7518016cfc9c6146f9aeac73199a889b77b08bb5
|
cf9ba2393abe81c173dc1b822f03aa9eb05b0a61
|
/Topsisz.py
|
877ef6592c6ea970b1f70875ab4a92251ea29645
|
[
"MIT"
] |
permissive
|
zabhitak/TOPSIS-Abhinav-101803706
|
5b2066b47109608f9ba759713f1e14fa45fdf151
|
08b3a4b9887ba30027284f60ccc134bcbbb28d4f
|
refs/heads/master
| 2023-01-07T01:48:02.911109
| 2020-11-12T05:17:11
| 2020-11-12T05:17:11
| 311,130,216
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,297
|
py
|
import sys
import pandas as pd
import numpy as np
import os
class myexception(Exception):
    """Domain-specific error raised for invalid TOPSIS input or arguments."""
def normalised_mat(filename, weights, impact):
    """Load the decision matrix from *filename* and vector-normalise each criterion column.

    Parameters
    ----------
    filename : str
        Path to the input CSV; the first column is an identifier, the
        remaining columns are the numeric criteria.
    weights : str
        Comma-separated weight string, e.g. "1,1,1,2" (only its count is
        validated here; the values are applied in weight_assign).
    impact : list[str]
        One '+' or '-' per criterion column.

    Returns
    -------
    numpy.ndarray
        Float matrix where each column is divided by its Euclidean norm.

    Raises
    ------
    myexception
        If the file has fewer than three columns, if the weight/impact/column
        counts disagree, or if any criterion value is non-numeric.
    """
    dataset = pd.read_csv(filename)
    if dataset.shape[1] < 3:
        raise myexception("Input file must contain three or more columns")
    n_cols = dataset.shape[1] - 1  # criteria only; column 0 is the identifier
    # Bug fix: the original used a chained `len(weights)!=len(impact)!=n_cols`,
    # which (a) short-circuits incorrectly and (b) measured the character
    # length of the raw weights string.  Count the comma-separated weights
    # and require all three counts to match.
    if not (len(weights.split(',')) == len(impact) == n_cols):
        raise myexception("Number of weights, number of impacts and number of columns (from 2nd to last columns) must be same")
    is_numeric = dataset.iloc[:, 1:].apply(
        lambda s: pd.to_numeric(s, errors='coerce').notnull().all())
    if not is_numeric.all():
        raise myexception("For the Given Dataset from column2 all values must be numeric")
    # Bug fix: cast to float before normalising -- writing fractions back into
    # an integer ndarray (as the original did) truncates every value to 0.
    column_values = dataset.iloc[:, 1:].values.astype(float)
    # Divide each column by the root of its sum of squares (vector norm).
    norms = np.sqrt(np.square(column_values).sum(axis=0))
    return column_values / norms
def weight_assign(column_values, weights):
    """Scale every column of the normalised matrix by its relative weight.

    *weights* is a comma-separated string such as "1,1,2"; each weight is
    divided by the total so the weights sum to 1 before being applied.
    Returns a list of lists (one inner list per row).
    """
    raw = [float(w) for w in weights.split(',')]
    total = sum(raw)
    norm_weights = [w / total for w in raw]
    weighted_rows = []
    for row in column_values:
        weighted_rows.append([value * norm_weights[j] for j, value in enumerate(row)])
    return weighted_rows
def performance_score(weighted_column, impacts):
    """Compute the TOPSIS closeness score for every row of the weighted matrix.

    For each criterion column the ideal-best/ideal-worst values are the column
    max/min for a '+' impact and min/max for a '-' impact.  Each row's score is
    S- / (S- + S+), where S+/S- are its Euclidean distances to the ideal
    best/worst vectors.  Returns a list of floats, one per row.

    Fix: the original contained a dead no-op statement (``q[:,0]``) and
    computed the distances with nested Python loops; this version removes the
    dead code and vectorises the distance computation.
    """
    q = np.array(weighted_column, dtype=float)
    n_cols = q.shape[1]
    Vjpositive = np.zeros(n_cols, dtype=float)
    Vjnegative = np.zeros(n_cols, dtype=float)
    for j in range(n_cols):
        column = q[:, j]
        if impacts[j] == '+':
            Vjpositive[j] = column.max()
            Vjnegative[j] = column.min()
        elif impacts[j] == '-':
            Vjpositive[j] = column.min()
            Vjnegative[j] = column.max()
    # Row-wise Euclidean distances to the ideal best / ideal worst vectors.
    Sjpositive = np.sqrt(np.square(q - Vjpositive).sum(axis=1))
    Sjnegative = np.sqrt(np.square(q - Vjnegative).sum(axis=1))
    scores = Sjnegative / (Sjnegative + Sjpositive)
    return [float(score) for score in scores]
def adding_data(score, filename):
    """Re-read the original CSV and append Topsis_Score and Rank columns.

    Ranks are assigned in descending score order (best score gets rank 1),
    using pandas' 'max' tie-breaking method.  Returns the augmented DataFrame.
    """
    table = pd.read_csv(filename)
    table['Topsis_Score'] = score
    table['Rank'] = table['Topsis_Score'].rank(method='max', ascending=False)
    return table
def main():
    """Command-line entry point.

    Usage: python topsis.py <inputfile.csv> <weights> <impacts> <result.csv>
    Validates the arguments, runs the TOPSIS pipeline and writes the result CSV.
    """
    if len(sys.argv) != 5:
        raise myexception("You have missed an input file..,Use : python topsis.py inputfile.csv “1,1,1,2” “+,+,-,+” result.csv")
    filename = os.path.join(os.getcwd(), sys.argv[1])
    if not os.path.exists(filename):
        raise myexception("file does not exists")
    weights = sys.argv[2]
    impacts = sys.argv[3].split(',')
    result = sys.argv[4]
    # Every impact symbol must be '+' or '-'.
    for symbol in impacts:
        if symbol not in ('+', '-'):
            raise myexception("Impacts must be either +ve or -ve.")
    data_table = normalised_mat(filename, weights, impacts)   # normalise the matrix
    wt_given = weight_assign(data_table, weights)             # apply the weights
    performance = performance_score(wt_given, impacts)        # closeness scores
    new_dataset = adding_data(performance, filename)          # attach score + rank
    new_dataset.to_csv(result)

if __name__ == '__main__':
    main()
|
[
"zabhitak1292000@gmail.com"
] |
zabhitak1292000@gmail.com
|
5a71efd94cdeda2ade61ec8e122438f31aa20dbc
|
db7b7ec5177533020c028b717166d0f446b983ce
|
/py_lab2/my_pkg/bin_conv.py
|
018dabf37cadc5844053544d2bd51ce538d02ef7
|
[] |
no_license
|
BREG001/osp_repo_sub
|
63c97dc54eac778bd841f163df9bd367df451ed1
|
0588f41ccffc0b2aab0fb0cba79ca87586cef3a9
|
refs/heads/master
| 2021-05-18T15:08:35.890106
| 2020-04-27T16:07:44
| 2020-04-27T16:07:44
| 251,288,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 770
|
py
|
#!/usr/bin/python3
def bin_conv():
    """Read a number typed as binary digits and print it in octal, decimal and hex.

    The input is read with int(), so "1011" is held as the integer 1011 and
    digits are peeled off with % 10 / // 10.  Fixes over the original: the
    unused variable ``asdf`` is removed, the unused hex loop counter is
    removed, and the builtins ``bin``/``oct``/``hex`` are no longer shadowed.
    """
    bits = int(input("input binary number:"))

    # Octal: fold three binary digits (LSB first) into one octal digit.
    remaining = bits
    oct_value = 0
    place = 0
    while remaining >= 1:
        group = 0
        for j in range(0, 3):
            group += (remaining % 10) * pow(2, j)
            remaining = remaining // 10
        oct_value += group * pow(10, place)
        place += 1
    print("=> OCT> %d" % oct_value)

    # Decimal: classic positional sum of bit * 2**position.
    remaining = bits
    dec_value = 0
    place = 0
    while remaining >= 1:
        dec_value += (remaining % 10) * pow(2, place)
        remaining = remaining // 10
        place += 1
    print("=> DEC> %d" % dec_value)

    # Hex: fold four binary digits into one hex digit, building the string
    # most-significant-first by prepending.
    remaining = bits
    hex_string = ""
    while remaining >= 1:
        group = 0
        for j in range(0, 4):
            group += (remaining % 10) * pow(2, j)
            remaining = remaining // 10
        if group < 10:
            hex_string = chr(group + ord("0")) + hex_string
        else:
            hex_string = chr(group - 10 + ord("A")) + hex_string
    print("=> HEX>", hex_string)

if __name__ == '__main__':
    bin_conv()
|
[
"ws4368@naver.com"
] |
ws4368@naver.com
|
b8f7783476de85f6a2b7ad67a79e0767980ce466
|
e3afb5c1df8ecaac3555d0318aa111b34cd7724a
|
/RadixSort.py
|
ba27c45d3dd88281141d5c7da564e675b96ef24b
|
[] |
no_license
|
Avinash18046/Sorting-Python
|
f71683a7af34dca40c06198abd7588fbcb57937d
|
5f25adc1d2fbd84f6696e1bce290de625f028723
|
refs/heads/main
| 2023-08-02T10:43:35.572685
| 2021-10-02T09:42:23
| 2021-10-02T09:42:23
| 412,750,185
| 1
| 0
| null | 2021-10-02T09:28:40
| 2021-10-02T09:28:40
| null |
UTF-8
|
Python
| false
| false
| 1,202
|
py
|
def countingSort(arr, exp1):
    """Stable in-place counting sort of *arr* on the decimal digit selected by
    *exp1* (1 = units, 10 = tens, ...).

    Fix: uses integer division (//) instead of the original float division, so
    very large integers are sorted without float-precision loss.
    """
    n = len(arr)
    output = [0] * n
    count = [0] * 10

    # Histogram of the selected digit.
    for value in arr:
        count[(value // exp1) % 10] += 1

    # Prefix sums: count[d] becomes the index one past the last slot for digit d.
    for d in range(1, 10):
        count[d] += count[d - 1]

    # Walk backwards so equal digits keep their relative order (stability).
    for value in reversed(arr):
        digit = (value // exp1) % 10
        count[digit] -= 1
        output[count[digit]] = value

    # Copy the sorted pass back into the caller's list.
    arr[:] = output


def radixSort(arr):
    """Sort a list of non-negative integers in place using LSD radix sort.

    Fixes: the original loop condition used true division (max1/exp > 0),
    which stays truthy until the float underflows -- roughly 300 useless
    counting-sort passes; integer division stops right after the highest
    digit.  An empty list is now a no-op instead of max([]) raising.
    """
    if not arr:
        return
    max1 = max(arr)
    # exp is 10**i where i is the current digit number.
    exp = 1
    while max1 // exp > 0:
        countingSort(arr, exp)
        exp *= 10


# Driver code to test above (the original's Python-2 style trailing comma
# after print() was dropped).
arr = [170, 45, 75, 90, 802, 24, 2, 66]
radixSort(arr)
for value in arr:
    print(value)
|
[
"noreply@github.com"
] |
noreply@github.com
|
877389eadf5431f86cce9536338e7780b5b6f092
|
090324db0c04d8c30ad6688547cfea47858bf3af
|
/tests/test_sokorule.py
|
d0e02c22a7085d11f93f4eebbaa8548dce508f8b
|
[] |
no_license
|
fidlej/sokobot
|
b82c4c36d73e224d0d0e1635021ca04485da589e
|
d3d04753a5043e6a22dafd132fa633d8bc66b9ea
|
refs/heads/master
| 2021-01-21T13:14:29.523501
| 2011-06-12T07:34:14
| 2011-06-12T07:34:14
| 32,650,745
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,035
|
py
|
from nose.tools import assert_equal
from soko.struct.rules.sokorule import PushRule, SokobanGoalRule
from soko.struct import modeling
def test_get_children():
    """PushRule.get_children must return every successor state reachable in one
    step from *s*, and record the cells it inspected into *used_cells*.

    NOTE(review): these board fixtures are whitespace-significant tuples of row
    strings; confirm the exact spacing against the original file if re-typed.
    Symbols follow Sokoban convention ('#' wall, '@' player, '$' box, '.' goal,
    '+' player on goal) -- presumed, verify against soko.struct.
    """
    rule = PushRule()
    s = (
        "# #",
        "#@. #",
        " $ #",
        " #",
        "#####",
        )
    used_cells = set()
    children = rule.get_children(s, used_cells)
    # Exactly three successor states are expected from this position.
    assert_equal(3, len(children))
    _assert_contains(children,
            ("# #",
            "# + #",
            " $ #",
            " #",
            "#####",
            ))
    _assert_contains(children,
            ("#@ #",
            "# . #",
            " $ #",
            " #",
            "#####",
            ))
    _assert_contains(children,
            ("# #",
            "# . #",
            " @ #",
            " $ #",
            "#####",
            ))
    # Mark every recorded cell with '!' and compare against the expected mask.
    used_s = modeling.mutablize(s)
    for pos in used_cells:
        x, y = pos
        used_s[y][x] = "!"
    assert_equal(modeling.immutablize(
        (
        "#! #",
        "!!! #",
        " ! #",
        " ! #",
        "#####",
        )), modeling.immutablize(used_s))
def test_get_children_from_end_state():
    """From a state with no legal moves, get_children must return None and
    leave *used_cells* empty.

    NOTE(review): the board lines inside the triple-quoted string are
    whitespace-significant; confirm the spacing against the original file.
    """
    s = modeling.immutablize("""\
#$ #
@ .#
#
#
#####""".splitlines())
    rule = PushRule()
    used_cells = set()
    children = rule.get_children(s, used_cells)
    assert_equal(None, children)
    assert_equal(set(), used_cells)
def test_is_goaling():
    """is_goaling(s, next_s) must be True only for the forward transition that
    reaches the goal state, not for its reverse or a self-transition.

    NOTE(review): '*' presumably marks a box on a goal (standard Sokoban
    notation) -- verify against soko.struct.rules.sokorule.
    """
    rule = SokobanGoalRule()
    s = (
        "# #",
        "# .#",
        " $#",
        " @#",
        "#####",
        )
    next_s = (
        "# #",
        "# *#",
        " @#",
        " #",
        "#####",
        )
    assert_equal(True, rule.is_goaling(s, next_s))
    assert_equal(False, rule.is_goaling(next_s, s))
    assert_equal(False, rule.is_goaling(next_s, next_s))
def _assert_contains(childern, s):
    """Assert that the immutable form of state *s* appears in *childern*."""
    assert modeling.immutablize(s) in childern
|
[
"ivo@danihelka.net"
] |
ivo@danihelka.net
|
b2a469d2a6e34aba032faf3f8067fe1c2bbcc24c
|
8573030aa0d57dae152041c05dbedc6de0c187c9
|
/bluetooth_serial_test.py
|
3614100769033bcaad02259bc74f0275a2268d6a
|
[] |
no_license
|
aradicaldreamer/Singing_Tree
|
a691ae8b0519761e669710bcea859fb5597282da
|
d27e04251e455efc4067875db266ad3451d145c9
|
refs/heads/master
| 2021-09-11T13:31:12.981443
| 2018-04-08T00:02:15
| 2018-04-08T00:02:15
| 112,475,605
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 241
|
py
|
#! /usr/bin/python
import serial
#import pyfirmata
from time import sleep

# Open the Bluetooth RFCOMM device as a raw serial port.
bluetoothSerial = serial.Serial("/dev/rfcomm0", baudrate=115200)

from pyfirmata import Arduino, util

# NOTE(review): the same device is opened a second time here by pyfirmata
# while the pyserial handle above is still open -- confirm both are needed.
board = Arduino('/dev/rfcomm0')

# Write 255 to digital pin 4 on the board.
board.digital[4].write(255)
|
[
"31594961+aradicaldreamer@users.noreply.github.com"
] |
31594961+aradicaldreamer@users.noreply.github.com
|
730187e0dca39b4ce449a65b80b8caee1e53e2d0
|
264b75d3e5862022b4d5a27f062b684e3a85b5be
|
/car_dealer/serializers.py
|
941316cda8f26a97a731d9f69a92ee1158083f69
|
[] |
no_license
|
greatday4april/database-project
|
9a7c0ad5d6aefd10ecaab78ba8b61a0abadee55f
|
b449d8de4fd38655fd600888c1aa460b74659940
|
refs/heads/master
| 2023-04-18T16:11:00.237546
| 2021-05-15T20:05:50
| 2021-05-15T20:05:50
| 367,564,778
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,345
|
py
|
import datetime
from collections import defaultdict
from typing import DefaultDict

from rest_framework import serializers

from car_dealer.models import Customer, PurchaseBill, Service, ServiceAppointment, ServicePackage, ServicePerformed
class CustomerSerializer(serializers.ModelSerializer):
    """Serializes every field of the Customer model."""
    class Meta:
        model = Customer
        fields = '__all__'
class PurchaseBillSerializer(serializers.ModelSerializer):
    """Serializes every field of the PurchaseBill model."""
    class Meta:
        model = PurchaseBill
        fields = '__all__'
class ServiceItemSerializer(serializers.ModelSerializer):
    """Serializes every field of the ServicePerformed model."""
    class Meta:
        model = ServicePerformed
        fields = '__all__'
class ServiceAppointmentSerializer(serializers.ModelSerializer):
    """Serializes a ServiceAppointment plus three derived, read-only fields:
    line_items, total_cost and estimated_time."""

    # Computed fields, each backed by the matching get_* method below.
    estimated_time = serializers.SerializerMethodField()
    line_items = serializers.SerializerMethodField()
    total_cost = serializers.SerializerMethodField()

    def get_total_cost(self, appt: ServiceAppointment):
        """Return the sum of all line-item costs formatted as '$<total>'."""
        return '${}'.format(sum(line_item['cost'] for line_item in self.get_line_items_impl(appt)))

    def get_line_items(self, appt: ServiceAppointment):
        """Return the line items with each 'labor_time' converted to str for output."""
        line_items = self.get_line_items_impl(appt)
        new_line_items = []
        for value in line_items:
            new_line_items.append(value)
            # Mutates the just-appended dict in place: stringify labor_time.
            new_line_items[-1]['labor_time'] = str(value['labor_time'])
        return new_line_items

    def get_line_items_impl(self, appt: ServiceAppointment):
        """Build raw line-item dicts: one per service in the appointment's
        package, plus one per individually performed service.  labor_time is
        left in its native type (stringified by callers as needed)."""
        names = appt.service_package.service_names
        services = Service.objects.filter(name__in=names).all()
        services_performed = ServicePerformed.objects.filter(appt=appt)
        line_items = []
        for service in services:
            line_items.append({
                "item": service.name,
                "type": str(service.type),
                "labor_time": service.labor_time,
                "cost": service.cost
            })
        for service_performed in services_performed:
            service = service_performed.service
            line_items.append({
                "item": service.name,
                "type": str(service.type),
                "labor_time": service.labor_time,
                "cost": service.cost
            })
        return line_items

    def get_estimated_time(self, appt: ServiceAppointment):
        """Return the total labor time across all line items as a string
        (summed into a datetime.timedelta)."""
        labor_times = [
            line_item['labor_time'] for line_item in self.get_line_items_impl(appt)
        ]
        delta = datetime.timedelta()
        for labor_time in labor_times:
            delta += labor_time
        return str(delta)

    class Meta:
        model = ServiceAppointment
        fields = '__all__'
class SaleStatsSerializer(serializers.Serializer):
    """Read-only per-vehicle sale statistics over a collection of purchase bills.

    ``begin_date`` / ``end_date`` are write-only inputs supplied by the caller;
    ``stats`` is computed from the bills passed to get_stats.
    """

    stats = serializers.SerializerMethodField(read_only=True)
    begin_date = serializers.DateField(write_only=True)
    end_date = serializers.DateField(write_only=True)

    def get_stats(self, bills):
        """Aggregate sale count and total revenue per distinct vehicle
        (identified by '<year> <make> <model>').

        Fix: the original called ``typing.DefaultDict(...)`` at runtime;
        typing aliases exist for annotations and instantiating them is
        deprecated -- use ``collections.defaultdict`` instead.
        """
        numbers = defaultdict(int)
        profit = defaultdict(float)
        for bill in bills:
            vehicle = '{} {} {}'.format(bill.vin.year, bill.vin.make, bill.vin.model)
            profit[vehicle] += bill.price
            numbers[vehicle] += 1
        return [
            {
                'vehicle': vehicle,
                'profit': total_profit,
                'sale_number': numbers[vehicle],
            }
            for vehicle, total_profit in profit.items()
        ]
|
[
"greatday4april@gmail.com"
] |
greatday4april@gmail.com
|
c94a6cceb87bc3039ad2a7bc7a6bb86363be9e9f
|
29e43a0ff67fff987bfe9fb51b85ab0f60cf0ff9
|
/inward/apps.py
|
165441933d1ec467ddd331f06c172fe449c6f92c
|
[
"MIT"
] |
permissive
|
kwabena-aboah/dms-efile
|
5c42b9016d4416f7af6c701272e1bee59c093fcf
|
0e9fe6b059dcb441dac29d7b2710f66e8f4d855c
|
refs/heads/master
| 2022-12-10T05:12:56.038469
| 2022-02-19T18:37:10
| 2022-02-19T18:37:10
| 179,473,534
| 0
| 0
|
MIT
| 2022-12-08T01:43:42
| 2019-04-04T10:12:20
|
Python
|
UTF-8
|
Python
| false
| false
| 87
|
py
|
from django.apps import AppConfig
class InwardConfig(AppConfig):
    """Django application configuration for the 'inward' app."""
    name = 'inward'
|
[
"obed30mintah@yahoo.com"
] |
obed30mintah@yahoo.com
|
6b9a1e5837577c0b9ee09f0991941d0b6c85a2b7
|
b436ac4d46fb9a8019948fde91c2590d49c86c4d
|
/protein-translation/protein_translation.py
|
7c33c784516594e08fe6a888fb97756968ba1c45
|
[] |
no_license
|
SirObi/exercism-python
|
c3fa4a3a3b28d6e1bd342101603515bb233eebfc
|
02b1b6e46e08211d6885a2b192f05bec27ac3605
|
refs/heads/master
| 2021-06-11T01:38:44.365160
| 2019-07-06T18:56:27
| 2019-07-06T18:56:27
| 167,589,338
| 0
| 0
| null | 2021-04-20T17:56:27
| 2019-01-25T17:52:21
|
Python
|
UTF-8
|
Python
| false
| false
| 854
|
py
|
# Codons are related - this is best represented as a tree/graph (simplified here):
# a 2-base prefix maps either directly to a protein name or to a dict keyed by
# the third base.
MAP = {
    "AU": "Methionine",
    "UA": "Tyrosine",
    "UC": "Serine",
    "UG": {
        "G": "Tryptophan",
        "U": "Cysteine",
        "C": "Cysteine"
    },
    "UU": {
        "U": "Phenylalanine",
        "C": "Phenylalanine",
        "A": "Leucine",
        "G": "Leucine"
    }
}

# Kept as a display string for backward compatibility; parsed into a set below.
STOP_CODON = "UAA, UAG, UGA"


def proteins(strand):
    '''Translate an RNA *strand* into a list of protein names.

    The strand is consumed three bases at a time (a trailing partial codon is
    ignored by the zip idiom).  Translation stops -- without emitting anything
    for the stop codon -- at UAA, UAG or UGA.

    Fix: the original tested ``codon in STOP_CODON``, a *substring* check
    against "UAA, UAG, UGA" that only works by luck of the separator
    characters; membership is now tested against an explicit set.
    '''
    stop_codons = {codon.strip() for codon in STOP_CODON.split(',')}
    translation = []
    for chunk in zip(*[iter(strand)] * 3):
        codon = ''.join(chunk)
        if codon in stop_codons:
            return translation
        protein = MAP[codon[0:2]]
        if isinstance(protein, dict):
            protein = protein[codon[2]]
        translation.append(protein)
    return translation
|
[
"obi.orciuch@onfido.com"
] |
obi.orciuch@onfido.com
|
27ab1bab32d6bdc3f021bf7ca29e7d2821c32dac
|
4dc4f06a2c3bdff4dabb8e27434dcd42479f1b4a
|
/coursera/functions.py
|
6f9f8a99cabe50208f6a2cfacc4f46492a15cd57
|
[] |
no_license
|
fgokdata/exercises-python
|
4a259eab02e8b64ee5a2ef3382f33a111a3fbcaf
|
51689a89829f37c6a6646d009df32fde7f80ab9b
|
refs/heads/master
| 2023-03-16T04:48:35.734158
| 2021-03-07T20:59:14
| 2021-03-07T20:59:14
| 325,111,794
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 515
|
py
|
def printAll(*args):
    """Print how many positional arguments were received, then each argument
    on its own line.  All positionals are packed into the *args* tuple."""
    print("No of arguments:", len(args))
    for value in args:
        print(value)
# Demonstrate printAll with 3 positional arguments.
printAll('Horsefeather','Adonis','Bone')
# Demonstrate printAll with 4 positional arguments.
printAll('Sidecar','Long Island','Mudslide','Carriage')
##############
def printDictionary(**args):
    """Print each keyword argument as 'key : value', one pair per line,
    in the order the keywords were passed."""
    for key, value in args.items():
        print(key + " : " + value)
printDictionary(Country='Canada',Province='Ontario',City='Toronto')
|
[
"fgokdata@gmail.com"
] |
fgokdata@gmail.com
|
24cf381ade836b3bbdf9c48d26ed4710c70d97f3
|
1d4a0cf3d970d9e1c81f9395a59088768e1b179e
|
/LeanEuler/CleanTaxParser/CleanTaxParser.py
|
03dd4e93b1057f210280af61507905b64cebe98f
|
[
"Apache-2.0"
] |
permissive
|
idaks/LeanEuler
|
a3577bceab736ccc1f105f4c29b432b77ea0a379
|
a8fba6c08e25b1cd293dac9013256354cd8a95e2
|
refs/heads/master
| 2021-07-01T17:59:08.227590
| 2019-04-23T15:14:06
| 2019-04-23T15:14:06
| 118,785,114
| 1
| 0
|
Apache-2.0
| 2019-03-13T07:09:56
| 2018-01-24T15:38:16
|
Python
|
UTF-8
|
Python
| false
| false
| 19,224
|
py
|
# Generated from CleanTax.g4 by ANTLR 4.7.1
# encoding: utf-8
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
    # Serialized ATN (augmented transition network) for the CleanTax grammar.
    # Machine-generated by ANTLR 4.7.1 -- do not edit the escape data by hand;
    # regenerate from CleanTax.g4 instead.
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\16")
        buf.write("N\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b")
        buf.write("\t\b\4\t\t\t\3\2\6\2\24\n\2\r\2\16\2\25\3\2\3\2\3\3\3")
        buf.write("\3\6\3\34\n\3\r\3\16\3\35\3\3\7\3!\n\3\f\3\16\3$\13\3")
        buf.write("\3\4\3\4\6\4(\n\4\r\4\16\4)\3\4\3\4\3\5\3\5\6\5\60\n\5")
        buf.write("\r\5\16\5\61\3\5\7\5\65\n\5\f\5\16\58\13\5\3\6\3\6\3\6")
        buf.write("\3\6\3\6\3\6\3\7\3\7\5\7B\n\7\3\b\3\b\3\t\3\t\6\tH\n\t")
        buf.write("\r\t\16\tI\3\t\3\t\3\t\2\2\n\2\4\6\b\n\f\16\20\2\3\3\2")
        buf.write("\6\7\2M\2\23\3\2\2\2\4\31\3\2\2\2\6%\3\2\2\2\b-\3\2\2")
        buf.write("\2\n9\3\2\2\2\fA\3\2\2\2\16C\3\2\2\2\20E\3\2\2\2\22\24")
        buf.write("\5\4\3\2\23\22\3\2\2\2\24\25\3\2\2\2\25\23\3\2\2\2\25")
        buf.write("\26\3\2\2\2\26\27\3\2\2\2\27\30\5\b\5\2\30\3\3\2\2\2\31")
        buf.write("\33\7\3\2\2\32\34\7\r\2\2\33\32\3\2\2\2\34\35\3\2\2\2")
        buf.write("\35\33\3\2\2\2\35\36\3\2\2\2\36\"\3\2\2\2\37!\5\6\4\2")
        buf.write(" \37\3\2\2\2!$\3\2\2\2\" \3\2\2\2\"#\3\2\2\2#\5\3\2\2")
        buf.write("\2$\"\3\2\2\2%\'\7\4\2\2&(\7\r\2\2\'&\3\2\2\2()\3\2\2")
        buf.write("\2)\'\3\2\2\2)*\3\2\2\2*+\3\2\2\2+,\7\5\2\2,\7\3\2\2\2")
        buf.write("-/\t\2\2\2.\60\7\r\2\2/.\3\2\2\2\60\61\3\2\2\2\61/\3\2")
        buf.write("\2\2\61\62\3\2\2\2\62\66\3\2\2\2\63\65\5\n\6\2\64\63\3")
        buf.write("\2\2\2\658\3\2\2\2\66\64\3\2\2\2\66\67\3\2\2\2\67\t\3")
        buf.write("\2\2\28\66\3\2\2\29:\7\b\2\2:;\7\r\2\2;<\5\f\7\2<=\7\r")
        buf.write("\2\2=>\7\t\2\2>\13\3\2\2\2?B\5\16\b\2@B\5\20\t\2A?\3\2")
        buf.write("\2\2A@\3\2\2\2B\r\3\2\2\2CD\7\f\2\2D\17\3\2\2\2EG\7\n")
        buf.write("\2\2FH\7\f\2\2GF\3\2\2\2HI\3\2\2\2IG\3\2\2\2IJ\3\2\2\2")
        buf.write("JK\3\2\2\2KL\7\13\2\2L\21\3\2\2\2\n\25\35\")\61\66AI")
        return buf.getvalue()
class CleanTaxParser ( Parser ):
    """CleanTax parser generated by ANTLR 4.7.1 from CleanTax.g4.

    NOTE(review): this class is machine-generated; regenerate it from the
    grammar instead of editing by hand.  One parse-rule method and one
    ParserRuleContext subclass exist per grammar rule.
    """

    grammarFileName = "CleanTax.g4"

    # Deserialized ATN shared by every parser instance.
    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    sharedContextCache = PredictionContextCache()

    literalNames = [ "<INVALID>", "'taxonomy'", "'('", "')'", "'articulation'",
                     "'articulations'", "'['", "']'", "'{'", "'}'" ]

    symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                      "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                      "<INVALID>", "<INVALID>", "RCC_BASIC_5", "TEXT", "WHITESPACE" ]

    # Grammar rule indices.
    RULE_ct_input = 0
    RULE_tax_desc = 1
    RULE_tax_sub_desc = 2
    RULE_articulations_desc = 3
    RULE_articulation = 4
    RULE_relation = 5
    RULE_rcc5_rel = 6
    RULE_rcc32_rel = 7

    ruleNames = [ "ct_input", "tax_desc", "tax_sub_desc", "articulations_desc",
                  "articulation", "relation", "rcc5_rel", "rcc32_rel" ]

    # Token type constants.
    EOF = Token.EOF
    T__0=1
    T__1=2
    T__2=3
    T__3=4
    T__4=5
    T__5=6
    T__6=7
    T__7=8
    T__8=9
    RCC_BASIC_5=10
    TEXT=11
    WHITESPACE=12

    def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
        super().__init__(input, output)
        self.checkVersion("4.7.1")
        self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
        self._predicates = None

    class Ct_inputContext(ParserRuleContext):

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def articulations_desc(self):
            return self.getTypedRuleContext(CleanTaxParser.Articulations_descContext,0)

        def tax_desc(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(CleanTaxParser.Tax_descContext)
            else:
                return self.getTypedRuleContext(CleanTaxParser.Tax_descContext,i)

        def getRuleIndex(self):
            return CleanTaxParser.RULE_ct_input

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCt_input" ):
                listener.enterCt_input(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCt_input" ):
                listener.exitCt_input(self)

    def ct_input(self):
        # Entry rule: one or more tax_desc, then an articulations_desc.
        localctx = CleanTaxParser.Ct_inputContext(self, self._ctx, self.state)
        self.enterRule(localctx, 0, self.RULE_ct_input)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 17
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while True:
                self.state = 16
                self.tax_desc()
                self.state = 19
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if not (_la==CleanTaxParser.T__0):
                    break
            self.state = 21
            self.articulations_desc()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx

    class Tax_descContext(ParserRuleContext):

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def TEXT(self, i:int=None):
            if i is None:
                return self.getTokens(CleanTaxParser.TEXT)
            else:
                return self.getToken(CleanTaxParser.TEXT, i)

        def tax_sub_desc(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(CleanTaxParser.Tax_sub_descContext)
            else:
                return self.getTypedRuleContext(CleanTaxParser.Tax_sub_descContext,i)

        def getRuleIndex(self):
            return CleanTaxParser.RULE_tax_desc

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterTax_desc" ):
                listener.enterTax_desc(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitTax_desc" ):
                listener.exitTax_desc(self)

    def tax_desc(self):
        # 'taxonomy' TEXT+ tax_sub_desc*
        localctx = CleanTaxParser.Tax_descContext(self, self._ctx, self.state)
        self.enterRule(localctx, 2, self.RULE_tax_desc)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 23
            self.match(CleanTaxParser.T__0)
            self.state = 25
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while True:
                self.state = 24
                self.match(CleanTaxParser.TEXT)
                self.state = 27
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if not (_la==CleanTaxParser.TEXT):
                    break
            self.state = 32
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==CleanTaxParser.T__1:
                self.state = 29
                self.tax_sub_desc()
                self.state = 34
                self._errHandler.sync(self)
                _la = self._input.LA(1)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx

    class Tax_sub_descContext(ParserRuleContext):

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def TEXT(self, i:int=None):
            if i is None:
                return self.getTokens(CleanTaxParser.TEXT)
            else:
                return self.getToken(CleanTaxParser.TEXT, i)

        def getRuleIndex(self):
            return CleanTaxParser.RULE_tax_sub_desc

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterTax_sub_desc" ):
                listener.enterTax_sub_desc(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitTax_sub_desc" ):
                listener.exitTax_sub_desc(self)

    def tax_sub_desc(self):
        # '(' TEXT+ ')'
        localctx = CleanTaxParser.Tax_sub_descContext(self, self._ctx, self.state)
        self.enterRule(localctx, 4, self.RULE_tax_sub_desc)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 35
            self.match(CleanTaxParser.T__1)
            self.state = 37
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while True:
                self.state = 36
                self.match(CleanTaxParser.TEXT)
                self.state = 39
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if not (_la==CleanTaxParser.TEXT):
                    break
            self.state = 41
            self.match(CleanTaxParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx

    class Articulations_descContext(ParserRuleContext):

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def TEXT(self, i:int=None):
            if i is None:
                return self.getTokens(CleanTaxParser.TEXT)
            else:
                return self.getToken(CleanTaxParser.TEXT, i)

        def articulation(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(CleanTaxParser.ArticulationContext)
            else:
                return self.getTypedRuleContext(CleanTaxParser.ArticulationContext,i)

        def getRuleIndex(self):
            return CleanTaxParser.RULE_articulations_desc

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterArticulations_desc" ):
                listener.enterArticulations_desc(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitArticulations_desc" ):
                listener.exitArticulations_desc(self)

    def articulations_desc(self):
        # ('articulation' | 'articulations') TEXT+ articulation*
        localctx = CleanTaxParser.Articulations_descContext(self, self._ctx, self.state)
        self.enterRule(localctx, 6, self.RULE_articulations_desc)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 43
            _la = self._input.LA(1)
            if not(_la==CleanTaxParser.T__3 or _la==CleanTaxParser.T__4):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
            self.state = 45
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while True:
                self.state = 44
                self.match(CleanTaxParser.TEXT)
                self.state = 47
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if not (_la==CleanTaxParser.TEXT):
                    break
            self.state = 52
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==CleanTaxParser.T__5:
                self.state = 49
                self.articulation()
                self.state = 54
                self._errHandler.sync(self)
                _la = self._input.LA(1)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx

    class ArticulationContext(ParserRuleContext):

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def TEXT(self, i:int=None):
            if i is None:
                return self.getTokens(CleanTaxParser.TEXT)
            else:
                return self.getToken(CleanTaxParser.TEXT, i)

        def relation(self):
            return self.getTypedRuleContext(CleanTaxParser.RelationContext,0)

        def getRuleIndex(self):
            return CleanTaxParser.RULE_articulation

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterArticulation" ):
                listener.enterArticulation(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitArticulation" ):
                listener.exitArticulation(self)

    def articulation(self):
        # '[' TEXT relation TEXT ']'
        localctx = CleanTaxParser.ArticulationContext(self, self._ctx, self.state)
        self.enterRule(localctx, 8, self.RULE_articulation)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 55
            self.match(CleanTaxParser.T__5)
            self.state = 56
            self.match(CleanTaxParser.TEXT)
            self.state = 57
            self.relation()
            self.state = 58
            self.match(CleanTaxParser.TEXT)
            self.state = 59
            self.match(CleanTaxParser.T__6)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx

    class RelationContext(ParserRuleContext):

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def rcc5_rel(self):
            return self.getTypedRuleContext(CleanTaxParser.Rcc5_relContext,0)

        def rcc32_rel(self):
            return self.getTypedRuleContext(CleanTaxParser.Rcc32_relContext,0)

        def getRuleIndex(self):
            return CleanTaxParser.RULE_relation

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRelation" ):
                listener.enterRelation(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRelation" ):
                listener.exitRelation(self)

    def relation(self):
        # rcc5_rel | rcc32_rel, chosen by lookahead token.
        localctx = CleanTaxParser.RelationContext(self, self._ctx, self.state)
        self.enterRule(localctx, 10, self.RULE_relation)
        try:
            self.state = 63
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [CleanTaxParser.RCC_BASIC_5]:
                self.enterOuterAlt(localctx, 1)
                self.state = 61
                self.rcc5_rel()
                pass
            elif token in [CleanTaxParser.T__7]:
                self.enterOuterAlt(localctx, 2)
                self.state = 62
                self.rcc32_rel()
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx

    class Rcc5_relContext(ParserRuleContext):

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def RCC_BASIC_5(self):
            return self.getToken(CleanTaxParser.RCC_BASIC_5, 0)

        def getRuleIndex(self):
            return CleanTaxParser.RULE_rcc5_rel

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRcc5_rel" ):
                listener.enterRcc5_rel(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRcc5_rel" ):
                listener.exitRcc5_rel(self)

    def rcc5_rel(self):
        # A single RCC_BASIC_5 token.
        localctx = CleanTaxParser.Rcc5_relContext(self, self._ctx, self.state)
        self.enterRule(localctx, 12, self.RULE_rcc5_rel)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 65
            self.match(CleanTaxParser.RCC_BASIC_5)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx

    class Rcc32_relContext(ParserRuleContext):

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def RCC_BASIC_5(self, i:int=None):
            if i is None:
                return self.getTokens(CleanTaxParser.RCC_BASIC_5)
            else:
                return self.getToken(CleanTaxParser.RCC_BASIC_5, i)

        def getRuleIndex(self):
            return CleanTaxParser.RULE_rcc32_rel

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRcc32_rel" ):
                listener.enterRcc32_rel(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRcc32_rel" ):
                listener.exitRcc32_rel(self)

    def rcc32_rel(self):
        # '{' RCC_BASIC_5+ '}'
        localctx = CleanTaxParser.Rcc32_relContext(self, self._ctx, self.state)
        self.enterRule(localctx, 14, self.RULE_rcc32_rel)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 67
            self.match(CleanTaxParser.T__7)
            self.state = 69
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while True:
                self.state = 68
                self.match(CleanTaxParser.RCC_BASIC_5)
                self.state = 71
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if not (_la==CleanTaxParser.RCC_BASIC_5):
                    break
            self.state = 73
            self.match(CleanTaxParser.T__8)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
|
[
"sahil1105@hotmail.com"
] |
sahil1105@hotmail.com
|
f370e68f5f151f81a8bdc822000422bb3a00eb2f
|
20f951bd927e4e5cde8ef7781813fcf0d51cc3ea
|
/fossir/modules/events/payment/testing/fixtures.py
|
cd8ec800a3c8878de5cf225e634d8c0ddd435ece
|
[] |
no_license
|
HodardCodeclub/SoftwareDevelopment
|
60a0fbab045cb1802925d4dd5012d5b030c272e0
|
6300f2fae830c0c2c73fe0afd9c684383bce63e5
|
refs/heads/master
| 2021-01-20T00:30:02.800383
| 2018-04-27T09:28:25
| 2018-04-27T09:28:25
| 101,277,325
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 714
|
py
|
import pytest
from fossir.modules.events.payment.models.transactions import PaymentTransaction, TransactionStatus
@pytest.fixture
def create_transaction():
    """Fixture returning a factory for `PaymentTransaction` objects.

    The factory takes a transaction ``status`` plus optional keyword
    overrides; any field not supplied falls back to a test default.
    """
    def _factory(status, **overrides):
        # Defaults are built per call so the 'data' dict is never shared
        # between transactions.
        for key, value in (('amount', 10), ('currency', 'USD'),
                           ('provider', '_manual'), ('data', {})):
            overrides.setdefault(key, value)
        return PaymentTransaction(status=status, **overrides)
    return _factory
@pytest.fixture
def dummy_transaction(create_transaction):
    """Fixture providing one successful dummy transaction."""
    transaction = create_transaction(status=TransactionStatus.successful)
    return transaction
|
[
"hodardhazwinayo@gmail.com"
] |
hodardhazwinayo@gmail.com
|
bcd0cf676cbc9d4bdeeca3cf67c6ecefb10024d2
|
4f12100a5f1d99a67e2119724b4f23bd54e4df3b
|
/sistemaDePersonasWS/sistemaDePersonasWS/sistemaDePersonasWS/wsgi.py
|
06543e622952e5c6f424bb3010f5f01e611bf982
|
[] |
no_license
|
javierperini/SistemaDePersonas
|
d6adc21528fd057a1a6580008589b146f27446f0
|
65537e03dd088117901eb470e7a1dbf3046c2bcf
|
refs/heads/master
| 2020-03-16T13:06:46.727122
| 2018-05-10T03:37:16
| 2018-05-13T23:41:57
| 132,681,642
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
"""
WSGI config for sistemaDePersonasWS project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sistemaDePersonasWS.settings")
application = get_wsgi_application()
|
[
"javierperini90@gmail.com"
] |
javierperini90@gmail.com
|
a8f79e485a62acd559aacca26284916a6d7816cc
|
30575ea2c63b40f5c578acc3dd3aa306c539f0b8
|
/dialog/schema/factories/conditions/results_count.py
|
e13a9bbe9dd13e2689486c3cb6a17fd639f0ae58
|
[] |
no_license
|
robdefeo/dialog
|
87b8cff9b48fd6980ab7ad167823de38a40c91bb
|
1db84ca439d226e9d9222d76d825b6c92f56edd8
|
refs/heads/master
| 2021-05-30T11:39:58.053586
| 2016-02-07T22:22:55
| 2016-02-07T22:22:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 512
|
py
|
from dialog.elements import Condition
from dialog.schema.factories.variables import NAME_RESULTS_COUNT
class ResultsCountConditions:
    """Factory helpers building dialog conditions on the results-count variable."""

    @staticmethod
    def equals_zero():
        """Condition that fires when the results count is exactly zero."""
        return Condition(name=NAME_RESULTS_COUNT, operator="EQUALS", root_text="0")

    @staticmethod
    def less_than(value):
        """Condition that fires when the results count is below *value*.

        NOTE(review): the operator literal "LESS_THEN" is reproduced
        unchanged — confirm against the dialog schema that this spelling
        (rather than "LESS_THAN") is what the consumer expects.
        """
        return Condition(name=NAME_RESULTS_COUNT, operator="LESS_THEN", root_text=value)
|
[
"robertodefeo@hotmail.com"
] |
robertodefeo@hotmail.com
|
8b4a81d4fc267ceeff9bff7fb3ff4286888a5000
|
c5c7542a3b532740e20495aed22b91d58a66e576
|
/hbaselines/goal_conditioned/sac.py
|
342b00f6a291b006d697ef5e1df87914b7c73b52
|
[
"MIT"
] |
permissive
|
jesbu1/h-baselines
|
964c80d1bb54bcd070de8ba55f940bc836078df1
|
f6f775bb18de22527f2d01d73bd733ed2e435ba3
|
refs/heads/master
| 2022-12-07T03:18:37.077556
| 2020-09-03T06:41:49
| 2020-09-03T06:41:49
| 282,724,302
| 0
| 0
|
MIT
| 2020-07-26T20:04:03
| 2020-07-26T20:04:03
| null |
UTF-8
|
Python
| false
| false
| 12,889
|
py
|
"""SAC-compatible goal-conditioned hierarchical policy."""
import numpy as np
from hbaselines.goal_conditioned.base import GoalConditionedPolicy as \
BaseGoalConditionedPolicy
from hbaselines.fcnet.sac import FeedForwardPolicy
class GoalConditionedPolicy(BaseGoalConditionedPolicy):
    """SAC-compatible goal-conditioned hierarchical policy.

    Thin specialization of the base goal-conditioned policy that plugs the
    SAC feed-forward policy in at every level of the hierarchy and forwards
    the SAC-specific ``target_entropy`` parameter.
    """

    def __init__(self,
                 sess,
                 ob_space,
                 ac_space,
                 co_space,
                 buffer_size,
                 batch_size,
                 actor_lr,
                 critic_lr,
                 verbose,
                 tau,
                 gamma,
                 layer_norm,
                 layers,
                 act_fun,
                 use_huber,
                 target_entropy,
                 num_levels,
                 meta_period,
                 intrinsic_reward_type,
                 intrinsic_reward_scale,
                 relative_goals,
                 off_policy_corrections,
                 hindsight,
                 subgoal_testing_rate,
                 connected_gradients,
                 use_fingerprints,
                 fingerprint_range,
                 centralized_value_functions,
                 cg_weights,
                 scope=None,
                 env_name="",
                 num_envs=1):
        """Instantiate the goal-conditioned hierarchical policy.

        Parameters
        ----------
        sess : tf.compat.v1.Session
            the current TensorFlow session
        ob_space : gym.spaces.*
            the observation space of the environment
        ac_space : gym.spaces.*
            the action space of the environment
        co_space : gym.spaces.*
            the context space of the environment
        buffer_size : int
            the max number of transitions to store
        batch_size : int
            SGD batch size
        actor_lr : float
            actor learning rate
        critic_lr : float
            critic learning rate
        verbose : int
            the verbosity level: 0 none, 1 training information, 2 tensorflow
            debug
        tau : float
            target update rate
        gamma : float
            discount factor
        layer_norm : bool
            enable layer normalisation
        layers : list of int or None
            the size of the neural network for the policy
        act_fun : tf.nn.*
            the activation function to use in the neural network
        use_huber : bool
            specifies whether to use the huber distance function as the loss
            for the critic. If set to False, the mean-squared error metric is
            used instead
        target_entropy : float
            target entropy used when learning the entropy coefficient. If set
            to None, a heuristic value is used.
        num_levels : int
            number of levels within the hierarchy. Must be greater than 1. Two
            levels correspond to a Manager/Worker paradigm.
        meta_period : int
            meta-policy action period
        intrinsic_reward_type : str
            the reward function to be used by the lower-level policies. See the
            base goal-conditioned policy for a description.
        intrinsic_reward_scale : float
            the value that the intrinsic reward should be scaled by
        relative_goals : bool
            specifies whether the goal issued by the higher-levels policies is
            meant to be a relative or absolute goal, i.e. specific state or
            change in state
        off_policy_corrections : bool
            whether to use off-policy corrections during the update procedure.
            See: https://arxiv.org/abs/1805.08296
        hindsight : bool
            whether to include hindsight action and goal transitions in the
            replay buffer. See: https://arxiv.org/abs/1712.00948
        subgoal_testing_rate : float
            rate at which the original (non-hindsight) sample is stored in the
            replay buffer as well. Used only if `hindsight` is set to True.
        connected_gradients : bool
            whether to use the connected gradient update actor update procedure
            to the higher-level policy. See: https://arxiv.org/abs/1912.02368v1
        cg_weights : float
            weights for the gradients of the loss of the lower-level policies
            with respect to the parameters of the higher-level policies. Only
            used if `connected_gradients` is set to True.
        use_fingerprints : bool
            specifies whether to add a time-dependent fingerprint to the
            observations
        fingerprint_range : (list of float, list of float)
            the low and high values for each fingerprint element, if they are
            being used
        centralized_value_functions : bool
            specifies whether to use centralized value functions
        """
        # Pure pass-through: every argument is forwarded to the base class.
        # Both the meta- and worker-level policies are SAC feed-forward
        # policies, and target_entropy is forwarded via additional_params.
        super(GoalConditionedPolicy, self).__init__(
            sess=sess,
            ob_space=ob_space,
            ac_space=ac_space,
            co_space=co_space,
            buffer_size=buffer_size,
            batch_size=batch_size,
            actor_lr=actor_lr,
            critic_lr=critic_lr,
            verbose=verbose,
            tau=tau,
            gamma=gamma,
            layer_norm=layer_norm,
            layers=layers,
            act_fun=act_fun,
            use_huber=use_huber,
            num_levels=num_levels,
            meta_period=meta_period,
            intrinsic_reward_type=intrinsic_reward_type,
            intrinsic_reward_scale=intrinsic_reward_scale,
            relative_goals=relative_goals,
            off_policy_corrections=off_policy_corrections,
            hindsight=hindsight,
            subgoal_testing_rate=subgoal_testing_rate,
            connected_gradients=connected_gradients,
            cg_weights=cg_weights,
            use_fingerprints=use_fingerprints,
            fingerprint_range=fingerprint_range,
            centralized_value_functions=centralized_value_functions,
            scope=scope,
            env_name=env_name,
            num_envs=num_envs,
            meta_policy=FeedForwardPolicy,
            worker_policy=FeedForwardPolicy,
            additional_params=dict(
                target_entropy=target_entropy,
            ),
        )

    # ======================================================================= #
    #                       Auxiliary methods for HIRO                        #
    # ======================================================================= #

    # TODO
    def _log_probs(self, meta_actions, worker_obses, worker_actions):
        """Calculate the log probability of the next goal by the meta-policies.

        Parameters
        ----------
        meta_actions : array_like
            (batch_size, m_ac_dim, num_samples) matrix of candidate higher-
            level policy actions
        worker_obses : array_like
            (batch_size, w_obs_dim, meta_period + 1) matrix of lower-level
            policy observations
        worker_actions : array_like
            (batch_size, w_ac_dim, meta_period) list of lower-level policy
            actions

        Returns
        -------
        array_like
            (batch_size, num_samples) fitness associated with every state /
            action / goal pair

        Helps
        -----
        * _sample_best_meta_action(self):
        """
        fitness = []
        batch_size, goal_dim, num_samples = meta_actions.shape
        _, _, meta_period = worker_actions.shape

        # Loop through the elements of the batch.
        for i in range(batch_size):
            # Extract the candidate goals for the current element in the batch.
            # The worker observations and actions from the meta period of the
            # current batch are also collected to compute the log-probability
            # of a given candidate goal.
            goals_per_sample = meta_actions[i, :, :].T
            worker_obses_per_sample = worker_obses[i, :, :].T
            worker_actions_per_sample = worker_actions[i, :, :].T

            # This will be used to store the cumulative log-probabilities of a
            # given candidate goal for the entire meta-period.
            fitness_per_sample = np.zeros(num_samples)

            # Create repeated representations of each worker action for each
            # candidate goal.
            tiled_worker_actions_per_sample = np.tile(
                worker_actions_per_sample, (num_samples, 1))

            # Create repeated representations of each worker observation for
            # each candidate goal. The indexing of worker_obses_per_sample is
            # meant to do the following:
            # 1. We remove the last observation since it does not correspond
            #    to any action for the current meta-period.
            # 2. Unlike the TD3 implementation, we keep the trailing context
            #    (goal) terms since they are needed to compute the log-prob
            #    of a given action when feeding to logp_action.
            tiled_worker_obses_per_sample = np.tile(
                worker_obses_per_sample[:-1, :], (num_samples, 1))

            # Create repeated representations of each candidate goal for each
            # worker observation in a meta period.
            tiled_goals_per_sample = np.tile(
                goals_per_sample, meta_period).reshape(
                    (num_samples * meta_period, goal_dim))

            # If relative goals are being used, update the later goals to match
            # what they would be under the relative goals difference approach.
            if self.relative_goals:
                goal_diff = worker_obses_per_sample[:-1, :] - np.tile(
                    worker_obses_per_sample[0, :], (meta_period, 1))
                tiled_goals_per_sample += \
                    np.tile(goal_diff, (num_samples, 1))[:, :goal_dim]

            # Compute the log-probability of each action using the logp_action
            # attribute of the SAC lower-level policy.
            normalized_error = self.sess.run(
                self.policy[-1].logp_action,
                feed_dict={
                    self.policy[-1].obs_ph: tiled_worker_obses_per_sample,
                    self.policy[-1].action_ph: tiled_worker_actions_per_sample,
                }
            )

            # Sum the different normalized errors to get the fitness of each
            # candidate goal.
            for j in range(num_samples):
                fitness_per_sample[j] = np.sum(
                    normalized_error[j * meta_period: (j+1) * meta_period])

            fitness.append(fitness_per_sample)

        return np.array(fitness)

    # ======================================================================= #
    #                      Auxiliary methods for HRL-CG                       #
    # ======================================================================= #

    def _setup_connected_gradients(self):
        """Create the connected gradients meta-policy optimizer."""
        raise NotImplementedError  # TODO

    def _connected_gradients_update(self,
                                    obs0,
                                    actions,
                                    rewards,
                                    obs1,
                                    terminals1,
                                    update_actor=True):
        """Perform the gradient update procedure for the HRL-CG algorithm.

        This procedure is similar to update_from_batch, expect it runs the
        self.cg_optimizer operation instead of the policy object's optimizer,
        and utilizes some information from the worker samples as well.

        Parameters
        ----------
        obs0 : list of array_like
            (batch_size, obs_dim) matrix of observations for every level in the
            hierarchy
        actions : list of array_like
            (batch_size, ac_dim) matrix of actions for every level in the
            hierarchy
        obs1 : list of array_like
            (batch_size, obs_dim) matrix of next step observations for every
            level in the hierarchy
        rewards : list of array_like
            (batch_size,) vector of rewards for every level in the hierarchy
        terminals1 : list of numpy bool
            (batch_size,) vector of done masks for every level in the hierarchy
        update_actor : bool
            specifies whether to update the actor policy of the meta policy.
            The critic policy is still updated if this value is set to False.

        Returns
        -------
        [float, float]
            higher-level policy critic loss
        float
            higher-level policy actor loss
        """
        raise NotImplementedError  # TODO
|
[
"noreply@github.com"
] |
noreply@github.com
|
85670d26e24b6401659b0d9ff893326cd87d1fd6
|
fd8024aa9d0995caf60b05ee8249340dda49479e
|
/dynamite-remote/dynamite_remote/utilities.py
|
3d87506adedf349009df9400e77e7f9f09be2746
|
[] |
no_license
|
DynamiteAI/utilities
|
d8c5088aeea8d6e4cbc5f1656b1374362f6bd571
|
5cbfebd92b6376db20708d1363079d0974f6a029
|
refs/heads/master
| 2023-08-11T17:17:28.453094
| 2021-10-01T20:25:09
| 2021-10-01T20:25:09
| 358,475,746
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,615
|
py
|
import os
import tarfile
import subprocess
from typing import Optional, Tuple, Union
# Home directory of the invoking user; anchors all per-user state below.
USER_HOME = os.environ.get("HOME")
# Directory holding per-host lock files while a remote command is running.
LOCK_PATH = f'{USER_HOME}/.dynamite_remote/locks'
# Account used for SSH connections to managed nodes.
REMOTE_SSH_USER = 'dynamite-remote'
class NodeLocked(Exception):
    """Raised when a remote node is already busy running another command."""

    def __init__(self, hostname, command):
        message = "{} is already running '{}'".format(hostname.strip(),
                                                     command.strip())
        super().__init__(message)
def create_new_remote_keypair(node_name) -> Tuple[int, str, str]:
    """Generate a 4096-bit RSA keypair for a node under /tmp.

    Args:
        node_name: Name of the node; used as the key-file basename.

    Returns:
        A ``(return_code, stdout, stderr)`` tuple from the ssh-keygen run.
    """
    import shlex  # local import: only needed here, for safe shell quoting

    temp_key_root = '/tmp/dynamite-remote/keys/'
    makedirs(temp_key_root)
    # Quote the node name so a hostile or odd value cannot inject shell
    # syntax into the command line (the command still runs via the shell
    # because of the `cat /dev/zero |` pipe).
    safe_name = shlex.quote(node_name)
    p = subprocess.Popen(
        f'cat /dev/zero | ssh-keygen -t rsa -b 4096 -f {temp_key_root}/{safe_name} -N ""',
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    out, err = p.communicate()
    return p.returncode, out.decode('utf-8'), err.decode('utf-8')
def execute_over_ssh(*args):
    """Run the bundled ssh wrapper script with the given extra arguments."""
    wrapper = f'{os.environ.get("HOME")}/.dynamite_remote/bin/ssh_wrapper.sh'
    command = ['bash', wrapper, *args]
    subprocess.Popen(command).communicate()
def execute_dynamite_command_on_remote_host(host_or_ip: str, port: int, private_key_path: str,
                                            *dynamite_arguments):
    """Run a `dynamite` command on a remote host over SSH, honoring locks.

    Raises:
        NodeLocked: If a lock file shows the host is already running a command.
    """
    makedirs(LOCK_PATH)

    def _locked():
        # A lock is simply a file named after the host inside LOCK_PATH.
        return host_or_ip in os.listdir(LOCK_PATH)

    ssh_args = [f'{REMOTE_SSH_USER}@{host_or_ip}', '-p', str(port), '-t',
                '-i', private_key_path,
                'sudo', '/usr/local/bin/dynamite', *dynamite_arguments]

    if _locked():
        # The lock file records the command currently running on the host.
        with open(f'{LOCK_PATH}/{host_or_ip}') as node_lock:
            raise NodeLocked(host_or_ip, node_lock.read())

    execute_over_ssh(*ssh_args)
def extract_archive(archive_path: str, destination_path: str) -> None:
    """Extract a tar.gz archive to a given destination path.

    Errors opening the archive are silently ignored (deliberate best-effort
    semantics, preserved from the original implementation).

    Args:
        archive_path: The full path to the tar.gz archive file
        destination_path: The path where the archive will be extracted

    Returns:
        None
    """
    try:
        # Context manager guarantees the archive handle is closed even if
        # extraction fails partway through (the original leaked it).
        with tarfile.open(archive_path) as tf:
            tf.extractall(path=destination_path)
    except IOError:
        # Best-effort: a missing or unreadable archive is ignored.
        pass
def makedirs(path: str, exist_ok: Optional[bool] = True) -> None:
    """Create directory(ies) at a given path.

    Args:
        path: The path to the directories
        exist_ok: If True, an already-existing directory is not an error
            (Default value = True)

    Returns:
        None
    """
    # Both branches of the original collapse into a single os.makedirs call:
    # a truthy exist_ok maps to exist_ok=True, anything else to the default.
    os.makedirs(path, exist_ok=bool(exist_ok))
def safely_remove_file(path: str) -> None:
    """Remove a file if it exists at the given path.

    Uses EAFP (try/except) instead of an exists() pre-check so the removal
    is race-free: a file deleted by another process between check and
    remove no longer raises.

    Args:
        path: The path of the file to remove

    Returns:
        None
    """
    try:
        os.remove(path)
    except FileNotFoundError:
        pass
def set_permissions_of_file(file_path: str, unix_permissions_integer: Union[str, int]) -> None:
    """Set the permissions of a file to unix_permissions_integer.

    Args:
        file_path: The path to the file
        unix_permissions_integer: The numeric representation of user/group/everyone permissions on a file

    Returns:
        None
    """
    # Argument-list form (shell=False) so paths containing spaces or shell
    # metacharacters cannot break, or inject into, the command line.
    subprocess.call(['chmod', '-R', str(unix_permissions_integer), file_path])
def search_for_config():
    """Return the first existing config.cfg path among known locations, or None."""
    candidate_paths = (
        f'{os.environ.get("HOME")}/.dynamite_remote/config.cfg',
        '/etc/dynamite-remote/config.cfg',
        '../config.cfg',
        './config.cfg',
    )
    # Locations are probed in priority order; the first hit wins.
    return next((fp for fp in candidate_paths if os.path.exists(fp)), None)
|
[
"jamin@dynamite.ai"
] |
jamin@dynamite.ai
|
68d9ab65613c09fa8f9fb2cc9c777da8f5849f98
|
bea556733142d4a41562f4c9e0d26418780f244e
|
/tools/cef_parser.py
|
d624358ad9ecb90298be67f31df591c9d7a548fa
|
[
"BSD-3-Clause"
] |
permissive
|
EricTop3/cef
|
fd48f706b27a51951b830a6673be10a9e63030c5
|
e83d8d6a131ad39b98c97c945ccf77bcd723378f
|
refs/heads/master
| 2023-09-04T00:11:52.720554
| 2021-11-09T19:21:58
| 2021-11-09T19:21:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 66,664
|
py
|
# Copyright (c) 2011 The Chromium Embedded Framework Authors. All rights
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file.
from __future__ import absolute_import
from date_util import *
from file_util import *
import os
import re
import shutil
import string
import sys
import textwrap
import time
def notify(msg):
    """Write an informational note to stdout."""
    print(' NOTE: ' + msg, file=sys.stdout)
def wrap_text(text, indent='', maxchars=80):
    """Wrap *text* to *maxchars* columns, prefixing each line with *indent*.

    Lines are broken at word boundaries; every emitted line ends with a
    newline character.
    """
    wrapped = textwrap.wrap(text, maxchars - len(indent))
    return ''.join(indent + line + '\n' for line in wrapped)
def is_base_class(clsname):
    """Return True if *clsname* is a known base (root) class in the object
    hierarchy.
    """
    return clsname in ('CefBaseRefCounted', 'CefBaseScoped')
def get_capi_file_name(cppname):
    """Translate a C++ header name ('foo.h') to its C API name ('foo_capi.h')."""
    return f'{cppname[:-2]}_capi.h'
def get_capi_name(cppname, isclassname, prefix=None):
    """Convert a C++ CamelCaps name to a C API underscore name.

    Class names receive a trailing '_t'. When *prefix* is supplied it is
    prepended; for 'cef*' prefixes, a duplicated remainder of the prefix is
    first removed from the converted name.
    """
    pieces = []
    prev = ''
    for ch in cppname:
        # Insert '_' at a lower->upper transition (digits never break words).
        if pieces and not ch.isdigit() and ch.upper() == ch and prev.upper() != prev:
            pieces.append('_')
        pieces.append(ch.lower())
        prev = ch
    result = ''.join(pieces)

    if isclassname:
        result += '_t'

    if prefix is not None:
        if prefix[0:3] == 'cef':
            # If the prefix name is duplicated in the function name, remove
            # that portion of the function name.
            subprefix = prefix[3:]
            pos = result.find(subprefix)
            if pos >= 0:
                result = result[0:pos] + result[pos + len(subprefix):]
        result = prefix + '_' + result

    return result
def get_wrapper_type_enum(cppname):
    """Return the WT_* wrapper-type enumeration value for a C++ class name."""
    capi = get_capi_name(cppname, False)
    # Drop the 'cef_' prefix and upper-case the remainder.
    return 'WT_' + capi[4:].upper()
def get_prev_line(body, pos):
    """Return start/end offsets and the text of the line immediately before
    the line containing *pos*.
    """
    end = body.rfind('\n', 0, pos)
    start = body.rfind('\n', 0, end) + 1
    return {'start': start, 'end': end, 'line': body[start:end]}
def get_comment(body, name):
    """ Retrieve the comment for a class or function.

    Scans upward from the first occurrence of |name| in |body|, collecting
    '//' comment lines (leading '//' stripped) until a non-comment line is
    reached. Blank separator lines within the comment are recorded as None
    entries, and /*--cef()--*/ attribute blocks are skipped. The collected
    lines are returned in top-down order.
    """
    result = []

    pos = body.find(name)
    in_block_comment = False
    while pos > 0:
        data = get_prev_line(body, pos)
        line = data['line'].strip()
        pos = data['start']
        if len(line) == 0:
            # check if the next previous line is a comment
            prevdata = get_prev_line(body, pos)
            prevline = prevdata['line'].strip()
            if prevline[0:2] == '//' and prevline[0:3] != '///':
                result.append(None)
            else:
                break
        # single line /*--cef()--*/
        elif line[0:2] == '/*' and line[-2:] == '*/':
            continue
        # start of multi line /*--cef()--*/
        # (scanning upward, so the '/*' opener is seen while inside the block)
        elif in_block_comment and line[0:2] == '/*':
            in_block_comment = False
            continue
        # end of multi line /*--cef()--*/
        # (scanning upward, so the '*/' terminator is encountered first)
        elif not in_block_comment and line[-2:] == '*/':
            in_block_comment = True
            continue
        elif in_block_comment:
            continue
        elif line[0:2] == '//':
            # keep the comment line including any leading spaces
            result.append(line[2:])
        else:
            break

    # Lines were gathered bottom-up; restore document order.
    result.reverse()
    return result
def validate_comment(file, name, comment):
    """Validate the comment array returned by get_comment().

    A well-formed CppDoc comment is delimited by '///' lines, which
    get_comment() reduces to bare '/' entries. Exactly two such delimiters
    are required, at least three lines total, with the final entry being a
    delimiter.

    Raises:
        Exception: if the comment is missing or malformed.
    """
    doc_delims = sum(1 for line in comment if line == '/')
    if doc_delims != 2 or len(comment) < 3 or comment[-1] != '/':
        raise Exception('Missing or incorrect comment in %s for: %s' % (file, name))
def format_comment(comment, indent, translate_map=None, maxchars=80):
    """ Return the comments array as a formatted string.

    Consecutive plain lines are accumulated into a paragraph, optionally
    translated via |translate_map|, and re-wrapped to |maxchars| columns
    prefixed with |indent| + '// '. Blank (None) entries, further-indented
    lines and '/' delimiter lines end the current paragraph and are emitted
    as-is.
    """
    if not translate_map is None:
        # Replace longest keys first in translation.
        translate_keys = sorted(
            translate_map.keys(), key=lambda item: (-len(item), item))

    result = ''
    wrapme = ''
    hasemptyline = False
    for line in comment:
        # if the line starts with a leading space, remove that space
        if not line is None and len(line) > 0 and line[0:1] == ' ':
            line = line[1:]
            didremovespace = True
        else:
            didremovespace = False

        if line is None or len(line) == 0 or line[0:1] == ' ' \
            or line[0:1] == '/':
            # the previous paragraph, if any, has ended
            if len(wrapme) > 0:
                if not translate_map is None:
                    # apply the translation
                    for key in translate_keys:
                        wrapme = wrapme.replace(key, translate_map[key])
                # output the previous paragraph
                result += wrap_text(wrapme, indent + '// ', maxchars)
                wrapme = ''

        if not line is None:
            if len(line) == 0 or line[0:1] == ' ' or line[0:1] == '/':
                # blank lines or anything that's further indented should be
                # output as-is
                result += indent + '//'
                if len(line) > 0:
                    if didremovespace:
                        result += ' ' + line
                    else:
                        result += line
                result += '\n'
            else:
                # add to the current paragraph
                wrapme += line + ' '
        else:
            # output an empty line
            hasemptyline = True
            result += '\n'

    if len(wrapme) > 0:
        if not translate_map is None:
            # apply the translation
            for key in translate_map.keys():
                wrapme = wrapme.replace(key, translate_map[key])
        # output the previous paragraph
        result += wrap_text(wrapme, indent + '// ', maxchars)

    if hasemptyline:
        # an empty line means a break between comments, so the comment is
        # probably a section heading and should have an extra line before it
        result = '\n' + result

    return result
def format_translation_changes(old, new):
    """Return a comment describing prototype differences between *old* and *new*.

    Compares normalized argument lists and return values; an empty string
    means nothing changed. Any change also appends a #pragma message so the
    compiler surfaces the mismatch.
    """
    changed = False
    result = ''

    # Normalize C API attributes before comparing.
    oldargs = {x.replace('struct _', '') for x in old['args']}
    newargs = {x.replace('struct _', '') for x in new['args']}
    oldretval = old['retval'].replace('struct _', '')
    newretval = new['retval'].replace('struct _', '')

    # Argument sets differ?
    if oldargs.symmetric_difference(newargs):
        changed = True
        result += '\n // WARNING - CHANGED ATTRIBUTES'

        # Present only in the implementation set.
        for arg in oldargs.difference(newargs):
            result += '\n // REMOVED: ' + arg

        # Present only in the current set.
        for arg in newargs.difference(oldargs):
            result += '\n // ADDED: ' + arg

    # Return value differs?
    if oldretval != newretval:
        changed = True
        result += '\n // WARNING - CHANGED RETURN VALUE'+ \
                  '\n // WAS: '+old['retval']+ \
                  '\n // NOW: '+new['retval']

    if changed:
        result += '\n #pragma message("Warning: "__FILE__": '+new['name']+ \
                  ' prototype has changed")\n'

    return result
def format_translation_includes(header, body):
    """ Return the necessary list of includes based on the contents of the
    body.

    Scans the generated |body| text for known symbols (std::min/max,
    cef_api_hash, *CppToC / *CToCpp wrapper classes, shutdown_checker,
    transfer_ helpers) and emits one #include line per dependency found.
    |header| is used to resolve each wrapped class to its file directory.
    """
    result = ''

    # <algorithm> required for VS2013.
    if body.find('std::min') > 0 or body.find('std::max') > 0:
        result += '#include <algorithm>\n'

    if body.find('cef_api_hash(') > 0:
        result += '#include "include/cef_api_hash.h"\n'

    # identify what CppToC classes are being used
    p = re.compile('([A-Za-z0-9_]{1,})CppToC')
    list = sorted(set(p.findall(body)))
    for item in list:
        directory = ''
        if not is_base_class(item):
            cls = header.get_class(item)
            dir = cls.get_file_directory()
            if not dir is None:
                directory = dir + '/'
        result += '#include "libcef_dll/cpptoc/'+directory+ \
                  get_capi_name(item[3:], False)+'_cpptoc.h"\n'

    # identify what CToCpp classes are being used
    p = re.compile('([A-Za-z0-9_]{1,})CToCpp')
    list = sorted(set(p.findall(body)))
    for item in list:
        directory = ''
        if not is_base_class(item):
            cls = header.get_class(item)
            dir = cls.get_file_directory()
            if not dir is None:
                directory = dir + '/'
        result += '#include "libcef_dll/ctocpp/'+directory+ \
                  get_capi_name(item[3:], False)+'_ctocpp.h"\n'

    if body.find('shutdown_checker') > 0:
        result += '#include "libcef_dll/shutdown_checker.h"\n'

    if body.find('transfer_') > 0:
        result += '#include "libcef_dll/transfer_util.h"\n'

    return result
def str_to_dict(str):
    """ Convert a string to a dictionary. If the same key has multiple values
    the values will be stored in a list.

    Input format is comma-separated 'name=value' pairs; a bare 'name' maps
    to True. Raises Exception on a pair with more than one '='.
    """
    result = {}
    for part in str.split(','):
        part = part.strip()
        if not part:
            continue
        pieces = part.split('=')
        if len(pieces) > 2:
            raise Exception('Invalid dictionary pair format: ' + part)
        name = pieces[0].strip()
        val = pieces[1].strip() if len(pieces) == 2 else True
        if name in result:
            # Duplicate key: promote an existing scalar to a list, append.
            if not isinstance(result[name], list):
                result[name] = [result[name]]
            result[name].append(val)
        else:
            result[name] = val
    return result
def dict_to_str(dict):
    """ Convert a dictionary to a string.

    Inverse of str_to_dict(): True values emit a bare key, strings emit
    'key=value', and list values emit one 'key=value' pair per element.
    """
    parts = []
    for name, value in dict.items():
        if isinstance(value, list):
            parts.extend(name + '=' + v for v in value)
        elif value is True:
            parts.append(name)
        else:
            parts.append(name + '=' + value)
    return ','.join(parts)
# regex for matching comment-formatted attributes
_cre_attrib = '/\*--cef\(([A-Za-z0-9_ ,=:\n]{0,})\)--\*/'
# regex for matching class and function names
_cre_cfname = '([A-Za-z0-9_]{1,})'
# regex for matching class and function names including path separators
_cre_cfnameorpath = '([A-Za-z0-9_\/]{1,})'
# regex for matching function return values
_cre_retval = '([A-Za-z0-9_<>:,\*\&]{1,})'
# regex for matching typedef value and name combination
_cre_typedef = '([A-Za-z0-9_<>:,\*\&\s]{1,})'
# regex for matching function return value and name combination
_cre_func = '([A-Za-z][A-Za-z0-9_<>:,\*\&\s]{1,})'
# regex for matching virtual function modifiers + arbitrary whitespace
_cre_vfmod = '([\sA-Za-z0-9_]{0,})'
# regex for matching arbitrary whitespace
_cre_space = '[\s]{1,}'
# regex for matching optional virtual keyword
_cre_virtual = '(?:[\s]{1,}virtual){0,1}'
# Simple translation types. Format is:
#   'cpp_type' : ['capi_type', 'capi_default_value']
# Used when translating C++ signatures to C API signatures: the C++ type on
# the left maps to the C type plus the default/failure return value on the
# right.
_simpletypes = {
    'void': ['void', ''],
    'void*': ['void*', 'NULL'],
    'int': ['int', '0'],
    'int16': ['int16', '0'],
    'uint16': ['uint16', '0'],
    'int32': ['int32', '0'],
    'uint32': ['uint32', '0'],
    'int64': ['int64', '0'],
    'uint64': ['uint64', '0'],
    'double': ['double', '0'],
    'float': ['float', '0'],
    'float*': ['float*', 'NULL'],
    'long': ['long', '0'],
    'unsigned long': ['unsigned long', '0'],
    'long long': ['long long', '0'],
    'size_t': ['size_t', '0'],
    'bool': ['int', '0'],
    'char': ['char', '0'],
    'char* const': ['char* const', 'NULL'],
    'cef_color_t': ['cef_color_t', '0'],
    'cef_json_parser_error_t': ['cef_json_parser_error_t', 'JSON_NO_ERROR'],
    'cef_plugin_policy_t': ['cef_plugin_policy_t', 'PLUGIN_POLICY_ALLOW'],
    'CefCursorHandle': ['cef_cursor_handle_t', 'kNullCursorHandle'],
    'CefCompositionUnderline': [
        'cef_composition_underline_t', 'CefCompositionUnderline()'
    ],
    'CefEventHandle': ['cef_event_handle_t', 'kNullEventHandle'],
    'CefWindowHandle': ['cef_window_handle_t', 'kNullWindowHandle'],
    'CefInsets': ['cef_insets_t', 'CefInsets()'],
    'CefPoint': ['cef_point_t', 'CefPoint()'],
    'CefRect': ['cef_rect_t', 'CefRect()'],
    'CefSize': ['cef_size_t', 'CefSize()'],
    'CefRange': ['cef_range_t', 'CefRange()'],
    'CefDraggableRegion': ['cef_draggable_region_t', 'CefDraggableRegion()'],
    'CefThreadId': ['cef_thread_id_t', 'TID_UI'],
    'CefTime': ['cef_time_t', 'CefTime()'],
    'CefAudioParameters': ['cef_audio_parameters_t', 'CefAudioParameters()']
}
def get_function_impls(content, ident, has_impl=True):
    """ Retrieve the function parts from the specified contents as a set of
    return value, name, arguments and body. Ident must occur somewhere in
    the value.

    When |has_impl| is True the functions are expected to carry a `{...}`
    body; otherwise a trailing ';' declaration is matched and the body is
    returned empty.
    """
    # extract the functions
    find_regex = '\n' + _cre_func + '\((.*?)\)([A-Za-z0-9_\s]{0,})'
    if has_impl:
        find_regex += '\{(.*?)\n\}'
    else:
        find_regex += '(;)'
    p = re.compile(find_regex, re.MULTILINE | re.DOTALL)
    list = p.findall(content)

    # build the function map with the function name as the key
    result = []
    for retval, argval, vfmod, body in list:
        if retval.find(ident) < 0:
            # the identifier was not found
            continue

        # remove the identifier
        retval = retval.replace(ident, '')
        retval = retval.strip()

        # Normalize the delimiter.
        retval = retval.replace('\n', ' ')

        # retrieve the function name (last whitespace-separated token of the
        # declaration; everything before it is the return type)
        parts = retval.split(' ')
        name = parts[-1]
        del parts[-1]
        retval = ' '.join(parts)

        # parse the arguments
        args = []
        for v in argval.split(','):
            v = v.strip()
            if len(v) > 0:
                args.append(v)

        result.append({
            'retval': retval.strip(),
            'name': name,
            'args': args,
            'vfmod': vfmod.strip(),
            'body': body if has_impl else '',
        })

    return result
def get_next_function_impl(existing, name):
    """Pop and return the first entry of *existing* whose 'name' matches.

    Returns None when no entry matches; the matched entry is removed from
    the list in place.
    """
    for entry in existing:
        if entry['name'] == name:
            existing.remove(entry)
            return entry
    return None
def get_copyright(full=False, translator=True):
    """Return the file header comment block with the current year filled in.

    Args:
        full: Emit the full BSD license text instead of the short form.
        translator: Append the "generated by the CEF translator" banner.
    """
    if full:
        result = \
"""// Copyright (c) $YEAR$ Marshall A. Greenblatt. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//    * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//    * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//    * Neither the name of Google Inc. nor the name Chromium Embedded
// Framework nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
    else:
        result = \
"""// Copyright (c) $YEAR$ The Chromium Embedded Framework Authors. All rights
// reserved. Use of this source code is governed by a BSD-style license that
// can be found in the LICENSE file.
"""

    if translator:
        result += \
"""//
// ---------------------------------------------------------------------------
//
// This file was generated by the CEF translator tool. If making changes by
// hand only do so within the body of existing method and function
// implementations. See the translator.README.txt file in the tools directory
// for more information.
//
// $hash=$$HASH$$$
//
"""

    # add the copyright year ($HASH$ is substituted later by the caller)
    return result.replace('$YEAR$', get_year())
class obj_header:
  """ Class representing a C++ header file.

  Parses C++ header text supplied via add_directory/add_file/add_data and
  collects the global typedefs, global functions and class declarations that
  it finds. The collected obj_typedef/obj_function/obj_class objects are then
  queried through the accessor methods below.
  """

  def __init__(self):
    self.filenames = []  # header files that contributed a function or class
    self.typedefs = []  # global obj_typedef objects
    self.funcs = []  # global obj_function objects
    self.classes = []  # obj_class objects
    self.root_directory = None

  def set_root_directory(self, root_directory):
    """ Set the root directory. """
    self.root_directory = root_directory

  def get_root_directory(self):
    """ Get the root directory. """
    return self.root_directory

  def add_directory(self, directory, excluded_files=[]):
    """ Add all header files from the specified directory.

    Files whose base name appears in |excluded_files| are skipped.
    """
    # NOTE(review): mutable default argument; harmless here because the list
    # is only read, never mutated.
    files = get_files(os.path.join(directory, '*.h'))
    for file in files:
      if len(excluded_files) == 0 or \
          not os.path.split(file)[1] in excluded_files:
        self.add_file(file)

  def add_file(self, filepath):
    """ Add a header file.

    The stored file name is relative to the root directory (if one was set)
    and always uses forward slashes.
    """
    if self.root_directory is None:
      filename = os.path.split(filepath)[1]
    else:
      filename = os.path.relpath(filepath, self.root_directory)
      filename = filename.replace('\\', '/')

    # read the input file into memory
    self.add_data(filename, read_file(filepath))

  def add_data(self, filename, data):
    """ Add header file contents.

    Scans |data| with the module-level _cre_* regex fragments (defined
    earlier in this file) and appends any global typedefs, global functions
    and classes found to this object's collections. |filename| is recorded
    only when at least one function or class was extracted.
    """
    added = False

    # remove space from between template definition end brackets
    data = data.replace("> >", ">>")

    # extract global typedefs
    p = re.compile('\ntypedef' + _cre_space + _cre_typedef + ';',
                   re.MULTILINE | re.DOTALL)
    list = p.findall(data)
    if len(list) > 0:
      # build the global typedef objects
      for value in list:
        # the alias is the last whitespace-separated token of the typedef
        pos = value.rfind(' ')
        if pos < 0:
          raise Exception('Invalid typedef: ' + value)
        alias = value[pos + 1:].strip()
        value = value[:pos].strip()
        self.typedefs.append(obj_typedef(self, filename, value, alias))

    # extract global functions
    p = re.compile('\n' + _cre_attrib + '\n' + _cre_func + '\((.*?)\)',
                   re.MULTILINE | re.DOTALL)
    list = p.findall(data)
    if len(list) > 0:
      added = True

      # build the global function objects
      for attrib, retval, argval in list:
        comment = get_comment(data, retval + '(' + argval + ');')
        validate_comment(filename, retval, comment)
        self.funcs.append(
            obj_function(self, filename, attrib, retval, argval, comment))

    # extract includes
    p = re.compile('\n#include \"include/' + _cre_cfnameorpath + '.h')
    includes = p.findall(data)

    # extract forward declarations
    p = re.compile('\nclass' + _cre_space + _cre_cfname + ';')
    forward_declares = p.findall(data)

    # extract empty classes (single-line '{};' bodies)
    p = re.compile('\n' + _cre_attrib + '\nclass' + _cre_space + _cre_cfname +
                   _cre_space + ':' + _cre_space + 'public' + _cre_virtual +
                   _cre_space + _cre_cfname + _cre_space + '{};',
                   re.MULTILINE | re.DOTALL)
    list = p.findall(data)
    if len(list) > 0:
      added = True

      # build the class objects
      for attrib, name, parent_name in list:
        # Style may place the ':' on the next line.
        comment = get_comment(data, name + ' :')
        if len(comment) == 0:
          comment = get_comment(data, name + "\n")
        validate_comment(filename, name, comment)
        self.classes.append(
            obj_class(self, filename, attrib, name, parent_name, "", comment,
                      includes, forward_declares))

      # Remove empty classes from |data| so we don't mess up the non-empty
      # class search that follows.
      data = p.sub('', data)

    # extract classes (with non-empty bodies)
    p = re.compile('\n' + _cre_attrib + '\nclass' + _cre_space + _cre_cfname +
                   _cre_space + ':' + _cre_space + 'public' + _cre_virtual +
                   _cre_space + _cre_cfname + _cre_space + '{(.*?)\n};',
                   re.MULTILINE | re.DOTALL)
    list = p.findall(data)
    if len(list) > 0:
      added = True

      # build the class objects
      for attrib, name, parent_name, body in list:
        # Style may place the ':' on the next line.
        comment = get_comment(data, name + ' :')
        if len(comment) == 0:
          comment = get_comment(data, name + "\n")
        validate_comment(filename, name, comment)
        self.classes.append(
            obj_class(self, filename, attrib, name, parent_name, body, comment,
                      includes, forward_declares))

    if added:
      # a global function or class was read from the header file
      self.filenames.append(filename)

  def __repr__(self):
    # Concatenate the string forms of all typedefs, functions and classes.
    result = ''

    if len(self.typedefs) > 0:
      strlist = []
      for cls in self.typedefs:
        strlist.append(str(cls))
      result += "\n".join(strlist) + "\n\n"

    if len(self.funcs) > 0:
      strlist = []
      for cls in self.funcs:
        strlist.append(str(cls))
      result += "\n".join(strlist) + "\n\n"

    if len(self.classes) > 0:
      strlist = []
      for cls in self.classes:
        strlist.append(str(cls))
      result += "\n".join(strlist)

    return result

  def get_file_names(self):
    """ Return the array of header file names. """
    return self.filenames

  def get_typedefs(self):
    """ Return the array of typedef objects. """
    return self.typedefs

  def get_funcs(self, filename=None):
    """ Return the array of function objects. """
    if filename is None:
      return self.funcs
    else:
      # only return the functions in the specified file
      res = []
      for func in self.funcs:
        if func.get_file_name() == filename:
          res.append(func)
      return res

  def get_classes(self, filename=None):
    """ Return the array of class objects. """
    if filename is None:
      return self.classes
    else:
      # only return the classes in the specified file
      res = []
      for cls in self.classes:
        if cls.get_name() == filename:
          res.append(cls)
      return res

  def get_class(self, classname, defined_structs=None):
    """ Return the specified class or None if not found.

    Side effect: when |defined_structs| is a list, the CAPI names of the
    classes scanned before a match is found are appended to it.
    """
    for cls in self.classes:
      if cls.get_name() == classname:
        return cls
      elif not defined_structs is None:
        defined_structs.append(cls.get_capi_name())
    return None

  def get_class_names(self):
    """ Returns the names of all classes in this object. """
    result = []
    for cls in self.classes:
      result.append(cls.get_name())
    return result

  def get_base_class_name(self, classname):
    """ Returns the base (root) class name for |classname|.

    Walks up the parent chain until a known base class is reached. Returns
    None if the chain is broken (a parent class is not known to this header).
    """
    cur_cls = self.get_class(classname)
    while True:
      parent_name = cur_cls.get_parent_name()
      if is_base_class(parent_name):
        return parent_name
      else:
        parent_cls = self.get_class(parent_name)
        if parent_cls is None:
          break
      cur_cls = self.get_class(parent_name)
    return None

  def get_types(self, list):
    """ Return a dictionary mapping data types to analyzed values. """
    for cls in self.typedefs:
      cls.get_types(list)

    for cls in self.classes:
      cls.get_types(list)

  def get_alias_translation(self, alias):
    """ Return a translation of alias to value based on typedef
        statements. """
    for cls in self.typedefs:
      if cls.alias == alias:
        return cls.value
    return None

  def get_analysis(self, value, named=True):
    """ Return an analysis of the value based the header file context. """
    return obj_analysis([self], value, named)

  def get_defined_structs(self):
    """ Return a list of already defined structure names. """
    return [
        'cef_print_info_t', 'cef_window_info_t', 'cef_base_ref_counted_t',
        'cef_base_scoped_t'
    ]

  def get_capi_translations(self):
    """ Return a dictionary that maps C++ terminology to C API terminology.
    """
    # strings that will be changed in C++ comments
    map = {
        'class': 'structure',
        'Class': 'Structure',
        'interface': 'structure',
        'Interface': 'Structure',
        'true': 'true (1)',
        'false': 'false (0)',
        'empty': 'NULL',
        'method': 'function'
    }

    # add mappings for all classes and functions
    funcs = self.get_funcs()
    for func in funcs:
      map[func.get_name() + '()'] = func.get_capi_name() + '()'

    classes = self.get_classes()
    for cls in classes:
      map[cls.get_name()] = cls.get_capi_name()

      funcs = cls.get_virtual_funcs()
      for func in funcs:
        map[func.get_name() + '()'] = func.get_capi_name() + '()'

      funcs = cls.get_static_funcs()
      for func in funcs:
        map[func.get_name() + '()'] = func.get_capi_name() + '()'

    return map
class obj_class:
  """ Class representing a C++ class.

  Construction parses the class |body| text with the module-level _cre_*
  regex fragments to extract nested typedefs, static functions and virtual
  functions.
  """

  def __init__(self, parent, filename, attrib, name, parent_name, body, comment,
               includes, forward_declares):
    if not isinstance(parent, obj_header):
      raise Exception('Invalid parent object type')

    self.parent = parent
    self.filename = filename
    self.attribs = str_to_dict(attrib)  # parsed '/*--cef(...)--*/' attributes
    self.name = name
    self.parent_name = parent_name
    self.comment = comment
    self.includes = includes
    self.forward_declares = forward_declares

    # extract typedefs
    p = re.compile(
        '\n' + _cre_space + 'typedef' + _cre_space + _cre_typedef + ';',
        re.MULTILINE | re.DOTALL)
    list = p.findall(body)

    # build the typedef objects
    self.typedefs = []
    for value in list:
      # the alias is the last whitespace-separated token of the typedef
      pos = value.rfind(' ')
      if pos < 0:
        raise Exception('Invalid typedef: ' + value)
      alias = value[pos + 1:].strip()
      value = value[:pos].strip()
      self.typedefs.append(obj_typedef(self, filename, value, alias))

    # extract static functions
    p = re.compile('\n' + _cre_space + _cre_attrib + '\n' + _cre_space +
                   'static' + _cre_space + _cre_func + '\((.*?)\)',
                   re.MULTILINE | re.DOTALL)
    list = p.findall(body)

    # build the static function objects
    self.staticfuncs = []
    for attrib, retval, argval in list:
      comment = get_comment(body, retval + '(' + argval + ')')
      validate_comment(filename, retval, comment)
      self.staticfuncs.append(
          obj_function_static(self, attrib, retval, argval, comment))

    # extract virtual functions
    p = re.compile(
        '\n' + _cre_space + _cre_attrib + '\n' + _cre_space + 'virtual' +
        _cre_space + _cre_func + '\((.*?)\)' + _cre_vfmod,
        re.MULTILINE | re.DOTALL)
    list = p.findall(body)

    # build the virtual function objects
    self.virtualfuncs = []
    for attrib, retval, argval, vfmod in list:
      comment = get_comment(body, retval + '(' + argval + ')')
      validate_comment(filename, retval, comment)
      self.virtualfuncs.append(
          obj_function_virtual(self, attrib, retval, argval, comment,
                               vfmod.strip()))

  def __repr__(self):
    result = '/* ' + dict_to_str(
        self.attribs) + ' */ class ' + self.name + "\n{"

    if len(self.typedefs) > 0:
      result += "\n\t"
      strlist = []
      for cls in self.typedefs:
        strlist.append(str(cls))
      result += "\n\t".join(strlist)

    if len(self.staticfuncs) > 0:
      result += "\n\t"
      strlist = []
      for cls in self.staticfuncs:
        strlist.append(str(cls))
      result += "\n\t".join(strlist)

    if len(self.virtualfuncs) > 0:
      result += "\n\t"
      strlist = []
      for cls in self.virtualfuncs:
        strlist.append(str(cls))
      result += "\n\t".join(strlist)

    result += "\n};\n"
    return result

  def get_file_name(self):
    """ Return the C++ header file name. Includes the directory component,
        if any. """
    return self.filename

  def get_capi_file_name(self):
    """ Return the CAPI header file name. Includes the directory component,
        if any. """
    return get_capi_file_name(self.filename)

  def get_file_directory(self):
    """ Return the file directory component, if any. """
    pos = self.filename.rfind('/')
    if pos >= 0:
      return self.filename[:pos]
    return None

  def get_name(self):
    """ Return the class name. """
    return self.name

  def get_capi_name(self):
    """ Return the CAPI structure name for this class. """
    return get_capi_name(self.name, True)

  def get_parent_name(self):
    """ Return the parent class name. """
    return self.parent_name

  def get_parent_capi_name(self):
    """ Return the CAPI structure name for the parent class. """
    return get_capi_name(self.parent_name, True)

  def has_parent(self, parent_name):
    """ Returns true if this class has the specified class anywhere in its
        inheritance hierarchy. """
    # Every class has a known base class as the top-most parent.
    if is_base_class(parent_name) or parent_name == self.parent_name:
      return True
    if is_base_class(self.parent_name):
      return False

    # walk up the inheritance chain until the base class is reached
    cur_cls = self.parent.get_class(self.parent_name)
    while True:
      cur_parent_name = cur_cls.get_parent_name()
      if is_base_class(cur_parent_name):
        break
      elif cur_parent_name == parent_name:
        return True
      cur_cls = self.parent.get_class(cur_parent_name)

    return False

  def get_comment(self):
    """ Return the class comment as an array of lines. """
    return self.comment

  def get_includes(self):
    """ Return the list of classes that are included from this class'
        header file. """
    return self.includes

  def get_forward_declares(self):
    """ Return the list of classes that are forward declared for this
        class. """
    return self.forward_declares

  def get_attribs(self):
    """ Return all attributes as a dictionary. """
    return self.attribs

  def has_attrib(self, name):
    """ Return true if the specified attribute exists. """
    return name in self.attribs

  def get_attrib(self, name):
    """ Return the first or only value for specified attribute. """
    if name in self.attribs:
      if isinstance(self.attribs[name], list):
        # the value is a list
        return self.attribs[name][0]
      else:
        # the value is a string
        return self.attribs[name]
    return None

  def get_attrib_list(self, name):
    """ Return all values for specified attribute as a list. """
    if name in self.attribs:
      if isinstance(self.attribs[name], list):
        # the value is already a list
        return self.attribs[name]
      else:
        # convert the value to a list
        return [self.attribs[name]]
    return None

  def get_typedefs(self):
    """ Return the array of typedef objects. """
    return self.typedefs

  def has_typedef_alias(self, alias):
    """ Returns true if the specified typedef alias is defined in the scope
        of this class declaration. """
    for typedef in self.typedefs:
      if typedef.get_alias() == alias:
        return True
    return False

  def get_static_funcs(self):
    """ Return the array of static function objects. """
    return self.staticfuncs

  def get_virtual_funcs(self):
    """ Return the array of virtual function objects. """
    return self.virtualfuncs

  def get_types(self, list):
    """ Return a dictionary mapping data types to analyzed values. """
    for cls in self.typedefs:
      cls.get_types(list)

    for cls in self.staticfuncs:
      cls.get_types(list)

    for cls in self.virtualfuncs:
      cls.get_types(list)

  def get_alias_translation(self, alias):
    # Return the analyzed value for |alias| if a class-scope typedef defines
    # it, otherwise None.
    for cls in self.typedefs:
      if cls.alias == alias:
        return cls.value
    return None

  def get_analysis(self, value, named=True):
    """ Return an analysis of the value based on the class definition
        context.
    """
    return obj_analysis([self, self.parent], value, named)

  def is_library_side(self):
    """ Returns true if the class is implemented by the library. """
    return self.attribs['source'] == 'library'

  def is_client_side(self):
    """ Returns true if the class is implemented by the client. """
    return self.attribs['source'] == 'client'
class obj_typedef:
  """ Class representing a typedef statement.

  Holds the alias name plus an obj_analysis of the aliased type, resolved in
  the context of the parent header or class.
  """

  def __init__(self, parent, filename, value, alias):
    # A typedef may live at header scope or inside a class declaration.
    if not isinstance(parent, (obj_header, obj_class)):
      raise Exception('Invalid parent object type')
    self.parent = parent
    self.filename = filename
    self.alias = alias
    # Analyze the aliased type in the parent's scope (unnamed analysis).
    self.value = self.parent.get_analysis(value, False)

  def __repr__(self):
    return 'typedef %s %s;' % (self.value.get_type(), self.alias)

  def get_file_name(self):
    """ Return the C++ header file name. """
    return self.filename

  def get_capi_file_name(self):
    """ Return the CAPI header file name. """
    return get_capi_file_name(self.filename)

  def get_alias(self):
    """ Return the alias. """
    return self.alias

  def get_value(self):
    """ Return an analysis of the value based on the class or header file
        definition context.
    """
    return self.value

  def get_types(self, list):
    """ Return a dictionary mapping data types to analyzed values. """
    type_name = self.value.get_type()
    if type_name not in list:
      list[type_name] = self.value
class obj_function:
  """ Class representing a function.

  Base class for global functions; obj_function_static and
  obj_function_virtual specialize it for class members. Splits the raw C++
  argument string into obj_argument objects and validates required
  attributes ('count_func' for array params, 'default_retval' for
  enumeration return types).
  """

  def __init__(self, parent, filename, attrib, retval, argval, comment):
    self.parent = parent  # obj_header (global) or obj_class (member)
    self.filename = filename
    self.attribs = str_to_dict(attrib)
    self.retval = obj_argument(self, retval)
    # The function name is the trailing identifier of the retval expression.
    self.name = self.retval.remove_name()
    self.comment = comment

    # build the argument objects
    self.arguments = []
    arglist = argval.split(',')
    argindex = 0
    while argindex < len(arglist):
      arg = arglist[argindex]
      if arg.find('<') >= 0 and arg.find('>') == -1:
        # We've split inside of a template type declaration. Join the
        # next argument with this argument.
        # NOTE(review): only a single rejoin is performed; presumably
        # templates with more than one comma never occur here — confirm.
        argindex += 1
        arg += ',' + arglist[argindex]

      arg = arg.strip()
      if len(arg) > 0:
        argument = obj_argument(self, arg)
        if argument.needs_attrib_count_func() and \
            argument.get_attrib_count_func() is None:
          raise Exception("A 'count_func' attribute is required "+ \
                          "for the '"+argument.get_name()+ \
                          "' parameter to "+self.get_qualified_name())
        self.arguments.append(argument)

      argindex += 1

    if self.retval.needs_attrib_default_retval() and \
        self.retval.get_attrib_default_retval() is None:
      raise Exception("A 'default_retval' attribute is required for "+ \
                      self.get_qualified_name())

  def __repr__(self):
    return '/* ' + dict_to_str(self.attribs) + ' */ ' + self.get_cpp_proto()

  def get_file_name(self):
    """ Return the C++ header file name. """
    return self.filename

  def get_capi_file_name(self):
    """ Return the CAPI header file name. """
    return get_capi_file_name(self.filename)

  def get_name(self):
    """ Return the function name. """
    return self.name

  def get_qualified_name(self):
    """ Return the fully qualified function name. """
    if isinstance(self.parent, obj_header):
      # global function
      return self.name
    else:
      # member function
      return self.parent.get_name() + '::' + self.name

  def get_capi_name(self, prefix=None):
    """ Return the CAPI function name.

    An explicit 'capi_name' attribute overrides the derived name.
    """
    if 'capi_name' in self.attribs:
      return self.attribs['capi_name']
    return get_capi_name(self.name, False, prefix)

  def get_comment(self):
    """ Return the function comment as an array of lines. """
    return self.comment

  def get_attribs(self):
    """ Return all attributes as a dictionary. """
    return self.attribs

  def has_attrib(self, name):
    """ Return true if the specified attribute exists. """
    return name in self.attribs

  def get_attrib(self, name):
    """ Return the first or only value for specified attribute. """
    if name in self.attribs:
      if isinstance(self.attribs[name], list):
        # the value is a list
        return self.attribs[name][0]
      else:
        # the value is a string
        return self.attribs[name]
    return None

  def get_attrib_list(self, name):
    """ Return all values for specified attribute as a list. """
    if name in self.attribs:
      if isinstance(self.attribs[name], list):
        # the value is already a list
        return self.attribs[name]
      else:
        # convert the value to a list
        return [self.attribs[name]]
    return None

  def get_retval(self):
    """ Return the return value object. """
    return self.retval

  def get_arguments(self):
    """ Return the argument array. """
    return self.arguments

  def get_types(self, list):
    """ Return a dictionary mapping data types to analyzed values. """
    for cls in self.arguments:
      cls.get_types(list)

  def get_capi_parts(self, defined_structs=[], prefix=None):
    """ Return the parts of the C API function definition.

    Returns a dict with 'retval' (C return type), 'name' (C function name)
    and 'args' (list of C parameter declaration strings). Array-typed
    arguments ('multi-arg' format) gain a companion size_t count parameter.
    """
    retval = ''
    dict = self.retval.get_type().get_capi(defined_structs)
    if dict['format'] == 'single':
      retval = dict['value']

    name = self.get_capi_name(prefix)

    args = []

    if isinstance(self, obj_function_virtual):
      # virtual functions get themselves as the first argument
      # NOTE(review): 'str' shadows the builtin; kept as-is.
      str = 'struct _' + self.parent.get_capi_name() + '* self'
      # NOTE(review): the isinstance() re-check below is redundant (already
      # inside the virtual branch); only the is_const() test matters.
      if isinstance(self, obj_function_virtual) and self.is_const():
        # const virtual functions get const self pointers
        str = 'const ' + str
      args.append(str)

    if len(self.arguments) > 0:
      for cls in self.arguments:
        type = cls.get_type()
        dict = type.get_capi(defined_structs)
        if dict['format'] == 'single':
          args.append(dict['value'])
        elif dict['format'] == 'multi-arg':
          # add an additional argument for the size of the array
          type_name = type.get_name()
          if type.is_const():
            # for const arrays pass the size argument by value
            args.append('size_t ' + type_name + 'Count')
          else:
            # for non-const arrays pass the size argument by address
            args.append('size_t* ' + type_name + 'Count')
          args.append(dict['value'])

    return {'retval': retval, 'name': name, 'args': args}

  def get_capi_proto(self, defined_structs=[], prefix=None):
    """ Return the prototype of the C API function. """
    parts = self.get_capi_parts(defined_structs, prefix)
    result = parts['retval']+' '+parts['name']+ \
             '('+', '.join(parts['args'])+')'
    return result

  def get_cpp_parts(self, isimpl=False):
    """ Return the parts of the C++ function definition.

    When |isimpl| is true, enumeration return types declared inside the
    parent class are qualified with the class name.
    """
    retval = str(self.retval)
    name = self.name

    args = []
    if len(self.arguments) > 0:
      for cls in self.arguments:
        args.append(str(cls))

    if isimpl and isinstance(self, obj_function_virtual):
      # enumeration return values must be qualified with the class name
      # if the type is defined in the class declaration scope.
      type = self.get_retval().get_type()
      if type.is_result_struct() and type.is_result_struct_enum() and \
          self.parent.has_typedef_alias(retval):
        retval = self.parent.get_name() + '::' + retval

    return {'retval': retval, 'name': name, 'args': args}

  def get_cpp_proto(self, classname=None):
    """ Return the prototype of the C++ function. """
    parts = self.get_cpp_parts()
    result = parts['retval'] + ' '
    if not classname is None:
      result += classname + '::'
    result += parts['name'] + '(' + ', '.join(parts['args']) + ')'
    if isinstance(self, obj_function_virtual) and self.is_const():
      result += ' const'
    return result

  def is_same_side(self, other_class_name):
    """ Returns true if this function is on the same side (library or
        client) and the specified class. """
    if isinstance(self.parent, obj_class):
      # this function is part of a class
      this_is_library_side = self.parent.is_library_side()
      header = self.parent.parent
    else:
      # this function is global
      this_is_library_side = True
      header = self.parent

    if is_base_class(other_class_name):
      other_is_library_side = False
    else:
      other_class = header.get_class(other_class_name)
      if other_class is None:
        raise Exception('Unknown class: ' + other_class_name)
      other_is_library_side = other_class.is_library_side()

    return other_is_library_side == this_is_library_side
class obj_function_static(obj_function):
  """ Class representing a static class member function. """

  def __init__(self, parent, attrib, retval, argval, comment):
    # Static functions may only be declared inside a class.
    if not isinstance(parent, obj_class):
      raise Exception('Invalid parent object type')
    obj_function.__init__(self, parent, parent.filename, attrib, retval,
                          argval, comment)

  def __repr__(self):
    return 'static %s;' % obj_function.__repr__(self)

  def get_capi_name(self, prefix=None):
    """ Return the CAPI function name, prefixed with the enclosing class
        name unless an explicit prefix is supplied. """
    effective_prefix = (get_capi_name(self.parent.get_name(), False)
                        if prefix is None else prefix)
    return obj_function.get_capi_name(self, effective_prefix)
class obj_function_virtual(obj_function):
  """ Class representing a virtual function.

  In addition to the standard obj_function state this records whether the
  declaration carried a trailing 'const' modifier (|vfmod|).
  """

  def __init__(self, parent, attrib, retval, argval, comment, vfmod):
    """ Initialize from the parsed declaration parts.

    |parent| must be the enclosing obj_class; |vfmod| is the trailing
    modifier captured after the argument list ('const' or empty).
    Raises Exception if |parent| is not an obj_class.
    """
    if not isinstance(parent, obj_class):
      raise Exception('Invalid parent object type')
    obj_function.__init__(self, parent, parent.filename, attrib, retval, argval,
                          comment)
    # Idiomatic boolean assignment; replaces the original if/else that
    # assigned True/False explicitly. Behavior is identical.
    self.isconst = vfmod == 'const'

  def __repr__(self):
    return 'virtual ' + obj_function.__repr__(self) + ';'

  def is_const(self):
    """ Returns true if the method declaration is const. """
    return self.isconst
class obj_argument:
  """ Class representing a function argument (or return value).

  Wraps an obj_analysis of the argument text and classifies it into the
  translator's argument/retval type taxonomy (see translator.README.txt).
  Return values are represented as unnamed arguments.
  """

  def __init__(self, parent, argval):
    if not isinstance(parent, obj_function):
      raise Exception('Invalid parent object type')

    self.parent = parent
    # Analyze the raw argument text in the grandparent (header/class) scope.
    self.type = self.parent.parent.get_analysis(argval)

  def __repr__(self):
    # Reconstruct the C++ declaration: [const ]type[&|*][ name]
    result = ''
    if self.type.is_const():
      result += 'const '
    result += self.type.get_type()
    if self.type.is_byref():
      result += '&'
    elif self.type.is_byaddr():
      result += '*'
    if self.type.has_name():
      result += ' ' + self.type.get_name()
    return result

  def get_name(self):
    """ Return the name for this argument. """
    return self.type.get_name()

  def remove_name(self):
    """ Remove and return the name value. """
    name = self.type.get_name()
    self.type.name = None
    return name

  def get_type(self):
    """ Return an analysis of the argument type based on the class
        definition context.
    """
    return self.type

  def get_types(self, list):
    """ Return a dictionary mapping data types to analyzed values. """
    name = self.type.get_type()
    if not name in list:
      list[name] = self.type

  def needs_attrib_count_func(self):
    """ Returns true if this argument requires a 'count_func' attribute. """
    # A 'count_func' attribute is required for non-const non-string vector
    # attribute types
    return self.type.has_name() and \
        self.type.is_result_vector() and \
        not self.type.is_result_vector_string() and \
        not self.type.is_const()

  def get_attrib_count_func(self):
    """ Returns the count function for this argument. """
    # The 'count_func' attribute value format is name:function
    if not self.parent.has_attrib('count_func'):
      return None
    name = self.type.get_name()
    vals = self.parent.get_attrib_list('count_func')
    for val in vals:
      parts = val.split(':')
      if len(parts) != 2:
        raise Exception("Invalid 'count_func' attribute value for "+ \
                        self.parent.get_qualified_name()+': '+val)
      if parts[0].strip() == name:
        return parts[1].strip()
    return None

  def needs_attrib_default_retval(self):
    """ Returns true if this argument requires a 'default_retval' attribute.
    """
    # A 'default_retval' attribute is required for enumeration return value
    # types.
    return not self.type.has_name() and \
        self.type.is_result_struct() and \
        self.type.is_result_struct_enum()

  def get_attrib_default_retval(self):
    """ Returns the default return value for this argument. """
    return self.parent.get_attrib('default_retval')

  def get_arg_type(self):
    """ Returns the argument type as defined in translator.README.txt.

    Classification order: simple/enum, bool, struct, string, *ptr,
    vector, single map, multi map. Anything else (or a vector/map not
    passed by reference) is 'invalid'.
    """
    if not self.type.has_name():
      raise Exception('Cannot be called for retval types')

    # simple or enumeration type
    if (self.type.is_result_simple() and \
            self.type.get_type() != 'bool') or \
       (self.type.is_result_struct() and \
            self.type.is_result_struct_enum()):
      if self.type.is_byref():
        if self.type.is_const():
          return 'simple_byref_const'
        return 'simple_byref'
      elif self.type.is_byaddr():
        return 'simple_byaddr'
      return 'simple_byval'

    # boolean type
    if self.type.get_type() == 'bool':
      if self.type.is_byref():
        return 'bool_byref'
      elif self.type.is_byaddr():
        return 'bool_byaddr'
      return 'bool_byval'

    # structure type
    if self.type.is_result_struct() and self.type.is_byref():
      if self.type.is_const():
        return 'struct_byref_const'
      return 'struct_byref'

    # string type
    if self.type.is_result_string() and self.type.is_byref():
      if self.type.is_const():
        return 'string_byref_const'
      return 'string_byref'

    # *ptr type (refptr/ownptr/rawptr; prefix comes from the analysis)
    if self.type.is_result_ptr():
      prefix = self.type.get_result_ptr_type_prefix()
      same_side = self.parent.is_same_side(self.type.get_ptr_type())
      if self.type.is_byref():
        if same_side:
          return prefix + 'ptr_same_byref'
        return prefix + 'ptr_diff_byref'
      if same_side:
        return prefix + 'ptr_same'
      return prefix + 'ptr_diff'

    if self.type.is_result_vector():
      # all vector types must be passed by reference
      if not self.type.is_byref():
        return 'invalid'
      if self.type.is_result_vector_string():
        # string vector type
        if self.type.is_const():
          return 'string_vec_byref_const'
        return 'string_vec_byref'
      if self.type.is_result_vector_simple():
        if self.type.get_vector_type() != 'bool':
          # simple/enumeration vector types
          if self.type.is_const():
            return 'simple_vec_byref_const'
          return 'simple_vec_byref'

        # boolean vector types
        if self.type.is_const():
          return 'bool_vec_byref_const'
        return 'bool_vec_byref'
      if self.type.is_result_vector_ptr():
        # *ptr vector types
        prefix = self.type.get_result_vector_ptr_type_prefix()
        same_side = self.parent.is_same_side(self.type.get_ptr_type())
        if self.type.is_const():
          if same_side:
            return prefix + 'ptr_vec_same_byref_const'
          return prefix + 'ptr_vec_diff_byref_const'
        if same_side:
          return prefix + 'ptr_vec_same_byref'
        return prefix + 'ptr_vec_diff_byref'

    # string single map type
    if self.type.is_result_map_single():
      if not self.type.is_byref():
        return 'invalid'
      if self.type.is_const():
        return 'string_map_single_byref_const'
      return 'string_map_single_byref'

    # string multi map type
    if self.type.is_result_map_multi():
      if not self.type.is_byref():
        return 'invalid'
      if self.type.is_const():
        return 'string_map_multi_byref_const'
      return 'string_map_multi_byref'

    return 'invalid'

  def get_retval_type(self):
    """ Returns the retval type as defined in translator.README.txt. """
    if self.type.has_name():
      raise Exception('Cannot be called for argument types')

    # unsupported modifiers
    if self.type.is_const() or self.type.is_byref() or \
        self.type.is_byaddr():
      return 'invalid'

    # void types don't have a return value
    if self.type.get_type() == 'void':
      return 'none'

    if (self.type.is_result_simple() and \
            self.type.get_type() != 'bool') or \
       (self.type.is_result_struct() and self.type.is_result_struct_enum()):
      return 'simple'

    if self.type.get_type() == 'bool':
      return 'bool'

    if self.type.is_result_string():
      return 'string'

    if self.type.is_result_ptr():
      prefix = self.type.get_result_ptr_type_prefix()
      if self.parent.is_same_side(self.type.get_ptr_type()):
        return prefix + 'ptr_same'
      else:
        return prefix + 'ptr_diff'

    return 'invalid'

  def get_retval_default(self, for_capi):
    """ Returns the default return value based on the retval type.

    |for_capi| selects C representations ('0'/'1'/'NULL') instead of C++
    ones ('false'/'CefString()'/'nullptr'). An explicit 'default_retval'
    attribute takes precedence over the type-derived default.
    """
    # start with the default retval attribute, if any.
    retval = self.get_attrib_default_retval()
    if not retval is None:
      if for_capi:
        # apply any appropriate C API translations.
        if retval == 'true':
          return '1'
        if retval == 'false':
          return '0'
      return retval

    # next look at the retval type value.
    type = self.get_retval_type()
    if type == 'simple':
      return self.get_type().get_result_simple_default()
    elif type == 'bool':
      if for_capi:
        return '0'
      return 'false'
    elif type == 'string':
      if for_capi:
        return 'NULL'
      return 'CefString()'
    elif type == 'refptr_same' or type == 'refptr_diff' or \
         type == 'rawptr_same' or type == 'rawptr_diff':
      if for_capi:
        return 'NULL'
      return 'nullptr'
    elif type == 'ownptr_same' or type == 'ownptr_diff':
      if for_capi:
        return 'NULL'
      return 'CefOwnPtr<' + self.type.get_ptr_type() + '>()'

    return ''
class obj_analysis:
""" Class representing an analysis of a data type value. """
def __init__(self, scopelist, value, named):
self.value = value
self.result_type = 'unknown'
self.result_value = None
self.result_default = None
self.ptr_type = None
# parse the argument string
partlist = value.strip().split()
if named == True:
# extract the name value
self.name = partlist[-1]
del partlist[-1]
else:
self.name = None
if len(partlist) == 0:
raise Exception('Invalid argument value: ' + value)
# check const status
if partlist[0] == 'const':
self.isconst = True
del partlist[0]
else:
self.isconst = False
if len(partlist) == 0:
raise Exception('Invalid argument value: ' + value)
# combine the data type
self.type = ' '.join(partlist)
# extract the last character of the data type
endchar = self.type[-1]
# check if the value is passed by reference
if endchar == '&':
self.isbyref = True
self.type = self.type[:-1]
else:
self.isbyref = False
# check if the value is passed by address
if endchar == '*':
self.isbyaddr = True
self.type = self.type[:-1]
else:
self.isbyaddr = False
# see if the value is directly identifiable
if self._check_advanced(self.type) == True:
return
# not identifiable, so look it up
translation = None
for scope in scopelist:
if not isinstance(scope, obj_header) \
and not isinstance(scope, obj_class):
raise Exception('Invalid scope object type')
translation = scope.get_alias_translation(self.type)
if not translation is None:
break
if translation is None:
raise Exception('Failed to translate type: ' + self.type)
# the translation succeeded so keep the result
self.result_type = translation.result_type
self.result_value = translation.result_value
def _check_advanced(self, value):
# check for vectors
if value.find('std::vector') == 0:
self.result_type = 'vector'
val = value[12:-1].strip()
self.result_value = [self._get_basic(val)]
self.result_value[0]['vector_type'] = val
return True
# check for maps
if value.find('std::map') == 0:
self.result_type = 'map'
vals = value[9:-1].split(',')
if len(vals) == 2:
self.result_value = [
self._get_basic(vals[0].strip()),
self._get_basic(vals[1].strip())
]
return True
# check for multimaps
if value.find('std::multimap') == 0:
self.result_type = 'multimap'
vals = value[14:-1].split(',')
if len(vals) == 2:
self.result_value = [
self._get_basic(vals[0].strip()),
self._get_basic(vals[1].strip())
]
return True
# check for basic types
basic = self._get_basic(value)
if not basic is None:
self.result_type = basic['result_type']
self.result_value = basic['result_value']
if 'ptr_type' in basic:
self.ptr_type = basic['ptr_type']
if 'result_default' in basic:
self.result_default = basic['result_default']
return True
return False
def _get_basic(self, value):
# check for string values
if value == "CefString":
return {'result_type': 'string', 'result_value': None}
# check for simple direct translations
if value in _simpletypes.keys():
return {
'result_type': 'simple',
'result_value': _simpletypes[value][0],
'result_default': _simpletypes[value][1],
}
# check if already a C API structure
if value[-2:] == '_t':
return {'result_type': 'structure', 'result_value': value}
# check for CEF reference pointers
p = re.compile('^CefRefPtr<(.*?)>$', re.DOTALL)
list = p.findall(value)
if len(list) == 1:
return {
'result_type': 'refptr',
'result_value': get_capi_name(list[0], True) + '*',
'ptr_type': list[0]
}
# check for CEF owned pointers
p = re.compile('^CefOwnPtr<(.*?)>$', re.DOTALL)
list = p.findall(value)
if len(list) == 1:
return {
'result_type': 'ownptr',
'result_value': get_capi_name(list[0], True) + '*',
'ptr_type': list[0]
}
# check for CEF raw pointers
p = re.compile('^CefRawPtr<(.*?)>$', re.DOTALL)
list = p.findall(value)
if len(list) == 1:
return {
'result_type': 'rawptr',
'result_value': get_capi_name(list[0], True) + '*',
'ptr_type': list[0]
}
# check for CEF structure types
if value[0:3] == 'Cef' and value[-4:] != 'List':
return {
'result_type': 'structure',
'result_value': get_capi_name(value, True)
}
return None
def __repr__(self):
return '(' + self.result_type + ') ' + str(self.result_value)
def has_name(self):
""" Returns true if a name value exists. """
return (not self.name is None)
def get_name(self):
""" Return the name. """
return self.name
def get_value(self):
""" Return the C++ value (type + name). """
return self.value
def get_type(self):
""" Return the C++ type. """
return self.type
def get_ptr_type(self):
""" Return the C++ class type referenced by a CefRefPtr. """
if self.is_result_vector() and self.is_result_vector_ptr():
# return the vector RefPtr type
return self.result_value[0]['ptr_type']
# return the basic RefPtr type
return self.ptr_type
def get_vector_type(self):
""" Return the C++ class type referenced by a std::vector. """
if self.is_result_vector():
return self.result_value[0]['vector_type']
return None
def is_const(self):
""" Returns true if the argument value is constant. """
return self.isconst
def is_byref(self):
""" Returns true if the argument is passed by reference. """
return self.isbyref
def is_byaddr(self):
""" Returns true if the argument is passed by address. """
return self.isbyaddr
def is_result_simple(self):
""" Returns true if this is a simple argument type. """
return (self.result_type == 'simple')
def get_result_simple_type_root(self):
""" Return the simple structure or basic type name. """
return self.result_value
def get_result_simple_type(self):
""" Return the simple type. """
result = ''
if self.is_const():
result += 'const '
result += self.result_value
if self.is_byaddr() or self.is_byref():
result += '*'
return result
  def get_result_simple_default(self):
    """ Return the default value of the basic type. """
    return self.result_default
def is_result_ptr(self):
""" Returns true if this is a *Ptr type. """
return self.is_result_refptr() or self.is_result_ownptr() or \
self.is_result_rawptr()
def get_result_ptr_type_root(self):
""" Return the *Ptr type structure name. """
return self.result_value[:-1]
def get_result_ptr_type(self, defined_structs=[]):
""" Return the *Ptr type. """
result = ''
if not self.result_value[:-1] in defined_structs:
result += 'struct _'
result += self.result_value
if self.is_byref() or self.is_byaddr():
result += '*'
return result
def get_result_ptr_type_prefix(self):
""" Returns the *Ptr type prefix. """
if self.is_result_refptr():
return 'ref'
if self.is_result_ownptr():
return 'own'
if self.is_result_rawptr():
return 'raw'
raise Exception('Not a pointer type')
def is_result_refptr(self):
""" Returns true if this is a RefPtr type. """
return (self.result_type == 'refptr')
def is_result_ownptr(self):
""" Returns true if this is a OwnPtr type. """
return (self.result_type == 'ownptr')
def is_result_rawptr(self):
""" Returns true if this is a RawPtr type. """
return (self.result_type == 'rawptr')
def is_result_struct(self):
""" Returns true if this is a structure type. """
return (self.result_type == 'structure')
def is_result_struct_enum(self):
""" Returns true if this struct type is likely an enumeration. """
# structure values that are passed by reference or address must be
# structures and not enumerations
if not self.is_byref() and not self.is_byaddr():
return True
return False
def get_result_struct_type(self, defined_structs=[]):
""" Return the structure or enumeration type. """
result = ''
is_enum = self.is_result_struct_enum()
if not is_enum:
if self.is_const():
result += 'const '
if not self.result_value in defined_structs:
result += 'struct _'
result += self.result_value
if not is_enum:
result += '*'
return result
def is_result_string(self):
""" Returns true if this is a string type. """
return (self.result_type == 'string')
def get_result_string_type(self):
""" Return the string type. """
if not self.has_name():
# Return values are string structs that the user must free. Use
# the name of the structure as a hint.
return 'cef_string_userfree_t'
elif not self.is_const() and (self.is_byref() or self.is_byaddr()):
# Parameters passed by reference or address. Use the normal
# non-const string struct.
return 'cef_string_t*'
# Const parameters use the const string struct.
return 'const cef_string_t*'
def is_result_vector(self):
""" Returns true if this is a vector type. """
return (self.result_type == 'vector')
def is_result_vector_string(self):
""" Returns true if this is a string vector. """
return self.result_value[0]['result_type'] == 'string'
  def is_result_vector_simple(self):
    """ Returns true if this is a simple type vector. """
    return self.result_value[0]['result_type'] == 'simple'
def is_result_vector_ptr(self):
""" Returns true if this is a *Ptr vector. """
return self.is_result_vector_refptr() or \
self.is_result_vector_ownptr() or \
self.is_result_vector_rawptr()
def get_result_vector_ptr_type_prefix(self):
""" Returns the *Ptr type prefix. """
if self.is_result_vector_refptr():
return 'ref'
if self.is_result_vector_ownptr():
return 'own'
if self.is_result_vector_rawptr():
return 'raw'
raise Exception('Not a pointer type')
def is_result_vector_refptr(self):
""" Returns true if this is a RefPtr vector. """
return self.result_value[0]['result_type'] == 'refptr'
def is_result_vector_ownptr(self):
""" Returns true if this is a OwnPtr vector. """
return self.result_value[0]['result_type'] == 'ownptr'
def is_result_vector_rawptr(self):
""" Returns true if this is a RawPtr vector. """
return self.result_value[0]['result_type'] == 'rawptr'
def get_result_vector_type_root(self):
""" Return the vector structure or basic type name. """
return self.result_value[0]['result_value']
def get_result_vector_type(self, defined_structs=[]):
""" Return the vector type. """
if not self.has_name():
raise Exception('Cannot use vector as a return type')
type = self.result_value[0]['result_type']
value = self.result_value[0]['result_value']
result = {}
if type == 'string':
result['value'] = 'cef_string_list_t'
result['format'] = 'single'
return result
if type == 'simple':
str = value
if self.is_const():
str += ' const'
str += '*'
result['value'] = str
elif type == 'refptr' or type == 'ownptr' or type == 'rawptr':
str = ''
if not value[:-1] in defined_structs:
str += 'struct _'
str += value
if self.is_const():
str += ' const'
str += '*'
result['value'] = str
else:
raise Exception('Unsupported vector type: ' + type)
# vector values must be passed as a value array parameter
# and a size parameter
result['format'] = 'multi-arg'
return result
def is_result_map(self):
""" Returns true if this is a map type. """
return (self.result_type == 'map' or self.result_type == 'multimap')
def is_result_map_single(self):
""" Returns true if this is a single map type. """
return (self.result_type == 'map')
def is_result_map_multi(self):
""" Returns true if this is a multi map type. """
return (self.result_type == 'multimap')
def get_result_map_type(self, defined_structs=[]):
""" Return the map type. """
if not self.has_name():
raise Exception('Cannot use map as a return type')
if self.result_value[0]['result_type'] == 'string' \
and self.result_value[1]['result_type'] == 'string':
if self.result_type == 'map':
return {'value': 'cef_string_map_t', 'format': 'single'}
elif self.result_type == 'multimap':
return {'value': 'cef_string_multimap_t', 'format': 'multi'}
raise Exception('Only mappings of strings to strings are supported')
def get_capi(self, defined_structs=[]):
""" Format the value for the C API. """
result = ''
format = 'single'
if self.is_result_simple():
result += self.get_result_simple_type()
elif self.is_result_ptr():
result += self.get_result_ptr_type(defined_structs)
elif self.is_result_struct():
result += self.get_result_struct_type(defined_structs)
elif self.is_result_string():
result += self.get_result_string_type()
elif self.is_result_map():
resdict = self.get_result_map_type(defined_structs)
if resdict['format'] == 'single' or resdict['format'] == 'multi':
result += resdict['value']
else:
raise Exception('Unsupported map type')
elif self.is_result_vector():
resdict = self.get_result_vector_type(defined_structs)
if resdict['format'] != 'single':
format = resdict['format']
result += resdict['value']
if self.has_name():
result += ' ' + self.get_name()
return {'format': format, 'value': result}
# test the module
if __name__ == "__main__":
  import pprint
  import sys

  # verify that the correct number of command-line arguments are provided
  if len(sys.argv) != 2:
    sys.stderr.write('Usage: ' + sys.argv[0] + ' <directory>')
    sys.exit()

  pp = pprint.PrettyPrinter(indent=4)

  # create the header object and parse the target directory
  header = obj_header()
  header.add_directory(sys.argv[1])

  # output the type mapping
  type_map = {}
  header.get_types(type_map)
  pp.pprint(type_map)
  sys.stdout.write('\n')

  # output the parsed C++ data
  sys.stdout.write(str(header))

  # output the C API formatted data
  defined_names = header.get_defined_structs()
  output = ''

  # global functions
  funcs = header.get_funcs()
  if len(funcs) > 0:
    for func in funcs:
      output += func.get_capi_proto(defined_names) + ';\n'
    output += '\n'

  for cls in header.get_classes():
    # virtual functions are inside a structure
    output += 'struct ' + cls.get_capi_name() + '\n{\n'
    funcs = cls.get_virtual_funcs()
    if len(funcs) > 0:
      for func in funcs:
        output += '\t' + func.get_capi_proto(defined_names) + ';\n'
    output += '}\n\n'
    defined_names.append(cls.get_capi_name())

    # static functions become global
    funcs = cls.get_static_funcs()
    if len(funcs) > 0:
      for func in funcs:
        output += func.get_capi_proto(defined_names) + ';\n'
      output += '\n'
  sys.stdout.write(output)
|
[
"magreenblatt@gmail.com"
] |
magreenblatt@gmail.com
|
63bfbaf1cc2e5f3abf32344a9ebfe404aa6104e6
|
5bf4a43469b8f8ddeb924a1a7a1073a804151fef
|
/InputGUI/VideoPlayer.py
|
8b4c11b2c13e174d2e23ae68bf7e7e89f2d82eb9
|
[] |
no_license
|
MSauerM/CVFouldetection
|
b52eee085a12630a8f32452edf6f6d86ec48b7d3
|
15d4fcbb1826d7fb9bb640b10981599fef627903
|
refs/heads/master
| 2023-07-25T14:41:00.179637
| 2021-09-01T20:53:56
| 2021-09-01T20:53:56
| 357,150,396
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,436
|
py
|
import sys
from PyQt5.QtCore import QDir, Qt, QUrl
from PyQt5.QtMultimedia import QMediaPlayer, QMediaContent
from PyQt5.QtMultimediaWidgets import QVideoWidget
from PyQt5.QtWidgets import QMainWindow, QStyle, QPushButton, QSlider, QWidget, QHBoxLayout, QVBoxLayout
'''angelehnt an https://pythonprogramminglanguage.com/pyqt5-video-widget/'''
class VideoPlayer(QMainWindow):
    """Minimal video player window: a video surface, a play/pause button
    and a seek slider. Load media via loadFile()."""

    def __init__(self, parent = None):
        """Build the widget hierarchy and wire up the media player signals."""
        super(VideoPlayer, self).__init__(parent)
        self.setWindowTitle("Video Player")
        # backend player rendering into a QVideoWidget
        self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.VideoSurface)
        videoWidget = QVideoWidget()
        # Play Button
        self.playButton = QPushButton()
        self.playButton.setEnabled(False)
        self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
        self.playButton.clicked.connect(self.play)
        # seek slider
        # NOTE(review): the slider range is never updated from the media
        # duration, so it stays (0, 0) -- confirm whether seeking is intended
        self.positionSlider = QSlider(Qt.Horizontal)
        self.positionSlider.setRange(0, 0)
        self.positionSlider.sliderMoved.connect(self.setPosition)
        mainWidget = QWidget(self)
        self.setCentralWidget(mainWidget)
        # button and slider laid out in a row below the video
        controlLayout = QHBoxLayout()
        controlLayout.setContentsMargins(0, 0, 0, 0)
        controlLayout.addWidget(self.playButton)
        controlLayout.addWidget(self.positionSlider)
        layout = QVBoxLayout()
        layout.addWidget(videoWidget)
        layout.addLayout(controlLayout)
        mainWidget.setLayout(layout)
        self.mediaPlayer.setVideoOutput(videoWidget)
        # keep the play/pause icon and the slider in sync with the player
        self.mediaPlayer.stateChanged.connect(self.mediaStateChanged)
        self.mediaPlayer.positionChanged.connect(self.positionChanged)

    def play(self):
        """Toggle between playing and paused."""
        if self.mediaPlayer.state() == QMediaPlayer.PlayingState:
            self.mediaPlayer.pause()
        else:
            self.mediaPlayer.play()

    def positionChanged(self, position):
        """Slot: move the slider to the new playback position."""
        self.positionSlider.setValue(position)

    def mediaStateChanged(self, state):
        """Slot: swap the play/pause icon to match the player state."""
        if self.mediaPlayer.state() == QMediaPlayer.PlayingState:
            self.playButton.setIcon(
                self.style().standardIcon(QStyle.SP_MediaPause))
        else:
            self.playButton.setIcon(
                self.style().standardIcon(QStyle.SP_MediaPlay))

    def setPosition(self, position):
        """Slot: seek the media player to the slider position."""
        self.mediaPlayer.setPosition(position)

    def loadFile(self, fileName):
        """Load a local media file into the player."""
        self.mediaPlayer.setMedia(QMediaContent(QUrl.fromLocalFile(fileName)))
|
[
"matthias.sauer97@gmx.net"
] |
matthias.sauer97@gmx.net
|
c94623fa4a303341d2a14bd2502ddbb12809ef67
|
75fa11b13ddab8fd987428376f5d9c42dff0ba44
|
/metadata-ingestion/tests/integration/ldap/test_ldap.py
|
3e76f13fc823d2cba27669df218aeac46589492f
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"MIT"
] |
permissive
|
RyanHolstien/datahub
|
163d0ff6b4636919ed223ee63a27cba6db2d0156
|
8cf299aeb43fa95afb22fefbc7728117c727f0b3
|
refs/heads/master
| 2023-09-04T10:59:12.931758
| 2023-08-21T18:33:10
| 2023-08-21T18:33:10
| 246,685,891
| 0
| 0
|
Apache-2.0
| 2021-02-16T23:48:05
| 2020-03-11T21:43:58
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 5,662
|
py
|
import time
import pytest
from datahub.ingestion.run.pipeline import Pipeline
from tests.test_helpers import mce_helpers
from tests.test_helpers.docker_helpers import wait_for_port
@pytest.mark.integration
def test_ldap_ingest(docker_compose_runner, pytestconfig, tmp_path, mock_time):
    """Ingest the sample LDAP tree and compare output against the golden file."""
    test_resources_dir = pytestconfig.rootpath / "tests/integration/ldap"

    with docker_compose_runner(
        test_resources_dir / "docker-compose.yml", "ldap"
    ) as docker_services:
        # The openldap container loads the sample data after exposing the port publicly. As such,
        # we must wait a little bit extra to ensure that the sample data is loaded.
        wait_for_port(docker_services, "openldap", 389)
        # without this ldap server can provide empty results
        time.sleep(5)

        recipe = {
            "run_id": "ldap-test",
            "source": {
                "type": "ldap",
                "config": {
                    "ldap_server": "ldap://localhost",
                    "ldap_user": "cn=admin,dc=example,dc=org",
                    "ldap_password": "admin",
                    "base_dn": "dc=example,dc=org",
                    "group_attrs_map": {
                        "members": "memberUid",
                    },
                    "custom_props_list": ["givenName"],
                },
            },
            "sink": {
                "type": "file",
                "config": {
                    "filename": f"{tmp_path}/ldap_mces.json",
                },
            },
        }
        pipeline = Pipeline.create(recipe)
        pipeline.run()
        pipeline.raise_from_status()

        # verify the emitted metadata events match the checked-in golden file
        mce_helpers.check_golden_file(
            pytestconfig,
            output_path=tmp_path / "ldap_mces.json",
            golden_path=test_resources_dir / "ldap_mces_golden.json",
        )
@pytest.mark.integration
def test_ldap_memberof_ingest(docker_compose_runner, pytestconfig, tmp_path, mock_time):
    """Ingest only members of the HR Department group via an LDAP filter."""
    test_resources_dir = pytestconfig.rootpath / "tests/integration/ldap"

    with docker_compose_runner(
        test_resources_dir / "docker-compose.yml", "ldap"
    ) as docker_services:
        # The openldap container loads the sample data after exposing the port publicly. As such,
        # we must wait a little bit extra to ensure that the sample data is loaded.
        wait_for_port(docker_services, "openldap", 389)
        # without this ldap server can provide empty results
        time.sleep(5)

        recipe = {
            "run_id": "ldap-test",
            "source": {
                "type": "ldap",
                "config": {
                    "ldap_server": "ldap://localhost",
                    "ldap_user": "cn=admin,dc=example,dc=org",
                    "ldap_password": "admin",
                    "base_dn": "dc=example,dc=org",
                    "filter": "(memberOf=cn=HR Department,dc=example,dc=org)",
                    "attrs_list": ["+", "*"],
                    "group_attrs_map": {
                        "members": "member",
                    },
                },
            },
            "sink": {
                "type": "file",
                "config": {
                    "filename": f"{tmp_path}/ldap_memberof_mces.json",
                },
            },
        }
        pipeline = Pipeline.create(recipe)
        pipeline.run()
        pipeline.raise_from_status()

        # verify the emitted metadata events match the checked-in golden file
        mce_helpers.check_golden_file(
            pytestconfig,
            output_path=tmp_path / "ldap_memberof_mces.json",
            golden_path=test_resources_dir / "ldap_memberof_mces_golden.json",
        )
@pytest.mark.integration
def test_ldap_ingest_with_email_as_username(
    docker_compose_runner, pytestconfig, tmp_path, mock_time
):
    """Ingest the sample LDAP tree using email addresses as usernames."""
    test_resources_dir = pytestconfig.rootpath / "tests/integration/ldap"

    with docker_compose_runner(
        test_resources_dir / "docker-compose.yml", "ldap"
    ) as docker_services:
        # The openldap container loads the sample data after exposing the port publicly. As such,
        # we must wait a little bit extra to ensure that the sample data is loaded.
        wait_for_port(docker_services, "openldap", 389)
        time.sleep(5)

        recipe = {
            "run_id": "ldap-test",
            "source": {
                "type": "ldap",
                "config": {
                    "ldap_server": "ldap://localhost",
                    "ldap_user": "cn=admin,dc=example,dc=org",
                    "ldap_password": "admin",
                    "base_dn": "dc=example,dc=org",
                    "user_attrs_map": {"email": "mail"},
                    "group_attrs_map": {
                        "members": "memberUid",
                        "email": "mail",
                    },
                    "use_email_as_username": True,
                    "custom_props_list": ["givenName"],
                },
            },
            "sink": {
                "type": "file",
                "config": {
                    "filename": f"{tmp_path}/ldap_mces.json",
                },
            },
        }
        pipeline = Pipeline.create(recipe)
        pipeline.run()
        pipeline.raise_from_status()

        # verify the emitted metadata events match the checked-in golden file
        mce_helpers.check_golden_file(
            pytestconfig,
            output_path=tmp_path / "ldap_mces.json",
            golden_path=test_resources_dir / "ldap_mces_golden.json",
        )
|
[
"noreply@github.com"
] |
noreply@github.com
|
e2ec8e1807b2ada32487f68445c59d81a1985ee4
|
ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3
|
/python/baiduads-sdk-auto/test/test_update_material_bind_response_wrapper_body.py
|
0b1aeca6da72b0508699b95f4a1b46ff5039dc1e
|
[
"Apache-2.0"
] |
permissive
|
baidu/baiduads-sdk
|
24c36b5cf3da9362ec5c8ecd417ff280421198ff
|
176363de5e8a4e98aaca039e4300703c3964c1c7
|
refs/heads/main
| 2023-06-08T15:40:24.787863
| 2023-05-20T03:40:51
| 2023-05-20T03:40:51
| 446,718,177
| 16
| 11
|
Apache-2.0
| 2023-06-02T05:19:40
| 2022-01-11T07:23:17
|
Python
|
UTF-8
|
Python
| false
| false
| 996
|
py
|
"""
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.materialbindmod.model.material_bind_update_response import MaterialBindUpdateResponse
globals()['MaterialBindUpdateResponse'] = MaterialBindUpdateResponse
from baiduads.materialbindmod.model.update_material_bind_response_wrapper_body import UpdateMaterialBindResponseWrapperBody
class TestUpdateMaterialBindResponseWrapperBody(unittest.TestCase):
    """UpdateMaterialBindResponseWrapperBody unit test stubs"""

    def setUp(self):
        # generated stub: no per-test fixtures required
        pass

    def tearDown(self):
        # generated stub: nothing to clean up
        pass

    def testUpdateMaterialBindResponseWrapperBody(self):
        """Test UpdateMaterialBindResponseWrapperBody"""
        # FIXME: construct object with mandatory attributes with example values
        # model = UpdateMaterialBindResponseWrapperBody()  # noqa: E501
        pass
# run the generated test stubs when executed directly
if __name__ == '__main__':
    unittest.main()
|
[
"v_wangzichen02@baidu.com"
] |
v_wangzichen02@baidu.com
|
e0fab741660cd79a9bb39662c41807ef1d654d98
|
168978c0d4e33f9a2e614c51e77a75cd6393def8
|
/blog/migrations/0002_comment.py
|
153650079fc199576b2a749ee91fe89c86e6be33
|
[] |
no_license
|
Faisal-Zamir/Online_Course
|
cbb548a9c98ba24c907be584dd2aa418235bd8a2
|
73a3c5f84af68acafc85d6762f28858900c688ef
|
refs/heads/master
| 2022-12-06T08:32:45.940898
| 2020-08-24T03:15:19
| 2020-08-24T03:15:19
| 289,815,942
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 991
|
py
|
# Generated by Django 3.0.7 on 2020-08-23 08:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration that adds the blog Comment model.
    # Avoid hand-editing migrations that may already be applied; add a new
    # migration for any further schema changes instead.

    dependencies = [
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=80)),
                ('email', models.EmailField(max_length=254)),
                ('body', models.TextField()),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('active', models.BooleanField(default=False)),  # new comments start inactive (active=False)
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
            ],
            options={
                'ordering': ['-created_on'],  # newest comments first
            },
        ),
    ]
|
[
"Jafriweb@gmail.com"
] |
Jafriweb@gmail.com
|
8dc18ef2f8c1b9adeb021bbd1cc39ef1d13084b3
|
85ab4cc5e16e2e51fee8488f47f4ed1ecd043c61
|
/examples/tracing/kvm_hypercall.py
|
322bb8e50098e60cc6f8e678268741876e74d888
|
[
"Apache-2.0"
] |
permissive
|
polycube-network/bcc
|
8fa018358f03fc2a3444910fade338de6933babf
|
b8158f43ceb884a9eef456d30f4f413604397a6a
|
refs/heads/master
| 2022-06-25T14:00:23.852780
| 2020-01-27T11:16:21
| 2020-01-27T11:16:21
| 161,410,720
| 2
| 4
|
Apache-2.0
| 2020-06-25T13:38:04
| 2018-12-12T00:26:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,524
|
py
|
#!/usr/bin/env python
#
# kvm_hypercall.py
#
# Demonstrates stateful kvm_entry and kvm_exit recording along with the
# associated hypercall when exit_reason is VMCALL. See kvm_hypercall.txt
# for usage
#
# REQUIRES: Linux 4.7+ (BPF_PROG_TYPE_TRACEPOINT support)
#
# Copyright (c) 2017 ShiftLeft Inc.
#
# Author(s):
# Suchakrapani Sharma <suchakra@shiftleft.io>
from __future__ import print_function
from bcc import BPF
# load BPF program
b = BPF(text="""
#define EXIT_REASON 18
BPF_HASH(start, u8, u8);
TRACEPOINT_PROBE(kvm, kvm_exit) {
u8 e = EXIT_REASON;
u8 one = 1;
if (args->exit_reason == EXIT_REASON) {
bpf_trace_printk("KVM_EXIT exit_reason : %d\\n", args->exit_reason);
start.update(&e, &one);
}
return 0;
}
TRACEPOINT_PROBE(kvm, kvm_entry) {
u8 e = EXIT_REASON;
u8 zero = 0;
u8 *s = start.lookup(&e);
if (s != NULL && *s == 1) {
bpf_trace_printk("KVM_ENTRY vcpu_id : %u\\n", args->vcpu_id);
start.update(&e, &zero);
}
return 0;
}
TRACEPOINT_PROBE(kvm, kvm_hypercall) {
u8 e = EXIT_REASON;
u8 zero = 0;
u8 *s = start.lookup(&e);
if (s != NULL && *s == 1) {
bpf_trace_printk("HYPERCALL nr : %d\\n", args->nr);
}
return 0;
};
""")
# header
print("%-18s %-16s %-6s %s" % ("TIME(s)", "COMM", "PID", "EVENT"))
# format output
# read trace_pipe records forever; skip records that fail to parse
while 1:
    try:
        (task, pid, cpu, flags, ts, msg) = b.trace_fields()
    except ValueError:
        continue
    print("%-18.9f %-16s %-6d %s" % (ts, task, pid, msg))
|
[
"goldshtn@gmail.com"
] |
goldshtn@gmail.com
|
15de457978364d20c77a1b440c0744b05fc37897
|
79c2638dba0f7efe1530a08be0abd9ccc86de14a
|
/leagues/views.py
|
ce4bba6f5a98690911dbc3278fe9aebc4601045f
|
[] |
no_license
|
cristianrdev/BD_teams_leagues_players
|
06d0e505a264cc1492d87e61b362031baa0427f2
|
49868c90e5cf5437009c2fbe012efdcfaae83866
|
refs/heads/main
| 2023-04-14T20:31:52.968883
| 2021-04-30T04:51:10
| 2021-04-30T04:51:10
| 363,029,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,405
|
py
|
from django.shortcuts import render, redirect
from .models import League, Team, Player
from . import team_maker
def index(request):
    """Render the index page.

    GET: list every league, team and player. POST: apply the numbered
    filter submitted in the 'filtro' form field (1-16) and hide the
    sections the filter does not apply to. Unknown filter values now fall
    back to the full unfiltered listing instead of crashing.
    """
    if request.method == 'GET':
        print('------------es un GET-----------------')
        context = {
            "leagues": League.objects.all(),
            "teams": Team.objects.all(),
            "players": Player.objects.all(),
            "title_leagues": '',
            "title_teams": '',
            "title_players": '',
        }
        return render(request, "leagues/index.html", context)
    else:
        print('------------es un POST-----------------')
        # parse the submitted filter value once instead of in every branch
        filtro = int(request.POST['filtro'])
        # Default context: show everything. This also fixes the
        # UnboundLocalError previously raised when an unknown filter value
        # was submitted, since each branch below simply overrides this.
        context = {
            "leagues": League.objects.all(),
            "teams": Team.objects.all(),
            "players": Player.objects.all(),
            "title_leagues": '',
            "title_teams": '',
            "title_players": '',
        }
        if filtro == 1:
            print('es el filtro 1***********')
            context = {
                "leagues": League.objects.filter(sport='Baseball'),
                "title_teams": 'hidden',
                "title_players": 'hidden',
            }
        elif filtro == 2:
            print('es el filtro 2***********')
            context = {
                "leagues": League.objects.filter(name__contains='Women'),
                "title_teams": 'hidden',
                "title_players": 'hidden',
            }
        elif filtro == 3:
            print('es el filtro 3***********')
            context = {
                "leagues": League.objects.filter(sport__contains='Hockey'),
                "title_teams": 'hidden',
                "title_players": 'hidden',
            }
        elif filtro == 4:
            print('es el filtro 4***********')
            context = {
                "leagues": League.objects.exclude(sport='Football'),
                "title_teams": 'hidden',
                "title_players": 'hidden',
            }
        elif filtro == 5:
            print('es el filtro 5***********')
            context = {
                "leagues": League.objects.filter(name__contains='Conference'),
                "title_teams": 'hidden',
                "title_players": 'hidden',
            }
        elif filtro == 6:
            print('es el filtro 6***********')
            context = {
                "leagues": League.objects.filter(name__contains='Atlantic'),
                "title_teams": 'hidden',
                "title_players": 'hidden',
            }
        elif filtro == 7:
            print('es el filtro 7***********')
            context = {
                "title_leagues": 'hidden',
                "teams": Team.objects.filter(location='Dallas'),
                "title_players": 'hidden',
            }
        elif filtro == 8:
            print('es el filtro 8***********')
            context = {
                "title_leagues": 'hidden',
                "teams": Team.objects.filter(team_name='Raptors'),
                "title_players": 'hidden',
            }
        elif filtro == 9:
            print('es el filtro 9***********')
            context = {
                "title_leagues": 'hidden',
                "teams": Team.objects.filter(location__contains='City'),
                "title_players": 'hidden',
            }
        elif filtro == 10:
            print('es el filtro 10***********')
            context = {
                "title_leagues": 'hidden',
                "teams": Team.objects.filter(team_name__startswith='T'),
                "title_players": 'hidden',
            }
        elif filtro == 11:
            print('es el filtro 11***********')
            context = {
                "title_leagues": 'hidden',
                "teams": Team.objects.all().order_by('location'),
                "title_players": 'hidden',
            }
        elif filtro == 12:
            print('es el filtro 12***********')
            context = {
                "title_leagues": 'hidden',
                "teams": Team.objects.all().order_by('-team_name'),
                "title_players": 'hidden',
            }
        elif filtro == 13:
            print('es el filtro 13***********')
            context = {
                "title_leagues": 'hidden',
                "title_teams": 'hidden',
                "players": Player.objects.filter(last_name='Cooper'),
            }
        elif filtro == 14:
            print('es el filtro 14***********')
            context = {
                "title_leagues": 'hidden',
                "title_teams": 'hidden',
                "players": Player.objects.filter(first_name='Joshua'),
            }
        elif filtro == 15:
            print('es el filtro 15***********')
            context = {
                "title_leagues": 'hidden',
                "title_teams": 'hidden',
                "players": Player.objects.filter(last_name='Cooper').exclude(first_name='Joshua'),
            }
        elif filtro == 16:
            print('es el filtro 16***********')
            context = {
                "title_leagues": 'hidden',
                "title_teams": 'hidden',
                "players": Player.objects.filter(first_name__in=['Alexander', 'Wyatt']),
            }
        return render(request, "leagues/index.html", context)
def make_data(request):
    """Populate the database with random sample leagues, teams and players,
    then redirect back to the index page."""
    team_maker.gen_leagues(10)
    team_maker.gen_teams(50)
    team_maker.gen_players(200)
    return redirect("index")
|
[
"crrojasserrano@gmail.com"
] |
crrojasserrano@gmail.com
|
03159707aa89c1ada2d4e3e3109c8a8cff6de8ff
|
9b3df22a5352484d7cab3f3d32b2404a951b9d53
|
/server/test_python_client.py
|
e2cebaaca06ecb4ed7a2e851e729a6b4332d487b
|
[] |
no_license
|
nickmoop/untitled_messenger
|
24694b41f623ca42abab790d6569108cc3dba46b
|
089c5c06f94aee9bf7273e8c56c35b0872e63f83
|
refs/heads/master
| 2021-08-06T10:00:45.859278
| 2017-11-04T21:55:04
| 2017-11-04T21:55:04
| 109,385,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,083
|
py
|
from socket import *
SERVER_HOSTNAME = 'localhost'
SERVER_PORT = 50007
def send(socket_object, message=None):
    """Encode *message* and write it to the socket; empty messages are
    announced and skipped."""
    if not message:
        print('Empty message')
        return
    print('Encode message: {}'.format(message))
    encoded_lines = [message.encode()]
    print('Send message')
    for encoded in encoded_lines:
        socket_object.send(encoded)
def connect():
    """Open a TCP connection to the configured server and return the socket."""
    sock = socket(AF_INET, SOCK_STREAM)
    sock.connect((SERVER_HOSTNAME, SERVER_PORT))
    print('Connected to socket with host: {}, and port: {}'.format(
        SERVER_HOSTNAME, SERVER_PORT)
    )
    return sock
def disconnect(socket_object):
    """Announce and close the given socket."""
    print('Disconnect from socket')
    socket_object.close()
if __name__ == '__main__':
    import sys
    import time
    # the message to send may be overridden from the command line
    message_to_send = 'Test message for send to server'
    if len(sys.argv) >= 2:
        message_to_send = ' '.join(sys.argv[1:])
    test_socket = connect()
    send(test_socket, message=message_to_send)
    # keep the connection open briefly so the server can process the message
    time.sleep(3)
    send(test_socket, message='I will disconnect now')
    disconnect(test_socket)
|
[
"nicolay.chirik@gmail.com"
] |
nicolay.chirik@gmail.com
|
e1655716d0d948d2cb64544b273a2246f9206b96
|
a239382d2752cb9d04979c1eebfbfb46bd11329a
|
/face_detection_project_.py
|
996ac0c233f83db38ccc52cd44fec7b76207c98f
|
[] |
no_license
|
Gurwinder-Kaur98/facial-recognition-system-project
|
c15988257ffbb563f18e50f0ec2bc6886a452c89
|
fa660ab7dea2cb8162a1485d98c7aef2c9fb6378
|
refs/heads/main
| 2023-03-23T15:29:11.755162
| 2021-03-19T09:20:53
| 2021-03-19T09:20:53
| 349,356,497
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,459
|
py
|
'''SINCE DATABASE IS GIVING US QUICK ACCESS TO PICTURES AND GETTING DATA IS MUCH EASIER IN DATABASE THAN LISTS
THAT'S WHY WE ARE USING MYSQL DATABSE TO STORE USERS DATA LIKE IMAGES,NAME,ID AND ATTENDENCE DATA'''
import mysql.connector as sql
import os
import cv2
import numpy as np
from datetime import date
from datetime import datetime
import face_recognition
import csv
#connecting with MYSQL DATABASE
connection=sql.connect(
    host='localhost',
    user='root',
    password='*****'  # NOTE(review): placeholder credential; load real secrets from config/env, not source
)
# buffered cursor so results are fetched eagerly and fetchone() works reliably
cursor=connection.cursor(buffered=True)
#CREATING DATABASE AND TABLES TO STORE DATA
'''
cursor.execute('CREATE DATABASE pics_of_permitted_users ')
cursor.execute('USE pics_of_permitted_users')
cursor.execute('CREATE TABLE permitted_users_inform(id INT AUTO_INCREMENT PRIMARY KEY,name VARCHAR(255),image_path VARCHAR(1024))')
cursor.execute('CREATE TABLE Attendence_Record_Table(date VARCHAR(255),name VARCHAR(255),time VARCHAR(255))')
'''
# STORING NAMES AND IMAGES PATH INTO TABLE
'''
path=' write your location of images of permitted users'
for name in mylist:
pic_path= path+'/'+name
name_of_user=os.path.splitext(name)[0]
query='INSERT INTO permitted_users_inform(name,image_path) VALUES(%s,%s)'
val=(name_of_user,pic_path)
cursor.execute(query,val)
cursor.execute("SELECT * FROM permitted_users_inform")
'''
# READING AND ENCODING ALL THE IMAGES OF USERS
cursor.execute('USE pics_of_permitted_users')
cursor.execute("SELECT * FROM permitted_users_inform")
# ENCODING THE IMAGES
# each row is presumably (id, name, image_path) per the table creation
# comment above, so touple[2] is the image path -- TODO confirm schema
encoded_images_list=[]
for touple in cursor:
    curr_img=cv2.imread(touple[2]) # GETTING THE INFORMATION OF IMAGES
    curr_img=cv2.cvtColor(curr_img,cv2.COLOR_BGR2RGB)#CONVERTING THE IMAGES INO rgb
    # NOTE(review): [0] raises IndexError when no face is found in an image
    encode=face_recognition.face_encodings(curr_img)[0]
    encoded_images_list.append(encode)
# getting data from webcam
print('encoding has been done')
# CREATING FUNCTION WHICH WILL RECORD THE ATTENDENCE DATA
def Attendence(user_name) :
    """Record today's attendance for user_name, at most once per day.

    Looks up Attendence_Record_Table for a row matching (user_name, today);
    if none exists, inserts a new (date, name, time) row. Relies on the
    module-level MySQL `cursor` and `connection` objects.
    """
    cursor.execute(' USE pics_of_permitted_users')
    # Today's date and current wall-clock time, formatted as stored in the table.
    dt=date.today()
    dtt=dt.strftime("%Y-%m-%d")
    now=datetime.now()
    tt=now.strftime("%H:%M:%S")
    # Has this user already been marked present today?
    query='SELECT name FROM Attendence_Record_Table WHERE name= %s AND date=%s'
    val=(user_name,dtt)
    cursor.execute(query,val)
    result=cursor.fetchone()
    if result is None:
        # First sighting today: insert one attendance row.
        query='INSERT INTO Attendence_Record_Table(date,name,time) VALUES(%s,%s,%s)'
        val=(dtt,user_name,tt)
        cursor.execute(query,val)
        print('Attendence Recorded')
    connection.commit()
cap=cv2.VideoCapture(0)
while True:
success,img=cap.read()
reduced_size_img=cv2.resize(img,(0,0),None,0.25,0.25) # RESIZING THE IMAGE CAPTURED BY WEBCAM
reduced_size_imag=cv2.cvtColor(reduced_size_img,cv2.COLOR_BGR2RGB)#CONVERTING IT INTO RGB SINCE COLOURS ARE NOT REQUIRED IN IMAGE
face_frame=face_recognition.face_locations(reduced_size_imag)
encodedframe=face_recognition.face_encodings(reduced_size_imag,face_frame)#ENCODINGS THE IMAGE CAPTURED BY WEBCAM
for encodeface,faceloc in zip(encodedframe,face_frame):
matching=face_recognition.compare_faces(encoded_images_list,encodeface)
faceDis=face_recognition.face_distance(encoded_images_list,encodeface)
matchingindex=np.argmin(faceDis)
matching_index=matchingindex
matchingindex+=1
if matching[matching_index]:
query='SELECT name FROM permitted_users_inform WHERE id = %s'
match_index1=matchingindex.item()
cursor.execute(query,(match_index1,))
name_of_user=cursor.fetchone()[0]
name=str(name_of_user).upper() # STORING ATTENDENCE OF PERSON IN ATTENDENCE TABLE
print(name)
# CREATING RECTANGLE OVER THE FACE OF PREDICTED USER AND SHOWING HIS/HER NAME
y1,x2,y2,x1=faceloc
y1,x2,y2,x1=y1*4,x2*4,y2*4,x1*4 # multiplying with 4 because we are restoring the size
cv2.rectangle(img,(x1,y1),(x2,y2),(0,255,0),2)
cv2.rectangle(img,(x1,y2-35),(x2,y2),(0,255,0),cv2.FILLED)
cv2.putText(img,name,(x1+6,y2-6),cv2.FONT_HERSHEY_PLAIN,1,(255,255,255),2)
Attendence(name)
connection.commit()
cv2.imshow('webcam',img)
cv2.waitKey(1)
''''
# WE CAN ALSO MAKE A EXCEL FILE AFTER COMPLETING THE ATTENDENCE
cursor.execute('SELECT * FROM Attendence_Record_Table')
with open ("Attendencefile.csv",'w',newline='')as csv_file:
csv_writer=csv.writer(csv_file)
csv_writer.writerow([i[0] for i in cursor.description])
csv_writer.writerows(cursor)
'''
''' we can also use twilio to message the users when they enter.So that if somehow wrong person enter in the room
instead of authorised person than the authorised person will came to know through message and report the issue
'''
'''
import twilio
from twilio.rest import Client
account_sid = '******2bc5988a68276815ddae526ef7b4'
auth_token = '*******77934b8be9963b22cdac9a74c'
client = Client(account_sid,auth_token )
message = client.messages.create(body ="Your attendence is marked", from_ = '+***8491', to ='+*****9898')
'''
|
[
"noreply@github.com"
] |
noreply@github.com
|
6c84ccddbbd2a6110e5b60242adf271558d404ee
|
83ed8b754703a1c9e661c90f0763bfebbc0f2606
|
/爬虫/抓取动态Ajax请求的数据.py
|
872e0b1c73089f5de87f161395db0e8837b3d7ad
|
[] |
no_license
|
zbh123/hobby
|
4ce267a20e1af7f2accd2bde8d39af269efa319b
|
2215c406fe7700bf150fd536dd56823a2e4733d1
|
refs/heads/master
| 2021-08-02T10:31:34.683391
| 2021-07-26T07:26:16
| 2021-07-26T07:26:16
| 150,555,879
| 4
| 0
| null | 2021-07-27T07:34:28
| 2018-09-27T08:41:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,199
|
py
|
import urllib.request
import ssl
import json
import re
def ajaxCrawler(url):
    """Fetch an Ajax/JSON endpoint and return the decoded Python object.

    Sends a browser-like User-Agent header and disables SSL certificate
    verification (the endpoint is fetched over an unverified HTTPS context).
    """
    request = urllib.request.Request(
        url,
        headers={
            "User-Agent": "User-Agent:Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0"
        },
    )
    # Unverified context: skips certificate checking for this request.
    unverified_ctx = ssl._create_unverified_context()
    response = urllib.request.urlopen(request, context=unverified_ctx)
    raw_body = response.read().decode("utf-8")
    return json.loads(raw_body)
url = "https://movie.douban.com/j/search_subjects?type=movie&tag=%E7%83%AD%E9%97%A8&sort=recommend&page_limit=20&page_start=20"
info = ajaxCrawler(url)
print(type(info))
for i in range(11):
url = "https://movie.douban.com/j/search_subjects?type=movie&tag=%E7%83%AD%E9%97%A8&sort=recommend&page_limit=20&page_start=" +str(i*20)
info = ajaxCrawler(url)
print(type(info))
|
[
"noreply@github.com"
] |
noreply@github.com
|
4c45f08b05b3d7602a124435f0b016247808ffc6
|
2ec75e8b2fce5b21f8df8944eb2c792b78743552
|
/server.py
|
4b334f45597809d3e274e5b4f73eb0593cce7e46
|
[] |
no_license
|
smilefufu/tornado-s
|
6ea541bdf3795913c3df070cda38d70665997f90
|
19bd56b33e70e36c9cc0c1d9b4f253b657caaed0
|
refs/heads/master
| 2020-05-20T16:45:25.109800
| 2015-11-19T14:38:43
| 2015-11-19T14:38:43
| 42,692,516
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,529
|
py
|
#!/usr/bin/env python
# encoding: utf-8
from tornado.ioloop import IOLoop
from tornado.web import Application, url
from tornado.options import options, define, parse_command_line
from lib.core import HTTPServer, config_settings, RequestHandler, RestfulApiHandler, ModuleRouter, ProviderManager
import logging
import sys
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
sys.path.append("./modules/")
import tornado.web
#class HtmlHandler(tornado.web.RequestHandler):
class HtmlHandler(RequestHandler):
    """Render template pages, feeding them data from per-page providers.

    The provider configuration for a page lives next to its template with a
    `.data` suffix: the page /example/example2.html is configured by
    /example/example2.html.data.
    """
    def post(self):
        # The provider config used by the page sits in the .html.data file in
        # the same directory as the URI (see class docstring).
        local_path, tpl_path = self.find_template()
        data = ProviderManager.getdata(local_path, self)
        return self.render(tpl_path, **data)
    def get(self):
        # GET is served identically to POST.
        return self.post()
class ApiHandler(RestfulApiHandler):
    """REST endpoint; currently answers every request with an empty JSON object."""
    def request_dealer(self, method):
        # Placeholder: no per-method logic yet, respond with {}.
        empty_payload = {}
        return self.write(empty_payload)
def make_app(dev=False):
    '''Define the URL routes and application options.

    The catch-all HTML route is listed last so /api takes precedence.
    '''
    return Application([
        url(r"/api", ApiHandler),
        url(r"/.*", HtmlHandler),
    ],
    debug = dev,
    gzip = True,
    template_path=os.path.join(os.path.dirname(__file__), "templates"),
    static_path=os.path.join(os.path.dirname(__file__), "static"),
    )
def main():
    """Parse command-line options, start the HTTP server, and run the IOLoop."""
    define("config", type=str, help="path to config file")
    define("dev", type=bool, help="dev mode switch", default=False)
    define("port", type=int, help="port to listen", default=8218)
    define("ac", type=str, help="Access-Control-Allow-Origin", default="*")
    parse_command_line(final=True)
    app = make_app(options.dev)
    server = HTTPServer(app,xheaders=True)
    # `pid` is defined after parse_command_line, so it cannot be set from the CLI.
    define("pid", type=int, default=0)
    options.pid = 0
    if not options.dev:
        # Production: bind the port, then fork worker sub-processes.
        server.bind(options.port)
        options.pid = server.start(0) # Forks multiple sub-processes
    else:
        # Dev mode runs a single process (presumably to keep debug/autoreload
        # usable — confirm against tornado's forking restrictions).
        server.listen(options.port)
    app.settings.update(config_settings(options))
    IOLoop.current().start()
if __name__ == '__main__':
main()
|
[
"fufu.bluesand@gmail.com"
] |
fufu.bluesand@gmail.com
|
db2a2a1a31115c20f2aa9461c575fc65b4918eef
|
9838d1b978bf34926c2f881b8e06ace732869997
|
/assignment2/counter.py
|
9a61e7e5efc846d762d3f0e7ae1ff8a8c7a9a3be
|
[] |
no_license
|
thinkocapo/big-data-processing
|
57678e9cca144c3e310aae82283062d95fe99147
|
5e417c08c7cd3cada732963b32624b6faa8a8d8d
|
refs/heads/master
| 2023-04-15T12:27:08.028286
| 2019-11-12T04:27:41
| 2019-11-12T04:27:41
| 207,067,630
| 1
| 0
| null | 2023-03-31T14:51:22
| 2019-09-08T05:43:50
|
Python
|
UTF-8
|
Python
| false
| false
| 7,330
|
py
|
import argparse
import csv
import datetime
import json
import multiprocessing
from multiprocessing import Process, Value, Lock
import os
import pprint
import time
import threading
from random import randint
# Capture any exceptions and send to Sentry.io :)
if 'DSN_DATA_PIPELINE' in os.environ:
import sentry_sdk
sentry_sdk.init(os.environ['DSN_DATA_PIPELINE'])
field_names = ['uuid', 'timestamp', 'url', 'userId', 'country', 'ua_browser', 'ua_os', 'response_status', 'TTFB']
# Makes a data structure like { date1: { url:count } }
def query1(lock, fileName, server_process_dict):
    """Tally page hits per URL per hour: { 'Y-M-D:H': { url: count } }."""
    for record in csv.DictReader(open(fileName), fieldnames=field_names):
        ts_raw = record['timestamp']
        page = record['url']
        # Timestamps occur with and without fractional seconds; try both forms.
        try:
            parsed = datetime.datetime.strptime(ts_raw, '%Y-%m-%dT%H:%M:%S.%fZ')
        except ValueError:
            parsed = datetime.datetime.strptime(ts_raw, '%Y-%m-%dT%H:%M:%SZ')
        hour_key = '%s-%s-%s:%s' % (parsed.year, parsed.month, parsed.day, parsed.hour)
        with lock:
            # Manager proxy dicts only notice top-level assignment, so the
            # nested dict is read, mutated, then written back.
            hour_counts = server_process_dict.get(hour_key, {})
            hour_counts[page] = hour_counts.get(page, 0) + 1
            server_process_dict[hour_key] = hour_counts
# Makes a data structure like { timestamp: { url: [users] }}
def query2(lock, fileName, server_process_dict):
    """Collect visitors per URL per hour: { 'Y-M-D:H': { url: [userIds] } }."""
    for record in csv.DictReader(open(fileName), fieldnames=field_names):
        ts_raw = record['timestamp']
        page = record['url']
        visitor = record['userId']
        # Timestamps occur with and without fractional seconds; try both forms.
        try:
            parsed = datetime.datetime.strptime(ts_raw, '%Y-%m-%dT%H:%M:%S.%fZ')
        except ValueError:
            parsed = datetime.datetime.strptime(ts_raw, '%Y-%m-%dT%H:%M:%SZ')
        hour_key = '%s-%s-%s:%s' % (parsed.year, parsed.month, parsed.day, parsed.hour)
        with lock:
            # Manager proxy dicts only notice top-level assignment, so the
            # nested dict is read, mutated, then written back.
            per_url = server_process_dict.get(hour_key, {})
            per_url.setdefault(page, []).append(visitor)
            server_process_dict[hour_key] = per_url
# Makes a data structure that looks like { timestamp: { url: [uuids] }}
def query3(lock, fileName, server_process_dict):
    """Collect unique click ids (uuids) per URL per hour.

    Builds { 'Y-M-D:H': { url: [uuid, ...] } } in the shared Manager dict so
    the caller can count distinct click events afterwards.

    Parameters
    ----------
    lock : multiprocessing lock guarding the shared dict.
    fileName : path to a headerless CSV matching the module-level `field_names`.
    server_process_dict : multiprocessing.Manager().dict() shared accumulator.
    """
    input_file = csv.DictReader(open(fileName), fieldnames=field_names)
    for row in input_file:
        timestamp = row['timestamp']
        url = row['url']
        uuid = row['uuid']
        # Timestamps occur with and without fractional seconds; try both forms.
        # (The unused `userId` local and the dead `obj = datetime.time()`
        # placeholder from the original were removed.)
        try:
            obj = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ')
        except ValueError:
            obj = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%SZ')
        timestamp_key = '%s-%s-%s:%s' % (obj.year, obj.month, obj.day, obj.hour)
        with lock:
            # Manager proxies only propagate top-level assignments, hence the
            # read-modify-writeback pattern.
            if timestamp_key in server_process_dict:
                url_dict = server_process_dict[timestamp_key]
                if url in url_dict:
                    url_dict[url].append(uuid)
                else:
                    url_dict[url] = [uuid]
                server_process_dict[timestamp_key] = url_dict
            else:
                server_process_dict[timestamp_key] = {url: [uuid]}
# Makes a data structure that looks like { timestamp: { country: [urls] }}
def problem4(lock, fileName, server_process_dict):
    """Collect URLs per country per hour: { 'Y-M-D:H': { country: [urls] } }."""
    for record in csv.DictReader(open(fileName), fieldnames=field_names):
        ts_raw = record['timestamp']
        page = record['url']
        nation = record['country']
        # Timestamps occur with and without fractional seconds; try both forms.
        try:
            parsed = datetime.datetime.strptime(ts_raw, '%Y-%m-%dT%H:%M:%S.%fZ')
        except ValueError:
            parsed = datetime.datetime.strptime(ts_raw, '%Y-%m-%dT%H:%M:%SZ')
        hour_key = '%s-%s-%s:%s' % (parsed.year, parsed.month, parsed.day, parsed.hour)
        with lock:
            # Manager proxy dicts only notice top-level assignment, so the
            # nested dict is read, mutated, then written back.
            per_country = server_process_dict.get(hour_key, {})
            per_country.setdefault(nation, []).append(page)
            server_process_dict[hour_key] = per_country
def printer(query):
    """Print per-hour uniqueness summaries for the accumulator built by `query`.

    Reads `server_process_dict` and the query functions from the enclosing
    __main__ scope. NOTE(review): the bare `print a, b` statements are
    Python 2 syntax, while the usage comments mention python3 — confirm the
    intended interpreter.
    """
    # Print the number of unique URL's per query
    if query == query1:
        for k,v in server_process_dict.items():
            unique_urls = server_process_dict[k].items()
            print k, len(unique_urls)
    # Print the number of unique visitors per URL per day
    if query == query2:
        for timestamp_key, url_dict in server_process_dict.items():
            for url, users in url_dict.items():
                unique_users = set(user for user in users)
                print timestamp_key, url, len(unique_users)
    # Print the number of uuids (unique event clicks) per URL per hour per day
    if query == query3:
        for timestamp_key, url_dict in server_process_dict.items():
            for url, uuids in url_dict.items():
                unique_uuids = set(uuid for uuid in uuids)
                print timestamp_key, url, len(unique_uuids)
    # Problem 4
    if query == problem4:
        for timestamp_key, country_dict in server_process_dict.items():
            for country, urls in country_dict.items():
                unique_urls = set(url for url in urls)
                print timestamp_key, country, len(unique_urls)
# EXAMPLE USAGE:
# python3 countery.py query1
# python3 countery.py query2
# python3 countery.py query4
# python3 countery.py problem4
if __name__ == '__main__':
    # The query to run is chosen on the command line (query1/query2/query3/problem4).
    parser = argparse.ArgumentParser()
    parser.add_argument("query", type=str, help="query1 query2 query3")
    args = parser.parse_args()
    # Map the CLI name to the worker function.
    queries={'query1': query1, 'query2': query2, 'query3': query3, 'problem4': problem4}
    query = queries[args.query]
    fileNames = ('./input_files/file-input1.csv', 'input_files/file-input2.csv', 'input_files/file-input3.csv', 'input_files/file-input4.csv')
    with multiprocessing.Manager() as manager:
        # One shared dict accumulates results from all four worker processes.
        server_process_dict = manager.dict()
        lock = Lock()
        processes = []
        # One process per input file.
        for i in range(4):
            process = multiprocessing.Process(target=query, args=(lock, fileNames[i], server_process_dict,))
            processes.append(process)
            process.start()
        for curr_process in processes:
            curr_process.join()
        printer(query)
|
[
"thinkocapo@gmail.com"
] |
thinkocapo@gmail.com
|
b24113a5aef60eee13af9475c0f32a9ad3eb8856
|
e9b0a4cbd4757a598fbc8b3973f69dfd76b2020d
|
/app_system/migrations/0002_userssconfig_user.py
|
2062f4dd7f79ecb1b1bcae0300611f6e00bfc068
|
[
"Apache-2.0"
] |
permissive
|
visoon0012/plover.cloud
|
fa58cdc8b241183670a229ccf4db64f78ed023d1
|
04542628758d969085eb6172928165fddb5d2677
|
refs/heads/master
| 2022-12-14T08:08:10.099953
| 2018-08-01T10:08:14
| 2018-08-01T10:08:14
| 129,711,956
| 1
| 0
|
Apache-2.0
| 2022-11-22T02:28:49
| 2018-04-16T08:41:44
|
Python
|
UTF-8
|
Python
| false
| false
| 593
|
py
|
# Generated by Django 2.0.2 on 2018-05-31 10:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a `user` ForeignKey on UsersSConfig pointing at the active user model."""
    initial = True
    dependencies = [
        ('app_system', '0001_initial'),
        # Depend on whichever user model the project has configured as AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.AddField(
            model_name='userssconfig',
            name='user',
            # Deleting the user cascades to their UsersSConfig rows.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"visoon0012@hotmail.com"
] |
visoon0012@hotmail.com
|
fd03335e59e081fdbdaab6e2a3b46cb34f7afd28
|
0903058aeac42ae40371f5a31978df8fdc838162
|
/feature_engine/creation/mathematical_combination.py
|
fda42a6dc8c184102a96825077b8f9d93a5e2a87
|
[
"BSD-3-Clause"
] |
permissive
|
vasusuren/feature_engine
|
5f852c229153dc6a5a2e4f7787152b1aa4c2ae6f
|
24b8cbdc0aea1e8c266b805947f38e9f02369d69
|
refs/heads/master
| 2023-04-10T03:09:47.328514
| 2021-04-12T16:16:47
| 2021-04-12T16:16:47
| 358,562,154
| 0
| 0
|
BSD-3-Clause
| 2021-06-10T13:14:48
| 2021-04-16T10:28:45
| null |
UTF-8
|
Python
| false
| false
| 10,885
|
py
|
from typing import List, Optional, Union
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from feature_engine.dataframe_checks import (
_check_contains_na,
_check_input_matches_training_df,
_is_dataframe,
)
from feature_engine.variable_manipulation import _find_or_check_numerical_variables
class MathematicalCombination(BaseEstimator, TransformerMixin):
    """
    MathematicalCombination() applies basic mathematical operations to multiple
    features, returning one or more additional features as a result. That is, it
    sums, multiplies, takes the average, maximum, minimum or standard deviation
    of a group of variables and returns the result into new variables.

    For example, if we have the variables **number_payments_first_quarter**,
    **number_payments_second_quarter**, **number_payments_third_quarter** and
    **number_payments_fourth_quarter**, we can use MathematicalCombination() to
    calculate the total number of payments and mean number of payments as follows:

    .. code-block:: python

        transformer = MathematicalCombination(
            variables_to_combine=[
                'number_payments_first_quarter',
                'number_payments_second_quarter',
                'number_payments_third_quarter',
                'number_payments_fourth_quarter'
            ],
            math_operations=[
                'sum',
                'mean'
            ],
            new_variables_name=[
                'total_number_payments',
                'mean_number_payments'
            ]
        )

        Xt = transformer.fit_transform(X)

    The transformed X, Xt, will contain the additional features
    **total_number_payments** and **mean_number_payments**, plus the original set
    of variables.

    Parameters
    ----------
    variables_to_combine : list
        The list of numerical variables to be combined.

    math_operations : list, default=None
        The list of basic math operations to be used to create the new features.
        If None, all of ['sum', 'prod', 'mean', 'std', 'max', 'min'] will be
        performed over the `variables_to_combine`. Alternatively, the user can
        enter the list of operations to carry out. Each operation should be a
        string and must be one of the elements from the list:
        ['sum', 'prod', 'mean', 'std', 'max', 'min'].
        Each operation will result in a new variable that will be added to the
        transformed dataset.

    new_variables_names : list, default=None
        Names of the newly created variables. The user must enter one name for
        each mathematical transformation indicated in the `math_operations`
        parameter (or six names if `math_operations` is None and all operations
        are performed). The order of the names must match the order of the
        operations in `math_operations`.
        If `new_variables_names = None`, the transformer will assign an arbitrary
        name to the newly created features starting by the name of the
        mathematical operation, followed by the variables combined separated
        by -.

    Attributes
    ----------
    combination_dict_ :
        Dictionary containing the new column name to mathematical operation pairs.

    math_operations_ :
        List with the mathematical operations to be applied to the
        `variables_to_combine`.

    Methods
    -------
    fit:
        This transformer does not learn parameters.
    transform:
        Combine the variables with the mathematical operations.
    fit_transform:
        Fit to the data, then transform it.

    Notes
    -----
    Although the transformer in essence allows us to combine any feature with any
    of the allowed mathematical operations, its use is intended mostly for the
    creation of new features based on some domain knowledge. Typical examples
    within the financial sector are:

    - Sum debt across financial products, i.e., credit cards, to obtain the total debt.
    - Take the average payments to various financial products per month.
    - Find the Minimum payment done at any one month.

    In insurance, we can sum the damage to various parts of a car to obtain the
    total damage.
    """

    # Full set of supported operations; also the default applied when the user
    # does not restrict math_operations.
    _ALL_OPERATIONS = ["sum", "prod", "mean", "std", "max", "min"]

    def __init__(
        self,
        variables_to_combine: List[Union[str, int]],
        math_operations: Optional[List[str]] = None,
        new_variables_names: Optional[List[str]] = None,
    ) -> None:
        # check input types
        if not isinstance(variables_to_combine, list) or not all(
            isinstance(var, (int, str)) for var in variables_to_combine
        ):
            raise ValueError(
                "variables_to_combine takes a list of strings or integers "
                "corresponding to the names of the variables to combine "
                "with the mathematical operations."
            )

        if new_variables_names:
            if not isinstance(new_variables_names, list) or not all(
                isinstance(var, str) for var in new_variables_names
            ):
                raise ValueError(
                    "new_variable_names should be None or a list with the "
                    "names to be assigned to the new variables created by"
                    "the mathematical combinations."
                )

        if math_operations:
            if not isinstance(math_operations, list):
                raise ValueError("math_operations parameter must be a list or None")

            if any(
                operation not in self._ALL_OPERATIONS
                for operation in math_operations
            ):
                raise ValueError(
                    "At least one of the entered math_operations is not supported. "
                    "Choose one or more of ['sum', 'prod', 'mean', 'std', 'max', 'min']"
                )

        # check input logic
        if len(variables_to_combine) <= 1:
            raise KeyError(
                "MathematicalCombination requires two or more features to make proper "
                "transformations."
            )

        if new_variables_names:
            # BUG FIX: the original compared against len(math_operations) even
            # when math_operations was None, raising TypeError. When the user
            # does not restrict the operations, all six defaults apply, so six
            # names are required.
            n_operations = (
                len(math_operations) if math_operations else len(self._ALL_OPERATIONS)
            )
            if len(new_variables_names) != n_operations:
                raise ValueError(
                    "Number of items in new_variables_names must be equal to number of "
                    "items in math_operations."
                    "In other words, the transformer needs as many new variable names"
                    "as mathematical operations to perform over the variables to "
                    "combine."
                )

        self.variables_to_combine = variables_to_combine
        self.new_variables_names = new_variables_names
        self.math_operations = math_operations

    def fit(self, X: pd.DataFrame, y: Optional[pd.Series] = None):
        """
        This transformer does not learn parameters.

        Perform dataframe checks. Creates dictionary of new feature name to
        operation pairs.

        Parameters
        ----------
        X : pandas dataframe of shape = [n_samples, n_features]
            The training input samples. Can be the entire dataframe, not just the
            variables to transform.

        y : pandas Series, or np.array. Defaults to None.
            It is not needed in this transformer. You can pass y or None.

        Raises
        ------
        TypeError
            - If the input is not a Pandas DataFrame
            - If any user provided variables in variables_to_combine are not numerical
        ValueError
            If the variable(s) contain null values

        Returns
        -------
        self
        """
        # check input dataframe
        X = _is_dataframe(X)

        # check variables to combine are numerical
        self.variables_to_combine = _find_or_check_numerical_variables(
            X, self.variables_to_combine
        )

        # check if dataset contains na
        _check_contains_na(X, self.variables_to_combine)

        if self.math_operations is None:
            self.math_operations_ = list(self._ALL_OPERATIONS)
        else:
            self.math_operations_ = self.math_operations

        # dictionary of new_variable_name to operation pairs
        if self.new_variables_names:
            self.combination_dict_ = dict(
                zip(self.new_variables_names, self.math_operations_)
            )
        else:
            # Auto-generate names like "sum(var1-var2)".
            if all(isinstance(var, str) for var in self.variables_to_combine):
                vars_ls = self.variables_to_combine
            else:
                vars_ls = [str(var) for var in self.variables_to_combine]

            self.combination_dict_ = {
                f"{operation}({'-'.join(vars_ls)})": operation  # type: ignore
                for operation in self.math_operations_
            }

        self.input_shape_ = X.shape

        return self

    def transform(self, X: pd.DataFrame) -> pd.DataFrame:
        """
        Combine the variables with the mathematical operations.

        Parameters
        ----------
        X : pandas dataframe of shape = [n_samples, n_features]
            The data to transform.

        Raises
        ------
        TypeError
            If the input is not a Pandas DataFrame
        ValueError
            - If the variable(s) contain null values
            - If the dataframe is not of the same size as that used in fit()

        Returns
        -------
        X : Pandas dataframe, shape = [n_samples, n_features + n_operations]
            The dataframe with the original variables plus the new variables.
        """
        # Check method fit has been called
        check_is_fitted(self)

        # check that input is a dataframe
        X = _is_dataframe(X)

        # check if dataset contains na
        _check_contains_na(X, self.variables_to_combine)

        # Check if input data contains same number of columns as dataframe used to fit.
        _check_input_matches_training_df(X, self.input_shape_[1])

        # combine mathematically
        for new_variable_name, operation in self.combination_dict_.items():
            X[new_variable_name] = X[self.variables_to_combine].agg(operation, axis=1)

        return X
|
[
"noreply@github.com"
] |
noreply@github.com
|
c710a8f96c892727fa5bba29bf8a3a2db1cff76c
|
9a07514a9942303d96031b002b4f28ef248fe689
|
/app/page/main.py
|
392ae1d6191abb895cd03f46c59ac479abb3ae25
|
[] |
no_license
|
zhousk/hogwarts
|
58a257c162331b7e262db88fe308312e2d390101
|
93d38d33fb5b4cfa52649c5101d1a325c56d64ca
|
refs/heads/master
| 2023-02-14T00:26:32.820142
| 2021-01-04T17:09:01
| 2021-01-04T17:09:01
| 307,015,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
from appium.webdriver.common.mobileby import MobileBy
from app.page.contact_page import ContactPage
from app.page.base_page import BasePage
class Main(BasePage):
    """Page object for the app's main screen."""

    # Locator for the contacts tab on the bottom bar.
    _contact_list = (MobileBy.XPATH, "//*[@text='通讯录']")

    def goto_contact_page(self):
        """Tap the contacts tab and return the resulting ContactPage."""
        # Unpack the (by, value) locator tuple into find()'s two arguments.
        by, value = self._contact_list
        self.find(by, value).click()
        return ContactPage(self.driver)
|
[
"980692186@qq.com"
] |
980692186@qq.com
|
3d03ee4b346bb937bb87a78407998a3a3294ea25
|
83aa8b54f55eeeca3c58eb7cc59219fb2c9f6307
|
/tests/test_preprocessing.py
|
6f8f76c60bd5e75d5dbec29723d48b9ca37d1a36
|
[
"MIT"
] |
permissive
|
vftens/RocAlphaGo-aug25-keras2-py35
|
650e8321dfc82b0cfa922afa78e3dc0e92017e49
|
67ad5242ea7a8cdda60b9e10590f7bc9e91447c6
|
refs/heads/master
| 2023-01-04T07:10:31.509986
| 2020-10-24T18:17:50
| 2020-10-24T18:17:50
| 306,944,918
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,207
|
py
|
import unittest
from . import parseboard
import numpy as np
import AlphaGo.go as go
from AlphaGo.go import GameState
from AlphaGo.preprocessing.preprocessing import Preprocess
def simple_board():
    """Build the hand-coded 7x7 position shared by most feature tests.

    Final position (X across, Y down; k marks the ko point where a white
    stone was just captured):

          0 1 2 3 4 5 6
        0 B W . . . . .
        1 B W . . . . .
        2 B . . . B . .
        3 . . . B k B .
        4 . . . W B W .
        5 . . . . W . .
        6 . . . . . . .
    """
    gs = GameState(size=7)
    # Moves replayed in order; colors alternate starting with black.
    move_sequence = [
        # ladder-looking thing in the top-left
        (0, 0), (1, 0), (0, 1), (1, 1), (0, 2),
        # build the ko shape in the middle
        (3, 4), (3, 3), (4, 5), (4, 2), (5, 4), (5, 3),
        (4, 3),  # white plays the ko position
        (4, 4),  # black captures it
    ]
    for move in move_sequence:
        gs.do_move(move)
    return gs
def self_atari_board():
    """Build a 7x7 position with self-atari points for black (black to play).

    Points marked 'a' are self-atari for black:

          0 1 2 3 4 5 6
        0 a W . . . W B
        1 . . . . . . .
        2 . . . . . . .
        3 . . W . W . .
        4 . W B a B W .
        5 . . W W W . .
        6 . . . . . . .
    """
    gs = GameState(size=7)
    # Place all black stones first, then all white, matching the original
    # move order exactly.
    placements = (
        (go.BLACK, [(2, 4), (4, 4), (6, 0)]),
        (go.WHITE, [(1, 0), (5, 0), (2, 3), (4, 3), (1, 4),
                    (5, 4), (2, 5), (3, 5), (4, 5)]),
    )
    for color, coords in placements:
        for coord in coords:
            gs.do_move(coord, color)
    return gs
def capture_board():
    """Build a 7x7 position with imminent captures, black to move.

          0 1 2 3 4 5 6
        0 . . B B . . .
        1 . B W W B . .
        2 . B W . . . .
        3 . . B . . . .
        4 . . . . W B .
        5 . . . W . W B
        6 . . . . W B .
    """
    gs = GameState(size=7)
    placements = (
        (go.BLACK, [(2, 0), (3, 0), (1, 1), (4, 1), (1, 2),
                    (2, 3), (5, 4), (6, 5), (5, 6)]),
        (go.WHITE, [(2, 1), (3, 1), (2, 2), (4, 4), (3, 5),
                    (5, 5), (4, 6)]),
    )
    for color, coords in placements:
        for coord in coords:
            gs.do_move(coord, color)
    # The last stone placed was white, so explicitly give black the move.
    gs.set_current_player(go.BLACK)
    return gs
class TestPreprocessingFeatures(unittest.TestCase):
"""Test the functions in preprocessing.py
note that the hand-coded features look backwards from what is depicted
in simple_board() because of the x/y column/row transpose thing (i.e.
numpy is typically thought of as indexing rows first, but we use (x,y)
indexes, so a numpy row is like a go column and vice versa)
"""
def test_get_board(self):
gs = simple_board()
pp = Preprocess(["board"], size=7)
feature = pp.state_to_tensor(gs)[0].transpose((1, 2, 0))
white_pos = np.asarray([
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
black_pos = np.asarray([
[1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
empty_pos = np.ones((gs.get_size(), gs.get_size())) - (white_pos + black_pos)
# check number of planes
self.assertEqual(feature.shape, (gs.get_size(), gs.get_size(), 3))
# check return value against hand-coded expectation
# (given that current_player is white)
self.assertTrue(np.all(feature == np.dstack((white_pos, black_pos, empty_pos))))
def test_get_turns_since(self):
"""
"""
gs = simple_board()
pp = Preprocess(["turns_since"], size=7)
feature = pp.state_to_tensor(gs)[0].transpose((1, 2, 0))
one_hot_turns = np.zeros((gs.get_size(), gs.get_size(), 8))
rev_moves = list(gs.get_history())
rev_moves = rev_moves[::-1]
board = gs.get_board()
for x in range(gs.get_size()):
for y in range(gs.get_size()):
if board[x, y] != go.EMPTY:
# find most recent move at x, y
age = rev_moves.index((x, y))
one_hot_turns[x, y, min(age, 7)] = 1
self.assertTrue(np.all(feature == one_hot_turns))
def test_get_liberties(self):
"""
"""
gs = simple_board()
pp = Preprocess(["liberties"], size=7)
feature = pp.state_to_tensor(gs)[0].transpose((1, 2, 0))
# todo - test liberties when > 8
one_hot_liberties = np.zeros((gs.get_size(), gs.get_size(), 8))
# black piece at (4,4) has a single liberty: (4,3)
one_hot_liberties[4, 4, 0] = 1
# the black group in the top left corner has 2 liberties
one_hot_liberties[0, 0:3, 1] = 1
# .. as do the white pieces on the left and right of the eye
one_hot_liberties[3, 4, 1] = 1
one_hot_liberties[5, 4, 1] = 1
# the white group in the top left corner has 3 liberties
one_hot_liberties[1, 0:2, 2] = 1
# ...as does the white piece at (4,5)
one_hot_liberties[4, 5, 2] = 1
# ...and the black pieces on the sides of the eye
one_hot_liberties[3, 3, 2] = 1
one_hot_liberties[5, 3, 2] = 1
# the black piece at (4,2) has 4 liberties
one_hot_liberties[4, 2, 3] = 1
for i in range(8):
self.assertTrue(
np.all(feature[:, :, i] == one_hot_liberties[:, :, i]),
"bad expectation: stones with %d liberties" % (i + 1))
def test_get_capture_size(self):
"""
"""
gs = capture_board()
pp = Preprocess(["capture_size"], size=7)
feature = pp.state_to_tensor(gs)[0].transpose((1, 2, 0))
score_before = gs.get_captures_white()
one_hot_capture = np.zeros((gs.get_size(), gs.get_size(), 8))
# there is no capture available; all legal moves are zero-capture
for (x, y) in gs.get_legal_moves():
copy = gs.copy()
copy.do_move((x, y))
num_captured = copy.get_captures_white() - score_before
one_hot_capture[x, y, min(7, num_captured)] = 1
for i in range(8):
self.assertTrue(
np.all(feature[:, :, i] == one_hot_capture[:, :, i]),
"bad expectation: capturing %d stones" % i)
def test_get_self_atari_size(self):
"""
"""
gs = self_atari_board()
pp = Preprocess(["self_atari_size"], size=7)
feature = pp.state_to_tensor(gs)[0].transpose((1, 2, 0))
one_hot_self_atari = np.zeros((gs.get_size(), gs.get_size(), 8))
# self atari of size 1 at position 0,0
one_hot_self_atari[0, 0, 0] = 1
# self atari of size 3 at position 3,4
one_hot_self_atari[3, 4, 2] = 1
self.assertTrue(np.all(feature == one_hot_self_atari))
def test_get_self_atari_size_cap(self):
"""
"""
gs = capture_board()
pp = Preprocess(["self_atari_size"], size=7)
feature = pp.state_to_tensor(gs)[0].transpose((1, 2, 0))
one_hot_self_atari = np.zeros((gs.get_size(), gs.get_size(), 8))
# self atari of size 1 at the ko position and just below it
one_hot_self_atari[4, 5, 0] = 1
one_hot_self_atari[3, 6, 0] = 1
# self atari of size 3 at bottom corner
one_hot_self_atari[6, 6, 2] = 1
self.assertTrue(np.all(feature == one_hot_self_atari))
def test_get_liberties_after(self):
    """liberties_after planes one-hot encode, for each legal move, how many
    liberties the played stone's group would have (plane = libs - 1; groups
    with 7 or more liberties share the last plane)."""
    gs = simple_board()
    pp = Preprocess(["liberties_after"], size=7)
    planes = pp.state_to_tensor(gs)[0].transpose((1, 2, 0))
    expected = np.zeros((gs.get_size(), gs.get_size(), 8))
    # TODO (?) hand-code the expectation instead of recomputing via the engine?
    for move in gs.get_legal_moves():
        lookahead = gs.copy()
        lookahead.do_move(move)
        libs = lookahead.get_liberty()[move[0], move[1]]
        plane = libs - 1 if libs < 7 else 7
        expected[move[0], move[1], plane] = 1
    for i in range(8):
        self.assertTrue(
            np.all(planes[:, :, i] == expected[:, :, i]),
            "bad expectation: stones with %d liberties after move" % (i + 1))
def test_get_liberties_after_cap(self):
    """A copy of test_get_liberties_after but where captures are imminent."""
    gs = capture_board()
    pp = Preprocess(["liberties_after"], size=7)
    planes = pp.state_to_tensor(gs)[0].transpose((1, 2, 0))
    expected = np.zeros((gs.get_size(), gs.get_size(), 8))
    for move in gs.get_legal_moves():
        lookahead = gs.copy()
        lookahead.do_move(move)
        libs = lookahead.get_liberty()[move[0], move[1]]
        expected[move[0], move[1], min(libs - 1, 7)] = 1
    for i in range(8):
        self.assertTrue(
            np.all(planes[:, :, i] == expected[:, :, i]),
            "bad expectation: stones with %d liberties after move" % (i + 1))
def test_get_ladder_capture(self):
    """ladder_capture flags moves that start a working ladder ('a' here)."""
    gs, moves = parseboard.parse(". . . . . . .|"
                                 "B W a . . . .|"
                                 ". B . . . . .|"
                                 ". . . . . . .|"
                                 ". . . . . . .|"
                                 ". . . . . W .|")
    pp = Preprocess(["ladder_capture"], size=7)
    feature = pp.state_to_tensor(gs)[0, 0]  # 1D tensor; no need to transpose
    expected = np.zeros((gs.get_size(), gs.get_size()))
    expected[moves['a']] = 1
    self.assertTrue(np.all(expected == feature))
def test_get_ladder_escape(self):
    """ladder_escape flags moves that let a laddered group escape."""
    # On this board, playing at 'a' is ladder escape because there is a breaker on the right.
    gs, moves = parseboard.parse(". B B . . . .|"
                                 "B W a . . . .|"
                                 ". B . . . . .|"
                                 ". . . . . W .|"
                                 ". . . . . . .|"
                                 ". . . . . . .|")
    pp = Preprocess(["ladder_escape"], size=7)
    gs.set_current_player(go.WHITE)
    feature = pp.state_to_tensor(gs)[0, 0]  # 1D tensor; no need to transpose
    expected = np.zeros((gs.get_size(), gs.get_size()))
    expected[moves['a']] = 1
    self.assertTrue(np.all(expected == feature))
def test_two_escapes(self):
    """Both escape routes of a laddered stone are flagged simultaneously."""
    gs, moves = parseboard.parse(". . X . . .|"
                                 ". X O a . .|"
                                 ". X c X . .|"
                                 ". O X b . .|"
                                 ". . O . . .|"
                                 ". . . . . .|")
    # place a white stone at c, and reset player to white
    gs.do_move(moves['c'], color=go.WHITE)
    gs.set_current_player(go.WHITE)
    pp = Preprocess(["ladder_escape"], size=6)
    gs.set_current_player(go.WHITE)
    feature = pp.state_to_tensor(gs)[0, 0]  # 1D tensor; no need to transpose
    # both 'a' and 'b' should be considered escape moves for white after 'O' at c
    expected = np.zeros((gs.get_size(), gs.get_size()))
    expected[moves['a']] = 1
    expected[moves['b']] = 1
    self.assertTrue(np.all(expected == feature))
def test_get_sensibleness(self):
    """sensibleness marks legal moves that do not fill the mover's own eye."""
    # TODO - there are no legal eyes at the moment
    gs = simple_board()
    pp = Preprocess(["sensibleness"], size=7)
    feature = pp.state_to_tensor(gs)[0, 0]  # 1D tensor; no need to transpose
    expected = np.zeros((gs.get_size(), gs.get_size()))
    for move in gs.get_legal_moves():
        if not gs.is_eye(move, go.WHITE):
            expected[move] = 1
    self.assertTrue(np.all(expected == feature))
def test_get_legal(self):
    """The legal plane is 1 exactly at the legal moves."""
    gs = simple_board()
    pp = Preprocess(["legal"], size=7)
    feature = pp.state_to_tensor(gs)[0, 0]  # 1D tensor; no need to transpose
    expected = np.zeros((gs.get_size(), gs.get_size()))
    for move in gs.get_legal_moves():
        expected[move] = 1
    self.assertTrue(np.all(expected == feature))
def test_feature_concatenation(self):
    """Multiple requested features are stacked along the channel axis in
    the order they were requested."""
    gs = simple_board()
    pp = Preprocess(["board", "sensibleness", "capture_size"], size=7)
    feature = pp.state_to_tensor(gs)[0].transpose((1, 2, 0))
    expected = np.zeros((gs.get_size(), gs.get_size(), 3 + 1 + 8))
    board = gs.get_board()
    # first three planes: board occupancy (white / black / empty)
    expected[:, :, 0] = (board == go.WHITE) * 1
    expected[:, :, 1] = (board == go.BLACK) * 1
    expected[:, :, 2] = (board == go.EMPTY) * 1
    # 4th plane: sensibleness (as in test_get_sensibleness)
    for move in gs.get_legal_moves():
        if not gs.is_eye(move, go.WHITE):
            expected[move[0], move[1], 3] = 1
    # 5th through 12th plane: capture size (every move is zero-capture here)
    for move in gs.get_legal_moves():
        expected[move[0], move[1], 4] = 1
    self.assertTrue(np.all(expected == feature))
# Run the preprocessing feature tests when executed directly.
if __name__ == '__main__':
    unittest.main()
|
[
"aico@ya.ru"
] |
aico@ya.ru
|
bbd3db53b09bf960e6e995204e2771897492d6dc
|
599d569b586cb1414886b1a2454cf3c59c4362bd
|
/master_classifyNewcase.py
|
1e11353691c3a1709fc3f692cee4905ea5ed08fd
|
[] |
no_license
|
cgallego/master
|
77511ff3330882f0c5456beaedd81468d7a99bb1
|
2a04d66ac783f5729413aecf9c66037fc8501c78
|
refs/heads/master
| 2016-09-07T19:02:40.537285
| 2014-07-28T16:05:01
| 2014-07-28T16:05:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,972
|
py
|
# -*- coding: utf-8 -*-
"""
Master python script to run each module in sequence
Arguments:
============
sys.argv[1] = input text file with one case per line in the following format:
BenignNMaligNAnt StudyNumber DicomExamNumber LesionID StudyDate SeriesID BreastSide PathReportID PathoBreastSide
Created on Tue Apr 08 14:20:35 2014
@ author (C) Cristina Gallego, University of Toronto
----------------------------------------------------------------------
"""
import os, os.path
import sys
import string
from sys import argv, stderr, exit
import shlex, subprocess
import re
import numpy as np
import dicom
import psycopg2
import sqlalchemy as al
import sqlalchemy.orm
import pandas as pd
from query_database import *
from dictionaries import my_aet, hostID, local_port, clinical_aet, clinical_IP, clinical_port, remote_aet, remote_IP, remote_port
import dcmtk_routines as dcmtk
from inputs_init import *
from display import *
from segment import *
from features_dynamic import *
from features_morphology import *
from features_texture import *
import pylab
# convertion packages
import pandas.rpy.common as com
from rpy2.robjects.numpy2ri import numpy2ri
from rpy2.robjects.packages import importr
import rpy2.robjects as R
from rpy2.robjects import globalenv
from classifyCascade import *
def getScans(path_rootFolder, fileline, PatientID, StudyID, AccessionN, oldExamID):
"""
run : getScans(path_rootFolder, PatientID, StudyID, AccessionN):
Inputs
======
path_rootFolder: (string) Automatically generated based on the location of file
PatientID : (int) MRN
StudyID : (int) CAD StudyID
AccessionN : (int) CAD AccessionN
database : (bool) [True] whether to check database for info about study.
Output
======
"""
try:
dcmtk.check_MRI_MARTEL(data_loc, remote_aet, remote_port, remote_IP, local_port, PatientID, StudyID, AccessionN)
if(oldExamID==False):
dcmtk.pull_MRI_MARTEL(path_rootFolder, data_loc, remote_aet, remote_port, remote_IP, local_port, PatientID, StudyID, AccessionN, countImages=False)
else:
ExamID = fileline[4]
dcmtk.pull_MRI_MARTELold(path_rootFolder, data_loc, remote_aet, remote_port, remote_IP, local_port, PatientID, StudyID, AccessionN, ExamID, countImages=False)
except (KeyboardInterrupt, SystemExit):
dcmtk.check_pacs(path_rootFolder, data_loc, clinical_aet , clinical_port, clinical_IP, local_port, PatientID, StudyID, AccessionN)
dcmtk.pull_pacs(path_rootFolder, data_loc, clinical_aet, clinical_port, clinical_IP, local_port, PatientID, StudyID, AccessionN)
except (KeyboardInterrupt, SystemExit):
print 'Unable to find study in MRI_MARTEL or AS0SUNB --- Abort'
sys.exit()
return
if __name__ == '__main__':
    # Get Root folder ( the directory of the script being run)
    path_rootFolder = os.path.dirname(os.path.abspath(__file__))
    print path_rootFolder
    # Open filename list: one case per line, formatted as in the module docstring
    file_ids = open(sys.argv[1],"r")
    init_flag=1
    for fileline in file_ids:
        # Get the line: StudyNumber DicomExamNumber MRN chosen_lesions_id StudyDate SeriesID image_pos_pat image_ori_pat
        fileline = fileline.split()
        cond = fileline[0]
        StudyID = fileline[1]
        DicomExamNumber = fileline[2]
        Lesions_id = fileline[3]
        dateID = fileline[4]
        SeriesID = fileline[5] # corresponds to dynamic sequence;
        #############################
        ###### 1) Retrieving Images from Research PACS
        #############################
        print "Retrieving Scans to local drive..."
        #getScans(path_rootFolder, fileline, PatientID, StudyID, AccessionN, oldExamID=False)
        #############################
        ###### 2) Querying Research database for clinical, pathology, radiology data
        #############################
        print "Executing SQL connection..."
        # Format query StudyID: zero-pad on the left to 4 characters
        if (len(StudyID) >= 4 ): fStudyID=StudyID
        if (len(StudyID) == 3 ): fStudyID='0'+StudyID
        if (len(StudyID) == 2 ): fStudyID='00'+StudyID
        if (len(StudyID) == 1 ): fStudyID='000'+StudyID
        # Format query redateID: YYYYMMDD -> YYYY-MM-DD
        redateID = dateID[0:4]+'-'+dateID[4:6]+'-'+dateID[6:8]
        # perform query
        queryData = Query()
        queryData.queryDatabase(fStudyID, redateID)
        # NOTE(review): this placeholder is immediately overwritten by the
        # raw_input below; it appears to be dead code.
        rowCase=["0", "0"]
        rowCase = int(raw_input('pick row (0-n): '))
        # recollect pathologies
        # NOTE(review): index=queryData.d1 passes the DataFrame itself as the
        # index -- presumably queryData.d1.index was intended; confirm.
        queryData.d1['is_insitu'] = pd.Series(True, index=queryData.d1)
        queryData.d1['is_invasive'] = pd.Series(True, index=queryData.d1)
        queryData.d1['Diagnosis'] = pd.Series(True, index=queryData.d1)
        queryData.d1['BenignNMaligNAnt'] = pd.Series(True, index=queryData.d1)
        queryData.d1['labels'] = pd.Series(True, index=queryData.d1)
        ansLesion = array((raw_input('Enter: is_insitu?: is_invasive?: ')).split()).astype(bool)
        #slice data, get only 1 record
        dataCase = pd.Series( queryData.d1.loc[rowCase,:] )
        dataCase['is_insitu'] = ansLesion[0]
        dataCase['is_invasive'] = ansLesion[1]
        ansDiag=["diagnosis"]
        ansDiag = str(raw_input('Dignosis: '))
        dataCase['Diagnosis'] = ansDiag
        # cond[:-1] strips the trailing character of the label -- presumably
        # dropping a suffix from e.g. 'massB'; TODO confirm the convention.
        dataCase['BenignNMaligNAnt'] = cond[:-1]
        dataCase['labels'] = cond
        if(init_flag):
            casesFrame = pd.DataFrame(columns=queryData.d1.columns)
            init_flag=False
        #############################
        ###### 3) Extractfeatures
        #############################
        ###### Start by Loading
        print "Start by loading volumes..."
        load = Inputs_init()
        data_loc='Z:\Cristina\MassNonmass'+os.sep+cond[:-1]
        [series_path, phases_series, lesionID_path] = load.readVolumes(data_loc, StudyID, DicomExamNumber, SeriesID, Lesions_id)
        print "Path to series location: %s" % series_path
        print "List of pre and post contrast volume names: %s" % phases_series
        print "Path to lesion segmentation: %s" % lesionID_path
        print "\n Load Segmentation..."
        lesion3D = load.loadSegmentation(lesionID_path)
        print "Data Structure: %s" % lesion3D.GetClassName()
        print "Number of points: %d" % int(lesion3D.GetNumberOfPoints())
        print "Number of cells: %d" % int(lesion3D.GetNumberOfCells())
        print "\n Visualize volumes..."
        # Create only 1 display
        loadDisplay = Display()
        lesion3D_mesh = loadDisplay.addSegment(lesion3D, (0,1,0), interact=False)
        loadDisplay.visualize(load.DICOMImages, load.image_pos_pat, load.image_ori_pat, sub=True, postS=4, interact=True)
        # Get z slice currently selected in the interactive viewer
        LesionZslice = loadDisplay.zImagePlaneWidget.GetSliceIndex()
        #############################
        # 4) Create Segmentation of lesion. Comment out if not needed ( define seededlesion3D = lesion3D )
        #############################
        createSegment = Segment()
        print "\n Displaying picker for lesion segmentation"
        seeds = loadDisplay.display_pick(load.DICOMImages, load.image_pos_pat, load.image_ori_pat, 4, LesionZslice)
        seededlesion3D = createSegment.segmentFromSeeds(load.DICOMImages, load.image_pos_pat, load.image_ori_pat, seeds, loadDisplay.iren1, loadDisplay.xImagePlaneWidget, loadDisplay.yImagePlaneWidget, loadDisplay.zImagePlaneWidget)
        seededlesion3D_mesh = loadDisplay.addSegment(seededlesion3D, (0,0,1), interact=True)
        loadDisplay.picker.RemoveAllObservers()
        # save it to file
        createSegment.saveSegmentation(lesionID_path, seededlesion3D)
        #############################
        ###### Extract Dynamic features
        #############################
        print "\n Extract Dynamic contour features..."
        loadDynamic = Dynamic()
        dynamicfeatures_contour = loadDynamic.extractfeatures_contour(load.DICOMImages, load.image_pos_pat, load.image_ori_pat, series_path, phases_series, seededlesion3D)
        print "\n=========================================="
        print dynamicfeatures_contour
        print "\n Extract Dynamic inside features..."
        dynamicfeatures_inside = loadDynamic.extractfeatures_inside(load.DICOMImages, load.image_pos_pat, load.image_ori_pat, series_path, phases_series, seededlesion3D)
        print dynamicfeatures_inside
        print "\n=========================================="
        #############################
        ###### Extract Morphology features
        #############################
        print "\n Extract Morphology features..."
        loadMorphology = Morphology()
        morphofeatures = loadMorphology.extractfeatures(load.DICOMImages, load.image_pos_pat, load.image_ori_pat, series_path, phases_series, seededlesion3D)
        print "\n=========================================="
        print morphofeatures
        print "\n=========================================="
        #############################
        ###### Extract Texture features
        #############################
        print "\n Extract Texture features..."
        loadTexture = Texture()
        texturefeatures = loadTexture.extractfeatures(load.DICOMImages, load.image_pos_pat, load.image_ori_pat, series_path, phases_series, seededlesion3D, loadMorphology.VOI_efect_diameter, loadMorphology.lesion_centroid )
        print "\n=========================================="
        print texturefeatures
        print "\n=========================================="
        # deal with closing windows, plots, renders, actors
        pylab.close('all')
        loadDisplay.renderer1.RemoveActor(loadDisplay.actor_mesh)
        loadDisplay.iren1.TerminateApp()
        loadDisplay.renWin1.Finalize()
        #############################
        ###### Finish tidying up and save to file
        ## append collection of cases
        #############################
        casesFrame = casesFrame.append(dataCase) # 20
        casesFrame['id']=fStudyID
        # NOTE(review): set_index(..., inplace=False) returns a new frame that
        # is discarded here; the merges below key on the 'id' column instead.
        casesFrame.set_index('id',inplace=False)
        dynamicfeatures_contour['id']=fStudyID
        dynamicfeatures_contour.set_index('id',inplace=False)
        casesFrame = pd.merge(casesFrame, dynamicfeatures_contour, on='id', how='inner')
        dynamicfeatures_inside['id']=fStudyID
        dynamicfeatures_inside.set_index('id',inplace=False)
        casesFrame = pd.merge(casesFrame, dynamicfeatures_inside, on='id', how='inner')
        morphofeatures['id']=fStudyID
        morphofeatures.set_index('id',inplace=False)
        casesFrame = pd.merge(casesFrame, morphofeatures, on='id', how='inner')
        texturefeatures['id']=fStudyID
        texturefeatures.set_index('id',inplace=False)
        casesFrame = pd.merge(casesFrame, texturefeatures, on='id', how='inner')
        # end of line
        os.chdir("Z:\Cristina\MassNonmass\codeProject\codeBase\extractFeatures\casesDatabase")
        casesFrame.to_csv('casesFrames_toclasify.csv')
        #############################
        ## Classification stage: send features to classifier to generate new case prediction
        ## Cascade current classifier
        #############################
        classifier = classifyCascade()
        classifier.case_classifyCascade()
    file_ids.close()
|
[
"admin@webdsdesign.com"
] |
admin@webdsdesign.com
|
1ed4eb824895a8efbf8b5abcf633e22583dae17e
|
802a34c7452f7035b3f3441169fb2e3c1743b2e9
|
/Modelling/logistic_regression_pipeline.py
|
3e07b150de7d5fe35ca90a48dc2fb65f9e5f1ee9
|
[] |
no_license
|
XingLLiu/ED_code
|
6756a945c213df4676a7278aa89de0675afb87c9
|
79f6d680354a944a538a3d1983fbc898b0d1c097
|
refs/heads/master
| 2020-06-15T09:05:59.861517
| 2019-08-27T20:32:55
| 2019-08-27T20:32:55
| 195,255,510
| 0
| 0
| null | 2019-08-26T00:40:25
| 2019-07-04T14:22:05
|
Python
|
UTF-8
|
Python
| false
| false
| 6,888
|
py
|
# ----------------------------------------------------
# To run:
# 1. customize hyper-parameters and DATA_PATH in Section 0
# 2. in Terminal:
# python logistic_regression_pipeline.py
# ----------------------------------------------------
from ED_support_module import *
from ED_support_module import EPICPreprocess
from ED_support_module import Evaluation
from ED_support_module import LogisticRegression
# ----------------------------------------------------
# ========= 0. Preliminary settings =========
MODEL_NAME = "LR"     # label used on plots and in feature-importance output
RANDOM_SEED = 20
MODE = "c"            # split mode passed to splitter()
FPR_THRESHOLD = 0.1   # false-positive-rate cap used in the permutation metric
PENALTY = "l1"        # penalty of the first logistic-regression fit
# Arguments
def setup_parser():
    """Build the command-line parser for this pipeline.

    Returns
    =======
    argparse.ArgumentParser exposing --random_seed, --dynamic and --path.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--random_seed",
                        default=27,
                        required=True,
                        type=int,
                        help="Random seed.")
    # BUGFIX: the flag was misspelled "---dynamic" (three dashes), which made
    # it impossible to pass --dynamic on the command line and bound the value
    # to args._dynamic instead of args.dynamic.
    # NOTE(review): type=bool treats any non-empty string (even "False") as
    # True -- consider a proper string-to-bool converter if this flag is used.
    parser.add_argument("--dynamic",
                        default=True,
                        required=True,
                        type=bool,
                        help="If using one-month ahead prediction.")
    parser.add_argument("--path",
                        required=True,
                        type=str,
                        help="Path to save figures.")
    return parser
# Parser arguements
# parser = setup_parser()
# args = parser.parse_args()
# ----------------------------------------------------
# Path set-up
FIG_PATH = "../../results/logistic_regression/"
DATA_PATH = "../../data/EPIC_DATA/preprocessed_EPIC_with_dates_and_notes.csv"
FIG_ROOT_PATH = FIG_PATH + f"dynamic_{MODE}_seeds{RANDOM_SEED}_{PENALTY}penalty/"
# Create folder if not already exist
if not os.path.exists(FIG_PATH):
    os.makedirs(FIG_PATH)
# ----------------------------------------------------
# ========= 1. Further preprocessing =========
preprocessor = EPICPreprocess.Preprocess(DATA_PATH)
EPIC, EPIC_enc, EPIC_CUI, EPIC_arrival = preprocessor.streamline()
# Get numerical columns (for later transformation)
num_cols = preprocessor.which_numerical(EPIC)
num_cols.remove("Primary.Dx")
# Get time span
time_span = EPIC_arrival['Arrived'].unique().tolist()
# ----------------------------------------------------
# ========= 2.a. One-month ahead prediction =========
# Train on data up to month `time`, predict the following month `time_pred`.
print("====================================")
print("Dynamically evaluate the model ...\n")
for j, time in enumerate(time_span[2:-1]):
    # Month to be predicted
    time_pred = time_span[j + 3]
    # Create folder if not already exist
    DYNAMIC_PATH = FIG_ROOT_PATH + f"{time_pred}/"
    if not os.path.exists(DYNAMIC_PATH):
        os.makedirs(DYNAMIC_PATH)
    # Prepare train/test sets
    XTrain, XTest, yTrain, yTest= splitter(EPIC_arrival,
                                           num_cols = num_cols,
                                           mode = MODE,
                                           time_threshold = time,
                                           test_size =None,
                                           EPIC_CUI = EPIC_CUI,
                                           seed=RANDOM_SEED)
    print("Training for data up to {} ...".format(time))
    print( "Train size: {}. Test size: {}. Sepsis cases in [train, test]: [{}, {}]."
            .format( yTrain.shape, yTest.shape, yTrain.sum(), yTest.sum() ) )
    # ========= 2.a.i. Model =========
    # Apply SMOTE to rebalance the (rare) positive class in the training set
    smote = SMOTE(random_state = RANDOM_SEED, sampling_strategy = 'auto')
    col_names = XTrain.columns
    XTrain, yTrain = smote.fit_sample(XTrain, yTrain)
    XTrain = pd.DataFrame(XTrain, columns=col_names)
    # Fit logistic regression (L1 first pass for feature selection)
    model = sk.linear_model.LogisticRegression(solver = 'liblinear', penalty = PENALTY,
                                               max_iter = 1000, random_state = RANDOM_SEED).fit(XTrain, yTrain)
    # Re-fit after removing features of zero coefficients
    # NOTE(review): remove_zero_coef_ / predict_proba_single are not plain
    # sklearn API -- presumably provided by the project's LogisticRegression
    # module imported above; confirm.
    XTrain = model.remove_zero_coef_(XTrain)
    model_new = sk.linear_model.LogisticRegression(solver = 'liblinear', penalty = 'l2',
                                                   max_iter = 1000, random_state = RANDOM_SEED).fit(XTrain, yTrain)
    # Predict
    # Note that remove_zero_coef_ does not use XTest in training. It only removes some
    # features according to the LR model.
    XTest = model.remove_zero_coef_(XTest)
    pred_new = model_new.predict_proba(XTest)[:, 1]
    # ========= 2.a.ii. Plot beta values =========
    # Plot the features whose coefficients are the top 50 largest in magnitude
    non_zero_coeffs = model_new.coef_[model_new.coef_ != 0]
    indices = np.argsort(abs(non_zero_coeffs))[::-1][:50]
    _ = plt.figure()
    _ = plt.title("Logistic Regression Coefficients Values")
    _ = sns.barplot(y = XTrain.columns[indices], x = np.squeeze(non_zero_coeffs)[indices])
    _ = plt.yticks(fontsize = 4)
    plt.savefig(DYNAMIC_PATH + f"coeffs_{time_pred}.eps", format = 'eps', dpi = 800)
    plt.close()
    # ========= 2.c. Feature importance =========
    # Permutation test
    imp_means, imp_vars = feature_importance_permutation(
        predict_method = model_new.predict_proba_single,
        X = np.array(XTest),
        y = np.array(yTest),
        metric = true_positive_rate,
        fpr_threshold = FPR_THRESHOLD,
        num_rounds = 5,
        seed = RANDOM_SEED)
    fi_evaluator = Evaluation.FeatureImportance(imp_means, imp_vars, XTest.columns, MODEL_NAME)
    # Save feature importance plot
    fi_evaluator.FI_plot(save_path = DYNAMIC_PATH, y_fontsize = 8, eps = True)
    # ========= 2.b. Evaluation =========
    evaluator = Evaluation.Evaluation(yTest, pred_new)
    # Save ROC plot
    _ = evaluator.roc_plot(plot = False, title = MODEL_NAME, save_path = DYNAMIC_PATH + f"roc_{time_pred}")
    # Save summary
    summary_data = evaluator.summary()
    summary_data.to_csv(DYNAMIC_PATH + f"summary_{time_pred}.csv", index = False)
    # ========= 2.c. Save predicted results =========
    pred_new = pd.DataFrame(pred_new, columns = ["pred_prob"])
    pred_new.to_csv(DYNAMIC_PATH + f"predicted_result_{time_pred}.csv", index = False)
    # ========= End of iteration =========
    print("Completed evaluation for {}.\n".format(time_pred))
# ========= 2.c. Summary plots =========
# Uses `evaluator` from the final loop iteration to render aggregate views.
print("Saving summary plots ...")
summary_plot_path = FIG_ROOT_PATH
# Subplots of ROCs
evaluator.roc_subplot(summary_plot_path, time_span, [3, 3], eps = True)
# Aggregate ROC
aggregate_summary = evaluator.roc_aggregate(summary_plot_path, time_span, eps = True)
# Save aggregate summary
aggregate_summary.to_csv(summary_plot_path + "aggregate_summary.csv", index = False)
print("Summary plots saved at {}".format(summary_plot_path))
print("====================================")
|
[
"liuxing971015@outlook.com"
] |
liuxing971015@outlook.com
|
21c7744fe9bf965805efcb90210e003fbc603dfe
|
f4ea512b8a156abfaef83f25e044b94c37b6dedb
|
/questions/ABC194/C.py
|
fe4fe5952d374d1f02b2c0ed3a53e5bd84cd5e11
|
[] |
no_license
|
HubHikari/CompetitiveProgramming
|
9d1483e0a964508e84ae233aba9cb4ade7ff88e3
|
b7c471a80378183b2ed5fe86fa829ee89d26709f
|
refs/heads/main
| 2023-04-13T07:00:13.812800
| 2021-04-24T13:51:27
| 2021-04-24T13:51:27
| 308,897,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
from itertools import permutations
DEBUG_MODE = 0  # set to 1 to enable DBG() tracing
MAX_NUM=2**63-1  # unused in this solution; kept from the author's template
def DBG(s):
    """Print *s* under a DEBUG banner, but only when DEBUG_MODE is 1."""
    if DEBUG_MODE != 1:
        return
    print("DEBUG: ")
    print(s)
# Read N and the list A, then compute the sum over all pairs i < j of
# (A_i - A_j)^2 via the identity  answer = (N-1)*sum(A^2) - sum(A_i*(S - A_i))
# where S = sum(A).  This runs in O(N) instead of O(N^2).
N = int(input())
A = list(map(int, input().split()))
total = sum(A)
squares = [value * value for value in A]
sum_squares = sum(squares)
diagonal = (N - 1) * sum_squares
DBG(diagonal)
cross_terms = [value * (total - value) for value in A]
DBG(sum(cross_terms))
answer = diagonal - sum(cross_terms)
print(answer)
|
[
"donhanya0321@gmail.com"
] |
donhanya0321@gmail.com
|
62f85c84ce0341424f10db40157445ebc85a70e9
|
6b2af072847d22c17344856636054497d1f4f632
|
/leetcode/448.find-all-numbers-disappeared-in-an-array.py
|
ecee2f006429567a5e6fc41126e55ec157ebeef3
|
[] |
no_license
|
iplay16/vscode
|
2a2dde14550164cc36d76dee4a5bdba6e998f026
|
542626143c4eab527f6bc7143e8778619c8e4857
|
refs/heads/master
| 2020-04-25T01:55:23.972221
| 2019-10-05T08:40:31
| 2019-10-05T08:40:31
| 172,422,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
#
# @lc app=leetcode id=448 lang=python3
#
# [448] Find All Numbers Disappeared in an Array
#
class Solution:
    def findDisappearedNumbers(self, nums: List[int]) -> List[int]:
        """Return every value in 1..n that is missing from nums (n = len(nums)).

        Marks presence in place by negating nums[v-1] for each value v seen,
        then collects the 1-based positions that stayed positive.
        O(n) time, O(1) extra space; the input list is mutated.
        """
        for value in nums:
            slot = abs(value) - 1
            nums[slot] = -abs(nums[slot])
        return [index + 1 for index, marker in enumerate(nums) if marker > 0]
|
[
"iplay16@163.com"
] |
iplay16@163.com
|
1bab096720a3d1db7ea403b4988e1744b8ee3cd9
|
541523537649b48a96eef475d6b66e8e8270978d
|
/fenci/web/test/test.py
|
554e8d9c53702256b8d8ed584dbcc758e7f9ac4b
|
[] |
no_license
|
HNU-MSC/fenci
|
03720288e29b3dbf05eb408d8af164d1e3ed7813
|
dc9218eae7e58bafda5d659a01b473eb57b63861
|
refs/heads/master
| 2020-12-27T04:31:25.202604
| 2020-02-02T12:28:11
| 2020-02-02T12:28:11
| 237,766,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
import json

# Load the force-directed graph description and print its node list.
with open('force_directed.json', 'r') as f:
    res = f.read()
res = json.loads(res)
print(res['nodes'])
|
[
"noreply@github.com"
] |
noreply@github.com
|
23d781e34d8d2f3ae61620fd43b6f47b75e59a5b
|
5b0ff689a3e14f42bdf688864cae40c931a5f685
|
/msa/core/armve/tests/test_multi_write.py
|
b2424dcb0537e0251c93404cf4c3107e15a472cd
|
[] |
no_license
|
prometheus-ar/vot.ar
|
cd7012f2792a2504fb7f0ee43796a197fc82bd28
|
72d8fa1ea08fe417b64340b98dff68df8364afdf
|
refs/heads/2017-ago-salta
| 2021-01-02T22:19:41.591077
| 2017-08-25T11:55:49
| 2017-08-25T11:55:49
| 37,735,555
| 171
| 110
| null | 2020-06-30T13:33:49
| 2015-06-19T17:15:52
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,712
|
py
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import division
from serial import Serial
from msa.core.armve.constants import DEV_PRINTER, CMD_PRINTER_PAPER_START, \
CMD_PRINTER_MOVE, EVT_PRINTER_PAPER_INSERTED, CMD_PRINTER_PRINT, \
CMD_PRINTER_PAPER_REMOVE, DEV_RFID, EVT_RFID_NEW_TAG,\
CMD_PRINTER_LOAD_COMP_BUFFER, MSG_EV_PUB
from msa.core.armve.protocol import Printer, RFID, Device, Agent, \
PowerManager, PIR
from msa.core.armve.settings import SERIAL_PORT
def init_channel():
    """Open the configured serial port (if not already open) and flush
    both I/O buffers; return the ready channel."""
    port = Serial(SERIAL_PORT, timeout=3)
    if not port.isOpen():
        port.open()
    port.flushInput()
    port.flushOutput()
    return port
def test_boleta():
    """Manual hardware check: read the RFID tags on the board attached to
    SERIAL_PORT and write a sequential payload to the first tag found.

    Requires the real ARM-VE device; this is not an automated unit test.
    """
    channel = init_channel()
    agent = Agent(channel)
    init = agent.initialize()  # NOTE(review): return value kept but never used
    printer = Printer(channel)
    rfid = RFID(channel)
    device = Device(channel)
    #esperar_evento(device, DEV_PRINTER, EVT_PRINTER_PAPER_INSERTED)
    #print rfid.get_multitag_data()
    tags_data = rfid.get_tags()[0]
    serial_number = tags_data['serial_number'][0]
    # Write marker bytes "--00".."--51" starting at block 4; "1C" is passed as
    # the third argument -- presumably a length/block count in hex; confirm
    # against RFID.write_tag().
    rfid.write_tag(serial_number, 4, "1C",
                   "--00--01--02--03--04--05--06--07--08--09--10--11--12"
                   "--13--14--15--16--17--18--19--20--21--22--23--24--25"
                   "--26--27--28--29--30--31--32--33--34--35--36--37--38"
                   "--39--40--41--42--43--44--45--46--47--48--49--50--51"
                   )
    rfid.get_multitag_data()
def esperar_evento(device, device_id, event):
    """Block until *device* publishes the given (device_id, event) message."""
    print("esperando evento", device_id, event)
    while True:
        mensaje = device.read(True)
        if mensaje is not None and mensaje[1:] == (device_id, event, MSG_EV_PUB):
            return
if __name__ == "__main__":
test_boleta()
|
[
"prometheus@olympus.org"
] |
prometheus@olympus.org
|
c15e0849f76eff4678bd60d201ed33b6738de563
|
3a0deef4feb62d9b0cee5c581cbc57233d356f9d
|
/ParkEasy/migrations/0001_initial.py
|
4ab181853091171dd6fb4a862bbce2da2d69d75f
|
[] |
no_license
|
sirwill98/William_Rodgers_Graded_Unit
|
d1d24edf2d78b7eb421e1f2307fd424e4a1c8426
|
94ce68bc97773a93c4c2b963b0252ea5c0830d2a
|
refs/heads/master
| 2021-06-05T10:29:27.070287
| 2020-11-12T13:32:30
| 2020-11-12T13:32:30
| 131,712,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,506
|
py
|
# Generated by Django 2.0.3 on 2018-05-24 12:40
import ParkEasy.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the ParkEasy app: flights, bookings, payments,
    price tables, vehicles and the custom Customer user model.

    Auto-generated by Django 2.0.3; edit with care.
    """

    initial = True

    dependencies = [
        ('auth', '0009_alter_user_last_name_max_length'),
    ]

    operations = [
        # Flight records; their customer foreign keys are added at the end,
        # after the custom user model exists.
        migrations.CreateModel(
            name='Arriving',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('arriving_flight_number', models.TextField(max_length=16)),
                ('arriving_flight_datetime', models.DateTimeField()),
            ],
        ),
        migrations.CreateModel(
            name='Booking',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('booking_date', models.DateField(default=django.utils.timezone.now)),
                ('booking_length', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Departing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('departing_flight_number', models.TextField(max_length=16)),
                ('departing_flight_datetime', models.DateTimeField()),
                ('destination', models.TextField(max_length=64)),
            ],
        ),
        migrations.CreateModel(
            name='Payment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_paid', models.DateTimeField(default=django.utils.timezone.now)),
                ('paid', models.BooleanField(default=False)),
                ('amount', models.IntegerField()),
                ('booking', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ParkEasy.Booking')),
            ],
        ),
        migrations.CreateModel(
            name='Prices',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('vip', models.IntegerField(default=0)),
                ('valet', models.IntegerField(default=0)),
                ('day', models.FloatField(default=1.2)),
                ('base', models.IntegerField(default=27)),
                ('after_five', models.IntegerField(default=10)),
                ('is_current', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='Vehicle',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('reg_no', models.TextField(max_length=7)),
                ('make', models.TextField()),
                ('manufacturer', models.TextField()),
            ],
        ),
        # Custom user model (AUTH_USER_MODEL), managed by ParkEasy.models.UserManager.
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('password', models.TextField(default='', max_length=100)),
                ('email', models.EmailField(default='', max_length=100, unique=True)),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this site.')),
                ('address_line1', models.CharField(max_length=100)),
                ('address_line2', models.CharField(max_length=100)),
                ('postcode', models.CharField(max_length=16)),
                ('tel_no', models.CharField(max_length=20)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', ParkEasy.models.UserManager()),
            ],
        ),
        # Foreign keys to the customer/price/vehicle tables, added once all
        # referenced models exist.
        migrations.AddField(
            model_name='departing',
            name='customer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='booking',
            name='customer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='booking',
            name='prices',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='ParkEasy.Prices'),
        ),
        migrations.AddField(
            model_name='booking',
            name='vehicle',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ParkEasy.Vehicle'),
        ),
        migrations.AddField(
            model_name='arriving',
            name='customer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"billyboy2410@hotmail.co.uk"
] |
billyboy2410@hotmail.co.uk
|
83888354f0b783bb8f6c9e830474067926de5f17
|
7d4667ee455337014760a7e23b9556c30d358b25
|
/Web2_0course/buyagrade/cardValidate.py
|
c0cc154f6d1542837d2e0826f1b482ece02bd129
|
[] |
no_license
|
joyeecheung/WebHWs
|
786d6bfcfd3b0a0822a73f7b94b7d6673fd89568
|
9f9725187ef7ce6d496e3368a9c7acbd512f27f2
|
refs/heads/master
| 2020-12-25T17:05:30.659229
| 2015-02-11T19:17:41
| 2015-02-11T19:17:41
| 13,560,924
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,316
|
py
|
""" Functions for validating credit card numbers. """
import re
def IsValidChecksum(number):
""" Checks if the card number passes a luhn mod-10 checksum. """
numlist = [int(x) for x in reversed(str(number)) if x.isdigit()]
# digits that count once
count = sum(x for i, x in enumerate(numlist) if i % 2 == 0)
# digits that count double (add digits of double value)
count += sum(sum(divmod(2 * x, 10)) for i, x in enumerate(numlist) if i % 2 != 0)
return (count % 10 == 0)
def IsValidCharacters(number):
"""
Checks if the number only contains digits and '-'.
If the digits are grouped, checks if they are grouped correctly.
"""
if re.compile('^[-0-9]*$').match(number):
return True
else:
return re.compile('^([0-9]{4}[-])*([0-9]{4})$').match(number) != None
def IsValidPattern(number, type):
""" Checks to make sure that the card number match the CC pattern. """
CC_PATTERNS = {
'mastercard':'^5[12345]([0-9]{14})$',
'visa' :'^4([0-9]{15})$',
}
return re.compile(CC_PATTERNS[type]).match(number) != None
def IsValid(number, type):
if IsValidCharacters(number):
clean = number.replace('-', '')
if IsValidPattern(clean, type):
return IsValidChecksum(clean)
return False
|
[
"joyeecheung@joyeecheung-virtual-machine.(none)"
] |
joyeecheung@joyeecheung-virtual-machine.(none)
|
ea6e913cfb0bfbdeae407ef6826a14197f46c3c5
|
805a795ea81ca8b5cee1dec638585011da3aa12f
|
/MAIN/2.79/python/lib/site-packages/OpenGL/GLES2/EXT/float_blend.py
|
b15df56ee4bf4fa5dd71042d1b67ad8dbacc6e7d
|
[
"Apache-2.0"
] |
permissive
|
josipamrsa/Interactive3DAnimation
|
5b3837382eb0cc2ebdee9ee69adcee632054c00a
|
a4b7be78514b38fb096ced5601f25486d2a1d3a4
|
refs/heads/master
| 2022-10-12T05:48:20.572061
| 2019-09-26T09:50:49
| 2019-09-26T09:50:49
| 210,919,746
| 0
| 1
|
Apache-2.0
| 2022-10-11T01:53:36
| 2019-09-25T19:03:51
|
Python
|
UTF-8
|
Python
| false
| false
| 750
|
py
|
'''OpenGL extension EXT.float_blend
This module customises the behaviour of the
OpenGL.raw.GLES2.EXT.float_blend to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/float_blend.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.float_blend import *
from OpenGL.raw.GLES2.EXT.float_blend import _EXTENSION_NAME
def glInitFloatBlendEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
|
[
"jmrsa21@gmail.com"
] |
jmrsa21@gmail.com
|
e0a10d5037f48480969cd86d793755f6b876565a
|
b2654c3003c7de93b24c84d6acce9bcdb2cad826
|
/07_function_objects.py
|
384c804218176b869e4385cd2bf0caf45d48041d
|
[] |
no_license
|
sonicbrcm/dive-into-cpython
|
eef9ab16bb10aa82555c0221a17b28c578b34afd
|
1e17fbaacdfb0d546fda1bbc6d79361de417caf0
|
refs/heads/master
| 2021-12-22T04:13:05.784313
| 2017-10-10T08:34:09
| 2017-10-10T08:34:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
PyCodeObject
cpython/Include/code.h
cpython/Objects/codeobject.c
PyFunctionObject
cpython/Include/funcobject.h
cpython/Objects/funcobject.c
|
[
"hexiaowei91@163.com"
] |
hexiaowei91@163.com
|
b0208119966e1fdb6ebb2df463fa9d87fbde71c4
|
e479cf6650db5766ca8435c5a1165e8cb43de0b7
|
/evaluate.py
|
9cfef8c4942f289387614d90d2854c750e40f0aa
|
[] |
no_license
|
WangGewu/2020-ai-road-segmentation
|
531b9eaaa0a6038b77a5a5fe886748a1ccda42c3
|
5e5c0a161b1bc3424dd351738641c4a367a8c1cf
|
refs/heads/main
| 2023-03-29T05:00:12.529475
| 2021-04-08T13:22:23
| 2021-04-08T13:22:23
| 355,915,734
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,237
|
py
|
import numpy as np
class Evaluator(object):
def __init__(self, num_class):
self.num_class = num_class
self.confusion_matrix = np.zeros((self.num_class,)*2)
def Pixel_Accuracy(self):
Acc = np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum()
return Acc
def Pixel_Accuracy_Class(self):
# print(self.confusion_matrix.sum(axis=1))
# print(self.confusion_matrix.sum())
Acc = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=1)
Acc = np.nanmean(Acc)
return Acc
def Mean_Intersection_over_Union(self):
MIoU = np.diag(self.confusion_matrix) / (
np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -
np.diag(self.confusion_matrix))
MIoU = np.nanmean(MIoU)
return MIoU
def Mean_Intersection_over_Union_test(self):
MIoU = np.diag(self.confusion_matrix) / (
np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -
np.diag(self.confusion_matrix))
return MIoU
def Frequency_Weighted_Intersection_over_Union(self):
freq = np.sum(self.confusion_matrix, axis=1) / np.sum(self.confusion_matrix)
iu = np.diag(self.confusion_matrix) / (
np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -
np.diag(self.confusion_matrix))
FWIoU = (freq[freq > 0] * iu[freq > 0]).sum()
return FWIoU
def _generate_matrix(self, gt_image, pre_image):
mask = (gt_image >= 0) & (gt_image < self.num_class)
label = self.num_class * gt_image[mask].astype('int') + pre_image[mask]
count = np.bincount(label, minlength=self.num_class**2)
confusion_matrix = count.reshape(self.num_class, self.num_class)
return confusion_matrix
def add_batch(self, gt_image, pre_image):
assert gt_image.shape == pre_image.shape
self.confusion_matrix += self._generate_matrix(gt_image, pre_image)
def reset(self):
self.confusion_matrix = np.zeros((self.num_class,) * 2)
|
[
"noreply@github.com"
] |
noreply@github.com
|
e1d4c5491ac53c61a1a27e3e470c44305f3885f3
|
56bcef6090d53b43afb60240079d6c71ed01f808
|
/kalakriti/customer_block/views.py
|
1bdd9cf807b7f6d63fd9e418f0f3a454f3d6a9c3
|
[] |
no_license
|
hrs2203/soad_project_2020
|
1091da3b167c863a28229ddc95859fdd9396f637
|
067853dd52676062358b5841fdcca0feefaabfc4
|
refs/heads/main
| 2023-02-04T20:44:23.771516
| 2020-12-22T06:50:11
| 2020-12-22T06:50:11
| 308,274,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,119
|
py
|
from django.shortcuts import render, redirect
from django.http import JsonResponse
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.models import User, AnonymousUser
from customer_block.forms import (
user_login_form,
business_login_form,
user_signup_form,
business_signup_form,
)
from customer_block.models import CustomerModel, BusinessModel, OrderModel
import os, random, string
from pathlib import Path
from image_model.models import Product
from image_model.forms import upload_product_form
from django.core.files.storage import default_storage
def home_page(request):
return render(request=request, template_name="homepage.html", context={})
def login_user_page(request):
context = dict()
if request.method == "POST":
form = user_login_form(request.POST)
if form.is_valid():
user = authenticate(
request,
username=request.POST["userName"],
password=request.POST["password"],
)
if user:
login(request, user)
else:
form = user_login_form()
context["user"] = request.user
context["form"] = form
if request.user.is_authenticated:
return redirect("/user")
print("INNNN")
return render(request=request, template_name="login_user.html", context=context)
def login_business_page(request):
context = dict()
if request.method == "POST":
form = business_login_form(request.POST)
if form.is_valid():
user = authenticate(
request,
username=request.POST["userName"],
password=request.POST["password"],
)
if user:
login(request, user)
else:
form = business_login_form()
context["user"] = request.user
context["form"] = form
if request.user.is_authenticated:
return redirect("/business")
return render(request=request, template_name="login_business.html", context=context)
def logout_page(request):
logout(request)
return redirect("/")
def signup_user_page(request):
context = {}
if request.method == "POST":
form = user_signup_form(request.POST)
if form.is_valid():
try:
newUser = User.objects.create_user(
email=form.cleaned_data["userEmail"],
username=form.cleaned_data["userName"],
password=form.cleaned_data["password"],
)
newUser.is_staff = False
newUser.save()
except:
return redirect("/signup/user")
newUserDetail = CustomerModel(userModel=newUser)
newUserDetail.save()
user = authenticate(
request,
username=request.POST["userName"],
password=request.POST["password"],
)
if user:
login(request, user)
return redirect("/")
else:
print("not auth")
else:
form = user_signup_form()
context["form"] = form
return render(request=request, template_name="signup_user.html", context=context)
def signup_business_page(request):
context = {}
if request.method == "POST":
form = business_signup_form(request.POST)
if form.is_valid():
try:
newUser = User.objects.create_user(
email=form.cleaned_data["businessEmail"],
username=form.cleaned_data["businessName"],
password=form.cleaned_data["password"],
)
newUser.is_staff = True
newUser.save()
except:
print("some error")
return redirect("/signup/business")
newBusinessDetail = BusinessModel(
userModel=newUser,
serviceCharge=form.cleaned_data["serviceCharge"],
businessDescription=form.cleaned_data["businessDescription"],
)
newBusinessDetail.save()
user = authenticate(
request,
username=request.POST["businessName"],
password=request.POST["password"],
)
if user:
login(request, user)
return redirect("/")
else:
print("not auth")
else:
form = business_signup_form()
context["form"] = form
return render(
request=request, template_name="signup_business.html", context=context
)
def user_page(request):
context = dict()
try:
context["customerDetail"] = CustomerModel.objects.filter(
userModel=request.user
)[0]
context["orderHistory"] = OrderModel.objects.filter(
userModelLink=context["customerDetail"]
)
except:
context = dict()
return render(request=request, template_name="user_page.html", context=context)
def business_page(request):
context = dict()
try:
context["businessDetail"] = BusinessModel.objects.filter(
userModel=request.user
)[0]
context["orderHistory"] = OrderModel.objects.filter(
businessModelLink=context["businessDetail"]
)
except:
context = dict()
return render(request=request, template_name="business_page.html", context=context)
def choice_page(request):
context = dict()
context["productList"] = Product.objects.all()[::-1]
context["dealerList"] = BusinessModel.objects.all()
return render(
request=request, template_name="design_list_page.html", context=context
)
def confirm_payment_page(request):
if request.method == "POST":
paymentAmount = int(request.POST["totalAmount"])
tempProductModel = Product.objects.filter(id=request.POST["productModelId"])[0]
tempCustomerModel = CustomerModel.objects.filter(
id=request.POST["userModelId"]
)[0]
tempCustomerModel.balance -= paymentAmount
tempCustomerModel.save()
tempbusinessModel = BusinessModel.objects.filter(
id=request.POST["businessModelId"]
)[0]
tempbusinessModel.balance += paymentAmount
tempbusinessModel.save()
tempOrder = OrderModel(
productModelLink=tempProductModel,
userModelLink=tempCustomerModel,
businessModelLink=tempbusinessModel,
paymentStatus=True,
deliveryStatus=False,
totalAmount=request.POST["totalAmount"],
)
tempOrder.save()
return redirect("/user")
def add_money_to_user(request):
if not request.user.is_authenticated:
return('/login/user')
if request.user.is_staff:
return redirect('/business')
context = dict()
if request.method == 'POST':
return redirect('/user')
return render(request=request, template_name="add_money_page.html", context=context )
def deliver_custom_product(request):
if request.method == 'POST':
orderId = request.POST['orderId']
tempOrderObj = OrderModel.objects.filter(id=orderId)[0]
try:
tempOrderObj.deliveryStatus = True
tempOrderObj.save()
except:
pass
if request.user.is_staff:
return redirect('/business')
else:
return redirect('/user')
def payment_page(request):
context = dict()
context["businessId"] = None
context["productId"] = None
context["businessObject"] = None
context["productObject"] = None
context["customerDetail"] = None
context["canUserPay"] = False
context["totalPaymentAmount"] = 0
if not request.user.is_staff:
if request.method == "POST":
try:
context["businessId"] = request.POST["selectedBusinessId"]
context["productId"] = request.POST["selectedProductId"]
context["businessObject"] = BusinessModel.objects.filter(
id=context["businessId"]
)[0]
context["productObject"] = Product.objects.filter(
id=context["productId"]
)[0]
context["totalPaymentAmount"] = (
context["businessObject"].serviceCharge
+ context["productObject"].ProductPrice
)
context["customerDetail"] = CustomerModel.objects.filter(
userModel=request.user
)[0]
context["canUserPay"] = (
context["customerDetail"].balance >= context["totalPaymentAmount"]
)
except:
context["businessId"] = None
context["productId"] = None
context["businessObject"] = None
context["productObject"] = None
context["customerDetail"] = None
context["canUserPay"] = False
context["totalPaymentAmount"] = 0
return render(request=request, template_name="make_payment.html", context=context)
def genRandomName(fileName):
"""Generate unique Name for images
Args:
fileName (str): fileName to get its extention
Returns:
str: unique file name
"""
fileExt = fileName.split(".")[-1]
randName = "".join([random.choice(string.ascii_lowercase) for i in range(20)])
resp = f"{randName}.{fileExt}"
BASE_FILE = Path(__file__).resolve().parent.parent
FILE_PATH = os.path.join(BASE_FILE, "image_model", "images", resp)
while os.path.isfile(FILE_PATH):
randName = "".join([random.choice(string.ascii_lowercase) for i in range(20)])
resp = f"{randName}.{fileExt}"
BASE_FILE = Path(__file__).resolve().parent.parent
FILE_PATH = os.path.join(BASE_FILE, "image_model", "images", resp)
return resp
def upload_custom_product(request):
""" Web interface to upload image """
if not request.user.is_authenticated:
return redirect("/login/business")
if not request.user.is_staff:
return redirect("/user")
context = {}
if request.method == "POST":
form = upload_product_form(request.POST, request.FILES)
if form.is_valid:
temp_file = request.FILES["ProductImage"]
tempFileName = genRandomName(temp_file.name)
file_name = default_storage.save(tempFileName, temp_file)
tempProduct = Product(
ProductName=request.POST["ProductName"],
ProductUrl=f"/static/{tempFileName}",
ProductDescription=request.POST["ProductDescription"],
ProductPrice=request.POST["ProductPrice"],
)
tempProduct.save()
return redirect("/make_choice")
form = upload_product_form()
context["form"] = form
return render(request=request, template_name="upload_product.html", context=context)
|
[
"hrishabh2203@gmail.com"
] |
hrishabh2203@gmail.com
|
8aa6225c10b41ae45a7dcca40c4dab0d5f1bbe27
|
8652ad554a5fc6076ddae6a869576b6360438a0f
|
/boilerplate.py
|
f8d7b3708ef6a230941929700669295cddc6d494
|
[] |
no_license
|
dipamsen/Pygame-Intro-Code
|
b79bb2c554332275c5042efc44cffc32e1711da3
|
f4316677d965f140c1ba38b6af26620bf2ddeae0
|
refs/heads/main
| 2023-06-17T11:01:34.632924
| 2021-07-16T07:17:19
| 2021-07-16T07:17:19
| 386,546,596
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 866
|
py
|
from colors import *
import pygame
# unnecessary imports for vscode intellisense
import pygame.display
import pygame.time
import pygame.draw
import pygame.mouse
import pygame.event
import pygame.image
import pygame.transform
import math
pygame.init()
FPS = 60
WIDTH = 400
HEIGHT = 400
pygame.display.set_caption("PyGame: Sketch Title")
pygame.display.set_icon(pygame.image.load("logo.png"))
win = pygame.display.set_mode((WIDTH, HEIGHT))
clock = pygame.time.Clock()
def redraw_game_window():
pygame.display.update()
win.fill(DARK_GREY)
# mainloop
run = True
while run:
redraw_game_window()
MOUSE = pygame.mouse.get_pos()
pygame.draw.circle(win, (150, 150, 150), MOUSE, 20)
pygame.draw.circle(win, WHITE, MOUSE, 20, 2)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
clock.tick(FPS)
pygame.quit()
|
[
"dipamdiptam@gmail.com"
] |
dipamdiptam@gmail.com
|
6cb2260307d2f7c6cbf7b028abac70e25d96e6fd
|
972322a06d74e90be88b32204d5a777b24c95a3c
|
/weatherVenv/lib/python3.8/site-packages/twilio/rest/preview/__init__.py
|
667c76b23bd2cd834e13bb10628482bad99f6515
|
[] |
no_license
|
Ktailor34/weatherBot
|
5ae95f0635d658ba7d8792afe882c01518bc7025
|
ca6029b19e085d301b1358a38c70186d864215b5
|
refs/heads/master
| 2022-12-15T14:02:33.400798
| 2020-09-10T21:38:51
| 2020-09-10T21:38:51
| 240,813,256
| 2
| 0
| null | 2022-12-08T03:37:21
| 2020-02-16T01:27:21
|
Python
|
UTF-8
|
Python
| false
| false
| 7,099
|
py
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base.domain import Domain
from twilio.rest.preview.bulk_exports import BulkExports
from twilio.rest.preview.deployed_devices import DeployedDevices
from twilio.rest.preview.hosted_numbers import HostedNumbers
from twilio.rest.preview.marketplace import Marketplace
from twilio.rest.preview.sync import Sync
from twilio.rest.preview.trusted_comms import TrustedComms
from twilio.rest.preview.understand import Understand
from twilio.rest.preview.wireless import Wireless
class Preview(Domain):
def __init__(self, twilio):
"""
Initialize the Preview Domain
:returns: Domain for Preview
:rtype: twilio.rest.preview.Preview
"""
super(Preview, self).__init__(twilio)
self.base_url = 'https://preview.twilio.com'
# Versions
self._bulk_exports = None
self._deployed_devices = None
self._hosted_numbers = None
self._marketplace = None
self._sync = None
self._understand = None
self._wireless = None
self._trusted_comms = None
@property
def bulk_exports(self):
"""
:returns: Version bulk_exports of preview
:rtype: twilio.rest.preview.bulk_exports.BulkExports
"""
if self._bulk_exports is None:
self._bulk_exports = BulkExports(self)
return self._bulk_exports
@property
def deployed_devices(self):
"""
:returns: Version deployed_devices of preview
:rtype: twilio.rest.preview.deployed_devices.DeployedDevices
"""
if self._deployed_devices is None:
self._deployed_devices = DeployedDevices(self)
return self._deployed_devices
@property
def hosted_numbers(self):
"""
:returns: Version hosted_numbers of preview
:rtype: twilio.rest.preview.hosted_numbers.HostedNumbers
"""
if self._hosted_numbers is None:
self._hosted_numbers = HostedNumbers(self)
return self._hosted_numbers
@property
def marketplace(self):
"""
:returns: Version marketplace of preview
:rtype: twilio.rest.preview.marketplace.Marketplace
"""
if self._marketplace is None:
self._marketplace = Marketplace(self)
return self._marketplace
@property
def sync(self):
"""
:returns: Version sync of preview
:rtype: twilio.rest.preview.sync.Sync
"""
if self._sync is None:
self._sync = Sync(self)
return self._sync
@property
def understand(self):
"""
:returns: Version understand of preview
:rtype: twilio.rest.preview.understand.Understand
"""
if self._understand is None:
self._understand = Understand(self)
return self._understand
@property
def wireless(self):
"""
:returns: Version wireless of preview
:rtype: twilio.rest.preview.wireless.Wireless
"""
if self._wireless is None:
self._wireless = Wireless(self)
return self._wireless
@property
def trusted_comms(self):
"""
:returns: Version trusted_comms of preview
:rtype: twilio.rest.preview.trusted_comms.TrustedComms
"""
if self._trusted_comms is None:
self._trusted_comms = TrustedComms(self)
return self._trusted_comms
@property
def exports(self):
"""
:rtype: twilio.rest.preview.bulk_exports.export.ExportList
"""
return self.bulk_exports.exports
@property
def export_configuration(self):
"""
:rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationList
"""
return self.bulk_exports.export_configuration
@property
def fleets(self):
"""
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetList
"""
return self.deployed_devices.fleets
@property
def authorization_documents(self):
"""
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentList
"""
return self.hosted_numbers.authorization_documents
@property
def hosted_number_orders(self):
"""
:rtype: twilio.rest.preview.hosted_numbers.hosted_number_order.HostedNumberOrderList
"""
return self.hosted_numbers.hosted_number_orders
@property
def available_add_ons(self):
"""
:rtype: twilio.rest.preview.marketplace.available_add_on.AvailableAddOnList
"""
return self.marketplace.available_add_ons
@property
def installed_add_ons(self):
"""
:rtype: twilio.rest.preview.marketplace.installed_add_on.InstalledAddOnList
"""
return self.marketplace.installed_add_ons
@property
def services(self):
"""
:rtype: twilio.rest.preview.sync.service.ServiceList
"""
return self.sync.services
@property
def assistants(self):
"""
:rtype: twilio.rest.preview.understand.assistant.AssistantList
"""
return self.understand.assistants
@property
def commands(self):
"""
:rtype: twilio.rest.preview.wireless.command.CommandList
"""
return self.wireless.commands
@property
def rate_plans(self):
"""
:rtype: twilio.rest.preview.wireless.rate_plan.RatePlanList
"""
return self.wireless.rate_plans
@property
def sims(self):
"""
:rtype: twilio.rest.preview.wireless.sim.SimList
"""
return self.wireless.sims
@property
def branded_calls(self):
"""
:rtype: twilio.rest.preview.trusted_comms.branded_call.BrandedCallList
"""
return self.trusted_comms.branded_calls
@property
def businesses(self):
"""
:rtype: twilio.rest.preview.trusted_comms.business.BusinessList
"""
return self.trusted_comms.businesses
@property
def cps(self):
"""
:rtype: twilio.rest.preview.trusted_comms.cps.CpsList
"""
return self.trusted_comms.cps
@property
def current_calls(self):
"""
:rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallList
"""
return self.trusted_comms.current_calls
@property
def devices(self):
"""
:rtype: twilio.rest.preview.trusted_comms.device.DeviceList
"""
return self.trusted_comms.devices
@property
def phone_calls(self):
"""
:rtype: twilio.rest.preview.trusted_comms.phone_call.PhoneCallList
"""
return self.trusted_comms.phone_calls
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview>'
|
[
"ktailor@Kishans-MacBook-Air.local"
] |
ktailor@Kishans-MacBook-Air.local
|
da01705f525324f663168eb74c1a77e66c4cc174
|
417b516b7c15779a8f93511a09ca213017d22415
|
/app/members/models.py
|
86ca7fd32e3082d6975e7a69d35f8d72a7ef0fc5
|
[
"MIT"
] |
permissive
|
krakiun/chargeflask
|
453f631cb5ba5a05ebeb1d318cce1b84e9637b18
|
dabca234a07550889927dd308c7d4ef8923da943
|
refs/heads/master
| 2021-08-14T06:55:56.017209
| 2017-11-13T20:39:18
| 2017-11-13T20:39:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
"""
filename: members.py
description: Model for Members in Committees.
created by: Omar De La Hoz (oed7416@rit.edu)
created on: 08/31/17
"""
from app import db
from itsdangerous import (TimedJSONWebSignatureSerializer
as Serializer, BadSignature, SignatureExpired)
members_table = db.Table('members', db.Model.metadata,
db.Column('committees_id', db.String(255), db.ForeignKey('committees.id')),
db.Column('users_id', db.String(255), db.ForeignKey('users.id'))
)
|
[
"omar.dlhz@hotmail.com"
] |
omar.dlhz@hotmail.com
|
400ac17153480a63df98dda5dac0d88bf318c97e
|
508321d683975b2339e5292202f3b7a51bfbe22d
|
/Userset.vim/ftplugin/python/CompletePack/PySide2/QtWidgets/QGraphicsPixmapItem.py
|
0b913af111c321403b7dbad1da4f899c98fdb78f
|
[] |
no_license
|
cundesi/vimSetSa
|
4947d97bcfe89e27fd2727423112bb37aac402e2
|
0d3f9e5724b471ab21aa1199cc3b4676e30f8aab
|
refs/heads/master
| 2020-03-28T05:54:44.721896
| 2018-08-31T07:23:41
| 2018-08-31T07:23:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,254
|
py
|
# encoding: utf-8
# module PySide2.QtWidgets
# from C:\Program Files\Autodesk\Maya2017\Python\lib\site-packages\PySide2\QtWidgets.pyd
# by generator 1.145
# no doc
# imports
import PySide2.QtCore as __PySide2_QtCore
import PySide2.QtGui as __PySide2_QtGui
import Shiboken as __Shiboken
from QGraphicsItem import QGraphicsItem
class QGraphicsPixmapItem(QGraphicsItem):
# no doc
def boundingRect(self, *args, **kwargs): # real signature unknown
pass
def contains(self, *args, **kwargs): # real signature unknown
pass
def extension(self, *args, **kwargs): # real signature unknown
pass
def isObscuredBy(self, *args, **kwargs): # real signature unknown
pass
def offset(self, *args, **kwargs): # real signature unknown
pass
def opaqueArea(self, *args, **kwargs): # real signature unknown
pass
def paint(self, *args, **kwargs): # real signature unknown
pass
def pixmap(self, *args, **kwargs): # real signature unknown
pass
def setOffset(self, *args, **kwargs): # real signature unknown
pass
def setPixmap(self, *args, **kwargs): # real signature unknown
pass
def setShapeMode(self, *args, **kwargs): # real signature unknown
pass
def setTransformationMode(self, *args, **kwargs): # real signature unknown
pass
def shape(self, *args, **kwargs): # real signature unknown
pass
def shapeMode(self, *args, **kwargs): # real signature unknown
pass
def transformationMode(self, *args, **kwargs): # real signature unknown
pass
def type(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
BoundingRectShape = None # (!) real value is ''
HeuristicMaskShape = None # (!) real value is ''
MaskShape = None # (!) real value is ''
ShapeMode = None # (!) real value is ''
|
[
"noreply@github.com"
] |
noreply@github.com
|
23ea96da8e03e7b4983c3084a4f0f423d4cca8a5
|
18f5c71436a22da1c3835b562f1538722114f2af
|
/backend/api/urls.py
|
a587b3e16148b0d73e2da279b169f27c4f01b4e0
|
[
"MIT"
] |
permissive
|
timakaryo/antrean
|
54968fd960e8d886a6224d5d5a6f21c212f6acad
|
8eab42a0a17092355adc9b56d2f29dbf53fa2a54
|
refs/heads/master
| 2021-01-20T21:09:17.098029
| 2017-10-23T13:00:42
| 2017-10-23T13:00:42
| 101,754,612
| 0
| 0
| null | 2017-10-23T13:00:43
| 2017-08-29T11:39:50
|
Python
|
UTF-8
|
Python
| false
| false
| 144
|
py
|
from django.conf.urls import url
from rest_framework_jwt.views import obtain_jwt_token
urlpatterns = [
url(r'^token/', obtain_jwt_token)
]
|
[
"chiputera@gmail.com"
] |
chiputera@gmail.com
|
ee7907386d75d7bd896092d36804b4b05cab52d4
|
b04b52614b31a5d77239d19b4259f84abb2cbea5
|
/adminpanel/migrations/0023_album.py
|
03b00f2f23fc6b2ecfeaed66b54429ce8761ca75
|
[] |
no_license
|
nawed-xigmapro/vibanote
|
ae348561d7c6b8b208a1b6a8f46db2c928e3df91
|
03f2f23497203e4dbfde43c5bb8898a5e77492fa
|
refs/heads/master
| 2021-01-19T19:01:59.108160
| 2017-08-23T13:38:30
| 2017-08-23T13:38:30
| 101,181,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,700
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-03 14:11
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('adminpanel', '0022_remove_video_artist'),
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=100, null=True)),
('slug', models.CharField(blank=True, max_length=100, null=True)),
('subtitle', models.CharField(blank=True, max_length=200, null=True)),
('album_image', models.ImageField(null=True, upload_to='albumimages')),
('dedicate', models.CharField(blank=True, max_length=255, null=True)),
('is_approved', models.IntegerField(blank=True, null=True)),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('genre', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='adminpanel.Genre')),
('types', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='adminpanel.Type')),
('uploadby', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"nawed@xigmapro.com"
] |
nawed@xigmapro.com
|
fb3123fc3cc659b25547cf2182f90805f6614142
|
9412f4ba84f6b54f67c0d6534ab3804fa621bee8
|
/order/migrations/0011_auto__add_field_order_carrier.py
|
d7dad8989e163e5ead999ccf8f111e992c583e0f
|
[] |
no_license
|
ruspython/adler-m
|
5fbeb44d1a5187d481391e49d6cca86b69d14b7a
|
c9b27ee7c1794c4632742887599545893621a58d
|
refs/heads/master
| 2020-12-24T14:45:16.535606
| 2014-12-01T10:55:34
| 2014-12-01T10:55:34
| 31,331,474
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,055
|
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Order.carrier'
db.add_column(u'order_order', 'carrier',
self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
    """Revert the migration: drop the 'carrier' column from order_order."""
    # Deleting field 'Order.carrier'
    db.delete_column(u'order_order', 'carrier')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'catalog.item': {
'Meta': {'ordering': "['id']", 'object_name': 'Item'},
'article': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'brand': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'brand_en': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'brand_ru': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'color_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'color_ru': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment_ru': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_just_updated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'length': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'manufacturer': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'manufacturer_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'manufacturer_ru': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'material': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'name_ru': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'new_before': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'note_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'note_ru': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'price': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'price_min': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'section': ('django.db.models.fields.CharField', [], {'default': "'model'", 'max_length': '16'}),
'series': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'series_en': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'series_ru': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status_action': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status_back_in_stock': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status_new': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status_not_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status_on_the_way': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status_sale': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status_without_image': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['catalog.ItemTag']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'type_en': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'type_ru': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'weight': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
u'catalog.itemtag': {
'Meta': {'object_name': 'ItemTag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'order.order': {
'Meta': {'ordering': "['-add_time']", 'object_name': 'Order'},
'add_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'address_building': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'address_city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'address_flat': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'address_house': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'address_street': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'address_zipcode': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'carrier': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'client_last_name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'client_name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'client_second_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'delivery_method': ('django.db.models.fields.CharField', [], {'default': "'postal'", 'max_length': '32', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '32', 'blank': 'True'}),
'payment_method': ('django.db.models.fields.CharField', [], {'default': "'pc'", 'max_length': '2'}),
'payment_status': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'total_price': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'order.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'article': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'discount': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Item']"}),
'manufacturer': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['order.Order']"}),
'price': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'quantity': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'scale': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['order']
|
[
"vlad0058@gmail.com"
] |
vlad0058@gmail.com
|
d57958a6781674de72dcaadbfbe121da93e285e6
|
b9499f3f235e5da9c3e83782d114ad41586e7bcd
|
/data_utils.py
|
4b8623b9df9bd5b974c4a8d17e921d339690cdc7
|
[
"MIT"
] |
permissive
|
maremita/-fork-Phylo_structural_EM
|
9e3e57e6bd5406996d52e77d01b69ff2c7340d61
|
609fdd94874e02b02101649033390335e34c43cb
|
refs/heads/master
| 2023-08-30T04:55:49.227399
| 2020-03-19T10:44:58
| 2020-03-19T10:44:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,253
|
py
|
import numpy as np
import re
import networkx as nx
# define parameters
nuc_names = ['A', 'C', 'G', 'T']
transform = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
regex = re.compile('[ACGT]')
def get_char_data(name='infile'):
    """
    Read an alignment file via parse_data and return it as a character matrix.

    NOTE(review): despite the function name and variable name, the returned
    array holds the nucleotide *letters* ('A', 'C', 'G', 'T'), not integer
    codes -- the module-level `transform` mapping is never applied here.

    Returns
    -------
    (numpy chararray of shape (N, M), N, M) where N is the number of
    sequences and M the sequence length declared in the file header.
    """
    # Local pattern shadows the module-level `regex`; same character set.
    regex = re.compile('[ATCG]')
    # transform = {'A': 0, 'T': 1, 'C': 2, 'G': 3}
    sequences, N, M = parse_data(name)
    # Fixed-width unicode character matrix, one row per sequence.
    numeric_sequences = np.chararray((N, M), unicode=True)
    n = 0
    for seq in sequences:
        # Keep only nucleotide characters (drops labels, gaps, whitespace).
        sites = re.findall(regex, seq)
        if len(sites) == 0 or sites is None:
            # Line contained no nucleotides (e.g. blank line) -- skip it.
            continue
        # Take the trailing M sites so any leading taxon name is ignored.
        sites = sites[-M:]
        numeric_sequences[n, :] = np.array([site for site in sites])
        n += 1
    return numeric_sequences, N, M
def parse_data(name='infile'):
    """
    Parse a phylip-like alignment file.

    The first line holds two integers: the number of sequences (N) and the
    sequence length (M).  Each of the next N lines holds one sequence.

    Returns
    -------
    (list of N sequence strings, N, M)
    """
    # Use a context manager so the file is closed even on parse errors
    # (the original left the handle open).
    with open(name, 'r') as f:
        meta_data = f.readline().split()
        num_sequences = int(meta_data[0])
        len_sequences = int(meta_data[1])
        # Bug fix: read one line per *sequence* (N lines), not one line per
        # *site* (M lines).  The old loop used len_sequences here, which
        # truncated or padded the list with '' whenever N != M.
        sequences = [f.readline().strip('\n') for i in range(num_sequences)]
    return sequences, num_sequences, len_sequences
# simulate sequences given the tree topology and rate matrices
def simulate_seq(tree, evo_model, ndata=10):
    """
    Simulate nucleotide sequence data down a phylogenetic tree.

    Parameters
    ----------
    tree : networkx graph whose nodes carry 'type' ('root' or other), branch
        length 't', and 'parent' attributes.  The code assumes the root has
        the highest node id and leaves occupy ids 0..n_leaves-1.
    evo_model : substitution model exposing trans_matrix(t) (a 4x4 transition
        probability matrix for branch length t) and stat_prob (stationary
        distribution over the four nucleotides).
    ndata : number of independent sites to simulate.

    Returns
    -------
    numpy array of shape (n_leaves, ndata) holding nucleotide letters.
    """
    n_nodes = len(tree)
    root = n_nodes - 1
    # Assumes a full binary tree: n_leaves = (n_nodes + 1) / 2.
    n_leaves = (n_nodes + 1) // 2
    pt_matrix = [np.zeros((4, 4)) for i in range(2 * n_leaves - 2)]
    # do postorder tree traversal to compute the transition matrices
    for node in nx.dfs_postorder_nodes(tree, root):
        if not tree.nodes[node]['type'] == 'root':
            t = tree.nodes[node]['t']
            pt_matrix[node] = evo_model.trans_matrix(t)
    simuData = []
    status = [''] * (2 * n_leaves - 1)
    for run in range(ndata):
        # Preorder walk: draw the root state from the stationary
        # distribution, then each child from its branch's transition row
        # conditioned on the already-drawn parent state.
        for node in nx.dfs_preorder_nodes(tree, root):
            if tree.nodes[node]['type'] == 'root':
                status[node] = np.random.choice(4, size=1, p=evo_model.stat_prob)[0]
            else:
                parent = tree.nodes[node]['parent']
                status[node] = np.random.choice(4, size=1, p=pt_matrix[node][status[parent]])[0]
        # Record only the leaf states, mapped back to nucleotide letters.
        simuData.append([nuc_names[i] for i in status[:n_leaves]])
    return np.transpose(simuData)
|
[
"okviman@kth.se"
] |
okviman@kth.se
|
bbb6ba991cd66865d214bb9394d18bf5215976cd
|
7bb1c8cabeda75bd7db913e2396da383a4e7ba83
|
/smtpapi/send.py
|
3299ba75e99343761e54794f47893946e7bb390d
|
[] |
no_license
|
dlinsg/test
|
273945d72a2d210e96ac441c1c8a60ab0bd5d7a0
|
4804381299797c4ff61bc23aa95b0cd306f93f5f
|
refs/heads/master
| 2021-01-17T10:06:44.653743
| 2016-04-10T23:00:32
| 2016-04-10T23:00:32
| 23,331,814
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
#!/usr/bin/python
# coding: utf-8
# Test script (Python 2): sends a single SendGrid email exercising SMTPAPI
# substitution tags and non-ASCII UTF-8 content in subject and values.
import datetime
# Today's date, formatted MM/DD/YYYY, substituted for %tag3% below.
today = str(datetime.date.today().strftime('%m/%d/%Y'))
import sendgrid
# NOTE(review): credentials are hard-coded -- acceptable for a throwaway
# test, but they should come from the environment in anything real.
sg = sendgrid.SendGridClient('dlintestapi', 'testingapi123')
message = sendgrid.Mail()
message.set_from('Dave Lin <david.lin@sendgrid.com>')
message.add_to('David Lin <david.lin@sendgrid.com>')
message.set_subject("Hello %tag1% García, your résumé balance is %tag2% as of %tag3%. Thank you and 谢谢!!!")
message.set_text("test email")
# Each %tagN% placeholder in the subject is replaced per-recipient.
message.add_substitution("%tag1%", "José")
message.add_substitution("%tag2%", "1.234£")
message.add_substitution("%tag3%", today)
# status is the HTTP status code, msg the raw response body.
status, msg = sg.send(message)
print str(status) + ' ' + msg
|
[
"david.lin@sendgrid.com"
] |
david.lin@sendgrid.com
|
441e3e75fd6b5ef8cc403e0b4b73843eb432393c
|
62c6e50d148f1ccd51001abedbfe748fda94427e
|
/backend/cookieapp/views.py
|
65b7b4217bfa0991bcd696807104284c0951ead4
|
[] |
no_license
|
i7-Ryzen/django-jwt-httponly-cookie
|
be27936d0d7111688a0b2d5811edd891c2b5c925
|
bb21ae75b05f7b42e98da6a69f9280c51a1171fd
|
refs/heads/main
| 2023-05-06T15:30:01.870387
| 2021-05-24T05:35:10
| 2021-05-24T05:35:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,870
|
py
|
from rest_framework_simplejwt.tokens import RefreshToken
from django.middleware import csrf
from rest_framework.views import APIView
from rest_framework.response import Response
from django.contrib.auth import authenticate
from django.conf import settings
from rest_framework import status
def get_tokens_for_user(user):
    """Issue a fresh JWT pair for *user*.

    Returns a dict with the serialized 'refresh' and 'access' tokens.
    """
    token = RefreshToken.for_user(user)
    access = token.access_token
    return {'refresh': str(token), 'access': str(access)}
class LoginView(APIView):
    """Authenticate a user and deliver the access JWT in an HttpOnly cookie.

    On success the access token is returned both in the response body and in
    a cookie whose attributes are driven by settings.SIMPLE_JWT, and a CSRF
    token is attached for subsequent unsafe requests.
    """

    def post(self, request, format=None):
        data = request.data
        response = Response()
        username = data.get('username', None)
        password = data.get('password', None)
        user = authenticate(username=username, password=password)
        if user is not None:
            if user.is_active:
                # Issue the refresh/access pair for the authenticated user.
                data = get_tokens_for_user(user)
                # Store the access token in an HttpOnly cookie; all cookie
                # attributes come from the SIMPLE_JWT settings dict.
                response.set_cookie(
                    key = settings.SIMPLE_JWT['AUTH_COOKIE'],
                    value = data["access"],
                    expires = settings.SIMPLE_JWT['ACCESS_TOKEN_LIFETIME'],
                    secure = settings.SIMPLE_JWT['AUTH_COOKIE_SECURE'],
                    httponly = settings.SIMPLE_JWT['AUTH_COOKIE_HTTP_ONLY'],
                    samesite = settings.SIMPLE_JWT['AUTH_COOKIE_SAMESITE']
                )
                # Ensure the client receives a CSRF token alongside the cookie.
                csrf.get_token(request)
                response.data = {"Success" : "Login successfully","data":data}
                return response
            else:
                # NOTE(review): 404 for a disabled account is unconventional
                # (403 would be more typical); behavior left unchanged.
                return Response({"No active" : "This account is not active!!"}, status=status.HTTP_404_NOT_FOUND)
        else:
            return Response({"Invalid" : "Invalid username or password!!"}, status=status.HTTP_404_NOT_FOUND)
|
[
"abhishekk580@gmail.com"
] |
abhishekk580@gmail.com
|
7c9c32d90f97ed66f476c030951ca39cf376ba56
|
dd5d54eb45b8993769310a679c14b20600005793
|
/data/process_data.py
|
338c21443b179bace0b3b4d1f8e2e8cada447d81
|
[
"MIT"
] |
permissive
|
amalpm-rog/Disaster-Response-Pipeline
|
b5d3653ac9741ea841b67c9f94486afb30766cf3
|
47688decc1d8d0daa2be3ab9edb8bb391a1d1661
|
refs/heads/master
| 2022-11-15T12:10:20.539038
| 2020-07-06T05:36:20
| 2020-07-06T05:36:20
| 277,448,793
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,284
|
py
|
#! /usr/bin/env python3
# coding=utf-8
# The Data processing module
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
    """Load the messages and categories CSV files and join them on 'id'.

    Parameters
    ----------
    messages_filepath : string
        location of the messages csv file
    categories_filepath : string
        location of the categories csv file

    Returns
    -------
    pandas.DataFrame
        The two inputs inner-joined on their shared 'id' column.
    """
    messages_df = pd.read_csv(messages_filepath)
    categories_df = pd.read_csv(categories_filepath)
    # DataFrame.merge with defaults is an inner join, identical to pd.merge.
    return messages_df.merge(categories_df, on='id')
def clean_data(df):
    """Expand the semicolon-packed 'categories' column into one 0/1 integer
    column per category and drop duplicate rows.

    Parameters
    ----------
    df : pandas.DataFrame
        Merged frame with a raw 'categories' column shaped like
        'related-1;request-0;...'.

    Returns
    -------
    pandas.DataFrame
        The cleaned frame: original columns (minus 'categories') plus one
        integer column per category, duplicates removed.
    """
    # One column per 'name-digit' token.
    expanded = df['categories'].str.split(pat=';', n=None, expand=True)
    # Column names come from the first row, stripping the trailing '-<digit>'.
    expanded.columns = [token[:-2] for token in expanded.iloc[0]]
    # The value of each cell is its trailing digit.
    for name in expanded.columns:
        expanded[name] = expanded[name].str[-1].astype(int)
    combined = pd.concat([df.drop(columns='categories'), expanded], axis=1)
    return combined.drop_duplicates(keep='first')
def save_data(df, database_filename):
    """
    Write a dataframe to the 'messages_categories' table of a SQLite database.

    Parameters
    ----------
    df : pandas.DataFrame
        The pandas.Dataframe to be written
    database_filename : string
        The filename path for the database

    Returns
    -------
    None
    """
    # Fix: report the shape rather than interpolating the whole dataframe,
    # which dumped every row to the console.
    print('Writing dataframe of shape {} to {} database: '.format(df.shape, database_filename))
    engine = create_engine('sqlite:///{}'.format(database_filename))
    # Fix: if_exists='replace' makes the ETL pipeline re-runnable; the
    # default ('fail') raised ValueError when the table already existed.
    df.to_sql('messages_categories', engine, index=False, if_exists='replace')
def main():
    """Command-line entry point: load, clean, and persist the disaster data.

    Expects exactly three positional arguments: the messages CSV path, the
    categories CSV path, and the output SQLite database path.
    """
    if len(sys.argv) != 4:
        # Guard clause: explain the expected invocation and bail out.
        print('Please provide the filepaths of the messages and categories '
              'datasets as the first and second argument respectively, as '
              'well as the filepath of the database to save the cleaned data '
              'to as the third argument. \n\nExample: python process_data.py '
              'disaster_messages.csv disaster_categories.csv '
              'DisasterResponse.db')
        return

    messages_filepath, categories_filepath, database_filepath = sys.argv[1:]

    print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
          .format(messages_filepath, categories_filepath))
    df = load_data(messages_filepath, categories_filepath)

    print('Cleaning data...')
    df = clean_data(df)

    print('Saving data...\n DATABASE: {}'.format(database_filepath))
    save_data(df, database_filepath)

    print('Cleaned data saved to database!')
|
[
"noreply@github.com"
] |
noreply@github.com
|
ca64375f7c116ff02f021ac13a54ca325742e802
|
56f9208443ae7f3dc6b06ee840e58b2edc74b627
|
/ll_env/bin/django-admin.py
|
a6d719b9040c4d62203d5a90b9b2b9a4a5c4978e
|
[] |
no_license
|
haruyamu/learing_log
|
d3891f2f66ff634feb36b52eaa04f251c4fdb571
|
be85bff1ccb59f46c7ef332e8a0e4fd9c1530678
|
refs/heads/main
| 2023-02-24T18:53:40.675780
| 2021-01-27T02:34:23
| 2021-01-27T02:34:23
| 332,979,285
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 690
|
py
|
#!/Users/haruya/projects/learing_log/ll_env/bin/python3
# When the django-admin.py deprecation ends, remove this script.
# Virtualenv shim: forwards to Django's command runner while warning that
# the 'django-admin.py' spelling is deprecated (removed in Django 4.0).
import warnings

from django.core import management

try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    # Django >= 4.0: the warning class is gone, and so is this script's
    # reason to exist -- fail loudly with removal instructions.
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )

if __name__ == "__main__":
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
|
[
"haruya.20020331@icloud.com"
] |
haruya.20020331@icloud.com
|
639425d836fa30470f16437a549f853bba95ed67
|
b92d4895baa78683e328a32295114e70e47b7ff2
|
/genetics/phasing/make_readbackPhasing_samplesheet.py
|
6937323194973c13ce7b72758dc851164d21273d
|
[] |
no_license
|
npklein/random_scripts
|
dcd367b465b6c32da90f7390372eee1da1d78c41
|
11eda8a4889ab533be7eff19e493fd66938ba3a3
|
refs/heads/master
| 2021-01-19T22:49:32.887873
| 2018-05-28T13:35:08
| 2018-05-28T13:35:08
| 88,863,994
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,528
|
py
|
import os
# Build individual_bam_link.txt, a sample sheet (individualID,sampleName,bam)
# for read-backed phasing, from three cohorts.  Every candidate BAM path is
# probed with os.path.exists; missing files are reported and skipped.

# --- Cohort 1: diagnostic samples, one row per unique individual id ---------
seen = []
with open('sample_individual_idLink.txt') as input_file, open('individual_bam_link.txt','w') as out:
    out.write('individualID,sampleName,bam\n')
    # Skip the header line of the id-link file.
    input_file.readline()
    for line in input_file:
        sample_id = line.split('\t')[0]
        individual_id = line.strip().split('\t')[1]
        if individual_id in seen:
            # Only the first occurrence of each individual is kept.
            continue
        seen.append(individual_id)
        path = '/groups/umcg-bios/tmp03/projects/masked_BAMs/diagnostic/mergeAndReheaderBAMs/results/'+individual_id+'.mdup.sorted.readGroupsAdded.bam'
        if os.path.exists(path):
            bam_file = path
        else:
            print(individual_id,' bam not found')
            print(path)
            continue
        out.write(individual_id+','+individual_id+','+bam_file+'\n')

# --- Cohort 2: BIOS freeze2 samples, with LLDeep-id -> run-id fallback ------
# Map sample id (e.g. 'LL-LLDeep_0043') to sequencing run id.
converter = {}
with open('/groups/umcg-bios/tmp03/projects/bbmriSampleInfo/sampleSheetDirectlyFromMdb26-01-2016.txt') as lldeepConverter:
    for line in lldeepConverter:
        line = line.split('\t')
        id = line[0]
        runID = line[1]
        converter[id] = runID
#LL-LLDeep_0043 BD1NR9ACXX-4-19
with open('freeze2_complete_GTE_Groningen_07092016.txt') as input_file,open('individual_bam_link.txt','a') as out:
    input_file.readline()
    for line in input_file:
        line = line.strip().split('\t')
        path = '/groups/umcg-bios/tmp03/projects/masked_BAMs/BAMsReadGroupsAdded/'+line[1]+'.mdup.sorted.readGroupsAdded.bam'
        if os.path.exists(path):
            bam_file = path
        else:
            # Fallback: translate the LLDeep id to a run id and retry.
            lldeep_id = '_'.join(line[0].split('_')[1:])
            newID = converter['LL-'+lldeep_id]
            line[0] = newID
            path = '/groups/umcg-bios/tmp03/projects/masked_BAMs/BAMsReadGroupsAdded/'+newID+'.mdup.sorted.readGroupsAdded.bam'
            if os.path.exists(path):
                bam_file = path
            else:
                print(path+' bam not found')
                continue
        out.write(line[0]+','+line[1]+','+bam_file+'\n')

# --- Cohort 3: LLDeep samples not present in BIOS ---------------------------
with open('lldeepNotInBiosSamples.txt') as input_file, open('individual_bam_link.txt','a') as out:
    for line in input_file:
        line = line.strip().split('\t')
        sampleID = line[0]
        genotypeID = line[1]
        path = '/groups/umcg-bios/tmp03/projects/masked_BAMs/lldeepNotInBIOS/'+sampleID+'-lib1.bam'
        if os.path.exists(path):
            bam_file = path
        else:
            print(path+' bam not found')
            continue
        out.write(sampleID+','+genotypeID+','+bam_file+'\n')
|
[
"niekdeklein@gmail.com"
] |
niekdeklein@gmail.com
|
f927f5c141a4938dceb50e383256385903c97b7d
|
7e74dae3390c0dd2751c9353c6051350ab4f1d86
|
/blog_python/blog/migrations/0014_auto_20201015_2350.py
|
30bf6597ab55111815756e299f6211904d794eb1
|
[] |
no_license
|
Eugin-Paul/Blog
|
70c327198fb93ef569246a02f9a56f2d57d0b53e
|
7f562b3cd17f477e5ea97ba04132554262c3f9b5
|
refs/heads/master
| 2023-01-01T09:02:41.702171
| 2020-10-22T07:15:38
| 2020-10-22T07:15:38
| 305,793,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
# Generated by Django 3.0.7 on 2020-10-15 18:20
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the Comment model's 'content' field to 'comment'."""

    dependencies = [
        ('blog', '0013_auto_20201015_2341'),
    ]

    operations = [
        migrations.RenameField(
            model_name='comment',
            old_name='content',
            new_name='comment',
        ),
    ]
|
[
"euginpaul1717@gmail.com"
] |
euginpaul1717@gmail.com
|
6b91829cc22bc82d5f07ab40c654250f7a903fbb
|
dea24559930c75ed7fd6016464e6844644e7bf06
|
/plot_kolmogorov.py
|
ad925c613472e1f535acd0edfc6a4c1ffb2a3f2f
|
[] |
no_license
|
sheyma/fitzefatze
|
45f8da30f9bf4403c27feac6529da2367c9a391b
|
11c6839c86fe131ab803f9e39d4fe738b75e8b54
|
refs/heads/master
| 2021-01-11T22:19:01.351693
| 2017-01-26T14:56:18
| 2017-01-26T14:56:18
| 78,947,686
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,834
|
py
|
import numpy as np
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as pl
import sys, glob, os
from scipy import stats
import collections
from math import factorial, sqrt, ceil
# check the loaded matrix if it is symmetric
def load_matrix(file):
    """Load a whitespace-separated matrix from *file* and verify symmetry.

    For a symmetric matrix the transpose equals the original, so returning
    AT is equivalent to returning the matrix as written in the file.

    Raises
    ------
    ValueError
        If the loaded matrix is not square or not symmetric.
    """
    A = np.loadtxt(file, unpack=True)
    AT = np.transpose(A)
    # check the symmetry
    if A.shape[0] != A.shape[1] or not (A == AT).all():
        # Fix: attach the message to the exception instead of a bare
        # Python 2 print statement (a SyntaxError under Python 3).
        raise ValueError("error: loaded matrix is not symmetric")
    return AT
def corr_histo(corr_matrix):
    """Normalized 100-bin histogram of all correlation values in [-1, 1].

    Returns the density array (length 100); density times the bin width
    (0.02) sums to 1.
    """
    corr_flat = np.ndarray.flatten(corr_matrix)
    corr_max = 1.0
    corr_min = -1.0
    bin_nu = 100
    # get a normalized histogram
    # Fix: the 'normed' keyword was deprecated and removed from NumPy
    # (>= 1.24); 'density' is the drop-in replacement.
    hist, bin_edges = np.histogram(corr_flat, bins=bin_nu,
                                   range=[corr_min, corr_max], density=True)
    return hist
def compare_kolmo(name_A, name_B, THR, SIG):
    """Kolmogorov-Smirnov distance between correlation histograms of two
    ensembles over a (threshold, sigma) parameter grid.

    Parameters
    ----------
    name_A, name_B : printf-style filename templates taking (threshold, sigma).
    THR : iterable of threshold values (rows of the result).
    SIG : iterable of sigma values (columns of the result).

    Returns
    -------
    2-D numpy array of KS statistics with shape (len(THR), len(SIG)),
    rows ordered by ascending threshold.
    """
    R_thr = {}
    # Fix: iterate over the THR/SIG *arguments* instead of silently reading
    # the module-level thr_array/sig_array globals.  The call site already
    # passes those same arrays, so current behavior is unchanged.
    for thr in THR:
        R_temp = []
        for sig in SIG:
            input_A = name_A % (thr, sig)
            input_B = name_B % (thr, sig)
            mtx_A = load_matrix(input_A)
            HistA = corr_histo(mtx_A)
            mtx_B = load_matrix(input_B)
            HistB = corr_histo(mtx_B)
            # Two-sample KS statistic between the two histograms.
            diff, p = stats.ks_2samp(HistA, HistB)
            R_temp = np.append(R_temp, diff)
        R_thr[thr] = np.array(R_temp)
    Ordered_R = collections.OrderedDict(sorted(R_thr.items()))
    # list(...) is required on Python 3, where .values() is a view.
    datam = np.array(list(Ordered_R.values()))
    return datam
# --- Parameter-space scan ---------------------------------------------------
# Filename templates for the brain-network and Erdos-Renyi correlation
# matrices; the %-placeholders are (threshold, sigma).
data_brain = '/run/media/sheyma/0a5437d3-d51c-4c40-8c7a-06738fd0c83a/sheyma_bayrak_2015/jobs_corr/'
name_brain = data_brain + 'acp_w_0_ADJ_thr_0.%02d_sigma=%g_D=0.05_v=30.0_tmax=45000_FHN_corr.dat'
data_random = '/var/tmp/fitzefatze-hydra/jobs_erdos01/'
name_random = data_random + 'acp_w_thr_0.%02d_erdos_sigma=%.3f_D=0.05_v=30.0_tmax=45000_pearson.dat'
# Threshold grid (34..82 step 4, interpreted as 0.xx) and sigma grid.
thr_array = np.arange(34, 86, 4)
sig_array = np.array([0.050, 0.045, 0.040, 0.035, 0.030, 0.025, 0.020, 0.015, 0.010, 0.005 ])
KS = compare_kolmo(name_brain, name_random, thr_array, sig_array)
#Parameter Space Plot
# KS distance rendered as a heat map over (p = threshold, c = sigma).
fig, ax = pl.subplots(figsize=(15,12))
pl.subplots_adjust(left=0.15, right=0.95, top=0.93, bottom=0.13)
pl.subplot(1,1,1)
pl.imshow(np.transpose(KS), interpolation='nearest',
          cmap='jet', aspect='auto')
# Tick labels are shown in rescaled units (p and c), not raw array indices.
a = np.array([0.38, 0.50, 0.62, 0.74])
b = np.array([0.05, 0.04, 0.03, 0.02, 0.01])
separ_xthick = ceil(float(len(thr_array))/len(a)) -1
pl.xticks(np.arange(1,len(thr_array), separ_xthick), a, fontsize = 50)
pl.yticks([0, 2, 4, 6, 8], b, fontsize = 50)
pl.tick_params(which='major', length=12, width=5)
pl.ylabel('$c$', fontsize = 50)
pl.xlabel('$p$', fontsize = 50)
cbar = pl.colorbar()
cbar.ax.set_title('d', fontsize = 50)
for t in cbar.ax.get_yticklabels():
    t.set_fontsize(50)
pl.show()
|
[
"sheymaba@gmail.com"
] |
sheymaba@gmail.com
|
5fa12fe4e4a40d5f551f4cbcfc107b8a901841e6
|
db774c9a29620e8374740f7a30bd27df40cd8da3
|
/pars/bin/easy_install
|
2f05c1d74e3ab9b64838db9f13794f43ed9943bd
|
[] |
no_license
|
Rakhimzhan312/Part2Task19Parsinglalafo
|
7330b79912d96c05f9b32a8adf08a628df587378
|
1f7edf5ce3e65164799f0f2dfcc137e33a1b0c7d
|
refs/heads/master
| 2020-09-15T22:54:34.115603
| 2019-11-23T11:17:30
| 2019-11-23T11:17:30
| 223,576,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 292
|
#!/home/rakhimzhan/Desktop/Tasks/Chapterparsing/parsingtest/pars/bin/python3
# -*- coding: utf-8 -*-
# Virtualenv wrapper script: delegates to setuptools' easy_install main().
import re
import sys

from setuptools.command.easy_install import main

if __name__ == '__main__':
    # Normalize argv[0] by stripping '-script.py(w)' / '.exe' suffixes so
    # the command name is reported consistently across platforms.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"rakhimzhan312@gmail.com git config --global user.name Rakhimzhangit push -u origin mastergit statusgit push origin mastergit config --global user.email rakhimzhan312@gmail.com"
] |
rakhimzhan312@gmail.com git config --global user.name Rakhimzhangit push -u origin mastergit statusgit push origin mastergit config --global user.email rakhimzhan312@gmail.com
|
|
46bbf9daf0b61574b23a2631b6a78bc7caa69495
|
e5e2b7da41fda915cb849f031a0223e2ac354066
|
/sdk/python/pulumi_azure_native/documentdb/v20210515/sql_resource_sql_trigger.py
|
61599bbe3cb634dfa2ed1f8cf1d6c22dcfb144dd
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
johnbirdau/pulumi-azure-native
|
b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25
|
d676cc331caa0694d8be99cb90b93fa231e3c705
|
refs/heads/master
| 2023-05-06T06:48:05.040357
| 2021-06-01T20:42:38
| 2021-06-01T20:42:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,020
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['SqlResourceSqlTriggerArgs', 'SqlResourceSqlTrigger']
@pulumi.input_type
class SqlResourceSqlTriggerArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
container_name: pulumi.Input[str],
database_name: pulumi.Input[str],
resource: pulumi.Input['SqlTriggerResourceArgs'],
resource_group_name: pulumi.Input[str],
location: Optional[pulumi.Input[str]] = None,
options: Optional[pulumi.Input['CreateUpdateOptionsArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
trigger_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a SqlResourceSqlTrigger resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] container_name: Cosmos DB container name.
:param pulumi.Input[str] database_name: Cosmos DB database name.
:param pulumi.Input['SqlTriggerResourceArgs'] resource: The standard JSON format of a trigger
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] location: The location of the resource group to which the resource belongs.
:param pulumi.Input['CreateUpdateOptionsArgs'] options: A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
:param pulumi.Input[str] trigger_name: Cosmos DB trigger name.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "container_name", container_name)
pulumi.set(__self__, "database_name", database_name)
pulumi.set(__self__, "resource", resource)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if location is not None:
pulumi.set(__self__, "location", location)
if options is not None:
pulumi.set(__self__, "options", options)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if trigger_name is not None:
pulumi.set(__self__, "trigger_name", trigger_name)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
    """
    Cosmos DB database account name.
    """
    return pulumi.get(self, "account_name")

# Setter: stores the new value in this args object's Pulumi input bag.
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
    pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> pulumi.Input[str]:
    """
    Cosmos DB container name.
    """
    return pulumi.get(self, "container_name")

# Setter: stores the new value in this args object's Pulumi input bag.
@container_name.setter
def container_name(self, value: pulumi.Input[str]):
    pulumi.set(self, "container_name", value)
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> pulumi.Input[str]:
    """
    Cosmos DB database name.
    """
    return pulumi.get(self, "database_name")

# Setter: stores the new value in this args object's Pulumi input bag.
@database_name.setter
def database_name(self, value: pulumi.Input[str]):
    pulumi.set(self, "database_name", value)
@property
@pulumi.getter
def resource(self) -> pulumi.Input['SqlTriggerResourceArgs']:
    """
    The standard JSON format of a trigger
    """
    return pulumi.get(self, "resource")

# Setter: stores the new value in this args object's Pulumi input bag.
@resource.setter
def resource(self, value: pulumi.Input['SqlTriggerResourceArgs']):
    pulumi.set(self, "resource", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
    """
    The name of the resource group. The name is case insensitive.
    """
    return pulumi.get(self, "resource_group_name")

# Setter: stores the new value in this args object's Pulumi input bag.
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
    pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
    """
    The location of the resource group to which the resource belongs.
    """
    return pulumi.get(self, "location")

# Setter: optional input; stores the new value in the Pulumi input bag.
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "location", value)
@property
@pulumi.getter
def options(self) -> Optional[pulumi.Input['CreateUpdateOptionsArgs']]:
    """
    A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
    """
    return pulumi.get(self, "options")

# Setter: optional input; stores the new value in the Pulumi input bag.
@options.setter
def options(self, value: Optional[pulumi.Input['CreateUpdateOptionsArgs']]):
    pulumi.set(self, "options", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
    """
    Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
    """
    return pulumi.get(self, "tags")

# Setter: optional input; stores the new value in the Pulumi input bag.
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
    pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="triggerName")
def trigger_name(self) -> Optional[pulumi.Input[str]]:
    """
    Cosmos DB trigger name.
    """
    return pulumi.get(self, "trigger_name")

# Setter: optional input; stores the new value in the Pulumi input bag.
@trigger_name.setter
def trigger_name(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "trigger_name", value)
class SqlResourceSqlTrigger(pulumi.CustomResource):
    """An Azure Cosmos DB trigger (generated Pulumi resource wrapper)."""

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 database_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 options: Optional[pulumi.Input[pulumi.InputType['CreateUpdateOptionsArgs']]] = None,
                 resource: Optional[pulumi.Input[pulumi.InputType['SqlTriggerResourceArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 trigger_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        An Azure Cosmos DB trigger.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_name: Cosmos DB database account name.
        :param pulumi.Input[str] container_name: Cosmos DB container name.
        :param pulumi.Input[str] database_name: Cosmos DB database name.
        :param pulumi.Input[str] location: The location of the resource group to which the resource belongs.
        :param pulumi.Input[pulumi.InputType['CreateUpdateOptionsArgs']] options: A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
        :param pulumi.Input[pulumi.InputType['SqlTriggerResourceArgs']] resource: The standard JSON format of a trigger
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
        :param pulumi.Input[str] trigger_name: Cosmos DB trigger name.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: SqlResourceSqlTriggerArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        An Azure Cosmos DB trigger.

        :param str resource_name: The name of the resource.
        :param SqlResourceSqlTriggerArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    # Dispatcher: accepts either the (resource_name, args, opts) form or the
    # fully keyword-argument form, then forwards both to _internal_init.
    def __init__(__self__, resource_name: str, *args, **kwargs):
        resource_args, opts = _utilities.get_resource_args_opts(SqlResourceSqlTriggerArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 database_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 options: Optional[pulumi.Input[pulumi.InputType['CreateUpdateOptionsArgs']]] = None,
                 resource: Optional[pulumi.Input[pulumi.InputType['SqlTriggerResourceArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 trigger_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = SqlResourceSqlTriggerArgs.__new__(SqlResourceSqlTriggerArgs)
            # Required inputs: enforced only when creating (opts.urn unset).
            if account_name is None and not opts.urn:
                raise TypeError("Missing required property 'account_name'")
            __props__.__dict__["account_name"] = account_name
            if container_name is None and not opts.urn:
                raise TypeError("Missing required property 'container_name'")
            __props__.__dict__["container_name"] = container_name
            if database_name is None and not opts.urn:
                raise TypeError("Missing required property 'database_name'")
            __props__.__dict__["database_name"] = database_name
            __props__.__dict__["location"] = location
            __props__.__dict__["options"] = options
            if resource is None and not opts.urn:
                raise TypeError("Missing required property 'resource'")
            __props__.__dict__["resource"] = resource
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["tags"] = tags
            __props__.__dict__["trigger_name"] = trigger_name
            # Output-only properties: resolved by the provider at deploy time.
            __props__.__dict__["name"] = None
            __props__.__dict__["type"] = None
        # Aliases map this resource to every other API version of the same
        # Azure type so state moves transparently between versions.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb/v20210515:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20190801:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb/v20190801:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20191212:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb/v20191212:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20200301:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200301:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20200401:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200401:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20200601preview:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200601preview:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20200901:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200901:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20210115:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210115:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20210301preview:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210301preview:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20210315:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210315:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20210401preview:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210401preview:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20210415:SqlResourceSqlTrigger"),
                                                     pulumi.Alias(type_="azure-nextgen:documentdb/v20210415:SqlResourceSqlTrigger")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(SqlResourceSqlTrigger, __self__).__init__(
            'azure-native:documentdb/v20210515:SqlResourceSqlTrigger',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'SqlResourceSqlTrigger':
        """
        Get an existing SqlResourceSqlTrigger resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = SqlResourceSqlTriggerArgs.__new__(SqlResourceSqlTriggerArgs)
        # Only output properties are declared here; all values are resolved
        # from the existing resource identified by `id`.
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["resource"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        return SqlResourceSqlTrigger(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        The location of the resource group to which the resource belongs.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the ARM resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def resource(self) -> pulumi.Output[Optional['outputs.SqlTriggerGetPropertiesResponseResource']]:
        return pulumi.get(self, "resource")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of Azure resource.
        """
        return pulumi.get(self, "type")
|
[
"noreply@github.com"
] |
noreply@github.com
|
3ca302784c9b639fe76172e986949f7b16c5f686
|
b5df9e66d292ed332d2164ad6e454e6c5b333968
|
/fetch-text-gui.py
|
e816fd2902cd17b2da960c64d21cb6455f033786
|
[] |
no_license
|
ronandoolan2/python-gui
|
c3be55d34b9ca9e95451419acfa4cdda70bfdc90
|
84e935c1c06a22616224b1bab42a0c550502b467
|
refs/heads/master
| 2021-01-20T06:52:33.290336
| 2017-05-01T16:09:14
| 2017-05-01T16:09:14
| 89,939,334
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
from Tkinter import *

# Minimal Tkinter demo: an entry box plus a button that prints the entry's
# current text to stdout.
root = Tk()
svalue = StringVar()  # holds the entry widget's text
w = Entry(root, textvariable=svalue)  # single-line text input
# BUG FIX: Entry/Button widgets have no `.textbox` attribute; grid() must be
# called on the widget itself, otherwise this script dies with AttributeError.
w.grid(column=0, row=0)


def act():
    """Print the text currently typed in the entry widget."""
    print("you entered")
    print('%s' % svalue.get())


foo = Button(root, text="Press Me", command=act)
foo.grid(column=0, row=1)
root.mainloop()
|
[
"ronandoolan@gmail.com"
] |
ronandoolan@gmail.com
|
dd5fbc68c39d3c24641b9f746e2812d44fa78e62
|
e6d4a87dcf98e93bab92faa03f1b16253b728ac9
|
/algorithms/python/destinationCity/destinationCity.py
|
1b55d8c23b19986d5f6d1359d7af30216a4080a4
|
[] |
no_license
|
MichelleZ/leetcode
|
b5a58e1822e3f6ef8021b29d9bc9aca3fd3d416f
|
a390adeeb71e997b3c1a56c479825d4adda07ef9
|
refs/heads/main
| 2023-03-06T08:16:54.891699
| 2023-02-26T07:17:47
| 2023-02-26T07:17:47
| 326,904,500
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Source: https://leetcode.com/problems/destination-city/
# Author: Miao Zhang
# Date: 2021-05-06
class Solution:
    def destCity(self, paths: list) -> str:
        """Return the destination city of a line graph of paths.

        Each element of *paths* is a pair [cityA, cityB] meaning there is a
        direct path from cityA to cityB.  The destination is the only city
        with no outgoing path, i.e. one that never appears as a source.

        BUG FIX: the original annotated with `List[List[str]]` and used
        `collections.defaultdict` without importing either name, so merely
        importing this module raised NameError.  Rewritten with builtins only.

        :param paths: list of [source, destination] string pairs
        :return: the destination city, or '' if none exists (e.g. empty input)
        """
        # Every city that has an outgoing edge.
        sources = {src for src, _ in paths}
        # The destination is the first (and, per the problem, only) endpoint
        # that is never a source.
        for _, dst in paths:
            if dst not in sources:
                return dst
        return ''
|
[
"zhangdaxiaomiao@163.com"
] |
zhangdaxiaomiao@163.com
|
72c6b5820ec2373fc5c053015b127eae12ba7b5d
|
74fb05c7b5eddf2b368e181f38b9423a711bf2e0
|
/real_python_tutorails/iterators/iterators_example.py
|
ae43af08beec8d63c2765d453574e4ff98b5c5cb
|
[] |
no_license
|
musram/python_progs
|
426dcd4786e89b985e43284ab5a5b1ba79cb285e
|
ad1f7f2b87568ba653f839fe8fa45e90cbde5a63
|
refs/heads/master
| 2022-11-10T09:53:29.993436
| 2020-06-21T00:21:23
| 2020-06-21T00:21:23
| 264,607,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,387
|
py
|
# Tutorial script demonstrating the iterator protocol and generators.
# NOTE: it reads /etc/passwd and ends in an infinite tail-follow loop.
if __name__ == "__main__":
    names = ['sai', 'asi', 'isa']
    for name in names:
        print(name)
    # What a for-loop actually does internally: obtain an iterator and
    # repeatedly call next() on it.
    it = names.__iter__()
    print(next(it))
    # Files are iterators too: next() yields one line at a time.
    # NOTE(review): file is never closed — fine for a demo, use `with` in real code.
    f = open('/etc/passwd', 'r')
    it = f.__iter__()
    print(next(it))
    # Writing a generator
    # (1) generator function: `yield` suspends and resumes execution
    def countDown(n):
        print('Counting from' , n)
        while (n > 0):
            yield n
            n -= 1
        print('Done')
    for x in countDown(5):
        print(x)
    # A for-loop over a generator is equivalent to:
    c = countDown(5)
    it = c.__iter__()
    print(next(it))
    # Writing a generator
    # (2) generator expression
    it = ( x for x in range(5,0,-1))
    print(next(it))
    # Writing a generator
    # (3) class whose __iter__ is itself a generator function
    class CountDown:
        def __init__(self, n):
            self.n = n
        def __iter__(self):
            n = self.n
            while (n > 0):
                yield n
                n -= 1
    c = CountDown(5)
    for x in c:
        print(x)
    import os
    import time
    def follow(filename):
        # tail -f style generator: seek to EOF, then yield new lines as they
        # are appended, polling every 0.1 s.
        f = open(filename, 'r')
        f.seek(0, os.SEEK_END)
        while True:
            line = f.readline()
            if not line:
                time.sleep(0.1)
                continue
            yield line
    # NOTE: this loop never terminates (follow() yields forever).
    # NOTE(review): /etc/passwd fields are ':'-separated; splitting on ','
    # looks wrong — confirm the intended input file.
    for line in follow('/etc/passwd'):
        row = line.split(',')
        print(row)
|
[
"musram@gmail.com"
] |
musram@gmail.com
|
a516a1d9d6566da6c0e8403dbfd46b44eaa1bf43
|
5b3e4b9263c2fcbec1fc5890e4a6035aeb9637f7
|
/case/test_cate_gory_second.py
|
ad6bc35d090a77b8d6c162699efc3033bd566863
|
[] |
no_license
|
cheng2020-G/fastapp
|
68216bcba5860a51a765c64989b5e99dc53b2e3e
|
d55d7d3a53337a2bd1792093c39169b0e31f1cf8
|
refs/heads/master
| 2023-04-21T11:39:22.590349
| 2021-05-27T05:40:15
| 2021-05-27T05:40:15
| 365,464,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,746
|
py
|
import re
from basecase.basecase import BaseCase
class TestCateGorySecond(BaseCase):
    def test_cate_gory_second(self):
        """Call the second-level category API and validate the response shape.

        Checks HTTP status, business return code, and that each field of the
        first returned book matches a basic regex pattern.
        """
        res = self.cate_gory_second.cate_gory_second()
        print('请求url:' + res.url)
        print('requestId:' + res.headers['requestId'])
        body = res.json()
        print(body)

        assert res.status_code == 200
        assert body['retCode'] == 0
        assert re.search(r'\d', str(body['data']['isMore']))

        # Field name -> regex pattern expected for the first book entry.
        book = body['data']['books'][0]
        expectations = (
            ('author', r'\w+'),
            ('resFormat', r'\d'),
            ('iconDesc', r'\w+'),
            ('coverWap', r'http://\w+'),
            ('iconType', r'\d'),
            ('clickNum', r'\w+'),
            ('totalWordSize', r'\w+'),
            ('bookName', r'\w+'),
            ('introduction', r'\w+'),
            ('bookId', r'\d+'),
            ('status', r'\w+'),
        )
        for field, pattern in expectations:
            assert re.search(pattern, str(book[field]))
|
[
"1007884377@qq.com"
] |
1007884377@qq.com
|
afa792b926c2ea3c9563b1ca60d34e69bc4fc2bc
|
b2ba78fb1e53f92efdc3b6e0be50c81e5dd036ed
|
/plot_f/plot_offline_mbl_5M_all.py
|
ef16bbcfd8943228d88a28c336263fa8c582ed91
|
[
"MIT"
] |
permissive
|
ShuoZ9379/Integration_SIL_and_MBL
|
2dcfae10cb5929c4121a3a8bfceebae8c0b6ba08
|
d7df6501a665d65eb791f7fd9b8e85fd660e6320
|
refs/heads/master
| 2020-07-23T20:04:17.304302
| 2019-09-23T18:58:57
| 2019-09-23T18:58:57
| 207,690,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,634
|
py
|
import os, argparse, subprocess
import matplotlib.pyplot as plt
import numpy as np
from baselines.common import plot_util as pu
def arg_parser():
    """Build an ArgumentParser that shows default values in --help output."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    return parser
def filt(results, name, name_2=''):
    """Select results whose dirname contains both substrings.

    The second substring defaults to '' (always matches), so a single-name
    filter is just filt(results, name).
    """
    matched = []
    for result in results:
        if name in result.dirname and name_2 in result.dirname:
            matched.append(result)
    return matched
def filt_or(results, name, name_2):
    """Select results whose dirname contains either of the two substrings."""
    selected = []
    for result in results:
        if name in result.dirname or name_2 in result.dirname:
            selected.append(result)
    return selected
def filt_or_or_or(results, name, name_2, name_3, name_4):
    """Select results whose dirname contains any of the four substrings."""
    needles = (name, name_2, name_3, name_4)
    return [r for r in results
            if any(needle in r.dirname for needle in needles)]
def main():
    """Load offline-MBL experiment results and save comparison plots.

    Reads progress logs from a hard-coded directory layout, splits runs into
    algorithm groups (copos1/copos2/trpo/ppo, with/without SIL, with/without
    MBL), and saves one best-return plot per group (plus entropy plots for
    the MBL groups) as PDFs under a hard-coded thesis image directory.
    """
    parser = arg_parser()
    parser.add_argument('--env', help='environment ID', type=str, default='HalfCheetah-v2')
    parser.add_argument('--dir', type=str, default='logs')
    parser.add_argument('--thesis', type=str, default='Offline_V0')
    args = parser.parse_args()
    # dirname = '~/Desktop/carla_sample_efficient/data/bk/bkup_EXP2_FINAL/'+args.extra_dir+args.env
    # NOTE(review): user-specific hard-coded paths below; adjust before reuse.
    dirname = '~/Desktop/logs/'+args.dir+'/EXP_OFF_24_5M_V0/'+args.env
    results = pu.load_results(dirname)
    # Group runs by algorithm name embedded in the result directory name.
    r_copos1_nosil,r_copos2_nosil,r_trpo_nosil,r_ppo_nosil=filt(results,'copos1-'),filt(results,'copos2-'),filt(results,'trpo-'),filt(results,'ppo-')
    r_copos1_sil,r_copos2_sil,r_trpo_sil,r_ppo_sil=filt(results,'copos1+sil-'),filt(results,'copos2+sil-'),filt(results,'trpo+sil-'),filt(results,'ppo+sil-')
    r_mbl_sil=filt(results,'mbl+','sil-')
    # r_mbl_nosil_tmp=[r for r in results if r not in r_mbl_sil]
    r_mbl_nosil=filt_or_or_or(results,'mbl+copos1-','mbl+copos2-','mbl+trpo-','mbl+ppo-')
    # "comp" groups compare each base algorithm's MBL variant vs its SIL variant.
    r_copos1_comp, r_copos2_comp, r_trpo_comp, r_ppo_comp=filt_or(results,'mbl+copos1','copos1+sil'),filt_or(results,'mbl+copos2','copos2+sil'),filt_or(results,'mbl+trpo','trpo+sil'),filt_or(results,'mbl+ppo','ppo+sil')
    dt={'copos1_nosil':r_copos1_nosil,'copos2_nosil':r_copos2_nosil, 'trpo_nosil':r_trpo_nosil, 'ppo_nosil':r_ppo_nosil,
        'copos1_sil':r_copos1_sil,'copos2_sil':r_copos2_sil, 'trpo_sil':r_trpo_sil, 'ppo_sil':r_ppo_sil,
        'mbl_nosil':r_mbl_nosil, 'mbl_sil':r_mbl_sil,
        'copos1_comp':r_copos1_comp,'copos2_comp':r_copos2_comp, 'trpo_comp':r_trpo_comp, 'ppo_comp':r_ppo_comp}
    for name in dt:
        # Best-average-return curve for this group.
        pu.plot_results(dt[name],xy_fn=pu.progress_mbl_vbest_xy_fn,average_group=True,name=name,split_fn=lambda _: '',shaded_err=True,shaded_std=False)
        plt.xlabel('Number of Timesteps [M]')
        plt.ylabel('Best Average Return [-]')
        plt.tight_layout()
        fig = plt.gcf()
        fig.set_size_inches(9, 7.5)
        # fig.savefig("/Users/zsbjltwjj/Desktop/carla_sample_efficient/plot_f/OFFLINE/"+args.extra_dir+args.env+'/'+name+'.pdf',format="pdf")
        fig.savefig("/Users/zsbjltwjj/Desktop/thesis/img/"+args.thesis+"/"+args.env+'/'+name+'.pdf', format="pdf")
        # MBL groups additionally get an entropy-over-time plot.
        if name=='mbl_nosil' or name=='mbl_sil':
            pu.plot_results(dt[name],xy_fn=pu.progress_default_entropy_xy_fn,average_group=True,name=name,split_fn=lambda _: '',shaded_err=True,shaded_std=False,legend_entropy=1)
            plt.xlabel('Number of Timesteps [M]')
            plt.ylabel('Entropy [-]')
            plt.tight_layout()
            fig = plt.gcf()
            fig.set_size_inches(9, 7.5)
            # fig.savefig("/Users/zsbjltwjj/Desktop/carla_sample_efficient/plot_f/OFFLINE/"+args.extra_dir+args.env+'/'+name+'_entropy.pdf',format="pdf")
            fig.savefig("/Users/zsbjltwjj/Desktop/thesis/img/"+args.thesis+"/"+args.env+'/'+name+'_entropy.pdf', format="pdf")


if __name__ == '__main__':
    main()
|
[
"zhangshuo19930709@gmail.com"
] |
zhangshuo19930709@gmail.com
|
e04c0bf21ef6ef4a8ce6e6a89f934139e335a5d8
|
f098c361ee79bb8b7a8402fcf20b37f17fb36983
|
/Back-End/Python/Basics/Part -1 - Functional/04 - First-Class-Functions/send_email_partial.py
|
f536c40a3c3798957ca6c45af1bfb96feb7036ee
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
rnsdoodi/Programming-CookBook
|
4d619537a6875ffbcb42cbdaf01d80db1feba9b4
|
9bd9c105fdd823aea1c3f391f5018fd1f8f37182
|
refs/heads/master
| 2023-09-05T22:09:08.282385
| 2021-10-31T11:57:40
| 2021-10-31T11:57:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,291
|
py
|
# Tutorial script: using functools.partial to pre-fill arguments of a
# sendmail stub.  All "emails" are just prints; the expected transcript is
# shown in the comments.
from functools import partial


def sendmail(to, subject, body):
    # code to send email
    print('To:{0}, Subject:{1}, Body:{2}'.format(to, subject, body))


email_admin = 'palin@python.edu'
email_devteam = 'idle@python.edu;cleese@python.edu'

# Now when we want to send emails we would have to write things like:
# Email 1
sendmail(email_admin, 'My App Notification', 'the parrot is dead.')
# Email 2
sendmail(';'.join((email_admin, email_devteam)), 'My App Notification',
         'the ministry is closed until further notice.')
# Email 1
# To:palin@python.edu,
# Subject:My App Notification,
# Body:the parrot is dead.
# Email 2
# To:palin@python.edu;idle@python.edu;cleese@python.edu,
# Subject:My App Notification,
# Body:the ministry is closed until further notice.

# Partial: freeze the recipient and subject, leaving only the body to supply.
# Email 1
send_admin = partial(sendmail, email_admin, 'For you eyes only')
# Email 2
send_dev = partial(sendmail, email_devteam, 'Dear IT:')
# Email 3
send_all = partial(sendmail, ';'.join((email_admin, email_devteam)), 'Loyal Subjects')
send_admin('the parrot is dead.')
send_all('the ministry is closed until further notice.')


# NOTE: this deliberately redefines sendmail above, now with keyword-only
# cc/bcc parameters, to show partial() freezing keyword arguments too.
def sendmail(to, subject, body, *, cc=None, bcc=email_devteam):
    # code to send email
    print('To:{0}, Subject:{1}, Body:{2}, CC:{3}, BCC:{4}'.format(to,
                                                                  subject,
                                                                  body,
                                                                  cc,
                                                                  bcc))


# Email 1
send_admin = partial(sendmail, email_admin, 'General Admin')
# Email 2
send_admin_secret = partial(sendmail, email_admin, 'For your eyes only', cc=None, bcc=None)
send_admin('and now for something completely different')
#To:palin@python.edu,
# Subject:General Admin,
# Body:and now for something completely different,
# CC:None,
# BCC:idle@python.edu;cleese@python.edu
send_admin_secret('the parrot is dead!')
#To:palin@python.edu,
# Subject:For your eyes only,
# Body:the parrot is dead!,
# CC:None,
# BCC:None
# Keyword arguments frozen by partial() can still be overridden at call time:
send_admin_secret('the parrot is no more!', bcc=email_devteam)
# To:palin@python.edu,
# Subject:For your eyes only,
# Body:the parrot is no more!,
# CC:None,
# BCC:idle@python.edu;cleese@python.edu
|
[
"58447627+Koubae@users.noreply.github.com"
] |
58447627+Koubae@users.noreply.github.com
|
54d30e07523c4b8a72948a31db878f4c25809cfc
|
7b48cfecdf478bfffbf1cc9cb20c62100898eb6a
|
/mordred/mordred.py
|
b1a214096714b6e219f614269a6e2b424fbaef4c
|
[] |
no_license
|
albertinisg/mordred
|
cc2cd17806cac208857f3e04b48b1cd13095e9b6
|
e141e1c6ad09ea89f194c410285903542b6b557e
|
refs/heads/master
| 2021-01-13T15:50:36.193316
| 2017-02-05T15:08:32
| 2017-02-05T15:08:32
| 76,866,269
| 0
| 0
| null | 2016-12-19T13:52:35
| 2016-12-19T13:52:35
| null |
UTF-8
|
Python
| false
| false
| 12,992
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Luis Cañas-Díaz <lcanas@bitergia.com>
# Alvaro del Castillo <acs@bitergia.com>
#
import configparser
import logging
import time
import json
import sys
import requests
import threading
from datetime import datetime, timedelta
from grimoire_elk.utils import get_connectors
from mordred.task_collection import TaskRawDataCollection
from mordred.task_enrich import TaskEnrich
from mordred.task_identities import TaskIdentitiesCollection, TaskIdentitiesInit, TaskIdentitiesMerge
from mordred.task_manager import TasksManager
from mordred.task_panels import TaskPanels, TaskPanelsMenu
# Error-message templates (Monty Python flavoured, in keeping with the
# project's Mordred theme).
SLEEPFOR_ERROR = """Error: You may be Arthur, King of the Britons. But you still """ + \
                 """need the 'sleep_for' variable in sortinghat section\n - Mordred said."""

# Interpolated with {'uri': <elasticsearch url>} before being raised.
ES_ERROR = "Before starting to seek the Holy Grail, make sure your ElasticSearch " + \
           "at '%(uri)s' is available!!\n - Mordred said."

logger = logging.getLogger(__name__)
class ElasticSearchError(Exception):
    """Raised when a configured ElasticSearch server cannot be reached.

    :param expression: human-readable error message (also available via
        str() on the exception)
    """

    def __init__(self, expression):
        # Call the base initializer so the message participates in normal
        # Exception machinery (args, str(), pickling).
        super().__init__(expression)
        self.expression = expression
class Mordred:
def __init__(self, conf_file):
self.conf_file = conf_file
self.conf = None
def update_conf(self, conf):
self.conf = conf
    def read_conf_files(self):
        """Parse the mordred INI configuration file into a flat dict.

        Reads general options, ElasticSearch URLs, phase toggles, the
        projects JSON file, SortingHat settings and per-backend index names.

        :return: dict with all configuration values
        :raises SystemExit: when 'sleep_for' is missing but the identities
            phase with updates is enabled
        """
        conf = {}

        logger.debug("Reading conf files")
        config = configparser.ConfigParser()
        config.read(self.conf_file)
        logger.debug(config.sections())

        if 'min_update_delay' in config['general'].keys():
            conf['min_update_delay'] = config.getint('general','min_update_delay')
        else:
            # if no parameter is included, the update won't be performed more
            # than once every minute
            conf['min_update_delay'] = 60

        # FIXME: Read all options in a generic way
        conf['es_collection'] = config.get('es_collection', 'url')
        conf['es_enrichment'] = config.get('es_enrichment', 'url')
        conf['autorefresh_on'] = config.getboolean('es_enrichment', 'autorefresh')
        conf['studies_on'] = config.getboolean('es_enrichment', 'studies')

        # The projects file maps project names to backends and repositories.
        projects_file = config.get('projects','projects_file')
        conf['projects_file'] = projects_file
        with open(projects_file,'r') as fd:
            projects = json.load(fd)
        conf['projects'] = projects

        # Phase toggles: which stages of the pipeline are enabled.
        conf['collection_on'] = config.getboolean('phases','collection')
        conf['identities_on'] = config.getboolean('phases','identities')
        conf['enrichment_on'] = config.getboolean('phases','enrichment')
        conf['panels_on'] = config.getboolean('phases','panels')

        conf['update'] = config.getboolean('general','update')

        try:
            conf['kibana'] = config.get('general','kibana')
        except configparser.NoOptionError:
            pass

        # SortingHat (identity management) settings.
        conf['sh_bots_names'] = config.get('sortinghat', 'bots_names').split(',')
        # Optional config params
        try:
            conf['sh_no_bots_names'] = config.get('sortinghat', 'no_bots_names').split(',')
        except configparser.NoOptionError:
            pass
        conf['sh_database'] = config.get('sortinghat', 'database')
        conf['sh_host'] = config.get('sortinghat', 'host')
        conf['sh_user'] = config.get('sortinghat', 'user')
        conf['sh_password'] = config.get('sortinghat', 'password')
        aux_matching = config.get('sortinghat', 'matching')
        conf['sh_matching'] = aux_matching.replace(' ','').split(',')
        aux_autoprofile = config.get('sortinghat', 'autoprofile')
        conf['sh_autoprofile'] = aux_autoprofile.replace(' ','').split(',')
        conf['sh_orgs_file'] = config.get('sortinghat', 'orgs_file')
        conf['sh_load_orgs'] = config.getboolean('sortinghat', 'load_orgs')
        try:
            conf['sh_sleep_for'] = config.getint('sortinghat','sleep_for')
        except configparser.NoOptionError:
            # sleep_for is mandatory when identities run continuously.
            if conf['identities_on'] and conf['update']:
                logging.error(SLEEPFOR_ERROR)
                sys.exit(1)

        try:
            conf['sh_ids_file'] = config.get('sortinghat', 'identities_file')
        except configparser.NoOptionError:
            logger.info("No identities files")

        # Per-backend sections: index names plus any extra backend options.
        for backend in self.__get_backends():
            try:
                raw = config.get(backend, 'raw_index')
                enriched = config.get(backend, 'enriched_index')
                conf[backend] = {'raw_index':raw, 'enriched_index':enriched}
                for p in config[backend]:
                    try:
                        conf[backend][p] = config.getboolean(backend, p)
                    except ValueError:
                        conf[backend][p] = config.get(backend, p)
            except configparser.NoSectionError:
                pass

        return conf
def check_es_access(self):
##
## So far there is no way to distinguish between read and write permission
##
def _ofuscate_server_uri(uri):
if uri.rfind('@') > 0:
pre, post = uri.split('@')
char_from = pre.rfind(':')
result = uri[0:char_from + 1] + '****@' + post
return result
else:
return uri
es = self.conf['es_collection']
try:
r = requests.get(es, verify=False)
if r.status_code != 200:
raise ElasticSearchError(ES_ERROR % {'uri' : _ofuscate_server_uri(es)})
except:
raise ElasticSearchError(ES_ERROR % {'uri' : _ofuscate_server_uri(es)})
if self.conf['enrichment_on'] or self.conf['studies_on']:
es = self.conf['es_enrichment']
try:
r = requests.get(es, verify=False)
if r.status_code != 200:
raise ElasticSearchError(ES_ERROR % {'uri' : _ofuscate_server_uri(es)})
except:
raise ElasticSearchError(ES_ERROR % {'uri' : _ofuscate_server_uri(es)})
def __get_backends(self):
gelk_backends = list(get_connectors().keys())
extra_backends = ["google_hits"]
return gelk_backends + extra_backends
def __get_repos_by_backend(self):
#
# return dict with backend and list of repositories
#
output = {}
projects = self.conf['projects']
for backend in self.__get_backends():
for pro in projects:
if backend in projects[pro]:
if not backend in output:
output[backend] = projects[pro][backend]
else:
output[backend] = output[backend] + projects[pro][backend]
# backend could be in project/repo file but not enabled in
# mordred conf file
enabled = {}
for k in output:
if k in self.conf:
enabled[k] = output[k]
# logger.debug('repos to be retrieved: %s ', enabled)
return enabled
def execute_tasks (self, tasks_cls):
"""
Just a wrapper to the execute_batch_tasks method
"""
self.execute_batch_tasks(tasks_cls)
def execute_nonstop_tasks(self, tasks_cls):
"""
Just a wrapper to the execute_batch_tasks method
"""
self.execute_batch_tasks(tasks_cls, self.conf['sh_sleep_for'], self.conf['min_update_delay'], False)
def execute_batch_tasks(self, tasks_cls, big_delay=0, small_delay=0, wait_for_threads = True):
    """
    Start a task manager per backend to complete the tasks.

    :param task_cls: list of tasks classes to be executed
    :param big_delay: seconds before global tasks are executed, should be days usually
    :param small_delay: seconds before blackend tasks are executed, should be minutes
    :param wait_for_threads: boolean to set when threads are infinite or
    should be synchronized in a meeting point
    """
    def _split_tasks(tasks_cls):
        """
        we internally distinguish between tasks executed by backend
        and tasks executed with no specific backend. """
        backend_t = []
        global_t = []
        for t in tasks_cls:
            if t.is_backend_task(t):
                backend_t.append(t)
            else:
                global_t.append(t)
        return backend_t, global_t

    logger.debug(' Task Manager starting .. ')
    backend_tasks, global_tasks = _split_tasks(tasks_cls)
    logger.debug ('backend_tasks = %s' % (backend_tasks))
    logger.debug ('global_tasks = %s' % (global_tasks))
    threads = []
    # stopper won't be set unless wait_for_threads is True
    stopper = threading.Event()
    # launching threads for tasks by backend: one TasksManager thread per
    # backend, each working on that backend's repositories
    if len(backend_tasks) > 0:
        repos_backend = self.__get_repos_by_backend()
        for backend in repos_backend:
            # Start new Threads and add them to the threads list to complete
            t = TasksManager(backend_tasks, backend, repos_backend[backend],
                             stopper, self.conf, small_delay)
            threads.append(t)
            t.start()
    # launch thread for global tasks
    if len(global_tasks) > 0:
        #FIXME timer is applied to all global_tasks, does it make sense?
        gt = TasksManager(global_tasks, None, None, stopper, self.conf, big_delay)
        threads.append(gt)
        gt.start()
        if big_delay > 0:
            when = datetime.now() + timedelta(seconds = big_delay)
            when_str = when.strftime('%a, %d %b %Y %H:%M:%S %Z')
            logger.info("%s will be executed on %s" % (global_tasks, when_str))
    if wait_for_threads:
        time.sleep(1)  # Give enough time create and run all threads
        stopper.set()  # All threads must stop in the next iteration
        logger.debug(" Waiting for all threads to complete. This could take a while ..")
    # Wait for all threads to complete
    for t in threads:
        t.join()
    logger.debug(" Task manager and all its tasks (threads) finished!")
def run(self):
    """Main entry point: load the configuration, check ElasticSearch
    access and execute the configured phases in order (identities init,
    raw collection, identities merge, enrichment, panels). In update
    mode, keep re-running all accumulated tasks forever."""
    #logger.debug("Starting Mordred engine ...")
    logger.info("")
    logger.info("----------------------------")
    logger.info("Starting Mordred engine ...")
    logger.info("- - - - - - - - - - - - - - ")
    self.update_conf(self.read_conf_files())
    # check we have access to the needed ES
    self.check_es_access()
    # do we need ad-hoc scripts?
    tasks_cls = []
    all_tasks_cls = []  # accumulates the tasks re-run in update mode
    # phase one
    # we get all the items with Perceval + identites browsing the
    # raw items
    if self.conf['identities_on']:
        tasks_cls = [TaskIdentitiesInit]
        self.execute_tasks(tasks_cls)
    if self.conf['collection_on']:
        tasks_cls = [TaskRawDataCollection]
        #self.execute_tasks(tasks_cls)
        if self.conf['identities_on']:
            tasks_cls.append(TaskIdentitiesCollection)
        all_tasks_cls += tasks_cls
        self.execute_tasks(tasks_cls)
    if self.conf['identities_on']:
        tasks_cls = [TaskIdentitiesMerge]
        all_tasks_cls += tasks_cls
        self.execute_tasks(tasks_cls)
    if self.conf['enrichment_on']:
        # raw items + sh database with merged identities + affiliations
        # will used to produce a enriched index
        tasks_cls = [TaskEnrich]
        all_tasks_cls += tasks_cls
        self.execute_tasks(tasks_cls)
    if self.conf['panels_on']:
        # Remove first the dashboard menu
        tasks_cls = [TaskPanels, TaskPanelsMenu]
        self.execute_tasks(tasks_cls)
    logger.debug(' - - ')
    logger.debug('Meeting point 0 reached')
    time.sleep(1)
    while self.conf['update']:
        # Re-run every accumulated task with the configured delays.
        self.execute_nonstop_tasks(all_tasks_cls)
    logger.info("Finished Mordred engine ...")
|
[
"acs@bitergia.com"
] |
acs@bitergia.com
|
0b459f2956f8b32f62c231644e0df079e662cadd
|
5a82795c3860745112b7410d9060c5ef671adba0
|
/leetcode/Network Delay Time.py
|
0ead9c6fd14d36b87d1e6aaa9d1e5ac0d91d18eb
|
[] |
no_license
|
ashishvista/geeks
|
8e09d0f3a422c1c9a1c1b19d879ebafa31b62f44
|
1677a304fc7857a3054b574e8702491f5ce01a04
|
refs/heads/master
| 2023-03-05T12:01:03.911096
| 2021-02-15T03:00:56
| 2021-02-15T03:00:56
| 336,996,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,335
|
py
|
class Graph:
    """Directed graph with weighted edges over vertices 0..V-1.

    `adj` holds the adjacency lists and `weights` maps "u-v" string keys
    to the corresponding edge weight.
    """
    # Fix: removed the dead class attribute `v = None` — it was never
    # read anywhere; the real vertex count lives in `self.V`.

    def __init__(self, V):
        self.V = V
        self.adj = [[] for i in range(V)]
        self.weights = {}

    def addEdge(self, u, v, w):
        """Add a directed edge u -> v with weight w."""
        self.adj[u].append(v)
        self.weights[str(u) + "-" + str(v)] = w


class Solution:
    """LeetCode 743 "Network Delay Time"."""

    # Fix: the original annotation used `List[List[int]]` but `typing.List`
    # was never imported, so defining the method raised NameError; plain
    # builtin names keep the signature informative and always resolvable.
    def networkDelayTime(self, times: list, N: int, K: int) -> int:
        """Return the time for a signal from node K to reach all N nodes.

        times -- list of [u, v, w] directed edges (1-based nodes, time w)
        N     -- number of nodes
        K     -- source node (1-based)
        Returns -1 when some node is unreachable.
        """
        graph = Graph(N)
        for u, v, w in times:
            graph.addEdge(u - 1, v - 1, w)

        # Dijkstra: dist holds tentative distances, frontier the
        # candidate vertices still to be finalized.
        dist = [float("+inf")] * N
        dist[K - 1] = 0
        visited = {}
        frontier = {K - 1: 1}
        while frontier:
            u = self.getLowestCostV(frontier, dist)
            del frontier[u]
            visited[u] = 1
            for v in graph.adj[u]:
                new_dist = dist[u] + graph.weights[str(u) + "-" + str(v)]
                if new_dist < dist[v]:
                    dist[v] = new_dist
                    if v not in visited:
                        frontier[v] = 1

        if len(visited) != N:
            return -1  # some node never received the signal
        return max(dist)

    def getLowestCostV(self, varr, dist):
        """Return the vertex in `varr` with the smallest tentative distance."""
        best_cost = float("inf")
        best_v = None
        for v in varr:
            if best_cost > dist[v]:
                best_cost = dist[v]
                best_v = v
        return best_v
|
[
"ashish@groomefy.com"
] |
ashish@groomefy.com
|
01e60493883cfefce15b71a07e7676b42da9e91e
|
aedded4974138c7e510337cd5dd99144a2a388a6
|
/Sentimental Analysis using Elastic Search and Python/load_elasticsearch.py
|
3913516136c143abc375b1f7e2cfd0c768ee99b8
|
[] |
no_license
|
dalalbhargav07/Data-Warehousing-to-Data-Analytics
|
a1304b4e606d776a48b7b815b945f615c5571ef9
|
96ed04416e3db9012cbb3e9a1a2cf591bbb4a7c8
|
refs/heads/master
| 2020-04-02T16:07:23.922297
| 2018-10-25T03:06:15
| 2018-10-25T03:06:15
| 154,599,043
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,354
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 06 15:13:45 2018
@author: Hardik Galiawala, Bhargav Dalal
"""
import csv
from datetime import datetime
from elasticsearch_dsl import DocType, Date, Integer, Keyword, Text, connections
connections.create_connection(hosts=['team1.canadaeast.cloudapp.azure.com'])
class Tweet(DocType):
    """elasticsearch-dsl document type for one analysed tweet, stored in
    the 'tweet_sentiments' index."""
    #user = Text(analyzer='snowball', fields={'raw': Keyword()})
    tweet = Text(analyzer='snowball')      # tweet body, snowball-analysed
    score = Text()                         # sentiment score, kept as text
    sentiment = Text(analyzer='snowball')  # sentiment label
    #hashtags = Keyword()
    created_at = Date()                    # timestamp set at load time

    class Meta:
        # Target ES index name for all Tweet documents.
        index = 'tweet_sentiments'

    def save(self, ** kwargs):
        '''
        self.lines = len(self.tweet.split())
        self.hashtags = [tag for tag in
                         self.tweet.split()
                         if tag.startswith('#')]
        '''
        # NOTE(review): the triple-quoted block above is parsed as this
        # method's docstring, so the line/hashtag extraction it describes
        # is effectively disabled code.
        return super(Tweet, self).save(** kwargs)
# Load every row of the sentiment CSV into ElasticSearch, one Tweet
# document per row (columns: tweet text, sentiment label, score).
# NOTE(review): 'rb' with csv.reader is the Python 2 idiom; on Python 3
# the file would need text mode — confirm the target interpreter.
with open('sentimentAnalysis.csv', 'rb') as csvfile:
    id_number = 0  # used as the ES document id, starting at 1
    sentiment = csv.reader(csvfile, delimiter=',')
    for i in sentiment:
        id_number = id_number + 1
        # NOTE(review): init() (index/mapping creation) runs once per
        # row here; it looks like it only needs to run once before the
        # loop — confirm before changing.
        Tweet.init()
        tweet = Tweet(meta={'id': id_number})
        tweet.tweet = i[0]
        tweet.sentiment = i[1]
        tweet.score = i[2]
        tweet.created_at = datetime.now()
        tweet.save()
|
[
"dalal.bhargav07@gmail.com"
] |
dalal.bhargav07@gmail.com
|
1cd6b2cde90dae2214ce5243b597fbe6393438cf
|
15da36fb301d51425ce38e7af6e77a64e05e62d5
|
/test_iterator.py
|
b0ef7cea978b72ad37b7c2383065723f86ad077b
|
[] |
no_license
|
bassemhossam/Video-Summarization
|
7375681d5d875d2644ad1f05d5ab66936db5a019
|
321cfb659e742a96407c57c6bcac101062c3cff0
|
refs/heads/master
| 2020-12-05T11:14:08.537642
| 2020-07-19T09:57:57
| 2020-07-19T09:57:57
| 232,091,949
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
# Helper code used for testing and seeing the outputs from the iterator.
from data_iterator import *
import torch

device = torch.device("cpu")
# Drain the "test" split (capped at 200 videos) batch by batch, printing
# a running batch count; the final print is the total number of batches.
iterator = SSIterator(64, 15, 20, "test", device, max_videos=200)
iterator.start()
batch = iterator.next()
counter = 0
while batch != None:
    counter += 1
    batch = iterator.next()
    print(counter)
print(counter)
|
[
"noreply@github.com"
] |
noreply@github.com
|
0daaa4c643fd769852f1d89126fea5c9a7ff5325
|
e4d441f9d9e743e685650b3aee1f09d09d7e4681
|
/zad 1.py
|
d3e16f68a9089f76375a361dc0d1d5d4da03e0cd
|
[] |
no_license
|
pstatkiewicz/lista-3
|
b9f56f462365713dd49a908579f0315f31c49ee7
|
d7133d7222928bcb35d7cfde1c004cc94d798ed7
|
refs/heads/main
| 2023-06-07T20:47:53.101174
| 2021-01-18T14:46:47
| 2021-01-18T14:46:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 752
|
py
|
import matplotlib.pyplot as plt
def how_many_letters(string):
    """Count each latin letter in *string* (case-insensitive), plot a bar
    chart of the 10 most frequent ones and return them as a list of
    [count, letter] pairs, most frequent first."""
    # Bug fix: str.lower() returns a new string — the original code
    # discarded the result, so uppercase letters were never counted.
    string = string.lower()
    counter = [0] * 26
    letters = [chr(97 + i) for i in range(26)]
    for ch in string:
        # Count only a..z (codepoints 97..122).
        if 96 < ord(ch) < 123:
            counter[ord(ch) - 97] += 1
    pairs = [[counter[i], letters[i]] for i in range(26)]
    pairs.sort(reverse=True)
    result = pairs[0:10]  # ten most frequent letters
    counts = [p[0] for p in result]
    top_letters = [p[1] for p in result]
    plt.bar(top_letters, counts)
    plt.title("Częstotliwość występowania liter")
    plt.show()  # matplotlib
    return result  # print
# Ask the user for a text and report its 10 most frequent letters.
print("Podaj tekst: ")
text = input()
print(how_many_letters(text))
|
[
"patrykstatkiewicz1012@gmail.com"
] |
patrykstatkiewicz1012@gmail.com
|
bc7d6aaa70db83515dcb9be6218ae064862630e5
|
b563e04d91dcd5169b83fbfba840c16a882f7357
|
/filechanger.py
|
4384db93fcb61bc1ec51600c25b56c4ab7ff7868
|
[] |
no_license
|
tomfa/filechanger
|
7765d248799815f9980a3151a63f16738ef9c389
|
e9d0f4e6543557eb4bb8fd84a5d3cc74466d81f5
|
refs/heads/master
| 2020-07-06T02:11:21.699066
| 2016-11-24T18:58:29
| 2016-11-24T18:58:29
| 73,969,133
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,591
|
py
|
#coding: utf-8
import os, sys
IGNORED_FILE_ENDINGS = [".py", ".bat"]
VALID_ACTIONS = ["lower", "upper", "remove", "insert", "replace"]
def _get_file_ext(file):
"""
>>> _get_file_ext("C:\\Program Files\test.txt")
'.txt'
>>> _get_file_ext("/Users/tomas/.bash_rc")
''
"""
return os.path.splitext(file)[1]
def _confirm_continue(text):
    # Block until the user presses Enter; whatever is typed is discarded.
    # (Python 2: raw_input.)
    raw_input(text)
def _input(helptext, valid_input=None):
    """Prompt the user with *helptext* and return the typed answer.

    When *valid_input* is given, the accepted values are appended to the
    prompt and the user is asked again (answer lower-cased) until one of
    them is entered.
    """
    inputtext = raw_input(helptext + ("\n(" + ", ".join(valid_input) + ")" if valid_input else "") + "\n> ")
    if valid_input:
        while not inputtext in valid_input:
            print("Valid input is " + ", ".join(valid_input))
            inputtext = raw_input("> ").lower()
    return inputtext
def _print_usage():
print("Usage:")
print("python renamer ACTION POSITION (TEXT)")
print("- ACTION: replace, insert, remove, upper, lower")
print("- POSITION: position eller start_position:end_position")
def _get_files_to_be_converted(dir, recursive):
    """Return the paths of the files to rename inside *dir*.

    Directories, files with an ignored extension and (in recursive mode)
    anything under a .git directory are skipped. Non-recursive mode
    returns absolute paths of the files directly in *dir*; recursive
    mode walks the whole tree.
    """
    if not recursive:
        # Bug fix: isdir() must be checked on the joined path — the
        # original tested the bare entry name, which is resolved against
        # the current working directory, so subdirectories of *dir* were
        # wrongly kept in the list.
        return [os.path.abspath(os.path.join(dir, x)) for x in os.listdir(dir)
                if not (os.path.isdir(os.path.join(dir, x))
                        or _get_file_ext(x) in IGNORED_FILE_ENDINGS)]
    convert_files = []
    for root, subFolders, files in os.walk(dir):
        if ".git" in root:
            continue
        for file in files:
            if not _get_file_ext(file) in IGNORED_FILE_ENDINGS:
                convert_files.append(os.path.join(root, file))
    return convert_files
def _to_upper_case(text, start_pos_pos, end_pos):
"""
>>> _to_upper_case("tomas", 3, 3)
'toMas'
>>> _to_upper_case("tomas", 3, 50)
'toMAS'
"""
if end_pos == None:
end_pos = start_pos_pos
first_part = text[:start_pos_pos-1]
last_part = text[end_pos:]
upper_case_text = text[start_pos_pos-1:end_pos].upper()
return first_part + upper_case_text + last_part
def _to_lower_case(text, start_pos_pos, end_pos):
"""
>>> _to_lower_case("TOMAS", 3, 4)
'TOmaS'
>>> _to_lower_case("TOMS", 3, 50)
'TOms'
>>> _to_lower_case("tomas", 3, 50)
'tomas'
"""
if end_pos == None:
end_pos = start_pos_pos
first_part = text[:start_pos_pos-1]
last_part = text[end_pos:]
lower_case_text = text[start_pos_pos-1:end_pos].lower()
return first_part + lower_case_text + last_part
def _remove_at_pos(text, start_pos_pos, end_pos):
"""
>>> _remove_at_pos("tomas", 3, 4)
'tos'
>>> _remove_at_pos("tomas", 3, 50)
'to'
"""
if end_pos == None:
return text[:start_pos_pos-1] + text[start_pos_pos:]
else:
return text[:start_pos_pos-1] + text[end_pos:]
def _handle_special_input(arg):
if arg == "help":
_print_usage()
return True
if arg == "test":
import doctest
doctest.testmod()
return True
return False
def _insert_at_pos(text, insert_text, pos):
"""
>>> _insert_at_pos("tomas", "Fi", 3)
'toFimas'
>>> _insert_at_pos("tomas", "Fi", 1)
'Fitomas'
"""
return text[:pos-1] + insert_text + text[pos-1:]
# Thin, action-specific wrappers around rename(), handy as a Python API.

def upper(working_dir, start_pos, end_pos, recursive=False):
    # Upper-case filename characters start_pos..end_pos.
    rename(working_dir, 'upper', start_pos, end_pos, recursive)

def lower(working_dir, start_pos, end_pos, recursive=False):
    # Lower-case filename characters start_pos..end_pos.
    rename(working_dir, 'lower', start_pos, end_pos, recursive)

def remove(working_dir, start_pos, end_pos, recursive=False):
    # Delete filename characters start_pos..end_pos.
    rename(working_dir, 'remove', start_pos, end_pos, recursive)

def insert(working_dir, start_pos, insert_text, recursive=False):
    # Insert insert_text at start_pos of each filename.
    rename(working_dir, 'insert', start_pos, start_pos, recursive, insert_text)

def replace(working_dir, start_pos, end_pos, insert_text, recursive=False):
    # Replace filename characters start_pos..end_pos with insert_text.
    rename(working_dir, 'replace', start_pos, end_pos, recursive, insert_text)
def rename(working_dir, action, start_pos, end_pos, recursive, insert_text=None, quiet=True):
    """Apply *action* to the name of every eligible file under *working_dir*.

    action      -- one of "lower", "upper", "remove", "insert", "replace"
    start_pos/end_pos -- 1-based character positions inside the filename
    recursive   -- include subdirectories
    insert_text -- text for the "insert"/"replace" actions
    quiet       -- when False, preview the first pending rename and wait
                   for the user to confirm before touching anything
    """
    has_confirmed = False
    current_filename = ""
    changed_files = 0
    files_to_be_converted = _get_files_to_be_converted(working_dir, recursive)
    for file in files_to_be_converted:
        path, filename = os.path.split(file)
        # Compute the new name according to the requested action.
        if (action.lower() == "lower"):
            new_filename = _to_lower_case(filename, start_pos, end_pos)
        elif (action.lower() == "upper"):
            new_filename = _to_upper_case(filename, start_pos, end_pos)
        elif (action.lower() == "remove"):
            new_filename = _remove_at_pos(filename, start_pos, end_pos)
        elif (action.lower() == "insert"):
            new_filename = _insert_at_pos(filename, insert_text, start_pos)
        elif (action.lower() == "replace"):
            # replace = remove the span, then insert the new text there.
            new_filename = _remove_at_pos(filename, start_pos, end_pos)
            new_filename = _insert_at_pos(new_filename, insert_text, start_pos)
        else:
            _print_usage()
            return
        if new_filename == filename:
            continue  # nothing to do for this file
        if (not has_confirmed and not quiet):
            # Show the first pending rename and ask for confirmation once.
            print("Will run (in folder " + working_dir + ") "
                  + action + " on position " + str(start_pos) + (" to " + str(end_pos) if end_pos else "") +
                  (" with the word " + insert_text if insert_text else ""))
            print("Original filename: " + filename)
            print("New filename: " + new_filename)
            _confirm_continue("Click Enter to continue...")
            has_confirmed = True
        changed_files += 1
        new_filepath = os.path.join(path, new_filename)
        print(file + " > " + new_filename)
        os.rename(file, new_filepath)
    if changed_files:
        print("Changed " + str(changed_files) + " files")
    else:
        print("No files to be changed.")
if __name__ == '__main__':
    # CLI: collect working dir, action, position(s), optional text and
    # recursion flag from argv, prompting for whatever is missing.
    if len(sys.argv) > 1:
        # "help"/"test" short-circuit before anything else.
        if _handle_special_input(sys.argv[1]):
            exit()
        working_dir = sys.argv[1]
    else:
        working_dir = _input("Path to directory: ")
    while not os.path.exists(working_dir):
        print("Not a valid directory")
        working_dir = _input("Path to directory: ")
    working_dir = os.path.abspath(working_dir)
    action = None
    if len(sys.argv) > 2:
        action = sys.argv[2]
    if not action or action not in VALID_ACTIONS:
        action = _input("Choose action", VALID_ACTIONS)
    if len(sys.argv) > 3:
        # Position is either "N" or "START:END".
        position = sys.argv[3]
        if ":" in position:
            start_pos = int(position.split(":")[0])
            end_pos = int(position.split(":")[1])
        else:
            start_pos = int(position)
            end_pos = None
    else:
        start_pos = int(_input("Start position: "))
        if action != "insert":
            end_pos = int(_input("End position: "))
        else:
            end_pos = None
    insert_text = False
    recursive = True
    if action in ["insert", "replace"]:
        # These actions need the text to insert as an extra argument.
        if (len(sys.argv) > 4):
            insert_text = sys.argv[4]
        else:
            insert_text = _input("Text to be inserted: ")
        if (len(sys.argv) > 5):
            recursive = sys.argv[5] != "nonrecursive"
        else:
            if (len(sys.argv) < 4):
                recursive = _input("Should subfolders be included?", ["y", "n"]) == "y"
    else:
        if (len(sys.argv) > 4):
            recursive = sys.argv[4] != "nonrecursive"
        else:
            recursive = _input("Should subfolders be included?", ["y", "n"]) == "y"
    # quiet=False: preview the first rename and ask for confirmation.
    rename(working_dir, action, start_pos, end_pos, recursive, insert_text, False)
    _confirm_continue("Click Enter to exit...")
|
[
"tomas@webutvikling.org"
] |
tomas@webutvikling.org
|
35901516fe1969c53a2c4655c745d03c2b532bdb
|
36aea5790cc01c652326f6f5a5722f13ee9d498b
|
/minihack/capname.py
|
86b963c86d8403610ea7a9da222fde26542bfa71
|
[] |
no_license
|
kev158/NguyenTrongDuc-c4t
|
2d9162a69dfa87e8ee24b93e4a72dc8811031cff
|
e1fba30f06c77bb7ab3271475d7ba6da9771ae09
|
refs/heads/master
| 2020-04-18T22:04:00.041524
| 2019-04-20T14:59:10
| 2019-04-20T14:59:10
| 167,783,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 57
|
py
|
# Ask for family name and given name, then print them together.
x = input("Ho:")
y = input("Ten:")
print("ho va ten:", x, y)
|
[
"trongduc811@gmail.com"
] |
trongduc811@gmail.com
|
3ad1be3f4021991b8dff98164ef3af62fb67b912
|
d0fe3d0316aa90ef68c9a39f0335d53602d2be44
|
/node_modules/socket.io-servicebus/node_modules/socket.io/node_modules/socket.io-client/node_modules/engine.io-client/node_modules/ws/build/config.gypi
|
11810bdab21d746e4e7e0b083659e41b147c5e81
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
bitchwhocodes/photobooth
|
98184bc8eb1ed17ad37176ff216b30ee42d471e6
|
a8f8182d9d971333899b3c427f9182b607b3992a
|
refs/heads/master
| 2020-12-24T17:17:49.726329
| 2015-04-11T06:46:31
| 2015-04-11T06:46:31
| 33,573,973
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,839
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"host_arch": "x64",
"icu_data_file": "icudt54l.dat",
"icu_data_in": "../../deps/icu/source/data/in\\icudt54l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps\\icu",
"icu_small": "true",
"icu_ver_major": "54",
"node_has_winsdk": "true",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "false",
"node_use_etw": "true",
"node_use_mdb": "false",
"node_use_openssl": "true",
"node_use_perfctr": "true",
"openssl_no_asm": 0,
"python": "C:\\Python27\\python.exe",
"target_arch": "ia32",
"uv_library": "static_library",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "false",
"visibility": "",
"want_separate_host_toolset": 0,
"nodedir": "C:\\Users\\stmulcah\\.node-gyp\\0.12.1",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"access": "",
"always_auth": "",
"bin_links": "true",
"browser": "",
"ca": "",
"cache": "C:\\Users\\stmulcah\\AppData\\Roaming\\npm-cache",
"cache_lock_retries": "10",
"cache_lock_stale": "60000",
"cache_lock_wait": "10000",
"cache_max": "Infinity",
"cache_min": "10",
"cafile": "",
"cert": "",
"color": "true",
"depth": "Infinity",
"description": "true",
"dev": "",
"editor": "notepad.exe",
"engine_strict": "",
"fetch_retries": "2",
"fetch_retry_factor": "10",
"fetch_retry_maxtimeout": "60000",
"fetch_retry_mintimeout": "10000",
"force": "",
"git": "git",
"git_tag_version": "true",
"global": "",
"globalconfig": "C:\\Users\\stmulcah\\AppData\\Roaming\\npm\\etc\\npmrc",
"globalignorefile": "C:\\Users\\stmulcah\\AppData\\Roaming\\npm\\etc\\npmignore",
"group": "",
"heading": "npm",
"https_proxy": "",
"ignore_scripts": "",
"init_author_email": "",
"init_author_name": "",
"init_author_url": "",
"init_license": "ISC",
"init_module": "C:\\Users\\stmulcah\\.npm-init.js",
"init_version": "1.0.0",
"json": "",
"key": "",
"link": "",
"local_address": "",
"long": "",
"message": "%s",
"node_version": "0.12.1",
"npat": "",
"onload_script": "",
"optional": "true",
"parseable": "",
"prefix": "C:\\Users\\stmulcah\\AppData\\Roaming\\npm",
"production": "",
"proprietary_attribs": "true",
"rebuild_bundle": "true",
"registry": "https://registry.npmjs.org/",
"rollback": "true",
"save": "true",
"save_bundle": "",
"save_dev": "",
"save_exact": "",
"save_optional": "",
"save_prefix": "^",
"scope": "",
"searchexclude": "",
"searchopts": "",
"searchsort": "name",
"shell": "C:\\windows\\system32\\cmd.exe",
"shrinkwrap": "true",
"sign_git_tag": "",
"spin": "true",
"strict_ssl": "true",
"tag": "latest",
"tmp": "C:\\Users\\stmulcah\\AppData\\Local\\Temp",
"umask": "0000",
"unicode": "true",
"unsafe_perm": "true",
"usage": "",
"user": "",
"userconfig": "C:\\Users\\stmulcah\\.npmrc",
"user_agent": "npm/2.5.1 node/v0.12.1 win32 ia32",
"version": "",
"versions": "",
"viewer": "browser"
}
}
|
[
"stacey.mulcahy@gmail.com"
] |
stacey.mulcahy@gmail.com
|
307ab6fcace65005c39968f593e93497733e5f09
|
caaf7723580684886559dedba9a0cfa19036243d
|
/autofocus.py
|
6372eb067440d35876fbc6f9048db2bbfc5c9398
|
[] |
no_license
|
mike-fang/led_micro
|
27214b5d9e67abd3dbc85c2962be13bb82c83723
|
c08105b1cd84836fed2dea11074e1d47d13f099a
|
refs/heads/master
| 2022-11-28T10:46:09.647242
| 2020-08-02T19:44:22
| 2020-08-02T19:44:22
| 275,946,959
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,174
|
py
|
from elp_usb_cam import ELP_Camera
from capture_msi import init_rb, STD_EXPOSURE
import numpy as np
from asi_controller import AsiController
from time import time, sleep
from get_sharpness import grad_sharp
import cv2
import matplotlib.pylab as plt
def get_sharpness(img):
    """Return the mean gradient magnitude of *img* — a simple focus
    score where higher means sharper."""
    grad_y, grad_x = np.gradient(img)
    magnitude = np.sqrt(grad_x ** 2 + grad_y ** 2)
    return np.average(magnitude)
class AutoFocus:
    """Coarse-to-fine autofocus: sweep the stage in z, score each depth
    with a gradient-sharpness metric and move to the sharpest one."""

    def __init__(self, cam, rb, control, rng=10, steps=20, maxrng=100):
        # cam: camera used to grab frames; rb: LED board controller;
        # control: stage controller. rng is the half-width of one scan,
        # steps the number of z samples per scan, maxrng the absolute
        # search bound around the starting depth.
        self.cam = cam
        self.rb = rb
        self.stage = control
        self.rng = rng
        self.steps = steps
        self.maxrng = maxrng

    def scan(self, z0, rng, steps):
        # Sample `steps` depths in [z0-rng, z0+rng], clamped to the
        # global bounds set by step(), and score each one.
        z_low = max(z0 - rng, self.z_low)
        z_high = min(z0 + rng, self.z_high)
        z_scan = np.linspace(z_low, z_high, steps)
        sharp_scan = np.zeros_like(z_scan)
        for n, z in enumerate(z_scan):
            self.stage.goto_z(z)
            sleep(1)  # let the stage settle before grabbing frames
            # Grab several frames but keep only the last sharpness —
            # presumably the earlier grabs flush stale frames; confirm.
            for _ in range(5):
                frame = self.cam.capture_img()
                sharp = grad_sharp(frame)
            sharp_scan[n] = sharp
        return z_scan, sharp_scan

    def step(self, iter=2):
        # turn on white led
        state = np.zeros(8)
        state[4] = 1
        self.rb.set_state(state)
        rng = self.rng
        steps = self.steps
        z0 = self.stage.where_z()
        # Never search further than maxrng away from the starting depth.
        self.z_low = z0 - self.maxrng
        self.z_high = z0 + self.maxrng
        for i in range(iter):
            print(f'Searching in range {z0-rng, z0+rng}')
            Z, S = self.scan(z0, rng, steps)
            z0 = Z[S.argmax()]
            rng /= (steps / 3)  # shrink the window for the next, finer pass
        self.stage.goto_z(z0)
        print(f'Best depth -- z={z0:.4f}')
        sleep(1.)
        # turn off leds
        state = np.zeros(8)
        self.rb.set_state(state)
if __name__ == '__main__':
    # init
    # Hardware bring-up: camera 0, LED ring board and the ASI stage
    # (xy init skipped), then one two-pass autofocus and a preview image.
    cam = ELP_Camera(0)
    rb = init_rb()
    control = AsiController(config_file='./asi_config.yml', init_xy=False)
    rng = 20
    steps = 10
    af = AutoFocus(cam, rb, control, rng=rng, steps=steps)
    af.step(iter=2)
    plt.imshow(cam.capture_img())
    plt.show()
|
[
"1michaelfang@gmail.com"
] |
1michaelfang@gmail.com
|
bd373d5d4ad4b354c760abc092fd061bfaab15e3
|
777fa9edef9c1a88423762c85adaf6716244bcd5
|
/app/api/v2/views/user_views.py
|
bf6b3b4d81eeea36ab3f84041b9bc1eeefa157af
|
[] |
no_license
|
ansarisan/vigilant-spoon
|
1f12c05a408ee36ab8c2371d595f48f4ec3f27ce
|
50eaeac7e3f1748a8cbc2ae8fa0e4e4619a851aa
|
refs/heads/master
| 2020-04-16T12:43:42.428369
| 2019-01-14T05:21:00
| 2019-01-14T05:21:00
| 165,593,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
from flask import Flask, request, jsonify
from .. import version2
from .. models.user_model import UserModels
from werkzeug.security import check_password_hash
from werkzeug.exceptions import BadRequest
@version2.route("/users", methods=["GET"])
def hello():
    """ List of all registered users """
    # Delegate to the model and return the user list as JSON.
    resp = UserModels().fetch_users()
    return jsonify(resp)
@version2.route("/auth/signup", methods=["POST"])
def register_user():
    """ Registers a user given details """
    # NOTE(review): get_json() may return None when no JSON body is
    # sent; the model receives it unchecked — confirm validation there.
    data = request.get_json()
    resp = UserModels(data).register_user()
    return jsonify(resp), 201
@version2.route("/auth/login", methods=["POST"])
def login_user():
    """ Logs in a registered user """
    data = request.get_json()
    try:
        password = data["password"]
        username = data["username"]
    except KeyError as p:
        # Missing credential field -> HTTP 400 naming the absent key.
        raise BadRequest(
            "{} should be present in the provided data".format(p))
    resp = UserModels([username, password]).login_user()
    # The model embeds the HTTP status code in its response dict.
    return jsonify(resp), resp["status"]
|
[
"Leewelkarani@gmail.com"
] |
Leewelkarani@gmail.com
|
35501df946cd308d781d7730ff743efd5f3dc66a
|
fbb16a594e43cf57690c2ada793e8a8a9386caa7
|
/okane.py
|
d18ccb6d6d37df30b2e5e262be14172fb0b0faff
|
[] |
no_license
|
voyager42/okane
|
20315a8bdb1ce923bd0a547d095b6b11d4e6913d
|
7472399d2ab66f0962e8faf4d6768fb810727d61
|
HEAD
| 2016-09-01T16:54:51.881820
| 2014-01-12T16:11:37
| 2014-01-12T16:11:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,248
|
py
|
'''
Created on Feb 8, 2012
@author: johan
'''
import sys
import os
import transaction
import wx
import random
import wx.html
import Controller
import Model
import View
import TransactionView
import TotalsView
import csv
import Shapes
import logging
import logging.config
import math
logging.basicConfig(level=logging.WARN)
motionlog=logging.getLogger('motion')
motionlog.setLevel("WARN")
eventlog = logging.getLogger('event')
eventlog.setLevel("INFO")
wildcard = "CSV files (*.csv)|*.csv|" \
"All files (*.*)|*.*"
def calcMouseVelocity(posOld, posNew):
    "Computes the velocity of the mouse"
    # Displacement between the two sampled pointer positions.
    delta_x = posNew[0] - posOld[0]
    delta_y = posNew[1] - posOld[1]
    motionlog.debug("dX = %s" % (delta_x))
    motionlog.debug("dY = %s" % (delta_y))
    angle = math.atan2(delta_y, delta_x) + 0.5 * math.pi
    speed = 0.05 * math.hypot(delta_x, delta_y)
    motionlog.debug("Speed = %s, Angle = %s deg" % (speed, math.degrees(angle)))
    # Implausibly fast motion is reported as no motion at all.
    if speed * speed < 10:
        return (speed, angle)
    return (0, 0)
class Frame(wx.Frame):
    """Main okane window (Python 2 / wxPython): draws draggable
    transaction shapes, handles mouse selection/drag and drop onto
    bucket shapes, and loads transactions from a CSV file."""

    def __init__(self, parent, title, size=wx.DefaultSize):
        wx.Frame.__init__(self, parent, wx.ID_ANY, title, wx.DefaultPosition, size)
        # Mouse/paint/timer event wiring.
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_TIMER, self.OnTimer)
        self.Bind(wx.EVT_LEFT_DOWN, self.OnClick)
        self.Bind(wx.EVT_RIGHT_DOWN, self.OnClick)
        self.Bind(wx.EVT_LEFT_UP, self.OnRelease)
        self.Bind(wx.EVT_RIGHT_UP, self.OnRelease)
        self.Bind(wx.EVT_MOTION, self.OnMotion)
        # Menu bar with Open/Exit.
        menuBar = wx.MenuBar()
        menu = wx.Menu()
        m_open = menu.Append(wx.ID_FILE, "O&pen\tAlt-O", "Open file")
        m_exit = menu.Append(wx.ID_EXIT, "E&xit\tAlt-X", "Close window and exit program.")
        menuBar.Append(menu, "&File")
        self.SetMenuBar(menuBar)
        self.statusbar = self.CreateStatusBar()
        # MVC
        # self.controller = Controller.CController()
        # self.model = Model.CModel()
        # self.controller.setModel(self.model)
        # self.totalsView = TotalsView.TotalsView(self, "Category/Totals View", (400, 300))
        # self.totalsView.setModel(self.model)
        # self.controller.setView(self.totalsView)
        #self.transactionView = TransactionView.TransactionView(self, "Transaction View", (400,200))
        #self.transactionView.setModel(self.model)
        #self.controller.setView(self.transactionView)
        # events
        self.Bind(wx.EVT_MENU, self.OnOpen, m_open)
        self.Bind(wx.EVT_MENU, self.OnClose, m_exit)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        #self.Bind(wx.EVT_LEFT_UP, self.OnClick)
        self.transactionList = []
        self.transactionDict = {}
        self.transactionTotals = {}
        self.modelChangedCallbacks = []
        self.shapes = list()            # all drawable shapes
        self.clickedShapes = list()     # shapes under the last left click
        self.rightClickedShapes = list()
        self.timer = wx.Timer(self)
        self.timer.Start(100)           # 100 ms animation/collision tick
        # NOTE(review): the three lists below are re-initialised here a
        # second time, duplicating the assignments above.
        self.shapes = list()
        self.clickedShapes = list()
        self.rightClickedShapes = list()
        self.frameState = "NORMAL"      # NORMAL / MOTION / *_DRAG states
        #self.generateShapes()
        t = transaction.Bucket(pos=(10,10), size=(70,70), amt=0, desc="Bucket", cat="test", droptarget=True)
        #t = Shapes.RandomRect()
        self.shapes.append(t)
        # t = Rect((0,0), (30,30))
        #t = Shapes.RandomRect()
        #self.shapes.append(t)
        self.lastMovePosition = (0,0)
        self.selectedShape = None
        # layout
        #box = wx.BoxSizer(wx.VERTICAL)
        #box.Add(self.totalsView, wx.EXPAND)
        #box.Add(self.transactionView, wx.EXPAND)
        #self.SetSizer(box)
        #self.Layout()

    def dumpTransactionList(self):
        # Debug helper: print every shape (Python 2 print statement).
        for t in self.shapes:
            print t

    def openFile(self, fn):
        # Load "HIST" rows of a CSV as drawable transactions
        # (columns: 1=date, 3=amount, 4=category, 5=description).
        ifile = open(fn, "rb")
        reader = csv.reader(ifile)
        for row in reader:
            if len(row) > 0 and row[0] == "HIST":
                self.shapes.append(transaction.DrawableTransaction(date=row[1], amt=row[3], cat=row[4], desc=row[5]))
        ifile.close()
        #self.dumpTransactionList()
        #self.createDicts()
        #self.dumpDicts()
        #self.notifyModelChanged()

    def OnClose(self, event):
        # Confirm before destroying the window.
        dlg = wx.MessageDialog(self,
                               "Do you really want to close this application?",
                               "Confirm Exit", wx.OK|wx.CANCEL|wx.ICON_QUESTION)
        result = dlg.ShowModal()
        if result == wx.ID_OK:
            self.Destroy()

    def OnOpen(self, event):
        # File > Open: pick a CSV and load it.
        dlg = wx.FileDialog(
            self, message="Choose a file",
            defaultDir=os.getcwd(),
            defaultFile="",
            wildcard=wildcard,
            style=wx.OPEN | wx.MULTIPLE | wx.CHANGE_DIR)
        dlg.ShowModal()
        self.openFile(dlg.GetFilename())
        #self.totalsView.start()
        dlg.Destroy()
        self.Refresh()

    def OnSize(self, event):
        # Keep the height locked to 75% of the width.
        print "ON SIZE"
        hsize = event.GetSize()[0] * 0.75
        self.SetSizeHints(minW=-1, minH=hsize, maxH=hsize)
        self.SetTitle(str(event.GetSize()))
        self.Refresh()

    def isLeftClick(self, e):
        return (e.GetButton() == wx.MOUSE_BTN_LEFT)

    def isRightClick(self, e):
        return (e.GetButton() == wx.MOUSE_BTN_RIGHT)

    def guessSelectedShape(self, e):
        # Return the topmost shape under the click (Shift picks the one
        # below the topmost); clears all shape states when nothing hit.
        x, y = e.GetPosition()
        if self.isLeftClick(e):
            self.clickedShapes = [s for s in self.shapes if s.contains(x, y)]
            self.clickedShapes.sort(key=lambda shape: shape.zOrder, reverse=True)
            eventlog.info("clickedShapes: %s" % (self.clickedShapes))
            if e.ShiftDown():
                if len(self.clickedShapes) > 1:
                    return self.clickedShapes[1]
            else:
                try:
                    clicked = self.clickedShapes[0]
                    return clicked
                except:
                    # Empty hit list: reset every shape.
                    for s in self.shapes:
                        s.state = "NORMAL"
                    return None
        if self.isRightClick(e):
            self.rightClickedShapes = [s for s in self.shapes if s.contains(x, y)]
            self.rightClickedShapes.sort(key=lambda shape: shape.zOrder, reverse=True)
            eventlog.info("rightClickedShapes: %s" % (self.rightClickedShapes))
            if e.ShiftDown():
                if len(self.rightClickedShapes) > 1:
                    return self.rightClickedShapes[1]
            else:
                try:
                    clicked = self.rightClickedShapes[0]
                    return clicked
                except:
                    for s in self.shapes:
                        s.state = "NORMAL"
                    return None

    def add(self, s):
        # NOTE(review): appends the module-level `t`, not the `s`
        # argument — looks like a bug; kept as-is.
        self.shapes.append(t)

    def OnMotion(self, e):
        # Promote a possible drag to a real one, move the dragged shape,
        # or just track the pointer velocity.
        newX, newY = e.GetPosition()
        if self.frameState == "POSSIBLE_LEFT_DRAG" and e.LeftIsDown():
            self.frameState = self.selectedShape.state = "LEFT_DRAGGING"
        elif self.frameState == "LEFT_DRAGGING" and e.LeftIsDown():
            # or ((self.frameState == "RIGHT_DRAGGING" and e.RightIsDown()):
            oldX, oldY = self.lastMovePosition
            deltaX = newX - oldX
            deltaY = newY - oldY
            self.selectedShape.moveBy(deltaX, deltaY)
        else:
            self.frameState = "MOTION"
            calcMouseVelocity(self.lastMovePosition, (newX, newY))
        self.lastMovePosition = e.GetPosition()
        self.Refresh()

    def OnClick(self, e):
        # Select the shape under the pointer and arm a possible drag.
        x, y = e.GetPosition()
        if self.isLeftClick(e):
            eventlog.info("LEFT CLICK")
            for s in self.clickedShapes:
                s.isClicked = False
                s.state = "NORMAL"
            del self.clickedShapes[:]
            # motionlog.debug("OnClick (%s, %s)" % (x, y)
            self.selectedShape = self.guessSelectedShape(e)
            eventlog.info("%s", self.selectedShape)
            self.statusbar.SetStatusText("%r" % (self.selectedShape))
            try:
                self.selectedShape.isClicked = True
                self.frameState = self.selectedShape.state = "POSSIBLE_LEFT_DRAG"
                self.selectedShape.velocity = (0, 0)
            except:
                pass  # nothing selected
        elif self.isRightClick(e):
            eventlog.info("RIGHT CLICK")
            for s in self.rightClickedShapes:
                s.isRightClicked = False
            del self.rightClickedShapes[:]
            try:
                self.selectedShape.isRightClicked = True
                self.frameState = self.selectedShape.state = "POSSIBLE_RIGHT_DRAG"
            except:
                pass
        self.lastPosition = (x, y)
        # motionlog.debug("Shape %s has a hit" % (self.clickedShapes[0])
        self.Refresh()
        e.Skip()  # recommended practice

    def OnRelease(self, e):
        # End of a drag; the computed deltas are currently unused.
        if self.frameState == "LEFT_DRAGGING":
            newX, newY = e.GetPosition()
            oldX, oldY = self.lastPosition
            deltaX = newX - oldX
            deltaY = newY - oldY
        self.Refresh()

    def OnTimer(self, e):
        # Animation tick: advance every shape and handle drops onto
        # drop-target shapes (buckets).
        for i in self.shapes:
            i.updatePosition()
            (x, y) = i.position
            self.targetShapes = [s for s in self.shapes if s.contains(x, y) and s is not i and i.isVisible()]
            if len(self.targetShapes) > 0:
                self.targetShapes.sort(key=lambda shape: shape.zOrder, reverse=True)
                s = self.targetShapes[0]
                eventlog.info("TARGET SHAPE : %s", s)
                # try:
                if s.isDropTarget:
                    eventlog.info("DO SOMETHING WITH THE COLLISION EVENT")
                    s.add(i.amt)
                    i.container = s
                    i.hide()
                else:
                    eventlog.info("COLLISION BUT %s is not a drop target ", s)
                motionlog.debug("DROPPED")
                # except:
                #     eventlog.info("NO TARGET SHAPE")
        self.Refresh()

    def OnPaint(self, e):
        # Repaint every shape.
        dc = wx.PaintDC(self)
        for i in self.shapes:
            i.drawself(dc)
        # if self.selectedShape != None:
        #     (x,y) = self.selectedShape.position
        #     self.targetShapes = [s for s in self.shapes if s.contains(x, y) and s is not self.selectedShape]
        #     if len(self.targetShapes) > 0:
        #         self.targetShapes.sort(key=lambda shape: shape.zOrder, reverse=True)
        #         s=self.targetShapes[0]
        #         eventlog.info("TARGET SHAPE : %s", s)
        #         try:
        #             if s.isDropTarget:
        #                 eventlog.info("DO SOMETHING WITH THE COLLISION EVENT")
        #                 s.add(self.selectedShape.amt)
        #                 self.selectedShape.container=s
        #             else:
        #                 eventlog.info("COLLISION BUT %s is not a drop target ", s)
        #             motionlog.debug("DROPPED")
        #         except:
        #             eventlog.info("NO TARGET SHAPE")
def main():
    """Create the wx application and its main frame, then run the event loop."""
    application = wx.App(redirect=False)  # error messages go to a popup window
    frame = Frame(None, "Okane", size=(620, 620))
    frame.Show()
    application.MainLoop()
if __name__ == "__main__":
    # Seed the PRNG once before the UI starts.
    random.seed()
    main()
|
[
"johan.kohler@gmail.com"
] |
johan.kohler@gmail.com
|
d698a20dedf519aa173880d68d7f6e8ed00066a9
|
87e424de1cb55b221b2b5f7c239850ae81db1e5e
|
/venv/bin/pip
|
1ae730144a641a60e28ae5ee1596bd9f20af2f23
|
[] |
no_license
|
aliu917/NetworkDesign
|
ac6029c6dd4a1843d946ed7f9636bf5f41aaff54
|
75441ffa27a6aab1ce5a8d20469d059dac8709c0
|
refs/heads/master
| 2022-11-23T10:36:35.036412
| 2020-05-02T21:19:02
| 2020-05-02T21:19:30
| 281,579,227
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
#!/Users/angelaliu/PycharmProjects/Angela-s-Friends-Become-Network-Designers/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
# NOTE: auto-generated setuptools entry-point wrapper; edit with care.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip a trailing '-script.py(w)'/'.exe' suffix so pip sees its
    # canonical program name in argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
|
[
"aliu917@berkeley.edu"
] |
aliu917@berkeley.edu
|
|
32d28d9915732158ab3594e0d09aa170fcda9791
|
8127f4197870cda55bdb064deef6412415844a10
|
/manage.py
|
cc31fe6a73af46a194a920f177b85ce51bb70a2f
|
[] |
no_license
|
mbaiye/django
|
a5480d529ef4e8abcd43e90587dcd1bb96d143e6
|
ec665b577635285724c0ce5dca5f92ea344994a1
|
refs/heads/main
| 2023-08-12T23:50:03.200932
| 2021-09-13T20:30:42
| 2021-09-13T20:30:42
| 357,112,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at the project settings before anything imports them.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoP.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as err:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from err
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    # Standard Django command-line entry point.
    main()
|
[
"moyosorebaiye@gmail.com"
] |
moyosorebaiye@gmail.com
|
c709879b1fee60eecdc644534c5f072428a76609
|
31eaed64b0caeda5c5fe3603609402034e6eb7be
|
/python_zumbi/py_functions/ler_e_gravar_arquivo_CSV.py
|
e8d6a926b35a01ceccefbd8155a6cdd818c3a912
|
[] |
no_license
|
RaphaelfsOliveira/workspace_python
|
93657b581043176ecffb5783de208c0a00924832
|
90959697687b9398cc48146461750942802933b3
|
refs/heads/master
| 2021-01-11T17:39:49.574875
| 2017-06-28T20:55:43
| 2017-06-28T20:55:43
| 79,814,783
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
py
|
import csv
arq_name = "test"  # default output-file basename (written as '<arq_name>.csv')
title = 'perfumes'  # NOTE(review): unused below — presumably for a later scraping step
name_prod = 'perfume-feminino'  # NOTE(review): unused below
url_prod = 'http://www.epocacosmeticos.com.br/perfumes/perfume-feminino'  # NOTE(review): unused below
rows = ['teste','teste']  # sample row for exercising save_urls
def save_urls(arq_name, rows):
    """Write *rows* as a single CSV record to '<arq_name>.csv' and echo it.

    Fixes: the original never closed the file handle; a context manager now
    closes it deterministically, and the file is opened with newline='' as
    the csv module requires.
    """
    with open(arq_name + '.csv', "w", newline='') as out:
        csv.writer(out).writerow(rows)
    print(rows)
|
[
"raphaelbrf@gmail.com"
] |
raphaelbrf@gmail.com
|
386d526236ceef1e4accd80ace256f69374c7b69
|
266f073facf1754763af372f3b4433337161f91a
|
/memegen/domain/template.py
|
4c61cdbd88b6c6134c5c7f14b2935ed1e4fbc5d5
|
[
"MIT"
] |
permissive
|
jkloo/memegen
|
7717104eedc0db1cad15673b426f1ebdb5119445
|
9360486066b52ede528f0c45671f81ebb168e3b3
|
refs/heads/master
| 2020-04-05T18:55:54.182899
| 2015-06-19T13:47:24
| 2015-06-19T13:47:24
| 37,665,345
| 0
| 0
| null | 2015-06-18T14:46:22
| 2015-06-18T14:46:22
| null |
UTF-8
|
Python
| false
| false
| 950
|
py
|
import os
from .text import Text
class Template:
    """Blank image to generate a meme."""
    DEFAULTS = ("default.png", "default.jpg")
    def __init__(self, key,
                 name=None, lines=None, aliases=None, link=None, root=None):
        self.key = key
        self.name = name if name else ""
        self.lines = lines if lines else []
        self.aliases = aliases if aliases else []
        self.link = link if link else ""
        self.root = root
    def __eq__(self, other):
        # A template's identity is its key alone.
        return self.key == other.key
    def __ne__(self, other):
        return not self.__eq__(other)
    def __lt__(self, other):
        # Templates sort alphabetically by display name.
        return self.name < other.name
    @property
    def path(self):
        """Path of the first default image that exists on disk, or None."""
        candidates = (os.path.join(self.root, self.key, default)
                      for default in self.DEFAULTS)
        return next((p for p in candidates if os.path.isfile(p)), None)
    @property
    def default(self):
        """Path of the rendered default text for this template."""
        return Text('/'.join(self.lines)).path
|
[
"jacebrowning@gmail.com"
] |
jacebrowning@gmail.com
|
5f2a5eb29af62914d46eb9bdd3a8b12e5253115d
|
8dd53a5d1820ae5a3efe799381a90c977afd32c4
|
/contrib/devtools/copyright_header.py
|
8ffcca9432a127d16002bcc5aa79aef9ddf47f4a
|
[
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
mulecore/mulecoin
|
8b654817a1b78c9e98f96bfef5febaca23347f64
|
e52131742938ae433463f32680837981a5cedc0f
|
refs/heads/master
| 2023-03-28T05:37:53.552271
| 2021-03-27T03:22:13
| 2021-03-27T03:22:13
| 351,796,749
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,084
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Copyright (c) 2017-2019 The Raven Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import re
import fnmatch
import sys
import subprocess
import datetime
import os
################################################################################
# file filtering
################################################################################
EXCLUDE = [
# libsecp256k1:
'src/secp256k1/include/secp256k1.h',
'src/secp256k1/include/secp256k1_ecdh.h',
'src/secp256k1/include/secp256k1_recovery.h',
'src/secp256k1/include/secp256k1_schnorr.h',
'src/secp256k1/src/java/org_mulecoin_NativeSecp256k1.c',
'src/secp256k1/src/java/org_mulecoin_NativeSecp256k1.h',
'src/secp256k1/src/java/org_mulecoin_Secp256k1Context.c',
'src/secp256k1/src/java/org_mulecoin_Secp256k1Context.h',
# auto generated:
'src/univalue/lib/univalue_escapes.h',
'src/qt/mulecoinstrings.cpp',
'src/chainparamsseeds.h',
# other external copyrights:
'src/tinyformat.h',
'src/leveldb/util/env_win.cc',
'src/crypto/ctaes/bench.c',
'test/functional/test_framework/bignum.py',
# python init:
'*__init__.py',
]
EXCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in EXCLUDE]))
INCLUDE = ['*.h', '*.cpp', '*.cc', '*.c', '*.py']
INCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in INCLUDE]))
def applies_to_file(filename):
    """A file is examined iff it matches INCLUDE and does not match EXCLUDE."""
    return ((EXCLUDE_COMPILED.match(filename) is None) and
            (INCLUDE_COMPILED.match(filename) is not None))
################################################################################
# obtain list of files in repo according to INCLUDE and EXCLUDE
################################################################################
GIT_LS_CMD = 'git ls-files'
def call_git_ls():
    """Return every path tracked by git in the current working directory."""
    out = subprocess.check_output(GIT_LS_CMD.split(' '))
    return [f for f in out.decode("utf-8").split('\n') if f != '']
def get_filenames_to_examine():
    """Return the sorted list of tracked files that pass the fnmatch filters."""
    filenames = call_git_ls()
    return sorted([filename for filename in filenames if
                   applies_to_file(filename)])
################################################################################
# define and compile regexes for the patterns we are looking for
################################################################################
# Regex fragments for the copyright-notice styles recognized by this script.
COPYRIGHT_WITH_C = 'Copyright \(c\)'
COPYRIGHT_WITHOUT_C = 'Copyright'
ANY_COPYRIGHT_STYLE = '(%s|%s)' % (COPYRIGHT_WITH_C, COPYRIGHT_WITHOUT_C)
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
YEAR_LIST = '(%s)(, %s)+' % (YEAR, YEAR)
ANY_YEAR_STYLE = '(%s|%s)' % (YEAR_RANGE, YEAR_LIST)
ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE = ("%s %s" % (ANY_COPYRIGHT_STYLE,
                                                ANY_YEAR_STYLE))
ANY_COPYRIGHT_COMPILED = re.compile(ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE)
def compile_copyright_regex(copyright_style, year_style, name):
    """Build a compiled regex matching '<style> <years> <holder name>'."""
    return re.compile('%s %s %s' % (copyright_style, year_style, name))
# Holder names expected to appear in copyright notices; entries are regex
# fragments (note the escaped dots) and include the trailing newline.
EXPECTED_HOLDER_NAMES = [
    "Satoshi Nakamoto\n",
    "The Mulecoin Core developers\n",
    "The Mulecoin Core developers \n",
    "Mulecoin Core Developers\n",
    "the Mulecoin Core developers\n",
    "The Mulecoin developers\n",
    "The LevelDB Authors\. All rights reserved\.\n",
    "BitPay Inc\.\n",
    "BitPay, Inc\.\n",
    "University of Illinois at Urbana-Champaign\.\n",
    "MarcoFalke\n",
    "Pieter Wuille\n",
    "Pieter Wuille +\*\n",
    "Pieter Wuille, Gregory Maxwell +\*\n",
    "Pieter Wuille, Andrew Poelstra +\*\n",
    "Andrew Poelstra +\*\n",
    "Wladimir J. van der Laan\n",
    "Jeff Garzik\n",
    "Diederik Huys, Pieter Wuille +\*\n",
    "Thomas Daede, Cory Fields +\*\n",
    "Jan-Klaas Kollhof\n",
    "Sam Rushing\n",
    "ArtForz -- public domain half-a-node\n",
]
# Pre-compile one regex per holder for each of the three notice styles.
DOMINANT_STYLE_COMPILED = {}
YEAR_LIST_STYLE_COMPILED = {}
WITHOUT_C_STYLE_COMPILED = {}
for holder_name in EXPECTED_HOLDER_NAMES:
    DOMINANT_STYLE_COMPILED[holder_name] = (
        compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_RANGE, holder_name))
    YEAR_LIST_STYLE_COMPILED[holder_name] = (
        compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_LIST, holder_name))
    WITHOUT_C_STYLE_COMPILED[holder_name] = (
        compile_copyright_regex(COPYRIGHT_WITHOUT_C, ANY_YEAR_STYLE,
                                holder_name))
################################################################################
# search file contents for copyright message of particular category
################################################################################
def get_count_of_copyrights_of_any_style_any_holder(contents):
    """Count every copyright notice of any recognized style in *contents*."""
    return len(ANY_COPYRIGHT_COMPILED.findall(contents))
def file_has_dominant_style_copyright_for_holder(contents, holder_name):
    """True iff *contents* carries a dominant-style notice for *holder_name*."""
    return DOMINANT_STYLE_COMPILED[holder_name].search(contents) is not None
def file_has_year_list_style_copyright_for_holder(contents, holder_name):
    """True iff *contents* carries a year-list-style notice for *holder_name*."""
    return YEAR_LIST_STYLE_COMPILED[holder_name].search(contents) is not None
def file_has_without_c_style_copyright_for_holder(contents, holder_name):
    """True iff *contents* carries a no-"(c)"-style notice for *holder_name*."""
    return WITHOUT_C_STYLE_COMPILED[holder_name].search(contents) is not None
################################################################################
# get file info
################################################################################
def read_file(filename):
    """Return the full text of *filename* (UTF-8)."""
    # Context manager closes the handle deterministically (the original
    # leaked it until garbage collection).
    with open(os.path.abspath(filename), 'r', encoding="utf8") as f:
        return f.read()
def gather_file_info(filename):
    """Collect the per-file copyright statistics consumed by print_report.

    Returns a dict with the raw contents, the total notice count, and one
    boolean per (style, holder) combination.
    """
    info = {}
    info['filename'] = filename
    c = read_file(filename)
    info['contents'] = c
    info['all_copyrights'] = get_count_of_copyrights_of_any_style_any_holder(c)
    info['classified_copyrights'] = 0
    info['dominant_style'] = {}
    info['year_list_style'] = {}
    info['without_c_style'] = {}
    for holder_name in EXPECTED_HOLDER_NAMES:
        has_dominant_style = (
            file_has_dominant_style_copyright_for_holder(c, holder_name))
        has_year_list_style = (
            file_has_year_list_style_copyright_for_holder(c, holder_name))
        has_without_c_style = (
            file_has_without_c_style_copyright_for_holder(c, holder_name))
        info['dominant_style'][holder_name] = has_dominant_style
        info['year_list_style'][holder_name] = has_year_list_style
        info['without_c_style'][holder_name] = has_without_c_style
        if has_dominant_style or has_year_list_style or has_without_c_style:
            info['classified_copyrights'] += 1
    return info
################################################################################
# report execution
################################################################################
# 79-dash rule; identical to the original '-'.join over 80 empty strings,
# but written idiomatically.
SEPARATOR = '-' * 79
def print_filenames(filenames, verbose):
    """Print one tab-indented filename per line, but only in verbose mode."""
    if not verbose:
        return
    for filename in filenames:
        print("\t%s" % filename)
def print_report(file_infos, verbose):
    """Print the full copyright report: counts bucketed by number of notices,
    then per-holder breakdowns for each recognized notice style.

    With *verbose*, every bucket also lists its filenames.
    """
    print(SEPARATOR)
    examined = [i['filename'] for i in file_infos]
    print("%d files examined according to INCLUDE and EXCLUDE fnmatch rules" %
          len(examined))
    print_filenames(examined, verbose)
    print(SEPARATOR)
    print('')
    # Bucket files by how many notices of any style they contain (0..4+).
    zero_copyrights = [i['filename'] for i in file_infos if
                       i['all_copyrights'] == 0]
    print("%4d with zero copyrights" % len(zero_copyrights))
    print_filenames(zero_copyrights, verbose)
    one_copyright = [i['filename'] for i in file_infos if
                     i['all_copyrights'] == 1]
    print("%4d with one copyright" % len(one_copyright))
    print_filenames(one_copyright, verbose)
    two_copyrights = [i['filename'] for i in file_infos if
                      i['all_copyrights'] == 2]
    print("%4d with two copyrights" % len(two_copyrights))
    print_filenames(two_copyrights, verbose)
    three_copyrights = [i['filename'] for i in file_infos if
                        i['all_copyrights'] == 3]
    print("%4d with three copyrights" % len(three_copyrights))
    print_filenames(three_copyrights, verbose)
    four_or_more_copyrights = [i['filename'] for i in file_infos if
                               i['all_copyrights'] >= 4]
    print("%4d with four or more copyrights" % len(four_or_more_copyrights))
    print_filenames(four_or_more_copyrights, verbose)
    print('')
    print(SEPARATOR)
    print('Copyrights with dominant style:\ne.g. "Copyright (c)" and '
          '"<year>" or "<startYear>-<endYear>":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        dominant_style = [i['filename'] for i in file_infos if
                          i['dominant_style'][holder_name]]
        if len(dominant_style) > 0:
            print("%4d with '%s'" % (len(dominant_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(dominant_style, verbose)
    print('')
    print(SEPARATOR)
    print('Copyrights with year list style:\ne.g. "Copyright (c)" and '
          '"<year1>, <year2>, ...":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        year_list_style = [i['filename'] for i in file_infos if
                           i['year_list_style'][holder_name]]
        if len(year_list_style) > 0:
            print("%4d with '%s'" % (len(year_list_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(year_list_style, verbose)
    print('')
    print(SEPARATOR)
    print('Copyrights with no "(c)" style:\ne.g. "Copyright" and "<year>" or '
          '"<startYear>-<endYear>":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        without_c_style = [i['filename'] for i in file_infos if
                           i['without_c_style'][holder_name]]
        if len(without_c_style) > 0:
            print("%4d with '%s'" % (len(without_c_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(without_c_style, verbose)
    print('')
    print(SEPARATOR)
    # Files whose classified count falls short of the total have at least one
    # notice for an unexpected holder name.
    unclassified_copyrights = [i['filename'] for i in file_infos if
                               i['classified_copyrights'] < i['all_copyrights']]
    print("%d with unexpected copyright holder names" %
          len(unclassified_copyrights))
    print_filenames(unclassified_copyrights, verbose)
    print(SEPARATOR)
def exec_report(base_directory, verbose):
    """Run the copyright report over the git repo at *base_directory*.

    Fix: the original did not restore the caller's working directory if any
    step raised; try/finally now guarantees it.
    """
    original_cwd = os.getcwd()
    os.chdir(base_directory)
    try:
        filenames = get_filenames_to_examine()
        file_infos = [gather_file_info(f) for f in filenames]
        print_report(file_infos, verbose)
    finally:
        os.chdir(original_cwd)
################################################################################
# report cmd
################################################################################
REPORT_USAGE = """
Produces a report of all copyright header notices found inside the source files
of a repository.
Usage:
    $ ./copyright_header.py report <base_directory> [verbose]
Arguments:
    <base_directory> - The base directory of a mulecoin source code repository.
    [verbose] - Includes a list of every file of each subcategory in the report.
"""
def report_cmd(argv):
    """Validate 'report' subcommand arguments and run the report."""
    if len(argv) == 2:
        sys.exit(REPORT_USAGE)
    base_directory = argv[2]
    if not os.path.exists(base_directory):
        sys.exit("*** bad <base_directory>: %s" % base_directory)
    if len(argv) == 3:
        verbose = False
    elif argv[3] == 'verbose':
        verbose = True
    else:
        # Fix: the offending argument is argv[3], not argv[2].
        sys.exit("*** unknown argument: %s" % argv[3])
    exec_report(base_directory, verbose)
################################################################################
# query git for year of last change
################################################################################
# '%%ai' escapes to git's '%ai' (ISO author date); '%s' takes the filename.
GIT_LOG_CMD = "git log --pretty=format:%%ai %s"
def call_git_log(filename):
    """Return the author-date lines of 'git log' for *filename*."""
    out = subprocess.check_output((GIT_LOG_CMD % filename).split(' '))
    return out.decode("utf-8").split('\n')
def get_git_change_years(filename):
    """Return the years in which *filename* changed, per git history.

    NOTE(review): the normal path yields year *strings* while the empty-log
    fallback yields a single *int* — confirm callers tolerate the mix.
    """
    git_log_lines = call_git_log(filename)
    if len(git_log_lines) == 0:
        return [datetime.date.today().year]
    # timestamp is in ISO 8601 format. e.g. "2016-09-05 14:25:32 -0600"
    return [line.split(' ')[0].split('-')[0] for line in git_log_lines]
def get_most_recent_git_change_year(filename):
    # max() over "20xx" strings is lexicographic, which is correct for
    # fixed-width years.
    return max(get_git_change_years(filename))
################################################################################
# read and write to file
################################################################################
def read_file_lines(filename):
    """Return the lines of *filename* (UTF-8), keeping line endings."""
    # Context managers replace the manual open/close pairs so the handles
    # are released even if reading/writing raises.
    with open(os.path.abspath(filename), 'r', encoding="utf8") as f:
        return f.readlines()
def write_file_lines(filename, file_lines):
    """Overwrite *filename* (UTF-8) with the concatenation of *file_lines*."""
    with open(os.path.abspath(filename), 'w', encoding="utf8") as f:
        f.write(''.join(file_lines))
################################################################################
# update header years execution
################################################################################
# Pattern for the one holder whose years this script may rewrite.
COPYRIGHT = 'Copyright \(c\)'
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
HOLDER = 'The Mulecoin Core developers'
UPDATEABLE_LINE_COMPILED = re.compile(' '.join([COPYRIGHT, YEAR_RANGE, HOLDER]))
def get_updatable_copyright_line(file_lines):
    """Return (index, line) of the first updatable copyright line, else
    (None, None).

    Fix: replaced the manual index counter with enumerate.
    """
    for index, line in enumerate(file_lines):
        if UPDATEABLE_LINE_COMPILED.search(line) is not None:
            return index, line
    return None, None
def parse_year_range(year_range):
    """Split 'YYYY' or 'YYYY-YYYY' into a (start, end) pair of strings."""
    start, sep, end = year_range.partition('-')
    if not sep:
        return start, start
    return start, end
def year_range_to_str(start_year, end_year):
    """Render a (start, end) pair back to 'YYYY' or 'YYYY-YYYY'."""
    if start_year == end_year:
        return start_year
    return "%s-%s" % (start_year, end_year)
def create_updated_copyright_line(line, last_git_change_year):
    """Return *line* with its year range extended to *last_git_change_year*.

    The text before 'Copyright (c) ' is preserved verbatim — it is part of
    the surrounding comment block and varies from file to file. The line is
    returned unchanged when its end year already matches.
    """
    marker = 'Copyright (c) '
    prefix, _, tail = line.partition(marker)
    year_range, _, remainder = tail.partition(' ')
    start_year, end_year = parse_year_range(year_range)
    if end_year == last_git_change_year:
        return line
    new_range = year_range_to_str(start_year, last_git_change_year)
    return prefix + marker + new_range + ' ' + remainder
def update_updatable_copyright(filename):
    """Bump the end year of *filename*'s core copyright to its last git
    change year, printing one status line either way."""
    file_lines = read_file_lines(filename)
    index, line = get_updatable_copyright_line(file_lines)
    if not line:
        print_file_action_message(filename, "No updatable copyright.")
        return
    last_git_change_year = get_most_recent_git_change_year(filename)
    new_line = create_updated_copyright_line(line, last_git_change_year)
    if line == new_line:
        print_file_action_message(filename, "Copyright up-to-date.")
        return
    # Rewrite the file only when the line actually changed.
    file_lines[index] = new_line
    write_file_lines(filename, file_lines)
    print_file_action_message(filename,
                              "Copyright updated! -> %s" % last_git_change_year)
def exec_update_header_year(base_directory):
    """Run the copyright-year update over every examined file in
    *base_directory*.

    Fix: try/finally now restores the caller's working directory even if an
    update raises (the original left the process chdir'd on error).
    """
    original_cwd = os.getcwd()
    os.chdir(base_directory)
    try:
        for filename in get_filenames_to_examine():
            update_updatable_copyright(filename)
    finally:
        os.chdir(original_cwd)
################################################################################
# update cmd
################################################################################
UPDATE_USAGE = """
Updates all the copyright headers of "The Mulecoin Core developers" which were
changed in a year more recent than is listed. For example:
// Copyright (c) <firstYear>-<lastYear> The Bitcoin Core developers
// Copyright (c) 2017-2019 The Raven Core developers
// Copyright (c) 2020-2021 The Mulecoin Core developers
will be updated to:
// Copyright (c) <firstYear>-<lastModifiedYear> The Bitcoin Core developers
// Copyright (c) 2017-2019 The Raven Core developers
// Copyright (c) 2020-2021 The Mulecoin Core developers
where <lastModifiedYear> is obtained from the 'git log' history.
This subcommand also handles copyright headers that have only a single year. In those cases:
// Copyright (c) <year> The Bitcoin Core developers
// Copyright (c) 2017-2019 The Raven Core developers
// Copyright (c) 2020-2021 The Mulecoin Core developers
will be updated to:
// Copyright (c) <year>-<lastModifiedYear> The Bitcoin Core developers
// Copyright (c) 2017-2019 The Raven Core developers
// Copyright (c) 2020-2021 The Mulecoin Core developers
where the update is appropriate.
Usage:
$ ./copyright_header.py update <base_directory>
Arguments:
<base_directory> - The base directory of a mulecoin source code repository.
"""
def print_file_action_message(filename, action):
    """Print one aligned 'filename  action' status line."""
    print("%-52s %s" % (filename, action))
def update_cmd(argv):
    """Entry point for the 'update' subcommand."""
    if len(argv) != 3:
        sys.exit(UPDATE_USAGE)
    target_dir = argv[2]
    if not os.path.exists(target_dir):
        sys.exit("*** bad base_directory: %s" % target_dir)
    exec_update_header_year(target_dir)
################################################################################
# inserted copyright header format
################################################################################
def get_header_lines(header, start_year, end_year):
    """Split a header template into lines, filling in the year range.

    The [1:-1] slice drops the empty first/last entries produced by the
    template's leading and trailing newlines.
    """
    lines = header.split('\n')[1:-1]
    lines[0] = lines[0] % year_range_to_str(start_year, end_year)
    return [line + '\n' for line in lines]
CPP_HEADER = '''
// Copyright (c) %s The Bitcoin Core developers
// Copyright (c) 2017-2019 The Raven Core developers
// Copyright (c) 2020-2021 The Mulecoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_cpp_header_lines_to_insert(start_year, end_year):
    # Reversed because the caller inserts each line at a fixed index.
    return reversed(get_header_lines(CPP_HEADER, start_year, end_year))
PYTHON_HEADER = '''
# Copyright (c) %s The Bitcoin Core developers
# Copyright (c) 2017-2019 The Raven Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_python_header_lines_to_insert(start_year, end_year):
    # Reversed because the caller inserts each line at a fixed index.
    return reversed(get_header_lines(PYTHON_HEADER, start_year, end_year))
################################################################################
# query git for year of last change
################################################################################
def get_git_change_year_range(filename):
    """Return (first_change_year, last_change_year) for *filename* per git."""
    years = get_git_change_years(filename)
    return min(years), max(years)
################################################################################
# check for existing core copyright
################################################################################
def file_already_has_core_copyright(file_lines):
    """True iff *file_lines* already contains an updatable core copyright.

    Fix: identity comparison with None (`is not None`) replaces `!= None`.
    """
    index, _ = get_updatable_copyright_line(file_lines)
    return index is not None
################################################################################
# insert header execution
################################################################################
def file_has_hashbang(file_lines):
    """Return True when the file's first line is a '#!' interpreter line.

    A bare two-character "#!" line does not count, matching the original's
    length guard.
    """
    if not file_lines:
        return False
    first = file_lines[0]
    if len(first) <= 2:
        return False
    return first.startswith('#!')
def insert_python_header(filename, file_lines, start_year, end_year):
    """Insert the Python copyright header, below a '#!' line if present."""
    if file_has_hashbang(file_lines):
        insert_idx = 1
    else:
        insert_idx = 0
    header_lines = get_python_header_lines_to_insert(start_year, end_year)
    # Lines arrive pre-reversed, so repeated insertion at a fixed index
    # restores the original order.
    for line in header_lines:
        file_lines.insert(insert_idx, line)
    write_file_lines(filename, file_lines)
def insert_cpp_header(filename, file_lines, start_year, end_year):
    """Insert the C++ copyright header at the very top of the file."""
    header_lines = get_cpp_header_lines_to_insert(start_year, end_year)
    for line in header_lines:
        file_lines.insert(0, line)
    write_file_lines(filename, file_lines)
def exec_insert_header(filename, style):
    """Insert a copyright header into *filename* in the given style.

    Exits the process if the file already carries a core copyright notice.
    """
    file_lines = read_file_lines(filename)
    if file_already_has_core_copyright(file_lines):
        sys.exit('*** %s already has a copyright by The Core developers'
                 % (filename))
    start_year, end_year = get_git_change_year_range(filename)
    if style == 'python':
        insert_python_header(filename, file_lines, start_year, end_year)
    else:
        insert_cpp_header(filename, file_lines, start_year, end_year)
################################################################################
# insert cmd
################################################################################
INSERT_USAGE = """
Inserts a copyright header for "The Mulecoin Core developers" at the top of the
file in either Python or C++ style as determined by the file extension. If the
file is a Python file and it has a '#!' starting the first line, the header is
inserted in the line below it.
The copyright dates will be set to be:
"<year_introduced>-<current_year>"
where <year_introduced> is according to the 'git log' history. If
<year_introduced> is equal to <current_year>, the date will be set to be:
"<current_year>"
If the file already has a copyright for "The Mulecoin Core developers", the
script will exit.
Usage:
$ ./copyright_header.py insert <file>
Arguments:
<file> - A source file in the mulecoin repository.
"""
def insert_cmd(argv):
    """Entry point for the 'insert' subcommand."""
    if len(argv) != 3:
        sys.exit(INSERT_USAGE)
    target = argv[2]
    if not os.path.isfile(target):
        sys.exit("*** bad filename: %s" % target)
    _, extension = os.path.splitext(target)
    if extension not in ('.h', '.cpp', '.cc', '.c', '.py'):
        sys.exit("*** cannot insert for file extension %s" % extension)
    # Header comment style follows the file extension.
    style = 'python' if extension == '.py' else 'cpp'
    exec_insert_header(target, style)
################################################################################
# UI
################################################################################
USAGE = """
copyright_header.py - utilities for managing copyright headers of 'The Mulecoin
Core developers' in repository source files.
Usage:
$ ./copyright_header <subcommand>
Subcommands:
report
update
insert
To see subcommand usage, run them without arguments.
"""
SUBCOMMANDS = ['report', 'update', 'insert']
if __name__ == "__main__":
    # Dispatch to the chosen subcommand; print usage on any bad invocation.
    if len(sys.argv) == 1:
        sys.exit(USAGE)
    subcommand = sys.argv[1]
    if subcommand not in SUBCOMMANDS:
        sys.exit(USAGE)
    if subcommand == 'report':
        report_cmd(sys.argv)
    elif subcommand == 'update':
        update_cmd(sys.argv)
    elif subcommand == 'insert':
        insert_cmd(sys.argv)
|
[
"root@DESKTOP-AOBIGEQ.localdomain"
] |
root@DESKTOP-AOBIGEQ.localdomain
|
13291215a88b57ada0920e080d9c3b7a1ef0eb47
|
64e23db686e6d1e7edd74ca5952a16408d320ca8
|
/daemon/lvm.py
|
2369697565859ac07dea534a71bd4f106d3f2b1d
|
[] |
no_license
|
pengxiaojun/ipsan
|
37514472cb1a2306684c664adb41d49a685034b5
|
11b39f55417489d6c7be481f908c01dd6b75ca63
|
refs/heads/master
| 2021-01-18T21:34:17.533550
| 2016-03-30T08:26:29
| 2016-03-30T08:26:29
| 39,625,764
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,073
|
py
|
# -*-coding: utf-8 -*-
import os
import sqlite3
import logging
import subprocess
from common import grgrant_prog
from common import database
def fetch_all_lvm():
    """Return all (name, path) rows from the 'lvms' table, or None on error.

    NOTE(review): the broad `except Exception` deliberately degrades to None
    so check_lvm can skip its pass — confirm this best-effort policy is
    intended. `database` is the project-wide sqlite path from `common`.
    """
    with sqlite3.connect(database) as conn:
        c = conn.cursor()
        try:
            c.execute('select name, path from lvms')
            r = c.fetchall()
            return r
        except Exception as e:
            logging.exception(e)
            return None
def active_lvm(name, path):
    """Activate LVM volume groups via 'vgchange -a y' run through grgrant.

    Fix: dropped the unused binding of check_output's return value.
    NOTE(review): the command activates *all* volume groups; *name* and
    *path* are only used in the log messages — confirm that is intended.
    """
    args = [grgrant_prog, '/sbin/vgchange', '-a', 'y']
    try:
        subprocess.check_output(args, universal_newlines=True)
        logging.info("Active lvm:%s path:%s success." % (name, path))
    except subprocess.CalledProcessError as e:
        logging.info("Active lvm:%s path:%s failure:" % (name, path))
        logging.exception(e)
def check_lvm():
    """Re-activate LVM volumes whose device path is missing from disk."""
    records = fetch_all_lvm()
    if records is None:
        return
    for name, path in records:
        if not os.path.exists(path):
            active_lvm(name, path)
# if __name__ == '__main__':
#     check_lvm()
|
[
"pengxj@outlook.com"
] |
pengxj@outlook.com
|
8864c5625cee7be5cd7ac66b57768f555f562984
|
fb7efe44f4d9f30d623f880d0eb620f3a81f0fbd
|
/chrome/browser/resources/PRESUBMIT.py
|
e7a3e430986f0a2d2062a15497b6b5e3e4784501
|
[
"BSD-3-Clause"
] |
permissive
|
wzyy2/chromium-browser
|
2644b0daf58f8b3caee8a6c09a2b448b2dfe059c
|
eb905f00a0f7e141e8d6c89be8fb26192a88c4b7
|
refs/heads/master
| 2022-11-23T20:25:08.120045
| 2018-01-16T06:41:26
| 2018-01-16T06:41:26
| 117,618,467
| 3
| 2
|
BSD-3-Clause
| 2022-11-20T22:03:57
| 2018-01-16T02:09:10
| null |
UTF-8
|
Python
| false
| false
| 5,423
|
py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for files in chrome/browser/resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
import re
# Location of the metrics actions registry, relative to this directory.
ACTION_XML_PATH = '../../../tools/metrics/actions/actions.xml'
def CheckUserActionUpdate(input_api, output_api, action_xml_path):
  """Checks if any new user action has been added."""
  if any('actions.xml' == input_api.os_path.basename(f) for f in
         input_api.change.LocalPaths()):
    # If actions.xml is already included in the changelist, the PRESUBMIT
    # for actions.xml will do a more complete presubmit check.
    return []
  file_filter = lambda f: f.LocalPath().endswith('.html')
  # Captures the value of a metric="..." attribute in changed HTML lines.
  action_re = r'(^|\s+)metric\s*=\s*"([^ ]*)"'
  current_actions = None
  for f in input_api.AffectedFiles(file_filter=file_filter):
    for line_num, line in f.ChangedContents():
      match = input_api.re.search(action_re, line)
      if match:
        # Loads contents in tools/metrics/actions/actions.xml to memory. It's
        # loaded only once.
        if not current_actions:
          with open(action_xml_path) as actions_f:
            current_actions = actions_f.read()
        metric_name = match.group(2)
        is_boolean = IsBoolean(f.NewContents(), metric_name, input_api)
        # Search for the matched user action name in |current_actions|.
        if not IsActionPresent(current_actions, metric_name, is_boolean):
          return [output_api.PresubmitPromptWarning(
            'File %s line %d: %s is missing in '
            'tools/metrics/actions/actions.xml. Please run '
            'tools/metrics/actions/extract_actions.py to update.'
            % (f.LocalPath(), line_num, metric_name), [])]
  return []
def IsActionPresent(current_actions, metric_name, is_boolean):
  """Return whether *metric_name* is defined in the actions.xml content.

  A boolean action is present only when both its _Disable and _Enable
  variants exist; a plain action needs a single name="<metric_name>" entry.

  Args:
    current_actions: The content of the actions.xml file.
    metric_name: The name for which the check should be done.
    is_boolean: Whether the action comes from a boolean control.
  """
  if not is_boolean:
    return 'name="{0}"'.format(metric_name) in current_actions
  variants = ('name="{0}_Disable"'.format(metric_name),
              'name="{0}_Enable"'.format(metric_name))
  return all(entry in current_actions for entry in variants)
def IsBoolean(new_content_lines, metric_name, input_api):
  """Return whether the action named *metric_name* comes from a boolean
  control.

  Finds the HTML element carrying metric="<metric_name>" in the changed
  lines and inspects its attributes for checkbox/radio/boolean markers.

  Args:
    new_content_lines: List of changed lines.
    metric_name: The name for which the check should be done.
  """
  markup = '\n'.join(new_content_lines)
  element_re = r'<(.*?)(^|\s+)metric\s*=\s*"%s"(.*?)>' % (metric_name)
  control_re = (r'datatype\s*=\s*"boolean"|type\s*=\s*"checkbox"|'
                'type\s*=\s*"radio".*?value\s*=\s*("true"|"false")')
  element = input_api.re.search(element_re, markup, input_api.re.DOTALL)
  return (element and
          any(input_api.re.search(control_re, element.group(i)) for i in (1, 3)))
def CheckHtml(input_api, output_api):
  """Run the 80-column long-line check over changed HTML files."""
  def _is_html(f):
    return f.LocalPath().endswith('.html')
  return input_api.canned_checks.CheckLongLines(
      input_api, output_api, 80, _is_html)
def RunOptimizeWebUiTests(input_api, output_api):
  """Run the optimize_webui unit tests that live next to this PRESUBMIT."""
  presubmit_path = input_api.PresubmitLocalPath()
  tests = [input_api.os_path.join(presubmit_path, 'optimize_webui_test.py')]
  return input_api.canned_checks.RunUnitTests(input_api, output_api, tests)
def _CheckWebDevStyle(input_api, output_api):
  """Runs the shared web_dev_style checks, temporarily extending sys.path.

  FIX: previously `import sys` and the `old_sys_path = sys.path[:]` snapshot
  were *inside* the try block. If anything failed before the snapshot line,
  the finally clause would raise NameError on `old_sys_path`, masking the
  original exception. Snapshot before entering the try so restore is always
  well-defined.
  """
  import sys
  results = []
  old_sys_path = sys.path[:]
  try:
    cwd = input_api.PresubmitLocalPath()
    sys.path += [input_api.os_path.join(cwd, '..', '..', '..', 'tools')]
    import web_dev_style.presubmit_support
    results += web_dev_style.presubmit_support.CheckStyle(input_api, output_api)
  finally:
    # Always restore the interpreter-wide path, even on failure.
    sys.path = old_sys_path
  return results
def _CheckChangeOnUploadOrCommit(input_api, output_api):
  """Shared presubmit body for both upload and commit."""
  results = CheckUserActionUpdate(input_api, output_api, ACTION_XML_PATH)
  changed = input_api.AffectedFiles()
  touched_html = any(f.LocalPath().endswith('.html') for f in changed)
  touched_optimizer = any(
      f.LocalPath().endswith('optimize_webui.py') for f in changed)
  if touched_html:
    results += CheckHtml(input_api, output_api)
  if touched_optimizer:
    results += RunOptimizeWebUiTests(input_api, output_api)
  results += _CheckWebDevStyle(input_api, output_api)
  results += input_api.canned_checks.CheckPatchFormatted(
      input_api, output_api, check_js=True)
  return results
def CheckChangeOnUpload(input_api, output_api):
  # Presubmit entry point called by depot_tools on `git cl upload`.
  return _CheckChangeOnUploadOrCommit(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
  # Presubmit entry point called by depot_tools at commit/CQ time.
  return _CheckChangeOnUploadOrCommit(input_api, output_api)
def PostUploadHook(cl, change, output_api):
  """Adds the optional closure-compilation trybot to the CL's CQ config."""
  trybots = [
      'master.tryserver.chromium.linux:closure_compilation',
  ]
  return output_api.EnsureCQIncludeTrybotsAreAdded(
      cl, trybots, 'Automatically added optional Closure bots to run on CQ.')
|
[
"jacob-chen@iotwrt.com"
] |
jacob-chen@iotwrt.com
|
3f01613eeb26d292392466fcaaaf61554c04e567
|
2b488c48c2d5a6996ead7ba34c99d8cd24c25052
|
/api/getCreatedDate.py
|
98d071724bae6717ba5239699d2de9bfcb9248ab
|
[
"MIT"
] |
permissive
|
yashrastogi16/steemapi-django
|
84f527ea918a48e7e7b6de2cd17d0b2e43109659
|
716455da839686e0305461e735119da9560e0a2d
|
refs/heads/master
| 2020-03-17T13:38:36.653144
| 2018-05-05T17:51:05
| 2018-05-05T17:51:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
from django.http import HttpResponse
from steem import Steem
def index(request):
    """Django view: return the creation date of the Steem account named by
    query parameter ``a`` as plain text, or a usage message on any failure.

    FIX: both handlers were bare ``except:``, which also swallows
    SystemExit/KeyboardInterrupt; narrowed to ``except Exception`` with the
    same best-effort behavior.
    """
    try:
        s = Steem(nodes=["https://api.steemit.com"])
        try:
            # Missing param, unknown account, or RPC error -> empty response.
            created = s.get_account(request.GET['a'])['created']
        except Exception:
            created = None
        return HttpResponse(created, content_type='text/plain')
    except Exception:
        # Node construction / response building failed: show usage help.
        return HttpResponse('To use this API call, please supply param a=accountname, substituting accountname with the account to see its creation date.\n\n'
                            'Example: https://api.steem.place/getCreatedDate/?a=moisesmcardona\n\n'
                            'Returns: Created date', content_type='text/plain')
|
[
"moises@moises-studios.com"
] |
moises@moises-studios.com
|
9a4f50681591049e468dde5df12c8247abf21f49
|
2e6cc4c6f5e3d532a83bc4ad2960b9ed6d9c6e5a
|
/releasenotes/source/conf.py
|
4c6a1c415f03062d5860803d58bc5c5aef064c71
|
[
"Apache-2.0"
] |
permissive
|
Nexenta/manila
|
dba8cc9f18bf4ed54f2671fe8dc747bb4b7c2e38
|
c7a044733b0be8b4aafd962f04a1a781b16a580b
|
refs/heads/master
| 2023-07-22T09:14:13.728972
| 2023-07-10T16:14:47
| 2023-07-10T16:14:47
| 50,369,420
| 1
| 3
|
Apache-2.0
| 2023-07-10T16:14:48
| 2016-01-25T17:55:21
|
Python
|
UTF-8
|
Python
| false
| false
| 9,080
|
py
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Manila Release Notes documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 3 17:40:50 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extensions used to build the release notes (reno renders the
# per-release note files; oslosphinx supplies the OpenStack theme support).
extensions = [
    'oslosphinx',
    'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Manila Release Notes'
copyright = u'2015, Manila Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NOTE: imported here (not at the top of the file) so the docs build reads
# the package version at Sphinx configuration time.
from manila.version import version_info as manila_version # noqa
# The full version, including alpha/beta/rc tags.
release = manila_version.version_string_with_vcs()
# The short X.Y version.
version = manila_version.canonical_version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ManilaReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ManilaReleaseNotes.tex', u'Manila Release Notes Documentation',
u'Manila Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'manilareleasenotes', u'Manila Release Notes Documentation',
[u'Manila Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ManilaReleaseNotes', u'Manila Release Notes Documentation',
u'Manila Developers', 'ManilaReleaseNotes',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
[
"tbechtold@suse.com"
] |
tbechtold@suse.com
|
e9f959214d9f66ad9770deeda8fcf6daf9801267
|
11c3dc3f51ec2cab15ce0b8a3be8e8aa06f4686c
|
/backend/api/product/migrations/0008_auto_20200418_1644.py
|
bd446b559c6e9eead0c451222bd95f86e3b53465
|
[
"MIT"
] |
permissive
|
0mri/GStore
|
b3797986473a211ab0581cd80948cfbec4a8a0f3
|
232cf03a6deab15ae4178933210a7431496d9dd0
|
refs/heads/master
| 2022-11-26T11:47:58.825764
| 2020-08-01T10:01:01
| 2020-08-01T10:01:01
| 259,165,977
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
# Generated by Django 2.2 on 2020-04-18 13:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter Product.price to DecimalField(max_digits=7,
    decimal_places=2). Do not hand-edit an applied migration; add a new one.
    """
    dependencies = [
        ('product', '0007_auto_20200410_1743'),
    ]
    operations = [
        migrations.AlterField(
            model_name='product',
            name='price',
            field=models.DecimalField(decimal_places=2, max_digits=7),
        ),
    ]
|
[
"omri@efra.im"
] |
omri@efra.im
|
88fd6306ddf23894d2552a4e2bc87e2b89a734df
|
e489172f6e49e1239db56c047a78a29a6ffc0b36
|
/via_code_decode/code_category.py
|
ab1f0d754b5245b8d99d0e949b26421de5effc09
|
[] |
no_license
|
eksotama/prln-via-custom-addons
|
f05d0059353ae1de89ccc8d1625a896c0215cfc7
|
f2b44a8af0e7bee87d52d258fca012bf44ca876f
|
refs/heads/master
| 2020-03-25T19:49:08.117628
| 2015-12-01T07:29:43
| 2015-12-01T07:29:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,052
|
py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Vikasa Infinity Anugrah, PT
# Copyright (c) 2011 - 2013 Vikasa Infinity Anugrah <http://www.infi-nity.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from osv import osv, fields
from tools.translate import _
class code_category(osv.osv):
    """Code Category model (legacy OpenERP osv API).

    Rows flagged `pinned` cannot be deleted; modules use this to guarantee
    that a category they depend on keeps existing.
    """
    _name = 'code.category'
    _description = 'Code Category'
    _columns = {
        'name': fields.char('Code Category', size=64, readonly=False, required=True, translate=True, select=True, help="Register Code Category"),
        'pinned': fields.boolean('Pinned', readonly=True, help="This is to mark whether the code category is 'pinned', i.e. cannot be deleted. Can be used by modules to force existence of the code category."),
    }
    _defaults = {
        'pinned' : False,
    }
    ## unlink
    #
    # unlink intercepts the main unlink function to prevent deletion of pinned record.
    #
    def unlink(self, cr, uid, ids, context=None):
        """Delete the given records, raising if any of them is pinned."""
        for _obj in self.pool.get('code.category').browse(cr, uid, ids, context=context):
            if _obj.pinned:
                raise osv.except_osv(_('Error !'), _('Pinned Code Category cannot be deleted.'))
        return super(code_category, self).unlink(cr, uid, ids, context=context)
# Instantiation registers the model with the old-style OpenERP ORM.
code_category()
|
[
"aero@aero.(none)"
] |
aero@aero.(none)
|
518d0d93c4e09549a7524784f57e483f1929f267
|
59deb6307b1a55a043f944f00d9e929b97ca042c
|
/softdashdj/wsgi.py
|
c0672262f57c4bfc068f3db09f1f37cc97083d0a
|
[] |
no_license
|
abykal/softdashdj
|
d0be04f4786bf1f72b8ea77e5875da9d104ab503
|
e969d348461698f1e45392b3007952732361e9ce
|
refs/heads/main
| 2023-06-17T10:15:19.536349
| 2021-07-09T09:16:55
| 2021-07-09T09:16:55
| 383,747,518
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
WSGI config for softdashdj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'softdashdj.settings')
application = get_wsgi_application()
|
[
"abyabrkal@gmail.com"
] |
abyabrkal@gmail.com
|
3be5e6031a6351f732e4aa3e3ecf6dc74d11eb6c
|
f5c62bab2e95bb2dc6986ba271662ade8cae4da0
|
/docs/PythonSAI/LineProperties.py
|
c3e7401f40524540e570646be946433183820cfd
|
[] |
no_license
|
Has3ong/X3DViewer
|
d211b159c29523e61158eddc015bb320e4ba7c9d
|
c629305c24b5c25fd41d3a46816efbf1f74d0092
|
refs/heads/master
| 2021-06-25T16:36:46.278469
| 2021-01-03T11:26:02
| 2021-01-03T11:26:02
| 180,564,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,729
|
py
|
from . import *
# LineProperties defines a concrete node interface that extends interface X3DAppearanceChildNode.
class CLineProperties(CX3DAppearanceChildNode):
    """Concrete X3D LineProperties node, extending X3DAppearanceChildNode.

    All accessors are interface stubs (SAI skeleton); only node bookkeeping
    state is initialized here.
    """
    m_strNodeName = "LineProperties"

    def __init__(self):
        # Node identity / scene-graph bookkeeping.
        self.m_strNodeName = "LineProperties"
        self.m_Parent = [None]
        self.children = []
        # DEF/USE names for X3D node reuse.
        self.DEF = ""
        self.USE = ""
        # Traversal counters.
        self.n_Count = -1
        self.depth = 0

    def getApplied(self):
        """Return boolean from the SFBool inputOutput field "applied"."""
        pass

    def setApplied(self, value):
        """Assign boolean to the SFBool inputOutput field "applied"."""
        pass

    def getLinetype(self):
        """Return int from the SFInt32 inputOutput field "linetype"."""
        pass

    def setLinetype(self, value):
        """Assign int to the SFInt32 inputOutput field "linetype"."""
        pass

    def getLinewidthScaleFactor(self):
        """Return float from the SFFloat field "linewidthScaleFactor"."""
        pass

    def setLinewidthScaleFactor(self, value):
        """Assign float to the SFFloat field "linewidthScaleFactor"."""
        pass

    # ----- methods for fields inherited from parent interfaces -----

    def getMetadata(self):
        """Return X3DMetadataObject from the SFNode field "metadata"."""
        pass

    def setMetadata1(self, node):
        """Assign X3DMetadataObject (typed node) to the "metadata" field."""
        pass

    def setMetadata2(self, protoInstance):
        """Assign X3DMetadataObject (typed protoInstance) to "metadata"."""
        pass
|
[
"khsh5592@naver.com"
] |
khsh5592@naver.com
|
4dc0710a308eb43121ff85c314929338cc1ad68d
|
f98c45d0079479b10c8276693dc31c704ccc087f
|
/api/apps/goods/models.py
|
9f7696a669fbacebb0c26a81978d4226843c2828
|
[
"MIT"
] |
permissive
|
TasHole/tokyo
|
b78c84d31b5c459a8a508fd671151a825db55835
|
d4e0b2cce2aae53d93cb2bbbd2ca12ff0aa6a219
|
refs/heads/master
| 2020-12-21T13:29:31.626154
| 2019-10-12T03:03:34
| 2019-10-12T03:03:34
| 236,445,029
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,857
|
py
|
from datetime import datetime
from django.db import models
class GoodsCategory(models.Model):
    """
    Product category (three-level hierarchy via parent_category).
    """
    CATEGORY_TYPE = (
        (1, "一級カテゴリー"),
        (2, "二級カテゴリー"),
        (3, "三級カテゴリー")
    )
    name = models.CharField(default="", max_length=50, verbose_name="カテゴリー名", help_text="カテゴリー名")
    code = models.CharField(default="", max_length=30, verbose_name="カテゴリーコード", help_text="カテゴリーコード")
    desc = models.TextField(default="", verbose_name="カテゴリー説明", help_text="カテゴリー説明")
    category_type = models.IntegerField(choices=CATEGORY_TYPE, verbose_name="カテゴリーレベル", help_text="カテゴリーレベル")
    # Self-referential FK: null for top-level categories; children reachable
    # via the "sub_cat" reverse accessor.
    parent_category = models.ForeignKey("self", null=True, blank=True, verbose_name="親カテゴリー", help_text="親カテゴリー",
                                        on_delete=models.CASCADE, related_name="sub_cat")
    is_tab = models.BooleanField(default=False, verbose_name="ナビなのか", help_text="ナビなのか")
    add_time = models.DateTimeField(default=datetime.now, verbose_name="挿入時間")
    class Meta:
        verbose_name = "商品カテゴリー"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.name
class GoodsCategoryBrand(models.Model):
    """
    Brand, optionally attached to a product category.
    """
    category = models.ForeignKey(GoodsCategory, related_name="brands", null=True, blank=True,
                                 verbose_name="商品カテゴリー名", on_delete=models.CASCADE)
    name = models.CharField(default="", max_length=30, verbose_name="ブランド名", help_text="ブランド名")
    desc = models.CharField(default="", max_length=200, verbose_name="ブランド説明", help_text="ブランド説明")
    image = models.ImageField(max_length=200, upload_to="brands/")
    add_time = models.DateTimeField(default=datetime.now, verbose_name="挿入時間")
    class Meta:
        verbose_name = "ブランド"
        verbose_name_plural = verbose_name
        # Explicit table name, overriding Django's default app_model naming.
        db_table = "goods_goodsbrand"
    def __str__(self):
        return self.name
class Goods(models.Model):
    """
    Product.
    """
    category = models.ForeignKey(GoodsCategory, null=True, blank=True,
                                 verbose_name="商品カテゴリー", on_delete=models.CASCADE)
    goods_sn = models.CharField(max_length=50, default="", verbose_name="商品識別番号")
    name = models.CharField(max_length=100, verbose_name="商品名")
    click_num = models.IntegerField(default=0, verbose_name="クリック数")
    sold_num = models.IntegerField(default=0, verbose_name="販売数")
    fav_num = models.IntegerField(default=0, verbose_name="お気に入り登録数")
    goods_num = models.IntegerField(default=0, verbose_name="在庫数")
    # NOTE(review): FloatField for prices loses precision; DecimalField is
    # the usual choice — changing it requires a migration, so flagged only.
    market_price = models.FloatField(default=0, verbose_name="原価")
    shop_price = models.FloatField(default=0, verbose_name="販売値段")
    goods_brief = models.TextField(max_length=500, verbose_name="商品説明")
    ship_free = models.BooleanField(default=True, verbose_name="送料負担")
    goods_front_image = models.ImageField(max_length=200, upload_to="goods/images/",
                                          null=True, blank=True, verbose_name="表紙")
    is_new = models.BooleanField(default=False, verbose_name="新品なのか")
    is_hot = models.BooleanField(default=False, verbose_name="売れているのか")
    add_time = models.DateTimeField(default=datetime.now, verbose_name="挿入時間")
    class Meta:
        verbose_name = "商品"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.name
class GoodsImage(models.Model):
    """
    Product carousel (swiper) images for a product detail page.
    """
    goods = models.ForeignKey(Goods, verbose_name="商品", related_name="images", on_delete=models.CASCADE)
    image = models.ImageField(upload_to="", verbose_name="画像", null=True, blank=True)
    add_time = models.DateTimeField(default=datetime.now, verbose_name="挿入時間")
    class Meta:
        verbose_name = "商品swiperImages"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.goods.name
class Banner(models.Model):
    """
    Home-page swiper banner image for a product; `index` orders the slides.
    """
    goods = models.ForeignKey(Goods, verbose_name="商品", on_delete=models.CASCADE)
    image = models.ImageField(upload_to="banner", verbose_name="ホームページswiper用画像")
    index = models.IntegerField(default=0, verbose_name="swiper順番")
    add_time = models.DateTimeField(default=datetime.now, verbose_name="挿入時間")
    class Meta:
        verbose_name = "swiper用の商品image"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.goods.name
class IndexAd(models.Model):
    """Home-page category advertisement: pairs a category with one product."""
    category = models.ForeignKey(GoodsCategory, related_name="category",
                                 verbose_name="商品カテゴリー", on_delete=models.CASCADE)
    goods = models.ForeignKey(Goods, related_name='goods', on_delete=models.CASCADE)
    class Meta:
        verbose_name = "ホームページ商品カテゴリー広告"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.goods.name
class HotSearchWords(models.Model):
    """
    Popular search keyword; `index` controls display order.
    """
    keywords = models.CharField(default="", max_length=20, verbose_name="人気キーワード")
    index = models.IntegerField(default=0, verbose_name="並び順")
    add_time = models.DateTimeField(default=datetime.now, verbose_name="挿入時間")
    class Meta:
        verbose_name = "人気キーワード"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.keywords
|
[
"txy1226052@gmail.com"
] |
txy1226052@gmail.com
|
7539bdceb81f567363b6d422c1297a92195ff9db
|
a17b81c68b9d6cba745f00aa6b1b26ca7dcd5cbc
|
/Final/Source Code/NBAstats/manage.py
|
73181ca30d8ea012642eaf883fbd8527f97d1775
|
[] |
no_license
|
julianbcook/NBAStats
|
2cd689a04c09cd66f1f6046bf087fd7721ed581e
|
561c7e360fd12a2260f1b56f20bfd1d124550ff6
|
refs/heads/master
| 2020-04-09T06:14:34.077974
| 2019-08-09T02:36:30
| 2019-08-09T02:36:30
| 160,104,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "NBAstats.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"jherrera1497@gmail.com"
] |
jherrera1497@gmail.com
|
5be4e98dfe3ad8ffc97705d8d5603a2acd95de51
|
81b384655e970623333971ed063d85ebfe940ed5
|
/hallo/test/modules/random/test_eight_ball.py
|
09873ea43e78dbc223dba1ce75f8e3d48e17082f
|
[] |
no_license
|
wirenic/Hallo
|
c3c8a3f11dd1f03729385f2761e0a6b216c6e1d2
|
68595816fd146c4af35e3f1bc91c58cdc6fa741c
|
refs/heads/master
| 2023-03-18T06:50:14.111226
| 2021-02-26T07:59:33
| 2021-02-26T07:59:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,988
|
py
|
import unittest
from hallo.events import EventMessage
from hallo.inc.commons import Commons
from hallo.modules.random.eight_ball import EightBall
from hallo.test.test_base import TestBase
from hallo.test.modules.random.mock_chooser import MockChooser
class EightBallTest(TestBase, unittest.TestCase):
    """Tests for the EightBall random-response function.

    Commons.get_random_choice is monkey-patched with a deterministic
    MockChooser for each test so specific responses can be forced.
    """
    def setUp(self):
        super().setUp()
        # Swap the global RNG helper for a controllable chooser; restored in
        # tearDown so other tests see the real implementation.
        self.chooser = MockChooser()
        self.old_choice_method = Commons.get_random_choice
        Commons.get_random_choice = self.chooser.choose
    def tearDown(self):
        super().tearDown()
        Commons.get_random_choice = self.old_choice_method
    def test_eightball(self):
        """A bare "eight ball" message gets one of the known responses."""
        # TODO(review): this all_responses concatenation is repeated in all
        # three tests; could be a module-level constant or helper.
        all_responses = (
            EightBall.RESPONSES_YES_TOTALLY
            + EightBall.RESPONSES_YES_PROBABLY
            + EightBall.RESPONSES_MAYBE
            + EightBall.RESPONSES_NO
        )
        self.function_dispatcher.dispatch(
            EventMessage(self.server, None, self.test_user, "eight ball")
        )
        data = self.server.get_send_data(1, self.test_user, EventMessage)
        # Responses are sent with a trailing period; compare lowercase.
        assert data[0].text.lower() in [
            "{}.".format(x.lower()) for x in all_responses
        ], "Response isn't valid."
    def test_eightball_with_message(self):
        """A question appended after the trigger still yields a valid response."""
        all_responses = (
            EightBall.RESPONSES_YES_TOTALLY
            + EightBall.RESPONSES_YES_PROBABLY
            + EightBall.RESPONSES_MAYBE
            + EightBall.RESPONSES_NO
        )
        self.function_dispatcher.dispatch(
            EventMessage(
                self.server,
                None,
                self.test_user,
                "magic eightball will this test pass?",
            )
        )
        data = self.server.get_send_data(1, self.test_user, EventMessage)
        assert data[0].text.lower() in [
            "{}.".format(x.lower()) for x in all_responses
        ], "Response isn't valid."
    def test_all_responses(self):
        """Forcing the chooser through every index covers each response once."""
        all_responses = (
            EightBall.RESPONSES_YES_TOTALLY
            + EightBall.RESPONSES_YES_PROBABLY
            + EightBall.RESPONSES_MAYBE
            + EightBall.RESPONSES_NO
        )
        responses = []
        for x in range(len(all_responses)):
            # Set RNG
            self.chooser.choice = x
            # Shake magic eight ball
            self.function_dispatcher.dispatch(
                EventMessage(self.server, None, self.test_user, "magic8-ball")
            )
            data = self.server.get_send_data(1, self.test_user, EventMessage)
            # Strip the trailing period before collecting.
            responses.append(data[0].text.lower()[:-1])
            assert data[0].text.lower() in [
                "{}.".format(x.lower()) for x in all_responses
            ], "Response isn't valid."
        # Check all responses given
        assert len(responses) == len(
            all_responses
        ), "Not the same number of responses as possible responses"
        assert set(responses) == set(
            [x.lower() for x in all_responses]
        ), "Not all responses are given"
|
[
"joshua@coales.co.uk"
] |
joshua@coales.co.uk
|
d0c3ca61479d518272c9503a1c470f4db684357a
|
b1b45393aefc27f85d3fd3e454929b035c390be6
|
/tests/func/test_bucket_it.py
|
b4351630ed79a6ae9ff4201d4ba9288d5a507d50
|
[
"Apache-2.0"
] |
permissive
|
Jiaming1999/baas-sdk-python
|
c5f579f2ae0ff591ab49af8c167845515f2c0019
|
83b0916af0e4f3167a232ac7eb06b82331adc172
|
refs/heads/master
| 2022-01-07T19:12:31.184660
| 2019-01-22T09:49:17
| 2019-01-22T09:49:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,954
|
py
|
# -*- coding: utf-8 -*-
import pytest
from requests import HTTPError
import necbaas as baas
from . import util
class TestBucket(object):
    """Functional tests for NEC BaaS bucket CRUD via a master-key service."""
    # NOTE(review): class attribute `service` is never assigned — setup()
    # stores the client on `self.masterService` instead; the annotation below
    # looks stale. Confirm before relying on `service`.
    service = None
    # type: baas.Service
    def setup(self):
        self.masterService = util.create_service(master=True)
    def teardown(self):
        # NOTE(review): relies on each test assigning self.bucket before any
        # assertion can fail, otherwise teardown raises AttributeError.
        for b in self.bucket.query():
            self.bucket.remove(b["name"])
    def test_upsert(self):
        """Buckets can be created, and re-creating the same name fails (400)."""
        self.bucket = baas.Buckets(self.masterService, "object")
        # create
        create_res = self.bucket.upsert("bucket1")
        assert create_res["name"] == "bucket1"
        assert create_res["description"] == ""
        assert "ACL" in create_res
        assert "contentACL" in create_res
        # update
        with pytest.raises(HTTPError) as ei:
            self.bucket.upsert("bucket1")
        status_code = ei.value.response.status_code
        assert status_code == 400
    def test_upsert_with_options(self):
        """Buckets can be created and then updated with desc/ACL options."""
        self.bucket = baas.Buckets(self.masterService, "file")
        # create
        desc = "test bucket description"
        acl = {"u": ["g:authenticated"]}
        content_acl = {"d": ["g:anonymous"]}
        res = self.bucket.upsert("bucket1", desc=desc, acl=acl, content_acl=content_acl)
        assert res["name"] == "bucket1"
        assert res["description"] == desc
        assert res["ACL"]["u"] == ["g:authenticated"]
        assert res["contentACL"]["d"] == ["g:anonymous"]
        # update
        desc = "upsert description"
        acl = {"u": ["g:anonymous"]}
        content_acl = {"d": ["g:authenticated"]}
        res = self.bucket.upsert("bucket1", desc=desc, acl=acl, content_acl=content_acl)
        assert res["name"] == "bucket1"
        assert res["description"] == desc
        assert res["ACL"]["u"] == ["g:anonymous"]
        assert res["contentACL"]["d"] == ["g:authenticated"]
    def test_query(self):
        """Querying returns every bucket, in creation-name order."""
        self.bucket = baas.Buckets(self.masterService, "object")
        num = 10
        for i in range(num):
            self.bucket.upsert("bucket" + str(i))
        # query
        results = self.bucket.query()
        assert len(results) == num
        for i in range(num):
            assert results[i]["name"] == "bucket" + str(i)
    def test_get(self):
        """Fetching a bucket returns the same document upsert returned."""
        self.bucket = baas.Buckets(self.masterService, "object")
        create_res = self.bucket.upsert("bucket1")
        # get
        get_res = self.bucket.get("bucket1")
        assert create_res == get_res
    def test_remove(self):
        """A bucket can be removed by name."""
        self.bucket = baas.Buckets(self.masterService, "object")
        self.bucket.upsert("bucket1")
        # remove
        self.bucket.remove("bucket1")
|
[
"tamura.jn@ncos.nec.co.jp"
] |
tamura.jn@ncos.nec.co.jp
|
1fc2d9f87296f7c77e0b2ba4b7fe8747035f1088
|
a032cab55c78de1ca1e5b4a50a24a93b9fd67ceb
|
/proto/media/yuvfile.py
|
572ad0000b1ce55e9b45600de65fc36af1548385
|
[] |
no_license
|
xinjuehu/ns3-study
|
47a80545dedd4f1e0c663ef57d24ba67dc03574c
|
994b80169058f6b14cf0d8659e7e3864274a90fd
|
refs/heads/master
| 2021-08-29T12:27:51.373068
| 2017-12-14T00:57:15
| 2017-12-14T00:57:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,044
|
py
|
#!/usr/bin/env python2.7
# -*- coding:utf-8 -*-
import numpy as np
from PIL import Image
import os
from jmenc import YUVEncode
class VmafComp(object):
    """Quality comparison using the bundled `psnr` / `ssim` binaries in lib/.

    The encoder object is only used for its wait_proc() subprocess helper.
    """
    def __init__(self, enc):
        self._encoder = enc
        # Repo-relative paths: this file's directory, and ../lib/ for tools.
        self._root = os.path.split(os.path.realpath(__file__))[0] + os.sep
        self._lib = self._root + '..' + os.sep + 'lib' + os.sep
    def comp(self, w, h, source, f1):
        # Stub: container comparison is not implemented for this backend.
        assert os.path.exists(source)
        assert os.path.exists(f1)
        pass
    def comp_yuv(self, w, h, source, f1):
        """Return (mean PSNR, mean SSIM) between two raw yuv420p files."""
        assert os.path.exists(source)
        assert os.path.exists(f1)
        cmd = self._lib + "psnr yuv420p {2} {3} {0} {1}"
        cmd = cmd.format(w, h, source, f1)
        ret = self._encoder.wait_proc(cmd)
        avg = []
        # Per-frame PSNR is the last whitespace-separated token of each line.
        for i in ret:
            i = i.strip().split()[-1]
            avg.append(float(i))
        avg = np.mean(avg)
        cmd = self._lib + "ssim yuv420p {2} {3} {0} {1}"
        cmd = cmd.format(w, h, source, f1)
        ret = self._encoder.wait_proc(cmd)
        avg2 = []
        # Only lines starting with 'ssim:' carry a per-frame SSIM value.
        for i in ret:
            i = i.strip().split()
            if i[0] == 'ssim:':
                avg2.append(float(i[-1]))
        avg2 = np.mean(avg2)
        return avg, avg2
class FFComp(object):
    """Quality comparison via ffmpeg's `ssim`/`psnr` lavfi filters.

    ffmpeg writes per-frame stats to {f1}_ssim.log / {f1}_psnr.log, which
    read_log() then averages.
    """
    def __init__(self, enc):
        # enc supplies wait_proc() to run the ffmpeg command line.
        self._encoder = enc
    def comp(self, w, h, source, f1):
        """Compare a container file `source` against raw YUV `f1`; return
        (mean PSNR, mean SSIM)."""
        assert os.path.exists(source)
        assert os.path.exists(f1)
        cmd = "ffmpeg -i {2} -pix_fmt yuv420p -s {0}x{1} -i {3}" + \
              " -lavfi \"ssim='stats_file={3}_ssim.log';[0:v][1:v]psnr='stats_file={3}_psnr.log'\" -f null -"
        cmd = cmd.format(w, h, source, f1)
        self._encoder.wait_proc(cmd)
        return self.read_log(f1)
    def comp_yuv(self, w, h, source, f1):
        """Compare two raw YUV files; return (mean PSNR, mean SSIM)."""
        assert os.path.exists(source)
        assert os.path.exists(f1)
        cmd = "ffmpeg -s {0}x{1} -i {2} -s {0}x{1} -i {3}" + \
              " -lavfi \"ssim='stats_file={3}_ssim.log';[0:v][1:v]psnr='stats_file={3}_psnr.log'\" -f null -"
        cmd = cmd.format(w, h, source, f1)
        self._encoder.wait_proc(cmd)
        return self.read_log(f1)
    @staticmethod
    def read_log(f1):
        """Average the per-frame stats ffmpeg wrote for `f1`; return
        (mean psnr_y, mean all-plane ssim)."""
        with open(f1 + "_ssim.log") as f:
            c1 = f.readlines()
        with open(f1 + "_psnr.log") as f:
            c2 = f.readlines()
        # Each log line embeds 'All:<ssim> ...' / 'psnr_y:<psnr> ...'.
        ssim, psnr = [float(i.strip().split('All:')[1].split()[0]) for i in c1], \
                     [float(i.strip().split('psnr_y:')[1].split()[0]) for i in c2]
        psnr = np.mean(psnr)
        ssim = np.mean(ssim)
        return psnr, ssim
class YUVUtil(object):
    """Helpers for reading, splitting, tiling and quality-comparing raw YUV 4:2:0 video.

    NOTE(review): Python 2 only -- relies on tuple parameters, the `print`
    statement and integer `/` division throughout; do not run under Python 3.
    """

    def __init__(self, name, width=352, height=288, comp=FFComp):
        # `name` is either an existing file path, or a basename resolved
        # under <project>/input/<name>.yuv.  `comp` selects the comparison
        # backend class (FFComp or VmafComp).
        self._w = width
        self._h = height
        self._size = (self._w, self._h)
        # Project root: one directory above this source file.
        self._root = os.path.split(os.path.realpath(__file__))[0] + os.sep + '..' + os.sep
        if os.path.exists(name):
            self._source = name
            self._output = self._root + 'output' + os.sep + 'default_'
        else:
            self._source = self._root + 'input' + os.sep + name + '.yuv'
            self._output = self._root + 'output' + os.sep + name + '_'
        self._encoder = YUVEncode(self._output)
        self._comp = comp(self._encoder)

    def get_output(self):
        """Return the output directory (the prefix path minus its basename part)."""
        return os.sep.join(self._output.split(os.sep)[:-1]) + os.sep

    def get_source(self):
        """Return the resolved source .yuv path."""
        return self._source

    def read420(self):
        """Generator yielding one (y, u, v) uint8 ndarray triple per 4:2:0 frame.

        y is (h, w); u and v are (h/2, w/2).
        """
        with open(self._source, 'rb') as f:
            # Read until the file offset reaches the file size.
            while f.tell() < os.fstat(f.fileno()).st_size:
                y = f.read(self._w * self._h)
                u = f.read(self._w * self._h / 4)
                v = f.read(self._w * self._h / 4)
                y = np.array(bytearray(y), dtype=np.uint8)
                u = np.array(bytearray(u), dtype=np.uint8)
                v = np.array(bytearray(v), dtype=np.uint8)
                y = y.reshape((self._h, self._w))
                u = u.reshape((self._h / 2, self._w / 2))
                v = v.reshape((self._h / 2, self._w / 2))
                assert len(y) == self._h
                assert len(y[0]) == self._w
                yield y, u, v

    def yuv_ffmpeg_h264(self, output='sp.264'):
        """Encode the source to H.264 via the encoder backend; returns its result."""
        return self._encoder.ffmpeg_h264(self._source, self._size, output)

    def comp(self, f1='sp.mp4', in_file=None):
        """Compare encoded file `f1` against `in_file` (defaults to the source).

        A relative `f1` is resolved against the output prefix.
        """
        if in_file is None:
            in_file = self._source
        if not os.path.exists(f1):
            f1 = self._output + f1
        return self._comp.comp(self._w, self._h, in_file, f1)

    def comp_yuv(self, f1=None, in_file=None):
        """Compare two raw yuv files; both default to the source path."""
        if in_file is None:
            in_file = self._source
        if f1 is None:
            f1 = self._source
        if not os.path.exists(f1):
            f1 = self._output + f1
        return self._comp.comp_yuv(self._w, self._h, in_file, f1)

    def rgb2img(self, (r, g, b)):
        """Merge three full-resolution planes into a PIL RGB image."""
        im_r = Image.frombytes('L', self._size, r.tostring())
        im_g = Image.frombytes('L', self._size, g.tostring())
        im_b = Image.frombytes('L', self._size, b.tostring())
        return Image.merge('RGB', (im_r, im_g, im_b))

    def yuv2img(self, (y, u, v)):
        """Upsample 4:2:0 chroma to full resolution and merge into a PIL YCbCr image."""
        # Repeat each chroma sample 2x in both axes (nearest-neighbour upsampling).
        u = np.repeat(u, 2, 0)
        u = np.repeat(u, 2, 1)
        v = np.repeat(v, 2, 0)
        v = np.repeat(v, 2, 1)
        im_r = Image.frombytes('L', self._size, y.tostring())
        im_g = Image.frombytes('L', self._size, u.tostring())
        im_b = Image.frombytes('L', self._size, v.tostring())
        return Image.merge('YCbCr', (im_r, im_g, im_b))

    def yuv_split(self, (y, u, v), (w, h), (off_w, off_h)):
        """Crop a w x h window at (off_w, off_h) from a frame; returns 4:2:0 planes.

        Chroma is upsampled, cropped at luma resolution, then re-subsampled,
        so odd offsets/sizes are handled consistently.
        """
        assert w + off_w <= self._w
        assert h + off_h <= self._h
        u = np.repeat(u, 2, 0)
        u = np.repeat(u, 2, 1)
        v = np.repeat(v, 2, 0)
        v = np.repeat(v, 2, 1)
        y = y[off_h:off_h + h, off_w:off_w + w]
        u = u[off_h:off_h + h, off_w:off_w + w]
        v = v[off_h:off_h + h, off_w:off_w + w]
        # Re-subsample chroma back to 4:2:0.
        u = u[::2, ::2]
        v = v[::2, ::2]
        return y, u, v

    def img2yuv(self):
        # Not implemented.
        pass

    def yuv_merge(self):
        # Not implemented.
        pass

    def show_img(self):
        """Dump every frame as <output><index>.jpg; returns the frame count."""
        index = 0
        for frm in self.read420():
            co = self.yuv2img(frm)
            co.save(self._output + str(index) + '.jpg')
            index += 1
        return index

    def split_run(self, tmp_size, tmp_off, output='sp.yuv'):
        """Write the cropped stream to <output prefix>+output; cached if it exists.

        Returns the full output path.
        """
        output = self._output + output
        if os.path.exists(output):
            return output
        with open(output, 'wb') as f:
            for frm in self.read420():
                data = self.yuv_split(frm, tmp_size, tmp_off)
                f.write(''.join([i.tostring() for i in data]))
        return output

    def make_tile(self, tile):
        """Split the video into an x-by-y grid of tile files.

        Returns (list of tile paths, tile width, tile height).
        NOTE(review): assumes width/height divide evenly by the grid counts;
        integer division silently truncates otherwise.
        """
        x, y = tile
        w = self._w / x
        h = self._h / y
        x = 0
        y = 0
        data = []
        for j in range(0, self._h, h):
            for i in range(0, self._w, w):
                ret = self.split_run((w, h), (i, j), "sp_{0}_{1}_{2}_{3}.yuv".format(x, y, w, h))
                print 'MAKE', ret
                data.append(ret)
                y += 1
            y = 0
            x += 1
        return data, w, h
|
[
"zxyqwe_2004@126.com"
] |
zxyqwe_2004@126.com
|
300682d48f2cb716193d184532e5d3018b6188db
|
8e69eee9b474587925e22413717eb82e4b024360
|
/v2.5.7/toontown/shtiker/HtmlView.py
|
6f65c5d143302ce38dc6ebb01cf2b6f26205dff4
|
[
"MIT"
] |
permissive
|
TTOFFLINE-LEAK/ttoffline
|
afaef613c36dc3b70514ccee7030ba73c3b5045b
|
bb0e91704a755d34983e94288d50288e46b68380
|
refs/heads/master
| 2020-06-12T15:41:59.411795
| 2020-04-17T08:22:55
| 2020-04-17T08:22:55
| 194,348,185
| 5
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,473
|
py
|
import array, sys
from direct.showbase.DirectObject import DirectObject
from direct.task.Task import Task
from direct.directnotify import DirectNotifyGlobal
from panda3d.core import Texture
from panda3d.core import CardMaker
from panda3d.core import NodePath
from panda3d.core import Point3, Vec3, Vec4, VBase4D, Point2
from panda3d.core import PNMImage
from panda3d.core import TextureStage
from panda3d.core import Texture
from panda3d.core import WindowProperties
from direct.interval.IntervalGlobal import *
from panda3d.core import AwWebView
from panda3d.core import AwWebCore
# Visible page area, in pixels.
WEB_WIDTH_PIXELS = 784
WEB_HEIGHT_PIXELS = 451
# Backing texture dimensions -- padded up to powers of two (presumably for
# GPU texture requirements; see Texture.setTexturesPower2 use in update()).
WEB_WIDTH = 1024
WEB_HEIGHT = 512
WEB_HALF_WIDTH = WEB_WIDTH / 2
# Assumed window size used when converting pixels to aspect2d units.
WIN_WIDTH = 800
WIN_HEIGHT = 600
# Process-wide Awesomium core, created lazily by the first HtmlView.
GlobalWebcore = None
class HtmlView(DirectObject):
    """Renders a web page (via Awesomium) onto a textured quad in Panda3D.

    NOTE(review): Python 2 code (uses xrange) and depends on Panda3D globals
    `config`, `base`, `aspect2d`, `messenger` injected at runtime.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('HtmlView')
    # When set, the page texture is mirrored into two half-width textures.
    useHalfTexture = config.GetBool('news-half-texture', 0)
    def __init__(self, parent=aspect2d):
        """Create the web view, backing buffers, textures and mouse hooks."""
        global GlobalWebcore
        self.parent = parent
        # Last-seen mouse position in page pixel coordinates.
        self.mx = 0
        self.my = 0
        self.htmlFile = 'index.html'
        self.transparency = False
        if GlobalWebcore:
            pass
        else:
            # Lazily create the shared Awesomium core (BGRA pixel format).
            GlobalWebcore = AwWebCore(AwWebCore.LOGVERBOSE, True, AwWebCore.PFBGRA)
            GlobalWebcore.setBaseDirectory('.')
            # Route every HTTP error status to a local error page.
            for errResponse in xrange(400, 600):
                GlobalWebcore.setCustomResponsePage(errResponse, 'error.html')
        # NOTE(review): meaning of the trailing 70 argument is not visible
        # here -- confirm against the AwWebCore.createWebView signature.
        self.webView = GlobalWebcore.createWebView(WEB_WIDTH, WEB_HEIGHT, self.transparency, False, 70)
        # NOTE(review): frameName and inGameNewsUrl are computed but unused.
        frameName = ''
        inGameNewsUrl = self.getInGameNewsUrl()
        # RGBA byte buffer the web view renders into (opaque black initially).
        self.imgBuffer = array.array('B')
        for i in xrange(WEB_WIDTH * WEB_HEIGHT):
            self.imgBuffer.append(0)
            self.imgBuffer.append(0)
            self.imgBuffer.append(0)
            self.imgBuffer.append(255)
        if self.useHalfTexture:
            # Separate RGBA buffers for the left/right half textures.
            self.leftBuffer = array.array('B')
            for i in xrange(WEB_HALF_WIDTH * WEB_HEIGHT):
                self.leftBuffer.append(0)
                self.leftBuffer.append(0)
                self.leftBuffer.append(0)
                self.leftBuffer.append(255)
            self.rightBuffer = array.array('B')
            for i in xrange(WEB_HALF_WIDTH * WEB_HEIGHT):
                self.rightBuffer.append(0)
                self.rightBuffer.append(0)
                self.rightBuffer.append(0)
                self.rightBuffer.append(255)
        self.setupTexture()
        if self.useHalfTexture:
            self.setupHalfTextures()
        # Forward Panda mouse events into the web view.
        self.accept('mouse1', self.mouseDown, [AwWebView.LEFTMOUSEBTN])
        self.accept('mouse3', self.mouseDown, [AwWebView.RIGHTMOUSEBTN])
        self.accept('mouse1-up', self.mouseUp, [AwWebView.LEFTMOUSEBTN])
        self.accept('mouse3-up', self.mouseUp, [AwWebView.RIGHTMOUSEBTN])
    def getInGameNewsUrl(self):
        """Return the news URL: config override, else launcher value, else fallback."""
        result = config.GetString('fallback-news-url', 'http://cdn.toontown.disney.go.com/toontown/en/gamenews/')
        override = config.GetString('in-game-news-url', '')
        if override:
            self.notify.info('got an override url, using %s for in a game news' % override)
            result = override
        else:
            try:
                launcherUrl = base.launcher.getValue('GAME_IN_GAME_NEWS_URL', '')
                if launcherUrl:
                    result = launcherUrl
                    self.notify.info('got GAME_IN_GAME_NEWS_URL from launcher using %s' % result)
                else:
                    self.notify.info('blank GAME_IN_GAME_NEWS_URL from launcher, using %s' % result)
            except:
                self.notify.warning('got exception getting GAME_IN_GAME_NEWS_URL from launcher, using %s' % result)
        return result
    def setupTexture(self):
        """Build the full-page quad and its RGBA texture; compute mouse limits."""
        cm = CardMaker('quadMaker')
        cm.setColor(1.0, 1.0, 1.0, 1.0)
        aspect = base.camLens.getAspectRatio()
        # Quad size in aspect2d units matching the page's on-screen pixel size.
        htmlWidth = 2.0 * aspect * WEB_WIDTH_PIXELS / float(WIN_WIDTH)
        htmlHeight = 2.0 * float(WEB_HEIGHT_PIXELS) / float(WIN_HEIGHT)
        cm.setFrame(-htmlWidth / 2.0, htmlWidth / 2.0, -htmlHeight / 2.0, htmlHeight / 2.0)
        # UVs cover only the used (non-padded) part of the power-of-two texture.
        bottomRightX = WEB_WIDTH_PIXELS / float(WEB_WIDTH + 1)
        bottomRightY = WEB_HEIGHT_PIXELS / float(WEB_HEIGHT + 1)
        cm.setUvRange(Point2(0, 1 - bottomRightY), Point2(bottomRightX, 1))
        card = cm.generate()
        self.quad = NodePath(card)
        self.quad.reparentTo(self.parent)
        self.guiTex = Texture('guiTex')
        self.guiTex.setupTexture(Texture.TT2dTexture, WEB_WIDTH, WEB_HEIGHT, 1, Texture.TUnsignedByte, Texture.FRgba)
        self.guiTex.setMinfilter(Texture.FTLinear)
        self.guiTex.setKeepRamImage(True)
        self.guiTex.makeRamImage()
        self.guiTex.setWrapU(Texture.WMRepeat)
        self.guiTex.setWrapV(Texture.WMRepeat)
        ts = TextureStage('webTS')
        self.quad.setTexture(ts, self.guiTex)
        # Negative V scale flips the image vertically to match the buffer layout.
        self.quad.setTexScale(ts, 1.0, -1.0)
        self.quad.setTransparency(0)
        self.quad.setTwoSided(True)
        self.quad.setColor(1.0, 1.0, 1.0, 1.0)
        self.calcMouseLimits()
    def setupHalfTextures(self):
        """Create the left/right half quads plus PNMImages used for splitting."""
        self.setupLeftTexture()
        self.setupRightTexture()
        self.fullPnmImage = PNMImage(WEB_WIDTH, WEB_HEIGHT, 4)
        self.leftPnmImage = PNMImage(WEB_HALF_WIDTH, WEB_HEIGHT, 4)
        self.rightPnmImage = PNMImage(WEB_HALF_WIDTH, WEB_HEIGHT, 4)
    def setupLeftTexture(self):
        """Build the left-half quad and its clamped texture."""
        cm = CardMaker('quadMaker')
        cm.setColor(1.0, 1.0, 1.0, 1.0)
        aspect = base.camLens.getAspectRatio()
        htmlWidth = 2.0 * aspect * WEB_WIDTH / float(WIN_WIDTH)
        htmlHeight = 2.0 * float(WEB_HEIGHT) / float(WIN_HEIGHT)
        cm.setFrame(-htmlWidth / 2.0, 0, -htmlHeight / 2.0, htmlHeight / 2.0)
        card = cm.generate()
        self.leftQuad = NodePath(card)
        self.leftQuad.reparentTo(self.parent)
        self.leftGuiTex = Texture('guiTex')
        self.leftGuiTex.setupTexture(Texture.TT2dTexture, WEB_HALF_WIDTH, WEB_HEIGHT, 1, Texture.TUnsignedByte, Texture.FRgba)
        self.leftGuiTex.setKeepRamImage(True)
        self.leftGuiTex.makeRamImage()
        self.leftGuiTex.setWrapU(Texture.WMClamp)
        self.leftGuiTex.setWrapV(Texture.WMClamp)
        ts = TextureStage('leftWebTS')
        self.leftQuad.setTexture(ts, self.leftGuiTex)
        self.leftQuad.setTexScale(ts, 1.0, -1.0)
        self.leftQuad.setTransparency(0)
        self.leftQuad.setTwoSided(True)
        self.leftQuad.setColor(1.0, 1.0, 1.0, 1.0)
    def setupRightTexture(self):
        """Build the right-half quad and its clamped texture."""
        cm = CardMaker('quadMaker')
        cm.setColor(1.0, 1.0, 1.0, 1.0)
        aspect = base.camLens.getAspectRatio()
        htmlWidth = 2.0 * aspect * WEB_WIDTH / float(WIN_WIDTH)
        htmlHeight = 2.0 * float(WEB_HEIGHT) / float(WIN_HEIGHT)
        cm.setFrame(0, htmlWidth / 2.0, -htmlHeight / 2.0, htmlHeight / 2.0)
        card = cm.generate()
        self.rightQuad = NodePath(card)
        self.rightQuad.reparentTo(self.parent)
        self.rightGuiTex = Texture('guiTex')
        self.rightGuiTex.setupTexture(Texture.TT2dTexture, WEB_HALF_WIDTH, WEB_HEIGHT, 1, Texture.TUnsignedByte, Texture.FRgba)
        self.rightGuiTex.setKeepRamImage(True)
        self.rightGuiTex.makeRamImage()
        self.rightGuiTex.setWrapU(Texture.WMClamp)
        self.rightGuiTex.setWrapV(Texture.WMClamp)
        ts = TextureStage('rightWebTS')
        self.rightQuad.setTexture(ts, self.rightGuiTex)
        self.rightQuad.setTexScale(ts, 1.0, -1.0)
        self.rightQuad.setTransparency(0)
        self.rightQuad.setTwoSided(True)
        self.rightQuad.setColor(1.0, 1.0, 1.0, 1.0)
    def calcMouseLimits(self):
        """Cache the quad's aspect2d-space bounds for mouse coordinate mapping."""
        ll = Point3()
        ur = Point3()
        self.quad.calcTightBounds(ll, ur)
        self.notify.debug('ll=%s ur=%s' % (ll, ur))
        offset = self.quad.getPos(aspect2d)
        self.notify.debug('offset = %s ' % offset)
        ll.setZ(ll.getZ() + offset.getZ())
        ur.setZ(ur.getZ() + offset.getZ())
        self.notify.debug('new LL=%s, UR=%s' % (ll, ur))
        relPointll = self.quad.getRelativePoint(aspect2d, ll)
        self.notify.debug('relPoint = %s' % relPointll)
        # Lower-left / upper-right corners scaled into screen space.
        self.mouseLL = (aspect2d.getScale()[0] * ll[0], aspect2d.getScale()[2] * ll[2])
        self.mouseUR = (aspect2d.getScale()[0] * ur[0], aspect2d.getScale()[2] * ur[2])
        self.notify.debug('original mouseLL=%s, mouseUR=%s' % (self.mouseLL, self.mouseUR))
    def writeTex(self, filename='guiText.png'):
        """Debug helper: dump the current page texture to an image file."""
        self.notify.debug('writing texture')
        self.guiTex.generateRamMipmapImages()
        self.guiTex.write(filename)
    def toggleRotation(self):
        """Start/stop self.interval.

        NOTE(review): nothing in this class assigns self.interval -- this
        raises AttributeError unless a subclass/caller sets it first.
        """
        if self.interval.isPlaying():
            self.interval.finish()
        else:
            self.interval.loop()
    def mouseDown(self, button):
        """Forward a mouse-press to the web view (and wake the client)."""
        messenger.send('wakeup')
        self.webView.injectMouseDown(button)
    def mouseUp(self, button):
        """Forward a mouse-release to the web view."""
        self.webView.injectMouseUp(button)
    def reload(self):
        # Not implemented.
        pass
    def zoomIn(self):
        self.webView.zoomIn()
    def zoomOut(self):
        self.webView.zoomOut()
    def toggleTransparency(self):
        """Flip page transparency on the web view."""
        self.transparency = not self.transparency
        self.webView.setTransparent(self.transparency)
    def update(self, task):
        """Per-frame task: forward mouse moves and re-upload the page texture when dirty."""
        if base.mouseWatcherNode.hasMouse():
            x, y = self._translateRelativeCoordinates(base.mouseWatcherNode.getMouseX(), base.mouseWatcherNode.getMouseY())
            # Only inject a move when the position actually changed.
            if self.mx - x != 0 or self.my - y != 0:
                self.webView.injectMouseMove(x, y)
                self.mx, self.my = x, y
        if self.webView.isDirty():
            # Render BGRA pixels straight into our byte buffer (row pitch = width*4).
            self.webView.render(self.imgBuffer.buffer_info()[0], WEB_WIDTH * 4, 4)
            Texture.setTexturesPower2(2)
            textureBuffer = self.guiTex.modifyRamImage()
            textureBuffer.setData(self.imgBuffer.tostring())
            if self.useHalfTexture:
                # Split the full image into the two half textures.
                self.guiTex.store(self.fullPnmImage)
                self.leftPnmImage.copySubImage(self.fullPnmImage, 0, 0, 0, 0, WEB_HALF_WIDTH, WEB_HEIGHT)
                self.rightPnmImage.copySubImage(self.fullPnmImage, 0, 0, WEB_HALF_WIDTH, 0, WEB_HALF_WIDTH, WEB_HEIGHT)
                self.leftGuiTex.load(self.leftPnmImage)
                self.rightGuiTex.load(self.rightPnmImage)
                self.quad.hide()
            Texture.setTexturesPower2(1)
        GlobalWebcore.update()
        return Task.cont
    def _translateRelativeCoordinates(self, x, y):
        """Map aspect2d mouse coords to page pixel coords (origin top-left)."""
        sx = int((x - self.mouseLL[0]) / (self.mouseUR[0] - self.mouseLL[0]) * WEB_WIDTH_PIXELS)
        sy = WEB_HEIGHT_PIXELS - int((y - self.mouseLL[1]) / (self.mouseUR[1] - self.mouseLL[1]) * WEB_HEIGHT_PIXELS)
        return (
         sx, sy)
    def unload(self):
        """Tear down event hooks and destroy the web view."""
        self.ignoreAll()
        self.webView.destroy()
        self.webView = None
        return
    def onCallback(self, name, args):
        # Awesomium JS callback hook; only 'requestFPS' is recognized (no-op).
        if name == 'requestFPS':
            pass
    def onBeginNavigation(self, url, frameName):
        pass
    def onBeginLoading(self, url, frameName, statusCode, mimeType):
        pass
    def onFinishLoading(self):
        self.notify.debug('finished loading')
    def onReceiveTitle(self, title, frameName):
        pass
    def onChangeTooltip(self, tooltip):
        pass
    def onChangeCursor(self, cursor):
        pass
    def onChangeKeyboardFocus(self, isFocused):
        pass
    def onChangeTargetURL(self, url):
        pass
|
[
"s0mberdemise@protonmail.com"
] |
s0mberdemise@protonmail.com
|
f2759daadeefa6b3f075de304b18660a2ca0c449
|
5b4c803f68e52849a1c1093aac503efc423ad132
|
/UnPyc/tests/tests/CFG/2/pass/pass_try+finally_while_.py
|
258bc6132acf68332fcb6f7045d21347c4336fcd
|
[] |
no_license
|
Prashant-Jonny/UnPyc
|
9ce5d63b1e0d2ec19c1faa48d932cc3f71f8599c
|
4b9d4ab96dfc53a0b4e06972443e1402e9dc034f
|
refs/heads/master
| 2021-01-17T12:03:17.314248
| 2013-02-22T07:22:35
| 2013-02-22T07:22:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 44
|
py
|
# Decompiler test fixture (UnPyc CFG tests): an intentional infinite no-op
# loop exercising try/finally nested inside while.  The generated bytecode
# shape is the thing under test -- do not restyle or "fix" the loop.
while 1:
    try:
        pass
    finally:
        pass
|
[
"d.v.kornev@gmail.com"
] |
d.v.kornev@gmail.com
|
0d2b5b6c9b8399006a452a9d38b656e956d77c3a
|
9e86aa077e7d4f10e20d7bc7de1f53c5ad51716d
|
/t01/t01_05_e0925.py
|
7cf9e40fd1f8b8795fc38c5a295fd99a94b24ae4
|
[] |
no_license
|
Klevtsovskyi/PythonAud1
|
16b77e7aaf6b7ce951cca56b8f1be386b9bb4864
|
2f6fb9dbd2960543f5a12ccca4a5bf99db21c835
|
refs/heads/master
| 2023-09-03T13:36:08.743459
| 2021-11-04T10:10:55
| 2021-11-04T10:10:55
| 296,278,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 315
|
py
|
import math


def _side(ax, ay, bx, by):
    """Euclidean distance between the points (ax, ay) and (bx, by)."""
    return math.sqrt((bx - ax) ** 2 + (by - ay) ** 2)


# Read the three triangle vertices from one whitespace-separated input line.
x1, y1, x2, y2, x3, y3 = map(float, input().split())
a = _side(x1, y1, x2, y2)
b = _side(x2, y2, x3, y3)
c = _side(x1, y1, x3, y3)
P = a + b + c          # perimeter
p = P / 2              # semi-perimeter for Heron's formula
S = math.sqrt(p * (p - a) * (p - b) * (p - c))  # area via Heron's formula
print("%.4f %.4f" % (P, S))
|
[
"avklevtsovskiy@gmail.com"
] |
avklevtsovskiy@gmail.com
|
d966c7041e447cabc4bf303ad9feffacf3c1e20b
|
62902de8b202780ec95a63ea89667062fa8530ee
|
/2018_Fall/Data Structure/experiment 3/Huffmann Zipper.py
|
b0099316955a7a44062f8348f3716722c881a5cf
|
[] |
no_license
|
ToniChopp/USTC-CS
|
41af3cbc7aad57e6c1debbde840d1720e75b07c5
|
1cb8fd686e720fb7c98a95bcc4af7da5952c5e77
|
refs/heads/master
| 2023-08-28T03:59:15.172727
| 2021-06-08T15:40:30
| 2021-06-08T15:40:30
| 353,741,356
| 7
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,235
|
py
|
import six
import tkinter as tk
import sys
# Main application window for the Huffman zipper GUI (fixed 500x400 px).
window=tk.Tk()
window.title('Huffman Zipper')
window.geometry('500x400')
class HuffNode(object):
    """Abstract base for Huffman-tree nodes; subclasses override both methods."""

    def get_wieght(self):
        """Return this node's weight (abstract)."""
        raise NotImplementedError("The Abstract Node Class doesn't define 'get_wieght'")

    def isleaf(self):
        """Return whether this node is a leaf (abstract)."""
        raise NotImplementedError("The Abstract Node Class doesn't define 'isleaf'")
class LeafNode(HuffNode):
    """Leaf of the Huffman tree: one byte value plus its occurrence count."""

    def __init__(self, value=0, freq=0):
        super(LeafNode, self).__init__()
        self.value = value    # byte value this leaf represents
        self.wieght = freq    # occurrence count, used as the node weight

    def isleaf(self):
        """A LeafNode is always a leaf."""
        return True

    def get_wieght(self):
        """Return the stored frequency."""
        return self.wieght

    def get_value(self):
        """Return the stored byte value."""
        return self.value
class IntlNode(HuffNode):
    """Internal Huffman node; its weight is the sum of its children's weights."""

    def __init__(self, left_child=None, right_child=None):
        super(IntlNode, self).__init__()
        # Keep both children and cache the combined weight.
        self.left_child = left_child
        self.right_child = right_child
        self.wieght = left_child.get_wieght() + right_child.get_wieght()

    def isleaf(self):
        """An internal node is never a leaf."""
        return False

    def get_wieght(self):
        """Return the cached combined weight."""
        return self.wieght

    def get_left(self):
        """Return the left child node."""
        return self.left_child

    def get_right(self):
        """Return the right child node."""
        return self.right_child
class HuffTree(object):
    """Huffman tree wrapper: flag == 0 builds a single leaf, else merges two subtrees."""

    def __init__(self, flag, value=0, freq=0, left_tree=None, right_tree=None):
        super(HuffTree, self).__init__()
        if flag == 0:
            # Single-symbol tree.
            self.root = LeafNode(value, freq)
        else:
            # Internal tree combining the roots of two existing trees.
            self.root = IntlNode(left_tree.get_root(), right_tree.get_root())

    def get_root(self):
        """Return the tree's root node."""
        return self.root

    def get_wieght(self):
        """Return the total weight of the tree (the root's weight)."""
        return self.root.get_wieght()

    def traverse_huffman_tree(self, root, code, char_freq):
        """Record each leaf's bit-string code into char_freq ('0' left, '1' right)."""
        if not root.isleaf():
            self.traverse_huffman_tree(root.get_left(), code + '0', char_freq)
            self.traverse_huffman_tree(root.get_right(), code + '1', char_freq)
            return None
        char_freq[root.get_value()] = code
        print("it = %c and freq = %d code = %s" % (chr(root.get_value()), root.get_wieght(), code))
        return None
def buildHuffmanTree(list_hufftrees):
    """Merge single-node trees into one Huffman tree.

    Each round the two lightest trees are combined under a new internal
    node until a single tree remains; that tree is returned.
    """
    while len(list_hufftrees) > 1:
        # Stable sort by weight puts the two lightest trees at the front.
        list_hufftrees.sort(key=lambda t: t.get_wieght())
        lighter, heavier = list_hufftrees[0], list_hufftrees[1]
        remaining = list_hufftrees[2:]
        # flag=1 builds an internal node over the two subtrees.
        remaining.append(HuffTree(1, 0, 0, lighter, heavier))
        list_hufftrees = remaining
    return list_hufftrees[0]
def compress():
    """Huffman-compress the file named in the `input` entry into the file named in `output`.

    File format written:
      [4-byte big-endian leaf count]
      [per symbol: 1 byte value + 4-byte big-endian frequency]
      [packed bit stream]
      [1 byte = number of valid bits in the final byte][final byte, left-aligned]

    Reads the module-level tk StringVars `input` and `output` for the paths.
    """
    inputfilename = input.get()
    outputfilename = output.get()
    # Read the whole source file; `with` guarantees the handle is closed
    # (the original leaked it).
    with open(inputfilename, 'rb') as fin:
        filedata = fin.read()
    filesize = len(filedata)
    # Count how often each byte value occurs (insertion order = first occurrence).
    char_freq = {}
    for tem in filedata:
        char_freq[tem] = char_freq.get(tem, 0) + 1
    for tem in char_freq.keys():
        print(tem, ' : ', char_freq[tem])
    # One single-leaf Huffman tree per distinct byte value.
    # NOTE(review): an empty input file still raises IndexError in
    # buildHuffmanTree -- unchanged behavior from the original.
    list_hufftrees = [HuffTree(0, x, char_freq[x], None, None) for x in char_freq]
    # BUG FIX: write through a *local* handle.  The original declared
    # `global output` and rebound it to the file object, clobbering the
    # module-level tk StringVar and breaking every later button press.
    fout = open(outputfilename, 'wb')
    # Header: number of distinct byte values, big-endian 32-bit.
    fout.write(len(char_freq).to_bytes(4, 'big'))
    # Symbol table: byte value followed by its 32-bit big-endian frequency.
    for x in char_freq.keys():
        fout.write(bytes([x]))
        fout.write(char_freq[x].to_bytes(4, 'big'))
    # Build the tree; traverse_huffman_tree replaces each frequency in
    # char_freq with that symbol's bit-string code.
    tem = buildHuffmanTree(list_hufftrees)
    tem.traverse_huffman_tree(tem.get_root(), '', char_freq)
    # Emit the bit stream, flushing a byte whenever more than 8 bits are pending
    # (same `> 8` threshold as the original, so the trailer logic matches).
    code = ''
    for key in filedata:
        code = code + char_freq[key]
        while len(code) > 8:
            fout.write(bytes([int(code[:8], 2)]))
            code = code[8:]
    # Trailer: count of meaningful bits in the last byte, then that byte
    # (remaining bits left-aligned, zero-padded).
    fout.write(bytes([len(code)]))
    last = (int(code, 2) << (8 - len(code))) if code else 0
    fout.write(bytes([last]))
    fout.close()
def decompress():
    """Inverse of compress(): rebuild the Huffman tree from the header and decode.

    Reads the module-level tk StringVars `input` (compressed path) and
    `output` (destination path).
    """
    inputfilename = input.get()
    outputfilename = output.get()
    # Read the whole compressed file; `with` closes the handle (original leaked it).
    with open(inputfilename, 'rb') as fin:
        filedata = fin.read()
    filesize = len(filedata)
    # Header: 4-byte big-endian count of distinct byte values.
    leaf_node_size = int.from_bytes(filedata[0:4], 'big')
    # Symbol table: 5 bytes per symbol (1 byte value + 4-byte big-endian frequency).
    char_freq = {}
    for i in range(leaf_node_size):
        rec = 4 + i * 5
        c = filedata[rec]
        j = int.from_bytes(filedata[rec + 1:rec + 5], 'big')
        print(c, j)
        char_freq[c] = j
    # Rebuild the same tree compress() built; traverse fills char_freq with codes.
    list_hufftrees = [HuffTree(0, x, char_freq[x], None, None) for x in char_freq]
    tem = buildHuffmanTree(list_hufftrees)
    tem.traverse_huffman_tree(tem.get_root(), '', char_freq)
    # BUG FIX: write through a *local* handle instead of rebinding the
    # module-level `output` StringVar (the original's `global output`
    # clobbered it with a file object).
    fout = open(outputfilename, 'wb')
    code = ''
    currnode = tem.get_root()
    for x in range(leaf_node_size * 5 + 4, filesize):
        # Append this byte's 8 bits to the pending bit string.
        code += format(filedata[x], '08b')
        # Keep more than 24 bits in reserve so the 2 trailer bytes are
        # never decoded as data.
        while len(code) > 24:
            if currnode.isleaf():
                fout.write(bytes([currnode.get_value()]))
                currnode = tem.get_root()
            if code[0] == '1':
                currnode = currnode.get_right()
            else:
                currnode = currnode.get_left()
            code = code[1:]
    # Trailer: second-to-last byte holds the number of valid bits in the last byte.
    last_length = int(code[-16:-8], 2)
    # BUG FIX: the original sliced code[-8:-8 + last_length], which is ''
    # when last_length == 8, silently dropping a full final byte of data.
    code = code[:-16] + code[-8:][:last_length]
    while len(code) > 0:
        if currnode.isleaf():
            fout.write(bytes([currnode.get_value()]))
            currnode = tem.get_root()
        if code[0] == '1':
            currnode = currnode.get_right()
        else:
            currnode = currnode.get_left()
        code = code[1:]
    # A final symbol may complete exactly at the end of the bit stream.
    if currnode.isleaf():
        fout.write(bytes([currnode.get_value()]))
        currnode = tem.get_root()
    fout.close()
# GUI wiring: path labels, entry fields bound to the `input`/`output`
# StringVars, and the two action buttons.
# NOTE(review): every name below is bound to the return of .place(), which
# is None -- the widget references themselves are discarded.
tk.Label(window,text='input filename',font=('HGMaruGothicMPRO',16),fg='white',bg='dark blue').place(x=100,y=100)
tk.Label(window,text='output filename',font=('HGMaruGothicMPRO',16),fg='white',bg='dark blue').place(x=100,y=200)
input=tk.StringVar()
entry=tk.Entry(window,textvariable=input,font=('HGMaruGothicMPRO',16),width=16,show=None).place(x=100,y=130)
output=tk.StringVar()
entry=tk.Entry(window,textvariable=output,font=('HGMaruGothicMPRO',16),width=16,show=None).place(x=100,y=230)
ch1=tk.Button(window,text='compress',font=('HGMaruGothicMPRO',8),command=compress,width=10).place(x=100,y=300)
ch2=tk.Button(window,text='decompress',font=('HGMaruGothicMPRO',8),command=decompress,width=10).place(x=300,y=300)
window.mainloop()
|
[
"wrs792141579@mail.ustc.edu.cn"
] |
wrs792141579@mail.ustc.edu.cn
|
b941f4fec6db3324f517391c833d36bd9deb602e
|
1a114943c92a5db40034470ff31a79bcf8ddfc37
|
/stdlib_exam/unicodedata-example-1.py
|
8ab800f4c75d0ac65e9f6fbc5d28206808558553
|
[] |
no_license
|
renwl/mylinux
|
1924918599efd6766c266231d66b2a7ed6f6cdd1
|
0602fc6d2b0d254a8503e57310f848fc3e1a73b4
|
refs/heads/master
| 2020-07-10T22:12:03.259349
| 2017-01-02T12:32:04
| 2017-01-02T12:32:04
| 66,467,007
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
Python
| false
| false
| 396
|
py
|
# Demonstrates unicodedata lookups (Python 2: `print` statement, u'' literals).
import unicodedata
for char in [u"A", u"-", u"1", u"\N{LATIN CAPITAL LETTER O WITH DIAERESIS}"]:
    print repr(char),
    # General category (Lu=uppercase letter, Pd=dash, Nd=decimal digit).
    print unicodedata.category(char),
    # Canonical decomposition; empty string when the character is not composed.
    print repr(unicodedata.decomposition(char)),
    # Decimal and numeric values; the None default suppresses ValueError.
    print unicodedata.decimal(char, None),
    print unicodedata.numeric(char, None)
## u'A' Lu '' None None
## u'-' Pd '' None None
## u'1' Nd '' 1 1.0
## u'Ö' Lu '004F 0308' None None
|
[
"wenliang.ren@quanray.com"
] |
wenliang.ren@quanray.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.