blob_id (string, 40) | directory_id (string, 40) | path (string, 2–616) | content_id (string, 40) | detected_licenses (list, 0–69) | license_type (string, 2 classes) | repo_name (string, 5–118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2–10.3M) | extension (string, 246 classes) | content (string, 2–10.3M) | authors (list, 1) | author_id (string, 0–212)
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c8bb720062730af2e1e92d5dc5cfeb4a68621f33
|
2d040fde3d3638939718ccfd42eff9e35c352331
|
/EXAMPLES/EDABIT/EXPERT/001_100/32_word_buckets.py
|
7af06c08fa43a2ad265f550404360d8d86dd1a38
|
[] |
no_license
|
ceyhunsahin/TRAINING
|
fcb9837833d2099dc5724f46a675efe4edf3d5f1
|
73cc7dba8447e4066ccfe7adadc727d134ffbf0b
|
refs/heads/master
| 2023-07-13T01:12:53.160300
| 2021-08-17T11:50:51
| 2021-08-17T11:50:51
| 397,591,819
| 1
| 0
| null | 2021-08-18T12:25:45
| 2021-08-18T12:25:44
| null |
UTF-8
|
Python
| false
| false
| 1,904
|
py
|
"""
Word Buckets
Write a function that divides a phrase into word buckets, with each bucket containing n or fewer characters.
Only include full words inside each bucket.
Examples
split_into_buckets("she sells sea shells by the sea", 10)
➞ ["she sells", "sea shells", "by the sea"]
split_into_buckets("the mouse jumped over the cheese", 7)
➞ ["the", "mouse", "jumped", "over", "the", "cheese"]
split_into_buckets("fairy dust coated the air", 20)
➞ ["fairy dust coated", "the air"]
split_into_buckets("a b c d e", 2)
➞ ["a", "b", "c", "d", "e"]
Notes
Spaces count as one character.
Trim beginning and end spaces for each word bucket (see final example).
If buckets are too small to hold a single word, return an empty list: []
The final goal isn't to return just the words whose length is equal to (or lower than) the given n,
but to return the entire given phrase bucketized (if possible). So, in a case where "by" is the only word with a proper length, the phrase can't be bucketized, and the returned list has to be empty.
"""
def split_into_buckets(phrase, n):
    p = phrase.split(" ")
    # A single word longer than n can never fit in any bucket.
    if max(len(word) for word in p) > n:
        return []
    b, current = [], []
    for word in p:
        # " ".join accounts for the separating space when the bucket is non-empty.
        if len(" ".join(current + [word])) <= n:
            current.append(word)
        else:
            b.append(" ".join(current))
            current = [word]
    if current:
        b.append(" ".join(current))
    return b
#split_into_buckets("she sells sea shells by the sea", 10)
#➞ ["she sells", "sea shells", "by the sea"]
split_into_buckets("the mouse jumped over the cheese", 7)
#➞ ["the", "mouse", "jumped", "over", "the", "cheese"]
#split_into_buckets("fairy dust coated the air", 20)
#➞ ["fairy dust coated", "the air"]
#split_into_buckets("a b c d e", 1)
#➞ ["a", "b", "c", "d", "e"]
|
[
"mustafaankarali35@gmail.com"
] |
mustafaankarali35@gmail.com
|
26148a674bd24731dcff1e3a3b3e1a108a07024c
|
f4536137b9220a1b34a5f51584edaf8c4c70177b
|
/venv/bin/pip
|
ada115087321c2f8499315ca4e595d4d9b657f9b
|
[] |
no_license
|
aeaa1998/flat-shading
|
3e1fc1dc5b127038f459eb285c95e76512403099
|
3a29bd77036d2693a5caf3162eb948e459bf5715
|
refs/heads/master
| 2022-12-23T10:19:49.239098
| 2020-10-04T23:48:50
| 2020-10-04T23:48:50
| 301,250,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
#!/Users/tito/PycharmProjects/firstExampleGraficos/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
|
[
"augustoalonso98@gmail.comn"
] |
augustoalonso98@gmail.comn
|
|
ce54656ea5db1e7a8ac9b575eb477e984a3bc010
|
d56f89fea712d9ff2cb5f15b9108e2bf8df0fef3
|
/samples/embed_m_freq.py
|
e326c3c208e9783e0f7c030ba90a01faea4735f5
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
huhailang9012/VideoDigitalWatermarking
|
91e159239ebb61c72b4fb18e16afcb06a19b7155
|
6439881dc88fb7257a3dd9856b185e5c667b89b4
|
refs/heads/master
| 2021-12-27T13:46:36.013660
| 2018-01-10T20:29:52
| 2018-01-10T20:29:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 835
|
py
|
#
# embed_m_freq.py
# Created by pira on 2017/08/16.
#
#coding: utf-8
from VideoDigitalWatermarking import *
import numpy as np
import math
fnin = 'test.bmp'
fnout = 'test_embeded.bmp'
secret_data = [1,1,1,1,0,0,0]
secret_length = len(secret_data)
N = math.ceil(math.log2(secret_length+1))
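# Worked example: secret_length = 7 -> N = ceil(log2(7 + 1)) = 3, the degree
# of the m-sequence generated below.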
m = generateM(N)
print('m =', m, '\n')
rgb_data = readColorImage(fnin)
ycc_data = rgb2ycc(rgb_data)
y_data = get_y(ycc_data)
dct_data = dct_dim2(y_data)
embeded_dct_y_data = embedMseq(dct_data, secret_data, m, a=100, tau=1)
embeded_y_data = idct_dim2(embeded_dct_y_data)
# Replace the Y channel with the watermarked version (numpy slice form of the
# per-pixel double loop; behavior is identical).
ycc_data[:, :, 0] = embeded_y_data
embeded_rgb_data = ycc2rgb(ycc_data)
writeImage(fnout, embeded_rgb_data)
|
[
"pira.y1121@gmail.com"
] |
pira.y1121@gmail.com
|
760af086281e9a6910f270a450ae709f09235e16
|
cd56a6f2e2cdc4ec79ecd7d1ee2fdf7032a8e0fd
|
/multiples_of_number.py
|
4cd0460da3a8675b4a301420e6c615f615fdb343
|
[] |
no_license
|
Jicaai/CodeEval
|
2f878c38999e6e8961222270343295ebb1f29938
|
ce9e138b1a8b9171754b82de41c3b00885573ccb
|
refs/heads/master
| 2019-05-14T07:09:08.817035
| 2011-08-10T16:33:22
| 2011-08-10T16:33:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 811
|
py
|
# -*- coding: utf-8 -*-
# Author: Tiny
# Date : July 30, 2011
# email : plakitboy@gmail.com
# Multiples of a number
'''
Description:
Given numbers x and n, where n is a power of 2, print out the smallest multiple of n which is greater than or equal to x. Do not use division or modulo operator.
Input sample:
The first argument will be a text file containing a comma separated list of two integers, one list per line. e.g.
13,8
17,16
Output sample:
Print to stdout, the smallest multiple of n which is greater than or equal to x, one per line.
e.g.
16
32
'''
import sys

# Read "x,n" pairs and print the smallest multiple of n that is >= x,
# using only repeated addition (no division or modulo).
with open(sys.argv[1], 'r') as infile:
    for line in infile:
        threshold, num = map(int, line.split(','))
        multiple = num
        while multiple < threshold:
            multiple += num
        print(multiple)
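# Because n is guaranteed to be a power of 2, the same answer can also be
# computed without any loop via a bit mask (a sketch, not part of the original
# solution): rounding x up to a multiple of n is (x + n - 1) & ~(n - 1),
# which uses neither division nor modulo.
def smallest_multiple_pow2(x, n):
    return (x + n - 1) & ~(n - 1)

assert smallest_multiple_pow2(13, 8) == 16
assert smallest_multiple_pow2(17, 16) == 32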
|
[
"plakitboy@gmail.com"
] |
plakitboy@gmail.com
|
3ea4e9a92e3011fef6d2fd88811109593685b846
|
549de3f17e5af1e424d7a6b30fc6be663ec01e08
|
/pine/market/bitflyer.py
|
459bb8c6fd9440a18440c1e06abb49206ac4e694
|
[] |
no_license
|
kzh-dev/pine-bot-server
|
25eedcce3d62a3b788a256107ef491a46664a7c8
|
d36107328876adb3cd473587a5a31169fb6a5a98
|
refs/heads/master
| 2020-04-25T14:31:41.255771
| 2019-10-22T16:56:57
| 2019-10-22T16:56:57
| 172,844,592
| 13
| 15
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,021
|
py
|
# coding=utf-8
import requests
from .base import MarketBase, MarketError, empty_udf, utcunixtime, register_market, rows_to_udf
URL_TMPL = "https://api.cryptowat.ch/markets/bitflyer/btcfxjpy/ohlc?" + \
"periods={resolution}&after={f}&before={t}"
MARKET = 'BITFLYER'
SYMBOL = 'FXBTCJPY'
TICKERID = ':'.join((MARKET, SYMBOL))
class BitFlyerMarketBase (MarketBase):
SYMBOLS = (SYMBOL,)
def __init__ (self, symbol=SYMBOL, resolution=60):
super().__init__(MARKET, symbol, resolution)
def mintick (self):
return 1
class BitFlyerMarketDirect (BitFlyerMarketBase):
def __init__ (self, symbol=SYMBOL, resolution=60):
super().__init__(symbol, resolution)
resolution *= 60
unixtime = utcunixtime()
since = unixtime - resolution * 256
url = URL_TMPL.format(resolution=resolution, f=since-1, t=unixtime+1)
res = requests.get(url).json().get('result', None)
if res:
rows = res.get(str(resolution), None)
if rows:
self.data = rows_to_udf(rows)
# CandleProxyClient
from .base import PROXY_PORT
from mprpc import RPCClient
class BitFlyerMarket (BitFlyerMarketBase):
def __init__ (self, symbol=SYMBOL, resolution=60, port=PROXY_PORT):
super().__init__(symbol, resolution)
self.client = RPCClient('127.0.0.1', port)
self.data = self.client.call('ohlcv', TICKERID, self.resolution, 256)
def step_ohlcv (self, next_clock):
d1, d0 = self.client.call('step_ohlcv', TICKERID, self.resolution, next_clock)
if d1 is None:
return None
if d0['t'] <= self.data['t'][-1]:
return None
for k,v in d0.items():
self.data[k].pop(0)
self.data[k][-1] = d1[k]
self.data[k].append(v)
return d0['t']
register_market(MARKET, BitFlyerMarket)
#register_market(MARKET, BitFlyerMarketDirect)
# CandleProxyServer
from .base import MarketOhlcvAdapter
class BitFlyerOhlcAdaptor (MarketOhlcvAdapter):
def __init__ (self):
super().__init__(TICKERID)
def fetch_candles (self, resolution, from_, to):
resolution *= 60
url = URL_TMPL.format(resolution=resolution, f=from_, t=to)
intvl = 1.0
while True:
try:
j = requests.get(url).json()
res = j.get('result', None)
if res is None:
raise MarketError('missing result: {}'.format(j))
rows = res.get(str(resolution), None)
if rows is None:
raise MarketError('invalid result: {}'.format(res))
return rows_to_udf(rows)
except Exception as e:
print(e)
#if __name__ == '__main__':
# BitFlyerMarketDirect()
if __name__ == '__main__':
import os
from gevent.server import StreamServer
port = int(os.environ.get('PORT', PROXY_PORT))
cli = BitFlyerMarket(port=port)
print(cli.data)
|
[
"dev69.kzh@egmail.com"
] |
dev69.kzh@egmail.com
|
8eddb069f39ef6a0e48c88dcb87c3faa3408030c
|
e737e01d2937e313e2d55aa2418e8aee197fd5d8
|
/spect.py
|
e8cc1dd22d657f41399f4b9e0f27c2d92b352aa0
|
[] |
no_license
|
gauravuttarkar/soft_computing
|
f1c77f627ca5eea68e25c9f4e81ba4ff6eb8be6c
|
797a6751523e15e0e79e25987b35b00454a16856
|
refs/heads/master
| 2020-03-26T15:11:10.453497
| 2018-08-16T18:49:54
| 2018-08-16T18:49:54
| 145,026,987
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,643
|
py
|
import csv
n = 23
def func(w, data, threshold, lr):
    # One perceptron training step. data holds the attribute values with the
    # class label last (data[-1]); w[-1] is additionally added as a bias term.
    total = 0
    for i in range(len(data)):
        total = total + data[i] * w[i]
    total = total + w[-1]
    print(total)
    res = None
    e = 0
    if total >= threshold:
        res = 1
    else:
        res = 0
    if data[-1] - res:
        e = 1
        # Perceptron update rule: w <- w + lr * (target - prediction) * x
        for i in range(len(w)):
            w[i] = w[i] + lr * (data[-1] - res) * data[i]
    return (w, e)

def test(w, data, threshold, lr):
    total = 0
    for i in range(len(data)):
        total = total + data[i] * w[i]
    total = total + w[-1]
    if total >= threshold:
        res = 1
    else:
        res = 0
    print("error is ", data[-1] - res)
    return data[-1] - res
data = []
with open('SPECT.csv') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
# print(row)
list1=[]
for i in range(1,23):
list1.append(float(row['Attr_'+str(i)]))
if row['Class'] == 'Yes':
list1.append(1)
else:
list1.append(0)
data.append(list1)
threshold = 2
lr = 0.2
init_weight = 1 / n
iterate = 5000
w = []
for i in range(n):
w.append(init_weight)
print(w)
ten_fold = 0
flag = 0
while ten_fold < 10:
print('*'*50)
for j in range(iterate):
for i in data[:ten_fold+int(267*0.9)]:
w,e = func(w,i,threshold,lr)
flag = e
if flag == 0:
for i in data[:int(267*0.9)]:
w,e = func(w,i,threshold,lr)
if e == 0:
break
print('Testing')
for i in data[int(267*0.9):]:
test(w,i,threshold,lr)
for i in data[:268-int(267*0.9)] :
test(w,i,threshold,lr)
ten_fold = ten_fold + 1
count = 0
flag = 0
for i in data:
count = count + 1
e = test(w,i,threshold,lr)
flag = e
if flag == 0:
print('Test Successful for ',count,"cases")
else:
print('Test failed')
print(w)
|
[
"gaurav.uttarkar@gmail.com"
] |
gaurav.uttarkar@gmail.com
|
4337652ec8fcaa828561209d2ef97f26833bde5b
|
80b7f2a10506f70477d8720e229d7530da2eff5d
|
/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/dmlearnedinfo_4f43a9179cbbb274bfab560ae9dfbd66.py
|
bccd3b329da92782d108098a9e5f30e66f1a8192
|
[
"MIT"
] |
permissive
|
OpenIxia/ixnetwork_restpy
|
00fdc305901aa7e4b26e4000b133655e2d0e346a
|
c8ecc779421bffbc27c906c1ea51af3756d83398
|
refs/heads/master
| 2023-08-10T02:21:38.207252
| 2023-07-19T14:14:57
| 2023-07-19T14:14:57
| 174,170,555
| 26
| 16
|
MIT
| 2023-02-02T07:02:43
| 2019-03-06T15:27:20
|
Python
|
UTF-8
|
Python
| false
| false
| 9,343
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class DmLearnedInfo(Base):
"""This object holds lists of the dm learned information.
The DmLearnedInfo class encapsulates a list of dmLearnedInfo resources that are managed by the system.
A list of resources can be retrieved from the server using the DmLearnedInfo.find() method.
"""
__slots__ = ()
_SDM_NAME = "dmLearnedInfo"
_SDM_ATT_MAP = {
"AverageLooseRtt": "averageLooseRtt",
"AverageLooseRttVariation": "averageLooseRttVariation",
"AverageStrictRtt": "averageStrictRtt",
"AverageStrictRttVariation": "averageStrictRttVariation",
"DmQueriesSent": "dmQueriesSent",
"DmResponsesReceived": "dmResponsesReceived",
"IncomingLabelOuterInner": "incomingLabelOuterInner",
"MaxLooseRtt": "maxLooseRtt",
"MaxStrictRtt": "maxStrictRtt",
"MinLooseRtt": "minLooseRtt",
"MinStrictRtt": "minStrictRtt",
"OutgoingLabelOuterInner": "outgoingLabelOuterInner",
"Type": "type",
}
_SDM_ENUM_MAP = {}
def __init__(self, parent, list_op=False):
super(DmLearnedInfo, self).__init__(parent, list_op)
@property
def AverageLooseRtt(self):
# type: () -> str
"""
Returns
-------
- str: This signifies the average loose RTT.
"""
return self._get_attribute(self._SDM_ATT_MAP["AverageLooseRtt"])
@property
def AverageLooseRttVariation(self):
# type: () -> str
"""
Returns
-------
- str: This signifies the average loose RTT variation.
"""
return self._get_attribute(self._SDM_ATT_MAP["AverageLooseRttVariation"])
@property
def AverageStrictRtt(self):
# type: () -> str
"""
Returns
-------
- str: This signifies the average strict RTT.
"""
return self._get_attribute(self._SDM_ATT_MAP["AverageStrictRtt"])
@property
def AverageStrictRttVariation(self):
# type: () -> str
"""
Returns
-------
- str: This signifies the average strict RTT variation.
"""
return self._get_attribute(self._SDM_ATT_MAP["AverageStrictRttVariation"])
@property
def DmQueriesSent(self):
# type: () -> int
"""
Returns
-------
- number: This signifies the number of DM queries sent.
"""
return self._get_attribute(self._SDM_ATT_MAP["DmQueriesSent"])
@property
def DmResponsesReceived(self):
# type: () -> int
"""
Returns
-------
- number: This signifies the total number of DM responses received.
"""
return self._get_attribute(self._SDM_ATT_MAP["DmResponsesReceived"])
@property
def IncomingLabelOuterInner(self):
# type: () -> str
"""
Returns
-------
- str: This signifies the incoming label information.
"""
return self._get_attribute(self._SDM_ATT_MAP["IncomingLabelOuterInner"])
@property
def MaxLooseRtt(self):
# type: () -> str
"""
Returns
-------
- str: This signifies the maximum loose RTT.
"""
return self._get_attribute(self._SDM_ATT_MAP["MaxLooseRtt"])
@property
def MaxStrictRtt(self):
# type: () -> str
"""
Returns
-------
- str: This signifies the maximum strict RTT.
"""
return self._get_attribute(self._SDM_ATT_MAP["MaxStrictRtt"])
@property
def MinLooseRtt(self):
# type: () -> str
"""
Returns
-------
- str: This signifies the minimum loose RTT.
"""
return self._get_attribute(self._SDM_ATT_MAP["MinLooseRtt"])
@property
def MinStrictRtt(self):
# type: () -> str
"""
Returns
-------
- str: This signifies the minimum strict RTT.
"""
return self._get_attribute(self._SDM_ATT_MAP["MinStrictRtt"])
@property
def OutgoingLabelOuterInner(self):
# type: () -> str
"""
Returns
-------
- str: This signifies the Outgoing Label information.
"""
return self._get_attribute(self._SDM_ATT_MAP["OutgoingLabelOuterInner"])
@property
def Type(self):
# type: () -> str
"""
Returns
-------
- str: This signifies the type of the learned information.
"""
return self._get_attribute(self._SDM_ATT_MAP["Type"])
def add(self):
"""Adds a new dmLearnedInfo resource on the json, only valid with batch add utility
Returns
-------
- self: This instance with all currently retrieved dmLearnedInfo resources using find and the newly added dmLearnedInfo resources available through an iterator or index
Raises
------
- Exception: if this function is not being used with config assistance
"""
return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(
self,
AverageLooseRtt=None,
AverageLooseRttVariation=None,
AverageStrictRtt=None,
AverageStrictRttVariation=None,
DmQueriesSent=None,
DmResponsesReceived=None,
IncomingLabelOuterInner=None,
MaxLooseRtt=None,
MaxStrictRtt=None,
MinLooseRtt=None,
MinStrictRtt=None,
OutgoingLabelOuterInner=None,
Type=None,
):
# type: (str, str, str, str, int, int, str, str, str, str, str, str, str) -> DmLearnedInfo
"""Finds and retrieves dmLearnedInfo resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve dmLearnedInfo resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all dmLearnedInfo resources from the server.
Args
----
- AverageLooseRtt (str): This signifies the average loose RTT.
- AverageLooseRttVariation (str): This signifies the average loose RTT variation.
- AverageStrictRtt (str): This signifies the average strict RTT.
- AverageStrictRttVariation (str): This signifies the average strict RTT variation.
- DmQueriesSent (number): This signifies the number of DM queries sent.
- DmResponsesReceived (number): This signifies the total number of DM responses received.
- IncomingLabelOuterInner (str): This signifies the incoming label information.
- MaxLooseRtt (str): This signifies the maximum loose RTT.
- MaxStrictRtt (str): This signifies the maximum strict RTT.
- MinLooseRtt (str): This signifies the minimum loose RTT.
- MinStrictRtt (str): This signifies the minimum strict RTT.
- OutgoingLabelOuterInner (str): This signifies the Outgoing Label information.
- Type (str): This signifies the type of the learned information.
Returns
-------
- self: This instance with matching dmLearnedInfo resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of dmLearnedInfo data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the dmLearnedInfo resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
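# Hedged usage sketch (hypothetical; the accessor path to a DmLearnedInfo list
# depends on the surrounding session/protocol objects, which this file does not
# show). Anchoring a find() pattern with ^...$ makes it an exact match:
#
#   infos = dm_learned_info.find(Type='^strict$')
#   for info in infos:
#       print(info.MinStrictRtt, info.AverageStrictRtt, info.MaxStrictRtt)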
|
[
"andy.balogh@keysight.com"
] |
andy.balogh@keysight.com
|
d47c6412a8baf2436f23808f2157667168e10d4e
|
394104f5ee856aadb0ef420631ddf7b65dc2e3b7
|
/purchase_mate/users/forms.py
|
0bc10d7e90d7f558d418b93c9ea9e33d3b23bb4d
|
[] |
no_license
|
LeeTaeGeom/Purchase-mate
|
7ac5f858f899afe5ce12907f10374a6622b0edc5
|
0314289d0aa680a5af61886a0c0ac84e650bcf8d
|
refs/heads/main
| 2023-05-31T02:31:55.114658
| 2021-07-04T10:21:19
| 2021-07-04T10:21:19
| 381,567,535
| 0
| 0
| null | 2021-06-30T03:48:06
| 2021-06-30T03:48:05
| null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
from django.contrib.auth.models import User
from django import forms
from .models import Profile
class SignupForm(forms.Form):
model = User
name = forms.CharField(label="이름")
phnum = forms.CharField(label="휴대폰 번호")
address = forms.CharField(label="자취방 주소")
def signup(self, request, user):
profile = Profile()
profile.user = user
profile.name = self.cleaned_data["name"]
profile.phnum = self.cleaned_data["phnum"]
profile.address = self.cleaned_data["address"]
profile.save()
user.save()
return user
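# The signup(self, request, user) hook above follows django-allauth's custom
# signup form convention. If that is the intent (an assumption; this file alone
# does not confirm it), the form would be wired up in settings.py as:
#
#   ACCOUNT_SIGNUP_FORM_CLASS = 'users.forms.SignupForm'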
|
[
"sangjune2000@naver.com"
] |
sangjune2000@naver.com
|
bfe4b996f1eac1b0d7132647fe8198f51939af12
|
94cb27460041192295e7f00bbf44aff58dd07439
|
/Orbicle/materials.py
|
d1d104643a8c1259a4f5edf41f516902b6079314
|
[] |
no_license
|
jaycoskey/Shapes
|
0ab820fc799d6aedd799263522288be17a0347ef
|
2f414ac196949ca31a4f31447bd7ae5615dc1690
|
refs/heads/master
| 2022-11-08T05:26:19.505922
| 2022-10-10T10:01:50
| 2022-10-10T10:01:50
| 58,186,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,598
|
py
|
# Note: This file is currently unused.
import bpy  # needed for the bpy.* calls below
# mat_red = material_make('Red', (1,0,0), (1,0,0), 1)
# mat_green = material_make('Green', (0,1,0), (0,1,0), 1)
# mat_blue = material_make('Blue', (0,0,1), (0,0,1), 1)
# mat_yellow = material_make('Yellow', (1,1,0), (1,1,0), 1)
# mat_magenta = material_make('Magenta', (1,0,1), (1,0,1), 1)
# mat_cyan = material_make('Cyan', (0,1,1), (0,1,1), 1)
# mat_pink = material_make('Pink', (1, 0.7529,0.7961), (1, 0.7529,0.7961), 1)
# mat_purple = material_make('Purple', (0.6275,0.1254,0.9412), (0.6275,0.1254,0.9412), 1)
# mat_white = material_make('White', (1,1,1), (1,1,1), 1)
# material_set_by_name('tori12', mat_blue)
# material_set_by_name('tori20', mat_green)
# material_set_by_name('hexgrid_edge', mat_pink)
# material_set_by_name('hexgrid_vert', mat_red)
# See "makeMaterial" at
# https://wiki.blender.org/index.php/Dev:Py/Scripts/Cookbook/Code_snippets/Materials_and_textures
def material_make(name, diffuse, specular, alpha):
mat = bpy.data.materials.new(name)
mat.diffuse_color = diffuse
mat.diffuse_shader = 'LAMBERT'
mat.diffuse_intensity = 1.0
mat.specular_color = specular
mat.specular_shader = 'COOKTORR'
mat.specular_intensity = 0.5
mat.alpha = alpha
mat.ambient = 1
return mat
def material_set(obj, mat):
bpy.ops.object.mode_set(mode='OBJECT')
for mat_slot in obj.material_slots:
bpy.ops.object.material_slot_remove()
obj.data.materials.append(mat)
def material_set_by_name(name, mat):
# bpy.ops.object.editmode_toggle()
bpy.ops.object.select_all(action='DESELECT')
# bpy.ops.objects.select_pattern(pattern=name).select_all(action='SELECT')
# How does the following line treat multiple objects with the same name?
grp = bpy.data.groups.new(name)
obj = bpy.data.objects.get(name)
if (obj):
print('Found object(s) with name: ' + name)
obj.select = True
bpy.context.scene.objects.active = obj
# TODO: Linking to group should only appear in selected_objects loop. Duplicating here for testing.
# grp.objects.link(obj)
material_set(obj, mat)
else:
print('Did not find object with name: ' + name)
for obj in bpy.context.selected_objects:
grp.objects.link(obj)
material_set(obj, mat)
# bpy.ops.object.editmode_toggle()
def name_material_set(obj, name, mat):
obj.name = name
me = obj.data
me.materials.clear()
me.materials.append(mat)
|
[
"jay.coskey@gmail.com"
] |
jay.coskey@gmail.com
|
a6675eda16e06e5d9b102c16cafe8da9bda77128
|
0a32dafa81f92aa03de23ed0c1cec532ab1bc509
|
/flask/app-route/src/sample_api.py
|
b8879ce723d60555b5eefc8c9ed773c5e3cfc596
|
[] |
no_license
|
kokilavemula/python-1
|
c7ec09236cd49237dc32ed8dd88d117cb1bd4b2e
|
ed1e166ad4c9f1b27b231807f4400625a49e1cfa
|
refs/heads/master
| 2021-03-20T23:15:32.549693
| 2020-03-14T07:15:41
| 2020-03-14T07:15:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
from flask import Flask
app = Flask(__name__)
@app.route('/api/v1')
def view_message():
return {"message": "This is flask !"}
@app.route('/api/v1/version')
def api_version():
return {"version": "1.0.0"}
|
[
"noreply@github.com"
] |
kokilavemula.noreply@github.com
|
78b66f564eb446e6c12c6c82556db71f4cf8af99
|
58ac0e88ff5b3973940adeeb36c4c4aa5de005f7
|
/kullanıcı_girisi.py
|
38258e032372233adc5e3b38cfddda1b8966db52
|
[] |
no_license
|
MrSerhat/Python
|
ce9e0611b62343189e2cfbdefbbf74df815f9587
|
d9473ee191e54e4fb782f7dc174c6bff0128e6f3
|
refs/heads/master
| 2022-12-23T17:24:01.676036
| 2020-09-25T13:09:23
| 2020-09-25T13:09:23
| 298,575,175
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 867
|
py
|
print("""
***********************
Kullanıcı Girişi Programı
***********************
""")
sys_kullanıcı_adı="Serhat"
sys_parola="12345"
giriş_hakkı=3
while True:
kullanıcı_adı=input("Kullanıcı Adı:")
parola=input("Parola:")
if(kullanıcı_adı!=sys_kullanıcı_adı and parola==sys_parola):
print("Kullanıcı Adı Hatalı...")
giriş_hakkı-=1
elif (kullanıcı_adı==sys_kullanıcı_adı and parola!=sys_parola):
print("Parola hatalı")
giriş_hakkı-=1
elif(kullanıcı_adı!=sys_kullanıcı_adı and parola!=sys_parola):
print("Kullanıcı adı ve parola hatalı")
giriş_hakkı-=1
else:
print("Sisteme Başarıyla giriy yapıldı....")
break
if (giriş_hakkı==0):
print("Giriş hakkınız bitti....")
break
|
[
"noreply@github.com"
] |
MrSerhat.noreply@github.com
|
1c5097ca3b89c4671d58ce7b7bfd0ad33356c711
|
72fd1c4c2d0c49135bbd6b1f05d86bcd73ba7801
|
/[8] Two Pointers LC Solutions/remove_duplicates_sorted_array.py
|
c11d615891019f44d038b571038b2df68d38bc67
|
[] |
no_license
|
SUKESH127/bitsherpa
|
ca1400ad5b05ac8615f76f46251f572c96161b08
|
325cc124f44fa274744f023ea47660d14d1ab131
|
refs/heads/master
| 2022-12-29T18:29:47.300045
| 2020-10-18T02:39:17
| 2020-10-18T02:39:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,083
|
py
|
# QUESTION: https://leetcode.com/problems/remove-duplicates-from-sorted-array/
from typing import List  # needed for the List[int] annotation below

class Solution:
    # SOLUTION: Not trivial. Since we need everything to
    # be in place, we use a two pointers approach.
    # The upper pointer, j, is going to go through all the elements (it will be the "fast" pointer).
    # The lower pointer, i, will always be pointing to
    # the last confirmed unique element. This is the loop invariant.
    # We start i at 0 and j at 1. We increment j until nums[j] is different from nums[i].
    # When this happens, we know that nums[j] is the next distinct element. Since we've found
    # a new unique element, we increment i and then set nums[i] to nums[j].
    # We continue until j has hit the end of the array. Since i points to the last uniquely seen
    # element, i+1 will be the number of unique elements.
    def removeDuplicates(self, nums: List[int]) -> int:
        i = 0
        for j in range(1, len(nums)):
            if nums[j] != nums[i]:
                i += 1
                nums[i] = nums[j]
        return i + 1
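# A minimal standalone check (a hypothetical harness, not part of the LeetCode
# submission): the first k slots of nums hold the unique values after the call.
if __name__ == "__main__":
    nums = [0, 0, 1, 1, 1, 2, 2, 3, 3, 4]
    k = Solution().removeDuplicates(nums)
    assert k == 5 and nums[:k] == [0, 1, 2, 3, 4]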
|
[
"advaitchauhan95@gmail.com"
] |
advaitchauhan95@gmail.com
|
272b2c4c75903eb46eab6d6850e8b942c041f7cc
|
5c6a81adfa3f28e4016b604cc8ccc93d86871e8c
|
/setup.py
|
a886daff712d3ca6e9be885bc5a84c04e8a2ac4f
|
[
"MIT",
"CC-BY-3.0"
] |
permissive
|
ogut77/spleeter-gpu
|
bd85697269db8427955bb9317c2cbfe9879d0970
|
7b16b1bac6bfa4ab1a2c599e3426fcb2a00398dd
|
refs/heads/master
| 2022-09-02T14:04:21.866399
| 2020-05-27T08:52:07
| 2020-05-27T08:52:07
| 264,083,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,306
|
py
|
#!/usr/bin/env python
# coding: utf8
""" Distribution script. """
import sys
from os import path
from setuptools import setup
__email__ = 'research@deezer.com'
__author__ = 'Deezer Research'
__license__ = 'MIT License'
# Default project values.
project_name = 'spleeter-gpu'
project_version = '1.5.2'
tensorflow_dependency = 'tensorflow-gpu'
tensorflow_version = '1.15.3'
here = path.abspath(path.dirname(__file__))
readme_path = path.join(here, 'README.md')
with open(readme_path, 'r') as stream:
readme = stream.read()
# Package setup entrypoint.
setup(
name=project_name,
version=project_version,
description='''
The Deezer source separation library with
pretrained models based on tensorflow.
''',
long_description=readme,
long_description_content_type='text/markdown',
author='Deezer Research',
author_email='research@deezer.com',
url='https://github.com/deezer/spleeter',
license='MIT License',
packages=[
'spleeter',
'spleeter.audio',
'spleeter.commands',
'spleeter.model',
'spleeter.model.functions',
'spleeter.model.provider',
'spleeter.resources',
'spleeter.utils',
],
package_data={'spleeter.resources': ['*.json']},
python_requires='>=3.6, <3.8',
include_package_data=True,
install_requires=[
'ffmpeg-python',
'importlib_resources ; python_version<"3.7"',
'norbert==0.2.1',
'pandas==0.25.1',
'requests',
'setuptools>=41.0.0',
'librosa==0.7.2',
'{}=={}'.format(tensorflow_dependency, tensorflow_version),
],
extras_require={
'evaluation': ['musdb==0.3.1', 'museval==0.3.0']
},
entry_points={
'console_scripts': ['spleeter=spleeter.__main__:entrypoint']
},
classifiers=[
'Environment :: Console',
'Environment :: MacOS X',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Artistic Software',
'Topic :: Multimedia',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Multimedia :: Sound/Audio :: Analysis',
'Topic :: Multimedia :: Sound/Audio :: Conversion',
'Topic :: Multimedia :: Sound/Audio :: Sound Synthesis',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities']
)
|
[
"noreply@github.com"
] |
ogut77.noreply@github.com
|
4eda5ebc13377f729fd3c72e0c7b2a03e4975869
|
046674b67ad5727ec65725e826ea354ca07d71da
|
/investpy/data/certificates_data.py
|
3ba7558dcaa8205c5988f5e71f0675594cc587a1
|
[
"MIT"
] |
permissive
|
alexanu/investpy
|
cad2d0ef3f67b4fa4ecb23a4730ae96c1a162168
|
bc46d30af21f03ab5b7b9aa259b755db281277bb
|
refs/heads/master
| 2020-12-28T08:48:01.479075
| 2020-05-15T22:42:15
| 2020-05-15T22:42:15
| 238,252,327
| 0
| 1
|
MIT
| 2020-05-15T22:42:16
| 2020-02-04T16:31:29
| null |
UTF-8
|
Python
| false
| false
| 11,141
|
py
|
#!/usr/bin/python3
# Copyright 2018-2020 Alvaro Bartolome @ alvarob96 in GitHub
# See LICENSE for details.
import json
import unidecode
import pandas as pd
import pkg_resources
def certificates_as_df(country=None):
"""
This function retrieves all the data stored in `certificates.csv` file, which previously was retrieved from
Investing.com. Since the resulting object is a matrix of data, the certificate's data is properly structured
in rows and columns, where columns are the certificate data attribute names. Additionally, country
filtering can be specified, which will make this function return not all the stored certificates, but just
the data of the certificates from the introduced country.
Args:
country (:obj:`str`, optional): name of the country to retrieve all its available certificates from.
Returns:
:obj:`pandas.DataFrame` - certificates_df:
The resulting :obj:`pandas.DataFrame` contains all the certificate's data from the introduced country if specified,
or from every country if None was specified, as indexed in Investing.com from the information previously
retrieved by investpy and stored on a csv file.
So on, the resulting :obj:`pandas.DataFrame` will look like::
country | name | full_name | symbol | issuer | isin | asset_class | underlying
--------|------|-----------|--------|--------|------|-------------|------------
xxxxxxx | xxxx | xxxxxxxxx | xxxxxx | xxxxxx | xxxx | xxxxxxxxxxx | xxxxxxxxxx
Raises:
ValueError: raised whenever any of the introduced arguments is not valid.
FileNotFoundError: raised if `certificates.csv` file was not found.
IOError: raised when `certificates.csv` file is missing or empty.
"""
if country is not None and not isinstance(country, str):
raise ValueError("ERR#0025: specified country value not valid.")
resource_package = 'investpy'
resource_path = '/'.join(('resources', 'certificates', 'certificates.csv'))
if pkg_resources.resource_exists(resource_package, resource_path):
certificates = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))
else:
raise FileNotFoundError("ERR#0096: certificates file not found or errored.")
if certificates is None:
raise IOError("ERR#0097: certificates not found or unable to retrieve.")
certificates.drop(columns=['tag', 'id'], inplace=True)
certificates = certificates.where(pd.notnull(certificates), None)
if country is None:
certificates.reset_index(drop=True, inplace=True)
return certificates
elif unidecode.unidecode(country.lower()) in certificate_countries_as_list():
certificates = certificates[certificates['country'] == unidecode.unidecode(country.lower())]
certificates.reset_index(drop=True, inplace=True)
return certificates
def certificates_as_list(country=None):
"""
This function retrieves all the available certificates indexed on Investing.com, already stored on `certificates.csv`.
This function also allows the users to specify which country do they want to retrieve data from or if they
want to retrieve it from every listed country; so on, a listing of certificates will be returned. This function
helps the user to get to know which certificates are available on Investing.com.
Args:
country (:obj:`str`, optional): name of the country to retrieve all its available certificates from.
Returns:
:obj:`list` - certificates_list:
The resulting :obj:`list` contains the retrieved data from the `certificates.csv` file, which is
a listing of the names of the certificates listed on Investing.com, which is the input for data
retrieval functions as the name of the certificate to retrieve data from needs to be specified.
In case the listing was successfully retrieved, the :obj:`list` will look like::
certificates_list = ['SOCIETE GENERALE CAC 40 X10 31DEC99', 'COMMERZBANK SG 31Dec99', ...]
Raises:
ValueError: raised whenever any of the introduced arguments is not valid.
FileNotFoundError: raised if `certificates.csv` file was not found.
IOError: raised when `certificates.csv` file is missing or empty.
"""
if country is not None and not isinstance(country, str):
raise ValueError("ERR#0025: specified country value not valid.")
resource_package = 'investpy'
resource_path = '/'.join(('resources', 'certificates', 'certificates.csv'))
if pkg_resources.resource_exists(resource_package, resource_path):
certificates = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))
else:
raise FileNotFoundError("ERR#0096: certificates file not found or errored.")
if certificates is None:
raise IOError("ERR#0097: certificates not found or unable to retrieve.")
certificates.drop(columns=['tag', 'id'], inplace=True)
certificates = certificates.where(pd.notnull(certificates), None)
if country is None:
return certificates['name'].tolist()
elif unidecode.unidecode(country.lower()) in certificate_countries_as_list():
return certificates[certificates['country'] == unidecode.unidecode(country.lower())]['name'].tolist()
def certificates_as_dict(country=None, columns=None, as_json=False):
"""
This function retrieves all the available certificates indexed on Investing.com, stored on `certificates.csv`.
This function also allows the user to specify which country do they want to retrieve data from, or from every
listed country; the columns which the user wants to be included on the resulting :obj:`dict`; and the output
of the function will either be a :obj:`dict` or a :obj:`json`.
Args:
country (:obj:`str`, optional): name of the country to retrieve all its available certificates from.
columns (:obj:`list`, optional):
            names of the columns of the certificate data to retrieve <country, name, full_name, symbol, issuer, isin, asset_class, underlying>
as_json (:obj:`bool`, optional):
value to determine the format of the output data which can either be a :obj:`dict` or a :obj:`json`.
Returns:
        :obj:`dict` or :obj:`json` - certificates_dict:
The resulting :obj:`dict` contains the retrieved data if found, if not, the corresponding fields are
filled with `None` values.
In case the information was successfully retrieved, the :obj:`dict` will look like::
{
"country": "france",
"name": "SOCIETE GENERALE CAC 40 X10 31DEC99",
"full_name": "SOCIETE GENERALE EFFEKTEN GMBH ZT CAC 40 X10 LEVERAGE 31DEC99",
"symbol": "FR0011214527",
"issuer": "Societe Generale Effekten GMBH",
"isin": "FR0011214527",
"asset_class": "index",
"underlying": "CAC 40 Leverage x10 NR"
}
Raises:
ValueError: raised whenever any of the introduced arguments is not valid.
FileNotFoundError: raised if `certificates.csv` file was not found.
IOError: raised when `certificates.csv` file is missing or empty.
"""
if country is not None and not isinstance(country, str):
raise ValueError("ERR#0025: specified country value not valid.")
if not isinstance(as_json, bool):
raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.")
resource_package = 'investpy'
resource_path = '/'.join(('resources', 'certificates', 'certificates.csv'))
if pkg_resources.resource_exists(resource_package, resource_path):
certificates = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))
else:
raise FileNotFoundError("ERR#0096: certificates file not found or errored.")
certificates.drop(columns=['tag', 'id'], inplace=True)
certificates = certificates.where(pd.notnull(certificates), None)
if certificates is None:
raise IOError("ERR#0097: certificates not found or unable to retrieve.")
if columns is None:
columns = certificates.columns.tolist()
else:
if not isinstance(columns, list):
raise ValueError("ERR#0020: specified columns argument is not a list, it can just be list type.")
if not all(column in certificates.columns.tolist() for column in columns):
raise ValueError("ERR#0021: specified columns does not exist, available columns are "
"<country, name, full_name, symbol, issuer, isin, asset_class, underlying>")
if country is None:
if as_json:
return json.dumps(certificates[columns].to_dict(orient='records'))
else:
return certificates[columns].to_dict(orient='records')
elif unidecode.unidecode(country.lower()) in certificate_countries_as_list():
if as_json:
return json.dumps(certificates[certificates['country'] == unidecode.unidecode(country.lower())][columns].to_dict(orient='records'))
else:
return certificates[certificates['country'] == unidecode.unidecode(country.lower())][columns].to_dict(orient='records')
def certificate_countries_as_list():
"""
This function retrieves all the available countries to retrieve certificates from, as the listed countries
are the ones indexed on Investing.com. The purpose of this function is to list the countries which
have available certificates according to Investing.com data, since the country parameter is needed when
retrieving data from any certificate available.
Returns:
:obj:`list` - countries:
The resulting :obj:`list` contains all the countries listed on Investing.com with available certificates
to retrieve data from.
In the case that the file reading of `certificate_countries.csv` which contains the names of the available
countries with certificates was successfully completed, the resulting :obj:`list` will look like::
countries = ['france', 'germany', 'italy', 'netherlands', 'sweden']
Raises:
FileNotFoundError: raised if `certificate_countries.csv` file was not found.
IOError: raised when `certificate_countries.csv` file is missing or empty.
"""
resource_package = 'investpy'
resource_path = '/'.join(('resources', 'certificates', 'certificate_countries.csv'))
if pkg_resources.resource_exists(resource_package, resource_path):
countries = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))
else:
raise FileNotFoundError("ERR#0098: certificate countries file not found or errored.")
if countries is None:
raise IOError("ERR#0099: certificate countries not found or unable to retrieve.")
return countries['country'].tolist()
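# Hedged usage sketch (assumes investpy is installed together with its bundled
# certificates CSV resources; all names below are defined in this module):
if __name__ == "__main__":
    print(certificate_countries_as_list())              # e.g. ['france', 'germany', ...]
    df = certificates_as_df(country='france')           # DataFrame of French certificates
    names = certificates_as_list(country='france')      # just the certificate names
    recs = certificates_as_dict(country='france', columns=['name', 'isin'])
    print(df.head(), names[:3], recs[:1])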
|
[
"alvarob96@usal.es"
] |
alvarob96@usal.es
|
6ea1a786a416dbffca32dc43c577825230ed29b5
|
a17ca4ed7c39a995976a6cce94052e1b58115727
|
/fairseq/models/rmmt.py
|
768982806010bf1522e4d7af6693be78c10478db
|
[] |
no_license
|
cocaer/Revisit-MMT
|
f3da52d99318aa3f27d66ffa776207e6bc7264c5
|
dc368d0af8d60270b8f4aaa2f3ac58771c551da3
|
refs/heads/master
| 2023-05-30T12:18:40.280612
| 2021-06-14T07:24:18
| 2021-06-14T07:24:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38,257
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict, namedtuple
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
TransformerDecoderLayer,
TransformerEncoderLayer,
)
import random
from transformers import BertModel, BertConfig
from transformers.tokenization_bert import BertTokenizer
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
class CaptionImageRetriever(nn.Module):
def __init__(self, config, *inputs, **kwargs):
super().__init__()
self.bert = kwargs['bert']
self.args = kwargs['args']
        print('loading image embedding from:', self.args.image_embedding_file)
        embedding_weights = np.load(self.args.image_embedding_file)
        img_vocab, img_dim = embedding_weights.shape
        embeddings_matrix = np.zeros((img_vocab + 1, img_dim))
        embeddings_matrix[1:] = embedding_weights
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.image_vecs = torch.FloatTensor(embeddings_matrix).to(self.device) # 29001, 2048
# self.dropout = nn.Dropout(self.args.retriever_dropout)
self.text_to_hidden = nn.Linear(config.hidden_size, self.args.feature_dim, bias=False)
self.image_to_hidden = nn.Linear(img_dim, self.args.feature_dim, bias=False)
self.scaling = self.args.feature_dim ** -0.5 # scale the dot product as in Transformer
def forward(self, caption_input_ids, caption_segment_ids, caption_input_masks, labels=None):
caption_vec = self.bert(caption_input_ids, caption_input_masks, caption_segment_ids)[-1] # B, bert_dim
caption_vec = self.text_to_hidden(caption_vec) # B, feature_dim
image_vecs = self.image_to_hidden(self.image_vecs) # 29001, feature_dim
caption_vec = caption_vec.unsqueeze(1) # B, 1, feature_dim
dot_product = torch.matmul(caption_vec, image_vecs.t()) # B, 1, 29001
dot_product.squeeze_(1) # B, 29001
# dot_product *= self.scaling
probability = F.softmax(dot_product, dim=-1)
# probability.register_hook(lambda grad: print('probability grad: ', grad))
topk_values, topk_idx = torch.topk(probability, self.args.topk, dim=-1)
# topk_values.register_hook(lambda grad: print('topk_values grad: ', grad[0]))
# print(topk_values.view(topk_values.size(0))[:2])
return probability, topk_values, topk_idx
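# In short: forward() scores the caption against every image vector with a dot
# product, softmaxes over the image vocabulary, and keeps the top-k candidates.
# Schematically (shapes only, restating the lines above):
#   caption_vec: B x 1 x feature_dim
#   image_vecs:  V x feature_dim            (V = img_vocab + 1)
#   scores = caption_vec @ image_vecs.T  -> B x 1 x V -> squeeze -> B x V
#   probability = softmax(scores, dim=-1);  topk over the last dim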
@register_model('static')
class StaticTransformerModel(FairseqEncoderDecoderModel):
def __init__(self, args, encoder, decoder):
super().__init__(encoder, decoder)
self.args = args
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion'),
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
# args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019)
parser.add_argument('--no-cross-attention', default=False, action='store_true',
help='do not perform cross-attention')
parser.add_argument('--cross-self-attention', default=False, action='store_true',
help='perform cross+self-attention')
parser.add_argument('--layer-wise-attention', default=False, action='store_true',
help='perform layer-wise attention (cross-attention or cross+self-attention)')
# args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for encoder')
parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for decoder')
parser.add_argument('--encoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
parser.add_argument('--decoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
parser.add_argument('--layernorm-embedding', action='store_true',
help='add layernorm to embedding')
parser.add_argument('--no-scale-embedding', action='store_true',
help='if True, dont scale embeddings')
# fmt: on
# UVR-NMT Parameter
parser.add_argument('--merge_option', type=str, metavar='STR',
default='uvr')
parser.add_argument('--image_embedding_file', type=str, metavar='STR',
help='image_embedding_file')
parser.add_argument('--image_feature_file', type=str, metavar='STR',
help='image_feature_file')
parser.add_argument('--topk', type=int, metavar='N',
help='topk images')
# options for pre-trained caption_image_matcher
parser.add_argument('--retriever_dropout', type=float, default=0.1,
help='dropout probability for retriever')
parser.add_argument("--feature_dim", default=128, type=int,
help="Hidden size of matching features (for both T/image)")
parser.add_argument("--pretrained_retriever", type=str,
help="file path of the pre-trained retriever model")
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if args.encoder_layers_to_keep:
args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if getattr(args, 'max_source_positions', None) is None:
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if getattr(args, 'max_target_positions', None) is None:
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError('--share-all-embeddings requires a joined dictionary')
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
'--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path):
raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = build_embedding(
tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return cls(args, encoder, decoder)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoder(
args,
tgt_dict,
embed_tokens,
no_encoder_attn=getattr(args, 'no_cross_attention', False),
)
def forward(self, src_tokens, src_lengths, prev_output_tokens, bert_tokens, **kwargs):
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, bert_tokens=bert_tokens, **kwargs)
decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, **kwargs)
return decoder_out
EncoderOut = namedtuple('TransformerEncoderOut', [
'encoder_out', # T x B x C
'encoder_padding_mask', # B x T
'encoder_embedding', # B x T x C
'encoder_states', # List[T x B x C]
])
class SCAttention(nn.Module):
def __init__(self, input_size, hidden_size):
super(SCAttention, self).__init__()
self.hidden_size = hidden_size
self.W = nn.Linear(input_size, hidden_size)
self.map_linear = nn.Linear(hidden_size, hidden_size)
self.init_weights()
def init_weights(self):
nn.init.xavier_uniform_(self.W.weight.data)
self.W.bias.data.fill_(0.1)
def forward(self, passage, p_mask, question, q_mask):
Wp = F.relu(self.W(passage))
Wq = F.relu(self.W(question))
scores = torch.bmm(Wp, Wq.transpose(2, 1))
alpha = torch.nn.functional.softmax(scores, dim=-1)
output = torch.bmm(alpha, Wq)
# print(alpha.shape, Wq.shape, output.shape)
output = self.map_linear(output)
return output, scores
class TransformerEncoder(FairseqEncoder):
def __init__(self, args, dictionary, embed_tokens):
super().__init__(dictionary)
self.register_buffer('version', torch.Tensor([3]))
self.dropout = args.dropout
self.encoder_layerdrop = args.encoder_layerdrop
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
self.embed_positions = PositionalEmbedding(
args.max_source_positions, embed_dim, self.padding_idx,
learned=args.encoder_learned_pos,
) if not args.no_token_positional_embeddings else None
self.layer_wise_attention = getattr(args, 'layer_wise_attention', False)
self.layers = nn.ModuleList([])
self.layers.extend([
TransformerEncoderLayer(args)
for i in range(args.encoder_layers)
])
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
if getattr(args, 'layernorm_embedding', False):
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
# build and load retriever
bert_config = BertConfig.from_pretrained(args.bert_model_name)
self.bert_tokenizer = BertTokenizer.from_pretrained(args.bert_model_name)
bert_encoder = BertModel(bert_config)
self.retriever = CaptionImageRetriever(bert_config, bert=bert_encoder, args=args)
matcher_state_dict = torch.load(args.pretrained_retriever, map_location="cpu")
self.retriever.load_state_dict(matcher_state_dict, strict=False)
        # Turn off backprop for BERT
        for p in self.retriever.bert.parameters():
            p.requires_grad = False
        # Turn off backprop for the whole retriever
        for p in self.retriever.parameters():
            p.requires_grad = False
# print("image embedding processing...")
print('loding image feature from:', args.image_feature_file)
embeding_weights = np.load(args.image_feature_file)
img_vocab, self.img_dim = embeding_weights.shape
embeddings_matrix = np.zeros((img_vocab + 1, self.img_dim))
embeddings_matrix[1:] = embeding_weights
self.img_embeddings = nn.Embedding.from_pretrained(torch.FloatTensor(embeddings_matrix),
freeze=True) # update embedding
self.dense = nn.Linear(self.img_dim, embed_dim)
self.merge_option = args.merge_option
if self.merge_option == "uvr":
self.proj_attention = SCAttention(embed_dim, embed_dim)
self.sigmoid = nn.Sigmoid()
self.gate_dense = nn.Linear(2 * embed_dim, embed_dim)
self.out = open(args.save_dir + '/gate.txt', 'w')
# self.out = open('checkpoints/rmmt.en-fr.tiny/gate.txt', 'w')
def forward_embedding(self, src_tokens):
# embed tokens and positions
x = embed = self.embed_scale * self.embed_tokens(src_tokens)
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
if self.layernorm_embedding:
x = self.layernorm_embedding(x)
x = F.dropout(x, p=self.dropout, training=self.training)
return x, embed
def forward(self, src_tokens, src_lengths, bert_tokens, return_all_hiddens=False, **unused):
if self.layer_wise_attention:
return_all_hiddens = True
x, encoder_embedding = self.forward_embedding(src_tokens)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
text_mask = ~encoder_padding_mask
if not encoder_padding_mask.any():
encoder_padding_mask = None
encoder_states = [] if return_all_hiddens else None
# encoder layers
for layer in self.layers:
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if not self.training or (dropout_probability > self.encoder_layerdrop):
x = layer(x, encoder_padding_mask)
if return_all_hiddens:
encoder_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
if return_all_hiddens:
encoder_states[-1] = x
bert_encoder_padding_mask = bert_tokens.eq(self.bert_tokenizer.pad_token_id)
segments = torch.ones(bert_tokens.size(), dtype=torch.long, device=src_tokens.device)
segments.masked_fill_(bert_encoder_padding_mask, 0)
dot_product, sum_topk_probs, topk_idx = self.retriever(bert_tokens, segments, segments)
# process image-feature first
batch_image_ids = topk_idx
batch_size, num_img = batch_image_ids.size()
        # use -1 (not 0) as the padding id, since image index 0 refers to a real image
image_padding_mask = batch_image_ids.eq(-1)
image_mask = ~image_padding_mask
image_embedding = self.img_embeddings(batch_image_ids) # B*TopK*img_dim
image_embedding = image_embedding.view(batch_size, num_img, self.img_dim) # B, topk, img_dim
text_repr = x.transpose(0, 1) # T x B x C -> B x T x C
image_repr = self.dense(image_embedding) # B, Topk, C
if self.merge_option == "max":
image_repr = torch.max(image_repr, 1)[0]
b, t, c = text_repr.shape
output = image_repr.unsqueeze(1).expand(b, t, c)
assert output.shape[1] == text_repr.shape[1]
merge = torch.cat([text_repr, output], dim=-1)
gate = self.sigmoid(self.gate_dense(merge))
# mask = src_tokens.ne(self.padding_idx).unsqueeze(-1).expand(b, t, c)
# print(gate[mask].flatten().tolist(), file=self.out)
output = text_repr + gate * output
# output = (1 - gate) * text_repr + gate * output
x = output.transpose(0, 1)
return EncoderOut(
encoder_out=x, # T x B x C
encoder_padding_mask=encoder_padding_mask, # B x T
encoder_embedding=encoder_embedding, # B x T x C
encoder_states=encoder_states, # List[T x B x C]
)
def reorder_encoder_out(self, encoder_out, new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if encoder_out.encoder_out is not None:
encoder_out = encoder_out._replace(
encoder_out=encoder_out.encoder_out.index_select(1, new_order)
)
if encoder_out.encoder_padding_mask is not None:
encoder_out = encoder_out._replace(
encoder_padding_mask=encoder_out.encoder_padding_mask.index_select(0, new_order)
)
if encoder_out.encoder_embedding is not None:
encoder_out = encoder_out._replace(
encoder_embedding=encoder_out.encoder_embedding.index_select(0, new_order)
)
if encoder_out.encoder_states is not None:
for idx, state in enumerate(encoder_out.encoder_states):
encoder_out.encoder_states[idx] = state.index_select(1, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions())
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:
self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
if self._future_mask.size(0) < dim:
self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = '{}.embed_positions.weights'.format(name)
if weights_key in state_dict:
print('deleting {0}'.format(weights_key))
del state_dict[weights_key]
state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
for i in range(len(self.layers)):
# update layer norms
self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i))
version_key = '{}.version'.format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
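def _gated_fusion_sketch():
    """Standalone sketch (illustrative only, not part of the original model) of
    the text/image gated fusion done in TransformerEncoder.forward above: the
    pooled image vector is broadcast over every source position and mixed into
    the text states through a learned sigmoid gate."""
    b, t, c = 2, 5, 8
    text_repr = torch.randn(b, t, c)                 # B x T x C text states
    image_repr = torch.randn(b, c)                   # B x C pooled image vector
    gate_dense = nn.Linear(2 * c, c)                 # same role as self.gate_dense
    expanded = image_repr.unsqueeze(1).expand(b, t, c)
    gate = torch.sigmoid(gate_dense(torch.cat([text_repr, expanded], dim=-1)))
    return text_repr + gate * expanded               # fused B x T x C representation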
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super().__init__(dictionary)
self.register_buffer('version', torch.Tensor([3]))
self.dropout = args.dropout
self.decoder_layerdrop = args.decoder_layerdrop
self.share_input_output_embed = args.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = args.decoder_embed_dim
self.output_embed_dim = args.decoder_output_dim
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None
self.embed_positions = PositionalEmbedding(
args.max_target_positions, embed_dim, self.padding_idx,
learned=args.decoder_learned_pos,
) if not args.no_token_positional_embeddings else None
self.cross_self_attention = getattr(args, 'cross_self_attention', False)
self.layer_wise_attention = getattr(args, 'layer_wise_attention', False)
self.layers = nn.ModuleList([])
self.layers.extend([
TransformerDecoderLayer(args, no_encoder_attn)
for _ in range(args.decoder_layers)
])
self.adaptive_softmax = None
self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \
if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None
if args.adaptive_softmax_cutoff is not None:
self.adaptive_softmax = AdaptiveSoftmax(
len(dictionary),
self.output_embed_dim,
options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
dropout=args.adaptive_softmax_dropout,
adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
factor=args.adaptive_softmax_factor,
tie_proj=args.tie_adaptive_proj,
)
elif not self.share_input_output_embed:
self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim))
nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)
if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False):
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
if getattr(args, 'layernorm_embedding', False):
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
def forward(
self,
prev_output_tokens,
encoder_out=None,
incremental_state=None,
features_only=False,
**extra_args
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
**extra_args
)
if not features_only:
x = self.output_layer(x)
return x, extra
def extract_features(
self,
prev_output_tokens,
encoder_out=None,
incremental_state=None,
**unused,
):
# embed positions
positions = self.embed_positions(
prev_output_tokens,
incremental_state=incremental_state,
) if self.embed_positions is not None else None
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
if self.layernorm_embedding:
x = self.layernorm_embedding(x)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
self_attn_padding_mask = None
if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# decoder layers
attn = None
inner_states = [x]
for idx, layer in enumerate(self.layers):
encoder_state = None
if encoder_out is not None:
if self.layer_wise_attention:
encoder_state = encoder_out.encoder_states[idx]
else:
encoder_state = encoder_out.encoder_out
if incremental_state is None:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if not self.training or (dropout_probability > self.decoder_layerdrop):
x, attn = layer(
x,
encoder_state,
encoder_out.encoder_padding_mask if encoder_out is not None else None,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
)
inner_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {'attn': attn, 'inner_states': inner_states}
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
if self.share_input_output_embed:
return F.linear(features, self.embed_tokens.weight)
else:
return F.linear(features, self.embed_out)
else:
return features
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions())
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, '_future_mask')
or self._future_mask is None
or self._future_mask.device != tensor.device
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = '{}.embed_positions.weights'.format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
for i in range(len(self.layers)):
# update layer norms
layer_norm_map = {
'0': 'self_attn_layer_norm',
'1': 'encoder_attn_layer_norm',
'2': 'final_layer_norm'
}
for old, new in layer_norm_map.items():
for m in ('weight', 'bias'):
k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m)
if k in state_dict:
state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k]
del state_dict[k]
version_key = '{}.version'.format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.)
return m
@register_model_architecture('static', 'static')
def base_architecture(args):
args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)
args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)
args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)
args.attention_dropout = getattr(args, 'attention_dropout', 0.)
args.activation_dropout = getattr(args, 'activation_dropout', 0.)
args.activation_fn = getattr(args, 'activation_fn', 'relu')
args.dropout = getattr(args, 'dropout', 0.1)
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)
args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)
args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
args.adaptive_input = getattr(args, 'adaptive_input', False)
args.no_cross_attention = getattr(args, 'no_cross_attention', False)
args.cross_self_attention = getattr(args, 'cross_self_attention', False)
args.layer_wise_attention = getattr(args, 'layer_wise_attention', False)
args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, 'no_scale_embedding', False)
args.layernorm_embedding = getattr(args, 'layernorm_embedding', False)
@register_model_architecture('static', 'static_iwslt_de_en')
def static_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
base_architecture(args)
@register_model_architecture('static', 'static_wmt_en_de')
def static_wmt_en_de(args):
base_architecture(args)
@register_model_architecture('static', 'static_tiny')
def static_tiny(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 128)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 256)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
args.encoder_layers = getattr(args, 'encoder_layers', 4)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 128)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 256)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
args.decoder_layers = getattr(args, 'decoder_layers', 4)
base_architecture(args)
@register_model_architecture('static', 'static_vatex')
def static_vatex(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 512)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 512)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
base_architecture(args)
|
[
"whucs2013wzy@gmail.com"
] |
whucs2013wzy@gmail.com
|
bc1a5861be1270c530f16f8819616c2c8a3b96b2
|
266947fd84eed629ed0c21f6d91134239512afd9
|
/BeginnerContest_B/040.py
|
428cbcd1c832fa505fa0b9cc5bed4714476cd59b
|
[] |
no_license
|
SkiMsyk/AtCoder
|
c86adeec4fa470ec14c1be7400c9fc8b3fb301cd
|
8102b99cf0fb6d7fa304edb942d21cf7016cba7d
|
refs/heads/master
| 2022-09-03T01:23:10.748038
| 2022-08-15T01:19:55
| 2022-08-15T01:19:55
| 239,656,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
n = int(input())
res = n
if n == 1:
print(0)
else:
    # Brute force over pairs (i, j) with i * j <= n: the cost is
    # |i - j| plus the leftover n - i * j; keep the minimum.
    for i in range(1, n):
        for j in range(1, (n // i) + 1):
            res = min(abs(i - j) + n - (i * j), res)
    print(res)
|
[
"sakaimasayuki@sakaimasayukinoMacBook-puro.local"
] |
sakaimasayuki@sakaimasayukinoMacBook-puro.local
|
31962ac06ebd08c37d98e56bbc9ed04d8d609b38
|
6548d0d4dc3e2b6527e1f1a3a4c3189e85f781d4
|
/12yield/test4.py
|
31f8849dcca85a4fecd2481667f8c146ab51ae48
|
[] |
no_license
|
feipeixuan/pynote
|
f9d9d8df265688148ff8a9a7b96c1c467502a876
|
3305216f3ef9c418d8238df046836158da648943
|
refs/heads/master
| 2021-03-06T20:43:32.480689
| 2020-06-11T03:33:42
| 2020-06-11T03:33:42
| 246,222,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
import asyncio
import inspect
c = ""
async def async1():
    global c
    future = async3()
    await future
async def async3():
    print("2222")
a = async1()
# A coroutine object is not iterable, so the original `for i in a:` loops would
# raise TypeError; drive the coroutine manually with send(), like an event loop.
try:
    a.send(None)
except StopIteration:
    print("ssss")  # the coroutine ran to completion in a single step
for i in c:  # c is empty, so this loop body never runs
    print(i)
    break
# coroutines have their own state helper (getgeneratorstate only works on generators)
print(inspect.getcoroutinestate(a))
# print(a)
|
[
"feipeixuan@163.com"
] |
feipeixuan@163.com
|
99fa6f1b7bc13e4e7222ca6cb7133da9c486957e
|
f201134e41f777edce48ed7659f67df106940333
|
/eqSolver.py
|
5cb996f9c92a0a0074a966d8f0fe9949a02435ef
|
[
"MIT"
] |
permissive
|
Alex-Gurung/MathSpeech
|
fd5e5b3a7a0a6056c8078db5d00d595b13ad13e4
|
87a1cf6d7b28847856cb07621a2600e3049f1804
|
refs/heads/master
| 2020-12-24T07:43:17.133638
| 2016-09-25T15:27:23
| 2016-09-25T15:27:23
| 52,043,679
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,034
|
py
|
import speech_recognition as sr #Import the speech recognition library
from sympy import *  # Needed for Symbol() and solve() below
# NOTE: 'keys' and 'results' are referenced later but were never defined in this
# snippet; the mapping below is an illustrative assumption (spoken phrase ->
# symbol). Replacing "equals" with "-" turns "a equals b" into "a - b", which
# solve() treats as a - b = 0.
keys = ["plus", "minus", "times", "divided by", "equals"]
results = ["+", "-", "*", "/", "-"]
def eqSolver():
#Setup speech
r = sr.Recognizer() #Creates a variable to listen to the audio
#Code to get speech
with sr.Microphone() as source: #Need this block every time you want to input speech
#print("Do you want to solve an expression or solve for a variable?") #To be implemented lated with sympy
print("Say your equation(with variable z):")
#audio = input("Input your equation") #For text input, can be moved outside of with statement
#x = Symbol('x')
#diff(audio, x) #For direct derivative typed input
audio = r.listen(source) #Sets a variable called audio to the audio input, to be later interpreted
z = Symbol('z') #Because speech recognition sometimes uses 'x' as 'times', z must be used
####BELOW CODE SHOULD BE CHANGED
try:
        equation = r.recognize_google(audio).lower()  # Lower-case the recognized speech so it matches the phrase dictionary
print(equation) #Prints the equation, check to see if it recognized your speech correctly
for i in range(len(keys)): #Goes through the equation replacing any phrases with their mathematical equivalents
equation = equation.replace(keys[i], results[i])
        equation = equation.strip()  # Removes unnecessary spaces at the ends (shouldn't change anything)
except sr.UnknownValueError: #This is the most common error, try audio again, making certain the program can clearly hear you
print("Google Speech Recognition could not understand audio") #The two excepts below are the standard for google speech recognition
return
except sr.RequestError as e: #Make sure to keep these excepts whenever calling recognize_google(audio)
print("Could not request results from Google Speech Recognition service; {0}".format(e))
return
print(solve(equation, z)) #Solves the equation with a variable 'z' NOTE: must use 'z' and only 'z' as variable
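# Illustrative entry point (an assumption; the original snippet never invokes
# eqSolver itself) so the module can also be run as a script:
if __name__ == '__main__':
    eqSolver()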
|
[
"aag1234@gmail.com"
] |
aag1234@gmail.com
|
2414ae3beb14e12cb9fadc950d80842e81f42b08
|
5b8e75b97b57155eb729f933173b3b51223a21bb
|
/Python001/src/func/funcTest002.py
|
552f73820f8f7cdc5a13c87b45032e8256fd559a
|
[] |
no_license
|
suhongsheng/PythonStudy
|
253d9d181693a3d5c88a329f709e031b1f894cdc
|
56025f4bf107ee2b714cbad773d1398f0ed71a35
|
refs/heads/master
| 2021-01-12T07:15:57.599964
| 2016-12-23T02:16:06
| 2016-12-23T02:16:06
| 76,927,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
def add(a, b):
    return a + b
def ifunc1():
    print("in ifunc1......")
    return 5
def ifunc2():
    print("in ifunc2......")
    return 4
# Arguments are evaluated left to right: ifunc1 runs before ifunc2, then add() returns 9.
print(add(ifunc1(), ifunc2()))
|
[
"suhs@192.168.199.4"
] |
suhs@192.168.199.4
|
1453e30153cf98367fc5b85420afc78a62209898
|
b1c403ad1211221427dddc80a7f15956da498175
|
/0x00-python_variable_annotations/5-sum_list.py
|
ecabba3f646aededae76758584b268ee13eeb8b4
|
[] |
no_license
|
oumaymabg/holbertonschool-web_back_end
|
246dd47b9abdb277d6ef539c9bc38a8f0509554a
|
dce7ff683d8bce9ad986d72948c9e75ca7b80d2a
|
refs/heads/master
| 2023-09-05T22:27:28.961336
| 2021-11-15T21:05:53
| 2021-11-15T21:05:53
| 389,440,104
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
#!/usr/bin/env python3
'''
Module that takes a list of floats and returns their sum as a float
'''
from typing import List
def sum_list(input_list: List[float]) -> float:
'''
Type-annotated function that takes a list of floats and returns its sum as
a float
@input_list: list of floats to sum
Return: the sum of the list as a float
'''
    res = 0.0  # float accumulator, so an empty list still returns a float
for num in input_list:
res += num
return res
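if __name__ == "__main__":
    # Quick illustrative check (not part of the original project files)
    print(sum_list([3.14, 1.11, 2.22]))  # prints a value close to 6.47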
|
[
"oumaymabou257@gmail.com"
] |
oumaymabou257@gmail.com
|
4936290e2ae3b24e8f1ee14bb292c2875b7a63b4
|
c4fc08f1b19bcb39f6d304987e7c0c3a1b12a6b5
|
/golly/Scripts/Python/LifeGenes/lifegenes_core/setupLog.py
|
4423f1889a4dead07d374b9dff90db16ba2ef503
|
[
"MIT"
] |
permissive
|
7yl4r/LifeGenes
|
ba35da389b13f17e6de06f41f8ad016be6707e09
|
cb2928ba9ddadd785fc55be443c117fb34eae967
|
refs/heads/master
| 2020-04-13T21:17:59.191858
| 2014-11-30T18:03:10
| 2014-11-30T18:03:10
| 10,677,365
| 11
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,563
|
py
|
#NOTE: this file only resets when golly is restarted,
# otherwise the log object is retained and reused,
# appending to the file as the script is run multiple times
from os.path import expanduser,join
from os import makedirs
import logging
from LifeGenes.lifegenes_core.__util.appdirs import user_log_dir
def setupLog(logName='noName.txt'):
logDir = user_log_dir('LifeGenes','7yl4r-ware')
try:
makedirs(logDir)
except OSError:
pass # probably the dir already exists...
logPath = join(logDir,logName)
logging.basicConfig(filename=logPath,\
level=logging.DEBUG,\
format='%(asctime)s %(levelname)s:%(message)s',\
filemode='w')
# # assume that you want your logs in LifeGenes source which is in your home directory
# # (this works best on my linux machine)
# home = expanduser("~")
# logDir = home+'/LifeGenes/__logs'
# try:
# mkdir(logDir)
# except OSError:
# pass # probably the dir already exists...
#
# logPath = logDir+'/'+logName
# print str(logging.getLogger())
# logging.basicConfig(filename=logPath,\
# level=logging.DEBUG,\
# format='%(asctime)s %(levelname)s:%(message)s',\
# filemode='w')
try:
import golly as g
g.show('created .log at '+str(logPath))
except ImportError:
        print('created .log at ' + str(logPath))
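if __name__ == '__main__':
    # Minimal self-test (an assumption; the module is normally imported from
    # golly scripts): writes a log file under the per-user log directory.
    setupLog('setupLog_selftest.txt')
    logging.info('setupLog self-test entry')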
|
[
"murray.tylar@gmail.com"
] |
murray.tylar@gmail.com
|
6d77335758946fb430e454dce90f2acb4f05c0ad
|
0ef8eab189c3a234dadb165e4e57cce24091b002
|
/src/utils/bot.py
|
5784c6001491885b6cd9c5b6420da4729b27eb3e
|
[
"MIT"
] |
permissive
|
TrendingTechnology/wm_bot
|
aca36650e8459593ba38e12928a933f6c14c62f1
|
120d529bca4b721506fdbd5f1ba84a84df51e74c
|
refs/heads/main
| 2023-07-02T16:24:19.684238
| 2021-08-08T14:08:23
| 2021-08-08T14:08:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,935
|
py
|
"""File for things releated to the bot."""
import asyncio
import os
from operator import attrgetter
from typing import Generator, List, Union
import aiogoogletrans
import aiohttp
import async_cleverbot
import async_cse
import asyncdagpi
import asyncpg
import discord
from akinator.async_aki import Akinator
from discord.ext import commands
from dotenv import load_dotenv
from playsound import PlaysoundException, playsound
from utils.classes import BlackListed, Config, CustomEmojis
from utils.functions import load_json, read_file
__all__ = ("WMBot", "WMBotContext")
async def get_prefix(_bot, message):
"""Use this to fetch the current servers prefix from the db.
Parameters
----------
_bot (commands.Bot): The bot to get the prefix of
message (discord.Message): the message to get some metadata
Returns
-------
typing.Union[str, List[str]]: prefix
"""
if isinstance(message.channel, discord.DMChannel):
return _bot.config.dm_prefix
if message.author == _bot.owner:
return _bot.config.owner_prefix
prefix_for_this_guild = await _bot.db.fetchrow(
"""
SELECT prefix
FROM guilds
WHERE id=$1
""",
message.guild.id,
)
if prefix_for_this_guild is None:
        await _bot.db.execute(
            """
            INSERT INTO guilds (id, prefix)
            VALUES ($1, $2)
            """,
            message.guild.id,
            _bot.config.default_prefix,  # store the same default we fall back to below
        )
prefix_for_this_guild = {"prefix": _bot.config.default_prefix}
prefix_return = str(prefix_for_this_guild["prefix"])
return commands.when_mentioned_or(prefix_return)(_bot, message)
class WMBot(commands.Bot):
"""A subclass of commands.Bot."""
def __init__(self, *, specified_loop=None):
"""Makes a instance of WMBot."""
intents = discord.Intents(
members=True,
presences=True,
guilds=True,
emojis=True,
invites=True,
messages=True,
reactions=True,
voice_states=True,
)
loop = asyncio.get_event_loop()
session = aiohttp.ClientSession(loop=loop)
# Load all the environment variables
load_dotenv("config/Bot/token.env")
load_dotenv("config/Apis/tokens.env")
load_dotenv("config/Database/db.env")
# Read the emoji file
self.emoji_config = CustomEmojis.from_json(read_file("config/General/emojis.json"))
# Read the config file
self.config = Config.from_json(read_file("config/General/config.json"))
# Set the HTTPException error codes dict to a custom property for easy access
self.httpexception_codes = load_json("assets/data/httpexception_codes.json", make_keys_int=True)
# APIs
self.cleverbot = async_cleverbot.Cleverbot(
os.environ["cleverbot"],
session=session,
context=async_cleverbot.DictContext(),
)
self.dagpi = asyncdagpi.Client(os.environ["dagpi"])
self.google_api = async_cse.Search(os.environ["google_search"], session=session)
self.translate_api = aiogoogletrans.Translator()
self.aki = Akinator()
self.apis = ["OMDB", "tenor", "owlbot", "gender_api"]
self.api_keys = {api: os.environ[api.lower()] for api in self.apis}
# For the snipe command
self.snipes = {}
# For tracking commands
self.command_uses = {}
# For api requests
self.session = session
super().__init__(
command_prefix=get_prefix,
case_insensitive=True,
intents=intents,
session=session,
loop=specified_loop or loop,
strip_after_prefix=True,
owner_ids=self.config.owner_ids,
)
# For before_invoke
self._before_invoke = self.before_invoke
# For blacklisted check
self._checks.append(self.bot_check)
async def get_context(self, message: discord.Message, *, cls: commands.Context = None) -> commands.Context:
"""Return the custom context."""
return await super().get_context(message, cls=cls or WMBotContext)
async def close(self):
await self.session.close()
await super().close()
@property
def owner(self) -> discord.User:
"""Call to get the owner of the bot."""
if self.config.owner_id:
return self.get_user(self.config.owner_id)
if self.owner_ids:
return self.get_user(self.config.owner_ids[0])
return None
@property
def members(self) -> Generator[discord.Member, None, None]:
"""Use this to get all the members of the bot"""
for guild in self.guilds:
for member in guild.members:
yield member
@property
    def member_count(self) -> int:
        """Total member count across all the guilds the bot is in"""
        return sum(g.member_count for g in self.guilds)
@property
def humans(self) -> Generator[discord.User, None, None]:
"""Use this to get all the members of the bot"""
for user in self.users:
if not user.bot:
yield user
@property
def bots(self) -> Generator[discord.User, None, None]:
"""Use this to get all the members of the bot"""
for user in self.users:
if user.bot:
yield user
def get_config_emoji(self, emoji_name: str) -> str:
"""Gets a emoji from the bot config.
Parameters
----------
emoji_name : str
the emoji that it needs to get
Returns
-------
str
the emoji that it got, can be empty if it was not found
"""
return attrgetter(emoji_name)(self.emoji_config)
def get_user_named(self, name: str) -> Union[discord.User, None]:
"""Gets a user with the given name from the bot
Parameters
----------
name : str
The name of the user, can have the discriminator
Returns
-------
Union[discord.User, None]
The user if it was found, otherwise None
"""
result = None
users = self.users
if len(name) > 5 and name[-5] == "#":
# The 5 length is checking to see if #0000 is in the string,
# as a#0000 has a length of 6, the minimum for a potential
# discriminator lookup.
potential_discriminator = name[-4:]
# do the actual lookup and return if found
# if it isn't found then we'll do a full name lookup below.
result = discord.utils.get(users, name=name[:-5], discriminator=potential_discriminator)
if result is not None:
return result
        def pred(user):
            # discord.User objects have no 'nick' attribute, so guard with getattr
            return getattr(user, "nick", None) == name or user.name == name
return discord.utils.find(pred, users)
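    @staticmethod
    def _split_name_discriminator(name: str):
        """Illustrative helper (hypothetical, not used elsewhere in the bot):
        split a 'user#1234' style name into (username, discriminator),
        mirroring the lookup logic in get_user_named above."""
        if len(name) > 5 and name[-5] == "#":
            return name[:-5], name[-4:]
        return name, None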
async def before_invoke(self, ctx):
"""
Starts typing in the channel to let the user know that the bot received the command and is working on it.
Parameters
----------
ctx : commands.Context
Represents the context in which a command is being invoked under.
"""
await ctx.channel.trigger_typing()
async def on_command_completion(self, ctx):
"""Saves the command usage to database"""
command_name = ctx.command.qualified_name
usage = await self.db.fetchrow(
"""
SELECT usage
FROM usages
WHERE name=$1
""",
command_name,
)
if usage is None:
await self.db.execute(
"""
INSERT INTO usages (usage, name)
VALUES ($1, $2)
""",
1,
command_name,
)
else:
usage = usage["usage"]
usage += 1
await self.db.execute(
"""
UPDATE usages
SET usage = $2
WHERE name = $1;
""",
command_name,
usage,
)
async def on_command(self, ctx):
"""Saves the details about the user of the command
Parameters
----------
ctx (commands.Context): Represents the context in which
a command is being invoked under.
"""
user_id = ctx.author.id
usage = await self.db.fetchrow(
"""
SELECT usage
FROM users
WHERE user_id=$1
""",
user_id,
)
if usage is None:
await self.db.execute(
"""
INSERT INTO users (usage, user_id)
VALUES ($1, $2)
""",
1,
user_id,
)
else:
usage = usage["usage"]
usage += 1
await self.db.execute(
"""
UPDATE users
SET usage = $2
WHERE user_id = $1;
""",
user_id,
usage,
)
async def bot_check(self, ctx):
"""Checks if the user is blocked
Parameters
----------
ctx (commands.Context): the context in which the command was executed in
Raises
-------
        BlackListed: error to be caught in the error handler
Returns
-------
bool: if the user can use the command
"""
blocked = await self.db.fetchrow(
"""
SELECT *
FROM blocks
WHERE user_id=$1
""",
ctx.author.id,
)
if blocked is None:
return True
raise BlackListed
class WMBotContext(commands.Context):
"""A subclass of commands.Context."""
@property
    def owner(self) -> discord.User:
        """Call to get the owner of the bot."""
        return self.bot.get_user(self.bot.config.owner_ids[0])
    async def send(self, *args, **kwargs) -> discord.Message:
        """Use this to send a message, replying to the invoking message when possible."""
        if kwargs.pop("no_reply", False):
            # Plain send path: strip the custom flag and defer to discord.py's
            # own send, avoiding the infinite recursion the original code had.
            return await super().send(*args, **kwargs)
        # Wrapping this in a try/except block because the original message can be
        # deleted, and if it is deleted then we can't reply and it raises an error.
        try:
            # First we try to reply
            message = await self.reply(*args, **kwargs)
        except discord.NotFound:
            # If the original message was deleted, we just send it normally
            message = await self.send(*args, **kwargs, no_reply=True)
if not hasattr(self.bot, "command_uses"):
# If for some reason the command_uses is not there, we just add it
self.bot.command_uses = {}
# We add the current message to the command_uses dictionary for tracking
self.bot.command_uses[self.message] = message
# We return the message because this is the intended behaviour of commands.Context.send
return message
async def create_db_pool(bot):
"""Connects to the db and sets it as a variable"""
bot.db = await asyncpg.create_pool(
host=os.environ["host"],
database=os.environ["database"],
user=os.environ["user"],
password=os.environ["password"],
ssl=os.environ["ssl"],
)
# We try to play a sound to let the user know that the bot is online
# If the sound playing fails we just ignore it
try:
playsound("assets/sounds/connected_to_database.mp3", block=False)
except PlaysoundException:
pass
|
[
"63045920+wasi-master@users.noreply.github.com"
] |
63045920+wasi-master@users.noreply.github.com
|
ec148790433155e8f149842682b6f0f1084b7e33
|
124df74bd27893e5d0de7f6ea48f5b2d7ac34c4f
|
/Chapter12/Python/03-diagnose-missing-values-in-python.py
|
048d2500e28511c3c619b54239096bfdd79b2ba8
|
[
"MIT"
] |
permissive
|
Micseb/Extending-Power-BI-with-Python-and-R
|
5dc3cf4051312e4d0e5bc915c17c8bb735e0a39b
|
12791b8f1499f70c9c7b0c9aeb2016d12e41f845
|
refs/heads/main
| 2023-08-24T07:08:43.000627
| 2021-11-01T07:50:24
| 2021-11-01T07:50:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,564
|
py
|
# THIS SCRIPT IS SUPPOSED TO RUN IN A JUPYTER NOTEBOOK (WE USED VS CODE)
# %%
import pandas as pd
import missingno as msno
from upsetplot import UpSet
import matplotlib.pyplot as plt
# %%
def miss_var_summary(data):
n_missing = data.isnull().sum()
    percent_missing = data.isnull().sum() * 100 / len(data)
missing_value_df = pd.DataFrame({'variable': data.columns,
'n_miss' : n_missing,
'pct_miss': percent_missing}).sort_values('n_miss', ascending=False)
return(missing_value_df)
def upsetplot_miss(data):
null_cols_df = data.loc[:, data.isnull().any()]
missingness = pd.isna(null_cols_df).rename(columns=lambda x: x+'_NA')
for i, col in enumerate(missingness.columns):
null_cols_df = null_cols_df.set_index(missingness[col], append=i != 0)
tuple_false_values = (False, ) * sum(data.isnull().any())
null_cols_only_miss_df = null_cols_df.loc[null_cols_df.index != tuple_false_values, :]
upset = UpSet(null_cols_only_miss_df, subset_size='count',
show_counts = True, sort_by='cardinality')
return(upset)
# %%
df = pd.read_csv('http://bit.ly/titanic-dataset-csv')
# %%
msno.matrix(df)
# In case you're not using a Jupyter notebook run also the following:
# plt.show()
# %%
miss_var_summary(df)
# %%
upset_chart = upsetplot_miss(df)  # renamed so matplotlib's plt is not shadowed
upset_chart.plot()
# In case you're not using a Jupyter notebook run also the following:
# plt.show()
# %%
|
[
"lucazavarella@outlook.com"
] |
lucazavarella@outlook.com
|
2519e2c62c3f64feb7e828eeb460fca8cec23ba4
|
b5ee8f9645991fd427c583f4823b2f8a6d43c133
|
/Rdefish.py
|
69e1d9aeb87757041f166e1ef2aa5973ef69bdc4
|
[] |
no_license
|
Logaesh/CaptureId_Server
|
d098e06dd0324468016749f723893392d8ec6247
|
1c02c2e78dfcba28e62f26ce5c61bf433d869423
|
refs/heads/master
| 2020-03-28T04:16:15.367058
| 2018-09-06T16:38:18
| 2018-09-06T16:38:18
| 147,704,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,929
|
py
|
# You should replace the DIM/K/D lines below with the output of the calibration step
import cv2
assert cv2.__version__[0] == '3', 'The fisheye module requires opencv version >= 3.0.0'
import imutils
from number import transform_exe
import numpy as np
import re
import os
import glob
import sys
import time
#**House warming**DIM=(2592, 1944)
#K=np.array([[1296.362306990352, 0.0, 1473.832773002673], [0.0, 1300.4231696636355, 1406.8315980526995], [0.0, 0.0, 1.0]])
#D=np.array([[-0.03424083879891268], [0.032424581500647406], [-0.026989753296597918], [0.006958292392107039]])
#dot world
DIM=(3280, 2464)
K=np.array([[1629.0410564926335, 0.0, 1722.2789176387014], [0.0, 1630.2284745441577, 820.3114158477711], [0.0, 0.0, 1.0]])
D=np.array([[0.004592385780279527], [-0.07056727309628642], [0.10373344044130645], [-0.05913477899531448]])
Raw_image_path = '/home/pi/Project_ocr/IMAGE SERVER FILES/images/fisheye/'
img_destination_path = '/home/pi/CAPTUREiD iMAGES/'
def undistort():
    if os.listdir(Raw_image_path):
        print("Directory is not empty")
        time.sleep(5)
        count = 0
        print("defishing starts...")
        os.chdir(Raw_image_path)
        images = glob.glob('*.jpg')
        for fname in images:
            print("processing : " + fname)
            completeName = Raw_image_path + fname
            print("complete name : " + completeName)
            img = cv2.imread(completeName)
            h, w = img.shape[:2]
            # Build per-pixel remap tables from the fisheye intrinsics K and D
            map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, DIM, cv2.CV_16SC2)
            #fname = "df"+fname
            undistorted_img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
            count = count + 1
            path = img_destination_path + fname
            print(path)
            cv2.imwrite(path, undistorted_img)
            #****************************************************************
            g = (stri, confin) = transform_exe(path, fname)
            print(g)
if __name__ == '__main__':
    while True:
        undistort()
        time.sleep(1)  # avoid busy-spinning when no new images have arrived
|
[
"logaesh463@gmail.com"
] |
logaesh463@gmail.com
|
8c651b2ddf4b8cadc0a92b94e3d24a0ef5166507
|
b60ef7e69237a2dc2b9b7da89d2435565236255a
|
/store/migrations/0002_auto_20210512_1341.py
|
e97ab2487503d9498f9f40d31d437053c716e84d
|
[] |
no_license
|
Sudhanshu-jena/Eshop
|
c1ddee9c23da97ca46ff050de36274f491e30408
|
b50812046c34a5a1bc12999dac5f0d30decc5849
|
refs/heads/main
| 2023-06-01T15:12:04.071719
| 2021-06-10T16:10:01
| 2021-06-10T16:10:01
| 372,899,634
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 675
|
py
|
# Generated by Django 3.2.2 on 2021-05-12 08:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('store', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
],
),
migrations.AlterField(
model_name='product',
name='image',
field=models.ImageField(upload_to='uploads/products/'),
),
]
|
[
"63591527+Sudhanshu-jena@users.noreply.github.com"
] |
63591527+Sudhanshu-jena@users.noreply.github.com
|
cc6024a350374cdff960105a0796e9522377275c
|
1b78a071c2134beafc265b839ba8acba63142be2
|
/intersight/models/connector_ssh_config_ref.py
|
b2b2e61309c6a25006f62c8ec863fd1c1748cfe8
|
[
"Apache-2.0"
] |
permissive
|
dyoshiha/intersight-python
|
59c2ed3f751726a1d7c0e4254f1203e6546f1d47
|
01d1abcf8a9dcee0fe9150cdec70eb39d76ca290
|
refs/heads/master
| 2020-12-30T07:32:16.452334
| 2020-02-03T21:32:36
| 2020-02-03T21:32:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,720
|
py
|
# coding: utf-8
"""
Cisco Intersight OpenAPI specification.
The Cisco Intersight OpenAPI specification.
OpenAPI spec version: 1.0.9-1295
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ConnectorSshConfigRef(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'object_type': 'str',
'moid': 'str',
'selector': 'str'
}
attribute_map = {
'object_type': 'ObjectType',
'moid': 'Moid',
'selector': 'Selector'
}
def __init__(self, object_type=None, moid=None, selector=None):
"""
ConnectorSshConfigRef - a model defined in Swagger
"""
self._object_type = None
self._moid = None
self._selector = None
if object_type is not None:
self.object_type = object_type
if moid is not None:
self.moid = moid
if selector is not None:
self.selector = selector
@property
def object_type(self):
"""
Gets the object_type of this ConnectorSshConfigRef.
The Object Type of the referenced REST resource.
:return: The object_type of this ConnectorSshConfigRef.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this ConnectorSshConfigRef.
The Object Type of the referenced REST resource.
:param object_type: The object_type of this ConnectorSshConfigRef.
:type: str
"""
self._object_type = object_type
@property
def moid(self):
"""
Gets the moid of this ConnectorSshConfigRef.
The Moid of the referenced REST resource.
:return: The moid of this ConnectorSshConfigRef.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this ConnectorSshConfigRef.
The Moid of the referenced REST resource.
:param moid: The moid of this ConnectorSshConfigRef.
:type: str
"""
self._moid = moid
@property
def selector(self):
"""
Gets the selector of this ConnectorSshConfigRef.
An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.
:return: The selector of this ConnectorSshConfigRef.
:rtype: str
"""
return self._selector
@selector.setter
def selector(self, selector):
"""
Sets the selector of this ConnectorSshConfigRef.
An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.
:param selector: The selector of this ConnectorSshConfigRef.
:type: str
"""
self._selector = selector
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ConnectorSshConfigRef):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
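if __name__ == "__main__":
    # Minimal usage sketch (illustrative values): a reference carries either a
    # Moid or an OData 'selector'; if 'moid' is set, 'selector' is ignored.
    ref = ConnectorSshConfigRef(object_type="connector.SshConfig",
                                selector="Serial eq '3AA8B7T11'")
    print(ref.to_str())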
|
[
"ucs-build@github.com"
] |
ucs-build@github.com
|
67cf83d02f4bff6eabea184f22477d015d40f62b
|
cd29f7fb82cb312766102fc33bd920d7a0321ebb
|
/manage.py
|
2789e0ddd1e30b5b02faae2d9a9c616e46ce4b4f
|
[] |
no_license
|
vincent7bedard/camp-nextgen
|
5175a2d6dd7acc86bd0ad1615601d3ce0e58b4b6
|
ad8a6a357ab8ab84f03d814c0fd9db32ec0a51bd
|
refs/heads/master
| 2021-10-10T17:25:38.295090
| 2019-01-14T16:31:07
| 2019-01-14T16:31:07
| 117,355,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 809
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "campnextgen.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"35401923+vincent7bedard@users.noreply.github.com"
] |
35401923+vincent7bedard@users.noreply.github.com
|
62681087bb51e21c1acf70c7608ba6d104d0c96e
|
9c07d422c7539d6b44b785b2c8c61e077af4f51e
|
/tests/unit/model_selection/test_data_preparation.py
|
c9ffdc8d0d3cc59910a399a9d13aa27180cff0dd
|
[
"MIT"
] |
permissive
|
therhaag/hcrystalball
|
9b82510204c53ff016fdf0155edbfb0d7d1dc2a7
|
666ad093a1353ca152d1490f26362fd089ee3a56
|
refs/heads/master
| 2022-11-12T00:49:05.813119
| 2020-07-03T10:36:44
| 2020-07-03T10:36:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,121
|
py
|
import pytest
import pandas as pd
from pandas.util.testing import assert_frame_equal
from hcrystalball.model_selection import (
partition_data,
partition_data_by_values,
filter_data,
prepare_data_for_training,
)
def test_partition_data(test_data_raw):
n_region = 2
n_plant = 3
n_product = 4
partition_by = ["Region", "Plant", "Product"]
partitions = partition_data(test_data_raw, partition_by)
assert isinstance(partitions, dict)
assert "labels" in partitions
assert isinstance(partitions["labels"], tuple)
assert len(partitions["labels"]) == n_region * n_plant * n_product
assert "data" in partitions
assert isinstance(partitions["data"], tuple)
assert len(partitions["data"]) == n_region * n_plant * n_product
# Note that here it is assumed that the order of nesting is the order of the partition_by columns
ind = 0
for ir in range(n_region):
region_value = "region_" + str(ir)
for ip in range(n_plant):
plant_value = "plant_" + str(ip)
for ipr in range(n_product):
product_value = "product_" + str(ipr)
assert partitions["labels"][ind]["Region"] == region_value
assert partitions["labels"][ind]["Plant"] == plant_value
assert partitions["labels"][ind]["Product"] == product_value
mask = (
(test_data_raw["Region"] == region_value)
& (test_data_raw["Plant"] == plant_value)
& (test_data_raw["Product"] == product_value)
)
                df_tmp = test_data_raw.loc[mask, :].drop(["Region", "Plant", "Product"], axis=1)
assert_frame_equal(df_tmp, partitions["data"][ind])
ind += 1
def test_partition_data_by_values(test_data_raw):
res = partition_data_by_values(
test_data_raw,
column="Plant",
partition_values=["plant_0", "plant_23"],
default_df=pd.DataFrame(
{"Plant": ["dummy"], "Region": ["dummy"], "Product": ["dummy"], "Quantity": [0.0],}
),
)
assert res["labels"][0]["Plant"] == "plant_0"
assert len(res["data"][0]) == 80
assert res["labels"][1]["Plant"] == "plant_23"
assert len(res["data"][1]) == 1
def test_filter_data_include(test_data_raw):
rules = {"Plant": ["plant_0", "plant_1"], "Region": ["region_0"]}
df = filter_data(test_data_raw, include_rules=rules)
assert isinstance(df, pd.DataFrame)
for key, value in rules.items():
assert value == list(df[key].unique())
with pytest.raises(TypeError):
_ = filter_data(test_data_raw, include_rules=[])
def test_filter_data_exclude(test_data_raw):
rules = {"Plant": ["plant_0", "plant_1"], "Region": ["region_0"]}
df = filter_data(test_data_raw, exclude_rules=rules)
assert isinstance(df, pd.DataFrame)
for key, value in rules.items():
filtered_values = list(df[key].unique())
for ival in value:
assert ival not in filtered_values
with pytest.raises(TypeError):
_ = filter_data(test_data_raw, exclude_rules=[])
def test_filter_data_include_and_exclude(test_data_raw):
include_rules = {"Plant": ["plant_0"]}
exclude_rules = {"Region": ["region_0"]}
df = filter_data(test_data_raw, include_rules=include_rules, exclude_rules=exclude_rules)
assert isinstance(df, pd.DataFrame)
for key, value in exclude_rules.items():
filtered_values = list(df[key].unique())
for ival in value:
assert ival not in filtered_values
for key, value in include_rules.items():
filtered_values = list(df[key].unique())
for ival in value:
assert ival in filtered_values
def test_filter_data_include_and_exclude_overlapping_conditions(test_data_raw):
include_rules = {"Plant": ["plant_0", "plant_1"]}
exclude_rules = {"Plant": ["plant_1"], "Region": ["region_0"]}
with pytest.raises(ValueError):
_ = filter_data(test_data_raw, include_rules=include_rules, exclude_rules=exclude_rules)
@pytest.mark.parametrize(
"unprepared_data, prepared_data, target_col_name, partition_columns, expected_error",
[
("", "", "delivery_quantity", ["cem_type"], None),
("", "without_logical_partition", "delivery_quantity", [], None),
],
indirect=["unprepared_data", "prepared_data"],
)
def test_prepare_data_for_training(
unprepared_data, prepared_data, target_col_name, partition_columns, expected_error
):
if expected_error is not None:
with pytest.raises(expected_error):
result = prepare_data_for_training(
unprepared_data, frequency="D", partition_columns=partition_columns
)
else:
result = prepare_data_for_training(
unprepared_data, frequency="D", partition_columns=partition_columns
)
prepared_data = prepared_data.rename({"target": target_col_name}, axis=1)
assert_frame_equal(prepared_data, result, check_like=True)
|
[
"michal.chromcak@heidelbergcement.com"
] |
michal.chromcak@heidelbergcement.com
|
1e51a2ec077d036a2f94ac256cd6abd66c5beceb
|
711756b796d68035dc6a39060515200d1d37a274
|
/output_exocyst/optimized_15802_notags.py
|
3085e9e5f5adceda06f3afd8f41f11354495ad76
|
[] |
no_license
|
batxes/exocyst_scripts
|
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
|
a6c487d5053b9b67db22c59865e4ef2417e53030
|
refs/heads/master
| 2020-06-16T20:16:24.840725
| 2016-11-30T16:23:16
| 2016-11-30T16:23:16
| 75,075,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,777
|
py
|
import _surface
import chimera
try:
    import chimera.runCommand
except ImportError:
    pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Sec3_0" not in marker_sets:
s=new_marker_set('Sec3_0')
marker_sets["Sec3_0"]=s
s= marker_sets["Sec3_0"]
mark=s.place_marker((543.746, 619.522, 541.631), (0.21, 0.49, 0.72), 2)
if "Sec3_1" not in marker_sets:
s=new_marker_set('Sec3_1')
marker_sets["Sec3_1"]=s
s= marker_sets["Sec3_1"]
mark=s.place_marker((574.633, 619.107, 534.571), (0.21, 0.49, 0.72), 2)
if "Sec3_2" not in marker_sets:
s=new_marker_set('Sec3_2')
marker_sets["Sec3_2"]=s
s= marker_sets["Sec3_2"]
mark=s.place_marker((600.116, 619.125, 553.323), (0.21, 0.49, 0.72), 2)
if "Sec3_3" not in marker_sets:
s=new_marker_set('Sec3_3')
marker_sets["Sec3_3"]=s
s= marker_sets["Sec3_3"]
mark=s.place_marker((610.26, 603.421, 574.561), (0.21, 0.49, 0.72), 2)
if "Sec3_4" not in marker_sets:
s=new_marker_set('Sec3_4')
marker_sets["Sec3_4"]=s
s= marker_sets["Sec3_4"]
mark=s.place_marker((618.081, 588.788, 597.471), (0.21, 0.49, 0.72), 2)
if "Sec3_5" not in marker_sets:
s=new_marker_set('Sec3_5')
marker_sets["Sec3_5"]=s
s= marker_sets["Sec3_5"]
mark=s.place_marker((621.866, 577.263, 623.012), (0.21, 0.49, 0.72), 2)
if "Sec3_6" not in marker_sets:
s=new_marker_set('Sec3_6')
marker_sets["Sec3_6"]=s
s= marker_sets["Sec3_6"]
mark=s.place_marker((624.04, 566.178, 648.929), (0.21, 0.49, 0.72), 2)
if "Sec5_0" not in marker_sets:
s=new_marker_set('Sec5_0')
marker_sets["Sec5_0"]=s
s= marker_sets["Sec5_0"]
mark=s.place_marker((565.697, 631.103, 565.108), (0.6, 0.31, 0.64), 2)
if "Sec5_1" not in marker_sets:
s=new_marker_set('Sec5_1')
marker_sets["Sec5_1"]=s
s= marker_sets["Sec5_1"]
mark=s.place_marker((586.654, 631.072, 583.873), (0.6, 0.31, 0.64), 2)
if "Sec5_2" not in marker_sets:
s=new_marker_set('Sec5_2')
marker_sets["Sec5_2"]=s
s= marker_sets["Sec5_2"]
mark=s.place_marker((594.13, 612.488, 603.608), (0.6, 0.31, 0.64), 2)
if "Sec5_3" not in marker_sets:
s=new_marker_set('Sec5_3')
marker_sets["Sec5_3"]=s
s= marker_sets["Sec5_3"]
mark=s.place_marker((583.317, 587.234, 609.621), (0.6, 0.31, 0.64), 2)
if "Sec5_4" not in marker_sets:
s=new_marker_set('Sec5_4')
marker_sets["Sec5_4"]=s
s= marker_sets["Sec5_4"]
mark=s.place_marker((561.636, 569.389, 610.934), (0.6, 0.31, 0.64), 2)
if "Sec5_5" not in marker_sets:
s=new_marker_set('Sec5_5')
marker_sets["Sec5_5"]=s
s= marker_sets["Sec5_5"]
mark=s.place_marker((541.045, 550.839, 615.6), (0.6, 0.31, 0.64), 2)
if "Sec6_0" not in marker_sets:
s=new_marker_set('Sec6_0')
marker_sets["Sec6_0"]=s
s= marker_sets["Sec6_0"]
mark=s.place_marker((560.9, 611.641, 595.419), (1, 1, 0.2), 2)
if "Sec6_1" not in marker_sets:
s=new_marker_set('Sec6_1')
marker_sets["Sec6_1"]=s
s= marker_sets["Sec6_1"]
mark=s.place_marker((576.625, 599.171, 567.969), (1, 1, 0.2), 2)
if "Sec6_2" not in marker_sets:
s=new_marker_set('Sec6_2')
marker_sets["Sec6_2"]=s
s= marker_sets["Sec6_2"]
mark=s.place_marker((592.418, 588.62, 539.852), (1, 1, 0.2), 2)
if "Sec6_3" not in marker_sets:
s=new_marker_set('Sec6_3')
marker_sets["Sec6_3"]=s
s= marker_sets["Sec6_3"]
mark=s.place_marker((605.986, 577.966, 510.907), (1, 1, 0.2), 2)
if "Sec6_4" not in marker_sets:
s=new_marker_set('Sec6_4')
marker_sets["Sec6_4"]=s
s= marker_sets["Sec6_4"]
mark=s.place_marker((619.517, 567.197, 482.001), (1, 1, 0.2), 2)
if "Sec6_5" not in marker_sets:
s=new_marker_set('Sec6_5')
marker_sets["Sec6_5"]=s
s= marker_sets["Sec6_5"]
mark=s.place_marker((628.985, 556.766, 451.489), (1, 1, 0.2), 2)
if "Sec8_0" not in marker_sets:
s=new_marker_set('Sec8_0')
marker_sets["Sec8_0"]=s
s= marker_sets["Sec8_0"]
mark=s.place_marker((567.973, 546.159, 582.16), (0.65, 0.34, 0.16), 2)
if "Sec8_1" not in marker_sets:
s=new_marker_set('Sec8_1')
marker_sets["Sec8_1"]=s
s= marker_sets["Sec8_1"]
mark=s.place_marker((585.662, 567.544, 577.756), (0.65, 0.34, 0.16), 2)
if "Sec8_2" not in marker_sets:
s=new_marker_set('Sec8_2')
marker_sets["Sec8_2"]=s
s= marker_sets["Sec8_2"]
mark=s.place_marker((599.804, 551.597, 596.077), (0.65, 0.34, 0.16), 2)
if "Sec8_3" not in marker_sets:
s=new_marker_set('Sec8_3')
marker_sets["Sec8_3"]=s
s= marker_sets["Sec8_3"]
mark=s.place_marker((611.766, 536.491, 616.54), (0.65, 0.34, 0.16), 2)
if "Sec8_4" not in marker_sets:
s=new_marker_set('Sec8_4')
marker_sets["Sec8_4"]=s
s= marker_sets["Sec8_4"]
mark=s.place_marker((632.997, 528.213, 632.981), (0.65, 0.34, 0.16), 2)
if "Sec8_5" not in marker_sets:
s=new_marker_set('Sec8_5')
marker_sets["Sec8_5"]=s
s= marker_sets["Sec8_5"]
mark=s.place_marker((656.568, 527.011, 648.213), (0.65, 0.34, 0.16), 2)
if "Sec10_0" not in marker_sets:
s=new_marker_set('Sec10_0')
marker_sets["Sec10_0"]=s
s= marker_sets["Sec10_0"]
mark=s.place_marker((701.101, 506.558, 493.696), (0.3, 0.69, 0.29), 2)
if "Sec10_1" not in marker_sets:
s=new_marker_set('Sec10_1')
marker_sets["Sec10_1"]=s
s= marker_sets["Sec10_1"]
mark=s.place_marker((676.516, 495.753, 485.448), (0.3, 0.69, 0.29), 2)
if "Sec10_2" not in marker_sets:
s=new_marker_set('Sec10_2')
marker_sets["Sec10_2"]=s
s= marker_sets["Sec10_2"]
mark=s.place_marker((649.527, 497.932, 477.993), (0.3, 0.69, 0.29), 2)
if "Sec10_3" not in marker_sets:
s=new_marker_set('Sec10_3')
marker_sets["Sec10_3"]=s
s= marker_sets["Sec10_3"]
mark=s.place_marker((619.458, 511.278, 481.381), (0.3, 0.69, 0.29), 2)
if "Sec10_4" not in marker_sets:
s=new_marker_set('Sec10_4')
marker_sets["Sec10_4"]=s
s= marker_sets["Sec10_4"]
mark=s.place_marker((600.788, 528.749, 492.931), (0.3, 0.69, 0.29), 2)
if "Sec10_5" not in marker_sets:
s=new_marker_set('Sec10_5')
marker_sets["Sec10_5"]=s
s= marker_sets["Sec10_5"]
mark=s.place_marker((585.416, 547.895, 506.471), (0.3, 0.69, 0.29), 2)
if "Sec15_0" not in marker_sets:
s=new_marker_set('Sec15_0')
marker_sets["Sec15_0"]=s
s= marker_sets["Sec15_0"]
mark=s.place_marker((578.893, 545.344, 539.589), (0.97, 0.51, 0.75), 2)
if "Sec15_1" not in marker_sets:
s=new_marker_set('Sec15_1')
marker_sets["Sec15_1"]=s
s= marker_sets["Sec15_1"]
mark=s.place_marker((576.189, 520.714, 526.223), (0.97, 0.51, 0.75), 2)
if "Sec15_2" not in marker_sets:
s=new_marker_set('Sec15_2')
marker_sets["Sec15_2"]=s
s= marker_sets["Sec15_2"]
mark=s.place_marker((575.769, 495.878, 512.964), (0.97, 0.51, 0.75), 2)
if "Sec15_3" not in marker_sets:
s=new_marker_set('Sec15_3')
marker_sets["Sec15_3"]=s
s= marker_sets["Sec15_3"]
mark=s.place_marker((579.773, 469.843, 503.012), (0.97, 0.51, 0.75), 2)
if "Sec15_4" not in marker_sets:
s=new_marker_set('Sec15_4')
marker_sets["Sec15_4"]=s
s= marker_sets["Sec15_4"]
mark=s.place_marker((588.393, 443.352, 498.919), (0.97, 0.51, 0.75), 2)
if "Sec15_5" not in marker_sets:
s=new_marker_set('Sec15_5')
marker_sets["Sec15_5"]=s
s= marker_sets["Sec15_5"]
mark=s.place_marker((599.395, 417.469, 497.381), (0.97, 0.51, 0.75), 2)
if "Exo70_0" not in marker_sets:
s=new_marker_set('Exo70_0')
marker_sets["Exo70_0"]=s
s= marker_sets["Exo70_0"]
mark=s.place_marker((524.462, 571.472, 557.665), (0.89, 0.1, 0.1), 2)
if "Exo70_1" not in marker_sets:
s=new_marker_set('Exo70_1')
marker_sets["Exo70_1"]=s
s= marker_sets["Exo70_1"]
mark=s.place_marker((527.034, 580.441, 530.875), (0.89, 0.1, 0.1), 2)
if "Exo70_2" not in marker_sets:
s=new_marker_set('Exo70_2')
marker_sets["Exo70_2"]=s
s= marker_sets["Exo70_2"]
mark=s.place_marker((546.138, 577.472, 510.09), (0.89, 0.1, 0.1), 2)
if "Exo70_3" not in marker_sets:
s=new_marker_set('Exo70_3')
marker_sets["Exo70_3"]=s
s= marker_sets["Exo70_3"]
mark=s.place_marker((566.844, 574.091, 490.979), (0.89, 0.1, 0.1), 2)
if "Exo70_4" not in marker_sets:
s=new_marker_set('Exo70_4')
marker_sets["Exo70_4"]=s
s= marker_sets["Exo70_4"]
mark=s.place_marker((587.996, 571.11, 472.306), (0.89, 0.1, 0.1), 2)
if "Exo84_0" not in marker_sets:
s=new_marker_set('Exo84_0')
marker_sets["Exo84_0"]=s
s= marker_sets["Exo84_0"]
mark=s.place_marker((558.842, 589.233, 539.76), (1, 0.5, 0), 2)
if "Exo84_1" not in marker_sets:
s=new_marker_set('Exo84_1')
marker_sets["Exo84_1"]=s
s= marker_sets["Exo84_1"]
mark=s.place_marker((546.5, 552.691, 533.86), (1, 0.5, 0), 2)
if "Exo84_2" not in marker_sets:
s=new_marker_set('Exo84_2')
marker_sets["Exo84_2"]=s
s= marker_sets["Exo84_2"]
mark=s.place_marker((536.28, 515.902, 528.369), (1, 0.5, 0), 2)
if "Exo84_3" not in marker_sets:
s=new_marker_set('Exo84_3')
marker_sets["Exo84_3"]=s
s= marker_sets["Exo84_3"]
mark=s.place_marker((527.614, 485.031, 523.816), (1, 0.5, 0), 2)
|
[
"batxes@gmail.com"
] |
batxes@gmail.com
|
6fe6f1837a2ae451cc49b713cdfe92c018dcc80e
|
49c56e91ed6a749a3f00b38fc5702d44da430546
|
/4. Lists/Assignment 5 - MaxNum/max_num.py
|
75be8847092128c6a46545542f7e86b06d47dcf8
|
[] |
no_license
|
sushilrajeeva/Python-Infosys
|
32349e30527d3c146fae8e271c94f063562288d2
|
0c9a9b90f368a5e5d846d6856b92250a05f462e1
|
refs/heads/main
| 2023-03-09T13:59:38.261451
| 2021-02-13T12:08:38
| 2021-02-13T12:08:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
def find_max(num1, num2):
max_num=-1
# Write your logic here
if num1 >= num2:
return -1
num_list = []
for i in range(num1,num2+1):
if len(str(i)) == 2:
if (int(str(i)[0])+int(str(i)[1]))%3 == 0:
if i%5 == 0:
num_list.append(i)
max_num = num_list[-1] if len(num_list) > 0 else -1
return max_num
#Provide different values for num1 and num2 and test your program.
max_num=find_max(2,14)
print(max_num)
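# Additional sanity check (ours, beyond the provided test): in 10..99 the largest
# two-digit number whose digit sum is divisible by 3 and which is divisible by 5
# is 90, so find_max(10, 99) should return 90.
# print(find_max(10, 99))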
|
[
"sushilrajeeva@gmail.com"
] |
sushilrajeeva@gmail.com
|
09c30c4ee8d5edcf60ea994e86328c9a777679b2
|
fa9d87827d7f2790360d109163c50ea0521c70a1
|
/dj_forms/urls.py
|
64e0434b98e3729204835a299694d45fb5991da1
|
[] |
no_license
|
rifqirosyidi/django-dynamic-forms
|
9b1e5115a5a0ca7f5f4a54a8494094260a406abe
|
8e4ad1f244e852c11b9c36bcacd3ffddc58647c7
|
refs/heads/master
| 2023-05-05T14:56:30.186239
| 2021-04-12T09:42:11
| 2021-04-12T09:42:11
| 210,821,163
| 1
| 0
| null | 2022-04-22T22:27:46
| 2019-09-25T10:47:03
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 483
|
py
|
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.urls import path
from core.views import bootstrap_filter_view
urlpatterns = [
path('admin/', admin.site.urls),
path('', bootstrap_filter_view, name='bootstrap_form')
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"rief.rosyidi@gmail.com"
] |
rief.rosyidi@gmail.com
|
a0604eaf903597a9c33564e33bbff940718abe8d
|
ef4f3faafb78609f88bdf9d2959041705a793fde
|
/Pages/Interactions/Interactions_Page.py
|
5bf8c9b6ac18536b8ccfecbd7237de66c170c451
|
[] |
no_license
|
LuckyAndrey/pomTutorial
|
14ff5e7604462503e0c0bcd5c9a6012cba193aee
|
cbf9927a5a41714b035c5e3096fdd7062688837a
|
refs/heads/master
| 2020-03-20T22:56:04.703585
| 2018-06-19T00:32:19
| 2018-06-19T00:32:19
| 137,821,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,496
|
py
|
from random import randint
class StaticDrop(object):
def __init__(self, driver):
self.driver = driver
def chose_menu_item(self, nameOfmenu):
self.driver.get("http://demo.automationtesting.in/Static.html")
self.driver.find_element_by_link_text("Interactions").click()
self.driver.find_element_by_link_text("Drag and Drop").click()
self.driver.find_element_by_link_text(nameOfmenu).click()
# self.driver.find_element_by_link_text("Static").click()
def verify_drag_items_are(self):
        return self.driver.find_elements_by_xpath(Locators.drag_items)  # plural: all draggable items
def static(self):
pass
class DynamicDrop(object):
def __init__(self, driver):
self.driver = driver
def verify_drag_items_are(self):
        return self.driver.find_elements_by_xpath(Locators.drag_items)  # plural: all draggable items
    def check_max_size_of_dynamic_frame(self):
        # return the rendered size of the drop area instead of discarding it
        return self.driver.find_element_by_xpath(Locators.droparea).size
    def drop_randomn_img(self):
        # find_elements (plural) returns a list; randint's upper bound is inclusive
        elements = self.driver.find_elements_by_xpath(Locators.drag_items)
        return elements[randint(0, len(elements) - 1)]
class Locators:
url = "http://demo.automationtesting.in/Static.html"
menu_interaction = '//a[@href="Interactions.html"]'
sub_menu_drag_and_drop = ''
# Dynamic ---------------------------
nameOfMenu = 'Dynamic '
droparea ='//div[@id="droparea"]/p/div'
dragarea ='//div[@id="dragarea"]/p/div'
drag_items = '//div[@id="dragarea"]/div'
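# Hedged usage sketch (assumes Selenium and a matching chromedriver are installed):
# from selenium import webdriver
# driver = webdriver.Chrome()
# StaticDrop(driver).chose_menu_item("Static")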
|
[
"aretivikh@mail.ru"
] |
aretivikh@mail.ru
|
29508ba756fefda65accb4e418dfd2d471a05776
|
a28edab2207fd9f58fe2f12207607bb832a21685
|
/test/qw_PbPb15_HIMB_randq.py
|
75e03501617ef9b08721b0c187d97a27c07e471e
|
[] |
no_license
|
BetterWang/QWQVector
|
4bfae038cf3d0f965af72102dd8b0ec091402b80
|
a7b95886e261d6dd5c2743874a3703bfc7652ef6
|
refs/heads/master
| 2020-12-24T07:37:02.876423
| 2016-06-30T13:35:40
| 2016-06-30T13:35:40
| 55,786,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,703
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("QVector")
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.StandardSequences.GeometryDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))
process.MessageLogger.cerr.FwkReport.reportEvery = 100
from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '75X_dataRun2_v13', '')
process.options = cms.untracked.PSet(
Rethrow = cms.untracked.vstring('ProductNotFound')
)
#fN = cms.untracked.vstring();
#for line in open('flist').read().splitlines():
# fN.append('file:'+line);
#
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring("file:/afs/cern.ch/user/q/qwang/work/cleanroomRun2/Ana/CMSSW_7_5_8_patch2/src/QWAna/QWCumuV3/test/HIMinBias_28.root")
)
#import FWCore.PythonUtilities.LumiList as LumiList
#import FWCore.ParameterSet.Types as CfgTypes
#process.source.lumisToProcess = CfgTypes.untracked(CfgTypes.VLuminosityBlockRange())
#JSONfile = 'Cert_210498-211631_HI_PromptReco_Collisions13_JSON_v2.txt'
#myLumis = LumiList.LumiList(filename = JSONfile).getCMSSWString().split(',')
#process.source.lumisToProcess.extend(myLumis)
#
#
import HLTrigger.HLTfilters.hltHighLevel_cfi
process.hltMB = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone()
process.hltMB.HLTPaths = [
"HLT_HIL1MinimumBiasHF2AND_*",
"HLT_HIL1MinimumBiasHF1AND_*",
]
process.hltMB.andOr = cms.bool(True)
process.hltMB.throw = cms.bool(False)
process.QVector = cms.EDAnalyzer('QWQVector'
, centrality = cms.InputTag("centralityBin", "HFtowers")
, trackTag = cms.untracked.InputTag('hiGeneralTracks')
, vertexSrc = cms.untracked.InputTag('hiSelectedVertex', "")
, pterrorpt = cms.untracked.double(0.1)
, dzdzerror = cms.untracked.double(3.0)
, d0d0error = cms.untracked.double(3.0)
, minvz = cms.untracked.double(-1.0)
, maxvz = cms.untracked.double(15.0)
, minEta = cms.untracked.double(-2.4)
, maxEta = cms.untracked.double(2.4)
, minPt = cms.untracked.double(1.0)
, maxPt = cms.untracked.double(3.0)
, minCent = cms.untracked.int32(-1)
, maxCent = cms.untracked.int32(500)
, epSrc = cms.untracked.InputTag("hiEvtPlane")
# , fweight_ = cms.untracked.InputTag('PbPb_dijet_TT_5TeV_v2.root')
# , bEff_ = cms.untracked.bool(False)
, algoParameters = cms.vint32(4,5,6,7)
, bGen = cms.untracked.bool(False)
, bRandQ = cms.untracked.bool(True)
)
process.RandomNumberGeneratorService = cms.Service("RandomNumberGeneratorService",
QVector = cms.PSet(
initialSeed = cms.untracked.uint32(123456789),
engineName = cms.untracked.string('HepJamesRandom')
)
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string('qvector.root')
)
process.load("RecoHI.HiCentralityAlgos.CentralityBin_cfi")
process.centralityBin.Centrality = cms.InputTag("hiCentrality")
process.centralityBin.centralityVariable = cms.string("HFtowers")
process.load('HeavyIonsAnalysis.Configuration.collisionEventSelection_cff')
process.clusterCompatibilityFilter.clusterPars = cms.vdouble(0.0,0.006)
process.eventSelection = cms.Sequence(
process.hfCoincFilter3
+ process.primaryVertexFilter
+ process.clusterCompatibilityFilter
)
process.path= cms.Path(process.hltMB*process.eventSelection*process.centralityBin*process.QVector)
process.schedule = cms.Schedule(
process.path,
)
|
[
"BetterWang@gmail.com"
] |
BetterWang@gmail.com
|
1962b63a6c31ec4894edce0ef34ab038c15e6c79
|
84762d3732524bc732bb8f087b57d1d4c941eed1
|
/backgrounds.py
|
9d98bbb0e67870f83a4447fd7570562a056adc28
|
[] |
no_license
|
Hiphopbob/DnD-GUI
|
de31955a8e4dcf9deecb891b6a481589aeb7475e
|
8a45ac0a79b0c232f9a70fe2e327fdeefd9bf955
|
refs/heads/master
| 2020-07-08T06:58:38.147074
| 2019-08-21T14:25:39
| 2019-08-21T14:25:39
| 203,600,516
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,110
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 17 19:24:05 2019
@author: Steffen 000
"""
import numpy as np
def bg_description(bg_name):
if bg_name == 'Alert1':
return '''
You gain +5 bonus to initiative. \n
You can't be surprised while you are conscious. \n
Other creatures don't gain advantage on attack rolls against you as a
result of being hidden.
'''
elif bg_name == 'Athlete1':
return '''
Increase your Strength or Dexterity by 1, to a maximum of 20.\n
When you are prone, standing up only requires 5 feet of movement. \n
Climbing doesn't cost you any extra movement. \n
You can make a running jump after only 5 feet on foot.
'''
elif bg_name == 'Actor1':
return '''
Increase your Charisma by 1, to a maximum of 20.\n
 You have advantage on Charisma (Deception) and Charisma (Performance)
checks to pass yourself off as someone else.\n
You can mimic the speech of another person or the sounds made by other
creatures. You must have heard the speech or sound for at least 1
minute. A successful Wisdom (Insight) check contested by your Charisma
(Deception) check allows a listener to determine the effect is faked.
'''
elif bg_name == 'Blade Master (UA)1':
return '''
You master the shortsword, longsword, scimitar, rapier, and greatsword.
You gain the following benefits when using any of them:\n
You gain a +1 bonus to attack rolls you make with the weapon.\n
On your turn, you can use your reaction to assume a parrying stance,
provided you have the weapon in hand. Doing so grants you a +1 bonus
to your AC until the start of your next turn or until you’re not
holding the weapon.\n
When you make an opportunity attack with the weapon, you have advantage
on the attack roll.
'''
else:
return '''
This feat has not been added yet
'''
bg_list = ['Alert1','Athlete1','Actor1','Blade Master (UA)1']
bg_N=len(bg_list)
def test_bg():
    for bg in bg_list:
        print(bg_description(bg))
#test_bg()
|
[
"noreply@github.com"
] |
Hiphopbob.noreply@github.com
|
0e8f51d09fa9a99cc12bfc76c2e268f90f67dbbe
|
000090bbdb8afb26098dc86420ae66d2dd915baa
|
/test/scanner_files/wl_events.py
|
61278fbc94255398859c91151951adecf57d0be6
|
[
"Apache-2.0"
] |
permissive
|
flacjacket/pywayland
|
7b25f9730a16f3ebf79e84f3a68b9cd18fbca88d
|
5d1fafc1935cc489b68faac544404c65b9449787
|
refs/heads/main
| 2023-08-07T17:11:52.211565
| 2023-07-26T15:28:46
| 2023-07-26T15:28:46
| 33,219,523
| 71
| 18
|
Apache-2.0
| 2022-12-20T03:02:43
| 2015-04-01T01:26:05
|
Python
|
UTF-8
|
Python
| false
| false
| 4,280
|
py
|
# This file has been autogenerated by the pywayland scanner
# Copyright 2015 Sean Vig
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from pywayland.protocol_core import (
Argument,
ArgumentType,
Global,
Interface,
Proxy,
Resource,
)
from .wl_core import WlCore
from .wl_requests import WlRequests
class WlEvents(Interface):
"""Events object
The interface object with the different types of events.
"""
name = "wl_events"
version = 2
class WlEventsProxy(Proxy[WlEvents]):
interface = WlEvents
class WlEventsResource(Resource):
interface = WlEvents
@WlEvents.event(
Argument(ArgumentType.NewId, interface=WlRequests),
Argument(ArgumentType.Int),
Argument(ArgumentType.Uint),
Argument(ArgumentType.FileDescriptor),
)
def send_event(self, id: WlRequests, the_int: int, the_uint: int, the_fd: int) -> None:
"""Send the data
Request for data from the client. Send the data as the specified mime
type over the passed file descriptor, then close it.
:param id:
:type id:
:class:`~pywayland.protocol.scanner_test.WlRequests`
:param the_int:
:type the_int:
`ArgumentType.Int`
:param the_uint:
the arg summary
:type the_uint:
`ArgumentType.Uint`
:param the_fd:
:type the_fd:
`ArgumentType.FileDescriptor`
"""
self._post_event(0, id, the_int, the_uint, the_fd)
@WlEvents.event()
def no_args(self) -> None:
"""Event with no args
An event method that does not have any arguments.
"""
self._post_event(1)
@WlEvents.event(
Argument(ArgumentType.NewId, interface=WlCore),
)
def create_id(self, id: WlCore) -> None:
"""Create an id
With a description
:param id:
:type id:
:class:`~pywayland.protocol.scanner_test.WlCore`
"""
self._post_event(2, id)
@WlEvents.event(
Argument(ArgumentType.NewId, interface=WlCore),
)
def create_id2(self, id: WlCore) -> None:
"""Create an id without a description
:param id:
:type id:
:class:`~pywayland.protocol.scanner_test.WlCore`
"""
self._post_event(3, id)
@WlEvents.event(
Argument(ArgumentType.String, nullable=True),
)
def allow_null_event(self, null_string: str | None) -> None:
"""A event with an allowed null argument
An event where one of the arguments is allowed to be null.
:param null_string:
:type null_string:
`ArgumentType.String` or `None`
"""
self._post_event(4, null_string)
@WlEvents.event(
Argument(ArgumentType.NewId, interface=WlRequests),
Argument(ArgumentType.Object, interface=WlCore, nullable=True),
)
def make_import(self, id: WlRequests, object: WlCore | None) -> None:
"""Event that causes an import
        An event method that causes an import of other interfaces
:param id:
:type id:
:class:`~pywayland.protocol.scanner_test.WlRequests`
:param object:
:type object:
:class:`~pywayland.protocol.scanner_test.WlCore` or `None`
"""
self._post_event(5, id, object)
@WlEvents.event(version=2)
def versioned(self) -> None:
"""A versioned event
An event that is versioned.
"""
self._post_event(6)
class WlEventsGlobal(Global):
interface = WlEvents
WlEvents._gen_c()
WlEvents.proxy_class = WlEventsProxy
WlEvents.resource_class = WlEventsResource
WlEvents.global_class = WlEventsGlobal
|
[
"sean.v.775@gmail.com"
] |
sean.v.775@gmail.com
|
cf5075cf714f5e01484506f7b558a2f019d88a01
|
bfd9b16aa55e4887ba574438870593106f74ada6
|
/keras_retinanet/bin/trainHCC.py
|
16971128c989c08d7ae3e7d2c4b62ccb4ab578ec
|
[
"Apache-2.0"
] |
permissive
|
jmj23/keras-retinanet
|
e11f75a90b95b2af11cb22afe2ac1e5861e48c29
|
33b5b0986dee0d0280847fb60b29cb401d596ae8
|
refs/heads/master
| 2020-07-01T05:53:56.950382
| 2019-08-12T15:14:31
| 2019-08-12T15:14:31
| 201,066,844
| 0
| 0
| null | 2019-08-07T14:28:24
| 2019-08-07T14:28:23
| null |
UTF-8
|
Python
| false
| false
| 14,490
|
py
|
#!/usr/bin/env python
"""
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import warnings
from types import SimpleNamespace
from .CalculateStats import CalculateStatsOnFold
import keras
import keras.preprocessing.image
import tensorflow as tf
from glob import glob
# Allow relative imports when being executed as script.
if __name__ == "__main__" and __package__ is None or __package__ is '':
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
import keras_retinanet.bin # noqa: F401
__package__ = "keras_retinanet.bin"
# Change these to absolute imports if you copy this script outside the keras_retinanet package.
from .. import layers # noqa: F401
from .. import losses
from .. import models
from ..callbacks import RedirectModel
from ..callbacks.eval import Evaluate
from ..models.retinanet import retinanet_bbox
from ..preprocessing.csv_generator_hcc import CSVGenerator
from ..utils.anchors import make_shapes_callback
from ..utils.config import read_config_file, parse_anchor_parameters
from ..utils.keras_version import check_keras_version
from ..utils.model import freeze as freeze_model
from ..utils.transform import random_transform_generator
from ..utils.image import random_visual_effect_generator
def makedirs(path):
# Intended behavior: try to create the directory,
# pass if the directory exists already, fails otherwise.
# Meant for Python 2.7/3.n compatibility.
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def RenameWeights(directory,new_file_name):
# Rename most recent (best) weights
h5files = glob(os.path.join(directory,'*.h5'))
load_file = max(h5files, key=os.path.getctime)
os.rename(load_file, new_file_name)
print('Renamed weights file {} to {}'.format(
load_file, new_file_name))
def get_session():
""" Construct a modified tf session.
"""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # pylint: disable=no-member
return tf.Session(config=config)
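# Note: tf.ConfigProto / tf.Session are TensorFlow 1.x APIs. A rough TF 2.x
# equivalent (an assumption, untested here) would be:
# for gpu in tf.config.list_physical_devices('GPU'):
#     tf.config.experimental.set_memory_growth(gpu, True)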
def model_with_weights(model, weights, skip_mismatch):
""" Load weights for model.
Args
model : The model to load weights for.
weights : The weights to load.
skip_mismatch : If True, skips layers whose shape of weights doesn't match with the model.
"""
if weights is not None:
model.load_weights(weights, by_name=True, skip_mismatch=skip_mismatch)
return model
def create_models(backbone_retinanet, num_classes, weights, multi_gpu=0,
freeze_backbone=False, lr=1e-5, config=None):
""" Creates three models (model, training_model, prediction_model).
Args
backbone_retinanet : A function to call to create a retinanet model with a given backbone.
num_classes : The number of classes to train.
weights : The weights to load into the model.
multi_gpu : The number of GPUs to use for training.
freeze_backbone : If True, disables learning for the backbone.
config : Config parameters, None indicates the default configuration.
Returns
model : The base model. This is also the model that is saved in snapshots.
training_model : The training model. If multi_gpu=0, this is identical to model.
prediction_model : The model wrapped with utility functions to perform object detection (applies regression values and performs NMS).
"""
modifier = freeze_model if freeze_backbone else None
# load anchor parameters, or pass None (so that defaults will be used)
anchor_params = None
num_anchors = None
if config and 'anchor_parameters' in config:
anchor_params = parse_anchor_parameters(config)
num_anchors = anchor_params.num_anchors()
# Keras recommends initialising a multi-gpu model on the CPU to ease weight sharing, and to prevent OOM errors.
# optionally wrap in a parallel model
if multi_gpu > 1:
from keras.utils import multi_gpu_model
with tf.device('/cpu:0'):
model = model_with_weights(backbone_retinanet(num_classes, num_anchors=num_anchors, modifier=modifier), weights=weights, skip_mismatch=True)
training_model = multi_gpu_model(model, gpus=multi_gpu)
else:
model = model_with_weights(backbone_retinanet(num_classes, num_anchors=num_anchors, modifier=modifier), weights=weights, skip_mismatch=True)
training_model = model
# make prediction model
prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)
# compile model
training_model.compile(
loss={
'regression' : losses.smooth_l1(),
'classification': losses.focal()
},
optimizer=keras.optimizers.adam(lr=lr, clipnorm=0.001)
)
return model, training_model, prediction_model
def create_callbacks(model, training_model, prediction_model, validation_generator, args):
""" Creates the callbacks to use during training.
Args
model: The base model.
training_model: The model that is used for training.
prediction_model: The model that should be used for validation.
validation_generator: The generator for creating validation data.
args: parseargs args object.
Returns:
A list of callbacks used for training.
"""
callbacks = []
tensorboard_callback = None
if args.tensorboard_dir:
tensorboard_callback = keras.callbacks.TensorBoard(
log_dir = args.tensorboard_dir,
histogram_freq = 0,
batch_size = args.batch_size,
write_graph = True,
write_grads = False,
write_images = False,
embeddings_freq = 0,
embeddings_layer_names = None,
embeddings_metadata = None
)
callbacks.append(tensorboard_callback)
if args.evaluation and validation_generator:
if args.dataset_type == 'coco':
from ..callbacks.coco import CocoEval
# use prediction model for evaluation
evaluation = CocoEval(validation_generator, tensorboard=tensorboard_callback)
else:
evaluation = Evaluate(validation_generator, tensorboard=tensorboard_callback, weighted_average=args.weighted_average)
evaluation = RedirectModel(evaluation, prediction_model)
callbacks.append(evaluation)
# save the model
if args.snapshots:
# ensure directory created first; otherwise h5py will error after epoch.
makedirs(args.snapshot_path)
checkpoint = keras.callbacks.ModelCheckpoint(
os.path.join(
args.snapshot_path,
'{backbone}_{dataset_type}_{{epoch:02d}}.h5'.format(backbone=args.backbone, dataset_type=args.dataset_type)
),
verbose=1,
save_best_only=True,
# monitor="mAP",
# mode='max'
)
checkpoint = RedirectModel(checkpoint, model)
callbacks.append(checkpoint)
callbacks.append(keras.callbacks.ReduceLROnPlateau(
monitor = 'loss',
factor = 0.1,
patience = 2,
verbose = 1,
mode = 'auto',
min_delta = 0.0001,
cooldown = 0,
min_lr = 0
))
return callbacks
def create_generators(args, preprocess_image):
""" Create generators for training and validation.
Args
args : parseargs object containing configuration for generators.
preprocess_image : Function that preprocesses an image for the network.
"""
common_args = {
'batch_size' : args.batch_size,
'config' : args.config,
'image_min_side' : args.image_min_side,
'image_max_side' : args.image_max_side,
'preprocess_image' : preprocess_image,
}
# create random transform generator for augmenting training data
if args.random_transform:
transform_generator = random_transform_generator(
min_rotation=-0.1,
max_rotation=0.1,
min_translation=(-0.1, -0.1),
max_translation=(0.1, 0.1),
min_shear=-0.1,
max_shear=0.1,
min_scaling=(0.9, 0.9),
max_scaling=(1.1, 1.1),
flip_x_chance=0.5,
flip_y_chance=0.5,
)
visual_effect_generator = random_visual_effect_generator(
contrast_range=(0.9, 1.1),
brightness_range=(-.1, .1),
hue_range=(-0.05, 0.05),
saturation_range=(0.95, 1.05)
)
else:
transform_generator = random_transform_generator(flip_x_chance=0.5)
visual_effect_generator = None
if args.dataset_type == 'csv':
train_generator = CSVGenerator(
args.annotations,
args.classes,
seqs=args.sequences,
transform_generator=transform_generator,
visual_effect_generator=visual_effect_generator,
**common_args
)
if args.val_annotations:
validation_generator = CSVGenerator(
args.val_annotations,
args.classes,
seqs = args.sequences,
shuffle_groups=False,
**common_args
)
else:
validation_generator = None
else:
raise ValueError('Invalid data type received: {}'.format(args.dataset_type))
return train_generator, validation_generator
def main(args=None):
# create object that stores backbone information
backbone = models.backbone(args.backbone)
# make sure keras is the minimum required version
check_keras_version()
# optionally choose specific GPU
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
keras.backend.tensorflow_backend.set_session(get_session())
# optionally load config parameters
if args.config:
args.config = read_config_file(args.config)
# create the generators
train_generator, validation_generator = create_generators(args, backbone.preprocess_image)
# create the model
if args.snapshot is not None:
print('Loading model, this may take a second...')
model = models.load_model(args.snapshot, backbone_name=args.backbone)
training_model = model
anchor_params = None
if args.config and 'anchor_parameters' in args.config:
anchor_params = parse_anchor_parameters(args.config)
prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)
else:
weights = args.weights
# default to imagenet if nothing else is specified
if weights is None and args.imagenet_weights:
weights = backbone.download_imagenet()
print('Creating model, this may take a second...')
model, training_model, prediction_model = create_models(
backbone_retinanet=backbone.retinanet,
num_classes=train_generator.num_classes(),
weights=weights,
multi_gpu=args.multi_gpu,
freeze_backbone=args.freeze_backbone,
lr=args.lr,
config=args.config
)
# print model summary
# print(model.summary())
# this lets the generator compute backbone layer shapes using the actual backbone model
if 'vgg' in args.backbone or 'densenet' in args.backbone:
train_generator.compute_shapes = make_shapes_callback(model)
if validation_generator:
validation_generator.compute_shapes = train_generator.compute_shapes
# create the callbacks
callbacks = create_callbacks(
model,
training_model,
prediction_model,
validation_generator,
args,
)
if not args.compute_val_loss:
validation_generator = None
# start training
return training_model.fit_generator(
generator=train_generator,
steps_per_epoch=args.steps,
epochs=args.epochs,
verbose=args.verbose,
callbacks=callbacks,
workers=args.workers,
use_multiprocessing=args.multiprocessing,
max_queue_size=args.max_queue_size,
validation_data=validation_generator
)
if __name__ == '__main__':
seqs = ['Inp','Out','T2f','T1p','T1a','T1v','T1d','Dw1','Dw2']
args = SimpleNamespace()
args.dataset_type = 'csv'
args.backbone = 'resnet50'
args.classes = 'C:\\Users\\jmj136.UWHIS\\Documents\\keras-retinanet\\keras_retinanet\\bin\\class_mapping.csv'
args.snapshot = None
args.snapshots = True
args.weights = None
args.imagenet_weights = True
args.freeze_backbone = False
args.sequences = seqs[3:6]
args.numSeq = len(args.sequences)
args.image_min_side = 384
args.image_max_side = 384
args.batch_size = 2
args.lr = 1e-5
args.epochs = 50
args.steps = 1000
args.compute_val_loss = True
args.config = None
args.verbose = 1
args.multiprocessing = False
args.workers = 1
args.max_queue_size = 1
args.evaluation = True
args.weighted_average = False
args.random_transform = True
args.gpu = None
    args.multi_gpu = 0  # number of GPUs; 0 or 1 keeps the single-GPU path
args.tensorboard_dir = './tensorboard_logs'
for fold in range(5):
args.annotations ='C:\\Users\\jmj136.UWHIS\\Documents\\keras-retinanet\\keras_retinanet\\hcc_retinadata_train_fold{}.csv'.format(fold)
args.val_annotations = 'C:\\Users\\jmj136.UWHIS\\Documents\\keras-retinanet\\keras_retinanet\\hcc_retinadata_val_fold{}.csv'.format(fold)
args.snapshot_path = './snapshots_fold{}'.format(fold)
main(args)
RenameWeights(args.snapshot_path,os.path.join(args.snapshot_path,'best_weights.h5'))
|
[
"johnsonj118@gmail.com"
] |
johnsonj118@gmail.com
|
431f0362eda816928191d97d21a5e2ccfd75a80a
|
db9cf192173986e79f8a52934d9f7694de7e8b5f
|
/Alfred.alfredpreferences/workflows/user.workflow.0B78589C-6586-4A25-9118-FAB641EDB920/list_sources.py
|
317718c364f6d806f1bb5df183b2fdf50317af3e
|
[] |
no_license
|
dcerniglia/dotfiles
|
01bdf5f3cb5f5cb7aa5921fa9c9d5abe8243d5af
|
8c4e3a2eda06e9c8f9212cc44ef5bb092822b436
|
refs/heads/master
| 2023-04-17T22:50:53.183402
| 2021-04-28T14:19:46
| 2021-04-28T14:19:46
| 294,868,200
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
#!/usr/bin/env python
from sys import stdout
from json import dumps
from SwitchAudioSource import get_sources
# materialise as lists so json.dumps works on Python 3, where filter/map are lazy
output_items = [source for source in get_sources() if source.output]
items = [source.__dict__ for source in output_items]
json_output = dumps({
"items": items
})
stdout.write(json_output)
|
[
"dcerniglia@gmail.com"
] |
dcerniglia@gmail.com
|
930c7dd124dcecefff3b9d3d5928936668d26b18
|
46af3499de3cf50bd21719fd994696fa35c10943
|
/audio/tasks.py
|
263188abf2491550e235560ab69d61f0ac9e6e57
|
[] |
no_license
|
bubiche/audio-sharing
|
188acd371119ed2b64e6cacacebe33a4ffd16e8e
|
549d21d673153e82dd354d3d2ae4b483c5993391
|
refs/heads/master
| 2023-08-13T07:16:35.895382
| 2023-05-03T02:17:46
| 2023-05-03T02:17:46
| 169,017,977
| 0
| 0
| null | 2023-07-25T21:40:20
| 2019-02-04T02:20:23
|
Python
|
UTF-8
|
Python
| false
| false
| 110
|
py
|
from celery import shared_task
@shared_task
def test_background_task():
print('Async worker is alive!')
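# Hedged usage sketch (assumes a configured Celery app and a running worker):
# test_background_task.delay()  # enqueues the task for asynchronous execution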
|
[
"npnguyen@apcs.vn"
] |
npnguyen@apcs.vn
|
d5b84ed498afae38c1ebb8e4980c5f349f91daa8
|
96c7720bba82942269eb946c49f02a15ea857f54
|
/Alice_In_Wonderland/theHiddenWord.py
|
c460b22f3299593c2a03c6f9ce266a81e1830074
|
[] |
no_license
|
RandBetween/checkio
|
147257574a759fe94702165da5992c0929b3bb2d
|
7a8854cb8a120d252100b101da24d5cafcc78eff
|
refs/heads/master
| 2021-01-13T13:56:25.381571
| 2016-11-06T19:08:26
| 2016-11-06T19:08:26
| 72,950,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,764
|
py
|
def checkio(text, word):
text = text.lower()
text = text.replace(" ", "")
y = text.splitlines()
y_rows = len(y)
horizontal_search = "".join(y)
y2 = flip_text(y)
y2_rows = len(y2)
vertical_search = "".join(y2)
try:
position = horizontal_search.index(word)
row_cutoffs = []
z = 0
for i in range(y_rows):
row_cutoffs.append(len(y[i])+z)
z += len(y[i])
first_row = 0
second_row = 0
second_col = 0
for k in range(len(row_cutoffs)):
if position < row_cutoffs[k]:
first_row = k
break
first_col = y[first_row].find(word)
if first_col + len(word) <= len(y[first_row]):
second_row = first_row
second_col = first_col + len(word)
else:
second_row = first_row + 1
second_col = len(word) - (len(y[first_row]) - first_col)
return [first_row + 1, first_col + 1, second_row + 1, second_col]
except ValueError:
position = vertical_search.index(word)
row_cutoffs = []
z = 0
for i in range(y2_rows):
row_cutoffs.append(len(y2[i])+z)
z += len(y2[i])
first_row = 0
second_row = 0
second_col = 0
for k in range(len(row_cutoffs)):
if position < row_cutoffs[k]:
first_row = k
break
first_col = y2[first_row].find(word)
if first_col + len(word) <= len(y2[first_row]):
second_row = first_row
second_col = first_col + len(word)
else:
second_row = first_row + 1
second_col = len(word) - (len(y2[first_row]) - first_col)
return [first_col + 1, first_row + 1, second_col, second_row + 1]
def flip_text(text):
new_text = []
    # bare max() compares strings lexicographically; key=len finds the longest row
    cols = len(max(text, key=len))
rows = len(text)
for i in range(cols):
temp_row = []
for j in range(rows):
try:
temp_row.append(text[j][i])
except IndexError:
temp_row.append(" ")
new_text.append("".join(temp_row))
return new_text
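# Smoke test (the grid below is ours, not part of the original submission):
# print(checkio("DREAMING of apples on a wall,\nand dreaming often, dear", "dream"))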
|
[
"c09joe.mercurio@gmail.com"
] |
c09joe.mercurio@gmail.com
|
6c0caebaae3a59cdc30d8020023d8a3eabf7078d
|
bf1808d017c966d22e3f3373ad649f4616327190
|
/corp/scotscorr/scot_to_vrt.py
|
9b90208c03782fe08dca58d1a4325ac274a7d4d9
|
[] |
no_license
|
CSCfi/Kielipankki-utilities
|
911b2463776fa8fb487309310d8ff815980509c9
|
ac54d9e3340cd6f2293bf9d94b0f8737dc0a8adb
|
refs/heads/master
| 2023-08-21T20:44:43.582318
| 2023-08-15T12:41:45
| 2023-08-15T12:41:45
| 237,216,605
| 7
| 1
| null | 2023-03-08T09:09:35
| 2020-01-30T13:15:48
|
Python
|
UTF-8
|
Python
| false
| false
| 12,830
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import re
import sys
"""
asahala/finclarin
python script.py < infile.html > outfile.vrt
the input file should be .docx converted into .html
"""
ignore = r'\/|P|HTML|META|STYLE|BODY|FONT|TITLE|SPAN|A|!'
regions = {'Moray': 'North', 'Invernessshire': 'North', 'Sutherland': 'North', 'Ross': 'North',
'Aberdeenshire': 'North-East', 'Angus': 'North-East', 'Perthshire': 'Central', 'Lanarkshire': 'Central',
'Fife': 'South-East', 'East Lothian': 'South-East', 'Lothian': 'South-East',
'Stirlingshire': 'South-East', 'Border': 'South-East',
'Argyllshire': 'South-West', 'Ayrshire': 'South-West', 'South-West': 'South-West',
'unspecified': 'Unlocalised', 'unlocalised': 'Unlocalised',
'Court': 'Court', 'Professional': 'Professional',
'Borders': 'South-East', 'Home': 'Home'}
def fix(line):
line = line.replace('(sor', 'sor')
line = line.replace('(which', '( which')
line = line.replace('(thankes', '( thankes')
line = line.replace('!xiij', 'xiij')
line = line.replace(',?', ',{<?}')
line = line.replace(',whither', ', whither')
line = line.replace(':Glascuen', ': Glascuen')
line = line.replace(' ;? ', ' ;{<?} ')
line = line.replace(';france', 'france')
line = line.replace('affect:', 'affect :')
line = line.replace('also,', 'also ,')
line = line.replace('Ap:', 'Ap :')
line = line.replace('you,', 'you ,')
line = line.replace('},', '} ,')
line = line.replace('};', '} ;')
line = line.replace('{space}', ' {space} ')
line = line.replace('>)', '>}')
line = line.replace('space vertically ', 'space vertically} ')
line = line.replace('}{', '} {')
line = line.replace('{grace}', '{=grace}')
line = re.sub('\{([-\.\?])\}', r'\1', line)
line = line.replace('{\\}', '\\')
line = line.replace('{b}', '(b) {<possibly added later}')
line = line.replace('{hime}', '{ins}')
line = line.replace('{trouble', '{=trouble')
line = line.replace('{correcting another', '{<correcting another')
line = re.sub('{<([a-zA-Z])>', r'{<<\1>', line)
line = re.sub('‘(.)’', r'<\1>', line)
line = line.replace('{y correcting e}', '{<<y> correcting <e>}')
line = line.replace('{u correcting y}', '{<<u> correcting <y>}')
line = line.replace('{i probably correcting j}', '{<<i> probably correcting <j>}')
line = line.replace('{a correcting i}', '{<<a> correcting <i>}')
line = line.replace('{z replacing', '{<<z> replacing')
line = line.replace('{v correcting o?}', '{<<v> possibly correcting <o>}')
line = line.replace('{the second', '{<the second')
line = line.replace('{e correcting is}', '{<<e> correcting <is>}')
line = line.replace('{first', '{<the first')
line = line.replace('{O correcting', '{<<O> correcting')
line = line.replace('{l correcting r}', '{<<l> corecting <r>}')
line = line.replace('{o correcting d}', '{<<o> correcting <d>}')
    # raw patterns; the replacement strings must be literal text, not regex escapes
    line = re.sub(r'\{< ', '{<', line)
    line = re.sub(r' >\}; ', '>}', line)
line = line.replace("(end}", "{end}")
line = line.replace("[<reduced}", "{<reduced}")
line = line.replace("[ins}", "{ins}")
line = line.replace("[space}", "{space}")
line = line.replace('except ofr', 'except for')
line = line.replace('exept for', 'except for')
line = line.replace('corecting', 'correcting')
line = line.replace('damamged', 'damaged')
line = line.replace('wirds danaged', 'words damaged')
line = line.replace('preceiding', 'preceding')
line = line.replace('superfluos', 'superfluous')
line = line.replace('verticlaly', 'vertically')
line = line.replace("{=AGAIN}", "{=again}")
line = line.replace("{=AGO}", "{=ago}")
line = line.replace("{=AN}", "{=an}")
line = line.replace("{=BECAUSE}", "{=because}")
line = line.replace("{=BEFORE}", "{=before}")
line = line.replace("{=BYGONES}", "{=bygones}")
line = line.replace("{=CAN NOT}", "{=can not}")
line = line.replace("{=HAVE}", "{=have}")
line = line.replace("{=INCREDULOUS}", "{=increndulous}")
line = line.replace("{=MAY}", "{=may}")
line = line.replace("{=NOTE}", "{=note}")
line = line.replace("{=THE}", "{=the}")
line = line.replace("{=UN-}", "{=un-}")
line = line.replace("{=UNANSWERED}", "{=unanswered}")
line = line.replace("{address>}", "{address>} ")
line = line.replace("{centred>}", "{centred>} ")
line = line.replace("{f2}", " {f2} ")
line = line.replace("{adjacent<}", "{<adjacent}")
line = line.replace("{>adjacent}", "{adjacent>}")
line = line.replace("{<a?s?> cancelled}", "{<a?s?> cancelled}")
line = line.replace("{hand 1> ", "{hand 1>} ")
line = line.replace("{del\ an", "{del} an")
line = line.replace("{ins]", "{ins}")
line = line.replace("{hand 1>}}", "{hand 1>}")
line = line.replace("tyme }", "tyme )")
return line
def read_file(text):
f = sys.stdin.readlines()
for line in f:
if not line.endswith('</P>\n') and line != '\n':
text += re.sub('\n', ' ', line)
else:
if line != '\n':
text += line
# Make formatted HTML compatible with the old script
text = re.sub('<({tags}).+?>'.format(tags=ignore), '', text).strip()
text = re.sub('<BR>', ' ', text)
#print(text)
#text = re.sub(' \n', ' ', text.strip())
return ['<p class=MsoNormal>' + fix(y) + '</p>' for y in text.split('\n')]#re.sub('<.+?>', '', text).split('\n')
def clear(string):
string = re.sub('({.*?}|=\\\|”|\*.+?\%|\?|\=|~|“|_)', '', string)
string = re.sub('([A-Za-z])\\\([A-Za-z])', r'\1\2', string)
return string
def clear_attr(string):
string = re.sub('_', ' ', string)
string = re.sub('[{}]', '', string)
string = re.sub('&.*;', '', string)
return string
def parse_letter(letter):
formatted = ''
final = []
temp = []
conv_space = False
    # convert spaces inside curly-brace tags to underscores
for char in letter:
if char == '{':
conv_space = True
if char == '}':
conv_space = False
if conv_space and char == ' ':
formatted += '_'
else:
formatted += char
split_letter = formatted.split(' ')
return split_letter
def parse_people(people, type_):
from_ = re.sub('.*by (.+?) to.+', r'\1', people)
to = re.sub('.*to (.+?)', r'\1', people)
if type_ == 'from':
return from_.rstrip(',')
else:
return to
def parse_date(date_):
date = date_.split(' ')
if len(date) == 2:
date[0] = date[0][0:4]
if date[1] == '%CO':
date[1] = 'January'
        if date[1] == '30':
            date[1] = 'January'
date[1] = date[1].replace('-', '/')
date[1] = date[1].split('/')[0]
date.append('01')
else:
pass
if len(date) == 3:
months = {'January':'01',
'February':'02',
'March':'03',
'April':'04',
'Aprill': '04',
'May':'05',
'June':'06',
'July':'07',
'August':'08',
'September':'09',
'October':'10',
'November':'11',
'December':'12',
'unspecified':'01'}
year = date[0][0:4]
month = months[date[1]]
if '-' in date[2]:
date[2] = date[2].split('-')[0]
if '/' in date[2]:
date[2] = date[2].split('/')[0]
if len(date[2]) == 1:
day = '0' + date[2]
else:
day = date[2]
q = year + month + day
return q
else:
if date_ == 'unspecified':
date_ = ''
else:
date_ = date_[0:4] + '0101'
return date_
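# Illustrative example (ours, not from the original): parse_date('1624 March 15') -> '16240315'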
def rank_gender(meta, type_):
info = '_U'
if type_ == 'addressee':
keys = ['AF', 'AM', 'AR']
else:
keys = ['IF', 'IM', 'IR']
for k in keys:
if meta[k] != 'unspecified':
info = k
types = {'M': 'male', 'F': 'female', 'R': 'royal', 'U': 'unspecified'}
return types[info[1]]
def printfile(x):
'''
meta keys
id = letter id
inf = informant
MS = catalogue number in RNS/NLS
ST =
DA = detailed date
CO = writer and addressee
BI = information about previous editions
IF = informant's gender/rank
AR = addressee's gender/rank
HD1 = type
HD2 = type 2
LC = region of writer, place where letter was written
FN = filename
WC = word count
'''
meta = {'id':'',
'inf':'',
'MS':'unspecified',
'ST':'unspecified',
'DA':'unspecified',
'CO':'unspecified',
'BI':'unspecified',
'IF':'unspecified',
'IM':'unspecified',
'IR':'unspecified',
'AF':'unspecified',
'AM':'unspecified',
'AR':'unspecified',
'HD1':'unspecified',
'HD2':'unspecified',
'LC':'unspecified',
'FN':'unspecified',
'WC':'unspecified',
'fraser': 'unspecified'}
letter = ''
for l in x:
line = re.sub('</?[pP].*?>', '', l.lstrip())
line = line.strip()
if line.startswith('%'):
key = re.sub('%(.+?):.*', r'\1', line)
value = line.split(':')[1]
meta[key] = value.strip()
elif line.startswith('='):
meta['inf'] = line.strip('=')
elif line.startswith('#'):
meta['id'] = line.strip('# ')
elif line.startswith('{Fraser'):
meta['fraser'] = line.strip('{}')
elif line.startswith('+'):
pass
elif len(line) < 70:
pass
else:
letter = line
final = parse_letter(letter)
"""
if meta['HD1'] == 'unspecified' and meta['HD2'] != 'unspecified':
meta['HD1'] = meta['HD2']
"""
if ',' in meta['HD1']:
scripttype = meta['HD1'].split(',')[1].strip()
lettertype = meta['HD1'].split(',')[0].strip()
else:
scripttype = 'information unavailable'
lettertype = 'information unavailable'
if ',' in meta['HD2']:
scripttypetwo = meta['HD2'].split(',')[1].strip()
lettertypetwo = meta['HD2'].split(',')[0].strip()
else:
scripttypetwo = 'information unavailable'
lettertypetwo = 'information unavailable'
print('<text date="{datetitle}" datefrom="{date}" dateto="{date}" from="{from_}" to="{to}" largeregion="{large}" year="{year}" fraser="{fraser}" lettertype="{lettertype}" scripttype="{scripttype}" lettertypetwo="{lettertypetwo}" scripttypetwo="{scripttypetwo}" id="{id_}" bi="{bi}" ms="{ms}" fn="{fn}" srg="{srg}" arg="{arg}" lcinf="{lci}" lclet="{lcl}" wc="{wc}" st="{st}">'.format(year=re.sub('.*(\d\d\d\d).*', r'\1', meta['DA']),
id_=meta['id'],
bi=meta['BI'],
ms=meta['MS'],
fn=meta['FN'],
large=regions[meta['LC'].split(',')[0].strip()],
arg=rank_gender(meta, 'addressee'),
srg=rank_gender(meta, 'sender'),
lci=meta['LC'].split(',')[0].strip(),
lcl=meta['LC'].split(',')[1].strip(),
wc=meta['WC'],
scripttype=scripttype,
lettertype=lettertype,
scripttypetwo=scripttypetwo,
lettertypetwo=lettertypetwo,
st=meta['ST'],
date=parse_date(meta['DA']), datetitle = meta['DA'],
from_=parse_people(meta['CO'], 'from'),
to=parse_people(meta['CO'], 'to'),
fraser=meta['fraser']
))
print('<paragraph>')
print('<sentence>')
state = ''
for x in final:
if re.match('{.+}', x):
x = re.sub('_', ' ', x)
print(x)
print('</sentence>\n</paragraph>\n</text>')
printfile(read_file(''))
|
[
"jyrki.niemi@helsinki.fi"
] |
jyrki.niemi@helsinki.fi
|
ccc635fd5fc5453d6912d30b383cf2b274f3d005
|
23f6dbacd9b98fdfd08a6f358b876d3d371fc8f6
|
/rootfs/usr/lib/pymodules/python2.6/orca/scripts/apps/notify-osd/script.py
|
a3bce8bbb7aa7d53ee4003c63c14a568ae16186d
|
[] |
no_license
|
xinligg/trainmonitor
|
07ed0fa99e54e2857b49ad3435546d13cc0eb17a
|
938a8d8f56dc267fceeb65ef7b867f1cac343923
|
refs/heads/master
| 2021-09-24T15:52:43.195053
| 2018-10-11T07:12:25
| 2018-10-11T07:12:25
| 116,164,395
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 58
|
py
|
/usr/share/pyshared/orca/scripts/apps/notify-osd/script.py
|
[
"root@xinli.xinli"
] |
root@xinli.xinli
|
6ceec64b34f5c0a9c312b870046ea52c6f0f325a
|
2a7d0e2228994d76e679e25a344a19a5eac8313b
|
/Python/zamka.py
|
ed74ed3893bd6b214def5b8442f199438b83e68c
|
[
"MIT"
] |
permissive
|
JaredLGillespie/OpenKattis
|
ad1dfaa1ff7563401cc273620d0874f66487ca40
|
71d26883cb5b8a4a1d63a072587de5575d7c29af
|
refs/heads/master
| 2020-03-28T15:08:45.477934
| 2018-12-15T02:18:45
| 2018-12-15T02:18:45
| 148,560,939
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
# https://open.kattis.com/problems/zamka
l = int(input())
d = int(input())
x = int(input())
print(min([i for i in range(l, d + 1) if sum(map(int, str(i))) == x]))
print(max([i for i in range(l, d + 1) if sum(map(int, str(i))) == x]))
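# Equivalent single-pass sketch (a hypothetical refactor, not the submitted solution):
# candidates = [i for i in range(l, d + 1) if sum(map(int, str(i))) == x]
# print(min(candidates))
# print(max(candidates))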
|
[
"jaredlgillespie@yahoo.com"
] |
jaredlgillespie@yahoo.com
|
0850e4a010cb942035df138c5b89aea4d5507ffd
|
3ab62ab0e4e166154c810b16c148b1a0b0655050
|
/PuddleWorld3D.py
|
8f86c74855e0b96e0a3b73a15dd43dae5535f6b6
|
[] |
no_license
|
lsqshr/NS2
|
4d59de1e3515b680ebdcf5dbdb601d3723d2b24c
|
fd883e13dcebaf953807e27551b8e612d116ff78
|
refs/heads/master
| 2021-01-20T19:40:12.929083
| 2016-06-08T04:48:41
| 2016-06-08T04:48:41
| 60,575,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,397
|
py
|
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from euclid import *
from itertools import count
from lib.tf_rl.simulation.karpathy_game import GameObject
from utils import decimal_to_ternary
class Wall(object):
def __init__(self, center, rotatevec, width, height):
self.center = center
self.width = width
self.height = height
self.rotatevec = rotatevec
self._genpts()
def _genpts(self):
self._pts = []
basecenter = Point3(0, 0, 0)
basepoints = []
X = np.arange(-self.width/2, self.width/2, 0.02)
Y = np.arange(-self.height/2, self.height/2, 0.02)
# Get rotation matrix
q1 = Quaternion.new_rotate_axis(self.rotatevec.x, Vector3(1, 0, 0))
q2 = Quaternion.new_rotate_axis(self.rotatevec.y, Vector3(0, 1, 0))
q3 = Quaternion.new_rotate_axis(self.rotatevec.z, Vector3(0, 0, 1))
R = q1 * q2 * q3
# Get mesh
pts = [R * Point3(x, y, 0) for x, y in zip(X, Y)]
X, Y = np.meshgrid([p.x + self.center.x for p in pts], [p.y + self.center.y for p in pts])
self.meshx = X
self.meshy = Y
self.meshz = [p.z + self.center.z for p in pts]
def check_collision(self, oldp, newp):
# It now assumes the wall is orthogonal to one axis
if self.rotatevec.x > 0: # Find the axis it is orthogonal to
t = self.center.y
if (oldp.y - t) * (newp.y - t) < 0:
return True
else:
return False
elif self.rotatevec.y > 0:
t = self.center.x
if (oldp.x - t) * (newp.x - t) < 0:
return True
else:
return False
else: # It means it is still parallel to Z plane, only the z axis matters now
t = self.center.z
if (oldp.z - t) * (newp.z - t) < 0:
return True
else:
return False
def draw(self, ax):
ax.plot_wireframe(self.meshx, self.meshy, self.meshz)
class Stalker(GameObject):
def __init__(self, position, speed, angular_speed, walls, settings):
super(Stalker, self).__init__(position, speed, 'stalker', settings)
# Face rotation with Yaw-Pitch-Roll angles
self.num_actions = 27 * 2
phi = np.random.random_sample() * 2 * np.pi;
theta = np.random.random_sample() * 2 * np.pi;
psi = np.random.random_sample() * 2 * np.pi;
self.angles = Vector3(phi, theta, psi)
self.angular_speed = angular_speed; # euclid.Vector3
self.angular_diff = np.pi / 12
self.paddlepower = 0.0
self.walls = walls
def _is_collisions(self, p):
# Check the world boundary
if p.x < 0 or p.x > self.settings['world_size'][0] or\
p.y < 0 or p.y > self.settings['world_size'][1] or\
p.z < 0 or p.z > self.settings['world_size'][2]:
return True
for w in self.walls:
if w.check_collision(self.position, p):
return True
return False
def move(self, dt):
p = self.position + dt * self.speed
if not self._is_collisions(p):
self.position = p # Only update when this move does not collide with anything
def steer(self, action_id):
print('action:%d' % action_id)
# Handle rotation
        rotateid = action_id % 27  # 27 rotation codes; ids 27-53 repeat them with paddle power on
rotatecode = decimal_to_ternary(rotateid, 3)
rotatecode = [d-1 for d in rotatecode]
self.angular_speed = Vector3(rotatecode[0] * self.angular_diff, rotatecode[1] * self.angular_diff, rotatecode[2] * self.angular_diff)
# Handle leap
if action_id > 26:
self.paddlepower = self.settings['paddlepower']
else:
self.paddlepower = 0.0
def rotate(self, dt):
self.angles += dt * self.angular_speed
q1 = Quaternion.new_rotate_axis(self.angles.x, Vector3(1, 0, 0))
q2 = Quaternion.new_rotate_axis(self.angles.y, Vector3(0, 1, 0))
q3 = Quaternion.new_rotate_axis(self.angles.z, Vector3(0, 0, 1))
R = q1 * q2 * q3
basevec = Vector3(self.paddlepower, 0, 0)
rotatedvec = R * basevec
print(basevec, rotatedvec)
self.speed = rotatedvec
def step(self, dt):
# self.check_collisions()
self.rotate(dt)
self.move(dt)
print('step with speed %f, %f, %f\tangles %f, %f, %f\tpaddle-power:%f\tX:%f, Y:%f, Z:%f' \
% (self.speed.x, self.speed.y, self.speed.z, self.angles.x, self.angles.y, self.angles.z,\
self.paddlepower, self.position.x, self.position.y, self.position.z))
def draw(self, ax):
p = self.position
s = self.speed * 2
ax.scatter(p.x, p.y, p.z)
ax.plot([p.x, p.x + s.x], [p.y, p.y + s.y],[p.z, p.z + s.z])
class PuddleWorld3D(object):
def __init__(self, settings):
self.settings = settings
self.size = settings['world_size']
self.walls = []
self.walls.append(Wall(Point3(0.7, 0.5, 0.5), Vector3(0, np.pi/2, 0), 0.6, 0.6))
stalkersettings = {'object_radius': 0.01, \
'world_size': settings['world_size'], \
'paddlepower': settings['paddlepower']}
self.stalker = Stalker(Point3(0.5, 0.5, 0.5), Vector3(0, 0, 0), Vector3(0, 0, 0), self.walls, stalkersettings)
self.observation_size = 6 # Without coarse coding
def perform_action(self, action_id):
assert(0 <= action_id < self.stalker.num_actions)
self.stalker.steer(action_id)
    def spawn_object(self, obj_type):
        # Stalker's signature is (position, speed, angular_speed, walls, settings)
        self.stalker = Stalker(Point3(0.3, 0.3, 0.3), Vector3(0, 0, 0), Vector3(0, 0, 0), self.walls,
                               {'object_radius': 0.01, 'world_size': self.settings['world_size'],
                                'paddlepower': self.settings['paddlepower']})
def step(self, dt):
self.stalker.step(dt)
def observe(self):
observation = np.zeros(self.observation_size)
p = self.stalker.position
s = self.stalker.speed
observation[0:3] = [p.x, p.y, p.z]
observation[3:] = [s.x, s.y, s.z]
return observation
    def collect_reward(self):
        # reward shaping is not implemented yet; a neutral placeholder keeps the loop running
        return 0.0
def draw(self, ax):
for w in self.walls:
w.draw(ax)
self.stalker.draw(ax)
ax.set_xlim3d(0, 1)
ax.set_ylim3d(0, 1)
ax.set_zlim3d(0, 1)
# Tests
def testdrawwall():
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
wall = Wall(Point3(0.7, 0.7, 0.8), Vector3(0, np.pi/2, 0), 0.6, 0.6)
wall.draw(ax)
plt.show()
def teststalker():
# Randomly Generate Action ID for stalker
chunks_per_frame = 1
fps = 30.0
chunk_length_s = 1.0 / fps
settings = {'world_size': [1, 1, 1], 'paddlepower': 0.01}
world = PuddleWorld3D(settings)
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for frameidx in count():
for _ in range(chunks_per_frame):
            world.step(chunk_length_s)  # advance by the frame duration
new_ob = world.observe()
reward = world.collect_reward()
        world.perform_action(np.random.randint(27 * 2))  # randint's upper bound is exclusive
plt.cla()
world.draw(ax) # Draw world
plt.draw()
if __name__ == '__main__':
teststalker()
|
[
"lsqshr@gmail.com"
] |
lsqshr@gmail.com
|
e6aab578b33662764874352e24200262490ef1f3
|
438c369db51e74fe9be69a7d3486e1b0276574ab
|
/request.py
|
bd0f2c0b22b7eacd3fdff7c7d876338abada98e7
|
[] |
no_license
|
hsiangfeng/pythonExample
|
3d93a81cc8473ce5dfec8d17d0ad42aa8b14a3ea
|
f195aaf935b5126d7c671fa7c7d0534b1a8fa158
|
refs/heads/main
| 2023-04-01T00:55:48.707083
| 2021-04-12T08:17:38
| 2021-04-12T08:17:38
| 357,092,439
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
import requests
url = 'https://hsiangfeng.github.io/'
response = requests.get(url)
if response.status_code == 200:
    # a context manager closes the file even if the write fails
    with open('blog/index.html', 'w') as fp:
        fp.write(response.text)
else:
    print('Request failed')
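# Alternative sketch (ours): requests can raise on HTTP errors instead of
# checking status_code by hand:
# response.raise_for_status()  # raises requests.HTTPError for 4xx/5xx responses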
|
[
"loveyou520babytw@gmail.com"
] |
loveyou520babytw@gmail.com
|
dec38d35bb18391537f14e3e34debaa3e1baa9aa
|
17df1eb2f067f00f01a399b9b3e25d655100f458
|
/capstone_etl.py
|
9e11dc013847dc160d03dd61967b56b84b0838a7
|
[] |
no_license
|
priyanshigupta/Analysing-US-Immigration-Data-using-AWS-EMR-and-AWS-Athena
|
a7e8fa282e12b7b6654a0e5f53bc279250c40921
|
f3007476b7600271d26881ae9170ba2ce9f52e7e
|
refs/heads/main
| 2023-03-13T02:20:00.882116
| 2021-03-01T13:14:21
| 2021-03-01T13:14:21
| 343,263,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,296
|
py
|
import time
import configparser
from datetime import datetime
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format, dayofweek
from pyspark.sql.types import TimestampType, DateType
from pyspark.sql import functions as F
from datetime import datetime, timedelta
from pyspark.sql import types as T
start = time.time()
config = configparser.ConfigParser()
config.read('C:/Users/Priyanshi Gupta/Documents/Data Engineer NanoDegree/Capstone/Code/dl.cfg')
os.environ['AWS_ACCESS_KEY_ID']=config.get('AWS','AWS_ACCESS_KEY_ID')
os.environ['AWS_SECRET_ACCESS_KEY']=config.get('AWS','AWS_SECRET_ACCESS_KEY')
def create_spark_session():
""" Defining the spark session"""
spark = SparkSession \
.builder \
.config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0") \
.getOrCreate()
return spark
def process_airport_data(spark, input_data, output_data):
""" Processing airport data to create its
corresponding dimension to create table and
write it back to S3 in form of partitioned parquet files"""
#Read data from source
df=spark.read.format('csv').options(header='true').load(input_data)
#creating Airport dimension table
Dim_airport = df.select("ident", "type", "iata_code", "name", "iso_country","iso_region","municipality","gps_code","coordinates")
Dim_airport.createOrReplaceTempView("airport_table")
Dim_airport = spark.sql("""
SELECT DISTINCT ident,
type, iata_code, name, iso_country,iso_region,municipality,gps_code,coordinates
FROM airport_table
""")
Dim_airport.createOrReplaceTempView("Dim_airport_table")
## write Airport dimension table to parquet files partitioned by region into data lake
Dim_airport.write.partitionBy("iso_region").mode('overwrite').parquet(os.path.join(output_data, 'Airport'))
def process_us_demographic_data(spark, input_data, output_data):
""" Processing US Demographic data to create its
corresponding dimension to create table and
write it back to S3 in form of partitioned parquet files"""
#Read data from source
df=spark.read.format('csv').options(inferSchema="true",delimiter=";",header="true").load(input_data)
#creating us-cities-demographics dimension table
Stage_us_demo = df.select("City","State","Median Age","Male Population","Female Population","Total Population","Number of Veterans","Foreign-born","Average Household Size","State Code","Race","Count")
Stage_us_demo.createOrReplaceTempView("Stage_us_demo_table")
Dim_us_demo = spark.sql("""SELECT DISTINCT City,State,`Median Age`as Median_Age
,`Male Population` as Male_Population,`Female Population` as Female_Population,`Total Population` as Total_Population,
`Number of Veterans` as Number_of_Veterans,`Foreign-born` as Foreign_born,`Average Household Size` as Average_Household_Size,
`State Code` as State_Code FROM Stage_us_demo_table """)
Dim_us_demo.createOrReplaceTempView("Dim_us_demo_table")
## write us-cities-demographics dimension table to parquet files partitioned by State into data lake
Dim_us_demo.write.partitionBy("State").mode('overwrite').parquet(os.path.join(output_data, 'US_Demographic'))
#creating us-cities-demographics_by_Race dimension table
Dim_us_demo_by_race = spark.sql("""SELECT DISTINCT City,State,race,count FROM Stage_us_demo_table """)
Dim_us_demo_by_race.createOrReplaceTempView("Dim_us_demo_by_race_table")
## write us-cities-demographics_by_race dimension table to parquet files partitioned by State into data lake
Dim_us_demo_by_race.write.partitionBy("State").mode('overwrite').parquet(os.path.join(output_data, 'US_Demographic_by_Race'))
def process_i94_data(spark, input_data, output_data):
""" Processing Immigration data to create its
corresponding dimension to create table and
write it back to S3 in form of partitioned parquet files"""
#load data from source
df=spark.read.parquet(input_data)
#1) Converting Dates to proper format
def convert_datetime(x):
try:
start = datetime(1960, 1, 1)
return start + timedelta(days=int(x))
except:
return None
udf_datetime_from_sas = udf(lambda x: convert_datetime(x), T.DateType())
df=df.withColumn("arrival_date", udf_datetime_from_sas("arrdate")) \
.withColumn("departure_date", udf_datetime_from_sas("depdate"))
#creating i94 stage table
Stage_i94 = df.select("cicid","i94yr","i94mon","arrdate","depdate","arrival_date","departure_date","i94cit","i94res","i94port","i94mode","i94addr","i94bir","i94visa","visapost","entdepa","entdepd","entdepu","matflag","biryear","gender","airline","fltno","visatype")
Stage_i94=Stage_i94.withColumn("i94yr", Stage_i94['i94yr'].cast('int')).withColumn("i94mon", Stage_i94['i94mon'].cast('int'))\
.withColumn("i94mode", Stage_i94['i94mode'].cast('int')).withColumn("biryear", Stage_i94['biryear'].cast('int'))\
.withColumn("i94cit", Stage_i94['i94cit'].cast('int')).withColumn("i94res", Stage_i94['i94res'].cast('int'))\
.withColumn("arrdate", Stage_i94['arrdate'].cast('int')).withColumn("depdate", Stage_i94['depdate'].cast('int'))
Stage_i94.createOrReplaceTempView("Stage_i94_table")
#creating Time dimension from i94 stage
Dim_time = spark.sql("""SELECT DISTINCT arrdate as dateid, arrival_date as date,cast(i94yr as int) as year,cast(i94mon as int) as month FROM Stage_i94_table
union
SELECT DISTINCT depdate as dateid,departure_date as date,cast(i94yr as int) as year,cast(i94mon as int) as month FROM Stage_i94_table""")
Dim_time=Dim_time.withColumn('day',dayofmonth(Dim_time.date)).withColumn('weekday',date_format(col("date"), "EEEE"))\
.withColumn("week_num", date_format(col("date"), "W"))
Dim_time.createOrReplaceTempView("Dim_time_table")
## write Time dimension table to parquet files partitioned by year into data lake
Dim_time.write.partitionBy("year").mode('overwrite').parquet(os.path.join(output_data, 'Time'))
#Creating Status dimension from i94 stage
Dim_status = spark.sql("""SELECT DISTINCT entdepa,entdepd,entdepu,matflag FROM Stage_i94_table""")
Dim_status = Dim_status.withColumn("statusid", F.monotonically_increasing_id())
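# monotonically_increasing_id() guarantees unique, increasing surrogate keys,
# but not consecutive values (the partition ID is encoded in the upper bits)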
Dim_status.createOrReplaceTempView("Dim_status_table")
## write Status dimension table to parquet files into data lake
Dim_status.write.mode('overwrite').parquet(os.path.join(output_data, 'Status'))
#Creating arrival_port dimension from i94 stage
Dim_arrival_port = spark.sql("""SELECT DISTINCT i94port,i94addr FROM Stage_i94_table""")
Dim_arrival_port = Dim_arrival_port.withColumn("arrival_portid", F.monotonically_increasing_id())
Dim_arrival_port.createOrReplaceTempView("Dim_arrival_port_table")
## write Arrival_port dimension table to parquet files into data lake
Dim_arrival_port.write.mode('overwrite').parquet(os.path.join(output_data, 'Arrival_port'))
#Creating Flight_detail dimension from i94 stage
Dim_flight_dtl = spark.sql("""SELECT DISTINCT airline,fltno FROM Stage_i94_table""")
Dim_flight_dtl = Dim_flight_dtl.withColumn("fltid", F.monotonically_increasing_id())
Dim_flight_dtl.createOrReplaceTempView("Dim_flight_dtl_table")
## write Flight dimension table to parquet files into data lake
Dim_flight_dtl.write.mode('overwrite').parquet(os.path.join(output_data, 'Flight'))
#Create the Fact table by joining the stage back to the dimension tables
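# <=> is Spark SQL's null-safe equality: join keys that are NULL on both
# sides still match, so no staged rows are dropped when looking up surrogate keys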
Fact_i94_table = spark.sql("""
select distinct cicid,statusid,arrival_portid,fltid,src.i94addr as state_code, arrdate, depdate,i94mode,i94visa,visatype
from
(SELECT * from Stage_i94_table )src
LEFT join
(select * from Dim_status_table)stat
ON src.entdepa <=> stat.entdepa AND
src.entdepd <=> stat.entdepd AND
src.entdepu <=> stat.entdepu AND
src.matflag <=> stat.matflag
left join
(select * from Dim_arrival_port_table)arvl
ON src.i94port <=> arvl.i94port
and src.i94addr <=> arvl.i94addr
left join
(select * from Dim_flight_dtl_table)flt
ON src.airline <=> flt.airline
and src.fltno <=> flt.fltno
""")
## write i94 Fact table to parquet files partitioned by state_code into data lake
Fact_i94_table.write.partitionBy("state_code").mode('overwrite').parquet(os.path.join(output_data, 'i94_data'))
def main():
spark = create_spark_session()
output_data = "s3a://capstone-data-lake/"
#input_data = "/user/Capstone-data/airport-codes_csv.csv"
input_data = "C:/Users/Priyanshi Gupta/Documents/Data Engineer NanoDegree/Capstone/Dataset/airport-codes_csv.csv"
process_airport_data(spark, input_data, output_data)
input_data = "C:/Users/Priyanshi Gupta/Documents/Data Engineer NanoDegree/Capstone/Dataset/us-cities-demographics.csv"
#input_data = "/user/Capstone-data/us-cities-demographics.csv"
process_us_demographic_data(spark, input_data, output_data)
#input_data = "/user/Capstone-data/i94/part-*.parquet"
input_data = "C:/Users/Priyanshi Gupta/Documents/Data Engineer NanoDegree/Capstone/Dataset/i94/part-*.parquet"
process_i94_data(spark, input_data, output_data)
end = time.time()
print("Program finished after {:.2f} seconds".format(end - start))
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
priyanshigupta.noreply@github.com
|
cf5ee8bf40a239389bbca4a93cf0570748922d21
|
9805caed5b83b1ccd8f76ab82bb0fb3a9ecd414e
|
/Echocardiograph/lib/net/BScanSeg/Vnet8x.py
|
4a1cd89ccb42fc1acb1027616bfdc2c9c627f068
|
[] |
no_license
|
SwanKnightZJP/Echocardiography-Segmentation
|
7f7edaf74fa46165baaa65e8c9e17221b1ff2aa7
|
4ca8c006a10e3fdc21560aa1c4b5f33f695b9395
|
refs/heads/main
| 2023-07-21T14:19:28.801826
| 2021-08-25T08:55:21
| 2021-08-25T08:55:21
| 360,116,647
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,919
|
py
|
"""
BaseLine Vnet:
3D version
dilateConv / defaultConv
input_img:
shape: 112 * 112 * 112
input_tensor:
shape: B 1 112 112 112
torchsummary.test
2 1 112 112 112 -- at single card
================================================================
Total params: 45,600,316
Trainable params: 45,600,316
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 5.36
Forward/backward pass size (MB): 5112.17
Params size (MB): 173.95
Estimated Total Size (MB): 5291.48
----------------------------------------------------------------
linked at net/task/__init__.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
import os
import numpy as np
def passthrough(x, **kwargs):
return x
def ELUCons(elu, nchan):
if elu:
return nn.ELU(inplace=True)
else:
return nn.PReLU(nchan)
# normalization between sub-volumes is necessary
# for good performance
class ContBatchNorm3d(nn.modules.batchnorm._BatchNorm):
def _check_input_dim(self, input):
if input.dim() != 5:
raise ValueError('expected 5D input (got {}D input)'
.format(input.dim()))
# super(ContBatchNorm3d, self)._check_input_dim(input) ###############
def forward(self, input):
self._check_input_dim(input)
return F.batch_norm(
input, self.running_mean, self.running_var, self.weight, self.bias,
True, self.momentum, self.eps)
class LUConv(nn.Module):
def __init__(self, nchan, elu):
super(LUConv, self).__init__()
self.relu1 = ELUCons(elu, nchan)
self.conv1 = nn.Conv3d(nchan, nchan, kernel_size=5, padding=2)
self.bn1 = ContBatchNorm3d(nchan)
def forward(self, x):
out = self.relu1(self.bn1(self.conv1(x)))
return out
def _make_nConv(nchan, depth, elu):
layers = []
for _ in range(depth):
layers.append(LUConv(nchan, elu))
return nn.Sequential(*layers)
class InputTransition(nn.Module):
def __init__(self, outChans, elu):
super(InputTransition, self).__init__()
self.conv1 = nn.Conv3d(1, 8, kernel_size=5, padding=2)
self.bn1 = ContBatchNorm3d(8)
self.relu1 = ELUCons(elu, 8)
def forward(self, x):
# do we want a PRELU here as well?
out = self.bn1(self.conv1(x)) # x from c=1 to c=8
# repeat the single input channel to 8 channels
x8 = torch.cat((x, x, x, x, x, x, x, x), 1) # repeat 8 times
out = self.relu1(torch.add(out, x8)) # then add, channel-wise, the conv output and the repeated x
return out
class DownTransition(nn.Module):
def __init__(self, inChans, nConvs, elu, dropout=False):
super(DownTransition, self).__init__()
outChans = 2*inChans
self.down_conv = nn.Conv3d(inChans, outChans, kernel_size=2, stride=2)
self.bn1 = ContBatchNorm3d(outChans)
self.do1 = passthrough
self.relu1 = ELUCons(elu, outChans)
self.relu2 = ELUCons(elu, outChans)
if dropout:
self.do1 = nn.Dropout3d()
self.ops = _make_nConv(outChans, nConvs, elu)
def forward(self, x):
down = self.relu1(self.bn1(self.down_conv(x)))
out = self.do1(down)
out = self.ops(out)
out = self.relu2(torch.add(out, down))
return out
class UpTransition(nn.Module):
def __init__(self, inChans, outChans, nConvs, elu, dropout=False):
super(UpTransition, self).__init__()
self.up_conv = nn.ConvTranspose3d(inChans, outChans // 2, kernel_size=2, stride=2)
self.bn1 = ContBatchNorm3d(outChans // 2)
self.do1 = passthrough
self.do2 = nn.Dropout3d()
self.relu1 = ELUCons(elu, outChans // 2)
self.relu2 = ELUCons(elu, outChans)
if dropout:
self.do1 = nn.Dropout3d()
self.ops = _make_nConv(outChans, nConvs, elu)
def forward(self, x, skipx):
out = self.do1(x)
skipxdo = self.do2(skipx)
out = self.relu1(self.bn1(self.up_conv(out)))
xcat = torch.cat((out, skipxdo), 1)
out = self.ops(xcat)
out = self.relu2(torch.add(out, xcat))
return out
class OutputTransition(nn.Module):
"""
input: [B 32 112 112 112]
-- conv1(32,2,k=5,p=2) -- [B 2 112 112 112]
-- BN1 -- [ 2 ]
-- conv2(2,2,k=1) -- [B 2 112 112 112]
-- ELU -- [2]
-- permute(0, 2, 3, 4, 1).contiguous() -- [B 112 112 112 2]
-- view -- [B*W*H*Z 2]
-- softmax()
"""
def __init__(self, inChans, elu, nll, out_channel):
super(OutputTransition, self).__init__()
self.conv1 = nn.Conv3d(inChans, out_channel, kernel_size=5, padding=2)
self.bn1 = ContBatchNorm3d(out_channel)
self.conv2 = nn.Conv3d(out_channel, out_channel, kernel_size=1)
self.relu1 = ELUCons(elu, out_channel)
self.out_channel = out_channel
if nll:
self.softmax = nn.LogSoftmax(dim=1)
else:
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
# convolve the feature channels down to n output channels
out = self.relu1(self.bn1(self.conv1(x)))
out = self.conv2(out)
# make channels the last axis
out = out.permute(0, 2, 3, 4, 1).contiguous() # from [B C W H Z] -- [B W H Z C]
# flatten
out = out.view(out.numel() // self.out_channel, self.out_channel) # dimension change from [B C W H Z] to [B*W*H*Z, C]
# out = self.softmax(out)
# treat channel 0 as the predicted output
return out
class OutputTransitionMap(nn.Module):
"""
input: [B 32 112 112 112]
-- conv1(32,n,k=5,p=2) -- [B n 112 112 112]
-- BN1 -- [ n ]
-- conv2(n,n,k=1) -- [B n 112 112 112]
-- ELU -- [ n ]
-- softmax(dim=1)
"""
def __init__(self, inChans, elu, nll, out_channel):
super(OutputTransitionMap, self).__init__()
self.conv1 = nn.Conv3d(inChans, out_channel, kernel_size=5, padding=2)
self.bn1 = ContBatchNorm3d(out_channel)
self.conv2 = nn.Conv3d(out_channel, out_channel, kernel_size=1)
self.relu1 = ELUCons(elu, out_channel)
if nll:
self.softmax = nn.LogSoftmax(dim=1)
else:
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
# convolve the feature channels down to n output channels
out = self.relu1(self.bn1(self.conv1(x)))
out = self.conv2(out)
# make channels the last axis
# out = out.permute(0, 2, 3, 4, 1).contiguous()
# flatten
# out = out.view(out.numel() // 2, 2)
out = self.softmax(out)
# treat channel 0 as the predicted output
return out
class VNet(nn.Module):
# the number of convolutions in each layer corresponds
# to what is in the actual prototxt, not the intent
def __init__(self, elu=True, nll=False, out_channel=2):
super(VNet, self).__init__()
self.in_tr = InputTransition(8, elu) # size = input_X
self.down_tr16 = DownTransition(8, 1, elu) # 1 layer in 8 out 16 size=input_X/2
self.down_tr32 = DownTransition(16, 2, elu) # 2 layers in 16 out 32 size=input_X/4
self.down_tr64 = DownTransition(32, 3, elu, dropout=True) # 3 layers in 32 out 64 size=input_X/8
# -------------- warning !!! ---------------------- #
self.down_tr128 = DownTransition(64, 2, elu, dropout=True) # 2 layers in 64 out 128 size=input_X/16
self.up_tr128 = UpTransition(128, 128, 2, elu, dropout=True) # 2 layers in 128 out 128 size=input_X/8
self.up_tr64 = UpTransition(128, 64, 2, elu, dropout=True)
# self.up_tr128 = UpTransition(128, 128, 2, elu, dropout=True)
# -------------- warning !!! ---------------------- #
self.up_tr32 = UpTransition(64, 32, 1, elu)
self.up_tr16 = UpTransition(32, 16, 1, elu)
self.out_tr = OutputTransition(16, elu, nll, out_channel) # out channel = ? num_channel
self.out_map = OutputTransitionMap(16, elu, nll, out_channel) # out channel = ? num_channel
# The network topology as described in the diagram
# in the VNet paper
# def __init__(self):
# super(VNet, self).__init__()
# self.in_tr = InputTransition(16)
# # the number of convolutions in each layer corresponds
# # to what is in the actual prototxt, not the intent
# self.down_tr32 = DownTransition(16, 2)
# self.down_tr64 = DownTransition(32, 3)
# self.down_tr128 = DownTransition(64, 3)
# self.down_tr256 = DownTransition(128, 3)
# self.up_tr256 = UpTransition(256, 3)
# self.up_tr128 = UpTransition(128, 3)
# self.up_tr64 = UpTransition(64, 2)
# self.up_tr32 = UpTransition(32, 1)
# self.out_tr = OutputTransition(16)
def forward(self, x):
out8 = self.in_tr(x)
out16 = self.down_tr16(out8)
out32 = self.down_tr32(out16)
out64 = self.down_tr64(out32)
out128 = self.down_tr128(out64)
# torch.save(out8, 'out8E.pt')
# torch.save(out16, 'out16E.pt')
# torch.save(out32, 'out32E.pt')
# torch.save(out64, 'out64E.pt')
# torch.save(out128, 'out128E.pt')
out = self.up_tr128(out128, out64)
# torch.save(out, 'out128D.pt')
out = self.up_tr64(out, out32)
# torch.save(out64, 'out64D.pt')
out = self.up_tr32(out, out16)
# torch.save(out32, 'out32D.pt')
out = self.up_tr16(out, out8) # [2,16,112,112,112] (B,C,W,H,Z)
# torch.save(out16, 'out16D.pt')
out02 = self.out_map(out)
# torch.save(out02, 'outFin.pt')
# out01 = self.out_tr(out) # [2, 32->n, 112, 112, 112] --> [2*112*112*112, 2] warning !!!
# [B, n, 112, 112, 112] warning !!!
output = {'map': out02} # shape = {B, n, 112, 112, 112} -- Dice
# outLine = {'compress': out01} # shape = {n, BWHZ}} -- CrossEntropy
# output.update(outLine)
return output
def init(module):
if isinstance(module, nn.Conv3d) or isinstance(module, nn.ConvTranspose3d):
nn.init.kaiming_normal_(module.weight, 0.25)
nn.init.constant_(module.bias, 0)
def get_network(cfg, is_train):
net = VNet(elu=cfg.elu, nll=cfg.nll, out_channel=cfg.num_class+1)
net.apply(init)
return net
# --- debug_test --- #
if __name__ == '__main__':
test_net = VNet(elu=True, nll=False, out_channel=2+1)
os.environ['CUDA_VISIBLE_DEVICES'] = '3,2,1,0'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = test_net.to(device)
summary(test_net, (1, 128, 128, 160), batch_size=1) # channel/ depth / width / height
Final = 'the end'
print(Final)
|
[
"mnrsmzy@163.com"
] |
mnrsmzy@163.com
|
84143466f2c2c99f9e7f7b095dd726ff238f09af
|
d86c5aa92a9763510b539776510ad9795d33ae89
|
/September 2020/01-Lists-as-Stacks-and-Queues-Lab/04-Water-Dispenser.py
|
5869230151c9e3db1f63cdafc98081b14b610c02
|
[
"MIT"
] |
permissive
|
eclipse-ib/Software-University-Professional-Advanced-Module
|
42e3bd50ac5f0df8082add29f4113cffb87889e1
|
636385f9e5521840f680644824d725d074b93c9a
|
refs/heads/main
| 2023-02-13T06:02:53.246980
| 2021-01-06T21:12:14
| 2021-01-06T21:12:14
| 306,282,871
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 749
|
py
|
from collections import deque
water_quantity = int(input())
names = deque([])
while True:
name = input()
if name == "Start":
break
names.append(name)
while True:
commands = input().split()
if commands[0] == "End":
print(f"{water_quantity} liters left")
break
elif commands[0] == "refill":
added_water = int(commands[1])
water_quantity += added_water
else:
person_liters = int(commands[0])
if person_liters > water_quantity:
person = names.popleft()
print(f"{person} must wait")
continue
person = names.popleft()
print(f"{person} got water")
water_quantity -= person_liters
|
[
"65770519+eclipse-ib@users.noreply.github.com"
] |
65770519+eclipse-ib@users.noreply.github.com
|
71d446e42f1934646a3e8f22908c8ed75b492439
|
591b49e3a92de93254f60502553fdc4f900b9883
|
/euler008.py
|
92550de144aa0478a273dec4af71d4528e7d6579
|
[] |
no_license
|
drewatk/projecteuler
|
bde65f3bd7194e866dda6d66c9720f96bc1525e3
|
10592f3c274efca8bf0ca9afb13fab42c1ccdef6
|
refs/heads/master
| 2020-04-05T23:41:03.186752
| 2015-09-08T21:07:22
| 2015-09-08T21:07:22
| 42,138,852
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,307
|
py
|
#!/usr/bin/env python
from numpy import prod
number = 7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450
number_string = str(number)
digits = []
for ch in number_string:
digits.append(int(ch))
highest = 0
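# slide a 13-digit window (digits[i-12] .. digits[i]) across the number and keep the max product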
for i in range(12, len(digits)):
l = digits[i - 12: i + 1]
p = prod(l)
if p > highest:
highest = p
print(highest)
|
[
"drewatkinson5@gmail.com"
] |
drewatkinson5@gmail.com
|
03a4058a228eaf68c015f9e7bf3ee0a506cba37e
|
a2f7fb39609201af57732998b5f1c0e2bfac7cb6
|
/QrCode.py
|
6d44760aa48a9e0b03e5420b7c6e4be104ba0a35
|
[
"MIT"
] |
permissive
|
vkyadavdel/hacktoberfest-coer
|
67b04a0f861dc513a71fac96a8144d24c07fefe7
|
ece4cc958d588169a5f1550ecfaf9bbe532b2bdd
|
refs/heads/main
| 2023-08-31T06:32:47.423218
| 2021-10-02T04:28:14
| 2021-10-02T04:28:14
| 412,683,176
| 0
| 0
|
MIT
| 2021-10-02T07:21:16
| 2021-10-02T03:51:16
|
Python
|
UTF-8
|
Python
| false
| false
| 365
|
py
|
import qrcode
# Link for website
input_data = "https://towardsdatascience.com/face-detection-in-10-lines-for-beginners-1787aa1d9127"
#Creating an instance of qrcode
qr = qrcode.QRCode(
version=1,
box_size=10,
border=5)
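# version=1 is the smallest (21x21-module) symbol, box_size is the pixel width of
# each module, and border is the quiet-zone width in modules (the QR spec minimum is 4)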
qr.add_data(input_data)
qr.make(fit=True)
img = qr.make_image(fill='black', back_color='white')
img.save('qrcode001.png')
|
[
"noreply@github.com"
] |
vkyadavdel.noreply@github.com
|
1c06ff41ef48ef2f0d60901b600ba444363ac321
|
6a601728cefef4e18106c0919c6bfde29c894218
|
/setup.py
|
a1ff200fcfb4d30f1517efbfc406107b3c173b9f
|
[] |
no_license
|
pombredanne/drf-nested-decorator
|
a2cd8a367ff16959de451c09738490ba8b5d1a12
|
7b101adcd43a6f4b2d431b5e55814b671947c46a
|
refs/heads/master
| 2021-01-24T21:07:43.835332
| 2015-07-13T14:03:55
| 2015-07-13T14:03:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 909
|
py
|
# -*- coding: utf-8 -*-
from distutils.core import setup
import sys
PY_VERSION = sys.version_info[0], sys.version_info[1]
if PY_VERSION < (3, 0):
long_description = open('README.md').read()
else:
long_description = open('README.md', encoding='utf-8').read()
setup(
name='drf-nested-decorator',
version='0.3',
author=u'Zowie Langdon',
author_email='zowie@akoten.com',
packages=['drf_nested_decorator'],
url='https://github.com/Akoten/drf-nested-decorator',
license='None yet',
description='An extra decorator for Django Rest Framework that allows methods of a viewset to accept a nested key.',
long_description=long_description,
zip_safe=False,
install_requires=['djangorestframework>=3.1.3'],
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3.3',
],
)
|
[
"zowielangdon@gmail.com"
] |
zowielangdon@gmail.com
|
c50bbd3c71af60fe7dda63c1ea6cb14c9c27a559
|
001331667834d46cf06646bcfe3653fe9c65e5b7
|
/project_5_quadcopter/agents/ou_noise.py
|
b3f0f7fc0cf49ba328308c06e3206763928fb78b
|
[] |
no_license
|
wanghsinwei/machine-learning-udacity
|
688d74adf36a49abbde53d98ae52cb6468eb5f06
|
80afa496488930ed93abee404f01ff9134ae5c91
|
refs/heads/master
| 2022-12-02T22:41:56.012722
| 2019-08-29T19:32:23
| 2019-08-29T19:32:23
| 204,920,022
| 1
| 0
| null | 2022-11-22T01:39:59
| 2019-08-28T11:53:13
|
HTML
|
UTF-8
|
Python
| false
| false
| 891
|
py
|
import numpy as np
import copy
class OUNoise:
"""Ornstein-Uhlenbeck process."""
# It essentially generates random samples from a Gaussian (normal) distribution,
# but each sample affects the next one, so two consecutive samples are more
# likely to be close together than far apart.
def __init__(self, size, mu, theta, sigma):
"""Initialize parameters and noise process."""
self.mu = mu * np.ones(size)
self.theta = theta
self.sigma = sigma
self.reset()
def reset(self):
"""Reset the internal state (= noise) to mean (mu)."""
self.state = copy.copy(self.mu)
def sample(self):
"""Update internal state and return it as a noise sample."""
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))
self.state = x + dx
return self.state
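# A minimal sampling sketch (parameter values are illustrative, not from the project):
#   noise = OUNoise(size=4, mu=0.0, theta=0.15, sigma=0.2)
#   for _ in range(3):
#       print(noise.sample())  # consecutive samples drift around mu instead of jumping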
|
[
"wanghsinwei@msn.com"
] |
wanghsinwei@msn.com
|
65ef10bde58d6cc5085d5fcb6ad2c9d19be6fe09
|
89fbfa05cb69360772be76b3dd6b956cfa58baad
|
/Hangman/GetWord.py
|
5933c942f4ca3412bb8ff04a7fc5c44dcb5e978c
|
[] |
no_license
|
nilam23/Mini_Projects
|
0cdb48132a36060840c9e0149af3a1c66418107d
|
c31c61b63d37414814494c41bb6ef0215fcfaeb8
|
refs/heads/master
| 2022-11-06T18:24:50.463494
| 2020-06-25T20:21:42
| 2020-06-25T20:21:42
| 264,739,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
import random
def GetRandomIndex():
with open('words.txt', 'r') as file:
content=file.readlines()
limit=len(content)
index=random.randint(1, limit)
# no explicit close() needed; the with-statement closes the file
return (index)
def GetRandomWord():
index=GetRandomIndex()
with open('words.txt', 'r') as file:
i=1
while i<=index:
word=file.readline()
word=word.strip()
if i==index:
return word
i=i+1
|
[
"nilampathak72@gmail.com"
] |
nilampathak72@gmail.com
|
afe48de8c8fd31d56cc097cb54c8f1cd53f65d45
|
b7553d43485ec4e236c9a7675c198075375312f1
|
/implementing_multiple_dispatch_with_function_annotations_20.py
|
4ab0794e150aaa187b5fe056e28f4d6439151179
|
[] |
no_license
|
aleph2c/meta_programming_in_python
|
556f98f7ffbbfc6bb391fb624015039c1f06bd4a
|
8cc1f4c2faa1b04d3ea978ddd2cd85defdd21fa3
|
refs/heads/master
| 2020-07-13T23:46:09.849494
| 2019-09-04T13:16:46
| 2019-09-04T13:16:46
| 205,181,600
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,134
|
py
|
# method overloading based on types in python
# could we write code like this?
# class Spam:
# def bar(self, x:int, y:int):
# print('Bar 1', x, y)
# def bar(self, s:str, n:int=0):
# print('Bar 2', s, n)
#
# s = Spam()
# s.bar(2, 3) # => Bar 1: 2 3
# s.bar('hello') # => Bar 2: hello 0
import inspect
import types
class MultiMethod:
'''Represents a single multimethod.'''
def __init__(self, name):
self._methods = {}
self.__name__ = name
def register(self, method):
'''register a new method as a multimethod'''
sig = inspect.signature(method)
# Build a type signature from the method's annotation
types = []
for name, param in sig.parameters.items():
if name == 'self':
continue
if param.annotation is inspect.Parameter.empty:
raise TypeError(
'Argument {} must be annotated with a type'.format(name)
)
if not isinstance(param.annotation, type):
raise TypeError(
'Argument {} annotation must be a type'.format(name)
)
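# If the argument has a default value, also register the signature seen so far,
# so the method can be called without that trailing argument.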
if param.default is not inspect.Parameter.empty:
self._methods[tuple(types)] = method
types.append(param.annotation)
self._methods[tuple(types)] = method
def __call__(self, *args):
'''
call a method based on type signature of the arguments
'''
types = tuple(type(arg) for arg in args[1:])
method = self._methods.get(types, None)
if method:
return method(*args)
else:
raise TypeError('No matching method for types {}'.format(types))
def __get__(self, instance, cls):
'''
descriptor method needed to make calls work in a class
'''
if instance is not None:
return types.MethodType(self, instance)
else:
return self
class MultiDict(dict):
'''
special dictionary to build multimethods in a metaclass
'''
def __setitem__(self, key, value):
if key in self:
# If the key already exists, it must be a multimethod or callable
current_value = self[key]
if isinstance(current_value, MultiMethod):
current_value.register(value)
else:
mvalue = MultiMethod(key)
mvalue.register(current_value)
mvalue.register(value)
super().__setitem__(key, mvalue)
else:
super().__setitem__(key, value)
class MultipleMeta(type):
'''
Metaclass that allows multiple dispatch of methods
'''
def __new__(cls, clsname, bases, clsdict):
return type.__new__(cls, clsname, bases, dict(clsdict))
@classmethod
def __prepare__(cls, clsname, bases):
return MultiDict()
# to use this code:
class Spam(metaclass=MultipleMeta):
def bar(self, x:int, y:int):
print('Bar 1:', x, y)
def bar(self, s:str, n:int = 0):
print('Bar 2:', s, n)
# Example: overloading __init__
import time
class Date(metaclass=MultipleMeta):
def __init__(self, year:int, month:int, day:int):
self.year = year
self.month = month
self.day = day
def __init__(self):
t = time.localtime()
self.__init__(t.tm_year, t.tm_mon, t.tm_mday)
s = Spam()
s.bar(2, 3)
s.bar('hello')
d = Date(2012, 12, 21)
e = Date()
print(e.year)
|
[
"scottvolk@gmail.com"
] |
scottvolk@gmail.com
|
9ea7f7f60d5ac0a795ae27f5cb043b203376e2b7
|
8a4ac7b0d48a44dcf66d683218b87a2f2a3b3c9e
|
/ITGK øvinger/Øving 9 uke 45/Forelesninger/Lottomengde.py
|
f0ae0a1c55a392f31668b255f90ebd5f9d67b216
|
[] |
no_license
|
jorul/ITGK
|
9445e7004a31b49465d0720a75afc67ced871d7e
|
579414998cbb1a79599dc679e4854684c732452f
|
refs/heads/master
| 2020-07-07T14:36:06.924590
| 2019-08-21T19:16:37
| 2019-08-21T19:16:37
| 147,521,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 322
|
py
|
vinnertall = {1,5,10,15,20,25,30}
mine_tall = set()
for i in range(7):
mine_tall.add(int(input('Skriv et tall: ')))
def sjekke_rekke(gjett, vinnertall):
riktige = gjett.intersection(vinnertall)
return len(riktige)
print(sjekke_rekke(mine_tall,vinnertall))
# Frequency-count snippet from the lecture; requires a list named `liste`:
freq = {}
for item in liste:
freq[item] = freq.get(item, 0) + 1
|
[
"jorul@stud.ntnu.no"
] |
jorul@stud.ntnu.no
|
cf28c7d8a1554d94b8906b7ebec17e1532bb844b
|
a4c9bdbf4ecaec29667b8b5799bbf647d274e291
|
/ProyectoSena/AppSena/migrations/0001_initial.py
|
c535191808cabcdfd6349b746839b566b4f282ee
|
[] |
no_license
|
alejandra0407/ProyectoSena
|
77221cb16e51413da7101036b95e2a5efbfb58db
|
d39cd4395811858937e60d6e84182a94609c067c
|
refs/heads/master
| 2020-03-13T23:49:43.567226
| 2018-04-27T22:13:55
| 2018-04-27T22:13:55
| 131,343,642
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,484
|
py
|
# Generated by Django 2.0.2 on 2018-04-27 19:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Ficha',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('numeroFicha', models.CharField(max_length=20, unique=True)),
('jornada', models.CharField(choices=[('Mañana', 'Mañana'), ('Tarde', 'Tarde'), ('Noche', 'Noche')], max_length=20)),
('ambiente', models.CharField(max_length=50)),
('lider', models.CharField(max_length=100)),
('fechaFinEtapaLectiva', models.DateField()),
],
),
migrations.CreateModel(
name='Permiso',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('motivo', models.CharField(choices=[('Enfermedad', 'Enfermedad'), ('Accidente', 'Accidente'), ('Calamidad domestica', 'Calamidad domestica'), ('Otro', 'Otro')], max_length=45)),
('solicitoPermisoPor', models.CharField(blank=True, max_length=300, null=True)),
('permisoPorHora', models.CharField(blank=True, max_length=45, null=True)),
('permisoPorDias', models.CharField(blank=True, max_length=45, null=True)),
('horaSalida', models.CharField(max_length=45)),
('fecha', models.DateField()),
],
),
migrations.CreateModel(
name='Permiso_persona',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('estado', models.CharField(choices=[('En Espera', 'En Espera'), ('Aprobado', 'Aprobado'), ('Cancelado', 'Cancelado'), ('Rechazado', 'Rechazado'), ('Finalizado', 'Finalizado')], max_length=20)),
('instructor', models.CharField(blank=True, max_length=100, null=True)),
('vigilante', models.CharField(blank=True, max_length=100, null=True)),
('permiso', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='AppSena.Permiso')),
],
),
migrations.CreateModel(
name='Persona',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('documentoIdentidad', models.CharField(max_length=20, unique=True)),
('primerNombre', models.CharField(max_length=45)),
('segundoNombre', models.CharField(blank=True, max_length=45, null=True)),
('primerApellido', models.CharField(max_length=45)),
('segundoApellido', models.CharField(blank=True, max_length=45, null=True)),
('contacto', models.CharField(max_length=10)),
('usuario', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Persona_ficha',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ficha', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='AppSena.Ficha')),
('persona', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='AppSena.Persona')),
],
),
migrations.CreateModel(
name='Programa',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=100)),
('codigoPrograma', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Rol',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rol', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Rol_persona',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('persona', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='AppSena.Persona')),
('rol', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='AppSena.Rol')),
],
),
migrations.AddField(
model_name='persona_ficha',
name='programa',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='AppSena.Programa'),
),
migrations.AddField(
model_name='permiso_persona',
name='persona',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='AppSena.Persona'),
),
migrations.AddField(
model_name='ficha',
name='programa',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='AppSena.Programa'),
),
]
|
[
"mamapallo2@misena.edu.co"
] |
mamapallo2@misena.edu.co
|
e4f93eee93cf78c39f0897ac23b5fa9ebded5576
|
32a56cf1f4764adc75b6fab32d6e14bfecdeaf97
|
/Django Level 2/ProTwo/AppTwo/urls.py
|
fdcc4173ea9494ce96bdaf46868cb2c363d2f2a8
|
[] |
no_license
|
sanchit-zeus/Workspace_html
|
52dfef7881e3c1f309bc6904a8887dcbc593728c
|
4ab344a151be2b426ecd9271ba7d877d64ab8808
|
refs/heads/master
| 2020-05-09T13:19:41.981502
| 2019-04-13T09:19:25
| 2019-04-13T09:19:25
| 181,147,101
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 116
|
py
|
from django.conf.urls import url
from AppTwo import views
urlpatterns = [
url(r'^$',views.users,name='users')
]
|
[
"35419687+sanchit-zeus@users.noreply.github.com"
] |
35419687+sanchit-zeus@users.noreply.github.com
|
814f92f698b5160248339fcd2097feb6ff2efc10
|
ae043b9178cdcb22fe701954e90fc377efbc4a36
|
/minFQ/Errors.py
|
234cee30da94487de8d0cccd43cf95033c55dcc2
|
[
"MIT"
] |
permissive
|
LooseLab/minotourcli
|
8d637a238eb830008a4557feab0058f778ce92a6
|
f70155c8795eb3d6faa80f6e7553f24f5d508309
|
refs/heads/master
| 2022-07-29T08:56:52.069841
| 2021-09-14T14:39:59
| 2021-09-14T14:39:59
| 117,558,065
| 9
| 2
|
MIT
| 2022-04-13T11:38:16
| 2018-01-15T14:50:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,454
|
py
|
class MTConnectionError(Exception):
"""
Exception raised when we get an unexpected status code from a connection to the minoTour server
"""
def __init__(
self, response, message="Unexpected status code (not in expected values)"
):
"""
Parameters
----------
response: requests.models.Response
The requests object from the request
message: str
The message to display at the bottom of the Exception
"""
self.response = response
self.message = message
super().__init__(self.message)
def __str__(self):
"""
Overrides the displayed message string.
Returns
-------
str
"""
lookup_message = {401: "Nuh uh - unauthorised", 500: "MinoTour says no"}
return "{} \n -> {} \n -> \n {} \n -> {}".format(
self.response.status_code,
lookup_message.get(self.response.status_code, ""),
self.message,
self.response.text,
)
def except_rpc(wrapped_function):
def _wrapper(*args, **kwargs):
try:
# do something before the function call
result = wrapped_function(*args, **kwargs)
# do something after the function call
except TypeError:
print("TypeError")
except IndexError:
print("IndexError")
# return result
return _wrapper
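# A minimal usage sketch (the decorated function below is hypothetical):
#
#   @except_rpc
#   def first_read_name(reads):
#       return reads[0]["name"]
#
# TypeError and IndexError raised inside the wrapped call are caught and printed.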
|
[
"roryjmunro1@gmail.com"
] |
roryjmunro1@gmail.com
|
35d4c19d8deb5ccdec9b14b7e39019967fe44be3
|
d2cdf402308eb117968a036fa89fe7b32ec40de3
|
/levenschtein_distance.py
|
363c94d7dd2dd22e9381dfad022421ed433b2f42
|
[] |
no_license
|
jaronjlee/algorithms
|
1a0aeace0a36ce6b2abe30203ae4f8c5818d11a7
|
222534b031a85a0d6a74a4f692713afbdf657303
|
refs/heads/main
| 2023-03-25T23:43:08.258550
| 2021-03-17T20:35:48
| 2021-03-17T20:35:48
| 305,541,863
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
def levenshteinDistance(str1, str2):
# build labels for table
edits = [[x for x in range(len(str1) + 1)] for y in range(len(str2) + 1)]
for i in range(1, len(str2) + 1):
edits[i][0] = edits[i - 1][0] + 1
#build rest of table
for i in range(1, len(str2) + 1):
for j in range(1, len(str1) + 1):
if str2[i-1] == str1[j - 1]:
edits[i][j] = edits[i-1][j-1]
else:
edits[i][j] = 1 + min(edits[i-1][j-1], edits[i][j-1], edits[i-1][j])
return edits[-1][-1]
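# Example: levenshteinDistance("abc", "yabd") == 2 (insert 'y', then substitute 'c' with 'd')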
|
[
"jaronjlee@gmail.com"
] |
jaronjlee@gmail.com
|
ac38edd12a2bfa6d2c1d0d3d6327b08234196573
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/third_party/tflite/src/tensorflow/python/ops/linalg/linear_operator_lower_triangular.py
|
1c73a2013972d1b22757d55dea709883bf19ef6c
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552
| 2023-07-20T15:17:00
| 2023-07-23T16:09:30
| 220,016,632
| 341
| 40
|
BSD-3-Clause
| 2021-08-13T13:54:45
| 2019-11-06T14:32:31
| null |
UTF-8
|
Python
| false
| false
| 7,998
|
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` acting like a lower triangular matrix."""
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"LinearOperatorLowerTriangular",
]
@tf_export("linalg.LinearOperatorLowerTriangular")
@linear_operator.make_composite_tensor
class LinearOperatorLowerTriangular(linear_operator.LinearOperator):
"""`LinearOperator` acting like a [batch] square lower triangular matrix.
This operator acts like a [batch] lower triangular matrix `A` with shape
`[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `N x N` matrix.
`LinearOperatorLowerTriangular` is initialized with a `Tensor` having
dimensions `[B1,...,Bb, N, N]`. The upper triangle of the last two
dimensions is ignored.
```python
# Create a 2 x 2 lower-triangular linear operator.
tril = [[1., 2.], [3., 4.]]
operator = LinearOperatorLowerTriangular(tril)
# The upper triangle is ignored.
operator.to_dense()
==> [[1., 0.]
[3., 4.]]
operator.shape
==> [2, 2]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor
# Create a [2, 3] batch of 4 x 4 linear operators.
tril = tf.random.normal(shape=[2, 3, 4, 4])
operator = LinearOperatorLowerTriangular(tril)
```
#### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [N, N], with b >= 0
x.shape = [B1,...,Bb] + [N, R], with R >= 0.
```
#### Performance
Suppose `operator` is a `LinearOperatorLowerTriangular` of shape `[N, N]`,
and `x.shape = [N, R]`. Then
* `operator.matmul(x)` involves `N^2 * R` multiplications.
* `operator.solve(x)` involves `N * R` size `N` back-substitutions.
* `operator.determinant()` involves a size `N` `reduce_prod`.
If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
tril,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorLowerTriangular"):
r"""Initialize a `LinearOperatorLowerTriangular`.
Args:
tril: Shape `[B1,...,Bb, N, N]` with `b >= 0`, `N >= 0`.
The lower triangular part of `tril` defines this operator. The strictly
upper triangle is ignored.
is_non_singular: Expect that this operator is non-singular.
This operator is non-singular if and only if its diagonal elements are
all non-zero.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. This operator is self-adjoint only if it is diagonal with
real-valued diagonal entries. In this case it is advised to use
`LinearOperatorDiag`.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
Raises:
ValueError: If `is_square` is `False`.
"""
parameters = dict(
tril=tril,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name
)
if is_square is False:
raise ValueError(
"Only square lower triangular operators supported at this time.")
is_square = True
with ops.name_scope(name, values=[tril]):
self._tril = linear_operator_util.convert_nonref_to_tensor(tril,
name="tril")
self._check_tril(self._tril)
super(LinearOperatorLowerTriangular, self).__init__(
dtype=self._tril.dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters,
name=name)
@property
def tril(self):
"""The lower triangular matrix defining this operator."""
return self._tril
def _check_tril(self, tril):
"""Static check of the `tril` argument."""
if tril.shape.ndims is not None and tril.shape.ndims < 2:
raise ValueError(
"Argument tril must have at least 2 dimensions. Found: %s"
% tril)
def _get_tril(self):
"""Gets the `tril` kwarg, with upper part zero-d out."""
return array_ops.matrix_band_part(self._tril, -1, 0)
def _get_diag(self):
"""Gets the diagonal part of `tril` kwarg."""
return array_ops.matrix_diag_part(self._tril)
def _shape(self):
return self._tril.shape
def _shape_tensor(self):
return array_ops.shape(self._tril)
def _assert_non_singular(self):
return linear_operator_util.assert_no_entries_with_modulus_zero(
self._get_diag(),
message="Singular operator: Diagonal contained zero values.")
def _matmul(self, x, adjoint=False, adjoint_arg=False):
return math_ops.matmul(
self._get_tril(), x, adjoint_a=adjoint, adjoint_b=adjoint_arg)
def _determinant(self):
return math_ops.reduce_prod(self._get_diag(), axis=[-1])
def _log_abs_determinant(self):
return math_ops.reduce_sum(
math_ops.log(math_ops.abs(self._get_diag())), axis=[-1])
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
return linalg.triangular_solve(
self._get_tril(), rhs, lower=True, adjoint=adjoint)
def _to_dense(self):
return self._get_tril()
def _eigvals(self):
return self._get_diag()
@property
def _composite_tensor_fields(self):
return ("tril",)
@property
def _experimental_parameter_ndims_to_matrix_ndims(self):
return {"tril": 2}
|
[
"jengelh@inai.de"
] |
jengelh@inai.de
|
884882ce343a9b5222f18692d40b1942e83690de
|
ba028902a8593c78887c388ff1cf7d348586995c
|
/greet/app.py
|
4163128ea5ba8e00a0b031c642307b09a32c36dd
|
[] |
no_license
|
Monkeysteingames/flask-greet-calc
|
7dfb7a1167afa2bc5d93037db63819a0c4f12ec3
|
458295cf7cc9da38a466fc72b14076e74979a5e1
|
refs/heads/master
| 2023-08-15T13:28:24.710480
| 2021-09-25T20:54:32
| 2021-09-25T20:54:32
| 410,372,321
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
from flask import Flask
app = Flask('a')
@app.route('/welcome')
def welcome():
return 'welcome'
@app.route('/welcome/home')
def welcome_home():
return 'welcome home'
@app.route('/welcome/back')
def welcome_back():
return 'welcome back'
|
[
"40122779+Monkeysteingames@users.noreply.github.com"
] |
40122779+Monkeysteingames@users.noreply.github.com
|
62a8dfb144a52d032da3ce929f95f898ffbc344a
|
dbca22d3ab295e5e4903d49c9ac7f1f444061583
|
/для репозитория/sketch_8_0/sketch_8_0.pyde
|
1cb80560822fcc2cf1763db9ae1fd48fce716112
|
[
"MIT"
] |
permissive
|
Kotkovvv/2019-fall-polytech-cs
|
6c8062a9f088f66d09a1832c26fde141c599f5cd
|
a5cba5b419a03bb8df784afcdddd8d8c815ffd84
|
refs/heads/master
| 2022-03-08T10:18:10.333551
| 2019-12-19T10:16:40
| 2019-12-19T10:16:40
| 213,690,067
| 1
| 0
|
MIT
| 2022-02-18T15:46:16
| 2019-10-08T16:04:04
|
Python
|
UTF-8
|
Python
| false
| false
| 235
|
pyde
|
size(600,600)
noLoop()
background(100)
smooth()
strokeWeight(50)
stroke(200)
translate(width/2,height/2-100)
line(-100,0,100,0)
translate(0,100)
scale(1.5,1.5)
line(-100,0,100,0)
translate(0,-150)
scale(1.5,1.5)
line(-100,0,100,0)
|
[
"kotkov.vv@edu.spbstu.ru"
] |
kotkov.vv@edu.spbstu.ru
|
1c0f3b9d943c27964bbe45dc63f338fa01db28d8
|
477cc717d0a9b83a816713cce0003606138d0cd5
|
/django_apidoc_center/urls.py
|
e29d0a25a2572b6bee6ff583f986b9b779ae4add
|
[] |
no_license
|
jiangningstar/django_apidoc_center
|
ba7616a3915afeb0397be4220cb2d6131c48e790
|
fd3191712d87aaef187eaf629dd32c1640b0a6d6
|
refs/heads/master
| 2020-03-27T12:35:13.568573
| 2018-08-29T06:30:14
| 2018-08-29T06:30:14
| 145,812,088
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
# -*- coding: utf-8 -*-
"""django_apidoc_center URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^api/v1/', include('apidoc_center.api.urls')),
]
|
[
"jiangning@yun-ji.cn"
] |
jiangning@yun-ji.cn
|
c5a3c0f9f0edf08a46992d9ef551dc43da740d11
|
afbbd4079e7d3b3375392483f889cf927c017af5
|
/Coursework3/LoadDatatest3.py
|
512f210c8d328e88ac7903e472d85b6d085794cc
|
[] |
no_license
|
friver01/basiccode
|
bc6cd7098547d9e4d06d3a38e860efbd0707957b
|
9d5274e2a827c39e7bd40d7381bd1663a41d52be
|
refs/heads/master
| 2022-12-06T19:58:59.958661
| 2020-09-02T10:58:51
| 2020-09-02T10:58:51
| 235,529,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,893
|
py
|
## LoadData Function
# Read the EGL and RGS files
def LoadData (sName):
# Define the lists where we will be appending values:
outcomes = []
propertyNames = []
ranges = []
SEPARATOR = '----------'
rangesmin = []
rangesmax = []
# Open the input file
CompleteFileName = (str(sName) + '.dat')
print (CompleteFileName)
inputFile = open (CompleteFileName, 'r')
for line in inputFile:
line = line.rstrip() # Removes trailing whitespace, including the newline
fields = line.split (',')
if len (fields) == 3:
defproperty = fields [0]
propertyNames.append (defproperty)
defrangesmin= float(fields [1])
rangesmin.append (defrangesmin)
defrangesmax= float(fields [2])
rangesmax.append (defrangesmax)
else:
if fields [0] != SEPARATOR:
defoutcomes = fields [0]
outcomes.append (defoutcomes)
for i in range(0, len(rangesmin)):
srange = [rangesmin [i], rangesmax [i]]
ranges.append (srange)
variables = (outcomes, propertyNames, ranges)
print (outcomes)
print (propertyNames)
print (ranges)
print (variables)
#Complete this function for Q1
# You may add any helper functions you wish
inputFile.close()
return (outcomes, propertyNames, ranges)
# Check files in the directory
import os
contents = os.listdir()
print (contents)
# Prompt for the input file name
inputFileName = input ('Input file: ')
sname = (str(inputFileName) + '.dat')
print (sname)
while not os.path.exists (sname):
print ('Data for this institute not currently available')
inputFileName = input ('Input file: ')
sname = (inputFileName + '.dat')
LoadData (inputFileName)
|
[
"noreply@github.com"
] |
friver01.noreply@github.com
|
e117d696a8f1c12fafa5f80230748cda739cd054
|
71b95003a81f79b23b2802a42885bc5180901978
|
/ttt_ai.py
|
3b2d1d77cfe0afce2f8829f25c7082eac4c22dba
|
[] |
no_license
|
jrf034/AI_tic_tac_toe
|
10fcbe6361e0ae9ac1dc4c712861dbe5aa802a0b
|
3425524700f109c973165487a80a495b71d208bf
|
refs/heads/master
| 2020-12-14T07:27:55.251003
| 2017-07-05T15:47:39
| 2017-07-05T15:47:39
| 95,555,824
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,241
|
py
|
from random import choice
from decision_tree import *
import pdb
"""
The goal for this AI is to implement a complete decision making tree, through trial and error
to beat any opponent in Tic-Tac-Toe. Or at least to never fail and always force a draw.
It does this by documenting its own steps and determining the best posible options given
the current state of the game.
"""
class TTT_ai:
def __init__(self):
self.dt_0 = Node(0, None)
self.dt_1 = Node(1, None)
self.dt_2 = Node(2, None)
self.dt_3 = Node(3, None)
self.dt_4 = Node(4, None)
self.dt_5 = Node(5, None)
self.dt_6 = Node(6, None)
self.dt_7 = Node(7, None)
self.dt_8 = Node(8, None)
self.dt_array = [self.dt_0, self.dt_1, self.dt_2, self.dt_3, self.dt_4, self.dt_5, self.dt_6, self.dt_7, self.dt_8]
self.play_tree = [None, None, None]
self.last_move = None
self.current_tree = None
self.opps_pos = None #needs to update everytime the opponent makes a move
self.best_tree = None #which tree is the best to start in?
def population_memory(self, board, first):
pick = choice(board)
self.update_memory(pick, first)
return pick
def update_memory(self, pick, first, turn = 0):
if (first): #Every time we first start, it picks a random spot and goes to that tree; we're trying to identify the best starting position
self.current_tree = self.dt_array[pick]
self.last_move = self.current_tree #the move we just took was into the top of the current_tree
return #we're done, no need to continue
else: #we're down the tree now, last_move should be where we left off, just made a move and need to move last_move into it
if (self.last_move.children[self.opps_pos][pick] == None): #we're making a new move, tag it and move on
move = Node(pick, self.last_move)
self.last_move.children[self.opps_pos][pick] = move
self.last_move = self.last_move.children[self.opps_pos][pick]
else: #our pick was something old, move into it and continue
self.last_move = self.last_move.children[self.opps_pos][pick]
#at the end of the game this gets called and the last_move node updates to reflect the outcome
def finalize_score(self, result):
comp = self.last_move.wins + self.last_move.losses + self.last_move.draws
if (comp != 0): #if it isn't 0 we've already been here, logged it, and push it up the stack. This should not happen again
return
if (result == 1):
self.last_move.wins = 1
self.last_move.final_win = True
elif (result == 0):
self.last_move.draws = 1
else:
self.last_move.losses = 1
self.last_move.final_win = False
win = self.last_move.wins #document what happened
draw = self.last_move.draws
lose = self.last_move.losses
self.last_move = self.last_move.parent
while(self.last_move != None):
# pdb.set_trace()
self.last_move.wins += win #update the parent with whatever happened
self.last_move.draws += draw #we're only changing the one variable, and only by 1
self.last_move.losses += lose #this means we'll have a definite outcome, not aggregates growing out of control
self.last_move = self.last_move.parent #move up into the parent
#now we're actually playing the game, access the memory
#start_mem_connection has already been called by this point
#so we need only to access the play_tree
def play_game(self, board, first):
if (first):
self.last_move = choice(self.play_tree) #move last_move into the play tree and begin
return self.last_move.position
else: #opponent just moved, set_opps_pos was called, move into the appropriate child
best_pick = self.get_best(self.last_move.children[self.opps_pos])
if (best_pick == None):
pick = choice(board)
self.update_memory(pick, first)
return pick
else:
self.last_move = best_pick
return self.last_move.position
def get_best(self, chl_array):
non_losses = 0
ret_tree = None
losses = 10**10
loss_tree = None
exists = True
for x in chl_array:
if(x != None):
exists = False
#none of the children existed, we never hit this.
if(exists):
return None
for decision in chl_array:
if (decision == None):
continue
if (decision.final_win == True):
return decision
comp = (decision.wins + decision.draws) - decision.losses
if (comp >= non_losses): #find the tree with the most wins + draws, we're aiming to not lose, not necessarily to win
non_losses = comp
ret_tree = decision
elif (decision.losses <= losses): #even if there are no wins, pick the fewest losses (mostly to catch the case where we fall down a failure tree)
losses = decision.losses
loss_tree = decision
if (ret_tree == None):
return loss_tree
else:
return ret_tree
def start_mem_connection(self):
non_losses = 0
ret_tree = [self.dt_array[0], self.dt_array[4], self.dt_array[8]]
for tree in self.dt_array:
comp = (tree.wins + tree.draws)
print(comp)
if (comp > non_losses): #find the tree with the most wins + draws, we're aiming to not lose, not necessarily to win
non_losses = comp
ret_tree[2] = ret_tree[1]
ret_tree[1] = ret_tree[0]
ret_tree[0] = tree
self.play_tree = ret_tree
def set_opps_pos(self, position):
self.opps_pos = position
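# A minimal training-loop sketch (the board bookkeeping here is hypothetical, for illustration):
#   ai = TTT_ai()
#   free = list(range(9))                  # open positions on the board
#   pick = ai.population_memory(free, True)
#   ai.set_opps_pos(opponent_pick)         # record the opponent's reply
#   ...play out the game with further update_memory() calls...
#   ai.finalize_score(1)                   # 1 = win, 0 = draw, anything else = loss
#   ai.start_mem_connection()              # then pick the best opening trees for play_game()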
|
[
"jfaith@VENTERA.com"
] |
jfaith@VENTERA.com
|
ea0155d6c504cedaed85c803d4d8d724a68b062c
|
b02bcd352f17c256a3d07404f8f799959e2766d2
|
/build/baxter_common/baxter_maintenance_msgs/catkin_generated/pkg.installspace.context.pc.py
|
1405f6081caf20537168b99539569c6e51b7663a
|
[] |
no_license
|
kmxz2016/baxter
|
dec73117d6af04c0a0bb4b246f101e26aad43736
|
2b745f5e2b23c0aaadf1bd452cd3402f1b410d3f
|
refs/heads/master
| 2021-01-06T20:43:29.921525
| 2018-05-10T04:02:30
| 2018-05-10T04:02:30
| 99,546,545
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/km/baxter_ws/install/include".split(';') if "/home/km/baxter_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "baxter_maintenance_msgs"
PROJECT_SPACE_DIR = "/home/km/baxter_ws/install"
PROJECT_VERSION = "1.2.0"
|
[
"601985329@qq.com"
] |
601985329@qq.com
|
08797dc17cc0a5ecf1632354e021a89542b0360c
|
31ff643c4d2fbb9f30aa7583311653206532300e
|
/auth.py
|
2c15c294c04e596f7b1d109810cd316881e7fd99
|
[] |
no_license
|
cone387/auto-post
|
c1fdf2e292544bde6aa344b89603c7b94f913c25
|
191956e360b87e87049b4f6f325444ebf65b0519
|
refs/heads/master
| 2021-03-23T12:16:04.956755
| 2020-03-18T06:53:47
| 2020-03-18T06:53:47
| 247,451,645
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 111
|
py
|
# -*- coding:utf-8 -*-
# author: Cone
# datetime: 2020-03-16 09:27
# software: PyCharm
class Auth:
pass
|
[
"fishlover387@gmail.com"
] |
fishlover387@gmail.com
|
b6e46df09922f43941c367dc634a82fc29d68ace
|
3e8b385b6158a60fa98ce95f22d6955343c3be4e
|
/util.py
|
a8e007744c0b04c3026d76683ea30273f12851e7
|
[] |
no_license
|
instagibbs/taproot-workshop
|
57fea55e82c8a3591dfb19a733ccc74af932956b
|
0aade37a4f05b37f034d066489cfd15a18376a7f
|
refs/heads/master
| 2020-08-26T16:37:00.775656
| 2019-10-16T21:19:17
| 2019-10-17T20:12:04
| 217,075,257
| 2
| 0
| null | 2019-10-23T14:16:48
| 2019-10-23T14:16:47
| null |
UTF-8
|
Python
| false
| false
| 3,843
|
py
|
import argparse
import configparser
import os
from test_framework.test_framework import BitcoinTestFramework
# Read configuration from config.ini
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/config.ini"
config.read_file(open(configfile, encoding="utf8"))
SOURCE_DIRECTORY = config["path"]["SOURCE_DIRECTORY"]
assert not SOURCE_DIRECTORY == '', 'SOURCE_DIRECTORY not configured! Edit config.ini to configure SOURCE_DIRECTORY.'
print("Source directory configured as {}".format(SOURCE_DIRECTORY))
class TestWrapper:
"""Singleton TestWrapper class.
This wraps the actual TestWrapper class to ensure that users only ever
instantiate a single TestWrapper."""
class __TestWrapper(BitcoinTestFramework):
"""Wrapper Class for BitcoinTestFramework.
Provides the BitcoinTestFramework rpc & daemon process management
functionality to external python projects."""
def set_test_params(self):
# This can be overridden via the setup() parameters.
self.num_nodes = 1
def run_test(self):
pass
def setup(self,
bitcoind=os.path.abspath(SOURCE_DIRECTORY + "/src/bitcoind"),
bitcoincli=None,
setup_clean_chain=True,
num_nodes=1,
network_thread=None,
rpc_timeout=60,
supports_cli=False,
bind_to_localhost_only=True,
nocleanup=False,
noshutdown=False,
cachedir=os.path.abspath(SOURCE_DIRECTORY + "/test/cache"),
tmpdir=None,
loglevel='INFO',
trace_rpc=False,
port_seed=os.getpid(),
coveragedir=None,
configfile=os.path.abspath(SOURCE_DIRECTORY + "/test/config.ini"),
pdbonfailure=False,
usecli=False,
perf=False,
randomseed=None):
if self.running:
print("TestWrapper is already running!")
return
self.setup_clean_chain = setup_clean_chain
self.num_nodes = num_nodes
self.network_thread = network_thread
self.rpc_timeout = rpc_timeout
self.supports_cli = supports_cli
self.bind_to_localhost_only = bind_to_localhost_only
self.options = argparse.Namespace
self.options.nocleanup = nocleanup
self.options.noshutdown = noshutdown
self.options.cachedir = cachedir
self.options.tmpdir = tmpdir
self.options.loglevel = loglevel
self.options.trace_rpc = trace_rpc
self.options.port_seed = port_seed
self.options.coveragedir = coveragedir
self.options.configfile = configfile
self.options.pdbonfailure = pdbonfailure
self.options.usecli = usecli
self.options.perf = perf
self.options.randomseed = randomseed
self.options.bitcoind = bitcoind
self.options.bitcoincli = bitcoincli
super().setup()
self.running = True
def shutdown(self):
if not self.running:
print("TestWrapper is not running!")
else:
super().shutdown()
self.running = False
instance = None
def __new__(cls):
if not TestWrapper.instance:
TestWrapper.instance = TestWrapper.__TestWrapper()
TestWrapper.instance.running = False
return TestWrapper.instance
def __getattr__(self, name):
return getattr(self.instance, name)
    def __setattr__(self, name, value):
        return setattr(self.instance, name, value)
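# Hedged usage sketch (editor addition, not part of the original file). It
# assumes config.ini points SOURCE_DIRECTORY at a built Bitcoin Core tree;
# getblockchaininfo is just an arbitrary illustrative RPC.
if __name__ == '__main__':
    test = TestWrapper()
    test.setup(num_nodes=1)            # boots one regtest bitcoind
    node = test.nodes[0]               # BitcoinTestFramework exposes .nodes
    print(node.getblockchaininfo())    # any RPC goes through the node proxy
    assert TestWrapper() is test       # __new__ always returns the one instance
    test.shutdown()                    # stops the daemon and cleans up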
|
[
"jonnynewbs@gmail.com"
] |
jonnynewbs@gmail.com
|
fcc137377998ec47b0894729724dabaea04564b4
|
6364bb727b623f06f6998941299c49e7fcb1d437
|
/msgraph-cli-extensions/src/bookings/azext_bookings/vendored_sdks/bookings/models/_models_py3.py
|
e75a748fc9f1fb1217cf3e2f63cc32ab0bb3f17c
|
[
"MIT"
] |
permissive
|
kanakanaidu/msgraph-cli
|
1d6cd640f4e10f4bdf476d44d12a7c48987b1a97
|
b3b87f40148fb691a4c331f523ca91f8a5cc9224
|
refs/heads/main
| 2022-12-25T08:08:26.716914
| 2020-09-23T14:29:13
| 2020-09-23T14:29:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 51,356
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._bookings_enums import *
class CollectionOfBookingAppointment(msrest.serialization.Model):
"""Collection of bookingAppointment.
:param value:
:type value: list[~bookings.models.MicrosoftGraphBookingAppointment]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[MicrosoftGraphBookingAppointment]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["MicrosoftGraphBookingAppointment"]] = None,
odata_next_link: Optional[str] = None,
**kwargs
):
super(CollectionOfBookingAppointment, self).__init__(**kwargs)
self.value = value
self.odata_next_link = odata_next_link
class CollectionOfBookingAppointment0(msrest.serialization.Model):
"""Collection of bookingAppointment.
:param value:
:type value: list[~bookings.models.MicrosoftGraphBookingAppointment]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[MicrosoftGraphBookingAppointment]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["MicrosoftGraphBookingAppointment"]] = None,
odata_next_link: Optional[str] = None,
**kwargs
):
super(CollectionOfBookingAppointment0, self).__init__(**kwargs)
self.value = value
self.odata_next_link = odata_next_link
class CollectionOfBookingBusiness(msrest.serialization.Model):
"""Collection of bookingBusiness.
:param value:
:type value: list[~bookings.models.MicrosoftGraphBookingBusiness]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[MicrosoftGraphBookingBusiness]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["MicrosoftGraphBookingBusiness"]] = None,
odata_next_link: Optional[str] = None,
**kwargs
):
super(CollectionOfBookingBusiness, self).__init__(**kwargs)
self.value = value
self.odata_next_link = odata_next_link
class CollectionOfBookingCurrency(msrest.serialization.Model):
"""Collection of bookingCurrency.
:param value:
:type value: list[~bookings.models.MicrosoftGraphBookingCurrency]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[MicrosoftGraphBookingCurrency]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["MicrosoftGraphBookingCurrency"]] = None,
odata_next_link: Optional[str] = None,
**kwargs
):
super(CollectionOfBookingCurrency, self).__init__(**kwargs)
self.value = value
self.odata_next_link = odata_next_link
class CollectionOfBookingCustomer(msrest.serialization.Model):
"""Collection of bookingCustomer.
:param value:
:type value: list[~bookings.models.MicrosoftGraphBookingPerson]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[MicrosoftGraphBookingPerson]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["MicrosoftGraphBookingPerson"]] = None,
odata_next_link: Optional[str] = None,
**kwargs
):
super(CollectionOfBookingCustomer, self).__init__(**kwargs)
self.value = value
self.odata_next_link = odata_next_link
class CollectionOfBookingService(msrest.serialization.Model):
"""Collection of bookingService.
:param value:
:type value: list[~bookings.models.MicrosoftGraphBookingService]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[MicrosoftGraphBookingService]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["MicrosoftGraphBookingService"]] = None,
odata_next_link: Optional[str] = None,
**kwargs
):
super(CollectionOfBookingService, self).__init__(**kwargs)
self.value = value
self.odata_next_link = odata_next_link
class CollectionOfBookingStaffMember(msrest.serialization.Model):
"""Collection of bookingStaffMember.
:param value:
:type value: list[~bookings.models.MicrosoftGraphBookingStaffMember]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[MicrosoftGraphBookingStaffMember]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["MicrosoftGraphBookingStaffMember"]] = None,
odata_next_link: Optional[str] = None,
**kwargs
):
super(CollectionOfBookingStaffMember, self).__init__(**kwargs)
self.value = value
self.odata_next_link = odata_next_link
class MicrosoftGraphEntity(msrest.serialization.Model):
"""entity.
:param id: Read-only.
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
**kwargs
):
super(MicrosoftGraphEntity, self).__init__(**kwargs)
self.id = id
class MicrosoftGraphBookingAppointment(MicrosoftGraphEntity):
"""Represents a booked appointment of a service by a customer in a business.
:param id: Read-only.
:type id: str
:param self_service_appointment_id:
:type self_service_appointment_id: str
:param customer_id: The id of the booking customer associated with this appointment.
:type customer_id: str
:param customer_name:
:type customer_name: str
:param customer_email_address:
:type customer_email_address: str
:param customer_phone:
:type customer_phone: str
:param customer_notes: Notes from the customer associated with this appointment.
:type customer_notes: str
:param service_id: The id of the booking service associated with this appointment.
:type service_id: str
:param service_name: The name of the booking service associated with this appointment.
:type service_name: str
:param start: dateTimeTimeZone.
:type start: ~bookings.models.MicrosoftGraphDateTimeZone
:param end: dateTimeTimeZone.
:type end: ~bookings.models.MicrosoftGraphDateTimeZone
:param duration:
:type duration: ~datetime.timedelta
:param pre_buffer:
:type pre_buffer: ~datetime.timedelta
:param post_buffer:
:type post_buffer: ~datetime.timedelta
:param price_type: Possible values include: "undefined", "fixedPrice", "startingAt", "hourly",
"free", "priceVaries", "callUs", "notSet".
:type price_type: str or ~bookings.models.MicrosoftGraphBookingPriceType
:param price:
:type price: float
:param service_notes:
:type service_notes: str
:param reminders:
:type reminders: list[~bookings.models.MicrosoftGraphBookingReminder]
:param opt_out_of_customer_email:
:type opt_out_of_customer_email: bool
:param staff_member_ids:
:type staff_member_ids: list[str]
:param invoice_amount:
:type invoice_amount: float
:param invoice_date: dateTimeTimeZone.
:type invoice_date: ~bookings.models.MicrosoftGraphDateTimeZone
:param invoice_id:
:type invoice_id: str
:param invoice_status: Possible values include: "draft", "reviewing", "open", "canceled",
"paid", "corrective".
:type invoice_status: str or ~bookings.models.MicrosoftGraphBookingInvoiceStatus
:param invoice_url:
:type invoice_url: str
:param display_name_service_location_display_name: The name associated with the location.
:type display_name_service_location_display_name: str
:param location_email_address_service_location_email_address: Optional email address of the
location.
:type location_email_address_service_location_email_address: str
:param address_service_location_address: physicalAddress.
:type address_service_location_address: ~bookings.models.MicrosoftGraphPhysicalAddress
:param coordinates_service_location_coordinates: outlookGeoCoordinates.
:type coordinates_service_location_coordinates:
~bookings.models.MicrosoftGraphOutlookGeoCoordinates
:param location_uri_service_location_uri: Optional URI representing the location.
:type location_uri_service_location_uri: str
:param location_type_service_location_type: Possible values include: "default",
"conferenceRoom", "homeAddress", "businessAddress", "geoCoordinates", "streetAddress", "hotel",
"restaurant", "localBusiness", "postalAddress".
:type location_type_service_location_type: str or ~bookings.models.MicrosoftGraphLocationType
:param unique_id_service_location_unique_id: For internal use only.
:type unique_id_service_location_unique_id: str
:param unique_id_type_service_location_unique_id_type: Possible values include: "unknown",
"locationStore", "directory", "private", "bing".
:type unique_id_type_service_location_unique_id_type: str or
~bookings.models.MicrosoftGraphLocationUniqueIdType
:param display_name_customer_location_display_name: The name associated with the location.
:type display_name_customer_location_display_name: str
:param location_email_address_customer_location_email_address: Optional email address of the
location.
:type location_email_address_customer_location_email_address: str
:param address_customer_location_address: physicalAddress.
:type address_customer_location_address: ~bookings.models.MicrosoftGraphPhysicalAddress
:param coordinates_customer_location_coordinates: outlookGeoCoordinates.
:type coordinates_customer_location_coordinates:
~bookings.models.MicrosoftGraphOutlookGeoCoordinates
:param location_uri_customer_location_uri: Optional URI representing the location.
:type location_uri_customer_location_uri: str
:param location_type_customer_location_type: Possible values include: "default",
"conferenceRoom", "homeAddress", "businessAddress", "geoCoordinates", "streetAddress", "hotel",
"restaurant", "localBusiness", "postalAddress".
:type location_type_customer_location_type: str or ~bookings.models.MicrosoftGraphLocationType
:param unique_id_customer_location_unique_id: For internal use only.
:type unique_id_customer_location_unique_id: str
:param unique_id_type_customer_location_unique_id_type: Possible values include: "unknown",
"locationStore", "directory", "private", "bing".
:type unique_id_type_customer_location_unique_id_type: str or
~bookings.models.MicrosoftGraphLocationUniqueIdType
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'self_service_appointment_id': {'key': 'selfServiceAppointmentId', 'type': 'str'},
'customer_id': {'key': 'customerId', 'type': 'str'},
'customer_name': {'key': 'customerName', 'type': 'str'},
'customer_email_address': {'key': 'customerEmailAddress', 'type': 'str'},
'customer_phone': {'key': 'customerPhone', 'type': 'str'},
'customer_notes': {'key': 'customerNotes', 'type': 'str'},
'service_id': {'key': 'serviceId', 'type': 'str'},
'service_name': {'key': 'serviceName', 'type': 'str'},
'start': {'key': 'start', 'type': 'MicrosoftGraphDateTimeZone'},
'end': {'key': 'end', 'type': 'MicrosoftGraphDateTimeZone'},
'duration': {'key': 'duration', 'type': 'duration'},
'pre_buffer': {'key': 'preBuffer', 'type': 'duration'},
'post_buffer': {'key': 'postBuffer', 'type': 'duration'},
'price_type': {'key': 'priceType', 'type': 'str'},
'price': {'key': 'price', 'type': 'float'},
'service_notes': {'key': 'serviceNotes', 'type': 'str'},
'reminders': {'key': 'reminders', 'type': '[MicrosoftGraphBookingReminder]'},
'opt_out_of_customer_email': {'key': 'optOutOfCustomerEmail', 'type': 'bool'},
'staff_member_ids': {'key': 'staffMemberIds', 'type': '[str]'},
'invoice_amount': {'key': 'invoiceAmount', 'type': 'float'},
'invoice_date': {'key': 'invoiceDate', 'type': 'MicrosoftGraphDateTimeZone'},
'invoice_id': {'key': 'invoiceId', 'type': 'str'},
'invoice_status': {'key': 'invoiceStatus', 'type': 'str'},
'invoice_url': {'key': 'invoiceUrl', 'type': 'str'},
'display_name_service_location_display_name': {'key': 'serviceLocation.displayName', 'type': 'str'},
'location_email_address_service_location_email_address': {'key': 'serviceLocation.locationEmailAddress', 'type': 'str'},
'address_service_location_address': {'key': 'serviceLocation.address', 'type': 'MicrosoftGraphPhysicalAddress'},
'coordinates_service_location_coordinates': {'key': 'serviceLocation.coordinates', 'type': 'MicrosoftGraphOutlookGeoCoordinates'},
'location_uri_service_location_uri': {'key': 'serviceLocation.locationUri', 'type': 'str'},
'location_type_service_location_type': {'key': 'serviceLocation.locationType', 'type': 'str'},
'unique_id_service_location_unique_id': {'key': 'serviceLocation.uniqueId', 'type': 'str'},
'unique_id_type_service_location_unique_id_type': {'key': 'serviceLocation.uniqueIdType', 'type': 'str'},
'display_name_customer_location_display_name': {'key': 'customerLocation.displayName', 'type': 'str'},
'location_email_address_customer_location_email_address': {'key': 'customerLocation.locationEmailAddress', 'type': 'str'},
'address_customer_location_address': {'key': 'customerLocation.address', 'type': 'MicrosoftGraphPhysicalAddress'},
'coordinates_customer_location_coordinates': {'key': 'customerLocation.coordinates', 'type': 'MicrosoftGraphOutlookGeoCoordinates'},
'location_uri_customer_location_uri': {'key': 'customerLocation.locationUri', 'type': 'str'},
'location_type_customer_location_type': {'key': 'customerLocation.locationType', 'type': 'str'},
'unique_id_customer_location_unique_id': {'key': 'customerLocation.uniqueId', 'type': 'str'},
'unique_id_type_customer_location_unique_id_type': {'key': 'customerLocation.uniqueIdType', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
self_service_appointment_id: Optional[str] = None,
customer_id: Optional[str] = None,
customer_name: Optional[str] = None,
customer_email_address: Optional[str] = None,
customer_phone: Optional[str] = None,
customer_notes: Optional[str] = None,
service_id: Optional[str] = None,
service_name: Optional[str] = None,
start: Optional["MicrosoftGraphDateTimeZone"] = None,
end: Optional["MicrosoftGraphDateTimeZone"] = None,
duration: Optional[datetime.timedelta] = None,
pre_buffer: Optional[datetime.timedelta] = None,
post_buffer: Optional[datetime.timedelta] = None,
price_type: Optional[Union[str, "MicrosoftGraphBookingPriceType"]] = None,
price: Optional[float] = None,
service_notes: Optional[str] = None,
reminders: Optional[List["MicrosoftGraphBookingReminder"]] = None,
opt_out_of_customer_email: Optional[bool] = None,
staff_member_ids: Optional[List[str]] = None,
invoice_amount: Optional[float] = None,
invoice_date: Optional["MicrosoftGraphDateTimeZone"] = None,
invoice_id: Optional[str] = None,
invoice_status: Optional[Union[str, "MicrosoftGraphBookingInvoiceStatus"]] = None,
invoice_url: Optional[str] = None,
display_name_service_location_display_name: Optional[str] = None,
location_email_address_service_location_email_address: Optional[str] = None,
address_service_location_address: Optional["MicrosoftGraphPhysicalAddress"] = None,
coordinates_service_location_coordinates: Optional["MicrosoftGraphOutlookGeoCoordinates"] = None,
location_uri_service_location_uri: Optional[str] = None,
location_type_service_location_type: Optional[Union[str, "MicrosoftGraphLocationType"]] = None,
unique_id_service_location_unique_id: Optional[str] = None,
unique_id_type_service_location_unique_id_type: Optional[Union[str, "MicrosoftGraphLocationUniqueIdType"]] = None,
display_name_customer_location_display_name: Optional[str] = None,
location_email_address_customer_location_email_address: Optional[str] = None,
address_customer_location_address: Optional["MicrosoftGraphPhysicalAddress"] = None,
coordinates_customer_location_coordinates: Optional["MicrosoftGraphOutlookGeoCoordinates"] = None,
location_uri_customer_location_uri: Optional[str] = None,
location_type_customer_location_type: Optional[Union[str, "MicrosoftGraphLocationType"]] = None,
unique_id_customer_location_unique_id: Optional[str] = None,
unique_id_type_customer_location_unique_id_type: Optional[Union[str, "MicrosoftGraphLocationUniqueIdType"]] = None,
**kwargs
):
super(MicrosoftGraphBookingAppointment, self).__init__(id=id, **kwargs)
self.self_service_appointment_id = self_service_appointment_id
self.customer_id = customer_id
self.customer_name = customer_name
self.customer_email_address = customer_email_address
self.customer_phone = customer_phone
self.customer_notes = customer_notes
self.service_id = service_id
self.service_name = service_name
self.start = start
self.end = end
self.duration = duration
self.pre_buffer = pre_buffer
self.post_buffer = post_buffer
self.price_type = price_type
self.price = price
self.service_notes = service_notes
self.reminders = reminders
self.opt_out_of_customer_email = opt_out_of_customer_email
self.staff_member_ids = staff_member_ids
self.invoice_amount = invoice_amount
self.invoice_date = invoice_date
self.invoice_id = invoice_id
self.invoice_status = invoice_status
self.invoice_url = invoice_url
self.display_name_service_location_display_name = display_name_service_location_display_name
self.location_email_address_service_location_email_address = location_email_address_service_location_email_address
self.address_service_location_address = address_service_location_address
self.coordinates_service_location_coordinates = coordinates_service_location_coordinates
self.location_uri_service_location_uri = location_uri_service_location_uri
self.location_type_service_location_type = location_type_service_location_type
self.unique_id_service_location_unique_id = unique_id_service_location_unique_id
self.unique_id_type_service_location_unique_id_type = unique_id_type_service_location_unique_id_type
self.display_name_customer_location_display_name = display_name_customer_location_display_name
self.location_email_address_customer_location_email_address = location_email_address_customer_location_email_address
self.address_customer_location_address = address_customer_location_address
self.coordinates_customer_location_coordinates = coordinates_customer_location_coordinates
self.location_uri_customer_location_uri = location_uri_customer_location_uri
self.location_type_customer_location_type = location_type_customer_location_type
self.unique_id_customer_location_unique_id = unique_id_customer_location_unique_id
self.unique_id_type_customer_location_unique_id_type = unique_id_type_customer_location_unique_id_type
class MicrosoftGraphBookingNamedEntity(MicrosoftGraphEntity):
"""Booking entities that provide a display name.
:param id: Read-only.
:type id: str
:param display_name: Display name of this entity.
:type display_name: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
display_name: Optional[str] = None,
**kwargs
):
super(MicrosoftGraphBookingNamedEntity, self).__init__(id=id, **kwargs)
self.display_name = display_name
class MicrosoftGraphBookingBusiness(MicrosoftGraphBookingNamedEntity):
"""Represents a Microsot Bookings Business.
:param id: Read-only.
:type id: str
:param display_name: Display name of this entity.
:type display_name: str
:param business_type:
:type business_type: str
:param address: physicalAddress.
:type address: ~bookings.models.MicrosoftGraphPhysicalAddress
:param phone:
:type phone: str
:param email:
:type email: str
:param web_site_url: The URL of the business web site.
:type web_site_url: str
:param default_currency_iso:
:type default_currency_iso: str
:param business_hours:
:type business_hours: list[~bookings.models.MicrosoftGraphBookingWorkHours]
:param scheduling_policy: bookingSchedulingPolicy.
:type scheduling_policy: ~bookings.models.MicrosoftGraphBookingSchedulingPolicy
:param is_published:
:type is_published: bool
:param public_url:
:type public_url: str
:param appointments: All appointments in this business.
:type appointments: list[~bookings.models.MicrosoftGraphBookingAppointment]
:param calendar_view: A calendar view of appointments in this business.
:type calendar_view: list[~bookings.models.MicrosoftGraphBookingAppointment]
:param customers: All customers of this business.
:type customers: list[~bookings.models.MicrosoftGraphBookingPerson]
:param services: All services offered by this business.
:type services: list[~bookings.models.MicrosoftGraphBookingService]
    :param staff_members: All staff members that provide services in this business.
:type staff_members: list[~bookings.models.MicrosoftGraphBookingStaffMember]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'business_type': {'key': 'businessType', 'type': 'str'},
'address': {'key': 'address', 'type': 'MicrosoftGraphPhysicalAddress'},
'phone': {'key': 'phone', 'type': 'str'},
'email': {'key': 'email', 'type': 'str'},
'web_site_url': {'key': 'webSiteUrl', 'type': 'str'},
'default_currency_iso': {'key': 'defaultCurrencyIso', 'type': 'str'},
'business_hours': {'key': 'businessHours', 'type': '[MicrosoftGraphBookingWorkHours]'},
'scheduling_policy': {'key': 'schedulingPolicy', 'type': 'MicrosoftGraphBookingSchedulingPolicy'},
'is_published': {'key': 'isPublished', 'type': 'bool'},
'public_url': {'key': 'publicUrl', 'type': 'str'},
'appointments': {'key': 'appointments', 'type': '[MicrosoftGraphBookingAppointment]'},
'calendar_view': {'key': 'calendarView', 'type': '[MicrosoftGraphBookingAppointment]'},
'customers': {'key': 'customers', 'type': '[MicrosoftGraphBookingPerson]'},
'services': {'key': 'services', 'type': '[MicrosoftGraphBookingService]'},
'staff_members': {'key': 'staffMembers', 'type': '[MicrosoftGraphBookingStaffMember]'},
}
def __init__(
self,
*,
id: Optional[str] = None,
display_name: Optional[str] = None,
business_type: Optional[str] = None,
address: Optional["MicrosoftGraphPhysicalAddress"] = None,
phone: Optional[str] = None,
email: Optional[str] = None,
web_site_url: Optional[str] = None,
default_currency_iso: Optional[str] = None,
business_hours: Optional[List["MicrosoftGraphBookingWorkHours"]] = None,
scheduling_policy: Optional["MicrosoftGraphBookingSchedulingPolicy"] = None,
is_published: Optional[bool] = None,
public_url: Optional[str] = None,
appointments: Optional[List["MicrosoftGraphBookingAppointment"]] = None,
calendar_view: Optional[List["MicrosoftGraphBookingAppointment"]] = None,
customers: Optional[List["MicrosoftGraphBookingPerson"]] = None,
services: Optional[List["MicrosoftGraphBookingService"]] = None,
staff_members: Optional[List["MicrosoftGraphBookingStaffMember"]] = None,
**kwargs
):
super(MicrosoftGraphBookingBusiness, self).__init__(id=id, display_name=display_name, **kwargs)
self.business_type = business_type
self.address = address
self.phone = phone
self.email = email
self.web_site_url = web_site_url
self.default_currency_iso = default_currency_iso
self.business_hours = business_hours
self.scheduling_policy = scheduling_policy
self.is_published = is_published
self.public_url = public_url
self.appointments = appointments
self.calendar_view = calendar_view
self.customers = customers
self.services = services
self.staff_members = staff_members
class MicrosoftGraphBookingCurrency(MicrosoftGraphEntity):
"""bookingCurrency.
:param id: Read-only.
:type id: str
:param symbol:
:type symbol: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'symbol': {'key': 'symbol', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
symbol: Optional[str] = None,
**kwargs
):
super(MicrosoftGraphBookingCurrency, self).__init__(id=id, **kwargs)
self.symbol = symbol
class MicrosoftGraphBookingPerson(MicrosoftGraphBookingNamedEntity):
"""Represents a booking customer or staff member.
:param id: Read-only.
:type id: str
:param display_name: Display name of this entity.
:type display_name: str
:param email_address: The e-mail address of this person.
:type email_address: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'email_address': {'key': 'emailAddress', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
display_name: Optional[str] = None,
email_address: Optional[str] = None,
**kwargs
):
super(MicrosoftGraphBookingPerson, self).__init__(id=id, display_name=display_name, **kwargs)
self.email_address = email_address
class MicrosoftGraphBookingCustomer(MicrosoftGraphBookingPerson):
"""Represents a customer of the business.
:param id: Read-only.
:type id: str
:param display_name: Display name of this entity.
:type display_name: str
:param email_address: The e-mail address of this person.
:type email_address: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'email_address': {'key': 'emailAddress', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
display_name: Optional[str] = None,
email_address: Optional[str] = None,
**kwargs
):
        super(MicrosoftGraphBookingCustomer, self).__init__(id=id, display_name=display_name, email_address=email_address, **kwargs)
class MicrosoftGraphBookingReminder(msrest.serialization.Model):
"""bookingReminder.
:param offset: How much time before an appointment the reminder should be sent.
:type offset: ~datetime.timedelta
:param recipients: Possible values include: "allAttendees", "staff", "customer".
:type recipients: str or ~bookings.models.MicrosoftGraphBookingReminderRecipients
:param message: Message to send.
:type message: str
"""
_attribute_map = {
'offset': {'key': 'offset', 'type': 'duration'},
'recipients': {'key': 'recipients', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
offset: Optional[datetime.timedelta] = None,
recipients: Optional[Union[str, "MicrosoftGraphBookingReminderRecipients"]] = None,
message: Optional[str] = None,
**kwargs
):
super(MicrosoftGraphBookingReminder, self).__init__(**kwargs)
self.offset = offset
self.recipients = recipients
self.message = message
class MicrosoftGraphBookingSchedulingPolicy(msrest.serialization.Model):
"""bookingSchedulingPolicy.
:param time_slot_interval: Duration of each time slot.
:type time_slot_interval: ~datetime.timedelta
:param minimum_lead_time: Minimum lead time for bookings and cancellations.
:type minimum_lead_time: ~datetime.timedelta
:param maximum_advance: Maximum number of days in advance that a booking can be made.
:type maximum_advance: ~datetime.timedelta
:param send_confirmations_to_owner: Notify the business via email when a booking is created or
changed.
:type send_confirmations_to_owner: bool
:param allow_staff_selection: Allow customers to choose a specific person for the booking.
:type allow_staff_selection: bool
"""
_attribute_map = {
'time_slot_interval': {'key': 'timeSlotInterval', 'type': 'duration'},
'minimum_lead_time': {'key': 'minimumLeadTime', 'type': 'duration'},
'maximum_advance': {'key': 'maximumAdvance', 'type': 'duration'},
'send_confirmations_to_owner': {'key': 'sendConfirmationsToOwner', 'type': 'bool'},
'allow_staff_selection': {'key': 'allowStaffSelection', 'type': 'bool'},
}
def __init__(
self,
*,
time_slot_interval: Optional[datetime.timedelta] = None,
minimum_lead_time: Optional[datetime.timedelta] = None,
maximum_advance: Optional[datetime.timedelta] = None,
send_confirmations_to_owner: Optional[bool] = None,
allow_staff_selection: Optional[bool] = None,
**kwargs
):
super(MicrosoftGraphBookingSchedulingPolicy, self).__init__(**kwargs)
self.time_slot_interval = time_slot_interval
self.minimum_lead_time = minimum_lead_time
self.maximum_advance = maximum_advance
self.send_confirmations_to_owner = send_confirmations_to_owner
self.allow_staff_selection = allow_staff_selection
class MicrosoftGraphBookingService(MicrosoftGraphBookingNamedEntity):
"""Represents a particular service offered by a booking business.
:param id: Read-only.
:type id: str
:param display_name: Display name of this entity.
:type display_name: str
:param default_duration:
:type default_duration: ~datetime.timedelta
:param default_price:
:type default_price: float
:param default_price_type: Possible values include: "undefined", "fixedPrice", "startingAt",
"hourly", "free", "priceVaries", "callUs", "notSet".
:type default_price_type: str or ~bookings.models.MicrosoftGraphBookingPriceType
:param default_reminders: The default reminders set in an appointment of this service.
:type default_reminders: list[~bookings.models.MicrosoftGraphBookingReminder]
:param description:
:type description: str
:param is_hidden_from_customers:
:type is_hidden_from_customers: bool
:param notes:
:type notes: str
:param pre_buffer:
:type pre_buffer: ~datetime.timedelta
:param post_buffer:
:type post_buffer: ~datetime.timedelta
:param scheduling_policy: bookingSchedulingPolicy.
:type scheduling_policy: ~bookings.models.MicrosoftGraphBookingSchedulingPolicy
:param staff_member_ids:
:type staff_member_ids: list[str]
:param display_name_default_location_display_name: The name associated with the location.
:type display_name_default_location_display_name: str
:param location_email_address: Optional email address of the location.
:type location_email_address: str
:param address: physicalAddress.
:type address: ~bookings.models.MicrosoftGraphPhysicalAddress
:param coordinates: outlookGeoCoordinates.
:type coordinates: ~bookings.models.MicrosoftGraphOutlookGeoCoordinates
:param location_uri: Optional URI representing the location.
:type location_uri: str
:param location_type: Possible values include: "default", "conferenceRoom", "homeAddress",
"businessAddress", "geoCoordinates", "streetAddress", "hotel", "restaurant", "localBusiness",
"postalAddress".
:type location_type: str or ~bookings.models.MicrosoftGraphLocationType
:param unique_id: For internal use only.
:type unique_id: str
:param unique_id_type: Possible values include: "unknown", "locationStore", "directory",
"private", "bing".
:type unique_id_type: str or ~bookings.models.MicrosoftGraphLocationUniqueIdType
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'default_duration': {'key': 'defaultDuration', 'type': 'duration'},
'default_price': {'key': 'defaultPrice', 'type': 'float'},
'default_price_type': {'key': 'defaultPriceType', 'type': 'str'},
'default_reminders': {'key': 'defaultReminders', 'type': '[MicrosoftGraphBookingReminder]'},
'description': {'key': 'description', 'type': 'str'},
'is_hidden_from_customers': {'key': 'isHiddenFromCustomers', 'type': 'bool'},
'notes': {'key': 'notes', 'type': 'str'},
'pre_buffer': {'key': 'preBuffer', 'type': 'duration'},
'post_buffer': {'key': 'postBuffer', 'type': 'duration'},
'scheduling_policy': {'key': 'schedulingPolicy', 'type': 'MicrosoftGraphBookingSchedulingPolicy'},
'staff_member_ids': {'key': 'staffMemberIds', 'type': '[str]'},
'display_name_default_location_display_name': {'key': 'defaultLocation.displayName', 'type': 'str'},
'location_email_address': {'key': 'defaultLocation.locationEmailAddress', 'type': 'str'},
'address': {'key': 'defaultLocation.address', 'type': 'MicrosoftGraphPhysicalAddress'},
'coordinates': {'key': 'defaultLocation.coordinates', 'type': 'MicrosoftGraphOutlookGeoCoordinates'},
'location_uri': {'key': 'defaultLocation.locationUri', 'type': 'str'},
'location_type': {'key': 'defaultLocation.locationType', 'type': 'str'},
'unique_id': {'key': 'defaultLocation.uniqueId', 'type': 'str'},
'unique_id_type': {'key': 'defaultLocation.uniqueIdType', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
display_name: Optional[str] = None,
default_duration: Optional[datetime.timedelta] = None,
default_price: Optional[float] = None,
default_price_type: Optional[Union[str, "MicrosoftGraphBookingPriceType"]] = None,
default_reminders: Optional[List["MicrosoftGraphBookingReminder"]] = None,
description: Optional[str] = None,
is_hidden_from_customers: Optional[bool] = None,
notes: Optional[str] = None,
pre_buffer: Optional[datetime.timedelta] = None,
post_buffer: Optional[datetime.timedelta] = None,
scheduling_policy: Optional["MicrosoftGraphBookingSchedulingPolicy"] = None,
staff_member_ids: Optional[List[str]] = None,
display_name_default_location_display_name: Optional[str] = None,
location_email_address: Optional[str] = None,
address: Optional["MicrosoftGraphPhysicalAddress"] = None,
coordinates: Optional["MicrosoftGraphOutlookGeoCoordinates"] = None,
location_uri: Optional[str] = None,
location_type: Optional[Union[str, "MicrosoftGraphLocationType"]] = None,
unique_id: Optional[str] = None,
unique_id_type: Optional[Union[str, "MicrosoftGraphLocationUniqueIdType"]] = None,
**kwargs
):
super(MicrosoftGraphBookingService, self).__init__(id=id, display_name=display_name, **kwargs)
self.default_duration = default_duration
self.default_price = default_price
self.default_price_type = default_price_type
self.default_reminders = default_reminders
self.description = description
self.is_hidden_from_customers = is_hidden_from_customers
self.notes = notes
self.pre_buffer = pre_buffer
self.post_buffer = post_buffer
self.scheduling_policy = scheduling_policy
self.staff_member_ids = staff_member_ids
self.display_name_default_location_display_name = display_name_default_location_display_name
self.location_email_address = location_email_address
self.address = address
self.coordinates = coordinates
self.location_uri = location_uri
self.location_type = location_type
self.unique_id = unique_id
self.unique_id_type = unique_id_type
class MicrosoftGraphBookingStaffMember(MicrosoftGraphBookingPerson):
"""Represents a staff member who provides services in a business.
:param id: Read-only.
:type id: str
:param display_name: Display name of this entity.
:type display_name: str
:param email_address: The e-mail address of this person.
:type email_address: str
:param availability_is_affected_by_personal_calendar:
:type availability_is_affected_by_personal_calendar: bool
:param color_index:
:type color_index: int
:param role: Possible values include: "guest", "administrator", "viewer", "externalGuest".
:type role: str or ~bookings.models.MicrosoftGraphBookingStaffRole
:param use_business_hours:
:type use_business_hours: bool
:param working_hours:
:type working_hours: list[~bookings.models.MicrosoftGraphBookingWorkHours]
"""
_validation = {
'color_index': {'maximum': 2147483647, 'minimum': -2147483648},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'email_address': {'key': 'emailAddress', 'type': 'str'},
'availability_is_affected_by_personal_calendar': {'key': 'availabilityIsAffectedByPersonalCalendar', 'type': 'bool'},
'color_index': {'key': 'colorIndex', 'type': 'int'},
'role': {'key': 'role', 'type': 'str'},
'use_business_hours': {'key': 'useBusinessHours', 'type': 'bool'},
'working_hours': {'key': 'workingHours', 'type': '[MicrosoftGraphBookingWorkHours]'},
}
def __init__(
self,
*,
id: Optional[str] = None,
display_name: Optional[str] = None,
email_address: Optional[str] = None,
availability_is_affected_by_personal_calendar: Optional[bool] = None,
color_index: Optional[int] = None,
role: Optional[Union[str, "MicrosoftGraphBookingStaffRole"]] = None,
use_business_hours: Optional[bool] = None,
working_hours: Optional[List["MicrosoftGraphBookingWorkHours"]] = None,
**kwargs
):
        super(MicrosoftGraphBookingStaffMember, self).__init__(id=id, display_name=display_name, email_address=email_address, **kwargs)
self.availability_is_affected_by_personal_calendar = availability_is_affected_by_personal_calendar
self.color_index = color_index
self.role = role
self.use_business_hours = use_business_hours
self.working_hours = working_hours
class MicrosoftGraphBookingWorkHours(msrest.serialization.Model):
"""bookingWorkHours.
:param day: Possible values include: "sunday", "monday", "tuesday", "wednesday", "thursday",
"friday", "saturday".
:type day: str or ~bookings.models.MicrosoftGraphDayOfWeek
:param time_slots: A list of start/end times during a day.
:type time_slots: list[~bookings.models.MicrosoftGraphBookingWorkTimeSlot]
"""
_attribute_map = {
'day': {'key': 'day', 'type': 'str'},
'time_slots': {'key': 'timeSlots', 'type': '[MicrosoftGraphBookingWorkTimeSlot]'},
}
def __init__(
self,
*,
day: Optional[Union[str, "MicrosoftGraphDayOfWeek"]] = None,
time_slots: Optional[List["MicrosoftGraphBookingWorkTimeSlot"]] = None,
**kwargs
):
super(MicrosoftGraphBookingWorkHours, self).__init__(**kwargs)
self.day = day
self.time_slots = time_slots
class MicrosoftGraphBookingWorkTimeSlot(msrest.serialization.Model):
"""bookingWorkTimeSlot.
:param start:
:type start: ~datetime.time
:param end:
:type end: ~datetime.time
"""
_attribute_map = {
'start': {'key': 'start', 'type': 'time'},
'end': {'key': 'end', 'type': 'time'},
}
def __init__(
self,
*,
start: Optional[datetime.time] = None,
end: Optional[datetime.time] = None,
**kwargs
):
super(MicrosoftGraphBookingWorkTimeSlot, self).__init__(**kwargs)
self.start = start
self.end = end
class MicrosoftGraphDateTimeZone(msrest.serialization.Model):
"""dateTimeTimeZone.
:param date_time: A single point of time in a combined date and time representation
({date}T{time}; for example, 2017-08-29T04:00:00.0000000).
:type date_time: str
:param time_zone: Represents a time zone, for example, 'Pacific Standard Time'. See below for
more possible values.
:type time_zone: str
"""
_attribute_map = {
'date_time': {'key': 'dateTime', 'type': 'str'},
'time_zone': {'key': 'timeZone', 'type': 'str'},
}
def __init__(
self,
*,
date_time: Optional[str] = None,
time_zone: Optional[str] = None,
**kwargs
):
super(MicrosoftGraphDateTimeZone, self).__init__(**kwargs)
self.date_time = date_time
self.time_zone = time_zone
class MicrosoftGraphOutlookGeoCoordinates(msrest.serialization.Model):
"""outlookGeoCoordinates.
:param altitude: The altitude of the location.
:type altitude: float
:param latitude: The latitude of the location.
:type latitude: float
:param longitude: The longitude of the location.
:type longitude: float
    :param accuracy: The accuracy of the latitude and longitude. For example, the accuracy can be
     measured in meters, indicating that the latitude and longitude are accurate to within 50 meters.
:type accuracy: float
:param altitude_accuracy: The accuracy of the altitude.
:type altitude_accuracy: float
"""
_attribute_map = {
'altitude': {'key': 'altitude', 'type': 'float'},
'latitude': {'key': 'latitude', 'type': 'float'},
'longitude': {'key': 'longitude', 'type': 'float'},
'accuracy': {'key': 'accuracy', 'type': 'float'},
'altitude_accuracy': {'key': 'altitudeAccuracy', 'type': 'float'},
}
def __init__(
self,
*,
altitude: Optional[float] = None,
latitude: Optional[float] = None,
longitude: Optional[float] = None,
accuracy: Optional[float] = None,
altitude_accuracy: Optional[float] = None,
**kwargs
):
super(MicrosoftGraphOutlookGeoCoordinates, self).__init__(**kwargs)
self.altitude = altitude
self.latitude = latitude
self.longitude = longitude
self.accuracy = accuracy
self.altitude_accuracy = altitude_accuracy
class MicrosoftGraphPhysicalAddress(msrest.serialization.Model):
"""physicalAddress.
:param type: Possible values include: "unknown", "home", "business", "other".
:type type: str or ~bookings.models.MicrosoftGraphPhysicalAddressType
:param post_office_box:
:type post_office_box: str
:param street: The street.
:type street: str
:param city: The city.
:type city: str
:param state: The state.
:type state: str
:param country_or_region: The country or region. It's a free-format string value, for example,
'United States'.
:type country_or_region: str
:param postal_code: The postal code.
:type postal_code: str
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'post_office_box': {'key': 'postOfficeBox', 'type': 'str'},
'street': {'key': 'street', 'type': 'str'},
'city': {'key': 'city', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'country_or_region': {'key': 'countryOrRegion', 'type': 'str'},
'postal_code': {'key': 'postalCode', 'type': 'str'},
}
def __init__(
self,
*,
type: Optional[Union[str, "MicrosoftGraphPhysicalAddressType"]] = None,
post_office_box: Optional[str] = None,
street: Optional[str] = None,
city: Optional[str] = None,
state: Optional[str] = None,
country_or_region: Optional[str] = None,
postal_code: Optional[str] = None,
**kwargs
):
super(MicrosoftGraphPhysicalAddress, self).__init__(**kwargs)
self.type = type
self.post_office_box = post_office_box
self.street = street
self.city = city
self.state = state
self.country_or_region = country_or_region
self.postal_code = postal_code
class OdataError(msrest.serialization.Model):
"""OdataError.
All required parameters must be populated in order to send to Azure.
:param error: Required.
:type error: ~bookings.models.OdataErrorMain
"""
_validation = {
'error': {'required': True},
}
_attribute_map = {
'error': {'key': 'error', 'type': 'OdataErrorMain'},
}
def __init__(
self,
*,
error: "OdataErrorMain",
**kwargs
):
super(OdataError, self).__init__(**kwargs)
self.error = error
class OdataErrorDetail(msrest.serialization.Model):
"""OdataErrorDetail.
All required parameters must be populated in order to send to Azure.
:param code: Required.
:type code: str
:param message: Required.
:type message: str
:param target:
:type target: str
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
}
def __init__(
self,
*,
code: str,
message: str,
target: Optional[str] = None,
**kwargs
):
super(OdataErrorDetail, self).__init__(**kwargs)
self.code = code
self.message = message
self.target = target
class OdataErrorMain(msrest.serialization.Model):
"""OdataErrorMain.
All required parameters must be populated in order to send to Azure.
:param code: Required.
:type code: str
:param message: Required.
:type message: str
:param target:
:type target: str
:param details:
:type details: list[~bookings.models.OdataErrorDetail]
:param innererror: The structure of this object is service-specific.
:type innererror: object
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[OdataErrorDetail]'},
'innererror': {'key': 'innererror', 'type': 'object'},
}
def __init__(
self,
*,
code: str,
message: str,
target: Optional[str] = None,
details: Optional[List["OdataErrorDetail"]] = None,
innererror: Optional[object] = None,
**kwargs
):
super(OdataErrorMain, self).__init__(**kwargs)
self.code = code
self.message = message
self.target = target
self.details = details
self.innererror = innererror
class Paths1Bomg32BookingbusinessesBookingbusinessIdCalendarviewBookingappointmentIdMicrosoftGraphCancelPostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):
"""Paths1Bomg32BookingbusinessesBookingbusinessIdCalendarviewBookingappointmentIdMicrosoftGraphCancelPostRequestbodyContentApplicationJsonSchema.
:param cancellation_message:
:type cancellation_message: str
"""
_attribute_map = {
'cancellation_message': {'key': 'cancellationMessage', 'type': 'str'},
}
def __init__(
self,
*,
cancellation_message: Optional[str] = None,
**kwargs
):
super(Paths1Bomg32BookingbusinessesBookingbusinessIdCalendarviewBookingappointmentIdMicrosoftGraphCancelPostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs)
self.cancellation_message = cancellation_message
class Paths1K88Cl0BookingbusinessesBookingbusinessIdAppointmentsBookingappointmentIdMicrosoftGraphCancelPostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):
"""Paths1K88Cl0BookingbusinessesBookingbusinessIdAppointmentsBookingappointmentIdMicrosoftGraphCancelPostRequestbodyContentApplicationJsonSchema.
:param cancellation_message:
:type cancellation_message: str
"""
_attribute_map = {
'cancellation_message': {'key': 'cancellationMessage', 'type': 'str'},
}
def __init__(
self,
*,
cancellation_message: Optional[str] = None,
**kwargs
):
super(Paths1K88Cl0BookingbusinessesBookingbusinessIdAppointmentsBookingappointmentIdMicrosoftGraphCancelPostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs)
self.cancellation_message = cancellation_message
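# Hedged usage sketch (editor addition, not generated by AutoRest). msrest
# models serialize to/from wire-format dicts via the keys in _attribute_map;
# the sample values below are invented for illustration.
if __name__ == '__main__':
    appt = MicrosoftGraphBookingAppointment(
        id='appt-1',
        customer_name='Contoso Customer',
        start=MicrosoftGraphDateTimeZone(date_time='2020-01-01T09:00:00', time_zone='UTC'),
    )
    wire = appt.serialize()                     # dict keyed by wire names, e.g. 'customerName'
    assert wire['customerName'] == 'Contoso Customer'
    again = MicrosoftGraphBookingAppointment.deserialize(wire)
    assert again.customer_name == 'Contoso Customer'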
|
[
"japhethobalak@gmail.com"
] |
japhethobalak@gmail.com
|
9bc772e784fcb4872decb19b72e0aa8f010a0cd3
|
0a727f3ffde045805b9b789abbaa9c8497667f8e
|
/Results.py
|
53ba841769561b9aea6162911e8cea3615dc2cf9
|
[
"MIT"
] |
permissive
|
esitarski/CrossMgr
|
ff4a632089a144f6ecc57970e2b29a7c31a15118
|
a95ac1d65f2d0cab712cc6e5f9393668c1bbf83c
|
refs/heads/master
| 2023-08-30T22:48:43.457978
| 2023-08-24T14:12:44
| 2023-08-24T14:12:44
| 1,042,402
| 33
| 20
|
MIT
| 2023-04-30T13:32:11
| 2010-11-01T17:25:15
|
Python
|
UTF-8
|
Python
| false
| false
| 31,446
|
py
|
import wx
import re
import os
import sys
import Model
import Utils
import ColGrid
from collections import defaultdict
from FixCategories import FixCategories, SetCategory
from GetResults import GetResults, RidersCanSwap
from ExportGrid import ExportGrid
from RiderDetail import ShowRiderDetailDialog
from EditEntry import CorrectNumber, ShiftNumber, InsertNumber, DeleteEntry, SwapEntry
from Undo import undo
import Flags
bitmapCache = {}
class IOCCodeRenderer(wx.grid.GridCellRenderer):
def getImgWidth( self, ioc, height ):
img = Flags.GetFlagImage( ioc )
if img:
imgHeight = int( height * 0.8 )
imgWidth = int( float(img.GetWidth()) / float(img.GetHeight()) * float(imgHeight) )
padding = int(height * 0.1)
return img, imgWidth, imgHeight, padding
return None, 0, 0, 0
def Draw(self, grid, attr, dc, rect, row, col, isSelected):
text = grid.GetCellValue(row, col)
dc.SetFont( attr.GetFont() )
w, h = dc.GetTextExtent( text )
ioc = text[:3]
img, imgWidth, imgHeight, padding = self.getImgWidth(ioc, h)
fg = attr.GetTextColour()
bg = attr.GetBackgroundColour()
if isSelected:
fg, bg = bg, fg
dc.SetBrush( wx.Brush(bg, wx.SOLID) )
dc.SetPen( wx.TRANSPARENT_PEN )
dc.DrawRectangle( rect )
rectText = wx.Rect( rect.GetX()+padding+imgWidth, rect.GetY(), rect.GetWidth()-padding-imgWidth, rect.GetHeight() )
hAlign, vAlign = attr.GetAlignment()
dc.SetTextForeground( fg )
dc.SetTextBackground( bg )
grid.DrawTextRectangle(dc, text, rectText, hAlign, vAlign)
if img:
key = (ioc, imgHeight)
if key not in bitmapCache:
bitmapCache[key] = img.Scale(imgWidth, imgHeight, wx.IMAGE_QUALITY_HIGH).ConvertToBitmap()
dc.DrawBitmap( bitmapCache[key], rect.GetX(), rect.GetY()+(rect.GetHeight()-imgHeight)//2 )
def GetBestSize(self, grid, attr, dc, row, col):
text = grid.GetCellValue(row, col)
dc.SetFont(attr.GetFont())
w, h = dc.GetTextExtent( text )
img, imgWidth, imgHeight, padding = self.getImgWidth(text[:3], h)
if img:
return wx.Size(w + imgWidth + padding, h)
else:
return wx.Size(w, h)
def Clone(self):
return IOCCodeRenderer()
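# Editor note (hedged sketch, not from this file): a custom renderer like
# IOCCodeRenderer is typically attached per cell or per column, e.g.
#   grid.SetCellRenderer(row, col, IOCCodeRenderer())
# or, for a whole column,
#   attr = wx.grid.GridCellAttr()
#   attr.SetRenderer(IOCCodeRenderer())
#   grid.SetColAttr(col, attr)
# wx then calls Draw()/GetBestSize() as cells paint, and Clone() lets the
# attribute system copy the renderer.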
reNonDigits = re.compile( '[^0-9]' )
reLapMatch = re.compile( '<?Lap>? ([0-9]+)' )
class Results( wx.Panel ):
DisplayLapTimes = 0
DisplayRaceTimes = 1
DisplayLapSpeeds = 2
DisplayRaceSpeeds = 3
def __init__( self, parent, id = wx.ID_ANY ):
super().__init__(parent, id)
self.category = None
self.showRiderData = True
self.selectDisplay = 0
self.firstDraw = True
self.rcInterp = set()
self.rcNumTime = set()
self.numSelect = None
self.isEmpty = True
        self.reSplit = re.compile( r'[\[\]+= ]+' )  # separators for the fields (raw string avoids invalid-escape warnings).
self.iLap = None
self.entry = None
self.iRow, self.iCol = None, None
self.iLastLap = 0
self.fastestLapRC = None
self.hbs = wx.BoxSizer(wx.HORIZONTAL)
self.categoryLabel = wx.StaticText( self, label = _('Category:') )
self.categoryChoice = wx.Choice( self )
self.Bind(wx.EVT_CHOICE, self.doChooseCategory, self.categoryChoice)
self.showRiderDataToggle = wx.ToggleButton( self, label = _('Show Rider Data'), style=wx.BU_EXACTFIT )
self.showRiderDataToggle.SetValue( self.showRiderData )
self.Bind( wx.EVT_TOGGLEBUTTON, self.onShowRiderData, self.showRiderDataToggle )
self.showLapTimesRadio = wx.RadioButton( self, label = _('Lap Times'), style=wx.BU_EXACTFIT|wx.RB_GROUP )
self.showLapTimesRadio.SetValue( self.selectDisplay == Results.DisplayLapTimes )
self.Bind( wx.EVT_RADIOBUTTON, self.onSelectDisplayOption, self.showLapTimesRadio )
self.showLapTimesRadio.SetToolTip(wx.ToolTip(_('Useful for finding the fastest lap.')))
self.showRaceTimesRadio = wx.RadioButton( self, label = _('Race Times'), style=wx.BU_EXACTFIT )
self.showRaceTimesRadio.SetValue( self.selectDisplay == Results.DisplayRaceTimes )
self.Bind( wx.EVT_RADIOBUTTON, self.onSelectDisplayOption, self.showRaceTimesRadio )
        self.showRaceTimesRadio.SetToolTip(wx.ToolTip(_('Useful for finding Prime winners.\nAfter selecting, click on a lap header to sort.')))
self.showLapSpeedsRadio = wx.RadioButton( self, label = _('Lap Speeds'), style=wx.BU_EXACTFIT )
self.showLapSpeedsRadio.SetValue( self.selectDisplay == Results.DisplayLapSpeeds )
self.Bind( wx.EVT_RADIOBUTTON, self.onSelectDisplayOption, self.showLapSpeedsRadio )
self.showLapSpeedsRadio.SetToolTip(wx.ToolTip(_('Useful for finding the fastest lap.')))
self.showRaceSpeedsRadio = wx.RadioButton( self, label = _('Race Speeds'), style=wx.BU_EXACTFIT )
self.showRaceSpeedsRadio.SetValue( self.selectDisplay == Results.DisplayRaceSpeeds )
self.Bind( wx.EVT_RADIOBUTTON, self.onSelectDisplayOption, self.showRaceSpeedsRadio )
        self.showRaceSpeedsRadio.SetToolTip(wx.ToolTip(_("Useful for predicting how long a race will take based on a rider's average speed.")))
f = self.showLapTimesRadio.GetFont()
self.boldFont = wx.Font( f.GetPointSize()+2, f.GetFamily(), f.GetStyle(), wx.FONTWEIGHT_BOLD, f.GetUnderlined() )
self.search = wx.SearchCtrl(self, size=(80,-1), style=wx.TE_PROCESS_ENTER )
# self.search.ShowCancelButton( True )
self.Bind(wx.EVT_SEARCHCTRL_SEARCH_BTN, self.OnSearch, self.search)
self.Bind(wx.EVT_SEARCHCTRL_CANCEL_BTN, self.OnCancelSearch, self.search)
self.Bind(wx.EVT_TEXT_ENTER, self.OnDoSearch, self.search)
bitmap = wx.Bitmap( os.path.join(Utils.getImageFolder(), 'Zoom-In-icon.png'), wx.BITMAP_TYPE_PNG )
self.zoomInButton = wx.BitmapButton( self, wx.ID_ZOOM_IN, bitmap, style=wx.BU_EXACTFIT | wx.BU_AUTODRAW )
self.Bind( wx.EVT_BUTTON, self.onZoomIn, self.zoomInButton )
bitmap = wx.Bitmap( os.path.join(Utils.getImageFolder(), 'Zoom-Out-icon.png'), wx.BITMAP_TYPE_PNG )
self.zoomOutButton = wx.BitmapButton( self, wx.ID_ZOOM_OUT, bitmap, style=wx.BU_EXACTFIT | wx.BU_AUTODRAW )
self.Bind( wx.EVT_BUTTON, self.onZoomOut, self.zoomOutButton )
self.hbs.Add( self.categoryLabel, flag=wx.TOP | wx.BOTTOM | wx.LEFT | wx.ALIGN_CENTRE_VERTICAL, border=4 )
self.hbs.Add( self.categoryChoice, flag=wx.ALL, border=4 )
self.hbs.Add( self.showRiderDataToggle, flag=wx.ALL | wx.ALIGN_CENTRE_VERTICAL, border=4 )
self.hbs.Add( self.showLapTimesRadio, flag=wx.ALL | wx.ALIGN_CENTRE_VERTICAL, border=4 )
self.hbs.Add( self.showRaceTimesRadio, flag=wx.ALL | wx.ALIGN_CENTRE_VERTICAL, border=4 )
self.hbs.Add( self.showLapSpeedsRadio, flag=wx.ALL | wx.ALIGN_CENTRE_VERTICAL, border=4 )
self.hbs.Add( self.showRaceSpeedsRadio, flag=wx.ALL | wx.ALIGN_CENTRE_VERTICAL, border=4 )
self.hbs.AddStretchSpacer()
self.hbs.Add( self.search, flag=wx.TOP | wx.BOTTOM | wx.LEFT | wx.ALIGN_CENTRE_VERTICAL, border=4 )
self.hbs.Add( self.zoomInButton, flag=wx.TOP | wx.BOTTOM | wx.LEFT | wx.ALIGN_CENTRE_VERTICAL, border=4 )
self.hbs.Add( self.zoomOutButton, flag=wx.TOP | wx.BOTTOM | wx.RIGHT | wx.ALIGN_CENTRE_VERTICAL, border=4 )
self.whiteColour = wx.Colour( 255, 255, 255 )
self.blackColour = wx.Colour( 0, 0, 0 )
self.yellowColour = wx.Colour( 255, 255, 0 )
self.orangeColour = wx.Colour( 255, 165, 0 )
self.greyColour = wx.Colour( 150, 150, 150 )
self.greenColour = wx.Colour( 127, 210, 0 )
self.lightBlueColour = wx.Colour( 153, 205, 255 )
self.splitter = wx.SplitterWindow( self )
self.labelGrid = ColGrid.ColGrid( self.splitter, style=wx.BORDER_SUNKEN )
self.labelGrid.SetRowLabelSize( 0 )
self.labelGrid.SetMargins( 0, 0 )
self.labelGrid.SetRightAlign( True )
self.labelGrid.AutoSizeColumns( True )
self.labelGrid.DisableDragColSize()
self.labelGrid.DisableDragRowSize()
# put a tooltip on the cells in a column
self.labelGrid.GetGridWindow().Bind(wx.EVT_MOTION, self.onMouseOver)
self.lapGrid = ColGrid.ColGrid( self.splitter, style=wx.BORDER_SUNKEN )
self.lapGrid.SetRowLabelSize( 0 )
self.lapGrid.SetMargins( 0, 0 )
self.lapGrid.SetRightAlign( True )
self.lapGrid.AutoSizeColumns( True )
self.lapGrid.DisableDragColSize()
self.lapGrid.DisableDragRowSize()
self.splitter.SetMinimumPaneSize(100)
self.splitter.SplitVertically(self.labelGrid, self.lapGrid, 400)
# Sync the two vertical scrollbars.
self.labelGrid.Bind(wx.EVT_SCROLLWIN, self.onScroll)
self.lapGrid.Bind(wx.EVT_SCROLLWIN, self.onScroll)
self.Bind( wx.grid.EVT_GRID_SELECT_CELL, self.doNumSelect )
self.Bind( wx.grid.EVT_GRID_CELL_LEFT_DCLICK, self.doNumDrilldown )
self.Bind( wx.grid.EVT_GRID_CELL_RIGHT_CLICK, self.doRightClick )
self.lapGrid.Bind( wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.doLabelClick )
self.labelGrid.Bind( wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.doLabelClick )
bs = wx.BoxSizer(wx.VERTICAL)
#bs.Add(self.hbs)
#bs.Add(self.lapGrid, 1, wx.GROW|wx.ALL, 5)
bs.Add(self.hbs, 0, wx.EXPAND )
bs.Add(self.splitter, 1, wx.EXPAND|wx.GROW|wx.ALL, 5 )
self.SetDoubleBuffered( True )
self.SetSizer(bs)
bs.SetSizeHints(self)
def onScroll(self, evt):
if evt.GetOrientation() == wx.SB_VERTICAL:
if evt.GetEventObject() == self.lapGrid:
wx.CallAfter( Utils.AlignVerticalScroll, self.lapGrid, self.labelGrid )
else:
wx.CallAfter( Utils.AlignVerticalScroll, self.labelGrid, self.lapGrid )
evt.Skip()
def onMouseOver( self, event ):
"""
Displays a tooltip for the close finishes.
"""
x, y = self.labelGrid.CalcUnscrolledPosition(event.GetX(),event.GetY())
row, col = self.labelGrid.XYToCell(x, y)
try:
num = int(self.labelGrid.GetCellValue(row, 1))
except Exception:
return
if num in self.closeFinishBibs:
try:
pos = int(self.labelGrid.GetCellValue(row, 0))
except Exception:
return
event.GetEventObject().SetToolTip('{} {}, {} {}: {} {}'.format(
_('Pos'), pos,
_('Bib'), num,
_('close finish to'), ','.join( '{} {}'.format(_('Bib'), bib) for bib in self.closeFinishBibs[num]),
)
)
else:
event.GetEventObject().SetToolTip('')
def alignLabelToLapScroll(self):
Utils.AlignVerticalScroll( self.labelGrid, self.lapGrid )
def alignLapToLabelScroll(self):
Utils.AlignVerticalScroll( self.lapGrid, self.labelGrid )
def OnSearch( self, event ):
self.OnDoSearch()
def OnCancelSearch( self, event ):
self.search.SetValue( '' )
def OnDoSearch( self, event = None ):
wx.CallAfter( self.search.SetFocus )
n = self.search.GetValue()
if n:
n = reNonDigits.sub( '', n )
self.search.SetValue( n )
if not n:
n = None
if n:
self.numSelect = n
if self.category and not self.category.matches( int(n) ):
self.setCategoryAll()
self.refresh()
if Utils.isMainWin():
Utils.getMainWin().setNumSelect( n )
self.ensureVisibleNumSelect()
def onZoomOut( self, event ):
self.labelGrid.Zoom( False )
self.lapGrid.Zoom( False )
self.splitter.UpdateSize()
wx.CallAfter( self.refresh )
def onZoomIn( self, event ):
self.labelGrid.Zoom( True )
self.lapGrid.Zoom( True )
self.splitter.UpdateSize()
wx.CallAfter( self.refresh )
def onShowRiderData( self, event ):
self.showRiderData ^= True
wx.CallAfter( self.refresh )
def onSelectDisplayOption( self, event ):
for i, r in enumerate([self.showLapTimesRadio, self.showRaceTimesRadio, self.showLapSpeedsRadio, self.showRaceSpeedsRadio]):
if r.GetValue():
self.selectDisplay = i
break
wx.CallAfter( self.refresh )
def doLabelClick( self, event ):
col = event.GetCol()
with Model.LockRace() as race:
race.sortLap = None
race.sortLabel = None
if event.GetEventObject() == self.lapGrid:
label = self.lapGrid.GetColLabelValue( col )
if label.startswith( _('Lap') ):
race.sortLap = int(label.split()[1])
else:
label = self.labelGrid.GetColLabelValue( col )
if label[:1] != '<':
race.sortLabel = label
wx.CallAfter( self.refresh )
def doRightClick( self, event ):
wx.CallAfter( self.search.SetFocus )
self.doNumSelect( event )
if self.numSelect is None:
return
allCases = 0
interpCase = 1
nonInterpCase = 2
if not hasattr(self, 'popupInfo'):
self.popupInfo = [
(_('Passings'), _('Switch to Passings tab'), self.OnPopupHistory, allCases),
(_('RiderDetail'), _('Show RiderDetail Dialog'), self.OnPopupRiderDetail, allCases),
(None, None, None, None),
(_('Show Photos'), _('Show Photos'), self.OnPopupShowPhotos, allCases),
(None, None, None, None),
(_('Correct...'), _('Change number or lap time...'), self.OnPopupCorrect, interpCase),
(_('Shift...'), _('Move lap time earlier/later...'), self.OnPopupShift, interpCase),
(_('Delete...'), _('Delete lap time...'), self.OnPopupDelete, nonInterpCase),
(None, None, None, None),
(_('Swap with Rider before'), _('Swap with Rider before'), self.OnPopupSwapBefore, allCases),
(_('Swap with Rider after'), _('Swap with Rider after'), self.OnPopupSwapAfter, allCases),
]
self.menuOptions = {}
for numBefore in [False, True]:
for numAfter in [False, True]:
for caseCode in range(3):
menu = wx.Menu()
for name, text, callback, cCase in self.popupInfo:
if not name:
Utils.addMissingSeparator( menu )
continue
if caseCode < cCase:
continue
if (name.endswith(_('before')) and not numBefore) or (name.endswith(_('after')) and not numAfter):
continue
item = menu.Append( wx.ID_ANY, name, text )
self.Bind( wx.EVT_MENU, callback, item )
Utils.deleteTrailingSeparators( menu )
self.menuOptions[(numBefore,numAfter,caseCode)] = menu
num = int(self.numSelect)
with Model.LockRace() as race:
if not race or num not in race.riders:
return
category = FixCategories( self.categoryChoice, getattr(race, 'resultsCategory', 0) )
riderResults = dict( (r.num, r) for r in GetResults(category) )
entries = race.riders[num].interpolate()
try:
laps = riderResults[num].laps
self.entry = next(e for e in entries if e.t == riderResults[num].raceTimes[laps])
caseCode = 1 if self.entry.interp else 2
except (TypeError, IndexError, KeyError):
caseCode = 0
except StopIteration:
return
self.numBefore, self.numAfter = None, None
for iRow, attr in [(self.iRow - 1, 'numBefore'), (self.iRow + 1, 'numAfter')]:
if not (0 <= iRow < self.lapGrid.GetNumberRows()):
continue
numAdjacent = int( self.labelGrid.GetCellValue(iRow, 1) )
if RidersCanSwap( riderResults, num, numAdjacent ):
setattr( self, attr, numAdjacent )
menu = self.menuOptions[(self.numBefore is not None, self.numAfter is not None, caseCode)]
try:
self.PopupMenu( menu )
except Exception as e:
Utils.writeLog( 'Results:doRightClick: {}'.format(e) )
def OnPopupCorrect( self, event ):
CorrectNumber( self, self.entry )
def OnPopupShift( self, event ):
ShiftNumber( self, self.entry )
def OnPopupDelete( self, event ):
DeleteEntry( self, self.entry )
def swapEntries( self, num, numAdjacent ):
if not num or not numAdjacent:
return
with Model.LockRace() as race:
if (not race or
num not in race.riders or
numAdjacent not in race.riders ):
return
e1 = race.getRider(num).interpolate()
e2 = race.getRider(numAdjacent).interpolate()
category = FixCategories( self.categoryChoice, getattr(race, 'resultsCategory', 0) )
riderResults = dict( (r.num, r) for r in GetResults(category) )
try:
rr1, rr2 = riderResults[num], riderResults[numAdjacent]
laps = rr1.laps
undo.pushState()
ee1 = next( e for e in e1 if e.t == rr1.raceTimes[laps] )
ee2 = next( e for e in e2 if e.t == rr2.raceTimes[laps] )
with Model.LockRace() as race:
SwapEntry( ee1, ee2 )
wx.CallAfter( self.refresh )
except (KeyError, StopIteration):
pass
def showLastLap( self ):
if not self.isEmpty:
self.iLastLap = max( min(self.lapGrid.GetNumberCols()-1, self.iLastLap), 0 )
self.labelGrid.MakeCellVisible( 0, 0 )
self.lapGrid.MakeCellVisible( 0, self.iLastLap )
def OnPopupSwapBefore( self, event ):
self.swapEntries( int(self.numSelect), self.numBefore )
def OnPopupSwapAfter( self, event ):
self.swapEntries( int(self.numSelect), self.numAfter )
def OnPopupHistory( self, event ):
mainWin = Utils.getMainWin()
if mainWin:
mainWin.showPageName( mainWin.iPassingsPage )
def OnPopupRiderDetail( self, event ):
ShowRiderDetailDialog( self, self.numSelect )
def OnPopupShowPhotos( self, event ):
mainWin = Utils.mainWin
if not mainWin:
return
mainWin.photoDialog.Show( True )
mainWin.photoDialog.setNumSelect( int(self.numSelect) )
def ensureVisibleNumSelect( self ):
try:
numSelectSearch = int(self.numSelect)
except (TypeError, ValueError):
return
for r in range(self.labelGrid.GetNumberRows()-1, -1, -1):
try:
cellNum = int(self.labelGrid.GetCellValue(r,1))
except Exception:
continue
if cellNum == numSelectSearch:
self.labelGrid.MakeCellVisible( r, 1 )
wx.CallAfter( Utils.AlignVerticalScroll, self.labelGrid, self.lapGrid )
break
def showNumSelect( self ):
race = Model.race
if race is None:
return
try:
numSelectSearch = int(self.numSelect)
except (TypeError, ValueError):
numSelectSearch = None
textColourLap = {}
backgroundColourLap = { rc:self.yellowColour for rc in self.rcInterp }
backgroundColourLap.update( { rc:self.orangeColour for rc in self.rcNumTime } )
if self.fastestLapRC is not None:
backgroundColourLap[self.fastestLapRC] = self.greenColour
textColourLabel = {}
backgroundColourLabel = {}
timeCol = None
for c in range(self.labelGrid.GetNumberCols()):
if self.labelGrid.GetColLabelValue(c) == _('Time'):
timeCol = c
break
for r in range(self.lapGrid.GetNumberRows()):
try:
cellNum = int(self.labelGrid.GetCellValue(r,1))
except Exception:
continue
if cellNum == numSelectSearch:
for c in range(self.labelGrid.GetNumberCols()):
textColourLabel[ (r,c) ] = self.whiteColour
backgroundColourLabel[ (r,c) ] = self.blackColour
for c in range(self.lapGrid.GetNumberCols()):
textColourLap[ (r,c) ] = self.whiteColour
backgroundColourLap[ (r,c) ] = self.blackColour if (r,c) not in self.rcInterp and (r,c) not in self.rcNumTime else self.greyColour
if cellNum in self.closeFinishBibs:
textColourLabel[ (r,0) ] = self.blackColour
backgroundColourLabel[ (r,0) ] = self.lightBlueColour
if timeCol is not None:
textColourLabel[ (r,timeCol) ] = self.blackColour
backgroundColourLabel[ (r,timeCol) ] = self.lightBlueColour
# Highlight the sorted columns.
for c in range(self.lapGrid.GetNumberCols()):
if self.lapGrid.GetColLabelValue(c).startswith('<'):
for r in range(self.lapGrid.GetNumberRows()):
textColourLap[ (r,c) ] = self.whiteColour
backgroundColourLap[ (r,c) ] = self.blackColour \
if (r,c) not in self.rcInterp and (r,c) not in self.rcNumTime else self.greyColour
break
for c in range(self.labelGrid.GetNumberCols()):
if self.labelGrid.GetColLabelValue(c).startswith('<'):
for r in range(self.labelGrid.GetNumberRows()):
textColourLabel[ (r,c) ] = self.whiteColour
backgroundColourLabel[ (r,c) ] = self.blackColour
break
self.labelGrid.Set( textColour=textColourLabel, backgroundColour=backgroundColourLabel )
self.lapGrid.Set( textColour=textColourLap, backgroundColour=backgroundColourLap )
self.labelGrid.Reset()
self.lapGrid.Reset()
def doNumDrilldown( self, event ):
self.doNumSelect( event )
mainWin = Utils.getMainWin()
if self.numSelect is not None and mainWin:
ShowRiderDetailDialog( self, self.numSelect )
def doNumSelect( self, event ):
grid = event.GetEventObject()
self.iLap = None
if self.isEmpty:
return
row, col = event.GetRow(), event.GetCol()
self.iRow, self.iCol = row, col
if row >= self.labelGrid.GetNumberRows():
return
if grid == self.lapGrid and self.lapGrid.GetCellValue(row, col):
try:
colName = self.lapGrid.GetColLabelValue( col )
self.iLap = int( reLapMatch.match(colName).group(1) )
except Exception:
pass
value = self.labelGrid.GetCellValue( row, 1 )
numSelect = value if value else None
if self.numSelect != numSelect:
self.numSelect = numSelect
self.showNumSelect()
mainWin = Utils.getMainWin()
if mainWin:
historyCategoryChoice = mainWin.history.categoryChoice
historyCat = FixCategories( historyCategoryChoice )
if historyCat is not None:
cat = FixCategories( self.categoryChoice )
if historyCat != cat:
Model.setCategoryChoice( self.categoryChoice.GetSelection(), 'resultsCategory' )
SetCategory( historyCategoryChoice, cat )
mainWin.setNumSelect( numSelect )
def setCategoryAll( self ):
FixCategories( self.categoryChoice, 0 )
Model.setCategoryChoice( 0, 'resultsCategory' )
def setCategory( self, category ):
for i, c in enumerate(Model.race.getCategories( startWaveOnly=False ) if Model.race else [], 1):
if c == category:
SetCategory( self.categoryChoice, c )
Model.setCategoryChoice( i, 'resultsCategory' )
return
SetCategory( self.categoryChoice, None )
Model.setCategoryChoice( 0, 'resultsCategory' )
def doChooseCategory( self, event ):
Model.setCategoryChoice( self.categoryChoice.GetSelection(), 'resultsCategory' )
self.refresh()
def reset( self ):
self.numSelect = None
def setNumSelect( self, num ):
self.numSelect = num if num is None else '{}'.format(num)
if self.numSelect:
self.search.SetValue( self.numSelect )
def clearGrid( self ):
self.labelGrid.Set( data = [], colnames = [], textColour = {}, backgroundColour = {} )
self.labelGrid.Reset()
self.lapGrid.Set( data = [], colnames = [], textColour = {}, backgroundColour = {} )
self.lapGrid.Reset()
def refresh( self ):
self.category = None
self.isEmpty = True
self.iLastLap = 0
self.rcInterp = set() # Set of row/col coordinates of interpolated numbers.
self.rcNumTime = set()
self.search.SelectAll()
CloseFinishTime = 0.07	# seconds; finishers within this gap are flagged as close finishes
self.closeFinishBibs = defaultdict( list )
race = Model.race
if not race:
self.clearGrid()
return
category = FixCategories( self.categoryChoice, getattr(race, 'resultsCategory', 0) )
self.hbs.Layout()
for si in self.hbs.GetChildren():
if si.IsWindow():
si.GetWindow().Refresh()
self.category = category
sortLap = getattr( race, 'sortLap', None )
sortLabel = getattr( race, 'sortLabel', None )
if race.isTimeTrial:
def getSortTime( rr ):
try:
return rr.firstTime + rr._lastTimeOrig
except Exception:
return 0
else:
def getSortTime( rr ):
try:
return rr._lastTimeOrig
except Exception:
return 0
results = sorted(
(rr for rr in GetResults(category)
if rr.status==Model.Rider.Finisher and rr.lapTimes and getSortTime(rr) > 0),
key = getSortTime
)
for i in range(1, len(results)):
if results[i]._lastTimeOrig - results[i-1]._lastTimeOrig <= CloseFinishTime:
self.closeFinishBibs[results[i-1].num].append( results[i].num )
self.closeFinishBibs[results[i].num].append( results[i-1].num )
labelLastX, labelLastY = self.labelGrid.GetViewStart()
lapLastX, lapLastY = self.lapGrid.GetViewStart()
exportGrid = ExportGrid()
exportGrid.setResultsOneList( category, self.showRiderData, showLapsFrequency = 1 )
if not exportGrid.colnames:
self.clearGrid()
return
# Fix the speed column.
speedUnit = None
iSpeedCol = None
try:
iSpeedCol = next(i for i, c in enumerate(exportGrid.colnames) if c == _('Speed'))
except StopIteration:
pass
if iSpeedCol is not None:
for r, d in enumerate(exportGrid.data[iSpeedCol]):
d = d.strip()
if not d:
continue
dSplit = d.split()
if not speedUnit and len(dSplit) > 1:
exportGrid.colnames[iSpeedCol] = speedUnit = dSplit[1]
exportGrid.data[iSpeedCol][r] = dSplit[0]
if exportGrid.data[iSpeedCol][r] == '"':
exportGrid.data[iSpeedCol][r] += ' '
colnames = exportGrid.colnames
data = exportGrid.data
sortCol = None
if sortLap:
race.sortLabel = sortLabel = None
for i, name in enumerate(colnames):
if name.startswith(_('Lap')) and int(name.split()[1]) == sortLap:
sortCol = i
break
elif sortLabel:
race.sortLap = sortLap = None
if sortLabel not in {_('Pos'), _('Gap'), _('Time'), _('mph'), _('km/h')}:
for i, name in enumerate(colnames):
if name == sortLabel:
sortCol = i
break
if sortCol is None:
race.sortLabel = race.sortLap = sortLabel = sortLap = None
results = GetResults( category )
hasSpeeds = False
for result in results:
if getattr(result, 'lapSpeeds', None) or getattr(result, 'raceSpeeds', None):
hasSpeeds = True
break
if not hasSpeeds:
self.showLapSpeedsRadio.Enable( False )
self.showRaceSpeedsRadio.Enable( False )
if self.selectDisplay > Results.DisplayRaceTimes:
self.selectDisplay = Results.DisplayRaceTimes
self.showRaceTimesRadio.SetValue( True )
else:
self.showLapSpeedsRadio.Enable( True )
self.showRaceSpeedsRadio.Enable( True )
'''
for r in [self.showLapTimesRadio, self.showRaceTimesRadio, self.showLapSpeedsRadio, self.showRaceSpeedsRadio]:
if r.GetValue():
r.SetFont( self.boldFont )
else:
r.SetFont( wx.NullFont )
self.hbs.Layout()
'''
# Find the fastest lap time.
self.fastestLapRC, fastestLapSpeed, fastestLapTime = None, 0.0, sys.float_info.max
for r, result in enumerate(results):
if getattr(result, 'lapSpeeds', None): # Use speeds if available.
for c, s in enumerate(result.lapSpeeds):
if s > fastestLapSpeed:
fastestLapSpeed = s
self.fastestLapRC = (r, c)
elif result.lapTimes: # Else, use times.
for c, t in enumerate(result.lapTimes):
if t < fastestLapTime:
fastestLapTime = t
self.fastestLapRC = (r, c)
highPrecision = Model.highPrecisionTimes()
try:
firstLapCol = next(i for i, name in enumerate(colnames) if name.startswith(_('Lap')))
except StopIteration:
firstLapCol = len(colnames)
# Convert to race times, lap speeds or race speeds as required.
'''
DisplayLapTimes = 0
DisplayRaceTimes = 1
DisplayLapSpeeds = 2
DisplayRaceSpeeds = 3
'''
if self.selectDisplay == Results.DisplayRaceTimes:
for r, result in enumerate(results):
for i, t in enumerate(result.raceTimes[1:]):
try:
data[i+firstLapCol][r] = Utils.formatTimeCompressed(t, highPrecision)
except IndexError:
pass
elif self.selectDisplay == Results.DisplayLapSpeeds:
for r, result in enumerate(results):
if getattr(result, 'lapSpeeds', None):
for i, s in enumerate(result.lapSpeeds):
try:
data[i+firstLapCol][r] = '{:.2f}'.format(s)
except IndexError:
pass
elif self.selectDisplay == Results.DisplayRaceSpeeds:
for r, result in enumerate(results):
if getattr(result, 'raceSpeeds', None):
for i, s in enumerate(result.raceSpeeds):
try:
data[i+firstLapCol][r] = '{:.2f}'.format(s)
except IndexError:
pass
# Sort by the given lap, if there is one.
# Also, add a position for the lap itself.
if sortCol is not None:
maxVal = 1000.0*24.0*60.0*60.0
if sortLap:
if self.selectDisplay in [Results.DisplayLapTimes, Results.DisplayRaceTimes]:
getFunc = Utils.StrToSeconds
else:
getFunc = lambda x: -float(x)
else:
if colnames[sortCol] in [_('Start'), _('Finish'), _('Time')]:
getFunc = Utils.StrToSeconds
elif colnames[sortCol] in [_('mph'), _('km')]:
getFunc = lambda x: -float(x) if x else 0.0
elif colnames[sortCol] == _('Factor'):
getFunc = lambda x: float(x) if x else maxVal
elif colnames[sortCol] in [_('Pos'), _('Bib')]:
getFunc = lambda x: int(x) if x and '{}'.format(x).isdigit() else maxVal
else:
getFunc = lambda x: '{}'.format(x)
maxVal = '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
sortPairs = []
for r, result in enumerate(results):
try:
k = (getFunc(data[sortCol][r]), r)
except Exception:
k = (maxVal, r)
sortPairs.append( (k, r) )
sortPairs.sort()
for c in range(len(data)):
col = data[c]
data[c] = [col[i] if i < len(col) else '' for k, i in sortPairs]
if colnames[sortCol] != _('Bib'):
for r in range(len(data[sortCol])):
if data[sortCol][r]:
data[sortCol][r] = '{} [{}: {}]'.format(data[sortCol][r], r+1, data[1][r])
# Highlight the sorted column.
if sortLap:
colnames = []
for name in exportGrid.colnames:
try:
if int(name.split()[1]) == sortLap:
name = '<{}>\n{}'.format(name,
[_('by Lap Time'), _('by Race Time'), _('by Lap Speed'), _('by Race Speed')][self.selectDisplay])
except Exception:
pass
colnames.append( name )
elif sortLabel:
colnames = []
for name in exportGrid.colnames:
if name == sortLabel:
name = '<{}>'.format(name)
colnames.append( name )
else:
colnames = exportGrid.colnames
try:
iLabelMax = next(i for i, name in enumerate(colnames) if name.startswith(_('Lap')) or name.startswith('<' + _('Lap')))
except StopIteration:
iLabelMax = len(colnames)
colnamesLabels = colnames[:iLabelMax]
dataLabels = data[:iLabelMax]
colnameLaps = colnames[iLabelMax:]
dataLaps = data[iLabelMax:]
self.labelGrid.Set( data = dataLabels, colnames = colnamesLabels )
self.labelGrid.SetLeftAlignCols( exportGrid.leftJustifyCols )
self.labelGrid.AutoSizeColumns( True )
self.labelGrid.Reset()
try:
iUCICodeCol = colnamesLabels.index( _('UCICode') )
self.labelGrid.SetColRenderer( iUCICodeCol, IOCCodeRenderer() )
except ValueError:
pass
try:
iNatCodeCol = colnamesLabels.index( _('NatCode') )
self.labelGrid.SetColRenderer( iNatCodeCol, IOCCodeRenderer() )
except ValueError:
pass
self.lapGrid.Set( data = dataLaps, colnames = colnameLaps )
self.lapGrid.Reset()
self.lapGrid.AutoSizeColumns( self.lapGrid.GetNumberCols() < 100 )
self.isEmpty = False
# Find interpolated entries.
with Model.LockRace() as race:
numTimeInfo = race.numTimeInfo
riders = race.riders
for r in range(self.lapGrid.GetNumberRows()):
try:
rider = riders[int(self.labelGrid.GetCellValue(r, 1))]
except Exception:
continue
try:
entries = rider.interpolate()
except (ValueError, IndexError):
continue
if not entries:
continue
for c in range(self.lapGrid.GetNumberCols()):
if not self.lapGrid.GetCellValue(r, c):
break
try:
if entries[c+1].interp:
self.rcInterp.add( (r, c) )
elif numTimeInfo.getInfo(entries[c+1].num, entries[c+1].t) is not None:
self.rcNumTime.add( (r, c) )
elif c > self.iLastLap:
self.iLastLap = c
except IndexError:
pass
self.labelGrid.Scroll( labelLastX, labelLastY )
self.lapGrid.Scroll( lapLastX, lapLastY )
self.showNumSelect()
if self.firstDraw:
self.firstDraw = False
self.splitter.SetSashPosition( 400 )
# Fix the grids' scrollbars.
self.labelGrid.FitInside()
self.lapGrid.FitInside()
def commit( self ):
pass
if __name__ == '__main__':
Utils.disable_stdout_buffering()
app = wx.App(False)
mainWin = wx.Frame(None,title="CrossMan", size=(600,200))
Model.setRace( Model.Race() )
Model.getRace()._populate()
Model.race.winAndOut = True
results = Results(mainWin)
results.refresh()
mainWin.Show()
app.MainLoop()
|
[
"edward.sitarski@gmail.com"
] |
edward.sitarski@gmail.com
|
a824b23db4e01779cf4236b3668113861b23443d
|
343eb4d3633d2b93981fbb166f06e16dcc4836fb
|
/shared_spider/spiders/company_news_spider.py
|
3783653c849488a8fba2d4be4cfb41fa3df93972
|
[] |
no_license
|
ly2014/shared_spider
|
120630ea19e2308b4598240ca08478b473aaa030
|
a6c4d0cbd74ac649128e5aef64e24090208491ba
|
refs/heads/master
| 2020-05-17T02:54:19.517044
| 2019-04-25T15:50:32
| 2019-04-25T15:50:44
| 183,465,241
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,299
|
py
|
import scrapy
import datetime
from shared_spider.items import ArticleItem
from scrapy_redis.spiders import RedisSpider
class CompanyNews(RedisSpider):
name = 'cn_spider'
redis_key = 'cn:start_urls'
# start_urls = ['http://stock.stockstar.com/list/10.shtml']
def parse(self, response):
news = response.xpath('//div[@class="listnews"]/ul/li')
now = datetime.datetime.now()
for new in news:
dt = new.xpath('span/text()').extract_first()
if not dt:
continue
dt = datetime.datetime.strptime(dt, '%Y-%m-%d %H:%M:%S')
if ((now.year - dt.year) * 12 + now.month - dt.month) > 2:
return
item = ArticleItem()
title = new.xpath('a/text()').extract_first()
item['title'] = title
item['publish_time'] = dt
item['type'] = 4
item['content'] = ''
url = new.xpath('a/@href').extract_first()
yield scrapy.Request(url=url, callback=self.parse_content, meta={'item': item})
pages = response.xpath('//div[@class="pageControl"]/a')
if pages and pages[-1].xpath('text()').extract_first() == '下一页':
next_url = pages[-1].xpath('@href').extract_first()
url = 'http://stock.stockstar.com' + next_url
yield scrapy.Request(url=url, callback=self.parse)
def parse_content(self, response):
item = response.meta['item']
content = item['content']
tr = response.xpath('//div[@id="container-article"]//table//tr')
for r in tr:
content += r.xpath('string(.)').extract_first()
ps = response.xpath('//div[@id="container-article"]/p')
for p in ps:
if p.xpath('select').extract_first():
continue
if p.xpath('@class').extract_first() == 'noIndent':
continue
content += p.xpath('string(.)').extract_first()
item['content'] = content
next_page = response.xpath('//div[@id="Page"]/span[2]/a/@href').extract_first()
if next_page:
url = 'https://stock.stockstar.com/' + next_page
yield scrapy.Request(url=url, callback=self.parse_content, meta={'item': item})
else:
yield item
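# --- Run sketch (not part of the original spider) ---
# RedisSpider blocks on the redis key declared above, so the crawl is seeded
# by pushing a start URL (the one commented out above) into redis, e.g.:
#   redis-cli lpush cn:start_urls http://stock.stockstar.com/list/10.shtml
#   scrapy crawl cn_spider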
|
[
"ly1996lh@126.com"
] |
ly1996lh@126.com
|
a0a6944034a0d9818d25cd205674bc574b9e1179
|
124fb764c4dab141ca243cdc9ccf464730554b24
|
/blueapi.py
|
06f1502f907cab0b0a2c9452667b771f0f6132a3
|
[
"MIT"
] |
permissive
|
jacobbendicksen/BlueAPI
|
3ec639f44a66cebc84a3c9a44e8809fda0b3821b
|
344cad123769af7b0151013559f95965c4561fb2
|
refs/heads/master
| 2021-01-19T11:23:11.484779
| 2015-07-25T20:16:47
| 2015-07-25T20:16:47
| 24,250,368
| 0
| 0
| null | 2014-10-28T02:56:33
| 2014-09-20T01:11:56
|
Python
|
UTF-8
|
Python
| false
| false
| 4,330
|
py
|
#####################################################
#
# A library for accessing the Blue Alliance API
#
# Authors: Andrew Merrill and Jacob Bendicksen (Fall 2014)
#
######################################################
import urllib2
import json
# given the trailing part of an api request,
# returns a dictionary of data
# example selector: 'team/frc1540'
def sendAPIRequest(selector):
api_server = "www.thebluealliance.com"
api_base_url = '/api/v2/'
full_url = 'http://' + api_server + api_base_url + selector
headers = dict()
headers['X-TBA-App-Id'] = 'frcyyyy:your_app:v1' #replace this with something that describes you/your team
#print 'URL:', full_url
request = urllib2.Request(full_url, None, headers)
connection = urllib2.urlopen(request)
code = connection.getcode()
if code == 200:
data = connection.read()
datadict = json.loads(data)
return datadict
else:
raise Exception('blue alliance returned code: ' + str(code))
#given a team number,
# returns data about that team
def getTeamInfo(teamNumber):
return sendAPIRequest('team/frc' + str(teamNumber))
#given a team number and year,
# returns data for how that team did at all events that year
def getTeamYearInfo(teamNumber, year):
return sendAPIRequest('team/frc'+str(teamNumber)+'/'+str(year)+'/events')
#given a page number,
# returns a list of teams starting at 500*pageNumber and ending at 500*pageNumber + 499
#this returns a massive block of text!!!
def getTeams(pageNumber):
return sendAPIRequest('teams/' + str(pageNumber))
#given a team number and event key,
# returns the award(s) won by the team at the event
def getTeamEventAwards(teamNumber, eventKey):
return sendAPIRequest('team/frc' + str(teamNumber) + '/event/' + str(eventKey) + '/awards')
#given a team number and event key,
# returns the team's matches from that event
def getTeamEventMatches(teamNumber, eventKey):
return sendAPIRequest('team/frc' + str(teamNumber) + '/event/' + str(eventKey) + '/matches')
#given a team number,
# returns a list of years participated in FRC
def getTeamYearsParticipated(teamNumber):
return sendAPIRequest('team/frc' + str(teamNumber) + '/years_participated')
#given a team number and year,
# returns links to robot photos/videos from that year (depending on what they've posted)
def getTeamMedia(teamNumber, year):
return sendAPIRequest('team/frc' + str(teamNumber) + '/' + str(year) + '/media')
#given a year,
# returns the full list of that year's events, with details for each
def getEventList(year):
return sendAPIRequest('events/' + str(year))
#given an event key,
# returns a list of teams attending with all of their data
def getEventTeams(eventKey):
return sendAPIRequest('event/' + str(eventKey) + '/teams')
#given an event key,
# returns a list of all matches and their results
def getEventMatches(eventKey):
return sendAPIRequest('event/' + str(eventKey) + '/matches')
#given an event key,
# returns a list of calculated stats (OPR, CCWM, DPR)
def getEventStats(eventKey):
return sendAPIRequest('event/' + str(eventKey) + '/stats')
#given an event key,
# returns the final rankings with key stats
def getEventRankings(eventKey):
return sendAPIRequest('event/' + str(eventKey) + '/rankings')
#given an event key,
# returns the awards won at that event and which teams won them
def getEventAwards(eventKey):
return sendAPIRequest('event/' + str(eventKey) + '/awards')
#given an event key,
# returns the district/qualification points gained at that event
def getEventDistrictPoints(eventKey):
return sendAPIRequest('event/' + str(eventKey) + '/district_points')
#given a year,
# returns which districts existed that year (i.e. fim, pnw, ne, mar)
def getDistrictList(year):
return sendAPIRequest('districts/' + str(year))
#given a district area and year,
# returns all district events (including district championship), results, and elimination alliances
def getDistrictEvents(districtArea, year):
return sendAPIRequest('district/' + str(districtArea) + '/' + str(year) + '/events')
#given a district area and year,
# returns rankings and event breakdowns for each team
def getDistrictRankings(districtArea, year):
return sendAPIRequest('district/' + str(districtArea) + '/' + str(year) + '/rankings')
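# --- Usage sketch (not part of the library) ---
# Minimal smoke test; frc1540 is the example team from sendAPIRequest's
# docstring, and the raw dicts are printed rather than assuming their keys:
# if __name__ == '__main__':
#     print(getTeamInfo(1540))
#     print(getEventList(2014))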
|
[
"jacob.bendicksen@gmail.com"
] |
jacob.bendicksen@gmail.com
|
e80c9b09979e6929f73d30f254918dbade6f6524
|
5b6da19d954f9b4c79a21c4ed9aa5a5bf620ce35
|
/scripts/script_evaluate_parameter_secML.py
|
4f9d882e26623685785254a00f8bd23ca379b9c5
|
[
"MIT"
] |
permissive
|
jctaillandier/ethical-adversaries
|
04a249b7b762c3e7dd17da92334d2875e2b75f35
|
3a25e94aec0f52ee68a51e2e5b0e2807d7e29975
|
refs/heads/master
| 2022-09-24T18:54:54.399695
| 2020-06-02T15:21:51
| 2020-06-02T15:21:51
| 268,537,160
| 0
| 0
|
MIT
| 2020-06-01T13:56:58
| 2020-06-01T13:56:58
| null |
UTF-8
|
Python
| false
| false
| 1,160
|
py
|
from secml.ml.features.normalization import CNormalizerMinMax
from secml.array import CArray
from secml.figure import CFigure
def plot_loss_after_attack(evasAttack):
"""
This function plots the evolution of the loss function of the surrogate classifier
after an attack is performed.
The loss function is normalized between 0 and 1.
It helps to know whether the parameters given to the attack algorithm are well tuned or not;
the loss should be as low as possible.
The script is inspired by https://secml.gitlab.io/tutorials/11-ImageNet_advanced.html#Visualize-and-check-the-attack-optimization
"""
n_iter = evasAttack.x_seq.shape[0]
itrs = CArray.arange(n_iter)
# create a plot that shows the loss during the attack iterations
# note that the loss is not available for all attacks
fig = CFigure(width=10, height=4, fontsize=14)
# apply a linear scaling to have the loss in [0,1]
loss = evasAttack.f_seq
if loss is not None:
loss = CNormalizerMinMax().fit_transform(CArray(loss).T).ravel()
fig.subplot(1, 2, 1)
fig.sp.xlabel('iteration')
fig.sp.ylabel('loss')
fig.sp.plot(itrs, loss, c='black')
fig.tight_layout()
fig.show()
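# --- Usage sketch (hedged, not part of the original script) ---
# `attack` stands for any secml evasion attack object that exposes the
# x_seq/f_seq attributes the function above relies on after run():
# y_pred, scores, adv_ds, f_obj = attack.run(x0, y0)
# plot_loss_after_attack(attack)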
|
[
"pieter.delobelle@gmail.com"
] |
pieter.delobelle@gmail.com
|
f6039b4b1dc79bedc28b810b94aa9391d9d6e772
|
959f9c455401745d071114cf569ec458a3657f3e
|
/bin/easy_install
|
9b4b77dfb6d3bbf7868eeb7b892c1e6ff009b384
|
[] |
no_license
|
swallville/trab2
|
56b72594aa12a9e584c42fb40b7c0d0cd73b4cb5
|
ca67a71dc4c0e276d79208cc27cc85e0ace8acfa
|
refs/heads/master
| 2021-01-18T00:18:35.391288
| 2016-09-21T22:48:13
| 2016-09-21T22:48:13
| 68,666,337
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
#!/Users/sineideferreira/Virtualenvs/12_0127377_Lukas/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"unlisislukasferreira@hotmail.com"
] |
unlisislukasferreira@hotmail.com
|
|
380862cf499a03601dd3c006b1d568d254dd3689
|
5dc77812f857755688aaa187d0eaeb2b5e4aaa53
|
/data_loader.py
|
b86bd4afedaa39b71f2d4c5f8cdfbf0a15edd668
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Lcrypto/topoGAN
|
a1de60902e725ce8df26457ad126c15e1c50defa
|
2f2b97d2022decfb0203a8c6c09184a336bdd3a6
|
refs/heads/main
| 2023-04-10T07:11:15.432537
| 2021-04-25T16:17:57
| 2021-04-25T16:17:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,314
|
py
|
import torch.utils.data as data_utils
import numpy as np
import torch
import SIMLR
import os
def get_loader(features, batch_size, num_workers=1):
"""
Build and return a data loader.
"""
dataset = data_utils.TensorDataset(torch.Tensor(features))
loader = data_utils.DataLoader(dataset,
batch_size=batch_size,
shuffle = True, #set to True in case of training and False when testing the model
num_workers=num_workers
)
return loader
def learn_adj(x):
y = []
for t in x:
b = t.numpy()
y.append(b)
x = np.array(y)
batchsize = x.shape[0]
simlr = SIMLR.SIMLR_LARGE(1, batchsize // 3, 0)  # // keeps the neighbour count an integer under Python 3
adj, _,_, _ = simlr.fit(x)
array = adj.toarray()
tensor = torch.Tensor(array)
return tensor
def to_tensor(x):
y = []
for t in x:
b = t.numpy()
y.append(b)
x = np.array(y)
x = x[0]
tensor = torch.Tensor(x)
return tensor
def create_dirs_if_not_exist(dir_list):
if isinstance(dir_list, list):
for dir in dir_list:
if not os.path.exists(dir):
os.makedirs(dir)
else:
if not os.path.exists(dir_list):
os.makedirs(dir_list)
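# --- Usage sketch (not part of the original module) ---
# `my_features` below is a hypothetical (n_samples, n_features) array;
# shuffle is hard-coded to True in get_loader, so this is a training loader.
# if __name__ == '__main__':
#     my_features = np.random.rand(120, 35)
#     loader = get_loader(my_features, batch_size=30)
#     for (batch,) in loader:
#         adj = learn_adj(batch)  # SIMLR-learned adjacency for this batch
#         break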
|
[
"islem.rekik@gmail.com"
] |
islem.rekik@gmail.com
|
95193817353a775dd049bc66d3752eb32961629a
|
9b695bda060d67d3b9d00e566daa7c6dacf6b806
|
/Chapter 38 Classes/38_6_Class_methods_alternate_initializers.py
|
6b4946b872bd5fd9c5be8f343e6d9aebb788f988
|
[] |
no_license
|
erauner12/python-for-professionals
|
cc6409348e27d894e2cde8e75c14a5d57aa3f852
|
d50a14806cd0ad0727ed28facb8423bf714c4383
|
refs/heads/main
| 2023-08-05T03:49:18.890747
| 2021-09-22T23:53:19
| 2021-09-22T23:53:19
| 406,197,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,959
|
py
|
class Person(object):
def __init__(self, first_name, last_name, age):
self.first_name = first_name
self.last_name = last_name
self.age = age
self.full_name = first_name + " " + last_name
def greet(self):
print("Hello, my name is " + self.full_name + ".")
# Wrong
# two main problems with this bit of code:
# 1. The parameters first_name and last_name are now misleading, since you can enter a full name for
# first_name. Also, if there are more cases and / or more parameters that have this kind of flexibility, the
# if/elif/else branching can get annoying fast.
# 2. Not quite as important, but still worth pointing out: what if last_name is None, but first_name doesn't split
# into two or more things via spaces? We have yet another layer of input validation and / or exception
# handling...
# class Person(object):
# def __init__(self, first_name, age, last_name=None):
# if last_name is None:
# self.first_name, self.last_name = first_name.split(" ", 2)
# else:
# self.first_name = first_name
# self.last_name = last_name
# self.full_name = self.first_name + " " + self.last_name
# self.age = age
# def greet(self):
# print("Hello, my name is " + self.full_name + ".")
class Person(object):
def __init__(self, first_name, last_name, age):
self.first_name = first_name
self.last_name = last_name
self.age = age
self.full_name = first_name + " " + last_name
@classmethod
def from_full_name(cls, name, age):
if " " not in name:
raise ValueError
first_name, last_name = name.split(" ", 1)  # maxsplit=1: any extra words stay in last_name
return cls(first_name, last_name, age)
def greet(self):
print("Hello, my name is " + self.full_name + ".")
bob = Person("Bob", "Bobberson", 42)
alice = Person.from_full_name("Alice Henderson", 31)
bob.greet()
alice.greet()
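# The guard in from_full_name is exactly what the rejected version lacked:
# a name with no space cannot be split into first/last, so the classmethod
# fails fast instead of mis-assigning attributes, e.g.
# Person.from_full_name("Cher", 75)  # raises ValueError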
|
[
"erauner@medallia.com"
] |
erauner@medallia.com
|
65e8766de21c1235d5ea3b89e8c07e6dba389747
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/pytype/pytype/tests/py3/test_flax_overlay.py
|
32c3a4258e1cbeb5054e55d2ed9323d2c0c7fd99
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:495313b7611bacd6c8bb8059f4d8f1a6b3a13d672ac8f314be4faabe700c56b3
size 5162
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
335c667703905a4669a0da4115ee0777807e85a7
|
cd557e3c2b34f30f2e7caf7c79c07ff6e109fbd3
|
/zaf/zaf/builtin/config/test/test_validator.py
|
5877634a63a4f0fa670dbc5c8139d9370c11652a
|
[
"Apache-2.0"
] |
permissive
|
Zenterio/opensourcelib
|
f005174c049df0f5deddc1269d7c343a8e219ca5
|
07f0dabffaceb7b6202b5f691cbad46dac5868a8
|
refs/heads/master
| 2022-12-09T02:53:36.444094
| 2021-04-28T18:03:24
| 2021-05-27T13:14:58
| 186,092,997
| 5
| 6
|
NOASSERTION
| 2022-12-07T23:37:26
| 2019-05-11T05:44:37
|
Groovy
|
UTF-8
|
Python
| false
| false
| 11,099
|
py
|
import re
import unittest
from unittest.mock import patch
from zaf.builtin.config.validator import ConfigurationValidator
from zaf.commands import COMMAND
from zaf.commands.command import CommandId
from zaf.config.manager import ConfigManager
from zaf.config.options import ConfigOption, ConfigOptionId
from zaf.config.types import Choice, ConfigChoice, Flag, Path
class TestValidator(unittest.TestCase):
def setUp(self):
self.validator = ConfigurationValidator()
def test_validate_none_value(self):
self.assert_validate_ok(STR_OPTION, None)
def test_validate_str_option(self):
self.assert_validate_ok(STR_OPTION, 'str')
def test_validate_str_option_fails(self):
self.assert_validate_fails(
STR_OPTION, 1, "Option 'str.option' has unexpected type 'int', expected 'str'")
def test_validate_int_option(self):
self.assert_validate_ok(INT_OPTION, 1)
def test_validate_int_option_fails(self):
self.assert_validate_fails(
INT_OPTION, 'str', "Option 'int.option' has unexpected type 'str', expected 'int'")
def test_validate_int_option_fails_on_float(self):
self.assert_validate_fails(
INT_OPTION, 1.0, "Option 'int.option' has unexpected type 'float', expected 'int'")
def test_validate_float_option(self):
self.assert_validate_ok(FLOAT_OPTION, 1.0)
def test_validate_float_option_accepts_int_values(self):
self.assert_validate_ok(FLOAT_OPTION, 1)
def test_validate_float_option_fails(self):
self.assert_validate_fails(
FLOAT_OPTION, 'str',
"Option 'float.option' has unexpected type 'str', expected 'float'")
def test_validate_bool_option(self):
self.assert_validate_ok(BOOL_OPTION, True)
def test_validate_bool_option_fails(self):
self.assert_validate_fails(
BOOL_OPTION, 'str', "Option 'bool.option' has unexpected type 'str', expected 'bool'")
def test_validate_flag_option(self):
self.assert_validate_ok(FLAG_OPTION, True)
def test_validate_flag_option_fails(self):
self.assert_validate_fails(
FLAG_OPTION, 'string',
"Flag 'flag.option' has value 'string' of unexpected type 'str', expected 'bool'")
def test_validate_path_option(self):
with patch('os.path.exists', return_value=True):
self.assert_validate_ok(PATH_OPTION, '/path/to/file')
def test_validate_path_option_fails(self):
self.assert_validate_fails(
PATH_OPTION, 1, "Path has value '1' of unexpected type 'int', expected 'str'")
def test_validate_path_option_fails_if_expected_path_does_not_exists(self):
with patch('os.path.exists', return_value=False):
self.assert_validate_fails(
PATH_OPTION, '/path/to/file',
"Path '/path/to/file' for 'path.option' does not exist")
def test_validate_choice_option(self):
self.assert_validate_ok(CHOICE_OPTION, '1')
def test_validate_choice_option_fails(self):
self.assert_validate_fails(
CHOICE_OPTION, 1,
"Choice 'choice.option' has value '1' of unexpected type 'int', expected 'str'")
def test_validate_choice_option_fails_if_choice_is_not_valid(self):
self.assert_validate_fails(
CHOICE_OPTION, '3', "'3' is not a valid Choice for 'choice.option', expected '1, 2'")
def test_validate_config_choice_option(self):
self.assert_validate_ok(
CONFIG_CHOICE_OPTION, '1', '', options={
MULTIPLE_STR_OPTION: ['1', '2']
})
def test_validate_config_choice_option_fails(self):
self.assert_validate_fails(
CONFIG_CHOICE_OPTION, 1,
"ConfigChoice 'config.choice.option' has value '1' of unexpected type 'int', expected 'str'"
)
def test_validate_config_choice_option_fails_if_choice_is_not_valid(self):
self.assert_validate_fails(
CONFIG_CHOICE_OPTION,
'3',
"'3' is not a valid ConfigChoice for 'config.choice.option', expected '1, 2'",
options={
MULTIPLE_STR_OPTION: ['1', '2']
})
def test_validate_entity_config_option(self):
self.assert_validate_ok(ENTITY_OPTION, 'entity')
def test_validate_entity_option_containing_upper_case(self):
self.assert_validate_fails(
ENTITY_OPTION, 'A',
"'A' is not a valid Entity entity.option, entity names must be lower case.")
def test_validate_entity_option_containing_dash(self):
self.assert_validate_fails(
ENTITY_OPTION, '-',
"'-' is not a valid Entity entity.option, entity names must not contain '-'")
def test_validate_entity_option_containing_underscore(self):
self.assert_validate_fails(
ENTITY_OPTION, '_',
"'_' is not a valid Entity entity.option, entity names must not contain '_'")
def test_validate_entity_config_option_fails(self):
self.assert_validate_fails(
ENTITY_OPTION, 1,
"Entity 'entity.option' has value '1' of unexpected type 'int', expected 'str'")
def test_validate_at_entity_config_option(self):
self.assert_validate_ok(
AT_ENTITY_OPTION, 'at_entity', entity='entity', options={
ENTITY_OPTION: 'entity'
})
def test_validate_at_entity_config_option_fails_when_entity_is_of_wrong_type(self):
self.assert_validate_fails(
AT_ENTITY_OPTION,
'at_entity',
"Entity 'entity.option' has value '1' of unexpected type 'int', expected 'str'",
entity='entity',
options={
ENTITY_OPTION: 1
})
def test_validate_at_multiple_entity_config_option(self):
self.assert_validate_ok(
AT_MULTIPLE_ENTITY_OPTION,
'at_entity',
entity='entity1',
options={
MULTIPLE_ENTITY_OPTION: ['entity1', 'entity2']
})
def test_transform_does_not_affect_validate(self):
self.assert_validate_ok(TRANSFORM_OPTION, '2')
def test_transform_does_not_affect_validate_for_choices(self):
self.assert_validate_ok(TRANSFORM_CHOICE_OPTION, '2')
def test_transform_at_entity_option_does_not_affect_validate(self):
self.assert_validate_ok(
TRANSFORM_AT_ENTITY_OPTION, '1', entity='entity', options={
ENTITY_OPTION: 'entity'
})
def test_transform_at_multiple_entity_option_does_not_affect_validate(self):
self.assert_validate_ok(
TRANSFORM_AT_MULTIPLE_ENTITY_OPTION,
1,
entity='entity1',
options={
MULTIPLE_ENTITY_OPTION: ['entity1', 'entity2']
})
def test_validate_at_entity_option_with_transform(self):
self.assert_validate_ok(
AT_TRANSFORMED_ENTITY_OPTION,
'at_entity',
entity='11',
options={
TRANSFORMED_ENTITY_OPTION: ['1', '2']
})
def assert_validate_ok(self, option, value, entity=None, options={}):
config = create_config(
{
option.option_id: value,
COMMAND: 'cmd'
}, entity=entity, additional_options=options)
self.validator.get_config(config, [option], {CMD: []})
self.validator.get_config(config, [], {CMD: [option]})
def assert_validate_fails(self, option, value, message, entity=None, options={}):
config = create_config(
{
option.option_id: value,
COMMAND: 'cmd'
}, entity=entity, additional_options=options)
regex = '^{msg}$'.format(msg=re.escape(message))
all_options = [option]
all_options.extend(options.keys())
with self.assertRaisesRegex(TypeError, regex):
self.validator.get_config(config, all_options, {CMD: []})
with self.assertRaisesRegex(TypeError, regex):
self.validator.get_config(config, [], {CMD: all_options})
def create_config(options, entity=None, additional_options={}):
config = ConfigManager()
for option_id, value in options.items():
key = config._option_key(option_id, entity)
config._config[key].add(value, 1, '')
for option, value in additional_options.items():
key = config._option_key(option.option_id)
config._config[key].add(value, 1, '')
return config
CMD = CommandId('cmd', '', None, [])
MULTIPLE_STR_OPTION = ConfigOption(
ConfigOptionId('multiple.str.option', '', multiple=True), required=False)
STR_OPTION = ConfigOption(ConfigOptionId('str.option', ''), required=False)
INT_OPTION = ConfigOption(ConfigOptionId('int.option', '', option_type=int), required=False)
FLOAT_OPTION = ConfigOption(ConfigOptionId('float.option', '', option_type=float), required=False)
BOOL_OPTION = ConfigOption(ConfigOptionId('bool.option', '', option_type=bool), required=False)
FLAG_OPTION = ConfigOption(ConfigOptionId('flag.option', '', option_type=Flag()), required=False)
PATH_OPTION = ConfigOption(
ConfigOptionId('path.option', '', option_type=Path(exists=True)), required=False)
CHOICE_OPTION = ConfigOption(
ConfigOptionId('choice.option', '', option_type=Choice(['1', '2'])), required=False)
CONFIG_CHOICE_OPTION = ConfigOption(
ConfigOptionId(
'config.choice.option', '', option_type=ConfigChoice(MULTIPLE_STR_OPTION.option_id)),
required=False)
TRANSFORM_OPTION = ConfigOption(
ConfigOptionId('transform.option', '', transform=lambda s: int(s)), required=False)
TRANSFORM_CHOICE_OPTION = ConfigOption(
ConfigOptionId('transform.option', '', option_type=Choice(['1', '2']), transform=lambda s: 3),
required=False)
ENTITY_OPTION = ConfigOption(ConfigOptionId('entity.option', '', entity=True), required=False)
AT_ENTITY_OPTION = ConfigOption(
ConfigOptionId('at.entity.option', '', at=ENTITY_OPTION.option_id), required=False)
TRANSFORM_AT_ENTITY_OPTION = ConfigOption(
ConfigOptionId('transform.at.entity.option', '', at=ENTITY_OPTION.option_id, transform=int),
required=False)
MULTIPLE_ENTITY_OPTION = ConfigOption(
ConfigOptionId('multiple.entity.option', '', entity=True, multiple=True), required=False)
AT_MULTIPLE_ENTITY_OPTION = ConfigOption(
ConfigOptionId('multiple.at.entity.option', '', at=MULTIPLE_ENTITY_OPTION.option_id),
required=False)
TRANSFORM_AT_MULTIPLE_ENTITY_OPTION = ConfigOption(
ConfigOptionId(
'transform.at.entity.option',
'',
option_type=int,
at=MULTIPLE_ENTITY_OPTION.option_id,
transform=str),
required=False)
TRANSFORMED_ENTITY_OPTION = ConfigOption(
ConfigOptionId(
'transformed.entity.option', '', entity=True, transform=lambda s: ''.join(s * 2)),
required=False)
AT_TRANSFORMED_ENTITY_OPTION = ConfigOption(
ConfigOptionId('at.transformed.entity.option', '', at=TRANSFORMED_ENTITY_OPTION.option_id),
required=False)
|
[
"per.bohlin@zenterio.com"
] |
per.bohlin@zenterio.com
|
f20aaa52cfa93787d22f3ed858839461d594f59c
|
047f79b81387b33397fd91245e1d272cd973135f
|
/main4.py
|
2d629979e75740e0173163654e54464780f8f6a5
|
[] |
no_license
|
bstricker/hashcode2020
|
cfbed80a74c588b8f4e9d77d2dd9a36473de23b5
|
2e718d01a02db476a66f36a8ccc3f84b689d0767
|
refs/heads/master
| 2021-01-08T10:41:44.484629
| 2020-02-20T23:08:19
| 2020-02-20T23:09:14
| 242,006,504
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,603
|
py
|
import os
OUT_PATH = 'out4'
SCORE_FILE = os.path.join(OUT_PATH, 'scores.txt')
class Book:
def __init__(self, id_, score):
self.id = id_
self.score = score
def __repr__(self):
return f'{self.id}:{self.score}'
class Library:
def __init__(self, id_, n_books, days, ship, books):
self.id = id_
self.n_books = n_books
self.days = days
self.remaining_days = days
self.ship = ship
self.books = books
self.books_to_scan = sorted(books, key=lambda x: x.score, reverse=True)
self.scanned_books = []
def get_lib_score(self, scanned_books):
books = [b for b in self.books_to_scan if b not in scanned_books]
scores = sum((book.score for book in books))
return scores
def sign_up(self):
return self.days
def do(self):
if self.remaining_days > 0:
self.remaining_days -= 1
return False
return True
def scan(self, scanned_books=None):
if scanned_books:
self.books_to_scan = [b for b in self.books_to_scan if b not in scanned_books]
self.books_to_scan = sorted(self.books_to_scan, key=lambda x: x.score, reverse=True)
scanning = self.books_to_scan[: self.ship]
self.scanned_books.extend(scanning)
self.books_to_scan = self.books_to_scan[self.ship:]
return scanning
def is_scanning(self):
return len(self.books_to_scan) > 0
def __repr__(self):
return f'Lib(b={self.n_books} d={self.days} s={self.ship} bs={self.books})'
def scan(file):
print(f'Scanning file {file}')
n_books, n_libs, n_days, scores, libs = read(file)
print(f'Got {n_books} books, {n_libs} libraries and {n_days} days to work with.')
scanning_libs = []
scanner_libs = []
scanned_books = set()
n_scanning_libs = 0
signing_lib = None
for day in range(n_days):
if n_days >= 100 and day % (n_days // 100) == 0:  # progress dots; guard avoids modulo by a fraction on short inputs
print('.', end='')
if signing_lib is None:
signing_lib = get_next_lib(libs, scanned_books)
if signing_lib.do():
scanning_libs.append(signing_lib)
scanner_libs.append(signing_lib)
libs.remove(signing_lib)
n_scanning_libs += 1
signing_lib = None
for lib in list(scanning_libs):  # iterate over a copy: finished libs are removed below
scanned = lib.scan(scanned_books)
scanned_books.update(scanned)
if not lib.is_scanning():
scanning_libs.remove(lib)
print(f'\nLast day reached.')
print(f'Scanned {len(scanned_books)} books in {n_days} days.')
score = sum((b.score for b in scanned_books))
print(f'Score for file {file}: {score}')
write(file, n_scanning_libs, scanner_libs)
with open(SCORE_FILE, 'a') as f:
f.write(f'{file:<28} {score:>10}\n')
def get_next_lib(libs, scanned_books):
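    # Greedy heuristic: always pick the library whose remaining unscanned
    # books carry the highest total score; sign-up time and ship rate are
    # deliberately ignored here.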
return max(libs, key=lambda l: l.get_lib_score(scanned_books))
def write(file, n_scanning_libs, libs):
if not os.path.exists(OUT_PATH):
os.mkdir(OUT_PATH)
with open(os.path.join(OUT_PATH, file), 'w') as f:
f.write(str(n_scanning_libs))
f.write('\n')
for lib in libs:
f.write(f'{lib.id} {len(lib.scanned_books)}')
f.write('\n')
f.write(' '.join((str(book.id) for book in lib.scanned_books)))
f.write('\n')
def read(file):
with open(f'in/{file}', 'r') as f:
n_books, n_libs, n_days = [int(d) for d in f.readline().split()]
scores = [int(d) for d in f.readline().split()]
books = [Book(i, s) for i, s in enumerate(scores)]
lines = [[int(d) for d in line.split()] for line in f.readlines()]
# for (n_books, days, ship), books in zip(lines[0::2], lines[1::2]):
# books = {book: score for book, score in zip(books, scores)}
# libs.append(Library(n_books, days, ship, books))
libs = [
Library(id_, int(n_books), int(days), int(ship), [books[book_id] for book_id in book_ids])
for id_, ((n_books, days, ship), book_ids) in enumerate(zip(lines[0::2], lines[1::2]))]
return n_books, n_libs, n_days, scores, libs
if __name__ == '__main__':
if os.path.exists(SCORE_FILE):
os.remove(SCORE_FILE)
scan('a_example.txt')
print('\n')
scan('b_read_on.txt')
print('\n')
scan('c_incunabula.txt')
print('\n')
# runs too long
# scan('d_tough_choices.txt')
print('\n')
scan('e_so_many_books.txt')
print('\n')
scan('f_libraries_of_the_world.txt')
|
[
"benedikt.stricker@gmx.at"
] |
benedikt.stricker@gmx.at
|
828f9ba6a476b29f11c8f2fe1f3ca352d1f858b9
|
67615566c188d7275925cd9c371ea98fe26b18f1
|
/pytorch-chatbot/config.py
|
f6755bab02d3c2a034d1c24c14d5b94e0da7b292
|
[] |
no_license
|
1021546/test_seq2seq
|
3175684edb175f981c4285e81d28f41dc6534f45
|
d6027a4776026d841e85c042e6f8c8628d57f533
|
refs/heads/master
| 2021-04-06T03:11:56.737503
| 2018-03-15T12:44:07
| 2018-03-15T12:44:07
| 125,353,750
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
import torch
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "2,3"
USE_CUDA = torch.cuda.is_available()
MAX_LENGTH = 15
teacher_forcing_ratio = 1.0
save_dir = './save'
|
[
"noreply@github.com"
] |
1021546.noreply@github.com
|
aad763b7547791fc17c99e22c183701e08ff36d7
|
cdfaad683bfa12cbc72c16b696585dbe13b977a2
|
/generative_query_network/run/shepard_matzler/experiments/observation.py
|
1c28c10c31e067149e752538dfb4aeebb7e23874
|
[] |
no_license
|
yargel/generative-query-network
|
b109045792997aa15bffe5884f488c8330d1b8fa
|
e01340140ec01cdb65078a973e83725e0fcca9b3
|
refs/heads/master
| 2020-03-24T13:25:27.817520
| 2018-07-27T11:21:47
| 2018-07-27T11:21:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,267
|
py
|
import argparse
import math
import time
import sys
import os
import random
import chainer
import chainer.functions as cf
import cupy
import numpy as np
from chainer.backends import cuda
sys.path.append(os.path.join("..", "..", ".."))
import gqn
sys.path.append(os.path.join(".."))
from hyperparams import HyperParameters
from model import Model
def make_uint8(array):
if (array.shape[2] == 3):
return np.uint8(np.clip((to_cpu(array) + 1) * 0.5 * 255, 0, 255))
return np.uint8(
np.clip((to_cpu(array.transpose(1, 2, 0)) + 1) * 0.5 * 255, 0, 255))
def to_gpu(array):
if args.gpu_device >= 0:
return cuda.to_gpu(array)
return array
def to_cpu(array):
if args.gpu_device >= 0:
return cuda.to_cpu(array)
return array
def generate_random_query_viewpoint(ratio, xp):
rad = math.pi * 2 * ratio
eye = (3.0 * math.cos(rad), 0, 3.0 * math.sin(rad))
center = (0, 0, 0)
yaw = gqn.math.yaw(eye, center)
pitch = gqn.math.pitch(eye, center)
query_viewpoints = xp.array(
(eye[0], eye[1], eye[2], math.cos(yaw), math.sin(yaw), math.cos(pitch),
math.sin(pitch)),
dtype="float32")
query_viewpoints = xp.broadcast_to(
query_viewpoints, (args.num_generation, ) + query_viewpoints.shape)
return query_viewpoints
def main():
xp = np
using_gpu = args.gpu_device >= 0
if using_gpu:
cuda.get_device(args.gpu_device).use()
xp = cupy
hyperparams = HyperParameters()
model = Model(hyperparams, hdf5_path=args.snapshot_path)
if using_gpu:
model.to_gpu()
screen_size = hyperparams.image_size
camera = gqn.three.PerspectiveCamera(
eye=(3, 1, 0),
center=(0, 0, 0),
up=(0, 1, 0),
fov_rad=math.pi / 2.0,
aspect_ratio=screen_size[0] / screen_size[1],
z_near=0.1,
z_far=10)
figure = gqn.imgplot.figure()
axes_observations = []
axes_generations = []
sqrt_n = math.sqrt(args.num_views_per_scene)
axis_width = 0.5 / sqrt_n
axis_height = 1.0 / sqrt_n
for n in range(args.num_views_per_scene):
axis = gqn.imgplot.image()
x = n % sqrt_n
y = n // sqrt_n
figure.add(axis, x * axis_width, y * axis_height, axis_width,
axis_height)
axes_observations.append(axis)
sqrt_n = math.sqrt(args.num_generation)
axis_width = 0.5 / sqrt_n
axis_height = 1.0 / sqrt_n
for n in range(args.num_generation):
axis = gqn.imgplot.image()
x = n % sqrt_n
y = n // sqrt_n
figure.add(axis, x * axis_width + 0.5, y * axis_height, axis_width,
axis_height)
axes_generations.append(axis)
window = gqn.imgplot.window(figure, (1600, 800), "Dataset")
window.show()
raw_observed_images = np.zeros(screen_size + (3, ), dtype="uint32")
renderer = gqn.three.Renderer(screen_size[0], screen_size[1])
observed_images = xp.zeros(
(args.num_views_per_scene, 3) + screen_size, dtype="float32")
observed_viewpoints = xp.zeros(
(args.num_views_per_scene, 7), dtype="float32")
with chainer.no_backprop_mode():
while True:
if window.closed():
exit()
scene, _ = gqn.environment.shepard_metzler.build_scene(
num_blocks=random.choice(range(7, 8)))
renderer.set_scene(scene)
# Generate images without observations
r = xp.zeros(
(
args.num_generation,
hyperparams.channels_r,
) + hyperparams.chrz_size,
dtype="float32")
total_frames = 50
for tick in range(total_frames):
if window.closed():
exit()
query_viewpoints = generate_random_query_viewpoint(
tick / total_frames, xp)
generated_images = to_cpu(
model.generate_image(query_viewpoints, r, xp))
for m in range(args.num_generation):
if window.closed():
exit()
image = make_uint8(generated_images[m])
axis = axes_generations[m]
axis.update(image)
for n in range(args.num_views_per_scene):
if window.closed():
exit()
rad_xz = random.uniform(0, math.pi * 2)
rad_y = random.uniform(0, math.pi * 2)
eye = (3.0 * math.cos(rad_xz), 3.0 * math.sin(rad_y),
3.0 * math.sin(rad_xz))
center = (0, 0, 0)
yaw = gqn.math.yaw(eye, center)
pitch = gqn.math.pitch(eye, center)
camera.look_at(
eye=eye,
center=center,
up=(0.0, 1.0, 0.0),
)
renderer.render(camera, raw_observed_images)
# [0, 255] -> [-1, 1]
observed_images[n] = to_gpu((raw_observed_images.transpose(
(2, 0, 1)) / 255 - 0.5) * 2.0)
observed_viewpoints[n] = xp.array(
(eye[0], eye[1], eye[2], math.cos(yaw), math.sin(yaw),
math.cos(pitch), math.sin(pitch)),
dtype="float32")
r = model.representation_network.compute_r(
observed_images[:n + 1], observed_viewpoints[:n + 1])
# (batch * views, channels, height, width) -> (batch, views, channels, height, width)
r = r.reshape((1, n + 1) + r.shape[1:])
# sum element-wise across views
r = cf.sum(r, axis=1)
r = cf.broadcast_to(r, (args.num_generation, ) + r.shape[1:])
axis = axes_observations[n]
axis.update(np.uint8(raw_observed_images))
total_frames = 50
for tick in range(total_frames):
if window.closed():
exit()
query_viewpoints = generate_random_query_viewpoint(
tick / total_frames, xp)
generated_images = to_cpu(
model.generate_image(query_viewpoints, r, xp))
for m in range(args.num_generation):
if window.closed():
exit()
image = make_uint8(generated_images[m])
axis = axes_generations[m]
axis.update(image)
raw_observed_images[...] = 0
for n in range(args.num_views_per_scene):
axis = axes_observations[n]
axis.update(np.uint8(raw_observed_images))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--num-views-per-scene", "-k", type=int, default=9)
parser.add_argument("--num-generation", "-g", type=int, default=4)
parser.add_argument(
"--snapshot-path", "-snapshot", type=str, required=True)
parser.add_argument("--gpu-device", "-gpu", type=int, default=0)
args = parser.parse_args()
main()
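# --- Run sketch (the snapshot path is a placeholder) ---
# python observation.py -snapshot /path/to/snapshot.hdf5 -k 9 -g 4 -gpu 0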
|
[
"musyoku@users.noreply.github.com"
] |
musyoku@users.noreply.github.com
|
4862e89493a666fd1ac68a51f5013418f627bbb5
|
0809673304fe85a163898983c2cb4a0238b2456e
|
/src/lesson_email/imaplib_list_pattern.py
|
714974b6f175f4d93ca1223a58181ee0a8e7d680
|
[
"Apache-2.0"
] |
permissive
|
jasonwee/asus-rt-n14uhp-mrtg
|
244092292c94ff3382f88f6a385dae2aa6e4b1e1
|
4fa96c3406e32ea6631ce447db6d19d70b2cd061
|
refs/heads/master
| 2022-12-13T18:49:02.908213
| 2018-10-05T02:16:41
| 2018-10-05T02:16:41
| 25,589,776
| 3
| 1
|
Apache-2.0
| 2022-11-27T04:03:06
| 2014-10-22T15:42:28
|
Python
|
UTF-8
|
Python
| false
| false
| 220
|
py
|
import imaplib
from imaplib_connect import open_connection
with open_connection() as c:
typ, data = c.list(pattern='*Example*')
print('Response code:', typ)
for line in data:
print('Server response:', line)
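# LIST patterns use the IMAP wildcards '*' (matches anything, including the
# hierarchy delimiter) and '%' (matches within a single level), so
# '*Example*' matches any mailbox whose name contains "Example".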
|
[
"peichieh@gmail.com"
] |
peichieh@gmail.com
|
cb3d3d406b29c3f484f52b3d6c574ba60d6d7c89
|
1c10621b45d0efe73dd8c94acedc84d9c493d52f
|
/SQLAlchemyMixns/alias_mixin.py
|
037eaff9687bc05dfdaf773329337095f04ce699
|
[] |
no_license
|
Tartorus/MyLibs
|
89063d09f1d53c56b8ba14fe141b771ac0e5f40d
|
a5fecaee7f4f382506a12f8b9e9a366a49c239c8
|
refs/heads/master
| 2021-01-22T09:33:12.030445
| 2017-09-04T06:37:47
| 2017-09-04T06:37:47
| 102,326,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 703
|
py
|
from pyoreol import DBSession
class AliasMixin:
"""Добавляет методы выборки по столбцам алиасам. Алиасы должны быть уникальными"""
@classmethod
def aliased_list(cls, aliases):
"""
Select items by alias.
:param aliases: [<str>, ...]
:return: [<SQLAlchemy>, ...]
"""
query = DBSession.query(cls).filter(cls.alias != None)
if aliases:
query = query.filter(cls.alias.in_(aliases))
return query.all()
@classmethod
def aliased_item(cls, alias):
return DBSession.query(cls).filter(cls.alias == alias).one()
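# --- Usage sketch (hypothetical model, not part of this module) ---
# Assumes a declarative Base and a mapped class with a unique `alias` column:
# class Tag(Base, AliasMixin):
#     __tablename__ = 'tags'
#     id = Column(Integer, primary_key=True)
#     alias = Column(String, unique=True)
#
# Tag.aliased_list(['news', 'sport'])  # -> [<Tag>, <Tag>]
# Tag.aliased_item('news')             # -> <Tag>; .one() raises unless exactly one row matches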
|
[
"nclient@yandex.ru"
] |
nclient@yandex.ru
|
e6befb5908be579b27412e2a865232089d8de75c
|
56f95e346798fb70f5d1de0b2a94477da74bba97
|
/ex4_10.py
|
fdabff89d3d636ac19af930c6f55016bd0980155
|
[
"Apache-2.0"
] |
permissive
|
charliechocho/py-crash-course
|
ad0f9f5149f76a8d236814cc7b35587241d0db12
|
b42b8a4c1cb8d76e8316f55c1565ff42d920ee63
|
refs/heads/main
| 2023-03-05T06:48:01.793139
| 2021-02-18T21:33:54
| 2021-02-18T21:33:54
| 324,745,807
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
players = ['mattias','andreas','robin','daniel','linnéa']
#find the middle
the_odd_one = len(players) % 2
print(the_odd_one)
middle = len(players) // 2  # integer division: no int() casts needed below
print(middle)
print(f"The middle name of the list is {players[middle]} ")
print(f"The last names from the middle are:\n {players[middle:]} ")
for name in players[middle:]:
    print(name)
|
[
"mattiassoderberj@gmail.com"
] |
mattiassoderberj@gmail.com
|
72d81760c7bd4b533efaedb3a98302f2c182885a
|
8c68a541d5f4fe9b4238c94eff1eedfa7b30bdaa
|
/sns-sqs-pubsub/app/publisher.py
|
fd88681424390b8ccfeed93239494c29364470f3
|
[] |
no_license
|
Darth-Knoppix/python-exercises
|
032785617d09e17074a3e80eb067795de0ace2f9
|
d7a6809a37e00d97c893ec81848bf51441cd0160
|
refs/heads/main
| 2023-04-15T04:44:17.952768
| 2021-04-22T12:10:18
| 2021-04-22T12:10:18
| 359,408,742
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
import boto3
from .bootstrap import aws_client_kwargs
class Publisher:
topic_arn = None
def __init__(self, topic_arn):
self.client = boto3.client('sns', **aws_client_kwargs)
self.topic_arn = topic_arn
def publish(self):
if self.topic_arn is None:
print('No topic to publish events to')
return
self.client.publish(
TopicArn=self.topic_arn,
Message='1x latte',
Subject='coffee',
)
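# Example usage (hypothetical topic ARN; assumes AWS credentials are configured):
#   Publisher('arn:aws:sns:us-east-1:123456789012:coffee').publish()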
|
[
"seth.corker@gmail.com"
] |
seth.corker@gmail.com
|
9fed0911ae9b7ec5dab4e26fc33bf2fe22e6e6b0
|
497e93f9050e23018ac47a412042551f6c008374
|
/script/pixels_test.py
|
1e26c2d77f35e11b00b071e4734411f2aa2f02d7
|
[] |
no_license
|
KhairulIzwan/thermal_imaging
|
7c5c0f44410641784c55a202aa467825b1ef1218
|
bbf159bed7da930207c86256b3dc93a923589068
|
refs/heads/master
| 2022-07-03T12:00:14.348873
| 2020-05-17T14:35:38
| 2020-05-17T14:35:38
| 254,891,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
#!/usr/bin/env python
#Title: Python Subscriber for Tank Navigation
#Author: Khairul Izwan Bin Kamsani - [23-01-2020]
#Description: Tank Navigation Subscriber Nodes (Python)
#remove or add the library/libraries for ROS
import rospy
import sys
import cv2
import imutils
import argparse
import numpy as np
#remove or add the message type
from std_msgs.msg import String
from Adafruit_AMG88xx import Adafruit_AMG88xx
if __name__=='__main__':
# Initializing your ROS Node
rospy.init_node("Pixel_Test_Node", anonymous=True)
# Create the Adafruit_AMG88xx object
sensor = Adafruit_AMG88xx()
rate = rospy.Rate(10)
while not rospy.is_shutdown():
print(sensor.readPixels())
rate.sleep()  # throttle readouts to 10 Hz; without this the loop spins as fast as possible
|
[
"wansnap@gmail.com"
] |
wansnap@gmail.com
|
a4c3d6dcd11d93464a9549829839ee29c52bedb3
|
136fcdbc4ca7d72f5b1767928b86f7ff21fac534
|
/webdev/store/models.py
|
66f9f6f915a0e7fe30cef8cccf630dd0909f3c7c
|
[] |
no_license
|
YahyaStihi18/django_ecommerce
|
3c2052865cbb842aaf7e058d0b4b2a246072a2d7
|
c1f9659e2d01a04c17d31330a0a8069da509fb7c
|
refs/heads/master
| 2022-08-04T06:20:19.531705
| 2020-05-24T13:45:02
| 2020-05-24T13:45:02
| 266,549,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,798
|
py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Customer(models.Model):
user = models.OneToOneField(User,on_delete=models.CASCADE, null=True, blank=True)
name = models.CharField(max_length=200, null=True)
email = models.CharField(max_length=200, null=True)
def __str__(self):
return self.name
class Product(models.Model):
name = models.CharField(max_length=200, null=True)
price = models.FloatField()
digital = models.BooleanField(default=False, null=True, blank=False)
image = models.ImageField(null=True, blank=True,default='placeholder.png')
def __str__(self):
return self.name
@property
def imageURL(self):
try:
url = self.image.url
except ValueError:  # no file is associated with the image field
url = ''
return url
class Order(models.Model):
customer = models.ForeignKey(Customer, on_delete=models.SET_NULL, blank=True,null=True)
date_orderd = models.DateTimeField(auto_now_add=True)
complete = models.BooleanField(default=False,null=True,blank=False)
transaction_id = models.CharField(max_length=200,null=False)
def __str__(self):
return str(self.id)
@property
def shipping(self):
shipping = False
orderitems = self.orderitem_set.all()
for i in orderitems:
if i.product.digital == False:
shipping = True
return shipping
@property
def get_cart_total(self):
orderitems = self.orderitem_set.all()
total = sum([item.get_total for item in orderitems])
return total
@property
def get_cart_items(self):
orderitems = self.orderitem_set.all()
total = sum([item.quantity for item in orderitems])
return total
class OrderItem(models.Model):
product = models.ForeignKey(Product, on_delete=models.SET_NULL,blank=True,null=True)
order = models.ForeignKey(Order,on_delete=models.SET_NULL,blank=True,null=True)
quantity = models.IntegerField(default=0,null=True,blank=True)
date_orderd = models.DateTimeField(auto_now_add=True)
@property
def get_total(self):
total = self.product.price * self.quantity
return total
class ShippingAdress(models.Model):
customer = models.ForeignKey(Customer, on_delete=models.SET_NULL, blank=True,null=True)
order = models.ForeignKey(Order,on_delete=models.SET_NULL,blank=True,null=True)
address = models.CharField(max_length=200, null=True)
city = models.CharField(max_length=200, null=True)
state = models.CharField(max_length=200, null=True)
zipcode = models.CharField(max_length=200, null=True)
date_orderd = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.address
|
[
"61831390+YahyaStihi18@users.noreply.github.com"
] |
61831390+YahyaStihi18@users.noreply.github.com
|
4c6c1ef23bd518f1ae613049f10244c30aec2418
|
caef0308b388eee0159e4a7184c22293e69690e8
|
/cookbooks/chapter_2/2_1.py
|
bfeedae8a80d2d4ba17b95117af22f7f3798e33b
|
[] |
no_license
|
AlwaysOnline233/cookbook
|
440ca4325842482157484ca2df9d0f1932b0b29e
|
edebb0f98d88566977cc2514631818d8371b78dd
|
refs/heads/master
| 2022-12-14T20:03:26.527927
| 2020-09-06T16:57:36
| 2020-09-06T16:57:36
| 262,815,273
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,295
|
py
|
# Splitting a string on any of several delimiters with re.split()
# str.split() only handles simple single-delimiter splitting
import re
line = 'asdf fjdk; afed, fjek,asdf, foo' # two or more delimiter characters must go inside a character class [ ]
print(re.split(r'[;,\s]\s*', line)) # \s matches any whitespace character, e.g. space, tab or form feed [ \t\n\r\f\v]
print('With a capture group, the matched delimiters also appear in the result list')
fields = re.split(r'(;|,|\s)\s*', line) # a capture group keeps the delimiters by default
print(fields)
print('————————————————————————————')
print('Extracting the delimiter characters')
values = fields[::2]
delimiters = fields[1::2] + ['']
print(values)
print(delimiters)
print('————————————————————————————')
print('Rebuilding a new output string with the original delimiters')
# Reform the line using the same delimiters
s = ''.join(v+d for v, d in zip(values, delimiters))
print(s)
print('————————————————————————————')
print('Using a non-capturing group, so delimiters are not kept in the result list')
x = re.split(r'(?:,|;|\s)\s*', line) # (?:...) groups without capturing, so delimiters are dropped
print(x)
|
[
"941906145@qq.com"
] |
941906145@qq.com
|
242d9487cef93ea6a78241dfccefde6d41f44e65
|
75432ad0b9bce0f6bf02accf01d19b3d6be0e898
|
/Code Kata/check_2_str_have_common_char.py
|
086ac9539126b5ccee68a567bc0923fcb2ddc26e
|
[] |
no_license
|
Vijaya-Malini-A/guvi
|
7ee0c017e984922ceb4d71628010e424c4557d2b
|
80890224614982e84c23e4362d08af844ba82f4a
|
refs/heads/master
| 2020-06-03T00:08:19.273561
| 2019-08-05T11:22:09
| 2019-08-05T11:22:09
| 191,355,069
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
s1, s2 = input().split()  # split() already yields strings; no map(str, ...) needed
f = 0
for ch in s1:
if ch in s2:
f = 1
break  # one common character is enough
if f == 1:
print("yes")
else:
print("no")
|
[
"noreply@github.com"
] |
Vijaya-Malini-A.noreply@github.com
|
4f9305052e62adc12225284a46cfae829924c69c
|
b27f405120762a5f32f3eb10ed739d0298ea5c98
|
/src/pu_learning.py
|
4707245c67a3e5d43eb3d30d2e3924f81b39cec2
|
[] |
no_license
|
JulinaM/Deep-Networks-for-Graph-Representation
|
1b698ef96b612d95e48e10051e215d5068c637f2
|
30430344c198117a8fe79f841d2fe8ac5a12206a
|
refs/heads/main
| 2023-03-29T21:48:40.396374
| 2021-04-07T08:09:47
| 2021-04-07T08:09:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,802
|
py
|
import torch
import torch.nn as nn
import torch.optim as optim
from math import sqrt
from sklearn.model_selection import KFold
from autoencoder import *
from utils import *
from random_graphs import *
from dgnr import *
def get_train_test_masks(P, train_size=0.8, test_balance=True):
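# Positives enter the train mask with probability ~train_size; negatives are
# then subsampled at a rate chosen so the train set is balanced between classes.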
P = torch.Tensor(P)
Ipos = (P == 1.)
Ineg = (P == 0.)
pos_train = Ipos * torch.rand(P.size())
pos_train[Ineg] = 1.
pos_train = pos_train < train_size
train_neg_rel_size = torch.sum(pos_train) / torch.sum(Ineg)
neg_train = Ineg * torch.rand(P.size())
neg_train[Ipos] = 1.
neg_train = neg_train < train_neg_rel_size
train = pos_train + neg_train
if not test_balance:
test = ~train
pos_test = torch.logical_and(test, Ipos)
neg_test = torch.logical_and(test, Ineg)
else:
test = ~train
pos_test = torch.logical_and(test,Ipos)
neg_test = torch.logical_and(test, Ineg)
num_pos_test = torch.sum(pos_test)
test_neg_rel_size = num_pos_test / torch.sum(neg_test)
neg_test = neg_test * torch.rand(P.size())
neg_test[train] = 1.
neg_test[Ipos] = 1.
neg_test = neg_test < test_neg_rel_size
return(pos_train, neg_train, pos_test, neg_test)
class CustomMSELoss(nn.Module):
def __init__(self, alpha=0.2):
super(CustomMSELoss, self).__init__()
self.alpha=sqrt(alpha)
def forward(self, inputs, targets):
# down-weight the squared error on negative/unlabelled targets (scaled by self.alpha)
neg_mask = (targets == 0.)
M = (targets-inputs)**2
M[neg_mask] *= self.alpha
loss_res = torch.mean(M)
return loss_res
class PU_Learner(nn.Module):
def __init__(self, k, Fd, Ft, X, Y, Nd, Nt, activation='identity', has_bias=False):
super().__init__()
self.k = k
self.Fd = Fd
self.Ft = Ft
self.H = torch.nn.Parameter(torch.randn(Fd, k)*1/sqrt(k))
self.W = torch.nn.Parameter(torch.randn(k, Ft)*1/sqrt(k))
if not has_bias:
self.b_x = torch.zeros(Nd)
self.b_y = torch.zeros(Nt)
else:
self.b_x = torch.nn.Parameter(torch.randn(Nd))
self.b_y = torch.nn.Parameter(torch.randn(Nt))
self.X = (X - X.mean(0))/(X.std(0)+1e-7)
self.Y = (Y - Y.mean(0))/(Y.std(0)+1e-7)
activ_dict = {'sigmoid': torch.nn.Sigmoid, 'identity': torch.nn.Identity}
self.activation = activ_dict[activation]()
def forward(self, id_x, id_y):
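# Scores one (id_x, id_y) pair per batch element: the einsum takes the
# diagonal of X[id_x] @ H @ W @ Y[id_y].T so only the matched pairs are computed.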
dot = torch.einsum('ij,jk,kl,li->i', self.X[id_x], self.H, self.W, torch.transpose(self.Y[id_y], 1, 0))
return(self.activation(dot + self.b_x[id_x] + self.b_y[id_y]))
def pu_learning(k, x, y, P, pos_train, neg_train, pos_test, neg_test, n_epochs=100, batch_size=100, lr=1e-3, alpha=1.0, gamma=0., activation='identity'):
#hidden_layers=[500,200,100]
#input_numer=784
# use gpu if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
Fd = x.shape[1]
Ft = y.shape[1]
Nd = x.shape[0]
Nt = y.shape[0]
#Number of variables
N_variables = Fd * k + Ft * k
cartesian_product = torch.Tensor([[i, j] for i in range(Nd) for j in range(Nt)]).long().to(device)
x = torch.Tensor(x).to(device)
y = torch.Tensor(y).to(device)
P = torch.Tensor(P).to(device)
train_mask = torch.logical_or(pos_train, neg_train)
test_mask = torch.logical_or(pos_test, neg_test)
flat_train_mask = train_mask.flatten()
print("Building the train loader...")
train = torch.utils.data.TensorDataset(cartesian_product[flat_train_mask], P[train_mask].flatten())
train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True)
batch_num = len(train_loader)
print("Number of variables:", N_variables)
print("Finding positive and negative examples...")
print("Number of train examples:", P[train_mask].size()[0])
print("Number of positive examples in train set:", P[pos_train].size()[0])
print("Number of negative/unlabelled examples in train set:", P[neg_train].size()[0])
model = PU_Learner(k, Fd, Ft, x, y, Nd, Nt, activation=activation).to(device)
# create an optimizer object
# Adam optimizer with learning rate 1e-3
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=gamma)
# mean-squared error loss
criterion = CustomMSELoss(alpha=alpha)
for epoch in range(n_epochs): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(train_loader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs=inputs.to(device)
id_x_batch, id_y_batch = inputs[:,0], inputs[:,1]
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = model(id_x_batch, id_y_batch)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
S = model.activation(torch.chain_matmul(model.X, model.H, model.W, torch.transpose(model.Y,0,1)) + model.b_x.unsqueeze(-1).expand(-1,Nt) + model.b_y.expand(Nd,-1))
"""print(S)
print(model.H)
print(model.W)
print(model.b_x)
print(model.b_y)"""
auc = compute_auc(P[train_mask].clone(), S[train_mask].clone())
acc = compute_accuracy(P[train_mask].clone(), S[train_mask].clone())
print('[%d] loss: %.3f, auc: %.3f, acc: %.3f' %
(epoch + 1, running_loss / (batch_num), auc, acc))
running_loss = 0.0
print('Finished Training')
print("Now computing Z=HW^T, then will compute S...")
print(x.size(), model.H.size(), model.W.size(), y.size())
S = model.activation(torch.chain_matmul(model.X, model.H, model.W, torch.transpose(model.Y,0,1)) + model.b_x.unsqueeze(-1).expand(-1,Nt) + model.b_y.expand(Nd,-1))
return(S, model.H, model.W, model.b_x, model.b_y, train_mask, test_mask)
def pu_learning_new(k, x, y, P, n_epochs=100, batch_size=100, lr=1e-5, train_size=0.8, alpha=1.0, gamma=0., test_balance=True, activation='identity'):
print("Spliting train and test sets...")
pos_train, neg_train, pos_test, neg_test = get_train_test_masks(P, train_size=train_size, test_balance=test_balance)
return(pu_learning(k, x, y, P, pos_train, neg_train, pos_test, neg_test,
n_epochs=n_epochs, batch_size=batch_size, lr=lr,
alpha=alpha, gamma=gamma, activation=activation))
def cross_validate(k, x, y, P, N_folds, n_epochs=100, batch_size=100, lr=1e-5, train_size=0.8, alpha=1.0, gamma=0., activation='identity'):
pos_mask = (P==1)
neg_mask = (P==0)
N_pos = pos_mask.sum()
N_neg = neg_mask.sum()
N = min(N_pos, N_neg)
pos_idx = pos_mask.nonzero()
neg_idx = neg_mask.nonzero()
pos_idx = extract_samples(N, pos_idx)
neg_idx = extract_samples(N, neg_idx)
kfold = KFold(n_splits=N_folds, shuffle=False)
kfold_pos = list(kfold.split(pos_idx))
kfold_neg = list(kfold.split(neg_idx))
S_auc = 0
S_acc = 0
S_pr = 0
S_sol = 0
for fold in range(N_folds):
print("Fold %d" % (fold+1))
print("Preparing the masks...")
pos_train_idx = pos_idx[kfold_pos[fold][0]]
#print(pos_train_idx.max(0))
pos_test_idx = pos_idx[kfold_pos[fold][1]]
neg_train_idx = neg_idx[kfold_neg[fold][0]]
neg_test_idx = neg_idx[kfold_neg[fold][1]]
pos_train_mask = torch.zeros(pos_mask.size(),dtype=bool)
pos_train_mask[pos_train_idx[:,0],pos_train_idx[:,1]] = True
pos_test_mask = torch.zeros(pos_mask.size(),dtype=bool)
pos_test_mask[pos_test_idx[:,0],pos_test_idx[:,1]] = True
neg_train_mask = torch.zeros(neg_mask.size(),dtype=bool)
neg_train_mask[neg_train_idx[:,0],neg_train_idx[:,1]] = True
neg_test_mask = torch.zeros(neg_mask.size(),dtype=bool)
neg_test_mask[neg_test_idx[:,0],neg_test_idx[:,1]] = True
test_mask = torch.logical_or(pos_test_mask, neg_test_mask)
print("Starting to learn...")
S, H, W, b_x, b_y, _, _ = pu_learning(k, x, y, P,
pos_train_mask, neg_train_mask, pos_test_mask, neg_test_mask,
n_epochs=n_epochs, batch_size=batch_size, lr=lr,
alpha=alpha, gamma=gamma, activation=activation)
print("Evaluating on test set...")
auc, pr, acc, _ = eval_test_set(P, S, test_mask)
S_auc += auc
S_acc += acc
S_pr += pr
S_sol += S * auc
return(S_sol/S_auc, S_auc/N_folds, S_pr/N_folds, S_acc/N_folds)
def eval_test_set(P, S, test):
print("Evaluation on the test set...")
print("Test set statistics:")
n_pos = int(P[test].sum().item())
n_neg = int((1-P[test]).sum().item())
print("Number of positive examples:", n_pos)
print("Number of negative/unlabelled examples:", n_neg)
auc = compute_auc(P[test],S[test])
pr = compute_pr_auc(P[test], S[test])
acc = compute_accuracy(P[test],S[test])
confusion = compute_confusion_matrix(P[test], S[test])
print("\nROC auc: %f" % auc)
print("PR auc: %f" % pr)
print("Accuracy: %f" % acc)
print("Confusion matrix:")
print(confusion)
return(auc,pr,acc,confusion)
|
[
"michavaccaro@gmail.com"
] |
michavaccaro@gmail.com
|
434c6267bec6276f2e065d84d3b61dfb9d94b2f7
|
1aa7c0428e4d8d12c86c2bd7ca78e735e4310eba
|
/NagelSchreckenberg1D.py
|
5e1984505eda7800bf70f921b2853879e3078558
|
[] |
no_license
|
MJdeHaan/ComplexSystems
|
c4e07ad31f91cb8c33644656c927cf6502e296f3
|
ece88086df534325e3d669c2be8c5367c4ea524b
|
refs/heads/master
| 2020-04-05T14:05:27.486905
| 2017-06-30T09:14:47
| 2017-06-30T09:14:47
| 94,764,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,543
|
py
|
import numpy as np
from copy import deepcopy
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.patches as patches
class CAoneD(object):
def __init__(self, N, start, p, maxvel):
self.N = N
self.grid = np.zeros(N)
self.velocities = np.zeros(N)
self.p = p
self.maxvel = maxvel
self.cars = len(start)
for i in start:
self.grid[i] = 1
self.velocities[i] = 1
self.plotState = []
self.distances = [] # Grid distances
def returnGrid(self):
'''
Function which returns the grid for plotting purposes. The coordinates
of the cars will be stored in an array.
'''
newGrid = []
for i in range(self.N):
if self.grid[i] == True:
newGrid.append((i, 0))
return newGrid
def updateGrid(self):
'''
Function which updates the grid, considering periodic boundary conditions
using a basic Nagel-Schreckenberg model
'''
self.plotState = [] # Init the plotstate again
self.distances = []
newGrid, newVelocities = np.zeros(self.N), np.zeros(self.N)
# First increase speed with 1 if maxspeed has not been reached
for i in range(self.N):
if self.grid[i] == True and self.velocities[i] < self.maxvel:
self.velocities[i] += 1
# Check if any cars in front are within maxspeed distance and slow down
for i in range(self.N):
for j in range(1, int(self.velocities[i]) + 1):
# Use modulo to implement periodic boundaries
if self.grid[(i+j) % self.N]:
self.velocities[i] = j - 1
break # Found a grid where a crash could occur
# Randomize speeds/slowdowns
for i in range(self.N):
if np.random.rand() < self.p and self.grid[i] == True \
and self.velocities[i] > 0:
self.velocities[i] -= 1
# Move the cars forward depending on their speed
for i in range(self.N):
j = int(self.velocities[i])
temp = [] # temporary array
if self.grid[i] == True:
temp.append((i, 0))
temp.append(((i+j) % self.N, 0))
newGrid[(i+j) % self.N] = 1
newVelocities[(i+j) % self.N] = j
self.plotState.append(temp)
self.distances.append(j)
self.velocities = newVelocities
self.grid = newGrid
def returnAverageVelocity(self):
return np.sum(self.velocities)/self.cars
def returnPlotState(self):
return self.plotState, self.distances
def findCoors(N, M, xmin, xmax, ymin, ymax ):
'''
Subdivide the x and y into N and M parts for the backgrounds
'''
dx, dy = float(xmax - xmin)/N, float(ymax - ymin)/M
coors = []
trans = {}
for i in zip(np.linspace(xmin, xmax, N), range(N)):
for j in zip(np.linspace(ymin, ymax, M), range(M)):
coors.append((i[0], j[0]))
trans[(i[1], j[1])] = (i[0] + dx/2, j[0] + dy/2)
return coors, float(xmax - xmin)/N, float(ymax - ymin)/M , trans
def init():
line.set_data([], [])
time_text.set_text('')
return line, time_text
def animateDataGen(lim):
n = 0
maxDist = xmax + dx
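# Real-space positions wrap modulo maxDist, mirroring the CA's periodic boundary conditions.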
while n < lim:
test.updateGrid()
points, vels = test.returnPlotState()
realSpacePoints = []
realSpaceDistances = []
# Translate the grid coordinates to real space
for i in range(len(vels)):
realSpacePoints.append([trans[points[i][0]], trans[points[i][1]]])
realSpaceDistances.append(dx*vels[i])
# Create two arrays, containing intermediate x and y coordinates
xPoints, yPoints = [], []
for i in range(len(vels)):
xCoors = np.linspace(realSpacePoints[i][0][0],
realSpacePoints[i][0][0] + realSpaceDistances[i],
steps) % maxDist
yCoors = np.linspace(realSpacePoints[i][0][1], realSpacePoints[i][1][1], steps)
xPoints.append(xCoors)
yPoints.append(yCoors)
# Run through each of the coordinates and yield a list of x and y plot vals
for i in range(steps-1):
xList, yList = [], []
for j in range(len(vels)):
xList.append(xPoints[j][i])
yList.append(yPoints[j][i])
yield xList, yList
n += 1
def animate(i):
thisx, thisy = [], []
test.updateGrid()
points = test.returnGrid()
for point in points:
coor = trans[point]
thisx.append(coor[0])
thisy.append(coor[1])
line.set_data(thisx, thisy)
time_text.set_text(time_template.format(i))
return line, time_text
def animate2(i):
thisx, thisy = next(dataGen)
line.set_data(thisx, thisy)
time_text.set_text(time_template.format(i))
return line, time_text
N = 40 # Amount of cells needed for the CA
xmin, xmax, ymin, ymax = 0, 10, -3, 3 # For plotting
# Create a CA object
test = CAoneD(N, (0, 10), 0.1, 5)
# Find the translations for plotting the grid
coors,dx,dy,trans = findCoors(N, 1, xmin, xmax, -0.25, 0.25)
# These are variables for the plotting stuff
steps = 15
lim = 200
dataGen = animateDataGen(lim)
animatie = True
if animatie:
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=(xmin, xmax), ylim=(ymin, ymax))
# Paint a background grid..
for i in coors:
ax.add_patch(
patches.Rectangle(
i, # (x,y)
dx, # width
dy, # height
color='black',
fill=False
)
)
line, = ax.plot([], [], 'rs', markersize=xmax/(0.05*N))
time_template = 'timestep {0}'
time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)
plt.axis('equal')
ani = animation.FuncAnimation(fig, animate2, lim*(steps-1),
interval=20, blit=True, init_func=init, repeat=False)
plt.show()
|
[
"MJdeHaan@users.noreply.github.com"
] |
MJdeHaan@users.noreply.github.com
|
92bc4e43a334e90a0cee0b26ab3e3bf8d36fd59f
|
4fcc2ed36d464754581ea420d5f23bfd68505cce
|
/backend/farejando/urls.py
|
6abe3db020d2aa327b93a963c98c304bf2d302a1
|
[] |
no_license
|
codenation-dev/squad-3-ad-escale
|
b3d6dddf10a63b45a3ebd1d2443557653810d7f2
|
2d6d828d64321380230b346f76677b99bb2bfbe6
|
refs/heads/master
| 2023-01-13T18:06:14.540965
| 2019-08-20T01:29:12
| 2019-08-20T01:29:12
| 196,603,080
| 1
| 4
| null | 2023-01-04T06:16:01
| 2019-07-12T15:29:34
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 173
|
py
|
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api/v1/', include('apps.api.urls'))
]
|
[
"lillakoala67@gmail.com"
] |
lillakoala67@gmail.com
|
bd9a872ec0e392ce53fb4ba7691e0ae50f8e069b
|
97149b75bd21fb9f82aed657ab5180f765927746
|
/ietf/stats/migrations/0003_meetingregistration_attended.py
|
6500f7f5ae19a5c36a9bf5d13b7a99414c951111
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hassanakbar4/ietfdb-filter-repo
|
c74e7cbfdd5acb8f55ca0fcff6757166582d68db
|
67513183b7035014b494bfdd982f9f3990ee0647
|
refs/heads/main
| 2023-08-25T09:47:28.900431
| 2021-11-09T22:16:24
| 2021-11-09T22:16:24
| 426,613,149
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 983
|
py
|
# Generated by Django 2.0.13 on 2020-06-09 04:39
from django.db import migrations, models
def forward(apps, schema_editor):
MeetingRegistration = apps.get_model('stats', 'MeetingRegistration')
# Set attended=True on all existing records,
#
# Going forward, this will be unset on registration
# (ietf.api.views.api_new_meeting_registration()), and set on attendee
# import (ietf.stats.utils.get_meeting_registration_data() )
MeetingRegistration.objects.update(attended=True)
def reverse(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('stats', '0002_add_meetingregistration_fields'),
('group', '0029_add_used_roles_and_default_used_roles'),
]
operations = [
migrations.AddField(
model_name='meetingregistration',
name='attended',
field=models.BooleanField(default=False),
),
migrations.RunPython(forward, reverse),
]
|
[
"henrik@levkowetz.com"
] |
henrik@levkowetz.com
|
784f1da7a3784333d0fd2b39d09226c825ecf155
|
a20827cb44b55bf10fdab90ebdf2f0e545443dd0
|
/techmanpy/packets/__init__.py
|
8c3ffce4b758d502f302b1bfa6d93e98069da33a
|
[
"MIT"
] |
permissive
|
jvdtoorn/techmanpy
|
ab729b6c47a85e9d7ed1d8d8de7a2a8e3626ab44
|
384ef92dc0601f93259e4a6e5a7e8b1c96876902
|
refs/heads/master
| 2023-02-22T16:27:14.653385
| 2021-01-23T13:05:25
| 2021-01-23T13:05:25
| 323,303,317
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
#!/usr/bin/env python
from .stateless_packet import *
from .stateful_packet import *
from .cperr_packet import *
from .tmsta_packet import *
from .tmsvr_packet import *
from .tmsct_packet import *
|
[
"J.vanderToorn@student.tudelft.nl"
] |
J.vanderToorn@student.tudelft.nl
|
e2d2761bec4e82b3c461da41211a3d4f211a5ea1
|
a56a74b362b9263289aad96098bd0f7d798570a2
|
/venv/lib/python3.8/site-packages/qtpy/QtHelp.py
|
ca9d93ddee9e23378d86fca8cf41c2367f65a25d
|
[
"MIT"
] |
permissive
|
yoonkt200/ml-theory-python
|
5812d06841d30e1068f6592b5730a40e87801313
|
7643136230fd4f291b6e3dbf9fa562c3737901a2
|
refs/heads/master
| 2022-12-21T14:53:21.624453
| 2021-02-02T09:33:07
| 2021-02-02T09:33:07
| 132,319,537
| 13
| 14
|
MIT
| 2022-12-19T17:23:57
| 2018-05-06T08:17:45
|
Python
|
UTF-8
|
Python
| false
| false
| 456
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright © 2009- The Spyder Development Team
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
"""QtHelp Wrapper."""
import warnings
from . import PYQT5
from . import PYQT4
from . import PYSIDE
from . import PYSIDE2
if PYQT5:
from PyQt5.QtHelp import *
elif PYSIDE2:
from PySide2.QtHelp import *
elif PYQT4:
from PyQt4.QtHelp import *
elif PYSIDE:
from PySide.QtHelp import *
|
[
"kitae.yoon@deliveryhero.co.kr"
] |
kitae.yoon@deliveryhero.co.kr
|
40930de633d1e70b8db9423c6d3bc861463026c0
|
24e421bbb0a70faaa7a58adbc90bbce6dbcb1524
|
/basic.py
|
5c991d5f7f28e9b17212cfcd57ac0a61c010d7d8
|
[] |
no_license
|
Chowcow/RightStrain
|
a72c762cadb1c4094fbf146d15997f32737bf5c7
|
25c8b36df0d013df76ef7f4de7b87866ac4584f6
|
refs/heads/master
| 2020-03-30T17:32:23.372865
| 2018-12-04T22:07:29
| 2018-12-04T22:07:29
| 151,459,872
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,487
|
py
|
from flask import Flask, render_template,session, redirect, url_for,flash, request
from flask_wtf import FlaskForm
from wtforms import (StringField, BooleanField, DateTimeField,
RadioField, SelectField, TextField,
TextAreaField, SubmitField)
from wtforms.validators import DataRequired
import pandas as pd
import numpy as np
df = pd.read_csv('data_final.csv')
#df2=pd.read_csv('data_final.csv')
app= Flask(__name__)
@app.route('/')
def index():
return render_template('right-strain-landing.html')
@app.route('/strainfinder')
def test():
strains= df['strain_list'].tolist()
return render_template('right-strain-finder.html', strains= strains)
@app.route('/model', methods= ["POST"])
def jono():
strain= request.form.get('strain')
weight= request.form.get('weight')
print(strain)
print(weight)
def distance(row):
cols = ['Feat0','Feat1','Feat2','Feat3','Feat4','Feat5','Feat6','Feat7']
return(df[cols]-row[cols]).abs().sum(axis=1)
# set_index mutates the module-level df; guard so a second request doesn't raise KeyError
if df.index.name != 'strain_list':
df.set_index('strain_list', inplace=True)
dist= df.apply(distance, axis=1)
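# nsmallest(4) includes the query strain itself at distance 0, so [1:] drops it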
recommendation= dist[strain].nsmallest(4).index.tolist()[1:]
first_choice= recommendation[0]
second_choice= recommendation[1]
third_choice= recommendation[2]
return render_template('right-strain-finder-results.html', first_choice= first_choice, second_choice=second_choice, third_choice= third_choice)
if __name__=='__main__':
app.run(debug=True)
|
[
"sam.chow38@gmail.com"
] |
sam.chow38@gmail.com
|
0c619ce66431d34528ebd8d6e3e739bd8e9170c9
|
c795ec7f77219892183a1222fb51b8be2e754944
|
/multiverse server/multiverse-server/multiverse/bin/login_manager.py
|
cf27e2c68a2baaaeef54271738d5e7bdc21ecdad
|
[
"MIT"
] |
permissive
|
radtek/MultiverseClientServer
|
89d9a6656953417170e1066ff3bd06782305f071
|
b64d7d754a0b2b1a3e5acabd4d6ebb80ab1d9379
|
refs/heads/master
| 2023-01-19T04:54:26.163862
| 2020-11-30T04:58:30
| 2020-11-30T04:58:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
from multiverse.mars import *
from multiverse.server.worldmgr import *
from multiverse.mars.objects import *
from multiverse.mars.util import *
from multiverse.server.math import *
from multiverse.server.events import *
from multiverse.server.objects import *
from multiverse.server.engine import *
from multiverse.server.util import *
from multiverse.msgsys import *
True=1
False=0
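# Assigning True/False above is a legacy-compatibility shim, presumably for old Jython/Python interpreters that lack the bool builtins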
# Uncomment if you want to set a log level for this process
# that is different from the server's default log level
# Log.setLogLevel(1)
LoginPlugin.SecureToken = False
|
[
"demckay@outlook.com"
] |
demckay@outlook.com
|
75d7c372fc8bcf0b500dd490749c7b6bdcff9f68
|
4ba5c0fa1ef6a1673b1b43899285df205c9bc64d
|
/leetcode/039_combination_sum.py
|
31b7f9ab5ef972815c3071f50af2008c4a8b4c85
|
[
"MIT"
] |
permissive
|
aiden0z/snippets
|
90f86507b7989b3c4efdd0551835a5849c899534
|
671cbb00865363af437ad4df06020a793cadada9
|
refs/heads/master
| 2023-01-23T14:16:57.779722
| 2022-11-21T14:57:58
| 2022-11-21T14:57:58
| 43,037,323
| 0
| 0
|
MIT
| 2023-01-20T22:37:57
| 2015-09-24T01:33:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,875
|
py
|
"""Combination Sum
Given a set of candidate numbers (candidates) (without duplicates) and a target number (target),
find all unique combinations in candidates where the candidate numbers sum to target.
The same repeated number may be chosen from candidates an unlimited number of times.
Note:
* All numbers (include target) will be positive integers.
* The solution set must not contain duplicate combinations.
Example 1:
Input: candidates = [2, 3, 6, 7], target = 7,
A solution set is:
[
[7],
[2, 2, 3]
]
Example 2:
Input: candidates = [2, 3, 5], target = 8,
A solution set is:
[
[2, 2, 2, 2],
[2, 3, 3],
[3, 5]
]
"""
from typing import List
class Solution:
def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
candidates = sorted(list(set(candidates)))
results = []
self.find(candidates, target, [], results, 0)
return results
# deep-first search
def find(self, candidates: List[int], target: int, answer: List[int], results: List[List[int]],
i: int):
if target < 0:
return
if target == 0:
results.append(answer[:])
return
while (i < len(candidates) and target - candidates[i] >= 0):
answer.append(candidates[i])
self.find(candidates, target - candidates[i], answer, results, i)
i += 1
answer.pop()
class SolutionB:
def combinationSum(self, candidate: List[int], target: int) -> List[List[int]]:
candidate = sorted(candidate)
return self.find(candidate, target)
def find(self, candidates, target):
results = []
for i, n in enumerate(candidates):
if target > (n + n):
tails = self.find(candidates[i:], target - n)
results += [[n] + l for l in tails]
elif target == (n + n):
results.append([n, n])
elif target == n:
results.append([n])
elif target < n:
break
return results
class SolutionDP:
# dynamic programming
def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
candidates = sorted(candidates)
dp = [[[]]] + [[] for i in range(target)]
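# dp[i] holds every combination summing to i (dp[0] seeds the empty list).
# Appending num only when num >= L[-1] keeps combinations non-decreasing,
# which rules out duplicate permutations of the same multiset.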
for i in range(1, target + 1):
for num in candidates:
if num > i:
break
for L in dp[i - num]:
if i == num or num >= L[-1]:
dp[i] += [L + [num]]
return dp[target]
if __name__ == '__main__':
cases = [([2, 3, 6, 7], 7), ([2, 3, 5], 8), ([2, 4, 6, 8], 8)]
for case in cases:
for S in [Solution, SolutionB, SolutionDP]:
print(S().combinationSum(case[0], case[1]))
|
[
"aiden0xz@gmail.com"
] |
aiden0xz@gmail.com
|
e218719bf68f43980484f8217119d9c3b6929946
|
a909f26d85c2d45460c089246bc5d656ad0dbc10
|
/diffeo_experiments/distance/distance_test.py
|
4da23cca0c445f81e8aaf3c6d2a9b68ee1fb9ef2
|
[] |
no_license
|
wuyou33/surf12adam
|
c58a6001ad6680033b2ab3ea6520e04f1e204859
|
b30d2ec2b0cd50360cf480d2848652fe8f2d4b87
|
refs/heads/master
| 2021-05-27T02:51:20.374143
| 2013-05-09T08:07:52
| 2013-05-09T08:07:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,778
|
py
|
#!/usr/bin/env python
import roslib
roslib.load_manifest('logitech_cam')
import sys
import rosbag
import rospy
import numpy as np
from sensor_msgs.msg import Image
from std_msgs.msg import Header
import cv
from cv_bridge import CvBridge, CvBridgeError
import pdb
import ParseMessages
from distance import *
import matplotlib.pyplot as plt
def get_image_array(image):
im,data,dimsizes = ParseMessages.imgmsg_to_pil(image)
pix = np.asarray(im).astype(np.uint8)
return pix
def load_processed_bag(infile):
bag = rosbag.Bag(infile)
list = bag.read_messages(topics=['Y0','Y1','U0'])
Y0list = []
Y1list = []
U0list = []
for topic, msg, t in list:
if topic == 'Y0':
Y0list.append(get_image_array(msg))
if topic == 'Y1':
Y1list.append(get_image_array(msg))
if topic == 'U0':
U0list.append(msg)
return Y0list,Y1list,U0list
Y0list, Y1list, U0list = load_processed_bag('/media/data/processed-data/commands100t60.processed.bag')
#dist = distance.distance_neighborhood(Y0list[0],Y1list[0])
#pdb.set_trace()
distance_function_list = [distance.distance_scaled_mean, distance.distance_scaled_std,distance.distance_neighborhood_bestmatch,distance.distance_neighborhood_bestmatch,distance.distance_neighborhood_distance]
#distance_function_list = [distance.distance_neighborhood]
#opt_list = [1,1,[4,4],[6,6],[8,8]]
#distance.neighborarea_default = [7,7]
try:
max_dept = int(sys.argv[sys.argv.index('-d')+1])
except ValueError:
print('Warning: specify dept of command sequence by: -d')
max_dept = 3
try:
n_samp = int(sys.argv[sys.argv.index('-n')+1])
except ValueError:
print('Warning: specify the number of observations with -n')
n_samp = 10
dist_mean_list = []
dist_std_list = []
plt.figure()
#for fi in range(len(distance_function_list)):
def plot(data,std,color,text='Untitled'):
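# Normalize so the largest mean+std value is 1, then draw the mean curve and
# square markers at mean +/- one standard deviation.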
max_data = np.max(data+std)
data = data/max_data
std = std/max_data
plt.plot(range(len(data)),data,color=color,label=text)
plt.plot(range(len(data)),data+std,color+'s')
plt.plot(range(len(data)),data-std,color+'s')
distance = distance()
distance.neighbor_indices_flat_init(Y0list[0],[6,6])
distance.calculate_max((160,120))
""" distance 5 """
#distance_function = distance_function_list[4]
#distance.neighbor_indices_flat_init(Y0list[0],[6,6])
#dist_list = [[]]*max_dept
#dist_mean = np.zeros(max_dept)
#dist_std = np.zeros(max_dept)
#for dept in range(max_dept):
# print('Computing dept: '+str(dept))
# dist = []
# for i in range(n_samp):
# if i+dept<len(Y0list):
# y1 = Y0list[i]
# y2 = Y0list[i+dept]
# dist.append(distance.distance_neighborhood_distance(y1,y2))
# dist_list[dept] = dist
# dist_mean[dept] = np.mean(dist)
# dist_std[dept] = np.std(dist)
#dist_mean_list.append(dist_mean)
#dist_std_list.append(dist_std)
#print dist_mean
#print dist_std
#plt.plot(range(max_dept),dist_mean,color='c',label='distance_neighborhood_distance, area=[6,6]')
#plt.plot(range(max_dept),dist_mean+dist_std,'cs')
#plt.plot(range(max_dept),dist_mean-dist_std,'cs')
#""" distance 6 """
#
#distance_function = distance_function_list[4]
#distance.neighbor_indices_flat_init(Y0list[0],[6,6])
#dist_list = [[]]*max_dept
#dist_mean = np.zeros(max_dept)
#dist_std = np.zeros(max_dept)
#for dept in range(max_dept):
# print('Computing dept: '+str(dept))
# dist = []
# for i in range(n_samp):
# if i+dept<len(Y0list):
# y1 = Y0list[i]
# y2 = Y0list[i+dept]
# dist.append(distance.distance_neighborhood_distance(y1,y2))
# dist_list[dept] = dist
# dist_mean[dept] = np.mean(dist)
# dist_std[dept] = np.std(dist)
#dist_mean_list.append(dist_mean)
#dist_std_list.append(dist_std)
#print dist_mean
#print dist_std
#plt.plot(range(max_dept),dist_mean,color='y',label='distance_neighborhood_distance, area=[6,6]')
#plt.plot(range(max_dept),dist_mean+dist_std,'ys')
#plt.plot(range(max_dept),dist_mean-dist_std,'ys')
""" distance 1 """
distance_function = distance_function_list[0]
dist_list = [[]]*max_dept
dist_mean = np.zeros(max_dept)
dist_std = np.zeros(max_dept)
for dept in range(max_dept):
print('Computing dept: '+str(dept))
dist = []
for i in range(n_samp):
if i+dept<len(Y0list):
y1 = Y0list[i]
y2 = Y0list[i+dept]
# pdb.set_trace()
dist.append(distance.distance_scaled_mean(y1,y2))
dist_list[dept] = dist
dist_mean[dept] = np.mean(dist)
dist_std[dept] = np.std(dist)
dist_mean_list.append(dist_mean)
dist_std_list.append(dist_std)
print(dist_mean)
print(dist_std)
plot(dist_mean, dist_std, 'g','distance_scaled_mean, scale=1.0')
#plt.plot(range(max_dept),dist_mean,color='g',label='distance_scaled_mean, scale=1.0')
#plt.plot(range(max_dept),dist_mean+dist_std,'gs')
#plt.plot(range(max_dept),dist_mean-dist_std,'gs')
""" distance 2 """
distance_function = distance_function_list[1]
dist_list = [[]]*max_dept
dist_mean = np.zeros(max_dept)
dist_std = np.zeros(max_dept)
for dept in range(max_dept):
print('Computing dept: '+str(dept))
dist = []
for i in range(n_samp):
if i+dept<len(Y0list):
y1 = Y0list[i]
y2 = Y0list[i+dept]
dist.append(distance.distance_scaled_std(y1,y2))
dist_list[dept] = dist
dist_mean[dept] = np.mean(dist)
dist_std[dept] = np.std(dist)
dist_mean_list.append(dist_mean)
dist_std_list.append(dist_std)
print(dist_mean)
print(dist_std)
plot(dist_mean, dist_std, 'b','distance_scaled_std, scale=1.0')
#plt.plot(range(max_dept),dist_mean,color='b',label='distance_scaled_std, scale=1.0')
#plt.plot(range(max_dept),dist_mean+dist_std,'bs')
#plt.plot(range(max_dept),dist_mean-dist_std,'bs')
""" distance 3 """
distance_function = distance_function_list[2]
distance.neighbor_indices_flat_init(Y0list[0],[4,4])
dist_list = [[]]*max_dept
dist_mean = np.zeros(max_dept)
dist_std = np.zeros(max_dept)
for dept in range(max_dept):
print('Computing dept: '+str(dept))
dist = []
for i in range(n_samp):
if i+dept<len(Y0list):
y1 = Y0list[i]
y2 = Y0list[i+dept]
dist.append(distance.distance_neighborhood_bestmatch(y1,y2))
dist_list[dept] = dist
dist_mean[dept] = np.mean(dist)
dist_std[dept] = np.std(dist)
dist_mean_list.append(dist_mean)
dist_std_list.append(dist_std)
print(dist_mean)
print(dist_std)
plot(dist_mean, dist_std, 'r','distance_neighborhood_bestmatch, area=[4,4]')
#plt.plot(range(max_dept),dist_mean,color='r',label='distance_neighborhood_bestmatch, area=[4,4]')
#plt.plot(range(max_dept),dist_mean+dist_std,'rs')
#plt.plot(range(max_dept),dist_mean-dist_std,'rs')
""" distance 4 """
distance_function = distance_function_list[3]
distance.neighbor_indices_flat_init(Y0list[0],[6,6])
dist_list = [[]]*max_dept
dist_mean = np.zeros(max_dept)
dist_std = np.zeros(max_dept)
for dept in range(max_dept):
print('Computing dept: '+str(dept))
dist = []
for i in range(n_samp):
if i+dept<len(Y0list):
y1 = Y0list[i]
y2 = Y0list[i+dept]
dist.append(distance.distance_neighborhood_bestmatch(y1,y2))
dist_list[dept] = dist
dist_mean[dept] = np.mean(dist)
dist_std[dept] = np.std(dist)
dist_mean_list.append(dist_mean)
dist_std_list.append(dist_std)
print(dist_mean)
print(dist_std)
plot(dist_mean, dist_std, 'm','distance_neighborhood_bestmatch, area=[6,6]')
#plt.plot(range(max_dept),dist_mean,color='m',label='distance_neighborhood_bestmatch, area=[6,6]')
#plt.plot(range(max_dept),dist_mean+dist_std,'ms')
#plt.plot(range(max_dept),dist_mean-dist_std,'ms')
#plt.legend(loc='best')
plt.savefig('distance-plot.png')
#pdb.set_trace()
|
[
"adam.nilss@gmail.com"
] |
adam.nilss@gmail.com
|
c7ceb3fafbdf89b4e714fca9f60c6d1ef8adab9a
|
7bfb8432bb2fc7aee932b15db00fa8efc0ac51e4
|
/template.py
|
a9d6dcebbec75be72d16eacf356ccb6b4dc49ecd
|
[
"MIT"
] |
permissive
|
justinjhendrick/advent_of_code_2020
|
cd20b4ece9216d1e39021765619d18e7b6081874
|
2b9bb15efef26d1c7a211ac2770159f4a12f9658
|
refs/heads/main
| 2023-02-03T10:54:51.411262
| 2020-12-25T05:44:47
| 2020-12-25T05:44:47
| 318,698,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
import sys
def stripped_lines(filename):
with open(filename) as f:
for line in f.readlines():
yield line.strip()
def parse(line):
pass
def parse_file(filename):
return [parse(line) for line in stripped_lines(filename)]
def p1(inputs):
pass
def p2(inputs):
pass
def main(args):
inputs = parse_file(args[1])
p1_ans = p1(inputs)
print(f'part one: {p1_ans}')
p2_ans = p2(inputs)
print(f'part two: {p2_ans}')
if __name__ == '__main__':
main(sys.argv)
|
[
"justinjhendrick@gmail.com"
] |
justinjhendrick@gmail.com
|
4cc0a300ec09f73a726f6cd74dffdf268e64db08
|
f578b4f2d9d875c2b18c744c26d6c10c95364bb8
|
/tags/20050524/code/vellum/util/filesystem.py
|
09d54252c4d2fc8bd117e60a71b4a31b4be3f808
|
[] |
no_license
|
BackupTheBerlios/vellum-svn
|
679e480674ed64d8a7712152e8e8fd3b55188790
|
6c3704d60d38c28fd0df6761c6d86b8397a734d5
|
refs/heads/master
| 2021-01-21T13:11:12.721917
| 2007-01-12T07:10:29
| 2007-01-12T07:10:29
| 40,803,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
import os
class Filesystem:
def __init__(self, path, mkdir=0):
if mkdir:
try:
os.makedirs(path)
except EnvironmentError:
pass
self.path = os.path.abspath(path)
def __call__(self, *paths):
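# Calling the instance joins the given components onto the base path.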
return os.path.join(self.path, *paths)
|
[
"corydodt@dac6abc3-03f5-0310-8df6-960e0b1e0653"
] |
corydodt@dac6abc3-03f5-0310-8df6-960e0b1e0653
|