hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c576551072f708a32f1945826e72ae5d21285cce
| 2,605
|
py
|
Python
|
scripts/multiprocess_tokenizer/worker.py
|
talolard/vampire
|
e2ae46112fda237b072453c9f1c5e89bd7b4135b
|
[
"Apache-2.0"
] | null | null | null |
scripts/multiprocess_tokenizer/worker.py
|
talolard/vampire
|
e2ae46112fda237b072453c9f1c5e89bd7b4135b
|
[
"Apache-2.0"
] | null | null | null |
scripts/multiprocess_tokenizer/worker.py
|
talolard/vampire
|
e2ae46112fda237b072453c9f1c5e89bd7b4135b
|
[
"Apache-2.0"
] | null | null | null |
import typing
from typing import Any
import json
import os
from multiprocessing import Process, Queue
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter
from spacy.tokenizer import Tokenizer
import spacy
from tqdm.auto import tqdm
import time

nlp = spacy.load("en")


class TokenizingWorker(Process):
    def __init__(
        self,
        pbar: Any,
        is_json: bool,
        queue_in: Queue,  # Queue where text comes in
        queue_out: Queue,  # Queue where tokens go
        tokenizer_type: str = "just_spaces",
    ):
        super(TokenizingWorker, self).__init__()
        self.queue_in = queue_in
        self.queue_out = queue_out
        self.is_json = is_json
        self.pbar = pbar
        if tokenizer_type == "just_spaces":
            tokenizer = SpacyWordSplitter()
            self.tokenizer = lambda text: list(map(str, tokenizer.split_words(text)))
        elif tokenizer_type == "spacy":
            tokenizer = Tokenizer(nlp.vocab)
            self.tokenizer = lambda text: list(map(str, tokenizer(text)))

    def run(self):
        for line in iter(self.queue_in.get, None):
            if self.is_json:
                text = json.loads(line)["text"]
            else:
                text = line
            tokens = self.tokenizer(text)
            # A blocking put avoids the race between checking full() and a non-blocking put.
            self.queue_out.put(" ".join(tokens))
            self.pbar.update()


def multi_proc_data_loader(data_path: str, tokenizer_type: str = "just_spaces"):
    num_processes = max(1, os.cpu_count() - 1)
    queue_in = Queue()
    queue_out = Queue(maxsize=10000)
    workers = []
    is_json = data_path.endswith(".jsonl") or data_path.endswith(".json")
    pbar = tqdm()
    for _ in range(num_processes):  # one less than the CPU count, since the main process is busy too
        worker = TokenizingWorker(
            pbar=pbar,
            is_json=is_json,
            queue_in=queue_in,
            queue_out=queue_out,
            tokenizer_type=tokenizer_type,
        )
        workers.append(worker)
        worker.start()
    with open(data_path, "r") as f:
        for line in f:
            queue_in.put(line)
    for worker in workers:
        # ensure each worker gets a None which tells it to stop
        queue_in.put(None)
    res = []
    alive = any(worker.is_alive() for worker in workers)
    while alive:
        while not queue_out.empty():
            res.append(queue_out.get(block=False))
        alive = any(worker.is_alive() for worker in workers)
        if alive:
            time.sleep(0.01)
    # Drain whatever the workers enqueued just before exiting, so no documents are lost.
    while not queue_out.empty():
        res.append(queue_out.get(block=False))
    return res
| 28.315217
| 97
| 0.6119
| 1,171
| 0.44952
| 0
| 0
| 0
| 0
| 0
| 0
| 230
| 0.088292
|
c578aaaded2e7b75110f4c69848a9fb001f45ff0
| 5,823
|
py
|
Python
|
MC-Fisher.py
|
hosua/Minecraft-Fisher
|
416c476cd6e5ef0c6bb978aacd9816aa9ba36f7e
|
[
"MIT"
] | null | null | null |
MC-Fisher.py
|
hosua/Minecraft-Fisher
|
416c476cd6e5ef0c6bb978aacd9816aa9ba36f7e
|
[
"MIT"
] | null | null | null |
MC-Fisher.py
|
hosua/Minecraft-Fisher
|
416c476cd6e5ef0c6bb978aacd9816aa9ba36f7e
|
[
"MIT"
] | null | null | null |
# For larger scale projects, I really should learn to use classes... lol
from PIL import ImageGrab, ImageTk, Image
import keyboard
import pyautogui
import tkinter as tk
import os
import time, datetime
import text_redirect as TR
import sys
# GUI stuff
TITLE = "Minecraft-Fisher - Made by Hoswoo"
DARK_BLUE = '#0A3D62'
LIGHT_BLUE = "#7ddeff"
DARK_GREY = "#2C3335"
CONSOLE_BG = '#A1AAB5'
FONT_BIG = ('calibre', 12, 'bold')
FONT = ('calibre', 10, 'bold')
FONT_CONSOLE = ('Times', 10, 'normal')
SIZE = ("400x500")
root = tk.Tk()
root.configure(bg=DARK_BLUE)
root.title(TITLE)
root.geometry(SIZE)
root_dir = os.getcwd()
# GUI Console
console_frame = tk.Frame(root, bg=DARK_BLUE, height=250, width=200)
console_sub_frame = tk.Frame(console_frame, bg=DARK_BLUE)
console_text = tk.Text(root, height=12,
width=60, bg=CONSOLE_BG, fg=DARK_GREY, font=FONT_CONSOLE)
console_text.config(state="disabled")
console_text.see("end")
sys.stdout = TR.TextRedirector(console_text) # Send console output to textbox instead of actual console.
# sys.stderr = TR.TextRedirector(console_text) # Errors will output in console
print("PLEASE READ BEFORE USING:\n")
print("The bot works by detecting a specific shade of red on the bobber. With that being said...")
print("Before you use the bot, you should turn your brightness all the way up.")
print("You will also have to map your right-mouse-click to 'r'. (This was a workaround due to the mouse input causing issues)")
print("For best results, ensure you are in a very well lit area and that the fish bobber appears within your capture region!")
print("NOTE: If your health hearts are in the capture region, it will falsely detect the bobber.")
# Global constants
BOBBER_COLOR = (208, 41, 41, 255)
BOBBER_COLOR_NIGHT = (206, 40, 39, 255)
region_var = tk.StringVar()
region_var.set(300) # Default to 300, should work for most people.
BOX_SIZE = int(region_var.get()) # get box size from spinbox
FILENAME = "pic.png"
x = 0
y = 0
def grab_image():
global x, y
#image = ImageGrab.grab(bbox=(x-(BOX_SIZE/2), y-(BOX_SIZE/2), x+(BOX_SIZE/2), y+(BOX_SIZE/2)))
image = ImageGrab.grab(bbox=(x-(BOX_SIZE/2), y-(BOX_SIZE/2), x+(BOX_SIZE/2), y+(BOX_SIZE/2)))
data = list(image.getdata())
image.save(FILENAME)
return data
def validate(user_input): # I don't really remember how to get validation to properly work.. so I'm just not gonna allow
# users to type anything lol
# Sourced from https://www.geeksforgeeks.org/python-tkinter-spinbox-range-validation/
if user_input:
#print("Typing not allowed")
return False
region_label = tk.Label(root, text="Region size",
bg=DARK_BLUE, fg=LIGHT_BLUE, font=FONT)
region_spinbox = tk.Spinbox(root, from_=25, to=1000,
increment=25, textvariable=region_var, width=6)
range_validation = root.register(validate)
region_spinbox.config(validate="key", validatecommand=(range_validation, '% P')) # Absolutely no idea how this works lol
pic_frame = tk.Frame(root, bg="#FFFFFF", height=BOX_SIZE, width=BOX_SIZE)
#img = ImageTk.PhotoImage(Image.open(FILENAME))
pic_frame_label = tk.Label(pic_frame)
pic_frame_label.pack()
pic_frame.pack()
running = False
times_not_detected = 0
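# loop_action: grab a screenshot of the region around the saved mouse position, update the
# preview image, and return whether the bobber's red shade is present (pressing 'r' to recast
# when it is not).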
def loop_action():
timestamp = "(" + '{:%H:%M:%S}'.format(datetime.datetime.now()) + ")"
def check_for_bobber():
img = Image.open(FILENAME)
img = img.convert("RGBA")
data = list(img.getdata())
# print(data)
if BOBBER_COLOR_NIGHT in data or BOBBER_COLOR in data:
print(timestamp + " Bobber detected")
return True
else:
print(timestamp + " Bobber not detected")
keyboard.press_and_release("r")
return False
console_text.see("end")
grab_image()
img = ImageTk.PhotoImage(Image.open(FILENAME)) # set image to grabbed image
pic_frame_label.configure(image=img) # configure label to show new image
pic_frame_label.image = img
return check_for_bobber() # Return True if bobber is detected and False if not.
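# screenshot_loop: when the bot is running, run loop_action for each tick and reschedule
# itself via root.after(100, ...).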
def screenshot_loop(event=None): # Do this while running
global running, times_not_detected
if running:
bobber_detected = loop_action()
if not bobber_detected:
if times_not_detected != 2: # Delay for recast
time.sleep(1.0)
else:
pass
times_not_detected += 1
else:
times_not_detected = 0
root.after(100, screenshot_loop)
def start_task(event=None):
global BOX_SIZE
global running
global x,y
BOX_SIZE = int(region_var.get()) # get box size from spinbox
x = pyautogui.position()[0]
y = pyautogui.position()[1]
if running is False:
print("(" + '{:%H:%M:%S}'.format(datetime.datetime.now()) + ") Starting...\n")
running = True
screenshot_loop()
else:
print("(" + '{:%H:%M:%S}'.format(datetime.datetime.now()) + ") I'm already running!...\n")
def stop_task(event=None):
global running, times_not_detected
if running is True:
print("(" + '{:%H:%M:%S}'.format(datetime.datetime.now()) + ") Stopping...\n")
running = False
times_not_detected = 0
start_btn = tk.Button(root, text="Start (~)", bg=DARK_GREY,
fg=LIGHT_BLUE, command=start_task, width=10)
stop_btn = tk.Button(root, text="Stop (F1)", bg=DARK_GREY,
fg=LIGHT_BLUE, command=stop_task, width=10)
region_label.pack()
region_spinbox.pack()
start_btn.pack()
stop_btn.pack()
console_frame.pack()
console_sub_frame.pack()
console_text.pack()
keyboard.add_hotkey('`', start_task)
keyboard.add_hotkey('F1', stop_task)
root.mainloop()
| 34.052632
| 127
| 0.665636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,856
| 0.318736
|
3d63dfe6fe9f0bef4a7c9bfd9c4a5ff955fbcafe
| 1,248
|
py
|
Python
|
ModelAnalysis/biomodel_iterator.py
|
BioModelTools/ModelAnalysis
|
89d6426ec9fbbb6836897889266848793d109dcc
|
[
"MIT"
] | null | null | null |
ModelAnalysis/biomodel_iterator.py
|
BioModelTools/ModelAnalysis
|
89d6426ec9fbbb6836897889266848793d109dcc
|
[
"MIT"
] | 3
|
2017-09-04T20:06:45.000Z
|
2017-09-07T01:57:45.000Z
|
ModelAnalysis/biomodel_iterator.py
|
BioModelTools/ModelAnalysis
|
89d6426ec9fbbb6836897889266848793d109dcc
|
[
"MIT"
] | null | null | null |
"""
Iterates through a collection of BioModels
"""
from sbml_shim import SBMLShim
import sys
import os.path
################################################
# Classes that count pattern occurrences
################################################
class BiomodelIterator(object):
def __init__(self, path, excludes=None):
"""
:param str path: path to a file containing a list of Biomodel IDs to process
The file should contain a list of BioModels identifiers,
one per line.
:param list-of-str excludes: Biomodel IDs to exclude
"""
self._path = path
self._idx = 0
with open(self._path, 'r') as fh:
ids = fh.readlines() # Biomodels Ids
if excludes is None:
excludes = []
pruned_ids = [id.replace('\n', '') for id in ids]
self._ids = [id.replace('\n', '') for id in pruned_ids
if not id in excludes]
def __iter__(self):
return self
def next(self):
"""
:return SBMLShim: next bio model
:raises StopIteration:
"""
if self._idx < len(self._ids):
shim = SBMLShim.getShimForBiomodel(self._ids[self._idx])
self._idx += 1
return shim
else:
raise StopIteration()
if __name__ == '__main__':
main(sys.argv)
| 25.469388
| 80
| 0.584936
| 945
| 0.757212
| 0
| 0
| 0
| 0
| 0
| 0
| 533
| 0.427083
|
3d640bec431e81affc07c61301d5e5f1d49c75e8
| 411
|
py
|
Python
|
app/domain/company/models.py
|
JBizarri/fast-api-crud
|
3eb0391c1a1f2e054092de717b73898c7efed5cb
|
[
"MIT"
] | null | null | null |
app/domain/company/models.py
|
JBizarri/fast-api-crud
|
3eb0391c1a1f2e054092de717b73898c7efed5cb
|
[
"MIT"
] | null | null | null |
app/domain/company/models.py
|
JBizarri/fast-api-crud
|
3eb0391c1a1f2e054092de717b73898c7efed5cb
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import TYPE_CHECKING, List
from sqlalchemy import Column, String
from sqlalchemy.orm import relationship
from ...database import BaseModel
if TYPE_CHECKING:
    from ..user.models import User


class Company(BaseModel):
    name: str = Column(String)
    users: List[User] = relationship(
        "User", back_populates="company", cascade="all, delete"
    )
| 20.55
| 63
| 0.737226
| 165
| 0.40146
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 0.068127
|
3d66a81186dceebace0295ecba9cdcb9533d8966
| 1,913
|
py
|
Python
|
tools/captcha_image_downloader.py
|
metormaon/signum-py
|
7c6eaf11025f77c4cfbe6fb9aa77b5dadb485d8c
|
[
"MIT"
] | null | null | null |
tools/captcha_image_downloader.py
|
metormaon/signum-py
|
7c6eaf11025f77c4cfbe6fb9aa77b5dadb485d8c
|
[
"MIT"
] | 1
|
2020-08-01T23:28:38.000Z
|
2020-08-01T23:28:38.000Z
|
tools/captcha_image_downloader.py
|
metormaon/signum-py
|
7c6eaf11025f77c4cfbe6fb9aa77b5dadb485d8c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from os import path
from google_images_download import google_images_download
for keyword in [
"dog", "cat", "bird", "elephant", "fork", "knife", "spoon", "carrot", "orange", "turnip", "tomato", "potato",
"water", "hair", "table", "chair", "house", "factory", "microwave", "cigarette", "ashtray", "brush", "battery",
"comb", "box", "book", "bag", "calendar", "computer", "lipstick", "pencil", "perfume", "telephone", "television",
"headset", "angry", "apple", "armour", "baby", "bag", "ball", "bank", "basket", "bath", "bear", "bean", "bell",
"blue", "bottle", "bread", "bridge", "bus", "cake", "candle", "car", "card", "cheese", "chicken", "chocolate",
"circle", "clock", "cloud", "coffee", "coat", "coin", "cook", "corn", "cup", "dance", "deer", "desk", "door",
"dress", "duck", "happy", "smile", "yellow", "ear", "earth", "mars", "saturn", "jupiter", "egg", "eight", "one",
"two", "three", "four", "five", "six", "seven", "nine", "ten", "electricity", "piano", "guitar", "flute", "drum",
"exit", "dark", "excited", "surprise", "eye", "nose", "mouth", "leg", "hand", "face", "family", "farm", "fat",
"fear", "finger", "fire", "flag", "flower", "fly", "food", "football", "forest", "fox", "friend", "garden", "game",
"gate"
]:
    if not path.exists("../captcha-images/" + keyword):
        response = google_images_download.googleimagesdownload()
        arguments = {"keywords": keyword,
                     "limit": 15,
                     "print_urls": True,
                     "usage_rights": "labeled-for-reuse",
                     "output_directory": "../captcha-images",
                     "safe_search": True,
                     "format": "jpg",
                     "size": "medium"}
        paths = response.download(arguments)
        print(paths)
    else:
        print("Skipping " + keyword)
| 51.702703
| 119
| 0.526398
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,074
| 0.561422
|
3d6b7691e8c5eed4e135eafd2eed629b0d7310de
| 4,752
|
py
|
Python
|
ott2butKAMA1/jessetkdata/dnafiles/BNB-USDT 2018-02-15 2021-01-01.py
|
ysdede/jesse_strategies
|
ade9f4ba42cec11207c766d267b9d8feb8bce648
|
[
"CC0-1.0"
] | 38
|
2021-09-18T15:33:28.000Z
|
2022-02-21T17:29:08.000Z
|
ott2butKAMA1/jessetkdata/dnafiles/BNB-USDT 2018-02-15 2021-01-01.py
|
ysdede/jesse_strategies
|
ade9f4ba42cec11207c766d267b9d8feb8bce648
|
[
"CC0-1.0"
] | 4
|
2022-01-02T14:46:12.000Z
|
2022-02-16T18:39:41.000Z
|
ott2butKAMA1/jessetkdata/dnafiles/BNB-USDT 2018-02-15 2021-01-01.py
|
ysdede/jesse_strategies
|
ade9f4ba42cec11207c766d267b9d8feb8bce648
|
[
"CC0-1.0"
] | 11
|
2021-10-19T06:21:43.000Z
|
2022-02-21T17:29:10.000Z
|
dnas = [
['jVXfX<', 37, 64, 24.67, 14, 7, -4.53, {'ott_len': 42, 'ott_percent': 508, 'stop_loss': 263, 'risk_reward': 65, 'chop_rsi_len': 31, 'chop_bandwidth': 83}],
['o:JK9p', 50, 62, 32.74, 37, 8, -0.2, {'ott_len': 45, 'ott_percent': 259, 'stop_loss': 201, 'risk_reward': 41, 'chop_rsi_len': 12, 'chop_bandwidth': 274}],
['tGVME/', 35, 74, 20.06, 20, 10, -4.75, {'ott_len': 48, 'ott_percent': 375, 'stop_loss': 254, 'risk_reward': 43, 'chop_rsi_len': 20, 'chop_bandwidth': 36}],
['a<<sMo', 59, 27, 25.74, 33, 6, -1.06, {'ott_len': 36, 'ott_percent': 277, 'stop_loss': 139, 'risk_reward': 76, 'chop_rsi_len': 24, 'chop_bandwidth': 271}],
['`Ol@gL', 29, 65, 9.47, 25, 8, -2.95, {'ott_len': 36, 'ott_percent': 446, 'stop_loss': 351, 'risk_reward': 31, 'chop_rsi_len': 40, 'chop_bandwidth': 142}],
['SWJi?Y', 36, 73, 32.8, 37, 8, -0.92, {'ott_len': 28, 'ott_percent': 516, 'stop_loss': 201, 'risk_reward': 68, 'chop_rsi_len': 16, 'chop_bandwidth': 190}],
['v@WLkU', 46, 47, 45.51, 20, 10, -4.43, {'ott_len': 49, 'ott_percent': 313, 'stop_loss': 258, 'risk_reward': 42, 'chop_rsi_len': 43, 'chop_bandwidth': 175}],
['lR\\iHN', 38, 62, 35.84, 28, 7, -4.01, {'ott_len': 43, 'ott_percent': 472, 'stop_loss': 280, 'risk_reward': 68, 'chop_rsi_len': 21, 'chop_bandwidth': 149}],
['l7\\gc^', 60, 35, 42.7, 25, 8, -1.2, {'ott_len': 43, 'ott_percent': 233, 'stop_loss': 280, 'risk_reward': 66, 'chop_rsi_len': 38, 'chop_bandwidth': 208}],
['wLXY\\1', 36, 71, 20.85, 14, 7, -4.76, {'ott_len': 50, 'ott_percent': 419, 'stop_loss': 263, 'risk_reward': 53, 'chop_rsi_len': 34, 'chop_bandwidth': 43}],
['i7nMgb', 54, 24, 28.38, 0, 4, -2.04, {'ott_len': 41, 'ott_percent': 233, 'stop_loss': 360, 'risk_reward': 43, 'chop_rsi_len': 40, 'chop_bandwidth': 223}],
['F/0eI[', 40, 154, 33.68, 42, 21, 2.91, {'ott_len': 20, 'ott_percent': 162, 'stop_loss': 85, 'risk_reward': 64, 'chop_rsi_len': 22, 'chop_bandwidth': 197}],
['\\ERgMp', 53, 28, 16.3, 33, 6, -2.59, {'ott_len': 33, 'ott_percent': 357, 'stop_loss': 236, 'risk_reward': 66, 'chop_rsi_len': 24, 'chop_bandwidth': 274}],
['_7@QqN', 44, 87, 28.24, 46, 15, 3.21, {'ott_len': 35, 'ott_percent': 233, 'stop_loss': 156, 'risk_reward': 46, 'chop_rsi_len': 46, 'chop_bandwidth': 149}],
['OEJO,F', 41, 105, 33.62, 20, 10, -4.61, {'ott_len': 25, 'ott_percent': 357, 'stop_loss': 201, 'risk_reward': 45, 'chop_rsi_len': 4, 'chop_bandwidth': 120}],
['5swn)a', 30, 86, 13.25, 8, 12, -6.03, {'ott_len': 9, 'ott_percent': 765, 'stop_loss': 400, 'risk_reward': 72, 'chop_rsi_len': 3, 'chop_bandwidth': 219}],
['4juD3[', 36, 95, 32.91, 14, 7, -3.13, {'ott_len': 8, 'ott_percent': 685, 'stop_loss': 391, 'risk_reward': 35, 'chop_rsi_len': 9, 'chop_bandwidth': 197}],
['91u6iJ', 33, 163, 31.1, 25, 27, -3.59, {'ott_len': 12, 'ott_percent': 180, 'stop_loss': 391, 'risk_reward': 22, 'chop_rsi_len': 41, 'chop_bandwidth': 135}],
['c3rg61', 39, 91, 11.05, 27, 11, -1.18, {'ott_len': 38, 'ott_percent': 197, 'stop_loss': 378, 'risk_reward': 66, 'chop_rsi_len': 11, 'chop_bandwidth': 43}],
['\\BAZGb', 40, 71, 22.33, 36, 11, -3.44, {'ott_len': 33, 'ott_percent': 330, 'stop_loss': 161, 'risk_reward': 54, 'chop_rsi_len': 21, 'chop_bandwidth': 223}],
['H<XF,l', 40, 98, 31.16, 16, 12, -5.22, {'ott_len': 21, 'ott_percent': 277, 'stop_loss': 263, 'risk_reward': 37, 'chop_rsi_len': 4, 'chop_bandwidth': 260}],
['5Bl/TL', 32, 153, 26.35, 28, 21, 0.03, {'ott_len': 9, 'ott_percent': 330, 'stop_loss': 351, 'risk_reward': 16, 'chop_rsi_len': 29, 'chop_bandwidth': 142}],
['DFRlX-', 38, 112, 21.16, 27, 11, -1.95, {'ott_len': 18, 'ott_percent': 366, 'stop_loss': 236, 'risk_reward': 70, 'chop_rsi_len': 31, 'chop_bandwidth': 28}],
['1EkquE', 33, 156, 45.58, 27, 18, -1.61, {'ott_len': 7, 'ott_percent': 357, 'stop_loss': 347, 'risk_reward': 75, 'chop_rsi_len': 49, 'chop_bandwidth': 116}],
['D9YmB.', 35, 139, 12.09, 42, 14, -1.17, {'ott_len': 18, 'ott_percent': 251, 'stop_loss': 267, 'risk_reward': 71, 'chop_rsi_len': 18, 'chop_bandwidth': 32}],
['_(KrZG', 40, 145, 18.09, 28, 21, -4.73, {'ott_len': 35, 'ott_percent': 100, 'stop_loss': 205, 'risk_reward': 76, 'chop_rsi_len': 32, 'chop_bandwidth': 124}],
['1CndgF', 34, 156, 49.82, 41, 17, 2.8, {'ott_len': 7, 'ott_percent': 339, 'stop_loss': 360, 'risk_reward': 63, 'chop_rsi_len': 40, 'chop_bandwidth': 120}],
['tutp,b', 50, 40, 52.45, 0, 5, -5.75, {'ott_len': 48, 'ott_percent': 782, 'stop_loss': 387, 'risk_reward': 74, 'chop_rsi_len': 4, 'chop_bandwidth': 223}],
['07t1iJ', 30, 199, 23.05, 26, 30, -1.64, {'ott_len': 6, 'ott_percent': 233, 'stop_loss': 387, 'risk_reward': 18, 'chop_rsi_len': 41, 'chop_bandwidth': 135}],
['75\\adC', 37, 200, 68.9, 21, 32, -4.78, {'ott_len': 10, 'ott_percent': 215, 'stop_loss': 280, 'risk_reward': 61, 'chop_rsi_len': 38, 'chop_bandwidth': 109}],
]
| 144
| 159
| 0.619529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,526
| 0.531566
|
3d6d1e7bb92fb8ada9eb142b244859a83f2f343d
| 2,909
|
py
|
Python
|
modules/winrm/isodate/__init__.py
|
frankyrumple/smc
|
975945ddcff754dd95f2e1a8bd4bf6e43a0f91f6
|
[
"MIT"
] | null | null | null |
modules/winrm/isodate/__init__.py
|
frankyrumple/smc
|
975945ddcff754dd95f2e1a8bd4bf6e43a0f91f6
|
[
"MIT"
] | null | null | null |
modules/winrm/isodate/__init__.py
|
frankyrumple/smc
|
975945ddcff754dd95f2e1a8bd4bf6e43a0f91f6
|
[
"MIT"
] | null | null | null |
##############################################################################
# Copyright 2009, Gerhard Weis
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the authors nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT
##############################################################################
'''
Import all essential functions and constants to re-export them here for easy
access.
This module contains also various pre-defined ISO 8601 format strings.
'''
from .isodates import parse_date, date_isoformat
from .isotime import parse_time, time_isoformat
from .isodatetime import parse_datetime, datetime_isoformat
from .isoduration import parse_duration, duration_isoformat, Duration
from .isoerror import ISO8601Error
from .isotzinfo import parse_tzinfo, tz_isoformat
from .tzinfo import UTC, FixedOffset, LOCAL
from .duration import Duration
from .isostrf import strftime
from .isostrf import DATE_BAS_COMPLETE, DATE_BAS_ORD_COMPLETE
from .isostrf import DATE_BAS_WEEK, DATE_BAS_WEEK_COMPLETE
from .isostrf import DATE_CENTURY, DATE_EXT_COMPLETE
from .isostrf import DATE_EXT_ORD_COMPLETE, DATE_EXT_WEEK
from .isostrf import DATE_EXT_WEEK_COMPLETE, DATE_MONTH, DATE_YEAR
from .isostrf import TIME_BAS_COMPLETE, TIME_BAS_MINUTE
from .isostrf import TIME_EXT_COMPLETE, TIME_EXT_MINUTE
from .isostrf import TIME_HOUR
from .isostrf import TZ_BAS, TZ_EXT, TZ_HOUR
from .isostrf import DT_BAS_COMPLETE, DT_EXT_COMPLETE
from .isostrf import DT_BAS_ORD_COMPLETE, DT_EXT_ORD_COMPLETE
from .isostrf import DT_BAS_WEEK_COMPLETE, DT_EXT_WEEK_COMPLETE
from .isostrf import D_DEFAULT, D_WEEK, D_ALT_EXT, D_ALT_BAS
from .isostrf import D_ALT_BAS_ORD, D_ALT_EXT_ORD
| 51.946429
| 78
| 0.77243
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,687
| 0.579924
|
3d6fef82415cc33c1f679313aef262f6b3b670a9
| 17,848
|
py
|
Python
|
sbvat/utils.py
|
thudzj/BVAT
|
2c7073cb7967583035eece7f4819821b313d73e6
|
[
"MIT"
] | 3
|
2019-08-04T03:05:51.000Z
|
2021-04-24T02:35:05.000Z
|
sbvat/utils.py
|
thudzj/BVAT
|
2c7073cb7967583035eece7f4819821b313d73e6
|
[
"MIT"
] | null | null | null |
sbvat/utils.py
|
thudzj/BVAT
|
2c7073cb7967583035eece7f4819821b313d73e6
|
[
"MIT"
] | 1
|
2019-12-29T13:49:22.000Z
|
2019-12-29T13:49:22.000Z
|
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from scipy.sparse.linalg.eigen.arpack import eigsh
import sys
import tensorflow as tf
import os
import time
import json
from networkx.readwrite import json_graph
from sklearn.metrics import f1_score
import multiprocessing
def parse_index_file(filename):
"""Parse index file."""
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=np.bool)
def save_sparse_csr(filename,array):
np.savez(filename,data = array.data ,indices=array.indices,
indptr =array.indptr, shape=array.shape )
def load_sparse_csr(filename):
loader = np.load(filename)
return sp.csr_matrix(( loader['data'], loader['indices'], loader['indptr']),
shape = loader['shape'])
def starfind_4o_nbrs(args):
return find_4o_nbrs(*args)
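# find_4o_nbrs: for each node index in li, accumulate the adjacency rows of its neighbors,
# neighbors-of-neighbors and so on, returning the indices of all nodes within 4 hops.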
def find_4o_nbrs(adj, li):
nbrs = []
for i in li:
print(i)
tmp = adj[i]
for ii in np.nonzero(adj[i])[1]:
tmp += adj[ii]
for iii in np.nonzero(adj[ii])[1]:
tmp += adj[iii]
tmp += adj[np.nonzero(adj[iii])[1]].sum(0)
nbrs.append(np.nonzero(tmp)[1])
return nbrs
def load_data(dataset_str, is_sparse):
if dataset_str == "ppi":
return load_graphsage_data('data/ppi/ppi', is_sparse)
"""Load data."""
if dataset_str != 'nell':
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range-min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range-min(test_idx_range), :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
features = preprocess_features(features, is_sparse)
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
support = preprocess_adj(adj)
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
idx_test = test_idx_range.tolist()
idx_train = range(len(y))
idx_val = range(len(y), len(y)+500)
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
# y_train = np.zeros(labels.shape)
# y_val = np.zeros(labels.shape)
# y_test = np.zeros(labels.shape)
# y_train = labels[train_mask, :]
# y_val[val_mask, :] = labels[val_mask, :]
# y_test[test_mask, :] = labels[test_mask, :]
else:
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("data/savedData/{}.{}".format(dataset_str, names[i]), 'rb') as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file("data/savedData/{}.test.index".format(dataset_str))
features = allx.tolil()
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
labels = ally
features = preprocess_features(features, is_sparse)
support = preprocess_adj(adj)
idx_test = test_idx_reorder
idx_train = range(len(y))
idx_val = range(len(y), len(y)+969)
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
if not os.path.isfile("data/{}.nbrs.npz".format(dataset_str)):
N = adj.shape[0]
pool = multiprocessing.Pool(processes=56)
lis = []
for i in range(32):
li = range(int(N/32)*i, int(N/32)*(i+1))
if i == 31:
li = range(int(N/32)*i, N)
print(li)
lis.append(li)
adjs = [adj] * 32
results = pool.map(starfind_4o_nbrs, zip(adjs, lis))
pool.close()
pool.join()
nbrs = []
for re in results:
nbrs += re
print(len(nbrs))
np.savez("data/{}.nbrs.npz".format(dataset_str), data = nbrs)
else:
loader = np.load("data/{}.nbrs.npz".format(dataset_str))
nbrs = loader['data']
print(adj.shape, len(nbrs))
return nbrs, support, support, features, labels, train_mask, val_mask, test_mask
def sparse_to_tuple(sparse_mx):
"""Convert sparse matrix to tuple representation."""
def to_tuple(mx):
if not sp.isspmatrix_coo(mx):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return coords, values, shape
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
def preprocess_features(features, sparse=True):
"""Row-normalize feature matrix and convert to tuple representation"""
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
if sparse:
return sparse_to_tuple(features)
else:
return features.toarray()
def normalize_adj(adj):
"""Symmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
def preprocess_adj(adj):
"""Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation."""
adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))
return sparse_to_tuple(adj_normalized)
def construct_feed_dict(features, support, labels, labels_mask, placeholders, nbrs):
"""Construct feed dictionary."""
feed_dict = dict()
feed_dict.update({placeholders['labels']: labels})
feed_dict.update({placeholders['labels_mask']: labels_mask})
feed_dict.update({placeholders['features']: features})
feed_dict.update({placeholders['support']: support})
feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})
r1 = sample_nodes(nbrs)
feed_dict.update({placeholders['adv_mask1']: r1})
return feed_dict
def chebyshev_polynomials(adj, k):
"""Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation)."""
print("Calculating Chebyshev polynomials up to order {}...".format(k))
adj_normalized = normalize_adj(adj)
laplacian = sp.eye(adj.shape[0]) - adj_normalized
largest_eigval, _ = eigsh(laplacian, 1, which='LM')
scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])
t_k = list()
t_k.append(sp.eye(adj.shape[0]))
t_k.append(scaled_laplacian)
def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
s_lap = sp.csr_matrix(scaled_lap, copy=True)
return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two
for i in range(2, k+1):
t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))
return sparse_to_tuple(t_k)
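# sample_nodes: draw `num` node indices that are not covered by the neighborhoods of earlier
# picks, marking each pick's neighbors as used, and return the picks as a boolean mask.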
def sample_nodes(nbrs, num=100):
N = len(nbrs)
flag = np.zeros([N])
output = [0] * num
#norm_mtx = np.zeros([N, N])
for i in range(num):
a = np.random.randint(0, N)
while flag[a] == 1:
a = np.random.randint(0, N)
output[i] = a
# for nell to speed up
flag[nbrs[a]] = 1
# tmp = np.zeros([N])
# tmp[nbrs[a]] = 1
#norm_mtx[nbrs[a]] = tmp
# output_ = np.ones([N])
# output_[output] = 0
# output_ = np.nonzero(output_)[0]
return sample_mask(output, N)#, norm_mtx
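# Consistency loss between two sets of logits; only the cross-entropy term -sum(q * log p)
# is returned (the q * log q term is computed but not used).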
def kl_divergence_with_logit(q_logit, p_logit, mask=None):
if not mask is None:
q = tf.nn.softmax(q_logit)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
qlogq = tf.reduce_mean(tf.reduce_sum(q * tf.nn.log_softmax(q_logit), 1) * mask)
qlogp = tf.reduce_mean(tf.reduce_sum(q * tf.nn.log_softmax(p_logit), 1) * mask)
return - qlogp
else:
q = tf.nn.softmax(q_logit)
qlogq = tf.reduce_sum(q * tf.nn.log_softmax(q_logit), 1)
qlogp = tf.reduce_sum(q * tf.nn.log_softmax(p_logit), 1)
return tf.reduce_mean( - qlogp)
def entropy_y_x(logit):
p = tf.nn.softmax(logit)
return -tf.reduce_mean(tf.reduce_sum(p * tf.nn.log_softmax(logit), 1))
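# Normalize d to (approximately) unit L2 norm per row, with a max-abs pre-scaling for
# numerical stability; supports both dense and sparse inputs.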
def get_normalized_vector(d, sparse=False, indices=None, dense_shape=None):
if sparse:
d /= (1e-12 + tf.reduce_max(tf.abs(d)))
d2 = tf.SparseTensor(indices, tf.square(d), dense_shape)
d = tf.SparseTensor(indices, d, dense_shape)
d /= tf.sqrt(1e-6 + tf.sparse_reduce_sum(d2, 1, keep_dims=True))
return d
else:
d /= (1e-12 + tf.reduce_max(tf.abs(d)))
d /= tf.sqrt(1e-6 + tf.reduce_sum(tf.pow(d, 2.0), 1, keepdims=True))
return d
def get_normalized_matrix(d, sparse=False, indices=None, dense_shape=None):
if not sparse:
return tf.nn.l2_normalize(d, [0,1])
else:
return tf.SparseTensor(indices, tf.nn.l2_normalize(d, [0]), dense_shape)
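# load_graphsage_data: load a GraphSAGE-format dataset (graph JSON, feature matrix, id/class
# maps), build normalized train/full adjacency matrices, and cache everything to an .npz file
# (or load the cached copy if it already exists).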
def load_graphsage_data(prefix, is_sparse, normalize=True, max_degree=-1):
version_info = map(int, nx.__version__.split('.'))
major = version_info[0]
minor = version_info[1]
assert (major <= 1) and (minor <= 11), "networkx major version must be <= 1.11 in order to load graphsage data"
# Save normalized version
if max_degree==-1:
npz_file = prefix + '.npz'
else:
npz_file = '{}_deg{}.npz'.format(prefix, max_degree)
if os.path.exists(npz_file):
start_time = time.time()
print('Found preprocessed dataset {}, loading...'.format(npz_file))
data = np.load(npz_file)
num_data = data['num_data']
feats = data['feats']
labels = data['labels']
train_data = data['train_data']
val_data = data['val_data']
test_data = data['test_data']
train_adj = data['train_adj']
full_adj = data['full_adj']
train_adj_nonormed = sp.csr_matrix((data['train_adj_data'], data['train_adj_indices'], data['train_adj_indptr']), shape=data['train_adj_shape'])
print('Finished in {} seconds.'.format(time.time() - start_time))
else:
print('Loading data...')
start_time = time.time()
G_data = json.load(open(prefix + "-G.json"))
G = json_graph.node_link_graph(G_data)
feats = np.load(prefix + "-feats.npy").astype(np.float32)
id_map = json.load(open(prefix + "-id_map.json"))
if id_map.keys()[0].isdigit():
conversion = lambda n: int(n)
else:
conversion = lambda n: n
id_map = {conversion(k):int(v) for k,v in id_map.iteritems()}
walks = []
class_map = json.load(open(prefix + "-class_map.json"))
if isinstance(class_map.values()[0], list):
lab_conversion = lambda n : n
else:
lab_conversion = lambda n : int(n)
class_map = {conversion(k): lab_conversion(v) for k,v in class_map.iteritems()}
## Remove all nodes that do not have val/test annotations
## (necessary because of networkx weirdness with the Reddit data)
broken_count = 0
to_remove = []
for node in G.nodes():
if not id_map.has_key(node):
#if not G.node[node].has_key('val') or not G.node[node].has_key('test'):
to_remove.append(node)
broken_count += 1
for node in to_remove:
G.remove_node(node)
print("Removed {:d} nodes that lacked proper annotations due to networkx versioning issues".format(broken_count))
# Construct adjacency matrix
print("Loaded data ({} seconds).. now preprocessing..".format(time.time()-start_time))
start_time = time.time()
edges = []
for edge in G.edges():
if id_map.has_key(edge[0]) and id_map.has_key(edge[1]):
edges.append((id_map[edge[0]], id_map[edge[1]]))
print('{} edges'.format(len(edges)))
num_data = len(id_map)
if max_degree != -1:
print('Subsampling edges...')
edges = subsample_edges(edges, num_data, max_degree)
val_data = np.array([id_map[n] for n in G.nodes()
if G.node[n]['val']], dtype=np.int32)
test_data = np.array([id_map[n] for n in G.nodes()
if G.node[n]['test']], dtype=np.int32)
is_train = np.ones((num_data), dtype=np.bool)
is_train[val_data] = False
is_train[test_data] = False
train_data = np.array([n for n in range(num_data) if is_train[n]], dtype=np.int32)
val_data = sample_mask(val_data, num_data)
test_data = sample_mask(test_data, num_data)
train_data = sample_mask(train_data, num_data)
train_edges = [(e[0], e[1]) for e in edges if is_train[e[0]] and is_train[e[1]]]
edges = np.array(edges, dtype=np.int32)
train_edges = np.array(train_edges, dtype=np.int32)
# Process labels
if isinstance(class_map.values()[0], list):
num_classes = len(class_map.values()[0])
labels = np.zeros((num_data, num_classes), dtype=np.float32)
for k in class_map.keys():
labels[id_map[k], :] = np.array(class_map[k])
else:
num_classes = len(set(class_map.values()))
labels = np.zeros((num_data, num_classes), dtype=np.float32)
for k in class_map.keys():
labels[id_map[k], class_map[k]] = 1
if normalize:
from sklearn.preprocessing import StandardScaler
train_ids = np.array([id_map[n] for n in G.nodes()
if not G.node[n]['val'] and not G.node[n]['test']])
train_feats = feats[train_ids]
scaler = StandardScaler()
scaler.fit(train_feats)
feats = scaler.transform(feats)
def _normalize_adj(edges):
adj = sp.csr_matrix((np.ones((edges.shape[0]), dtype=np.float32),
(edges[:,0], edges[:,1])), shape=(num_data, num_data))
adj += adj.transpose()
tmp = adj
# rowsum = np.array(adj.sum(1)).flatten()
# d_inv = 1.0 / (rowsum+1e-20)
# d_mat_inv = sp.diags(d_inv, 0)
adj = normalize_adj(adj + sp.eye(adj.shape[0]))#d_mat_inv.dot(adj).tocoo()
coords = np.array((adj.row, adj.col)).astype(np.int32)
return tmp, adj.data, coords
train_adj_nonormed, train_v, train_coords = _normalize_adj(train_edges)
_, full_v, full_coords = _normalize_adj(edges)
def _get_adj(data, coords):
adj = sp.csr_matrix((data, (coords[0,:], coords[1,:])),
shape=(num_data, num_data))
return adj
train_adj = sparse_to_tuple(_get_adj(train_v, train_coords))
full_adj = sparse_to_tuple(_get_adj(full_v, full_coords))
# train_feats = train_adj.dot(feats)
# test_feats = full_adj.dot(feats)
print("Done. {} seconds.".format(time.time()-start_time))
with open(npz_file, 'wb') as fwrite:
np.savez(fwrite, num_data=num_data,
train_adj=train_adj,
train_adj_data=train_adj_nonormed.data, train_adj_indices=train_adj_nonormed.indices, train_adj_indptr=train_adj_nonormed.indptr, train_adj_shape=train_adj_nonormed.shape,
full_adj=full_adj,
feats=feats,
labels=labels,
train_data=train_data, val_data=val_data,
test_data=test_data)
return train_adj_nonormed, train_adj, full_adj, feats, labels, train_data, val_data, test_data
def calc_f1(y_pred, y_true, multitask):
if multitask:
y_pred[y_pred>0] = 1
y_pred[y_pred<=0] = 0
else:
y_true = np.argmax(y_true, axis=1)
y_pred = np.argmax(y_pred, axis=1)
return f1_score(y_true, y_pred, average="micro"), \
f1_score(y_true, y_pred, average="macro")
| 38.218415
| 200
| 0.601244
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,531
| 0.141809
|
3d71bcc45f53747aca6197878307201d4f4b2564
| 506
|
py
|
Python
|
tags/models.py
|
yuyuyuhaoshi/Blog-BE
|
a485d5159076d619d4fd6019fe9b96ac04020d4d
|
[
"Apache-2.0"
] | null | null | null |
tags/models.py
|
yuyuyuhaoshi/Blog-BE
|
a485d5159076d619d4fd6019fe9b96ac04020d4d
|
[
"Apache-2.0"
] | null | null | null |
tags/models.py
|
yuyuyuhaoshi/Blog-BE
|
a485d5159076d619d4fd6019fe9b96ac04020d4d
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from django.utils.timezone import now
from django.contrib.auth.models import User
from utils.base_model import SoftDeletionModel
class Tag(SoftDeletionModel):
    name = models.CharField('标题名', max_length=100, unique=True, blank=False, null=False)
    created_time = models.DateTimeField('创建时间', default=now)

    class Meta:
        ordering = ['name']
        verbose_name = "标签"
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.name
| 26.631579
| 88
| 0.717391
| 362
| 0.69084
| 0
| 0
| 0
| 0
| 0
| 0
| 39
| 0.074427
|
3d71fa9e2abe22d155154c76e5151b1d3926e5d7
| 1,410
|
py
|
Python
|
validate_staging_area.py
|
DataBiosphere/hca-import-validation
|
f57710ec05e3b343bac15cc85d372b4ce2fbe15f
|
[
"Apache-2.0"
] | null | null | null |
validate_staging_area.py
|
DataBiosphere/hca-import-validation
|
f57710ec05e3b343bac15cc85d372b4ce2fbe15f
|
[
"Apache-2.0"
] | 11
|
2021-02-17T21:16:36.000Z
|
2022-01-14T22:49:27.000Z
|
validate_staging_area.py
|
DataBiosphere/hca-import-validation
|
f57710ec05e3b343bac15cc85d372b4ce2fbe15f
|
[
"Apache-2.0"
] | 1
|
2021-06-24T15:10:03.000Z
|
2021-06-24T15:10:03.000Z
|
"""
Runs a pre-check of a staging area to identify issues that might cause the
snapshot or indexing processes to fail.
"""
import argparse
import sys
from hca.staging_area_validator import StagingAreaValidator
def _parse_args(argv):
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--staging-area', '-s',
                        required=True,
                        help='The Google Cloud Storage URL of the staging area. '
                             'Syntax is gs://<bucket>[/<path>].')
    parser.add_argument('--ignore-dangling-inputs', '-I',
                        action='store_true',
                        default=False,
                        help='Ignore errors caused by metadata files not found '
                             'in the staging area for input-only entities.')
    parser.add_argument('--no-json-validation', '-J',
                        action='store_false',
                        default=True,
                        dest='validate_json',
                        help='Do not validate JSON documents against their schema.')
    return parser.parse_args(argv)


if __name__ == '__main__':
    args = _parse_args(sys.argv[1:])
    adapter = StagingAreaValidator(
        staging_area=args.staging_area,
        ignore_dangling_inputs=args.ignore_dangling_inputs,
        validate_json=args.validate_json
    )
    sys.exit(adapter.main())
| 38.108108
| 84
| 0.592199
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 486
| 0.344681
|
3d7257323cd6a29d01231ce12bd9760e4b104696
| 6,621
|
py
|
Python
|
spider_service/app/spider/selenium/webdriver.py
|
seniortesting/python-spider
|
0b70817373e2e22267ddf3b80b9b7eb15931e41e
|
[
"MIT"
] | null | null | null |
spider_service/app/spider/selenium/webdriver.py
|
seniortesting/python-spider
|
0b70817373e2e22267ddf3b80b9b7eb15931e41e
|
[
"MIT"
] | null | null | null |
spider_service/app/spider/selenium/webdriver.py
|
seniortesting/python-spider
|
0b70817373e2e22267ddf3b80b9b7eb15931e41e
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
import random
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from app.api.util.web_request import WebRequest, USER_AGENT_PC, USER_AGENT_MOBILE
class SpiderWebDriver(object):
def __init__(self, url: str,
userAgent: str = None,referer: str=None, proxy: str = None):
# Set up the browser options
chrome_options = Options()
# Capability options: http://chromedriver.chromium.org/capabilities
# Full list of switches: https://peter.sh/experiments/chromium-command-line-switches/
chrome_options.add_argument('lang=zh_CN.UTF-8')
# chrome_options.add_argument('headless')
# chrome_options.add_argument('window-size=1024,768')
chrome_options.add_argument('no-sandbox')
chrome_options.add_argument("disable-gpu")
chrome_options.add_argument("ignore-certificate-errors");
chrome_options.add_argument("disable-popup-blocking");
chrome_options.add_argument("disable-default-apps");
# Chrome is being controlled by automated test software
if userAgent is None:
# Default to a desktop Safari user agent
userAgent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2'
chrome_options.add_argument('user-agent="' + userAgent + '"')
chrome_options.add_argument('referer="https://www.google.com/"')
if proxy is not None:
proxy_str = "http://{proxy}".format(proxy=proxy)
chrome_options.add_argument('proxy-server=' + proxy_str)
# http://chromedriver.storage.googleapis.com/index.html
self.driver = webdriver.Chrome(options=chrome_options)
self.driver.maximize_window()
if url:
self.driver.get(url=url)
def close(self):
driver = self.driver
if driver is None:
return
try:
driver.close()
driver.quit()
finally:
self.driver = None
def __enter__(self):
return self
def __exit__(self, *exc_info):
del exc_info
self.close()
def open(self, url):
self.driver.get(url)
def get_cookies(self):
cookies_dict = {}
cookies = self.driver.get_cookies()
for cookie in cookies:
cookies_dict[cookie['name']] = cookie['value']
return cookies_dict
def execute_js(self, js, *args):
return self.driver.execute_script(js, args)
def adsenseClick():
# Fetch a list of posts from the WordPress site to pick random articles from
url = 'https://pingbook.top/wp-json/wp/v2/posts'
r=WebRequest()
post_list=r.pc().get(url=url).json()
# links=[ item.get('link') for item in post_list]
# print(links)
# post_list =[{'link': 'https://pingbook.top/vue-videojs-m3u8-player-a-html5-video-player/'}]
# Simulate opening the articles
proxyset = set()
for num in range(10000):
post=random.choice(post_list)
post_url=post.get('link')
print('发送请求的文章地址是: {}'.format(post_url))
agents = USER_AGENT_PC + USER_AGENT_MOBILE
time_count = num + 1
driver = None
try:
content = r.pc().get('https://open.pingbook.top/proxy/get?type=valid').json()
proxy = content.get('data').get('proxy')
print('发送请求的代理是: {}'.format(proxy))
if proxy not in proxyset:
# Check whether the same IP address has already been used
proxyset.add(proxy)
agent = random.choice(agents)
driver = SpiderWebDriver(post_url, agent, proxy=proxy)  # pass proxy by keyword so it is not taken as referer
driver.open(post_url)
print('已经打开博客地址: {}'.format(post_url))
driver.driver.refresh()
submitBtn =driver.driver.find_element_by_id('submit')
if submitBtn:
# Scroll to the corresponding ad section
driver.driver.execute_script('arguments[0].scrollIntoView(true);',submitBtn)
submitBtn.click()
time.sleep(3)
# driver.driver.refresh()
# wait = WebDriverWait(driver.driver, 6)
# element = wait.until(expected_conditions.element_to_be_clickable((By.ID, 'ads')))
# driver.close()
print('第{}次轮训成功,代理: {}。。。。'.format(time_count, proxy))
# actionBtn = driver.driver.find_element_by_class_name('copy-btn')
# if actionBtn:
# driver.driver.refresh()
# wait = WebDriverWait(driver.driver, 6)
# element = wait.until(expected_conditions.element_to_be_clickable((By.ID, 'ads')))
# actionBtn.click()
# driver.close()
# print('第{}次轮训成功,代理: {}。。。。'.format(time, proxy))
else:
print('当前代理地址: {}已经存在,不再使用该地址进行测试,代理池大小: {}!'.format(proxy,len(proxyset)))
except Exception as e:
print('第{}次轮训失败,失败信息: {}。。。。'.format(time_count, e))
# raise
finally:
if driver is not None:
driver.close()
def searchGoogle():
keyword= 'nuxt create nuxt app error :pingbook.top'
# Simulate searching and opening the article
proxyset = set()
r=WebRequest()
agents=USER_AGENT_PC
for num in range(10000):
driver = None
try:
content = r.pc().get('https://open.pingbook.top/proxy/get?type=valid').json()
proxy = content.get('data').get('proxy')
print('发送请求的代理是: {}'.format(proxy))
if proxy not in proxyset:
# Check whether the same IP address has already been used
proxyset.add(proxy)
agent = random.choice(agents)
spider = SpiderWebDriver(None, agent, proxy=proxy)  # pass proxy by keyword so it is not taken as referer
spider.open('https://google.com')
driver =spider.driver
# Enter the search keyword
inputbox=driver.find_element_by_name('q')
if inputbox:
inputbox.send_keys(keyword)
inputbox.send_keys(Keys.ENTER)
time.sleep(3)
# Click the first search result
first_record=driver.find_element_by_css_selector('#rso > div:nth-child(1) > div > div:nth-child(1) > div > div > div.r > a')
first_record.click()
time.sleep(5)
driver.refresh()
time.sleep(6)
except Exception as e:
print('第{}次轮训失败,失败信息: {}。。。。'.format(num, e))
finally:
if driver is not None:
driver.quit()
if __name__ == '__main__':
adsenseClick()
| 37.196629
| 144
| 0.564718
| 2,312
| 0.328829
| 0
| 0
| 0
| 0
| 0
| 0
| 2,395
| 0.340634
|
3d7270ed2ccd3fdf53730944e85357d2c3e72251
| 2,879
|
py
|
Python
|
Extended Programming Challenges Python/Mnozenie Macierzy/test_main.py
|
szachovy/School-and-Training
|
70f07c0d077da7ba1920d28d881fff7ddcbc37d9
|
[
"MIT"
] | null | null | null |
Extended Programming Challenges Python/Mnozenie Macierzy/test_main.py
|
szachovy/School-and-Training
|
70f07c0d077da7ba1920d28d881fff7ddcbc37d9
|
[
"MIT"
] | null | null | null |
Extended Programming Challenges Python/Mnozenie Macierzy/test_main.py
|
szachovy/School-and-Training
|
70f07c0d077da7ba1920d28d881fff7ddcbc37d9
|
[
"MIT"
] | null | null | null |
import unittest
import main
import re
class MatrixRowsVerification(unittest.TestCase):
    def setUp(self):
        self.matrix1 = {0: [1, 2, 3], 1: [4, 5, 6]}
        self.matrix2 = {0: [1, 2], 1: [3, 4], 2: [5, 6]}

    def test_getRowsType(self):
        self.assertIsInstance(main.getRows(self.matrix1), int, 'wrong type of returned number of rows')

    def test_getRowsNonNegative(self):
        self.assertGreaterEqual(main.getRows(self.matrix1), 0, 'rows of matrix cannot be negative number')

    def test_getRowsVerification(self):
        self.assertEqual(main.getRows(self.matrix1), 2, 'returned number of rows isnt correct')
        self.assertEqual(main.getRows(self.matrix2), 3, 'returned number of rows isnt correct')


class MatrixColsVerification(unittest.TestCase):
    def setUp(self):
        self.matrix1 = {0: [1, 2, 3], 1: [4, 5, 6]}
        self.matrix2 = {0: [1, 2], 1: [3, 4], 2: [5, 6]}

    def test_getColsType(self):
        self.assertIsInstance(main.getCols(self.matrix1), int, 'wrong type of returned number of columns')

    def test_getColsNonNegative(self):
        self.assertGreaterEqual(main.getCols(self.matrix1), 0, 'rows of matrix cannot be negative number')

    def test_getColsVerification(self):
        self.assertEqual(main.getCols(self.matrix1), 3, 'returned number of rows isnt correct')
        self.assertEqual(main.getCols(self.matrix2), 2, 'returned number of rows isnt correct')


class AutocompleteVerification(unittest.TestCase):
    def test_autocomplete(self):
        matrix = {0: [1, 2, 3], 1: [4], 2: [5, 6]}
        expectedmatrix = {0: [1, 2, 3], 1: [4, 0, 0], 2: [5, 6, 0]}
        self.assertEqual(main.autocomplete(matrix), expectedmatrix, 'autocomplete zeros not handled')


class WrongInputException(Exception):
    pass


class WriteRowsVerification(unittest.TestCase):
    def setUp(self):
        self.matrix = main.writerows()

    def test_wrong_input(self):
        self.assertTrue(re.findall(r"[A-Za-z]*$", str(self.matrix.values())), 'Letters in matrix has been found')

    def test_returnsDict(self):
        try:
            self.assertIsInstance(self.matrix, dict)
        except WrongInputException:
            self.fail('writing rows doesnt format matrix (dict with rows and cols)')


class VerifyFinalMatrix(unittest.TestCase):
    def setUp(self):
        self.matrix1 = {0: [1, 2, 3], 1: [4, 5, 6]}
        self.matrix2 = {0: [1, 2], 1: [3, 4], 2: [5, 6]}
        self.final = {0: [9, 12, 15], 1: [19, 26, 33], 2: [29, 40, 51]}

    def test_checkFinal(self):
        self.assertEqual(main.Calculate(self.matrix1, self.matrix2).multiply(), self.final,
                         'Unexpected final matrix after calculations')

    def tearDown(self):
        self.final.clear()


if __name__ == '__main__':
    unittest.main()
| 38.905405
| 118
| 0.632511
| 2,779
| 0.965266
| 0
| 0
| 0
| 0
| 0
| 0
| 513
| 0.178187
|
3d73ea7a25229da399450bef857ee8338b98b235
| 1,210
|
py
|
Python
|
setup.py
|
m45t3r/livedumper
|
f6441283269b4a602cafea3be5cda9446fc64005
|
[
"BSD-2-Clause"
] | 17
|
2015-02-10T12:18:22.000Z
|
2018-03-23T05:28:51.000Z
|
setup.py
|
m45t3r/livedumper
|
f6441283269b4a602cafea3be5cda9446fc64005
|
[
"BSD-2-Clause"
] | 3
|
2015-01-12T17:32:20.000Z
|
2016-12-13T23:55:38.000Z
|
setup.py
|
m45t3r/livedumper
|
f6441283269b4a602cafea3be5cda9446fc64005
|
[
"BSD-2-Clause"
] | 3
|
2015-02-06T09:58:09.000Z
|
2016-01-04T23:46:28.000Z
|
import os
from setuptools import setup
def read(fname):
    filename = os.path.join(os.path.dirname(__file__), fname)
    return open(filename).read().replace('#', '')
setup(
name="livedumper",
version="0.3.0",
author="Thiago Kenji Okada",
author_email="thiago.mast3r@gmail.com",
description=("Livestreamer stream dumper"),
license="Simplified BSD",
keywords="video streaming downloader dumper",
url='https://github.com/m45t3r/livedumper',
packages=["livedumper"],
package_dir={"": "src"},
scripts=["src/livedumper_cli/livedumper"],
install_requires=("appdirs", "livestreamer", "requests"),
long_description=read("README.rst"),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"Environment :: Console",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Multimedia :: Sound/Audio",
"Topic :: Multimedia :: Video",
"Topic :: Utilities",
],
)
| 31.842105
| 61
| 0.613223
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 661
| 0.546281
|
3d75f72f3f1eb09ca962b85e8adb34487fcfe9b8
| 2,862
|
py
|
Python
|
scripts/show_yolo.py
|
markpp/object_detectors
|
8a6cac32ec2d8b578c0d301feceef19390343e85
|
[
"MIT"
] | 2
|
2021-03-10T13:13:46.000Z
|
2021-03-11T09:03:33.000Z
|
scripts/show_yolo.py
|
markpp/object_detectors
|
8a6cac32ec2d8b578c0d301feceef19390343e85
|
[
"MIT"
] | null | null | null |
scripts/show_yolo.py
|
markpp/object_detectors
|
8a6cac32ec2d8b578c0d301feceef19390343e85
|
[
"MIT"
] | null | null | null |
import os
import argparse
import numpy as np
import csv
import cv2
img_w = 0
img_h = 0
def relativ2pixel(detection, frameHeight, frameWidth):
center_x, center_y = int(detection[0] * frameWidth), int(detection[1] * frameHeight)
width, height = int(detection[2] * frameWidth), int(detection[3] * frameHeight)
left, top = int(center_x - width / 2), int(center_y - height / 2)
return [left, top, width, height]
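# get_bbs_from_file: read one bounding box per line (x1 y1 x2 y2 in relative coordinates)
# and convert each to [x, y, width, height].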
def get_bbs_from_file(path):
boxes_file = open(path,"r")
bb_lines = boxes_file.readlines()
bbs = []
for bb_line in bb_lines:
x1, y1, x2, y2 = bb_line.split(' ')
x1, y1, x2, y2 = float(x1), float(y1), float(x2), float(y2)
bbs.append([x1, y1, x2-x1, y2-y1])
return bbs
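# map_bbs_to_img: draw the relative [x, y, width, height] boxes onto the image in pixel coordinates.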
def map_bbs_to_img(img, bbs):
for bb in bbs:
h_pixels, w_pixels = img.shape[:2]
x1, y1, x2, y2 = int(bb[0]*w_pixels), int(bb[1]*h_pixels), int((bb[0]+bb[2])*w_pixels), int((bb[1]+bb[3])*h_pixels)
img = cv2.rectangle(img,(x1, y1),(x2, y2),(0,255,0),2)
return img
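# ResizeWithAspectRatio: resize the image while keeping its aspect ratio; returns the resized
# image and the inverse scale factor.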
def ResizeWithAspectRatio(image, width=None, height=None, inter=cv2.INTER_AREA):
dim = None
(h, w) = image.shape[:2]
if width is None and height is None:
return image
if width is None:
r = height / float(h)
dim = (int(w * r), height)
else:
r = width / float(w)
dim = (width, int(h * r))
return cv2.resize(image, dim), 1/r
if __name__ == "__main__":
"""
Command:
python show_yolo.py -g
"""
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-g", "--gt", type=str,
help="Path to gt bb .txt")
args = vars(ap.parse_args())
img_path = args["gt"].replace("txt", "png")
img = cv2.imread(img_path,-1)
if len(img.shape) < 3:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
# start a new yolo txt file with name of image
boxes = get_bbs_from_file(args["gt"])
img = map_bbs_to_img(img, boxes)
'''
if img.shape[0] > img.shape[1]:
img, _ = ResizeWithAspectRatio(img, height=1400)
else:
img, _ = ResizeWithAspectRatio(img, width=1400)
'''
'''
print(img.shape)
img_h, img_w = img.shape[1], img.shape[0]
boxes = []
lines = []
with open(args["gt"]) as f:
lines = f.read().splitlines()
for line in lines:
cl, c_x, c_y, w, h = line.split(' ')
boxes.append(relativ2pixel([float(c_x), float(c_y), float(w), float(h)], img_w, img_h))
for box in boxes:
print(box)
cv2.rectangle(img, (box[0],box[1]), (box[0]+box[2],box[1]+box[3]), (0,255,0), 1)
'''
cv2.putText(img, os.path.basename(img_path), (10,40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
cv2.imshow("output",img[-400:,:])
key = cv2.waitKey()
| 28.336634
| 123
| 0.589099
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 874
| 0.305381
|
3d770d3cc83356d38e93ea226253df080988393a
| 8,687
|
py
|
Python
|
xp/build/scripts/gg_post_process_xcode_project.py
|
vladcorneci/golden-gate
|
fab6e11c4df942c6a915328d805d3265f9ccc8e0
|
[
"Apache-2.0"
] | 262
|
2020-05-05T21:25:17.000Z
|
2022-03-22T09:11:15.000Z
|
xp/build/scripts/gg_post_process_xcode_project.py
|
vladcorneci/golden-gate
|
fab6e11c4df942c6a915328d805d3265f9ccc8e0
|
[
"Apache-2.0"
] | 22
|
2020-05-07T21:20:42.000Z
|
2022-02-25T02:44:50.000Z
|
xp/build/scripts/gg_post_process_xcode_project.py
|
vladcorneci/golden-gate
|
fab6e11c4df942c6a915328d805d3265f9ccc8e0
|
[
"Apache-2.0"
] | 18
|
2020-05-06T07:21:43.000Z
|
2022-02-08T09:49:23.000Z
|
#! /usr/bin/env python
# Copyright 2017-2020 Fitbit, Inc
# SPDX-License-Identifier: Apache-2.0
#####################################################################
# This script post-processes the XCode project generated
# by CMake, so that it no longer contains absolute paths.
# It also remaps UUIDs so that they are stable across invocations
# of this script, which allows the generated project to be put under
# source code control.
#####################################################################
#####################################################################
# Imports
#####################################################################
import sys
import re
import os
import shutil
#####################################################################
# Constants
#####################################################################
XCODE_PROJECT_FILE_NAME = "project.pbxproj"
#####################################################################
def print_usage_and_exit():
sys.stderr.write("""\
Usage: gg_post_process_xcode_project.py <project_file_in> <project_file_out> <gg_root> <gg_variant>
Where <project_file_in> is the XCode project generated by CMake,
<project_file_out> is the post-processed XCode project generated by
this script, <gg_root> is the directory where the GG repo is checked
out, and <gg_variant> is 'iOS' or 'macOS'
""")
sys.exit(1)
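# Illustrative invocation (hypothetical paths, not taken from this repository):
#   python gg_post_process_xcode_project.py build/cmake/xcode-iOS/GoldenGate.xcodeproj \
#       xp/GoldenGate-iOS.xcodeproj . iOS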
#####################################################################
def print_error(error):
sys.stderr.write("ERROR: %s\n" % (error))
#####################################################################
def replace_substrings(original, replacements):
cursor = 0
segments = []
for replacement in replacements:
start, end, string = replacement
segments.append(original[cursor:start])
segments.append(string)
cursor = end
segments.append(original[cursor:])
return "".join(segments)
#####################################################################
# Even after making paths relative, we still have some include paths
# that point to CMake-generated directories.
# They have the form: xp/build/cmake/<platform>
# We replace them by an equivalent, pointing to the `generated` subdir
# of xp/build
#####################################################################
def fix_header_search_paths(match):
return match.group(1) + match.group(2).replace('xp/build/cmake', 'xp/build/generated')
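# Illustrative effect (hypothetical entry, added for clarity): a setting such as
#   HEADER_SEARCH_PATHS = ("../xp/build/cmake/iOS/config")
# is rewritten to
#   HEADER_SEARCH_PATHS = ("../xp/build/generated/iOS/config")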
#####################################################################
def process_project_file(input_file, output_file, gg_root, uuid_prefix):
# Read the entire project file
project = open(os.path.join(input_file, XCODE_PROJECT_FILE_NAME), "r").read()
# Remove SYMROOT entries, so that we use the default location for XCode
project = re.sub(r'(SYMROOT = )', r'// Removed by GG script \1', project)
# Remove CONFIGURATION_BUILD_DIR entries
project = re.sub(r'(CONFIGURATION_BUILD_DIR = )', r'// Removed by GG script \1', project)
# Replace defaultConfigurationName to Release
project = re.sub(r'(defaultConfigurationName = Debug)', r'defaultConfigurationName = Release', project)
# Compute the relative path from the output project to the GG root
abs_output_dir_path = os.path.abspath(os.path.dirname(output_file))
abs_gg_root_path = os.path.abspath(gg_root)
abs_gg_xp_root_path = os.path.join(abs_gg_root_path, "xp")
gg_xp_root_relpath = os.path.relpath(abs_gg_xp_root_path, abs_output_dir_path)
# Rewrite the `projectDirPath` definition in the project
project_dir_path = "projectDirPath = " + gg_xp_root_relpath + ";"
project = re.sub(r'projectDirPath = \S+;', project_dir_path, project, 1)
# Replace absolute paths with paths relative to `projectDirPath`
project = re.sub(abs_gg_root_path, '..', project)
# Replace references to object files and libraries.
# They have the form: ../xp/<some-path>/<prefix>$(EFFECTIVE_PLATFORM_NAME)/<build-variant>/<object-name>
# We replace them with just the object name, relative to the built products directory.
# NOTE: those entries can end with a quote, or a whitespace
project = re.sub(r'(\.\./xp/\S+\$\(EFFECTIVE_PLATFORM_NAME\)/[^/ ]+/)([^/" ]+[" ])', r'$(BUILT_PRODUCTS_DIR)/\2', project)
# Scan for all entity IDs and store them in a map, associating them with
# a number equal to their order of appearance in the file
# Entity IDs generated by CMake: we're looking for a block of 24 uppercase hex chars
# preceded by whitespace and followed by whitespace or a separator
entity_id_pattern = re.compile(r'(\s)([0-9A-F]{24})(\s|[,;])')
entity_id_map = {}
entity_ids = entity_id_pattern.findall(project)
for (_, entity_id, _) in entity_ids:
if entity_id not in entity_id_map:
entity_id_map[entity_id] = "%s%022X" % (uuid_prefix, len(entity_id_map))
# Replace IDs with their mapped value
project = entity_id_pattern.sub(
lambda match: match.group(1) + entity_id_map[match.group(2)] + match.group(3), project)
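# Illustrative mapping (hypothetical input ID, added for clarity): with the iOS prefix '01',
# the first CMake-generated ID seen, e.g. 8A1B2C3D4E5F6071829ABCDE, is rewritten to
# 010000000000000000000000, the second to 010000000000000000000001, and so on, which is
# what keeps the IDs stable across invocations.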
# Fix HEADER_SEARCH_PATHS elements
# Look for: HEADER_SEARCH_PATHS = (...)
project = re.sub(r'(HEADER_SEARCH_PATHS\s*=\s*\()([^\(\)]+)', fix_header_search_paths, project)
# Fix Info.plist references
project = re.sub(r'(INFOPLIST_FILE\s*=\s*)"(.*GoldenGateXP\.dir/Info.plist)"',
r'\1"bundle/Info.plist"',
project)
# Replace the shell script generated by CMake for the gg-common target
# For simplicity, we just look for a `shellScript` entry with the term `gg-common` in it
gg_common_shell_script = 'shellScript = "$PROJECT_DIR/build/scripts/gg_process_version_info_header.py \\\"$PROJECT_FILE_PATH/..\\\"";'
gg_common_input_paths = 'inputPaths = ( "$(BUILT_PRODUCTS_DIR)" );'
gg_common_output_paths = 'outputPaths = ();'
project = re.sub(r'shellScript\s*=\s*".*gg-common_preBuildCommands.*";',
gg_common_shell_script + "\n" + gg_common_input_paths + "\n" + gg_common_output_paths,
project)
# Replace the ALL_BUILD shell script so that it doesn't depend on a CMake-generated script
# We use a script file that's just a comment, because we don't need to actually do anything
all_build_shell_script = 'shellScript = "# replaced by gg_post_process_xcode_project.py";'
project = re.sub(r'shellScript\s*=\s*".*ALL_BUILD_cmakeRulesBuildPhase.*";',
all_build_shell_script,
project)
open(os.path.join(output_file, XCODE_PROJECT_FILE_NAME), "w+").write(project)
#####################################################################
def copy_generated_files(gg_root, gg_variant_dir):
for filename in ["config/lwipopts.h"]:
src = os.path.join(gg_root, "xp/build/cmake", gg_variant_dir, filename)
dst = os.path.join(gg_root, "xp/build/generated", gg_variant_dir, filename)
if not os.path.exists(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
shutil.copyfile(src, dst)
#####################################################################
# main
#####################################################################
def main():
if len(sys.argv) != 5:
print_error("ERROR: invalid/missing arguments")
print_usage_and_exit()
# Assign the parameters
input_file = sys.argv[1]
output_file = sys.argv[2]
gg_root = sys.argv[3]
gg_variant = sys.argv[4]
# Check that the input and output project files are XCode projects (XCode Project files are directories that
# contain a project.pbxproj file, and other files). For the output, it is Ok that the project.pbxproj file
# doesn't yet exist, since we will be writing it
if not os.path.isfile(os.path.join(input_file, XCODE_PROJECT_FILE_NAME)):
print_error("ERROR: input file is not a valid XCode project")
return 1
if not os.path.isdir(output_file):
print_error("ERROR: output file is not a valid XCode project")
return 1
if not os.path.isdir(gg_root):
print_error("ERROR: Golden Gate root isn't a directory")
return 1
# Pick a UUID prefix based on the variant, to try and avoid having the same UUID in two
# different project files.
uuid_prefix_map = {
'iOS': '01',
'macOS': '02'
}
uuid_prefix = uuid_prefix_map.get(gg_variant, '00')
process_project_file(input_file, output_file, gg_root, uuid_prefix)
gg_variant_dir = 'xcode-' + gg_variant
copy_generated_files(gg_root, gg_variant_dir)
return 0
if __name__ == '__main__':
sys.exit(main())
| 44.778351
| 138
| 0.610568
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,041
| 0.580292
|
3d780dd389a1180a4ebe2e338ba4584066d6c9fa
| 3,091
|
py
|
Python
|
scripts/US-visa-early-appointment.py
|
atb00ker/scripts-lab
|
71a5cc9c7f301c274798686db4a227e84b65926a
|
[
"MIT"
] | 2
|
2020-03-16T17:18:20.000Z
|
2020-10-19T05:11:19.000Z
|
scripts/US-visa-early-appointment.py
|
atb00ker/scripts-lab
|
71a5cc9c7f301c274798686db4a227e84b65926a
|
[
"MIT"
] | null | null | null |
scripts/US-visa-early-appointment.py
|
atb00ker/scripts-lab
|
71a5cc9c7f301c274798686db4a227e84b65926a
|
[
"MIT"
] | null | null | null |
#!/bin/python3
# Application for getting an early US visa interview:
# The tool will scrape the CGI website and check for an
# available date before the current appointment date;
# if a date is available, the program will beep.
# NOTE: SET THESE GLOBAL VARIABLES BEFORE USE
# COOKIE: After you login, there is a `cookie`
# header sent in your request; paste
# the value of that variable here.
# CURRENT_APPOINTMENT_DATE: Date you currently have for the embassy appointment.
# CURRENT_VAC_DATE: Date you currently have for the VAC appointment.
import subprocess
import time
import os
# For users to change
CURRENT_APPOINTMENT_DATE = "March 22, 2019"
CURRENT_VAC_DATE = "March 11, 2019"
COOKIE = ""
# For developer usage only
AGENT = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.96 Safari/537.36"
SED_COMMAND = r"'s/First Available Appointment Is \w* //p'"
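# Note (added for clarity, not in the original): the sed expression strips the leading
# "First Available Appointment Is <word> " text, so `date` is expected to end up looking
# like "March 5, 2019." (trailing period included), matching the "%B %d, %Y." strptime
# format used below.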
def reqModprobe():
reqCmd = "sudo modprobe pcspkr;"
subprocess.Popen(reqCmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).stdout.read()
time.sleep(15)
def awesomeBeep():
while True:
beepCmd = "beep -f 659 -l 460 -n -f 784 -l 340 -n -f 659 -l 230 -n -f 659 -l 110 -n -f 880 -l 230 -n -f 659 -l 230 -n -f 587 -l 230 -n -f 659 -l 460 -n -f 988 -l 340 -n -f 659 -l 230 -n -f 659 -l 110 -n -f 1047 -l 230 -n -f 988 -l 230 -n -f 784 -l 230 -n -f 659 -l 230 -n -f 988 -l 230 -n -f 1318 -l 230 -n -f 659 -l 110 -n -f 587 -l 230 -n -f 587 -l 110 -n -f 494 -l 230 -n -f 740 -l 230 -n -f 659 -l 460"
subprocess.Popen(beepCmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).stdout.read()
time.sleep(15)
def checkAppointmentTime():
print("Started...")
while True:
time.sleep(60)
cmd = "curl -X GET -H 'Cookie: " + COOKIE + \
"' -H 'Host: cgifederal.secure.force.com' -H 'Referer: https://cgifederal.secure.force.com/apex/LoginLandingPage' -H 'User-Agent: " + \
AGENT + "' -s -i 'https://cgifederal.secure.force.com/applicanthome' | sed -n -e" + SED_COMMAND
date = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).stdout.read()
try:
strDate = date.strip().decode("utf-8")
availableDate = time.strptime(strDate, "%B %d, %Y.")
# print(availableDate)
except:
print(
"Getting incorrect date format: %s, please check the cookie variable" % date)
awesomeBeep()
currentDate = time.strptime(
CURRENT_APPOINTMENT_DATE, "%B %d, %Y")
vacDate = time.strptime(
CURRENT_VAC_DATE, "%B %d, %Y")
if currentDate > availableDate and vacDate < availableDate:
print(date.strip())
awesomeBeep()
if __name__ == "__main__":
reqModprobe()
checkAppointmentTime()
| 38.160494
| 413
| 0.591071
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,552
| 0.502103
|
3d781809d6d69006559cbf3b7edc3aab98c386ee
| 815
|
py
|
Python
|
LeetCode/lc960.py
|
SryImNoob/ProblemSet-1
|
70a4bf1519110ce4918b76b6b456520f713fa446
|
[
"MIT"
] | null | null | null |
LeetCode/lc960.py
|
SryImNoob/ProblemSet-1
|
70a4bf1519110ce4918b76b6b456520f713fa446
|
[
"MIT"
] | null | null | null |
LeetCode/lc960.py
|
SryImNoob/ProblemSet-1
|
70a4bf1519110ce4918b76b6b456520f713fa446
|
[
"MIT"
] | 2
|
2019-06-05T03:42:26.000Z
|
2020-10-14T05:57:37.000Z
|
def createArray(dims) :
if len(dims) == 1:
return [0 for _ in range(dims[0])]
return [createArray(dims[1:]) for _ in range(dims[0])]
def f(A, x, y):
m = len(A)
for i in range(m):
if A[i][x] > A[i][y]:
return 0
return 1
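# Note (added for clarity): f(A, x, y) returns 1 when column x can precede column y,
# i.e. A[i][x] <= A[i][y] holds for every row i, and 0 otherwise.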
class Solution(object):
def minDeletionSize(self, A):
"""
:type A: List[str]
:rtype: int
"""
n = len(A[0])
g = createArray([n, n])
for i in range(n):
for j in range(i+1, n):
g[i][j] = f(A, i, j)
dp = createArray([n])
for i in range(0, n):
dp[i] = 1
for j in range(0, i):
if g[j][i] == 1:
if dp[i] < dp[j] + 1:
dp[i] = dp[j] + 1
return n - max(dp)
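# Illustrative check (LeetCode 960 sample input, not part of the original file):
#   Solution().minDeletionSize(["babca", "bbazb"]) == 3
# dp[i] is the longest chain of mutually non-decreasing columns ending at column i,
# so the answer is the number of columns minus the longest such chain.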
| 23.970588
| 55
| 0.402454
| 542
| 0.665031
| 0
| 0
| 0
| 0
| 0
| 0
| 62
| 0.076074
|
3d7952d5919e3aadff896edcbf8705b6c7253f29
| 3,883
|
py
|
Python
|
src/misc_utils.py
|
wr339988/TencentAlgo19
|
6506bc47dbc301018064e96cd1e7528609b5cb6c
|
[
"Apache-2.0"
] | null | null | null |
src/misc_utils.py
|
wr339988/TencentAlgo19
|
6506bc47dbc301018064e96cd1e7528609b5cb6c
|
[
"Apache-2.0"
] | 4
|
2021-04-08T16:38:32.000Z
|
2021-04-12T08:36:59.000Z
|
src/misc_utils.py
|
wr339988/TencentAlgo19
|
6506bc47dbc301018064e96cd1e7528609b5cb6c
|
[
"Apache-2.0"
] | 1
|
2021-04-02T11:09:05.000Z
|
2021-04-02T11:09:05.000Z
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generally useful utility functions."""
from __future__ import print_function
import codecs
import collections
import json
import math
import os
import sys
import time
import numpy as np
import tensorflow as tf
import pandas as pd
def hash_single_batch(batch,hparams):
for b in batch:
for i in range(len(b)):
b[i]=abs(hash('key_'+str(i)+' value_'+str(b[i]))) % hparams.single_hash_num
return batch
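# Note (added for clarity, not in the original): this applies the hashing trick in place --
# each single-valued feature i with value v is replaced by
# abs(hash('key_i value_v')) % hparams.single_hash_num, i.e. a bucket id in
# [0, single_hash_num).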
def hash_multi_batch(batch,hparams):
lengths=0
for b in batch:
for i in range(len(b)):
b[i]=[abs(hash('key_'+str(i)+' value_'+str(x)))% hparams.multi_hash_num for x in str(b[i]).split()]
lengths=max(lengths,len(b[i]))
if len(b[i])==0:
b[i]=[abs(hash('key_'+str(i)+' value_'+str('<pad>')))% hparams.multi_hash_num]
batch_t=np.zeros((len(batch),len(hparams.multi_features),min(hparams.max_length,lengths)))
weights_t=np.zeros((len(batch),len(hparams.multi_features),min(hparams.max_length,lengths)))
for i in range(len(batch)):
for j in range(len(batch[i])):
for k in range(min(hparams.max_length,len(batch[i][j]))):
batch_t[i,j,k]=batch[i][j][k]
weights_t[i,j,k]=1
return batch_t,weights_t
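# Note (added for clarity, not in the original): hash_multi_batch returns a dense id tensor
# and a matching 0/1 mask, both of shape
# (batch, len(hparams.multi_features), min(hparams.max_length, longest_value_list)),
# zero-padded where a multi-valued feature has fewer tokens.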
def print_time(s, start_time):
"""Take a start time, print elapsed duration, and return a new time."""
print("%s, time %ds, %s." % (s, (time.time() - start_time), time.ctime()))
sys.stdout.flush()
return time.time()
def print_out(s, f=None, new_line=True):
"""Similar to print but with support to flush and output to a file."""
if isinstance(s, bytes):
s = s.decode("utf-8")
if f:
f.write(s.encode("utf-8"))
if new_line:
f.write(b"\n")
# stdout
out_s = s.encode("utf-8")
if not isinstance(out_s, str):
out_s = out_s.decode("utf-8")
print(out_s, end="", file=sys.stdout)
if new_line:
sys.stdout.write("\n")
sys.stdout.flush()
def print_step_info(prefix,epoch, global_step, info):
print_out("%sepoch %d step %d lr %g loss %.6f gN %.2f, %s" %
(prefix, epoch,global_step, info["learning_rate"],
info["train_ppl"], info["avg_grad_norm"], time.ctime()))
def print_hparams(hparams, skip_patterns=None, header=None):
"""Print hparams, can skip keys based on pattern."""
if header: print_out("%s" % header)
values = hparams.values()
for key in sorted(values.keys()):
if not skip_patterns or all(
[skip_pattern not in key for skip_pattern in skip_patterns]):
print_out(" %s=%s" % (key, str(values[key])))
def normalize(inputs, epsilon=1e-8):
'''
Applies layer normalization
Args:
inputs: A tensor with 2 or more dimensions
epsilon: A small float to prevent division by zero
Returns:
A tensor with the same shape and dtype as the input
'''
inputs_shape = inputs.get_shape()
params_shape = inputs_shape[-1:]
mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
beta = tf.Variable(tf.zeros(params_shape))
gamma = tf.Variable(tf.ones(params_shape))
normalized = (inputs - mean) / ((variance + epsilon) ** (.5))
outputs = gamma * normalized + beta
return outputs
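# Minimal usage sketch (assumes the TF1-style graph mode used elsewhere in this module):
#   x = tf.placeholder(tf.float32, [None, 128])
#   y = normalize(x)  # layer normalization over the last axis with learned gamma/beta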
| 33.188034
| 112
| 0.64409
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,346
| 0.346639
|
3d7a603d1af477e68cfea29362bbe8cb1160699c
| 10,713
|
py
|
Python
|
custom/icds_reports/ucr/tests/test_infra_form_ucr.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | 1
|
2020-07-14T13:00:23.000Z
|
2020-07-14T13:00:23.000Z
|
custom/icds_reports/ucr/tests/test_infra_form_ucr.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | 1
|
2021-06-02T04:45:16.000Z
|
2021-06-02T04:45:16.000Z
|
custom/icds_reports/ucr/tests/test_infra_form_ucr.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | null | null | null |
from mock import patch
from custom.icds_reports.ucr.tests.test_base_form_ucr import BaseFormsTest
@patch('custom.icds_reports.ucr.expressions._get_user_location_id',
lambda user_id: 'qwe56poiuytr4xcvbnmkjfghwerffdaa')
@patch('corehq.apps.locations.ucr_expressions._get_location_type_name',
lambda loc_id, context: 'awc')
class TestInfraForms(BaseFormsTest):
ucr_name = "static-icds-cas-static-infrastructure_form"
def test_infra_form_v10326(self):
self._test_data_source_results(
'infrastructure_details_v10326',
[{
"doc_id": None,
"submitted_on": None,
"month": None,
"where_housed": None,
"provided_building": None,
"other_building": None,
"awc_building": None,
"access_physically_challenged": '',
"toilet_facility": "2",
"type_toilet": None,
"source_drinking_water": "2",
"kitchen": '',
"space_storing_supplies": '',
"adequate_space_pse": '',
"space_pse": None,
"medicine_kits_available": 0,
"preschool_kit_available": None,
"baby_scale_available": 0,
"flat_scale_available": 0,
"adult_scale_available": 1,
"cooking_utensils_available": 0,
"iec_bcc_available": 0,
"nhed_kit_available": 0,
"referral_slip_available": 0,
"plates_available": 0,
"tumblers_available": 0,
"measure_cups_available": 0,
"food_storage_available": 0,
"water_storage_available": 0,
"chair_available": 0,
"table_available": 0,
"mats_available": 0,
"medicine_kits_usable": 0,
"preschool_kit_usable": None,
"baby_scale_usable": 0,
"flat_scale_usable": 0,
"adult_scale_usable": 0,
"cooking_utensils_usable": 0,
"iec_bcc_usable": 0,
"nhed_kit_usable": 0,
"referral_slip_usable": 0,
"plates_usable": 0,
"tumblers_usable": 0,
"measure_cups_usable": 0,
"food_storage_usable": 0,
"water_storage_usable": 0,
"chair_usable": 0,
"table_usable": 0,
"mats_usable": 0,
"use_salt": 0,
"type_of_building": None,
"type_of_building_pucca": 0,
"type_of_building_semi_pucca": 0,
"type_of_building_kuccha": 0,
"type_of_building_partial_covered_space": 0,
"clean_water": 1,
"functional_toilet": 0,
"has_adequate_space_pse": 0,
"electricity_awc": None,
"infantometer": None,
"stadiometer": None,
}])
def test_infra_form_v10475(self):
self._test_data_source_results(
'infrastructure_details_v10475',
[{
"doc_id": None,
"submitted_on": None,
"month": None,
"where_housed": None,
"provided_building": None,
"other_building": None,
"awc_building": None,
"access_physically_challenged": '1',
"toilet_facility": '1',
"type_toilet": '1',
"source_drinking_water": '2',
"kitchen": '1',
"space_storing_supplies": '1',
"adequate_space_pse": '1',
"space_pse": '1',
"medicine_kits_available": 1,
"preschool_kit_available": 1,
"baby_scale_available": 0,
"flat_scale_available": 1,
"adult_scale_available": 1,
"cooking_utensils_available": 1,
"iec_bcc_available": 0,
"nhed_kit_available": 0,
"referral_slip_available": 1,
"plates_available": 1,
"tumblers_available": 1,
"measure_cups_available": 0,
"food_storage_available": 1,
"water_storage_available": 1,
"chair_available": 1,
"table_available": 1,
"mats_available": 1,
"medicine_kits_usable": 1,
"preschool_kit_usable": 1,
"baby_scale_usable": 0,
"flat_scale_usable": 0,
"adult_scale_usable": 1,
"cooking_utensils_usable": 1,
"iec_bcc_usable": 0,
"nhed_kit_usable": 0,
"referral_slip_usable": 1,
"plates_usable": 1,
"tumblers_usable": 1,
"measure_cups_usable": 0,
"food_storage_usable": 1,
"water_storage_usable": 1,
"chair_usable": 1,
"table_usable": 1,
"mats_usable": 1,
"use_salt": 1,
"type_of_building": None,
"type_of_building_pucca": 0,
"type_of_building_semi_pucca": 0,
"type_of_building_kuccha": 0,
"type_of_building_partial_covered_space": 0,
"clean_water": 1,
"functional_toilet": 1,
"has_adequate_space_pse": 1,
"electricity_awc": 1,
"infantometer": 1,
"stadiometer": 1,
}])
@patch('custom.icds_reports.ucr.expressions._get_user_location_id',
lambda user_id: 'qwe56poiuytr4xcvbnmkjfghwerffdaa')
@patch('corehq.apps.locations.ucr_expressions._get_location_type_name',
lambda loc_id, context: 'awc')
class TestInfraFormsV2(BaseFormsTest):
ucr_name = "static-icds-cas-static-infrastructure_form_v2"
def test_infra_form_v10326(self):
self._test_data_source_results(
'infrastructure_details_v10326',
[{
"doc_id": None,
"timeend": None,
"where_housed": None,
"provided_building": None,
"other_building": None,
"awc_building": None,
"access_physically_challenged": None,
"toilet_facility": 2,
"type_toilet": None,
"source_drinking_water": 2,
"kitchen": None,
"space_storing_supplies": None,
"adequate_space_pse": None,
"space_pse": None,
"medicine_kits_available": None,
"preschool_kit_available": None,
"baby_scale_available": 0,
"flat_scale_available": None,
"adult_scale_available": 1,
"cooking_utensils_available": None,
"iec_bcc_available": None,
"nhed_kit_available": None,
"referral_slip_available": None,
"plates_available": None,
"tumblers_available": None,
"measure_cups_available": None,
"food_storage_available": None,
"water_storage_available": None,
"chair_available": None,
"table_available": None,
"mats_available": None,
"medicine_kits_usable": None,
"preschool_kit_usable": None,
"baby_scale_usable": None,
"adult_scale_usable": None,
"cooking_utensils_usable": None,
"iec_bcc_usable": None,
"nhed_kit_usable": None,
"referral_slip_usable": None,
"plates_usable": None,
"tumblers_usable": None,
"measure_cups_usable": None,
"food_storage_usable": None,
"water_storage_usable": None,
"chair_usable": None,
"table_usable": None,
"mats_usable": None,
"use_salt": None,
"toilet_functional": None,
"electricity_awc": None,
"infantometer_usable": None,
"stadiometer_usable": None,
}])
def test_infra_form_v10475(self):
self._test_data_source_results(
'infrastructure_details_v10475',
[{
"doc_id": None,
"timeend": None,
"where_housed": None,
"provided_building": None,
"other_building": None,
"awc_building": None,
"access_physically_challenged": 1,
"toilet_facility": 1,
"type_toilet": 1,
"source_drinking_water": 2,
"kitchen": 1,
"space_storing_supplies": 1,
"adequate_space_pse": 1,
"space_pse": 1,
"medicine_kits_available": 1,
"preschool_kit_available": 1,
"baby_scale_available": 0,
"flat_scale_available": 1,
"adult_scale_available": 1,
"cooking_utensils_available": 1,
"iec_bcc_available": 0,
"nhed_kit_available": 0,
"referral_slip_available": 1,
"plates_available": 1,
"tumblers_available": 1,
"measure_cups_available": 0,
"food_storage_available": 1,
"water_storage_available": 1,
"chair_available": 1,
"table_available": 1,
"mats_available": 1,
"medicine_kits_usable": 1,
"preschool_kit_usable": 1,
"baby_scale_usable": 0,
"adult_scale_usable": 1,
"cooking_utensils_usable": 1,
"iec_bcc_usable": 0,
"nhed_kit_usable": 0,
"referral_slip_usable": 1,
"plates_usable": 1,
"tumblers_usable": 1,
"measure_cups_usable": 0,
"food_storage_usable": 1,
"water_storage_usable": 1,
"chair_usable": 1,
"table_usable": 1,
"mats_usable": 1,
"use_salt": 1,
"toilet_functional": 1,
"electricity_awc": 1,
"infantometer_usable": 1,
"stadiometer_usable": 1,
}])
| 39.677778
| 74
| 0.493419
| 10,134
| 0.945954
| 0
| 0
| 10,608
| 0.990199
| 0
| 0
| 4,891
| 0.456548
|
3d7ab6cf1374f5cd2e87a03c6e24173bb82d35b7
| 2,898
|
py
|
Python
|
uq_benchmark_2019/imagenet/end_to_end_test.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 23,901
|
2018-10-04T19:48:53.000Z
|
2022-03-31T21:27:42.000Z
|
uq_benchmark_2019/imagenet/end_to_end_test.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 891
|
2018-11-10T06:16:13.000Z
|
2022-03-31T10:42:34.000Z
|
uq_benchmark_2019/imagenet/end_to_end_test.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 6,047
|
2018-10-12T06:31:02.000Z
|
2022-03-31T13:59:28.000Z
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""End-to-end test for ImageNet.
Tests for imagenet.resnet50_train, run_predict, run_temp_scaling, and
run_metrics. Real data doesn't work under blaze, so execute the test binary
directly.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
import tensorflow.compat.v2 as tf
from uq_benchmark_2019.imagenet import resnet50_train # pylint: disable=line-too-long
from uq_benchmark_2019.imagenet import run_metrics
from uq_benchmark_2019.imagenet import run_predict
from uq_benchmark_2019.imagenet import run_temp_scaling
gfile = tf.io.gfile
flags.DEFINE_bool('fake_data', True, 'Use dummy random data.')
flags.DEFINE_bool('fake_training', True, 'Train with trivial number of steps.')
DATA_NAMES = ['train', 'test', 'corrupt-static-gaussian_noise-2', 'celeb_a']
METHODS = ['vanilla', 'll_dropout', 'll_svi', 'dropout']
class EndToEndTest(parameterized.TestCase):
@parameterized.parameters(*[(d, m) for d in DATA_NAMES for m in METHODS]) # pylint: disable=g-complex-comprehension
def test_end_to_end_train(self, data_name, method):
with tempfile.TemporaryDirectory() as model_dir:
metrics = ['sparse_categorical_crossentropy']
if flags.FLAGS.fake_data and (data_name != 'test'):
pass
else:
temp_model_dir = os.path.join(model_dir, data_name, method)
resnet50_train.run(
method, temp_model_dir, task_number=0, use_tpu=False, tpu=None,
metrics=metrics, fake_data=flags.FLAGS.fake_data,
fake_training=flags.FLAGS.fake_training)
run_predict.run(
data_name, temp_model_dir, batch_size=8, predictions_per_example=4,
max_examples=44, output_dir=temp_model_dir,
fake_data=flags.FLAGS.fake_data)
tmpl = os.path.join(temp_model_dir, '*_small_*')
glob_results = gfile.glob(tmpl)
path = glob_results[0]
if data_name == 'valid':
run_temp_scaling(path)
run_metrics.run(path, path, model_dir_ensemble=None,
use_temp_scaling=False)
if __name__ == '__main__':
absltest.main()
| 35.341463
| 118
| 0.733954
| 1,203
| 0.415114
| 0
| 0
| 1,156
| 0.398896
| 0
| 0
| 1,133
| 0.390959
|
3d7b2d7375396a8c241a8c99281ec5431deb5055
| 1,257
|
py
|
Python
|
tests/windows/get_physicaldisk/test_getting_unique_ids_from_output.py
|
Abd-Elrazek/InQRy
|
ab9d19a737a41673e8dcc419d49ca0e96476d560
|
[
"MIT"
] | 37
|
2017-05-12T02:32:26.000Z
|
2019-05-03T14:43:08.000Z
|
tests/windows/get_physicaldisk/test_getting_unique_ids_from_output.py
|
Abd-Elrazek/InQRy
|
ab9d19a737a41673e8dcc419d49ca0e96476d560
|
[
"MIT"
] | 11
|
2017-08-27T03:36:18.000Z
|
2018-10-28T01:31:12.000Z
|
tests/windows/get_physicaldisk/test_getting_unique_ids_from_output.py
|
Abd-Elrazek/InQRy
|
ab9d19a737a41673e8dcc419d49ca0e96476d560
|
[
"MIT"
] | 15
|
2019-06-13T11:29:12.000Z
|
2022-02-28T06:40:14.000Z
|
from inqry.system_specs import win_physical_disk
UNIQUE_ID_OUTPUT = """
UniqueId
--------
{256a2559-ce63-5434-1bee-3ff629daa3a7}
{4069d186-f178-856e-cff3-ba250c28446d}
{4da19f06-2e28-2722-a0fb-33c02696abcd}
50014EE20D887D66
eui.0025384161B6798A
5000C5007A75E216
500A07510F1A545C
ATA LITEONIT LMT-256M6M mSATA 256GB TW0XXM305508532M0705
IDE\Diskpacker-virtualbox-iso-1421140659-disk1__F.R7BNPC\5&1944dbef&0&0.0.0:vagrant-2012-r2
"""
def test_creating_list_of_unique_disk_ids():
expected_physical_disks = {'{256a2559-ce63-5434-1bee-3ff629daa3a7}',
'{4069d186-f178-856e-cff3-ba250c28446d}',
'{4da19f06-2e28-2722-a0fb-33c02696abcd}',
'50014EE20D887D66',
'eui.0025384161B6798A',
'5000C5007A75E216',
'500A07510F1A545C',
'ATA LITEONIT LMT-256M6M mSATA 256GB TW0XXM305508532M0705',
"IDE\Diskpacker-virtualbox-iso-1421140659-disk1__F.R7BNPC\5&1944dbef&0&0.0.0:vagrant-2012-r2"}
assert expected_physical_disks == set(win_physical_disk.get_physical_disk_identifiers(UNIQUE_ID_OUTPUT))
| 43.344828
| 125
| 0.638823
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 734
| 0.58393
|
3d7ca16d1d0cb0fd5ce512de12142e0f598017a2
| 572
|
py
|
Python
|
app/models/link.py
|
aries-zhang/flask-template
|
369d77f2910f653f46668dd9bda735954b6c145e
|
[
"MIT"
] | null | null | null |
app/models/link.py
|
aries-zhang/flask-template
|
369d77f2910f653f46668dd9bda735954b6c145e
|
[
"MIT"
] | null | null | null |
app/models/link.py
|
aries-zhang/flask-template
|
369d77f2910f653f46668dd9bda735954b6c145e
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from app import db
class Link(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String)
url = db.Column(db.String)
description = db.Column(db.String)
type = db.Column(db.Integer)
enabled = db.Column(db.Boolean)
createtime = db.Column(db.DateTime)
def __init__(self, title, url, description, type, enabled):
self.title = title
self.url = url
self.description = description
self.type = type
self.enabled = enabled
self.createtime = datetime.utcnow()  # DateTime column expects a datetime, not a float timestamp
| 27.238095
| 63
| 0.63986
| 530
| 0.926573
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.01049
|
3d7e43dc6fabcfe8138a99da18574265d9a525c8
| 1,786
|
py
|
Python
|
pyopenproject/business/services/command/priority/find_all.py
|
webu/pyopenproject
|
40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966
|
[
"MIT"
] | 5
|
2021-02-25T15:54:28.000Z
|
2021-04-22T15:43:36.000Z
|
pyopenproject/business/services/command/priority/find_all.py
|
webu/pyopenproject
|
40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966
|
[
"MIT"
] | 7
|
2021-03-15T16:26:23.000Z
|
2022-03-16T13:45:18.000Z
|
pyopenproject/business/services/command/priority/find_all.py
|
webu/pyopenproject
|
40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966
|
[
"MIT"
] | 6
|
2021-06-18T18:59:11.000Z
|
2022-03-27T04:58:52.000Z
|
from pyopenproject.api_connection.exceptions.request_exception import RequestError
from pyopenproject.api_connection.requests.get_request import GetRequest
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.business.services.command.find_list_command import FindListCommand
from pyopenproject.business.services.command.priority.priority_command import PriorityCommand
from pyopenproject.business.util.filters import Filters
from pyopenproject.business.util.url import URL
from pyopenproject.business.util.url_parameter import URLParameter
from pyopenproject.model.priority import Priority
class FindAll(PriorityCommand):
def __init__(self, connection, offset, page_size, filters, sort_by):
super().__init__(connection)
self.offset = offset
self.page_size = page_size
self.filters = filters
self.sort_by = sort_by
def execute(self):
try:
request = GetRequest(self.connection, str(URL(f"{self.CONTEXT}",
[
Filters(
self.filters),
URLParameter
("sortBy", self.sort_by)
])))
return FindListCommand(self.connection, request, Priority).execute()
# for priority in json_obj["_embedded"]["elements"]:
# yield Priority(priority)
except RequestError as re:
raise BusinessError("Error finding all priorities") from re
| 49.611111
| 93
| 0.594625
| 1,152
| 0.645017
| 0
| 0
| 0
| 0
| 0
| 0
| 137
| 0.076708
|
3d7f09d4c114419bab9ec9c8e10674cc7fff831b
| 1,745
|
py
|
Python
|
photos/tests/test_views.py
|
AndreasMilants/django-photos
|
721c2515879a424333859ac48f65d6382b7a48d4
|
[
"BSD-3-Clause"
] | null | null | null |
photos/tests/test_views.py
|
AndreasMilants/django-photos
|
721c2515879a424333859ac48f65d6382b7a48d4
|
[
"BSD-3-Clause"
] | null | null | null |
photos/tests/test_views.py
|
AndreasMilants/django-photos
|
721c2515879a424333859ac48f65d6382b7a48d4
|
[
"BSD-3-Clause"
] | null | null | null |
from django.test import TestCase
from django.urls import reverse_lazy
from ..models import PHOTO_MODEL, UploadedPhotoModel, IMAGE_SIZES
from .model_factories import get_image_file, get_zip_file
import time
from uuid import uuid4
class UploadPhotoApiViewTest(TestCase):
def check_photo_ok_and_delete(self, photo):
self.assertTrue(photo.image.storage.exists(photo.image.name))
for size in IMAGE_SIZES.values():
self.assertTrue(photo.image.storage.exists(photo.get_filepath_for_size(size)))
photo.delete()
def test_upload_photo(self):
self.client.post(reverse_lazy('image_upload'), {'file': get_image_file(), 'upload_id': str(uuid4())})
time.sleep(1) # Different process implementations might need a little bit longer
self.assertEqual(1, PHOTO_MODEL.objects.count())
self.assertEqual(1, UploadedPhotoModel.objects.count())
self.assertEqual(PHOTO_MODEL.objects.first(), UploadedPhotoModel.objects.first().photo)
photo = PHOTO_MODEL.objects.first()
self.check_photo_ok_and_delete(photo)
UploadedPhotoModel.objects.all().delete()
def test_upload_zip(self):
zip_file = get_zip_file(images=[get_image_file(name='img1.png'), get_image_file(name='img2.png')])
self.client.post(reverse_lazy('image_upload'), {'file': zip_file, 'upload_id': str(uuid4())})
time.sleep(1) # Different process implementations might need a little bit longer
self.assertEqual(2, PHOTO_MODEL.objects.count())
self.assertEqual(2, UploadedPhotoModel.objects.count())
for photo in PHOTO_MODEL.objects.all():
self.check_photo_ok_and_delete(photo)
UploadedPhotoModel.objects.all().delete()
| 39.659091
| 109
| 0.719198
| 1,513
| 0.867049
| 0
| 0
| 0
| 0
| 0
| 0
| 214
| 0.122636
|
3d81056b0a805d88fa50d75883361df24c0f7eae
| 16,756
|
py
|
Python
|
app.py
|
cherishsince/PUBG_USB
|
f9b06d213a0fe294afe4cf2cf6dccce4bb363062
|
[
"MulanPSL-1.0"
] | 46
|
2020-07-04T13:33:40.000Z
|
2022-03-29T13:42:29.000Z
|
app.py
|
kiminh/PUBG_USB
|
f3a1fa1aedce751fc48aeefd60699a1f02a29a70
|
[
"MulanPSL-1.0"
] | 1
|
2020-09-01T01:58:29.000Z
|
2020-09-06T11:45:46.000Z
|
app.py
|
kiminh/PUBG_USB
|
f3a1fa1aedce751fc48aeefd60699a1f02a29a70
|
[
"MulanPSL-1.0"
] | 21
|
2020-07-08T07:53:56.000Z
|
2022-02-02T23:43:56.000Z
|
import os
import time
from PIL import Image
import pyscreenshot as ImageGrab
import resource
from drive import box_drive64
from util import image_util, data_config_parser
from util.data_parser import read_data
from weapon import weapon, page_check, weapon_selection, left_right_correction
import environment
import threadpool
import threading
import pythoncom
import PyHook3
import logging
from util import common
# Start the thread pool
_executor = environment.env.executor
# Recognized attachment (parts) info
_identifying_parts = []
# Parameters produced by init()
_lib, _handle, _init_data, _init_weapon_name_data = -1, -1, [], {}
# Config file data
_config_data = {}
# Current weapon config data
_current_config_data = None
_current_parts = None
# Whether the player is firing
_has_shoot = False
_shoot_task = None
# Whether a Tab press is being handled, used to debounce repeated Tab presses
_has_tab_open = False
# Weapon switching flag, avoids handling repeated key presses
_has_open_selection = False
# Whether a weapon is currently selected
_has_selection = False
# Whether the weapon attachments have been recognized
_has_identification = False
# Weapon selection
_weapon_select = 1
# Shot count
_shoot_count = 0
# Shot correction
_shoot_correction = 0
# Captured screenshot image
_capture_image = None
def onMouseEvent(event):
"""
Mouse event callback
:param event:
:return:
"""
global _has_shoot, _executor, _shoot_task, _has_identification
# Mouse wheel button: 522
# Left mouse button pressed
if event.Message == 513:
logging.debug("鼠标 513 -> {}".format(event.MessageName))
_has_shoot = True
# Only handle the mouse once the weapon has been recognized
# if _has_identification:
# _shoot_task = _executor.submit(handle_shoot_correction)
# _shoot_task = _executor.submit(handle_control_shoot)
# Left mouse button released
elif event.Message == 514:
logging.debug("鼠标 514 -> {}".format(event.MessageName))
_has_shoot = False
if _shoot_task is not None:
print('取消....')
# Right mouse button pressed
elif event.Message == 516:
logging.debug("鼠标右键键 516 -> {}".format(event.MessageName))
# Right mouse button released
elif event.Message == 517:
logging.debug("鼠标右键键 517 -> {}".format(event.MessageName))
else:
pass
return True
def onKeyboardEvent(event):
"""
Keyboard event listener
:param event:
:return:
"""
global _has_tab_open, _executor
keyid = event.KeyID
# 1 49,2 50,3 51
if keyid == 9:
# Tab key
logging.debug('tab 按键')
# Run the handler in a worker thread
if not _has_tab_open:
_has_tab_open = True
_executor.submit(handle_tab)
if keyid == 49 or keyid == 50 or keyid == 51:
print('123')
# Weapon selection
# if not _has_open_selection:
# _executor.submit(handle_weapon_select)
else:
pass
return True
def handle_capture_image():
"""
Continuously capture the screen for image analysis; each capture takes roughly:
0.03366827964782715
0.03325605392456055
0.03352046012878418
0.033231496810913086
0.033119916915893555
0.034018754959106445
:return:
"""
global _capture_image
while 1:
_capture_image = image_util.capture(None)
time.sleep(0.2)
"""
/////////////
Event control and switching
/////////////
"""
def handle_tab():
"""
Handle the Tab key event
:return:
"""
global _identifying_parts, _lib, _handle, _init_data, _init_weapon_name_data, \
_has_tab_open, _config_data, _has_identification, _executor, _capture_image
# time.sleep(0.5)
# image = image_util.capture()
try:
# Check whether the inventory (backpack) page is open
# image = image_util.capture(None)
package_positions = page_check.package_positions()
package_position_images = page_check.package_positions_images(_capture_image, package_positions)
has_package_page = page_check.has_package_page(package_position_images)
# Draw the guide lines
# image_util.drawing_line(image, package_positions)
# image.show()
# package_position_images[0].show()
# If it is not, return
print('是否背包页面 {}'.format(has_package_page))
if not has_package_page:
return
# Get the attachment (parts) info
main_positions = weapon.main_weapon_parts_positions()
main_parts_images = weapon.get_weapon_parts(_capture_image, main_positions)
# Recognize the attachments
now = time.time()
identifying_parts = weapon.identifying_parts(_init_data, _init_weapon_name_data, main_parts_images)
print(identifying_parts)
if len(identifying_parts) <= 0:
print('未获取到武器信息 不更新武器信息!')
return
_identifying_parts = identifying_parts
print("识别耗时 {}".format(time.time() - now))
# Recognition succeeded
_has_identification = True
# Select the weapon
# _executor.submit(handle_weapon_select)
except Exception as e:
print(e)
finally:
# Mark the handling as finished
_has_tab_open = False
def capture_selection():
"""
Take a screenshot
:return:
"""
# Screenshot image
if environment.is_debug():
# path = resource.resource_path(os.path.join('img', 'screenshot', '20190413085144_2.jpg'))
# image = Image.open(path)
image = ImageGrab.grab()
else:
image = ImageGrab.grab()
return image
def handle_weapon_select():
"""
Handle weapon selection, which determines the recoil-control data
:return:
"""
global _identifying_parts, _lib, _handle, _init_data, _init_weapon_name_data, \
_has_tab_open, _config_data, _current_config_data, _current_parts, \
_has_open_selection, _has_selection, _capture_image
weapon_positions = weapon_selection.weapon_positions()
while True:
try:
# Get the currently selected weapon
# image = capture_selection()
weapon_images = weapon_selection.weapon_selection_images(_capture_image, weapon_positions)
weapon_index = weapon_selection.get_selection(weapon_images)
# logging.info('选择武器成功! {}'.format(weapon_index))
if weapon_index is None:
# Brief delay before retrying
_has_selection = False
time.sleep(0.6)
logging.debug('为选择武器!')
# print('为选择武器')
continue
logging.info('选择武器成功! {}'.format(weapon_index))
# Use the recognized data to look up the matching recoil-control data
index = 0
for parts_info in _identifying_parts:
index = index + 1
if weapon_index != index:
continue
if parts_info['name'] is None:
continue
weapon_config_data = _config_data[parts_info['name']]
if weapon_config_data is None:
logging.info('没有找到压枪数据 {}', parts_info)
# Store the recognized parts and the matching config data
_current_parts = parts_info
_current_config_data = weapon_config_data
_has_open_selection = False
_has_selection = True
break
# Brief delay before the next check
time.sleep(0.6)
except Exception as e:
print(e)
def handle_shoot_correction():
"""
Handle left/right shot correction
:return:
"""
global _lib, _handle, _init_data, _init_weapon_name_data, _has_identification, \
_has_shoot, _current_config_data, _current_parts, _shoot_count, _shoot_correction
correction_positions = left_right_correction.get_positions()
# Initialize to 0 first
_shoot_correction = 0
# Record of the first sample
corr_first_diff = None
while True:
# If nothing has been recognized yet
if not _has_identification:
time.sleep(0.1)
continue
# Just continue; do not leave the loop, since re-creating threads wastes memory
if not _has_shoot:
time.sleep(0.1)
# Initialize to 0 first
_shoot_correction = 0
# Record of the first sample
corr_first_diff = None
continue
now1 = time.time()
overtime = None
# Scope (sight) info
has_left_right_correction = _current_config_data.left_right_correction
# Configure the timeout
speed = _current_config_data.speed
if has_left_right_correction == 1:
overtime = now1 + speed - 0.01
if overtime is None:
logging.debug('error 没有起开数据修正')
now = time.time()
# Left/right correction
# image = image_util.capture(None)
image = _capture_image
if corr_first_diff is None:
position_images = left_right_correction.get_position_images(image, correction_positions)
corr_first_1, corr_first_2 = left_right_correction.correction(position_images)
corr_first_diff = corr_first_1 + corr_first_2
else:
# Keep sampling while the action continues
position_images = left_right_correction.get_position_images(image, correction_positions)
corr_first_1, corr_first_2 = left_right_correction.correction(position_images)
corr_diff = corr_first_1 + corr_first_2
x_diff = corr_first_diff - corr_diff
# Compute the offset
if x_diff < 0:
_shoot_correction = abs(x_diff)
elif x_diff > 0:
_shoot_correction = -abs(x_diff)
# Busy-wait instead of a plain sleep; every recoil step must take the same amount of time
now2 = time.time()
while True:
time.sleep(0.005)
if overtime <= time.time():
break
logging.info('处理图片 {} {} 修正的数据 {}'.format(now2 - now, time.time() - now, _shoot_correction))
def handle_control_shoot():
"""
Control shooting (recoil compensation)
:return:
"""
global _lib, _handle, _init_data, _init_weapon_name_data, _has_identification, \
_has_shoot, _current_config_data, _current_parts, _shoot_count, \
_shoot_correction, _has_selection, _capture_image
try:
while True:
# If nothing has been recognized yet
if not _has_identification:
time.sleep(0.1)
continue
# Just continue; do not leave the loop, since re-creating threads wastes memory
if not _has_shoot:
time.sleep(0.1)
_shoot_count = 0
continue
# If no config data is available, skip this round
if _current_config_data is None:
time.sleep(0.1)
print('_current_config_data')
continue
if not _has_selection:
time.sleep(0.1)
print('_has_selection')
continue
y = 0
x = 0
# Initialize _shoot_count at the start of each round
now = time.time()
# Track the time; every shot must be spaced by the same interval
overtime1 = None
overtime2 = None
# Check whether shooting is possible
shoot_images = page_check.shoot_images(_capture_image, page_check.shoot_positions())
has_shoot = page_check.check_shoot(shoot_images)
if not has_shoot:
time.sleep(0.1)
continue
# Scope (sight) info
has_left_right_correction = _current_config_data.left_right_correction
# Configure the timeout
speed = _current_config_data.speed
if has_left_right_correction == 1:
overtime1 = now + speed - 0.02
overtime2 = now + speed
# Check the shooting stance
stance_images = page_check.stance_images(_capture_image, page_check.stance_positions())
stance = page_check.check_stance(stance_images)
if stance is None:
stance = 'stand'
shoot_type = stance
# Get the sight data
parts5_value = _current_parts['parts5']
if parts5_value is None:
parts5_value = 1
sight = _current_config_data.sight
shoot_type_data = sight[shoot_type]
has_parts5_value = common.arr_contain(shoot_type_data.keys(), str(parts5_value))
if has_parts5_value:
shoot_type_data2 = shoot_type_data[str(parts5_value)]
y = y + mouse_calc_config_data(_shoot_count, shoot_type_data2)
# Muzzle info
parts1_values = _current_parts['parts1']
if parts1_values is not None:
muzzle = _current_config_data.muzzle
muzzle_type_data = muzzle[shoot_type]
has_muzzle_type_data = common.arr_contain(muzzle_type_data.keys(), str(parts1_values))
if has_muzzle_type_data:
muzzle_type_data2 = muzzle_type_data[parts1_values]
y = y + mouse_calc_config_data(_shoot_count, muzzle_type_data2)
# Grip
parts2_values = _current_parts['parts2']
if parts2_values is not None:
grip = _current_config_data.grip
grip_type_data = grip[shoot_type]
has_grip_type_data = common.arr_contain(grip_type_data.keys(), str(parts2_values))
if has_grip_type_data:
grip_type_data2 = grip_type_data[parts2_values]
y = y + mouse_calc_config_data(_shoot_count, grip_type_data2)
# Stock (butt)
parts4_values = _current_parts['parts4']
if parts4_values is not None:
butt = _current_config_data.butt
butt_type_data = butt[shoot_type]
has_butt_type_data = common.arr_contain(butt_type_data.keys(), str(parts2_values))
if has_butt_type_data:
butt_type_data2 = butt_type_data[parts2_values]
y = y + mouse_calc_config_data(_shoot_count, butt_type_data2)
# Busy-wait instead of a plain sleep; every recoil step must take the same amount of time
while 1:
# now9 = time.time()
time.sleep(0.001)
# print('休眠时间{}'.format(time.time() - now9))
if overtime1 <= time.time():
break
# Move the mouse
x = _shoot_correction
box_drive64.mouse_move_r(_lib, _handle, x, y)
_shoot_count = _shoot_count + 1
# Busy-wait instead of a plain sleep; every recoil step must take the same amount of time
while 1:
# now9 = time.time()
time.sleep(0.001)
# print('休眠时间{}'.format(time.time() - now9))
if overtime2 <= time.time():
break
logging.info("鼠标移动 射击子弹 {} 鼠标x {} 鼠标y {} 射击姿势 {} 耗时:{}"
.format(_shoot_count - 1, x, y, shoot_type, time.time() - now))
except Exception as e:
print(e)
finally:
print('finally')
def mouse_calc_config_data(count, data_arr):
"""
Compute the mouse movement from the data_config data
:return:
"""
for i in range(len(data_arr)):
data = data_arr[len(data_arr) - 1 - i]
max_count = data[0]
move_speed = data[1]
if count >= max_count:
# print("move_speed {}", move_speed)
return move_speed
return 0
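# Illustrative example (hypothetical data, added for clarity): with
#   data_arr = [[0, 5], [10, 7]]
# mouse_calc_config_data(3, data_arr) returns 5 and mouse_calc_config_data(12, data_arr)
# returns 7, i.e. (assuming data_arr is sorted by max_count) the entry with the largest
# max_count not exceeding the shot count wins.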
"""
/////////////
Data preparation
/////////////
"""
def init():
global _lib, _handle, _init_data, _init_weapon_name_data, _config_data
# Initialize the driver
path = resource.resource_path('box64.dll')
if environment.env.usb_has_default == 1:
vid = None
pid = None
_lib, _handle = box_drive64.init(path, vid, pid)
else:
vid = 0xc230
pid = 0x6899
_lib, _handle = box_drive64.init(path, vid, pid)
box_drive64.mouse_move_r(_lib, _handle, 0, 200)
logging.info('加载 drive 成功!')
# Read the config file
if os.path.exists('data_config'):
config_data_path = os.path.join(os.getcwd(), 'data_config')
print('加载外部 data_config 配置文件..')
else:
print('加载exe data_config 配置文件..')
config_data_path = resource.resource_path('data_config')
_config_data = data_config_parser.parser(config_data_path)
logging.info('加载 data_config 成功!')
# Initialize the attachment (parts) image data
parts_path = resource.resource_path(os.path.join('img', 'parts'))
weapon_name_path = resource.resource_path(os.path.join('img', 'weapon_name'))
_init_data = weapon.init_parts(parts_path)
_init_weapon_name_data = weapon.init_weapon_name(weapon_name_path)
logging.info('加载配件图片数据成功!')
# Return the prepared data
return _lib, _handle, _init_data, _init_weapon_name_data
if __name__ == '__main__':
try:
# path = resource.resource_path(os.path.join('img', 'screenshot', '20190413085144_2.jpg'))
# print('path {}'.format(path))
# Image.open(path).show()
# Configure logging
if environment.is_debug():
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.INFO)
# Initialize
lib, handle, init_data, init_weapon_name_data = init()
# Start ahead of time: shooting and left/right correction
# _executor.submit(handle_shoot_correction)
# logging.info('开启左右修正!')
_executor.submit(handle_control_shoot)
logging.info('开启-自动压枪!')
logging.info('开启-子弹0不压枪!')
logging.info('开启-手雷烟雾弹识别!')
# Weapon selection
_executor.submit(handle_weapon_select)
logging.info('开启-选择武器!')
# Start real-time screen capture
_executor.submit(handle_capture_image)
logging.info('开启-实时截屏!')
# Hook input events
hm = PyHook3.HookManager()
hm.KeyDown = onKeyboardEvent
hm.HookKeyboard()
hm.MouseAll = onMouseEvent
hm.HookMouse()
pythoncom.PumpMessages()
except Exception as e:
print(e)
finally:
os.system('pause')
| 28.691781
| 107
| 0.59334
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,599
| 0.251147
|
3d81143199d30bf1afb752289d20dfe6d3a3f506
| 16,009
|
py
|
Python
|
src/dataset-dl.py
|
Mokuichi147/dataset-dl
|
e669243ccd2d64aa5ccbdd17b430e3d130bb13cd
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
src/dataset-dl.py
|
Mokuichi147/dataset-dl
|
e669243ccd2d64aa5ccbdd17b430e3d130bb13cd
|
[
"Apache-2.0",
"MIT"
] | 2
|
2022-01-01T16:56:58.000Z
|
2022-02-27T14:32:32.000Z
|
src/dataset-dl.py
|
Mokuichi147/dataset-dl
|
e669243ccd2d64aa5ccbdd17b430e3d130bb13cd
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
from concurrent.futures import ALL_COMPLETED, ThreadPoolExecutor, as_completed
import csv
import dearpygui.dearpygui as dpg
from os.path import isfile, isdir, join
import pyperclip
import subprocess
import sys
from tempfile import gettempdir
from traceback import print_exc
import core
import extruct
import utilio
from pytube import YouTube, Playlist
import ffmpeg
if sys.platform == 'darwin':
from tkinter import Tk
from tkinter.filedialog import askdirectory, askopenfilename
save_dir_dialog_mac = False
load_csv_dialog_mac = False
tkinter_root = Tk()
tkinter_root.withdraw()
dpg.create_context()
APPNAME = 'dataset-dl'
TEMPDIR = join(gettempdir(), APPNAME)
MAXWOREKR = 20
TAGS = []
def check_save_dir():
dpg.set_value('save_dir_check', isdir(dpg.get_value('save_dir_path')))
if sys.platform == 'darwin':
def save_dir_dialog():
global save_dir_dialog_mac
save_dir_dialog_mac = True
def load_csv_dialog():
global load_csv_dialog_mac
load_csv_dialog_mac = True
else:
def save_dir_dialog():
save_dir = utilio.ask_directry()
if save_dir != '':
dpg.set_value('save_dir_path', save_dir)
check_save_dir()
def load_csv_dialog():
load_csv = utilio.ask_open_file([('', '.csv')])
if load_csv != '':
dpg.set_value('csv_path', load_csv)
check_csv_path()
def check_csv_path():
csv_path = dpg.get_value('csv_path')
dpg.set_value('csv_path_check', isfile(csv_path) and csv_path.lower().endswith('.csv'))
def check_url():
url_str = dpg.get_value('url')
is_url = extruct.get_video_id(url_str) != '' or extruct.get_playlist_id(url_str) != ''
dpg.set_value('url_check', is_url)
def paste_url():
dpg.set_value('url', pyperclip.paste())
check_url()
def lock_ui():
for tag in TAGS:
dpg.configure_item(tag, enabled=False)
def unlock_ui():
for tag in TAGS:
dpg.configure_item(tag, enabled=True)
def run_url():
lock_ui()
parent_tag = 'url_tab'
if not (dpg.get_value('save_dir_check') and dpg.get_value('url_check')):
unlock_ui()
return
generate_entire_progress(parent_tag)
input_url = dpg.get_value('url')
if extruct.get_playlist_id(input_url) != '':
video_urls = Playlist(input_url).video_urls
else:
video_urls = ['https://www.youtube.com/watch?v=' + extruct.get_video_id(input_url)]
with ThreadPoolExecutor(max_workers=MAXWOREKR) as executor:
tasks = [executor.submit(
download,
video_url,
core.NameMode.TITLE,
0,
0,
parent_tag
) for video_url in video_urls]
complete_count = 0
max_task_count = len(tasks)
for task in as_completed(tasks):
complete_count += 1
dpg.set_value('entire_bar', complete_count / max_task_count)
dpg.set_value('entire_text', f'Completed: {complete_count:>7} / {max_task_count}')
dpg.delete_item('entire_group')
unlock_ui()
def run_csv():
lock_ui()
parent_tag = 'csv_tab'
if not (dpg.get_value('save_dir_check') and dpg.get_value('csv_path_check')):
unlock_ui()
return
generate_entire_progress(parent_tag)
with open(dpg.get_value('csv_path'), 'r', encoding='utf-8') as f,\
ThreadPoolExecutor(max_workers=MAXWOREKR) as executor:
reader = csv.reader(f)
tasks = []
for row in reader:
if row[0].startswith('#'):
continue
video_url = 'https://www.youtube.com/watch?v=' + row[0]
tasks.append(executor.submit(
download,
video_url,
core.NameMode.ID,
int(float(row[1])),
int(float(row[2])),
parent_tag
))
complete_count = 0
max_task_count = len(tasks)
for task in as_completed(tasks):
complete_count += 1
dpg.set_value('entire_bar', complete_count / max_task_count)
dpg.set_value('entire_text', f'Completed: {complete_count:>7} / {max_task_count}')
dpg.delete_item('entire_group')
unlock_ui()
def generate_entire_progress(parent_tag: str):
dpg.add_group(tag='entire_group', parent=parent_tag, horizontal=True)
dpg.add_progress_bar(tag='entire_bar', parent='entire_group')
dpg.add_text('Downloading...', tag=f'entire_text', parent=f'entire_group')
def set_progress(stream, chunk, bytes_remaining):
stream_id = extruct.file_hash(f'{stream.title}_{stream.filesize}')
dpg.set_value(stream_id, 1 - bytes_remaining / stream.filesize)
def download(video_url: str, naming: core.NameMode, start_time: int, end_time: int, parent_tag: str):
yt = YouTube(video_url, on_progress_callback=set_progress)
quality_mode = core.get_qualitymode(dpg.get_value('quality_radio'))
stream_video = core.get_video_stream(yt, quality_mode)
stream_audio = core.get_audio_stream(yt, quality_mode)
if not quality_mode.is_audio:
return
stream_audio_id = extruct.file_hash(f'{stream_audio.title}_{stream_audio.filesize}')
if not quality_mode.is_video:
request_type = core.get_request_type(quality_mode.extension_audio)
save_path = TEMPDIR if quality_mode == core.QualityMode.OPUS or quality_mode == core.QualityMode.MP3 else dpg.get_value('save_dir_path')
file_name = None if quality_mode == core.QualityMode.OPUS or quality_mode == core.QualityMode.MP3 else extruct.file_name(stream_audio.title)
with ThreadPoolExecutor(max_workers=MAXWOREKR*2) as executor:
tasks = []
tasks.append(executor.submit(
download_stream,
stream_audio,
save_path,
request_type,
parent_tag,
filename = file_name
))
for task in as_completed(tasks):
pass
dpg.delete_item(f'{stream_audio_id}_group')
if quality_mode != core.QualityMode.OPUS and quality_mode != core.QualityMode.MP3:
return
if naming == core.NameMode.ID:
audio_id = extruct.get_video_id(video_url)
save_path = f"{join(dpg.get_value('save_dir_path'), extruct.file_name(audio_id))}.{quality_mode.extension_audio}"
else:
save_path = f"{join(dpg.get_value('save_dir_path'), extruct.file_name(stream_audio.title))}.{quality_mode.extension_audio}"
audio_temp_path = f'{join(TEMPDIR, stream_audio_id)}'
auodio_save(quality_mode, save_path, audio_temp_path, start_time, end_time)
stream_video_id = extruct.file_hash(f'{stream_video.title}_{stream_video.filesize}')
with ThreadPoolExecutor(max_workers=MAXWOREKR*2) as executor:
tasks = []
tasks.append(executor.submit(
download_stream,
stream_video,
TEMPDIR,
quality_mode.extension_video,
parent_tag
))
tasks.append(executor.submit(
download_stream,
stream_audio,
TEMPDIR,
quality_mode.extension_audio,
parent_tag
))
for task in as_completed(tasks):
pass
dpg.delete_item(f'{stream_video_id}_group')
dpg.delete_item(f'{stream_audio_id}_group')
if naming == core.NameMode.ID:
stream_id = extruct.get_video_id(video_url)
save_path = f"{join(dpg.get_value('save_dir_path'), extruct.file_name(stream_id))}.{quality_mode.extension_video}"
else:
save_path = f"{join(dpg.get_value('save_dir_path'), extruct.file_name(stream_video.title))}.{quality_mode.extension_video}"
video_temp_path = f'{join(TEMPDIR, stream_video_id)}.{quality_mode.extension_video}'
audio_temp_path = f'{join(TEMPDIR, stream_audio_id)}.{quality_mode.extension_audio}'
marge_save(save_path, video_temp_path, audio_temp_path, start_time, end_time)
def auodio_save(quality_mode: core.QualityMode, save_path: str, audio_temp_path: str, start_time: int, end_time: int):
try:
if quality_mode == core.QualityMode.OPUS or quality_mode == core.QualityMode.MP3:
opus_temp_path = f'{audio_temp_path}.{core.get_request_type(quality_mode.extension_audio)}'
audio_temp_path = f'{audio_temp_path}.{quality_mode.extension_audio}'
opus_audio = ffmpeg.input(opus_temp_path)
if quality_mode == core.QualityMode.OPUS:
opus_audio_stream = ffmpeg.output(opus_audio, audio_temp_path, acodec='copy').global_args('-loglevel', 'quiet')
else:
opus_audio_stream = ffmpeg.output(opus_audio, audio_temp_path).global_args('-loglevel', 'quiet')
startupinfo = None
if sys.platform == 'win32':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
process = subprocess.Popen(ffmpeg.compile(opus_audio_stream, overwrite_output=True), startupinfo=startupinfo)
out, err = process.communicate()
retcode = process.poll()
if retcode:
raise ffmpeg.Error('ffmpeg', out, err)
utilio.delete_file(opus_temp_path)
else:
audio_temp_path = f'{audio_temp_path}.{quality_mode.extension_audio}'
if start_time < end_time and not (start_time == 0 == end_time):
audio = ffmpeg.input(audio_temp_path, ss=start_time, to=end_time)
else:
audio = ffmpeg.input(audio_temp_path)
audio_stream = ffmpeg.output(audio, save_path, acodec='copy').global_args('-loglevel', 'quiet')
startupinfo = None
if sys.platform == 'win32':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
process = subprocess.Popen(ffmpeg.compile(audio_stream, overwrite_output=True), startupinfo=startupinfo)
out, err = process.communicate()
retcode = process.poll()
if retcode:
raise ffmpeg.Error('ffmpeg', out, err)
utilio.delete_file(audio_temp_path)
except:
print_exc()
def marge_save(save_path: str, video_temp_path: str, audio_temp_path: str,
start_time: int, end_time: int):
try:
if start_time < end_time and not (start_time == 0 == end_time):
video = ffmpeg.input(video_temp_path, ss=start_time, to=end_time)
audio = ffmpeg.input(audio_temp_path, ss=start_time, to=end_time)
else:
video = ffmpeg.input(video_temp_path)
audio = ffmpeg.input(audio_temp_path)
marge_stream = ffmpeg.output(video, audio, save_path, vcodec='copy', acodec='copy').global_args('-loglevel', 'quiet')
startupinfo = None
if sys.platform == 'win32':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
process = subprocess.Popen(ffmpeg.compile(marge_stream, overwrite_output=True), startupinfo=startupinfo)
out, err = process.communicate()
retcode = process.poll()
if retcode:
raise ffmpeg.Error('ffmpeg', out, err)
utilio.delete_file(video_temp_path)
utilio.delete_file(audio_temp_path)
except:
print_exc()
def download_stream(stream, output_path, extension, parent_tag, filename=None):
stream_id = extruct.file_hash(f'{stream.title}_{stream.filesize}')
if filename == None:
filename = f'{stream_id}.{extension}'
else:
filename = f'{filename}.{extension}'
dpg.add_group(tag=f'{stream_id}_group', parent=parent_tag, horizontal=True)
dpg.add_progress_bar(tag=stream_id, parent=f'{stream_id}_group')
dpg.add_text(stream.title, tag=f'{stream_id}_text', parent=f'{stream_id}_group')
try:
stream.download(output_path=output_path, filename=filename)
except:
print_exc()
with dpg.font_registry():
with dpg.font(extruct.get_fullpath(join('resources', 'fonts', 'NotoSansJP-Regular.otf')), 22) as default_font:
dpg.add_font_range_hint(dpg.mvFontRangeHint_Default)
dpg.add_font_range_hint(dpg.mvFontRangeHint_Japanese)
with open(extruct.get_fullpath(join('resources', 'fonts', 'OFL.txt')), 'r', encoding='utf-8') as f:
font_license = f.read()
with dpg.window(tag='Primary Window'):
dpg.bind_font(default_font)
with dpg.menu_bar():
with dpg.menu(label='License'):
dpg.add_text('NotoSansJP-Regular')
dpg.add_input_text(default_value=font_license, multiline=True, readonly=True)
dpg.add_text('Save Directory')
with dpg.group(horizontal=True):
dpg.add_checkbox(default_value=False, enabled=False, tag='save_dir_check')
dpg.add_input_text(callback=check_save_dir, tag='save_dir_path')
dpg.add_button(label='Select', tag='save_dir_button', callback=save_dir_dialog)
TAGS.append('save_dir_path')
TAGS.append('save_dir_button')
dpg.add_spacer(height=10)
dpg.add_text('Quality')
dpg.add_radio_button(
[quality_mode.text for quality_mode in core.QualityMode],
tag = 'quality_radio',
default_value = core.QualityMode.HIGH.text,
horizontal = True
)
TAGS.append('quality_radio')
dpg.add_spacer(height=10)
dpg.add_text('Mode')
with dpg.tab_bar():
with dpg.tab(label='Video OR Playlist URL', tag='url_tab'):
with dpg.group(horizontal=True):
dpg.add_checkbox(default_value=False, enabled=False, tag='url_check')
dpg.add_input_text(callback=check_url, tag='url')
dpg.add_button(label='Paste', tag='url_paste_button', callback=paste_url)
dpg.add_button(label='Run', tag='url_run_button', callback=run_url)
TAGS.append('url')
TAGS.append('url_paste_button')
TAGS.append('url_run_button')
with dpg.tab(label='CSV File', tag='csv_tab'):
with dpg.group(horizontal=True):
dpg.add_checkbox(default_value=False, enabled=False, tag='csv_path_check')
dpg.add_input_text(callback=check_csv_path, tag='csv_path')
dpg.add_button(label='Select', tag='csv_path_button', callback=load_csv_dialog)
dpg.add_button(label='Run', tag='csv_run_button', callback=run_csv)
TAGS.append('csv_path')
TAGS.append('csv_path_button')
TAGS.append('csv_run_button')
utilio.create_workdir(TEMPDIR)
icon = extruct.get_fullpath(join('resources', 'dataset-dl.ico')) if sys.platform == 'win32' else ''
dpg.create_viewport(title=APPNAME, width=1000, height=500, large_icon=icon)
dpg.setup_dearpygui()
dpg.show_viewport()
dpg.set_primary_window('Primary Window', True)
if not sys.platform == 'darwin':
dpg.start_dearpygui()
else:
while dpg.is_dearpygui_running():
dpg.render_dearpygui_frame()
if save_dir_dialog_mac:
save_dir = askdirectory()
if save_dir != '':
dpg.set_value('save_dir_path', save_dir)
check_save_dir()
save_dir_dialog_mac = False
elif load_csv_dialog_mac:
load_csv = askopenfilename(filetypes=[('', '.csv')])
if load_csv != '':
dpg.set_value('csv_path', load_csv)
check_csv_path()
load_csv_dialog_mac = False
tkinter_root.destroy()
dpg.destroy_context()
utilio.delete_workdir(TEMPDIR)
| 37.757075
| 151
| 0.639578
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,543
| 0.158848
|
3d82652d7d5f527c23d139f61d27dabd1f54a20e
| 3,813
|
py
|
Python
|
src/robot/parsing/parser/parser.py
|
bhirsz/robotframework
|
d62ee5091ed932aee8fc12ae5e340a5b19288f05
|
[
"ECL-2.0",
"Apache-2.0"
] | 7,073
|
2015-01-01T17:19:16.000Z
|
2022-03-31T22:01:29.000Z
|
src/robot/parsing/parser/parser.py
|
bhirsz/robotframework
|
d62ee5091ed932aee8fc12ae5e340a5b19288f05
|
[
"ECL-2.0",
"Apache-2.0"
] | 2,412
|
2015-01-02T09:29:05.000Z
|
2022-03-31T13:10:46.000Z
|
src/robot/parsing/parser/parser.py
|
bhirsz/robotframework
|
d62ee5091ed932aee8fc12ae5e340a5b19288f05
|
[
"ECL-2.0",
"Apache-2.0"
] | 2,298
|
2015-01-03T02:47:15.000Z
|
2022-03-31T02:00:16.000Z
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..lexer import Token, get_tokens, get_resource_tokens, get_init_tokens
from ..model import Statement
from .fileparser import FileParser
def get_model(source, data_only=False, curdir=None):
"""Parses the given source to a model represented as an AST.
How to use the model is explained more thoroughly in the general
documentation of the :mod:`robot.parsing` module.
:param source: The source where to read the data. Can be a path to
a source file as a string or as ``pathlib.Path`` object, an already
        opened file object, or Unicode text containing the data directly.
Source files must be UTF-8 encoded.
:param data_only: When ``False`` (default), returns all tokens. When set
to ``True``, omits separators, comments, continuation markers, and
other non-data tokens. Model like this cannot be saved back to
file system.
:param curdir: Directory where the source file exists. This path is used
to set the value of the built-in ``${CURDIR}`` variable during parsing.
        When not given, the variable is left as-is. Should be given only
        if the model will be executed afterwards. If the model is saved
back to disk, resolving ``${CURDIR}`` is typically not a good idea.
Use :func:`get_resource_model` or :func:`get_init_model` when parsing
resource or suite initialization files, respectively.
"""
return _get_model(get_tokens, source, data_only, curdir)
def get_resource_model(source, data_only=False, curdir=None):
"""Parses the given source to a resource file model.
Otherwise same as :func:`get_model` but the source is considered to be
a resource file. This affects, for example, what settings are valid.
"""
return _get_model(get_resource_tokens, source, data_only, curdir)
def get_init_model(source, data_only=False, curdir=None):
"""Parses the given source to a init file model.
Otherwise same as :func:`get_model` but the source is considered to be
a suite initialization file. This affects, for example, what settings are
valid.
"""
return _get_model(get_init_tokens, source, data_only, curdir)
def _get_model(token_getter, source, data_only=False, curdir=None):
tokens = token_getter(source, data_only)
statements = _tokens_to_statements(tokens, curdir)
model = _statements_to_model(statements, source)
model.validate_model()
return model
def _tokens_to_statements(tokens, curdir=None):
statement = []
EOS = Token.EOS
for t in tokens:
if curdir and '${CURDIR}' in t.value:
t.value = t.value.replace('${CURDIR}', curdir)
if t.type != EOS:
statement.append(t)
else:
yield Statement.from_tokens(statement)
statement = []
def _statements_to_model(statements, source=None):
parser = FileParser(source=source)
model = parser.model
stack = [parser]
for statement in statements:
while not stack[-1].handles(statement):
stack.pop()
parser = stack[-1].parse(statement)
if parser:
stack.append(parser)
return model
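# A minimal usage sketch (illustrative only, with hypothetical sample data):
# parses Robot Framework data passed directly as text, as described in the
# get_model() docstring above, and prints the type of the resulting AST root.
if __name__ == '__main__':
    sample_data = (
        "*** Test Cases ***\n"
        "Example\n"
        "    Log    Hello, world!\n"
    )
    model = get_model(sample_data, data_only=True)
    print(type(model).__name__)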
| 38.515152
| 79
| 0.702072
| 0
| 0
| 362
| 0.094938
| 0
| 0
| 0
| 0
| 2,299
| 0.602937
|
3d83dae1b7cb47bf096db3ece76a46efed3fa5a8
| 1,835
|
py
|
Python
|
astronomy_datamodels/tags/fixed_location.py
|
spacetelescope/astronomy_datamodels
|
ca5db82d5982781ea763cef9851d4c982fd86328
|
[
"BSD-3-Clause"
] | 1
|
2019-03-08T03:06:43.000Z
|
2019-03-08T03:06:43.000Z
|
astronomy_datamodels/tags/fixed_location.py
|
spacetelescope/astronomy_datamodels
|
ca5db82d5982781ea763cef9851d4c982fd86328
|
[
"BSD-3-Clause"
] | 1
|
2020-10-29T19:54:28.000Z
|
2020-10-29T19:54:28.000Z
|
astronomy_datamodels/tags/fixed_location.py
|
spacetelescope/astronomy_datamodels
|
ca5db82d5982781ea763cef9851d4c982fd86328
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from asdf import yamlutil
from asdf.versioning import AsdfSpec
from ..types import AstronomyDataModelType
from ..fixed_location import FixedLocation
class FixedLocationType(AstronomyDataModelType):
name = 'datamodel/fixed_location'
version = '1.0.0'
supported_versions = ['1.0.0']
types = ['astronomy_datamodels.fixed_location.FixedLocation']
requires = ["astropy"]
@classmethod
def to_tree(cls, node, ctx): # to ASDF representation
d = {}
d['solar_system_body'] = node.solar_system_body
d['latitude'] = yamlutil.custom_tree_to_tagged_tree(node.latitude, ctx)
d['longitude'] = yamlutil.custom_tree_to_tagged_tree(node.longitude, ctx)
if node.altitude is not None:
d['altitude'] = yamlutil.custom_tree_to_tagged_tree(node.altitude, ctx)
if node.meta is not None:
d['meta'] = yamlutil.custom_tree_to_tagged_tree(node.meta, ctx)
return d
@classmethod
def from_tree(cls, node, ctx): # from ASDF to object representation
solar_system_body = node['solar_system_body']
latitude = yamlutil.tagged_tree_to_custom_tree(node['latitude'], ctx)
longitude = yamlutil.tagged_tree_to_custom_tree(node['longitude'], ctx)
fixed_location = FixedLocation(latitude=latitude, longitude=longitude,
solar_system_body=solar_system_body)
if 'altitude' in node:
fixed_location.altitude = yamlutil.tagged_tree_to_custom_tree(node['altitude'], ctx)
if 'meta' in node:
fixed_location.meta = yamlutil.tagged_tree_to_custom_tree(node['meta'], ctx)
return fixed_location
@classmethod
def assert_equal(cls, old, new):
pass
| 40.777778
| 96
| 0.683924
| 1,594
| 0.868665
| 0
| 0
| 1,340
| 0.730245
| 0
| 0
| 374
| 0.203815
|
3d85f7e617337855186eb9a6630f328826ed38ef
| 868
|
py
|
Python
|
app/migrations/0003_contacts.py
|
Joshua-Barawa/Django-IP4
|
5665efe73cf8d2244b7bb35ed627e4e237902156
|
[
"Unlicense"
] | null | null | null |
app/migrations/0003_contacts.py
|
Joshua-Barawa/Django-IP4
|
5665efe73cf8d2244b7bb35ed627e4e237902156
|
[
"Unlicense"
] | null | null | null |
app/migrations/0003_contacts.py
|
Joshua-Barawa/Django-IP4
|
5665efe73cf8d2244b7bb35ed627e4e237902156
|
[
"Unlicense"
] | null | null | null |
# Generated by Django 4.0.3 on 2022-03-21 13:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0002_remove_profile_caption_alter_profile_profile_pic_and_more'),
]
operations = [
migrations.CreateModel(
name='Contacts',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=100, null=True)),
('unit', models.CharField(blank=True, max_length=100, null=True)),
('m_number', models.IntegerField(default=0)),
('hood', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.neighborhood')),
],
),
]
| 34.72
| 117
| 0.623272
| 742
| 0.854839
| 0
| 0
| 0
| 0
| 0
| 0
| 180
| 0.207373
|
3d8734a866fbee3cba78ae6db665c5cbc41ba2ea
| 440
|
py
|
Python
|
assessment/seeders/base_seeder.py
|
kenware/Assessment
|
69f5e3fbf18dfa2c59eaf3b083ebdba7ca66c9b7
|
[
"MIT"
] | null | null | null |
assessment/seeders/base_seeder.py
|
kenware/Assessment
|
69f5e3fbf18dfa2c59eaf3b083ebdba7ca66c9b7
|
[
"MIT"
] | 3
|
2020-02-11T23:31:01.000Z
|
2021-06-10T21:04:34.000Z
|
assessment/seeders/base_seeder.py
|
kenware/Assessment
|
69f5e3fbf18dfa2c59eaf3b083ebdba7ca66c9b7
|
[
"MIT"
] | null | null | null |
from .seed_assessment_type import seed_assessment
from .seed_question import seed_question
from .seed_answer import seed_answer
from .seed_user import seed_user
from .seed_score import seed_score
from .seed_assessment_name import seed_assessment_name
class Seeder(object):
def seed_all(self):
seed_assessment_name()
seed_assessment()
seed_question()
seed_answer()
seed_user()
seed_score()
| 24.444444
| 54
| 0.752273
| 186
| 0.422727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3d89564a5d0fa853d134b34b86a84b5003e24ceb
| 328
|
py
|
Python
|
contek_tusk/metric_data.py
|
contek-io/contek-tusk
|
74dc73388367adb958848819b29fe24316c4f6f4
|
[
"MIT"
] | null | null | null |
contek_tusk/metric_data.py
|
contek-io/contek-tusk
|
74dc73388367adb958848819b29fe24316c4f6f4
|
[
"MIT"
] | null | null | null |
contek_tusk/metric_data.py
|
contek-io/contek-tusk
|
74dc73388367adb958848819b29fe24316c4f6f4
|
[
"MIT"
] | null | null | null |
from pandas import DataFrame
from contek_tusk.table import Table
class MetricData:
def __init__(self, table: Table, df: DataFrame) -> None:
self._table = table
self._df = df
def get_table(self) -> Table:
return self._table
def get_data_frame(self) -> DataFrame:
return self._df
| 19.294118
| 60
| 0.655488
| 259
| 0.789634
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3d8aee839cc7a45416c287f7da1460240d9b1dd8
| 28
|
py
|
Python
|
inlinec/__init__.py
|
ssize-t/inlinec
|
20eca6bf8556a77906ba5f420f09006d6daf4355
|
[
"Apache-2.0"
] | 22
|
2020-10-10T18:25:04.000Z
|
2021-11-09T18:56:34.000Z
|
inlinec/__init__.py
|
ssize-t/inlinec
|
20eca6bf8556a77906ba5f420f09006d6daf4355
|
[
"Apache-2.0"
] | 1
|
2020-11-10T03:50:05.000Z
|
2020-11-10T03:50:05.000Z
|
inlinec/__init__.py
|
ssize-t/inlinec
|
20eca6bf8556a77906ba5f420f09006d6daf4355
|
[
"Apache-2.0"
] | 2
|
2020-10-10T16:09:42.000Z
|
2021-03-10T16:43:11.000Z
|
from .inlinec import inlinec
| 28
| 28
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3d90245ccc4e47d064d2a5aa4296f527b42e0ce2
| 3,360
|
py
|
Python
|
mcastropi.py
|
martinohanlon/MinecraftInteractiveAstroPi
|
0e9f30b25cad83b52553b257103b0e89a09ecc38
|
[
"BSD-3-Clause"
] | null | null | null |
mcastropi.py
|
martinohanlon/MinecraftInteractiveAstroPi
|
0e9f30b25cad83b52553b257103b0e89a09ecc38
|
[
"BSD-3-Clause"
] | null | null | null |
mcastropi.py
|
martinohanlon/MinecraftInteractiveAstroPi
|
0e9f30b25cad83b52553b257103b0e89a09ecc38
|
[
"BSD-3-Clause"
] | null | null | null |
"""
SpaceCRAFT - Astro Pi competition[http://astro-pi.org/] entry
Conceived by Hannah Belshaw
Created by Martin O'Hanlon[http://www.stuffaboutcode.com]
For the Raspberry Pi Foundation[https://www.raspberrypi.org]
mcastropi.py
A movable minecraft model of a Raspberry Pi with an Astro Pi on top
"""
from minecraftstuff import MinecraftShape
from minecraftstuff import ShapeBlock
from mcpi.minecraft import Minecraft
from mcpi.minecraft import Vec3
from mcpi import block
from time import sleep
class MCAstroPi(MinecraftShape):
def __init__(self, mc, pos):
self.pos = pos
self.mc = mc
#init the MinecraftShape
MinecraftShape.__init__(self, self.mc, self.pos, visible = False)
#create the AstroPi using setBlock(s) commands
#boards
self.setBlocks(-6, -3, -9, 7, -3, 11, 35, 13, tag = "rpi_board")
self.setBlocks(-6, 0, -9, 7, 0, 6, 35, 13, tag = "astropi_board")
#pillars
self.setBlocks(-6, -2, -9, -6, -1, -9, 42)
self.setBlocks(7, -2, -9, 7, -1, -9, 42)
self.setBlocks(-6, -2, 6, -6, -1, 6, 42)
self.setBlocks(7, -2, 6, 7, -1, 6, 42)
#gpio headers
self.setBlocks(7, 1, -8, 7, 1, 5, 35, 15, tag = "astropi_gpio")
self.setBlocks(7, -2, -8, 7, -1, 5, 35, 15, tag = "rpi_gpio")
#usb and ethernet port
self.setBlocks(4, -2, 8, 6, 0, 11, 42, tag = "usb")
self.setBlocks(0, -2, 8, 2, 0, 11, 42, tag = "usb" )
self.setBlocks(-5, -2, 8, -2, 0, 11, 42, tag = "ethernet")
#camera, display, power, hdmi, composite ports
self.setBlocks(-5, -2, 1, -2, -2, 1, 35, 15, tag = "camera")
self.setBlocks(2, -2, -9, -1, -2, -9, 35, 15, tag = "display")
self.setBlocks(-6, -2, -7, -6, -2, -6, 42, tag = "power")
self.setBlocks(-6, -2, -2, -6, -2, 0, 42, tag = "hdmi")
self.setBlock(-6, -2, 3, 35, 15, tag = "composite")
#processor
self.setBlocks(0, -2, -2, 2, -2, -4, 35, 15, tag = "processor")
#led grid
self.setBlocks(-3, 1, -8, 4, 1, -1, 35, 0, tag = "led")
#other astro pi components
self.setBlocks(3, 1, 1, 4, 1, 2, 35, 15, tag = "level_shifter")
self.setBlocks(3, 1, 4, 4, 1, 5, 35, 15, tag = "atmel" )
self.setBlocks(0, 1, 1, 0, 1, 2, 35, 15, tag = "orientation")
self.setBlock(1, 1, 5, 35, 15, tag = "humidity")
self.setBlock(-1, 1, 5, 35, 15, tag = "pressure")
self.setBlock(-2, 1, 3, 35, 15, tag = "eeprom")
self.setBlocks(-6, 1, -5, -5, 1, -4, 35, 15, tag = "led_driver")
#astropi joystick
self.setBlock(-5, 1, 4, 42, tag = "joy_left")
self.setBlock(-4, 1, 5, 42, tag = "joy_up")
self.setBlock(-5, 1, 6, 42, tag = "joy_right")
self.setBlock(-6, 1, 5, 42, tag = "joy_down")
self.setBlock(-5, 2, 5, 35, 15, tag = "joy_button")
#astro pi gaps
self.setBlocks(-1, 0, -9, 2, 0, -9, 0)
self.setBlocks(-5, 0, 1, -2, 0, 1, 0)
#make the astro pi visible
self.draw()
#test
if __name__ == "__main__":
mc = Minecraft.create()
pos = Vec3(0, 20, 0)
mcastropi = MCAstroPi(mc, pos)
try:
sleep(5)
finally:
mcastropi.clear()
| 37.752809
| 74
| 0.535714
| 2,622
| 0.780357
| 0
| 0
| 0
| 0
| 0
| 0
| 858
| 0.255357
|
3d9080c01f26c55604e47fcbe8181d860f113c89
| 1,444
|
py
|
Python
|
utils/pack_images.py
|
1mplex/segmentation_image_augmentation
|
bd93c1589078247c0c7aff8556afc16a7e15be39
|
[
"MIT"
] | 15
|
2020-07-21T08:57:38.000Z
|
2022-01-24T21:59:10.000Z
|
utils/pack_images.py
|
el-lilya/segmentation_image_augmentation
|
c16604274a220e00a6fbc4d653ab9c90276a8eba
|
[
"MIT"
] | 1
|
2021-02-15T21:24:11.000Z
|
2021-02-15T21:24:11.000Z
|
utils/pack_images.py
|
el-lilya/segmentation_image_augmentation
|
c16604274a220e00a6fbc4d653ab9c90276a8eba
|
[
"MIT"
] | 9
|
2021-07-01T02:42:22.000Z
|
2022-01-24T21:59:12.000Z
|
import copy
import math
import numpy as np
# import rpack
from rectpack import newPacker
from rectpack.maxrects import MaxRectsBssf
def _change_dim_order(sizes):
return [[s[1], s[0]] for s in sizes]
# def get_pack_coords(sizes):
# # list of [height, width] i.e. img.shape order
# sizes = _change_dim_order(sizes)
# positions = rpack.pack(sizes)
# return _change_dim_order(positions)
def _pack(rectangles, bins):
packer = newPacker(pack_algo=MaxRectsBssf)
for r in rectangles:
packer.add_rect(*r)
for b in bins:
packer.add_bin(*b)
packer.pack()
all_rects = packer.rect_list()
res = []
for rect in all_rects:
res.append(np.array(rect))
res = np.array(res)
res.view('i8,i8,i8,i8,i8,i8,').sort(order=['f5'], axis=0)
res = [list(i) for i in res[:, 1:3]]
return res
def get_pack_coords(sizes):
s = copy.deepcopy(sizes)
[s[i].append(i + 1) for i in range(len(s))]
s = np.array([np.array(i) for i in s]).copy()
total_h, total_w, _ = s.sum(axis=0)
max_h = s[:, 0].max(axis=0)
virtual_cols = math.ceil(math.sqrt(len(sizes)))
height_limit = max(max_h, int(1.2 * (total_h / virtual_cols)))
rectangles = [tuple(i) for i in s]
bins = [(height_limit, total_w)]
coords = _pack(rectangles, bins)
if len(coords) != len(sizes):
coords = _pack(rectangles, [(int(2 * max_h), total_w)])
return coords
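# A minimal usage sketch (illustrative only; the sizes below are hypothetical
# [height, width] pairs): computes packed positions for three rectangles.
# Uses the rectpack package that the module already imports.
if __name__ == '__main__':
    example_sizes = [[100, 200], [150, 80], [60, 60]]
    example_coords = get_pack_coords(example_sizes)
    for size, coord in zip(example_sizes, example_coords):
        print('size (h, w):', size, '-> packed position:', coord)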
| 22.5625
| 66
| 0.629501
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 233
| 0.161357
|
3d90bec081e48c3692736a49abca5a861a8e0892
| 626
|
py
|
Python
|
scripts/modules/task_plan_types/date.py
|
vkostyanetsky/Organizer
|
b1f0a05c0b6c6e6ea7a78a6bd7a3c70f85b33eba
|
[
"MIT"
] | null | null | null |
scripts/modules/task_plan_types/date.py
|
vkostyanetsky/Organizer
|
b1f0a05c0b6c6e6ea7a78a6bd7a3c70f85b33eba
|
[
"MIT"
] | null | null | null |
scripts/modules/task_plan_types/date.py
|
vkostyanetsky/Organizer
|
b1f0a05c0b6c6e6ea7a78a6bd7a3c70f85b33eba
|
[
"MIT"
] | null | null | null |
# DD.MM.YYYY (DD = day number, MM = month number, YYYY = year number)
import re
import datetime
def is_task_current(task, date):
result = None
    groups = re.match(r'([0-9]{1,2})\.([0-9]{1,2})\.([0-9]{4})', task['condition'])
    type_is_correct = groups is not None
if type_is_correct:
task_date_year = int(groups[3])
task_date_month = int(groups[2])
task_date_day = int(groups[1])
task_date = datetime.datetime(task_date_year, task_date_month, task_date_day)
task['outdated'] = task_date < date
result = date == task_date
return result
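# A minimal usage sketch (illustrative only; the task below is hypothetical):
# a task planned for 21.03.2022 is current on that date and not on the next day.
if __name__ == '__main__':
    example_task = {'condition': '21.03.2022'}
    print(is_task_current(example_task, datetime.datetime(2022, 3, 21)))  # True
    print(is_task_current(example_task, datetime.datetime(2022, 3, 22)))  # False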
| 26.083333
| 91
| 0.600639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 160
| 0.242424
|
3d92ede6e5d24bbbfeb9c757cc08cd7affa9cd34
| 268
|
py
|
Python
|
src/pyons/setup.py
|
larioandr/thesis-models
|
ecbc8c01aaeaa69034d6fe1d8577ab655968ea5f
|
[
"MIT"
] | 1
|
2021-01-17T15:49:03.000Z
|
2021-01-17T15:49:03.000Z
|
src/pyons/setup.py
|
larioandr/thesis-models
|
ecbc8c01aaeaa69034d6fe1d8577ab655968ea5f
|
[
"MIT"
] | null | null | null |
src/pyons/setup.py
|
larioandr/thesis-models
|
ecbc8c01aaeaa69034d6fe1d8577ab655968ea5f
|
[
"MIT"
] | 1
|
2021-03-07T15:31:06.000Z
|
2021-03-07T15:31:06.000Z
|
from setuptools import setup
setup(
name='pyons',
version='1.0',
author="Andrey Larionov",
author_email="larioandr@gmail.com",
license="MIT",
py_modules=['pyons'],
install_requires=[
],
tests_requires=[
'pytest',
],
)
| 15.764706
| 39
| 0.589552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 70
| 0.261194
|
3d95e63a148b7fb62965e71316967e479358de64
| 2,262
|
py
|
Python
|
html2markdown.py
|
DeusFigendi/fefebot
|
935338c7b082502f25f97ae4874b4e896a04972e
|
[
"MIT"
] | 4
|
2016-09-19T03:54:31.000Z
|
2021-03-27T23:06:34.000Z
|
html2markdown.py
|
DeusFigendi/fefebot
|
935338c7b082502f25f97ae4874b4e896a04972e
|
[
"MIT"
] | 1
|
2017-08-01T15:04:57.000Z
|
2017-08-08T22:02:46.000Z
|
html2markdown.py
|
DeusFigendi/fefebot
|
935338c7b082502f25f97ae4874b4e896a04972e
|
[
"MIT"
] | 6
|
2015-08-24T09:37:41.000Z
|
2018-12-26T19:40:42.000Z
|
#! /usr/bin/env python3.2
import re
def _subpre(text):
list=re.split('(<pre>|</pre>)',text)
for i in range(len(list)):
# begin of pre
if i%4==1:
list[i]='\n\n '
# in pre
elif i%4==2:
list[i]=re.sub('<p>|<br>|\n\n', '\n\n ',list[i])
# end of pre
elif i%4==3:
list[i]='\n\n'
return ''.join(list)
def _subblock(text):
list=re.split('(<blockquote>|</blockquote>)',text)
for i in range(len(list)):
# begin of blockquote
if i%4==1:
list[i]='\n\n> '
# in blockquote
elif i%4==2:
list[i]=re.sub('<p>|<br>|\n\n', '\n\n> ',list[i])
# end of blockquote
elif i%4==3:
list[i]='\n\n'
return ''.join(list)
def _sublinks(text):
return re.sub('<a href=\"(?P<link>.*?)\">(?P<linktext>.*?)</a>', lambda m : '[' + _markdownify_linktext(m.group('linktext')) + '](' + _fefe_linksintern(m.group('link')) + ')', text)
def _markdownify(text):
list=re.split('(\[.*\]\(.*\))',text)
# only change when not a link
for i in range(0,len(list),2):
list[i]=re.sub('\*','\\*',list[i])
list[i]=re.sub('_','\\_',list[i])
list[i]=re.sub('<b>','**',list[i])
list[i]=re.sub('</b>','**',list[i])
list[i]=re.sub('<i>','_',list[i])
list[i]=re.sub('</i>','_',list[i])
list[i]=re.sub('<u>','\n',list[i])
list[i]=re.sub('</u>','\n',list[i])
list[i]=re.sub('<li>','\n - ',list[i])
list[i]=re.sub('</li>','\n',list[i])
list[i]=re.sub('<p>','\n\n',list[i])
list[i]=re.sub('</p>','\n\n',list[i])
list[i]=re.sub('<br>','\n\n',list[i])
return ''.join(list)
def _markdownify_linktext(text):
list=re.split('(\[.*\]\(.*\))',text)
# only change when not a link
for i in range(0,len(list),2):
list[i]=re.sub('\*','\\*',list[i])
list[i]=re.sub('_','\\_',list[i])
list[i]=re.sub('<b>','**',list[i])
list[i]=re.sub('</b>','**',list[i])
list[i]=re.sub('<i>','_',list[i])
list[i]=re.sub('</i>','_',list[i])
return ''.join(list)
def _fefe_linksintern(text):
text=re.sub('^\/\?ts=','https://blog.fefe.de/?ts=',text)
text=re.sub('^\/\?q=','https://blog.fefe.de/?q=',text)
return text
def html2md(html):
html=_subpre(html)
html=_subblock(html)
html=_sublinks(html)
html=_markdownify(html)
return html
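# A minimal usage sketch (illustrative only; the HTML snippet and the ?ts= id
# are hypothetical): converts bold text and a relative blog link to Markdown.
if __name__ == '__main__':
    sample_html = 'An <b>important</b> <a href="/?ts=abcdef12">update</a>'
    print(html2md(sample_html))
    # Expected shape: 'An **important** [update](https://blog.fefe.de/?ts=abcdef12)'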
| 29
| 183
| 0.517683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 667
| 0.294872
|
3d9613c4bf3516cfc004d7af07118d7c31dd361e
| 2,572
|
py
|
Python
|
Uebung10/Aufgabe29.py
|
B0mM3L6000/EiP
|
f68718f95a2d3cde8ead62b6134ac1b5068881a5
|
[
"MIT"
] | 1
|
2018-04-18T19:10:06.000Z
|
2018-04-18T19:10:06.000Z
|
Uebung10/Aufgabe29.py
|
B0mM3L6000/EiP
|
f68718f95a2d3cde8ead62b6134ac1b5068881a5
|
[
"MIT"
] | null | null | null |
Uebung10/Aufgabe29.py
|
B0mM3L6000/EiP
|
f68718f95a2d3cde8ead62b6134ac1b5068881a5
|
[
"MIT"
] | 1
|
2018-04-29T08:48:00.000Z
|
2018-04-29T08:48:00.000Z
|
class Encoder:
def __init__(self, encoding = {}):
self.encoding = encoding
def updateEncoding(self,string1,string2):
list1 = str.split(string1)
list2 = str.split(string2)
self.encoding = {}
for i in range(len(list1)):
self.encoding[list1[i]] = list2[i]
def encode(self, string):
encodedstring = ""
toencode = str.split(string)
for i in range(len(toencode)):
encodedstring += self.encoding[toencode[i]] + " "
return encodedstring
def decode(self, string):
decodedic = {}
for key in self.encoding:
decodedic[self.encoding[key]] = key
decodedstring = ""
todecode = str.split(string)
for i in range(len(todecode)):
decodedstring += decodedic[todecode[i]] + " "
return decodedstring
##################################
"""
29.5:
nein es gilt nicht, wenn z.B. das Dictionary für verschiedene schlüssel gleiche
Bedeutungen hat
z.B. dict erstellt mit den strings:
"haus baum welt"
"rot blau blau"
und übersetzt werden soll:
"baum welt haus"
dann erhält man am ende: "welt welt haus"
"""
#####################################
# source footer:
from random import randint
try:
#Create an Encoder object
enc = Encoder()
# Create two strings
st1 = "Lorem ipsum dolor sit amet consetetur sadipscing elitr sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat voluptua"
st2 = "At vero eos at accusam sit justo duo dolores et ea rebum Stet clita kasd gubergren no sea takimata sanctus est Lorem ipsum"
# set the dictionary
enc.updateEncoding(st1,st2)
# create a random sentence from words of the first sentence
bagOfWords = str.split(st1)
st3 = ""
for i in range(19):
st3 += bagOfWords[randint(0,len(bagOfWords)-1)]+" "
st3 += bagOfWords[1]
# encode the random sentence
st4 = enc.encode(st3)
# decode it
st5 = enc.decode(st4)
# print the random sentence
print("#Encode String:",st3)
# print the encoded sentence
print("#Decode String:",st4)
# print the decoded sentence
print("#Result:",st5)
# in this case: if the random and the decoded sentence are equal, the test is passed
if(str.split(st3) == str.split(st5)):
print("correct")
else:
print("Encoding or Decoding incorrect")
print("Line #Encode String: and Line #Result: should be equal")
except:
print("Some names or functions do not work correctly or are wrongly named")
| 28.263736
| 154
| 0.626361
| 864
| 0.335404
| 0
| 0
| 0
| 0
| 0
| 0
| 1,182
| 0.458851
|
3d97e3a10c2e5eda50ea446fddb6d02e4af4f7fc
| 543
|
py
|
Python
|
p2p/adapters.py
|
baltimore-sun-data/p2p-python
|
5f9648839d17c003104d88fd6cc6ca7a8eddd2c6
|
[
"MIT"
] | 9
|
2015-07-23T06:35:59.000Z
|
2020-06-01T04:33:56.000Z
|
p2p/adapters.py
|
baltimore-sun-data/p2p-python
|
5f9648839d17c003104d88fd6cc6ca7a8eddd2c6
|
[
"MIT"
] | 28
|
2015-10-16T19:09:58.000Z
|
2019-02-28T21:09:54.000Z
|
p2p/adapters.py
|
baltimore-sun-data/p2p-python
|
5f9648839d17c003104d88fd6cc6ca7a8eddd2c6
|
[
"MIT"
] | 5
|
2015-10-15T22:56:10.000Z
|
2018-11-13T20:44:39.000Z
|
from requests.adapters import HTTPAdapter, DEFAULT_POOLBLOCK
from requests.packages.urllib3.poolmanager import PoolManager
class TribAdapter(HTTPAdapter):
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK):
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block,
ssl_version='TLSv1')
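# A minimal usage sketch (illustrative only; the host below is hypothetical):
# mount the adapter on a requests Session so HTTPS requests go through the
# custom pool manager configured above.
if __name__ == '__main__':
    import requests

    session = requests.Session()
    session.mount('https://', TribAdapter())
    # response = session.get('https://content-api.example.com/endpoint')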
| 38.785714
| 78
| 0.664825
| 417
| 0.767956
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 0.012891
|
3d9a1b0edafd4fb0b37e8206295d03027352213c
| 18
|
py
|
Python
|
mltk/marl/algorithms/__init__.py
|
lqf96/mltk
|
7187be5d616781695ee68674cd335fbb5a237ccc
|
[
"MIT"
] | null | null | null |
mltk/marl/algorithms/__init__.py
|
lqf96/mltk
|
7187be5d616781695ee68674cd335fbb5a237ccc
|
[
"MIT"
] | 2
|
2019-12-24T01:54:21.000Z
|
2019-12-24T02:23:54.000Z
|
mltk/marl/algorithms/__init__.py
|
lqf96/mltk
|
7187be5d616781695ee68674cd335fbb5a237ccc
|
[
"MIT"
] | null | null | null |
from .phc import *
| 18
| 18
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3d9ccca595c0005acda152685faed3168eed5797
| 14,006
|
py
|
Python
|
src/elementary_modules.py
|
rmldj/random-graph-nn-paper
|
b04537f3312113b118878c37cb314a527c5b3a11
|
[
"MIT"
] | 3
|
2020-03-23T14:00:35.000Z
|
2020-09-24T13:56:18.000Z
|
src/elementary_modules.py
|
rmldj/random-graph-nn-paper
|
b04537f3312113b118878c37cb314a527c5b3a11
|
[
"MIT"
] | null | null | null |
src/elementary_modules.py
|
rmldj/random-graph-nn-paper
|
b04537f3312113b118878c37cb314a527c5b3a11
|
[
"MIT"
] | null | null | null |
import sympy as sym
import torch
import torch.nn as nn
import torch.nn.functional as F
class LambdaLayer(nn.Module):
"""
Layer that applies a given function on the input
"""
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class IdentityLayer(nn.Module):
"""
Layer performing identity mapping
"""
def __init__(self):
super(IdentityLayer, self).__init__()
def forward(self, x):
return x
class AbstractNode(nn.Module):
"""
    Abstract class used to create other modules. The AbstractNode module with blocktype=='simple' consists of a
    weighted sum of the inputs, followed by a ReLU activation, a convolution and batch norm.
"""
def __init__(self, in_channels, out_channels, num_inputs, kernel_size=3, stride=1, restype="C", blocktype="simple"):
"""
Constructor of the class.
:param in_channels: number of input channels.
:param out_channels: number of output channels.
:param num_inputs: number of inputs (ingoing edges). Should be >= 1.
:param kernel_size: The size of the kernel. Default = 3.
:param stride: The stride size. Default = 1.
:param restype: The type of the residual connection. Default = 'C'. If set to None, no residual connection will be added to the node.
:param blocktype: The type of block of operations performed in the node. Default = 'simple'.
"""
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.num_inputs = num_inputs
self.kernel_size = kernel_size
self.stride = stride
if num_inputs > 1:
self.weights = nn.Parameter(torch.randn(num_inputs, 1, 1, 1, 1), requires_grad=True)
self.blocktype = blocktype
self.__set_block()
self.restype = restype
if self.restype is not None:
self.__set_residual_connection()
def __set_block(self):
if self.blocktype == "simple":
self.block = nn.Sequential(
nn.Conv2d(self.in_channels, self.out_channels, self.kernel_size,
padding=self.kernel_size // 2, stride=self.stride, bias=False),
nn.BatchNorm2d(self.out_channels))
elif self.blocktype == "res":
self.block = nn.Sequential(
nn.Conv2d(self.in_channels, self.out_channels, kernel_size=self.kernel_size,
padding=self.kernel_size // 2, stride=self.stride, bias=False),
nn.BatchNorm2d(self.out_channels),
nn.ReLU(),
nn.Conv2d(self.out_channels, self.out_channels, kernel_size=self.kernel_size,
padding=self.kernel_size // 2, stride=1, bias=False),
nn.BatchNorm2d(self.out_channels)
)
else:
raise ValueError("Unknown blocktype {}".format(self.blocktype))
def __set_residual_connection(self):
if self.restype == "A":
channel_pad = (self.out_channels - self.in_channels) // 2
self.project = LambdaLayer(lambda x:
F.pad(x[:, :, ::self.stride, ::self.stride],
(0, 0, 0, 0, channel_pad, channel_pad), "constant", 0))
elif self.restype == "B":
if self.in_channels != self.out_channels or self.stride > 1:
self.project = nn.Sequential(
nn.Conv2d(self.in_channels, self.out_channels, 1, stride=self.stride, bias=False),
nn.BatchNorm2d(self.out_channels)
)
else:
self.project = IdentityLayer()
elif self.restype == "C":
self.project = nn.Sequential(
nn.Conv2d(self.in_channels, self.out_channels, 1, stride=self.stride, bias=False),
nn.BatchNorm2d(self.out_channels)
)
else:
raise ValueError("unknown restype: {}".format(self.restype))
def forward(self, inputs):
"""
        The forward pass of the module.
        :param inputs: Inputs to the model. Should be a list (if self.num_inputs > 1)
        or just a single tensor (if self.num_inputs == 1).
:return: returns the tensor being a result of the forward pass of the network
"""
x = self.aggregated_sum(inputs)
y = F.relu(x)
output = self.block(y)
if self.restype is None:
return output
else:
return output + self.project(x)
def aggregated_sum(self, inputs):
if self.num_inputs > 1:
if type(inputs) == list:
assert len(inputs) != 0 and len(inputs) == self.num_inputs, \
"inputs length cannot be zero and must much num_inputs: {}".format(self.num_inputs)
shape = list(inputs[0].size())
inputs = torch.cat(inputs).view([len(inputs)] + shape)
x = torch.sum(torch.mul(inputs, torch.sigmoid(self.weights)), dim=0)
else:
x = inputs
return x
def get_block_count(self):
'''
Computes the number of parameters.
:return: the number of parameters (a scalar) used by this node.
'''
if self.blocktype == "simple":
conv_params = self.in_channels * self.kernel_size * self.kernel_size * self.out_channels
weights = self.num_inputs if self.num_inputs > 1 else 0
batch_norm = 2 * self.out_channels
return conv_params + weights + batch_norm
elif self.blocktype == "res":
conv_params = self.in_channels * self.kernel_size * self.kernel_size * self.out_channels
conv_params += self.out_channels * self.kernel_size * self.kernel_size * self.out_channels
weights = self.num_inputs if self.num_inputs > 1 else 0
batch_norm = 4 * self.out_channels
return conv_params + weights + batch_norm
else:
raise ValueError("unknown bloktype: {}".format(self.blocktype))
@staticmethod
def __get_block_count_sym(C_in, C_out, num_inputs, kernel_size, blocktype):
if blocktype == "simple":
sym_conv = C_in * C_out * sym.sympify(kernel_size) ** 2
sym_weights = sym.sympify(num_inputs) if num_inputs > 1 else sym.sympify(0.0)
sym_batch_norm = 2 * C_out
return sym_conv + sym_weights + sym_batch_norm
elif blocktype == "res":
sym_conv = C_in * C_out * sym.sympify(kernel_size) ** 2
sym_conv = sym_conv + (C_out * C_out * sym.sympify(kernel_size) ** 2)
sym_weights = sym.sympify(num_inputs) if num_inputs > 1 else sym.sympify(0.0)
sym_batch_norm = 4 * C_out
return sym_conv + sym_weights + sym_batch_norm
else:
raise ValueError("unknown bloktype: {}".format(blocktype))
def params_count(self):
"""
function calculating the number of parameters in the network
:return: the number of trainable parameters in the module
"""
block_params = self.get_block_count()
residual_params = 0
if self.restype == "C" or (self.restype == "B" and (self.in_channels != self.out_channels or self.stride > 1)):
residual_params += self.in_channels * self.out_channels
residual_params += 2 * self.out_channels
return block_params + residual_params
@staticmethod
def params_count_sym(C_in, C_out, num_inputs=1, kernel_size=3, restype="C", blocktype="simple", stride=1):
"""
function returning symbolic equation for the number of parameters in the module.
:param C_in: symbolic variable for the number of input channels
:param C_out: symbolic variable for the number of output channels
:param num_inputs: number of inputs to the layer (default=1)
:param kernel_size: the size of the kernel (default=3)
:param restype: the type of the residual connection (default='C')
:param blocktype: the type of the node operation block (default='simple')
:param stride: the stride of the convolution (default=1)
:return: The symbolic equation defining the number of parameters in the module.
The C_in and C_out should always be functions of the same, one symbolic variable C
(i.e C_in = g(C) and C_out = f(C))
"""
sym_block = AbstractNode.__get_block_count_sym(C_in, C_out, num_inputs, kernel_size, blocktype)
residual_params = sym.sympify(0.0)
if restype == "C" or (restype == "B" and (C_in != C_out or stride > 1)):
residual_params = C_in * C_out + residual_params
residual_params = 2 * C_out + residual_params
return sym_block + residual_params
class Node(AbstractNode):
"""
Class representing a single node in the Net.
    Consists of a weighted sum, a ReLU activation, a convolution layer and batch norm.
Number of input channels is equal to number of output channels.
"""
def __init__(self, channels, num_inputs, kernel_size=3, restype="C", blocktype="simple"):
"""
Constructor of the module.
:param channels: number of the input channels.
:param num_inputs: number of inputs.
:param kernel_size: the kernel size in convolution layer. Default = 3.
:param restype: the type of residual connection.
:param blocktype: the type of the node block operations.
"""
super().__init__(channels, channels, num_inputs, kernel_size, restype=restype, blocktype=blocktype)
@staticmethod
def params_count_sym(C_in, C_out, num_inputs=1, kernel_size=3, restype="C", blocktype="simple", stride=1):
return AbstractNode.params_count_sym(C_in, C_out, num_inputs, kernel_size, restype, blocktype, stride)
class Reduce(AbstractNode):
"""
The module performing spatial dimension reduction.
    Consists of a ReLU activation, a convolution with stride equal to the reduction ratio, and batch norm.
"""
def __init__(self, in_channels, out_channels, reduce_ratio, kernel_size=3, restype="C", blocktype="simple"):
"""
Constructor of the module.
:param in_channels: number of input channels.
:param out_channels: number of output channels.
:param reduce_ratio: the reduction ratio.
:param kernel_size: the size of the kernel in convolution. default = 3.
:param restype: the type of residual connection.
:param blocktype: the type of the node block operations.
"""
super().__init__(in_channels, out_channels, num_inputs=1, kernel_size=kernel_size, stride=reduce_ratio,
restype=restype, blocktype=blocktype)
@staticmethod
def params_count_sym(C_in, C_out, num_inputs=1, kernel_size=3, restype="C", blocktype="simple", stride=2):
return AbstractNode.params_count_sym(C_in, C_out, num_inputs, kernel_size, restype, blocktype, stride)
class Input(AbstractNode):
"""
The module for performing initial channels expansion (or reduction).
"""
def __init__(self, channels, num_inputs=1, kernel_size=3, restype="C", blocktype="simple"):
"""
the constructor of the input node. The input channels are assumed to be 3.
:param channels: number of output channels.
:param num_inputs: number of inputs (ingoing edges), default = 1.
:param kernel_size: the size of the kernel in convolution, default = 3.
:param restype: the type of residual connection.
:param blocktype: the type of the node block operations.
"""
super().__init__(3, channels, num_inputs, kernel_size, stride=1, restype=restype, blocktype=blocktype)
@staticmethod
def params_count_sym(C_in, C_out, num_inputs=1, kernel_size=3, restype="C", blocktype="simple", stride=1):
return AbstractNode.params_count_sym(C_in, C_out, num_inputs, kernel_size, restype, blocktype, stride)
def forward(self, inputs):
x = super().aggregated_sum(inputs)
if self.restype == "C":
y = F.relu(x)
else:
y = x
output = self.block(y)
if self.restype is None:
return output
else:
return output + self.project(x)
class Output(nn.Module):
"""
The module performing final prediction head operations. Consists of average pooling
and a dense layer (with no activation).
"""
def __init__(self, in_channels, num_outputs=10):
'''
The constructor of the module.
:param in_channels: the number of input channels.
:param num_outputs: the number of prediction outputs. default = 10.
'''
super().__init__()
self.in_channels = in_channels
self.num_outputs = num_outputs
self.linear = nn.Linear(self.in_channels, self.num_outputs)
def forward(self, inputs):
"""
        Performs the forward pass: average-pools each channel over its spatial
        dimensions and applies a linear layer with bias.
:param inputs: the inputs.
:return: The result of the last linear layer without activation.
"""
# assumes N*C*H*W input shape
out = F.avg_pool2d(inputs, inputs.size()[3])
out = out.view(out.size(0), -1)
return self.linear(out)
def params_count(self):
"""
Returns the number of parameters.
:return:
"""
return self.in_channels * self.num_outputs + self.num_outputs
@staticmethod
def params_count_sym(C, num_outputs):
"""
Calculates the symbolic number of parameters.
:param C: the symbolic variable for the number of inputs.
:return: the symbolic equation for the total number of parameters in this module.
"""
return C * sym.sympify(num_outputs) + sym.sympify(num_outputs)
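# A minimal usage sketch (illustrative only): builds the symbolic parameter
# count for one Node with C input/output channels plus the prediction head,
# mirroring the params_count_sym() docstrings above. The symbol name 'C' and
# the chosen arguments are arbitrary examples.
if __name__ == '__main__':
    C = sym.Symbol('C', positive=True)
    node_params = Node.params_count_sym(C, C, num_inputs=2, kernel_size=3)
    head_params = Output.params_count_sym(C, num_outputs=10)
    print(sym.expand(node_params + head_params))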
| 43.228395
| 141
| 0.628873
| 13,898
| 0.992289
| 0
| 0
| 3,252
| 0.232186
| 0
| 0
| 5,129
| 0.3662
|
3d9d90c223017d9e1ce9c0cffb8a666b613826f2
| 1,326
|
py
|
Python
|
actions.py
|
rodrigocamposdf/MovieBot
|
927ded61a201e6b5c33efd88e9e9a0271a43a4d4
|
[
"MIT"
] | 1
|
2021-09-21T00:00:25.000Z
|
2021-09-21T00:00:25.000Z
|
actions.py
|
rodrigocamposdf/MovieBot
|
927ded61a201e6b5c33efd88e9e9a0271a43a4d4
|
[
"MIT"
] | null | null | null |
actions.py
|
rodrigocamposdf/MovieBot
|
927ded61a201e6b5c33efd88e9e9a0271a43a4d4
|
[
"MIT"
] | 5
|
2020-07-20T18:43:59.000Z
|
2020-11-03T22:49:17.000Z
|
import movies
def action_handler(action, parameters, return_var):
return_values = {}
if action == 'trendings':
return_values = get_trendings(parameters, return_var)
elif action == 'search':
return_values = search_movies(parameters, return_var)
return {
'skills': {
'main skill': {
'user_defined': return_values
}
}
}
def get_trendings(parameters, return_var):
is_day = (parameters['periodo'] == 'dia')
movie_titles = movies.get_trendings(is_day)
    # format the titles here to keep it simple; doing it in the assistant is more complex
    # because it does not offer the same programming power
movie_string = '\n\n'
for movie in movie_titles:
movie_string += movie + ',\n'
movie_string = movie_string[:-2]
return {
return_var: movie_string
}
def search_movies(parameters, return_var):
query = parameters['termo']
movie_titles = movies.search_movies(query)
    # format the titles here to keep it simple; doing it in the assistant is more complex
    # because it does not offer the same programming power
movie_string = '\n\n'
for movie in movie_titles:
movie_string += movie + ',\n'
movie_string = movie_string[:-2]
return {
return_var: movie_string
}
| 28.212766
| 78
| 0.630468
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 334
| 0.251885
|
3d9dc45f332b2fb283e892734ee2a5da821f63dd
| 118
|
py
|
Python
|
Exercicios7/percorrendoLista.py
|
vinihf/Prog1_ADS_2019
|
97d2e0cddf72c00a73d0bc3070bb9731e66e19e2
|
[
"CC-BY-4.0"
] | 1
|
2019-04-18T13:43:15.000Z
|
2019-04-18T13:43:15.000Z
|
Exercicios7/percorrendoLista.py
|
vinihf/Prog1_ADS_2019
|
97d2e0cddf72c00a73d0bc3070bb9731e66e19e2
|
[
"CC-BY-4.0"
] | null | null | null |
Exercicios7/percorrendoLista.py
|
vinihf/Prog1_ADS_2019
|
97d2e0cddf72c00a73d0bc3070bb9731e66e19e2
|
[
"CC-BY-4.0"
] | null | null | null |
lista = list(range(0,10001))
for cont in range(0,10001):
print(lista[cont])
for valor in lista:
print(valor)
| 16.857143
| 28
| 0.669492
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3d9e72965d75f1eba7d57fa18ca18b2a64265bc7
| 8,282
|
py
|
Python
|
core/spacy_parser.py
|
teodor-cotet/DiacriticsRestoration
|
e7b41d75b84ab2131694f16b9bd93448e83069e1
|
[
"Apache-2.0"
] | 1
|
2020-12-05T15:45:48.000Z
|
2020-12-05T15:45:48.000Z
|
core/spacy_parser.py
|
teodor-cotet/DiacriticsRestoration
|
e7b41d75b84ab2131694f16b9bd93448e83069e1
|
[
"Apache-2.0"
] | 2
|
2021-03-18T07:37:28.000Z
|
2021-07-27T14:45:14.000Z
|
core/spacy_parser.py
|
teodor-cotet/DiacriticsRestoration
|
e7b41d75b84ab2131694f16b9bd93448e83069e1
|
[
"Apache-2.0"
] | null | null | null |
import spacy
from spacy.lang.ro import Romanian
from typing import Dict, List, Iterable
from nltk import sent_tokenize
import re
# JSON Example localhost:8081/spacy application/json
# {
# "lang" : "en",
# "blocks" : ["După terminarea oficială a celui de-al doilea război mondial, în conformitate cu discursul lui W. Churchill (prim ministru al Regatului Unit la acea dată), de la Fulton, s-a declanșat Războiul rece și a apărut conceptul de cortină de fier. Urmare a politicii consecvente de apărare a sistemului economic și politic (implicit a intereslor economice ale marelui capital din lumea occidentală) trupele germane, în calitate de prizonieri, aflate pe teritoriul Germaniei de Vest au fost reînarmate și au constituit baza viitorului Bundeswehr - armata regulată a R.F.G."]
# }
models = {
'en': 'en_coref_lg',
'nl': 'nl',
'fr': 'fr_core_news_md',
'es': 'es',
'de': 'de',
'it': 'it',
'ro': 'models/model3'
}
normalization = {
'ro': [
(re.compile("ş"), "ș"),
(re.compile("Ş"), "Ș"),
(re.compile("ţ"), "ț"),
(re.compile("Ţ"), "Ț"),
(re.compile("(\w)î(\w)"), "\g<1>â\g<2>")
]
}
def convertToPenn(pos: str, lang: str) -> str:
if lang == 'fr':
pos = pos.lower()
if pos.startswith('noun') or pos.startswith('propn'):
return "NN"
if pos.startswith("verb"):
return "VB"
if pos.startswith("adj"):
return "JJ"
if pos.startswith("adv"):
return "RB"
if pos.startswith("adp"):
return "IN"
if pos.startswith("cconj"):
return "CC"
return ""
if lang == 'nl':
pos = pos.lower()
if pos.startswith('n_') or pos.startswith('n|') or pos.startswith('propn'):
return "NN"
if pos.startswith("v_") or pos.startswith("v|"):
return "VB"
if pos.startswith("adj"):
return "JJ"
if pos.startswith("adv"):
return "RB"
if pos.startswith("adp"):
return "IN"
if pos.startswith("cconj") or pos.startswith("conj"):
return "CC"
return ""
if lang == 'ro':
pos = pos.lower()
if pos.startswith("n"):
return "NN"
if pos.startswith("v"):
return "VB"
if pos.startswith("a"):
return "JJ"
if pos.startswith("r"):
return "RB"
if pos.startswith("s") or pos.startswith("cs"):
return "IN"
if pos.startswith("c"):
return "CC"
return ""
if len(pos) > 2:
return pos[:2]
return pos
class SpacyParser:
def __init__(self):
self.ner = spacy.load('xx_ent_wiki_sm')
# self.romanian = Romanian()
self.pipelines = {
lang: spacy.util.get_lang_class(lang)()
for lang in models
}
# for pipeline in self.pipelines.values():
# component = pipeline.create_pipe('tagger') # 3. create the pipeline components
# pipeline.add_pipe(component)
self.loaded_models = {}
def preprocess(self, text: str, lang: str) -> str:
if lang not in normalization:
return text
for pattern, replacement in normalization[lang]:
text = re.sub(pattern, replacement, text)
return text
def get_tokens_lemmas(self, sentences: Iterable, lang: str) -> Iterable:
if lang not in self.pipelines:
return None
pipeline = self.pipelines[lang]
# sbd = pipeline.create_pipe('sentencizer')
# pipeline.add_pipe(sbd)
doc = pipeline.pipe((sent[:1].lower() + sent[1:] for sent in sentences), batch_size=100000, n_threads=16)
# print([sent.string.strip() for sent in doc.sents])
# print(len(doc.sents))
# print("====================")
# for token in doc:
# print(token.text, token.lemma_, token.pos_, token.tag_, token.dep_,
# token.shape_, token.is_alpha, token.is_stop)
# print("====================")
return doc
# return [(token.text, token.lemma_) for token in doc]
def tokenize_sentences(self, block: str) -> List[str]:
return sent_tokenize(block)
def parse(self, sentence: str, lang: str):
if lang not in self.loaded_models:
self.loaded_models[lang] = spacy.load(models[lang])
model = self.loaded_models[lang]
doc = model(sentence)
# print([sent.string.strip() for sent in doc.sents])
# for chunk in doc.noun_chunks:
# print(chunk.text, chunk.root.text, chunk.root.dep_,
# chunk.root.head.text)
# print("********************")
# for token in doc:
# print(token.text, token.lemma_, token.pos_, token.tag_, token.dep_,
# token.shape_, token.is_alpha, token.is_stop)
# print("********************")
# return [(token.text, token.lemma_, token.pos_, token.tag_) for token in doc]
return doc
def process(self, doc):
lang = doc["lang"]
for block in doc["blocks"]:
sents = sent_tokenize(block["text"])
block["sentences"] = list()
for sent in sents:
ne = self.ner(sent)
tokens = self.parse(sent, lang)
# print(ne)
# print(pos)
res_sent = {}
res_sent["text"] = sent
res_sent["words"] = []
# get pos tags
for w in tokens:
wp = {"text" : w.text}
wp["index"] = w.i
wp["lemma"] = w.lemma_
wp["pos"] = convertToPenn(w.tag_, lang)
wp["dep"] = w.dep_
wp["ner"] = w.ent_type_
wp["head"] = w.head.i
res_sent["words"].append(wp)
# get named entities
for ent in [token for token in ne if token.ent_type != 0]:
for w in res_sent["words"]:
# or (' ' in ent[0] and w["word"] in ent[0])
if w["index"] == ent.i:
w["ner"] = ent.ent_type_
block["sentences"].append(res_sent)
return doc
if __name__ == "__main__":
spacyInstance = SpacyParser()
sent = """
După terminarea oficială a celui de-al doilea război mondial, în conformitate cu discursul lui W. Churchill (prim ministru al Regatului Unit la acea dată), de la Fulton, s-a declanșat Războiul rece și a apărut conceptul de cortină de fier. Urmare a politicii consecvente de apărare a sistemului economic și politic (implicit a intereslor economice ale marelui capital din lumea occidentală) trupele germane, în calitate de "prizonieri", aflate pe teritoriul Germaniei de Vest au fost reînarmate și au constituit baza viitorului "Bundeswehr" - armata regulată a R.F.G.
Pe fondul evenimentelor din 1948 din Cehoslovacia (expulzări ale etnicilor germani, alegeri, reconstrucție economică) apare infiltrarea agenților serviciilor speciale ale S.U.A. și Marii Britanii cu rol de "agitatori". Existând cauza, trupele sovietice nu părăsesc Europa Centrală și de Est cucerită-eliberată, staționând pe teritoriul mai multor state. Aflate pe linia de demarcație dintre cele două blocuri foste aliate, armata sovietică nu a plecat din Ungaria decât după dizolvarea Tratatului de la Varșovia.
"""
# sent = """
# După terminarea oficială a celui de-al doilea război mondial, în conformitate cu discursul lui Churchill, de la Fulton, s-a declanșat Războiul rece și a apărut conceptul de cortină de fier."""
# print(spacyInstance.get_ner(sent))
# print(spacyInstance.get_tokens_lemmas(sent))
# doc = spacyInstance.parse("My sister has a dog. She loves him.", 'en')
doc = spacyInstance.parse("Pensée des enseignants, production d’écrits, ingénierie éducative, enseignement à distance, traitement automatique de la langue, outils cognitifs, feedback automatique", 'fr')
for token in doc:
print(convertToPenn(token.tag_, 'fr'))
# print(spacyInstance.preprocess("coborî", 'ro'))
| 42.255102
| 584
| 0.579087
| 3,699
| 0.442305
| 0
| 0
| 0
| 0
| 0
| 0
| 4,080
| 0.487863
|
3da0bfcdf3a8e5f3c1aebf2e4b45b14e05c629a8
| 1,375
|
py
|
Python
|
code/bot/bot3.py
|
josemac95/umucv
|
f0f8de17141f4adcb4966281c3f83539ebda5f0b
|
[
"BSD-3-Clause"
] | null | null | null |
code/bot/bot3.py
|
josemac95/umucv
|
f0f8de17141f4adcb4966281c3f83539ebda5f0b
|
[
"BSD-3-Clause"
] | null | null | null |
code/bot/bot3.py
|
josemac95/umucv
|
f0f8de17141f4adcb4966281c3f83539ebda5f0b
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/env python
# command with arguments
# and processing of an image
# sent by the user
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from io import BytesIO
from PIL import Image
import cv2 as cv
import skimage.io as io
updater = Updater('api token del bot')
def sendImage(bot, cid, frame):
frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
image = Image.fromarray(frame, mode = 'RGB')
byte_io = BytesIO()
image.save(byte_io, 'PNG')
byte_io.seek(0)
bot.sendPhoto(chat_id=cid, photo=byte_io)
def hello(bot, update):
update.message.reply_text('Hello {}'.format(update.message.from_user.first_name))
def argu(bot, update, args):
print('arguments:')
for arg in args:
print(arg)
def work(bot, update):
file_id = update.message.photo[-1].file_id
path = bot.get_file(file_id)['file_path']
img = io.imread(path)
print(img.shape)
update.message.reply_text('{}x{}'.format(img.shape[1],img.shape[0]))
r = cv.cvtColor(cv.cvtColor(img, cv.COLOR_RGB2GRAY), cv.COLOR_GRAY2RGB)
sendImage(bot, update.message.chat_id, r)
updater.dispatcher.add_handler(CommandHandler('hello', hello))
updater.dispatcher.add_handler(CommandHandler('argu' , argu, pass_args=True))
updater.dispatcher.add_handler(MessageHandler(Filters.photo, work))
updater.start_polling()
updater.idle()
| 28.061224
| 85
| 0.722182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 183
| 0.133091
|
3da10758c9f1e0fdc4bba0b279e9579ff6f1b0c5
| 1,236
|
py
|
Python
|
AUTOENCODERS/DataPreparing/CICIDSPreprocessor.py
|
pawelptak/AI-Anomaly-Detection
|
0d3e6072e273d6cc59ba79d5f8c73f393d1ec4e5
|
[
"MIT"
] | 1
|
2022-03-23T10:18:17.000Z
|
2022-03-23T10:18:17.000Z
|
AUTOENCODERS/DataPreparing/CICIDSPreprocessor.py
|
pawelptak/AI-Anomaly-Detection
|
0d3e6072e273d6cc59ba79d5f8c73f393d1ec4e5
|
[
"MIT"
] | null | null | null |
AUTOENCODERS/DataPreparing/CICIDSPreprocessor.py
|
pawelptak/AI-Anomaly-Detection
|
0d3e6072e273d6cc59ba79d5f8c73f393d1ec4e5
|
[
"MIT"
] | null | null | null |
from sklearn.preprocessing import StandardScaler, LabelEncoder, MinMaxScaler, OneHotEncoder
import numpy as np
import pandas as pd
import tqdm
"""
Class for Preprocessing CICIDS2017 Data represented as rows
"""
class CICIDSPreprocessor:
def __init__(self):
self.to_delete_columns = ['Flow ID', ' Timestamp']
self.label_column = ' Label'
def preprocess_train_data(self, df: pd.DataFrame, label="BENIGN"):
df = df.drop(self.to_delete_columns, axis=1)
df = df[df[self.label_column] == label]
df.reset_index(drop=True, inplace=True)
df.drop(self.label_column, axis=1, inplace=True)
return df.fillna(0)
def preprocess_test_data(self, df: pd.DataFrame, label="BENIGN"):
df = df.drop(self.to_delete_columns, axis=1)
df = df[df[self.label_column] == label]
df.reset_index(drop=True, inplace=True)
df.drop(self.label_column, axis=1, inplace=True)
return df.fillna(0)
def __get_windows(self, df, window_size=20, stride=10):
windows_arr = []
for i in tqdm.tqdm(range(0, len(df)-window_size+1, stride)):
windows_arr.append(df.iloc[i:i+window_size, :].to_numpy())
return np.array(windows_arr)
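# A minimal usage sketch (illustrative only; the frame below is a tiny
# hypothetical stand-in for CICIDS2017 rows with the expected columns):
# keeps only BENIGN rows, drops identifiers and the label, and fills NaNs.
if __name__ == '__main__':
    example_df = pd.DataFrame({
        'Flow ID': ['1', '2'],
        ' Timestamp': ['t1', 't2'],
        ' Flow Duration': [10.0, None],
        ' Label': ['BENIGN', 'DDoS'],
    })
    preprocessor = CICIDSPreprocessor()
    print(preprocessor.preprocess_train_data(example_df))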
| 33.405405
| 91
| 0.670712
| 1,021
| 0.826052
| 0
| 0
| 0
| 0
| 0
| 0
| 112
| 0.090615
|
3da195067ff01ae97b234bc41093431b6cebf500
| 646
|
py
|
Python
|
class3/collateral/show_genie.py
|
twin-bridges/netmiko_course
|
31943e4f6f66dbfe523d62d5a2f03285802a8c56
|
[
"Apache-2.0"
] | 11
|
2020-09-16T06:53:16.000Z
|
2021-08-24T21:27:37.000Z
|
class3/collateral/show_genie.py
|
twin-bridges/netmiko_course
|
31943e4f6f66dbfe523d62d5a2f03285802a8c56
|
[
"Apache-2.0"
] | null | null | null |
class3/collateral/show_genie.py
|
twin-bridges/netmiko_course
|
31943e4f6f66dbfe523d62d5a2f03285802a8c56
|
[
"Apache-2.0"
] | 5
|
2020-10-18T20:25:59.000Z
|
2021-10-20T16:27:00.000Z
|
import os
from netmiko import ConnectHandler
from getpass import getpass
from pprint import pprint
# Code so automated tests will run properly
# Check for environment variable, if that fails, use getpass().
password = os.getenv("NETMIKO_PASSWORD") if os.getenv("NETMIKO_PASSWORD") else getpass()
my_device = {
"device_type": "cisco_xe",
"host": "cisco3.lasthop.io",
"username": "pyclass",
"password": password,
}
with ConnectHandler(**my_device) as net_connect:
output = net_connect.send_command("show ip int brief", use_genie=True)
# output = net_connect.send_command("show ip arp", use_genie=True)
pprint(output)
| 30.761905
| 88
| 0.733746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 304
| 0.470588
|
3da20b359813d6186015461736f4d52256b59084
| 2,793
|
py
|
Python
|
pints/tests/test_toy_hes1_michaelis_menten_model.py
|
lisaplag/pints
|
3de6617e57ba5b395edaca48961bfc5a4b7209b3
|
[
"RSA-MD"
] | null | null | null |
pints/tests/test_toy_hes1_michaelis_menten_model.py
|
lisaplag/pints
|
3de6617e57ba5b395edaca48961bfc5a4b7209b3
|
[
"RSA-MD"
] | null | null | null |
pints/tests/test_toy_hes1_michaelis_menten_model.py
|
lisaplag/pints
|
3de6617e57ba5b395edaca48961bfc5a4b7209b3
|
[
"RSA-MD"
] | null | null | null |
#!/usr/bin/env python3
#
# Tests if the HES1 Michaelis-Menten toy model runs.
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import unittest
import numpy as np
import pints
import pints.toy
class TestHes1Model(unittest.TestCase):
"""
Tests if the HES1 Michaelis-Menten toy model runs.
"""
def test_run(self):
model = pints.toy.Hes1Model()
self.assertEqual(model.n_parameters(), 4)
self.assertEqual(model.n_outputs(), 1)
times = model.suggested_times()
parameters = model.suggested_parameters()
values = model.simulate(parameters, times)
self.assertEqual(values.shape, (len(times),))
self.assertTrue(np.all(values > 0))
states = model.simulate_all_states(parameters, times)
self.assertEqual(states.shape, (len(times), 3))
self.assertTrue(np.all(states > 0))
suggested_values = model.suggested_values()
self.assertEqual(suggested_values.shape, (len(times),))
self.assertTrue(np.all(suggested_values > 0))
# Test setting and getting init cond.
self.assertFalse(np.all(model.initial_conditions() == 10))
model.set_initial_conditions(10)
self.assertTrue(np.all(model.initial_conditions() == 10))
# Test setting and getting implicit param.
self.assertFalse(np.all(model.implicit_parameters() == [10, 10, 10]))
model.set_implicit_parameters([10, 10, 10])
self.assertTrue(np.all(model.implicit_parameters() == [10, 10, 10]))
# Initial conditions cannot be negative
model = pints.toy.Hes1Model(0)
self.assertRaises(ValueError, pints.toy.Hes1Model, -1)
# Implicit parameters cannot be negative
model = pints.toy.Hes1Model(0, [0, 0, 0])
self.assertRaises(ValueError, pints.toy.Hes1Model, *(0, [-1, 0, 0]))
self.assertRaises(ValueError, pints.toy.Hes1Model, *(0, [0, -1, 0]))
self.assertRaises(ValueError, pints.toy.Hes1Model, *(0, [0, 0, -1]))
self.assertRaises(ValueError, pints.toy.Hes1Model, *(0, [-1, -1, -1]))
def test_values(self):
# value-based tests for Hes1 Michaelis-Menten
times = np.linspace(0, 10, 101)
parameters = [3.8, 0.035, 0.15, 7.5]
iparameters = [4.5, 4.0, 0.04]
y0 = 7
model = pints.toy.Hes1Model(y0=y0, implicit_parameters=iparameters)
values = model.simulate(parameters, times)
self.assertEqual(values[0], y0)
self.assertAlmostEqual(values[1], 7.011333, places=6)
self.assertAlmostEqual(values[100], 5.420750, places=6)
if __name__ == '__main__':
unittest.main()
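# A minimal sketch of exercising the model outside the test harness (mirrors test_run above);
# it relies only on calls already used in the tests:
#     model = pints.toy.Hes1Model()
#     values = model.simulate(model.suggested_parameters(), model.suggested_times())
#     # `values` is a 1-d array with one simulated observation per suggested time point.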
| 38.260274
| 78
| 0.653419
| 2,396
| 0.857859
| 0
| 0
| 0
| 0
| 0
| 0
| 550
| 0.196921
|
3da3144e79a3871eba136a301ca02449b8340d18
| 390
|
py
|
Python
|
pyctogram/instagram_client/relations/__init__.py
|
RuzzyRullezz/pyctogram
|
b811c55dc1c74d57ef489810816322e7f2909f3d
|
[
"MIT"
] | 1
|
2019-12-10T08:01:58.000Z
|
2019-12-10T08:01:58.000Z
|
pyctogram/instagram_client/relations/__init__.py
|
RuzzyRullezz/pyctogram
|
b811c55dc1c74d57ef489810816322e7f2909f3d
|
[
"MIT"
] | null | null | null |
pyctogram/instagram_client/relations/__init__.py
|
RuzzyRullezz/pyctogram
|
b811c55dc1c74d57ef489810816322e7f2909f3d
|
[
"MIT"
] | null | null | null |
from . base import Actions, get_users
def get_followers(username, password, victim_username, proxies=None):
return get_users(username, password, victim_username, proxies=proxies, relation=Actions.followers)
def get_followings(username, password, victim_username, proxies=None):
return get_users(username, password, victim_username, proxies=proxies, relation=Actions.followings)
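# Minimal usage sketch; both helpers share the same signature and the account names below are
# placeholders, not real credentials:
#     followers = get_followers("my_login", "my_password", "target_account")
#     followings = get_followings("my_login", "my_password", "target_account", proxies=None)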
| 39
| 103
| 0.810256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3da323f7d830c432cc131d570a30ac74ba6392bd
| 1,636
|
py
|
Python
|
day-40-API-Cheapest-Flight-Multiple-Users/data_manager.py
|
anelshaer/Python100DaysOfCode
|
012ae7dda28dc790d3bc4d26df807a4dba179ffe
|
[
"MIT"
] | null | null | null |
day-40-API-Cheapest-Flight-Multiple-Users/data_manager.py
|
anelshaer/Python100DaysOfCode
|
012ae7dda28dc790d3bc4d26df807a4dba179ffe
|
[
"MIT"
] | null | null | null |
day-40-API-Cheapest-Flight-Multiple-Users/data_manager.py
|
anelshaer/Python100DaysOfCode
|
012ae7dda28dc790d3bc4d26df807a4dba179ffe
|
[
"MIT"
] | null | null | null |
import requests
import os
from user_data import UserData
import json
class DataManager:
"""This class is responsible for talking to the Google Sheet."""
def __init__(self) -> None:
self.SHEETY_URL = f"https://api.sheety.co/{os.environ['SHEETY_SHEET_ID']}/pythonFlightDeals"
self.sheet_data = {}
self.bearer_token = os.environ["SHEETY_TOKEN"]
self.headers = {
"Authorization": f"Bearer {self.bearer_token}"
}
def get_cities(self):
response = requests.get(url=f"{self.SHEETY_URL}/prices", headers=self.headers)
response.raise_for_status()
self.sheet_data = response.json()
return self.sheet_data
def update_city(self, row_id, city_data):
self.headers["Content-Type"] = "application/json"
response = requests.put(url=f"{self.SHEETY_URL}/prices/{row_id}", json=city_data,headers=self.headers)
response.raise_for_status()
def get_users(self):
response = requests.get(url=f"{self.SHEETY_URL}/subscribers", headers=self.headers)
response.raise_for_status()
return response.json()['subscribers']
def add_user(self, user: UserData):
self.headers["Content-Type"] = "application/json"
user_data = {
"subscriber": {
"first": str(user.first_name),
"last": str(user.last_name),
"email": str(user.email),
}
}
response = requests.post(url=f"{self.SHEETY_URL}/subscribers", json=user_data, headers=self.headers)
print(response.text)
response.raise_for_status()
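# Minimal usage sketch, assuming SHEETY_SHEET_ID and SHEETY_TOKEN are exported in the
# environment as the constructor requires (these calls hit the live Sheety API):
#     data_manager = DataManager()
#     prices = data_manager.get_cities()        # rows of the "prices" sheet
#     subscribers = data_manager.get_users()    # rows of the "subscribers" sheet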
| 34.808511
| 110
| 0.630807
| 1,564
| 0.95599
| 0
| 0
| 0
| 0
| 0
| 0
| 432
| 0.264059
|
3da40761377898e0edc360572dbd5d864963e85c
| 4,232
|
py
|
Python
|
crime_data/resources/incidents.py
|
18F/crime-data-api
|
3e8cab0fad4caac1d7d8ef1b62ae7a1441752c6c
|
[
"CC0-1.0"
] | 51
|
2016-09-16T00:37:56.000Z
|
2022-01-22T03:48:24.000Z
|
crime_data/resources/incidents.py
|
harrisj/crime-data-api
|
9b49b5cc3cd8309dda888f49356ee5168c43851a
|
[
"CC0-1.0"
] | 605
|
2016-09-15T19:16:49.000Z
|
2018-01-18T20:46:39.000Z
|
crime_data/resources/incidents.py
|
harrisj/crime-data-api
|
9b49b5cc3cd8309dda888f49356ee5168c43851a
|
[
"CC0-1.0"
] | 12
|
2018-01-18T21:15:34.000Z
|
2022-02-17T10:09:40.000Z
|
from webargs.flaskparser import use_args
from itertools import filterfalse
from crime_data.common import cdemodels, marshmallow_schemas, models, newmodels
from crime_data.common.base import CdeResource, tuning_page, ExplorerOffenseMapping
from crime_data.extensions import DEFAULT_MAX_AGE
from flask.ext.cachecontrol import cache
from flask import jsonify
def _is_string(col):
col0 = list(col.base_columns)[0]
return issubclass(col0.type.python_type, str)
class AgenciesSumsState(CdeResource):
'''''
Agency Suboffense Sums by (year, agency) - Only agencies reporting all 12 months.
'''''
schema = marshmallow_schemas.AgencySumsSchema(many=True)
fast_count = True
@use_args(marshmallow_schemas.OffenseCountViewArgs)
@cache(max_age=DEFAULT_MAX_AGE, public=True)
@tuning_page
def get(self, args, state_abbr = None, agency_ori = None):
self.verify_api_key(args)
model = newmodels.AgencySums()
year = args.get('year', None)
explorer_offense = args.get('explorer_offense', None)
agency_sums = model.get(state = state_abbr, agency = agency_ori, year = year, explorer_offense = explorer_offense)
filename = 'agency_sums_state'
return self.render_response(agency_sums, args, csv_filename=filename)
class AgenciesSumsCounty(CdeResource):
'''''
Agency Suboffense Sums by (year, agency) - Only agencies reporting all 12 months.
'''''
schema = marshmallow_schemas.AgencySumsSchema(many=True)
fast_count = True
@use_args(marshmallow_schemas.OffenseCountViewArgsYear)
@cache(max_age=DEFAULT_MAX_AGE, public=True)
@tuning_page
def get(self, args, state_abbr = None, county_fips_code = None, agency_ori = None):
'''''
Year is a required field atm.
'''''
self.verify_api_key(args)
model = newmodels.AgencySums()
year = args.get('year', None)
explorer_offense = args.get('explorer_offense', None)
agency_sums = model.get(agency = agency_ori, year = year, county = county_fips_code, state=state_abbr, explorer_offense=explorer_offense)
filename = 'agency_sums_county'
return self.render_response(agency_sums, args, csv_filename=filename)
class AgenciesOffensesCount(CdeResource):
'''''
Agency Offense counts by year.
'''''
schema = marshmallow_schemas.AgencyOffensesSchema(many=True)
fast_count = True
@use_args(marshmallow_schemas.OffenseCountViewArgs)
@cache(max_age=DEFAULT_MAX_AGE, public=True)
@tuning_page
def get(self, args, state_abbr = None, agency_ori = None):
self.verify_api_key(args)
year = args.get('year', None)
explorer_offense = args.get('explorer_offense', None)
agency_sums = None
# ugh
if explorer_offense == 'violent' or explorer_offense == 'property':
agency_sums = newmodels.AgencyClassificationCounts().get(state = state_abbr, agency = agency_ori, year = year, classification = explorer_offense)
else:
agency_sums = newmodels.AgencyOffenseCounts().get(state = state_abbr, agency = agency_ori, year = year, explorer_offense = explorer_offense)
filename = 'agency_offenses_state'
return self.render_response(agency_sums, args, csv_filename=filename)
class AgenciesOffensesCountyCount(CdeResource):
'''''
Agency Offense counts by year.
'''''
schema = marshmallow_schemas.AgencyOffensesSchema(many=True)
fast_count = True
@use_args(marshmallow_schemas.OffenseCountViewArgsYear)
@cache(max_age=DEFAULT_MAX_AGE, public=True)
@tuning_page
def get(self, args, state_abbr = None, county_fips_code = None, agency_ori = None):
'''''
Year is a required field atm.
'''''
self.verify_api_key(args)
model = newmodels.AgencyOffenseCounts()
year = args.get('year', None)
explorer_offense = args.get('explorer_offense', None)
agency_sums = model.get(agency = agency_ori, year = year, county = county_fips_code, state=state_abbr, explorer_offense=explorer_offense)
filename = 'agency_sums_county'
return self.render_response(agency_sums, args, csv_filename=filename)
| 41.087379
| 157
| 0.708176
| 3,755
| 0.887287
| 0
| 0
| 2,906
| 0.686673
| 0
| 0
| 618
| 0.14603
|
3da4a9becaa6b35a7f34b4f9c1a6f2e59d92599e
| 1,522
|
py
|
Python
|
deploy_config_generator/output/kube_kong_consumer.py
|
ApplauseAQI/applause-deploy-config-generator
|
46f957fbfe991677f920d5db74b0670385b6e505
|
[
"MIT"
] | 3
|
2019-04-05T14:16:17.000Z
|
2021-06-25T20:53:03.000Z
|
deploy_config_generator/output/kube_kong_consumer.py
|
ApplauseAQI/applause-deploy-config-generator
|
46f957fbfe991677f920d5db74b0670385b6e505
|
[
"MIT"
] | 6
|
2019-04-04T20:20:16.000Z
|
2021-09-27T21:04:39.000Z
|
deploy_config_generator/output/kube_kong_consumer.py
|
ApplauseAQI/applause-deploy-config-generator
|
46f957fbfe991677f920d5db74b0670385b6e505
|
[
"MIT"
] | null | null | null |
import copy
from deploy_config_generator.utils import yaml_dump
from deploy_config_generator.output import kube_common
class OutputPlugin(kube_common.OutputPlugin):
NAME = 'kube_kong_consumer'
DESCR = 'Kubernetes KongConsumer output plugin'
FILE_EXT = '.yaml'
DEFAULT_CONFIG = {
'fields': {
'kong_consumers': dict(
metadata=dict(
type='dict',
required=True,
fields=copy.deepcopy(kube_common.METADATA_FIELD_SPEC),
),
username=dict(
type='str',
),
custom_id=dict(
type='str',
),
credentials=dict(
type='list',
subtype='str',
),
),
}
}
def generate_output(self, app_vars):
# Basic structure
data = {
'apiVersion': 'configuration.konghq.com/v1',
'kind': 'KongConsumer',
}
data['metadata'] = self.build_metadata(app_vars['APP']['metadata'])
for field in ('username', 'custom_id', 'credentials'):
if app_vars['APP'][field]:
data.update(self.build_generic(app_vars['APP'], {field: self._fields['kong_consumers'][field]}, camel_case=False))
data = self._template.render_template(data, app_vars)
output = yaml_dump(data)
return (output, self.get_output_filename_suffix(data))
| 31.061224
| 130
| 0.532194
| 1,399
| 0.919185
| 0
| 0
| 0
| 0
| 0
| 0
| 280
| 0.183968
|
3da7fc4300dabd09ec4c470043ea127780e60a3b
| 2,450
|
py
|
Python
|
EyePatterns/clustering_algorithms/custom_mean_shift.py
|
Sale1996/Pattern-detection-of-eye-tracking-scanpaths
|
15c832f26dce98bb95445f9f39f454f99bbb6029
|
[
"MIT"
] | 1
|
2021-12-07T08:02:30.000Z
|
2021-12-07T08:02:30.000Z
|
EyePatterns/clustering_algorithms/custom_mean_shift.py
|
Sale1996/Pattern-detection-of-eye-tracking-scanpaths
|
15c832f26dce98bb95445f9f39f454f99bbb6029
|
[
"MIT"
] | null | null | null |
EyePatterns/clustering_algorithms/custom_mean_shift.py
|
Sale1996/Pattern-detection-of-eye-tracking-scanpaths
|
15c832f26dce98bb95445f9f39f454f99bbb6029
|
[
"MIT"
] | null | null | null |
import numpy as np
class MeanShift:
def __init__(self, radius=2):
self.radius = radius
def fit(self, data):
centroids = self.initialize_starting_centroids(data)
self.centroids = self.make_centroids(centroids, data)
def initialize_starting_centroids(self, data):
centroids = {}
for i in range(len(data)):
centroids[i] = data[i]
return centroids
def make_centroids(self, centroids, data):
while True:
new_centroids = self.find_new_centroids(centroids, data)
unique_centroids = self.remove_duplicate_centroids(new_centroids)
prev_centroids = dict(centroids)
centroids = self.set_unique_centroids_as_final_centroids(unique_centroids)
is_optimized = self.check_if_optimized(centroids, prev_centroids)
if is_optimized:
break
return centroids
def find_new_centroids(self, centroids, data):
new_centroids = []
for i in centroids:
centroid = centroids[i]
in_bandwith = self.fill_in_bandiwth_with_features_in_radius(centroid, data)
new_centroid = self.find_average_number(in_bandwith)
new_centroids.append(tuple(new_centroid))
return new_centroids
def find_average_number(self, in_bandwith):
return np.average(in_bandwith, axis=0)
def fill_in_bandiwth_with_features_in_radius(self, centroid, data):
in_bandwith = []
for featureset in data:
if self.is_in_radius_number(featureset, centroid):
in_bandwith.append(featureset)
return in_bandwith
def is_in_radius_number(self, featureset, centroid):
if np.linalg.norm(featureset - centroid) < self.radius:
return True
else:
return False
def remove_duplicate_centroids(self, new_centroids):
return sorted(list(set(new_centroids)))
def set_unique_centroids_as_final_centroids(self, uniques):
centroids = {}
for i in range(len(uniques)):
centroids[i] = np.array(uniques[i])
return centroids
def check_if_optimized(self, centroids, prev_centroids):
optimized = True
# check is it optimized
for i in centroids:
if not np.array_equal(centroids[i], prev_centroids[i]):
optimized = False
break
return optimized
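# Minimal usage sketch: fit the clusterer above on a tiny 2-d dataset and print the converged
# centroids (a dict mapping centroid index to a numpy coordinate array). The sample points and
# radius below are illustrative values only.
if __name__ == "__main__":
    sample_points = np.array([[1, 2], [1.5, 1.8], [5, 8], [8, 8], [1, 0.6], [9, 11]])
    mean_shift = MeanShift(radius=4)
    mean_shift.fit(sample_points)
    print(mean_shift.centroids)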
| 34.027778
| 87
| 0.646122
| 2,427
| 0.990612
| 0
| 0
| 0
| 0
| 0
| 0
| 23
| 0.009388
|
3da83d4179e3c0fa03b23a086938541e7c9c090e
| 931
|
py
|
Python
|
src/tentaclio/clients/athena_client.py
|
datavaluepeople/tentaclio
|
eb6920a0e115c6c08043063a8c1013d812ec34c8
|
[
"MIT"
] | 12
|
2019-04-30T16:07:42.000Z
|
2021-12-08T08:02:09.000Z
|
src/tentaclio/clients/athena_client.py
|
octoenergy/tentaclio
|
eb6920a0e115c6c08043063a8c1013d812ec34c8
|
[
"MIT"
] | 74
|
2019-04-25T11:18:22.000Z
|
2022-01-18T11:31:14.000Z
|
src/tentaclio/clients/athena_client.py
|
datavaluepeople/tentaclio
|
eb6920a0e115c6c08043063a8c1013d812ec34c8
|
[
"MIT"
] | 4
|
2019-05-05T13:13:21.000Z
|
2022-01-14T00:33:07.000Z
|
"""AWS Athena query client.
Overrides the `get_df` convenience methods for loading a DataFrame using PandasCursor,
which is more performant than using sql alchemy functions.
"""
import pandas as pd
from pyathena.pandas_cursor import PandasCursor
from . import decorators, sqla_client
__all__ = ["AthenaClient"]
class AthenaClient(sqla_client.SQLAlchemyClient):
"""Postgres client, backed by a SQLAlchemy connection."""
allowed_schemes = ["awsathena+rest"]
connect_args_default = dict(cursor_class=PandasCursor)
# Athena-specific fast query result retrieval:
@decorators.check_conn
def get_df(self, sql_query: str, params: dict = None, **kwargs) -> pd.DataFrame:
"""Run a raw SQL query and return a data frame."""
raw_conn = self._get_raw_conn()
raw_cursor = raw_conn.cursor(PandasCursor)
return raw_cursor.execute(sql_query, parameters=params, **kwargs).as_pandas()
| 32.103448
| 86
| 0.736842
| 612
| 0.657358
| 0
| 0
| 343
| 0.368421
| 0
| 0
| 361
| 0.387755
|
3da995d5085338f00dd3653e93f80c4fa924f8b7
| 3,592
|
py
|
Python
|
tests/unit/merge/merge_test.py
|
singulared/conflow
|
f74dec63b23da9791202e99496d3baadd458c1c5
|
[
"MIT"
] | 11
|
2018-03-27T17:24:35.000Z
|
2021-09-21T05:49:11.000Z
|
tests/unit/merge/merge_test.py
|
singulared/conflow
|
f74dec63b23da9791202e99496d3baadd458c1c5
|
[
"MIT"
] | 64
|
2018-01-24T16:34:42.000Z
|
2020-03-23T13:34:07.000Z
|
tests/unit/merge/merge_test.py
|
singulared/conflow
|
f74dec63b23da9791202e99496d3baadd458c1c5
|
[
"MIT"
] | null | null | null |
import pytest
from conflow.merge import merge_factory
from conflow.node import Node, NodeList, NodeMap
def test_merge_node_node(default_config):
base = Node('base', 'node_A')
other = Node('other', 'node_B')
assert merge_factory(base, other, default_config) == other
def test_merge_node_nodelist(default_config):
base = Node('base', 'node_A')
other = NodeList('other', [2])
assert merge_factory(base, other, default_config) == other
def test_merge_node_nodemap(default_config):
base = Node('base', 'node_A')
other = NodeMap('other', {
'db': {
'master': {
'host': 'other'
}
}
})
assert merge_factory(base, other, default_config) == other
def test_merge_nodelist_node(default_config):
base = NodeList('other', [2])
other = Node('base', 'node_A')
assert merge_factory(base, other, default_config) == other
def test_merge_nodelist_nodelist_override(default_config):
base = NodeList('base', [1])
other = NodeList('other', [2])
assert merge_factory(base, other, default_config) == other
def test_merge_nodelist_nodelist_extend(extend_list_config):
base = NodeList('base', [1])
other = NodeList('other', [2])
expected = NodeList('base', [1, 2])
assert merge_factory(base, other, extend_list_config) == expected
def test_merge_nodelist_nodemap(default_config):
base = NodeList('base', [1])
other = NodeMap('base', {
'db': {
'master': {
'host': 'base'
}
}
})
assert merge_factory(base, other, default_config) == other
def test_merge_nodemap_node(default_config):
base = NodeMap('base', {
'db': {
'master': {
'host': 'base'
}
}
})
other = Node('base', 'node_A')
assert merge_factory(base, other, default_config) == other
def test_merge_nodemap_nodelist(default_config):
base = NodeMap('base', {
'db': {
'master': {
'host': 'base'
}
}
})
other = NodeList('base', [1])
assert merge_factory(base, other, default_config) == other
def test_merge_nodemap_nodemap_override(default_config):
base = NodeMap('base', {
'db': {
'master': {
'host': 'base'
}
}
})
other = NodeMap('other', {
'db': {
'master': {
'host': 'other'
}
}
})
result = merge_factory(base, other, default_config)
assert result.db.master.host == 'other'
def test_merge_nodemap_nodemap_extend(default_config):
base = NodeMap('base', {
'master': {
'host': 'master'
}
})
other = NodeMap('other', {
'slave': {
'host': 'slave'
}
})
result = merge_factory(base, other, default_config)
assert 'master' in result
assert 'slave' in result
def test_merge_nodemap_nodemap_empty(default_config):
base = NodeMap('base', {})
other = NodeMap('other', {})
expected = NodeMap('expected', {})
assert merge_factory(base, other, default_config) == expected
def test_merge_different_types_strict(strict_config):
base = NodeMap('base', {'merged_key': {'a': 'b'}})
other = NodeMap('other', {'merged_key': 1})
with pytest.raises(RuntimeError) as error:
merge_factory(base, other, strict_config)
error_message = (
"Cannot merge `{'a': 'b'}` and `1` with key `merged_key`"
)
assert str(error.value) == error_message
| 26.218978
| 69
| 0.58686
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 527
| 0.146715
|
3da9ac46abe5207f20db155757f945a1d90d40c8
| 864
|
py
|
Python
|
cartopolar/antarctica_maps.py
|
dlilien/cartopolar
|
a425ef205c72e25c5d140c65c1ec99d688618f49
|
[
"MIT"
] | null | null | null |
cartopolar/antarctica_maps.py
|
dlilien/cartopolar
|
a425ef205c72e25c5d140c65c1ec99d688618f49
|
[
"MIT"
] | null | null | null |
cartopolar/antarctica_maps.py
|
dlilien/cartopolar
|
a425ef205c72e25c5d140c65c1ec99d688618f49
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 dlilien <dlilien@hozideh>
#
# Distributed under terms of the MIT license.
"""
"""
import numpy as np
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from .cartopy_overrides import SPS
# import shapely.geometry as sgeom
USP_EXTENT = (31000, 35000, -37750, -33750)
# USP_EXTENT = (-100000, 100000, -100000, 100000)
USP_ASP = (USP_EXTENT[1] - USP_EXTENT[0]) / (USP_EXTENT[3] - USP_EXTENT[2])
def upstream(ax=None, fig_kwargs=None):
if fig_kwargs is None:
fig_kwargs = {}
if ax is None:
_, ax = plt.subplots(**fig_kwargs, subplot_kw={'projection': SPS()})
ax.set_extent(USP_EXTENT, ccrs.epsg(3031))
ax._xlocs = np.arange(0, 180)
ax._ylocs = np.arange(-90, -80, 0.1)
ax._y_inline = False
ax._x_inline = False
return ax
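# Minimal usage sketch (requires cartopy and the local SPS projection): draw the upstream-camp
# panel on a fresh figure. The figure size is an illustrative choice.
if __name__ == "__main__":
    axis = upstream(fig_kwargs={"figsize": (6, 6)})
    plt.show()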
| 24.685714
| 76
| 0.665509
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 256
| 0.295954
|
3daa549e10afe7d4f29dbdbe102676caed6653f5
| 1,010
|
py
|
Python
|
cpdb/toast/tests/test_serializers.py
|
invinst/CPDBv2_backend
|
b4e96d620ff7a437500f525f7e911651e4a18ef9
|
[
"Apache-2.0"
] | 25
|
2018-07-20T22:31:40.000Z
|
2021-07-15T16:58:41.000Z
|
cpdb/toast/tests/test_serializers.py
|
invinst/CPDBv2_backend
|
b4e96d620ff7a437500f525f7e911651e4a18ef9
|
[
"Apache-2.0"
] | 13
|
2018-06-18T23:08:47.000Z
|
2022-02-10T07:38:25.000Z
|
cpdb/toast/tests/test_serializers.py
|
invinst/CPDBv2_backend
|
b4e96d620ff7a437500f525f7e911651e4a18ef9
|
[
"Apache-2.0"
] | 6
|
2018-05-17T21:59:43.000Z
|
2020-11-17T00:30:26.000Z
|
from django.test import TestCase
from robber import expect
from toast.serializers import ToastDesktopSerializer, ToastMobileSerializer
from toast.factories import ToastFactory
class ToastDesktopSerializerTestCase(TestCase):
def test_serialization(self):
toast = ToastFactory(
name='CR',
template='**CR #{crid}** *categorized as {category}*\nhappened in {incident_date} {action_type}.'
)
expect(ToastDesktopSerializer(toast).data).to.eq({
'name': 'CR',
'template': '**CR #{crid}** *categorized as {category}*\nhappened in {incident_date} {action_type}.'
})
class ToastMobileSerializerTestCase(TestCase):
def test_serialization(self):
toast = ToastFactory(
name='MOBILE CR',
template='CR #{crid} {action_type} pinboard'
)
expect(ToastMobileSerializer(toast).data).to.eq({
'name': 'CR',
'template': 'CR #{crid} {action_type} pinboard'
})
| 31.5625
| 112
| 0.634653
| 827
| 0.818812
| 0
| 0
| 0
| 0
| 0
| 0
| 301
| 0.29802
|
3daa64b4b3b876de59fee4ffa1f0970c52c6d7f9
| 12,063
|
py
|
Python
|
wirepas_backend_client/test/kpi_adv.py
|
PFigs/backend-client
|
e6f024d8c5b8ba3e7cd1b5c226d16ff643d4bd83
|
[
"Apache-2.0"
] | null | null | null |
wirepas_backend_client/test/kpi_adv.py
|
PFigs/backend-client
|
e6f024d8c5b8ba3e7cd1b5c226d16ff643d4bd83
|
[
"Apache-2.0"
] | null | null | null |
wirepas_backend_client/test/kpi_adv.py
|
PFigs/backend-client
|
e6f024d8c5b8ba3e7cd1b5c226d16ff643d4bd83
|
[
"Apache-2.0"
] | 1
|
2021-09-30T06:38:54.000Z
|
2021-09-30T06:38:54.000Z
|
"""
KPI ADV
=======
Script to execute an inventory and otap benchmark for the
advertiser feature.
.. Copyright:
Copyright 2019 Wirepas Ltd under Apache License, Version 2.0.
See file LICENSE for full license details.
"""
import queue
import random
import datetime
import importlib
import multiprocessing
import pandas
from wirepas_backend_client.messages import AdvertiserMessage
from wirepas_backend_client.tools import ParserHelper, LoggerHelper
from wirepas_backend_client.api import MySQLSettings, MySQLObserver
from wirepas_backend_client.api import MQTTObserver, MQTTSettings
from wirepas_backend_client.management import Daemon, Inventory
from wirepas_backend_client.test import TestManager
class AdvertiserManager(TestManager):
"""
Test Manager for the Advertiser use case
Attributes:
tx_queue: where a final report is sent
rx_queue: where Advertiser messages arrive
exit_signal: signals an exit request
inventory_target_nodes: nodes to look for during the inventory
inventory_target_otap: otap sequence to track during inventory
        delay: number of seconds to wait before starting the test
duration: maximum duration of the test
logger: package logger
"""
# pylint: disable=locally-disabled, logging-format-interpolation, logging-too-many-args
def __init__(
self,
tx_queue: multiprocessing.Queue,
rx_queue: multiprocessing.Queue,
start_signal: multiprocessing.Event,
exit_signal: multiprocessing.Event,
storage_queue: multiprocessing.Queue = None,
inventory_target_nodes: set = None,
inventory_target_otap: int = None,
inventory_target_frequency: int = None,
delay: int = 5,
duration: int = 5,
logger=None,
):
super(AdvertiserManager, self).__init__(
tx_queue=tx_queue,
rx_queue=rx_queue,
start_signal=start_signal,
exit_signal=exit_signal,
logger=logger,
)
self.storage_queue = storage_queue
self.delay = delay
self.duration = duration
self.inventory = Inventory(
target_nodes=inventory_target_nodes,
target_otap_sequence=inventory_target_otap,
target_frequency=inventory_target_frequency,
start_delay=delay,
maximum_duration=duration,
logger=self.logger,
)
self._test_sequence_number = 0
self._timeout = 1
self._tasks = list()
def test_inventory(self, test_sequence_number=0) -> None:
"""
Inventory test
This test starts by calculating the time when it should start counting
and when it should stop its inventory.
Afterwards, before the time to start the count is reached, any message
        arriving in the queue is discarded. Discarding these messages is necessary,
        as counting them would lead to false results.
"""
self._test_sequence_number = test_sequence_number
self.inventory.sequence = test_sequence_number
self.inventory.wait()
self.start_signal.set()
self.logger.info(
"starting inventory #{}".format(test_sequence_number),
dict(sequence=self._test_sequence_number),
)
AdvertiserMessage.message_counter = 0
empty_counter = 0
while not self.exit_signal.is_set():
try:
message = self.rx_queue.get(timeout=self._timeout, block=True)
empty_counter = 0
except queue.Empty:
empty_counter = empty_counter + 1
if empty_counter > 10:
self.logger.debug(
"Advertiser messages " "are not being received"
)
empty_counter = 0
if self.inventory.is_out_of_time():
break
else:
continue
self.logger.info(message.serialize())
if self.storage_queue:
self.storage_queue.put(message)
if self.storage_queue.qsize() > 100:
self.logger.critical("storage queue is too big")
# create map of apdu["adv"]
for node_address, details in message.apdu["adv"].items():
self.inventory.add(
node_address=node_address,
rss=details["rss"],
otap_sequence=details["otap"],
timestamp=details["time"],
)
if self.inventory.is_out_of_time():
break
if self.inventory.is_complete():
self.logger.info(
"inventory completed for all target nodes",
dict(sequence=self._test_sequence_number),
)
break
if self.inventory.is_otaped():
self.logger.info(
"inventory completed for all otap targets",
dict(sequence=self._test_sequence_number),
)
break
if self.inventory.is_frequency_reached():
self.logger.info(
"inventory completed for frequency target",
dict(sequence=self._test_sequence_number),
)
break
self.inventory.finish()
report = self.report()
self.tx_queue.put(report)
record = dict(
test_sequence_number=self._test_sequence_number,
total_nodes=report["observed_total"],
inventory_start=report["start"].isoformat("T"),
inventory_end=report["end"].isoformat("T"),
node_frequency=str(report["node_frequency"]),
frequency_by_value=str(report["frequency_by_value"]),
target_nodes=str(self.inventory.target_nodes),
target_otap=str(self.inventory.target_otap_sequence),
target_frequency=str(self.inventory.target_frequency),
difference=str(self.inventory.difference()),
elapsed=report["elapsed"],
)
self.logger.info(record, dict(sequence=self._test_sequence_number))
def report(self) -> dict:
"""
Returns a string with the gathered results.
"""
msg = dict(
title="{}:{}".format(__TEST_NAME__, self._test_sequence_number),
start=self.inventory.start,
end=self.inventory.finish(),
elapsed=self.inventory.elapsed,
difference=self.inventory.difference(),
inventory_target_nodes=self.inventory.target_nodes,
inventory_target_otap=self.inventory.target_otap_sequence,
inventory_target_frequency=self.inventory.target_frequency,
node_frequency=self.inventory.frequency(),
frequency_by_value=self.inventory.frequency_by_value(),
observed_total=len(self.inventory.nodes),
observed=self.inventory.nodes,
)
return msg
def fetch_report(
args, rx_queue, timeout, report_output, number_of_runs, exit_signal, logger
):
""" Reporting loop executed between test runs """
reports = {}
for run in range(0, number_of_runs):
try:
report = rx_queue.get(timeout=timeout, block=True)
reports[run] = report
except queue.Empty:
report = None
logger.warning("timed out waiting for report")
if exit_signal.is_set():
raise RuntimeError
df = pandas.DataFrame.from_dict(reports)
if args.output_time:
filepath = "{}_{}".format(
datetime.datetime.now().isoformat(), args.output
)
else:
filepath = "{}".format(args.output)
df.to_json(filepath)
def main(args, logger):
""" Main loop """
# process management
daemon = Daemon(logger=logger)
mysql_settings = MySQLSettings(args)
mqtt_settings = MQTTSettings(args)
if mysql_settings.sanity():
mysql_available = True
daemon.build(
__STORAGE_ENGINE__,
MySQLObserver,
dict(mysql_settings=mysql_settings),
)
daemon.set_run(
__STORAGE_ENGINE__,
task_kwargs=dict(parallel=True),
task_as_daemon=False,
)
else:
mysql_available = False
logger.info("Skipping Storage module")
if mqtt_settings.sanity():
mqtt_process = daemon.build(
"mqtt",
MQTTObserver,
dict(
mqtt_settings=mqtt_settings,
logger=logger,
allowed_endpoints=set([AdvertiserMessage.source_endpoint]),
),
)
topic = "gw-event/received_data/{gw_id}/{sink_id}/{network_id}/{source_endpoint}/{destination_endpoint}".format(
gw_id=args.mqtt_subscribe_gateway_id,
sink_id=args.mqtt_subscribe_sink_id,
network_id=args.mqtt_subscribe_network_id,
source_endpoint=args.mqtt_subscribe_source_endpoint,
destination_endpoint=args.mqtt_subscribe_destination_endpoint,
)
mqtt_process.message_subscribe_handlers = {
topic: mqtt_process.generate_data_received_cb()
}
daemon.set_run("mqtt", task=mqtt_process.run)
# build each process and set the communication
adv_manager = daemon.build(
"adv_manager",
AdvertiserManager,
dict(
inventory_target_nodes=args.target_nodes,
inventory_target_otap=args.target_otap,
inventory_target_frequency=args.target_frequency,
logger=logger,
delay=args.delay,
duration=args.duration,
),
receive_from="mqtt",
storage=mysql_available,
storage_name=__STORAGE_ENGINE__,
)
adv_manager.execution_jitter(
_min=args.jitter_minimum, _max=args.jitter_maximum
)
adv_manager.register_task(
adv_manager.test_inventory, number_of_runs=args.number_of_runs
)
daemon.set_loop(
fetch_report,
dict(
args=args,
rx_queue=adv_manager.tx_queue,
timeout=args.delay + args.duration + 60,
report_output=args.output,
number_of_runs=args.number_of_runs,
exit_signal=daemon.exit_signal,
logger=logger,
),
)
daemon.start()
else:
print("Please check you MQTT settings")
print(mqtt_settings)
if __name__ == "__main__":
__MYSQL_ENABLED__ = importlib.util.find_spec("MySQLdb")
__STORAGE_ENGINE__ = "mysql"
__TEST_NAME__ = "test_advertiser"
PARSE = ParserHelper(description="KPI ADV arguments")
PARSE.add_mqtt()
PARSE.add_test()
PARSE.add_database()
PARSE.add_fluentd()
PARSE.add_file_settings()
SETTINGS = PARSE.settings()
LOGGER = LoggerHelper(
module_name=__TEST_NAME__, args=SETTINGS, level=SETTINGS.debug_level
).setup()
if SETTINGS.delay is None:
SETTINGS.delay = random.randrange(0, 60)
# pylint: disable=locally-disabled, no-member
try:
nodes = set({int(line) for line in open(SETTINGS.nodes, "r")})
except FileNotFoundError:
LOGGER.warning("Could not find nodes file")
nodes = set()
SETTINGS.target_nodes = nodes
if SETTINGS.jitter_minimum > SETTINGS.jitter_maximum:
SETTINGS.jitter_maximum = SETTINGS.jitter_minimum
LOGGER.info(
{
"test_suite_start": datetime.datetime.utcnow().isoformat("T"),
"run_arguments": SETTINGS.to_dict(),
}
)
# pylint: enable=no-member
main(SETTINGS, LOGGER)
PARSE.dump(
"run_information_{}.txt".format(datetime.datetime.now().isoformat())
)
| 32.340483
| 120
| 0.607643
| 6,397
| 0.530299
| 0
| 0
| 0
| 0
| 0
| 0
| 2,219
| 0.183951
|
3dac942409c65786150bee242bc747d471fc5414
| 1,608
|
py
|
Python
|
levenshtein_func.py
|
Lance-Easley/Document-Similarity
|
c83fa406acf6308da28867611f567776fc266884
|
[
"MIT"
] | null | null | null |
levenshtein_func.py
|
Lance-Easley/Document-Similarity
|
c83fa406acf6308da28867611f567776fc266884
|
[
"MIT"
] | null | null | null |
levenshtein_func.py
|
Lance-Easley/Document-Similarity
|
c83fa406acf6308da28867611f567776fc266884
|
[
"MIT"
] | null | null | null |
import doctest
def leven_distance(iterable1: str or list, iterable2: str or list) -> int:
"""Takes two strings or lists and will find the Levenshtein distance
between the two.
    Both iterables must be the same type (str or list) for proper functionality.
If given strings, function will find distance per character. If given
lists, function will find distance per term in list.
Capitalization will be counted as a difference.
>>> leven_distance('cat', 'hat')
1
>>> leven_distance('abcdef', 'azc3uf')
3
>>> leven_distance(['hi', 'there', 'kevin'], ['hello', 'there', 'kevin'])
1
"""
iterable1_count = len(iterable1) + 1
iterable2_count = len(iterable2) + 1
mem = []
# Set memoize list length
for i in range(0, iterable1_count):
mem.append([])
for j in range(0, iterable2_count):
mem[i].append(None)
# Assign empty string numbers to memoize chart
# Row
for r in range(0, iterable1_count):
mem[r][0] = r
# Column
for c in range(0, iterable2_count):
mem[0][c] = c
# Fill in rest of chart
for r in range(iterable1_count - 1):
for c in range(iterable2_count - 1):
if iterable1[r] == iterable2[c]:
mem[r + 1][c + 1] = mem[r][c]
else:
mem[r + 1][c + 1] = min(
mem[r][c] + 1,
mem[r + 1][c] + 1,
mem[r][c + 1] + 1
)
# Get last number in chart
return mem[-1][-1]
if __name__ == "__main__":
print(doctest.testmod())
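# Worked example of how the memo chart fills for leven_distance('cat', 'hat'): row/column 0
# hold distances against the empty string, and each remaining cell copies the diagonal value
# on a match or takes 1 + min(left, up, diagonal) on a mismatch.
#
#          ''  h  a  t
#      ''   0  1  2  3
#      c    1  1  2  3
#      a    2  2  1  2
#      t    3  3  2  1   -> leven_distance('cat', 'hat') == 1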
| 29.777778
| 77
| 0.559701
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 686
| 0.426617
|
3dada60e0249d722b9efc92d356114b02e3e0c6c
| 18,496
|
py
|
Python
|
filters/Filter.py
|
Paul1298/ITMO_FS
|
219537776d89e52df0c1c07de2c71ce91c679c50
|
[
"MIT"
] | null | null | null |
filters/Filter.py
|
Paul1298/ITMO_FS
|
219537776d89e52df0c1c07de2c71ce91c679c50
|
[
"MIT"
] | null | null | null |
filters/Filter.py
|
Paul1298/ITMO_FS
|
219537776d89e52df0c1c07de2c71ce91c679c50
|
[
"MIT"
] | null | null | null |
from .utils import *
# The original top of this file is garbled in this copy: the loop below was fused into a
# `Filter.__init__` but is really the tail of `_DefaultMeasures.__calculate_F_ratio`.
# `__check_input` and `fit_criterion_measure` are minimal, assumed stand-ins inferred from
# how they are used further down; they are not the original implementations.
class _DefaultMeasures:
    @staticmethod
    def __check_input(x, y):
        # Assumed minimal validation: coerce inputs to arrays with matching sample counts.
        x = np.asarray(x)
        y = np.asarray(y)
        assert x.shape[0] == y.shape[0], 'X and y should contain the same number of samples'
        return x, y
    @staticmethod
    def fit_criterion_measure(X, y):
        # Placeholder only: the original implementation is missing from this truncated copy.
        raise NotImplementedError('fit_criterion_measure is not available in this copy')
    @staticmethod
    def __calculate_F_ratio(row, y_data):
        """Fisher (F) ratio of a single feature: inter-class variance over intra-class variance."""
        inter_class = 0.0
        intra_class = 0.0
        mu_total = np.mean(row)  # overall feature mean for the between-class term
        for value in np.unique(y_data):
            index_for_this_value = np.where(y_data == value)[0]
            n = np.sum(row[index_for_this_value])
            mu = np.mean(row[index_for_this_value])
            var = np.var(row[index_for_this_value])
            inter_class += n * np.power((mu - mu_total), 2)
            intra_class += (n - 1) * var
        f_ratio = inter_class / intra_class
        return f_ratio
@classmethod
def __f_ratio_measure(cls, X, y, n):
X, y = _DefaultMeasures.__check_input(X, y)
assert not 1 < X.shape[1] < n, 'incorrect number of features'
f_ratios = []
for feature in X.T:
f_ratio = _DefaultMeasures.__calculate_F_ratio(feature, y.T)
f_ratios.append(f_ratio)
f_ratios = np.array(f_ratios)
return np.argpartition(f_ratios, -n)[-n:]
@staticmethod
def f_ratio_measure(n):
return partial(_DefaultMeasures.__f_ratio_measure, n=n)
@staticmethod
def gini_index(X, y):
X, y = _DefaultMeasures.__check_input(X, y)
cum_x = np.cumsum(X / np.linalg.norm(X, 1, axis=0), axis=0)
cum_y = np.cumsum(y / np.linalg.norm(y, 1))
diff_x = (cum_x[1:] - cum_x[:-1])
diff_y = (cum_y[1:] + cum_y[:-1])
return np.abs(1 - np.sum(np.multiply(diff_x.T, diff_y).T, axis=0))
# Calculate the entropy of y.
@staticmethod
def __calc_entropy(y):
dict_label = dict()
for label in y:
if label not in dict_label:
dict_label.update({label: 1})
else:
dict_label[label] += 1
entropy = 0.0
for i in dict_label.values():
entropy += -i / len(y) * log(i / len(y), 2)
return entropy
@staticmethod
def __calc_conditional_entropy(x_j, y):
dict_i = dict()
for i in range(x_j.shape[0]):
if x_j[i] not in dict_i:
dict_i.update({x_j[i]: [i]})
else:
dict_i[x_j[i]].append(i)
# Conditional entropy of a feature.
con_entropy = 0.0
# get corresponding values in y.
for f in dict_i.values():
# Probability of each class in a feature.
p = len(f) / len(x_j)
# Dictionary of corresponding probability in labels.
dict_y = dict()
for i in f:
if y[i] not in dict_y:
dict_y.update({y[i]: 1})
else:
dict_y[y[i]] += 1
# calculate the probability of corresponding label.
sub_entropy = 0.0
for l in dict_y.values():
sub_entropy += -l / sum(dict_y.values()) * log(l / sum(dict_y.values()), 2)
con_entropy += sub_entropy * p
return con_entropy
# IGFilter = filters.IGFilter() # TODO: unexpected .run() interface; .run() feature_names; no default constructor
@staticmethod
def ig_measure(X, y):
X, y = _DefaultMeasures.__check_input(X, y)
entropy = _DefaultMeasures.__calc_entropy(y)
f_ratios = np.empty(X.shape[1])
for index in range(X.shape[1]):
f_ratios[index] = entropy - _DefaultMeasures.__calc_conditional_entropy(X[:, index], y)
return f_ratios
@staticmethod
def __contingency_matrix(labels_true, labels_pred):
"""Build a contingency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
# TODO redo it with numpy
contingency = sp.csr_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int)
contingency.sum_duplicates()
return contingency
@staticmethod
def __mi(U, V):
contingency = _DefaultMeasures.__contingency_matrix(U, V)
nzx, nzy, nz_val = sp.find(contingency)
contingency_sum = contingency.sum()
pi = np.ravel(contingency.sum(axis=1))
pj = np.ravel(contingency.sum(axis=0))
log_contingency_nm = np.log(nz_val)
contingency_nm = nz_val / contingency_sum
# Don't need to calculate the full outer product, just for non-zeroes
outer = (pi.take(nzx).astype(np.int64, copy=False)
* pj.take(nzy).astype(np.int64, copy=False))
log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum()
@classmethod
def __mrmr_measure(cls, X, y, n):
assert not 1 < X.shape[1] < n, 'incorrect number of features'
x, y = _DefaultMeasures.__check_input(X, y)
# print([_DefaultMeasures.__mi(X[:, j].reshape(-1, 1), y) for j in range(X.shape[1])])
return [MI(x[:, j].reshape(-1, 1), y) for j in range(x.shape[1])]
@staticmethod
def mrmr_measure(n):
return partial(_DefaultMeasures.__mrmr_measure, n=n)
# RandomFilter = filters.RandomFilter() # TODO: bad .run() interface; .run() feature_names; no default constructor
@staticmethod
def su_measure(X, y):
X, y = _DefaultMeasures.__check_input(X, y)
entropy = _DefaultMeasures.__calc_entropy(y)
f_ratios = np.empty(X.shape[1])
for index in range(X.shape[1]):
entropy_x = _DefaultMeasures.__calc_entropy(X[:, index])
con_entropy = _DefaultMeasures.__calc_conditional_entropy(X[:, index], y)
f_ratios[index] = 2 * (entropy - con_entropy) / (entropy_x + entropy)
return f_ratios
@staticmethod
def spearman_corr(X, y):
X, y = _DefaultMeasures.__check_input(X, y)
np.sort(X, axis=1) # need to sort, because Spearman is a rank correlation
np.sort(y)
n = X.shape[0]
c = 6 / (n * (n - 1) * (n + 1))
dif = X - np.repeat(y, X.shape[1]).reshape(X.shape)
return 1 - c * np.sum(dif * dif, axis=0)
@staticmethod
def pearson_corr(X, y):
X, y = _DefaultMeasures.__check_input(X, y)
x_dev = X - np.mean(X, axis=0)
y_dev = y - np.mean(y)
sum_dev = y_dev.T.dot(x_dev)
sq_dev_x = x_dev * x_dev
sq_dev_y = y_dev * y_dev
return (sum_dev / np.sqrt(np.sum(sq_dev_y) * np.sum(sq_dev_x))).reshape((-1,))
# TODO concordation coef
@staticmethod
def fechner_corr(X, y):
"""
Sample sign correlation (also known as Fechner correlation)
"""
X, y = _DefaultMeasures.__check_input(X, y)
y_mean = np.mean(y)
n = X.shape[0]
f_ratios = np.zeros(X.shape[1])
for j in range(X.shape[1]):
y_dev = y[j] - y_mean
x_j_mean = np.mean(X[:, j])
for i in range(n):
x_dev = X[i, j] - x_j_mean
if x_dev >= 0 & y_dev >= 0:
f_ratios[j] += 1
else:
f_ratios[j] -= 1
f_ratios[j] /= n
return f_ratios
@staticmethod
def __label_binarize(y):
"""
Binarize labels in a one-vs-all fashion
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
"""
classes = np.unique(y)
n_samples = len(y)
n_classes = len(classes)
row = np.arange(n_samples)
col = [np.where(classes == el)[0][0] for el in y]
data = np.repeat(1, n_samples)
# TODO redo it with numpy
return sp.csr_matrix((data, (row, col)), shape=(n_samples, n_classes)).toarray()
@staticmethod
def __chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
with np.errstate(invalid="ignore"):
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq
@staticmethod
def chi2_measure(X, y):
"""
This score can be used to select the n_features features with the highest values
for the test chi-squared statistic from X,
which must contain only non-negative features such as booleans or frequencies
(e.g., term counts in document classification), relative to the classes.
"""
X, y = _DefaultMeasures.__check_input(X, y)
if np.any(X < 0):
raise ValueError("Input X must be non-negative.")
Y = _DefaultMeasures.__label_binarize(y)
# If you use sparse input
# you can use sklearn.utils.extmath.safe_sparse_dot instead
observed = np.dot(Y.T, X) # n_classes * n_features
feature_count = X.sum(axis=0).reshape(1, -1)
class_prob = Y.mean(axis=0).reshape(1, -1)
expected = np.dot(class_prob.T, feature_count)
return _DefaultMeasures.__chisquare(observed, expected)
@staticmethod
def __distance_matrix(X, y, n_samples):
dm = np.zeros((n_samples, n_samples), dtype=tuple)
for i in range(n_samples):
for j in range(i, n_samples):
# using the Manhattan (L1) norm rather than
# the Euclidean (L2) norm,
# although the rationale is not specified
value = np.linalg.norm(X[i, :] - X[j, :], 1)
dm[i, j] = (value, j, y[j])
dm[j, i] = (value, i, y[i])
# sort_indices = dm.argsort(1)
# dm.sort(1)
# indices = np.arange(n_samples) #[sort_indices]
# dm = np.dstack((dm, indices))
return dm
# TODO redo with np.where
@staticmethod
def __take_k(dm_i, k, r_index, choice_func):
hits = []
dm_i = sorted(dm_i, key=lambda x: x[0])
for samp in dm_i:
if (samp[1] != r_index) & (k > 0) & (choice_func(samp[2])):
hits.append(samp)
k -= 1
return np.array(hits, int)
@staticmethod
def reliefF_measure(X, y, k_neighbors=1):
"""
Based on the ReliefF algorithm as introduced in:
R.J. Urbanowicz et al. Relief-based feature selection: Introduction and review
Journal of Biomedical Informatics 85 (2018) 189–203
Differs with skrebate.ReliefF
Only for complete X
Rather than repeating the algorithm m(TODO Ask Nikita about user defined) times,
implement it exhaustively (i.e. n times, once for each instance)
for relatively small n (up to one thousand).
:param X: array-like {n_samples, n_features}
Training instances to compute the feature importance scores from
:param y: array-like {n_samples}
Training labels
:param k_neighbors: int (default: 1)
The number of neighbors to consider when assigning feature importance scores.
More neighbors results in more accurate scores, but takes longer.
Selection of k hits and misses is the basic difference to Relief
and ensures greater robustness of the algorithm concerning noise.
:return: array-like {n_features}
Feature importances
"""
X, y = _DefaultMeasures.__check_input(X, y)
f_ratios = np.zeros(X.shape[1])
classes, counts = np.unique(y, return_counts=True)
prior_prob = dict(zip(classes, np.array(counts) / len(y)))
n_samples = X.shape[0]
n_features = X.shape[1]
dm = _DefaultMeasures.__distance_matrix(X, y, n_samples)
for i in range(n_samples):
r = X[i]
dm_i = dm[i]
hits = _DefaultMeasures.__take_k(dm_i, k_neighbors, i, lambda x: x == y[i])
if len(hits) != 0:
ind_hits = hits[:, 1]
else:
ind_hits = []
value_hits = X.take(ind_hits, axis=0)
m_c = np.empty(len(classes), np.ndarray)
for j in range(len(classes)):
if classes[j] != y[i]:
misses = _DefaultMeasures.__take_k(dm_i, k_neighbors, i, lambda x: x == classes[j])
ind_misses = misses[:, 1]
m_c[j] = X.take(ind_misses, axis=0)
for A in range(n_features):
weight_hit = np.sum(np.abs(r[A] - value_hits[:, A]))
weight_miss = 0
for j in range(len(classes)):
if classes[j] != y[i]:
weight_miss += prior_prob[y[j]] * np.sum(np.abs(r[A] - m_c[j][:, A]))
f_ratios[A] += weight_miss / (1 - prior_prob[y[i]]) - weight_hit
# dividing by m * k guarantees that all final weights
# will be normalized within the interval [ − 1, 1].
f_ratios /= n_samples * k_neighbors
# The maximum and minimum values of A are determined over the entire
# set of instances.
# This normalization ensures that weight updates fall
# between 0 and 1 for both discrete and continuous features.
with np.errstate(divide='ignore', invalid="ignore"): # todo
return f_ratios / (np.amax(X, axis=0) - np.amin(X, axis=0))
VDM = filters.VDM() # TODO: probably not a filter
GLOB_MEASURE = {"FitCriterion": _DefaultMeasures.fit_criterion_measure,
"FRatio": _DefaultMeasures.f_ratio_measure,
"GiniIndex": _DefaultMeasures.gini_index,
"InformationGain": _DefaultMeasures.ig_measure,
"MrmrDiscrete": _DefaultMeasures.mrmr_measure,
"SymmetricUncertainty": _DefaultMeasures.su_measure,
"SpearmanCorr": _DefaultMeasures.spearman_corr,
"PearsonCorr": _DefaultMeasures.pearson_corr,
"FechnerCorr": _DefaultMeasures.fechner_corr,
"ReliefF": _DefaultMeasures.reliefF_measure,
"Chi2": _DefaultMeasures.chi2_measure}
class _DefaultCuttingRules:
@staticmethod
def select_best_by_value(value):
return partial(_DefaultCuttingRules.__select_by_value, value=value, more=True)
@staticmethod
def select_worst_by_value(value):
return partial(_DefaultCuttingRules.__select_by_value, value=value, more=False)
@staticmethod
def __select_by_value(scores, value, more=True):
features = []
for key, sc_value in scores.items():
if more:
if sc_value >= value:
features.append(key)
else:
if sc_value <= value:
features.append(key)
return features
@staticmethod
def select_k_best(k):
return partial(_DefaultCuttingRules.__select_k, k=k, reverse=True)
@staticmethod
def select_k_worst(k):
return partial(_DefaultCuttingRules.__select_k, k=k)
@classmethod
def __select_k(cls, scores, k, reverse=False):
if type(k) != int:
raise TypeError("Number of features should be integer")
return [keys[0] for keys in sorted(scores.items(), key=lambda kv: kv[1], reverse=reverse)[:k]]
GLOB_CR = {"Best by value": _DefaultCuttingRules.select_best_by_value,
"Worst by value": _DefaultCuttingRules.select_worst_by_value,
"K best": _DefaultCuttingRules.select_k_best,
"K worst": _DefaultCuttingRules.select_k_worst}
class Filter(object):
    """
    Basic univariate filter class with a chosen (even custom) measure and cutting rule.

    Examples
    --------
    >>> f = Filter("PearsonCorr", GLOB_CR["K best"](6))
    """
    def __init__(self, measure, cutting_rule):
if type(measure) is str:
try:
self.measure = GLOB_MEASURE[measure]
except KeyError:
raise KeyError("No %r measure yet" % measure)
else:
self.measure = measure
if type(cutting_rule) is str:
try:
self.cutting_rule = GLOB_CR[cutting_rule]
except KeyError:
raise KeyError("No %r cutting rule yet" % measure)
else:
self.cutting_rule = cutting_rule
self.feature_scores = None
self.hash = None
def run(self, x, y, feature_names=None, store_scores=False, verbose=0):
try:
x = x.values
y = y.values
except AttributeError:
x = x
self.feature_scores = None
try:
feature_names = x.columns
except AttributeError:
if feature_names is None:
feature_names = list(range(x.shape[1]))
feature_scores = None
if not (self.hash == hash(self.measure)):
feature_scores = dict(zip(feature_names, self.measure(x, y)))
self.hash = hash(self.measure)
if store_scores:
self.feature_scores = feature_scores
selected_features = self.cutting_rule(feature_scores)
return x[:, selected_features]
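# Minimal usage sketch (mirrors the class docstring example): score the features of `x` with a
# built-in measure, keep the two best, and return the reduced feature matrix. `x` and `y` stand
# for any numeric feature matrix and target vector.
#     f = Filter("PearsonCorr", GLOB_CR["K best"](2))
#     x_selected = f.run(x, y, store_scores=True)
#     print(f.feature_scores)    # per-feature Pearson correlation with y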
| 37.670061
| 118
| 0.579477
| 17,515
| 0.946757
| 0
| 0
| 14,736
| 0.796541
| 0
| 0
| 4,979
| 0.269135
|
3dae0fc03c90ecfa32dc4ecfd3dd9dd3da1ccb4d
| 457
|
py
|
Python
|
h3.py
|
alexfmsu/pyquantum
|
78b09987cbfecf549e67b919bb5cb2046b21ad44
|
[
"MIT"
] | null | null | null |
h3.py
|
alexfmsu/pyquantum
|
78b09987cbfecf549e67b919bb5cb2046b21ad44
|
[
"MIT"
] | null | null | null |
h3.py
|
alexfmsu/pyquantum
|
78b09987cbfecf549e67b919bb5cb2046b21ad44
|
[
"MIT"
] | 2
|
2020-07-28T08:40:06.000Z
|
2022-02-16T23:04:58.000Z
|
from PyQuantum.TC3.Cavity import Cavity
from PyQuantum.TC3.Hamiltonian3 import Hamiltonian3
capacity = {
'0_1': 2,
'1_2': 2,
}
wc = {
'0_1': 0.2,
'1_2': 0.3,
}
wa = [0.2] * 3
g = {
'0_1': 1,
'1_2': 200,
}
cv = Cavity(wc=wc, wa=wa, g=g, n_atoms=3, n_levels=3)
# cv.wc_info()
# cv.wa_info()
# cv.g_info()
cv.info()
H = Hamiltonian3(capacity=capacity, cavity=cv, iprint=False)
H.print_states()
H.print_bin_states()
# H.iprint()
| 13.848485
| 60
| 0.603939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 83
| 0.181619
|
3daf0b7c2684b25ee98648b971b2e1076b2cf00c
| 1,058
|
py
|
Python
|
gamestate-changes/change_statistics/other/rectangleAnimation.py
|
phylib/MinecraftNDN-RAFNET19
|
c7bfa7962707af367fafe9d879bc63637c06aec7
|
[
"MIT"
] | 1
|
2020-05-18T15:55:09.000Z
|
2020-05-18T15:55:09.000Z
|
gamestate-changes/change_statistics/other/rectangleAnimation.py
|
phylib/MinecraftNDN-RAFNET19
|
c7bfa7962707af367fafe9d879bc63637c06aec7
|
[
"MIT"
] | null | null | null |
gamestate-changes/change_statistics/other/rectangleAnimation.py
|
phylib/MinecraftNDN-RAFNET19
|
c7bfa7962707af367fafe9d879bc63637c06aec7
|
[
"MIT"
] | null | null | null |
# https://stackoverflow.com/questions/31921313/matplotlib-animation-moving-square
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib import animation
x = [0, 1, 2]
y = [0, 10, 20]
y2 = [40, 30, 20]
colors = ['r','b','g','orange']
fig = plt.figure()
plt.axis('equal')
plt.grid()
ax = fig.add_subplot(111)
ax.set_xlim(-100, 100)
ax.set_ylim(-100, 100)
patch1 = patches.Rectangle((0, 0), 0, 0, fill=False, edgecolor=colors[0])
patch1.set_width(21)
patch1.set_height(21)
patch2 = patches.Rectangle((0, 0), 0, 0, fill=False, edgecolor=colors[1])
patch2.set_width(21)
patch2.set_height(21)
def init():
ax.add_patch(patch1)
ax.add_patch(patch2)
return patch1, patch2,
def animate(i):
patch1.set_xy([x[i], y[i]])
patch2.set_xy([x[i], y2[i]])
return patch1, patch2,
anim = animation.FuncAnimation(fig, animate,
init_func=init,
frames=len(x),
interval=500,
blit=True)
plt.show()
| 25.190476
| 81
| 0.614367
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 105
| 0.099244
|
3daf498d7521399146cf380a60792cc98a71c488
| 6,145
|
py
|
Python
|
MakeMytripChallenge/script/IFtrial.py
|
divayjindal95/DataScience
|
d976a5e3ac9bd36e84149642a5b93f7bfc3540cf
|
[
"MIT"
] | null | null | null |
MakeMytripChallenge/script/IFtrial.py
|
divayjindal95/DataScience
|
d976a5e3ac9bd36e84149642a5b93f7bfc3540cf
|
[
"MIT"
] | null | null | null |
MakeMytripChallenge/script/IFtrial.py
|
divayjindal95/DataScience
|
d976a5e3ac9bd36e84149642a5b93f7bfc3540cf
|
[
"MIT"
] | null | null | null |
import sys
import warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression,LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold,cross_val_score,LeaveOneOut
#from sklearn.cross_validation import KFold,train_test_split,cross_val_score
train_data = pd.read_csv("../data/train.csv")
train_data_len=len(train_data)
test_data=pd.read_csv("../data/test.csv")
test_data_len=len(test_data)
def getint(data):
nicedata=data
cls=dict()
for i in xrange(len(nicedata.columns)):
if data.dtypes[i]==object and data.columns[i]!='P':
le = LabelEncoder()
nicedata[nicedata.columns[i]] = le.fit_transform(nicedata[nicedata.columns[i]])
cls[nicedata.columns[i]]=le.classes_
return nicedata,cls
data=pd.concat([train_data,test_data])
data.A=data.A.fillna(data['A'].mode()[0])
data.D=data.D.fillna(data['D'].mode()[0])
data.E=data.E.fillna(data['E'].mode()[0])
data.G=data.G.fillna(data['G'].mode()[0])
data.F=data.F.fillna(data['F'].mode()[0])
data.B=data.A.fillna(data['B'].median())
data.N=data.N.fillna(data['N'].median())
#print len(data.dropna())
#print data.describe()
data,cls=getint(data)
# data.O=np.log(data.O+1)
# data.H=np.log(data.H+1)
# data.K=np.log(data.K+1)
# data.N=np.log(data.N+1)
# data.C=np.log(data.C+1)
# sc = StandardScaler()
# data.O=sc.fit_transform(np.reshape(data.O,(len(data.O),1)))
# sc = StandardScaler()
# data.H=sc.fit_transform(np.reshape(data.H,(len(data.H),1)))
# sc = StandardScaler()
# data.K=sc.fit_transform(np.reshape(data.K,(len(data.K),1)))
# sc = StandardScaler()
# data.N=sc.fit_transform(np.reshape(data.N,(len(data.N),1)))
# sc = StandardScaler()
# data.C=sc.fit_transform(np.reshape(data.C,(len(data.C),1)))
# sc = StandardScaler()
# data.B=sc.fit_transform(np.reshape(data.B,(len(data.B),1)))
data['H_frac']=data.H-data.H.map(lambda x:int(x))
data['H_int'] = data.H.map(lambda x:int(x))
data['C_frac']=data.C-data.C.map(lambda x:int(x))
data['C_int'] = data.C.map(lambda x:int(x))
data['N_frac']=data.N-data.N.map(lambda x:int(x))
data['N_int'] = data.N.map(lambda x:int(x))
data=pd.concat([data,pd.get_dummies(data.A,'A')],axis=1)
data=pd.concat([data,pd.get_dummies(data.F,'F')],axis=1)
print data.head()
print data.columns
trncols=[u'A', u'B','C_frac','C_int', u'D', u'E', u'F', u'G', u'H_int','H_frac', u'I', u'J', u'K',
u'L', u'M','N_frac','N_int', u'O']
trncols=[u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'I', u'J', u'K', u'L', u'M', u'N', u'O', u'id', u'H_frac', u'H_int', u'C_frac', u'C_int', u'N_frac', u'N_int', u'A_0', u'A_1', u'F_0', u'F_1', u'F_2', u'F_3', u'F_4', u'F_5', u'F_6', u'F_7', u'F_8', u'F_9', u'F_10', u'F_11', u'F_12', u'F_13']
testcols=['P']
data_bin = ['A','I','J','L','F']
#trncols=data_bin
fin_train_data=data.iloc[:len(train_data)]
fin_test_data=data.iloc[len(train_data):]
#print fin_train_data[(fin_train_data.I==1) & (fin_train_data.J==0)].tostring()
print len(fin_train_data)
print len(fin_train_data[(fin_train_data.I==1) & (fin_train_data.J==1)]),len(fin_train_data[(fin_train_data.I==1) & (fin_train_data.J==1) & (fin_train_data.P==1)]),
print len(fin_train_data[(fin_train_data.I==0) & (fin_train_data.J==0)]),len(fin_train_data[(fin_train_data.I==0) & (fin_train_data.J==0) & (fin_train_data.P==0)])
print len(fin_train_data[(fin_train_data.I==0) & (fin_train_data.J==1)]),len(fin_train_data[(fin_train_data.I==0) & (fin_train_data.J==1) & (fin_train_data.P==0)])
print len(fin_test_data[(fin_test_data.I==1) & (fin_test_data.J==0)]),len(fin_test_data)
fin_train_data = fin_train_data[(fin_train_data.I==1) & (fin_train_data.J==0)]
from sklearn.utils import shuffle
fin_train_data= shuffle(fin_train_data)
X=fin_train_data[trncols]
Y=fin_train_data[testcols]
rfc=GradientBoostingClassifier(n_estimators=30)
#rfc=LogisticRegression()
rfc=LinearRegression()
#rfc=MultinomialNB()
kf=KFold(n_splits=5)
lo = LeaveOneOut()
accs=cross_val_score(rfc,X,Y,cv=kf)
accslo=cross_val_score(rfc,X,Y,cv=lo)
#print np.mean(accs),np.mean(accslo)
rfc.fit(X,Y)
#print rfc.score(X,Y)
#print rfc.predict(X)<0.5
rsss = pd.DataFrame((Y==0)==(rfc.predict(X)<0.5))
#print rsss[rsss.P==True]
# asnls=[]
#
# orans=y.P.tolist()
# x=x.reset_index(xrange(len(y)))
#
# for i in xrange(len(x)):
# if x.I.iloc[i]==0 and x.J.iloc[i]==0:
# asnls.append(1)
# if x.I.iloc[i]==1 and x.J.iloc[i]==1:
# asnls.append(1)
# if x.I.iloc[i]==0 and x.J.iloc[i]==1:
# asnls.append(1)
# if x.I.iloc[i]==1 and x.J.iloc[i]==0:
# asnls.append(orans[i])
# i+=1
#
# res=0
# for a,b in zip(asnls,orans):
# res+=np.abs(a-b)
# print res/len(orans)
fintestindex=fin_test_data.index
for e in fintestindex:
if (fin_test_data['I'][e]==1) and (fin_test_data['J'][e]==1):
fin_test_data['P'][e]=0
if (fin_test_data['I'][e]==0) and (fin_test_data['J'][e]==0):
fin_test_data['P'][e]=1
if (fin_test_data['I'][e]==0) and (fin_test_data['J'][e]==1):
fin_test_data['P'][e]=1
# if (fin_test_data['I'][e]==1) and (fin_test_data['J'][e]==0):
# fin_test_data['P']=0
print fin_test_data.P
remaining=fin_test_data[fin_test_data.P.isnull()]
remainingans =rfc.predict(remaining[trncols])>0.5
fin_test_data[fin_test_data.P.isnull()]['P'][:]=np.reshape(remainingans.astype(int),(len(remainingans)))
fin_test_data[fin_test_data.P.isnull()]['P'][:]=1
print fin_test_data[fin_test_data.P.isnull()]['P'][:]
#print fin_test_data.P
final = pd.DataFrame()
final['id']=fin_test_data.id
# #final['P']=pd.to_numeric(rfc.predict(fin_test_data[trncols]),downcast='signed')
# final['P']=rfc.predict(fin_test_data[trncols]).astype(int)
# final.to_csv('../data/final.csv',index=False)
| 34.138889
| 300
| 0.682832
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,250
| 0.366151
|
3daf789bd0a2214d01837395979045b5721435c8
| 16,895
|
py
|
Python
|
qf_lib/backtesting/order/order_factory.py
|
webclinic017/qf-lib
|
96463876719bba8a76c8269cef76addf3a2d836d
|
[
"Apache-2.0"
] | 198
|
2019-08-16T15:09:23.000Z
|
2022-03-30T12:44:00.000Z
|
qf_lib/backtesting/order/order_factory.py
|
webclinic017/qf-lib
|
96463876719bba8a76c8269cef76addf3a2d836d
|
[
"Apache-2.0"
] | 13
|
2021-01-07T10:15:19.000Z
|
2022-03-29T13:01:47.000Z
|
qf_lib/backtesting/order/order_factory.py
|
webclinic017/qf-lib
|
96463876719bba8a76c8269cef76addf3a2d836d
|
[
"Apache-2.0"
] | 29
|
2019-08-16T15:21:28.000Z
|
2022-02-23T09:53:49.000Z
|
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Mapping, Dict, List
from qf_lib.backtesting.broker.broker import Broker
from qf_lib.backtesting.contract.contract import Contract
from qf_lib.backtesting.contract.contract_to_ticker_conversion.base import ContractTickerMapper
from qf_lib.backtesting.order.execution_style import ExecutionStyle
from qf_lib.backtesting.order.order import Order
from qf_lib.backtesting.order.time_in_force import TimeInForce
from qf_lib.common.enums.frequency import Frequency
from qf_lib.common.utils.logging.qf_parent_logger import qf_logger
from qf_lib.common.utils.miscellaneous.function_name import get_function_name
from qf_lib.data_providers.data_provider import DataProvider
class OrderFactory:
""" Creates Orders.
Parameters
----------
broker: Broker
broker used to access the portfolio
data_provider: DataProvider
data provider used to download prices. In case of backtesting, the DataHandler wrapper should be used.
contract_to_ticker_mapper: ContractTickerMapper
object mapping contracts to tickers
"""
def __init__(self, broker: Broker, data_provider: DataProvider, contract_to_ticker_mapper: ContractTickerMapper):
self.broker = broker
self.data_provider = data_provider
self.contract_to_ticker_mapper = contract_to_ticker_mapper
self.logger = qf_logger.getChild(self.__class__.__name__)
def orders(self, quantities: Mapping[Contract, int], execution_style: ExecutionStyle,
time_in_force: TimeInForce) -> List[Order]:
"""
Creates a list of Orders for given numbers of shares for each given asset.
Orders requiring 0 shares will be removed from resulting order list
Parameters
----------
quantities: Mapping[Contract, int]
mapping of a Contract to an amount of shares which should be bought/sold.
If number is positive then asset will be bought. Otherwise it will be sold.
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
Returns
--------
List[Order]
list of generated orders
"""
self._log_function_call(vars())
order_list = []
for contract, quantity in quantities.items():
if quantity != 0:
order_list.append(Order(contract, quantity, execution_style, time_in_force))
return order_list
def target_orders(self, target_quantities: Mapping[Contract, float], execution_style: ExecutionStyle,
time_in_force: TimeInForce, tolerance_quantities: Mapping[Contract, float] = None) -> List[Order]:
"""
Creates a list of Orders from a dictionary of desired target number of shares (number of shares which should be
present in the portfolio after executing the Order).
If the position doesn't already exist, the new Order is placed for the :target_quantity of shares.
If the position does exist the Order for the difference between the target number of shares
and the current number of shares is placed.
Parameters
----------
target_quantities: Mapping[Contract, int]
mapping of a Contract to a target number of shares which should be present in the portfolio after the Order
is executed. After comparing with tolerance the math.floor of the quantity will be taken.
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
tolerance_quantities: None, Mapping[Contract, int]
            tells what the tolerance for the target_quantities is (in both directions) for each Contract.
The tolerance is expressed in shares.
For example: assume that currently the portfolio contains 100 shares of asset A.
then calling target_orders({A: 101}, ..., tolerance_quantities={A: 2}) will not generate any trades as
            the tolerance of 2 allows the allocation to stay at 100, while the target value is 101.
Another example:
assume that currently the portfolio contains 100 shares of asset A.
then calling target_value_order({A: 103}, ..., tolerance_quantities={A: 2}) will generate a BUY order
for 3 shares
if abs(target - actual) > tolerance buy or sell assets to match the target
If tolerance for a specific contract is not provided it is assumed to be 0
Returns
--------
List[Order]
list of generated orders
"""
self._log_function_call(vars())
# Dict of Contract -> Quantities of shares to buy/sell
quantities = dict()
if tolerance_quantities is None:
tolerance_quantities = {}
contract_to_positions = {position.contract(): position for position in self.broker.get_positions()}
for contract, target_quantity in target_quantities.items():
position = contract_to_positions.get(contract, None)
tolerance_quantity = tolerance_quantities.get(contract, 0)
if position is not None:
current_quantity = position.quantity()
else:
current_quantity = 0
quantity = target_quantity - current_quantity
if abs(quantity) > tolerance_quantity and quantity != 0: # tolerance_quantity can be 0
quantities[contract] = math.floor(quantity) # type: int
return self.orders(quantities, execution_style, time_in_force)
def value_orders(self, values: Mapping[Contract, float], execution_style: ExecutionStyle,
time_in_force: TimeInForce, frequency: Frequency = None) -> List[Order]:
"""
Creates a list of Orders by specifying the amount of money which should be spent on each asset rather
than the number of shares to buy/sell.
Parameters
----------
values: Mapping[Contract, int]
mapping of a Contract to the amount of money which should be spent on the asset (expressed in the currency
in which the asset is traded)
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
frequency: Frequency
frequency for the last available price sampling
Returns
--------
List[Order]
list of generated orders
"""
self._log_function_call(vars())
quantities, _ = self._calculate_target_shares_and_tolerances(values, frequency=frequency)
int_quantities = {contract: math.floor(quantity) for contract, quantity in quantities.items()}
return self.orders(int_quantities, execution_style, time_in_force)
def percent_orders(self, percentages: Mapping[Contract, float], execution_style: ExecutionStyle,
time_in_force: TimeInForce, frequency: Frequency = None) -> List[Order]:
"""
Creates a list of Orders by specifying the percentage of the current portfolio value which should be spent
on each asset.
Parameters
----------
percentages: Mapping[Contract, int]
mapping of a Contract to a percentage value of the current portfolio which should be allocated in the asset.
This is specified as a decimal value (e.g. 0.5 means 50%)
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
frequency: Frequency
frequency for the last available price sampling (daily or minutely)
Returns
--------
List[Order]
list of generated orders
"""
self._log_function_call(vars())
portfolio_value = self.broker.get_portfolio_value()
values = {contract: portfolio_value * fraction for contract, fraction in percentages.items()}
return self.value_orders(values, execution_style, time_in_force, frequency)
def target_value_orders(self, target_values: Mapping[Contract, float], execution_style: ExecutionStyle,
time_in_force: TimeInForce, tolerance_percentage: float = 0.0, frequency: Frequency = None)\
-> List[Order]:
"""
Creates a list of Orders by specifying how much should be allocated in each asset after the Orders
have been executed.
        For example, if we already have 10M invested in 'SPY US Equity' and this method is called with a target
        value of 11M, then only 1M will be spent on this asset
Parameters
----------
target_values: Mapping[Contract, int]
mapping of a Contract to a value which should be allocated in the asset after the Order has been executed
(expressed in the currency in which the asset is traded)
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
tolerance_percentage: float
            tells us what the tolerance for the target_values is (in both directions).
The tolerance is expressed as percentage of target_values.
For example: assume that currently the portfolio contains asset A with allocation 10 000$.
then calling target_value_order({A: 10 500}, ..., tolerance_percentage=0.05) will not generate any trades as
the tolerance of 0.05 allows the allocation to be 10 000$, while target value is 10 500$ (tolerance value
would be equal to 0.05 * 10 500 = 525 and the difference between current and target value would be < 525$).
Another example:
For example: assume that currently the portfolio contains asset A with allocation 10 000$.
then calling target_value_order({A: 13 000}, ..., tolerance_percentage=0.1) will generate a BUY order
corresponding to 3000$ of shares. The tolerance of 0.1 does not allow a difference of 3000$
if abs(target - actual) > tolerance_percentage * target value
frequency: Frequency
frequency for the last available price sampling (daily or minutely)
Returns
--------
List[Order]
list of generated orders
"""
self._log_function_call(vars())
assert 0.0 <= tolerance_percentage < 1.0, "The tolerance_percentage should belong to [0, 1) interval"
target_quantities, tolerance_quantities = \
self._calculate_target_shares_and_tolerances(target_values, tolerance_percentage, frequency)
return self.target_orders(target_quantities, execution_style, time_in_force, tolerance_quantities)
def target_percent_orders(self, target_percentages: Mapping[Contract, float], execution_style: ExecutionStyle,
time_in_force: TimeInForce, tolerance_percentage: float = 0.0, frequency: Frequency = None) \
-> List[Order]:
"""
Creates an Order adjusting a position to a value equal to the given percentage of the portfolio.
Parameters
----------
target_percentages: Mapping[Contract, int]
mapping of a Contract to a percentage of a current portfolio value which should be allocated in each asset
after the Order has been carried out
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
tolerance_percentage: float
            tells us what the tolerance for the target_percentages is (in both directions). The tolerance is expressed
in percentage points (0.02 corresponds to 2pp of the target_value). For more details look at the description
of target_value_orders.
frequency: Frequency
frequency for the last available price sampling (daily or minutely)
Returns
--------
List[Order]
list of generated orders
"""
self._log_function_call(vars())
assert 0.0 <= tolerance_percentage < 1.0, "The tolerance_percentage should belong to [0, 1) interval"
portfolio_value = self.broker.get_portfolio_value()
target_values = {
contract: portfolio_value * target_percent for contract, target_percent in target_percentages.items()}
return self.target_value_orders(target_values, execution_style, time_in_force, tolerance_percentage, frequency)
def _calculate_target_shares_and_tolerances(
self, contract_to_amount_of_money: Mapping[Contract, float], tolerance_percentage: float = 0.0,
frequency: Frequency = None) -> (Mapping[Contract, float], Mapping[Contract, float]):
"""
Returns
----------
Tuple(Mapping[Contract, float], Mapping[Contract, float])
Tells how many shares of each asset we should have in order to match the target and what is the tolerance
(in number of shares) for each asset
"""
tickers_to_contract_and_amount_of_money = self._make_tickers_to_contract_and_amount_of_money(
contract_to_amount_of_money)
tickers = list(tickers_to_contract_and_amount_of_money.keys())
        # In the case of live trading, get_last_available_price will use datetime.now() as the current time to obtain
        # the last price; in the case of a backtest it will use the data handler's timer to compute the date
current_prices = self.data_provider.get_last_available_price(tickers, frequency)
# Contract -> target number of shares
target_quantities = dict() # type: Dict[Contract, float]
# Contract -> tolerance expressed as number of shares
tolerance_quantities = dict() # type: Dict[Contract, float]
for ticker, (contract, amount_of_money) in tickers_to_contract_and_amount_of_money.items():
current_price = current_prices.loc[ticker]
divisor = (current_price * contract.contract_size)
target_quantity = amount_of_money / divisor # type: float
target_quantities[contract] = target_quantity
tolerance_quantity = target_quantity * tolerance_percentage
tolerance_quantities[contract] = tolerance_quantity
return target_quantities, tolerance_quantities
def _make_tickers_to_contract_and_amount_of_money(self, contract_to_amount_of_money):
tickers_to_contract_and_amount_of_money = dict()
for contract, amount_of_money in contract_to_amount_of_money.items():
ticker = self.contract_to_ticker_mapper.contract_to_ticker(contract)
tickers_to_contract_and_amount_of_money[ticker] = contract, amount_of_money
return tickers_to_contract_and_amount_of_money
def _log_function_call(self, params_dict):
if 'self' in params_dict:
del params_dict['self']
fn_name_level_above = get_function_name(1)
log_message = "Function call: '{}' with parameters:".format(fn_name_level_above)
for key, value in params_dict.items():
if isinstance(value, dict) and value:
value_str = ""
for inner_k, inner_v in value.items():
value_str += "\n\t\t{}: {}".format(inner_k, inner_v)
else:
value_str = str(value)
log_message += "\n\t{}: {}".format(key, value_str)
self.logger.debug(log_message)
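# ---------------------------------------------------------------------------
# A minimal, self-contained sketch (editorial assumption, not qf-lib code) of
# the tolerance rule described in the target_orders docstring above:
# "if abs(target - actual) > tolerance, buy or sell assets to match the target".
# The helper name below is hypothetical and only illustrates the arithmetic.
if __name__ == "__main__":
    def _example_target_quantity(current: int, target: int, tolerance: int) -> int:
        """Signed number of shares to trade, or 0 when the difference is within tolerance."""
        difference = target - current
        return math.floor(difference) if abs(difference) > tolerance else 0

    # Mirrors the worked examples given in the target_orders docstring.
    assert _example_target_quantity(current=100, target=101, tolerance=2) == 0  # within tolerance
    assert _example_target_quantity(current=100, target=103, tolerance=2) == 3  # BUY 3 shares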
| 48.409742
| 123
| 0.674223
| 15,534
| 0.919335
| 0
| 0
| 0
| 0
| 0
| 0
| 9,696
| 0.57383
|
3db22ed381d2b08ee0407932f289e02567c77fca
| 1,268
|
py
|
Python
|
src/test_network3.py
|
chansonzhang/FirstDL
|
41ad7def19c42882f0418fe44ce395f7b5492f36
|
[
"Apache-2.0"
] | null | null | null |
src/test_network3.py
|
chansonzhang/FirstDL
|
41ad7def19c42882f0418fe44ce395f7b5492f36
|
[
"Apache-2.0"
] | null | null | null |
src/test_network3.py
|
chansonzhang/FirstDL
|
41ad7def19c42882f0418fe44ce395f7b5492f36
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018 Zhang, Chen. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# @Time : 3/12/2019 20:18
# @Author : Zhang, Chen (chansonzhang)
# @Email : ZhangChen.Shaanxi@gmail.com
# @FileName: test_network3.py
import network3
from network3 import Network
from network3 import ConvPoolLayer, FullyConnectedLayer, SoftmaxLayer
training_data, validation_data, test_data = network3.load_data_shared()
mini_batch_size = 10
net = Network([
FullyConnectedLayer(n_in=784, n_out=100),
SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
net.SGD(training_data, 60, mini_batch_size, 0.1,
validation_data, test_data)
| 42.266667
| 80
| 0.69795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 826
| 0.65142
|
3db26a9a64ef3907fd6d3bfdd43c6b7c844f6a0f
| 303
|
py
|
Python
|
mood_sense/serializers.py
|
D-Denysenko/health-app
|
18d1e9c492fb00694e1987a6cdaa2197ff4efa11
|
[
"MIT"
] | null | null | null |
mood_sense/serializers.py
|
D-Denysenko/health-app
|
18d1e9c492fb00694e1987a6cdaa2197ff4efa11
|
[
"MIT"
] | 9
|
2021-03-19T08:05:00.000Z
|
2022-03-12T00:15:53.000Z
|
mood_sense/serializers.py
|
D-Denysenko/health-app
|
18d1e9c492fb00694e1987a6cdaa2197ff4efa11
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from .models import Mood
class MoodSerializer(serializers.ModelSerializer):
class Meta:
model = Mood
fields = ['profile', 'characteristic', 'latitude', 'longitude', 'image', 'location']
read_only_fields = ['latitude', 'longitude']
| 23.307692
| 92
| 0.686469
| 234
| 0.772277
| 0
| 0
| 0
| 0
| 0
| 0
| 84
| 0.277228
|
3db6b1a2ad7d586c5f66023f21c351a35d9fd997
| 7,604
|
py
|
Python
|
Appserver/Test/ApiUnitTesting/testBusquedaCandidatos.py
|
seguijoaquin/taller2
|
f41232516de15fe045805131b09299e5c2634e5e
|
[
"MIT"
] | 2
|
2016-06-06T03:26:49.000Z
|
2017-08-06T18:12:33.000Z
|
Appserver/Test/ApiUnitTesting/testBusquedaCandidatos.py
|
seguijoaquin/taller2
|
f41232516de15fe045805131b09299e5c2634e5e
|
[
"MIT"
] | 60
|
2016-03-19T16:01:27.000Z
|
2016-06-23T16:26:10.000Z
|
Appserver/Test/ApiUnitTesting/testBusquedaCandidatos.py
|
seguijoaquin/taller2
|
f41232516de15fe045805131b09299e5c2634e5e
|
[
"MIT"
] | null | null | null |
import json
import requests
import unittest
import Utilities
# Preconditions:
# Interests:
# There must be no user in the Shared store that has "interesUnico"
#
Address = "http://localhost:8000"
# TODO: maybe move the URIs to their respective classes
URIResgistro = "/registro"
URILogin = "/login"
URIPedirCandidato = "/perfil"
URIEliminar = "/eliminar"
def crearHeadersParaRegistro(usuario):
return {'Usuario': usuario,'Password': "password"}#, 'Content-Type': 'application/json' }
def crearHeadersParaElLogin(usuario):
return {'Usuario': usuario,'Password': "password", 'TokenGCM': "APA91bFundy4qQCiRnhUbMOcsZEwUBpbuPjBm-wnyBv600MNetW5rp-5Cg32_UA0rY_gmqqQ8pf0Cn-nyqoYrAl6BQTPT3dXNYFuHeWYEIdLz0RwAhN2lGqdoiYnCM2V_O8MonYn3rL6hAtYaIz_b0Jl2xojcKIOqQ" }
def abrirJson(ruta):
with open(ruta, 'r') as archivoJson:
parseado = json.load(archivoJson)
return parseado
def crearHeadersParaBuscarCandidatos(usuario,token):
return {'Usuario': usuario, 'Token': token}
class TestBusquedaCandidatos(unittest.TestCase):
usuario1 = 'usuarioCandidato1'
usuario2 = 'usuarioCandidato2'
usuarioSinIntereses = "./usuario.json"
    passwordCorrecto = 'password'  # used for every test user
    # A category that is guaranteed to exist in the Shared store
categoriaValida = "outdoors"
interesUnico = "INTERES UNICO QUE NO TIENE NADIE MAS"
interesCompartido = "INTERES QUE SOLO DEBE SER COMPARTIDO POR DOS USUARIOS"
msgNoSeEncontraronCandidatos = "Candidato no encontrado"
msgSeEncontraronCandidatos = "Candidato encontrado"
def agregarEmailAlUsuario(self, bodyUsuario, email):
bodyUsuario["user"]["email"] = email
def agregarValorDeInteresAlUsuario(self,bodyUsuario, valorDeInteres):
interes = json.loads('{}')
interes["category"] = self.categoriaValida
interes["value"] = valorDeInteres
bodyUsuario["user"]["interests"].append(interes)
def hacerLoginDeUsuario(self, usuario):
        headUsuarioRegistrado = {'Usuario': usuario, 'Password': self.passwordCorrecto}
reply = requests.get(Address + URILogin,headers=headUsuarioRegistrado)
return reply
usuariosParaBorrar = []
def tearDown(self):
for usuario in self.usuariosParaBorrar:
headEliminarUsuario = {'Usuario': usuario,'Password': self.passwordCorrecto }
replyDelete = requests.delete(Address + URIEliminar, headers=headEliminarUsuario)
del self.usuariosParaBorrar[:]
def test_UsuarioPideUnCandidatoPeroNoSeEncuentra(self):
        # For this test there must be no user in the shared store with the interest "interesUnico"
        # Build the user body with a unique interest that no other user should use
nombreUsuario = Utilities.transformarEnMail("test_UsuarioPideUnCandidatoPeroNoSeEncuentra")
bodyUsuario = abrirJson(self.usuarioSinIntereses)
self.agregarEmailAlUsuario(bodyUsuario, nombreUsuario)
self.agregarValorDeInteresAlUsuario(bodyUsuario, self.interesUnico)
headRegistrarUsuario = crearHeadersParaRegistro(nombreUsuario)
replyRegistro = requests.put(Address + URIResgistro, headers=headRegistrarUsuario, data=json.dumps(bodyUsuario))
        # Log in
headLoginUsuario = crearHeadersParaElLogin(nombreUsuario)
replyLogin = requests.get(Address + URILogin, headers=headLoginUsuario)
        # Request a candidate
headPedirCandidatos = crearHeadersParaBuscarCandidatos(nombreUsuario,replyLogin.headers["Token"])
replyPedirCandidatos = requests.get(Address + URIPedirCandidato, headers=headPedirCandidatos)
self.assertEqual(replyPedirCandidatos.reason,self.msgNoSeEncontraronCandidatos)
self.assertEqual(replyPedirCandidatos.status_code,201)
self.usuariosParaBorrar.extend([nombreUsuario])
def crearBodyConUnInteres(self, email, interes):
bodyUsuario = abrirJson(self.usuarioSinIntereses)
self.agregarEmailAlUsuario(bodyUsuario, email)
self.agregarValorDeInteresAlUsuario(bodyUsuario, interes)
return bodyUsuario
def registrarUsuario(self, nombreUsuario, bodyUsuario):
headRegistrarUsuario = crearHeadersParaRegistro(nombreUsuario)
return requests.put(Address + URIResgistro, headers=headRegistrarUsuario, data=json.dumps(bodyUsuario))
def loguearUsuario(self, nombreUsuario):
headLoginUsuario = crearHeadersParaElLogin(nombreUsuario)
return requests.get(Address + URILogin, headers=headLoginUsuario)
def pedirCandidato(self, nombreUsuario, replyLogin):
headPedirCandidatos = crearHeadersParaBuscarCandidatos(nombreUsuario,replyLogin.headers["Token"])
return requests.get(Address + URIPedirCandidato, headers=headPedirCandidatos)
def test_DosUsuariosConUnInteresEspecificoPidenUnCandidatoYSeEncuentranUnoAlOtro(self):
nombreUsuario1 = Utilities.transformarEnMail("1test_DosUsuariosConUnInteresEspecificoPidenUnCandidatoYSeEncuentranUnoAlOtro")
nombreUsuario2 = Utilities.transformarEnMail("2test_DosUsuariosConUnInteresEspecificoPidenUnCandidatoYSeEncuentranUnoAlOtro")
bodyUsuario1 = self.crearBodyConUnInteres(nombreUsuario1, self.interesCompartido)
bodyUsuario2 = self.crearBodyConUnInteres(nombreUsuario2, self.interesCompartido)
replyRegistro1 = self.registrarUsuario(nombreUsuario1, bodyUsuario1)
replyRegistro2 = self.registrarUsuario(nombreUsuario2, bodyUsuario2)
replyLogin1 = self.loguearUsuario(nombreUsuario1)
replyLogin2 = self.loguearUsuario(nombreUsuario2)
        # Request a candidate
replyPedirCandidatos1 = self.pedirCandidato(nombreUsuario1, replyLogin1)
replyPedirCandidatos2 = self.pedirCandidato(nombreUsuario2, replyLogin2)
self.assertEqual(replyPedirCandidatos1.reason,self.msgSeEncontraronCandidatos)
self.assertEqual(replyPedirCandidatos1.status_code,200)
self.assertEqual(replyPedirCandidatos2.reason,self.msgSeEncontraronCandidatos)
self.assertEqual(replyPedirCandidatos2.status_code,200)
self.usuariosParaBorrar.extend([nombreUsuario1, nombreUsuario2])
def test_DosUsuariosMatcheanYVotanUnoPorElOtro(self):
nombreUsuario1 = Utilities.transformarEnMail("test_DosUsuariosMatcheanYVotanUnoPorElOtro1")
nombreUsuario2 = Utilities.transformarEnMail("test_DosUsuariosMatcheanYVotanUnoPorElOtro2")
categoria = "outdoors"
valor = "test_DosUsuariosMatcheanYVotanUnoPorElOtro"
Utilities.registrarUsuarioSinEmailYSinIntereses(nombreUsuario1,categoria, valor)
Utilities.registrarUsuarioSinEmailYSinIntereses(nombreUsuario2,categoria, valor)
tokenSesion1 = Utilities.registrarYLoguearAlUsuarioSinEmail(nombreUsuario1)
tokenSesion2 = Utilities.registrarYLoguearAlUsuarioSinEmail(nombreUsuario2)
candidatoParaUsuario1 = Utilities.pedirCandidato(nombreUsuario1,tokenSesion1)
candidatoParaUsuario2 = Utilities.pedirCandidato(nombreUsuario2,tokenSesion2)
replyVotacion1 = Utilities.likearCandidato(nombreUsuario1, tokenSesion1, candidatoParaUsuario1)
replyVotacion2 = Utilities.likearCandidato(nombreUsuario2, tokenSesion2, candidatoParaUsuario2)
self.assertEqual("El voto se registro correctamente",replyVotacion1.reason)
self.assertEqual(200,replyVotacion1.status_code)
self.assertEqual("El voto se registro correctamente",replyVotacion2.reason)
self.assertEqual(200,replyVotacion2.status_code)
self.usuariosParaBorrar.extend([nombreUsuario1, nombreUsuario2])
| 43.451429
| 233
| 0.768017
| 6,607
| 0.868885
| 0
| 0
| 0
| 0
| 0
| 0
| 1,495
| 0.196607
|
3db6b5d6bbd126263b54d30034f80a8d201b13af
| 3,639
|
py
|
Python
|
scripts/plots/yearly_summary.py
|
jarad/dep
|
fe73982f4c70039e1a31b9e8e2d9aac31502f803
|
[
"MIT"
] | 1
|
2019-11-26T17:49:19.000Z
|
2019-11-26T17:49:19.000Z
|
scripts/plots/yearly_summary.py
|
jarad/dep
|
fe73982f4c70039e1a31b9e8e2d9aac31502f803
|
[
"MIT"
] | 54
|
2018-12-12T18:02:31.000Z
|
2022-03-28T19:14:25.000Z
|
scripts/plots/yearly_summary.py
|
jarad/dep
|
fe73982f4c70039e1a31b9e8e2d9aac31502f803
|
[
"MIT"
] | 4
|
2020-03-02T22:59:38.000Z
|
2021-12-09T15:49:00.000Z
|
import datetime
import cStringIO
import psycopg2
from shapely.wkb import loads
import numpy as np
import sys
from geopandas import read_postgis
import matplotlib
matplotlib.use("agg")
from pyiem.plot import MapPlot
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import matplotlib.colors as mpcolors
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from pyiem.util import get_dbconn
V2NAME = {
"avg_loss": "Detachment",
"qc_precip": "Precipitation",
"avg_delivery": "Delivery",
"avg_runoff": "Runoff",
}
V2MULTI = {
"avg_loss": 4.463,
"qc_precip": 1.0 / 25.4,
"avg_delivery": 4.463,
"avg_runoff": 1.0 / 25.4,
}
V2UNITS = {
"avg_loss": "tons/acre",
"qc_precip": "inches",
"avg_delivery": "tons/acre",
"avg_runoff": "inches",
}
V2RAMP = {
"avg_loss": [0, 2.5, 5, 10, 20, 40, 60],
"qc_precip": [15, 25, 35, 45, 55],
"avg_delivery": [0, 2.5, 5, 10, 20, 40, 60],
"avg_runoff": [0, 2.5, 5, 10, 15, 30],
}
year = int(sys.argv[1])
v = sys.argv[2]
ts = datetime.date(year, 1, 1)
ts2 = datetime.date(year, 12, 31)
scenario = 0
# suggested for runoff and precip
if v in ["qc_precip", "avg_runoff"]:
c = ["#ffffa6", "#9cf26d", "#76cc94", "#6399ba", "#5558a1"]
# suggested for detachment
elif v in ["avg_loss"]:
c = ["#cbe3bb", "#c4ff4d", "#ffff4d", "#ffc44d", "#ff4d4d", "#c34dee"]
# suggested for delivery
elif v in ["avg_delivery"]:
c = ["#ffffd2", "#ffff4d", "#ffe0a5", "#eeb74d", "#ba7c57", "#96504d"]
cmap = mpcolors.ListedColormap(c, "james")
cmap.set_under("white")
cmap.set_over("black")
pgconn = get_dbconn("idep")
cursor = pgconn.cursor()
title = "for %s" % (ts.strftime("%-d %B %Y"),)
if ts != ts2:
title = "for period between %s and %s" % (
ts.strftime("%-d %b %Y"),
ts2.strftime("%-d %b %Y"),
)
m = MapPlot(
axisbg="#EEEEEE",
nologo=True,
sector="iowa",
nocaption=True,
title="DEP %s %s" % (V2NAME[v], title),
caption="Daily Erosion Project",
)
# Check that we have data for this date!
cursor.execute(
"""
SELECT value from properties where key = 'last_date_0'
"""
)
lastts = datetime.datetime.strptime(cursor.fetchone()[0], "%Y-%m-%d")
floor = datetime.date(2007, 1, 1)
df = read_postgis(
"""
WITH data as (
SELECT huc_12,
sum("""
+ v
+ """) as d from results_by_huc12
WHERE scenario = %s and valid >= %s and valid <= %s
GROUP by huc_12)
SELECT ST_Transform(simple_geom, 4326) as geo, coalesce(d.d, 0) as data
from huc12 i LEFT JOIN data d
ON (i.huc_12 = d.huc_12) WHERE i.scenario = %s and i.states ~* 'IA'
""",
pgconn,
params=(scenario, ts, ts2, scenario),
geom_col="geo",
index_col=None,
)
df["data"] = df["data"] * V2MULTI[v]
if df["data"].max() < 0.01:
bins = [0.01, 0.02, 0.03, 0.04, 0.05]
else:
bins = V2RAMP[v]
norm = mpcolors.BoundaryNorm(bins, cmap.N)
patches = []
# m.ax.add_geometries(df['geo'], ccrs.PlateCarree())
for i, row in df.iterrows():
c = cmap(norm([row["data"]]))[0]
arr = np.asarray(row["geo"].exterior)
points = m.ax.projection.transform_points(
ccrs.Geodetic(), arr[:, 0], arr[:, 1]
)
p = Polygon(points[:, :2], fc=c, ec="k", zorder=2, lw=0.1)
m.ax.add_patch(p)
# m.ax.add_collection(PatchCollection(patches, match_original=True))
m.drawcounties()
m.drawcities()
lbl = [round(_, 2) for _ in bins]
u = "%s, Avg: %.2f" % (V2UNITS[v], df["data"].mean())
m.draw_colorbar(
bins,
cmap,
norm,
clevlabels=lbl,
title="%s :: %s" % (V2NAME[v], V2UNITS[v]),
)
plt.savefig("%s_%s.png" % (year, v))
| 25.992857
| 74
| 0.622424
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,346
| 0.369882
|
3db72a55f192a9c9ab68f0478ca0ffc316b36c78
| 1,053
|
py
|
Python
|
package/diana/utils/iter_dates.py
|
thomasyi17/diana2
|
2167053dfe15b782d96cb1e695047433f302d4dd
|
[
"MIT"
] | 15
|
2019-02-12T23:26:09.000Z
|
2021-12-21T08:53:58.000Z
|
package/diana/utils/iter_dates.py
|
thomasyi17/diana2
|
2167053dfe15b782d96cb1e695047433f302d4dd
|
[
"MIT"
] | 2
|
2019-01-23T21:13:12.000Z
|
2019-06-28T15:45:51.000Z
|
package/diana/utils/iter_dates.py
|
thomasyi17/diana2
|
2167053dfe15b782d96cb1e695047433f302d4dd
|
[
"MIT"
] | 6
|
2019-01-23T20:22:50.000Z
|
2022-02-03T03:27:04.000Z
|
from datetime import datetime, timedelta
class IterDates(object):
def __init__(self, start: datetime, stop: datetime, step: timedelta):
self.start = start
self.stop = stop
self.step = step
self.value = (self.start, self.start + self.step)
def __iter__(self):
return self
def __next__(self):
next_value = self.value
if next_value[0] >= self.stop:
raise StopIteration
self.start = self.start + self.step
self.value = (self.start, min(self.stop, self.start + self.step))
return next_value
class FuncByDates(object):
def __init__(self, func, start: datetime, stop: datetime, step: timedelta):
self._func = func
self._iterdate = IterDates(start, stop, step)
self.value = self._func(*self._iterdate.value)
def __iter__(self):
return self
def __next__(self):
next_value = self.value
next(self._iterdate)
self.value = self._func(*self._iterdate.value)
return next_value
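# A brief usage sketch (assumed, not part of the original module): walk over
# week-long (start, end) windows, and with FuncByDates map a function over them.
# Runs with the standard library only.
if __name__ == "__main__":
    start = datetime(2020, 1, 1)
    stop = datetime(2020, 1, 22)
    step = timedelta(days=7)

    for window_start, window_end in IterDates(start, stop, step):
        print(window_start, "->", window_end)   # three 7-day windows

    labels = FuncByDates(lambda a, b: f"{a:%Y-%m-%d}..{b:%Y-%m-%d}", start, stop, step)
    print(list(labels))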
| 26.325
| 79
| 0.624881
| 1,006
| 0.955366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3db739475a32d4a4cd03afcbff8864712c35cad0
| 193
|
py
|
Python
|
Exercicios Curso Em Video Mundo 2/ex067.py
|
JorgeTranin/Python_Curso_Em_Video
|
be74c9301aafc055bdf883be649cb8b7716617e3
|
[
"MIT"
] | null | null | null |
Exercicios Curso Em Video Mundo 2/ex067.py
|
JorgeTranin/Python_Curso_Em_Video
|
be74c9301aafc055bdf883be649cb8b7716617e3
|
[
"MIT"
] | null | null | null |
Exercicios Curso Em Video Mundo 2/ex067.py
|
JorgeTranin/Python_Curso_Em_Video
|
be74c9301aafc055bdf883be649cb8b7716617e3
|
[
"MIT"
] | null | null | null |
cont = 1
while True:
t = int(input('Quer saber a tabuada de que numero ? '))
if t < 0:
break
for c in range (1, 11):
print(f'{t} X {c} = {t * c}')
print('Obrigado!')
| 24.125
| 59
| 0.507772
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 72
| 0.373057
|
3db86f3d8bdc658afbe080624e5b8f952805ce4b
| 1,172
|
py
|
Python
|
src/PassGen/PassGen.py
|
Natthapolmnc/PasswordGenerator
|
1d481de1b4773af99558c68e9570d1801c1f6e2e
|
[
"MIT"
] | null | null | null |
src/PassGen/PassGen.py
|
Natthapolmnc/PasswordGenerator
|
1d481de1b4773af99558c68e9570d1801c1f6e2e
|
[
"MIT"
] | null | null | null |
src/PassGen/PassGen.py
|
Natthapolmnc/PasswordGenerator
|
1d481de1b4773af99558c68e9570d1801c1f6e2e
|
[
"MIT"
] | null | null | null |
import random as rd
def genPass(num , length):
print ("Password Generator")
print ("===================\n")
numpass=num
lenpass=length
    AlphaLcase = [chr(m) for m in range(65, 91)]   # uppercase letters A-Z
    AlphaCcase = [chr(n) for n in range(97, 123)]  # lowercase letters a-z
    Intset = [chr(p) for p in range(48, 58)]       # digits 0-9
    listsetpass = []
    for j in range(numpass):  # one password per requested count
randAlphaset=rd.randint(2,lenpass)
randAlphaL=rd.randint(1,randAlphaset)
randAlphaH=randAlphaset-randAlphaL
randIntset=lenpass-randAlphaset
password=[]
strpassword=""
for i in range(randAlphaH):
randindexAlphaH=rd.randint(0,len(AlphaCcase)-1)
password.append(AlphaCcase[randindexAlphaH])
for k in range(randAlphaL):
randindexAlphaL=rd.randint(0,len(AlphaLcase)-1)
password.append(AlphaLcase[randindexAlphaL])
for l in range(randIntset):
randindexInt=rd.randint(0,len(Intset)-1)
password.append(Intset[randindexInt])
        rd.shuffle(password)  # shuffle once so the guaranteed letter/digit mix is preserved
        strpassword = "".join(password)
listsetpass+=[strpassword]
return listsetpass
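# A short usage sketch (assumption: not part of the original file).
if __name__ == "__main__":
    # Ask for 3 passwords of length 12 and print them one per line.
    for pwd in genPass(3, 12):
        print(pwd)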
| 35.515152
| 59
| 0.617747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.038396
|
3db8e72e1423808652d32817702cb2ec2246d0ea
| 5,413
|
py
|
Python
|
services/offers_service.py
|
martinmladenov/RankingBot
|
1df4e37b4b9a68b3f553b2f55acc77663163be1b
|
[
"MIT"
] | 2
|
2020-06-03T20:19:33.000Z
|
2021-04-29T08:05:09.000Z
|
services/offers_service.py
|
martinmladenov/RankingBot
|
1df4e37b4b9a68b3f553b2f55acc77663163be1b
|
[
"MIT"
] | 41
|
2020-06-09T11:11:37.000Z
|
2022-03-20T21:18:42.000Z
|
services/offers_service.py
|
martinmladenov/RankingBot
|
1df4e37b4b9a68b3f553b2f55acc77663163be1b
|
[
"MIT"
] | 9
|
2020-05-27T19:04:55.000Z
|
2021-11-01T12:57:55.000Z
|
from datetime import date, datetime, timedelta
from matplotlib import pyplot as plt, dates as mdates
from matplotlib.ticker import MaxNLocator
from helpers import programmes_helper
filename = 'offers.png'
class OffersService:
def __init__(self, db_conn):
self.db_conn = db_conn
async def generate_graph(self, programme: programmes_helper.Programme, step: bool, year: int):
if year not in programme.places:
raise ValueError
rows = await self.db_conn.fetch(
'SELECT rank, is_private, offer_date FROM ranks '
'WHERE programme = $1 AND rank > $2 AND offer_date IS NOT NULL AND year = $3 '
'ORDER BY offer_date, rank', programme.id, programme.places[year], year)
x_values = [date(year, 4, 15)]
y_values = [programme.places[year]]
if rows:
for i in range(len(rows)):
row = rows[i]
rank = row[0]
is_private = row[1]
offer_date = row[2]
# Round rank if it's private
if is_private:
rank = round_rank(rank)
# make sure it's not lower than the previous rank
if i > 0 and rank < y_values[i - 1]:
rank = y_values[i - 1]
# make sure it's not higher than the next public rank
for j in range(i, len(rows)):
if not rows[j][1]:
if rank > rows[j][0]:
rank = rows[j][0]
break
x_values.append(offer_date)
y_values.append(rank)
end_date = date(year, 8, 15)
curr_date = datetime.utcnow().date()
x_values.append(min(end_date, curr_date))
y_values.append(y_values[len(y_values) - 1])
fill_between_end = programme.places[year] - (y_values[len(y_values) - 1] - programme.places[year]) / 15
bottom_limit = fill_between_end - (y_values[len(y_values) - 1] - fill_between_end) / 40
bg_color = '#36393F'
fg_color = programme.graph_colour
plt.rcParams['ytick.color'] = 'w'
plt.rcParams['xtick.color'] = 'w'
plt.rcParams['axes.edgecolor'] = 'w'
plt.rcParams['axes.labelcolor'] = '#767676'
ax = plt.gca()
formatter = mdates.DateFormatter("%d %b")
ax.xaxis.set_major_formatter(formatter)
locator = mdates.WeekdayLocator(byweekday=x_values[0].weekday())
ax.xaxis.set_major_locator(locator)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.set_xlabel('Offer date')
ax.set_ylabel('Ranking number')
plt.setp(ax.spines.values(), visible=False)
ax.set_facecolor(bg_color)
ax.set_axisbelow(True)
plt.grid(color='#444444', linestyle='--')
if programme.visa_cutoff is not None:
cutoff_date = date(year, programme.visa_cutoff[1], programme.visa_cutoff[0])
if (datetime.utcnow() + timedelta(days=20)).date() >= cutoff_date:
plt.axvline(cutoff_date, ymin=0.02, linestyle='--', alpha=0.7, color=fg_color)
plt.text(cutoff_date, y_values[-1], "Non-EU cutoff", rotation='vertical', color=fg_color,
verticalalignment='center_baseline', horizontalalignment='right', stretch='condensed',
fontsize='small', fontweight='ultralight', fontstyle='italic')
if not step:
plt.plot(x_values, y_values, linestyle='--', color=fg_color)
plt.fill_between(x_values, y_values, y2=fill_between_end, alpha=0.15, color=fg_color)
plt.step(x_values, y_values, where='post', alpha=(0.5 if not step else None), color=fg_color)
plt.fill_between(x_values, y_values, y2=fill_between_end, step="post", alpha=(0.20 if not step else 0.35),
color=fg_color)
plt.title(f'{programme.uni_name} {programme.display_name} ({year})', color='w')
ax.set_ylim(bottom=bottom_limit)
# only show every second week
for label in ax.get_xaxis().get_ticklabels()[1::2]:
label.set_visible(False)
for label in ax.get_xaxis().get_major_ticks()[1::2]:
label.set_visible(False)
plt.savefig(filename, facecolor=bg_color, dpi=200)
plt.close()
async def get_highest_ranks_with_offers(self, year):
offers = await self.db_conn.fetch(
'select r.programme, r.rank, MAX(d.offer_date), d.is_private '
'from (select programme, max(rank) as rank from ranks '
'where ranks.offer_date is not null and ranks.year = $1 '
'group by programme) as r '
'inner join ranks as d '
'on r.programme = d.programme and r.rank = d.rank and d.year = $1 '
'and d.offer_date is not null '
'group by r.programme, r.rank, d.is_private '
'order by MAX(d.offer_date) desc', year)
for i in range(len(offers)):
programme_id, rank = offers[i][0:2]
places = programmes_helper.programmes[programme_id].places[year]
if rank <= places:
offers[i] = (programme_id, places, date(year, 4, 15), False)
return offers
def round_rank(number, base=5):
return base * round(number / base)
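# A small illustration (editorial assumption, not part of the original module) of how
# round_rank coarsens a private rank to the nearest multiple of `base` before plotting.
# It only exercises the module-level helper, assuming the module's own imports resolve.
if __name__ == "__main__":
    assert round_rank(123) == 125          # nearest multiple of 5
    assert round_rank(122) == 120
    assert round_rank(7, base=10) == 10    # custom base
    print("round_rank examples OK")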
| 41.320611
| 114
| 0.585258
| 5,131
| 0.947903
| 0
| 0
| 0
| 0
| 5,035
| 0.930168
| 1,027
| 0.189728
|
3db9d9cd9e40d9cc018a319420be1ba7e9abac3d
| 11,397
|
py
|
Python
|
lib/python3.8/site-packages/ansible_collections/community/postgresql/plugins/modules/postgresql_user_obj_stat_info.py
|
cjsteel/python3-venv-ansible-2.10.5
|
c95395c4cae844dc66fddde9b4343966f4b2ecd5
|
[
"Apache-1.1"
] | null | null | null |
lib/python3.8/site-packages/ansible_collections/community/postgresql/plugins/modules/postgresql_user_obj_stat_info.py
|
cjsteel/python3-venv-ansible-2.10.5
|
c95395c4cae844dc66fddde9b4343966f4b2ecd5
|
[
"Apache-1.1"
] | null | null | null |
lib/python3.8/site-packages/ansible_collections/community/postgresql/plugins/modules/postgresql_user_obj_stat_info.py
|
cjsteel/python3-venv-ansible-2.10.5
|
c95395c4cae844dc66fddde9b4343966f4b2ecd5
|
[
"Apache-1.1"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_user_obj_stat_info
short_description: Gather statistics about PostgreSQL user objects
description:
- Gathers statistics about PostgreSQL user objects.
version_added: '0.2.0'
options:
filter:
description:
- Limit the collected information by comma separated string or YAML list.
- Allowable values are C(functions), C(indexes), C(tables).
- By default, collects all subsets.
- Unsupported values are ignored.
type: list
elements: str
schema:
description:
- Restrict the output by certain schema.
type: str
db:
description:
- Name of database to connect.
type: str
aliases:
- login_db
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
trust_input:
description:
- If C(no), check the value of I(session_role) is potentially dangerous.
- It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
type: bool
default: yes
version_added: '0.2.0'
notes:
- C(size) and C(total_size) returned values are presented in bytes.
- For tracking function statistics the PostgreSQL C(track_functions) parameter must be enabled.
See U(https://www.postgresql.org/docs/current/runtime-config-statistics.html) for more information.
seealso:
- module: community.postgresql.postgresql_info
- module: community.postgresql.postgresql_ping
- name: PostgreSQL statistics collector reference
description: Complete reference of the PostgreSQL statistics collector documentation.
link: https://www.postgresql.org/docs/current/monitoring-stats.html
author:
- Andrew Klychkov (@Andersson007)
- Thomas O'Donnell (@andytom)
extends_documentation_fragment:
- community.postgresql.postgres
'''
EXAMPLES = r'''
- name: Collect information about all supported user objects of the acme database
community.postgresql.postgresql_user_obj_stat_info:
db: acme
- name: Collect information about all supported user objects in the custom schema of the acme database
community.postgresql.postgresql_user_obj_stat_info:
db: acme
schema: custom
- name: Collect information about user tables and indexes in the acme database
community.postgresql.postgresql_user_obj_stat_info:
db: acme
filter: tables, indexes
'''
RETURN = r'''
indexes:
description: User index statistics
returned: always
type: dict
sample: {"public": {"test_id_idx": {"idx_scan": 0, "idx_tup_fetch": 0, "idx_tup_read": 0, "relname": "test", "size": 8192, ...}}}
tables:
description: User table statistics.
returned: always
type: dict
sample: {"public": {"test": {"analyze_count": 3, "n_dead_tup": 0, "n_live_tup": 0, "seq_scan": 2, "size": 0, "total_size": 8192, ...}}}
functions:
description: User function statistics.
returned: always
type: dict
sample: {"public": {"inc": {"calls": 1, "funcid": 26722, "self_time": 0.23, "total_time": 0.23}}}
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.postgresql.plugins.module_utils.database import (
check_input,
)
from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
get_conn_params,
postgres_common_argument_spec,
)
from ansible.module_utils.six import iteritems
# ===========================================
# PostgreSQL module specific support methods.
#
class PgUserObjStatInfo():
"""Class to collect information about PostgreSQL user objects.
Args:
module (AnsibleModule): Object of AnsibleModule class.
cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
Attributes:
module (AnsibleModule): Object of AnsibleModule class.
cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
executed_queries (list): List of executed queries.
info (dict): Statistics dictionary.
obj_func_mapping (dict): Mapping of object types to corresponding functions.
schema (str): Name of a schema to restrict stat collecting.
"""
def __init__(self, module, cursor):
self.module = module
self.cursor = cursor
self.info = {
'functions': {},
'indexes': {},
'tables': {},
}
self.obj_func_mapping = {
'functions': self.get_func_stat,
'indexes': self.get_idx_stat,
'tables': self.get_tbl_stat,
}
self.schema = None
def collect(self, filter_=None, schema=None):
"""Collect statistics information of user objects.
Kwargs:
filter_ (list): List of subsets which need to be collected.
schema (str): Restrict stat collecting by certain schema.
Returns:
``self.info``.
"""
if schema:
self.set_schema(schema)
if filter_:
for obj_type in filter_:
obj_type = obj_type.strip()
obj_func = self.obj_func_mapping.get(obj_type)
if obj_func is not None:
obj_func()
else:
self.module.warn("Unknown filter option '%s'" % obj_type)
else:
for obj_func in self.obj_func_mapping.values():
obj_func()
return self.info
def get_func_stat(self):
"""Get function statistics and fill out self.info dictionary."""
query = "SELECT * FROM pg_stat_user_functions"
if self.schema:
query = "SELECT * FROM pg_stat_user_functions WHERE schemaname = %s"
result = exec_sql(self, query, query_params=(self.schema,),
add_to_executed=False)
if not result:
return
self.__fill_out_info(result,
info_key='functions',
schema_key='schemaname',
name_key='funcname')
def get_idx_stat(self):
"""Get index statistics and fill out self.info dictionary."""
query = "SELECT * FROM pg_stat_user_indexes"
if self.schema:
query = "SELECT * FROM pg_stat_user_indexes WHERE schemaname = %s"
result = exec_sql(self, query, query_params=(self.schema,),
add_to_executed=False)
if not result:
return
self.__fill_out_info(result,
info_key='indexes',
schema_key='schemaname',
name_key='indexrelname')
def get_tbl_stat(self):
"""Get table statistics and fill out self.info dictionary."""
query = "SELECT * FROM pg_stat_user_tables"
if self.schema:
query = "SELECT * FROM pg_stat_user_tables WHERE schemaname = %s"
result = exec_sql(self, query, query_params=(self.schema,),
add_to_executed=False)
if not result:
return
self.__fill_out_info(result,
info_key='tables',
schema_key='schemaname',
name_key='relname')
def __fill_out_info(self, result, info_key=None, schema_key=None, name_key=None):
# Convert result to list of dicts to handle it easier:
result = [dict(row) for row in result]
for elem in result:
# Add schema name as a key if not presented:
if not self.info[info_key].get(elem[schema_key]):
self.info[info_key][elem[schema_key]] = {}
# Add object name key as a subkey
            # (they must be unique within a schema, so no additional checks are needed):
self.info[info_key][elem[schema_key]][elem[name_key]] = {}
            # Add the remaining attributes of this object:
for key, val in iteritems(elem):
if key not in (schema_key, name_key):
self.info[info_key][elem[schema_key]][elem[name_key]][key] = val
if info_key in ('tables', 'indexes'):
schemaname = elem[schema_key]
if self.schema:
schemaname = self.schema
relname = '%s.%s' % (schemaname, elem[name_key])
result = exec_sql(self, "SELECT pg_relation_size (%s)",
query_params=(relname,),
add_to_executed=False)
self.info[info_key][elem[schema_key]][elem[name_key]]['size'] = result[0][0]
if info_key == 'tables':
result = exec_sql(self, "SELECT pg_total_relation_size (%s)",
query_params=(relname,),
add_to_executed=False)
self.info[info_key][elem[schema_key]][elem[name_key]]['total_size'] = result[0][0]
def set_schema(self, schema):
"""If schema exists, sets self.schema, otherwise fails."""
query = ("SELECT 1 FROM information_schema.schemata "
"WHERE schema_name = %s")
result = exec_sql(self, query, query_params=(schema,),
add_to_executed=False)
if result and result[0][0]:
self.schema = schema
else:
self.module.fail_json(msg="Schema '%s' does not exist" % (schema))
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
db=dict(type='str', aliases=['login_db']),
filter=dict(type='list', elements='str'),
session_role=dict(type='str'),
schema=dict(type='str'),
trust_input=dict(type="bool", default=True),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
filter_ = module.params["filter"]
schema = module.params["schema"]
if not module.params["trust_input"]:
check_input(module, module.params['session_role'])
# Connect to DB and make cursor object:
pg_conn_params = get_conn_params(module, module.params)
# We don't need to commit anything, so, set it to False:
db_connection = connect_to_db(module, pg_conn_params, autocommit=False)
cursor = db_connection.cursor(cursor_factory=DictCursor)
############################
# Create object and do work:
pg_obj_info = PgUserObjStatInfo(module, cursor)
info_dict = pg_obj_info.collect(filter_, schema)
# Clean up:
cursor.close()
db_connection.close()
# Return information:
module.exit_json(**info_dict)
if __name__ == '__main__':
main()
| 33.919643
| 137
| 0.623761
| 5,973
| 0.524085
| 0
| 0
| 0
| 0
| 0
| 0
| 5,922
| 0.51961
|
3dbac19444fd45965d236a4f1e5266c9a002aefd
| 1,586
|
py
|
Python
|
lib/run_config.py
|
king/s3vdc
|
baa6689a6344f417758d4d8b4e6c6e966a510b32
|
[
"MIT"
] | 10
|
2020-05-28T07:09:02.000Z
|
2021-04-18T07:38:01.000Z
|
lib/run_config.py
|
king/s3vdc
|
baa6689a6344f417758d4d8b4e6c6e966a510b32
|
[
"MIT"
] | 4
|
2020-11-13T18:51:09.000Z
|
2022-02-10T01:58:16.000Z
|
lib/run_config.py
|
king/s3vdc
|
baa6689a6344f417758d4d8b4e6c6e966a510b32
|
[
"MIT"
] | 4
|
2020-05-29T05:05:18.000Z
|
2021-04-22T01:33:17.000Z
|
"""
Copyright (C) king.com Ltd 2019
https://github.com/king/s3vdc
License: MIT, https://raw.github.com/king/s3vdc/LICENSE.md
"""
import tensorflow as tf
def _session_config() -> tf.ConfigProto:
"""Constructs a session config specifying gpu memory usage.
Returns:
tf.ConfigProto -- session config.
"""
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95, allow_growth=True)
session_config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
return session_config
def default_run_config(
model_dir: str,
save_summary_steps: int = 100,
save_checkpoints_mins: int = 5,
keep_checkpoint_max: int = 5,
) -> tf.estimator.RunConfig:
"""Constructs a tf.contrib.learn.RunConfig instance with the specified model dir and default values.
Arguments:
model_dir {str} -- The model directory to save checkpoints, summary outputs etc.
Keyword Arguments:
save_summary_steps {int} -- save summary every x steps (default: {100})
        save_checkpoints_mins {int} -- save checkpoints every x minutes (default: {5})
keep_checkpoint_max {int} -- keep maximum x checkpoints (default: {5})
Returns:
tf.estimator.RunConfig -- The constructed RunConfig.
"""
return tf.estimator.RunConfig(
model_dir=model_dir,
save_summary_steps=save_summary_steps,
save_checkpoints_steps=None,
save_checkpoints_secs=save_checkpoints_mins * 60, # seconds
keep_checkpoint_max=keep_checkpoint_max,
session_config=_session_config(),
)
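# A minimal usage sketch (assumed; requires the same TF 1.x environment the module itself
# targets). The model directory below is hypothetical.
if __name__ == "__main__":
    run_config = default_run_config(
        model_dir="/tmp/s3vdc_example_model",  # hypothetical output directory
        save_summary_steps=200,
        save_checkpoints_mins=10,
        keep_checkpoint_max=3,
    )
    # The resulting RunConfig can be passed to tf.estimator.Estimator(..., config=run_config).
    print(run_config.model_dir)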
| 31.098039
| 104
| 0.708071
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 816
| 0.514502
|
3dbaf6caeb51e514bda230b2abe9f5f3e8537dce
| 974
|
py
|
Python
|
tests/test_address_book.py
|
kibernick/pycontacts
|
9ec7653cdea582b242a6d5f314b4d0c4bb92dd39
|
[
"MIT"
] | null | null | null |
tests/test_address_book.py
|
kibernick/pycontacts
|
9ec7653cdea582b242a6d5f314b4d0c4bb92dd39
|
[
"MIT"
] | null | null | null |
tests/test_address_book.py
|
kibernick/pycontacts
|
9ec7653cdea582b242a6d5f314b4d0c4bb92dd39
|
[
"MIT"
] | null | null | null |
from pycontacts import AddressBook
from pycontacts.models import Person
from pycontacts.managers import (
EmailAddressManager,
GroupManager,
PhoneNumberManager,
PersonManager,
StreetAddressManager,
)
def test_create_book():
book = AddressBook()
assert book._store is not None
assert isinstance(book._store, dict)
def test_create_person_model_class():
book = AddressBook()
p = book.persons.create()
assert isinstance(p, Person)
assert p.book is not None
assert isinstance(p.book, AddressBook)
assert p.book._store is book._store
def test_create_book_with_managers(address_book):
assert isinstance(address_book.email_addresses, EmailAddressManager)
assert isinstance(address_book.groups, GroupManager)
assert isinstance(address_book.phone_numbers, PhoneNumberManager)
assert isinstance(address_book.persons, PersonManager)
assert isinstance(address_book.street_addresses, StreetAddressManager)
| 29.515152
| 74
| 0.776181
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3dbc71f9f330f9191f0001053d461bd694f61316
| 46,266
|
py
|
Python
|
lifeloopweb/db/models.py
|
jaimecruz21/lifeloopweb
|
ba0ffe1ea94ba3323a4e9c66c9506a338cae3212
|
[
"MIT"
] | null | null | null |
lifeloopweb/db/models.py
|
jaimecruz21/lifeloopweb
|
ba0ffe1ea94ba3323a4e9c66c9506a338cae3212
|
[
"MIT"
] | null | null | null |
lifeloopweb/db/models.py
|
jaimecruz21/lifeloopweb
|
ba0ffe1ea94ba3323a4e9c66c9506a338cae3212
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# pylint: disable=no-value-for-parameter,too-many-nested-blocks
import contextlib
import datetime
import functools
import re
from abc import abstractmethod
import sqlalchemy as sa
from sqlalchemy import event, exc, func, select
from sqlalchemy.ext import declarative
from sqlalchemy.ext import hybrid
from sqlalchemy import orm
import sqlalchemy_utils
from lifeloopweb import config, constants, exception, logging, renders, subscription
from lifeloopweb.db import utils as db_utils
from lifeloopweb.webpack import webpack
from lifeloopweb.helpers.base_helper import Helper
from flask_login import UserMixin
LOG = logging.get_logger(__name__)
CONF = config.CONF
helper = Helper()
TABLE_KWARGS = {"mysql_engine": "InnoDB",
"mysql_charset": "utf8",
"mysql_collate": "utf8_general_ci"}
DB_NAME = "lifeloopweb_{}".format(CONF.get("ENVIRONMENT"))
# TODO(mdietz): when this comes from a configuration, we need to
# force the charset to utf8
ENGINE_URL = CONF.get("DB_ENGINE_URL")
if not ENGINE_URL:
ENGINE_URL = ("mysql+pymysql://root:@127.0.0.1/"
"{}?charset=utf8".format(DB_NAME))
connection_debug = CONF.get("database.connection.debug")
if connection_debug.lower() not in ["true", "false"]:
raise exception.InvalidConfigValue(value=connection_debug,
key="database.connection.debug")
connection_debug = connection_debug.lower() == "true"
connection_pool_size = int(CONF.get("database.connection.poolsize"))
connection_overflow_pool = int(CONF.get("database.connection.overflowpool"))
# NOTE: MySQL defaults to 8 hour connection timeouts. It's possible that
# docker-compose or our hosting provider will sever connections sooner.
# if we see "MySQL has gone away" tweaking this variable is the thing
# to revisit
connection_pool_recycle = int(CONF.get("database.connection.poolrecycle"))
engine_kwargs = {}
if "sqlite" not in ENGINE_URL:
engine_kwargs = {
"pool_size": connection_pool_size,
"max_overflow": connection_overflow_pool,
"pool_recycle": connection_pool_recycle}
engine = sa.create_engine(ENGINE_URL, echo=connection_debug,
**engine_kwargs)
SessionFactory = orm.sessionmaker(bind=engine, expire_on_commit=False,
autocommit=False, autoflush=True)
# TODO use of the scoped session needs to be evaluated against
# greenthreading servers like gunicorn and uwsgi. The scope
# by default is to thread local, as in threading.local
# and not the greenthread specifically. Things that use greenthreads
# have to be gt aware, so really we may just do Scoped and Unscoped
# sessions. Alternatively, we hack eventlet to attach the scope there
# http://docs.sqlalchemy.org/en/latest/orm/contextual.html#using-custom-created-scopes
ScopedSession = orm.scoped_session(SessionFactory)
Session = ScopedSession
# TODO We may only want to do this conditionally. I've used it in the past
# but I think the pool_recycling may be enough
@event.listens_for(engine, "engine_connect")
def ping_connection(connection, branch):
if branch:
return
save_should_close_with_result = connection.should_close_with_result
connection.should_close_with_result = False
try:
connection.scalar(select([1]))
except exc.DBAPIError as err:
if err.connection_invalidated:
connection.scalar(select([1]))
else:
raise
finally:
connection.should_close_with_result = save_should_close_with_result
@contextlib.contextmanager
def transaction():
try:
session = ScopedSession()
yield session
session.commit()
except:
LOG.exception("Transaction failed! Rolling back...")
session.rollback()
raise
def teardown():
ScopedSession.remove()
def can_connect():
try:
engine.connect()
return True
except Exception:
return False
class MetaBase(declarative.DeclarativeMeta):
def __init__(cls, klsname, bases, attrs):
if klsname != "Base":
super().__init__(klsname, bases, attrs)
for attr_name, attr in attrs.items():
if isinstance(attr, sa.Column):
query_single_getter_name = "get_by_{}".format(attr_name)
query_all_getter_name = "get_all_by_{}".format(attr_name)
if not hasattr(cls, query_single_getter_name):
setattr(cls, query_single_getter_name,
functools.partial(cls._get_by, attr))
if not hasattr(cls, query_all_getter_name):
setattr(cls, query_all_getter_name,
functools.partial(cls._get_all_by, attr))
# TODO This does not work
# if isinstance(attr, hybrid.hybrid_property):
# print(attr, type(attr))
# setattr(cls, "get_by_{}".format(attr_name),
# functools.partial(cls._get_by_property, attr))
class ModelBase(object):
created_at = sa.Column(sa.DateTime(), server_default=func.now())
updated_at = sa.Column(sa.DateTime(), onupdate=func.now())
__table_args__ = TABLE_KWARGS
@declarative.declared_attr
def __tablename__(cls): # pylint: disable=no-self-argument
""" Returns a snake_case form of the table name. """
return db_utils.pluralize(db_utils.to_snake_case(cls.__name__))
def __eq__(self, other):
if not other:
return False
return self.id == other.id
def __getitem__(self, key):
try:
return getattr(self, key)
except AttributeError:
raise KeyError(key)
def __setitem__(self, key, value):
if hasattr(self, key):
return setattr(self, key, value)
raise AttributeError(key)
def __contains__(self, key):
return hasattr(self, key)
def update(self, **fields):
for attr, value in fields.items():
if attr not in self:
raise exception.ModelUnknownAttrbute(model=self, attr=attr)
self[attr] = value
return self
@classmethod
def get(cls, pk):
return Session.query(cls).filter(cls.id == pk).first()
@classmethod
def _get_by_property(cls, prop):
LOG.debug("Fetching '%s' by property '%s'", cls, prop)
return Session.query(cls).filter(prop).first()
@classmethod
def _get_by(cls, field, value):
LOG.debug("Fetching one '%s.%s' by value '%s'", cls, field, value)
return Session.query(cls).filter(field == value).first()
@classmethod
def _get_all_by(cls, field, value):
LOG.debug("Fetching all '%s.%s' with value '%s'", cls, field, value)
return Session.query(cls).filter(field == value).all()
@classmethod
def last(cls):
return Session.query(cls).order_by(cls.id.desc()).first()
def save(self):
LOG.debug("Attempting to save '%s'", self)
with transaction() as session:
session.add(self)
def delete(self):
LOG.debug("Attempting to delete '%s'", self)
with transaction() as session:
session.delete(self)
def to_dict(self):
return {key: value for key, value in self.__dict__.items()
if not callable(value) and not key.startswith('_')}
Base = declarative.declarative_base(cls=ModelBase, bind=engine,
metaclass=MetaBase)
# pylint: disable=abstract-method,unused-argument
# TODO This parent class may not allow NULL to go into a UUID field :-|
class GUID(sqlalchemy_utils.UUIDType):
"""
Overload of the sqlalchemy_utils UUID class. There are issues
with it and alembic, acknowledged by the maintainer:
https://github.com/kvesteri/sqlalchemy-utils/issues/129
"""
def __init__(self, length=16, binary=True, native=True):
# pylint: disable=unused-argument
# NOTE(mdietz): Ignoring length, see:
# https://github.com/kvesteri/sqlalchemy-utils/issues/129
super(GUID, self).__init__(binary, native)
class HasId(object):
"""id mixin, add to subclasses that have an id."""
id = sa.Column(GUID,
primary_key=True,
default=db_utils.generate_guid)
class ImageMixin(object):
"""image main_image mixin, add to subclasses that have images."""
exclude = tuple(CONF.get('allowed.video.extensions').split(','))
@property
@abstractmethod
def images(self):
raise NotImplementedError
@property
def main_image(self):
images = [Image()]
if self.images:
images = [image for image in self.images if not image.image_url.endswith(self.exclude)]
if not images:
images = [Image()]
return images[-1]
class NotificationType(Base, HasId):
description = sa.Column(sa.String(80), nullable=False)
priority = sa.Column(sa.Integer(), nullable=True)
notifications = orm.relationship("Notification", backref="type")
def __str__(self):
return self.description
def __repr__(self):
return "NotificationType:{}, {}".format(self.id, self.description)
class Notification(Base, HasId):
notification_type_id = sa.Column(sa.ForeignKey("notification_types.id"),
nullable=False)
user_from_id = sa.Column(GUID(), sa.ForeignKey("users.id"), nullable=False)
user_to_id = sa.Column(GUID(), sa.ForeignKey("users.id"), nullable=False)
group_id = sa.Column(GUID(), sa.ForeignKey("groups.id"), nullable=True)
organization_id = sa.Column(GUID(), sa.ForeignKey("organizations.id"),
nullable=True)
acknowledge_only = sa.Column(sa.Boolean(), nullable=False, default=False)
blocked_as_spam = sa.Column(sa.Boolean(), nullable=False, default=False)
accepted = sa.Column(sa.DateTime(), nullable=True, default=None)
declined = sa.Column(sa.DateTime(), nullable=True, default=None)
acknowledged = sa.Column(sa.DateTime(), nullable=True, default=None)
@property
def needs_action(self):
return not self.acknowledge_only and not self.accepted and not self.declined
def prevent_duplicate(self):
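        # Reuse an existing notification from the same sender/type/org to this user's
        # group instead of creating a duplicate; return False if it was blocked as spam.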
user = User.get(self.user_to_id)
notifications = user.group_notifications(self.group_id)
for n in notifications:
if (n.user_from_id == self.user_from_id and
n.notification_type_id == self.notification_type_id and
n.organization_id == self.organization_id):
if n.blocked_as_spam:
return False
self.accepted = None
self.declined = None
self.acknowledged = None
elements = self.to_dict()
updated_notification = n.update(**elements)
return updated_notification
return self
class OrganizationRole(Base, HasId):
description = sa.Column(sa.String(120), nullable=False)
priority = sa.Column(sa.Integer(), nullable=True)
users = orm.relationship(
"User", secondary='organization_members',
back_populates="organization_roles")
def __str__(self):
return self.description
def __repr__(self):
return "OrganizationRole:{}, {}".format(self.id, self.description)
class User(Base, HasId, UserMixin, ImageMixin, renders.UserMixin):
# TODO IMO these need to be contact details and a separate table
first_name = sa.Column(sa.String(40), nullable=False)
last_name = sa.Column(sa.String(40), nullable=False)
# TODO Middle name?
# TODO Title?
# TODO Add a wholly separate ContactInfo table instead and one to
# many from this?
email = sa.Column(sa.String(254), nullable=False, unique=True)
# http://stackoverflow.com/questions/3350500/international-phone-number-max-and-min
phone_number = sa.Column(sa.String(16), nullable=True)
hashed_password = sa.Column(sa.String(128), nullable=False)
deleted_at = sa.Column(sa.DateTime(), nullable=True, default=None)
zoom_user_id = sa.Column(sa.String(80), nullable=True)
city = sa.Column(sa.String(80), nullable=True)
date_of_birth = sa.Column(sa.Date(), nullable=True)
super_admin = sa.Column(sa.Boolean(), nullable=False, default=False)
images = orm.relationship('Image', secondary='user_images')
privacy_and_terms_agreed_at = sa.Column(sa.DateTime(), nullable=True, default=None)
# By name of zone rather than offset, which changes all the time
timezone = sa.Column(sa.String(64), nullable=False)
opt_in_texts = sa.Column(sa.Boolean(), nullable=False, default=False)
opt_in_emails = sa.Column(sa.Boolean(), nullable=False, default=False)
notifications_on = sa.Column(sa.Boolean(), nullable=False, default=True)
# last_login = sa.Column(sa.DateTime(), server_default=func.now())
verified_at = sa.Column(sa.DateTime(), nullable=True, default=None)
organizations = orm.relationship(
"Organization", secondary='organization_members',
back_populates="users",
primaryjoin=(
'and_('
'OrganizationMember.user_id==User.id, '
'Organization.activated_at.isnot(None))'))
groups = orm.relationship(
"Group",
secondary='group_members',
back_populates="users",
primaryjoin=(
'and_('
'GroupMember.user_id==User.id, '
'GroupMember.group_id==Group.id, '
'OrganizationGroup.group_id==Group.id, '
'OrganizationGroup.organization_id==Organization.id, '
'Organization.activated_at.isnot(None), '
'Group.archived_at==None)'))
organization_roles = orm.relationship(
"OrganizationRole", secondary='organization_members',
back_populates="users")
group_roles = orm.relationship(
"GroupRole", secondary='group_members',
back_populates="users")
notifications = orm.relationship(
"Notification",
foreign_keys="[Notification.user_to_id]",
backref="to_user")
sent_notifications = orm.relationship(
"Notification",
foreign_keys="[Notification.user_from_id]",
backref="from_user")
group_members = orm.relationship(
"GroupMember",
back_populates="users")
organization_members = orm.relationship(
"OrganizationMember",
back_populates="users")
group_leaders = orm.relationship(
'Group',
secondary='group_members',
back_populates='users',
primaryjoin=(
"and_("
"GroupMember.user_id==User.id, "
"GroupMember.group_id==Group.id, "
"GroupMember.role_id==GroupRole.id, "
"OrganizationGroup.group_id==Group.id, "
"OrganizationGroup.organization_id==Organization.id, "
"GroupRole.description=='Group Leader')"))
def __str__(self):
return self.full_name_and_email
def __repr__(self):
return "User: {}, {}".format(self.id, self.full_name_and_email)
def organizations_created(self):
# TODO: Refactor.
# I think we should add Group.parent_org and Org.creator columns
# to avoid this huge db query
subquery = Session.query(func.min(
OrganizationMember.created_at).label('created_at')).group_by(
OrganizationMember.organization_id).subquery()
query = Session.query(Organization).join(
OrganizationMember, OrganizationRole, User).join(
subquery,
subquery.c.created_at == OrganizationMember.created_at).filter(
Organization.activated_at.isnot(None),
OrganizationRole.description == 'Owner',
User.email == self.email)
return query.all()
@property
def new_notifications(self):
return [n for n in self.notifications if
not n.acknowledged]
@property
def non_acknowledged_notifications(self):
return [n for n in self.sent_notifications if
not n.acknowledged and (n.accepted or n.declined)]
@property
def get_notifications(self):
return (self.new_notifications +
self.non_acknowledged_notifications)
@property
def full_name(self):
return "{} {}".format(self.first_name, self.last_name)
@property
def short_name(self):
return "{} {}.".format(self.first_name, self.last_name[:1])
@property
def full_name_and_email(self):
return "{} ({})".format(self.full_name, self.email)
def group_notifications(self, group_id):
return (n for n in self.notifications if
n.group_id == group_id)
def org_notifications(self, org_id):
return (n for n in self.notifications if
                n.organization_id == org_id)
# NOTE: this fails as soon as we allow a user to have more than one
# role in an organization
def role_for_org(self, org_id):
roles = [om.role for om in self.organization_members
if om.organization.id == org_id]
return roles[0] if roles else None
# NOTE: this fails as soon as we allow a user to have more than one
# role in an group
def role_for_group(self, group_id):
roles = [gm.role for gm in self.group_members
if gm.group and gm.group.id == group_id]
return roles[0] if roles else None
def is_group_member(self, group_id):
return group_id in [g.id for g in self.groups]
def is_org_creator(self, org_id):
organization = Organization.get(org_id)
return organization.creator.id == self.id
def is_org_owner(self, org_id=None):
if not org_id:
return 'Owner' in [g.description for g in
self.organization_roles]
return any([om for om in self.organization_members if
om.organization_id == org_id and
om.role.description == 'Owner'])
def can_view_group_items(self, group_id):
g = Group.get(group_id)
return (self.super_admin or
self.is_group_member(group_id) or
self.is_group_admin(g.parent_org.id))
def is_org_admin(self, org_id=None):
if not org_id:
return 'Organization Administrator' in [g.description for g in
self.organization_roles]
return any([om for om in self.organization_members if
om.organization_id == org_id and
om.role.description == 'Organization Administrator'])
def is_org_member(self, org_id):
return any([om for om in self.organization_members if
om.organization_id == org_id and
om.role.description == 'Member'])
def is_in_org(self, org_id):
return org_id in [g.id for g in self.organizations]
def is_group_leader(self, group_id):
return any([gm for gm in self.group_members if
gm.group_id == group_id and
gm.role.description == 'Group Leader'])
def is_meeting_alternate_host(self, group_id):
        return any([gm for gm in self.group_members if
                    gm.group_id == group_id and
                    gm.can_cohost_meeting])
def is_group_admin(self, org_id=None):
if not org_id:
return 'Group Administrator' in [g.description for g in
self.organization_roles]
return any([om for om in self.organization_members if
om.organization_id == org_id and
om.role.description == 'Group Administrator'])
def is_group_creator(self, org_id=None):
if not org_id:
return 'Group Creator' in [g.description for g in
self.organization_roles]
return any([om for om in self.organization_members if
om.organization_id == org_id and
om.role.description == 'Group Creator'])
def can_add_group(self, group_id=None, org_id=None):
return (self.super_admin or
self.is_org_owner(org_id) or
self.is_org_admin(org_id) or
self.is_group_admin(org_id) or
self.is_group_creator(org_id))
def can_edit_group(self, group_id=None):
group = Group.get(group_id)
org_id = group.parent_org.id
return (self.super_admin or
self.is_group_leader(group.id) or
self.is_group_admin(org_id) or
self.can_edit_org(org_id))
def can_change_group_members_role(self, group):
org_id = group.parent_org.id
return (self.super_admin or
self.is_group_admin(org_id) or
self.can_edit_org(org_id))
def can_edit_org(self, org_id):
return (self.super_admin or
self.is_org_owner(org_id) or
self.is_org_admin(org_id))
def can_manage_subscription(self, org_id):
return any([om for om in self.organization_members if
om.organization_id == org_id and
om.can_manage_subscription])
@classmethod
def get_email_from_full_name_and_email(cls, full_name_and_email):
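        # Pull the e-mail address out of a "Full Name (email@host)" style string;
        # raises InvalidEmail when no address can be found.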
regex = r"(\w+([-+.']\w+)*@\w+([-.]\w+)*\.\w+([-.]\w+)*)"
matches = re.findall(regex, full_name_and_email)
if not matches:
raise exception.InvalidEmail()
return matches[0][0]
class LinkType(Base, HasId):
description = sa.Column(sa.String(200), nullable=False)
priority = sa.Column(sa.Integer(), nullable=True)
link = orm.relationship('Link', backref='link_type')
@property
def icon(self):
return '-'.join(self.description.lower().split(' '))
def __str__(self):
return self.description
def __repr__(self):
return "LinkType:{}, {}".format(self.id, self.description)
class Link(Base, HasId):
link_type_id = sa.Column(GUID(), sa.ForeignKey("link_types.id"))
icon_css_class = sa.Column(sa.String(120))
organization_id = sa.Column(GUID(), sa.ForeignKey("organizations.id"), nullable=True)
group_id = sa.Column(GUID(), sa.ForeignKey("groups.id"), nullable=True)
url = sa.Column(sa.String(250), nullable=False)
@property
def formatted_url(self):
if 'http' in self.url:
return self.url
return "http://{}".format(self.url)
class Address(Base, HasId):
# TODO I think this is the correct mapping
# organization_id = sa.Column(GUID(), sa.ForeignKey("organization.id"))
# organization = orm.relationship("Organization", backref="addresses")
# TODO Nothing International?
# TODO this needs to be split up into street number and street IMO
street_address = sa.Column(sa.String(100), nullable=False)
city = sa.Column(sa.String(100), nullable=False)
# TODO this should be an enum
state = sa.Column(sa.String(30), nullable=False)
# TODO No country?
zip_code = sa.Column(sa.String(9), nullable=True)
organization = orm.relationship('Organization', backref='address')
@property
def formatted(self):
return "{} {}, {} {}".format(self.street_address,
self.city,
self.state,
self.zip_code)
@property
def line1(self):
return "{}".format(self.street_address)
@property
def line2(self):
return "{}, {} {}".format(self.city,
self.state,
self.zip_code)
def __str__(self):
return self.formatted
def __repr__(self):
return "Address:{}, {}".format(self.id, self.formatted)
class ZoomMeeting(Base, HasId, renders.MeetingMixin):
# https://zoom.us/
# TODO Is this the only type they want to support?
# TODO This seems insufficient. Probably need Outlook-meeting-like
# granularity
SCHEDULED_MEETING = 2
REPEATED_MEETING = 3
DEFAULT_MEETING_LENGTH = 60
LIST_LIMIT = int(CONF.get('zoom.meeting.list.limit', 30))
meeting_id = sa.Column(sa.String(255), nullable=False)
duration = sa.Column(sa.Integer(), nullable=False, default=60)
meeting_start = sa.Column(sa.DateTime(), nullable=False, default=None)
# TODO model this as an enumerable type?
repeat_type = sa.Column(sa.String(10))
topic = sa.Column(sa.String(100), nullable=False)
start_url = sa.Column(sa.String(500), nullable=False)
join_url = sa.Column(sa.String(255), nullable=False)
repeat_end_date = sa.Column(sa.Date(), nullable=True, default=None)
user_id = sa.Column(GUID(), sa.ForeignKey("users.id"), nullable=False)
group_id = sa.Column(GUID(), sa.ForeignKey("groups.id"))
def url(self, user_id):
if self.can_host_meeting(user_id):
return self.start_url
return self.join_url
def can_host_meeting(self, user_id):
u = User.get(user_id)
return self.user_id == user_id or u.is_meeting_alternate_host(
self.group_id)
def info(self, timezone):
if self.repeat_type == str(self.REPEATED_MEETING):
output = "Every {} at {}".format(
helper.day_of_week(self.meeting_start, timezone),
helper.time_only_offset(self.meeting_start, timezone))
if self.repeat_end_date:
output += "<br/>{}-{}".format(
self.start_date_with_timezone(timezone),
self.repeat_end_date.strftime(constants.DATE_FORMAT))
return output
if self.single_day_event:
return "{} - {}".format(
self.start_with_timezone(timezone),
self.end_time_with_timezone(timezone))
return "{} - {}".format(
self.start_with_timezone(timezone),
self.end_with_timezone(timezone))
@property
def single_day_event(self):
        return self.start_date == self.end_date
@property
def duration_time(self):
return helper.seconds_to_hours_and_minutes(self.duration)
@property
def start_time(self):
return helper.time_only_offset(self.meeting_start)
@property
def start_date(self):
return helper.date_only_offset(self.meeting_start)
@property
def end_time(self):
return helper.time_only_offset(self.meeting_end)
@property
def end_date(self):
return helper.date_only_offset(self.meeting_end)
@property
def meeting_end(self):
return self.meeting_start + datetime.timedelta(minutes=self.duration)
def start_with_timezone(self, timezone):
return helper.datetime_offset(self.meeting_start, timezone)
def end_with_timezone(self, timezone):
return helper.datetime_offset(self.meeting_end, timezone)
def start_time_with_timezone(self, timezone):
return helper.time_only_offset(self.meeting_start, timezone)
def end_time_with_timezone(self, timezone):
return helper.time_only_offset(self.meeting_end, timezone)
def start_date_with_timezone(self, timezone):
return helper.date_only_offset(self.meeting_start, timezone)
def end_date_with_timezone(self, timezone):
return helper.date_only_offset(self.meeting_end, timezone)
class GroupMember(Base, HasId):
__table_args__ = (sa.UniqueConstraint("group_id", "user_id",
name="group_user_membership"),
TABLE_KWARGS)
# join table for groups and users
group_id = sa.Column(GUID(), sa.ForeignKey("groups.id"))
user_id = sa.Column(GUID(), sa.ForeignKey("users.id"))
role_id = sa.Column(GUID(), sa.ForeignKey("group_roles.id"))
can_cohost_meeting = sa.Column(sa.Boolean(), nullable=False, default=False)
# TODO IMO we don't keep deleted_at OR we keep *all* of them on all models
deleted_at = sa.Column(sa.DateTime(), nullable=True, default=None)
user = orm.relationship('User')
group = orm.relationship('Group')
role = orm.relationship('GroupRole')
users = orm.relationship(
"User",
back_populates="group_members")
# TODO If these represent permissions, we can probably do this better, globally
class GroupRole(Base, HasId):
description = sa.Column(sa.String(80), nullable=False)
priority = sa.Column(sa.Integer(), nullable=True)
users = orm.relationship(
"User", secondary='group_members',
back_populates="group_roles")
def __str__(self):
return self.description
def __repr__(self):
return "GroupRole:{}, {}".format(id, self.description)
class GroupDocument(Base, HasId, renders.GroupDocumentMixin):
group_id = sa.Column(GUID(), sa.ForeignKey("groups.id"))
friendly_name = sa.Column(sa.String(80), nullable=False)
file_url = sa.Column(sa.String(250), nullable=True)
class AgeRange(Base, HasId):
description = sa.Column(sa.String(80))
priority = sa.Column(sa.Integer(), nullable=True)
groups = orm.relationship('Group', backref='age_range')
def __str__(self):
return self.description
def __repr__(self):
return "AgeRange:{}, {}".format(id, self.description)
class GroupMeetTime(Base, HasId):
group_id = sa.Column(GUID(), sa.ForeignKey("groups.id"))
meet_time_type_id = sa.Column(GUID(), sa.ForeignKey("meet_time_types.id"))
def __str__(self):
return "GroupMeetTime group_id: {}, meet_time_type_id: {}".format(
self.group_id, self.meet_time_type_id)
def __repr__(self):
return "GroupMeetTime:{}, group_id: {}, meet_time_type_id: {}".format(
self.id, self.group_id, self.meet_time_type_id)
def __hash__(self):
return hash(str(self))
class MeetTimeType(Base, HasId):
description = sa.Column(sa.String(80), nullable=False)
group_meet_time = orm.relationship('GroupMeetTime',
backref='meet_time_type')
priority = sa.Column(sa.Integer(), nullable=True)
def __str__(self):
return self.description
def __repr__(self):
return "MeetTimeType:{}, {}".format(self.id, self.description)
class GroupType(Base, HasId):
description = sa.Column(sa.String(80), nullable=False)
priority = sa.Column(sa.Integer(), nullable=True)
# Has a one to many relationship to Groups, but Why? maybe backref?
groups = orm.relationship('Group', backref='group_type')
def __str__(self):
return self.description
def __repr__(self):
return "GroupType:{}, {}".format(self.id, self.description)
class GroupPrivacySetting(Base, HasId):
priority = sa.Column(sa.Integer(), nullable=True)
description = sa.Column(sa.String(80), nullable=False)
    # has a one to many relationship to Groups, but why? maybe backref?
@hybrid.hybrid_property
def is_public(self):
return self.description.startswith("Public")
@hybrid.hybrid_property
def is_org_only(self):
return self.description.startswith("Organization Only")
def __str__(self):
return self.description
def __repr__(self):
return "GroupPrivacySetting:{}, {}".format(self.id, self.description)
class OrganizationGroup(Base, HasId):
organization_id = sa.Column(GUID(), sa.ForeignKey("organizations.id"))
group_id = sa.Column(GUID(), sa.ForeignKey("groups.id"))
order = sa.Column(sa.Integer(), default=0)
organization = orm.relationship('Organization')
group = orm.relationship('Group')
class Group(Base, HasId, ImageMixin, renders.GroupMixin):
name = sa.Column(sa.String(80), nullable=False)
description = sa.Column(sa.Text(), nullable=False)
member_limit = sa.Column(sa.Text(), nullable=True)
archived_at = sa.Column(sa.DateTime(), nullable=True, default=None)
tag_line = sa.Column(sa.String(80), nullable=True)
# TODO This is racey and requires locking
clicks = sa.Column(sa.Integer(), nullable=False, default=0)
age_range_id = sa.Column(GUID(), sa.ForeignKey("age_ranges.id"),
nullable=True)
anonymous = sa.Column(sa.Boolean(), nullable=False, default=False)
# NOTE For now, this will be M, F, and None, and should be an FK to
# an enum table
gender_focus = sa.Column(sa.String(80), nullable=True)
images = orm.relationship('Image', secondary='group_images')
privacy_setting_id = sa.Column(
GUID(), sa.ForeignKey("group_privacy_settings.id"))
privacy_settings = orm.relationship("GroupPrivacySetting",
backref="group")
group_type_id = sa.Column(GUID(), sa.ForeignKey("group_types.id"),
nullable=True)
organizations = orm.relationship('Organization',
secondary='organization_groups',
back_populates='groups')
documents = orm.relationship('GroupDocument',
backref='group')
meet_times = orm.relationship('GroupMeetTime', backref='group')
meetings = orm.relationship('ZoomMeeting',
backref='group')
users = orm.relationship('User',
secondary='group_members',
back_populates='groups')
gender_translation = {'M': "Men's Group",
'F': "Women's Group",
None: 'Men and Women',
'': 'Men and Women'}
notifications = orm.relationship("Notification", backref="group")
leaders = orm.relationship('User',
secondary='group_members',
back_populates='groups',
primaryjoin=(
"and_("
"GroupMember.user_id==User.id, "
"GroupMember.group_id==Group.id, "
"GroupMember.role_id==GroupRole.id, "
"GroupRole.description=='Group Leader')"))
links = orm.relationship('Link', backref='group')
@property
def parent_org(self):
return self.organizations[0]
@property
def org_creator(self):
org = Organization.get(self.parent_org.id)
return org.creator
@property
def is_payed_up(self):
org = Organization.get(self.parent_org.id)
return org.is_payed_up
def is_joinable(self):
if not self.member_limit:
return True
        return int(self.member_limit) > len(self.users)
@property
def get_meet_times(self):
ids = []
for meet_time in self.meet_times:
if meet_time.meet_time_type_id:
ids.append(meet_time.meet_time_type_id)
meet_descriptions = []
if ids:
with transaction() as session:
meet_types = (session.query(MeetTimeType)
.filter(MeetTimeType.id.in_(ids))
.options(orm.load_only('description'))
.all())
meet_descriptions = [meet_type.description for meet_type in meet_types]
return meet_descriptions
@property
def gender_focus_formatted(self):
return self.gender_translation.get(self.gender_focus, None)
def __str__(self):
return self.name
def __repr__(self):
return "Group:{}, {}".format(self.id, self.name)
class Organization(Base, HasId, ImageMixin, renders.OrganizationMixin):
# TODO We should talk to Toneo about allowing people to craft this
# model piecemeal, but only allow them to "publish" their Org
# after all the minimum detail is met. This also could use
# some vetting/approval process
name = sa.Column(sa.String(80), nullable=False)
description = sa.Column(sa.Text(), nullable=True, default=None)
deleted_at = sa.Column(sa.DateTime(), nullable=True, default=None)
show_address = sa.Column(sa.Boolean(), nullable=False, default=True)
vanity_name = sa.Column(sa.String(80), nullable=True, default=None)
# TODO This is very clearly church focused. What should we do with this?
# and how should we migrate it?
service_times_description = sa.Column(sa.String(80), nullable=True,
default=None)
date_established = sa.Column(sa.DateTime(), nullable=True)
address_id = sa.Column(GUID(), sa.ForeignKey("addresses.id"))
users = orm.relationship('User',
secondary='organization_members',
back_populates='organizations',
order_by='OrganizationMember.created_at')
owners = orm.relationship('OrganizationMember',
secondary='organization_roles',
primaryjoin=(
'and_('
'OrganizationMember.organization_id=='
'Organization.id, '
'OrganizationRole.description=="Owner")'),
order_by='OrganizationMember.created_at')
links = orm.relationship('Link', backref='organization')
# The primaryjoin here excludes archived groups
groups = orm.relationship('Group',
secondary='organization_groups',
back_populates='organizations',
order_by='OrganizationGroup.order',
primaryjoin=(
'and_('
'OrganizationGroup.organization_id=='
'Organization.id, '
'OrganizationGroup.group_id==Group.id, '
'Organization.activated_at.isnot(None), '
'Group.archived_at==None)'))
group_leaders = orm.relationship(
'User',
secondary='group_members',
back_populates='organizations',
primaryjoin=('and_('
'GroupMember.user_id==User.id, '
'GroupMember.group_id==Group.id, '
'GroupMember.role_id==GroupRole.id, '
'OrganizationGroup.group_id==Group.id, '
'OrganizationGroup.organization_id==Organization.id, '
'GroupRole.description=="Group Leader", '
'Group.archived_at==None)'))
images = orm.relationship('Image', secondary='organization_images')
notifications = orm.relationship('Notification', backref='organization')
activated_at = sa.Column(sa.DateTime(), nullable=True, default=None)
# Cache elements
licenses = 0
allocated_licenses = 0
billing_date = False
sub_data = None
discount_data = 0
@property
def group_leader_count(self):
# TODO: Flag the correct organization as is_lifeloop, refer to that
# TODO: Add 'no_charge' flag to organizations who we don't bill
llw_org = Organization.get(CONF.get("llw.org.id"))
llw_leaders = llw_org.group_leaders
count = 0
for leader in self.group_leaders:
if leader not in llw_leaders:
count += 1
return count
@property
def purchased_licenses(self):
if not self.allocated_licenses and self.subscription_data:
subscription_driver = subscription.ChargifyDriver(self.id)
allocation = (
subscription_driver.
get_subscription_component_allocation(
self.subscription_data['id']))
self.allocated_licenses = allocation['quantity']
return self.allocated_licenses
@property
def available_licenses(self):
if not self.licenses:
purchased = self.purchased_licenses + 1 # base license
used = self.group_leader_count
total = purchased - used
self.licenses = 0 if total < 0 else total
return self.licenses
def next_billing_date(self):
if not self.billing_date:
if self.subscription_data:
data = self.subscription_data['current_period_ends_at']
date = data[0:19]
date_time = datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S")
self.billing_date = helper.datetime_offset(
date_time, self.timezone)
return self.billing_date
@property
def cancel_at_end_of_period(self):
if self.subscription_data:
return self.subscription_data['cancel_at_end_of_period']
return False
def is_in_trial(self):
date = self.next_billing_date()
if date:
datetime_now = datetime.datetime.utcnow()
now = helper.datetime_offset(datetime_now, self.timezone)
if now < date:
return True
return False
@property
def subscription_data(self):
if not self.sub_data:
subscription_driver = subscription.ChargifyDriver(self.id)
self.sub_data = subscription_driver.get_subscription(self.id)
return self.sub_data
@property
def coupon(self):
LOG.debug(self.subscription_data)
if 'coupon_code' in self.subscription_data:
return self.subscription_data['coupon_code']
return None
@property
def discount(self):
if self.coupon and not self.discount_data:
subscription_driver = subscription.subscription_driver()
self.discount_data = subscription_driver.get_discount(self.coupon)
return self.discount_data
@property
def is_active(self):
return self.activated_at is not None
@property
def is_payed_up(self):
if self.subscription_data and self.available_licenses >= 0:
return True
return self.is_in_trial()
@property
def creator(self):
owners = self.owners
return (owners[0].user if len(owners) == 1 else
[om for om in owners if om.user.email.find("lifeloop.live") < 0][0].user)
@property
def timezone(self):
return self.creator.timezone
def public_groups(self):
return [g for g in self.groups
if g.privacy_settings.description.lower()
.startswith('public')]
def private_groups(self):
return [g for g in self.groups
if g.privacy_settings.description.lower()
.startswith('private')]
def org_only_groups(self):
return [g for g in self.groups
if g.privacy_settings.description.lower()
.startswith('organization only')]
def public_and_org_only_groups(self):
return [g for g in self.groups
if g.privacy_settings.description.lower()
.startswith('organization only') or g.privacy_settings
.description.lower().startswith('public')]
@property
def website(self):
for link in self.links:
if link.link_type.description.split(' ')[-1] == 'Website':
return link.url
return None
def __repr__(self):
return "Organization: {}, name: {}".format(
self.id, self.name)
def __hash__(self):
return hash(str(self))
def __lt__(self, other):
return self.name < other.name
class OrganizationMember(Base, HasId):
__table_args__ = (sa.UniqueConstraint("organization_id", "user_id",
name="org_user_membership"),
TABLE_KWARGS)
user_id = sa.Column(GUID(), sa.ForeignKey("users.id"))
organization_id = sa.Column(GUID(), sa.ForeignKey("organizations.id"), index=True)
# TODO Should be many?
role_id = sa.Column(GUID(), sa.ForeignKey("organization_roles.id"))
user = orm.relationship('User')
organization = orm.relationship('Organization')
role = orm.relationship('OrganizationRole')
can_manage_subscription = sa.Column(sa.Boolean(), nullable=False, default=False)
users = orm.relationship(
"User",
back_populates="organization_members")
def __str__(self):
return self.user.full_name
def __repr__(self):
return "OrganizationMember:{}, {}".format(self.id, self.user.full_name)
class UserImage(Base, HasId):
user_id = sa.Column(GUID(), sa.ForeignKey("users.id"))
image_id = sa.Column(GUID(), sa.ForeignKey("images.id"))
user = orm.relationship('User')
image = orm.relationship('Image')
class GroupImage(Base, HasId):
group_id = sa.Column(GUID(), sa.ForeignKey("groups.id"))
image_id = sa.Column(GUID(), sa.ForeignKey("images.id"))
group = orm.relationship('Group')
image = orm.relationship('Image')
class OrganizationImage(Base, HasId):
organization_id = sa.Column(GUID(), sa.ForeignKey("organizations.id"))
image_id = sa.Column(GUID(), sa.ForeignKey("images.id"))
organization = orm.relationship('Organization')
image = orm.relationship('Image')
class Image(Base, HasId):
image_url = sa.Column(sa.String(500), nullable=False)
public_id = sa.Column(sa.String(255), nullable=True)
# NOTE: TEMPORARY WHILE MIGRATING TO JOIN TABLES
organization_id = sa.Column(GUID(), nullable=True)
@property
def url(self):
if self.image_url:
return self.image_url
return webpack.asset_url_for('images/card.default.png')
class Page(Base, HasId):
title = sa.Column(sa.String(60), nullable=False)
content = sa.Column(sa.String(20000), nullable=False)
pagetype = sa.Column(sa.Integer(), nullable=False)
updated_by = sa.Column(sa.String(60), nullable=False)
| 37.371567
| 99
| 0.626162
| 41,819
| 0.903882
| 222
| 0.004798
| 8,912
| 0.192625
| 0
| 0
| 8,992
| 0.194354
|
3dbe95131f682ae91ac5d0ab7098a4da9541c391
| 267
|
py
|
Python
|
gc_win1.py
|
danz2004/learning_python
|
20cb7d33f898bcc406f33565308132dca31e11cd
|
[
"MIT"
] | null | null | null |
gc_win1.py
|
danz2004/learning_python
|
20cb7d33f898bcc406f33565308132dca31e11cd
|
[
"MIT"
] | null | null | null |
gc_win1.py
|
danz2004/learning_python
|
20cb7d33f898bcc406f33565308132dca31e11cd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
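# Slide a window of width w across the sequence and print each window's GC fraction.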
seq = 'ACGACGCAGGAGGAGAGTTTCAGAGATCACGAATACATCCATATTACCCAGAGAGAG'
w = 11
for i in range(len(seq) - w + 1):
count = 0
for j in range(i, i + w):
if seq[j] == 'G' or seq[j] == 'C':
count += 1
print(f'{i} {seq[i:i+w]} {(count / w) : .4f}')
| 26.7
| 65
| 0.595506
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 126
| 0.47191
|
3dbf87737162b90ca8a50c6b75c42c1a4829f712
| 6,159
|
py
|
Python
|
test/test_auth.py
|
tjones-commits/server-client-python
|
b9309fb79564de9f28196b929ee77b0e77a8f504
|
[
"CC0-1.0",
"MIT"
] | 470
|
2016-09-14T23:38:48.000Z
|
2022-03-31T07:59:53.000Z
|
test/test_auth.py
|
jorwoods/server-client-python
|
fefd6f18d8a6617829c6323879d2c3ed77a4cda6
|
[
"CC0-1.0",
"MIT"
] | 772
|
2016-09-09T18:15:44.000Z
|
2022-03-31T22:01:08.000Z
|
test/test_auth.py
|
jorwoods/server-client-python
|
fefd6f18d8a6617829c6323879d2c3ed77a4cda6
|
[
"CC0-1.0",
"MIT"
] | 346
|
2016-09-10T00:05:00.000Z
|
2022-03-30T18:55:47.000Z
|
import unittest
import os.path
import requests_mock
import tableauserverclient as TSC
TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets')
SIGN_IN_XML = os.path.join(TEST_ASSET_DIR, 'auth_sign_in.xml')
SIGN_IN_IMPERSONATE_XML = os.path.join(TEST_ASSET_DIR, 'auth_sign_in_impersonate.xml')
SIGN_IN_ERROR_XML = os.path.join(TEST_ASSET_DIR, 'auth_sign_in_error.xml')
class AuthTests(unittest.TestCase):
def setUp(self):
self.server = TSC.Server('http://test')
self.baseurl = self.server.auth.baseurl
def test_sign_in(self):
with open(SIGN_IN_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml)
tableau_auth = TSC.TableauAuth('testuser', 'password', site_id='Samples')
self.server.auth.sign_in(tableau_auth)
self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l', self.server.auth_token)
self.assertEqual('6b7179ba-b82b-4f0f-91ed-812074ac5da6', self.server.site_id)
self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)
def test_sign_in_with_personal_access_tokens(self):
with open(SIGN_IN_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml)
tableau_auth = TSC.PersonalAccessTokenAuth(token_name='mytoken',
personal_access_token='Random123Generated', site_id='Samples')
self.server.auth.sign_in(tableau_auth)
self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l', self.server.auth_token)
self.assertEqual('6b7179ba-b82b-4f0f-91ed-812074ac5da6', self.server.site_id)
self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)
def test_sign_in_impersonate(self):
with open(SIGN_IN_IMPERSONATE_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml)
tableau_auth = TSC.TableauAuth('testuser', 'password',
user_id_to_impersonate='dd2239f6-ddf1-4107-981a-4cf94e415794')
self.server.auth.sign_in(tableau_auth)
self.assertEqual('MJonFA6HDyy2C3oqR13fRGqE6cmgzwq3', self.server.auth_token)
self.assertEqual('dad65087-b08b-4603-af4e-2887b8aafc67', self.server.site_id)
self.assertEqual('dd2239f6-ddf1-4107-981a-4cf94e415794', self.server.user_id)
def test_sign_in_error(self):
with open(SIGN_IN_ERROR_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml, status_code=401)
tableau_auth = TSC.TableauAuth('testuser', 'wrongpassword')
self.assertRaises(TSC.ServerResponseError, self.server.auth.sign_in, tableau_auth)
def test_sign_in_invalid_token(self):
with open(SIGN_IN_ERROR_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml, status_code=401)
tableau_auth = TSC.PersonalAccessTokenAuth(token_name='mytoken', personal_access_token='invalid')
self.assertRaises(TSC.ServerResponseError, self.server.auth.sign_in, tableau_auth)
def test_sign_in_without_auth(self):
with open(SIGN_IN_ERROR_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml, status_code=401)
tableau_auth = TSC.TableauAuth('', '')
self.assertRaises(TSC.ServerResponseError, self.server.auth.sign_in, tableau_auth)
def test_sign_out(self):
with open(SIGN_IN_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl + '/signin', text=response_xml)
m.post(self.baseurl + '/signout', text='')
tableau_auth = TSC.TableauAuth('testuser', 'password')
self.server.auth.sign_in(tableau_auth)
self.server.auth.sign_out()
self.assertIsNone(self.server._auth_token)
self.assertIsNone(self.server._site_id)
self.assertIsNone(self.server._user_id)
def test_switch_site(self):
self.server.version = '2.6'
baseurl = self.server.auth.baseurl
site_id, user_id, auth_token = list('123')
self.server._set_auth(site_id, user_id, auth_token)
with open(SIGN_IN_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(baseurl + '/switchSite', text=response_xml)
site = TSC.SiteItem('Samples', 'Samples')
self.server.auth.switch_site(site)
self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l', self.server.auth_token)
self.assertEqual('6b7179ba-b82b-4f0f-91ed-812074ac5da6', self.server.site_id)
self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)
def test_revoke_all_server_admin_tokens(self):
self.server.version = "3.10"
baseurl = self.server.auth.baseurl
with open(SIGN_IN_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(baseurl + '/signin', text=response_xml)
m.post(baseurl + '/revokeAllServerAdminTokens', text='')
tableau_auth = TSC.TableauAuth('testuser', 'password')
self.server.auth.sign_in(tableau_auth)
self.server.auth.revoke_all_server_admin_tokens()
self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l', self.server.auth_token)
self.assertEqual('6b7179ba-b82b-4f0f-91ed-812074ac5da6', self.server.site_id)
self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)
| 49.272
| 117
| 0.664069
| 5,776
| 0.937815
| 0
| 0
| 0
| 0
| 0
| 0
| 1,116
| 0.181198
|
3dbfa17a77ec527273235935d102cd0d8f5bcbb2
| 7,991
|
py
|
Python
|
gym_flock/envs/old/flocking.py
|
katetolstaya/gym-flock
|
3236d1dafcb1b9be0cf78b471672e8becb2d37af
|
[
"MIT"
] | 19
|
2019-07-29T22:19:58.000Z
|
2022-01-27T04:38:38.000Z
|
gym_flock/envs/old/flocking.py
|
henghenghahei849/gym-flock
|
b09bdfbbe4a96fe052958d1f9e1e9dd314f58419
|
[
"MIT"
] | null | null | null |
gym_flock/envs/old/flocking.py
|
henghenghahei849/gym-flock
|
b09bdfbbe4a96fe052958d1f9e1e9dd314f58419
|
[
"MIT"
] | 5
|
2019-10-03T14:44:49.000Z
|
2021-12-09T20:39:39.000Z
|
import gym
from gym import spaces, error, utils
from gym.utils import seeding
import numpy as np
import configparser
from os import path
import matplotlib.pyplot as plt
from matplotlib.pyplot import gca
font = {'family': 'sans-serif',
'weight': 'bold',
'size': 14}
class FlockingEnv(gym.Env):
def __init__(self):
config_file = path.join(path.dirname(__file__), "params_flock.cfg")
config = configparser.ConfigParser()
config.read(config_file)
config = config['flock']
self.dynamic = False # if the agents are moving or not
self.mean_pooling = True # normalize the adjacency matrix by the number of neighbors or not
# number states per agent
self.nx_system = 4
# numer of observations per agent
self.n_features = 6
# number of actions per agent
self.nu = 2
# problem parameters from file
self.n_agents = int(config['network_size'])
self.comm_radius = float(config['comm_radius'])
self.comm_radius2 = self.comm_radius * self.comm_radius
self.dt = float(config['system_dt'])
self.v_max = float(config['max_vel_init'])
self.v_bias = self.v_max
self.r_max = float(config['max_rad_init'])
self.std_dev = float(config['std_dev']) * self.dt
        # initialize state matrices
self.x = np.zeros((self.n_agents, self.nx_system))
self.u = np.zeros((self.n_agents, self.nu))
self.mean_vel = np.zeros((self.n_agents, self.nu))
self.init_vel = np.zeros((self.n_agents, self.nu))
self.a_net = np.zeros((self.n_agents, self.n_agents))
# TODO : what should the action space be? is [-1,1] OK?
self.max_accel = 1
self.gain = 10.0 # TODO - adjust if necessary - may help the NN performance
self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(2 * self.n_agents,),
dtype=np.float32)
self.observation_space = spaces.Box(low=-np.Inf, high=np.Inf, shape=(self.n_agents, self.n_features),
dtype=np.float32)
self.fig = None
self.line1 = None
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, u):
#u = np.reshape(u, (-1, 2))
assert u.shape == (self.n_agents, self.nu)
self.u = u
if self.dynamic:
# x position
self.x[:, 0] = self.x[:, 0] + self.x[:, 2] * self.dt
# y position
self.x[:, 1] = self.x[:, 1] + self.x[:, 3] * self.dt
# x velocity
self.x[:, 2] = self.x[:, 2] + self.gain * self.u[:, 0] * self.dt #+ np.random.normal(0, self.std_dev, (self.n_agents,))
# y velocity
self.x[:, 3] = self.x[:, 3] + self.gain * self.u[:, 1] * self.dt #+ np.random.normal(0, self.std_dev, (self.n_agents,))
return self._get_obs(), self.instant_cost(), False, {}
def instant_cost(self): # sum of differences in velocities
# TODO adjust to desired reward
# action_cost = -1.0 * np.sum(np.square(self.u))
#curr_variance = -1.0 * np.sum((np.var(self.x[:, 2:4], axis=0)))
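        # Negative sum of each agent's squared velocity deviation from the mean initial velocity.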
versus_initial_vel = -1.0 * np.sum(np.sum(np.square(self.x[:, 2:4] - self.mean_vel), axis=1))
#return curr_variance + versus_initial_vel
return versus_initial_vel
def reset(self):
x = np.zeros((self.n_agents, self.nx_system))
degree = 0
min_dist = 0
min_dist_thresh = 0.1 # 0.25
# generate an initial configuration with all agents connected,
# and minimum distance between agents > min_dist_thresh
while degree < 2 or min_dist < min_dist_thresh:
# randomly initialize the location and velocity of all agents
length = np.sqrt(np.random.uniform(0, self.r_max, size=(self.n_agents,)))
angle = np.pi * np.random.uniform(0, 2, size=(self.n_agents,))
x[:, 0] = length * np.cos(angle)
x[:, 1] = length * np.sin(angle)
bias = np.random.uniform(low=-self.v_bias, high=self.v_bias, size=(2,))
x[:, 2] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_agents,)) + bias[0]
x[:, 3] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_agents,)) + bias[1]
# compute distances between agents
a_net = self.dist2_mat(x)
# compute minimum distance between agents and degree of network to check if good initial configuration
min_dist = np.sqrt(np.min(np.min(a_net)))
a_net = a_net < self.comm_radius2
degree = np.min(np.sum(a_net.astype(int), axis=1))
# keep good initialization
self.mean_vel = np.mean(x[:, 2:4], axis=0)
self.init_vel = x[:, 2:4]
self.x = x
self.a_net = self.get_connectivity(self.x)
return self._get_obs()
def _get_obs(self):
# state_values = self.x
state_values = np.hstack((self.x, self.init_vel)) # initial velocities are part of state to make system observable
if self.dynamic:
state_network = self.get_connectivity(self.x)
else:
state_network = self.a_net
return (state_values, state_network)
def dist2_mat(self, x):
"""
Compute squared euclidean distances between agents. Diagonal elements are infinity
Args:
x (): current state of all agents
Returns: symmetric matrix of size (n_agents, n_agents) with A_ij the distance between agents i and j
"""
x_loc = np.reshape(x[:, 0:2], (self.n_agents,2,1))
a_net = np.sum(np.square(np.transpose(x_loc, (0,2,1)) - np.transpose(x_loc, (2,0,1))), axis=2)
np.fill_diagonal(a_net, np.Inf)
return a_net
def get_connectivity(self, x):
"""
Get the adjacency matrix of the network based on agent locations by computing pairwise distances using pdist
Args:
x (): current state of all agents
Returns: adjacency matrix of network
"""
a_net = self.dist2_mat(x)
a_net = (a_net < self.comm_radius2).astype(float)
if self.mean_pooling:
# Normalize the adjacency matrix by the number of neighbors - results in mean pooling, instead of sum pooling
n_neighbors = np.reshape(np.sum(a_net, axis=1), (self.n_agents,1)) # TODO or axis=0? Is the mean in the correct direction?
n_neighbors[n_neighbors == 0] = 1
a_net = a_net / n_neighbors
return a_net
def controller(self):
"""
Consensus-based centralized flocking with no obstacle avoidance
Returns: the optimal action
"""
# TODO implement Tanner 2003?
u = np.mean(self.x[:,2:4], axis=0) - self.x[:,2:4]
u = np.clip(u, a_min=-self.max_accel, a_max=self.max_accel)
return u
def render(self, mode='human'):
"""
Render the environment with agents as points in 2D space
"""
if self.fig is None:
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
line1, = ax.plot(self.x[:, 0], self.x[:, 1], 'bo') # Returns a tuple of line objects, thus the comma
ax.plot([0], [0], 'kx')
plt.ylim(-1.0 * self.r_max, 1.0 * self.r_max)
plt.xlim(-1.0 * self.r_max, 1.0 * self.r_max)
a = gca()
a.set_xticklabels(a.get_xticks(), font)
a.set_yticklabels(a.get_yticks(), font)
plt.title('GNN Controller')
self.fig = fig
self.line1 = line1
self.line1.set_xdata(self.x[:, 0])
self.line1.set_ydata(self.x[:, 1])
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def close(self):
pass
| 36.99537
| 134
| 0.585659
| 7,705
| 0.96421
| 0
| 0
| 0
| 0
| 0
| 0
| 2,324
| 0.290827
|
3dc00d2a0bc2efe282c87c91e5370202da55e278
| 3,010
|
py
|
Python
|
dataPipelines/gc_scrapy/gc_scrapy/spiders/army_reserve_spider.py
|
ekmixon/gamechanger-crawlers
|
60a0cf20338fb3dc134eec117bccd519cede9288
|
[
"MIT"
] | 8
|
2021-05-20T18:39:35.000Z
|
2022-02-25T23:24:21.000Z
|
dataPipelines/gc_scrapy/gc_scrapy/spiders/army_reserve_spider.py
|
dod-advana/gamechanger-crawlers
|
e0113111a39f78bd13f70fa4b3359a688f7dc6e8
|
[
"MIT"
] | 4
|
2021-06-14T13:46:46.000Z
|
2022-03-02T02:01:49.000Z
|
dataPipelines/gc_scrapy/gc_scrapy/spiders/army_reserve_spider.py
|
ekmixon/gamechanger-crawlers
|
60a0cf20338fb3dc134eec117bccd519cede9288
|
[
"MIT"
] | 4
|
2021-06-30T22:18:52.000Z
|
2021-11-17T22:43:27.000Z
|
import scrapy
import re
from urllib.parse import urljoin, urlencode, parse_qs
from dataPipelines.gc_scrapy.gc_scrapy.items import DocItem
from dataPipelines.gc_scrapy.gc_scrapy.GCSpider import GCSpider
type_and_num_regex = re.compile(r"([a-zA-Z].*) (\d.*)")
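# Splits a document name of the form "<type words> <number>" into its type and number parts.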
class ArmyReserveSpider(GCSpider):
name = "Army_Reserve"
allowed_domains = ['usar.army.mil']
start_urls = [
'https://www.usar.army.mil/Publications/'
]
file_type = "pdf"
cac_login_required = False
section_selector = "div.DnnModule.DnnModule-ICGModulesExpandableTextHtml div.Normal"
@staticmethod
def clean(text):
return text.encode('ascii', 'ignore').decode('ascii').strip()
def parse(self, response):
selected_items = response.css(
"div.DnnModule.DnnModule-ICGModulesExpandableTextHtml div.Normal > div p")
for item in selected_items:
pdf_url = item.css('a::attr(href)').get()
if pdf_url is None:
continue
# join relative urls to base
web_url = urljoin(self.start_urls[0], pdf_url) if pdf_url.startswith(
'/') else pdf_url
# encode spaces from pdf names
web_url = web_url.replace(" ", "%20")
cac_login_required = True if "usar.dod.afpims.mil" in web_url else False
downloadable_items = [
{
"doc_type": self.file_type,
"web_url": web_url,
"compression_type": None
}
]
doc_name_raw = "".join(item.css('strong::text').getall())
doc_title_raw = item.css('a::text').get()
# some are nested in span
if doc_title_raw is None:
doc_title_raw = item.css('a span::text').get()
# some dont have anything except the name e.g. FY20 USAR IDT TRP Policy Update
if doc_title_raw is None:
doc_title_raw = doc_name_raw
doc_name = self.clean(doc_name_raw)
doc_title = self.clean(doc_title_raw)
type_and_num_groups = re.search(type_and_num_regex, doc_name)
if type_and_num_groups is not None:
doc_type = type_and_num_groups[1]
doc_num = type_and_num_groups[2]
else:
doc_type = "USAR Doc"
doc_num = ""
version_hash_fields = {
# version metadata found on pdf links
"item_currency": web_url.split('/')[-1],
"document_title": doc_title,
"document_number": doc_num
}
yield DocItem(
doc_name=doc_name,
doc_title=doc_title,
doc_num=doc_num,
doc_type=doc_type,
cac_login_required=cac_login_required,
downloadable_items=downloadable_items,
version_hash_raw_data=version_hash_fields,
)
| 34.597701
| 94
| 0.571429
| 2,748
| 0.912957
| 2,313
| 0.768439
| 104
| 0.034551
| 0
| 0
| 641
| 0.212957
|
3dc01664c6a8e4d90955ec90294ebb0c1cb73629
| 4,036
|
py
|
Python
|
lbrynet/daemon/Publisher.py
|
Invariant-Change/lbry
|
2ddd6b051d4457f0d747428e3d97aa37839f3c93
|
[
"MIT"
] | null | null | null |
lbrynet/daemon/Publisher.py
|
Invariant-Change/lbry
|
2ddd6b051d4457f0d747428e3d97aa37839f3c93
|
[
"MIT"
] | null | null | null |
lbrynet/daemon/Publisher.py
|
Invariant-Change/lbry
|
2ddd6b051d4457f0d747428e3d97aa37839f3c93
|
[
"MIT"
] | null | null | null |
import logging
import mimetypes
import os
from twisted.internet import defer
from lbrynet.core import file_utils
from lbrynet.file_manager.EncryptedFileCreator import create_lbry_file
log = logging.getLogger(__name__)
class Publisher(object):
def __init__(self, session, lbry_file_manager, wallet, certificate_id):
self.session = session
self.lbry_file_manager = lbry_file_manager
self.wallet = wallet
self.certificate_id = certificate_id
self.lbry_file = None
@defer.inlineCallbacks
def create_and_publish_stream(self, name, bid, claim_dict, file_path, claim_address=None,
change_address=None):
"""Create lbry file and make claim"""
log.info('Starting publish for %s', name)
if not os.path.isfile(file_path):
raise Exception("File {} not found".format(file_path))
if os.path.getsize(file_path) == 0:
raise Exception("Cannot publish empty file {}".format(file_path))
file_name = os.path.basename(file_path)
with file_utils.get_read_handle(file_path) as read_handle:
self.lbry_file = yield create_lbry_file(self.session, self.lbry_file_manager, file_name,
read_handle)
if 'source' not in claim_dict['stream']:
claim_dict['stream']['source'] = {}
claim_dict['stream']['source']['source'] = self.lbry_file.sd_hash
claim_dict['stream']['source']['sourceType'] = 'lbry_sd_hash'
claim_dict['stream']['source']['contentType'] = get_content_type(file_path)
claim_dict['stream']['source']['version'] = "_0_0_1" # need current version here
claim_out = yield self.make_claim(name, bid, claim_dict, claim_address, change_address)
# check if we have a file already for this claim (if this is a publish update with a new stream)
old_stream_hashes = yield self.session.storage.get_old_stream_hashes_for_claim_id(claim_out['claim_id'],
self.lbry_file.stream_hash)
if old_stream_hashes:
for lbry_file in filter(lambda l: l.stream_hash in old_stream_hashes,
list(self.lbry_file_manager.lbry_files)):
yield self.lbry_file_manager.delete_lbry_file(lbry_file, delete_file=False)
log.info("Removed old stream for claim update: %s", lbry_file.stream_hash)
yield self.session.storage.save_content_claim(
self.lbry_file.stream_hash, "%s:%i" % (claim_out['txid'], claim_out['nout'])
)
defer.returnValue(claim_out)
@defer.inlineCallbacks
def publish_stream(self, name, bid, claim_dict, stream_hash, claim_address=None, change_address=None):
"""Make a claim without creating a lbry file"""
claim_out = yield self.make_claim(name, bid, claim_dict, claim_address, change_address)
if stream_hash: # the stream_hash returned from the db will be None if this isn't a stream we have
yield self.session.storage.save_content_claim(stream_hash, "%s:%i" % (claim_out['txid'],
claim_out['nout']))
self.lbry_file = [f for f in self.lbry_file_manager.lbry_files if f.stream_hash == stream_hash][0]
defer.returnValue(claim_out)
@defer.inlineCallbacks
def make_claim(self, name, bid, claim_dict, claim_address=None, change_address=None):
claim_out = yield self.wallet.claim_name(name, bid, claim_dict,
certificate_id=self.certificate_id,
claim_address=claim_address,
change_address=change_address)
defer.returnValue(claim_out)
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
| 51.088608
| 117
| 0.631318
| 3,703
| 0.917493
| 3,318
| 0.822101
| 3,399
| 0.84217
| 0
| 0
| 638
| 0.158077
|
3dc0b7210fc8b7d9ca8c5c2087a4723a81de890a
| 10,498
|
py
|
Python
|
SAMPNet/train.py
|
bcmi/Image-Composition-Assessment-with-SAMP
|
35c093bafdaaa98923d8ba093a73ddf0079ffbc9
|
[
"MIT"
] | 27
|
2021-04-28T04:51:02.000Z
|
2022-03-04T08:57:03.000Z
|
SAMPNet/train.py
|
bcmi/Image-Composition-Assessment-with-SAMP
|
35c093bafdaaa98923d8ba093a73ddf0079ffbc9
|
[
"MIT"
] | 4
|
2021-10-30T13:28:33.000Z
|
2022-02-19T01:09:47.000Z
|
SAMPNet/train.py
|
bcmi/Image-Composition-Assessment-with-SAMP
|
35c093bafdaaa98923d8ba093a73ddf0079ffbc9
|
[
"MIT"
] | 3
|
2021-10-30T10:18:02.000Z
|
2022-01-16T08:44:43.000Z
|
import sys,os
from torch.autograd import Variable
import torch.optim as optim
from tensorboardX import SummaryWriter
import torch
import time
import shutil
from torch.utils.data import DataLoader
import csv
from samp_net import EMDLoss, AttributeLoss, SAMPNet
from config import Config
from cadb_dataset import CADBDataset
from test import evaluation_on_cadb
def calculate_accuracy(predict, target, threshold=2.6):
    assert target.shape == predict.shape, '{} vs. {}'.format(target.shape, predict.shape)
    bin_tar = target > threshold
    bin_pre = predict > threshold
    correct = (bin_tar == bin_pre).sum()
    acc = correct.float() / target.size(0)
    return correct, acc
def build_dataloader(cfg):
trainset = CADBDataset('train', cfg)
trainloader = DataLoader(trainset,
batch_size=cfg.batch_size,
shuffle=True,
num_workers=cfg.num_workers,
drop_last=False)
return trainloader
class Trainer(object):
def __init__(self, model, cfg):
self.cfg = cfg
self.model = model
self.device = torch.device('cuda:{}'.format(self.cfg.gpu_id))
self.trainloader = build_dataloader(cfg)
self.optimizer = self.create_optimizer()
self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer, mode='min', patience=5)
self.epoch = 0
self.iters = 0
self.avg_mse = 0.
self.avg_emd = 0.
self.avg_acc = 0.
self.avg_att = 0.
self.smooth_coe = 0.4
self.smooth_mse = None
self.smooth_emd = None
self.smooth_acc = None
self.smooth_att = None
self.mse_loss = torch.nn.MSELoss()
self.emd_loss = EMDLoss()
self.test_acc = []
self.test_emd1 = []
self.test_emd2 = []
self.test_mse = []
self.test_srcc = []
self.test_lcc = []
if cfg.use_attribute:
self.att_loss = AttributeLoss(cfg.attribute_weight)
self.least_metric = 1.
self.writer = self.create_writer()
def create_optimizer(self):
# for param in self.model.backbone.parameters():
# param.requires_grad = False
bb_params = list(map(id, self.model.backbone.parameters()))
lr_params = filter(lambda p:id(p) not in bb_params, self.model.parameters())
params = [
{'params': lr_params, 'lr': self.cfg.lr},
{'params': self.model.backbone.parameters(), 'lr': self.cfg.lr * 0.01}
]
if self.cfg.optimizer == 'adam':
optimizer = optim.Adam(params,
weight_decay=self.cfg.weight_decay)
elif self.cfg.optimizer == 'sgd':
optimizer = optim.SGD(params,
momentum=self.cfg.momentum,
weight_decay=self.cfg.weight_decay)
else:
raise ValueError(f"not such optimizer {self.cfg.optimizer}")
return optimizer
def create_writer(self):
print('Create tensorboardX writer...', self.cfg.log_dir)
writer = SummaryWriter(log_dir=self.cfg.log_dir)
return writer
def run(self):
for epoch in range(self.cfg.max_epoch):
self.run_epoch()
self.epoch += 1
self.scheduler.step(metrics=self.least_metric)
self.writer.add_scalar('Train/lr', self.optimizer.param_groups[0]['lr'], self.epoch)
if self.epoch % self.cfg.save_epoch == 0:
checkpoint_path = os.path.join(self.cfg.checkpoint_dir, 'model-{epoch}.pth')
torch.save(self.model.state_dict(), checkpoint_path.format(epoch=self.epoch))
print('Save checkpoint...')
if self.epoch % self.cfg.test_epoch == 0:
test_emd = self.eval_training()
if test_emd < self.least_metric:
self.least_metric = test_emd
checkpoint_path = os.path.join(self.cfg.checkpoint_dir, 'model-best.pth')
torch.save(self.model.state_dict(), checkpoint_path)
print('Update best checkpoint...')
self.writer.add_scalar('Test/Least EMD', self.least_metric, self.epoch)
def eval_training(self):
avg_acc, avg_r1_emd, avg_r2_emd, avg_mse, SRCC, LCC = \
evaluation_on_cadb(self.model, self.cfg)
self.writer.add_scalar('Test/Average EMD(r=2)', avg_r2_emd, self.epoch)
self.writer.add_scalar('Test/Average EMD(r=1)', avg_r1_emd, self.epoch)
self.writer.add_scalar('Test/Average MSE', avg_mse, self.epoch)
self.writer.add_scalar('Test/Accuracy', avg_acc, self.epoch)
self.writer.add_scalar('Test/SRCC', SRCC, self.epoch)
self.writer.add_scalar('Test/LCC', LCC, self.epoch)
error = avg_r1_emd
self.test_acc.append(avg_acc)
self.test_emd1.append(avg_r1_emd)
self.test_emd2.append(avg_r2_emd)
self.test_mse.append(avg_mse)
self.test_srcc.append(SRCC)
self.test_lcc.append(LCC)
self.write2csv()
return error
def write2csv(self):
csv_path = os.path.join(self.cfg.exp_path, '..', '{}.csv'.format(self.cfg.exp_name))
header = ['epoch', 'Accuracy', 'EMD r=1', 'EMD r=2', 'MSE', 'SRCC', 'LCC']
epoches = list(range(len(self.test_acc)))
metrics = [epoches, self.test_acc, self.test_emd1, self.test_emd2,
self.test_mse, self.test_srcc, self.test_lcc]
rows = [header]
for i in range(len(epoches)):
row = [m[i] for m in metrics]
rows.append(row)
for name, m in zip(header, metrics):
if name == 'epoch':
continue
index = m.index(min(m))
if name in ['Accuracy', 'SRCC', 'LCC']:
index = m.index(max(m))
title = 'best {} (epoch-{})'.format(name, index)
row = [l[index] for l in metrics]
row[0] = title
rows.append(row)
with open(csv_path, 'w') as f:
cw = csv.writer(f)
cw.writerows(rows)
print('Save result to ', csv_path)
def dist2ave(self, pred_dist):
pred_score = torch.sum(pred_dist* torch.Tensor(range(1,6)).to(pred_dist.device), dim=-1, keepdim=True)
return pred_score
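# Illustrative note (not in the original file): dist2ave turns a 5-bin score distribution
# into its expected value on the 1-5 scale, e.g.
#   pred_dist = [0.1, 0.2, 0.4, 0.2, 0.1]
#   -> 1*0.1 + 2*0.2 + 3*0.4 + 4*0.2 + 5*0.1 = 3.0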
def run_epoch(self):
self.model.train()
for batch, data in enumerate(self.trainloader):
self.iters += 1
image = data[0].to(self.device)
score = data[1].to(self.device)
score_dist = data[2].to(self.device)
saliency = data[3].to(self.device)
attributes = data[4].to(self.device)
weight = data[5].to(self.device)
pred_weight, pred_atts, pred_dist = self.model(image, saliency)
if self.cfg.use_weighted_loss:
dist_loss = self.emd_loss(score_dist, pred_dist, weight)
else:
dist_loss = self.emd_loss(score_dist, pred_dist)
if self.cfg.use_attribute:
att_loss = self.att_loss(attributes, pred_atts)
loss = dist_loss + att_loss
else:
loss = dist_loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.avg_emd += dist_loss.item()
self.avg_att += (att_loss.item() if self.cfg.use_attribute else 0.)  # att_loss only exists when attribute loss is enabled
pred_score = self.dist2ave(pred_dist)
correct, accuracy = calculate_accuracy(pred_score, score)
self.avg_acc += accuracy.item()
if (self.iters+1) % self.cfg.display_steps == 0:
print('ground truth: average={}'.format(score.view(-1)))
print('prediction: average={}'.format(pred_score.view(-1)))
self.avg_emd = self.avg_emd / self.cfg.display_steps
self.avg_acc = self.avg_acc / self.cfg.display_steps
if self.cfg.use_attribute:
self.avg_att = self.avg_att / self.cfg.display_steps
if self.smooth_emd != None:
self.avg_emd = (1-self.smooth_coe) * self.avg_emd + self.smooth_coe * self.smooth_emd
self.avg_acc = (1-self.smooth_coe) * self.avg_acc + self.smooth_coe * self.smooth_acc
if self.cfg.use_attribute:
self.avg_att = (1-self.smooth_coe) * self.avg_att + self.smooth_coe * self.smooth_att
self.writer.add_scalar('Train/AttributeLoss', self.avg_att, self.iters)
self.writer.add_scalar('Train/EMD_Loss', self.avg_emd, self.iters)
self.writer.add_scalar('Train/Accuracy', self.avg_acc, self.iters)
if self.cfg.use_attribute:
print('Training Epoch:{}/{} Current Batch: {}/{} EMD_Loss:{:.4f} Attribute_Loss:{:.4f} ACC:{:.2%} lr:{:.6f} '.
format(
self.epoch, self.cfg.max_epoch,
batch, len(self.trainloader),
self.avg_emd, self.avg_att,
self.avg_acc,
self.optimizer.param_groups[0]['lr']))
else:
print(
'Training Epoch:{}/{} Current Batch: {}/{} EMD_Loss:{:.4f} ACC:{:.2%} lr:{:.6f} '.
format(
self.epoch, self.cfg.max_epoch,
batch, len(self.trainloader),
self.avg_emd, self.avg_acc,
self.optimizer.param_groups[0]['lr']))
self.smooth_emd = self.avg_emd
self.smooth_acc = self.avg_acc
self.avg_mse = 0.
self.avg_emd = 0.
self.avg_acc = 0.
if self.cfg.use_attribute:
self.smooth_att = self.avg_att
self.avg_att = 0.
print()
if __name__ == '__main__':
cfg = Config()
cfg.create_path()
device = torch.device('cuda:{}'.format(cfg.gpu_id))
# evaluate(cfg)
for file in os.listdir('./'):
if file.endswith('.py'):
shutil.copy(file, cfg.exp_path)
print('Backup ', file)
model = SAMPNet(cfg)
model = model.train().to(device)
trainer = Trainer(model, cfg)
trainer.run()
| 40.689922
| 130
| 0.561155
| 9,075
| 0.86445
| 0
| 0
| 0
| 0
| 0
| 0
| 916
| 0.087255
|
3dc12e0ce591217b149c51e1d38a5ca5547d4627
| 3,282
|
py
|
Python
|
combine_layer.py
|
Lynton-Morgan/combine_layer
|
93b83ed69b8201db69fff80e60e8cb2955b40cd1
|
[
"MIT"
] | null | null | null |
combine_layer.py
|
Lynton-Morgan/combine_layer
|
93b83ed69b8201db69fff80e60e8cb2955b40cd1
|
[
"MIT"
] | null | null | null |
combine_layer.py
|
Lynton-Morgan/combine_layer
|
93b83ed69b8201db69fff80e60e8cb2955b40cd1
|
[
"MIT"
] | null | null | null |
import keras
import keras.backend as K
class Combine(keras.layers.Layer):
"""Combine layer
This layer recombines the output of its internal layers
#Arguments
layers: A list of Keras layers
output_spec: A list of integer lists, indices from each layer in 'layers'
that make up each output coordinate
reduction: A string, the function to use between layer coordinates
#Example
To make a 3-element softmax binary tree:
output_spec = [[0, 0], [0, 1], [1, -1]]
comb = Combine([Dense(2, activation='softmax'),
Dense(2, activation='softmax')],
output_spec=output_spec,
reduction='prod')
"""
def __init__(self, layers, output_spec, reduction='prod', **kwargs):
self.layers = layers
assert len(layers) > 0, "Must have layers in 'layers'"
self.output_spec = output_spec
for idx_spec in output_spec:
assert len(idx_spec) <= len(layers), \
"Length of each element in output_spec must not exceed the number of layers"
self.output_dim = len(output_spec)
self.reduction = reduction
reducer_dict = {
'max': K.max,
'mean': K.mean,
'min': K.min,
'prod': K.prod,
'std': K.std,
'sum': K.sum,
'var': K.var,
}
assert reduction in reducer_dict, "'reduction' must be one of %s" % (list(reducer_dict.keys()))
self.reducer = reducer_dict[reduction]
super(Combine, self).__init__(**kwargs)
def build(self, input_shape):
self.trainable_weights = []
for layer in self.layers:
layer.build(input_shape)
self.trainable_weights += layer.trainable_weights
super(Combine, self).build(input_shape)
def call(self, inputs):
layer_outputs = [layer(inputs) for layer in self.layers]
outputs = []
for indices in self.output_spec:
var = K.stack(
[layer_outputs[layer_idx][...,idx] for layer_idx, idx in enumerate(indices) if idx >= 0],
axis=0)
outputs.append(self.reducer(var, axis=0))
result = K.stack(outputs, axis=-1)
return result
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) >= 2
assert input_shape[-1]
output_shape = list(input_shape)
output_shape[-1] = self.output_dim
return tuple(output_shape)
def get_config(self):
base_config = super(Combine, self).get_config()
config={}
config['layers'] = [{'class_name': layer.__class__.__name__,
'config': layer.get_config()} for layer in self.layers]
config['output_spec'] = self.output_spec
config['reduction'] = self.reduction
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
from keras.layers import deserialize as deserialize_layer
layers_config = config.pop('layers')
layers = [deserialize_layer(layers_config[i]) for i in range(len(layers_config))]
return cls(layers, **config)
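# Illustrative sketch (not part of the original module): wiring the Combine layer up as the
# binary softmax tree from the docstring above. The input size (16) is hypothetical.
# With output_spec=[[0, 0], [0, 1], [1, -1]] and reduction='prod' the three outputs are
# p0*q0, p0*q1 and p1, where (p0, p1) and (q0, q1) are the two internal softmax outputs.
# from keras.layers import Dense, Input
# from keras.models import Model
# inp = Input(shape=(16,))
# tree = Combine([Dense(2, activation='softmax'), Dense(2, activation='softmax')],
#                output_spec=[[0, 0], [0, 1], [1, -1]], reduction='prod')
# model = Model(inp, tree(inp))  # produces a 3-way distribution that sums to 1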
| 33.151515
| 109
| 0.59415
| 3,240
| 0.987203
| 0
| 0
| 286
| 0.087142
| 0
| 0
| 865
| 0.263559
|
3dc274928408de034cf930f3d624022d965d5166
| 4,308
|
py
|
Python
|
src/pystage/core/_sound.py
|
pystage/pystage
|
4a76e95f6de2df59736de17fe81219485fde1556
|
[
"MIT"
] | 12
|
2021-05-20T12:49:52.000Z
|
2022-01-12T02:15:33.000Z
|
src/pystage/core/_sound.py
|
pystage/pystage
|
4a76e95f6de2df59736de17fe81219485fde1556
|
[
"MIT"
] | 14
|
2021-05-25T09:28:33.000Z
|
2021-09-10T07:54:45.000Z
|
src/pystage/core/_sound.py
|
pystage/pystage
|
4a76e95f6de2df59736de17fe81219485fde1556
|
[
"MIT"
] | 3
|
2021-05-25T12:58:36.000Z
|
2022-02-18T04:19:21.000Z
|
import pygame
from pygame.mixer import music
from pystage.core.assets import SoundManager
from pystage.core._base_sprite import BaseSprite
import time
class _Sound(BaseSprite):
# Like for costumes and backdrops, we need a class structure here.
# Plus a global sound manager.
def __init__(self):
super().__init__()
self.sound_manager = SoundManager(self)
self.mixer = pygame.mixer
self.mixer.init(channels=2)
self.current_pan = 0
self.current_pitch = 0
self.current_volume = 100
def pystage_addsound(self, name):
self.sound_manager.add_sound(name)
def sound_play(self, name, loop=0):
channel = self.mixer.find_channel()
sound = self.sound_manager.get_sound(name)
if sound is not None:
channel.play(sound, loop)
return channel
def sound_playuntildone(self, name):
sound = self.sound_manager.get_sound(name)
if sound is not None:
self.mixer.find_channel().play(sound, 0)
# time.sleep(sound.get_length())
# This needs to be done via wait time in the code block
# TODO: Add this function to yield blocks.
self.code_manager.current_block.add_to_wait_time = sound.get_length()
def sound_stopallsounds(self):
self.mixer.stop()
def sound_changeeffectby_pitch(self, value):
# TODO: for pitching there is no ready to use code in pygame. To do so
# we must operate on the audio array itself.
# -360 to 360, 10 is a half-step, 120 an octave
# changes only the speed of the sound
pass
sound_changeeffectby_pitch.opcode = "sound_changeeffectby"
sound_changeeffectby_pitch.param = "EFFECT"
sound_changeeffectby_pitch.value = "PITCH"
sound_changeeffectby_pitch.translation = "sound_effects_pitch"
def sound_changeeffectby_pan(self, value):
# norm pan value from -100/100 to range 0/1
self.current_pan += value
self.current_pan = min(100, max(-100, self.current_pan))
self._apply()
sound_changeeffectby_pan.opcode = "sound_changeeffectby"
sound_changeeffectby_pan.param = "EFFECT"
sound_changeeffectby_pan.value = "PAN"
sound_changeeffectby_pan.translation = "sound_effects_pan"
def sound_seteffectto_pitch(self, value):
# TODO: for pitching there is no ready to use code in pygame. To do so
# we must operate on the audio array itself.
pass
sound_seteffectto_pitch.opcode = "sound_seteffectto"
sound_seteffectto_pitch.param = "EFFECT"
sound_seteffectto_pitch.value = "PITCH"
sound_seteffectto_pitch.translation = "sound_effects_pitch"
def sound_seteffectto_pan(self, value):
# Values from -100 (left) to 100 (right)
self.current_pan = value
self.current_pan = min(100, max(-100, self.current_pan))
self._apply()
sound_seteffectto_pan.opcode = "sound_seteffectto"
sound_seteffectto_pan.param = "EFFECT"
sound_seteffectto_pan.value = "PAN"
sound_seteffectto_pan.translation = "sound_effects_pan"
def sound_cleareffects(self):
self.current_pan = 0
self.current_pitch = 0
self._apply()
# apply pitch
def _apply(self):
# norm pan value from -100/100 to range 0/1 and volume from 0/100 to range 0/1
pgpan = (self.current_pan + 100) / 200
pgvolume = self.current_volume / 100
# Apply pan and volume together with a simple linear pan law: keep full volume on the
# favoured side and attenuate the other, instead of hard-switching channels and then
# overwriting the pan with a second set_volume() call.
left = pgvolume * min(1.0, 2 * (1 - pgpan))
right = pgvolume * min(1.0, 2 * pgpan)
for channel_id in range(self.mixer.get_num_channels()):
self.mixer.Channel(channel_id).set_volume(left, right)
def sound_changevolumeby(self, value):
self.current_volume += value
self.current_volume = min(100, max(0, self.current_volume))
self._apply()
def sound_setvolumeto(self, value):
self.current_volume = value
self.current_volume = min(100, max(0, self.current_volume))
self._apply()
def sound_volume(self):
# as we hide the channel mechanic, we assume all channels are set to the same volume
return self.mixer.Channel(0).get_volume() * 100
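# Illustrative note (not in the original file): the effect values mirror Scratch's ranges.
# current_pan runs from -100 (left) to 100 (right) and is mapped to pygame's 0..1 range via
# (pan + 100) / 200, while current_volume 0..100 maps to 0..1, e.g.
#   pan = 50, volume = 80  ->  pgpan = 0.75, pgvolume = 0.8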
| 35.02439
| 92
| 0.668524
| 4,153
| 0.96402
| 0
| 0
| 0
| 0
| 0
| 0
| 973
| 0.225859
|
3dc364b351e4b86533cd7ac27b461f7ca088a0a9
| 2,126
|
py
|
Python
|
tests/test_runner/test_discover_runner.py
|
tomleo/django
|
ebfb71c64a786620947c9d598fd1ebae2958acff
|
[
"BSD-3-Clause"
] | 1
|
2015-09-09T08:48:03.000Z
|
2015-09-09T08:48:03.000Z
|
tests/test_runner/test_discover_runner.py
|
tomleo/django
|
ebfb71c64a786620947c9d598fd1ebae2958acff
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_runner/test_discover_runner.py
|
tomleo/django
|
ebfb71c64a786620947c9d598fd1ebae2958acff
|
[
"BSD-3-Clause"
] | 1
|
2020-04-12T19:00:12.000Z
|
2020-04-12T19:00:12.000Z
|
from django.test import TestCase
from django.test.runner import DiscoverRunner
class DiscoverRunnerTest(TestCase):
def test_dotted_test_module(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample"],
).countTestCases()
self.assertEqual(count, 3)
def test_dotted_test_class_vanilla_unittest(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestVanillaUnittest"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_class_unittest2(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestUnittest2"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_class_django_testcase(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestDjangoTestCase"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_method_vanilla_unittest(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestVanillaUnittest.test_sample"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_method_unittest2(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestUnittest2.test_sample"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_method_django_testcase(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestDjangoTestCase.test_sample"],
).countTestCases()
self.assertEqual(count, 1)
def test_pattern(self):
count = DiscoverRunner(
pattern="*_tests.py",
).build_suite(["test_discovery_sample"]).countTestCases()
self.assertEqual(count, 1)
def test_file_path(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample/"],
).countTestCases()
self.assertEqual(count, 4)
| 30.811594
| 83
| 0.676388
| 2,044
| 0.96143
| 0
| 0
| 0
| 0
| 0
| 0
| 453
| 0.213076
|
3dc48feaabd6085099581154d9df3a8f76e956ee
| 1,265
|
py
|
Python
|
src/ggrc/rbac/__init__.py
|
Killswitchz/ggrc-core
|
2460df94daf66727af248ad821462692917c97a9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc/rbac/__init__.py
|
Killswitchz/ggrc-core
|
2460df94daf66727af248ad821462692917c97a9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc/rbac/__init__.py
|
Killswitchz/ggrc-core
|
2460df94daf66727af248ad821462692917c97a9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Basic permissions module."""
from sqlalchemy import or_
class SystemWideRoles(object):
"""List of system wide roles."""
# pylint: disable=too-few-public-methods
SUPERUSER = u"Superuser"
ADMINISTRATOR = u"Administrator"
EDITOR = u"Editor"
READER = u"Reader"
CREATOR = u"Creator"
NO_ACCESS = u"No Access"
def context_query_filter(context_column, contexts):
'''
Intended for use by `model.query.filter(...)`
If `contexts == None`, it's Admin (no filter), so return `True`
Else, return the full query
'''
if contexts is None:
# Admin context, no filter
return True
else:
filter_expr = None
# Handle `NULL` context specially
if None in contexts:
filter_expr = context_column.is_(None)
# We're modifying `contexts`, so copy
contexts = set(contexts)
contexts.remove(None)
if contexts:
filter_in_expr = context_column.in_(contexts)
if filter_expr is not None:
filter_expr = or_(filter_expr, filter_in_expr)
else:
filter_expr = filter_in_expr
if filter_expr is None:
# No valid contexts
return False
return filter_expr
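# Illustrative sketch (not part of the original module): typical use with a SQLAlchemy-style
# query; "SomeModel" and its context_id column are hypothetical.
#   filter_expr = context_query_filter(SomeModel.context_id, [1, 2, None])
#   rows = SomeModel.query.filter(filter_expr)        # contexts 1, 2 or NULL
#   context_query_filter(SomeModel.context_id, None)  # returns True: admin, no filtering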
| 25.816327
| 78
| 0.67747
| 263
| 0.207905
| 0
| 0
| 0
| 0
| 0
| 0
| 549
| 0.433992
|
3dc61360e96fb602ab782fcc77e9987334f638a2
| 2,075
|
py
|
Python
|
buildingspy/examples/dymola/plotResult.py
|
Mathadon/BuildingsPy
|
9b27c6f3c0e2c185d03b846de18ec818a1f10d95
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
buildingspy/examples/dymola/plotResult.py
|
Mathadon/BuildingsPy
|
9b27c6f3c0e2c185d03b846de18ec818a1f10d95
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
buildingspy/examples/dymola/plotResult.py
|
Mathadon/BuildingsPy
|
9b27c6f3c0e2c185d03b846de18ec818a1f10d95
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2022-02-16T14:04:15.000Z
|
2022-02-16T14:04:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# import from future to make Python2 behave like Python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
from io import open
# end of from future import
def main():
""" Main method that plots the results
"""
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from buildingspy.io.outputfile import Reader
# Optionally, change fonts to use LaTeX fonts
# from matplotlib import rc
# rc('text', usetex=True)
# rc('font', family='serif')
# Read results
ofr1 = Reader(os.path.join("buildingspy", "examples", "dymola",
"case1", "PIDHysteresis.mat"), "dymola")
ofr2 = Reader(os.path.join("buildingspy", "examples", "dymola",
"case2", "PIDHysteresis.mat"), "dymola")
(time1, T1) = ofr1.values("cap.T")
(time1, y1) = ofr1.values("con.y")
(time2, T2) = ofr2.values("cap.T")
(time2, y2) = ofr2.values("con.y")
# Plot figure
fig = plt.figure()
ax = fig.add_subplot(211)
ax.plot(time1 / 3600, T1 - 273.15, 'r', label='$T_1$')
ax.plot(time2 / 3600, T2 - 273.15, 'b', label='$T_2$')
ax.set_xlabel('time [h]')
ax.set_ylabel(r'temperature [$^\circ$C]')
ax.set_xticks(list(range(25)))
ax.set_xlim([0, 24])
ax.legend()
ax.grid(True)
ax = fig.add_subplot(212)
ax.plot(time1 / 3600, y1, 'r', label='$y_1$')
ax.plot(time2 / 3600, y2, 'b', label='$y_2$')
ax.set_xlabel('time [h]')
ax.set_ylabel('y [-]')
ax.set_xticks(list(range(25)))
ax.set_xlim([0, 24])
ax.legend()
ax.grid(True)
# Save figure to file
plt.savefig('plot.pdf')
plt.savefig('plot.png')
# To show the plot on the screen, uncomment the line below
# plt.show()
# Main function
if __name__ == '__main__':
main()
| 27.302632
| 71
| 0.620723
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 718
| 0.346024
|
3dc696f09fb0ebe8bc4f7011c19473f98ca4f506
| 335
|
py
|
Python
|
tango_with_django_project/rango/admin.py
|
DADDYKIKI/tango_with_django_project
|
da2bbb0b7fd2d587c9af4c7ac14068678b2c38cf
|
[
"MIT"
] | null | null | null |
tango_with_django_project/rango/admin.py
|
DADDYKIKI/tango_with_django_project
|
da2bbb0b7fd2d587c9af4c7ac14068678b2c38cf
|
[
"MIT"
] | null | null | null |
tango_with_django_project/rango/admin.py
|
DADDYKIKI/tango_with_django_project
|
da2bbb0b7fd2d587c9af4c7ac14068678b2c38cf
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from rango.models import Category, Page
class CategoryAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('name',)}
class PageAdmin(admin.ModelAdmin):
list_display = ('title', 'category', 'url')
# Register the models together with their ModelAdmin classes so the
# customisations above actually take effect in the admin interface.
admin.site.register(Category, CategoryAdmin)
admin.site.register(Page, PageAdmin)
| 22.333333
| 45
| 0.668657
| 188
| 0.561194
| 0
| 0
| 0
| 0
| 0
| 0
| 34
| 0.101493
|
3dc6d3255aa8efde45efdc9453d22aa71f26740f
| 1,334
|
py
|
Python
|
components/python/scripts/bootstrap_validate.py
|
cloudify-cosmo/cloudify-manager-blueprints
|
1908c1a0615fb15cbb118335aa2f9e055b9e5779
|
[
"Apache-2.0"
] | 35
|
2015-03-07T13:30:58.000Z
|
2022-02-14T11:44:48.000Z
|
components/python/scripts/bootstrap_validate.py
|
cloudify-cosmo/cloudify-manager-blueprints
|
1908c1a0615fb15cbb118335aa2f9e055b9e5779
|
[
"Apache-2.0"
] | 101
|
2015-03-18T03:07:57.000Z
|
2019-02-07T12:06:42.000Z
|
components/python/scripts/bootstrap_validate.py
|
cloudify-cosmo/cloudify-manager-blueprints
|
1908c1a0615fb15cbb118335aa2f9e055b9e5779
|
[
"Apache-2.0"
] | 76
|
2015-01-08T10:33:03.000Z
|
2021-05-11T08:45:50.000Z
|
#!/usr/bin/env python
from os.path import join, dirname
from cloudify import ctx
ctx.download_resource(
join('components', 'utils.py'),
join(dirname(__file__), 'utils.py'))
import utils # NOQA
# Most images already ship with the following packages:
#
# python-setuptools
# python-backports
# python-backports-ssl_match_hostname
#
# - as they are dependencies of cloud-init, which is extremely popular.
#
# However, cloud-init is irrelevant for certain IaaS (such as vSphere) so
# images used there may not have these packages preinstalled.
#
# We're currently considering whether to include these libraries in the
# manager resources package. Until then, we only validate that they're
# preinstalled, and if not - instruct the user to install them.
missing_packages = set()
for pkg in ['python-setuptools',
'python-backports',
'python-backports-ssl_match_hostname']:
ctx.logger.info('Ensuring {0} is installed'.format(pkg))
is_installed = utils.RpmPackageHandler.is_package_installed(pkg)
if not is_installed:
missing_packages.add(pkg)
if missing_packages:
ctx.abort_operation('Prerequisite packages missing: {0}. '
'Please ensure these packages are installed and '
'try again'.format(', '.join(missing_packages)))
| 31.761905
| 73
| 0.709145
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 810
| 0.607196
|
3dc72f281f6a609f6178afd5c15a1c8b5b592cd3
| 278
|
py
|
Python
|
subdomains/gen_master_data.py
|
sjy5386/subshorts
|
d8170ee4a66989c3e852f86aa83bab6341e3aa10
|
[
"MIT"
] | 3
|
2022-03-08T19:02:41.000Z
|
2022-03-16T23:04:37.000Z
|
subdomains/gen_master_data.py
|
sjy5386/subshorts
|
d8170ee4a66989c3e852f86aa83bab6341e3aa10
|
[
"MIT"
] | 5
|
2022-03-17T02:16:52.000Z
|
2022-03-18T02:55:25.000Z
|
subdomains/gen_master_data.py
|
sjy5386/subshorts
|
d8170ee4a66989c3e852f86aa83bab6341e3aa10
|
[
"MIT"
] | null | null | null |
from .models import ReservedName
def gen_master(apps, scheme_editor):
reserved_names = ['co', 'com', 'example', 'go', 'gov', 'icann', 'ne', 'net', 'nic', 'or', 'org', 'whois', 'www']
for reserved_name in reserved_names:
ReservedName(name=reserved_name).save()
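# Illustrative note (not part of the original module): gen_master has the
# (apps, schema_editor)-style signature expected by Django data migrations, so it
# would typically be hooked in with something like:
#   migrations.RunPython(gen_master, migrations.RunPython.noop)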
| 34.75
| 116
| 0.647482
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 69
| 0.248201
|
3dc7b5b71b827c183978d2d97338bcdc701937fb
| 5,180
|
py
|
Python
|
promort_tools/converters/zarr_to_tiledb.py
|
mdrio/promort_tools
|
26f1b96b27046b0480872dcf17b3be057660a51d
|
[
"MIT"
] | null | null | null |
promort_tools/converters/zarr_to_tiledb.py
|
mdrio/promort_tools
|
26f1b96b27046b0480872dcf17b3be057660a51d
|
[
"MIT"
] | null | null | null |
promort_tools/converters/zarr_to_tiledb.py
|
mdrio/promort_tools
|
26f1b96b27046b0480872dcf17b3be057660a51d
|
[
"MIT"
] | 2
|
2021-05-24T16:04:55.000Z
|
2021-09-16T13:58:48.000Z
|
# Copyright (c) 2021, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse, sys, os
import zarr
import tiledb
import numpy as np
from math import ceil
from promort_tools.libs.utils.logger import get_logger, LOG_LEVELS
class ZarrToTileDBConverter(object):
def __init__(self, logger):
self.logger = logger
def _get_array_shape(self, zarr_dataset):
shapes = set([arr[1].shape for arr in zarr_dataset.arrays()])
if len(shapes) == 1:
return shapes.pop()
else:
self.logger.error('Multiple shapes in zarr dataset arrays, cannot convert to tiledb')
sys.exit('Multiple shapes in zarr arrays')
def _get_array_attributes(self, zarr_dataset):
return [(a[0], a[1].dtype) for a in zarr_dataset.arrays()]
def _get_tiledb_path(self, zarr_dataset, out_folder):
return os.path.join(
out_folder,
'{0}.tiledb'.format(os.path.basename(os.path.normpath(zarr_dataset)))
)
def _init_tiledb_dataset(self, dataset_path, dataset_shape, zarr_attributes):
rows = tiledb.Dim(name='rows', domain=(0, dataset_shape[0]-1), tile=4, dtype=np.uint16)
columns = tiledb.Dim(name='columns', domain=(0, dataset_shape[1]-1), tile=4, dtype=np.uint16)
domain = tiledb.Domain(rows, columns)
attributes = list()
for a in zarr_attributes:
attributes.append(tiledb.Attr(a[0], dtype=a[1]))
schema = tiledb.ArraySchema(domain=domain, sparse=False, attrs=attributes)
tiledb.DenseArray.create(dataset_path, schema)
def _zarr_to_tiledb(self, zarr_dataset, tiledb_dataset_path, slide_resolution):
tiledb_data = dict()
tiledb_meta = {
'original_width': slide_resolution[0],
'original_height': slide_resolution[1],
'slide_path': zarr_dataset.attrs['filename']
}
for arr_label, arr_data in zarr_dataset.arrays():
tiledb_data[arr_label] = arr_data[:]
tiledb_meta.update(
{
'{0}.dzi_sampling_level'.format(arr_label): ceil(arr_data.attrs['dzi_sampling_level']),
'{0}.tile_size'.format(arr_label): arr_data.attrs['tile_size'],
'{0}.rows'.format(arr_label): arr_data.shape[1],
'{0}.columns'.format(arr_label): arr_data.shape[0]
}
)
with tiledb.open(tiledb_dataset_path, 'w') as A:
A[:] = tiledb_data
for k, v in tiledb_meta.items():
A.meta[k] = v
def run(self, zarr_dataset, out_folder):
z = zarr.open(zarr_dataset)
try:
slide_res = z.attrs['resolution']
except KeyError as ke:
self.logger.error('Missing key {0} in zarr attributes, exit'.format(ke))
sys.exit('Missing key {0}'.format(ke))
dset_shape = self._get_array_shape(z)
tiledb_dataset_path = self._get_tiledb_path(zarr_dataset, out_folder)
self.logger.info('TileDB dataset path: {0}'.format(tiledb_dataset_path))
attributes = self._get_array_attributes(z)
self._init_tiledb_dataset(tiledb_dataset_path, dset_shape, attributes)
self._zarr_to_tiledb(z, tiledb_dataset_path, slide_res)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--zarr-dataset', type=str, required=True,
help='path to the ZARR dataset to be converted')
parser.add_argument('--out-folder', type=str, required=True,
help='output folder for TileDB dataset')
parser.add_argument('--log-level', type=str, choices=LOG_LEVELS,
default='INFO', help='log level (default=INFO)')
parser.add_argument('--log-file', type=str, default=None, help='log file (default=stderr)')
return parser
def main(argv=None):
parser = make_parser()
args = parser.parse_args(argv)
logger = get_logger(args.log_level, args.log_file)
app = ZarrToTileDBConverter(logger)
app.run(args.zarr_dataset, args.out_folder)
if __name__ == '__main__':
main(sys.argv[1:])
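# Illustrative usage (not part of the original module); the paths are hypothetical:
#   python zarr_to_tiledb.py --zarr-dataset /data/slide.zarr --out-folder /data/tiledb \
#       --log-level DEBUG --log-file convert.log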
| 43.166667
| 107
| 0.663127
| 3,053
| 0.589382
| 0
| 0
| 0
| 0
| 0
| 0
| 1,652
| 0.318919
|
3dc7bf9b590e7454e8a84ae7d5b2f66655fcd2d8
| 9,121
|
py
|
Python
|
rxmarbles/theme/pencil.py
|
enbandari/rx-marbles
|
b95813b5e24818eee272ab7ecf0f130510e60f39
|
[
"MIT"
] | null | null | null |
rxmarbles/theme/pencil.py
|
enbandari/rx-marbles
|
b95813b5e24818eee272ab7ecf0f130510e60f39
|
[
"MIT"
] | null | null | null |
rxmarbles/theme/pencil.py
|
enbandari/rx-marbles
|
b95813b5e24818eee272ab7ecf0f130510e60f39
|
[
"MIT"
] | null | null | null |
import random
root = '''<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="%spx"
height="%spx"
viewBox="0 0 %s %s "
id="svg2"
version="1.1"
inkscape:version="0.91 r13725"
>
<defs
id="defs4">
<filter
style="color-interpolation-filters:sRGB;"
inkscape:label="Drop Shadow"
id="filter3443"
x="-25%%"
y="-25%%"
width="150%%"
height="150%%"
>
<feFlood
flood-opacity="0.498039"
flood-color="rgb(0,0,0)"
result="flood"
id="feFlood3445" />
<feComposite
in="flood"
in2="SourceGraphic"
operator="in"
result="composite1"
id="feComposite3447" />
<feGaussianBlur
in="composite1"
stdDeviation="3"
result="blur"
id="feGaussianBlur3449" />
<feOffset
dx="2"
dy="3"
result="offset"
id="feOffset3451" />
<feComposite
in="SourceGraphic"
in2="offset"
operator="over"
result="composite2"
id="feComposite3453" />
</filter>
<marker
inkscape:stockid="Arrow1Lend"
orient="auto"
refY="0.0"
refX="0.0"
id="Arrow1Lend"
style="overflow:visible;"
inkscape:isstock="true">
<path
d="M -3.0,0.0 L -3.0,-5.0 L -12.5,0.0 L -3.0,5.0 L -3.0,0.0 z "
style="fill-rule:evenodd;stroke:#003080;stroke-width:1pt;stroke-opacity:1;fill:#003080;fill-opacity:1"
transform="scale(0.8) rotate(180) translate(12.5,0)" />
</marker>
</defs>
%s
</svg>
'''
circ1 = '''
<g transform="translate(%s %s)">
<path
sodipodi:nodetypes="cccc"
inkscape:connector-curvature="0"
id="circle"
d="m 4.9388474,-19.439462 c 16.0642996,-0.12398 28.5596096,25.2132203 13.6726596,35.64262 -11.0573896,9.63907 -34.34364,12.39205 -40.14488,-4.43275 -5.99947,-18.2070397 12.2740204,-28.34201 25.6703704,-34.96158"
style="fill:#ffffff;fill-opacity:0.8627451;fill-rule:evenodd;stroke:#000000;stroke-width:1.42857146px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
inkscape:label="#path3567" />
<text
y="11"
x="0"
style="font-size:28px;font-family:purisa;text-align:center;text-anchor:middle;fill:#000000;"
xml:space="preserve">%s</text>
</g>
'''
circ2 = '''
<g transform="translate(%s %s)">
<path
sodipodi:nodetypes="ccc"
style="fill:#ffffff;fill-opacity:0.8627451;fill-rule:evenodd;stroke:#000000;stroke-width:1.42857158px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 1.5925919,21.477458 C 54.657578,22.391841 -4.4465257,-49.196211 -20.218549,-5.7426508 -25.112801,8.7120558 -15.351552,21.857363 2.9582607,24.135679"
id="circ2"
inkscape:connector-curvature="0"
inkscape:label="#path3569" />
<text
y="11"
x="0"
style="font-size:28px;font-family:purisa;text-align:center;text-anchor:middle;fill:#000000;"
xml:space="preserve">%s</text>
</g>
'''
circ3 = '''
<g transform="translate(%s %s)">
<path
sodipodi:nodetypes="ccccc"
inkscape:connector-curvature="0"
id="circ3"
d="M 4.0475415,-21.306002 C -11.703304,-26.547792 -23.641751,-7.9231854 -22.516473,6.1088129 -20.059942,26.830243 12.722358,33.867273 22.337406,14.863588 27.656584,4.0579388 23.204578,-8.3517124 15.784624,-16.859919 c -1.822,-3.127279 -5.336267,-5.723574 -9.3972065,-5.54123"
style="fill:#ffffff;fill-opacity:0.8627451;fill-rule:evenodd;stroke:#000000;stroke-width:1.42857158px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
inkscape:label="#path3571" />
<text
y="11"
x="0"
style="font-size:28px;font-family:purisa;text-align:center;text-anchor:middle;fill:#000000;"
xml:space="preserve">%s</text>
</g>
'''
circ4 = '''
<g transform="translate(%s %s)">
<path
style="fill:#ffffff;fill-opacity:0.8627451;fill-rule:evenodd;stroke:#000000;stroke-width:1.42857146px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 2.0536007,-17.942742 C -52.370629,-18.905944 8.2474086,56.504162 24.423439,10.730643 29.443049,-4.4957928 16.207176,-22.177911 -2.5716488,-24.577866"
id="circ5"
inkscape:connector-curvature="0"
inkscape:label="#path3433" />
<text
y="11"
x="0"
style="font-size:28px;font-family:purisa;text-align:center;text-anchor:middle;fill:#000000;"
xml:space="preserve">%s</text>
</g>
'''
arrow = '''
<g transform="scale(%s %s) translate(%s %s)">
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M -0.67660398,1.4566587 C 51.393331,1.3820987 103.49025,-3.9934243 155.52767,1.1808467 c 33.34887,0.89417 67.21197,-1.95060293 99.84156,5.535708 44.03188,2.2890288 88.09651,1.698567 131.74849,-3.79605 21.2474,-0.841106 42.51228,0.139269 63.76647,-0.199798"
id="axisLine"
inkscape:connector-curvature="0"
inkscape:label="#path3511" />
</g>
<g transform="translate(%s %s)">
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.42857146px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m -13.085216,-10.419073 c 2.66757,0.133318 4.1293297,2.8477214 6.5645197,3.6415244 2.19618,1.483387 4.27915,3.129365 6.74184,4.165938 3.6572898,1.62997797 0.28555,4.903303 -1.90365,6.045673 -2.08841,1.84505 -3.80877,3.732465 -6.63704,4.785017 -1.8518597,0.870578 -3.6440197,1.8066886 -5.3976897,2.8506076"
id="arrow_end"
inkscape:connector-curvature="0"
inkscape:label="#path3528" />
</g>
'''
end = '''
<g>
<path d="m %s,%s -1,32"
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:4px;" />
</g>
'''
err = '''
<g id="error">
<path
inkscape:connector-curvature="0"
d="m %s,%s -34,36"
style="stroke:#000000;stroke-width:3px;" />
<path
style="stroke:#000000;stroke-width:3px;"
d="m %s,%s 36,36"
/>
</g>
'''
# this one is used for operator box
block = '''
<g transform="scale(%s %s) translate(%s %s)">
<path
style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:1.42857146px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 3.6131775,2.4809559 C 7.7262916,27.136376 -4.8390181,67.388756 10.311791,81.793736 c 56.57601,-7.35809 113.842299,-2.82956 170.815959,-4.56434 48.9116,1.31804 98.12281,2.30369 146.89949,0.25237 36.73272,-6.08907 74.34343,-4.60865 110.81369,1.7655 26.17801,-6.87142 7.26874,-47.02276 10.85636,-67.94864 C 435.2653,-11.614984 389.13054,8.5049456 362.01772,0.90526594 300.94038,0.67314594 239.26649,2.7131859 178.67384,0.60705594 118.08119,-1.4990741 86.699905,6.8117156 57.753682,4.3549359 28.807462,1.8981559 17.816805,1.4648659 0.01403178,-4.669534"
id="operator_box"
inkscape:connector-curvature="0"
sodipodi:nodetypes="ccccccczzc"
inkscape:label="#path3549" />
</g>
<text
x="%s"
y="%s"
style="font-size:24px;font-family:purisa;text-align:center;text-anchor:middle;fill:#000000;"
xml:space="preserve">%s</text>
'''
# - this one is used for grouping
groupping_block = '''
<g >
<rect
ry="25px"
rx="25px"
y="%s"
x="%s"
width="%s"
height="%s"
style="opacity:1;fill:%s;fill-opacity:0;stroke:#000000;stroke-width:1px;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
</g>
'''
#==================================================
# this is the theme interface
#==================================================
class Circle:
def __init__(self, x, y, text, color):
global circ1
global circ2
global circ3
shapes = [circ1, circ2, circ3]
index = random.randint(0, len(shapes) - 1)
circ = shapes[index]
self.node = circ % (x + 25, y, text)
class Arrow:
def __init__(self, x, y, start, size):
global arrow
self.node = arrow % (1.0 * size / 450.0, 0.75, x + 25 + start, y, x + 22 + start + size, y + 2)
class End:
def __init__(self, x, y):
global end
self.node = end % (x + 25, y - 12)
class Err:
def __init__(self, x, y):
global err
self.node = err % (x + 25 + 18, y - 18, x + 25 - 14, y - 18)
class BlockWithText:
def __init__(self, x, y, text, color, width, height):
global groupping_block
self.node = groupping_block % (y - 22, x, width, height, "white")
class Block:
def __init__(self, x, y, width, height, text, color):
global block
self.node = block % (width / 460.0, 1, x, y, x + width / 2.0, y + height / 2.0, text)
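# Illustrative sketch (not part of the original module): the theme classes only build SVG
# fragments in .node; a renderer is expected to concatenate them and wrap the result in the
# root template. The 400x100 canvas size and coordinates below are hypothetical.
#   parts = Arrow(10, 50, 0, 300).node + Circle(60, 50, "1", None).node + End(340, 50).node
#   svg = root % (400, 100, 400, 100, parts)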
| 36.338645
| 559
| 0.621642
| 1,061
| 0.116325
| 0
| 0
| 0
| 0
| 0
| 0
| 7,904
| 0.866572
|
3dc93ff9707b2d135f50553fa063389f067d2b73
| 803
|
py
|
Python
|
awx/main/migrations/0082_v360_webhook_http_method.py
|
Avinesh/awx
|
6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf
|
[
"Apache-2.0"
] | 11,396
|
2017-09-07T04:56:02.000Z
|
2022-03-31T13:56:17.000Z
|
awx/main/migrations/0082_v360_webhook_http_method.py
|
Avinesh/awx
|
6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf
|
[
"Apache-2.0"
] | 11,046
|
2017-09-07T09:30:46.000Z
|
2022-03-31T20:28:01.000Z
|
awx/main/migrations/0082_v360_webhook_http_method.py
|
Avinesh/awx
|
6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf
|
[
"Apache-2.0"
] | 3,592
|
2017-09-07T04:14:31.000Z
|
2022-03-31T23:53:09.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def add_webhook_notification_template_fields(apps, schema_editor):
# loop over all existing webhook notification templates and make
# sure they have the new "http_method" field filled in with "POST"
NotificationTemplate = apps.get_model('main', 'notificationtemplate')
webhooks = NotificationTemplate.objects.filter(notification_type='webhook')
for w in webhooks:
w.notification_configuration['http_method'] = 'POST'
w.save()
class Migration(migrations.Migration):
dependencies = [
('main', '0081_v360_notify_on_start'),
]
operations = [
migrations.RunPython(add_webhook_notification_template_fields, migrations.RunPython.noop),
]
| 30.884615
| 98
| 0.731009
| 238
| 0.296389
| 0
| 0
| 0
| 0
| 0
| 0
| 242
| 0.30137
|
3dca45f1cb27867b123a5f15fcfde334028fa3ca
| 7,964
|
py
|
Python
|
ogc_edr_lib/ogc_api_collection_metadata.py
|
eugenegesdisc/gmuedr
|
e8b3e5c7b8d18421d875f0f6f778a37a6d8ec3fd
|
[
"MIT"
] | null | null | null |
ogc_edr_lib/ogc_api_collection_metadata.py
|
eugenegesdisc/gmuedr
|
e8b3e5c7b8d18421d875f0f6f778a37a6d8ec3fd
|
[
"MIT"
] | null | null | null |
ogc_edr_lib/ogc_api_collection_metadata.py
|
eugenegesdisc/gmuedr
|
e8b3e5c7b8d18421d875f0f6f778a37a6d8ec3fd
|
[
"MIT"
] | null | null | null |
from typing import Tuple, Union
from aiohttp import web
from ogc_edr_lib.ogc_api import OgcApi
import logging
from ogc_edr_lib.ogc_api_collection_metadata_get_queries import (
OgcApiCollectionMetadataGetQueries)
from ogc_edr_lib.ogc_api_collection_metadata_list_data_items import (
OgcApiCollectionMetadataListDataItems
)
from ogc_edr_lib.ogc_api_collection_metadata_list_data_locations import (
OgcApiCollectionMetadataListDataLocations
)
Logger = logging.getLogger(__name__)
class OgcApiCollectionMetadata(OgcApi):
def list_collection_data_locations(
self, request: web.Request, collection_id, bbox=None,
datetime=None, limit=None) -> web.Response:
"""List available location identifers for the collection
List the locations available for the collection
:param collection_id: Identifier (id) of a specific collection
:type collection_id: str
:param bbox: Only features that have a geometry that intersects the
bounding box are selected. The bounding box is provided as four or six
numbers, depending on whether the coordinate reference system includes
a vertical axis (height or depth):
* Lower left corner, coordinate axis 1
* Lower left corner, coordinate axis 2
* Minimum value, coordinate axis 3 (optional)
* Upper right corner, coordinate axis 1
* Upper right corner, coordinate axis 2
* Maximum value, coordinate axis 3 (optional)
The coordinate reference system of the values is specified by
the `crs` query parameter. If the `crs`
query parameter is not defined the coordinate reference system is
defined by the default `crs`
for the query type. If a default `crs`
has not been defined the values will be assumed to be in the WGS 84
longitude/latitude (http://www.opengis.net/def/crs/OGC/1.3/CRS84)
coordinate reference system. For WGS 84 longitude/latitude the values
are in most cases the sequence of minimum longitude, minimum latitude,
maximum longitude and maximum latitude. However, in cases where the
box spans the antimeridian the first value (west-most box edge) is
larger than the third value (east-most box edge). If the vertical
axis is included, the third and the sixth number are the bottom and
the top of the 3-dimensional bounding box. If a feature has multiple
spatial geometry properties, it is the decision of the server whether
only a single spatial geometry property is used to determine the
extent or all relevant geometries.
:type bbox: dict | bytes
:param datetime: Either a date-time or an interval, open or closed.
Date and time expressions adhere to RFC 3339. Open intervals are
expressed using double-dots. Examples:
* A date-time: \"2018-02-12T23:20:50Z\"
* A closed interval:
\"2018-02-12T00:00:00Z/2018-03-18T12:31:12Z\"
* Open intervals: \"2018-02-12T00:00:00Z/..\"
or \"../2018-03-18T12:31:12Z\"
Only features that have a temporal property that intersects the
value of `datetime` are selected. If a feature has multiple
temporal properties, it is the decision of the server whether only
a single temporal property is used to determine the extent or all
relevant temporal properties.
:type datetime: str
:param limit: The optional limit parameter limits the number of results
that are presented in the response document. Minimum = 1.
Maximum = 10000. Default = 10.
:type limit: int
"""
ocmeta = OgcApiCollectionMetadataListDataLocations()
headers, status, content = ocmeta.list_collection_data_locations(
request, collection_id, bbox, datetime, limit)
return headers, status, content
def get_queries(
self, request: web.Request, collection_id, f=None):
"""
List query types supported by the collection
This will provide information about the query types that are supported
by the chosen collection Use content negotiation to request HTML or
JSON.
:param collection_id: Identifier (id) of a specific collection
:type collection_id: str
:param f: format to return the data response in
:type f: str
:returns: tuple of headers, status code, content
"""
ocmeta = OgcApiCollectionMetadataGetQueries()
headers, status, content = ocmeta.get_queries(
request, collection_id, f)
return headers, status, content
def list_data_items(
self, request: web.Request, collection_id,
bbox=None, datetime=None, limit=None):
"""List available items
List the items available in the collection accessible via a unique identifier
:param collection_id: Identifier (id) of a specific collection
:type collection_id: str
:param bbox: Only features that have a geometry that intersects the bounding box are selected. The bounding box is provided as four or six numbers, depending on whether the coordinate reference system includes a vertical axis (height or depth): * Lower left corner, coordinate axis 1 * Lower left corner, coordinate axis 2 * Minimum value, coordinate axis 3 (optional) * Upper right corner, coordinate axis 1 * Upper right corner, coordinate axis 2 * Maximum value, coordinate axis 3 (optional) The coordinate reference system of the values is specified by the `crs` query parameter. If the `crs` query parameter is not defined the coordinate reference system is defined by the default `crs` for the query type. If a default `crs` has not been defined the values will be assumed to be in the WGS 84 longitude/latitude (http://www.opengis.net/def/crs/OGC/1.3/CRS84) coordinate reference system. For WGS 84 longitude/latitude the values are in most cases the sequence of minimum longitude, minimum latitude, maximum longitude and maximum latitude. However, in cases where the box spans the antimeridian the first value (west-most box edge) is larger than the third value (east-most box edge). If the vertical axis is included, the third and the sixth number are the bottom and the top of the 3-dimensional bounding box. If a feature has multiple spatial geometry properties, it is the decision of the server whether only a single spatial geometry property is used to determine the extent or all relevant geometries.
:type bbox: dict | bytes
:param datetime: Either a date-time or an interval, open or closed. Date and time expressions adhere to RFC 3339. Open intervals are expressed using double-dots. Examples: * A date-time: \"2018-02-12T23:20:50Z\" * A closed interval: \"2018-02-12T00:00:00Z/2018-03-18T12:31:12Z\" * Open intervals: \"2018-02-12T00:00:00Z/..\" or \"../2018-03-18T12:31:12Z\" Only features that have a temporal property that intersects the value of `datetime` are selected. If a feature has multiple temporal properties, it is the decision of the server whether only a single temporal property is used to determine the extent or all relevant temporal properties.
:type datetime: str
:param limit: The optional limit parameter limits the number of results that are presented in the response document. Minimum = 1. Maximum = 10000. Default = 10.
:type limit: int
"""
ocmeta = OgcApiCollectionMetadataListDataItems()
headers, status, content = ocmeta.list_data_items(
request, collection_id, bbox, datetime, limit
)
return headers, status, content
| 63.206349
| 1,561
| 0.708815
| 7,470
| 0.937971
| 0
| 0
| 0
| 0
| 0
| 0
| 6,372
| 0.8001
|
3dca6b4523ea884f293c6a6b346cc8182bedf764
| 28
|
py
|
Python
|
tunga/preprocessing/__init__.py
|
tahtaciburak/tunga
|
e71a4fa393d692779ab6d674673c5674d7287dac
|
[
"MIT"
] | 5
|
2020-07-31T19:26:46.000Z
|
2020-10-23T11:49:06.000Z
|
tunga/preprocessing/__init__.py
|
tunga-ml/tunga
|
823fd762054fd513300025cbb1fc799f7e3cf6b1
|
[
"MIT"
] | null | null | null |
tunga/preprocessing/__init__.py
|
tunga-ml/tunga
|
823fd762054fd513300025cbb1fc799f7e3cf6b1
|
[
"MIT"
] | 1
|
2021-09-10T08:24:13.000Z
|
2021-09-10T08:24:13.000Z
|
from .normalization import *
| 28
| 28
| 0.821429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3dccadbdd4f7bd09cd826b80f7957d192a7141e5
| 800
|
py
|
Python
|
runtests.py
|
resurrexi/django-restql
|
6a642a46ae597201214bdaeee5d9e92a62fa4616
|
[
"MIT"
] | 545
|
2019-04-23T12:54:21.000Z
|
2022-03-28T07:59:43.000Z
|
runtests.py
|
resurrexi/django-restql
|
6a642a46ae597201214bdaeee5d9e92a62fa4616
|
[
"MIT"
] | 109
|
2019-05-21T13:48:27.000Z
|
2022-03-18T21:10:32.000Z
|
runtests.py
|
resurrexi/django-restql
|
6a642a46ae597201214bdaeee5d9e92a62fa4616
|
[
"MIT"
] | 44
|
2019-05-15T19:04:01.000Z
|
2022-01-31T04:12:59.000Z
|
#!/usr/bin/env python
import os
import sys
import subprocess
from django.core.management import execute_from_command_line
FLAKE8_ARGS = ['django_restql', 'tests', 'setup.py', 'runtests.py']
WARNING_COLOR = '\033[93m'
END_COLOR = '\033[0m'
def flake8_main(args):
print('Running flake8 code linting')
ret = subprocess.call(['flake8'] + args)
msg = (
WARNING_COLOR + 'flake8 failed\n' + END_COLOR
if ret else 'flake8 passed\n'
)
print(msg)
return ret
def runtests():
ret = flake8_main(FLAKE8_ARGS)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tests.settings')
argv = sys.argv[:1] + ['test'] + sys.argv[1:]
execute_from_command_line(argv)
sys.exit(ret) # Fail build if code linting fails
if __name__ == '__main__':
runtests()
| 22.857143
| 69
| 0.67375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 246
| 0.3075
|
3dccba1140ab8bafa4d46c818af6ac8d4201bac2
| 17,549
|
py
|
Python
|
structured_tables/parser.py
|
CivicKnowledge/structured_tables
|
836ff700f49be51d2a12b2daa3a5460a2fc2fc06
|
[
"BSD-3-Clause"
] | null | null | null |
structured_tables/parser.py
|
CivicKnowledge/structured_tables
|
836ff700f49be51d2a12b2daa3a5460a2fc2fc06
|
[
"BSD-3-Clause"
] | null | null | null |
structured_tables/parser.py
|
CivicKnowledge/structured_tables
|
836ff700f49be51d2a12b2daa3a5460a2fc2fc06
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2016 Civic Knowledge. This file is licensed under the terms of the
# Revised BSD License, included in this distribution as LICENSE
"""
Parser for the Simple Data Package format. The parser consists of several iterable generator
objects.
"""
NO_TERM = '<no_term>' # No parent term -- no '.' -- in term cell
ELIDED_TERM = '<elided_term>' # A '.' in term cell, but no term before it.
class ParserError(Exception):
def __init__(self, *args, **kwargs):
super(ParserError, self).__init__(*args, **kwargs)
self.term = None
class IncludeError(ParserError):
pass
class Term(object):
"""Parses a row into the parts of a term
Public attributes. These are set externally to the constructor.
file_name Filename or URL of the file that contains the term
row: Row number of term
col Column number of term
is_arg_child Term was generated from arguments of parent
child_property_type What datatype to use in dict conversion
valid Did term pass validation tests? Usually based on DeclaredTerm values.
"""
def __init__(self, term, value, term_args=[]):
"""
:param term: Simple or compound term name
:param value: Term value, from second column of spreadsheet
:param term_args: Columns 2+ from the term row
"""
self.parent_term, self.record_term = Term.split_term_lower(term)
self.value = value.strip() if value else None
self.args = [x.strip() for x in term_args]
self.section = None # Name of section the term is in.
self.file_name = None
self.row = None
self.col = None
# When converting to a dict, which key to use for the self.value value
self.term_value_name = '@value' # May be changed during term parsing
# When converting to a dict, what datatype should be used for this term.
# Can be forced to list, scalar, dict or other types.
self.child_property_type = 'any'
self.valid = None
self.is_arg_child = None # If true, term was generated from the arguments of its parent
self.children = [] # When terms are linked, holds the term's children.
@classmethod
def split_term(cls, term):
"""
Split a term in to parent and record term components
:param term: combined term text
:return: Tuple of parent and record term
"""
if '.' in term:
parent_term, record_term = term.split('.')
parent_term, record_term = parent_term.strip(), record_term.strip()
if parent_term == '':
parent_term = ELIDED_TERM
else:
parent_term, record_term = NO_TERM, term.strip()
return parent_term, record_term
@classmethod
def split_term_lower(cls, term):
"""
Like split_term, but also lowercases both parent and record term
:param term: combined term text
:return: Tuple of parent and record term
"""
return tuple(e.lower() for e in Term.split_term(term))
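# Illustrative note (not in the original file):
#   Term.split_term_lower('Table.Column')  -> ('table', 'column')
#   Term.split_term_lower('Title')         -> (NO_TERM, 'title')
#   Term.split_term_lower('.Caption')      -> (ELIDED_TERM, 'caption')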
def file_ref(self):
"""Return a string for the file, row and column of the term."""
if self.file_name is not None and self.row is not None:
return "{} {}:{} ".format(self.file_name, self.row, self.col)
elif self.row is not None:
return " {}:{} ".format(self.row, self.col)
else:
return ''
def add_child(self, child):
self.children.append(child)
def __repr__(self):
return "<Term: {}{}.{} {} {} >".format(self.file_ref(), self.parent_term,
self.record_term, self.value, self.args)
def __str__(self):
if self.parent_term == NO_TERM:
return "{}{}: {}".format(self.file_ref(), self.record_term, self.value)
elif self.parent_term == ELIDED_TERM:
return "{}.{}: {}".format(self.file_ref(), self.record_term, self.value)
else:
return "{}{}.{}: {}".format(self.file_ref(), self.parent_term, self.record_term, self.value)
class CsvPathRowGenerator(object):
"""An object that generates rows. The current implementation mostly just a wrapper around
csv.reader, but it add a path property so term interperters know where the terms are coming from
"""
def __init__(self, path):
self._path = path
self._f = None
@property
def path(self):
return self._path
def open(self):
if self._path.startswith('http'):
import urllib2
try:
f = urllib2.urlopen(self._path)
except urllib2.URLError:
raise IncludeError("Failed to find file by url: {}".format(self._path))
f.name = self._path # to be symmetric with files.
else:
from os.path import join
try:
f = open(self._path)
except IOError:
raise IncludeError("Failed to find file: {}".format(self._path) )
self._f = f
def close(self):
if self._f:
self._f.close()
self._f = None
def __iter__(self):
import csv
self.open()
# Python 3, should use yield from
for row in csv.reader(self._f):
yield row
self.close()
class CsvDataRowGenerator(object):
"""Generate rows from CSV data, as a string
"""
def __init__(self, data, path = None):
self._data = data
self._path = path or '<none>'
@property
def path(self):
return self._path
def open(self):
pass
def close(self):
pass
def __iter__(self):
import csv
from cStringIO import StringIO
f = StringIO(self._data)
# Python 3, should use yield from
for row in csv.reader(f):
yield row
class RowGenerator(object):
"""An object that generates rows. The current implementation mostly just a wrapper around
csv.reader, but it add a path property so term interperters know where the terms are coming from
"""
def __init__(self, rows, path = None):
self._rows = rows
self._path = path or '<none>'
@property
def path(self):
return self._path
def open(self):
pass
def close(self):
pass
def __iter__(self):
for row in self._rows:
yield row
class TermGenerator(object):
"""Generate terms from a row generator. It will produce a term for each row, and child
terms for any arguments to the row. """
def __init__(self, row_gen):
"""
:param row_gen: an iterator that generates rows
:return:
"""
from os.path import dirname, basename
self._row_gen = row_gen
self._path = self._row_gen.path
def __iter__(self):
"""An interator that generates term objects"""
for line_n, row in enumerate(self._row_gen, 1):
if not row[0].strip() or row[0].strip().startswith('#'):
continue
t = Term(row[0].lower(),
row[1] if len(row)>1 else '',
row[2:] if len(row)>2 else [])
t.row = line_n
t.col = 1
t.file_name = self._path
rt_l = t.record_term.lower()
if rt_l == 'include':
yield t
for t in self.include_term_generator(t.value):
yield t
continue # Already yielded the include term
yield t
# Yield any child terms, from the term row arguments
if rt_l != 'section':
for col, value in enumerate(t.args, 0):
if value.strip():
t2 = Term(t.record_term.lower() + '.' + str(col), value, [])
t2.is_arg_child = True
t2.row = line_n
t2.col = col + 2 # The 0th argument starts in col 2
t2.file_name = self._path
yield t2
def include_term_generator(self, include_ref):
from os.path import dirname, join
if not self._path:
raise ParserError("Can't include because don't know current path"
.format(self._root_directory))
if include_ref.startwith('http'):
path = include_ref
else:
path = join(dirname(self._path), include_ref.strip('/'))
return TermGenerator(CsvPathRowGenerator(path))  # the include is referenced by path or URL, so use the path-based row generator
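# Illustrative note (not in the original file): a CSV row such as
#   Table,Employees,integer,Employee count
# yields a 'table' term with value 'Employees', plus argument-child terms
# 'table.0' = 'integer' and 'table.1' = 'Employee count'.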
class TermInterpreter(object):
"""Takes a stream of terms and sets the parameter map, valid term names, etc """
def __init__(self, term_gen, remove_special=True):
"""
:param term_gen: an an iterator that generates terms
:param remove_special: If true ( default ) remove the special terms from the stream
:return:
"""
from collections import defaultdict
self._remove_special = remove_special
self._term_gen = term_gen
self._param_map = [] # Current parameter map, the args of the last Section term
# _sections and _terms are loaded from Declare documents, in
# handle_declare and import_declare_doc. The Declare doc information
# can also be loaded before parsing, so the Declare term can be eliminated.
self._sections = {} # Declared sections and their arguments
self._terms = {} # Pre-defined terms, plus TermValueName and ChildPropertyType
self.errors = []
@property
def sections(self):
return self._sections
@property
def synonyms(self):
return {k: v['synonym'] for k, v in self._terms.items() if 'synonym' in v}
@property
def terms(self):
return self._terms
@property
def declare_dict(self):
return {
'sections': self.sections,
'terms': self.terms,
}
def as_dict(self):
"""Iterate, link terms and convert to a dict"""
return convert_to_dict(link_terms(self))
def errors_as_dict(self):
errors = []
for e in self.errors:
errors.append({
'file': e.term.file_name,
'row': e.term.row,
'col': e.term.col,
'term': self.join(e.term.parent_term, e.term.record_term),
'error': str(e)
})
return errors
@staticmethod
def join(t1, t2):
return '.'.join((t1, t2))
def __iter__(self):
import copy
last_parent_term = 'root'
# Remapping the default record value to another property name
for t in self._term_gen:
nt = copy.copy(t)
# Substitute synonyms
try:
syn_term = self.synonyms[self.join(t.parent_term, t.record_term)]
nt.parent_term, nt.record_term = Term.split_term_lower(syn_term);
except KeyError:
pass
if nt.parent_term == ELIDED_TERM and last_parent_term:
nt.parent_term = last_parent_term
elif not nt.is_arg_child:
last_parent_term = nt.record_term
# Remap integer record terms to names from the parameter map
try:
nt.record_term = str(self._param_map[int(t.record_term)])
except ValueError:
pass # the record term wasn't an integer
except IndexError:
pass # Probably no parameter map.
# Handle other special terms
if hasattr(self, 'handle_' + t.record_term.lower()):
getattr(self, 'handle_' + t.record_term.lower())(t)
if self._remove_special:
continue
nt.child_property_type = self._terms.get(self.join(nt.parent_term, nt.record_term), {}) \
.get('childpropertytype', 'any')
nt.term_value_name = self._terms.get(self.join(nt.parent_term, nt.record_term), {}) \
.get('termvaluename', '@value')
nt.valid = self.join(nt.parent_term.lower(), nt.record_term.lower()) in self._terms
yield nt
def handle_section(self, t):
self._param_map = [p.lower() if p else i for i, p in enumerate(t.args)]
def handle_declare(self, t):
"""Load the information in the file referenced by a Delare term, but don't
insert the terms in the file into the stream"""
from os.path import dirname, join
if t.value.startswith('http'):
fn = t.value.strip('/')
else:
fn = join(dirname(t.file_name), t.value.strip('/'))
ti = DeclareTermInterpreter(TermGenerator(CsvPathRowGenerator(fn)))
try:
self.import_declare_doc(ti.as_dict())
except IncludeError as e:
e.term = t
self.errors.append(e)
def import_declare_doc(self, d):
"""Import a declare cod that has been parsed and converted to a dict"""
if 'declaresection' in d:
for e in d['declaresection']:
if e:
self._sections[e['section_name'].lower()] = {
'args': [v for k, v in sorted((k, v) for k, v in e.items() if isinstance(k, int))],
'terms': list()
}
if 'declareterm' in d:
for e in d['declareterm']:
terms = self.join(*Term.split_term_lower(e['term_name']))
self._terms[terms] = e
if 'section' in e and e['section']:
                    if e['section'].lower() not in self._sections:
self._sections[e['section'].lower()] = {
'args': [],
'terms': list()
}
st = self._sections[e['section'].lower()]['terms']
                    if e['term_name'] not in st:
st.append(e['term_name'])
if 'declarevalueset' in d:
for e in d['declarevalueset']:
for k,v in self._terms.items():
if 'valueset' in v and e.get('name',None) == v['valueset']:
v['valueset'] = e['value']
class DeclareTermInterpreter(TermInterpreter):
"""
A version of the TermInterpreter specifically for parsing Declare documents. These documents
    require some special handling because they declare terms that are required for properly parsing
    Metatab files. These required declarations are pre-declared in this class.
"""
def __init__(self, term_gen, remove_special=False):
super(DeclareTermInterpreter, self).__init__(term_gen, remove_special)
# Configure the parser to output a more useful structure
self._terms.update({
NO_TERM + '.section': {'termvaluename': 'name'},
NO_TERM + '.synonym': {'termvaluename': 'term_name', 'childpropertytype': 'sequence'},
NO_TERM + '.declareterm': {'termvaluename': 'term_name', 'childpropertytype': 'sequence'},
NO_TERM + '.declaresection': {'termvaluename': 'section_name', 'childpropertytype': 'sequence'},
NO_TERM + '.declarevalueset': {'termvaluename': 'name', 'childpropertytype': 'sequence'},
'declarevalueset.value': {'termvaluename': 'value', 'childpropertytype': 'sequence'},
})
def link_terms(term_generator):
"""Return a heirarchy of records from a stream of terms
:param term_generator:
"""
root = Term('Root', None)
last_term_map = {NO_TERM: root}
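    # last_term_map tracks, for each record term name seen so far, the most recently
    # generated Term, so later terms can be attached to the correct parent.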
for term in term_generator:
try:
parent = last_term_map[term.parent_term]
except KeyError as e:
raise ParserError("Failed to find parent term in last term map: {} {} \nTerm: \n{}"
.format(e.__class__.__name__, e, term))
parent.add_child(term)
if not term.is_arg_child and term.parent_term != ELIDED_TERM:
# Recs created from term args don't go in the maps.
# Nor do record term records with elided parent terms
last_term_map[ELIDED_TERM] = term
last_term_map[term.record_term] = term
return root
def convert_to_dict(term):
"""Converts a record heirarchy to nested dicts.
:param term: Root term at which to start conversion
"""
if term.children:
d = {}
for c in term.children:
if c.child_property_type == 'scalar':
d[c.record_term] = convert_to_dict(c)
elif c.child_property_type == 'sequence':
try:
d[c.record_term].append(convert_to_dict(c))
except (KeyError, AttributeError):
                    # d[c.record_term] doesn't exist yet, so start a list
d[c.record_term] = [convert_to_dict(c)]
else:
try:
d[c.record_term].append(convert_to_dict(c))
except KeyError:
                    # d[c.record_term] doesn't exist yet, so add a scalar
d[c.record_term] = convert_to_dict(c)
except AttributeError as e:
                    # d[c.record_term] exists, but is a scalar, so convert it to a list
d[c.record_term] = [d[c.record_term]] + [convert_to_dict(c)]
if term.value:
d[term.term_value_name] = term.value
return d
else:
return term.value
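# Illustrative shape of the result (a description of the code above, not of any
# particular document): a 'scalar' child becomes a plain key/value pair, a
# 'sequence' child becomes a list of values, and a term with children of its own
# becomes a nested dict whose own value is stored under its term_value_name.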
| 30.573171
| 108
| 0.570004
| 15,030
| 0.856459
| 3,405
| 0.194028
| 1,476
| 0.084107
| 0
| 0
| 5,744
| 0.327312
|
3dcde3d12d8ff748623472b864c1c6d69f5873ea
| 1,462
|
py
|
Python
|
plugins/playbook/deploy_cluster/decapod_plugin_playbook_deploy_cluster/monitor_secret.py
|
angry-tony/ceph-lcm-decapod
|
535944d3ee384c3a7c4af82f74041b0a7792433f
|
[
"Apache-2.0"
] | 41
|
2016-11-03T16:40:17.000Z
|
2019-05-23T08:39:17.000Z
|
plugins/playbook/deploy_cluster/decapod_plugin_playbook_deploy_cluster/monitor_secret.py
|
Mirantis/ceph-lcm
|
fad9bad0b94f2ef608362953583b10a54a841d24
|
[
"Apache-2.0"
] | 30
|
2016-10-14T10:54:46.000Z
|
2017-10-20T15:58:01.000Z
|
plugins/playbook/deploy_cluster/decapod_plugin_playbook_deploy_cluster/monitor_secret.py
|
angry-tony/ceph-lcm-decapod
|
535944d3ee384c3a7c4af82f74041b0a7792433f
|
[
"Apache-2.0"
] | 28
|
2016-09-17T01:17:36.000Z
|
2019-07-05T03:32:54.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Specified KV model for storing monitor secrets."""
import base64
import os
import struct
import time
from decapod_common.models import kv
class MonitorSecret(kv.KV):
NAMESPACE = "monitor_secret"
@classmethod
def upsert(cls, key, value):
return super().upsert(cls.NAMESPACE, key, value)
@classmethod
def find(cls, keys):
return super().find(cls.NAMESPACE, keys)
@classmethod
def find_one(cls, key):
models = cls.find([key])
if models:
return models[0]
@classmethod
def remove(cls, keys):
return super().remove(cls.NAMESPACE, keys)
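# A minimal usage sketch (the key name below is hypothetical):
#
#   MonitorSecret.upsert("mon-secret", generate_monitor_secret())
#   secret = MonitorSecret.find_one("mon-secret")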
def generate_monitor_secret():
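    # Layout sketch (an assumption read off the struct format below, not verified
    # against Ceph sources): a little-endian header of (type, creation seconds,
    # nanoseconds, key length) followed by 16 random key bytes, base64-encoded
    # as a whole.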
key = os.urandom(16)
header = struct.pack("<hiih", 1, int(time.time()), 0, len(key))
secret = base64.b64encode(header + key)
secret = secret.decode("utf-8")
return secret
| 25.649123
| 69
| 0.685363
| 484
| 0.331053
| 0
| 0
| 399
| 0.272914
| 0
| 0
| 674
| 0.461012
|
3dce78da1f7ce43271310900e0dcc23b81e61a1a
| 1,135
|
py
|
Python
|
scripts/v1/03-collectAllModels.py
|
groppcw/CLDA
|
efd59d0dde38d6579366d195c3a0d4e6b1021af5
|
[
"Apache-2.0"
] | 6
|
2017-01-31T19:18:59.000Z
|
2020-04-21T17:20:56.000Z
|
scripts/v1/03-collectAllModels.py
|
groppcw/CLDA
|
efd59d0dde38d6579366d195c3a0d4e6b1021af5
|
[
"Apache-2.0"
] | null | null | null |
scripts/v1/03-collectAllModels.py
|
groppcw/CLDA
|
efd59d0dde38d6579366d195c3a0d4e6b1021af5
|
[
"Apache-2.0"
] | 3
|
2017-09-20T21:18:36.000Z
|
2020-07-29T10:00:30.000Z
|
# take a bunch of model_0 model_1 etc files and merge them alphabetically
from settings import *
# for each file, load the file into one giant list
# call sort on the list
# write this output somewhere else
for timestep in range(START_IDX,NUM_TIMES):
model = dict()
    # Add the full vocabulary to the dictionary
fdict = open("./input_data/word_ids.dat","r")
for line in fdict:
pieces = (line.replace('\t',' ')).split(' ',1)
key = (pieces[1].strip()).replace('\"','')
value = ''
for unused in range(LOCAL_TOPICS):
value = value + '0 '
value = value.strip() + '\n'
model[key] = value
fdict.close()
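    # At this point every vocabulary word maps to a row of LOCAL_TOPICS zero counts;
    # the loop below overwrites the rows of words that actually appear in the
    # partial PLDA model chunks.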
    # Replace words that actually appear
for num in range(PLDA_CHUNKS):
infile = open("./partial_results/time-"+str(timestep)+"-model_"+str(num),"r")
for line in infile:
pieces = (line.replace('\t',' ')).split(' ',1)
model[pieces[0]] = pieces[1]
infile.close()
outmodel = sorted(model) # gives sorted list of keys
outfile = open("./local_models/time-"+str(timestep)+".model","w")
for key in outmodel:
outfile.write(key + " " + model[key])
outfile.close()
| 26.395349
| 81
| 0.639648
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 423
| 0.372687
|
3dd07bf478788d856c11476ddb5329b455ea6168
| 5,428
|
py
|
Python
|
controller/hopfields_registration_server.py
|
SIDN/p4-scion
|
30fc42ac3672a2d862e5537f6990c87ef3c21860
|
[
"BSD-3-Clause"
] | 2
|
2021-05-25T16:17:25.000Z
|
2021-07-16T06:30:27.000Z
|
controller/hopfields_registration_server.py
|
SIDN/p4-scion
|
30fc42ac3672a2d862e5537f6990c87ef3c21860
|
[
"BSD-3-Clause"
] | null | null | null |
controller/hopfields_registration_server.py
|
SIDN/p4-scion
|
30fc42ac3672a2d862e5537f6990c87ef3c21860
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2021, SIDN Labs
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from concurrent import futures
import argparse
import grpc
import logging
from scion_grpc import hopfields_pb2_grpc, hopfields_pb2
from tofino import *
logger = logging.getLogger('scion_hopfields_registration_server')
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
class HopFieldsRegistrationServiceServicer(
hopfields_pb2_grpc.HopFieldsRegistrationServiceServicer):
    def __init__(self, grpc_addr='localhost:50052', client_id=0, p4_name="scion", dev=0):
self.dev_tgt = gc.Target(dev, pipe_id=0xFFFF)
self.interface = gc.ClientInterface(grpc_addr,
client_id=client_id,
device_id=0)
self.interface.bind_pipeline_config(p4_name)
self.bfrt_info = self.interface.bfrt_info_get(p4_name)
self.tbl_mac_verification = TblMacVerification(self.dev_tgt,
self.bfrt_info)
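        # The servicer keeps a single BF Runtime client session open; all hop field
        # updates below go through the MAC verification table wrapper created here.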
def HopFieldsRegistration(self, request, context):
try:
logger.info("Received hop field registration request")
logger.debug(request)
logger.info("Add hop field to switch tables")
logger.info("SegID: %x", request.segment_id)
logger.info("MAC: %s", request.hop_field.mac.hex())
self.tbl_mac_verification.entry_add_NoAction(
request.segment_id, request.timestamp,
request.hop_field.exp_time, request.hop_field.ingress,
request.hop_field.egress, bytearray(request.hop_field.mac))
# TODO include peer entries
logger.info("Done")
except gc.BfruntimeRpcException as e:
for (_, se) in e.sub_errors_get():
logger.error(se)
raise e
return hopfields_pb2.HopFieldsRegistrationResponse()
def RemoveExpiredHopFields(self, request, context):
try:
logger.info("Checking for expired hop fields")
self.tbl_mac_verification.remove_expired_entries()
logger.info("Done removing expired hop fields")
except gc.BfruntimeRpcException as e:
for (_, se) in e.sub_errors_get():
logger.error(se)
raise e
return hopfields_pb2.RemoveExpiredHopFieldsResponse()
def main():
parser = argparse.ArgumentParser(description="Service to register hop fields and add them to the MAC verification tables at the Tofino switch")
parser.add_argument(
"--grpc_address",
default="localhost:50052",
nargs="?",
help="GRPC address of the Tofino switch (default: localhost:50052)")
parser.add_argument(
"--program_name",
"-p",
default="scion",
nargs="?",
help="P4 program name (default: scion)")
parser.add_argument(
"--listen",
"-l",
default="[::]:10000",
nargs="?",
help="Address to listen on (default: [::]:10000)")
parser.add_argument(
"-d",
"--debug",
action="store_true",
help="Enable output of debug info")
args = parser.parse_args()
if args.debug:
logger.setLevel(logging.DEBUG)
logger.info("Starting hop fields registration service")
server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
servicer = HopFieldsRegistrationServiceServicer(grpc_addr=args.grpc_address, p4_name=args.program_name)
hopfields_pb2_grpc.add_HopFieldsRegistrationServiceServicer_to_server(
servicer, server)
server.add_insecure_port(args.listen)
try:
server.start()
logger.info("Running")
server.wait_for_termination()
except KeyboardInterrupt:
logger.debug("Received KeyboardInterrupt")
finally:
servicer.interface.tear_down_stream()
if __name__ == "__main__":
main()
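# Typical invocation (the addresses are just the defaults of the argparse options
# above, not a recommendation):
#   python hopfields_registration_server.py --grpc_address localhost:50052 -l "[::]:10000" -d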
| 40.507463
| 147
| 0.680545
| 2,045
| 0.37675
| 0
| 0
| 0
| 0
| 0
| 0
| 2,238
| 0.412307
|